| max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
|---|---|---|---|---|
kitsune/users/migrations/0027_profile_zendesk_id.py | erdal-pb/kitsune | 929 | 11109791 |
# Generated by Django 2.2.24 on 2021-09-02 06:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0026_profile_fxa_refresh_token'),
]
operations = [
migrations.AddField(
model_name='profile',
name='zendesk_id',
field=models.CharField(blank=True, default='', max_length=1024),
),
]
|
tests/unit/TestTypechecker.py | rainoftime/yinyang | 143 | 11109798 |
# MIT License
#
# Copyright (c) [2020 - 2021] The yinyang authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import unittest
import sys
import pathlib
sys.path.append("../../")
from yinyang.src.parsing.Ast import (
Assert,
)
from yinyang.src.parsing.Parse import (
parse_str, parse_file
)
from yinyang.src.parsing.Types import (
UNKNOWN,
BOOLEAN_TYPE,
INTEGER_TYPE,
STRING_TYPE,
)
from yinyang.src.parsing.Typechecker import Context, typecheck_expr, typecheck
def check_type(expr):
"""
    Recursively traverse an expression and check that neither it nor any of its
    subterms has unknown type.
"""
if expr.type == UNKNOWN:
raise Exception(expr.__str__() + " has unknown type")
if expr.var_binders:
for i, _ in enumerate(expr.var_binders):
check_type(expr.let_terms[i])
for e in expr.subterms:
check_type(e)
else:
if expr.subterms:
for e in expr.subterms:
check_type(e)
def oracle(formula):
for cmd in formula.commands:
if isinstance(cmd, Assert):
check_type(cmd.term)
return True
class TypecheckerTestCase(unittest.TestCase):
def test_core_theory(self):
formula_str = """
(declare-const y Int)
(declare-const v Bool)
(assert (= v (not (= y (- 1)))))
(check-sat)
"""
formula, globals = parse_str(formula_str)
ctxt = Context(globals, {})
equals = formula.commands[2].term
self.assertEqual(typecheck_expr(equals, ctxt), BOOLEAN_TYPE)
v = equals.subterms[0]
self.assertEqual(typecheck_expr(v, ctxt), BOOLEAN_TYPE)
not_op = equals.subterms[1]
self.assertEqual(typecheck_expr(not_op, ctxt), BOOLEAN_TYPE)
y = equals.subterms[1].subterms[0].subterms[0]
self.assertEqual(typecheck_expr(y, ctxt), INTEGER_TYPE)
minusone = equals.subterms[1].subterms[0].subterms[1]
self.assertEqual(typecheck_expr(minusone, ctxt), INTEGER_TYPE)
formula_str = """
(declare-const y Int)
(declare-const v Bool)
(assert (ite v false (= y (- 1))))
(check-sat)
"""
formula, globals = parse_str(formula_str)
ite = formula.commands[2].term
ctxt = Context(globals, {})
self.assertEqual(typecheck_expr(ite, ctxt), BOOLEAN_TYPE)
def test_error(self):
formula_str = """
(declare-const y Int)
(declare-const v Bool)
(assert (= v (not (= v (- 1)))))
(check-sat)
"""
formula, globals = parse_str(formula_str)
ctxt = Context(globals, {})
equals = formula.commands[2].term
        no_except = True
        try:
            typecheck_expr(equals, ctxt)
        except Exception:
            no_except = False
        self.assertFalse(no_except)
def test_typecheck_nary_int_ret(self):
formula_str = """
(declare-const v Int)
(declare-const w Int)
(assert (= v (+ v v w)))
(check-sat)
"""
formula, globals = parse_str(formula_str)
ctxt = Context(globals, {})
nary_plus = formula.commands[2].term.subterms[1]
self.assertEqual(typecheck_expr(nary_plus, ctxt), INTEGER_TYPE)
def test_typecheck_comp_ops(self):
formula_str = """
(declare-const v Int)
(declare-const w Int)
(assert (> v (+ v v w)))
(check-sat)
"""
formula, globals = parse_str(formula_str)
greater = formula.commands[2].term
ctxt = Context(globals, {})
self.assertEqual(typecheck_expr(greater, ctxt), BOOLEAN_TYPE)
def test_typecheck_string_ops(self):
formula_str = """
(assert (distinct (str.replace_all "B" "A" "") "B"))
(check-sat)
"""
formula, globals = parse_str(formula_str)
ctxt = Context(globals, {})
distinct = formula.commands[0].term
self.assertEqual(typecheck_expr(distinct, ctxt), BOOLEAN_TYPE)
str_repl = distinct.subterms[0]
self.assertEqual(typecheck_expr(str_repl, ctxt), STRING_TYPE)
formula_str = """
(declare-fun a () String)
(assert (str.contains (str.substr a 0 (- (str.len a) 1)) "A"))
(assert (= (ite (= (str.at a 0) "B") 1 0) (ite (= (str.at a 0) "A") 1 0) 0))
(assert (= (str.at a (- (str.len a) 1)) "B"))
(check-sat)
"""
formula, globals = parse_str(formula_str)
ctxt = Context(globals, {})
for i in range(1, 4):
typecheck_expr(formula.commands[i].term, ctxt)
def test_typechecking_formula_small(self):
formula_str = """
(declare-fun x () Int)
(declare-fun y () Int)
(declare-fun z () Int)
(assert (> (* (+ 3 x) (- y 2)) (/ 5 z)))
(check-sat)
"""
formula, glob = parse_str(formula_str)
typecheck(formula, glob)
self.assertEqual(oracle(formula), True)
def test_typechecking_formula_large(self):
script_path = pathlib.Path(__file__).parent.absolute()
formula, glob = parse_file(
str(script_path) + "/test.smt2", silent=False
)
typecheck(formula, glob)
        self.assertEqual(oracle(formula), True)
if __name__ == "__main__":
unittest.main()
|
卷积模块/Res2Net.py | 1044197988/TF.Keras-Commonly-used-models | 160 | 11109809 |
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import concatenate
from tensorflow.keras.layers import add
def Conv_Relu_BN(num_filters,kernel_size,batchnorm=True,strides=(1, 1),padding='same'):
def layer(input_tensor):
x = Conv2D(num_filters, kernel_size,padding=padding, kernel_initializer='he_normal',strides=strides)(input_tensor)
x = Activation('relu')(x)
if batchnorm:
x = BatchNormalization()(x)
return x
return layer
def slice_layer(x, slice_num, channel_input):
output_list = []
single_channel = channel_input//slice_num
for i in range(slice_num):
out = x[:, :, :, i*single_channel:(i+1)*single_channel]
output_list.append(out)
return output_list
def res2net_block(num_filters, slice_num):
def layer(input_tensor):
short_cut = input_tensor
        x = Conv_Relu_BN(num_filters=num_filters, kernel_size=(1, 1))(input_tensor)
slice_list = slice_layer(x, slice_num, x.shape[-1])
        side = Conv_Relu_BN(num_filters=num_filters//slice_num, kernel_size=(3, 3))(slice_list[1])
        z = concatenate([slice_list[0], side])  # concat of the first slice and the second (convolved) slice
for i in range(2, len(slice_list)):
            y = Conv_Relu_BN(num_filters=num_filters//slice_num, kernel_size=(3, 3))(add([side, slice_list[i]]))
side = y
z = concatenate([z, y])
        z = Conv_Relu_BN(num_filters=num_filters, kernel_size=(1, 1))(z)
out = concatenate([z, short_cut])
return out
return layer
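# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module): builds a single res2net block on a dummy input. The input shape
# below is invented; the only requirement taken from the code above is that
# num_filters must be divisible by slice_num so the channel slices line up.
if __name__ == "__main__":
    from tensorflow.keras.layers import Input
    from tensorflow.keras.models import Model

    inputs = Input(shape=(56, 56, 64))
    outputs = res2net_block(num_filters=64, slice_num=4)(inputs)
    Model(inputs, outputs).summary()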
|
tools/third_party/websockets/tests/test_http.py | meyerweb/wpt | 2,479 | 11109810 | import asyncio
import unittest
from websockets.exceptions import SecurityError
from websockets.http import *
from websockets.http import read_headers
from .utils import AsyncioTestCase
class HTTPAsyncTests(AsyncioTestCase):
def setUp(self):
super().setUp()
self.stream = asyncio.StreamReader(loop=self.loop)
async def test_read_request(self):
# Example from the protocol overview in RFC 6455
self.stream.feed_data(
b"GET /chat HTTP/1.1\r\n"
b"Host: server.example.com\r\n"
b"Upgrade: websocket\r\n"
b"Connection: Upgrade\r\n"
b"Sec-WebSocket-Key: <KEY>"
b"Origin: http://example.com\r\n"
b"Sec-WebSocket-Protocol: chat, superchat\r\n"
b"Sec-WebSocket-Version: 13\r\n"
b"\r\n"
)
path, headers = await read_request(self.stream)
self.assertEqual(path, "/chat")
self.assertEqual(headers["Upgrade"], "websocket")
async def test_read_request_empty(self):
self.stream.feed_eof()
with self.assertRaisesRegex(
EOFError, "connection closed while reading HTTP request line"
):
await read_request(self.stream)
async def test_read_request_invalid_request_line(self):
self.stream.feed_data(b"GET /\r\n\r\n")
with self.assertRaisesRegex(ValueError, "invalid HTTP request line: GET /"):
await read_request(self.stream)
async def test_read_request_unsupported_method(self):
self.stream.feed_data(b"OPTIONS * HTTP/1.1\r\n\r\n")
with self.assertRaisesRegex(ValueError, "unsupported HTTP method: OPTIONS"):
await read_request(self.stream)
async def test_read_request_unsupported_version(self):
self.stream.feed_data(b"GET /chat HTTP/1.0\r\n\r\n")
with self.assertRaisesRegex(ValueError, "unsupported HTTP version: HTTP/1.0"):
await read_request(self.stream)
async def test_read_request_invalid_header(self):
self.stream.feed_data(b"GET /chat HTTP/1.1\r\nOops\r\n")
with self.assertRaisesRegex(ValueError, "invalid HTTP header line: Oops"):
await read_request(self.stream)
async def test_read_response(self):
# Example from the protocol overview in RFC 6455
self.stream.feed_data(
b"HTTP/1.1 101 Switching Protocols\r\n"
b"Upgrade: websocket\r\n"
b"Connection: Upgrade\r\n"
b"Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\r\n"
b"Sec-WebSocket-Protocol: chat\r\n"
b"\r\n"
)
status_code, reason, headers = await read_response(self.stream)
self.assertEqual(status_code, 101)
self.assertEqual(reason, "Switching Protocols")
self.assertEqual(headers["Upgrade"], "websocket")
async def test_read_response_empty(self):
self.stream.feed_eof()
with self.assertRaisesRegex(
EOFError, "connection closed while reading HTTP status line"
):
await read_response(self.stream)
    async def test_read_response_invalid_status_line(self):
self.stream.feed_data(b"Hello!\r\n")
with self.assertRaisesRegex(ValueError, "invalid HTTP status line: Hello!"):
await read_response(self.stream)
async def test_read_response_unsupported_version(self):
self.stream.feed_data(b"HTTP/1.0 400 Bad Request\r\n\r\n")
with self.assertRaisesRegex(ValueError, "unsupported HTTP version: HTTP/1.0"):
await read_response(self.stream)
async def test_read_response_invalid_status(self):
self.stream.feed_data(b"HTTP/1.1 OMG WTF\r\n\r\n")
with self.assertRaisesRegex(ValueError, "invalid HTTP status code: OMG"):
await read_response(self.stream)
async def test_read_response_unsupported_status(self):
self.stream.feed_data(b"HTTP/1.1 007 My name is Bond\r\n\r\n")
with self.assertRaisesRegex(ValueError, "unsupported HTTP status code: 007"):
await read_response(self.stream)
async def test_read_response_invalid_reason(self):
self.stream.feed_data(b"HTTP/1.1 200 \x7f\r\n\r\n")
with self.assertRaisesRegex(ValueError, "invalid HTTP reason phrase: \\x7f"):
await read_response(self.stream)
async def test_read_response_invalid_header(self):
self.stream.feed_data(b"HTTP/1.1 500 Internal Server Error\r\nOops\r\n")
with self.assertRaisesRegex(ValueError, "invalid HTTP header line: Oops"):
await read_response(self.stream)
async def test_header_name(self):
self.stream.feed_data(b"foo bar: baz qux\r\n\r\n")
with self.assertRaises(ValueError):
await read_headers(self.stream)
async def test_header_value(self):
self.stream.feed_data(b"foo: \x00\x00\x0f\r\n\r\n")
with self.assertRaises(ValueError):
await read_headers(self.stream)
async def test_headers_limit(self):
self.stream.feed_data(b"foo: bar\r\n" * 257 + b"\r\n")
with self.assertRaises(SecurityError):
await read_headers(self.stream)
async def test_line_limit(self):
# Header line contains 5 + 4090 + 2 = 4097 bytes.
self.stream.feed_data(b"foo: " + b"a" * 4090 + b"\r\n\r\n")
with self.assertRaises(SecurityError):
await read_headers(self.stream)
async def test_line_ending(self):
self.stream.feed_data(b"foo: bar\n\n")
with self.assertRaises(EOFError):
await read_headers(self.stream)
class HeadersTests(unittest.TestCase):
def setUp(self):
self.headers = Headers([("Connection", "Upgrade"), ("Server", USER_AGENT)])
def test_str(self):
self.assertEqual(
str(self.headers), f"Connection: Upgrade\r\nServer: {USER_AGENT}\r\n\r\n"
)
def test_repr(self):
self.assertEqual(
repr(self.headers),
f"Headers([('Connection', 'Upgrade'), " f"('Server', '{USER_AGENT}')])",
)
def test_multiple_values_error_str(self):
self.assertEqual(str(MultipleValuesError("Connection")), "'Connection'")
self.assertEqual(str(MultipleValuesError()), "")
def test_contains(self):
self.assertIn("Server", self.headers)
def test_contains_case_insensitive(self):
self.assertIn("server", self.headers)
def test_contains_not_found(self):
self.assertNotIn("Date", self.headers)
def test_contains_non_string_key(self):
self.assertNotIn(42, self.headers)
def test_iter(self):
self.assertEqual(set(iter(self.headers)), {"connection", "server"})
def test_len(self):
self.assertEqual(len(self.headers), 2)
def test_getitem(self):
self.assertEqual(self.headers["Server"], USER_AGENT)
def test_getitem_case_insensitive(self):
self.assertEqual(self.headers["server"], USER_AGENT)
def test_getitem_key_error(self):
with self.assertRaises(KeyError):
self.headers["Upgrade"]
def test_getitem_multiple_values_error(self):
self.headers["Server"] = "2"
with self.assertRaises(MultipleValuesError):
self.headers["Server"]
def test_setitem(self):
self.headers["Upgrade"] = "websocket"
self.assertEqual(self.headers["Upgrade"], "websocket")
def test_setitem_case_insensitive(self):
self.headers["upgrade"] = "websocket"
self.assertEqual(self.headers["Upgrade"], "websocket")
def test_setitem_multiple_values(self):
self.headers["Connection"] = "close"
with self.assertRaises(MultipleValuesError):
self.headers["Connection"]
def test_delitem(self):
del self.headers["Connection"]
with self.assertRaises(KeyError):
self.headers["Connection"]
def test_delitem_case_insensitive(self):
del self.headers["connection"]
with self.assertRaises(KeyError):
self.headers["Connection"]
def test_delitem_multiple_values(self):
self.headers["Connection"] = "close"
del self.headers["Connection"]
with self.assertRaises(KeyError):
self.headers["Connection"]
def test_eq(self):
other_headers = self.headers.copy()
self.assertEqual(self.headers, other_headers)
def test_eq_not_equal(self):
self.assertNotEqual(self.headers, [])
def test_clear(self):
self.headers.clear()
self.assertFalse(self.headers)
self.assertEqual(self.headers, Headers())
def test_get_all(self):
self.assertEqual(self.headers.get_all("Connection"), ["Upgrade"])
def test_get_all_case_insensitive(self):
self.assertEqual(self.headers.get_all("connection"), ["Upgrade"])
def test_get_all_no_values(self):
self.assertEqual(self.headers.get_all("Upgrade"), [])
def test_get_all_multiple_values(self):
self.headers["Connection"] = "close"
self.assertEqual(self.headers.get_all("Connection"), ["Upgrade", "close"])
def test_raw_items(self):
self.assertEqual(
list(self.headers.raw_items()),
[("Connection", "Upgrade"), ("Server", USER_AGENT)],
)
|
tests/not_tests.py | TakenBrandi/python-precisely | 238 | 11109813 | <reponame>TakenBrandi/python-precisely
from nose.tools import istest, assert_equal
from precisely import equal_to, not_
from precisely.results import matched, unmatched
@istest
def matches_when_negated_matcher_does_not_match():
assert_equal(matched(), not_(equal_to(1)).match(2))
@istest
def does_not_match_when_negated_matcher_matches():
assert_equal(unmatched("matched: 1"), not_(equal_to(1)).match(1))
@istest
def description_includes_description_of_negated_matcher():
assert_equal("not: 'hello'", not_(equal_to("hello")).describe())
|
test/core/test_serde_flat.py | Xiaoxiong-Liu/gluon-ts | 2,648 | 11109818 |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from gluonts.core import serde
from gluonts.core.component import equals
from gluonts.model.deepar import DeepAREstimator
def test_nested_params():
deepar = DeepAREstimator(prediction_length=7, freq="D")
assert equals(deepar, serde.flat.decode(serde.flat.encode(deepar)))
deepar2 = serde.flat.clone(deepar, {"trainer.epochs": 999})
assert deepar2.trainer.epochs == 999
|
skills_ml/job_postings/aggregate/pandas.py | bhagyaramgpo/skills-ml | 147 | 11109824 | """Aggregation functions that can be used with pandas dataframes"""
from collections import Counter
class AggregateFunction(object):
"""Wrap a function with an attribute that indicates the return type name"""
def __init__(self, returns):
self.returns = returns
def __call__(self, function, *params, **kwparams):
class DecoratedFunction(object):
def __init__(self, returns, function):
self.returns = returns
self.function = function
self.__name__ = function.__name__
self.__qualname__ = function.__qualname__
self.__doc__ = function.__doc__
def __call__(self, *params, **kwparams):
return self.function(*params, **kwparams)
return DecoratedFunction(self.returns, function)
@AggregateFunction(returns='list')
def n_most_common(n, iterable):
return [mc[0] for mc in Counter(iterable).most_common(n)]
@AggregateFunction(returns='list')
def listy_n_most_common(n, iterable):
"""Expects each item to be iterable, each sub-item to be addable"""
bc = Counter()
for i in iterable:
bc += Counter(i)
if bc:
return [mc[0] for mc in bc.most_common(n)]
else:
return []
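# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module): one way these aggregators might be applied with pandas. The
# DataFrame columns and values below are invented.
if __name__ == "__main__":
    import pandas as pd

    postings = pd.DataFrame({
        "job_id": [1, 1, 1, 2, 2],
        "skill": ["python", "sql", "python", "excel", "sql"],
    })
    # n_most_common(2, iterable) -> the two most frequent skills per job posting.
    print(postings.groupby("job_id")["skill"].apply(lambda s: n_most_common(2, s)))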
|
cinder/volume/drivers/dell_emc/unity/replication.py | lightsey/cinder | 571 | 11109842 |
# Copyright (c) 2016 - 2019 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import exception
from cinder.volume.drivers.dell_emc.unity import adapter as unity_adapter
LOG = logging.getLogger(__name__)
class ReplicationDevice(object):
def __init__(self, conf_dict, driver):
"""Constructs a replication device from driver configuration.
:param conf_dict: the conf of one replication device entry. It's a
dict with content like
`{backend_id: vendor-id-1, key-1: val-1, ...}`
:param driver: the backend driver.
"""
driver_conf = driver.configuration
self.backend_id = conf_dict.get('backend_id')
self.san_ip = conf_dict.get('san_ip', None)
if (self.backend_id is None or not self.backend_id.strip()
or self.san_ip is None or not self.san_ip.strip()):
            LOG.error('No backend_id or san_ip in %(conf)s of '
                      '%(group)s.replication_device',
                      {'conf': conf_dict, 'group': driver_conf.config_group})
raise exception.InvalidConfigurationValue(
option='%s.replication_device' % driver_conf.config_group,
value=driver_conf.replication_device)
# Use the driver settings if not configured in replication_device.
self.san_login = conf_dict.get('san_login', driver_conf.san_login)
self.san_password = conf_dict.get('san_password',
driver_conf.san_password)
# Max time (in minute) out of sync is a setting for replication.
# It means maximum time to wait before syncing the source and
# destination. `0` means it is a sync replication. Default is `60`.
try:
self.max_time_out_of_sync = int(
conf_dict.get('max_time_out_of_sync', 60))
except ValueError:
            LOG.error('max_time_out_of_sync is not a number, %(conf)s of '
                      '%(group)s.replication_device',
                      {'conf': conf_dict, 'group': driver_conf.config_group})
raise exception.InvalidConfigurationValue(
option='%s.replication_device' % driver_conf.config_group,
value=driver_conf.replication_device)
if self.max_time_out_of_sync < 0:
            LOG.error('max_time_out_of_sync should not be less than 0, '
                      '%(conf)s of %(group)s.replication_device',
                      {'conf': conf_dict, 'group': driver_conf.config_group})
raise exception.InvalidConfigurationValue(
option='%s.replication_device' % driver_conf.config_group,
value=driver_conf.replication_device)
self.driver = driver
self._adapter = init_adapter(driver.get_version(), driver.protocol)
self._dst_pool = None
self._serial_number = None
@property
def device_conf(self):
conf = self.driver.configuration
conf.san_ip = self.san_ip
conf.san_login = self.san_login
conf.san_password = self.san_password
return conf
def setup_adapter(self):
if not self._adapter.is_setup:
try:
self._adapter.do_setup(self.driver, self.device_conf)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.error('replication_device configured but its adapter '
'setup failed: %s', self.backend_id)
@property
def adapter(self):
self.setup_adapter()
return self._adapter
@property
def destination_pool(self):
if self._dst_pool is None:
LOG.debug('getting destination pool for replication device: %s',
self.backend_id)
pools_dict = self.adapter.storage_pools_map
pool_name = random.choice(list(pools_dict))
LOG.debug('got destination pool for replication device: %s, '
'pool: %s', self.backend_id, pool_name)
self._dst_pool = pools_dict[pool_name]
return self._dst_pool
def init_adapter(version, protocol):
if protocol == unity_adapter.PROTOCOL_FC:
return unity_adapter.FCAdapter(version)
return unity_adapter.ISCSIAdapter(version)
DEFAULT_ADAPTER_NAME = 'default'
class ReplicationManager(object):
def __init__(self):
self.is_replication_configured = False
self.default_conf = None
self.default_device = None
self.replication_devices = None
self.active_backend_id = None
def do_setup(self, driver):
self.default_conf = driver.configuration
self.replication_devices = self.parse_rep_device(driver)
if DEFAULT_ADAPTER_NAME in self.replication_devices:
LOG.error('backend_id cannot be `default`')
raise exception.InvalidConfigurationValue(
option=('%s.replication_device'
% self.default_conf.config_group),
value=self.default_conf.replication_device)
# Only support one replication device currently.
if len(self.replication_devices) > 1:
LOG.error('At most one replication_device is supported')
raise exception.InvalidConfigurationValue(
option=('%s.replication_device'
% self.default_conf.config_group),
value=self.default_conf.replication_device)
self.is_replication_configured = len(self.replication_devices) >= 1
self.active_backend_id = driver.active_backend_id
if self.active_backend_id:
if self.active_backend_id not in self.replication_devices:
LOG.error('Service starts under failed-over status, '
'active_backend_id: %s is not empty, but not in '
'replication_device.', self.active_backend_id)
raise exception.InvalidConfigurationValue(
option=('%s.replication_device'
% self.default_conf.config_group),
value=self.default_conf.replication_device)
else:
self.active_backend_id = DEFAULT_ADAPTER_NAME
default_device_conf = {
'backend_id': DEFAULT_ADAPTER_NAME,
'san_ip': driver.configuration.san_ip
}
self.default_device = ReplicationDevice(default_device_conf, driver)
if not self.is_service_failed_over:
# If service doesn't fail over, setup the adapter.
# Otherwise, the primary backend could be down, adapter setup could
# fail.
self.default_device.setup_adapter()
if self.is_replication_configured:
# If replication_device is configured, consider the replication is
# enabled and check the same configuration is valid for secondary
# backend or not.
self.setup_rep_adapters()
@property
def is_service_failed_over(self):
return (self.active_backend_id is not None
and self.active_backend_id != DEFAULT_ADAPTER_NAME)
def setup_rep_adapters(self):
for backend_id, rep_device in self.replication_devices.items():
rep_device.setup_adapter()
@property
def active_adapter(self):
if self.is_service_failed_over:
return self.replication_devices[self.active_backend_id].adapter
else:
self.active_backend_id = DEFAULT_ADAPTER_NAME
return self.default_device.adapter
@staticmethod
def parse_rep_device(driver):
driver_conf = driver.configuration
rep_devices = {}
if not driver_conf.replication_device:
return rep_devices
for device_conf in driver_conf.replication_device:
rep_device = ReplicationDevice(device_conf, driver)
rep_devices[rep_device.backend_id] = rep_device
return rep_devices
def failover_service(self, backend_id):
self.active_backend_id = backend_id
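# ---------------------------------------------------------------------------
# Hedged configuration sketch (added for illustration, not part of the
# original module): a replication_device entry, as consumed by
# ReplicationDevice.__init__, is a dict shaped roughly like the literal
# below. Every value here is invented; only the key names and fallback
# behaviour are taken from the code above.
_EXAMPLE_REPLICATION_DEVICE_ENTRY = {
    'backend_id': 'secondary_unity',   # required and must be non-empty
    'san_ip': '192.168.1.50',          # required and must be non-empty
    'san_login': 'admin',              # optional, falls back to the driver's san_login
    'san_password': 'secret',          # optional, falls back to the driver's san_password
    'max_time_out_of_sync': 60,        # optional, minutes; 0 means synchronous replication
}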
|
src/plugins/python/templates/tokenizer.template.py | ruby-on-rust/syntax | 409 | 11109858 |
##
# Generic tokenizer used by the parser in the Syntax tool.
#
# https://www.npmjs.com/package/syntax-cli
#
# See `--custom-tokenizer` to skip this generation, and use a custom one.
##
import re as _syntax_tool_re
{{{LEX_RULE_HANDLERS}}}
_lex_rules = {{{LEX_RULES}}}
_lex_rules_by_conditions = {{{LEX_RULES_BY_START_CONDITIONS}}}
EOF_TOKEN = {
'type': EOF,
'value': ''
}
class Tokenizer(object):
_string = None
_cursor = 0
# Line-based location tracking.
_current_line = 1
_current_column = 0
_current_line_begin_offset = 0
# Location data of a matched token.
token_start_offset = 0
token_end_offset = 0
token_start_line = 0
token_end_line = 0
token_start_column = 0
token_end_column = 0
_tokens_queue = []
_states = []
def __init__(self, string=None):
if not string is None:
self.init_string(string)
def init_string(self, string):
self._string = string
self._string_len = len(string)
self._cursor = 0
self._tokens_queue = []
self._states = ['INITIAL']
self._current_line = 1
self._current_column = 0
self._current_line_begin_offset = 0
# Location data of a matched token.
self.token_start_offset = 0
self.token_end_offset = 0
self.token_start_line = 0
self.token_end_line = 0
self.token_start_column = 0
self.token_end_column = 0
# --------------------------------------------
# States.
def get_current_state(self):
return self._states[-1]
def push_state(self, state):
self._states.append(state)
# Alias for `push_state`.
def begin(self, state):
self.push_state(state)
def pop_state(self):
if len(self._states) > 1:
return self._states.pop()
return self._states[0]
def get_next_token(self):
global __, yytext, yyleng
if len(self._tokens_queue) > 0:
return self._to_token(self._tokens_queue.pop(0))
if not self.has_more_tokens():
return EOF_TOKEN
string = self._string[self._cursor:]
lex_rules_for_state = _lex_rules_by_conditions[self.get_current_state()]
for lex_rule_index in lex_rules_for_state:
lex_rule = _lex_rules[lex_rule_index]
matched = self._match(string, lex_rule[0])
# Manual handling of EOF token (the end of string). Return it
# as `EOF` symbol.
if string == '' and matched == '':
self._cursor += 1
if matched != None:
yytext = matched
yyleng = len(yytext)
token = lex_rule[1](self)
if token is None:
return self.get_next_token()
if isinstance(token, list):
tokens_to_queue = token[1:]
token = token[0]
if len(tokens_to_queue) > 0:
self._tokens_queue.extend(tokens_to_queue)
return self._to_token(token, yytext)
if self.is_eof():
self._cursor += 1
return EOF_TOKEN
self.throw_unexpected_token(
string[0],
self._current_line,
self._current_column
)
def _capture_location(self, matched):
nl_re = _syntax_tool_re.compile("\n")
# Absolute offsets.
self.token_start_offset = self._cursor
# Line-based locations, start.
self.token_start_line = self._current_line
self.token_start_column = self.token_start_offset - self._current_line_begin_offset
# Extract `\n` in the matched token.
for nl_match in nl_re.finditer(matched):
self._current_line += 1
self._current_line_begin_offset = self.token_start_offset + nl_match.start() + 1
self.token_end_offset = self._cursor + len(matched)
# Line-based locations, end.
self.token_end_line = self._current_line
self.token_end_column = self._current_column = (self.token_end_offset - self._current_line_begin_offset)
def _to_token(self, token_type, yytext=''):
return {
'type': token_type,
'value': yytext,
'start_offset': self.token_start_offset,
'end_offset': self.token_end_offset,
'start_line': self.token_start_line,
'end_line': self.token_end_line,
'start_column': self.token_start_column,
'end_column': self.token_end_column,
}
##
# Throws default "Unexpected token" exception, showing the actual
# line from the source, pointing with the ^ marker to the bad token.
# In addition, shows `line:column` location.
#
def throw_unexpected_token(self, symbol, line, column):
line_source = self._string.split('\n')[line - 1]
        pad = ' ' * column
line_data = '\n\n' + line_source + '\n' + pad + '^\n'
raise Exception(
line_data + 'Unexpected token: "' + str(symbol) + '" at ' +
str(line) + ':' + str(column) + '.'
)
def is_eof(self):
return self._cursor == self._string_len
def has_more_tokens(self):
return self._cursor <= self._string_len
def _match(self, string, regexp):
matched = _syntax_tool_re.search(regexp, string)
if matched != None:
self._capture_location(matched.group(0))
self._cursor += matched.end()
return matched.group(0)
return None
_tokenizer = Tokenizer()
|
model/layers/deform_conv.py | destinyls/MonoFlex | 131 | 11109921 | import torch.nn.functional as F
from torch import nn
from .dcn_v2 import DCN
class DeformConv(nn.Module):
def __init__(self,
in_channel,
out_channel,
norm_func,
kernel_size=3,
stride=1,
padding=0):
super(DeformConv, self).__init__()
self.norm = norm_func(out_channel)
self.relu = nn.ReLU(inplace=True)
self.deform_conv = DCN(in_channels=in_channel,
out_channels=out_channel,
kernel_size=(kernel_size, kernel_size),
stride=stride,
padding=padding,
dilation=1,
deformable_groups=1)
def forward(self, x):
x = self.deform_conv(x)
x = self.norm(x)
x = self.relu(x)
return x
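# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module): wraps DeformConv around torch.nn.BatchNorm2d on a dummy tensor.
# The shapes are invented, and the compiled dcn_v2 extension must be
# available for this to run.
if __name__ == "__main__":
    import torch

    conv = DeformConv(in_channel=64, out_channel=128, norm_func=nn.BatchNorm2d,
                      kernel_size=3, stride=1, padding=1)
    dummy = torch.randn(2, 64, 32, 32)
    print(conv(dummy).shape)  # expected: torch.Size([2, 128, 32, 32])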
|
transparency/commands/settings.py | nevermorea/SublimeTextTrans | 286 | 11109931 | import sublime_plugin
import sublime
import os
from os.path import dirname
ST2 = int(sublime.version()) < 3000
if not ST2:
PLUGIN_DIR = dirname(dirname(dirname(os.path.abspath(__file__))))
else:
_st_pkgs_dir = sublime.packages_path()
_cur_file_abspath = os.path.abspath(__file__)
if _st_pkgs_dir not in _cur_file_abspath:
for p in os.listdir(_st_pkgs_dir):
link_path = _st_pkgs_dir + os.sep + p
if os.path.realpath(link_path) in _cur_file_abspath:
PLUGIN_DIR = link_path
break
else:
PLUGIN_DIR = dirname(dirname(dirname(os.path.abspath(__file__))))
class TransparencyOpenPluginDefaultSettingsFile(sublime_plugin.WindowCommand):
def run(self):
default_plugin_settings_path = os.path.join(PLUGIN_DIR, "SublimeTextTrans.sublime-settings")
sublime.active_window().open_file(default_plugin_settings_path)
class TransparencyOpenHelpFile(sublime_plugin.WindowCommand):
def run(self):
help_file_path = os.path.join(PLUGIN_DIR, "messages/install.txt")
sublime.active_window().open_file(help_file_path) |
pytweet/tweet.py | TheFarGG/PyTweet | 614 | 11109941 | from __future__ import annotations
import datetime
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from .attachments import Poll, Geo, File, Media
from .enums import ReplySetting
from .constants import (
TWEET_EXPANSION,
TWEET_FIELD,
USER_FIELD,
PINNED_TWEET_EXPANSION,
MEDIA_FIELD,
PLACE_FIELD,
POLL_FIELD,
)
from .relations import RelationHide, RelationLike, RelationRetweet, RelationDelete
from .user import User
from .utils import time_parse_todt, convert
from .message import Message
from .paginations import UserPagination, TweetPagination
from .dataclass import (
Embed,
EmbedImage,
PublicTweetMetrics,
NonPublicTweetMetrics,
OrganicTweetMetrics,
PromotedTweetMetrics,
)
if TYPE_CHECKING:
from .http import HTTPClient
from .type import ID
__all__ = ("Tweet",)
class Tweet(Message):
"""Represents a tweet message from Twitter.
    A Tweet is any message posted to Twitter which may contain photos, videos, links, and text. This class inherits :class:`Message`.
.. describe:: x == y
Check if one tweet id is equal to another.
.. describe:: x != y
Check if one tweet id is not equal to another.
.. describe:: str(x)
Get the Tweet's text.
.. versionadded:: 1.0.0
"""
__slots__ = (
"__original_payload",
"_payload",
"_includes",
"tweet_metrics",
"http_client",
"deleted_timestamp",
"_public_metrics",
"_non_public_metrics",
"_organic_metrics",
"_promoted_metrics",
)
def __init__(
self,
data: Dict[str, Any],
*,
deleted_timestamp: Optional[int] = None,
http_client: Optional[HTTPClient] = None,
) -> None:
self.__original_payload = data
self._payload = data.get("data") or data
self._includes = self.__original_payload.get("includes")
self._referenced_tweets = self._payload.get("referenced_tweets")
self._entities = self._payload.get("entities")
self.http_client = http_client
self.deleted_timestamp = deleted_timestamp
self._public_metrics = PublicTweetMetrics(
**self._payload.get("public_metrics", None) or self.__original_payload.get("public_metrics")
)
self._non_public_metrics = self._payload.get("non_public_metrics", None) or self.__original_payload.get(
"non_public_metrics"
)
self._organic_metrics = self._payload.get("organic_metrics", None) or self.__original_payload.get(
"organic_metrics"
)
self._promoted_metrics = self._payload.get("promoted_metrics", None) or self.__original_payload.get(
"promoted_metrics"
)
if self._non_public_metrics:
non_public_metrics = self.http_client.payload_parser.parse_metric_data(self._non_public_metrics)
self._non_public_metrics = NonPublicTweetMetrics(**non_public_metrics)
if self._organic_metrics:
organic_metrics = self.http_client.payload_parser.parse_metric_data(self._organic_metrics)
self._organic_metrics = OrganicTweetMetrics(**organic_metrics)
if self._promoted_metrics:
promoted_metrics = self.http_client.payload_parser.parse_metric_data(self._promoted_metrics)
self._promoted_metrics = PromotedTweetMetrics(**promoted_metrics)
if self._entities and self._entities.get("urls"):
data = []
for url in self._entities["urls"]:
url = self.http_client.payload_parser.parse_embed_data(url)
data.append(url)
self._embeds = data
else:
self._embeds = None
super().__init__(self._payload.get("text"), self._payload.get("id"), 1)
def __repr__(self) -> str:
return "Tweet(text={0.text} id={0.id} author={0.author!r})".format(self)
@property
def author(self) -> Optional[User]:
"""Optional[:class:`User`]: Returns a user (object) who posted the tweet.
.. versionadded: 1.0.0
"""
if self._includes and self._includes.get("users"):
return User(self._includes.get("users")[0], http_client=self.http_client)
return None
@property
def possibly_sensitive(self) -> bool:
""":class:`bool`: Returns True if the tweet is possible sensitive to some users, else False.
.. versionadded: 1.0.0
"""
return self._payload.get("possibly_sensitive")
@property
def sensitive(self) -> bool:
""":class:`bool`: An alias to :meth:`Tweet.possibly_sensitive`.
.. versionadded: 1.5.0
"""
return self.possibly_sensitive
@property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: Returns a :class:`datetime.datetime` object when the tweet was created.
.. versionadded: 1.0.0
"""
if self._payload.get("timestamp", None):
return datetime.datetime.fromtimestamp(int(self._payload.get("timestamp", None)) / 1000)
return time_parse_todt(self._payload.get("created_at"))
@property
def deleted_at(self) -> Optional[datetime.datetime]:
"""Optional[:class:`datetime.datetime`]: Returns a :class:`datetime.datetime` object when the tweet was deleted. Returns None when the tweet is not deleted.
.. note::
            This property only returns a :class:`datetime.datetime` object for tweet objects received through the `on_tweet_delete` event.
.. versionadded: 1.5.0
"""
if not self.deleted_timestamp:
return None
return datetime.datetime.fromtimestamp(self.deleted_timestamp / 1000)
@property
def source(self) -> str:
""":class:`str`: Returns the source of the tweet. e.g if you post a tweet from a website, the source is gonna be 'Twitter Web App'
.. versionadded: 1.0.0
"""
return self._payload.get("source")
@property
def reply_setting(self) -> ReplySetting:
""":class:`ReplySetting`: Returns a :class:`ReplySetting` object with the tweet's reply setting. If everyone can reply, this method return :class:`ReplySetting.everyone`.
.. versionadded: 1.3.5
"""
return ReplySetting(self._payload.get("reply_settings"))
@property
def raw_reply_setting(self) -> str:
""":class:`str`: Returns the raw reply setting value. If everyone can replied, this method return 'Everyone'.
.. versionadded: 1.0.0
"""
return self._payload.get("reply_settings")
@property
def lang(self) -> str:
""":class:`str`: Returns the tweet's lang, if its english it return en.
.. versionadded: 1.0.0
"""
return self._payload.get("lang")
@property
def conversation_id(self) -> Optional[int]:
"""Optional[:class:`int`]: All replies are bind to the original tweet, this property returns the tweet's id if the tweet is a reply tweet else it returns None.
.. versionadded: 1.0.0
"""
try:
return int(self._payload.get("conversation_id"))
except ValueError:
return None
@property
def url(self) -> Optional[str]:
"""Optional[:class:`str`]: Get the tweet's url.
.. versionadded:: 1.1.0
.. versionchanged:: 1.5.0
Returns None if the author is invalid or the tweet doesn't have id.
"""
try:
return f"https://twitter.com/{self.author.username}/status/{self.id}"
except TypeError:
return None
@property
def mentions(self) -> Optional[List[User]]:
"""Optional[List[:class:`User`]]: Returns a list of :class:`User` objects that were mentioned in the tweet or an empty list / `[]` if no users were mentioned.
.. versionadded:: 1.1.3
.. versionchanged:: 1.5.0
Now returns a list of :class:`User` objects rather then a list of :class:`str` objects.
"""
users = []
for user in self._includes.get("users", {}):
for mention in self._entities.get("mentions", {}):
if user["id"] == mention["id"]:
users.append(User(user, http_client=self.http_client))
return users
@property
def poll(self) -> Optional[Poll]:
""":class:`Poll`: Returns a Poll object with the tweet's poll.
.. versionadded:: 1.1.0
"""
if self._includes:
if self._includes.get("polls"):
data = self._includes.get("polls")[0]
poll = Poll(
duration=data.get("duration_minutes"),
id=data.get("id"),
voting_status=data.get("voting_status"),
end_date=data.get("end_datetime"),
)
for option in data.get("options"):
poll.add_option(**option)
return poll
return None
@property
def medias(self) -> Optional[List[Media]]:
"""Optional[List[:class:`Media`]]: Returns a list of media(s) in a tweet.
.. versionadded:: 1.1.0
"""
if self._includes and self._includes.get("media"):
return [Media(img, http_client=self.http_client) for img in self._includes.get("media")]
return None
@property
def reference_user(self) -> Optional[User]:
"""Optional[:class:`User`]: Returns the referenced user. This can means:
The tweet is a retweet, which means the method returns the retweeted tweet's author.
The tweet is a quote tweet(retweet with comments), which means the method returns the quoted tweet's author.
The tweet is a reply tweet, which means the method returns the replied tweet's author.
.. versionadded:: 1.5.0
"""
if not self._includes or not self._includes.get("users") or not self._referenced_tweets:
return None
type = self._referenced_tweets[0].get("type", " ")
for user in self._includes["users"]:
if type == "replied_to" and user["id"] == self._payload.get("in_reply_to_user_id", 0):
                # If the tweet is a reply, return the user whose id matches the
                # 'in_reply_to_user_id' field.
return User(user, http_client=self.http_client)
elif type == "quoted":
                # If the tweet is a quote tweet, return the user whose id appears in an
                # embedded url. Every quote tweet has at least one url: the quoted tweet's
                # url, which contains the quoted author's id and the quoted tweet's id.
for embed in self.embeds:
if (
embed.expanded_url.startswith("https://twitter.com/")
and embed.expanded_url.split("/")[3] == user["id"]
):
return User(user, http_client=self.http_client)
elif type == "retweeted":
                # If the tweet is a retweet, return the mentioned user that appears in the
                # retweet prefix, which has the format: 'RT @Mention: {retweeted tweet's content}'.
                # Only the characters up to and including the colon (i.e. 'RT @Mention:') are checked.
for mentioned_user in self.mentions:
if self.text.startswith(f"RT {mentioned_user.mention}:"):
return mentioned_user
return None
@property
def reference_tweet(self) -> Optional[Tweet]:
"""Optional[:class:`Tweet`]: Returns the tweet's parent tweet or the referenced tweet. This can mean the parent tweet of the requested tweet is:
A retweeted tweet (The child Tweet is a Retweet),
A quoted tweet (The child Tweet is a Retweet with comment, also known as Quoted Tweet),
A replied tweet (The child Tweet is a reply tweet).
.. versionadded:: 1.5.0
"""
tweets = self._includes.get("tweets")
if not self._includes or not tweets or not self._referenced_tweets:
return None
for tweet in tweets:
if tweet["id"] == self._referenced_tweets[0]["id"]:
self.http_client.payload_parser.insert_object_author(tweet, self.reference_user)
return Tweet(tweet, http_client=self.http_client)
return None
@property
def embeds(self) -> Optional[List[Embed]]:
"""List[:class:`Embed`]: Returns a list of Embedded urls from the tweet
.. versionadded:: 1.1.3
"""
        if not self._embeds:
            return None
        for embed in self._embeds:
if embed.get("images"):
for index, image in enumerate(embed["images"]):
if isinstance(image, EmbedImage):
break
embed["images"][index] = EmbedImage(**image)
return [Embed(**data) for data in self._embeds]
@property
def like_count(self) -> Optional[int]:
"""Optional[:class:`int`]: Returns the total of likes in the tweet.
.. versionadded: 1.1.0
"""
        return convert(self._public_metrics.like_count, int)
@property
def retweet_count(self) -> Optional[int]:
"""Optional[:class:`int`]: Returns the total of retweetes in the tweet.
.. versionadded: 1.1.0
"""
        return convert(self._public_metrics.retweet_count, int)
@property
def reply_count(self) -> Optional[int]:
"""Optional[:class:`int`]: Returns the total of replies in the tweet.
.. versionadded: 1.1.0
"""
        return convert(self._public_metrics.reply_count, int)
@property
def quote_count(self) -> Optional[int]:
"""Optional[:class:`int`]: Returns the total of quotes in the tweet.
.. versionadded: 1.1.0
"""
        return convert(self._public_metrics.quote_count, int)
@property
    def non_public_metrics(self) -> Optional[NonPublicTweetMetrics]:
        """Optional[:class:`NonPublicTweetMetrics`]: The tweet's metrics that are not available for anyone to view on Twitter, such as `impression_count` and the `video view quartiles`.
.. versionadded:: 1.5.0
"""
return self._non_public_metrics
@property
def organic_metrics(self) -> Optional[OrganicTweetMetrics]:
"""Optional[:class:`OrganicTweetMetrics`]: The tweet's metrics in organic context (posted and viewed in a regular manner), such as `impression_count`, `user_profile_clicks` and `url_link_clicks`.
.. versionadded:: 1.5.0
"""
return self._organic_metrics
@property
def promoted_metrics(self) -> Optional[PromotedTweetMetrics]:
"""Optional[:class:`PromotedTweetMetrics`]: The tweet's metrics in promoted context (posted or viewed as part of an Ads campaign), such as `impression_count`, `user_profile_clicks` and `url_link_clicks`.
.. versionadded:: 1.5.0
"""
return self._promoted_metrics
def like(self) -> Optional[RelationLike]:
"""Like the tweet.
Returns
---------
Optional[:class:`RelationLike`]
This method returns a :class:`RelationLike` object.
.. versionadded:: 1.2.0
"""
my_id = self.http_client.access_token.partition("-")[0]
res = self.http_client.request("POST", "2", f"/users/{my_id}/likes", json={"tweet_id": str(self.id)}, auth=True)
return RelationLike(res)
def unlike(self) -> Optional[RelationLike]:
"""Unlike the tweet.
Returns
---------
:class:`RelationLike`
This method returns a :class:`RelationLike` object.
.. versionadded:: 1.2.0
"""
my_id = self.http_client.access_token.partition("-")[0]
res = self.http_client.request("DELETE", "2", f"/users/{my_id}/likes/{self.id}", auth=True)
return RelationLike(res)
def retweet(self) -> RelationRetweet:
"""Retweet the tweet.
Returns
---------
:class:`RelationRetweet`
This method returns a :class:`RelationRetweet` object.
.. versionadded:: 1.2.0
"""
my_id = self.http_client.access_token.partition("-")[0]
res = self.http_client.request(
"POST",
"2",
f"/users/{my_id}/retweets",
json={"tweet_id": str(self.id)},
auth=True,
)
return RelationRetweet(res)
def unretweet(self) -> RelationRetweet:
"""Unretweet the tweet.
Returns
---------
:class:`RelationRetweet`
This method returns a :class:`RelationRetweet` object.
.. versionadded:: 1.2.0
"""
my_id = self.http_client.access_token.partition("-")[0]
res = self.http_client.request("DELETE", "2", f"/users/{my_id}/retweets/{self.id}", auth=True)
return RelationRetweet(res)
def delete(self) -> RelationDelete:
"""Delete the client's tweet.
.. note::
You can only delete the client's tweet.
.. versionadded:: 1.2.0
"""
res = self.http_client.request("DELETE", "2", f"/tweets/{self.id}", auth=True)
try:
self.http_client.tweet_cache.pop(self.id)
except KeyError:
pass
return RelationDelete(res)
def reply(
self,
text: str = None,
*,
file: Optional[File] = None,
files: Optional[List[File]] = None,
geo: Optional[Union[Geo, str]] = None,
direct_message_deep_link: Optional[str] = None,
reply_setting: Optional[Union[ReplySetting, str]] = None,
exclude_reply_users: Optional[List[User, ID]] = None,
media_tagged_users: Optional[List[User, ID]] = None,
) -> Optional[Tweet]:
"""Post a tweet to reply to the tweet present by the tweet's id. Returns a :class:`Tweet` object or :class:`Message` if the tweet is not found in the cache.
.. note::
            Note that if the tweet is a retweet you cannot reply to it; this might not raise an error, but the tweet will be posted as a normal tweet rather than a reply and it will ping :class:`Tweet.author`.
Parameters
------------
text: :class:`str`
The tweet's text, it will show up as the main text in a tweet.
file: Optional[:class:`File`]
            Represents a single file attachment. It could be an image, gif, or video. It also has to be an instance of pytweet.File.
files: Optional[List[:class:`File`]]
            Represents multiple file attachments in a list. Each item could be an image, gif, or video, and must also be an instance of pytweet.File.
geo: Optional[Union[:class:`Geo`, :class:`str`]]
The geo attachment, you can put an object that is an instance of :class:`Geo` or the place ID in a string.
direct_message_deep_link: Optional[:class:`str`]
            The direct message deep link. It will show up as a CTA (call-to-action) button attachment. Example of a direct message deep link:
reply_setting: Optional[Union[:class:`ReplySetting`, :class:`str`]]
            The reply setting, which limits who can reply to the tweet. If None is specified, the default 'everyone' is used.
exclude_reply_users: Optional[List[:class:`User`]]
            A list of users or user ids to be excluded from the reply :class:`Tweet`, effectively removing those users from the thread. For example, if you don't want a reply to mention all 3 users, provide the ids of the users you want to leave out.
media_tagged_users: Optional[List[:class:`User`]]
A list of users or user ids being tagged in the Tweet with Media. If the user you're tagging doesn't have photo-tagging enabled, their names won't show up in the list of tagged users even though the Tweet is successfully created.
Returns
---------
Union[:class:`Tweet`, :class:`Message`]
Returns a :class:`Tweet` object or :class:`Message` object if the tweet is not found in the cache.
.. versionadded:: 1.2.5
"""
return self.http_client.post_tweet(
text,
file=file,
files=files,
geo=geo,
direct_message_deep_link=direct_message_deep_link,
reply_setting=reply_setting,
reply_tweet=self.id,
exclude_reply_users=exclude_reply_users,
media_tagged_users=media_tagged_users,
)
def hide(self) -> RelationHide:
"""Hide a reply tweet.
Returns
---------
:class:`RelationHide`
This method returns a :class:`RelationHide` object.
.. versionadded:: 1.2.5
"""
res = self.http_client.request("PUT", "2", f"/tweets/{self.id}/hidden", json={"hidden": False}, auth=True)
return RelationHide(res)
def unhide(self) -> RelationHide:
"""Unhide a hide reply.
Returns
---------
:class:`RelationHide`
This method returns a :class:`RelationHide` object.
.. versionadded:: 1.2.5
"""
res = self.http_client.request("PUT", "2", f"/tweets/{self.id}/hidden", json={"hidden": False}, auth=True)
return RelationHide(res)
def fetch_retweeters(self) -> Optional[UserPagination]:
"""Returns a pagination object with the users that retweeted the tweet.
Returns
---------
Optional[:class:`UserPagination`]
This method returns a :class:`UserPagination` object.
.. versionadded:: 1.1.3
"""
res = self.http_client.request(
"GET",
"2",
f"/tweets/{self.id}/retweeted_by",
params={
"expansions": PINNED_TWEET_EXPANSION,
"user.fields": USER_FIELD,
"tweet.fields": TWEET_FIELD,
},
)
if not res:
return []
return UserPagination(
res,
endpoint_request=f"/tweets/{self.id}/retweeted_by",
http_client=self.http_client,
params={
"expansions": PINNED_TWEET_EXPANSION,
"user.fields": USER_FIELD,
"tweet.fields": TWEET_FIELD,
},
)
def fetch_likers(self) -> Optional[UserPagination]:
"""Returns a pagination object with the users that liked the tweet.
Returns
---------
Optional[:class:`UserPagination`]
This method returns a :class:`UserPagination` object.
.. versionadded:: 1.1.3
"""
res = self.http_client.request(
"GET",
"2",
f"/tweets/{self.id}/liking_users",
params={
"expansions": PINNED_TWEET_EXPANSION,
"user.fields": USER_FIELD,
"tweet.fields": TWEET_FIELD,
},
)
if not res:
return []
return UserPagination(
res,
endpoint_request=f"/tweets/{self.id}/liking_users",
http_client=self.http_client,
params={
"expansions": PINNED_TWEET_EXPANSION,
"user.fields": USER_FIELD,
"tweet.fields": TWEET_FIELD,
},
)
def fetch_quoted_tweets(self) -> Optional[TweetPagination]:
"""Returns a pagination object for tweets that quoted the tweet
Returns
---------
Optional[:class:`TweetPagination`]
This method returns :class:`TweetPagination` or an empty list if the tweet does not contain any quoted tweets.
.. versionadded:: 1.5.0
"""
params = {
"expansions": TWEET_EXPANSION,
"user.fields": USER_FIELD,
"tweet.fields": TWEET_FIELD,
"media.fields": MEDIA_FIELD,
"place.fields": PLACE_FIELD,
"poll.fields": POLL_FIELD,
"max_results": 100,
}
res = self.http_client.request("GET", "2", f"/tweets/{self.id}/quote_tweets", params=params)
if not res:
return []
return TweetPagination(
res,
endpoint_request=f"/tweets/{self.id}/quote_tweets",
http_client=self.http_client,
params=params,
)
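# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module): builds a Tweet straight from a dict shaped like a Twitter API v2
# response. Every value below is invented, and the public_metrics keys are
# assumed to mirror the v2 field names this class unpacks into
# PublicTweetMetrics. Methods that hit the API (like, reply, ...) need a real
# http_client and are not called here.
if __name__ == "__main__":
    _payload = {
        "data": {
            "id": "1",
            "text": "hello world",
            "lang": "en",
            "source": "Twitter Web App",
            "possibly_sensitive": False,
            "reply_settings": "everyone",
            "public_metrics": {
                "retweet_count": 0,
                "reply_count": 0,
                "like_count": 0,
                "quote_count": 0,
            },
        }
    }
    tweet = Tweet(_payload)
    print(tweet.text, tweet.lang, tweet.source, tweet.like_count)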
|
wtf/static/static/settings.py | tigefa4u/wtfismyip | 297 | 11109951 |
import base64
SECRET_KEY = base64.b64decode('<KEY>')
|
08_concurrency/cralwer/parallel_requests.py | siddheshmhatre/high_performance_python | 698 | 11109967 |
from gevent import monkey
monkey.patch_socket()
import gevent
from gevent.coros import Semaphore
import urllib2
import string
import random
from contextlib import closing
import numpy as np
import pylab as py
from itertools import cycle
import json
markers = cycle('h*o>Dxsp8')
linestyles = cycle(['-', ':', '--', '-.'])
def generate_urls(base_url, num_urls):
for i in xrange(num_urls):
yield base_url + "".join(random.sample(string.ascii_lowercase, 10))
def download(url, semaphore):
try:
with semaphore, closing(urllib2.urlopen(url)) as data:
return data.read()
except Exception as e:
print "retrying: ", e
return download(url, semaphore)
def chunked_requests(urls, chunk_size=100):
semaphore = Semaphore(chunk_size)
requests = [gevent.spawn(download, u, semaphore) for u in urls]
for response in gevent.iwait(requests):
yield response
def run_experiment(base_url, num_iter=500, parallel_requests=100):
urls = generate_urls(base_url, num_iter)
response_futures = chunked_requests(urls, parallel_requests)
response_size = sum(len(r.value) for r in response_futures)
return response_size
if __name__ == "__main__":
try:
data = json.load(open("parallel_requests.json"))
except IOError:
import time
delay = 100
num_iter = 500
data = {}
for delay in xrange(50, 1000, 250):
base_url = "http://1172.16.58.3:8080/add?name=concurrency_test&delay={}&".format(
delay)
data[delay] = []
for parallel_requests in xrange(1, num_iter, 25):
start = time.time()
result = run_experiment(base_url, num_iter, parallel_requests)
t = time.time() - start
print("{},{},{}".format(delay, parallel_requests, t))
data[delay].append((parallel_requests, t))
json.dump(data, open("parallel_requests.json", "w+"))
finally:
py.figure()
for delay, values in data.iteritems():
values = np.asarray(values)
py.plot(values[:, 0], values[:, 1],
label="{}s request time".format(delay),
linestyle=linestyles.next(),
marker=markers.next(),
linewidth=4,
)
py.axvline(x=100, alpha=0.5, c='r')
ax = py.gca()
ax.set_yscale('log')
py.xlabel("Number of concurrent downloads")
py.ylabel("Time to download 500 concurrent files")
py.title("Finding the right number of concurrent requests")
py.legend()
py.savefig("images/parallel_requests.png")
|
socialNetwork/test/TestUserService.py | rodrigo-bruno/DeathStarBench | 364 | 11110051 | import sys
sys.path.append('../gen-py')
import uuid
from social_network import UserService
from social_network.ttypes import ServiceException
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
def register():
socket = TSocket.TSocket("ath-8.ece.cornell.edu", 10005)
transport = TTransport.TFramedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = UserService.Client(protocol)
transport.open()
req_id = uuid.uuid4().int & 0x7FFFFFFFFFFFFFFF
client.RegisterUser(req_id, "first_name_0", "last_name_0",
"username_11114", "<PASSWORD>", {})
client.RegisterUserWithId(req_id, "first_name_1", "last_name_1",
"username_1", "<PASSWORD>", 1, {})
client.RegisterUserWithId(req_id, "first_name_2", "last_name_2",
"username_2", "<PASSWORD>", 2, {})
transport.close()
def login():
socket = TSocket.TSocket("ath-8.ece.cornell.edu", 10005)
transport = TTransport.TFramedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = UserService.Client(protocol)
transport.open()
req_id = uuid.uuid4().int & 0x7FFFFFFFFFFFFFFF
print(client.Login(req_id, "username_0", "<PASSWORD>", {}))
# print(client.Login(req_id, "username_1", "password_2", {}))
# print(client.Login(req_id, "username_2", "<PASSWORD>", {}))
transport.close()
if __name__ == '__main__':
try:
login()
except ServiceException as se:
print('%s' % se.message)
except Thrift.TException as tx:
print('%s' % tx.message) |
alipay/aop/api/domain/AlipayIserviceCcmRoleModifyModel.py | antopen/alipay-sdk-python-all | 213 | 11110055 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayIserviceCcmRoleModifyModel(object):
def __init__(self):
self._ccs_instance_id = None
self._description = None
self._function_ids = None
self._id = None
self._updater_id = None
@property
def ccs_instance_id(self):
return self._ccs_instance_id
@ccs_instance_id.setter
def ccs_instance_id(self, value):
self._ccs_instance_id = value
@property
def description(self):
return self._description
@description.setter
def description(self, value):
self._description = value
@property
def function_ids(self):
return self._function_ids
@function_ids.setter
def function_ids(self, value):
if isinstance(value, list):
self._function_ids = list()
for i in value:
self._function_ids.append(i)
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def updater_id(self):
return self._updater_id
@updater_id.setter
def updater_id(self, value):
self._updater_id = value
def to_alipay_dict(self):
params = dict()
if self.ccs_instance_id:
if hasattr(self.ccs_instance_id, 'to_alipay_dict'):
params['ccs_instance_id'] = self.ccs_instance_id.to_alipay_dict()
else:
params['ccs_instance_id'] = self.ccs_instance_id
if self.description:
if hasattr(self.description, 'to_alipay_dict'):
params['description'] = self.description.to_alipay_dict()
else:
params['description'] = self.description
if self.function_ids:
if isinstance(self.function_ids, list):
for i in range(0, len(self.function_ids)):
element = self.function_ids[i]
if hasattr(element, 'to_alipay_dict'):
self.function_ids[i] = element.to_alipay_dict()
if hasattr(self.function_ids, 'to_alipay_dict'):
params['function_ids'] = self.function_ids.to_alipay_dict()
else:
params['function_ids'] = self.function_ids
if self.id:
if hasattr(self.id, 'to_alipay_dict'):
params['id'] = self.id.to_alipay_dict()
else:
params['id'] = self.id
if self.updater_id:
if hasattr(self.updater_id, 'to_alipay_dict'):
params['updater_id'] = self.updater_id.to_alipay_dict()
else:
params['updater_id'] = self.updater_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayIserviceCcmRoleModifyModel()
if 'ccs_instance_id' in d:
o.ccs_instance_id = d['ccs_instance_id']
if 'description' in d:
o.description = d['description']
if 'function_ids' in d:
o.function_ids = d['function_ids']
if 'id' in d:
o.id = d['id']
if 'updater_id' in d:
o.updater_id = d['updater_id']
return o
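# Illustrative usage sketch (not part of the SDK): round-trip a model through
# its dict form. The field values are invented purely for demonstration.
def _example_round_trip():
    model = AlipayIserviceCcmRoleModifyModel()
    model.ccs_instance_id = "ccs-demo-001"
    model.function_ids = ["func-1", "func-2"]
    params = model.to_alipay_dict()
    restored = AlipayIserviceCcmRoleModifyModel.from_alipay_dict(params)
    return restored.function_ids == model.function_ids  # expected: True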
|
python-leetcode/laozhang/tree/leetcode_1302_.py | sweeneycai/cs-summary-reflection | 227 | 11110105 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# coding=utf-8
"""
1302. Deepest Leaves Sum: return the sum of the values of the deepest leaves of the tree.
"""
from laozhang import TreeNode
class Solution:
    def deepestLeavesSum(self, root: TreeNode) -> int:
        max_depth = 0
        depth_sums = {}  # depth -> running sum of leaf values seen at that depth
        def helper(node: TreeNode, depth: int):
            nonlocal max_depth
            if node:
                if depth > max_depth:
                    max_depth = depth
                helper(node.left, depth + 1)
                helper(node.right, depth + 1)
                # A leaf only counts while its depth matches the deepest level
                # seen so far, so depth_sums[max_depth] ends up holding the answer.
                if not node.left and not node.right and depth == max_depth:
                    depth_sums[depth] = depth_sums.get(depth, 0) + node.val
        helper(root, 1)
        return depth_sums[max_depth]
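# Quick hedged check (not from the original repo). To stay independent of the
# laozhang TreeNode constructor, it uses a minimal stand-in node exposing the
# same .val/.left/.right attributes that deepestLeavesSum relies on.
class _Node:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right
def _demo_deepest_leaves_sum():
    tree = _Node(1, _Node(2, _Node(4)), _Node(3))
    return Solution().deepestLeavesSum(tree) == 4  # node 4 is the only deepest leaf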
|
src/python/tests/unittests/test_common/test_config.py | annihilatethee/seedsync | 255 | 11110108 | # Copyright 2017, <NAME>, All rights reserved.
import unittest
import os
import tempfile
from common import Config, ConfigError, PersistError
from common.config import InnerConfig, Checkers, Converters
class TestConverters(unittest.TestCase):
def test_int(self):
self.assertEqual(0, Converters.int(None, "", "0"))
self.assertEqual(1, Converters.int(None, "", "1"))
self.assertEqual(-1, Converters.int(None, "", "-1"))
self.assertEqual(5000, Converters.int(None, "", "5000"))
self.assertEqual(-5000, Converters.int(None, "", "-5000"))
with self.assertRaises(ConfigError) as e:
Converters.int(TestConverters, "bad", "")
self.assertEqual("Bad config: TestConverters.bad is empty", str(e.exception))
with self.assertRaises(ConfigError) as e:
Converters.int(TestConverters, "bad", "3.14")
self.assertEqual("Bad config: TestConverters.bad (3.14) must be an integer value", str(e.exception))
with self.assertRaises(ConfigError) as e:
Converters.int(TestConverters, "bad", "cat")
self.assertEqual("Bad config: TestConverters.bad (cat) must be an integer value", str(e.exception))
def test_bool(self):
self.assertEqual(True, Converters.bool(None, "", "True"))
self.assertEqual(False, Converters.bool(None, "", "False"))
self.assertEqual(True, Converters.bool(None, "", "true"))
self.assertEqual(False, Converters.bool(None, "", "false"))
self.assertEqual(True, Converters.bool(None, "", "TRUE"))
self.assertEqual(False, Converters.bool(None, "", "FALSE"))
self.assertEqual(True, Converters.bool(None, "", "1"))
self.assertEqual(False, Converters.bool(None, "", "0"))
with self.assertRaises(ConfigError) as e:
Converters.bool(TestConverters, "bad", "")
self.assertEqual("Bad config: TestConverters.bad is empty", str(e.exception))
with self.assertRaises(ConfigError) as e:
Converters.bool(TestConverters, "bad", "cat")
self.assertEqual("Bad config: TestConverters.bad (cat) must be a boolean value", str(e.exception))
with self.assertRaises(ConfigError) as e:
Converters.bool(TestConverters, "bad", "-3.14")
self.assertEqual("Bad config: TestConverters.bad (-3.14) must be a boolean value", str(e.exception))
class DummyInnerConfig(InnerConfig):
c_prop1 = InnerConfig._create_property("prop1", Checkers.null, Converters.null)
a_prop2 = InnerConfig._create_property("prop2", Checkers.null, Converters.null)
b_prop3 = InnerConfig._create_property("prop3", Checkers.null, Converters.null)
def __init__(self):
self.c_prop1 = "1"
self.a_prop2 = "2"
self.b_prop3 = "3"
class DummyInnerConfig2(InnerConfig):
prop_int = InnerConfig._create_property("prop_int", Checkers.null, Converters.int)
prop_str = InnerConfig._create_property("prop_str", Checkers.string_nonempty, Converters.null)
def __init__(self):
self.prop_int = None
self.prop_str = None
class TestInnerConfig(unittest.TestCase):
def test_property_order(self):
dummy_config = DummyInnerConfig()
self.assertEqual(["c_prop1", "a_prop2", "b_prop3"], list(dummy_config.as_dict().keys()))
def test_has_property(self):
dummy_config = DummyInnerConfig()
self.assertTrue(dummy_config.has_property("c_prop1"))
self.assertTrue(dummy_config.has_property("a_prop2"))
self.assertTrue(dummy_config.has_property("b_prop3"))
self.assertFalse(dummy_config.has_property("not_prop"))
self.assertFalse(dummy_config.has_property("__init__"))
self.assertFalse(dummy_config.has_property(""))
def test_checker_is_called(self):
dummy_config = DummyInnerConfig2()
dummy_config.prop_str = "a string"
self.assertEqual("a string", dummy_config.prop_str)
with self.assertRaises(ConfigError) as e:
dummy_config.prop_str = ""
self.assertEqual("Bad config: DummyInnerConfig2.prop_str is empty", str(e.exception))
def test_converter_is_called(self):
dummy_config = DummyInnerConfig2.from_dict({"prop_int": "5", "prop_str": "a"})
self.assertEqual(5, dummy_config.prop_int)
with self.assertRaises(ConfigError) as e:
DummyInnerConfig2.from_dict({"prop_int": "cat", "prop_str": "a"})
self.assertEqual("Bad config: DummyInnerConfig2.prop_int (cat) must be an integer value", str(e.exception))
class TestConfig(unittest.TestCase):
def __check_unknown_error(self, cls, good_dict):
"""
Helper method to check that a config class raises an error on
an unknown key
        :param cls: the config class under test
        :param good_dict: a known-good config dict for the class
        :return: None
"""
bad_dict = dict(good_dict)
bad_dict["unknown"] = "how did this get here"
with self.assertRaises(ConfigError) as error:
cls.from_dict(bad_dict)
self.assertTrue(str(error.exception).startswith("Unknown config"))
def __check_missing_error(self, cls, good_dict, key):
"""
Helper method to check that a config class raises an error on
a missing key
        :param cls: the config class under test
        :param good_dict: a known-good config dict for the class
        :param key: the key to remove from the dict
        :return: None
"""
bad_dict = dict(good_dict)
del bad_dict[key]
with self.assertRaises(ConfigError) as error:
cls.from_dict(bad_dict)
self.assertTrue(str(error.exception).startswith("Missing config"))
def __check_empty_error(self, cls, good_dict, key):
"""
Helper method to check that a config class raises an error on
        an empty value
        :param cls: the config class under test
        :param good_dict: a known-good config dict for the class
        :param key: the key whose value is emptied
        :return: None
"""
bad_dict = dict(good_dict)
bad_dict[key] = ""
with self.assertRaises(ConfigError) as error:
cls.from_dict(bad_dict)
self.assertTrue(str(error.exception).startswith("Bad config"))
bad_dict[key] = " "
with self.assertRaises(ConfigError) as error:
cls.from_dict(bad_dict)
self.assertTrue(str(error.exception).startswith("Bad config"))
def check_common(self, cls, good_dict, keys):
"""
Helper method to run some common checks
        :param cls: the config class under test
        :param good_dict: a known-good config dict for the class
        :param keys: the keys to exercise with the missing-key and empty-value checks
        :return: None
"""
# unknown
self.__check_unknown_error(cls, good_dict)
for key in keys:
# missing key
self.__check_missing_error(cls, good_dict, key)
# empty value
self.__check_empty_error(cls, good_dict, key)
def check_bad_value_error(self, cls, good_dict, key, value):
"""
Helper method to check that a config class raises an error on
a bad value
        :param cls: the config class under test
        :param good_dict: a known-good config dict for the class
        :param key: the key to override
        :param value: the bad value to set for the key
        :return: None
"""
bad_dict = dict(good_dict)
bad_dict[key] = value
with self.assertRaises(ConfigError) as error:
cls.from_dict(bad_dict)
self.assertTrue(str(error.exception).startswith("Bad config"))
def test_has_section(self):
config = Config()
self.assertTrue(config.has_section("general"))
self.assertTrue(config.has_section("lftp"))
self.assertTrue(config.has_section("controller"))
self.assertTrue(config.has_section("web"))
self.assertTrue(config.has_section("autoqueue"))
self.assertFalse(config.has_section("nope"))
self.assertFalse(config.has_section("from_file"))
self.assertFalse(config.has_section("__init__"))
def test_general(self):
good_dict = {
"debug": "True",
"verbose": "False",
}
general = Config.General.from_dict(good_dict)
self.assertEqual(True, general.debug)
self.assertEqual(False, general.verbose)
self.check_common(Config.General,
good_dict,
{
"debug",
"verbose"
})
# bad values
self.check_bad_value_error(Config.General, good_dict, "debug", "SomeString")
self.check_bad_value_error(Config.General, good_dict, "debug", "-1")
self.check_bad_value_error(Config.General, good_dict, "verbose", "SomeString")
self.check_bad_value_error(Config.General, good_dict, "verbose", "-1")
def test_lftp(self):
good_dict = {
"remote_address": "remote.server.com",
"remote_username": "remote-user",
"remote_password": "password",
"remote_port": "3456",
"remote_path": "/path/on/remote/server",
"local_path": "/path/on/local/server",
"remote_path_to_scan_script": "/path/on/remote/server/to/scan/script",
"use_ssh_key": "False",
"num_max_parallel_downloads": "2",
"num_max_parallel_files_per_download": "3",
"num_max_connections_per_root_file": "4",
"num_max_connections_per_dir_file": "6",
"num_max_total_connections": "7",
"use_temp_file": "True"
}
lftp = Config.Lftp.from_dict(good_dict)
self.assertEqual("remote.server.com", lftp.remote_address)
self.assertEqual("remote-user", lftp.remote_username)
self.assertEqual("password", lftp.remote_password)
self.assertEqual(3456, lftp.remote_port)
self.assertEqual("/path/on/remote/server", lftp.remote_path)
self.assertEqual("/path/on/local/server", lftp.local_path)
self.assertEqual("/path/on/remote/server/to/scan/script", lftp.remote_path_to_scan_script)
self.assertEqual(False, lftp.use_ssh_key)
self.assertEqual(2, lftp.num_max_parallel_downloads)
self.assertEqual(3, lftp.num_max_parallel_files_per_download)
self.assertEqual(4, lftp.num_max_connections_per_root_file)
self.assertEqual(6, lftp.num_max_connections_per_dir_file)
self.assertEqual(7, lftp.num_max_total_connections)
self.assertEqual(True, lftp.use_temp_file)
self.check_common(Config.Lftp,
good_dict,
{
"remote_address",
"remote_username",
"remote_password",
"remote_port",
"remote_path",
"local_path",
"remote_path_to_scan_script",
"use_ssh_key",
"num_max_parallel_downloads",
"num_max_parallel_files_per_download",
"num_max_connections_per_root_file",
"num_max_connections_per_dir_file",
"num_max_total_connections",
"use_temp_file"
})
# bad values
self.check_bad_value_error(Config.Lftp, good_dict, "remote_port", "-1")
self.check_bad_value_error(Config.Lftp, good_dict, "remote_port", "0")
self.check_bad_value_error(Config.Lftp, good_dict, "use_ssh_key", "-1")
self.check_bad_value_error(Config.Lftp, good_dict, "use_ssh_key", "SomeString")
self.check_bad_value_error(Config.Lftp, good_dict, "num_max_parallel_downloads", "-1")
self.check_bad_value_error(Config.Lftp, good_dict, "num_max_parallel_downloads", "0")
self.check_bad_value_error(Config.Lftp, good_dict, "num_max_parallel_files_per_download", "-1")
self.check_bad_value_error(Config.Lftp, good_dict, "num_max_parallel_files_per_download", "0")
self.check_bad_value_error(Config.Lftp, good_dict, "num_max_connections_per_root_file", "-1")
self.check_bad_value_error(Config.Lftp, good_dict, "num_max_connections_per_root_file", "0")
self.check_bad_value_error(Config.Lftp, good_dict, "num_max_connections_per_dir_file", "-1")
self.check_bad_value_error(Config.Lftp, good_dict, "num_max_connections_per_dir_file", "0")
self.check_bad_value_error(Config.Lftp, good_dict, "num_max_total_connections", "-1")
self.check_bad_value_error(Config.Lftp, good_dict, "use_temp_file", "-1")
self.check_bad_value_error(Config.Lftp, good_dict, "use_temp_file", "SomeString")
def test_controller(self):
good_dict = {
"interval_ms_remote_scan": "30000",
"interval_ms_local_scan": "10000",
"interval_ms_downloading_scan": "2000",
"extract_path": "/extract/path",
"use_local_path_as_extract_path": "True"
}
controller = Config.Controller.from_dict(good_dict)
self.assertEqual(30000, controller.interval_ms_remote_scan)
self.assertEqual(10000, controller.interval_ms_local_scan)
self.assertEqual(2000, controller.interval_ms_downloading_scan)
self.assertEqual("/extract/path", controller.extract_path)
self.assertEqual(True, controller.use_local_path_as_extract_path)
self.check_common(Config.Controller,
good_dict,
{
"interval_ms_remote_scan",
"interval_ms_local_scan",
"interval_ms_downloading_scan",
"extract_path",
"use_local_path_as_extract_path"
})
# bad values
self.check_bad_value_error(Config.Controller, good_dict, "interval_ms_remote_scan", "-1")
self.check_bad_value_error(Config.Controller, good_dict, "interval_ms_remote_scan", "0")
self.check_bad_value_error(Config.Controller, good_dict, "interval_ms_local_scan", "-1")
self.check_bad_value_error(Config.Controller, good_dict, "interval_ms_local_scan", "0")
self.check_bad_value_error(Config.Controller, good_dict, "interval_ms_downloading_scan", "-1")
self.check_bad_value_error(Config.Controller, good_dict, "interval_ms_downloading_scan", "0")
self.check_bad_value_error(Config.Controller, good_dict, "use_local_path_as_extract_path", "SomeString")
self.check_bad_value_error(Config.Controller, good_dict, "use_local_path_as_extract_path", "-1")
def test_web(self):
good_dict = {
"port": "1234",
}
web = Config.Web.from_dict(good_dict)
self.assertEqual(1234, web.port)
self.check_common(Config.Web,
good_dict,
{
"port"
})
# bad values
self.check_bad_value_error(Config.Web, good_dict, "port", "-1")
self.check_bad_value_error(Config.Web, good_dict, "port", "0")
def test_autoqueue(self):
good_dict = {
"enabled": "True",
"patterns_only": "False",
"auto_extract": "True"
}
autoqueue = Config.AutoQueue.from_dict(good_dict)
self.assertEqual(True, autoqueue.enabled)
self.assertEqual(False, autoqueue.patterns_only)
self.check_common(Config.AutoQueue,
good_dict,
{
"enabled",
"patterns_only",
"auto_extract"
})
# bad values
self.check_bad_value_error(Config.AutoQueue, good_dict, "enabled", "SomeString")
self.check_bad_value_error(Config.AutoQueue, good_dict, "enabled", "-1")
self.check_bad_value_error(Config.AutoQueue, good_dict, "patterns_only", "SomeString")
self.check_bad_value_error(Config.AutoQueue, good_dict, "patterns_only", "-1")
self.check_bad_value_error(Config.AutoQueue, good_dict, "auto_extract", "SomeString")
self.check_bad_value_error(Config.AutoQueue, good_dict, "auto_extract", "-1")
def test_from_file(self):
# Create empty config file
config_file = open(tempfile.mktemp(suffix="test_config"), "w")
config_file.write("""
[General]
debug=False
verbose=True
[Lftp]
remote_address=remote.server.com
remote_username=remote-user
remote_password=<PASSWORD>
remote_port = 3456
remote_path=/path/on/remote/server
local_path=/path/on/local/server
remote_path_to_scan_script=/path/on/remote/server/to/scan/script
use_ssh_key=True
num_max_parallel_downloads=2
num_max_parallel_files_per_download=3
num_max_connections_per_root_file=4
num_max_connections_per_dir_file=5
num_max_total_connections=7
use_temp_file=False
[Controller]
interval_ms_remote_scan=30000
interval_ms_local_scan=10000
interval_ms_downloading_scan=2000
extract_path=/path/where/to/extract/stuff
use_local_path_as_extract_path=False
[Web]
port=88
[AutoQueue]
enabled=False
patterns_only=True
auto_extract=True
""")
config_file.flush()
config = Config.from_file(config_file.name)
self.assertEqual(False, config.general.debug)
self.assertEqual(True, config.general.verbose)
self.assertEqual("remote.server.com", config.lftp.remote_address)
self.assertEqual("remote-user", config.lftp.remote_username)
self.assertEqual("remote-pass", config.lftp.remote_password)
self.assertEqual(3456, config.lftp.remote_port)
self.assertEqual("/path/on/remote/server", config.lftp.remote_path)
self.assertEqual("/path/on/local/server", config.lftp.local_path)
self.assertEqual("/path/on/remote/server/to/scan/script", config.lftp.remote_path_to_scan_script)
self.assertEqual(True, config.lftp.use_ssh_key)
self.assertEqual(2, config.lftp.num_max_parallel_downloads)
self.assertEqual(3, config.lftp.num_max_parallel_files_per_download)
self.assertEqual(4, config.lftp.num_max_connections_per_root_file)
self.assertEqual(5, config.lftp.num_max_connections_per_dir_file)
self.assertEqual(7, config.lftp.num_max_total_connections)
self.assertEqual(False, config.lftp.use_temp_file)
self.assertEqual(30000, config.controller.interval_ms_remote_scan)
self.assertEqual(10000, config.controller.interval_ms_local_scan)
self.assertEqual(2000, config.controller.interval_ms_downloading_scan)
self.assertEqual("/path/where/to/extract/stuff", config.controller.extract_path)
self.assertEqual(False, config.controller.use_local_path_as_extract_path)
self.assertEqual(88, config.web.port)
self.assertEqual(False, config.autoqueue.enabled)
self.assertEqual(True, config.autoqueue.patterns_only)
self.assertEqual(True, config.autoqueue.auto_extract)
# unknown section error
config_file.write("""
[Unknown]
key=value
""")
config_file.flush()
with self.assertRaises(ConfigError) as error:
Config.from_file(config_file.name)
self.assertTrue(str(error.exception).startswith("Unknown section"))
# Remove config file
config_file.close()
os.remove(config_file.name)
def test_to_file(self):
config_file_path = tempfile.mktemp(suffix="test_config")
config = Config()
config.general.debug = True
config.general.verbose = False
config.lftp.remote_address = "server.remote.com"
config.lftp.remote_username = "user-on-remote-server"
config.lftp.remote_password = "<PASSWORD>"
config.lftp.remote_port = 3456
config.lftp.remote_path = "/remote/server/path"
config.lftp.local_path = "/local/server/path"
config.lftp.remote_path_to_scan_script = "/remote/server/path/to/script"
config.lftp.use_ssh_key = True
config.lftp.num_max_parallel_downloads = 6
config.lftp.num_max_parallel_files_per_download = 7
config.lftp.num_max_connections_per_root_file = 2
config.lftp.num_max_connections_per_dir_file = 3
config.lftp.num_max_total_connections = 4
config.lftp.use_temp_file = True
config.controller.interval_ms_remote_scan = 1234
config.controller.interval_ms_local_scan = 5678
config.controller.interval_ms_downloading_scan = 9012
config.controller.extract_path = "/path/extract/stuff"
config.controller.use_local_path_as_extract_path = True
config.web.port = 13
config.autoqueue.enabled = True
config.autoqueue.patterns_only = True
config.autoqueue.auto_extract = False
config.to_file(config_file_path)
with open(config_file_path, "r") as f:
actual_str = f.read()
print(actual_str)
golden_str = """
[General]
debug = True
verbose = False
[Lftp]
remote_address = server.remote.com
remote_username = user-on-remote-server
remote_password = <PASSWORD>
remote_port = 3456
remote_path = /remote/server/path
local_path = /local/server/path
remote_path_to_scan_script = /remote/server/path/to/script
use_ssh_key = True
num_max_parallel_downloads = 6
num_max_parallel_files_per_download = 7
num_max_connections_per_root_file = 2
num_max_connections_per_dir_file = 3
num_max_total_connections = 4
use_temp_file = True
[Controller]
interval_ms_remote_scan = 1234
interval_ms_local_scan = 5678
interval_ms_downloading_scan = 9012
extract_path = /path/extract/stuff
use_local_path_as_extract_path = True
[Web]
port = 13
[AutoQueue]
enabled = True
patterns_only = True
auto_extract = False
"""
golden_lines = [s.strip() for s in golden_str.splitlines()]
golden_lines = list(filter(None, golden_lines)) # remove blank lines
actual_lines = [s.strip() for s in actual_str.splitlines()]
actual_lines = list(filter(None, actual_lines)) # remove blank lines
self.assertEqual(len(golden_lines), len(actual_lines))
for i, _ in enumerate(golden_lines):
self.assertEqual(golden_lines[i], actual_lines[i])
def test_persist_read_error(self):
# bad section
content = """
[Web
port=88
"""
with self.assertRaises(PersistError):
Config.from_str(content)
# bad value
content = """
[Web]
port88
"""
with self.assertRaises(PersistError):
Config.from_str(content)
# bad line
content = """
[Web]
port=88
what am i doing here
"""
with self.assertRaises(PersistError):
Config.from_str(content)
|
tests/preprocessing/test_replace.py | austinjp/textacy | 1,929 | 11110110 | import pytest
from textacy import preprocessing
@pytest.mark.parametrize(
"text_in, text_out",
[
("$1.00 equals 100¢.", "_CUR_1.00 equals 100_CUR_."),
("How much is ¥100 in £?", "How much is _CUR_100 in _CUR_?"),
("My password is <PASSWORD>฿.", "My password is <PASSWORD>_CUR_<PASSWORD>_."),
]
)
def test_replace_currency_symbols(text_in, text_out):
assert preprocessing.replace.currency_symbols(text_in) == text_out
@pytest.mark.parametrize(
"text_in, text_out",
[
("Reach out at <EMAIL>.", "Reach out at _EMAIL_."),
("Click here: mailto:<EMAIL>.", "Click here: _EMAIL_."),
]
)
def test_replace_emails(text_in, text_out):
assert preprocessing.replace.emails(text_in) == text_out
@pytest.mark.parametrize(
"text_in, text_out",
[
("ugh, it's raining *again* ☔", "ugh, it's raining *again* _EMOJI_"),
("✌ tests are passing ✌", "_EMOJI_ tests are passing _EMOJI_"),
]
)
def test_replace_emojis(text_in, text_out):
assert preprocessing.replace.emojis(text_in) == text_out
@pytest.mark.parametrize(
"text_in, text_out",
[
("like omg it's #ThrowbackThursday", "like omg it's _TAG_"),
("#TextacyIn4Words: \"but it's honest work\"", "_TAG_: \"but it's honest work\""),
("wth twitter #ican'teven #why-even-try", "wth twitter _TAG_'teven _TAG_-even-try"),
("www.foo.com#fragment is not a hashtag", "www.foo.com#fragment is not a hashtag"),
]
)
def test_replace_hashtags(text_in, text_out):
assert preprocessing.replace.hashtags(text_in) == text_out
@pytest.mark.parametrize(
"text_in, text_out",
[
(
"I owe $1,000.99 to 123 people for 2 +1 reasons.",
"I owe $_NUMBER_ to _NUMBER_ people for _NUMBER_ _NUMBER_ reasons.",
),
]
)
def test_replace_numbers(text_in, text_out):
assert preprocessing.replace.numbers(text_in) == text_out
@pytest.mark.parametrize(
"text_in, text_out",
[
(
"I can be reached at 555-123-4567 through next Friday.",
"I can be reached at _PHONE_ through next Friday.",
),
]
)
def test_replace_phone_numbers(text_in, text_out):
assert preprocessing.replace.phone_numbers(text_in) == text_out
@pytest.mark.parametrize(
"text_in, text_out",
[
(
"I learned everything I know from www.stackoverflow.com and http://wikipedia.org/ and Mom.",
"I learned everything I know from _URL_ and _URL_ and Mom.",
),
]
)
def test_replace_urls(text_in, text_out):
assert preprocessing.replace.urls(text_in) == text_out
@pytest.mark.parametrize(
"text_in, text_out",
[
("like omg it's @bjdewilde", "like omg it's _USER_"),
("@Real_Burton_DeWilde: definitely not a bot", "_USER_: definitely not a bot"),
("wth twitter @b.j.dewilde", "wth twitter _USER_.j.dewilde"),
("<EMAIL> is not a user handle", "<EMAIL> is not a user handle"),
]
)
def test_replace_user_handles(text_in, text_out):
assert preprocessing.replace.user_handles(text_in) == text_out
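# Hedged add-on (not part of the textacy test suite): the replacers above
# compose naturally, e.g. masking URLs first and then emails in the same text.
# The sample string is invented purely for illustration.
def _example_chained_replace(text="write to someone@example.com or visit http://example.com"):
    masked = preprocessing.replace.urls(text)
    return preprocessing.replace.emails(masked)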
|
lib/utils.py | juliocamposmachado/gain2 | 242 | 11110136 | import os
import chainer.functions as F
from chainer.dataset import download
from chainer.serializers import npz
from chainer.backends.cuda import get_array_module
import numpy as np
from PIL import Image
def convert_caffemodel_to_npz(path_caffemodel, path_npz):
from chainer.links.caffe.caffe_function import CaffeFunction
caffemodel = CaffeFunction(path_caffemodel)
npz.save_npz(path_npz, caffemodel, compression=False)
def _make_npz(path_npz, url, model):
path_caffemodel = download.cached_download(url)
print('Now loading caffemodel (usually it may take few minutes)')
convert_caffemodel_to_npz(path_caffemodel, path_npz)
npz.load_npz(path_npz, model)
return model
def _retrieve(name, url, model):
root = download.get_dataset_directory('pfnet/chainer/models/')
path = os.path.join(root, name)
return download.cache_or_load_file(
path, lambda path: _make_npz(path, url, model),
lambda path: npz.load_npz(path, model))
def read_image(path, dtype=np.float32, color=True):
"""Read an image from a file.
    This function reads an image from the given file. The image is returned in
    HWC format and the range of its values is :math:`[0, 255]`. If
    :obj:`color = True`, the order of the channels is RGB.
Args:
path (string): A path of image file.
dtype: The type of array. The default value is :obj:`~numpy.float32`.
color (bool): This option determines the number of channels.
If :obj:`True`, the number of channels is three. In this case,
the order of the channels is RGB. This is the default behaviour.
If :obj:`False`, this function returns a grayscale image.
Returns:
~numpy.ndarray: An image.
"""
f = Image.open(path)
try:
if color:
img = f.convert('RGB')
else:
img = f.convert('P')
img = np.asarray(img, dtype=dtype)
finally:
if hasattr(f, 'close'):
f.close()
return img
def VGGprepare(image=None, path=None, size=(224, 224)):
"""Converts the given image to the numpy array for VGG models.
Note that you have to call this method before ``__call__``
    because the pre-trained VGG model requires the given image to be resized,
    converted from RGB to BGR, mean-subtracted,
    and transposed to CHW before the forward pass.
Args:
image (PIL.Image or numpy.ndarray): Input image.
If an input is ``numpy.ndarray``, its shape must be
``(height, width)``, ``(height, width, channels)``,
or ``(channels, height, width)``, and
the order of the channels must be RGB.
size (pair of ints): Size of converted images.
If ``None``, the given image is not resized.
Returns:
numpy.ndarray: The converted output array.
"""
if path is not None:
image = read_image(path)
if image.ndim == 4:
image = np.squeeze(image, 0)
if isinstance(image, np.ndarray):
if image.ndim == 3:
if image.shape[0] == 1:
image = image[0, :, :]
elif image.shape[0] == 3:
image = image.transpose((1, 2, 0))
image = Image.fromarray(image.astype(np.uint8))
image = image.convert('RGB')
if size:
image = image.resize(size)
image = np.asarray(image, dtype=np.float32)
image = image[:, :, ::-1]
image -= np.array(
[103.939, 116.779, 123.68], dtype=np.float32)
image = image.transpose((2, 0, 1))
return np.expand_dims(image, 0)
def VGGprepare_am_input(var):
xp = get_array_module(var)
# var = F.resize_images(var, size)
var = F.transpose(var, (0, 2, 3, 1)) # [[W, H, C]]
var = F.flip(var, 3)
var -= xp.array([[103.939, 116.779, 123.68]], dtype=xp.float32)
var = F.transpose(var, (0, 3, 1, 2))
return var
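# Hedged usage sketch (assumes an RGB image exists at "sample.jpg"). It only
# shows the tensor layout VGGprepare produces before the array is fed to a
# VGG-style network; it is not part of the original pipeline.
def _example_prepare(path="sample.jpg"):
    x = VGGprepare(path=path)
    # Expected: a (1, 3, 224, 224) float32 batch in BGR channel order with the
    # ImageNet mean already subtracted.
    return x.shape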
|
sohu.py | 1py/youku-lixian | 445 | 11110156 | #!/usr/bin/env python
__all__ = ['sohu_download']
from common import *
def real_url(host, prot, file, new):
    # Query the dispatch host; it answers with a '|'-separated record whose
    # first field is the download URL prefix and whose fourth field is the
    # access key needed to fetch the clip.
    url = 'http://%s/?prot=%s&file=%s&new=%s' % (host, prot, file, new)
    start, _, host, key, _, _ = get_html(url).split('|')
    return '%s%s?key=%s' % (start[:-1], new, key)
def sohu_download(url, merge=True):
vid = r1('vid="(\d+)"', get_html(url))
assert vid
import json
data = json.loads(get_decoded_html('http://hot.vrs.sohu.com/vrs_flash.action?vid=%s' % vid))
host = data['allot']
prot = data['prot']
urls = []
data = data['data']
title = data['tvName']
size = sum(data['clipsBytes'])
assert len(data['clipsURL']) == len(data['clipsBytes']) == len(data['su'])
for file, new in zip(data['clipsURL'], data['su']):
urls.append(real_url(host, prot, file, new))
assert data['clipsURL'][0].endswith('.mp4')
download_urls(urls, title, 'mp4', total_size=size, refer=url, merge=merge)
download = sohu_download
download_playlist = playlist_not_supported('sohu')
def main():
script_main('sohu', sohu_download)
if __name__ == '__main__':
main()
|
pxr/usdImaging/bin/testusdview/testenv/testUsdviewInterpreterNoRender/testUsdviewInterpreterNoRender.py | DougRogers-DigitalFish/USD | 3,680 | 11110163 | #!/pxrpythonsubst
#
# Copyright 2017 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
# Leaving these in for future debugging
from __future__ import print_function
import sys
from pxr.Usdviewq.qt import QtCore, QtGui, QtWidgets
def _emitShowInterpreter(appController):
appController._ui.showInterpreter.triggered.emit()
QtWidgets.QApplication.processEvents()
def _postKeyPress(key, widget):
event = QtGui.QKeyEvent(QtCore.QEvent.KeyPress,
key,
QtCore.Qt.NoModifier)
QtWidgets.QApplication.postEvent(widget, event)
QtWidgets.QApplication.processEvents()
def _testInterpreterWorks(appController):
#
# Trigger the interpreter console to display, then verify it worked
# by making sure the appController._console's context got initialized
# properly. This is somewhat unfortunate a test, but sending key events
# to the console to mimic typing doesn't seem to work.
#
# usdview happily swallows exceptions. It would be really nice if we could
# put it into a mode where it didn't - were that the case, the following
# try/except block is all we would need. Currently it has no value, but
# we leave it in for future aspirations.
try:
_emitShowInterpreter(appController)
except:
successfullyBroughtUpInterpreter = False
assert successfullyBroughtUpInterpreter
assert appController._console
# So instead, we check the last context variable that the interpreter
# initializes. This is pretty brittle, but other options seem just as
    # brittle.
assert "usdviewApi" in appController._console.locals()
def testUsdviewInputFunction(appController):
_testInterpreterWorks(appController)
|
backend/avatar/service.py | restato/bunnybook | 131 | 11110165 | import glob
import hashlib
import random
from pathlib import Path
from typing import Any, List
from PIL import Image
from injector import singleton, inject
from common.concurrency import cpu_bound_task
from config import cfg
@singleton
class AvatarService:
@inject
def __init__(self):
self._avatar_images_path = Path(cfg.avatar_data_folder)
self._avatar_images_path.mkdir(exist_ok=True, parents=True)
self._bodies = self._get_layers_paths("bodies")
self._accessories = self._get_layers_paths("accessories")
self._glasses = self._get_layers_paths("glasses")
self._hats = self._get_layers_paths("hats")
async def generate_and_save_avatar(self, identifier: str, filename: str) \
-> None:
await cpu_bound_task(
self._generate_and_save_avatar, identifier, filename)
async def generate_avatar(self, identifier: str) -> Any:
return await cpu_bound_task(self._generate_avatar, identifier)
def _generate_and_save_avatar(self, identifier: str, filename: str) -> None:
avatar_image = self._generate_avatar(identifier)
avatar_image.save(
self._avatar_images_path / f"{filename}.png",
"PNG",
optimize=True)
    def _generate_avatar(self, identifier: str) -> Any:
        # Seed the RNG with a hash of the identifier so the same identifier
        # always produces the same avatar.
        identifier_hash = int(hashlib.md5(str(identifier).encode()).hexdigest(),
                              base=16)
        random.seed(identifier_hash)
        # Pick one variant per layer, then composite them bottom-up:
        # body, accessory, glasses, hat.
        layer0 = self._bodies[random.randint(0, len(self._bodies) - 1)]
        layer1 = self._accessories[
            random.randint(0, len(self._accessories) - 1)]
        layer2 = self._glasses[random.randint(0, len(self._glasses) - 1)]
        layer3 = self._hats[random.randint(0, len(self._hats) - 1)]
        avatar = Image.alpha_composite(Image.open(layer0), Image.open(layer1))
        avatar = Image.alpha_composite(avatar, Image.open(layer2))
        avatar = Image.alpha_composite(avatar, Image.open(layer3))
        return avatar
def _get_layers_paths(self, layer_type: str) -> List[str]:
paths = glob.glob(f"avatar/images/{layer_type}/*")
paths.sort()
return paths
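# Hedged usage sketch (not part of the service): because the RNG is seeded with
# a hash of the identifier, the same identifier always yields the same avatar.
# It assumes the layer images and `cfg.avatar_data_folder` are available and
# bypasses the injector wiring used by the real application.
def _example_deterministic_avatar(identifier: str = "profile-123") -> bool:
    service = AvatarService()
    first = service._generate_avatar(identifier)
    second = service._generate_avatar(identifier)
    return first.tobytes() == second.tobytes()  # expected: True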
|
test/point_tier_tests.py | timmahrt/praatIO | 208 | 11110193 | import unittest
from praatio import textgrid
from praatio.utilities.constants import Interval, Point, POINT_TIER
from praatio.utilities import constants
from praatio.utilities import errors
from test.praatio_test_case import PraatioTestCase
def makePointTier(name="pitch_values", points=None, minT=0, maxT=5.0):
if points is None:
points = [Point(1.3, "55"), Point(3.7, "99")]
return textgrid.PointTier(name, points, minT, maxT)
class PointTierTests(PraatioTestCase):
def test_append_tier_with_mixed_type_throws_exception(self):
pointTier = makePointTier()
intervalTier = textgrid.IntervalTier(
"words",
[Interval(1, 2, "hello"), Interval(3.5, 4.0, "world")],
minT=0,
maxT=5.0,
)
with self.assertRaises(errors.ArgumentError) as _:
pointTier.appendTier(intervalTier)
def test_append_tier_with_point_tiers(self):
pointTier = textgrid.PointTier(
"pitch_values", [Point(1.3, "55"), Point(3.7, "99")], minT=0, maxT=5
)
pointTier2 = textgrid.PointTier(
"new_pitch_values", [Point(4.2, "153"), Point(7.1, "89")], minT=0, maxT=10
)
sut = pointTier.appendTier(pointTier2)
self.assertEqual(0, sut.minTimestamp)
self.assertEqual(15, sut.maxTimestamp)
self.assertEqual(
[Point(1.3, "55"), Point(3.7, "99"), Point(9.2, "153"), Point(12.1, "89")],
sut.entryList,
)
self.assertEqual(POINT_TIER, sut.tierType)
def test_find_with_point_tiers(self):
sut = makePointTier(
points=[
Point(1, "hello"),
Point(2.5, "the"),
Point(3.5, "world"),
]
)
self.assertEqual([], sut.find("mage", substrMatchFlag=False, usingRE=False))
self.assertEqual([1], sut.find("the", substrMatchFlag=False, usingRE=False))
self.assertEqual([], sut.find("mage", substrMatchFlag=True, usingRE=False))
self.assertEqual([0, 1], sut.find("he", substrMatchFlag=True, usingRE=False))
self.assertEqual([], sut.find("mage", substrMatchFlag=False, usingRE=True))
self.assertEqual([0, 1], sut.find("he", substrMatchFlag=False, usingRE=True))
self.assertEqual(
[0, 1, 2], sut.find("[eo]", substrMatchFlag=False, usingRE=True)
)
def test_point_tier_creation_with_no_times(self):
with self.assertRaises(errors.TimelessTextgridTierException) as cm:
textgrid.PointTier("pitch_values", [], None, None)
self.assertEqual(
"All textgrid tiers much have a min and max duration", str(cm.exception)
)
def test_crop_raises_error_if_crop_start_time_occurs_after_crop_end_time(self):
sut = makePointTier()
with self.assertRaises(errors.ArgumentError) as cm:
sut.crop(2.1, 1.1, "lax", True)
self.assertEqual(
"Crop error: start time (2.1) must occur before end time (1.1)",
str(cm.exception),
)
def test_crop_when_rebase_to_zero_is_true(self):
pointTier = makePointTier(
points=[
Point(0.5, "12"),
Point(1.3, "55"),
Point(3.7, "99"),
Point(4.5, "32"),
],
minT=0,
maxT=5,
)
sut = pointTier.crop(1.0, 3.8, rebaseToZero=True)
expectedPointTier = makePointTier(
points=[Point(0.3, "55"), Point(2.7, "99")],
minT=0,
maxT=2.8,
)
self.assertEqual(expectedPointTier, sut)
def test_crop_when_rebase_to_zero_is_false(self):
pointTier = makePointTier(
points=[
Point(0.5, "12"),
Point(1.3, "55"),
Point(3.7, "99"),
Point(4.5, "32"),
],
minT=0,
maxT=5,
)
sut = pointTier.crop(1.0, 3.8, rebaseToZero=False)
expectedPointTier = makePointTier(
points=[Point(1.3, "55"), Point(3.7, "99")],
minT=1,
maxT=3.8,
)
self.assertEqual(expectedPointTier, sut)
def test_erase_region_when_do_shrink_is_true(self):
pointTier = makePointTier(
points=[
Point(0.5, "12"),
Point(1.3, "55"),
Point(3.7, "99"),
Point(4.5, "32"),
],
minT=0,
maxT=5,
)
sut = pointTier.eraseRegion(1.0, 2.1, doShrink=True)
expectedPointTier = makePointTier(
points=[Point(0.5, "12"), Point(2.6, "99"), Point(3.4, "32")],
minT=0,
maxT=3.9,
)
self.assertEqual(expectedPointTier, sut)
def test_erase_region_when_do_shrink_is_false(self):
pointTier = makePointTier(
points=[
Point(0.5, "12"),
Point(1.3, "55"),
Point(3.7, "99"),
Point(4.5, "32"),
]
)
sut = pointTier.eraseRegion(1.0, 2.1, doShrink=False)
expectedPointTier = makePointTier(
points=[Point(0.5, "12"), Point(3.7, "99"), Point(4.5, "32")],
)
self.assertEqual(expectedPointTier, sut)
def test_get_values_at_points_when_fuzzy_matching_is_false(self):
sut = makePointTier(
points=[Point(1.3, "55"), Point(3.7, "99"), Point(4.5, "32")],
)
dataList = [
(0.9, 100, 55),
(1.3, 34, 92),
(1.5, 32, 15),
(1.8, 21, 34),
(4.5, 31, 2),
(4.8, 99, 44),
]
self.assertEqual(
[(1.3, 34, 92), (), (4.5, 31, 2)],
sut.getValuesAtPoints(dataList, fuzzyMatching=False),
)
dataList2 = [(0.9, 100), (1.3, 34), (1.5, 32), (1.8, 21)]
self.assertEqual(
[(1.3, 34), (), ()],
sut.getValuesAtPoints(dataList2, fuzzyMatching=False),
)
def test_get_values_at_points_when_fuzzy_matching_is_true(self):
sut = makePointTier(
points=[Point(1.3, "55"), Point(3.7, "99"), Point(4.5, "32")],
)
dataList = [
(0.9, 100, 55),
(1.3, 34, 92),
(1.5, 32, 15),
(1.8, 21, 34),
(4.5, 31, 2),
(4.8, 99, 44),
]
self.assertEqual(
[(1.3, 34, 92), (4.5, 31, 2), (4.5, 31, 2)],
sut.getValuesAtPoints(dataList, fuzzyMatching=True),
)
dataList2 = [(0.9, 100), (1.3, 34), (1.5, 32), (1.8, 21)]
self.assertEqual(
[(1.3, 34), (1.8, 21), (1.8, 21)],
sut.getValuesAtPoints(dataList2, fuzzyMatching=True),
)
def test_insert_point_at_start_of_point_tier(self):
sut = makePointTier(
points=[Point(1.3, "55"), Point(3.7, "99"), Point(4.5, "32")],
)
sut.insertEntry(Point(0.5, "21"))
self.assertEqual(
[Point(0.5, "21"), Point(1.3, "55"), Point(3.7, "99"), Point(4.5, "32")],
sut.entryList,
)
def test_insert_point_at_middle_of_point_tier(self):
sut = makePointTier(
points=[Point(1.3, "55"), Point(3.7, "99"), Point(4.5, "32")],
)
sut.insertEntry(Point(3.9, "21"))
self.assertEqual(
[Point(1.3, "55"), Point(3.7, "99"), Point(3.9, "21"), Point(4.5, "32")],
sut.entryList,
)
def test_insert_entry_works_with_points_tuples_or_lists(self):
sut = makePointTier(
points=[Point(1.3, "55"), Point(3.7, "99"), Point(4.5, "32")],
)
sut.insertEntry(Point(3.9, "21"))
sut.insertEntry((4.0, "23"))
sut.insertEntry((4.1, "99"))
self.assertEqual(
[
Point(1.3, "55"),
Point(3.7, "99"),
Point(3.9, "21"),
Point(4.0, "23"),
Point(4.1, "99"),
Point(4.5, "32"),
],
sut.entryList,
)
def test_insert_point_at_end_of_point_tier(self):
sut = makePointTier(
points=[Point(1.3, "55"), Point(3.7, "99"), Point(4.5, "32")],
)
sut.insertEntry(Point(4.9, "21"))
self.assertEqual(
[Point(1.3, "55"), Point(3.7, "99"), Point(4.5, "32"), Point(4.9, "21")],
sut.entryList,
)
def test_insert_point_when_collision_occurs(self):
sut = makePointTier(
points=[Point(1.3, "55"), Point(3.7, "99"), Point(4.5, "32")],
)
with self.assertRaises(errors.CollisionError) as _:
sut.insertEntry(
Point(3.7, "hello"),
constants.ErrorReportingMode.ERROR,
constants.ErrorReportingMode.SILENCE,
)
def test_insert_point_when_collision_occurs_and_merge(self):
sut = makePointTier(
points=[Point(1.3, "55"), Point(3.7, "99"), Point(4.5, "32")],
)
sut.insertEntry(
Point(3.7, "hello"),
constants.IntervalCollision.MERGE,
constants.ErrorReportingMode.SILENCE,
)
self.assertEqual(
[Point(1.3, "55"), Point(3.7, "99-hello"), Point(4.5, "32")],
sut.entryList,
)
def test_insert_point_when_collision_occurs_and_replace(self):
sut = makePointTier(
points=[Point(1.3, "55"), Point(3.7, "99"), Point(4.5, "32")],
)
sut.insertEntry(
Point(3.7, "hello"),
constants.IntervalCollision.REPLACE,
constants.ErrorReportingMode.SILENCE,
)
self.assertEqual(
[Point(1.3, "55"), Point(3.7, "hello"), Point(4.5, "32")],
sut.entryList,
)
def test_edit_timestamps_throws_error_if_reporting_mode_is_invalid(self):
sut = makePointTier(
points=[Point(1.3, "55"), Point(3.7, "99"), Point(4.5, "32")],
)
with self.assertRaises(errors.WrongOption) as _:
sut.editTimestamps(
2.0,
"cats",
)
def test_edit_timestamps_can_make_points_appear_later(self):
originalPointTier = makePointTier(
points=[Point(1.3, "55"), Point(3.7, "99"), Point(4.5, "32")],
)
sut = originalPointTier.editTimestamps(0.4)
expectedPointTier = makePointTier(
points=[Point(1.7, "55"), Point(4.1, "99"), Point(4.9, "32")],
)
self.assertEqual(expectedPointTier, sut)
def test_edit_timestamps_can_make_points_appear_earlier(self):
originalPointTier = makePointTier(
points=[Point(1.3, "55"), Point(3.7, "99"), Point(4.5, "32")],
)
sut = originalPointTier.editTimestamps(-0.4)
expectedPointTier = makePointTier(
points=[Point(0.9, "55"), Point(3.3, "99"), Point(4.1, "32")],
)
self.assertEqual(expectedPointTier, sut)
def test_edit_timestamp_can_raise_exception_when_reporting_mode_is_silence(self):
sut = makePointTier(
points=[Point(1.3, "55"), Point(3.7, "99"), Point(4.5, "32")],
minT=0,
maxT=5,
)
with self.assertRaises(errors.OutOfBounds) as _:
sut.editTimestamps(
-1.4,
constants.ErrorReportingMode.ERROR,
)
with self.assertRaises(errors.OutOfBounds) as _:
sut.editTimestamps(
1.4,
constants.ErrorReportingMode.ERROR,
)
def test_edit_timestamp_can_exceed_maxtimestamp_when_reporting_mode_is_silence(
self,
):
originalPointTier = makePointTier(
points=[Point(1.3, "55"), Point(3.7, "99"), Point(4.5, "32")],
maxT=5,
)
sut = originalPointTier.editTimestamps(
1.4, constants.ErrorReportingMode.SILENCE
)
expectedPointTier = makePointTier(
points=[Point(2.7, "55"), Point(5.1, "99"), Point(5.9, "32")],
maxT=5.9,
)
self.assertEqual(expectedPointTier, sut)
def test_edit_timestamp_drops_points_that_are_moved_before_zero(self):
originalPointTier = makePointTier(
points=[Point(1.3, "55"), Point(3.7, "99"), Point(4.5, "32")],
)
sut = originalPointTier.editTimestamps(
-1.4, constants.ErrorReportingMode.SILENCE
)
expectedPointTier = makePointTier(
points=[Point(2.3, "99"), Point(3.1, "32")],
)
self.assertEqual(expectedPointTier, sut)
def test_insert_space(self):
originalPointTier = makePointTier(
points=[Point(1.3, "55"), Point(3.7, "99"), Point(4.5, "32")],
maxT=5,
)
sut = originalPointTier.insertSpace(2.0, 1.1)
predictedPointTier = makePointTier(
points=[Point(1.3, "55"), Point(4.8, "99"), Point(5.6, "32")],
maxT=6.1,
)
self.assertEqual(predictedPointTier, sut)
def test_validate_throws_error_if_points_are_not_in_sequence(self):
sut = makePointTier(
points=[Point(1.3, "55"), Point(3.7, "99"), Point(4.5, "32")],
)
self.assertTrue(sut.validate())
sut.entryList.append(Point(3.9, "21"))
self.assertFalse(sut.validate(constants.ErrorReportingMode.SILENCE))
with self.assertRaises(errors.TextgridStateError) as _:
sut.validate(constants.ErrorReportingMode.ERROR)
def test_validate_throws_error_if_points_are_less_than_minimum_time(self):
sut = makePointTier(
points=[Point(1.3, "55"), Point(3.7, "99"), Point(4.5, "32")],
minT=0,
)
self.assertTrue(sut.validate())
sut.minTimestamp = 2.0
self.assertFalse(sut.validate(constants.ErrorReportingMode.SILENCE))
with self.assertRaises(errors.OutOfBounds) as _:
sut.validate(constants.ErrorReportingMode.ERROR)
def test_validate_throws_error_if_points_are_more_than_maximum_time(self):
sut = makePointTier(
[Point(1.3, "55"), Point(3.7, "99"), Point(4.5, "32")],
maxT=5,
)
self.assertTrue(sut.validate())
sut.maxTimestamp = 3.0
self.assertFalse(sut.validate(constants.ErrorReportingMode.SILENCE))
with self.assertRaises(errors.OutOfBounds) as _:
sut.validate(constants.ErrorReportingMode.ERROR)
if __name__ == "__main__":
unittest.main()
|
ch08/birds.py | ricjuanflores/practice-of-the-python | 319 | 11110226 | class Bird:
def fly(self):
print('flying!')
class Hummingbird(Bird):
def fly(self):
print('zzzzzooommm!')
class Penguin(Bird):
def fly(self):
print('no can do.')
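# Tiny illustrative addition (not in the original file): the overridden fly()
# methods exist so callers can rely on polymorphic dispatch.
def demo_flying():
    for bird in (Bird(), Hummingbird(), Penguin()):
        bird.fly()  # prints "flying!", "zzzzzooommm!", "no can do." in turn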
|
tensorflow_model_analysis/notebook/jupyter/renderer.py | rtg0795/model-analysis | 1,118 | 11110257 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jupyter renderer API."""
import tensorflow_model_analysis.notebook.jupyter.tfma_widget as tfma_widget
def render_slicing_metrics(data, config, event_handlers=None):
"""Renders the slicing metrics view in Jupyter.
Args:
    data: A list of dictionaries containing metrics for corresponding slices.
config: A dictionary of the configuration.
event_handlers: A dictionary of where keys are event types and values are
event handlers.
Returns:
A SlicingMetricsViewer.
"""
view = tfma_widget.SlicingMetricsViewer()
view.data = data
view.config = config
view.event_handlers = event_handlers
return view
def render_time_series(data, config):
"""Renders the time series view in Jupyter.
Args:
    data: A list of dictionaries containing metrics for different evaluation runs.
config: A dictionary of the configuration.
Returns:
A TimeSeriesViewer.
"""
view = tfma_widget.TimeSeriesViewer()
view.data = data
view.config = config
return view
def render_plot(data, config):
"""Renders the plot view in Jupyter.
Args:
data: A dictionary containing plot data.
config: A dictionary of the configuration.
Returns:
A PlotViewer.
"""
view = tfma_widget.PlotViewer()
view.data = data
view.config = config
return view
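# Hedged notebook usage sketch (not from the repo): `data` is assumed to be the
# per-slice metrics list described in render_slicing_metrics' docstring. At this
# layer the config dict is simply forwarded to the widget, so a minimal one works.
def _example_render_slicing_metrics(data):
    view = render_slicing_metrics(data, config={})
    return view  # displaying the returned widget is left to the notebook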
|
tests/opytimizer/optimizers/evolutionary/test_es.py | anukaal/opytimizer | 528 | 11110276 | <gh_stars>100-1000
import numpy as np
from opytimizer.optimizers.evolutionary import es
from opytimizer.spaces import search
def test_es_params():
params = {
'child_ratio': 0.5
}
new_es = es.ES(params=params)
assert new_es.child_ratio == 0.5
def test_es_params_setter():
new_es = es.ES()
try:
new_es.child_ratio = 'a'
except:
new_es.child_ratio = 0.5
try:
new_es.child_ratio = -1
except:
new_es.child_ratio = 0.5
assert new_es.child_ratio == 0.5
def test_es_compile():
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[0, 0], upper_bound=[10, 10])
new_es = es.ES()
new_es.compile(search_space)
try:
new_es.n_children = 'a'
except:
new_es.n_children = 0
assert new_es.n_children == 0
try:
new_es.n_children = -1
except:
new_es.n_children = 0
assert new_es.n_children == 0
try:
new_es.strategy = 1
except:
new_es.strategy = np.array([1])
assert new_es.strategy == np.array([1])
def test_es_mutate_parent():
def square(x):
return np.sum(x**2)
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[0, 0], upper_bound=[10, 10])
new_es = es.ES()
new_es.compile(search_space)
agent = new_es._mutate_parent(search_space.agents[0], 0, square)
assert agent.position[0][0] > 0
def test_es_update_strategy():
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[0, 0], upper_bound=[10, 10])
new_es = es.ES()
new_es.compile(search_space)
new_es._update_strategy(0)
assert new_es.strategy[0][0] > 0
def test_es_update():
def square(x):
return np.sum(x**2)
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[0, 0], upper_bound=[10, 10])
new_es = es.ES()
new_es.compile(search_space)
new_es.update(search_space, square)
|
caffe2/python/operator_test/mul_gradient_benchmark.py | raven38/pytorch | 206 | 11110282 |
import argparse
import numpy as np
from caffe2.python import core, workspace
def benchmark_mul_gradient(args):
workspace.FeedBlob("dC", np.random.rand(args.m, args.n).astype(np.float32))
workspace.FeedBlob("A", np.random.rand(args.m, args.n).astype(np.float32))
workspace.FeedBlob("B", np.random.rand(args.m).astype(np.float32))
net = core.Net("mynet")
net.MulGradient(["dC", "A", "B"], ["dA", "dB"], broadcast=True, axis=0)
workspace.CreateNet(net)
workspace.BenchmarkNet(net.Name(), 1, args.iteration, True)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="benchmark for MulGradient.")
parser.add_argument(
'-m', type=int, default=9508,
help="The number of rows of A")
parser.add_argument(
"-n", type=int, default=80,
help="The number of columns of A")
parser.add_argument(
'-i', "--iteration", type=int, default=100,
help="The number of iterations.")
args, extra_args = parser.parse_known_args()
core.GlobalInit(['python'] + extra_args)
benchmark_mul_gradient(args)
|
scripts/measure_classifier_score.py | BrandoZhang/alis | 176 | 11110332 | import os
import argparse
import random
import pickle
from shutil import copyfile
from typing import Optional, Callable, List
from torchvision.datasets import VisionDataset
from torchvision.datasets.folder import pil_loader
import numpy as np
from PIL import Image
import torchvision.transforms.functional as TVF
from torchvision import transforms
import torch
from torchvision.models import wide_resnet50_2
from torch.utils.data import DataLoader
from tqdm import tqdm
import torch.nn.functional as F
class ImagePathsDataset(VisionDataset):
def __init__(self, img_paths: List[os.PathLike], transform: Callable):
self.transform = transform
self.imgs_paths = img_paths
def __len__(self):
return len(self.imgs_paths) * 2
    def __getitem__(self, idx: int):
        # Each source image yields two samples: its left half (label 0) and
        # its right half (label 1), which is why the dataset is twice as long.
        image = pil_loader(self.imgs_paths[idx // 2])
        image = self.transform(image)
        w = image.shape[2]
        if idx % 2 == 0:
            half = image[:, :, :w//2]
            y = 0
        else:
            half = image[:, :, w//2:]
            y = 1
        return {"img": half, "label": y}
@torch.no_grad()
def validate(model, dataloader):
model.eval()
accs = []
losses = []
for batch in dataloader:
img, label = batch['img'].to(device), batch['label'].to(device)
preds = model(img).squeeze(1)
loss = F.binary_cross_entropy_with_logits(preds, label.float())
acc = ((preds.sigmoid() > 0.5).long() == label).float().mean()
losses.append(loss.item())
accs.append(acc.item())
return np.mean(losses), np.mean(accs)
if __name__ == '__main__':
random.seed(42)
np.random.seed(42)
torch.manual_seed(42)
torch.cuda.manual_seed_all(42)
parser = argparse.ArgumentParser()
parser.add_argument('data_dir', type=str, help='Path to the dataset directory')
parser.add_argument('--train_ratio', type=float, default=0.15, help='Amount of training images')
parser.add_argument('--val_ratio', type=float, default=0.05, help='Amount of training images')
parser.add_argument('--num_epochs', type=int, default=10, help='Number of training epochs')
parser.add_argument('--batch_size', type=int, default=64, help='Batch size for training/inference')
args = parser.parse_args()
data_dir = args.data_dir
all_img_names = [f for f in os.listdir(data_dir) if os.path.splitext(f)[1].lower() in Image.EXTENSION]
random.shuffle(all_img_names)
NUM_TRAIN_IMGS = int(args.train_ratio * len(all_img_names))
NUM_VAL_IMGS = int(args.val_ratio * len(all_img_names))
img_paths_train = [os.path.join(data_dir, f) for f in all_img_names[:NUM_TRAIN_IMGS]]
img_paths_val = [os.path.join(data_dir, f) for f in all_img_names[NUM_TRAIN_IMGS:NUM_TRAIN_IMGS+NUM_VAL_IMGS]]
model = wide_resnet50_2(pretrained=True)
model.fc = torch.nn.Linear(2048, 1)
optim = torch.optim.Adam([
{'params': [p for n, p in model.named_parameters() if not n.startswith('fc.')], 'lr': 1e-5},
{'params': model.fc.parameters(), 'lr': 1e-4},
])
transform_train = transforms.Compose([
transforms.Resize(256, interpolation=Image.LANCZOS),
transforms.RandomHorizontalFlip(0.5),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
transform_val = transforms.Compose([
transforms.Resize(256, interpolation=Image.LANCZOS),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
dataset_train = ImagePathsDataset(img_paths_train, transform=transform_train)
dataset_val = ImagePathsDataset(img_paths_val, transform=transform_val)
batch_size = args.batch_size
dataloader_train = DataLoader(dataset_train, batch_size=batch_size, shuffle=True, num_workers=5)
dataloader_val = DataLoader(dataset_val, batch_size=batch_size, shuffle=False, num_workers=5)
device = 'cuda'
model = model.to(device)
total_num_epochs = args.num_epochs
for epoch in range(total_num_epochs):
pbar = tqdm(enumerate(dataloader_train), total=len(dataloader_train))
for i, batch in pbar:
model.train()
img, label = batch['img'].to(device), batch['label'].to(device)
preds = model(img).squeeze(1)
loss = F.binary_cross_entropy_with_logits(preds, label.float())
acc = ((preds.sigmoid() > 0.5).long() == label).float().mean()
optim.zero_grad()
loss.backward()
optim.step()
pbar.set_description(f'Epoch {epoch}. Loss: {loss.detach().item():.03f}. Acc: {acc.cpu().item():.03f}')
val_loss, val_acc = validate(model, dataloader_val)
print(f'Val loss: {val_loss:.03f}. Val acc: {val_acc: .03f}')
### Testing ###
img_paths_test = [os.path.join(data_dir, f) for f in all_img_names[NUM_TRAIN_IMGS+NUM_VAL_IMGS:]]
transform_test = transforms.Compose([
transforms.Resize(256, interpolation=Image.LANCZOS),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
dataset_test = ImagePathsDataset(img_paths_test, transform=transform_test)
dataloader_test = DataLoader(dataset_test, batch_size=args.batch_size, shuffle=False, num_workers=10)
scores = []
model.eval()
with torch.no_grad():
for batch in tqdm(dataloader_test):
img = batch['img'].to(device)
preds = model(img).sigmoid()
            # Score each image as the maximum over its left and right halves:
            # if even one side is easily recognizable, the image will be hard
            # to continue/connect seamlessly.
curr_scores = preds.view(-1, 2).max(dim=1)[0]
scores.extend(curr_scores.cpu().tolist())
assert len(scores) == len(img_paths_test)
print(f'[{data_dir}] Average score on the test set:', np.mean(scores))
save_dir = f'{data_dir}_spatial_inv'
dataset_name = os.path.basename(data_dir)
os.makedirs(save_dir, exist_ok=True)
### Preprocessing data and saving ###
with open(f'{save_dir}/{dataset_name}_scores.pkl', 'wb') as f:
pickle.dump(scores, f)
for threshold in [0.5, 0.7, 0.95, 0.99]:
final_img_paths = np.array(img_paths_test)[np.array(scores) < threshold].tolist()
target_dir = f'{save_dir}/{dataset_name}_t_{threshold}'
os.makedirs(target_dir, exist_ok=True)
for src_img_path in tqdm(final_img_paths):
trg_img_path = os.path.join(target_dir, os.path.basename(src_img_path))
copyfile(src_img_path, trg_img_path)
|
src/Testing/utils.py | CMYanko/Zope | 289 | 11110363 | <reponame>CMYanko/Zope
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
""" Some utility functions for unit tests
"""
import contextlib
import sys
@contextlib.contextmanager
def capture_stdout(file):
old_out = sys.stdout
sys.stdout = file
yield
sys.stdout = old_out
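# Minimal usage sketch added for illustration (not part of Zope's test utils):
# route print() output into an in-memory buffer for the duration of a test.
def _example_capture():
    import io
    buffer = io.StringIO()
    with capture_stdout(buffer):
        print("captured")
    return buffer.getvalue()  # "captured\n"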
|
gratipay/models/participant/packages.py | kant/gratipay.com | 517 | 11110382 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from gratipay.exceptions import NoPackages
class Packages(object):
def get_packages_for_claiming(self, manager):
"""Return a list of packages on the named ``manager`` for which the
participant has verified an email address on Gratipay, along with the
current package owner on Gratipay (if any).
:param string manager: the name of the package manager on which to look
for potential packages
:return: a list of (:py:class:`~gratipay.models.package.Package`,
:py:class:`Participant`, is_primary, email_address) tuples, where the
participant is the one who has already claimed the package (or ``None``),
and the email address is the single best match (primary, then
alphabetically first from among non-primary verified addresses)
"""
return self.db.all('''
WITH verified_email_addresses AS (
SELECT e.address
, e.address = p.email_address is_primary
FROM email_addresses e
LEFT JOIN participants p
ON p.id = e.participant_id
WHERE e.participant_id=%s
AND e.verified is true
)
SELECT pkg.*::packages package
, p.*::participants claimed_by
, (SELECT is_primary
FROM verified_email_addresses
WHERE address = ANY(emails)
ORDER BY is_primary DESC, address
LIMIT 1) email_address_is_primary
, (SELECT address
FROM verified_email_addresses
WHERE address = ANY(emails)
ORDER BY is_primary DESC, address
LIMIT 1) email_address
FROM packages pkg
LEFT JOIN teams_to_packages tp
ON pkg.id = tp.package_id
LEFT JOIN teams t
ON t.id = tp.team_id
LEFT JOIN participants p
ON t.owner = p.username
WHERE package_manager=%s
AND pkg.emails && array(SELECT address FROM verified_email_addresses)
ORDER BY email_address_is_primary DESC
, email_address ASC
, pkg.name ASC
''', (self.id, manager))
def start_package_claims(self, c, nonce, *packages):
"""Takes a cursor, nonce and list of packages, inserts into ``claims``
        and returns ``None`` (or raises :py:exc:`NoPackages`).
"""
if not packages:
raise NoPackages()
# We want to make a single db call to insert all claims, so we need to
# do a little SQL construction. Do it in such a way that we still avoid
# Python string interpolation (~= SQLi vector).
extra_sql, values = [], []
for p in packages:
extra_sql.append('(%s, %s)')
values += [nonce, p.id]
c.run('INSERT INTO claims (nonce, package_id) VALUES' + ', '.join(extra_sql), values)
self.app.add_event( c
, 'participant'
, dict( id=self.id
, action='start-claim'
, values=dict(package_ids=[p.id for p in packages])
)
)
def get_packages_claiming(self, cursor, nonce):
"""Given a nonce, return :py:class:`~gratipay.models.package.Package`
objects associated with it.
"""
return cursor.all("""
SELECT p.*::packages
FROM packages p
JOIN claims c
ON p.id = c.package_id
WHERE c.nonce=%s
ORDER BY p.name ASC
""", (nonce,))
def finish_package_claims(self, cursor, nonce, *packages):
"""Create teams if needed and associate them with the packages.
"""
if not packages:
raise NoPackages()
package_ids, teams, team_ids = [], [], []
for package in packages:
package_ids.append(package.id)
team = package.get_or_create_linked_team(cursor, self)
teams.append(team)
team_ids.append(team.id)
review_url = self.app.project_review_process.start(*teams)
cursor.run('DELETE FROM claims WHERE nonce=%s', (nonce,))
cursor.run('UPDATE teams SET review_url=%s WHERE id=ANY(%s)', (review_url, team_ids,))
self.app.add_event( cursor
, 'participant'
, dict( id=self.id
, action='finish-claim'
, values=dict(package_ids=package_ids)
)
)
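# Illustrative sketch, not part of the original module: one way the claim flow above
# could be driven end to end. The `participant`, `cursor`, `nonce`, and `manager`
# arguments are hypothetical stand-ins supplied by the caller, and the real
# application confirms claims asynchronously rather than in a single pass.
def _example_claim_flow(participant, cursor, nonce, manager='npm'):
    candidates = participant.get_packages_for_claiming(manager)
    unclaimed = [pkg for pkg, claimed_by, _, _ in candidates if claimed_by is None]
    if unclaimed:
        participant.start_package_claims(cursor, nonce, *unclaimed)
        pending = participant.get_packages_claiming(cursor, nonce)
        participant.finish_package_claims(cursor, nonce, *pending)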
|
nmtpytorch/vocabulary.py | tejas1995/nmtpytorch | 420 | 11110414 | <reponame>tejas1995/nmtpytorch<filename>nmtpytorch/vocabulary.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
import json
import pathlib
import logging
from collections import OrderedDict
logger = logging.getLogger('nmtpytorch')
class Vocabulary:
r"""Vocabulary class for integer<->token mapping.
Arguments:
fname (str): The filename of the JSON vocabulary file created by
`nmtpy-build-vocab` script.
short_list (int, optional): If > 0, only the most frequent `short_list`
items are kept in the vocabulary.
Attributes:
vocab (pathlib.Path): A :class:`pathlib.Path` instance holding the
filepath of the vocabulary file.
short_list (int): Short-list threshold.
freqs (dict): A dictionary which maps vocabulary strings to their
normalized frequency across the training set.
counts (dict): A dictionary which maps vocabulary strings to their
occurrence counts across the training set.
n_tokens (int): The final number of elements in the vocabulary.
has_bos (bool): `True` if the vocabulary has <bos> token.
has_eos (bool): `True` if the vocabulary has <eos> token.
has_pad (bool): `True` if the vocabulary has <pad> token.
has_unk (bool): `True` if the vocabulary has <unk> token.
Note:
The final instance can be easily queried in both directions with
bracket notation using integers and strings.
Example:
>>> vocab = Vocabulary('train.vocab.en')
>>> vocab['woman']
23
>>> vocab[23]
'woman'
Returns:
A :class:`Vocabulary` instance.
"""
TOKENS = {"<pad>": 0, "<bos>": 1, "<eos>": 2, "<unk>": 3}
def __init__(self, fname, short_list=0):
self.vocab = pathlib.Path(fname).expanduser()
self.short_list = short_list
self._map = None
self._imap = None
self.freqs = None
self.counts = None
self._allmap = None
self.n_tokens = None
# Load file
with open(self.vocab) as f:
data = json.load(f)
if self.short_list > 0:
# Get a slice of most frequent `short_list` items
data = dict(list(data.items())[:self.short_list])
self._map = {k: int(v.split()[0]) for k, v in data.items()}
self.counts = {k: int(v.split()[1]) for k, v in data.items()}
total_count = sum(self.counts.values())
self.freqs = {k: v / total_count for k, v in self.counts.items()}
# Sanity check for placeholder tokens
for tok, idx in self.TOKENS.items():
if self._map.get(tok, -1) != idx:
logger.info(f'{tok} not found in {self.vocab.name!r}')
setattr(self, f'has_{tok[1:-1]}', False)
else:
setattr(self, f'has_{tok[1:-1]}', True)
# Set # of tokens
self.n_tokens = len(self._map)
# Invert dictionary
self._imap = OrderedDict([(v, k) for k, v in self._map.items()])
# Merge forward and backward lookups into single dict for convenience
self._allmap = OrderedDict()
self._allmap.update(self._map)
self._allmap.update(self._imap)
assert len(self._allmap) == (len(self._map) + len(self._imap)), \
"Merged vocabulary size is not equal to sum of both."
def sent_to_idxs(self, line, explicit_bos=False, explicit_eos=True):
"""Convert from list of strings to list of token indices."""
tidxs = []
if explicit_bos and self.has_bos:
tidxs.append(self.TOKENS["<bos>"])
if self.has_unk:
for tok in line.split():
tidxs.append(self._map.get(tok, self.TOKENS["<unk>"]))
else:
# Remove unknown tokens from the words
for tok in line.split():
try:
tidxs.append(self._map[tok])
except KeyError as _:
# make this verbose and repetitive as this should be
# used cautiously only for some specific models
logger.info('No <unk> token, removing word from sentence')
if explicit_eos and self.has_eos:
tidxs.append(self.TOKENS["<eos>"])
return tidxs
def idxs_to_sent(self, idxs, debug=False):
r"""Converts list of integers to string representation.
Arguments:
idxs (list): Python list of integers as previously mapped from
string tokens by this instance.
debug (bool, optional): If `True`, the string representation
will go beyond and include the end-of-sentence token as well.
Returns:
A whitespace separated string representing the given list of integers.
"""
result = []
for idx in idxs:
if not debug and self.has_eos and idx == self.TOKENS["<eos>"]:
break
            result.append(self._imap.get(idx, "<unk>"))  # fall back to the literal token so join() gets a str
return " ".join(result)
def list_of_idxs_to_sents(self, lidxs):
r"""Converts list of list of integers to string representations. This is
handy for batched conversion after beam search for example.
Arguments:
lidxs(list): A list containing multiple lists of integers as
previously mapped from string tokens by this instance.
Returns:
A list of whitespace separated strings representing the given input.
"""
results = []
        unk = "<unk>"  # literal fallback token so join() below never receives an int
for idxs in lidxs:
result = []
for idx in idxs:
if idx == self.TOKENS["<eos>"]:
break
result.append(self._imap.get(idx, unk))
results.append(" ".join(result))
return results
def __getitem__(self, key):
return self._allmap[key]
def __len__(self):
return len(self._map)
def __repr__(self):
return f"Vocabulary of {self.n_tokens} items ({self.vocab.name!r})"
|
exercises/zh/test_01_08_01.py | Jette16/spacy-course | 2,085 | 11110416 | def test():
assert (
"token_text = token.text" in __solution__
), "你有正确拿到词符的文本吗?"
assert (
"token_pos = token.pos_" in __solution__
), "你有正确拿到词符的词性标注了吗?记着要用带下划线的属性。"
assert (
"token_dep = token.dep_" in __solution__
), "你有正确拿到词符的依存关系标签了吗?记着要用带下划线的属性。"
__msg__.good("完美!")
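# Illustrative sketch, not part of the original test: the kind of solution snippet the
# assertions above look for, assuming `doc` is a processed spaCy Doc.
def _example_solution(doc):
    for token in doc:
        token_text = token.text
        token_pos = token.pos_
        token_dep = token.dep_
        print(f"{token_text:<12}{token_pos:<10}{token_dep:<10}")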
|
deprecated_gallery/ext2.py | guyongqiangx/construct | 629 | 11110487 | <filename>deprecated_gallery/ext2.py
"""
Extension 2 (ext2) used in Linux systems
"""
from construct import *
Char = SLInt8
UChar = ULInt8
Short = SLInt16
UShort = ULInt16
Long = SLInt32
ULong = ULInt32
BlockPointer = Struct(
"block_number" / ULong,
# WARNING: unnamed field?
OnDemandPointer(this.block_number),
)
superblock = Struct(
"inodes_count" / ULong,
"blocks_count" / ULong,
"reserved_blocks_count" / ULong,
"free_blocks_count" / ULong,
"free_inodes_count" / ULong,
"first_data_block" / ULong,
"log_block_size" / Enum(ULong,
OneKB = 0,
TwoKB = 1,
FourKB = 2,
),
"log_frag_size" / Long,
"blocks_per_group" / ULong,
"frags_per_group" / ULong,
"inodes_per_group" / ULong,
"mtime" / ULong,
"wtime" / ULong,
"mnt_count" / UShort,
"max_mnt_count" / Short,
"magic" / Const(UShort, 0xEF53),
"state" / UShort,
"errors" / UShort,
Padding(2),
"lastcheck" / ULong,
"checkinterval" / ULong,
"creator_os" / ULong,
"rev_level" / ULong,
Padding(235*4),
)
group_descriptor = Struct(
"block_bitmap" / ULong,
"inode_bitmap" / ULong,
"inode_table" / ULong,
"free_blocks_count" / UShort,
"free_inodes_count" / UShort,
"used_dirs_count" / UShort,
Padding(14),
)
inode = Struct(
"mode" / FlagsEnum(UShort,
IXOTH = 0x0001,
IWOTH = 0x0002,
IROTH = 0x0004,
IRWXO = 0x0007,
IXGRP = 0x0008,
IWGRP = 0x0010,
IRGRP = 0x0020,
IRWXG = 0x0038,
IXUSR = 0x0040,
IWUSR = 0x0080,
IRUSR = 0x0100,
IRWXU = 0x01C0,
ISVTX = 0x0200,
ISGID = 0x0400,
ISUID = 0x0800,
IFIFO = 0x1000,
IFCHR = 0x2000,
IFDIR = 0x4000,
IFBLK = 0x6000,
IFREG = 0x8000,
IFLNK = 0xC000,
IFSOCK = 0xA000,
IFMT = 0xF000,
),
"uid" / UShort,
"size" / ULong,
"atime" / ULong,
"ctime" / ULong,
"mtime" / ULong,
"dtime" / ULong,
"gid" / UShort,
"links_count" / UShort,
"blocks" / ULong,
"flags" / FlagsEnum(ULong,
SecureDelete = 0x0001,
AllowUndelete = 0x0002,
Compressed = 0x0004,
Synchronous = 0x0008,
),
Padding(4),
# WARNING: doubled name
"blocks" / ULong[12],
"indirect1_block" / ULong,
"indirect2_block" / ULong,
"indirect3_block" / ULong,
"version" / ULong,
"file_acl" / ULong,
"dir_acl" / ULong,
"faddr" / ULong,
"frag" / UChar,
"fsize" / UChar,
Padding(10),
)
# special inodes
EXT2_BAD_INO = 1
EXT2_ROOT_INO = 2
EXT2_ACL_IDX_INO = 3
EXT2_ACL_DATA_INO = 4
EXT2_BOOT_LOADER_INO = 5
EXT2_UNDEL_DIR_INO = 6
EXT2_FIRST_INO = 11
directory_record = Struct(
"inode" / ULong,
"rec_length" / UShort,
"name_length" / UShort,
"name" / Bytes(this.name_length),
Padding(this.rec_length - this.name_length),
)
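# Usage sketch, not part of the original gallery file: parse the superblock of a raw
# ext2 image. The 1024-byte offset is where ext2 places its superblock; 'disk.img'
# is an assumed filename.
def _example_read_superblock(path='disk.img'):
    with open(path, 'rb') as f:
        f.seek(1024)
        raw = f.read(1024)
    sb = superblock.parse(raw)
    print(sb.inodes_count, sb.blocks_count, sb.log_block_size)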
|
lldb/packages/Python/lldbsuite/test/lang/objc/objc-new-syntax/TestObjCNewSyntaxArray.py | medismailben/llvm-project | 305 | 11110488 | """Test that the Objective-C syntax for dictionary/array literals and indexing works"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
from ObjCNewSyntaxTest import ObjCNewSyntaxTest
class ObjCNewSyntaxTestCaseArray(ObjCNewSyntaxTest):
@skipUnlessDarwin
@skipIf(macos_version=["<", "10.12"])
@expectedFailureAll(archs=["i[3-6]86"])
def test_read_array(self):
self.runToBreakpoint()
self.expect(
"expr --object-description -- immutable_array[0]",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=["foo"])
self.expect(
"expr --object-description -- mutable_array[0]",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=["foo"])
@skipUnlessDarwin
@skipIf(macos_version=["<", "10.12"])
@expectedFailureAll(archs=["i[3-6]86"])
def test_update_array(self):
self.runToBreakpoint()
self.expect(
"expr --object-description -- mutable_array[0] = @\"bar\"",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=["bar"])
self.expect(
"expr --object-description -- mutable_array[0]",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=["bar"])
@skipUnlessDarwin
@skipIf(macos_version=["<", "10.12"])
@expectedFailureAll(archs=["i[3-6]86"])
def test_array_literal(self):
self.runToBreakpoint()
self.expect(
"expr --object-description -- @[ @\"foo\", @\"bar\" ]",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
"NSArray",
"foo",
"bar"])
|
tfne/encodings/codeepneat/modules/codeepneat_module_densedropout.py | githealthy18/Tensorflow-Neuroevolution | 121 | 11110559 | from __future__ import annotations
import math
import random
import statistics
import numpy as np
import tensorflow as tf
from .codeepneat_module_base import CoDeepNEATModuleBase
from tfne.helper_functions import round_with_step
class CoDeepNEATModuleDenseDropout(CoDeepNEATModuleBase):
"""
TFNE CoDeepNEAT module encapsulating a Dense layer followed by an optional Dropout layer. No Downsampling layer
defined.
"""
def __init__(self,
config_params,
module_id,
parent_mutation,
dtype,
merge_method=None,
units=None,
activation=None,
kernel_init=None,
bias_init=None,
dropout_flag=None,
dropout_rate=None,
self_initialization_flag=False):
"""
Create module by storing supplied parameters. If self initialization flag is supplied, randomly initialize the
module parameters based on the range of parameters allowed by config_params
@param config_params: dict of the module parameter range supplied via config
@param module_id: int of unique module ID
@param parent_mutation: dict summarizing the mutation of the parent module
@param dtype: string of deserializable TF dtype
@param merge_method: dict representing a TF deserializable merge layer
@param units: see TF documentation
@param activation: see TF documentation
@param kernel_init: see TF documentation
@param bias_init: see TF documentation
@param dropout_flag: see TF documentation
@param dropout_rate: see TF documentation
@param self_initialization_flag: bool flag indicating if all module parameters should be randomly initialized
"""
# Register the implementation specifics by calling parent class
super().__init__(config_params, module_id, parent_mutation, dtype)
# Register the module parameters
self.merge_method = merge_method
self.units = units
self.activation = activation
self.kernel_init = kernel_init
self.bias_init = bias_init
self.dropout_flag = dropout_flag
self.dropout_rate = dropout_rate
# If self initialization flag is provided, initialize the module parameters as they are currently set to None
if self_initialization_flag:
self._initialize()
def __str__(self) -> str:
"""
@return: string representation of the module
"""
return "CoDeepNEAT DENSE Module | ID: {:>6} | Fitness: {:>6} | Units: {:>4} | Activ: {:>6} | Dropout: {:>4}" \
.format('#' + str(self.module_id),
self.fitness,
self.units,
self.activation,
"None" if self.dropout_flag is False else self.dropout_rate)
def _initialize(self):
"""
Randomly initialize all parameters of the module based on the range of parameters allowed by the config_params
variable.
"""
# Uniform randomly set module parameters
self.merge_method = random.choice(self.config_params['merge_method'])
self.merge_method['config']['dtype'] = self.dtype
random_units = random.randint(self.config_params['units']['min'],
self.config_params['units']['max'])
self.units = round_with_step(random_units,
self.config_params['units']['min'],
self.config_params['units']['max'],
self.config_params['units']['step'])
self.activation = random.choice(self.config_params['activation'])
self.kernel_init = random.choice(self.config_params['kernel_init'])
self.bias_init = random.choice(self.config_params['bias_init'])
self.dropout_flag = random.random() < self.config_params['dropout_flag']
random_dropout_rate = random.uniform(self.config_params['dropout_rate']['min'],
self.config_params['dropout_rate']['max'])
self.dropout_rate = round_with_step(random_dropout_rate,
self.config_params['dropout_rate']['min'],
self.config_params['dropout_rate']['max'],
self.config_params['dropout_rate']['step'])
def create_module_layers(self) -> (tf.keras.layers.Layer, ...):
"""
Instantiate TF layers with their respective configuration that are represented by the current module
configuration. Return the instantiated module layers in their respective order as a tuple.
@return: tuple of instantiated TF layers represented by the module configuration.
"""
# Create the basic keras Dense layer, needed in all variants of the module
dense_layer = tf.keras.layers.Dense(units=self.units,
activation=self.activation,
kernel_initializer=self.kernel_init,
bias_initializer=self.bias_init,
dtype=self.dtype)
        # If no dropout flag is present, return only the created dense layer as an iterable. If the dropout flag is
        # present, return the dense layer together with the dropout layer
if not self.dropout_flag:
return (dense_layer,)
else:
dropout_layer = tf.keras.layers.Dropout(rate=self.dropout_rate,
dtype=self.dtype)
return dense_layer, dropout_layer
def create_downsampling_layer(self, in_shape, out_shape) -> tf.keras.layers.Layer:
""""""
raise NotImplementedError("Downsampling has not yet been implemented for DenseDropout Modules")
def create_mutation(self,
offspring_id,
max_degree_of_mutation) -> CoDeepNEATModuleDenseDropout:
"""
Create mutated DenseDropout module and return it. Categorical parameters are chosen randomly from all available
values. Sortable parameters are perturbed through a random normal distribution with the current value as mean
and the config specified stddev
@param offspring_id: int of unique module ID of the offspring
@param max_degree_of_mutation: float between 0 and 1 specifying the maximum degree of mutation
@return: instantiated DenseDropout module with mutated parameters
"""
# Copy the parameters of this parent module for the parameters of the offspring
offspring_params = {'merge_method': self.merge_method,
'units': self.units,
'activation': self.activation,
'kernel_init': self.kernel_init,
'bias_init': self.bias_init,
'dropout_flag': self.dropout_flag,
'dropout_rate': self.dropout_rate}
        # Create the dict that keeps track of the mutations occurring for the offspring
parent_mutation = {'parent_id': self.module_id,
'mutation': 'mutation',
'mutated_params': dict()}
# Determine exact integer amount of parameters to be mutated, though minimum is 1
param_mutation_count = math.ceil(max_degree_of_mutation * 7)
# Uniform randomly choose the parameters to be mutated
parameters_to_mutate = random.sample(range(7), k=param_mutation_count)
# Mutate offspring parameters. Categorical parameters are chosen randomly from all available values. Sortable
# parameters are perturbed through a random normal distribution with the current value as mean and the config
# specified stddev
for param_to_mutate in parameters_to_mutate:
if param_to_mutate == 0:
offspring_params['merge_method'] = random.choice(self.config_params['merge_method'])
parent_mutation['mutated_params']['merge_method'] = self.merge_method
elif param_to_mutate == 1:
perturbed_units = int(np.random.normal(loc=self.units,
scale=self.config_params['units']['stddev']))
offspring_params['units'] = round_with_step(perturbed_units,
self.config_params['units']['min'],
self.config_params['units']['max'],
self.config_params['units']['step'])
parent_mutation['mutated_params']['units'] = self.units
elif param_to_mutate == 2:
offspring_params['activation'] = random.choice(self.config_params['activation'])
parent_mutation['mutated_params']['activation'] = self.activation
elif param_to_mutate == 3:
offspring_params['kernel_init'] = random.choice(self.config_params['kernel_init'])
parent_mutation['mutated_params']['kernel_init'] = self.kernel_init
elif param_to_mutate == 4:
offspring_params['bias_init'] = random.choice(self.config_params['bias_init'])
parent_mutation['mutated_params']['bias_init'] = self.bias_init
elif param_to_mutate == 5:
offspring_params['dropout_flag'] = not self.dropout_flag
parent_mutation['mutated_params']['dropout_flag'] = self.dropout_flag
else: # param_to_mutate == 6:
perturbed_dropout_rate = np.random.normal(loc=self.dropout_rate,
scale=self.config_params['dropout_rate']['stddev'])
offspring_params['dropout_rate'] = round_with_step(perturbed_dropout_rate,
self.config_params['dropout_rate']['min'],
self.config_params['dropout_rate']['max'],
self.config_params['dropout_rate']['step'])
parent_mutation['mutated_params']['dropout_rate'] = self.dropout_rate
return CoDeepNEATModuleDenseDropout(config_params=self.config_params,
module_id=offspring_id,
parent_mutation=parent_mutation,
dtype=self.dtype,
**offspring_params)
def create_crossover(self,
offspring_id,
less_fit_module,
max_degree_of_mutation) -> CoDeepNEATModuleDenseDropout:
"""
Create crossed over DenseDropout module and return it. Carry over parameters of fitter parent for categorical
parameters and calculate parameter average between both modules for sortable parameters
@param offspring_id: int of unique module ID of the offspring
@param less_fit_module: second DenseDropout module with lower fitness
@param max_degree_of_mutation: float between 0 and 1 specifying the maximum degree of mutation
@return: instantiated DenseDropout module with crossed over parameters
"""
# Create offspring parameters by carrying over parameters of fitter parent for categorical parameters and
# calculating parameter average between both modules for sortable parameters
offspring_params = dict()
        # Create the dict that keeps track of the mutations occurring for the offspring
parent_mutation = {'parent_id': (self.module_id, less_fit_module.get_id()),
'mutation': 'crossover'}
offspring_params['merge_method'] = self.merge_method
offspring_params['units'] = round_with_step(int((self.units + less_fit_module.units) / 2),
self.config_params['units']['min'],
self.config_params['units']['max'],
self.config_params['units']['step'])
offspring_params['activation'] = self.activation
offspring_params['kernel_init'] = self.kernel_init
offspring_params['bias_init'] = self.bias_init
offspring_params['dropout_flag'] = self.dropout_flag
offspring_params['dropout_rate'] = round_with_step((self.dropout_rate + less_fit_module.dropout_rate) / 2,
self.config_params['dropout_rate']['min'],
self.config_params['dropout_rate']['max'],
self.config_params['dropout_rate']['step'])
return CoDeepNEATModuleDenseDropout(config_params=self.config_params,
module_id=offspring_id,
parent_mutation=parent_mutation,
dtype=self.dtype,
**offspring_params)
def serialize(self) -> dict:
"""
@return: serialized constructor variables of the module as json compatible dict
"""
return {
'module_type': self.get_module_type(),
'module_id': self.module_id,
'parent_mutation': self.parent_mutation,
'merge_method': self.merge_method,
'units': self.units,
'activation': self.activation,
'kernel_init': self.kernel_init,
'bias_init': self.bias_init,
'dropout_flag': self.dropout_flag,
'dropout_rate': self.dropout_rate
}
def get_distance(self, other_module) -> float:
"""
Calculate distance between 2 DenseDropout modules by inspecting each parameter, calculating the congruence
        between each and eventually averaging out the congruences. The distance is returned as the average
        congruence's distance to 1.0. The congruence of continuous parameters is calculated by their relative distance.
        The congruence of categorical parameters is either 1.0 if they are the same or 1 divided by the number
        of possible values for that specific parameter. Return the calculated distance.
@param other_module: second DenseDropout module to which the distance has to be calculated
@return: float between 0 and 1. High values indicating difference, low values indicating similarity
"""
congruence_list = list()
if self.merge_method == other_module.merge_method:
congruence_list.append(1.0)
else:
congruence_list.append(1 / len(self.config_params['merge_method']))
if self.units >= other_module.units:
congruence_list.append(other_module.units / self.units)
else:
congruence_list.append(self.units / other_module.units)
if self.activation == other_module.activation:
congruence_list.append(1.0)
else:
congruence_list.append(1 / len(self.config_params['activation']))
if self.kernel_init == other_module.kernel_init:
congruence_list.append(1.0)
else:
congruence_list.append(1 / len(self.config_params['kernel_init']))
if self.bias_init == other_module.bias_init:
congruence_list.append(1.0)
else:
congruence_list.append(1 / len(self.config_params['bias_init']))
congruence_list.append(abs(self.dropout_flag - other_module.dropout_flag))
if self.dropout_rate >= other_module.dropout_rate:
congruence_list.append(other_module.dropout_rate / self.dropout_rate)
else:
congruence_list.append(self.dropout_rate / other_module.dropout_rate)
# Return the distance as the distance of the average congruence to the perfect congruence of 1.0
return round(1.0 - statistics.mean(congruence_list), 4)
def get_module_type(self) -> str:
""""""
return 'DenseDropout'
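# Illustrative sketch, not part of the original module: a minimal config_params dict
# covering the keys read above, used to self-initialize a random module and build its
# Keras layers. All concrete values here are assumptions, not TFNE defaults.
def _example_random_dense_module():
    config_params = {
        'merge_method': [{'class_name': 'Concatenate', 'config': {}}],
        'units': {'min': 16, 'max': 256, 'step': 16, 'stddev': 32},
        'activation': ['relu', 'tanh'],
        'kernel_init': ['glorot_uniform'],
        'bias_init': ['zeros'],
        'dropout_flag': 0.5,
        'dropout_rate': {'min': 0.1, 'max': 0.5, 'step': 0.1, 'stddev': 0.1},
    }
    module = CoDeepNEATModuleDenseDropout(config_params=config_params,
                                          module_id=1,
                                          parent_mutation={'parent_id': None, 'mutation': 'init'},
                                          dtype='float32',
                                          self_initialization_flag=True)
    return module.create_module_layers()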
|
tools/bin/gppylib/test/regress/test_regress_gpssh.py | YangHao666666/hawq | 450 | 11110598 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os, signal, time, re
import unittest2 as unittest
import psi.process, subprocess
class GpsshTestCase(unittest.TestCase):
# return count of stranded ssh processes
def searchForProcessOrChildren(self):
euid = os.getuid()
count = 0
for p in psi.process.ProcessTable().values():
if p.euid != euid:
continue
if not re.search('ssh', p.command):
continue
if p.ppid != 1:
continue
count += 1
return count
def test00_gpssh_sighup(self):
"""Verify that gppsh handles sighup
and terminates cleanly.
"""
before_count = self.searchForProcessOrChildren()
p = subprocess.Popen("gpssh -h localhost", shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
pid = p.pid
time.sleep(3)
try:
os.kill(int(pid), signal.SIGHUP)
except Exception:
pass
max_attempts = 6
for i in range(max_attempts):
after_count = self.searchForProcessOrChildren()
error_count = after_count - before_count
if error_count:
if (i + 1) == max_attempts:
self.fail("Found %d new stranded gpssh processes after issuing sig HUP" % error_count)
time.sleep(.5)
if __name__ == "__main__":
unittest.main()
|
vumi/transports/tests/helpers.py | seidu626/vumi | 199 | 11110615 | <reponame>seidu626/vumi
from twisted.internet.defer import inlineCallbacks
from zope.interface import implements
from vumi.transports.failures import FailureMessage
from vumi.tests.helpers import (
MessageHelper, WorkerHelper, PersistenceHelper, MessageDispatchHelper,
generate_proxies, IHelper,
)
class TransportHelper(object):
"""
Test helper for transport workers.
    This helper constructs and wraps several lower-level helpers and provides
higher-level functionality for transport tests.
:param transport_class:
The worker class for the transport being tested.
:param bool use_riak:
Set to ``True`` if the test requires Riak. This is passed to the
underlying :class:`~vumi.tests.helpers.PersistenceHelper`.
:param \**msg_helper_args:
All other keyword params are passed to the underlying
:class:`~vumi.tests.helpers.MessageHelper`.
"""
implements(IHelper)
def __init__(self, transport_class, use_riak=False, **msg_helper_args):
self.transport_class = transport_class
self.persistence_helper = PersistenceHelper(use_riak=use_riak)
self.msg_helper = MessageHelper(**msg_helper_args)
self.transport_name = self.msg_helper.transport_name
self.worker_helper = WorkerHelper(
connector_name=self.transport_name,
status_connector_name="%s.status" % (self.transport_name,))
self.dispatch_helper = MessageDispatchHelper(
self.msg_helper, self.worker_helper)
# Proxy methods from our helpers.
generate_proxies(self, self.msg_helper)
generate_proxies(self, self.worker_helper)
generate_proxies(self, self.dispatch_helper)
generate_proxies(self, self.persistence_helper)
def setup(self):
self.persistence_helper.setup()
self.worker_helper.setup()
@inlineCallbacks
def cleanup(self):
yield self.worker_helper.cleanup()
yield self.persistence_helper.cleanup()
def get_transport(self, config, cls=None, start=True):
"""
Get an instance of a transport class.
:param config: Config dict.
:param cls: The transport class to instantiate.
Defaults to :attr:`transport_class`
:param start: True to start the transport (default), False otherwise.
Some default config values are helpfully provided in the
interests of reducing boilerplate:
* ``transport_name`` defaults to :attr:`self.transport_name`
"""
if cls is None:
cls = self.transport_class
config = self.mk_config(config)
config.setdefault('transport_name', self.transport_name)
return self.get_worker(cls, config, start)
def get_dispatched_failures(self, connector_name=None):
"""
Get failures dispatched by a transport.
:param str connector_name:
Connector name. If ``None``, the default connector name for the
helper instance will be used.
:returns:
A list of :class:`~vumi.transports.failures.FailureMessage`
instances.
"""
return self.get_dispatched(connector_name, 'failures', FailureMessage)
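# Usage sketch, not part of the original module, assuming the usual VumiTestCase
# conventions; `MyTransport` is a hypothetical transport class:
#
#     class MyTransportTestCase(VumiTestCase):
#         def setUp(self):
#             self.tx_helper = self.add_helper(TransportHelper(MyTransport))
#
#         @inlineCallbacks
#         def test_startup(self):
#             transport = yield self.tx_helper.get_transport({})
#             self.assertEqual(transport.transport_name,
#                              self.tx_helper.transport_name)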
|
data/tracking/sampler/_sampling_algos/stateful/sequential.py | zhangzhengde0225/SwinTrack | 143 | 11110648 | <reponame>zhangzhengde0225/SwinTrack
class SamplingAlgo_SequentialSampling:
def __init__(self, length):
assert length > 0
self.position = -1
self.length_ = length
def __getstate__(self):
return self.position, self.length_
def __setstate__(self, state):
position, length = state
self.position = position
self.length_ = length
def move_next(self):
self.position += 1
return self.position < self.length_
def current(self):
if not 0 <= self.position < self.length_:
raise IndexError
return self.position
def reset(self):
self.position = -1
def length(self):
return self.length_
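# Usage sketch, not part of the original module: iterate the sampler, snapshot its
# state via pickle part-way through, and resume from the restored copy.
if __name__ == '__main__':
    import pickle
    sampler = SamplingAlgo_SequentialSampling(5)
    snapshot = None
    while sampler.move_next():
        if sampler.current() == 2:
            snapshot = pickle.dumps(sampler)
    resumed = pickle.loads(snapshot)
    remaining = []
    while resumed.move_next():
        remaining.append(resumed.current())
    assert remaining == [3, 4]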
|
tests/test_modeling_gpt2.py | legacyai/tf-transformers | 116 | 11110666 | <filename>tests/test_modeling_gpt2.py
# coding=utf-8
# Copyright 2021 TF-Transformers Authors.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test GPT2 Models"""
import unittest
import tensorflow as tf
import tempfile
import shutil
from absl import logging
from transformers import GPT2TokenizerFast as Tokenizer
from tf_transformers.text import TextDecoder, TextDecoderSerializable
from tf_transformers.models import GPT2Model as Model
logging.get_absl_logger().name = "gpt2_testing"
MODEL_NAME = 'gpt2'
class ModelTest(unittest.TestCase):
@classmethod
def setUpClass(self):
print("--------------------setUP--------------------------------------")
self.model = Model.from_pretrained(MODEL_NAME)
self.model_ar = Model.from_pretrained(MODEL_NAME, use_auto_regressive=True)
self.tokenizer = Tokenizer.from_pretrained(MODEL_NAME)
# @unittest.skip
def test_tf_conversion(self):
import shutil
try:
shutil.rmtree("/tmp/tf_transformers_cache/{}".format(MODEL_NAME))
except:
pass
_ = Model.from_pretrained(MODEL_NAME, convert_fn_type='tf')
logging.info("Test: TF Conversion. ✅")
# @unittest.skip
def test_pt_conversion(self):
import shutil
try:
shutil.rmtree("/tmp/tf_transformers_cache/{}".format(MODEL_NAME))
except:
pass
_ = Model.from_pretrained(MODEL_NAME, convert_fn_type='pt')
logging.info("Test: PT Conversion. ✅")
# @unittest.skip
def test_auto_regressive(self):
"""Test Text Generation using Non Cache and Cached"""
text = "<NAME> is one of the finest"
inputs_tf = self.tokenizer(text, return_tensors="tf")
inputs = {}
inputs["input_ids"] = inputs_tf["input_ids"]
predictions_non_auto_regressive = []
predictions_prob_non_auto_regressive = []
for i in range(10):
outputs = self.model(inputs)
predicted_ids = tf.cast(tf.expand_dims(tf.argmax(outputs["last_token_logits"], axis=1), 1), tf.int32)
inputs["input_ids"] = tf.concat([inputs["input_ids"], predicted_ids], axis=1)
predictions_non_auto_regressive.append(predicted_ids)
predictions_prob_non_auto_regressive.append(
tf.expand_dims(tf.reduce_max(outputs["last_token_logits"], axis=1), 1)
)
predictions_non_auto_regressive = tf.concat(predictions_non_auto_regressive, axis=1)
predictions_prob_non_auto_regressive = tf.concat(predictions_prob_non_auto_regressive, axis=1)
# -------------------------------------------------------------------------------------------- # noqa
# Cached
inputs_tf = self.tokenizer(text, return_tensors="tf")
inputs = {}
inputs["input_ids"] = inputs_tf["input_ids"]
seq_length = tf.shape(inputs["input_ids"])[1]
batch_size = tf.shape(inputs["input_ids"])[0]
inputs["all_cache_key"] = tf.zeros((12, batch_size, 12, seq_length, 64))
inputs["all_cache_value"] = tf.zeros((12, batch_size, 12, seq_length, 64))
inputs["past_length"] = tf.zeros(shape=(1, batch_size), dtype=tf.int32)
predictions_auto_regressive = []
predictions_prob_auto_regressive = []
past_lengths = []
for i in range(10):
outputs = self.model_ar(inputs)
predicted_ids = tf.cast(tf.expand_dims(tf.argmax(outputs["last_token_logits"], axis=1), 1), tf.int32)
inputs["input_ids"] = predicted_ids
inputs["all_cache_key"] = outputs["all_cache_key"]
inputs["all_cache_value"] = outputs["all_cache_value"]
inputs["past_length"] = outputs["past_length"]
past_lengths.append(inputs["past_length"])
predictions_auto_regressive.append(predicted_ids)
predictions_prob_auto_regressive.append(
tf.expand_dims(tf.reduce_max(outputs["last_token_logits"], axis=1), 1)
)
predictions_auto_regressive = tf.concat(predictions_auto_regressive, axis=1)
predictions_prob_auto_regressive = tf.concat(predictions_prob_auto_regressive, axis=1)
# Assert predictions
tf.debugging.assert_near(predictions_prob_auto_regressive, predictions_prob_non_auto_regressive, rtol=1.0)
tf.debugging.assert_equal(predictions_auto_regressive, predictions_non_auto_regressive)
logging.info("Test: Successful Auto Regressive Encoder. ✅")
# @unittest.skip
def test_auto_regressive_batch(self):
"""Test Batch Text Generation Auto Regressive"""
text = ['<NAME> is one of the finest', 'I love stars because']
# -1 is important
input_ids = tf.ragged.constant(self.tokenizer(text)["input_ids"]).to_tensor(-1)
inputs = {}
inputs["input_ids"] = input_ids
seq_length = tf.shape(inputs["input_ids"])[1]
batch_size = tf.shape(inputs["input_ids"])[0]
inputs["all_cache_key"] = tf.zeros((12, batch_size, 12, seq_length, 64))
inputs["all_cache_value"] = tf.zeros((12, batch_size, 12, seq_length, 64))
inputs["past_length"] = tf.zeros(shape=(1, batch_size), dtype=tf.int32)
predictions_auto_regressive = []
predictions_prob_auto_regressive = []
past_lengths = []
for i in range(10):
outputs = self.model_ar(inputs)
predicted_ids = tf.cast(tf.expand_dims(tf.argmax(outputs["last_token_logits"], axis=1), 1), tf.int32)
if i == 0:
masks = tf.cast(tf.not_equal(input_ids, -1), tf.float32)
masks = tf.reshape(masks, (1, batch_size, 1, seq_length, 1),)
outputs["all_cache_key"] = outputs["all_cache_key"] * masks
outputs["all_cache_value"] = outputs["all_cache_value"] * masks
inputs["input_ids"] = predicted_ids
inputs["all_cache_key"] = outputs["all_cache_key"]
inputs["all_cache_value"] = outputs["all_cache_value"]
inputs["past_length"] = outputs["past_length"]
past_lengths.append(inputs["past_length"])
predictions_auto_regressive.append(predicted_ids)
predictions_prob_auto_regressive.append(
tf.expand_dims(tf.reduce_max(outputs["last_token_logits"], axis=1), 1)
)
predictions_auto_regressive = tf.concat(predictions_auto_regressive, axis=1)
predictions_prob_auto_regressive = tf.concat(predictions_prob_auto_regressive, axis=1)
expected_prediction = [
[1938, 287, 262, 995, 13, 679, 318, 257, 845, 922],
[484, 821, 523, 881, 517, 621, 655, 257, 3491, 13],
]
expected_probs = [
[
-110.00343322753906,
-84.10372161865234,
-60.758541107177734,
-94.87692260742188,
-72.66572570800781,
-124.67924499511719,
-100.1087417602539,
-103.07884216308594,
-108.038330078125,
-108.75567626953125,
],
[
-92.4664535522461,
-122.232177734375,
-114.12687683105469,
-110.21340942382812,
-106.74520111083984,
-108.79459381103516,
-89.76094055175781,
-84.4063720703125,
-102.25302124023438,
-78.72990417480469,
],
]
tf.debugging.assert_equal(predictions_auto_regressive.numpy().tolist(), expected_prediction)
tf.debugging.assert_near(predictions_prob_auto_regressive.numpy().tolist(), expected_probs)
logging.info("Test: Successful Batch Auto Regressive Encoder. ✅")
# @unittest.skip
def test_auto_regressive_saved_model_greedy(self):
"""Test Auto Regressive using Decoder Saved Model"""
text = ['<NAME> is one of the finest', 'I love stars because']
dirpath = tempfile.mkdtemp()
saved_model_dir = "{}/model_pb".format(dirpath)
self.model_ar.save_as_serialize_module(saved_model_dir, overwrite=True)
# Load saved model .
loaded = tf.saved_model.load(saved_model_dir)
decoder = TextDecoder(model=loaded)
# Pad -1
input_ids = tf.ragged.constant(self.tokenizer(text)["input_ids"]).to_tensor(-1)
inputs = {}
inputs["input_ids"] = input_ids
decoder_results = decoder.decode(inputs, mode="greedy", max_iterations=10, eos_id=-100)
predicted_ids = decoder_results["predicted_ids"].numpy().tolist()
expected_ids = [
[1938, 287, 262, 995, 13, 679, 318, 257, 845, 922],
[484, 821, 523, 881, 517, 621, 655, 257, 3491, 13],
]
tf.debugging.assert_equal(predicted_ids, expected_ids)
shutil.rmtree(dirpath)
logging.info("Test: Successful Auto Regressive Saved Model Greedy. ✅")
# @unittest.skip
def test_auto_regressive_keras_model_greedy(self):
"""Test Auto Regressive using Decoder Keras Model"""
text = ['<NAME> is one of the finest', 'I love stars because']
decoder = TextDecoder(model=self.model_ar)
# Pad -1
input_ids = tf.ragged.constant(self.tokenizer(text)["input_ids"]).to_tensor(-1)
inputs = {}
inputs["input_ids"] = input_ids
decoder_results = decoder.decode(inputs, mode="greedy", max_iterations=10, eos_id=-100)
predicted_ids = decoder_results["predicted_ids"].numpy().tolist()
expected_ids = [
[1938, 287, 262, 995, 13, 679, 318, 257, 845, 922],
[484, 821, 523, 881, 517, 621, 655, 257, 3491, 13],
]
tf.debugging.assert_equal(predicted_ids, expected_ids)
logging.info("Test: Successful Auto Regressive Keras Model Greedy. ✅")
# @unittest.skip
def test_auto_regressive_saved_model_beam(self):
"""Test Auto Regressive using Decoder Saved Model"""
text = ['<NAME> is one of the finest', 'I love stars because']
dirpath = tempfile.mkdtemp()
saved_model_dir = "{}/model_pb".format(dirpath)
self.model_ar.save_as_serialize_module(saved_model_dir, overwrite=True)
# Load saved model .
loaded = tf.saved_model.load(saved_model_dir)
decoder = TextDecoder(model=loaded)
# Pad -1
input_ids = tf.ragged.constant(self.tokenizer(text)["input_ids"]).to_tensor(-1)
inputs = {}
inputs["input_ids"] = input_ids
decoder_results = decoder.decode(inputs, mode="beam", num_beams=3, max_iterations=10, eos_id=-100)
predicted_ids = decoder_results["predicted_ids"].numpy().tolist()
print("Predicted ids beam", predicted_ids)
expected_ids = [
[[19553, 3653, 287, 262, 995, 13, 679, 318, 530, 286]],
[[484, 821, 262, 691, 1517, 326, 6067, 284, 502, 13]],
]
# tf.debugging.assert_equal(predicted_ids, expected_ids)
shutil.rmtree(dirpath)
logging.info("Test: Successful Auto Regressive Saved Model Beam. ✅")
# @unittest.skip
def test_auto_regressive_keras_model_beam(self):
"""Test Auto Regressive using Decoder Keras Model"""
text = ['<NAME> is one of the finest', 'I love stars because']
decoder = TextDecoder(model=self.model_ar)
# Pad -1
input_ids = tf.ragged.constant(self.tokenizer(text)["input_ids"]).to_tensor(-1)
inputs = {}
inputs["input_ids"] = input_ids
decoder_results = decoder.decode(inputs, mode="beam", num_beams=3, max_iterations=10, eos_id=-100)
predicted_ids = decoder_results["predicted_ids"].numpy().tolist()
expected_ids = [
[[19553, 3653, 287, 262, 995, 13, 679, 318, 530, 286]],
[[484, 821, 262, 691, 1517, 326, 6067, 284, 502, 13]],
]
# tf.debugging.assert_equal(predicted_ids, expected_ids)
logging.info("Test: Successful Auto Regressive Keras Model Greedy. ✅")
@unittest.skip
def test_auto_regressive_saved_model_top_k_top_p(self):
"""Test Auto Regressive using Decoder Saved Model"""
text = ['<NAME> is one of the finest', 'I love stars because']
dirpath = tempfile.mkdtemp()
saved_model_dir = "{}/model_pb".format(dirpath)
self.model_ar.save_as_serialize_module(saved_model_dir, overwrite=True)
# Load saved model .
loaded = tf.saved_model.load(saved_model_dir)
decoder = TextDecoder(model=loaded)
# Pad -1
input_ids = tf.ragged.constant(self.tokenizer(text)["input_ids"]).to_tensor(-1)
inputs = {}
inputs["input_ids"] = input_ids
decoder_results = decoder.decode(
inputs, mode="top_k_top_p", top_k=100, top_p=0.7, max_iterations=10, eos_id=-100
)
_ = decoder_results["predicted_ids"].numpy().tolist()
shutil.rmtree(dirpath)
logging.info("Test: Successful Auto Regressive Saved Model top K top P. ✅")
@unittest.skip
def test_auto_regressive_keras_model_top_k_top_p(self):
"""Test Auto Regressive using Decoder Keras Model"""
text = ['<NAME> is one of the finest', 'I love stars because']
decoder = TextDecoder(model=self.model_ar)
# Pad -1
input_ids = tf.ragged.constant(self.tokenizer(text)["input_ids"]).to_tensor(-1)
inputs = {}
inputs["input_ids"] = input_ids
decoder_results = decoder.decode(
inputs, mode="top_k_top_p", top_k=100, top_p=0.7, max_iterations=10, eos_id=-100
)
_ = decoder_results["predicted_ids"].numpy().tolist()
logging.info("Test: Successful Auto Regressive Keras Model top k top P. ✅")
@unittest.skip
def test_tflite(self):
"""Test GPT2 Tflite"""
model = Model.from_pretrained(model_name=MODEL_NAME, batch_size=1, sequence_length=32,)
tempdir = tempfile.mkdtemp()
model.save_serialized(tempdir, overwrite=True)
converter = tf.lite.TFLiteConverter.from_saved_model("{}".format(tempdir)) # path to the SavedModel directory
converter.experimental_new_converter = True
tflite_model = converter.convert()
open("{}/converted_model.tflite".format(tempdir), "wb").write(tflite_model)
# Load the TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path="{}/converted_model.tflite".format(tempdir))
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Get result
# encoder input_ids
interpreter.set_tensor(
input_details[0]['index'],
tf.random.uniform(input_details[0]['shape'], minval=0, maxval=100, dtype=tf.int32),
)
interpreter.invoke()
tflite_output = interpreter.get_tensor(output_details[-1]['index'])
print("Tflite output shape", tflite_output.shape)
# tf.debugging.assert_equal(tflite_output.shape, (1, 32, 32128))
logging.info("Test: TFlite Conversion. ✅")
shutil.rmtree(tempdir)
if __name__ == '__main__':
unittest.main()
|
examples/swap.py | bpmbank/pyql | 488 | 11110683 | <gh_stars>100-1000
""" Port of the swap example of QuantLib SWIG to PyQL.
Warning: this is work in progress and currently not working.
"""
from __future__ import print_function
from quantlib.indexes.api import Euribor6M
from quantlib.instruments.swap import VanillaSwap, Payer
from quantlib.pricingengines.swap import DiscountingSwapEngine
from quantlib.settings import Settings
from quantlib.quotes import SimpleQuote
from quantlib.termstructures.yields.api import DepositRateHelper, FraRateHelper
from quantlib.termstructures.yields.api import FuturesRateHelper, SwapRateHelper
from quantlib.termstructures.yields.api import YieldTermStructure
from quantlib.termstructures.yields.api import (
PiecewiseYieldCurve, BootstrapTrait, Interpolator )
from quantlib.time.api import Actual360, Date, November, TARGET, Weeks, Annual
from quantlib.time.api import Months, Years, Period, ModifiedFollowing
from quantlib.time.api import Unadjusted, Thirty360, Semiannual, Schedule
from quantlib.time.api import ActualActual, ISDA
from quantlib.time.schedule import Forward
# global data
calendar = TARGET()
todaysDate = Date(6,November,2001)
Settings.instance().evaluation_date = todaysDate
settlementDate = Date(8,November,2001)
# market quotes
deposits = { (1,Weeks): 0.0382,
(1,Months): 0.0372,
(3,Months): 0.0363,
(6,Months): 0.0353,
(9,Months): 0.0348,
(1,Years): 0.0345 }
FRAs = { (3,6): 0.037125,
(6,9): 0.037125,
(9,12): 0.037125 }
futures = { Date(19,12,2001): 96.2875,
Date(20,3,2002): 96.7875,
Date(19,6,2002): 96.9875,
Date(18,9,2002): 96.6875,
Date(18,12,2002): 96.4875,
Date(19,3,2003): 96.3875,
Date(18,6,2003): 96.2875,
Date(17,9,2003): 96.0875 }
swaps = { (2,Years): 0.037125,
(3,Years): 0.0398,
(5,Years): 0.0443,
(10,Years): 0.05165,
(15,Years): 0.055175 }
# convert them to Quote objects
#for n,unit in deposits.keys():
# deposits[(n,unit)] = SimpleQuote(deposits[(n,unit)])
for n,m in FRAs.keys():
FRAs[(n,m)] = SimpleQuote(FRAs[(n,m)])
for d in futures.keys():
futures[d] = SimpleQuote(futures[d])
for s in swaps.keys():
swaps[s] = SimpleQuote(swaps[s])
#for n,unit in swaps.keys():
# swaps[(n,unit)] = SimpleQuote(swaps[(n,unit)])
# build rate helpers
day_counter = Actual360()
settlementDays = 2
depositHelpers = [ DepositRateHelper(v,
Period(n,unit), settlementDays,
calendar, ModifiedFollowing,
False, day_counter)
for (n, unit), v in deposits.items()]
day_counter = Actual360()
settlementDays = 2
fraHelpers = [ FraRateHelper(v,
n, m, settlementDays,
calendar, ModifiedFollowing,
False, day_counter)
for (n, m), v in FRAs.items() ]
day_counter = Actual360()
months = 3
futuresHelpers = [ FuturesRateHelper(futures[d],
d, months,
calendar, ModifiedFollowing,
True, day_counter)
for d in futures.keys() ]
settlementDays = 2
fixedLegFrequency = Annual
fixedLegTenor = Period(1,Years)
fixedLegAdjustment = Unadjusted
fixedLegDayCounter = Thirty360()
floatingLegFrequency = Semiannual
floatingLegTenor = Period(6,Months)
floatingLegAdjustment = ModifiedFollowing
swapHelpers = [ SwapRateHelper.from_tenor(swaps[(n,unit)],
Period(n,unit), calendar,
fixedLegFrequency, fixedLegAdjustment,
fixedLegDayCounter, Euribor6M())
for n, unit in swaps.keys() ]
### Curve building
ts_daycounter = ActualActual(ISDA)
# term-structure construction
helpers = depositHelpers + swapHelpers
depoSwapCurve = PiecewiseYieldCurve.from_reference_date(
BootstrapTrait.Discount, Interpolator.LogLinear, settlementDate, helpers, ts_daycounter
)
helpers = depositHelpers[:2] + futuresHelpers + swapHelpers[1:]
depoFuturesSwapCurve = PiecewiseYieldCurve.from_reference_date(
BootstrapTrait.Discount, Interpolator.LogLinear,settlementDate, helpers, ts_daycounter
)
helpers = depositHelpers[:3] + fraHelpers + swapHelpers
depoFraSwapCurve = PiecewiseYieldCurve.from_reference_date(
BootstrapTrait.Discount, Interpolator.LogLinear, settlementDate, helpers, ts_daycounter
)
# Term structures that will be used for pricing:
discountTermStructure = YieldTermStructure()
forecastTermStructure = YieldTermStructure()
### SWAPS TO BE PRICED
nominal = 1000000
length = 5
maturity = calendar.advance(settlementDate,length,Years)
payFixed = True
fixedLegFrequency = Annual
fixedLegAdjustment = Unadjusted
fixedLegDayCounter = Thirty360()
fixedRate = 0.04
floatingLegFrequency = Semiannual
spread = 0.0
fixingDays = 2
index = Euribor6M(forecastTermStructure)
floatingLegAdjustment = ModifiedFollowing
floatingLegDayCounter = index.day_counter
fixedSchedule = Schedule.from_rule(settlementDate, maturity,
fixedLegTenor, calendar,
fixedLegAdjustment, fixedLegAdjustment,
Forward, False)
floatingSchedule = Schedule.from_rule(settlementDate, maturity,
floatingLegTenor, calendar,
floatingLegAdjustment, floatingLegAdjustment,
Forward, False)
swapEngine = DiscountingSwapEngine(discountTermStructure)
spot = VanillaSwap(Payer, nominal,
fixedSchedule, fixedRate, fixedLegDayCounter,
floatingSchedule, index, spread,
floatingLegDayCounter)
spot.set_pricing_engine(swapEngine)
forwardStart = calendar.advance(settlementDate,1,Years)
forwardEnd = calendar.advance(forwardStart,length,Years)
fixedSchedule = Schedule.from_rule(forwardStart, forwardEnd,
fixedLegTenor, calendar,
fixedLegAdjustment, fixedLegAdjustment,
Forward, False)
floatingSchedule = Schedule.from_rule(forwardStart, forwardEnd,
floatingLegTenor, calendar,
floatingLegAdjustment, floatingLegAdjustment,
Forward, False)
forward = VanillaSwap(Payer, nominal,
fixedSchedule, fixedRate, fixedLegDayCounter,
floatingSchedule, index, spread,
floatingLegDayCounter)
forward.set_pricing_engine(swapEngine)
# price on the bootstrapped curves
def formatPrice(p, digits=2):
format = '%%.%df' % digits
return format % p
def formatRate(r, digits=2):
format = '%%.%df %%%%' % digits
if isinstance(r, SimpleQuote):
r = r.value
return format % (r * 100)
headers = ("term structure", "net present value",
"fair spread", "fair fixed rate" )
separator = " | "
format = ''
width = 0
for h in headers[:-1]:
format += '%%%ds' % len(h)
format += separator
width += len(h) + len(separator)
format += '%%%ds' % len(headers[-1])
width += len(headers[-1])
rule = "-" * width
dblrule = "=" * width
tab = " " * 8
def report(swap, name):
print(format % (name, formatPrice(swap.npv,2),
formatRate(swap.fair_spread,4),
formatRate(swap.fair_rate,4)))
print(dblrule)
print("5-year market swap-rate = %s" % formatRate(swaps[(5,Years)]))
print(dblrule)
# price on two different term structures
print(tab + "5-years swap paying %s" % formatRate(fixedRate))
print(separator.join(headers))
print(rule)
discountTermStructure.link_to(depoFuturesSwapCurve)
forecastTermStructure.link_to(depoFuturesSwapCurve)
report(spot,'depo-fut-swap')
discountTermStructure.link_to(depoFraSwapCurve)
forecastTermStructure.link_to(depoFraSwapCurve)
report(spot,'depo-FRA-swap')
print(rule)
# price the 1-year forward swap
print(tab + "5-years, 1-year forward swap paying %s" % formatRate(fixedRate))
print(rule)
discountTermStructure.link_to(depoFuturesSwapCurve)
forecastTermStructure.link_to(depoFuturesSwapCurve)
report(forward,'depo-fut-swap')
discountTermStructure.link_to(depoFraSwapCurve)
forecastTermStructure.link_to(depoFraSwapCurve)
report(forward,'depo-FRA-swap')
# modify the 5-years swap rate and reprice
swaps[(5,Years)].value = 0.046
print(dblrule)
print("5-year market swap-rate = %s" % formatRate(swaps[(5,Years)]))
print(dblrule)
print(tab + "5-years swap paying %s" % formatRate(fixedRate))
print(separator.join(headers))
print(rule)
discountTermStructure.link_to(depoFuturesSwapCurve)
forecastTermStructure.link_to(depoFuturesSwapCurve)
report(spot,'depo-fut-swap')
discountTermStructure.link_to(depoFraSwapCurve)
forecastTermStructure.link_to(depoFraSwapCurve)
report(spot,'depo-FRA-swap')
print(rule)
print(tab + "5-years, 1-year forward swap paying %s" % formatRate(fixedRate))
print(rule)
discountTermStructure.link_to(depoFuturesSwapCurve)
forecastTermStructure.link_to(depoFuturesSwapCurve)
report(forward,'depo-fut-swap')
discountTermStructure.link_to(depoFraSwapCurve)
forecastTermStructure.link_to(depoFraSwapCurve)
report(forward,'depo-FRA-swap')
|
boto3_type_annotations_with_docs/boto3_type_annotations/iot_jobs_data/client.py | cowboygneox/boto3_type_annotations | 119 | 11110704 | from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
class Client(BaseClient):
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
pass
def describe_job_execution(self, jobId: str, thingName: str, includeJobDocument: bool = None, executionNumber: int = None) -> Dict:
"""
Gets details of a job execution.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/iot-jobs-data-2017-09-29/DescribeJobExecution>`_
**Request Syntax**
::
response = client.describe_job_execution(
jobId='string',
thingName='string',
includeJobDocument=True|False,
executionNumber=123
)
**Response Syntax**
::
{
'execution': {
'jobId': 'string',
'thingName': 'string',
'status': 'QUEUED'|'IN_PROGRESS'|'SUCCEEDED'|'FAILED'|'TIMED_OUT'|'REJECTED'|'REMOVED'|'CANCELED',
'statusDetails': {
'string': 'string'
},
'queuedAt': 123,
'startedAt': 123,
'lastUpdatedAt': 123,
'approximateSecondsBeforeTimedOut': 123,
'versionNumber': 123,
'executionNumber': 123,
'jobDocument': 'string'
}
}
**Response Structure**
- *(dict) --*
- **execution** *(dict) --*
Contains data about a job execution.
- **jobId** *(string) --*
The unique identifier you assigned to this job when it was created.
- **thingName** *(string) --*
The name of the thing that is executing the job.
- **status** *(string) --*
The status of the job execution. Can be one of: "QUEUED", "IN_PROGRESS", "FAILED", "SUCCESS", "CANCELED", "REJECTED", or "REMOVED".
- **statusDetails** *(dict) --*
A collection of name/value pairs that describe the status of the job execution.
- *(string) --*
- *(string) --*
- **queuedAt** *(integer) --*
The time, in milliseconds since the epoch, when the job execution was enqueued.
- **startedAt** *(integer) --*
The time, in milliseconds since the epoch, when the job execution was started.
- **lastUpdatedAt** *(integer) --*
The time, in milliseconds since the epoch, when the job execution was last updated.
- **approximateSecondsBeforeTimedOut** *(integer) --*
The estimated number of seconds that remain before the job execution status will be changed to ``TIMED_OUT`` .
- **versionNumber** *(integer) --*
The version of the job execution. Job execution versions are incremented each time they are updated by a device.
- **executionNumber** *(integer) --*
A number that identifies a particular job execution on a particular device. It can be used later in commands that return or update job execution information.
- **jobDocument** *(string) --*
The content of the job document.
:type jobId: string
:param jobId: **[REQUIRED]**
The unique identifier assigned to this job when it was created.
:type thingName: string
:param thingName: **[REQUIRED]**
The thing name associated with the device the job execution is running on.
:type includeJobDocument: boolean
:param includeJobDocument:
Optional. When set to true, the response contains the job document. The default is false.
:type executionNumber: integer
:param executionNumber:
Optional. A number that identifies a particular job execution on a particular device. If not specified, the latest job execution is returned.
:rtype: dict
:returns:
"""
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
``ClientMethod``.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method\'s model.
:returns: The presigned url
"""
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
pass
def get_pending_job_executions(self, thingName: str) -> Dict:
"""
Gets the list of all jobs for a thing that are not in a terminal status.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/iot-jobs-data-2017-09-29/GetPendingJobExecutions>`_
**Request Syntax**
::
response = client.get_pending_job_executions(
thingName='string'
)
**Response Syntax**
::
{
'inProgressJobs': [
{
'jobId': 'string',
'queuedAt': 123,
'startedAt': 123,
'lastUpdatedAt': 123,
'versionNumber': 123,
'executionNumber': 123
},
],
'queuedJobs': [
{
'jobId': 'string',
'queuedAt': 123,
'startedAt': 123,
'lastUpdatedAt': 123,
'versionNumber': 123,
'executionNumber': 123
},
]
}
**Response Structure**
- *(dict) --*
- **inProgressJobs** *(list) --*
A list of JobExecutionSummary objects with status IN_PROGRESS.
- *(dict) --*
Contains a subset of information about a job execution.
- **jobId** *(string) --*
The unique identifier you assigned to this job when it was created.
- **queuedAt** *(integer) --*
The time, in milliseconds since the epoch, when the job execution was enqueued.
- **startedAt** *(integer) --*
The time, in milliseconds since the epoch, when the job execution started.
- **lastUpdatedAt** *(integer) --*
The time, in milliseconds since the epoch, when the job execution was last updated.
- **versionNumber** *(integer) --*
The version of the job execution. Job execution versions are incremented each time AWS IoT Jobs receives an update from a device.
- **executionNumber** *(integer) --*
A number that identifies a particular job execution on a particular device.
- **queuedJobs** *(list) --*
A list of JobExecutionSummary objects with status QUEUED.
- *(dict) --*
Contains a subset of information about a job execution.
- **jobId** *(string) --*
The unique identifier you assigned to this job when it was created.
- **queuedAt** *(integer) --*
The time, in milliseconds since the epoch, when the job execution was enqueued.
- **startedAt** *(integer) --*
The time, in milliseconds since the epoch, when the job execution started.
- **lastUpdatedAt** *(integer) --*
The time, in milliseconds since the epoch, when the job execution was last updated.
- **versionNumber** *(integer) --*
The version of the job execution. Job execution versions are incremented each time AWS IoT Jobs receives an update from a device.
- **executionNumber** *(integer) --*
A number that identifies a particular job execution on a particular device.
:type thingName: string
:param thingName: **[REQUIRED]**
The name of the thing that is executing the job.
:rtype: dict
:returns:
"""
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
pass
def start_next_pending_job_execution(self, thingName: str, statusDetails: Dict = None, stepTimeoutInMinutes: int = None) -> Dict:
"""
Gets and starts the next pending (status IN_PROGRESS or QUEUED) job execution for a thing.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/iot-jobs-data-2017-09-29/StartNextPendingJobExecution>`_
**Request Syntax**
::
response = client.start_next_pending_job_execution(
thingName='string',
statusDetails={
'string': 'string'
},
stepTimeoutInMinutes=123
)
**Response Syntax**
::
{
'execution': {
'jobId': 'string',
'thingName': 'string',
'status': 'QUEUED'|'IN_PROGRESS'|'SUCCEEDED'|'FAILED'|'TIMED_OUT'|'REJECTED'|'REMOVED'|'CANCELED',
'statusDetails': {
'string': 'string'
},
'queuedAt': 123,
'startedAt': 123,
'lastUpdatedAt': 123,
'approximateSecondsBeforeTimedOut': 123,
'versionNumber': 123,
'executionNumber': 123,
'jobDocument': 'string'
}
}
**Response Structure**
- *(dict) --*
- **execution** *(dict) --*
A JobExecution object.
- **jobId** *(string) --*
The unique identifier you assigned to this job when it was created.
- **thingName** *(string) --*
The name of the thing that is executing the job.
- **status** *(string) --*
The status of the job execution. Can be one of: "QUEUED", "IN_PROGRESS", "SUCCEEDED", "FAILED", "TIMED_OUT", "REJECTED", "REMOVED", or "CANCELED".
- **statusDetails** *(dict) --*
A collection of name/value pairs that describe the status of the job execution.
- *(string) --*
- *(string) --*
- **queuedAt** *(integer) --*
The time, in milliseconds since the epoch, when the job execution was enqueued.
- **startedAt** *(integer) --*
The time, in milliseconds since the epoch, when the job execution was started.
- **lastUpdatedAt** *(integer) --*
The time, in milliseconds since the epoch, when the job execution was last updated.
- **approximateSecondsBeforeTimedOut** *(integer) --*
The estimated number of seconds that remain before the job execution status will be changed to ``TIMED_OUT`` .
- **versionNumber** *(integer) --*
The version of the job execution. Job execution versions are incremented each time they are updated by a device.
- **executionNumber** *(integer) --*
A number that identifies a particular job execution on a particular device. It can be used later in commands that return or update job execution information.
- **jobDocument** *(string) --*
The content of the job document.
:type thingName: string
:param thingName: **[REQUIRED]**
The name of the thing associated with the device.
:type statusDetails: dict
:param statusDetails:
A collection of name/value pairs that describe the status of the job execution. If not specified, the statusDetails are unchanged.
- *(string) --*
- *(string) --*
:type stepTimeoutInMinutes: integer
:param stepTimeoutInMinutes:
Specifies the amount of time this device has to finish execution of this job. If the job execution status is not set to a terminal state before this timer expires, or before the timer is reset (by calling ``UpdateJobExecution`` , setting the status to ``IN_PROGRESS`` and specifying a new timeout value in field ``stepTimeoutInMinutes`` ), the job execution status will be automatically set to ``TIMED_OUT`` . Note that setting this timeout has no effect on the job execution timeout that may have been specified when the job was created (``CreateJob`` using field ``timeoutConfig`` ).
:rtype: dict
:returns:
"""
pass
def update_job_execution(self, jobId: str, thingName: str, status: str, statusDetails: Dict = None, stepTimeoutInMinutes: int = None, expectedVersion: int = None, includeJobExecutionState: bool = None, includeJobDocument: bool = None, executionNumber: int = None) -> Dict:
"""
Updates the status of a job execution.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/iot-jobs-data-2017-09-29/UpdateJobExecution>`_
**Request Syntax**
::
response = client.update_job_execution(
jobId='string',
thingName='string',
status='QUEUED'|'IN_PROGRESS'|'SUCCEEDED'|'FAILED'|'TIMED_OUT'|'REJECTED'|'REMOVED'|'CANCELED',
statusDetails={
'string': 'string'
},
stepTimeoutInMinutes=123,
expectedVersion=123,
includeJobExecutionState=True|False,
includeJobDocument=True|False,
executionNumber=123
)
**Response Syntax**
::
{
'executionState': {
'status': 'QUEUED'|'IN_PROGRESS'|'SUCCEEDED'|'FAILED'|'TIMED_OUT'|'REJECTED'|'REMOVED'|'CANCELED',
'statusDetails': {
'string': 'string'
},
'versionNumber': 123
},
'jobDocument': 'string'
}
**Response Structure**
- *(dict) --*
- **executionState** *(dict) --*
A JobExecutionState object.
- **status** *(string) --*
The status of the job execution. Can be one of: "QUEUED", "IN_PROGRESS", "SUCCEEDED", "FAILED", "TIMED_OUT", "REJECTED", "REMOVED", or "CANCELED".
- **statusDetails** *(dict) --*
A collection of name/value pairs that describe the status of the job execution.
- *(string) --*
- *(string) --*
- **versionNumber** *(integer) --*
The version of the job execution. Job execution versions are incremented each time they are updated by a device.
- **jobDocument** *(string) --*
The content of the job document.
:type jobId: string
:param jobId: **[REQUIRED]**
The unique identifier assigned to this job when it was created.
:type thingName: string
:param thingName: **[REQUIRED]**
The name of the thing associated with the device.
:type status: string
:param status: **[REQUIRED]**
The new status for the job execution (IN_PROGRESS, FAILED, SUCCEEDED, or REJECTED). This must be specified on every update.
:type statusDetails: dict
:param statusDetails:
Optional. A collection of name/value pairs that describe the status of the job execution. If not specified, the statusDetails are unchanged.
- *(string) --*
- *(string) --*
:type stepTimeoutInMinutes: integer
:param stepTimeoutInMinutes:
Specifies the amount of time this device has to finish execution of this job. If the job execution status is not set to a terminal state before this timer expires, or before the timer is reset (by again calling ``UpdateJobExecution`` , setting the status to ``IN_PROGRESS`` and specifying a new timeout value in this field), the job execution status will be automatically set to ``TIMED_OUT`` . Note that setting or resetting this timeout has no effect on the job execution timeout that may have been specified when the job was created (``CreateJob`` using field ``timeoutConfig`` ).
:type expectedVersion: integer
:param expectedVersion:
Optional. The expected current version of the job execution. Each time you update the job execution, its version is incremented. If the version of the job execution stored in Jobs does not match, the update is rejected with a VersionMismatch error, and an ErrorResponse that contains the current job execution status data is returned. (This makes it unnecessary to perform a separate DescribeJobExecution request in order to obtain the job execution status data.)
:type includeJobExecutionState: boolean
:param includeJobExecutionState:
Optional. When included and set to true, the response contains the JobExecutionState data. The default is false.
:type includeJobDocument: boolean
:param includeJobDocument:
Optional. When set to true, the response contains the job document. The default is false.
:type executionNumber: integer
:param executionNumber:
Optional. A number that identifies a particular job execution on a particular device.
:rtype: dict
:returns:
"""
pass
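    # Informal usage sketch (not part of the generated stub; thing and job names are
    # placeholders). A device-side worker typically polls for queued work, claims the
    # next execution and reports a terminal status:
    #
    #   import boto3
    #   client = boto3.client('iot-jobs-data')
    #   pending = client.get_pending_job_executions(thingName='my-thing')
    #   if pending['queuedJobs']:
    #       started = client.start_next_pending_job_execution(thingName='my-thing')
    #       client.update_job_execution(
    #           jobId=started['execution']['jobId'],
    #           thingName='my-thing',
    #           status='SUCCEEDED',
    #       )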
|
butterfree/transform/transformations/__init__.py | fossabot/butterfree | 208 | 11110713 | """Holds all transformations to be used by Features.
A transformation must inherit from TransformComponent and handle data modification,
renaming and type casting using its parent's (a Feature) information.
"""
from butterfree.transform.transformations.aggregated_transform import (
AggregatedTransform,
)
from butterfree.transform.transformations.custom_transform import CustomTransform
from butterfree.transform.transformations.spark_function_transform import (
SparkFunctionTransform,
)
from butterfree.transform.transformations.sql_expression_transform import (
SQLExpressionTransform,
)
from butterfree.transform.transformations.stack_transform import StackTransform
from butterfree.transform.transformations.transform_component import TransformComponent
__all__ = [
"AggregatedTransform",
"CustomTransform",
"SparkFunctionTransform",
"SQLExpressionTransform",
"StackTransform",
"TransformComponent",
]
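# Informal sketch of how a transformation is typically attached to a Feature
# (the column names and helper function below are hypothetical, and the exact
# Feature/CustomTransform signatures should be checked against the library):
#
#   from butterfree.transform.features import Feature
#
#   def plus_one(df, parent_feature, *args, **kwargs):
#       return df.withColumn(parent_feature.name, df["value"] + 1)
#
#   feature = Feature(
#       name="value_plus_one",
#       description="value incremented by one",
#       transformation=CustomTransform(transformer=plus_one),
#   )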
|
model/third_party/HMNet/DataLoader/infinibatch/infinibatch/torch/data.py | NickSchoelkopf/SummerTime | 178 | 11110727 | <filename>model/third_party/HMNet/DataLoader/infinibatch/infinibatch/torch/data.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
from infinibatch.iterators import CheckpointableIterator
from infinibatch.datasets import chunked_dataset_iterator
from typing import Union, Iterable, Any
# @TODO: This has been tested once, but we have no regression test presently. I am worried tests will fail if Torch is not installed.
class IterableCheckpointedDataset(torch.utils.data.IterableDataset):
"""
Wraps a CheckpointableIterator into a PyTorch IterableDataset, which is recognized by its type by
PyTorch's DataLoader class.
"""
def __init__(self, source: CheckpointableIterator):
super().__init__()
self._source = source
def __iter__(self): # this is called in the forked clone
worker_info = torch.utils.data.get_worker_info()
assert (
worker_info is None or worker_info.num_workers == 1
) # not supported since we can't get at the checkpoint for each worker
return iter(self._source)
# @TODO: This is currently untested, and may not work presently.
class IterableChunkedDataset(torch.utils.data.IterableDataset):
def __init__(
self,
paths: Union[str, Iterable[str]],
shuffle: bool = True,
buffer_size: int = 2 ** 20,
transform=None,
seed: int = None,
world_size: int = 1,
rank: int = 0,
num_workers_per_rank: int = 1,
):
super().__init__()
self.rank = rank
self.num_workers_per_rank = num_workers_per_rank
# instance_rank is set assuming that num_workers_per_rank = 1 and adapted dynamically in __iter__
self.dataset = chunked_dataset_iterator(
paths,
shuffle=shuffle,
buffer_size=buffer_size,
transform=transform,
seed=seed,
num_instances=world_size * num_workers_per_rank,
instance_rank=rank,
)
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
if worker_info is None: # single-process data loading
self.dataset._instance_rank = self.rank
else:
assert worker_info.num_workers == self.num_workers_per_rank
self.dataset._instance_rank = (
self.rank * self.num_workers_per_rank + worker_info.id
)
return iter(self.dataset)
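    # Rough usage sketch (paths and batch size are placeholders): wrap the dataset
    # in a regular PyTorch DataLoader. Checkpointing is only surfaced through
    # IterableCheckpointedDataset, and only with a single worker per rank.
    #
    #   from torch.utils.data import DataLoader
    #
    #   ds = IterableChunkedDataset("chunks/*.gz", shuffle=True, num_workers_per_rank=1)
    #   loader = DataLoader(ds, batch_size=32, num_workers=1)
    #   for batch in loader:
    #       ...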
|
Lib/test/test_compiler/testcorpus/60_try_except.py | diogommartins/cinder | 1,886 | 11110742 | try:
a
except Exc:
b
|
ckg/report_manager/apps/apps_config.py | igg-bioinfo/CKG | 189 | 11110776 | <reponame>igg-bioinfo/CKG
stats_file = "../../data/imports/stats/stats.hdf"
logo = '../static/img/logo.png'
footer = '<div id="PageFooter"><p> The Clinical Knowledge Graph has been implemented by <a href="mailto:<EMAIL>"><NAME></a>, <a><NAME></a> and <a href="mailto:<EMAIL>"><NAME></a></p><br/> <p>This tool is used by the Clinical Proteomics Department, Prof. <NAME>, at <a href="http://www.cpr.ku.dk">Novo Nordisk Foundation Center for Protein Research</a></p></div>'
projectPage = {"overview":[
("overview", [], ["basicTable"], {}),
("number_subjects", [], ["basicTable"], {}),
("number_analytical_samples", [], ["basicTable"], {})
]
}
proteomicsPage= {"overview":[
("number_peptides_analytical_sample", [], ["basicBarPlot"], {"x_title":"Analytical sample", "y_title":"number of peptides"}),
("number_proteins_analytical_sample", [], ["basicBarPlot"], {"x_title":"Analytical sample", "y_title":"number of proteins"}),
("number_modified_proteins_analytical_sample", [], ["basicBarPlot"], {"x_title":"Analytical sample", "y_title":"number of modified proteins"})
],
"stratification":[
("identified_proteins_sample_group",
["pca", "tsne", "umap"],
["scatterPlot"],
{"imputation":True, "imputation_method":"Mixed", "x_title":"PC1", "y_title":"PC2", "components":2, "perplexity":40, "n_iter":1000, "init":'pca'})
],
"regulation":[
("identified_proteins_sample_group_with_gene",
["ttest"],
["volcanoPlot", "basicTable"],
{"imputation":True, "imputation_method":"Mixed", "alpha":0.05, "drop_cols":["sample","gene_name"], "name":"name"})
],
#"correlation":[
# ("identified_proteins_sample_group",
# ["correlation"],
# ["3Dnetwork", "basicTable"],
# {"source":"node1", "target":"node2"})
# ],
"action":[
("drug_acts_on_proteins",
[],
["basicTable", "3Dnetwork"],
{"replace":[("ACTION","inhibition"),("PROTEINS",'"O60341"')]})
]
}
wesPage= {"overview":[
("number_somatic_mutations_by_type_analytical_sample",["basicBarPlot", "basicTable"]),
],
"targets":[
("target_analysis_variants",["basicTable", "3dNetwork"])
]
}
### Project Page configuration
pages = {"projectPage":{
"project": projectPage,
"proteomics": proteomicsPage,
#"wes": wesPage
},
"importsPage":{"stats_file":stats_file},
"projectCreationPage":{}
}
## Overview
## Project Name
## Project description
## Studied disease
## Person responsible
## Participants
## Number of enrolled subjects
|
pyvisa/rname.py | jpsecher/pyvisa | 393 | 11110812 | # -*- coding: utf-8 -*-
"""Functions and classes to parse and assemble resource name.
:copyright: 2014-2020 by PyVISA Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
import contextlib
import re
from collections import defaultdict
from dataclasses import dataclass, field, fields
from typing import (
TYPE_CHECKING,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
)
from typing_extensions import ClassVar
from . import constants, errors, logger
if TYPE_CHECKING:
from .resources import Resource # noqa # pragma: no cover
#: Interface types for which a subclass of ResourceName exists
_INTERFACE_TYPES: Set[str] = set()
#: Resource Class for Interface type
_RESOURCE_CLASSES: Dict[str, Set[str]] = defaultdict(set)
#: Subclasses of ResourceName matching an interface type, resource class pair
_SUBCLASSES: Dict[Tuple[str, str], Type["ResourceName"]] = {}
# DEFAULT Resource Class for a given interface type.
_DEFAULT_RC: Dict[str, str] = {}
class InvalidResourceName(ValueError):
"""Exception raised when the resource name cannot be parsed."""
def __init__(self, msg: str) -> None:
self.msg = msg
@classmethod
def bad_syntax(
cls, syntax: str, resource_name: str, ex: Exception = None
) -> "InvalidResourceName":
"""Build an exception when the resource name cannot be parsed."""
if ex:
msg = "The syntax is '%s' (%s)." % (syntax, ex)
else:
msg = "The syntax is '%s'." % syntax
msg = "Could not parse '%s'. %s" % (resource_name, msg)
return cls(msg)
@classmethod
def subclass_notfound(
cls, interface_type_resource_class: Tuple[str, str], resource_name: str = None
) -> "InvalidResourceName":
"""Build an exception when no parser has been registered for a pair."""
msg = "Parser not found for: %s." % (interface_type_resource_class,)
if resource_name:
msg = "Could not parse '%s'. %s" % (resource_name, msg)
return cls(msg)
@classmethod
def rc_notfound(
cls, interface_type: str, resource_name: str = None
) -> "InvalidResourceName":
"""Build an exception when no resource class is provided and no default is found."""
msg = (
"Resource class for %s not provided and default not found." % interface_type
)
if resource_name:
msg = "Could not parse '%s'. %s" % (resource_name, msg)
return cls(msg)
def __str__(self) -> str:
return self.msg
T = TypeVar("T", bound=Type["ResourceName"])
def register_subclass(cls: T) -> T:
"""Register a subclass for a given interface type and resource class."""
# Assemble the format string based on the resource parts
fmt = cls.interface_type
syntax = cls.interface_type
for ndx, f in enumerate(fields(cls)):
sep = "::" if ndx else ""
fmt += sep + "{0.%s}" % f.name
if not f.default:
syntax += sep + f.name.replace("_", " ")
else:
syntax += "[" + sep + f.name.replace("_", " ") + "]"
fmt += "::" + cls.resource_class
if not cls.is_rc_optional:
syntax += "::" + cls.resource_class
else:
syntax += "[" + "::" + cls.resource_class + "]"
cls._visa_syntax = syntax
cls._canonical_fmt = fmt
key = cls.interface_type, cls.resource_class
if key in _SUBCLASSES:
raise ValueError("Class already registered for %s and %s" % key)
_SUBCLASSES[(cls.interface_type, cls.resource_class)] = cls
_INTERFACE_TYPES.add(cls.interface_type)
_RESOURCE_CLASSES[cls.interface_type].add(cls.resource_class)
if cls.is_rc_optional:
if cls.interface_type in _DEFAULT_RC:
raise ValueError("Default already specified for %s" % cls.interface_type)
_DEFAULT_RC[cls.interface_type] = cls.resource_class
return cls
class ResourceName:
"""Base class for ResourceNames to be used as a mixin."""
#: Interface type string
interface_type: ClassVar[str]
#: Resource class string
resource_class: ClassVar[str]
    #: Specifies if the resource class part of the string is optional.
is_rc_optional: ClassVar[bool] = False
    #: Formatting string for the canonical resource name
_canonical_fmt: str = field(init=False)
#: VISA syntax for resource
_visa_syntax: str = field(init=False)
    #: Names of the fields that make up the resource name
_fields: Tuple[str, ...] = field(init=False)
#: Resource name provided by the user (not empty only when parsing)
user: str = field(init=False)
def __post_init__(self):
# Ensure that all mandatory arguments have been passed
for f in fields(self):
if not getattr(self, f.name):
raise TypeError(f.name + " is a required parameter")
self._fields = tuple(f.name for f in fields(self))
@property
def interface_type_const(self) -> constants.InterfaceType:
try:
return getattr(constants.InterfaceType, self.interface_type.lower())
except Exception:
return constants.InterfaceType.unknown
@classmethod
def from_string(cls, resource_name: str) -> "ResourceName":
"""Parse a resource name and return a ResourceName
Parameters
----------
resource_name : str
Name of the resource
Raises
------
InvalidResourceName
Raised if the resource name is invalid.
"""
# TODO Remote VISA
uname = resource_name.upper()
for interface_type in _INTERFACE_TYPES:
# Loop through all known interface types until we found one
# that matches the beginning of the resource name
if not uname.startswith(interface_type):
continue
parts: List[str]
if len(resource_name) == len(interface_type):
parts = []
else:
parts = resource_name[len(interface_type) :].split("::")
# Try to match the last part of the resource name to
# one of the known resource classes for the given interface type.
# If not possible, use the default resource class
# for the given interface type.
if parts and parts[-1] in _RESOURCE_CLASSES[interface_type]:
parts, resource_class = parts[:-1], parts[-1]
else:
try:
resource_class = _DEFAULT_RC[interface_type]
except KeyError:
raise InvalidResourceName.rc_notfound(interface_type, resource_name)
# Look for the subclass
try:
subclass = _SUBCLASSES[(interface_type, resource_class)]
except KeyError:
raise InvalidResourceName.subclass_notfound(
(interface_type, resource_class), resource_name
)
# And create the object
try:
rn = subclass.from_parts(*parts)
rn.user = resource_name
return rn
except (ValueError, TypeError) as ex:
raise InvalidResourceName.bad_syntax(
subclass._visa_syntax, resource_name, ex
)
raise InvalidResourceName(
"Could not parse %s: unknown interface type" % resource_name
)
@classmethod
def from_kwargs(cls, **kwargs) -> "ResourceName":
"""Build a resource from keyword arguments."""
interface_type = kwargs.pop("interface_type")
if interface_type not in _INTERFACE_TYPES:
raise InvalidResourceName("Unknown interface type: %s" % interface_type)
try:
resource_class = kwargs.pop("resource_class", _DEFAULT_RC[interface_type])
except KeyError:
raise InvalidResourceName.rc_notfound(interface_type)
# Look for the subclass
try:
subclass = _SUBCLASSES[(interface_type, resource_class)]
except KeyError:
raise InvalidResourceName.subclass_notfound(
(interface_type, resource_class)
)
# And create the object
try:
# Always use for subclasses that do take arguments
return subclass(**kwargs) # type: ignore
except (ValueError, TypeError) as ex:
raise InvalidResourceName(str(ex))
# Implemented when building concrete subclass in build_rn_class
@classmethod
def from_parts(cls, *parts):
"""Construct a resource name from a list of parts."""
resource_parts = fields(cls)
if len(parts) < sum(1 for f in resource_parts if f.default):
raise ValueError("not enough parts")
elif len(parts) > len(resource_parts):
raise ValueError("too many parts")
k, rp = resource_parts[0], resource_parts[1:]
# The first part (just after the interface_type) is the only
# optional part which can be empty and therefore the
# default value should be used.
p, pending = parts[0], parts[1:]
kwargs = {k.name: k.default if p == "" else p}
# The rest of the parts are consumed when mandatory elements are required.
while len(pending) < len(rp):
k, rp = rp[0], rp[1:]
if not k.default:
# This is impossible as far as I can tell for currently implemented
# resource names
if not pending:
raise ValueError(k.name + " part is mandatory") # pragma: no cover
p, pending = pending[0], pending[1:]
if not p:
raise ValueError(k.name + " part is mandatory")
kwargs[k.name] = p
else:
kwargs[k.name] = k.default
        # When the number of remaining provided parts equals the number of
        # remaining resource parts, we just consume everything.
kwargs.update((k.name, p) for k, p in zip(rp, pending))
return cls(**kwargs)
def __str__(self):
return self._canonical_fmt.format(self)
# Build subclasses for each resource
@register_subclass
@dataclass
class GPIBInstr(ResourceName):
"""GPIB INSTR
The syntax is:
GPIB[board]::primary_address[::secondary_address][::INSTR]
"""
#: GPIB board to use.
board: str = "0"
#: Primary address of the device to connect to
primary_address: str = ""
#: Secondary address of the device to connect to
# Reference for the GPIB secondary address
# https://www.mathworks.com/help/instrument/secondaryaddress.html
secondary_address: str = "0"
interface_type: ClassVar[str] = "GPIB"
resource_class: ClassVar[str] = "INSTR"
is_rc_optional: ClassVar[bool] = True
@register_subclass
@dataclass
class GPIBIntfc(ResourceName):
"""GPIB INTFC
The syntax is:
GPIB[board]::INTFC
"""
#: GPIB board to use.
board: str = "0"
interface_type: ClassVar[str] = "GPIB"
resource_class: ClassVar[str] = "INTFC"
@register_subclass
@dataclass
class ASRLInstr(ResourceName):
"""ASRL INSTR
The syntax is:
ASRL[board]::INSTR
"""
#: Serial connection to use.
board: str = "0"
interface_type: ClassVar[str] = "ASRL"
resource_class: ClassVar[str] = "INSTR"
is_rc_optional: ClassVar[bool] = True
@register_subclass
@dataclass
class TCPIPInstr(ResourceName):
"""TCPIP INSTR
The syntax is:
TCPIP[board]::host address[::LAN device name][::INSTR]
"""
#: Board to use.
board: str = "0"
#: Host address of the device (IPv4 or host name)
host_address: str = ""
#: LAN device name of the device
lan_device_name: str = "inst0"
interface_type: ClassVar[str] = "TCPIP"
resource_class: ClassVar[str] = "INSTR"
is_rc_optional: ClassVar[bool] = True
@register_subclass
@dataclass
class TCPIPSocket(ResourceName):
"""TCPIP SOCKET
The syntax is:
TCPIP[board]::host address[::port]::SOCKET
"""
#: Board to use
board: str = "0"
#: Host address of the device (IPv4 or host name)
host_address: str = ""
#: Port on which to establish the connection
port: str = ""
interface_type: ClassVar[str] = "TCPIP"
resource_class: ClassVar[str] = "SOCKET"
@register_subclass
@dataclass
class USBInstr(ResourceName):
"""USB INSTR
The syntax is:
USB[board]::manufacturer ID::model code::serial number[::USB interface number][::INSTR]
"""
#: USB board to use.
board: str = "0"
#: ID of the instrument manufacturer.
manufacturer_id: str = ""
#: Code identifying the model of the instrument.
model_code: str = ""
#: Serial number of the instrument.
serial_number: str = ""
#: USB interface number.
usb_interface_number: str = "0"
interface_type: ClassVar[str] = "USB"
resource_class: ClassVar[str] = "INSTR"
is_rc_optional: ClassVar[bool] = True
@register_subclass
@dataclass
class USBRaw(ResourceName):
"""USB RAW
The syntax is:
USB[board]::manufacturer ID::model code::serial number[::USB interface number]::RAW
"""
#: USB board to use.
board: str = "0"
#: ID of the instrument manufacturer.
manufacturer_id: str = ""
#: Code identifying the model of the instrument.
model_code: str = ""
#: Serial number of the instrument.
serial_number: str = ""
#: USB interface number.
usb_interface_number: str = "0"
interface_type: ClassVar[str] = "USB"
resource_class: ClassVar[str] = "RAW"
@register_subclass
@dataclass
class PXIBackplane(ResourceName):
"""PXI BACKPLANE
The syntax is:
PXI[interface]::chassis number::BACKPLANE
"""
#: PXI interface number.
interface: str = "0"
#: PXI chassis number
chassis_number: str = ""
interface_type: ClassVar[str] = "PXI"
resource_class: ClassVar[str] = "BACKPLANE"
@register_subclass
@dataclass
class PXIMemacc(ResourceName):
"""PXI MEMACC
The syntax is:
PXI[interface]::MEMACC
"""
#: PXI interface number
interface: str = "0"
interface_type: ClassVar[str] = "PXI"
resource_class: ClassVar[str] = "MEMACC"
@register_subclass
@dataclass
class VXIBackplane(ResourceName):
"""VXI BACKPLANE
The syntax is:
VXI[board]::VXI logical address::BACKPLANE
"""
#: VXI board
board: str = "0"
#: VXI logical address
vxi_logical_address: str = ""
interface_type: ClassVar[str] = "VXI"
resource_class: ClassVar[str] = "BACKPLANE"
@register_subclass
@dataclass
class VXIInstr(ResourceName):
"""VXI INSTR
The syntax is:
VXI[board]::VXI logical address[::INSTR]
"""
#: VXI board
board: str = "0"
#: VXI logical address
vxi_logical_address: str = ""
interface_type: ClassVar[str] = "VXI"
resource_class: ClassVar[str] = "INSTR"
is_rc_optional: ClassVar[bool] = True
@register_subclass
@dataclass
class VXIMemacc(ResourceName):
"""VXI MEMACC
The syntax is:
VXI[board]::MEMACC
"""
#: VXI board
board: str = "0"
interface_type: ClassVar[str] = "VXI"
resource_class: ClassVar[str] = "MEMACC"
@register_subclass
@dataclass
class VXIServant(ResourceName):
"""VXI SERVANT
The syntax is:
VXI[board]::SERVANT
"""
#: VXI board
board: str = "0"
interface_type: ClassVar[str] = "VXI"
resource_class: ClassVar[str] = "SERVANT"
# TODO 3 types of PXI INSTR
# TODO ENET-Serial INSTR
# TODO Remote NI-VISA
def assemble_canonical_name(**kwargs) -> str:
"""Build the canonical resource name from a set of keyword arguments."""
return str(ResourceName.from_kwargs(**kwargs))
def to_canonical_name(resource_name: str) -> str:
"""Parse a resource name and return the canonical version."""
return str(ResourceName.from_string(resource_name))
parse_resource_name = ResourceName.from_string
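# Informal example: parse a resource string and re-assemble its canonical form
# (the address below is arbitrary).
#
#   rn = parse_resource_name("TCPIP::192.168.0.10::INSTR")
#   rn.resource_class            # 'INSTR'
#   str(rn)                      # 'TCPIP0::192.168.0.10::inst0::INSTR'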
def filter(resources: Iterable[str], query: str) -> Tuple[str, ...]:
r"""Filter a list of resources according to a query expression.
The search criteria specified in the query parameter has two parts:
1. a VISA regular expression over a resource string.
2. optional logical expression over attribute values
(not implemented in this function, see below).
.. note: The VISA regular expression syntax is not the same as the
Python regular expression syntax. (see below)
The regular expression is matched against the resource strings of resources
known to the VISA Resource Manager. If the resource string matches the
regular expression, the attribute values of the resource are then matched
against the expression over attribute values. If the match is successful,
the resource has met the search criteria and gets added to the list of
resources found.
By using the optional attribute expression, you can construct flexible
and powerful expressions with the use of logical ANDs (&&), ORs(||),
and NOTs (!). You can use equal (==) and unequal (!=) comparators to
compare attributes of any type, and other inequality comparators
(>, <, >=, <=) to compare attributes of numeric type. Use only global
attributes in the attribute expression. Local attributes are not allowed
in the logical expression part of the expr parameter.
Symbol Meaning
---------- ----------
? Matches any one character.
\ Makes the character that follows it an ordinary character
instead of special character. For example, when a question
mark follows a backslash (\?), it matches the ? character
instead of any one character.
[list] Matches any one character from the enclosed list. You can
use a hyphen to match a range of characters.
[^list] Matches any character not in the enclosed list. You can use
a hyphen to match a range of characters.
* Matches 0 or more occurrences of the preceding character or
expression.
+ Matches 1 or more occurrences of the preceding character or
expression.
Exp|exp Matches either the preceding or following expression. The or
operator | matches the entire expression that precedes or
follows it and not just the character that precedes or follows
it. For example, VXI|GPIB means (VXI)|(GPIB), not VX(I|G)PIB.
(exp) Grouping characters or expressions.
"""
if "{" in query:
query, _ = query.split("{")
logger.warning(
"optional part of the query expression not supported. " "See filter2"
)
try:
query = query.replace("?", ".")
matcher = re.compile(query, re.IGNORECASE)
except re.error:
raise errors.VisaIOError(constants.VI_ERROR_INV_EXPR)
return tuple(res for res in resources if matcher.match(res))
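# Informal example of the VISA-style matching implemented above:
#
#   resources = ("GPIB0::1::INSTR", "GPIB0::2::INSTR", "ASRL1::INSTR")
#   filter(resources, "GPIB?*::INSTR")   # -> ('GPIB0::1::INSTR', 'GPIB0::2::INSTR')
#   filter(resources, "ASRL?*")          # -> ('ASRL1::INSTR',)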
class _AttrGetter:
"""Smart attr getter infering common attribute from resource name.
Used to implement filter2
"""
def __init__(
self, resource_name: str, open_resource: Callable[[str], "Resource"]
) -> None:
self.resource_name = resource_name
self.parsed = parse_resource_name(resource_name)
self.resource = None
self.open_resource = open_resource
def __getattr__(self, item): # noqa: C901
if item == "VI_ATTR_INTF_NUM":
try:
return int(self.parsed.board)
except AttributeError:
                return int(self.parsed.interface)
elif item == "VI_ATTR_MANF_ID":
if not isinstance(self.parsed, (USBInstr, USBRaw)):
raise self.raise_missing_attr(item)
else:
return self.parsed.manufacturer_id
elif item == "VI_ATTR_MODEL_CODE":
if not isinstance(self.parsed, (USBInstr, USBRaw)):
raise self.raise_missing_attr(item)
else:
return self.parsed.model_code
elif item == "VI_ATTR_USB_SERIAL_NUM":
if not isinstance(self.parsed, (USBInstr, USBRaw)):
raise self.raise_missing_attr(item)
else:
return self.parsed.serial_number
elif item == "VI_ATTR_USB_INTFC_NUM":
if not isinstance(self.parsed, (USBInstr, USBRaw)):
raise self.raise_missing_attr(item)
else:
return int(self.parsed.board)
elif item == "VI_ATTR_TCPIP_ADDR":
if not isinstance(self.parsed, (TCPIPInstr, TCPIPSocket)):
raise self.raise_missing_attr(item)
else:
return self.parsed.host_address
elif item == "VI_ATTR_TCPIP_DEVICE_NAME":
if not isinstance(self.parsed, TCPIPInstr):
raise self.raise_missing_attr(item)
else:
return self.parsed.lan_device_name
elif item == "VI_ATTR_TCPIP_PORT":
if not isinstance(self.parsed, TCPIPSocket):
raise self.raise_missing_attr(item)
else:
return int(self.parsed.port)
elif item == "VI_ATTR_GPIB_PRIMARY_ADDR":
if not isinstance(self.parsed, GPIBInstr):
raise self.raise_missing_attr(item)
else:
return int(self.parsed.primary_address)
elif item == "VI_ATTR_GPIB_SECONDARY_ADDR":
if not isinstance(self.parsed, GPIBInstr):
raise self.raise_missing_attr(item)
else:
return int(self.parsed.secondary_address)
elif item == "VI_ATTR_PXI_CHASSIS":
if not isinstance(self.parsed, PXIBackplane):
raise self.raise_missing_attr(item)
else:
return int(self.parsed.chassis_number)
elif item == "VI_ATTR_MAINFRAME_LA":
if not isinstance(self.parsed, (VXIInstr, VXIBackplane)):
raise self.raise_missing_attr(item)
else:
return int(self.parsed.vxi_logical_address)
if self.resource is None:
self.resource = self.open_resource(self.resource_name)
return self.resource.get_visa_attribute(item)
def raise_missing_attr(self, item):
raise errors.VisaIOError(constants.VI_ERROR_NSUP_ATTR)
def filter2(
resources: Iterable[str], query: str, open_resource: Callable[[str], "Resource"]
) -> Tuple[str, ...]:
"""Filter a list of resources according to a query expression.
It accepts the optional part of the expression.
    .. warning: This function is experimental and unsafe as it uses eval.
       It may also require opening the resource.
Parameters
----------
resources : Iterable[str]
Iterable of resource name to filter.
query : str
The pattern to use for filtering
open_resource : Callable[[str], Resource]
Function to open a resource (typically ResourceManager().open_resource)
"""
optional: Optional[str]
if "{" in query:
try:
query, optional = query.split("{")
optional, _ = optional.split("}")
except ValueError:
raise errors.VisaIOError(constants.VI_ERROR_INV_EXPR)
else:
optional = None
filtered = filter(resources, query)
if not optional:
return tuple(filtered)
optional = optional.replace("&&", "and").replace("||", "or").replace("!", "not ")
optional = optional.replace("VI_", "res.VI_")
@contextlib.contextmanager
def open_close(resource_name):
getter = _AttrGetter(resource_name, open_resource)
yield getter
if getter.resource is not None:
getter.resource.close()
selected = []
for rn in filtered:
with open_close(rn) as getter:
try:
if eval(optional, None, dict(res=getter)):
selected.append(rn)
except Exception:
logger.exception("Failed to evaluate %s on %s", optional, rn)
return tuple(selected)
|
utils/plot_debug.py | jswulff/mrflow | 124 | 11110839 | <reponame>jswulff/mrflow
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 24 18:16:46 2015
@author: jonas
"""
import numpy as np
import flow_viz as viz
import os
have_plt=False
try:
from matplotlib import pyplot as plt
plt._INSTALL_FIG_OBSERVER=True
plt.rcParams['image.cmap'] = 'jet'
plt.figure()
plt.close()
have_plt=True
except:
plt=None
def imshow(*args, **kwargs):
if plt is None:
return
plt.ion()
plt.figure()
plt.imshow(*args, **kwargs)
plt.show()
plt.pause(3)
def imshow_perc(*args, **kwargs):
if plt is None:
return
vmin=np.percentile(args[0],5)
vmax=np.percentile(args[0],95)
kwargs['vmin']=vmin
kwargs['vmax']=vmax
plt.ion()
plt.figure()
plt.imshow(*args, **kwargs)
plt.show()
plt.pause(3)
def plot(*args):
if plt is None:
return
plt.ion()
plt.figure()
plt.plot(*args)
plt.show()
plt.pause(3)
def quiver(*args):
if plt is None:
return
plt.ion()
plt.figure()
plt.quiver(*args)
plt.show()
plt.pause(3)
def plot_quiver(pt, uv, title, masks=None, norm=-1, outpath='.'):
if plt is None:
return
plt.ioff()
if masks is None:
masks = [np.ones(pt.shape[0])>0,]
if norm > 0:
uvlen = np.sqrt((uv**2).sum(axis=1))
uv[uvlen<norm,:] /= (1.0/norm) * uvlen[uvlen<norm][:,np.newaxis]
colors = ['r','b','g','c','y']
plt.figure()
for i,m in enumerate(masks):
plt.quiver(pt[m,0],
pt[m,1],
uv[m,0],
uv[m,1],
color=colors[i%len(colors)],
angles='xy',
scale_units='xy',
scale=1)
plt.axis('equal')
plt.title(title)
plt.ylim([pt[:,1].max(),0])
save_figure(title, outpath)
def plot_scatter(pt, title, I=None,masks=None, outpath='.'):
if plt is None:
return
plt.ioff()
if masks is None:
masks = [np.ones(pt.shape[0])>0,]
colors = ['r','b','g','c','y']
plt.figure()
if I is not None:
plt.imshow(I)
for i,m in enumerate(masks):
plt.plot(pt[m,0],
pt[m,1],
'.{}'.format(colors[i%len(colors)]))
if I is not None:
ymax = I.shape[0]
xmax = I.shape[1]
else:
ymax = pt[:,1].max()
xmax = pt[:,0].max()
plt.axis('equal')
plt.title(title)
plt.ylim([ymax,0])
plt.xlim([0,xmax])
save_figure(title, outpath)
def show():
if plt is None:
return
plt.show()
def plot_flow(u,v,title,outpath='.'):
if plt is None:
return
plt.ioff()
Iuv = viz.computeFlowImage(u,v)
plt.figure()
plt.imshow(Iuv)
plt.title(title)
save_figure(title,outpath)
def plot_image(x,title,colorbar=False,vmin=None,vmax=None,outpath='.',cmap=None):
if plt is None:
return
plt.ioff()
plt.figure()
plt.imshow(x,interpolation='nearest',vmin=vmin,vmax=vmax,cmap=cmap)
plt.xticks([])
plt.yticks([])
plt.title(title)
if colorbar:
plt.colorbar()
save_figure(title,outpath)
def plot_plot(y, title, legends=None,outpath='.'):
if plt is None:
return
if np.array(y).ndim == 1:
y = np.array(y).reshape((-1,1))
no_legends = legends is None
if legends is None or len(legends) < y.shape[1]:
legends = [''] * y.shape[1]
plt.ioff()
plt.figure()
for d in range(y.shape[1]):
plt.plot(y[:,d],label=legends[d])
if not no_legends:
plt.legend()
plt.title(title)
save_figure(title,outpath)
def save_figure(title,outpath='.'):
if plt is None:
return
outdir = os.path.join(outpath, 'images')
if not os.path.isdir(outdir):
os.makedirs(outdir)
fname = outdir + '/' + title.replace(' ', '') + '.png'
plt.savefig(fname, dpi=200,bbox_inches='tight', pad_inches=0)
plt.close()
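# Quick usage sketch (values are arbitrary). All helpers above degrade to no-ops
# when matplotlib is unavailable, so calls do not need to be guarded:
#
#   import numpy as np
#   u, v = np.random.randn(64, 64), np.random.randn(64, 64)
#   plot_flow(u, v, 'debug flow', outpath='/tmp')    # writes /tmp/images/debugflow.png
#   plot_image(u, 'u component', colorbar=True, outpath='/tmp')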
|
examples/showcase/src/demos_widgets/hyperlink.py | takipsizad/pyjs | 739 | 11110846 | <reponame>takipsizad/pyjs
"""
The ``ui.Hyperlink`` class acts as an "internal" hyperlink to a particular
state of the application. These states are stored in the application's
history, allowing for the use of the Back and Next buttons in the browser to
move between application states.
The ``ui.Hyperlink`` class only makes sense in an application which keeps track
of state using the ``History`` module. When the user clicks on a hyperlink,
the application changes state by calling ``History.newItem(newState)``. The
application then uses a history listener function to respond to the change in
state in whatever way makes sense.
"""
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.Hyperlink import Hyperlink
from pyjamas.ui.Label import Label
from pyjamas import History
class HyperlinkDemo(SimplePanel):
def __init__(self):
SimplePanel.__init__(self)
History.addHistoryListener(self)
vPanel = VerticalPanel()
self.stateDisplay = Label()
vPanel.add(self.stateDisplay)
hPanel = HorizontalPanel(Spacing=5)
hPanel.add(Hyperlink("State 1", False, TargetHistoryToken="state number 1"))
hPanel.add(Hyperlink("State 2", False, TargetHistoryToken="state number 2"))
vPanel.add(hPanel)
self.add(vPanel)
def onHistoryChanged(self, state):
self.stateDisplay.setText(state)
|
health_check/conf.py | witold-gren/django-health-check | 739 | 11110863 | from django.conf import settings
HEALTH_CHECK = getattr(settings, 'HEALTH_CHECK', {})
HEALTH_CHECK.setdefault('DISK_USAGE_MAX', 90)
HEALTH_CHECK.setdefault('MEMORY_MIN', 100)
HEALTH_CHECK.setdefault('WARNINGS_AS_ERRORS', True)
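# Example override in a project's Django settings module (values are illustrative):
#
#   HEALTH_CHECK = {
#       'DISK_USAGE_MAX': 95,       # percent
#       'MEMORY_MIN': 200,          # MB
#       'WARNINGS_AS_ERRORS': False,
#   }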
|
REST/python/Targets/register-listening-tentacle.py | gdesai1234/OctopusDeploy-Api | 199 | 11110895 | <reponame>gdesai1234/OctopusDeploy-Api
import json
import requests
octopus_server_uri = 'https://your.octopus.app/api'
octopus_api_key = 'API-YOURAPIKEY'
headers = {'X-Octopus-ApiKey': octopus_api_key}
def get_octopus_resource(uri):
response = requests.get(uri, headers=headers)
response.raise_for_status()
return json.loads(response.content.decode('utf-8'))
def get_by_name(uri, name):
resources = get_octopus_resource(uri)
return next((x for x in resources if x['Name'] == name), None)
space_name = 'Default'
environment_names = ['Development', 'Test']
space = get_by_name('{0}/spaces/all'.format(octopus_server_uri), space_name)
environments = get_octopus_resource('{0}/{1}/environments/all'.format(octopus_server_uri, space['Id']))
environment_ids = [environment['Id'] for environment in environments if environment['Name'] in environment_names]
params = {
'host': 'your target hostname',
'port': '10933',
'type': 'TentaclePassive'
}
uri = '{0}/{1}/machines/discover'.format(octopus_server_uri, space['Id'])
response = requests.get(uri, headers=headers, params=params)
response.raise_for_status()
discovered = json.loads(response.content.decode('utf-8'))
target = {
'Endpoint': discovered['Endpoint'],
'EnvironmentIds': environment_ids,
'Name': discovered['Name'],
'Roles': ['your-target-role']
}
uri = '{0}/{1}/machines'.format(octopus_server_uri, space['Id'])
response = requests.post(uri, headers=headers, json=target)
response.raise_for_status() |
osf/migrations/0123_merge_20180803_1346.py | gaybro8777/osf.io | 628 | 11110897 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-08-03 13:46
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0122_auto_20180801_2105'),
('osf', '0121_generalize_schema_models'),
]
operations = [
]
|
examples/ls_hf_transformer_encoder_layer.py | HyeongminMoon/PatrickStar | 494 | 11110903 | <reponame>HyeongminMoon/PatrickStar
# BSD 3-Clause License
#
# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the psutil authors nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
try:
from lightseq.training.ops.pytorch.transformer_encoder_layer import (
LSTransformerEncoderLayer,
)
except ImportError:
raise RuntimeError("pip install lightseq first!")
class LSHFTransformerEncoderLayer(LSTransformerEncoderLayer):
def __init__(self, *args, **kwargs):
super(LSHFTransformerEncoderLayer, self).__init__(*args, **kwargs)
def forward(self, hidden_states, encoder_padding_mask, *args, **kwargs):
encoder_padding_mask /= -10000.0
encoder_padding_mask = encoder_padding_mask.squeeze()
output = super().forward(hidden_states, encoder_padding_mask)
return (output, None, None, None)
def gen_bert_config(training_args, config):
bert_config = LSTransformerEncoderLayer.get_config(
max_batch_tokens=4096,
max_seq_len=config.max_position_embeddings,
hidden_size=config.hidden_size,
intermediate_size=config.intermediate_size,
nhead=config.num_attention_heads,
attn_prob_dropout_ratio=config.attention_probs_dropout_prob,
activation_dropout_ratio=config.hidden_dropout_prob,
hidden_dropout_ratio=config.hidden_dropout_prob,
pre_layer_norm=False,
fp16=training_args.use_fp16,
local_rank=training_args.local_rank,
activation_fn="gelu",
)
return bert_config
def get_hf_bert_enc_layer_params(layer):
init_ws = []
init_bs = []
init_ws.append(layer.attention.self.query.weight.detach().clone())
init_bs.append(layer.attention.self.query.bias.detach().clone())
init_ws.append(layer.attention.self.key.weight.detach().clone())
init_bs.append(layer.attention.self.key.bias.detach().clone())
init_ws.append(layer.attention.self.value.weight.detach().clone())
init_bs.append(layer.attention.self.value.bias.detach().clone())
init_ws.append(layer.attention.output.dense.weight.detach().clone())
init_bs.append(layer.attention.output.dense.bias.detach().clone())
init_ws.append(layer.attention.output.LayerNorm.weight.detach().clone())
init_bs.append(layer.attention.output.LayerNorm.bias.detach().clone())
init_ws.append(layer.intermediate.dense.weight.detach().clone())
init_bs.append(layer.intermediate.dense.bias.detach().clone())
init_ws.append(layer.output.dense.weight.detach().clone())
init_bs.append(layer.output.dense.bias.detach().clone())
init_ws.append(layer.output.LayerNorm.weight.detach().clone())
init_bs.append(layer.output.LayerNorm.bias.detach().clone())
return init_ws, init_bs
def inject_ls_enc_layer(model, training_args, config):
for i in range(config.num_hidden_layers):
bert_config = gen_bert_config(training_args, config)
init_ws, init_bs = get_hf_bert_enc_layer_params(model.bert.encoder.layer[i])
model.bert.encoder.layer[i] = LSHFTransformerEncoderLayer(
bert_config, init_ws, init_bs
).cuda()
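# Rough usage sketch (the model and argument objects are placeholders; training_args
# is assumed to expose use_fp16 and local_rank as in the accompanying examples):
#
#   model = BertForSequenceClassification.from_pretrained("bert-base-uncased")
#   inject_ls_enc_layer(model, training_args, model.config)
#   # model.bert.encoder.layer[i] are now LSHFTransformerEncoderLayer instances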
|
fast_arrow/resources/option_chain.py | jwschmo/fast_arrow | 143 | 11110933 | <reponame>jwschmo/fast_arrow<gh_stars>100-1000
class OptionChain(object):
@classmethod
def fetch(cls, client, _id, symbol):
"""
fetch option chain for instrument
"""
url = "https://api.robinhood.com/options/chains/"
params = {
"equity_instrument_ids": _id,
"state": "active",
"tradability": "tradable"
}
data = client.get(url, params=params)
def filter_func(x):
return x["symbol"] == symbol
results = list(filter(filter_func, data["results"]))
return results[0]
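    # Illustrative call (the instrument id below is made up): `client` is an
    # authenticated fast_arrow client and `_id` the equity instrument id.
    #
    #   chain = OptionChain.fetch(client, "ebab2398-028d-4939-9f1d-13bf38f81c50", "SPY")
    #   chain["id"]   # chain id, typically used to fetch option instruments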
|
examples/tile.py | salt-die/nurses_2 | 171 | 11110961 | <filename>examples/tile.py
import asyncio
from pathlib import Path
from nurses_2.app import App
from nurses_2.widgets.image import Image
from nurses_2.widgets.tiled_image import TiledImage
LOGO_PATH = Path("images") / "python_discord_logo.png"
LOGO_FLAT = Path("images") / "logo_solo_flat_256.png"
class MyApp(App):
async def on_start(self):
tile_1 = Image(size=(10, 25), path=LOGO_PATH)
tile_2 = Image(size=(9, 19), path=LOGO_FLAT)
tiled_image = TiledImage(size=(25, 50), tile=tile_1)
self.add_widget(tiled_image)
await asyncio.sleep(5)
tiled_image.tile = tile_2
await asyncio.sleep(4)
tiled_image.allow_partial_tiling = False
MyApp().run()
|
tools/query_tabular/filters.py | supernord/tools-iuc | 142 | 11110984 | #!/usr/binsenv python
from __future__ import print_function
import re
import sys
from itertools import chain
class LineFilter(object):
def __init__(self, source, filter_dict):
self.source = source
self.filter_dict = filter_dict
self.func = lambda i, l: l.rstrip('\r\n') if l else None
self.src_lines = []
self.src_line_cnt = 0
def xint(x):
if isinstance(x, int):
return x
try:
return int(x)
except Exception:
return x if x else None
if not filter_dict:
return
if filter_dict['filter'] == 'regex':
rgx = re.compile(filter_dict['pattern'])
if filter_dict['action'] == 'exclude_match':
self.func = lambda i, l: l if not rgx.match(l) else None
elif filter_dict['action'] == 'include_match':
self.func = lambda i, l: l if rgx.match(l) else None
elif filter_dict['action'] == 'exclude_find':
self.func = lambda i, l: l if not rgx.search(l) else None
elif filter_dict['action'] == 'include_find':
self.func = lambda i, l: l if rgx.search(l) else None
elif filter_dict['filter'] == 'select_columns':
cols = [int(c) - 1 for c in filter_dict['columns']]
self.func = lambda i, l: self.select_columns(l, cols)
elif filter_dict['filter'] == 'select_column_slices':
cols = [x if isinstance(x, int) else [y if y is not None else None for y in [xint(k) for k in x.split(':')]] for x in [xint(c) for c in filter_dict['columns']]]
if all([isinstance(x, int) for x in cols]):
self.func = lambda i, l: self.select_columns(l, cols)
else:
cols = [slice(x[0], x[1], x[2] if len(x) > 2 else None) if isinstance(x, list) else x for x in cols]
self.func = lambda i, l: self.select_slices(l, cols)
elif filter_dict['filter'] == 'replace':
p = filter_dict['pattern']
r = filter_dict['replace']
c = int(filter_dict['column']) - 1
if 'add' not in filter_dict\
or filter_dict['add'] not in ['prepend',
'append',
'before',
'after']:
self.func = lambda i, l: '\t'.join(
[x if j != c else re.sub(p, r, x)
for j, x in enumerate(l.split('\t'))])
else:
a = 0 if filter_dict['add'] == 'prepend'\
else min(0, c - 1) if filter_dict['add'] == 'before'\
else c + 1 if filter_dict['add'] == 'after'\
else None
self.func = lambda i, l: self.replace_add(l, p, r, c, a)
elif filter_dict['filter'] == 'prepend_line_num':
self.func = lambda i, l: '%d\t%s' % (i, l)
elif filter_dict['filter'] == 'append_line_num':
self.func = lambda i, l: '%s\t%d' % (l.rstrip('\r\n'), i)
elif filter_dict['filter'] == 'prepend_text':
s = filter_dict['column_text']
self.func = lambda i, l: '%s\t%s' % (s, l)
elif filter_dict['filter'] == 'append_text':
s = filter_dict['column_text']
self.func = lambda i, l: '%s\t%s' % (l.rstrip('\r\n'), s)
elif filter_dict['filter'] == 'skip':
cnt = filter_dict['count']
self.func = lambda i, l: l if i > cnt else None
elif filter_dict['filter'] == 'normalize':
cols = [int(c) - 1 for c in filter_dict['columns']]
sep = filter_dict['separator']
self.func = lambda i, l: self.normalize(l, cols, sep)
def __iter__(self):
return self
def __next__(self):
if not self.src_lines:
self.get_lines()
if self.src_lines:
return self.src_lines.pop(0)
raise StopIteration
next = __next__
def select_columns(self, line, cols):
fields = line.split('\t')
return '\t'.join([fields[x] for x in cols])
def select_slices(self, line, cols):
fields = line.split('\t')
return '\t'.join(chain.from_iterable([y if isinstance(y, list) else [y] for y in [fields[x] for x in cols]]))
def replace_add(self, line, pat, rep, col, pos):
fields = line.rstrip('\r\n').split('\t')
i = pos if pos is not None else len(fields)
val = ''
if col < len(fields) and re.search(pat, fields[col]):
val = re.sub(pat, rep, fields[col]).replace('\t', ' ')
return '\t'.join(fields[:i] + [val] + fields[i:])
def normalize(self, line, split_cols, sep):
lines = []
fields = line.rstrip('\r\n').split('\t')
split_fields = dict()
cnt = 0
for c in split_cols:
if c < len(fields):
split_fields[c] = fields[c].split(sep)
cnt = max(cnt, len(split_fields[c]))
if cnt == 0:
lines.append('\t'.join(fields))
else:
for n in range(0, cnt):
flds = [x if c not in split_cols else split_fields[c][n]
if n < len(split_fields[c])
else '' for (c, x) in enumerate(fields)]
lines.append('\t'.join(flds))
return lines
def get_lines(self):
for i, next_line in enumerate(self.source):
self.src_line_cnt += 1
line = self.func(self.src_line_cnt, next_line)
if line:
if isinstance(line, list):
self.src_lines.extend(line)
else:
self.src_lines.append(line)
return
class TabularReader:
"""
Tabular file iterator. Returns a list
"""
def __init__(self, input_file, skip=0, comment_char=None, col_idx=None,
filters=None):
self.skip = skip
self.comment_char = comment_char
self.col_idx = col_idx
self.filters = filters
self.tsv_file = \
input_file if hasattr(input_file, 'readline') else open(input_file)
if skip and skip > 0:
for i in range(skip):
if not self.tsv_file.readline():
break
source = LineFilter(self.tsv_file, None)
if comment_char:
source = LineFilter(source,
{"filter": "regex", "pattern": comment_char,
"action": "exclude_match"})
if filters:
for f in filters:
source = LineFilter(source, f)
self.source = source
def __iter__(self):
return self
def __next__(self):
''' Iteration '''
for i, line in enumerate(self.source):
fields = line.rstrip('\r\n').split('\t')
if self.col_idx:
fields = [fields[i] for i in self.col_idx]
return fields
raise StopIteration
next = __next__
def filter_file(input_file, output, skip=0, comment_char='#', filters=None):
data_lines = 0
try:
tr = TabularReader(input_file, skip=skip, comment_char=comment_char,
filters=filters)
for linenum, fields in enumerate(tr):
data_lines += 1
try:
output.write('%s\n' % '\t'.join(fields))
except Exception as e:
print('Failed at line: %d err: %s' % (linenum, e),
file=sys.stderr)
except Exception as e:
exit('Error: %s' % (e))
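# Minimal example (file names invented): stream a tab-separated file through a
# couple of the filter operations implemented above.
#
#   import sys
#   filters = [
#       {'filter': 'skip', 'count': 1},
#       {'filter': 'select_columns', 'columns': [1, 3]},
#   ]
#   with open('input.tsv') as fh:
#       filter_file(fh, sys.stdout, comment_char='#', filters=filters)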
|
examples/highcharts/column-stacked-and-grouped.py | Jbrunn/python-highcharts | 370 | 11110988 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Highcharts Demos
Stacked and grouped column: http://www.highcharts.com/demo/column-stacked-and-grouped
"""
from highcharts import Highchart
H = Highchart(width=750, height=600)
data1 = [5, 3, 4, 7, 2]
data2 = [3, 4, 4, 2, 5]
data3 = [2, 5, 6, 2, 1]
data4 = [3, 0, 4, 4, 3]
options = {
'title': {
        'text': 'Total fruit consumption, grouped by gender'
},
'xAxis': {
'categories': ['Apples', 'Oranges', 'Pears', 'Grapes', 'Bananas']
},
'yAxis': {
'allowDecimals': False,
'min': 0,
'title': {
'text': 'Number of fruits'
}
},
'tooltip': {
'formatter': "function () {\
return '<b>' + this.x + '</b><br/>' +\
this.series.name + ': ' + this.y + '<br/>' +\
'Total: ' + this.point.stackTotal;\
}"
},
'plotOptions': {
'column': {
'stacking': 'normal'
}
}
}
H.set_dict_options(options)
H.add_data_set(data1, 'column', 'John', stack='male' )
H.add_data_set(data2, 'column', 'Joe', stack='male')
H.add_data_set(data3, 'column', 'Jane', stack='female')
H.add_data_set(data4, 'column', 'Janet', stack='female')
H.htmlcontent |
plugins/headings/__init__.py | GiovanH/MarkdownEditing | 2,612 | 11111001 | from .common import *
from .goto import *
from .level import *
from .style import *
from .underlined import *
|
tests/util/test_get_control_variate_coef.py | xiangze/edward | 5,200 | 11111044 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from edward.util.tensorflow import get_control_variate_coef
class test_get_control_variate_coef(tf.test.TestCase):
def test_calculate_correct_coefficient(self):
with self.test_session():
f = tf.constant([1.0, 2.0, 3.0, 4.0])
h = tf.constant([2.0, 3.0, 8.0, 1.0])
self.assertAllClose(get_control_variate_coef(f, h).eval(),
0.03448276)
if __name__ == '__main__':
tf.test.main()
|
state_representation/client.py | anonymous-authors-2018/robotics-repo | 524 | 11111057 | """
Client to communicate with SRL server
"""
import os
import json
from enum import Enum
import zmq
HOSTNAME = 'localhost'
SERVER_PORT = 7777
class Command(Enum):
HELLO = 0
LEARN = 1
READY = 2
ERROR = 3
EXIT = 4
class SRLClient(object):
def __init__(self, data_folder, hostname='localhost', server_port=7777):
super(SRLClient, self).__init__()
self.hostname = hostname
self.server_port = server_port
context = zmq.Context()
self.socket = context.socket(zmq.PAIR)
self.socket.connect("tcp://{}:{}".format(hostname, server_port))
self.path_to_srl_server = None
self.data_folder = data_folder
def __del__(self):
self.socket.close()
def waitForServer(self):
print("Waiting for server...")
msg = self.socket.recv_json()
assert Command(msg['command']) == Command.HELLO
self.path_to_srl_server = msg.get('path')
self.socket.send_json({"command": Command.HELLO.value, 'data_folder': self.data_folder})
print("Connected to server")
def sendLearnCommand(self, state_dim, seed=1):
"""
:param state_dim: (int)
:param seed: (int)
"""
self.socket.send_json({"command": Command.LEARN.value, 'state_dim': state_dim, 'seed': seed})
def sendExitCommand(self):
self.socket.send_json({"command": Command.EXIT.value})
def receiveMessage(self):
"""
:return: (Command, dict)
"""
msg = self.socket.recv_json()
try:
# Convert to a command object
command = Command(msg.get('command'))
except ValueError:
raise ValueError("Unknown command: {}".format(msg))
return command, msg
def waitForSRLModel(self, state_dim):
"""
Wait until SRL is trained
:param state_dim: (int)
:return: (bool, str) (True if no error, path to learned model)
"""
self.sendLearnCommand(state_dim)
command, msg = self.receiveMessage()
if command == Command.ERROR:
print("An error occured during SRL")
return False, ""
elif command != Command.READY:
print("Unsupported command:{}".format(command))
return False, ""
else:
path_to_model = msg.get('path') + '/srl_model.pth'
return True, path_to_model
if __name__ == '__main__':
data_folder = 'test_server'
os.makedirs('srl_zoo/data/' + data_folder, exist_ok=True)
dataset_config = {'relative_pos': False}
with open("srl_zoo/data/{}/dataset_config.json".format(data_folder), "w") as f:
json.dump(dataset_config, f)
socket_client = SRLClient(data_folder)
socket_client.waitForServer()
try:
while True:
ok, path_to_model = socket_client.waitForSRLModel(state_dim=3)
print(path_to_model)
break
except KeyboardInterrupt:
pass
socket_client.sendExitCommand()
print("Client exiting...")
|
unicorn/tests/regress/mips_kernel_mmu.py | clayne/unicorn_pe | 491 | 11111071 |
#!/usr/bin/python
from unicorn import *
from unicorn.mips_const import *
import regress
class MipsSyscall(regress.RegressTest):
def test(self):
addr = 0x80000000
code = '34213456'.decode('hex') # ori $at, $at, 0x3456
uc = Uc(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_BIG_ENDIAN)
uc.mem_map(addr, 0x1000)
uc.mem_write(addr, code)
uc.reg_write(UC_MIPS_REG_AT, 0)
uc.emu_start(addr, addr + len(code))
self.assertEqual(uc.reg_read(UC_MIPS_REG_AT), 0x3456)
if __name__ == '__main__':
regress.main()
|
src/programy/nlp/stemming.py | cdoebler1/AIML2 | 345 | 11111077 | """
Copyright (c) 2016-2020 <NAME> http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import nltk
from nltk.stem.regexp import RegexpStemmer
from nltk.stem.lancaster import LancasterStemmer
from nltk.stem.isri import ISRIStemmer
from nltk.stem.porter import PorterStemmer
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.rslp import RSLPStemmer
from nltk.stem.cistem import Cistem
class Stemmer:
@staticmethod
def download_additional():
nltk.download('rslp') # pragma: no cover
def __init__(self, stemmer="porter", **kwargs):
self._impl = self._get_stemmer(stemmer, **kwargs)
def stem(self, string):
if self._impl is not None:
return self._impl.stem(string)
return string
def _get_porter_stemmer(self):
return PorterStemmer()
def _get_lancaster_stemmer(self):
return LancasterStemmer()
def _get_regex_stemmer(self, regexp, minimum):
return RegexpStemmer(regexp=regexp, min=minimum)
def _get_iris_stemmer(self):
return ISRIStemmer()
def _get_snowball_stemmer(self, language):
return SnowballStemmer(language=language)
def _get_rslp_stemmer(self):
return RSLPStemmer()
def _get_cis_stemmer(self, case_insensitive):
return Cistem(case_insensitive=case_insensitive)
def _get_stemmer(self, stemmer="porter", **kwargs):
if stemmer == "porter":
return self._get_porter_stemmer()
elif stemmer == "lancaster":
return self._get_lancaster_stemmer()
elif stemmer == "regex":
regexp = kwargs['regexp']
if 'min' in kwargs:
minimum = kwargs['min']
else:
minimum = 0
return self._get_regex_stemmer(regexp=regexp, minimum=minimum)
elif stemmer == "isri":
return self._get_iris_stemmer()
elif stemmer == "snowball":
if 'language' in kwargs:
language = kwargs['language']
else:
language = 'english'
return self._get_snowball_stemmer(language=language)
elif stemmer == "rslp":
return self._get_rslp_stemmer()
elif stemmer == "cistem":
if 'case_insensitive' in kwargs:
case_insensitive = kwargs['case_insensitive']
else:
case_insensitive = False
return self._get_cis_stemmer(case_insensitive=case_insensitive)
else:
raise ValueError("Unknown stemmer [%s]"%stemmer)
|
nodes/0.9.x/python/Workset.Kind.py | jdehotin/Clockworkfordynamo | 147 | 11111079 | import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
doc = DocumentManager.Instance.CurrentDBDocument
worksets = UnwrapElement(IN[0])
elementlist = list()
for workset in worksets:
try:
elementlist.append(str(workset.Kind))
except:
elementlist.append(list())
OUT = elementlist |
api/sdlc-integration/run_scan.py | knassar702/community-scripts | 629 | 11111095 | # This script should be used to run the actual ZAP scan
import sys
import core.scan_module.scan as scan
scan.main(sys.argv[1:])
|
network/src/network/stream/visual_stream.py | dropitlikecross/looking-to-listen | 123 | 11111138 |
import chainer
import chainer.links as L
import chainer.functions as F
import env
class Visual_Stream(chainer.Chain):
# @chainer.static_graph
def __call__(self, visual):
b = F.leaky_relu(self.bn1(self.conv1(visual)))
b = F.leaky_relu(self.bn2(self.conv2(b)))
b = F.leaky_relu(self.bn3(self.conv3(b)))
b = F.leaky_relu(self.bn4(self.conv4(b)))
b = F.leaky_relu(self.bn5(self.conv5(b)))
b = F.leaky_relu(self.bn6(self.conv6(b))) # (b, 256, 75, 1)
b = F.resize_images(b, (env.AUDIO_LEN, 1)) # (b, 256, 301, 1)
return b
def __init__(self, trained=None):
super(Visual_Stream, self).__init__()
with self.init_scope():
initial = chainer.initializers.HeNormal()
self.conv1 = L.DilatedConvolution2D(
in_channels=env.VIS_CHANNNEL, out_channels=256,
stride=1, ksize=(7, 1), dilate=1, pad=(3, 0),
nobias=True, initialW=initial)
self.conv2 = L.DilatedConvolution2D(
in_channels=256, out_channels=256,
stride=1, ksize=(5, 1), dilate=1, pad=(2, 0),
nobias=True, initialW=initial)
self.conv3 = L.DilatedConvolution2D(
in_channels=256, out_channels=256,
stride=1, ksize=(5, 1), dilate=(2, 1), pad=(4, 0),
nobias=True, initialW=initial)
self.conv4 = L.DilatedConvolution2D(
in_channels=256, out_channels=256,
stride=1, ksize=(5, 1), dilate=(4, 1), pad=(8, 0),
nobias=True, initialW=initial)
self.conv5 = L.DilatedConvolution2D(
in_channels=256, out_channels=256,
stride=1, ksize=(5, 1), dilate=(8, 1), pad=(16, 0),
nobias=True, initialW=initial)
self.conv6 = L.DilatedConvolution2D(
in_channels=256, out_channels=256,
stride=1, ksize=(5, 1), dilate=(16, 1), pad=(32, 0),
nobias=True, initialW=initial)
self.bn1 = L.BatchNormalization(256)
self.bn2 = L.BatchNormalization(256)
self.bn3 = L.BatchNormalization(256)
self.bn4 = L.BatchNormalization(256)
self.bn5 = L.BatchNormalization(256)
self.bn6 = L.BatchNormalization(256)
if trained is not None:
self.conv1.W = trained["conv1"].W
self.conv2.W = trained["conv2"].W
self.conv3.W = trained["conv3"].W
self.conv4.W = trained["conv4"].W
self.conv5.W = trained["conv5"].W
self.conv6.W = trained["conv6"].W
self.bn1.gamma = trained["bn1"].gamma
self.bn1.beta = trained["bn1"].beta
self.bn1.avg_mean = trained["bn1"].avg_mean
self.bn1.avg_var = trained["bn1"].avg_var
self.bn2.gamma = trained["bn2"].gamma
self.bn2.beta = trained["bn2"].beta
self.bn2.avg_mean = trained["bn2"].avg_mean
self.bn2.avg_var = trained["bn2"].avg_var
self.bn3.gamma = trained["bn3"].gamma
self.bn3.beta = trained["bn3"].beta
self.bn3.avg_mean = trained["bn3"].avg_mean
self.bn3.avg_var = trained["bn3"].avg_var
self.bn4.gamma = trained["bn4"].gamma
self.bn4.beta = trained["bn4"].beta
self.bn4.avg_mean = trained["bn4"].avg_mean
self.bn4.avg_var = trained["bn4"].avg_var
self.bn5.gamma = trained["bn5"].gamma
self.bn5.beta = trained["bn5"].beta
self.bn5.avg_mean = trained["bn5"].avg_mean
self.bn5.avg_var = trained["bn5"].avg_var
self.bn6.gamma = trained["bn6"].gamma
self.bn6.beta = trained["bn6"].beta
self.bn6.avg_mean = trained["bn6"].avg_mean
self.bn6.avg_var = trained["bn6"].avg_var
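

# Minimal usage sketch (the batch size and the 75-frame length are assumptions
# taken from the shape comment in __call__; env.VIS_CHANNNEL and env.AUDIO_LEN
# come from env.py):
#
#   import numpy as np
#   vs = Visual_Stream()
#   feat = vs(np.zeros((1, env.VIS_CHANNNEL, 75, 1), dtype=np.float32))
#   # feat has shape (1, 256, env.AUDIO_LEN, 1)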
|
examples/synthetic/syn_cnn_1/syn_cnn_1.py | hase1128/dragonfly | 675 | 11111156 | """
A synthetic function on CNNs.
-- <EMAIL>
"""
# pylint: disable=invalid-name
# Local
from dragonfly.nn.syn_nn_functions import cnn_syn_func1
def syn_cnn_1(x):
""" Computes the Branin function. """
return cnn_syn_func1(x[0])
# Write a function like this called 'obj'.
def objective(x):
""" Objective. """
return syn_cnn_1(x)
def main(x):
""" main function. """
return syn_cnn_1(x)
|
tests/r/test_wong.py | hajime9652/observations | 199 | 11111169 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.wong import wong
def test_wong():
"""Test module wong.py by downloading
wong.csv and testing shape of
extracted data has 331 rows and 7 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = wong(test_path)
try:
assert x_train.shape == (331, 7)
except:
shutil.rmtree(test_path)
    raise
|
lahja/exceptions.py | ethereum/lahja | 400 | 11111178 | class LahjaError(Exception):
"""
Base class for all lahja errors
"""
pass
class BindError(LahjaError):
"""
    Raised when an attempt was made to bind an event that is already bound.
"""
class ConnectionAttemptRejected(LahjaError):
"""
Raised when an attempt was made to connect to an endpoint that is already connected.
"""
pass
class LifecycleError(LahjaError):
"""
Raised when attempting to violate the lifecycle of an endpoint such as
starting an already started endpoint or starting an endpoint that has
already stopped.
"""
pass
class NoSubscribers(LahjaError):
"""
Raised when attempting to send an event or make a request while there are no listeners for the
specific type of event or request.
This is a safety check, set ``require_subscriber`` of :class:`~lahja.base.BroadcastConfig`
to ``False`` to allow propagation without listeners.
"""
pass
class UnexpectedResponse(LahjaError):
"""
Raised when the type of a response did not match the ``expected_response_type``.
"""
pass
class RemoteDisconnected(LahjaError):
"""
    Raised when a remote disconnects while we are attempting to read a message.
"""
pass
|
integration_tests/samples/socket_mode/bolt_adapter/websocket_client.py | priya1puresoftware/python-slack-sdk | 2,486 | 11111208 |
import os
from time import time
from typing import Optional
from slack_sdk.socket_mode.request import SocketModeRequest
from slack_sdk.socket_mode.websocket_client import SocketModeClient
from slack_bolt import App
from .base_handler import BaseSocketModeHandler
from .internals import run_bolt_app, send_response
from slack_bolt.response import BoltResponse
class SocketModeHandler(BaseSocketModeHandler):
app: App # type: ignore
app_token: str
client: SocketModeClient
def __init__( # type: ignore
self,
app: App, # type: ignore
app_token: Optional[str] = None,
):
self.app = app
self.app_token = app_token or os.environ["SLACK_APP_TOKEN"]
self.client = SocketModeClient(app_token=self.app_token)
self.client.socket_mode_request_listeners.append(self.handle)
def handle(self, client: SocketModeClient, req: SocketModeRequest) -> None:
start = time()
bolt_resp: BoltResponse = run_bolt_app(self.app, req)
send_response(client, req, bolt_resp, start)
|
MLFeatureSelection/__init__.py | yutiansut/Feature-Selection | 596 | 11111279 |
# __init__.py
__version__ = "0.0.9.5.1"
__author__ = "<NAME>"
|
houdini/constants.py | Oblivion-Max/houdini | 444 | 11111294 |
import enum
class StatusField(enum.IntEnum):
OpenedIglooViewer = 1
ActiveIglooLayoutOpenFlag = 2
PuffleTreasureInfographic = 512
PlayerOptInAbTestDayZero = 1024
PlayerSwapPuffle = 2048
MoreThanTenPufflesBackyardMessage = 4096
VisitBackyardFirstTime = 8192
HasWalkedPuffleFirstTime = 65536
HasWalkedPuffleSecondTime = 131072
class ConflictResolution(enum.Enum):
Silent = 0
Append = 1
Exception = 2
class Language(enum.IntEnum):
En = 1
Pt = 2
Fr = 4
Es = 8
De = 32
Ru = 64
class ClientType(enum.Enum):
Legacy = 'legacy'
Vanilla = 'vanilla'
|
optimus/helpers/debug.py | atwoodjw/Optimus | 1,045 | 11111301 |
import inspect
from inspect import getframeinfo, stack
def get_var_name(var):
"""
Get the var name from the var passed to a function
:param var:
:return:
"""
_locals = inspect.stack()[2][0].f_locals
for name in _locals:
if id(var) == id(_locals[name]):
return name
return None
def debug(value):
"""
Print a message with line and file name
:param value: string to be printed
:return:
"""
frame_info = getframeinfo((stack()[1][0]))
print("{}->{}:{}".format(value, frame_info.filename, frame_info.lineno, ))
|
examples/common_scenarios/sequence_without_tf_binding_sites.py | sukolsak/DnaChisel | 124 | 11111306 | from dnachisel import DnaOptimizationProblem, AvoidPattern, random_dna_sequence
from urllib import request
import pandas as pd
from io import StringIO
# DOWNLOAD THE LIST OF TF BINDING SITES
url = "http://regulondb.ccg.unam.mx/menu/download/datasets/files/BindingSiteSet.txt"
data = request.urlopen(url).read().decode("utf-8")
df = pd.read_csv(
StringIO(data), sep="\t", skiprows=(range(0, 46)), header=None
) # First 46 lines are description
# OBTAIN A LIST OF TF BINDING SEQUENCES
tf_column = df[13] # 14th column contains TF binding sites
tf_column.dropna(inplace=True)
tf_list = tf_column.to_list()
# According to the description, the binding sites are in uppercase, so we remove lowercase:
tf_binding_sequences = ["".join(ch for ch in tf if not ch.islower()) for tf in tf_list]
# DEFINE AND SOLVE THE OPTIMIZATION PROBLEM
problem = DnaOptimizationProblem(
sequence=random_dna_sequence(50000),
constraints=[AvoidPattern(pattern) for pattern in tf_binding_sequences],
)
problem.resolve_constraints()
problem.to_record("sequence_without_tf_binding_sites.gb")
|
docs/gallery/tutorials/pipeshard_parallelism.py | alpa-projects/alpa | 114 | 11111315 |
"""
Distributed Training with Both Shard and Pipeline Parallelism
=============================================================
Alpa can automatically parallelize jax functions with both shard
parallelism (a.k.a. intra-operator parallelism) and pipeline parallelism
(a.k.a. inter-operator parallelism). Shard parallelism includes
data parallelism, operator parallelism, and their combinations.
The :ref:`quick start <Alpa Quickstart>` focuses on using Alpa for shard parallelism.
In this tutorial, we show how to use Alpa with both shard and pipeline parallelism.
First, we show how to use Alpa to manually assign stages for pipeline parallelism.
Then we show how to use Alpa to automate this process.
"""
################################################################################
# Import Libraries and Initialize Environment
# -------------------------------------------
# We first import the required libraries.
import alpa
from alpa.testing import assert_allclose
import copy
from flax import linen as nn
from flax.training.train_state import TrainState
import jax
import jax.numpy as jnp
from jax import random
import optax
import ray
################################################################################
# Connect to a Ray Cluster
# -------------------------------------------
# Alpa uses a distributed framework `ray <https://docs.ray.io/>`_ to manage
# the cluster and distributed workers. We initialize ray and alpa.
ray.init()
alpa.init(cluster="ray")
# Alternatively, you can use the following command to connect to an existing
# ray cluster.
# ray.init(address="auto")
################################################################################
# Train an MLP on a Single Device
# -------------------------------
# In this tutorial, we use a toy dataset to train an MLP model.
# Specifically, we use the model to fit the function: :math:`y = Wx + b`.
# Note that now this model is being executed on CPU because we force the driver
# process to use the CPU.
class MLPModel(nn.Module):
hidden_dim: int
@nn.compact
def __call__(self, x):
x = nn.Dense(features=self.hidden_dim * 4)(x)
x = nn.relu(x)
x = nn.Dense(features=self.hidden_dim)(x)
x = nn.relu(x)
x = nn.Dense(features=self.hidden_dim * 4)(x)
x = nn.relu(x)
x = nn.Dense(features=self.hidden_dim)(x)
x = nn.relu(x)
return x
dim = 2048
batch_size = 2048
# Generate ground truth W and b
rngkey = jax.random.PRNGKey(0)
k1, k2 = random.split(rngkey)
W = random.normal(k1, (dim, dim), jnp.float32)
b = random.normal(k2, (dim,), jnp.float32)
# Generate the training data
ksample, knoise = random.split(k1)
x = random.normal(ksample, (batch_size, dim), jnp.float32)
y = (x @ W + b) + 0.1 * random.normal(knoise, (batch_size, dim), jnp.float32)
# Initialize a train state, which includes the model parameter and optimizer
# state.
model = MLPModel(hidden_dim=dim)
params = model.init(rngkey, x)
tx = optax.adam(learning_rate=1e-3)
state = TrainState.create(apply_fn=model.apply, params=params, tx=tx)
# Define training step
def train_step(state, batch):
def loss_func(params):
out = model.apply(params, batch["x"])
loss = jnp.mean((out - batch["y"])**2)
return loss
grads = jax.grad(loss_func)(state.params)
new_state = state.apply_gradients(grads=grads)
return new_state
batch = {"x": x, "y": y}
expected_state = train_step(state, batch)
################################################################################
# Pipeline Parallelism with Manual Assignment
# -------------------------------------------
# To manually assign stages for pipeline parallelism, we can use the
# ``alpa.mark_pipeline`` function to mark the start and end of each pipeline
# stage, and use the ``@alpa.manual_layer_construction`` decorator to indicate
# that we are manually assigning stages. Note that each pipeline stage is
# also automatically parallelized by the shard parallel pass.
# Define the manually parallelized model with pipeline markers.
class ManualPipelineMLPModel(nn.Module):
hidden_dim: int
@nn.compact
def __call__(self, x):
x = nn.Dense(features=self.hidden_dim * 4)(x)
x = nn.relu(x)
x = nn.Dense(features=self.hidden_dim)(x)
x = nn.relu(x)
# Mark the end of the 0th pipeline stage and the start of the 1st
        # pipeline stage. The start marker of the 0th stage and the end
# marker of the 1st stage are marked in the train_step below.
alpa.mark_pipeline(name='0', mark_type='end')
alpa.mark_pipeline(name='1', mark_type='start')
x = nn.Dense(features=self.hidden_dim * 4)(x)
x = nn.relu(x)
x = nn.Dense(features=self.hidden_dim)(x)
x = nn.relu(x)
return x
# Initialize the train state with the same parameters as the single-device
# model.
manual_pipeline_model = ManualPipelineMLPModel(hidden_dim=dim)
manual_pipeline_state = TrainState.create(apply_fn=manual_pipeline_model.apply,
params=copy.deepcopy(params), tx=tx)
# Define the training step with manually parallelized pipeline stages.
# We use the "alpa.PipeshardParallel" option to let alpa use both
# pipeline parallelism and shard parallelism.
@alpa.parallelize(method=alpa.PipeshardParallel(num_micro_batches=16))
def manual_pipeline_train_step(state, batch):
# Indicate that we are manually assigning pipeline stages.
@alpa.manual_layer_construction
def loss_func(params):
# Mark the start of the 0th pipeline stage.
alpa.mark_pipeline(name='0', mark_type='start')
out = state.apply_fn(params, batch["x"])
loss = jnp.mean((out - batch["y"])**2)
# Mark the end of the 1st pipeline stage.
alpa.mark_pipeline(name='1', mark_type='end')
return loss
    # We use `alpa.grad` here to separate the apply gradient stage from the
# forward/backward stages in the pipeline. This is necessary to ensure that
# the gradient accumulation is correct.
grads = alpa.grad(loss_func)(state.params)
new_state = state.apply_gradients(grads=grads)
return new_state
manual_pipeline_actual_state = manual_pipeline_train_step(manual_pipeline_state,
batch)
assert_allclose(expected_state.params, manual_pipeline_actual_state.params,
atol=5e-3)
alpa.shutdown()
################################################################################
# Pipeline Parallelism with Automatic Assignment
# ----------------------------------------------
# Alpa also supports automatically partitioning the model into multiple
# pipeline stages and assigning each pipeline stage a device mesh such that
# the total execution latency is minimized. Specifically, the automatic
# partitioning algorithm consists of the following steps:
#
# 1. **Layer Construction:** In this step, the operators in the model are
# clustered into "layers" based on a graph clustering algorithm. The
# user needs to specify the total number of layers (i.e. clusters) as
# a hyperparameter.
# 2. **Stage Construction and Mesh Slicing:** In this step, we partition
# the device cluster (device mesh) to multiple submeshes and assign
# layers to submeshes to form pipeline stages to minimize the total
# pipeline execution latency.
alpa.init(cluster="ray")
# Define training step with automatic pipeline-operator parallelism. Note that
# we reuse the same model and state as the single device case. The only
# modification required is the two decorators. The stage construction and
# mesh slicing are performed within the `parallelize` decorator.
@alpa.parallelize(method=alpa.PipeshardParallel(num_micro_batches=16, stage_mode="auto"))
def auto_pipeline_train_step(state, batch):
# Indicate that we use automatic layer construction. The `layer_num` here
# is a hyperparameter to control how many layers we get from the
# layer construction algorithm.
@alpa.automatic_layer_construction(layer_num=2)
def loss_func(params):
out = state.apply_fn(params, batch["x"])
loss = jnp.mean((out - batch["y"])**2)
return loss
    # Again, we use `alpa.grad` here to separate the apply gradient stage from
# the forward/backward stages in the pipeline.
grads = alpa.grad(loss_func)(state.params)
new_state = state.apply_gradients(grads=grads)
return new_state
# In the first call, alpa triggers the compilation.
# The compilation first profiles several costs and solves an optimization
# problem to get the optimal pipeline assignments.
auto_pipeline_actual_state = auto_pipeline_train_step(state, batch)
assert_allclose(expected_state.params, auto_pipeline_actual_state.params,
atol=5e-3)
alpa.shutdown()
|
examples/AHEG.py | SignorMercurio/petlib | 112 | 11111344 |
## An implementation of an additively homomorphic
## ECC El-Gamal scheme, used in Privex.
from petlib.ec import EcGroup
import pytest
def params_gen(nid=713):
"""Generates the AHEG for an EC group nid"""
G = EcGroup(nid)
g = G.generator()
o = G.order()
return (G, g, o)
def key_gen(params):
"""Generates a fresh key pair"""
_, g, o = params
priv = o.random()
pub = priv * g
return (pub, priv)
def enc(params, pub, counter):
"""Encrypts the values of a small counter"""
assert -2**8 < counter < 2**8
G, g, o = params
k = o.random()
a = k * g
b = k * pub + counter * g
return (a, b)
def enc_side(params, pub, counter):
"""Encrypts the values of a small counter"""
assert -2**8 < counter < 2**8
G, g, o = params
k = o.random()
a = k * g
b = k * pub + counter * g
return (a, b, k)
def add(c1, c2):
"""Add two encrypted counters"""
a1, b1 = c1
a2, b2 = c2
return (a1 + a2, b1 + b2)
def mul(c1, val):
"""Multiplies an encrypted counter by a public value"""
a1, b1 = c1
return (val*a1, val*b1)
def randomize(params, pub, c1):
"""Rerandomize an encrypted counter"""
zero = enc(params, pub, 0)
return add(c1, zero)
def make_table(params):
"""Make a decryption table"""
_, g, o = params
table = {}
for i in range(-1000, 1000):
table[i * g] = i
return table
def dec(params, table, priv, c1):
"""Decrypt an encrypted counter"""
_, g, o = params
a, b = c1
plain = b + (-priv * a)
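    # Since pub = priv * g, b - priv*a = k*pub + counter*g - priv*(k*g) = counter*g,
    # which the precomputed table maps back to the small integer counter.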
return table[plain]
def test_AHEG():
params = params_gen()
(pub, priv) = key_gen(params)
table = make_table(params)
# Check encryption and decryption
one = enc(params, pub, 1)
assert dec(params, table, priv, one) == 1
# Check addition
tmp = add(one, one)
two = randomize(params, pub, tmp)
assert dec(params, table, priv, two) == 2
# Check multiplication
tmp1 = mul(two, 2)
four = randomize(params, pub, tmp1)
assert dec(params, table, priv, four) == 4
|
src/genie/libs/parser/iosxe/tests/ShowBridgeDomain/cli/equal/golden_output_4_expected.py | balmasea/genieparser | 204 | 11111346 |
expected_output = {
"bridge_domain": {
4050: {
"aging_timer": 3600,
"bd_domain_id": 4050,
"mac_learning_state": "Enabled",
"number_of_ports_in_all": 0,
"state": "UP",
}
}
}
|
h2o-py/tests/testdir_algos/stackedensemble/pyunit_stackedensemble_gaussian.py | ahmedengu/h2o-3 | 6,098 | 11111362 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import print_function
import h2o
import sys
sys.path.insert(1,"../../../") # allow us to run this standalone
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.estimators.stackedensemble import H2OStackedEnsembleEstimator
from tests import pyunit_utils as pu
from tests.pyunit_utils import assert_warn
seed = 1
def prepare_data(blending=False):
col_types = ["numeric", "numeric", "numeric", "enum", "enum", "numeric", "numeric", "numeric", "numeric"]
dat = h2o.upload_file(path=pu.locate("smalldata/extdata/prostate.csv"),
destination_frame="prostate_hex",
col_types=col_types)
train, test = dat.split_frame(ratios=[.8], seed=1)
x = ["CAPSULE", "GLEASON", "RACE", "DPROS", "DCAPS", "PSA", "VOL"]
y = "AGE"
ds = pu.ns(x=x, y=y, train=train, test=test)
if blending:
train, blend = train.split_frame(ratios=[.7], seed=seed)
return ds.extend(train=train, blend=blend)
else:
return ds
def train_base_models(dataset, **kwargs):
model_args = kwargs if hasattr(dataset, 'blend') else dict(nfolds=3, fold_assignment="Modulo", keep_cross_validation_predictions=True, **kwargs)
gbm = H2OGradientBoostingEstimator(distribution="gaussian",
ntrees=10,
max_depth=3,
min_rows=2,
learn_rate=0.2,
seed=seed,
**model_args)
gbm.train(x=dataset.x, y=dataset.y, training_frame=dataset.train)
rf = H2ORandomForestEstimator(ntrees=10,
seed=seed,
**model_args)
rf.train(x=dataset.x, y=dataset.y, training_frame=dataset.train)
xrf = H2ORandomForestEstimator(ntrees=20,
histogram_type="Random",
seed=seed,
**model_args)
xrf.train(x=dataset.x, y=dataset.y, training_frame=dataset.train)
return [gbm, rf, xrf]
def train_stacked_ensemble(dataset, base_models, **kwargs):
se = H2OStackedEnsembleEstimator(base_models=base_models, seed=seed)
se.train(x=dataset.x, y=dataset.y,
training_frame=dataset.train,
blending_frame=dataset.blend if hasattr(dataset, 'blend') else None,
**kwargs)
return se
def test_suite_stackedensemble_gaussian(blending=False):
def test_predict_on_se_model():
ds = prepare_data(blending)
models = train_base_models(ds)
se = train_stacked_ensemble(ds, models)
for i in range(2): # repeat predict to verify consistency
pred = se.predict(test_data=ds.test)
assert pred.nrow == ds.test.nrow, "expected " + str(pred.nrow) + " to be equal to " + str(ds.test.nrow)
assert pred.ncol == 1, "expected " + str(pred.ncol) + " to be equal to 1 but it was equal to " + str(pred.ncol)
def test_se_performance_is_better_than_individual_models():
ds = prepare_data(blending)
base_models = train_base_models(ds)
def compute_perf(model):
perf = pu.ns(
train=model.model_performance(train=True),
test=model.model_performance(test_data=ds.test)
)
print("{} training performance: ".format(model.model_id))
print(perf.train)
print("{} test performance: ".format(model.model_id))
print(perf.test)
return perf
base_perfs = {}
for model in base_models:
base_perfs[model.model_id] = compute_perf(model)
se = train_stacked_ensemble(ds, base_models)
perf_se = compute_perf(se)
# Check that stack perf is better (smaller) than the best (smaller) base learner perf:
# Training RMSE for each base learner
baselearner_best_rmse_train = min([perf.train.rmse() for perf in base_perfs.values()])
stack_rmse_train = perf_se.train.rmse()
print("Best Base-learner Training RMSE: {}".format(baselearner_best_rmse_train))
print("Ensemble Training RMSE: {}".format(stack_rmse_train))
assert_warn(stack_rmse_train < baselearner_best_rmse_train,
"expected SE training RMSE would be smaller than the best of base learner training RMSE, but obtained: " \
"RMSE (SE) = {}, RMSE (best base learner) = {}".format(stack_rmse_train, baselearner_best_rmse_train))
# Test RMSE for each base learner
baselearner_best_rmse_test = min([perf.test.rmse() for perf in base_perfs.values()])
stack_rmse_test = perf_se.test.rmse()
print("Best Base-learner Test RMSE: {}".format(baselearner_best_rmse_test))
print("Ensemble Test RMSE: {}".format(stack_rmse_test))
assert_warn(stack_rmse_test < baselearner_best_rmse_test,
"expected SE test RMSE would be smaller than the best of base learner test RMSE, but obtained: " \
"RMSE (SE) = {}, RMSE (best base learner) = {}".format(stack_rmse_test, baselearner_best_rmse_test))
def test_validation_frame_produces_same_metric_as_perf_test():
ds = prepare_data(blending)
models = train_base_models(ds)
se = train_stacked_ensemble(ds, models, validation_frame=ds.test)
se_perf = se.model_performance(test_data=ds.test)
se_perf_validation_frame = se.model_performance(valid=True)
# since the metrics object is not exactly the same, we can just test that RSME is the same
assert se_perf.rmse() == se_perf_validation_frame.rmse(), \
"expected SE test RMSE to be the same as SE validation frame RMSE, but obtained: " \
"RMSE (perf on test) = {}, RMSE (test passed as validation frame) = {}".format(se_perf.rmse(), se_perf_validation_frame.rmse())
return [pu.tag_test(test, 'blending' if blending else None) for test in [
test_predict_on_se_model,
test_se_performance_is_better_than_individual_models,
test_validation_frame_produces_same_metric_as_perf_test
]]
pu.run_tests([
test_suite_stackedensemble_gaussian(),
test_suite_stackedensemble_gaussian(blending=True)
])
|
dash/misc/misc_data_app.py | jingmouren/QuantResearch | 623 | 11111364 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import pandas as pd
import numpy as np
import scipy, scipy.stats
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta
from sklearn import linear_model
import plotly.graph_objs as go
import plotly.figure_factory as ff
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_table as dt
import dash_table.FormatTemplate as FormatTemplate
import ta
import data_loader
from time_tools import convert_date_input
from app import app
app.config.suppress_callback_exceptions = True
app.scripts.config.serve_locally = True
# -------------------------------------------------------- data preparation ---------------------------------------- $
misc_data_dict = data_loader.load_misc()
# -------------------------------------------------------- help functions ---------------------------------------- $
# -------------------------------------------------------- define layout ---------------------------------------- $
layout = html.Div([
html.Div([
html.H2("misc data")
], className='banner'),
html.Div(id='cached-data-market-misc-data', style={'display': 'none'}),
html.Div([
html.Div([
dcc.Dropdown(
id="data-item-selection-market-misc-data",
options=[
{'label': md, 'value': md} for md in sorted(misc_data_dict.keys())
],
value='USDT',
),
], className='two columns wind-polar'),
html.Div([
dcc.Dropdown(
id="is-cross-sectional-market-misc-data",
options=[
{'label': md, 'value': md} for md in ['Time-Series', 'Cross-Sectional']
],
value='Time-Series',
),
], className='two columns wind-polar'),
], className='twelve columns row wind-speed-row'),
html.Div([
dt.DataTable(
style_table={'overflowX': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '100px',
'overflow': 'hidden',
'textOverflow': 'ellipsis',
},
data=misc_data_dict['USDT'].to_dict('records'),
columns=[{"name": i, "id": i, "deletable": False} for i in misc_data_dict['USDT'].columns],
editable=False,
row_deletable=False,
filter_action="native",
sort_action="native",
sort_mode='multi',
row_selectable='single', # multi
selected_rows=[],
page_action='native',
page_current=0,
page_size=15,
id='overview-table-market-misc-data'
)
], className='twelve columns row wind-speed-row'),
html.Div([
html.Div([
dcc.Input(id='cross-section-selection-1-market-misc-data', placeholder='(yyyy-mm-dd) or 5Y',
type='text', value='')
], className='two columns wind-polar'),
html.Div([
dcc.Input(id='cross-section-selection-2-market-misc-data', placeholder='(yyyy-mm-dd) or 5Y',
type='text', value='')
], className='two columns wind-polar'),
html.Div([
dcc.Input(id='cross-section-selection-3-market-misc-data', placeholder='(yyyy-mm-dd) or 5Y',
type='text', value='')
], className='two columns wind-polar'),
html.Div([
dcc.Input(id='cross-section-selection-4-market-misc-data', placeholder='(yyyy-mm-dd) or 5Y',
type='text', value='')
], className='two columns wind-polar'),
html.Div([
dcc.Input(id='cross-section-selection-5-market-misc-data', placeholder='(yyyy-mm-dd) or 5Y',
type='text', value='')
], className='two columns wind-polar'),
html.Div([
html.Button('Update Graph', id='update-button-market-misc-data')
], className='one columns wind-polar'),
], className='twelve columns row wind-speed-row'),
html.Div([dcc.Graph(id='historical-time-series-market-misc-data')], className='twelve columns row wind-speed-row')
])
# -------------------------------------------------------- define event handler -------------------------------------- $
@app.callback(
[Output("overview-table-market-misc-data", "data"), Output('overview-table-market-misc-data', 'columns')],
[Input('data-item-selection-market-misc-data', 'value')]
)
def update_datatable_market_misc_data(item_selected):
df = misc_data_dict[item_selected].copy()
df.insert(0, column='Date', value=df.index)
return df.to_dict('records'), [{"name": i, "id": i, "deletable": False} for i in df.columns]
@app.callback(
Output('historical-time-series-market-misc-data', 'figure'),
[Input('data-item-selection-market-misc-data', 'value'),
Input('update-button-market-misc-data', 'n_clicks')],
[State('is-cross-sectional-market-misc-data', 'value'),
State('cross-section-selection-1-market-misc-data', 'value'),
State('cross-section-selection-2-market-misc-data', 'value'),
State('cross-section-selection-3-market-misc-data', 'value'),
State('cross-section-selection-4-market-misc-data', 'value'),
State('cross-section-selection-5-market-misc-data', 'value')]
)
def update_historical_data_plot_markete_misc_data(item_selected, n_clicks, is_cross_sectional, ione, itwo, ithree, ifour, ifive):
print(is_cross_sectional)
if is_cross_sectional == 'Time-Series':
try:
return plot_time_series_market_misc_data(item_selected, ione)
except:
return None
else:
try:
return plot_cross_sectional_market_misc_data(item_selected, ione, itwo, ithree, ifour, ifive)
except:
return None
def plot_time_series_market_misc_data(item_selected, lookback_window):
lookback_date = convert_date_input(lookback_window, datetime(2008, 1, 1))
df_raw = misc_data_dict[item_selected][lookback_date.date():]
if item_selected in ['USDT']:
df = df_raw
elif item_selected in ['PCR:VIX', 'PCR:SPX', 'PCR:SPY']:
df = pd.concat([df_raw['PV']/df_raw['CV'], df_raw['POI']/df_raw['COI']], axis=1)
df.columns = ['PCR:V', 'PCR:OI']
elif 'COT:' in item_selected:
if item_selected not in ['COT:ES', 'COT:NQ', 'COT:UX']:
df = pd.concat([df_raw['Open Interest:F'],
df_raw['Producer/Merchant/Processor/User:Long:F'] - df_raw['Producer/Merchant/Processor/User:Short:F'],
df_raw['Swap Dealers:Long:F'] - df_raw['Swap Dealers:Short:F'],
df_raw['Managed Money:Long:F'] - df_raw['Managed Money:Short:F'],
df_raw['Other Reportables:Long:F'] - df_raw['Other Reportables:Short:F']], axis=1)
df.columns = ['Open Interest', 'Producers', 'Swap Dealers', 'Managed Money', 'Other Report']
df['Commercial'] = df['Producers'] + df['Swap Dealers']
df['Large Spec'] = df['Managed Money'] + df['Other Report']
df['Small Spec'] = 0.0 - df['Commercial'] - df['Large Spec']
else:
df = pd.concat([df_raw['Open Interest:F'],
df_raw['Dealer Intermediary:Long:F'] - df_raw['Dealer Intermediary:Short:F'],
df_raw['Asset Manager/Institutional:Long:F'] - df_raw['Asset Manager/Institutional:Short:F'],
df_raw['Leveraged Funds:Long:F'] - df_raw['Leveraged Funds:Short:F'],
df_raw['Other Reportables:Long:F'] - df_raw['Other Reportables:Short:F'],
df_raw['Nonreportable Positions:Long:F'] - df_raw['Nonreportable Positions:Short:F'],], axis=1)
df.columns = ['Open Interest', 'Dealer Intermediary', 'Asset Manager', 'Leveraged Funds', 'Other Reportables','Nonreportable Positions']
# sym_root = item_selected.split(':')[1]
# hist_price = generic_futures_hist_prices_dict[sym_root][lookback_date.date():].iloc[:,0]
else:
return None
traces = [go.Scatter(x=df.index,
y=df[col],
mode='lines',
name=col)
for col in df.columns]
layout_fig = go.Layout(
title=item_selected,
xaxis=dict(title=item_selected,
rangeslider=dict(
visible=False
),
type='date'),
yaxis=dict(title='Value'),
legend=dict(orientation="h"),
height=800, margin=dict(l=0, r=0, t=0, b=0),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
return go.Figure(data=traces, layout=layout_fig)
def plot_cross_sectional_market_misc_data(item_selected, ione, itwo, ithree, ifour, ifive):
df_raw = misc_data_dict[item_selected]
if item_selected in ['USDT']:
df = df_raw
elif item_selected in ['PCR:VIX', 'PCR:SPX', 'PCR:SPY']:
df = pd.concat([df_raw['PV'] / df_raw['CV'], df_raw['POI'] / df_raw['COI']], axis=1)
df.columns = ['PCR:V', 'PCR:OI']
elif 'COT:' in item_selected:
if item_selected not in ['COT:ES', 'COT:NQ', 'COT:UX']:
df = pd.concat([df_raw['Open Interest:F'],
df_raw['Producer/Merchant/Processor/User:Long:F'] - df_raw[
'Producer/Merchant/Processor/User:Short:F'],
df_raw['Swap Dealers:Long:F'] - df_raw['Swap Dealers:Short:F'],
df_raw['Managed Money:Long:F'] - df_raw['Managed Money:Short:F'],
df_raw['Other Reportables:Long:F'] - df_raw['Other Reportables:Short:F']], axis=1)
df.columns = ['Open Interest', 'Producers', 'Swap Dealers', 'Managed Money', 'Other Report']
df['Commercial'] = df['Producers'] + df['Swap Dealers']
df['Large Spec'] = df['Managed Money'] + df['Other Report']
df['Small Spec'] = 0.0 - df['Commercial'] - df['Large Spec']
else:
df = pd.concat([df_raw['Open Interest:F'],
df_raw['Dealer Intermediary:Long:F'] - df_raw['Dealer Intermediary:Short:F'],
df_raw['Asset Manager/Institutional:Long:F'] - df_raw[
'Asset Manager/Institutional:Short:F'],
df_raw['Leveraged Funds:Long:F'] - df_raw['Leveraged Funds:Short:F'],
df_raw['Other Reportables:Long:F'] - df_raw['Other Reportables:Short:F'],
df_raw['Nonreportable Positions:Long:F'] - df_raw['Nonreportable Positions:Short:F'], ],
axis=1)
df.columns = ['Open Interest', 'Dealer Intermediary', 'Asset Manager', 'Leveraged Funds',
'Other Reportables', 'Nonreportable Positions']
else:
return None
asofdate = df.index[-1]
s0 = df.loc[asofdate]
s = s0.to_frame()
if (ione is not None) and (not not ione):
t1 = convert_date_input(ione, datetime.today())
t1 = t1.date()
dateidx1 = df.index.searchsorted(t1) # first one greater than or equal to
s1 = df.iloc[dateidx1]
s = pd.concat([s, s1], axis=1)
if (itwo is not None) and (not not itwo):
t2 = convert_date_input(itwo, datetime.today())
t2 = t2.date()
dateidx2 = df.index.searchsorted(t2) # first one greater than or equal to
s2 = df.iloc[dateidx2]
s = pd.concat([s, s2], axis=1)
if (ithree is not None) and (not not ithree):
t3 = convert_date_input(ithree, datetime.today())
t3 = t3.date()
dateidx3 = df.index.searchsorted(t3) # first one greater than or equal to
s3 = df.iloc[dateidx3]
s = pd.concat([s, s3], axis=1)
if (ifour is not None) and (not not ifour):
t4 = convert_date_input(ifour, datetime.today())
t4 = t4.date()
dateidx4 = df.index.searchsorted(t4) # first one greater than or equal to
s4 = df.iloc[dateidx4]
s = pd.concat([s, s4], axis=1)
if (ifive is not None) and (not not ifive):
t5 = convert_date_input(ifive, datetime.today())
t5 = t5.date()
dateidx5 = df.index.searchsorted(t5) # first one greater than or equal to
s5 = df.iloc[dateidx5]
s = pd.concat([s, s5], axis=1)
traces = [go.Scatter(x=s.index, y=s[c], name=c.strftime('%Y-%m-%d'), mode='lines+markers',
hovertext=s.index) for c in s.columns]
layout_fig = go.Layout(title=item_selected, xaxis={'title': item_selected}, yaxis={'title': 'Value'},
legend=dict(orientation="h"),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
# plotly.offline.plot({'data': traces, 'layout': layout})
return go.Figure(data=traces, layout=layout_fig)
|
app/grandchallenge/uploads/views.py | kaczmarj/grand-challenge.org | 101 | 11111387 | import logging
from django.http import Http404
from rest_framework import mixins
from rest_framework.decorators import action
from rest_framework.permissions import DjangoObjectPermissions
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST
from rest_framework.viewsets import GenericViewSet
from rest_framework_guardian.filters import ObjectPermissionsFilter
from grandchallenge.uploads.models import UserUpload
from grandchallenge.uploads.serializers import (
UserUploadCompleteSerializer,
UserUploadCreateSerializer,
UserUploadPartsSerializer,
UserUploadPresignedURLsSerializer,
UserUploadSerializer,
)
logger = logging.getLogger(__name__)
class UserUploadViewSet(
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
GenericViewSet,
):
queryset = UserUpload.objects.all()
permission_classes = (DjangoObjectPermissions,)
filter_backends = (ObjectPermissionsFilter,)
def get_serializer_class(self):
if self.serializer_class is None:
if self.action == "create":
return UserUploadCreateSerializer
else:
return UserUploadSerializer
else:
return self.serializer_class
@action(
detail=True,
methods=["get"],
serializer_class=UserUploadPartsSerializer,
url_path="(?P<s3_upload_id>[^/]+)/list-parts",
)
def list_parts(self, request, pk, s3_upload_id):
object = self.get_object()
if object.s3_upload_id != s3_upload_id:
logger.warning(
f"Upload ID did not match: {object=}, {s3_upload_id=}"
)
raise Http404
serializer = self.get_serializer(instance=object)
return Response(data=serializer.data)
@action(
detail=True,
methods=["patch"],
serializer_class=UserUploadPresignedURLsSerializer,
url_path="(?P<s3_upload_id>[^/]+)/generate-presigned-urls",
)
def generate_presigned_urls(self, request, pk, s3_upload_id):
object = self.get_object()
if object.s3_upload_id != s3_upload_id:
logger.warning(
f"Upload ID did not match: {object=}, {s3_upload_id=}"
)
raise Http404
if not object.can_upload_more:
self.permission_denied(request, message="Upload limit reached")
serializer = self.get_serializer(
instance=object, data=request.data, partial=True
)
if serializer.is_valid():
return Response(data=serializer.data)
else:
return Response(
data=serializer.errors, status=HTTP_400_BAD_REQUEST
)
@action(
detail=True,
methods=["patch"],
serializer_class=UserUploadCompleteSerializer,
url_path="(?P<s3_upload_id>[^/]+)/complete-multipart-upload",
)
def complete_multipart_upload(self, request, pk, s3_upload_id):
object = self.get_object()
if object.s3_upload_id != s3_upload_id:
logger.warning(
f"Upload ID did not match: {object=}, {s3_upload_id=}"
)
raise Http404
serializer = self.get_serializer(
instance=object, data=request.data, partial=True
)
if serializer.is_valid():
serializer.save()
return Response(data=serializer.data)
else:
return Response(
data=serializer.errors, status=HTTP_400_BAD_REQUEST
)
@action(
detail=True,
methods=["patch"],
url_path="(?P<s3_upload_id>[^/]+)/abort-multipart-upload",
)
def abort_multipart_upload(self, request, pk, s3_upload_id):
object = self.get_object()
if object.s3_upload_id != s3_upload_id:
logger.warning(
f"Upload ID did not match: {object=}, {s3_upload_id=}"
)
raise Http404
object.abort_multipart_upload()
object.save()
serializer = self.get_serializer(instance=object)
return Response(serializer.data)
|
python3/monitor_console.py | charlesnchr/jupyter-vim | 358 | 11111445 |
"""
Feature to get a buffer with jupyter output
"""
# Standard
import asyncio
from queue import Queue
# Local
from jupyter_util import echom, unquote_string, str_to_vim, get_vim
# Process local
import vim
class Monitor():
"""Jupyter kernel monitor buffer and message line"""
def __init__(self, kernel_client):
self.kernel_client = kernel_client
self.cmd = None
self.cmd_id = None
self.cmd_count = 0
self.line_queue = Queue()
## Open the Jupyter terminal in vim, and move cursor to it
if -1 == vim.eval('jupyter#monitor_console#OpenJupyterMonitor()'):
echom('__jupyter_monitor__ failed to open!', 'Error')
return
# Launch timer that will update the buffer
timer_interval = get_vim('g:jupyter_timer_interval', 500)
vim.command(f'call timer_start({timer_interval}, "jupyter#monitor_console#UpdateConsoleBuffer")')
for channel in ['shell', 'iopub', 'control']:
asyncio.run_coroutine_threadsafe(self.monitor(channel), kernel_client.loop)
async def monitor(self, channel):
"""Start monitoring a channel.
Parameters
----------
channel : 'shell' | 'iopub' | 'control'
The channel to monitor.
"""
while not self.kernel_client.loop.is_closed():
msg = await self.kernel_client.get_next_msg(channel)
self.line_queue.put(f'[{channel}] {msg["header"]["msg_type"]}: {msg["content"]}')
def timer_write_msgs(self):
"""Write kernel <-> vim messages to monitor buffer"""
timer_interval = get_vim('g:jupyter_timer_interval', 500)
vim.command(f'call timer_start({timer_interval}, "jupyter#monitor_console#UpdateConsoleBuffer")')
        # Check whether there is anything queued to write
if self.line_queue.empty():
return
# Get buffer (same indexes as vim)
b_nb = int(vim.eval('bufnr("__jupyter_monitor__")'))
buf = vim.buffers[b_nb]
cur_win = vim.eval('win_getid()')
term_win = vim.eval('bufwinid({})'.format(str(b_nb)))
vim.command('call win_gotoid({})'.format(term_win))
vim.command('set modifiable')
        # Append message to jupyter terminal buffer
while not self.line_queue.empty():
msg = self.line_queue.get_nowait()
for line in msg.splitlines():
line = unquote_string(str_to_vim(line))
buf.append(line)
vim.command('normal! G')
vim.command('set nomodifiable')
vim.command('call win_gotoid({})'.format(cur_win))
|
shadowproxy/proxies/socks/client.py | xiaoshihu/shadowproxy | 180 | 11111470 |
import random
from curio import socket
from ...protocols import socks4, socks5
from ...utils import run_parser_curio, set_disposable_recv
from ..base.client import ClientBase
class SocksClient(ClientBase):
proto = "SOCKS"
async def init(self):
auth = getattr(self.ns, "auth", None)
client_parser = socks5.client.parser(auth, self.target_addr)
await run_parser_curio(client_parser, self.sock)
redundant = client_parser.readall()
set_disposable_recv(self.sock, redundant)
class Socks4Client(ClientBase):
proto = "SOCKS4"
async def init(self):
info = await socket.getaddrinfo(
*self.target_addr, socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP
)
addr = random.choice(info)[-1]
socks4_client_parser = socks4.client.parser(addr)
await run_parser_curio(socks4_client_parser, self.sock)
redundant = socks4_client_parser.readall()
set_disposable_recv(self.sock, redundant)
|
examples/contouring.py | JNDanielson/mplstereonet | 120 | 11111562 |
"""A basic example of producing a density contour plot of poles to planes."""
import matplotlib.pyplot as plt
import numpy as np
import mplstereonet
# Fix random seed so that output is consistent
np.random.seed(1977)
fig, ax = mplstereonet.subplots()
# Generate a random scatter of planes around the given plane
# All measurements follow the right-hand-rule to indicate dip direction
strike, dip = 90, 80
num = 10
strikes = strike + 10 * np.random.randn(num)
dips = dip + 10 * np.random.randn(num)
# Create filled contours of the poles of the generated planes...
# By default this uses a modified Kamb contouring technique with exponential
# smoothing (See Vollmer, 1995)
cax = ax.density_contourf(strikes, dips, measurement='poles')
# Plot the poles as points on top of the contours
ax.pole(strikes, dips)
# Turn on a grid and add a colorbar
ax.grid(True)
fig.colorbar(cax)
plt.show()
|
tests/test_fields.py | Highpolar-Softwares/django-url-filter | 303 | 11111590 | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import pytest
from django import forms
from url_filter.fields import MultipleValuesField
from url_filter.validators import MaxLengthValidator, MinLengthValidator
class TestMultipleValuesField(object):
def test_init(self):
field = MultipleValuesField(
child=forms.IntegerField(), min_values=2, max_values=100, delimiter=";"
)
assert isinstance(field.child, forms.IntegerField)
assert field.delimiter == ";"
assert any((isinstance(i, MinLengthValidator) for i in field.many_validators))
assert any((isinstance(i, MaxLengthValidator) for i in field.many_validators))
def test_clean_empty(self):
assert MultipleValuesField(required=False).clean("") is None
def test_clean(self):
field = MultipleValuesField(min_values=2, max_values=3)
assert field.clean("hello,world") == ["hello", "world"]
with pytest.raises(forms.ValidationError):
field.clean("hello")
with pytest.raises(forms.ValidationError):
field.clean("hello,world,and,many,happy,rainbows")
def test_clean_all_valid(self):
field = MultipleValuesField(forms.IntegerField(), all_valid=False)
assert field.clean("1,2,3") == [1, 2, 3]
assert field.clean("a,1,b,2") == [1, 2]
def test_many_to_python(self):
field = MultipleValuesField()
assert field.many_to_python("hello,world") == ["hello", "world"]
def test_many_validate(self):
assert MultipleValuesField().many_validate([1, 2]) is None
with pytest.raises(forms.ValidationError):
MultipleValuesField().many_validate([])
def test_many_run_validators(self):
field = MultipleValuesField(error_messages={"min_length": "foo"})
assert field.many_run_validators(None) is None
with pytest.raises(forms.ValidationError) as e:
field.many_run_validators(["hello"])
assert e.value.error_list[0].message == "foo"
|
tests/test_import_concept_pairs.py | vishalbelsare/kgtk | 222 | 11111611 | import shutil
import unittest
import tempfile
import pandas as pd
from kgtk.cli_entry import cli_entry
from kgtk.exceptions import KGTKException
class TestKGTKImportConceptPairs(unittest.TestCase):
def setUp(self) -> None:
self.temp_dir=tempfile.mkdtemp()
def tearDown(self) -> None:
shutil.rmtree(self.temp_dir)
def test_kgtk_import_concept_pairs(self):
cli_entry("kgtk", "import-concept-pairs", "-i", "data/synonyms.txt", "--source", "RG", "--relation", "/r/Synonym", "-o", f'{self.temp_dir}/roget_syn.tsv')
df = pd.read_csv(f'{self.temp_dir}/roget_syn.tsv', sep='\t')
self.assertEqual(len(df.columns), 9)
for i, row in df.iterrows():
self.assertTrue(row['relation']=='/r/Synonym')
print('ROGET', df)
|
python_modules/libraries/dagster-mysql/dagster_mysql/event_log/__init__.py | dbatten5/dagster | 4,606 | 11111614 |
from .event_log import MySQLEventLogStorage
|
bokeh/sphinxext/util.py | kevin1kevin1k/bokeh | 445 | 11111680 | from ..settings import settings
from ..resources import Resources
def get_sphinx_resources(include_bokehjs_api=False):
docs_cdn = settings.docs_cdn()
# if BOKEH_DOCS_CDN is unset just use default CDN resources
if docs_cdn is None:
resources = Resources(mode="cdn")
else:
# "BOKEH_DOCS_CDN=local" is used for building and displaying the docs locally
if docs_cdn == "local":
resources = Resources(mode="server", root_url="/en/latest/")
# "BOKEH_DOCS_CDN=test:newthing" is used for building and deploying test docs to
# a one-off location "en/newthing" on the docs site
elif docs_cdn.startswith("test:"):
resources = Resources(
mode="server", root_url="/en/%s/" % docs_cdn.split(":")[1])
# Otherwise assume it is a dev/rc/full release version and use CDN for it
else:
resources = Resources(mode="cdn", version=docs_cdn)
if include_bokehjs_api:
resources.js_components.append("bokeh-api")
return resources
|
test/crc16.py | pengdao/simcc | 108 | 11111729 |
def crc16(x):
    """CRC-16 using the reversed polynomial 0xA001 and initial value 0xFFFF."""
    b = 0xA001
    a = 0xFFFF
    for byte in x:
        a = a ^ ord(byte)
        for i in range(8):
            last = a % 2
            a = a >> 1
            if last == 1:
                a = a ^ b
    # Zero-pad to four hex digits, split into the high and low bytes,
    # then recombine them into the final 16-bit value.
    aa = '0' * (6 - len(hex(a))) + hex(a)[2:]
    ll, hh = int(aa[:2], 16), int(aa[2:], 16)
    return hh + ll * 256


print(crc16('hello'))
print(crc16('world'))
|
python/mask_detection/models/pd_model.py | windstamp/Paddle-Inference-Demo | 115 | 11111730 | import numpy as np
from paddle.inference import Config
from paddle.inference import create_predictor
class Model:
def __init__(self,
model_file,
params_file,
use_mkldnn=True,
use_gpu=False,
device_id=0):
config = Config(model_file, params_file)
config.enable_memory_optim()
if use_gpu:
print("ENABLE_GPU")
config.enable_use_gpu(100, device_id)
if use_mkldnn:
config.enable_mkldnn()
self.predictor = create_predictor(config)
def run(self, img_list):
input_names = self.predictor.get_input_names()
for i, name in enumerate(input_names):
input_tensor = self.predictor.get_input_handle(input_names[i])
input_tensor.reshape(img_list[i].shape)
input_tensor.copy_from_cpu(img_list[i].copy())
self.predictor.run()
results = []
output_names = self.predictor.get_output_names()
for i, name in enumerate(output_names):
output_tensor = self.predictor.get_output_handle(output_names[i])
output_data = output_tensor.copy_to_cpu()
results.append(output_data)
return results
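

# Minimal usage sketch (the model/params file names and the input shape are
# assumptions; real values depend on the exported mask-detection model):
#
#   model = Model("__model__", "__params__", use_mkldnn=True, use_gpu=False)
#   dummy = np.zeros((1, 3, 128, 128), dtype="float32")
#   outputs = model.run([dummy])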
|
examples/sharepoint/migration/export_list.py | theodoriss/Office365-REST-Python-Client | 544 | 11111739 | from office365.sharepoint.client_context import ClientContext
from tests import test_team_site_url, test_client_credentials
ctx = ClientContext(test_team_site_url).with_credentials(test_client_credentials)
ctx.web.lists.get_by_title("Tasks").save_as_template("Tasks.stp", "Tasks", "", True).execute_query()
|
Lib/test/test_compiler/sbs_code_tests/06_funcall_varargs.py | diogommartins/cinder | 1,886 | 11111750 | c = (a, b)
fun(a, b, *c)
# EXPECTED:
[
...,
BUILD_TUPLE_UNPACK_WITH_CALL(2),
CALL_FUNCTION_EX(0),
...,
]
|
tools/perf/page_sets/loading_mobile.py | zipated/src | 2,151 | 11111757 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from page_sets import page_cycler_story
from telemetry.page import cache_temperature as cache_temperature_module
from telemetry.page import shared_page_state
from telemetry.page import traffic_setting as traffic_setting_module
from telemetry import story
class LoadingMobileStorySet(story.StorySet):
""" A collection of tests to measure loading performance of mobile sites.
Design doc: https://docs.google.com/document/d/1QKlZIoURAxZk-brrXsKYZl9O8ieqXht3ogeF9yLNFCI/edit
"""
def __init__(self, cache_temperatures=None, cache_temperatures_for_pwa=None,
traffic_settings=None):
super(LoadingMobileStorySet, self).__init__(
archive_data_file='data/loading_mobile.json',
cloud_storage_bucket=story.PARTNER_BUCKET)
if cache_temperatures is None:
cache_temperatures = [cache_temperature_module.ANY]
if cache_temperatures_for_pwa is None:
cache_temperatures_for_pwa = [cache_temperature_module.ANY]
if traffic_settings is None:
traffic_settings = [traffic_setting_module.NONE]
self.AddStories(['global'], [
('https://www.google.com/search?q=flower#q=flower+delivery',
'GoogleRedirectToGoogleJapan'),
('https://www.youtube.com/watch?v=MU3YuvNRhVY', 'Youtube'),
# pylint: disable=line-too-long
('https://www.google.co.in/search?q=%E0%A4%AB%E0%A5%82%E0%A4%B2&rct=j#q=%E0%A4%AB%E0%A5%82%E0%A4%B2+%E0%A4%B5%E0%A4%BF%E0%A4%A4%E0%A4%B0%E0%A4%A3',
'GoogleIndia'),
('https://www.google.com.br/search?q=flor#q=Entrega+de+flores&start=10',
'GoogleBrazil'),
('https://www.google.co.id/#q=pengiriman+bunga', 'GoogleIndonesia'),
('https://m.facebook.com/?soft=messages', 'Facebook'),
# pylint: disable=line-too-long
('http://g1.globo.com/politica/noticia/2016/02/maioria-do-stf-autoriza-fisco-obter-dados-bancarios-sem-decisao-judicial.html',
'G1'),
# pylint: disable=line-too-long
('https://m.baidu.com/s?word=%E9%B2%9C%E8%8A%B1%E9%80%9F%E9%80%92&oq=%E9%B2%9C%E8%8A%B1',
'Baidu'),
# pylint: disable=line-too-long
('http://news.yahoo.com/were-top-10-most-visited-us-national-parks-105323727.html',
'YahooNews'),
('https://en.m.wikipedia.org/wiki/Solo_Foods', 'Wikipedia'),
# pylint: disable=line-too-long
('http://noticias.bol.uol.com.br/ultimas-noticias/brasil/2016/08/03/tufao-nida-nao-deixa-vitimas-mas-prejuizos-de-us-43-milhoes.htm',
'BOLNoticias'),
('http://www.amazon.com/gp/aw/s/ref=is_s/189-8585431-1246432?k=shoes',
'Amazon'),
# pylint: disable=line-too-long
('http://m.tribunnews.com/superskor/2016/08/03/ribuan-polisi-dikerahkan-mengawal-bonek',
'TribunNews'),
('http://xw.qq.com/news/20160803025029/NEW2016080302502901', 'QQNews'),
# pylint: disable=line-too-long
('http://m.kaskus.co.id/thread/57a03a3214088d91068b4567/inilah-akibat-bersikap-overprotektif-terhadap-anak/?ref=homelanding&med=hot_thread',
'Kaskus'),
('http://www.dailymotion.com/video/x3d1kj5_fallout-4-review_videogames',
'Dailymotion'),
('https://mobile.twitter.com/scottjehl/status/760618697727803394',
'Twitter'),
('http://m.kapanlagi.com/lirik/artis/anji/kata_siapa/',
'KapanLagi'),
# pylint: disable=line-too-long
('http://olx.co.id/iklan/iphone-6s-64-rose-gold-warna-favorite-IDiSdm5.html#5310a118c3;promoted',
'OLX'),
# pylint: disable=line-too-long
('http://enquiry.indianrail.gov.in/mntes/MntesServlet?action=MainMenu&subAction=excep&excpType=EC',
'EnquiryIndianRail'),
# TODO(rnephew): Rerecord this. crbug.com/728882
# pylint: disable=line-too-long
# ('https://googleblog.blogspot.jp/2016/02/building-safer-web-for-everyone.html',
# 'Blogspot'),
# pylint: disable=line-too-long
# ('http://m.detik.com/finance/read/2016/02/19/151843/3146351/1034/ekspor-tambang-mentah-mau-dibuka-lagi-kalau-sudah-bangun-smelter-bagaimana',
# 'Detik'),
], cache_temperatures, traffic_settings)
self.AddStories(['pwa'], [
# pylint: disable=line-too-long
('https://www.flipkart.com/big-wing-casuals/p/itmemeageyfn6m9z?lid=LSTSHOEMEAGURG2PHPW18FTBN&pid=SHOEMEAGURG2PHPW',
'FlipKart'),
('https://smp.suumo.jp/mansion/tokyo/sc_104/cond/?moreCond=1',
'Suumo'),
('https://voice-memos.appspot.com', 'VoiceMemos'),
('https://dev.opera.com/', 'DevOpera'),
('https://flipboard.com/topic/yoga', 'FlipBoard'),
# TODO(rnephew): Record these. crbug.com/728882
# ('https://wiki-offline.jakearchibald.com/',
# 'WikiOffline'),
# ('https://busrouter.sg', 'BusRouter'),
# ('https://airhorner.com', 'AirHorner'),
], cache_temperatures_for_pwa, traffic_settings)
self.AddStories(['tough_ttfmp'], [
('http://www.localmoxie.com', 'LocalMoxie'),
('http://www.dawn.com', 'Dawn'),
('http://www.thairath.co.th', 'Thairath'),
('http://www.hashocean.com', 'HashOcean'),
], cache_temperatures, traffic_settings)
self.AddStories(['easy_ttfmp'], [
('http://www.slideshare.net', 'SlideShare'),
('http://www.bradesco.com.br', 'Bradesco'),
('http://www.gsshop.com', 'GSShop'),
], cache_temperatures, traffic_settings)
self.AddStories(['tough_tti'], [
('http://www.thestar.com.my', 'TheStar'),
('http://www.58pic.com', '58Pic'),
('http://www.hongkiat.com', 'Hongkiat'),
('http://www.ibicn.com', 'IBI'),
], cache_temperatures, traffic_settings)
self.AddStories(['easy_tti'], [
('http://www.dramaq.com.tw', 'Dramaq'),
('http://www.locanto.in', 'Locanto'),
('http://www.francetvinfo.fr', 'FranceTVInfo'),
], cache_temperatures, traffic_settings)
def AddStories(self, tags, urls, cache_temperatures, traffic_settings):
for url, name in urls:
for temp in cache_temperatures:
for traffic in traffic_settings:
self.AddStory(page_cycler_story.PageCyclerStory(url, self, name=name,
shared_page_state_class=shared_page_state.SharedMobilePageState,
cache_temperature=temp, traffic_setting=traffic, tags=tags))
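# --- Hypothetical usage sketch (added; not part of the original file). It only
# uses constants already imported above; iterating over .stories is an
# assumption about the telemetry StorySet API.
# story_set = LoadingMobileStorySet(
#     cache_temperatures=[cache_temperature_module.ANY],
#     traffic_settings=[traffic_setting_module.NONE])
# for s in story_set.stories:
#     print(s.name, s.tags)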
|
lib/core/eval.py | frostinassiky/2D-TAN | 249 | 11111764 | import json
import argparse
import numpy as np
from terminaltables import AsciiTable
from core.config import config, update_config
def iou(pred, gt): # require pred and gt is numpy
assert isinstance(pred, list) and isinstance(gt,list)
pred_is_list = isinstance(pred[0],list)
gt_is_list = isinstance(gt[0],list)
if not pred_is_list: pred = [pred]
if not gt_is_list: gt = [gt]
pred, gt = np.array(pred), np.array(gt)
inter_left = np.maximum(pred[:,0,None], gt[None,:,0])
inter_right = np.minimum(pred[:,1,None], gt[None,:,1])
inter = np.maximum(0.0, inter_right - inter_left)
union_left = np.minimum(pred[:,0,None], gt[None,:,0])
union_right = np.maximum(pred[:,1,None], gt[None,:,1])
union = np.maximum(0.0, union_right - union_left)
overlap = 1.0 * inter / union
if not gt_is_list:
overlap = overlap[:,0]
if not pred_is_list:
overlap = overlap[0]
return overlap
def rank(pred, gt):
return pred.index(gt) + 1
def nms(dets, thresh=0.4, top_k=-1):
"""Pure Python NMS baseline."""
if len(dets) == 0: return []
order = np.arange(0,len(dets),1)
dets = np.array(dets)
x1 = dets[:, 0]
x2 = dets[:, 1]
lengths = x2 - x1
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
if len(keep) == top_k:
break
xx1 = np.maximum(x1[i], x1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
inter = np.maximum(0.0, xx2 - xx1)
ovr = inter / (lengths[i] + lengths[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return dets[keep]
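# --- Worked example (added sketch; not part of the original file) ---
# iou([[0, 2]], [[1, 3]]) returns [[0.333...]]: the intersection is
# min(2, 3) - max(0, 1) = 1 and the enclosing span is max(2, 3) - min(0, 1) = 3.
# nms([[0, 2], [0.1, 2.1], [5, 7]], thresh=0.4) keeps [[0, 2], [5, 7]]:
# [0.1, 2.1] overlaps [0, 2] with IoU ~0.90 > 0.4, so it is suppressed.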
def eval(segments, data):
tious = [float(i) for i in config.TEST.TIOU.split(',')] if isinstance(config.TEST.TIOU,str) else [config.TEST.TIOU]
recalls = [int(i) for i in config.TEST.RECALL.split(',')] if isinstance(config.TEST.RECALL,str) else [config.TEST.RECALL]
eval_result = [[[] for _ in recalls] for _ in tious]
max_recall = max(recalls)
average_iou = []
for seg, dat in zip(segments, data):
seg = nms(seg, thresh=config.TEST.NMS_THRESH, top_k=max_recall).tolist()
overlap = iou(seg, [dat['times']])
average_iou.append(np.mean(np.sort(overlap[0])[-3:]))
for i,t in enumerate(tious):
for j,r in enumerate(recalls):
eval_result[i][j].append((overlap > t)[:r].any())
eval_result = np.array(eval_result).mean(axis=-1)
miou = np.mean(average_iou)
return eval_result, miou
def eval_predictions(segments, data, verbose=True):
eval_result, miou = eval(segments, data)
if verbose:
print(display_results(eval_result, miou, ''))
return eval_result, miou
def display_results(eval_result, miou, title=None):
tious = [float(i) for i in config.TEST.TIOU.split(',')] if isinstance(config.TEST.TIOU,str) else [config.TEST.TIOU]
recalls = [int(i) for i in config.TEST.RECALL.split(',')] if isinstance(config.TEST.RECALL,str) else [config.TEST.RECALL]
display_data = [['Rank@{},mIoU@{}'.format(i,j) for i in recalls for j in tious]+['mIoU']]
eval_result = eval_result*100
miou = miou*100
display_data.append(['{:.02f}'.format(eval_result[j][i]) for i in range(len(recalls)) for j in range(len(tious))]
+['{:.02f}'.format(miou)])
table = AsciiTable(display_data, title)
for i in range(len(tious)*len(recalls)):
table.justify_columns[i] = 'center'
return table.table
def parse_args():
parser = argparse.ArgumentParser(description='Train localization network')
# general
parser.add_argument('--cfg', help='experiment configure file name', required=True, type=str)
args, rest = parser.parse_known_args()
# update config
update_config(args.cfg)
parser.add_argument('--verbose', default=False, action="store_true", help='print progress bar')
args = parser.parse_args()
return args
def reset_config(config, args):
if args.verbose:
config.VERBOSE = args.verbose
if __name__ == '__main__':
args = parse_args()
reset_config(config, args)
train_data = json.load(open('/data/home2/hacker01/Data/DiDeMo/train_data.json', 'r'))
val_data = json.load(open('/data/home2/hacker01/Data/DiDeMo/val_data.json', 'r'))
moment_frequency_dict = {}
for d in train_data:
times = [t for t in d['times']]
for time in times:
time = tuple(time)
if time not in moment_frequency_dict.keys():
moment_frequency_dict[time] = 0
moment_frequency_dict[time] += 1
prior = sorted(moment_frequency_dict, key=moment_frequency_dict.get, reverse=True)
prior = [list(item) for item in prior]
prediction = [prior for d in val_data]
eval_predictions(prediction, val_data) |
examples/text_classification/model.py | mattolson93/wikipedia2vec | 744 | 11111781 | <reponame>mattolson93/wikipedia2vec
import torch
import torch.nn as nn
import torch.nn.functional as F
class NABoE(nn.Module):
def __init__(self, word_embedding, entity_embedding, num_classes, dropout_prob, use_word):
super(NABoE, self).__init__()
self.use_word = use_word
self.word_embedding = nn.Embedding(word_embedding.shape[0], word_embedding.shape[1], padding_idx=0)
self.word_embedding.weight = nn.Parameter(torch.FloatTensor(word_embedding))
self.entity_embedding = nn.Embedding(entity_embedding.shape[0], entity_embedding.shape[1], padding_idx=0)
self.entity_embedding.weight = nn.Parameter(torch.FloatTensor(entity_embedding))
self.attention_layer = nn.Linear(2, 1)
self.dropout = nn.Dropout(p=dropout_prob)
self.output_layer = nn.Linear(word_embedding.shape[1], num_classes)
def forward(self, word_ids, entity_ids, prior_probs):
word_sum_vector = self.word_embedding(word_ids).sum(1)
entity_vectors = self.entity_embedding(entity_ids)
word_norm_vector = word_sum_vector / torch.norm(word_sum_vector, dim=1, keepdim=True).clamp(min=1e-12).detach()
entity_norm_vectors = entity_vectors / torch.norm(entity_vectors, dim=2, keepdim=True).clamp(min=1e-12).detach()
cosine_similarities = (word_norm_vector.unsqueeze(1) * entity_norm_vectors).sum(2, keepdim=True)
attention_features = torch.cat((prior_probs.unsqueeze(2), cosine_similarities), 2)
attention_logits = self.attention_layer(attention_features).squeeze(-1)
attention_logits = attention_logits.masked_fill_(entity_ids == 0, -1e32)
attention_weights = F.softmax(attention_logits, dim=1)
feature_vector = (entity_vectors * attention_weights.unsqueeze(-1)).sum(1)
if self.use_word:
word_feature_vector = word_sum_vector / (word_ids != 0).sum(dim=1, keepdim=True).type_as(word_sum_vector)
feature_vector = feature_vector + word_feature_vector
feature_vector = self.dropout(feature_vector)
return self.output_layer(feature_vector)
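if __name__ == "__main__":
    # Minimal smoke test added as a sketch (not part of the original file);
    # vocabulary/entity sizes, embedding dimension and batch shapes are assumptions.
    import numpy as np
    word_emb = np.random.randn(100, 16).astype("float32")
    entity_emb = np.random.randn(50, 16).astype("float32")
    model = NABoE(word_emb, entity_emb, num_classes=4, dropout_prob=0.1, use_word=True)
    word_ids = torch.randint(1, 100, (2, 30))
    entity_ids = torch.randint(1, 50, (2, 10))
    prior_probs = torch.rand(2, 10)
    logits = model(word_ids, entity_ids, prior_probs)
    print(logits.shape)  # expected: torch.Size([2, 4])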
|
plecost_lib/libs/utils.py | enterstudio/plecost | 363 | 11111808 | <gh_stars>100-1000
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Plecost: Wordpress vulnerabilities finder
#
# @url: http://iniqua.com/labs/
# @url: https://github.com/iniqua/plecost
#
# @author:<NAME> aka ffranz (http://iniqua.com/)
# @author:<NAME> aka cr0hn (http://www.cr0hn.com/me/)
#
# Copyright (c) 2015, Iniqua Team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
This file contains some orphan functions
"""
import urllib
import aiohttp
import asyncio
import os.path as op
from urllib.parse import urljoin
from random import choice, randint
from difflib import SequenceMatcher
from string import ascii_letters, digits
try:
from termcolor import colored
except ImportError:
def colored(text, color):
return text
__all__ = ["colorize", "generate_error_page", "get_diff_ratio", "get_data_folder", "check_redirects"]
# ----------------------------------------------------------------------
def log(message, log_level=0, current_log_level=None):
"""
Auxiliary function to use as log level
:param current_log_level: Log level selected at the moment of running the program
:type current_log_level: int
:param message: Message to display
:type message: basestring
:param log_level: log level: 0-4
:type log_level: int
"""
from sys import stdout
from os import environ
if current_log_level is None:
try:
_current_log_level = int(environ["PLECOST_LOG_LEVEL"])
except KeyError:
_current_log_level = 0
else:
_current_log_level = current_log_level
if log_level <= _current_log_level:
print(message, end='')
stdout.flush()
# ----------------------------------------------------------------------
def colorize(text, color="yellow", activate=True):
"""
This function return the text in a indicated color, if color is activate.
:param text: Text to colorize
:type text: basestring
:param color: Color name
:type color: basestring
:param activate: boolean value that indicates if color is enabled.
:type activate: bool
:return: string with colored (if activated) text.
:rtype: str
"""
if activate:
return colored(text, color)
# ----------------------------------------------------------------------
def generate_error_page(url):
"""
Takes an URL to an existing document and generates a random URL
to a nonexisting document, to trigger a server error.
Example:
>>> from libs.utils import generate_error_page
>>> generate_error_page("http://www.site.com/index.php")
'http://www.site.com/index.php.19ds_8vjX'
:param url: Original URL.
:type url: str
:return: Generated URL.
:rtype: str
"""
if not isinstance(url, str):
raise TypeError("Expected basestring, got '%s' instead" % type(url))
# Get random path
random_path = "".join(choice(ascii_letters + digits) for _ in range(randint(5, 20)))
return "%s/%s" % (url, random_path)
# ------------------------------------------------------------------------------
def get_diff_ratio(text1, text2):
"""
Compare two texts and return a floating point value between 0 and 1 with
the difference ratio, with 0 being absolutely different and 1 being
absolutely equal - the more similar the two texts are, the closer the ratio
will be to 1.
..note:
This function was taken from Golismero project: http://github.com/golismero/golismero
:param text1: First text to compare.
:type text1: basestring
:param text2: Second text to compare.
:type text2: basestring
:returns: Floating point value between 0 and 1.
:rtype: float
"""
# Solve some trivial type errors (like using None).
if not text1:
text1 = ""
if not text2:
text2 = ""
# Trivial case, the two texts are identical.
if text1 == text2:
return 1.0
# Use the difflib sequence matcher to calculate the ratio.
m = SequenceMatcher(a=text1, b=text2)
return m.ratio()
# ----------------------------------------------------------------------
def get_data_folder():
"""
Return path of resources folder.
:return: path of resources folder.
:rtype: str
"""
return op.abspath(op.join(op.dirname(__file__), "..", "resources"))
# ----------------------------------------------------------------------
def update_progress(values, print_function=None, prefix_text="", bar_len=40):
"""
Creates a process bar in ASCII
>>> for x in update_progress(range(1, 65), prefix_text="Prefix text: "):
Prefix text: [########] 20.31%
:param values: list or generator with items to process
:type values: list
:param print_function: function used to display information. By default is buildin 'print'.
:type print_function: function
:param prefix_text: Text to write before bar.
:type prefix_text: str
:param bar_len: number of characters used into bar.
:type bar_len: int
"""
_values = list(values)
_len_values = len(_values)
_var_len = bar_len
_print = print_function or print
for i, x in enumerate(_values, start=1):
_percent = (i/_len_values)
_percent_fix = int((i/_len_values) * _var_len)
_print('\r{0} [#{1}] {2:.2f}%'.format(prefix_text, '#'*_percent_fix, _percent*100), end='')
yield x
# ------------------------------------------------------------------------------
@asyncio.coroutine
def download(url,
max_redirect=2,
loop=None,
session=None,
method="get",
get_content=True,
auto_redirect=True,
custom_hostname=None):
"""
Download a web page content.
:param url: path where to get information.
:type url: basestring
    :param max_redirect: maximum number of redirects to follow
    :type max_redirect: int
    :param session: aiohttp client session used to perform the request
    :type session: aiohttp.ClientSession
:param loop: Event loop object
:type loop: loop
:param method: HTTP method to use
:type method: str
:param get_content: boolean value that indicates if must download content or not
:type get_content: bool
:return: Web page content as a tuple: (http_header, status, basestring)
:rtype: (dict, int, str)
"""
ret_status = None
ret_headers = None
ret_content = None
custom_headers = {}
if custom_hostname:
custom_headers["host"] = custom_hostname
try:
with aiohttp.Timeout(5):
if max_redirect < 0:
return None, None, None
response = yield from session.request(
method,
url,
headers=custom_headers,
allow_redirects=False)
if response.status in (300, 301, 302, 303, 307):
location = response.headers.get('location')
next_url = urllib.parse.urljoin(url, location)
if max_redirect > 0:
log('\n[!] redirect to %r from %r\n' % (next_url, url),
log_level=1)
if auto_redirect is True:
# return _loop.run_until_complete(download(next_url,
# max_redirect=(max_redirect-1)))
r = yield from download(next_url,
max_redirect=(max_redirect - 1))
return r
else:
ret_headers, ret_status, ret_content = response.headers, response.status, None
else:
log('\n[!] redirect limit reached for %r from %r\n' % (next_url, url), log_level=2)
ret_headers, ret_status, ret_content = response.headers, response.status, None
else:
content = None
if get_content:
content = (yield from response.read()).decode(errors="ignore")
ret_headers, ret_status, ret_content = response.headers, response.status, content
# Timeout error
except Exception:
pass
return ret_headers, ret_status, ret_content
# --------------------------------------------------------------------------
class ConcurrentDownloader:
"""Get a list of URLs. Download their content and call user defined
action """
# ----------------------------------------------------------------------
def __init__(self,
process_url_content,
session,
max_redirects=2,
max_tasks=10,
ignore_403=False,
loop=None):
"""
:param process_url_content: function to process URL content, after it is downloaded
:type process_url_content: function
:param max_redirects: maximum number of redirects
:type max_redirects: int
        :param session: aiohttp client session used to perform the requests
        :type session: aiohttp.ClientSession
        :param max_tasks: maximum number of concurrent tasks
        :type max_tasks: int
        :param loop: optional event loop object
        :type loop: loop
        :param ignore_403: Ignore 403 responses from server
        :type ignore_403: bool
        >>> import asyncio
        >>> import aiohttp
        >>> def display(url, headers, status, content): print(url, status)
        >>> loop = asyncio.get_event_loop()
        >>> session = aiohttp.ClientSession(loop=loop)
        >>> v = ConcurrentDownloader(process_url_content=display, session=session)
        >>> v.add_url("http://myhost.com")
        >>> loop.run_until_complete(v.run())
"""
self.session = session
        self.ignore_403 = ignore_403
self.max_redirects = max_redirects
self.process_url_function = process_url_content or (lambda x: None)
self.max_tasks = max_tasks
self.loop = loop or asyncio.get_event_loop()
self.q = asyncio.Queue(loop=self.loop)
self.__results = []
self.__results_append = self.results.append
# ----------------------------------------------------------------------
@property
def results(self):
return self.__results
# ----------------------------------------------------------------------
@asyncio.coroutine
def _work(self):
while True:
# Get an URL to process
url = yield from self.q.get()
# Download content
log("\n |- Trying: %s" % colorize(url, "yellow"), log_level=1)
headers, status, content = yield from download(url,
session=self.session,
max_redirect=self.max_redirects,
loop=self.loop)
if self.ignore_403 is True and status == 403:
continue
else:
# Processing response
_r = self.process_url_function(url, headers, status, content)
if _r is not None:
self.__results_append(_r)
del headers, status, content
self.q.task_done()
# --------------------------------------------------------------------------
#
# Public method
#
# ----------------------------------------------------------------------
def close(self):
"""Close resources."""
        self.session.close()
# --------------------------------------------------------------------------
@asyncio.coroutine
def run(self):
"""
Start the analyzer daemon.
"""
# Add workers
workers = [asyncio.Task(self._work(), loop=self.loop)
for _ in range(self.max_tasks)]
# Wait content of workers ends
yield from self.q.join()
for w in workers:
w.cancel()
# ----------------------------------------------------------------------
def add_url(self, url):
"""
Add a URL to queue to analyze
:param url: URL to store
:type url: str
"""
self.q.put_nowait(url)
# ----------------------------------------------------------------------
def add_url_list(self, urls):
"""
Add an URL list to processing
:param urls: list with URLs
:type urls: str
"""
for x in urls:
self.q.put_nowait(x)
|
test_inference.py | shintotm/breast_density_classifier | 158 | 11111825 | import argparse
import numpy as np
MODEL_PATH_DICT = {
"cnn": {
"tf": "saved_models/BreastDensity_BaselineBreastModel/model.ckpt",
"torch": "saved_models/BreastDensity_BaselineBreastModel/model.p",
},
"histogram": {
"tf": "saved_models/BreastDensity_BaselineHistogramModel/model.ckpt",
"torch": "saved_models/BreastDensity_BaselineHistogramModel/model.p",
},
}
def get_result(library, device_type, model_type):
if library == "tf":
import density_model_tf
inference_func = density_model_tf.inference
elif library == "torch":
import density_model_torch
inference_func = density_model_torch.inference
else:
raise RuntimeError(library)
return inference_func({
"model_type": model_type,
"model_path": MODEL_PATH_DICT[model_type][library],
"device_type": device_type,
"gpu_number": 0,
"image_path": "images/",
"input_size": (2600, 2000),
"bins_histogram": 50,
}, verbose=False)
GOLDEN_RESULT = {
"histogram": (0.0819444, 0.78304, 0.133503, 0.00151265),
"cnn": (0.209689, 0.765076, 0.024949, 0.000285853),
}
# CPU-GOLDEN Consistency
def test_tf_golden_equal_cnn():
assert np.allclose(get_result("tf", "cpu", "cnn"), GOLDEN_RESULT["cnn"])
def test_torch_golden_equal_cnn():
assert np.allclose(get_result("torch", "cpu", "cnn"), GOLDEN_RESULT["cnn"])
def test_tf_golden_equal_histogram():
assert np.allclose(get_result("tf", "cpu", "histogram"), GOLDEN_RESULT["histogram"])
def test_torch_golden_equal_histogram():
assert np.allclose(get_result("torch", "cpu", "histogram"), GOLDEN_RESULT["histogram"])
# CPU-GPU Consistency
def test_tf_cpu_gpu_equal_cnn():
assert np.allclose(get_result("tf", "cpu", "cnn"), get_result("tf", "gpu", "cnn"))
def test_torch_cpu_gpu_equal_cnn():
assert np.allclose(get_result("torch", "cpu", "cnn"), get_result("torch", "gpu", "cnn"))
def test_tf_cpu_gpu_equal_histogram():
assert np.allclose(get_result("tf", "cpu", "histogram"), get_result("tf", "gpu", "histogram"))
def test_torch_cpu_gpu_equal_histogram():
assert np.allclose(get_result("torch", "cpu", "histogram"), get_result("torch", "gpu", "histogram"))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run Tests')
parser.add_argument('--using')
parser.add_argument('--with-gpu', action="store_true")
args = parser.parse_args()
test_list = []
if args.using == "tf":
test_list.append(test_tf_golden_equal_cnn)
test_list.append(test_tf_golden_equal_histogram)
if args.with_gpu:
test_list.append(test_tf_cpu_gpu_equal_cnn)
test_list.append(test_tf_cpu_gpu_equal_histogram)
elif args.using == "torch":
test_list.append(test_torch_golden_equal_cnn)
test_list.append(test_torch_golden_equal_histogram)
if args.with_gpu:
test_list.append(test_torch_cpu_gpu_equal_cnn)
test_list.append(test_torch_cpu_gpu_equal_histogram)
else:
raise RuntimeError("Provide --using 'tf' or 'torch'")
for test_func in test_list:
try:
test_func()
print("{}: PASSED".format(test_func.__name__))
except Exception as e:
print("{}: FAILED".format(test_func.__name__))
raise
print("All {} test(s) passed.".format(len(test_list)))
|
src/API/python_listener.py | parzival3/Surelog | 156 | 11111852 | <reponame>parzival3/Surelog
# Sample listener
SLregisterNewErrorType("[NOTE :PY0403]", "Module declaration \"%s\"", "");
SLoverrideSeverity("[NOTE :PY0403]", "INFO")
def enterModule_nonansi_header(prog, ctx):
SLaddErrorContext(prog, ctx, "[INFO :PY0403]", SLgetText(prog, ctx))
def enterModule_ansi_header(prog, ctx):
SLaddErrorContext(prog, ctx, "[INFO :PY0403]", SLgetText(prog, ctx))
|
lib/pymedphys/_imports/slow/imports.py | ethanio12345/pymedphys | 207 | 11111854 | # pylint: disable = unused-import, reimported, import-error
import tensorflow # type: ignore
|
koku/sources/test/test_sources_error_message.py | rubik-ai/koku | 157 | 11111862 | #
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Test the Sources Error Messages."""
from django.test import TestCase
from rest_framework.serializers import ValidationError
from api.common import error_obj
from providers.provider_errors import ProviderErrors
from sources.sources_error_message import SourcesErrorMessage
class SourcesErrorMessageTest(TestCase):
"""Test cases for SourcesErrorMessage."""
def test_aws_errors(self):
"""Test AWS error types."""
test_matrix = [
{
"key": ProviderErrors.AWS_ROLE_ARN_UNREACHABLE,
"internal_message": "internal resource name message string",
"expected_message": ProviderErrors.AWS_ROLE_ARN_UNREACHABLE_MESSAGE,
},
{
"key": ProviderErrors.AWS_BILLING_SOURCE_NOT_FOUND,
"internal_message": "internal billing source message string",
"expected_message": ProviderErrors.AWS_BILLING_SOURCE_NOT_FOUND_MESSAGE,
},
{
"key": ProviderErrors.AWS_COMPRESSION_REPORT_CONFIG,
"internal_message": "internal compression error message",
"expected_message": ProviderErrors.AWS_COMPRESSION_REPORT_CONFIG_MESSAGE,
},
{
"key": ProviderErrors.AWS_BUCKET_MISSING,
"internal_message": ProviderErrors.AWS_BUCKET_MISSING_MESSAGE,
"expected_message": ProviderErrors.AWS_BUCKET_MISSING_MESSAGE,
},
]
for test in test_matrix:
with self.subTest(test=test):
key = test.get("key")
message = test.get("internal_message")
error = ValidationError(error_obj(key, message))
message_obj = SourcesErrorMessage(error)
self.assertEquals(message_obj.display(source_id=1), test.get("expected_message"))
def test_azure_errors(self):
"""Test Azure error types."""
test_matrix = [
{
"key": ProviderErrors.AZURE_CLIENT_ERROR,
"internal_message": ", AdalError: Get Token request returned http error: 401 and server response:",
"expected_message": ProviderErrors.AZURE_INCORRECT_CLIENT_SECRET_MESSAGE,
},
{
"key": ProviderErrors.AZURE_CLIENT_ERROR,
"internal_message": (
", AdalError: Get Token request returned http error: 400 and server response:"
' {"error":"invalid_request","error_description":"AADSTS90002: Tenant'
),
"expected_message": ProviderErrors.AZURE_INCORRECT_TENANT_ID_MESSAGE,
},
{
"key": ProviderErrors.AZURE_CLIENT_ERROR,
"internal_message": (
", AdalError: Get Token request returned http error: 400 and server response:"
' {"error":"unauthorized_client","error_description":"AADSTS700016:'
),
"expected_message": ProviderErrors.AZURE_INCORRECT_CLIENT_ID_MESSAGE,
},
{
"key": ProviderErrors.AZURE_CLIENT_ERROR,
"internal_message": (
"Azure Error: ResourceGroupNotFound\nMessage: Resource group" "'RG2' could not be found."
),
"expected_message": ProviderErrors.AZURE_INCORRECT_RESOURCE_GROUP_MESSAGE,
},
{
"key": ProviderErrors.AZURE_CLIENT_ERROR,
"internal_message": (
"Azure Error: ResourceNotFound\nMessage: The "
"Resource 'Microsoft.Storage/storageAccounts/mysa5' under "
"resource group 'RG1' was not found"
),
"expected_message": ProviderErrors.AZURE_INCORRECT_STORAGE_ACCOUNT_MESSAGE,
},
{
"key": ProviderErrors.AZURE_CLIENT_ERROR,
"internal_message": (
"Azure Error: SubscriptionNotFound\nMessage: The "
"subscription '2639de71-ca37-4a17-a104-17665a50e7fd'"
" could not be found."
),
"expected_message": ProviderErrors.AZURE_INCORRECT_SUBSCRIPTION_ID_MESSAGE,
},
{
"key": ProviderErrors.AZURE_CLIENT_ERROR,
"internal_message": "Random azure error",
"expected_message": ProviderErrors.AZURE_GENERAL_CLIENT_ERROR_MESSAGE,
},
]
for test in test_matrix:
with self.subTest(test=test):
key = test.get("key")
message = test.get("internal_message")
error = ValidationError(error_obj(key, message))
message_obj = SourcesErrorMessage(error)
self.assertEquals(message_obj.display(source_id=1), test.get("expected_message"))
def test_general_string_error(self):
"""Test general string error fallback."""
random_error_dict = {"rando": "error"}
message_obj = SourcesErrorMessage(random_error_dict)
self.assertEquals(message_obj.display(source_id=1), str(random_error_dict))
def test_available_source(self):
"""Test an available source message."""
message_obj = SourcesErrorMessage(None).display(source_id=1)
self.assertEquals(message_obj, "")
|
iPERCore/tools/utils/geometry/mesh.py | JSssssss/iPERCore | 2,223 | 11111885 | <reponame>JSssssss/iPERCore<filename>iPERCore/tools/utils/geometry/mesh.py<gh_stars>1000+
# Copyright (c) 2020-2021 impersonator.org authors (<NAME> and <NAME>). All rights reserved.
import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import json
def save_to_obj(path, verts, faces, vts, vns, faces_vts, faces_vns):
"""
    Save a mesh (e.g. the SMPL model) into a Wavefront .obj file.
    Parameters:
    ---------
    path: Path to save.
    verts: (N, 3) vertex coordinates.
    faces: (F, 3) 0-based vertex indices per face.
    vts: (T, 2) texture coordinates (may be empty).
    vns: (M, 3) vertex normals (may be empty).
    faces_vts: (F, 3) texture-coordinate indices per face (may be empty).
    faces_vns: (F, 3) normal indices per face (may be empty).
"""
with open(path, "w") as fp:
fp.write("g\n")
for v in verts:
fp.write("v %.8f %.8f %.8f\n" % (v[0], v[1], v[2]))
if len(vts) != 0:
for vt in vts:
fp.write("vt %.8f %.8f\n" % (vt[0], vt[1]))
if len(vns) != 0:
for vn in vns:
fp.write("vn %f %f\n" % (vn[0], vn[1]))
if len(faces_vts) == 0 or len(faces_vns) == 0:
# index from 1
for f in faces + 1:
fp.write("f %d %d %d\n" % (f[0], f[1], f[2]))
else:
# index from 1
for f, vt, vn in zip(faces + 1, faces_vts + 1, faces_vns + 1):
fp.write(
"f %d/%d/%d %d/%d/%d %d/%d/%d\n" % (
f[0], vt[0], vn[0],
f[1], vt[1], vn[1],
f[2], vt[2], vn[2])
)
fp.write("s off\n")
def load_obj(obj_file):
with open(obj_file, "r") as fp:
verts = []
faces = []
vts = []
vns = []
faces_vts = []
faces_vns = []
for line in fp:
line = line.rstrip()
line_splits = line.split()
prefix = line_splits[0]
if prefix == "v":
verts.append(np.array([line_splits[1], line_splits[2], line_splits[3]], dtype=np.float32))
elif prefix == "vn":
vns.append(np.array([line_splits[1], line_splits[2], line_splits[3]], dtype=np.float32))
elif prefix == "vt":
vts.append(np.array([line_splits[1], line_splits[2]], dtype=np.float32))
elif prefix == "f":
f = []
f_vt = []
f_vn = []
for p_str in line_splits[1:4]:
p_split = p_str.split("/")
f.append(p_split[0])
if len(p_split) > 1:
f_vt.append(p_split[1])
f_vn.append(p_split[2])
# index from 0
faces.append(np.array(f, dtype=np.int32) - 1)
faces_vts.append(np.array(f_vt, dtype=np.int32) - 1)
faces_vns.append(np.array(f_vn, dtype=np.int32) - 1)
elif prefix == "g" or prefix == "s":
continue
else:
# raise ValueError(prefix)
pass
obj_dict = {
"vertices": np.array(verts, dtype=np.float32),
"faces": np.array(faces, dtype=np.int32),
"vts": np.array(vts, dtype=np.float32),
"vns": np.array(vns, dtype=np.float32),
"faces_vts": np.array(faces_vts, dtype=np.int32),
"faces_vns": np.array(faces_vns, dtype=np.int32)
}
return obj_dict
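# --- Hypothetical round-trip sketch (added; not part of the original file).
# The temporary file name is an assumption.
# verts = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]], dtype=np.float32)
# faces = np.array([[0, 1, 2]], dtype=np.int32)
# save_to_obj("/tmp/triangle.obj", verts, faces, [], [], [], [])
# obj = load_obj("/tmp/triangle.obj")
# assert obj["vertices"].shape == (3, 3) and obj["faces"].shape == (1, 3)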
def sample_textures(texture_flow, images):
"""
texture_flow: B x F x T x T x 2
(In normalized coordinate [-1, 1])
images: B x 3 x N x N
output: B x F x T x T x 3
"""
# Reshape into B x F x T*T x 2
T = texture_flow.size(-2)
F = texture_flow.size(1)
flow_grid = texture_flow.view(-1, F, T * T, 2)
# B x 3 x F x T*T
samples = torch.nn.functional.grid_sample(images, flow_grid)
# B x 3 x F x T x T
samples = samples.view(-1, 3, F, T, T)
# B x F x T x T x 3
return samples.permute(0, 2, 3, 4, 1)
def get_spherical_coords(X):
# X is N x 3
rad = np.linalg.norm(X, axis=1)
# Inclination
theta = np.arccos(X[:, 2] / rad)
# Azimuth
phi = np.arctan2(X[:, 1], X[:, 0])
# Normalize both to be between [-1, 1]
vv = (theta / np.pi) * 2 - 1
uu = ((phi + np.pi) / (2 * np.pi)) * 2 - 1
# Return N x 2
return np.stack([uu, vv], 1)
def compute_coords(tex_size):
"""
:param tex_size:
:return: (2, T*T)
"""
alpha = np.arange(tex_size, dtype=np.float) / (tex_size - 1)
beta = np.arange(tex_size, dtype=np.float) / (tex_size - 1)
# Barycentric coordinate values
coords = np.stack([p for p in itertools.product(*[alpha, beta])]) # T*T x 2
coords = torch.FloatTensor(coords.T) # (2, T*T)
return coords
def compute_uvsampler(verts, faces, tex_size=2):
"""
For this mesh, pre-computes the UV coordinates for
F x T x T points.
Returns F x T x T x 2
"""
alpha = np.arange(tex_size, dtype=np.float) / (tex_size - 1)
beta = np.arange(tex_size, dtype=np.float) / (tex_size - 1)
# Barycentric coordinate values
coords = np.stack([p for p in itertools.product(*[alpha, beta])]) # 36 x 2
vs = verts[faces]
# Compute alpha, beta (this is the same order as NMR)
v2 = vs[:, 2] # (656, 3)
v0v2 = vs[:, 0] - vs[:, 2] # (656, 3)
v1v2 = vs[:, 1] - vs[:, 2] # (656, 3)
# F x 3 x T*2
samples = np.dstack([v0v2, v1v2]).dot(coords.T) + v2.reshape(-1, 3, 1)
# F x T*2 x 3 points on the sphere
samples = np.transpose(samples, (0, 2, 1))
# Now convert these to uv.
uv = get_spherical_coords(samples.reshape(-1, 3))
# uv = uv.reshape(-1, len(coords), 2)
uv = uv.reshape((-1, tex_size, tex_size, 2))
return uv
def create_uvsampler(uv_mapping_path="data/uv_mappings.txt", tex_size=2, fill_back=False):
"""
For this mesh, pre-computes the UV coordinates for
F x T x T points.
Returns F x T*T x 2
"""
alpha = np.arange(tex_size, dtype=np.float32) / (tex_size - 1)
beta = np.arange(tex_size, dtype=np.float32) / (tex_size - 1)
# Barycentric coordinate values
coords = np.stack([p for p in itertools.product(*[alpha, beta])]) # T*2 x 2
obj_info = load_obj(uv_mapping_path)
vts = obj_info["vts"]
vts[:, 1] = 1 - vts[:, 1]
faces = obj_info["faces_vts"]
if fill_back:
faces = np.concatenate((faces, faces[:, ::-1]), axis=0)
# F x 3 x 2
f2vts = vts[faces]
# Compute alpha, beta (this is the same order as NMR)
v2 = f2vts[:, 2] # (nf, 2)
v0v2 = f2vts[:, 0] - f2vts[:, 2] # (nf, 2)
v1v2 = f2vts[:, 1] - f2vts[:, 2] # (nf, 2)
# F x 2 x T*2
samples = np.dstack([v0v2, v1v2]).dot(coords.T) + v2.reshape(-1, 2, 1)
samples = np.clip(samples, a_min=0.0, a_max=1.0)
# F x T*2 x 2 points on the sphere
uv = np.transpose(samples, (0, 2, 1))
# uv = uv.reshape(-1, tex_size, tex_size, 2)
# normalize to [-1, 1]
uv = uv * 2 - 1
return uv
def compute_barycenter(f2vts):
"""
Args:
f2vts: F x 3 x 2
Returns:
fbc: F x 2
"""
# Compute alpha, beta (this is the same order as NMR)
v2 = f2vts[:, 2] # (nf, 2)
v0v2 = f2vts[:, 0] - f2vts[:, 2] # (nf, 2)
v1v2 = f2vts[:, 1] - f2vts[:, 2] # (nf, 2)
fbc = v2 + 0.5 * v0v2 + 0.5 * v1v2
return fbc
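# --- Worked example (added sketch; not part of the original file) ---
# With f2vts = np.array([[[0., 0.], [1., 0.], [0., 1.]]]) the fixed 0.5/0.5 weights give
# fbc = v2 + 0.5*(v0 - v2) + 0.5*(v1 - v2) = 0.5*v0 + 0.5*v1 = [[0.5, 0.]],
# i.e. the midpoint of the v0-v1 edge of each face.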
def get_f2vts(uv_map_path_or_obj_info, fill_back=False, z=1):
"""
For this mesh, pre-computes the bary-center coords.
Returns F x 3 x 3
"""
if isinstance(uv_map_path_or_obj_info, str):
obj_info = load_obj(uv_map_path_or_obj_info)
else:
obj_info = uv_map_path_or_obj_info
vts = np.copy(obj_info["vts"])
vts[:, 1] = 1 - vts[:, 1]
vts = vts * 2 - 1
# F x (2 + 1) = F x 3
vts = np.concatenate([vts, np.zeros((vts.shape[0], 1), dtype=np.float32) + z], axis=-1)
faces = obj_info["faces_vts"]
if fill_back:
faces = np.concatenate((faces, faces[:, ::-1]), axis=0)
# F x 3 x 3
f2vts = vts[faces]
return f2vts
def cal_face_k_nearest(fbc, nearest_k=10):
"""
Args:
fbc (np.ndarray): f x 3 or (nf x 2)
nearest_k (int): the nearest percent
Returns:
nearest_ids (np.ndarray): f x nearest_k
"""
# f x f
nf = fbc.shape[0]
fbc_square = fbc ** 2
fbc_square_sum = np.sum(fbc_square, axis=1)
dists = np.zeros((nf, nf))
dists += np.reshape(fbc_square_sum, (1, nf)) # 1 * nf
dists += np.reshape(fbc_square_sum, (nf, 1)) # nf * 1
dists -= 2 * np.dot(fbc, fbc.T) # nf * nf
nearest_ids = np.argsort(dists, axis=-1)[:, 0:nearest_k]
return nearest_ids
def find_part_k_nearest_faces(f2vts, parts, k=20):
"""
Args:
f2vts (np.ndarray): F x 3 x 3
parts (dict):
k (int):
Returns:
face_k_nearest:
"""
fbc = compute_barycenter(f2vts)
nf = fbc.shape[0]
face_k_nearest = np.empty((nf, k), dtype=np.int)
for name, f_ids in parts.items():
part_fbc = fbc[f_ids]
nearest_ids = cal_face_k_nearest(part_fbc, nearest_k=k)
nearest_f_ids = np.array(f_ids, dtype=np.int)[nearest_ids]
face_k_nearest[f_ids, :] = nearest_f_ids
return face_k_nearest
def get_front_ids(nf, front_info, fill_back=False):
if fill_back:
half_nf = nf // 2
with open(front_info, "r") as reader:
front_data = json.load(reader)
faces = front_data["face"]
if fill_back:
faces = faces + [f + half_nf for f in faces]
return faces
def get_back_ids(nf, all_info, front_info, fill_back=False):
if fill_back:
half_nf = nf // 2
with open(all_info, "r") as reader:
head_faces = set(json.load(reader)["face"])
with open(front_info, "r") as front_reader:
front_faces = set(json.load(front_reader)["face"])
faces = list(head_faces - front_faces)
if fill_back:
faces = faces + [f + half_nf for f in faces]
return faces
def get_part_ids(nf, part_info, fill_back=False):
if fill_back:
half_nf = nf // 2
with open(part_info, "r") as reader:
part_data = json.load(reader)
part_names = sorted(part_data.keys())
total_faces = set()
ordered_faces = dict()
for i, part_name in enumerate(part_names):
part_vals = part_data[part_name]
faces = part_vals["face"]
if fill_back:
faces = faces + [f + half_nf for f in faces]
ordered_faces[part_name] = faces
total_faces |= set(faces)
nf_counter = len(total_faces)
assert nf_counter == nf, "nf_counter = {}, nf = {}".format(nf_counter, nf)
return ordered_faces
def binary_mapping(nf):
width = len(np.binary_repr(nf))
map_fn = [np.array(list(map(int, np.binary_repr(i, width=width)))) for i in range(nf)]
map_fn = np.stack(map_fn, axis=0)
bg = np.zeros((1, width), dtype=np.float32) - 1.0
return map_fn, bg
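# --- Worked example (added sketch; not part of the original file) ---
# binary_mapping(4) uses width = len(np.binary_repr(4)) = 3, so map_fn is
# [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1]] and bg is a single row of -1.0 values.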
def ids_mapping(nf):
map_fn = np.arange(0, 1, 1/nf, dtype=np.float32)
bg = np.array([[-1]], dtype=np.float32)
return map_fn, bg
def par_mapping(nf, part_info, fill_back=False):
if fill_back:
half_nf = nf // 2
with open(part_info, "r") as reader:
part_data = json.load(reader)
ndim = len(part_data) + 1 # 10
map_fn = np.zeros((nf, ndim), dtype=np.float32)
part_names = sorted(part_data.keys())
total_faces = set()
for i, part_name in enumerate(part_names):
part_vals = part_data[part_name]
faces = part_vals["face"]
if fill_back:
faces = faces + [f + half_nf for f in faces]
map_fn[faces, i] = 1.0
total_faces |= set(faces)
nf_counter = len(total_faces)
assert nf_counter == nf, "nf_counter = {}, nf = {}".format(nf_counter, nf)
# add bg
bg = np.zeros((1, ndim), dtype=np.float32)
bg[0, -1] = 1
return map_fn, bg
def front_mapping(nf, front_face_info, fill_back=False):
if fill_back:
half_nf = nf // 2
map_fn = np.zeros((nf, 1), dtype=np.float32)
with open(front_face_info, "r") as reader:
front_data = json.load(reader)
faces = front_data["face"]
if fill_back:
faces = faces + [f + half_nf for f in faces]
map_fn[faces] = 1.0
# add bg
bg = np.zeros((1, 1), dtype=np.float32)
return map_fn, bg
def back_mapping(nf, head_face_info, front_face_info, fill_back=False):
if fill_back:
half_nf = nf // 2
map_fn = np.zeros((nf, 1), dtype=np.float32)
with open(head_face_info, "r") as reader:
head_faces = set(json.load(reader)["face"])
with open(front_face_info, "r") as front_reader:
front_faces = set(json.load(front_reader)["face"])
faces = list(head_faces - front_faces)
if fill_back:
faces = faces + [f + half_nf for f in faces]
map_fn[faces] = 1.0
# add bg
bg = np.zeros((1, 1), dtype=np.float32)
return map_fn, bg
def create_mapping(map_name, obj_info,
part_path="assets/configs/pose3d/smpl_part_info.json",
front_path="assets/configs/pose3d/front_body.json",
facial_path="assets/configs/pose3d/front_facial.json",
head_path="assets/configs/pose3d/head.json",
contain_bg=True, fill_back=False):
"""
Args:
map_name:
"uv" -> (F + 1) x 2 (bg as -1)
"uv_seg" -> (F + 1) x 3 (bs as -1)
"ids" -> (F + 1) x 1 (bg as -1)
"binary" -> (F + 1) x 14 (bs as -1)
"seg" -> (F + 1) x 1 (bs as 0)
"par" -> (F + 1) x (10 + 1)
obj_info:
part_path:
front_path:
facial_path:
head_path:
contain_bg:
fill_back:
Returns:
"""
# F x C
f2vts = get_f2vts(obj_info, fill_back=fill_back, z=0)
nf = f2vts.shape[0]
if map_name == "uv":
fbc = compute_barycenter(f2vts)
map_fn = fbc[:, 0:2] # F x 2
bg = np.array([[-1, -1]], dtype=np.float32)
elif map_name == "seg":
map_fn = np.ones((nf, 1), dtype=np.float32)
bg = np.array([[0]], dtype=np.float32)
elif map_name == "uv_seg":
fbc = compute_barycenter(f2vts)
map_fn = fbc # F x 3
bg = np.array([[0, 0, 1]], dtype=np.float32)
elif map_name == "par":
map_fn, bg = par_mapping(nf, part_path)
elif map_name == "front":
map_fn, bg = front_mapping(nf, front_path, fill_back=fill_back)
elif map_name == "facial":
map_fn, bg = front_mapping(nf, facial_path, fill_back=fill_back)
elif map_name == "head":
map_fn, bg = front_mapping(nf, head_path, fill_back=fill_back)
elif map_name == "back":
map_fn, bg = back_mapping(nf, head_path, facial_path, fill_back=fill_back)
elif map_name == "ids":
map_fn, bg = ids_mapping(nf)
elif map_name == "binary":
map_fn, bg = binary_mapping(nf)
else:
raise ValueError("map name error {}".format(map_name))
if contain_bg:
map_fn = np.concatenate([map_fn, bg], axis=0)
return map_fn
def get_part_face_ids(part_type, mapping_path="assets/checkpoints/pose3d/mapper.txt",
part_path="assets/configs/pose3d/smpl_part_info.json",
front_path="assets/configs/pose3d/front_body.json",
head_path="assets/configs/pose3d/head.json",
facial_path="assets/configs/pose3d/front_facial.json",
fill_back=False):
# F x C
f2vts = get_f2vts(mapping_path, fill_back=fill_back, z=0)
nf = f2vts.shape[0]
if part_type == "head_front":
faces = get_front_ids(nf, facial_path, fill_back=fill_back)
elif part_type == "head_back":
faces = get_back_ids(nf, head_path, facial_path, fill_back=fill_back)
elif part_type == "body_front":
faces = get_front_ids(nf, front_path, fill_back=fill_back)
elif part_type == "par":
faces = get_part_ids(nf, part_path, fill_back=fill_back)
else:
raise ValueError("map name error {}".format(part_type))
return faces
def get_map_fn_dim(map_name):
"""
:param map_name:
"seg" -> (F + 1) x 1 (bs as -1 or 0)
"uv" -> (F + 1) x 2 (bg as -1)
"uv_seg" -> (F + 1) x 3 (bg as -1)
"ids" -> (F + 1) x 1 (bg as -1)
"binary" -> (F + 1) x 15 (bs as -1)
"par" -> (F + 1) x (10 + 1)
:return:
"""
# F x C
if map_name == "seg":
dim = 1
elif map_name == "uv":
dim = 2
elif map_name == "uv_seg":
dim = 3
elif map_name == "par":
dim = 11
elif map_name == "ids":
dim = 1
elif map_name == "binary":
dim = 15
else:
raise ValueError("map name error {}".format(map_name))
return dim
def cvt_fim_enc(fim_enc, map_name):
h, w, c = fim_enc.shape
if map_name == "uv":
# (H, W, 2), bg is -1, -> (H, W, 3)
img = np.ones((h, w, 3), dtype=np.float32)
# print(fim_enc.shape)
img[:, :, 0:2] = fim_enc[:, :, 0:2]
img = np.transpose(img, axes=(2, 0, 1))
elif map_name == "seg":
# (H, W, 1), bg is -1 -> (H, W)
img = fim_enc[:, :, 0]
elif map_name == "uv_seg":
# (H, W, 3) -> (H, W, 3)
img = fim_enc.copy()
img = np.transpose(img, axes=(2, 0, 1))
elif map_name == "par":
# (H, W, C) -> (H, W)
img = fim_enc.argmax(axis=-1)
img = img.astype(np.float32)
img /= img.max()
elif map_name == "ids":
# (H, W, 1), bg is -1 -> (H, W)
img = fim_enc[:, :, 0]
elif map_name == "binary":
img = np.zeros((h, w), dtype=np.float32)
def bin2int(bits):
total = 0
for shift, j in enumerate(bits[::-1]):
if j:
total += 1 << shift
return total
for i in range(h):
for j in range(w):
val = bin2int(fim_enc[i, j, :])
img[i, j] = val
img /= img.max()
else:
raise ValueError(map_name)
img = img.astype(np.float32)
return img
|
recipes/Python/577366_TRAC_Interpreter__Sixties_programming/recipe-577366.py | tdiprima/code | 2,023 | 11111889 | import sys
import unittest
BEGIN_ACTIVE_FUNC = "\x80"
BEGIN_NEUTRAL_FUNC = "\x81"
END_FUNC = "\x8f"
END_ARG = "\x8e"
SEGMENT_GAP = "\xff"
class TracError(Exception):
pass
def list_get(list_, index, default=""):
try:
return list_[index]
except:
return default
def scan_char(list_, pos):
return list_get(list_, pos)
def scan_chars(list_, pos, n):
chars = []
for i in range(n):
c = scan_char(list_, pos + i)
if c:
chars.append(c)
return "".join(chars)
class Processor(object):
def __init__(self, program=""):
self.work = [] # workspace containing current TRAC program
self.sp = 0 # position of scanning pointer in workspace
self.forms = {} # key-value storage for program variables
self.output = "" # last string printed to output by ps for unit testing
self.trace = True # flag for printing trace results of function evaluation
self.primitives = {} # dictionary containing bound methods for TRAC primitives
self.initialize(program)
def tn(self, args):
self.trace = True
return ""
def tf(self, args):
self.trace = False
return ""
def ds(self, args):
key = list_get(args, 0)
value = list_get(args, 1)
self.forms[key] = value
return ""
def ps(self, args):
try:
s = list_get(args, 0)
print s
self.output = s
except:
pass
return ""
def ad(self, args):
try:
num1 = int(list_get(args, 0))
num2 = int(list_get(args, 1))
return str(num1 + num2)
except:
return ""
def su(self, args):
try:
num1 = int(list_get(args, 0))
num2 = int(list_get(args, 1))
return str(num1 - num2)
except:
return ""
def ml(self, args):
try:
num1 = int(list_get(args, 0))
num2 = int(list_get(args, 1))
return str(num1 * num2)
except:
return ""
def dv(self, args):
try:
num1 = int(list_get(args, 0))
num2 = int(list_get(args, 1))
return str(num1 / num2)
except:
return ""
def eq(self, args):
try:
s1 = list_get(args, 0)
s2 = list_get(args, 1)
eq_result = list_get(args, 2)
neq_result = list_get(args, 3)
if s1 == s2:
return eq_result
else:
return neq_result
except:
return ""
def ss(self, args):
try:
form_key = args.pop(0)
form = self.forms[form_key]
form_marked = form
for i in range(len(args)):
arg = args[i]
marker = "%s%s" % (SEGMENT_GAP, chr(i))
form_marked = form_marked.replace(arg, marker)
self.forms[form_key] = form_marked
form_list = []
form_list += form_marked
#print "ss: %s" % (form_list)
return ""
except:
return ""
def cl(self, args):
try:
form_key = args.pop(0)
form = self.forms[form_key]
form_processed = form
for i in range(len(args)):
arg = args[i]
marker = "%s%s" % (SEGMENT_GAP, chr(i))
form_processed = form_processed.replace(marker, arg)
return form_processed
except:
return ""
def initialize(self, program=""):
self.forms = {}
self.work = []
self.reset()
self.work += program
self.primitives = {"ds":self.ds, \
"ps":self.ps, \
"ss":self.ss, \
"cl":self.cl, \
"ad":self.ad, \
"su":self.su, \
"ml":self.ml, \
"dv":self.dv, \
"tn":self.tn, \
"tf":self.tf, \
"eq":self.eq \
}
def run(self):
args = []
handler = self.scan_next_char
while handler:
try:
next_handler, args = handler(args)
except TracError, e:
sys.stderr.write("TracError: %s\n" % e )
next_handler, args = self.reset, []
handler = next_handler
def scan_next_char(self, args): # Rule 1
args = []
#self.db("scan_next_char")
c = scan_char(self.work, self.sp)
if c:
if c == '(':
handler = self.handle_begin_paren
elif c in "\n\r\t":
handler = self.handle_tab_return
elif c == ',':
handler = self.handle_comma
elif c == '#' and scan_chars(self.work, self.sp, 2) == '#(':
handler = self.handle_begin_active_func
elif c == '#' and scan_chars(self.work, self.sp, 3) == '##(':
handler = self.handle_begin_neutral_func
elif c == '#':
handler = self.handle_sharp_sign
elif c == ')':
handler = self.handle_end_paren
else:
args.append(c)
handler = self.handle_char
else:
self.db("exit")
print "Forms: %s" % (self.forms)
print "Output: [%s]" % (self.output)
handler = None
return handler, args
def handle_begin_paren(self, args): # Rule 2
args = []
nested_count = 1
chars = []
matched = False
del self.work[self.sp]
c = scan_char(self.work, self.sp)
while c and not matched:
if c == ')':
nested_count -= 1
if nested_count == 0:
matched = True
break
if not matched:
if c == '(':
nested_count += 1
chars.append(c)
self.sp += 1
c = scan_char(self.work, self.sp)
if matched:
del self.work[self.sp]
else:
raise TracError, "%s: can't find matching end parenthesis" %("handle_begin_paren")
return self.scan_next_char, []
def handle_tab_return(self, args): # Rule 3
args = []
del self.work[self.sp]
self.sp -= 1
return self.inc_scan_pointer_continue, args
def handle_comma(self, args): # Rule 4
args = []
self.work[self.sp] = END_ARG
return self.inc_scan_pointer_continue, args
def handle_begin_active_func(self, args): # Rule 5
args = []
del self.work[self.sp:self.sp + 2]
self.work.insert(self.sp, BEGIN_ACTIVE_FUNC)
self.sp += 1
return self.scan_next_char, args
def handle_begin_neutral_func(self, args): # Rule 6
args = []
del self.work[self.sp:self.sp + 3]
self.work.insert(self.sp, BEGIN_NEUTRAL_FUNC)
self.sp += 1
return self.scan_next_char, args
def handle_sharp_sign(self, args): # Rule 7
args = []
return self.inc_scan_pointer_continue, args
def handle_end_paren(self, args): # Rule 8
#self.db("end_paren_0")
args = []
self.work[self.sp] = END_FUNC
func_begin = self.get_func_begin()
func_result = self.eval_func(func_begin)
func_marker = self.work[func_begin]
args.append(func_begin)
if func_result == "":
handler = self.handle_null_func_result # Rule 10
elif func_marker == BEGIN_ACTIVE_FUNC:
args.append(func_result)
handler = self.handle_active_func_result # Rule 11
elif func_marker == BEGIN_NEUTRAL_FUNC:
args.append(func_result)
handler = self.handle_neutral_func_result # Rule 12
else:
raise TracError, "%s: invalid func_marker" %("handle_end_paren")
#self.db("end_paren_1")
return handler, args
def get_func_begin(self):
pos = self.sp - 1
c = self.work[pos]
while c:
if c == BEGIN_ACTIVE_FUNC or c == BEGIN_NEUTRAL_FUNC:
break
pos -= 1
if pos >= 0:
c = self.work[pos]
else:
raise TracError, "%s: can't find begin function marker" %("get_func_begin")
return pos
def get_func_end(self, func_begin):
pos = func_begin
c = self.work[pos]
while c:
if c == END_FUNC:
break
pos += 1
c = self.work[pos]
return pos
def get_func_args(self, func_begin):
args = []
cur_arg = []
pos = func_begin
c = self.work[pos]
db = []
while c:
db.append(c)
if c == BEGIN_ACTIVE_FUNC or c == BEGIN_NEUTRAL_FUNC:
pass
elif c == END_ARG or c == END_FUNC:
arg = "".join(cur_arg)
args.append(arg)
cur_arg = []
else:
cur_arg.append(c)
if c != END_FUNC:
pos += 1
c = self.work[pos]
db = []
else:
break
return args
def eval_func(self, func_begin):
result = ""
try:
args = self.get_func_args(func_begin)
func_name = args[0]
primitive = self.primitives.get(func_name, None)
if primitive:
result = primitive(args[1:])
if self.trace:
print "eval_func: %s %s -> [%s]" % (func_name, args[1:], result)
except Exception, e:
raise TracError, "%s: failed - %s" %("eval_func", e)
return result
def handle_char(self, args): # Rule 9
c = args[0]
args = []
return self.inc_scan_pointer_continue, args
def handle_null_func_result(self, args): # Rule 10
return self.handle_func_cleanup, args
def handle_active_func_result(self, args): # Rule 11
func_begin = args[0]
func_result = args[1]
args = []
self.work[self.sp+1:self.sp+1] += func_result
args.append(func_begin)
#self.db("handle_active_func_result")
return self.handle_func_cleanup, args
def handle_neutral_func_result(self, args): # Rule 12
func_begin = args[0]
func_result = args[1]
args = []
self.work[self.sp+1:self.sp+1] += func_result
func_end = self.sp
del self.work[func_begin:func_end + 1]
self.sp = func_begin + len(func_result) - 1
#self.db("handle_neutral_func_result")
return self.inc_scan_pointer_continue, args
def handle_func_cleanup(self, args): # Rule 13
if args:
func_begin = args[0]
func_end = self.get_func_end(func_begin)
args = []
del self.work[func_begin:func_end + 1]
self.sp = func_begin - 1
#self.db("handle_func_cleanup")
return self.inc_scan_pointer_continue, args
def reset(self, args=[]): # Rule 14
args = []
self.work = []
self.sp = 0
return self.scan_next_char, args
def inc_scan_pointer_continue(self, args): # Rule 15
args = []
self.sp += 1
return self.scan_next_char, args
def db(self, msg="db"):
print "%s: %s SP:%d %s" % (msg, self.work[0:self.sp], self.sp, self.work[self.sp:])
class TestTrac(unittest.TestCase):
def setUp(self):
pass
def __test(self, program, output):
self.processor = Processor()
self.processor.initialize(program)
self.processor.run()
self.assertEqual(self.processor.output, output)
def test_1_ps(self):
self.__test("#(ps,Hello world)", "Hello world")
def test_2_equal(self):
self.__test("#(ps,#(eq,Cat,Cat,equal,not equal))", "equal")
def test_3_not_equal(self):
self.__test("#(ps,#(eq,Cat,Dog,equal,not equal))", "not equal")
def test_4_ds(self):
self.__test("#(ds,AA,Cat)#(ps,#(cl,AA))", "Cat")
def test_5_protect_parens(self):
self.__test("#(ds,AA,Cat)#(ds,BB,(#(cl,AA)))#(ps,(#(cl,BB)))", "#(cl,BB)")
def test_6_neutral_func(self):
self.__test("#(ds,AA,Cat)#(ds,BB,(#(cl,AA)))#(ps,##(cl,BB))", "#(cl,AA)")
def test_7_indirection(self):
self.__test("#(ds,AA,Cat)#(ds,BB,(#(cl,AA)))#(ps,#(cl,BB))", "Cat")
def test_8_ss(self):
self.__test("#(ds,AA,Hello X)#(ss,AA,X)#(ps,#(cl,AA,world))", "Hello world")
def test_9_factorial(self):
self.__test("""
#(ds,Factorial,(#(eq,X,1,1,(#(ml,X,#(cl,Factorial,#(su,X,1)))))))
#(ss,Factorial,X)
#(ps,#(cl,Factorial,5))
""", "120")
if __name__ == "__main__":
print __file__
unittest.main()
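# --- Hypothetical one-off usage (added sketch; not part of the original recipe) ---
# p = Processor("#(ps,#(ad,2,3))")
# p.run()   # evaluates the addition, prints "5", and leaves p.output == "5"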
|