max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
flax/optim/rmsprop.py | navjotts/flax | 2,249 | 11067586 | # Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jax.numpy as jnp
import numpy as np
from .. import struct
from .base import OptimizerDef
@struct.dataclass
class _RMSPropHyperParams:
"""RMSProp hyper parameters"""
learning_rate: float
beta2: float
eps: float
centered: bool
@struct.dataclass
class _RMSPropParamState:
"""RMSProp parameter state"""
v: np.ndarray
mg: np.ndarray
class RMSProp(OptimizerDef):
"""RMSProp optimizer"""
def __init__(self, learning_rate: float = None, beta2=0.9, eps=1e-8,
centered=False):
"""Constructor for the RMSProp optimizer
Args:
learning_rate: the step size used to update the parameters.
beta2: the coefficient used for the moving average of the
gradient magnitude (default: 0.9).
eps: the term added to the gradient magnitude estimate for
numerical stability.
centered: If `True`, gradients are normalized by the estimated
variance of the gradient; if False, by the uncentered second moment.
Setting this to `True` may help with training, but is slightly more
expensive in terms of computation and memory. Defaults to `False`.
"""
hyper_params = _RMSPropHyperParams(learning_rate, beta2, eps, centered)
super().__init__(hyper_params)
def init_param_state(self, param):
"""Initialize parameter state"""
mg = jnp.zeros_like(param) if self.hyper_params.centered else None
return _RMSPropParamState(jnp.zeros_like(param), mg)
def apply_param_gradient(self, step, hyper_params, param, state, grad):
"""Apply per-parameter gradients"""
assert hyper_params.learning_rate is not None, 'no learning rate provided.'
new_v = hyper_params.beta2 * state.v + (
1.0 - hyper_params.beta2) * jnp.square(grad)
if hyper_params.centered:
new_mg = hyper_params.beta2 * state.mg + (1.0 - hyper_params.beta2) * grad
maybe_centered_v = new_v - jnp.square(new_mg)
else:
new_mg = state.mg
maybe_centered_v = new_v
new_param = param - hyper_params.learning_rate * grad / (
jnp.sqrt(maybe_centered_v) + hyper_params.eps)
new_state = _RMSPropParamState(new_v, new_mg)
return new_param, new_state
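# --- Usage sketch (added for illustration; not part of the original file) ---
# Assumes the legacy flax.optim interface, where an OptimizerDef is bound to a
# parameter pytree via `create()` and each `apply_gradient()` call returns a new
# Optimizer carrying the updated params and per-parameter (v, mg) state.
if __name__ == "__main__":
  params = {"w": jnp.ones((3,)), "b": jnp.zeros((3,))}
  grads = {"w": jnp.full((3,), 0.1), "b": jnp.full((3,), -0.2)}
  optimizer_def = RMSProp(learning_rate=1e-3, beta2=0.9, eps=1e-8, centered=True)
  optimizer = optimizer_def.create(params)       # initializes v (and mg, since centered=True)
  optimizer = optimizer.apply_gradient(grads)    # one RMSProp update step
  print(optimizer.target)                        # updated parameter pytree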
|
detect.py | zeroc00I/pyfiscan | 496 | 11067590 | import os
import logging
import re
import chardet
yaml_fn_dict = {}
def yaml_visible(fn):
"""Decorator, which allows us to point to function names in YAML-files.
Example: fingerprint: detect_general
"""
yaml_fn_dict[fn.__name__] = fn
return fn
def grep_from_file(version_file, regexp):
    """Greps a file with a predefined regexp to find a version. Returns
    m.group from regexp: (?P<version>foo)
    """
    with open(version_file, 'r') as handle:
        try:
            source = handle.readlines()
        except UnicodeDecodeError:
            # Re-read the raw bytes once, detect the encoding and decode.
            with open(version_file, 'rb') as raw_handle:
                raw = raw_handle.read()
                source = raw.decode(chardet.detect(raw)['encoding']).splitlines()
    try:
        prog = re.compile(regexp)
    except re.error:
        logging.error('Invalid regular expression: %s', regexp)
        return
    for line in source:
        match = prog.match(line)
        if match:
            return match.group('version')
@yaml_visible
def detect_general(source_file, regexp):
"""Detects from source file if it contains version information. Uses first
regexp-match.
"""
if not (os.path.isfile(source_file) and regexp):
return
return grep_from_file(source_file, regexp[0])
@yaml_visible
def detect_joomla(source_file, regexp):
"""Detects from source file if it contains version information of Joomla"""
if not (os.path.isfile(source_file) and regexp):
return
    logging.debug('Detecting Joomla from: %s', source_file)
release_version = grep_from_file(source_file, regexp[0])
if not release_version:
logging.debug('Could not find release version from: %s', source_file)
return
logging.debug('Release version: %s', release_version)
dev_level_version = grep_from_file(source_file, regexp[1])
if not dev_level_version:
logging.debug('Could not find development version from: %s', source_file)
return
logging.debug('Development level version: %s', dev_level_version)
return release_version + "." + dev_level_version
@yaml_visible
def detect_wikkawiki(source_file, regexp):
"""Detects from file if the file has version information of WikkaWiki.
Wikka-1.3.2-p7/version.php:
$svn_version = '1.3.2';
if (!defined('WAKKA_VERSION')) define('WAKKA_VERSION', $svn_version);
if(!defined('WIKKA_PATCH_LEVEL')) define('WIKKA_PATCH_LEVEL', '7');
"""
if not (os.path.isfile(source_file) and regexp):
return
    logging.debug('Detecting WikkaWiki from: %s', source_file)
version = grep_from_file(source_file, regexp[0])
if not version:
logging.debug('Could not find version from: %s', source_file)
return
logging.debug('Version: %s', version)
patch_level = grep_from_file(source_file, regexp[1])
if not patch_level:
        logging.debug('Could not find patch level from: %s', source_file)
return
logging.debug('Patch level: %s', patch_level)
if version and patch_level:
return version + "-p" + patch_level
@yaml_visible
def detect_gallery(source_file, regexp):
"""Detects from source file if it contains version information of Gallery.
Also ignores Git-versions.
"""
if not (os.path.isfile(source_file) and regexp):
return
    logging.debug('Detecting Gallery from: %s', source_file)
version = grep_from_file(source_file, regexp[0])
if not version:
logging.debug('Could not find version from: %s', source_file)
return
logging.debug('Gallery version %s %s' % (version, source_file))
git_version = grep_from_file(source_file,
'.*?const.*?RELEASE_CHANNEL.*?(?P<version>(git))')
if git_version:
logging.debug('Not reporting Gallery Git-version %s', source_file)
return
else:
return version
@yaml_visible
def detect_redmine(source_file, regexp):
"""Detects from source file if it contains version information. Uses first
regexp-match.
"""
if not (os.path.isfile(source_file) and regexp):
return
with open(source_file) as redmine_changelog:
        if 'Redmine changelog' not in redmine_changelog.read():
return
return grep_from_file(source_file, regexp[0])
@yaml_visible
def detect_withoutnewlines(source_file, regexp):
"""Strips newlines from source file."""
if not (os.path.isfile(source_file) and regexp):
return
    with open(source_file, 'r') as f:
        try:
            source = f.read().replace('\n', '')
        except UnicodeDecodeError:
            # Re-read the raw bytes once, detect the encoding and decode.
            with open(source_file, 'rb') as handle:
                raw = handle.read()
                source = raw.decode(chardet.detect(raw)['encoding']).replace('\n', '')
try:
return re.compile(regexp[0]).match(source).group('version')
except re.error:
logging.error('Invalid regular expression: %s', regexp)
except AttributeError:
pass
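# --- Illustrative sketch (not part of the original module) ---
# The @yaml_visible decorator registers detector functions in yaml_fn_dict so a
# fingerprint entry in a YAML rule ("fingerprint: detect_general") can be mapped
# to the callable by name. The file name and regexp below are made-up examples.
if __name__ == '__main__':
    rule = {'fingerprint': 'detect_general',
            'regexp': [r".*?\$version\s*=\s*'(?P<version>[0-9.]+)'"]}
    detector = yaml_fn_dict[rule['fingerprint']]
    # Returns the captured version string, or None when the file is missing.
    print(detector('version.php', rule['regexp']))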
|
tests/test_unix.py | PyO3/tokio | 239 | 11067601 | # Copied from the uvloop project. If you add a new unittest here,
# please consider contributing it to the uvloop project.
#
# Portions copyright (c) 2015-present MagicStack Inc. http://magic.io
import asyncio
import os
import socket
import tempfile
import pytest
import _testbase as tb
from test_ssl import (ONLYCERT, ONLYKEY, create_client_ssl_context,
create_server_ssl_context)
def test_create_unix_server_1(loop):
CNT = 0 # number of clients that were successful
TOTAL_CNT = 100 # total number of clients that test will create
TIMEOUT = 5.0 # timeout for this test
async def handle_client(reader, writer):
nonlocal CNT
data = await reader.readexactly(4)
assert data == b'AAAA'
writer.write(b'OK')
data = await reader.readexactly(4)
assert data == b'BBBB'
writer.write(b'SPAM')
await writer.drain()
writer.close()
CNT += 1
async def test_client(addr):
sock = socket.socket(socket.AF_UNIX)
with sock:
sock.setblocking(False)
await loop.sock_connect(sock, addr)
await loop.sock_sendall(sock, b'AAAA')
buf = b''
while len(buf) != 2:
buf += await loop.sock_recv(sock, 1)
assert buf == b'OK'
await loop.sock_sendall(sock, b'BBBB')
buf = b''
while len(buf) != 4:
buf += await loop.sock_recv(sock, 1)
assert buf == b'SPAM'
async def start_server():
nonlocal CNT
CNT = 0
with tempfile.TemporaryDirectory() as td:
sock_name = os.path.join(td, 'sock')
srv = await asyncio.start_unix_server(
handle_client,
sock_name,
loop=loop)
try:
# srv_socks = srv.sockets
# assert srv_socks
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(test_client(sock_name))
await asyncio.wait_for(
asyncio.gather(*tasks, loop=loop),
TIMEOUT, loop=loop)
finally:
loop.call_soon(srv.close)
await srv.wait_closed()
# Check that the server cleaned-up proxy-sockets
# for srv_sock in srv_socks:
# assert srv_sock.fileno() == -1
# asyncio doesn't cleanup the sock file
assert os.path.exists(sock_name)
async def start_server_sock(start_server):
nonlocal CNT
CNT = 0
with tempfile.TemporaryDirectory() as td:
sock_name = os.path.join(td, 'sock')
sock = socket.socket(socket.AF_UNIX)
sock.bind(sock_name)
srv = await start_server(sock)
await asyncio.sleep(0.1, loop=loop)
try:
# srv_socks = srv.sockets
# self.assertTrue(srv_socks)
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(test_client(sock_name))
await asyncio.wait_for(
asyncio.gather(*tasks, loop=loop),
TIMEOUT, loop=loop)
finally:
loop.call_soon(srv.close)
await srv.wait_closed()
# Check that the server cleaned-up proxy-sockets
# for srv_sock in srv_socks:
# self.assertEqual(srv_sock.fileno(), -1)
# asyncio doesn't cleanup the sock file
assert os.path.exists(sock_name)
# with self.subTest(func='start_unix_server(host, port)'):
loop.run_until_complete(start_server())
assert CNT == TOTAL_CNT
# with self.subTest(func='start_unix_server(sock)'):
loop.run_until_complete(start_server_sock(
lambda sock: asyncio.start_unix_server(
handle_client,
None,
loop=loop,
sock=sock)))
assert CNT == TOTAL_CNT
# with self.subTest(func='start_server(sock)'):
loop.run_until_complete(start_server_sock(
lambda sock: asyncio.start_server(
handle_client,
None, None,
loop=loop,
sock=sock)))
assert CNT == TOTAL_CNT
def test_create_unix_server_2(loop):
with tempfile.TemporaryDirectory() as td:
sock_name = os.path.join(td, 'sock')
with open(sock_name, 'wt') as f:
f.write('x')
with pytest.raises(OSError) as excinfo:
loop.run_until_complete(
loop.create_unix_server(object, sock_name))
excinfo.match('in use')
def test_create_unix_connection_1(loop):
CNT = 0
TOTAL_CNT = 100
def server():
data = yield tb.read(4)
assert data == b'AAAA'
yield tb.write(b'OK')
data = yield tb.read(4)
assert data == b'BBBB'
yield tb.write(b'SPAM')
async def client(addr):
reader, writer = await asyncio.open_unix_connection(
addr,
loop=loop)
writer.write(b'AAAA')
assert await reader.readexactly(2) == b'OK'
writer.write(b'BBBB')
assert await reader.readexactly(4) == b'SPAM'
nonlocal CNT
CNT += 1
writer.close()
async def client_2(addr):
sock = socket.socket(socket.AF_UNIX)
sock.connect(addr)
reader, writer = await asyncio.open_unix_connection(
sock=sock,
loop=loop)
writer.write(b'AAAA')
assert await reader.readexactly(2) == b'OK'
writer.write(b'BBBB')
assert await reader.readexactly(4) == b'SPAM'
nonlocal CNT
CNT += 1
writer.close()
async def client_3(addr):
sock = socket.socket(socket.AF_UNIX)
sock.connect(addr)
reader, writer = await asyncio.open_connection(
sock=sock,
loop=loop)
writer.write(b'AAAA')
assert await reader.readexactly(2) == b'OK'
writer.write(b'BBBB')
assert await reader.readexactly(4) == b'SPAM'
nonlocal CNT
CNT += 1
writer.close()
def run(coro):
nonlocal CNT
CNT = 0
srv = tb.tcp_server(server,
family=socket.AF_UNIX,
max_clients=TOTAL_CNT,
backlog=TOTAL_CNT)
srv.start()
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(coro(srv.addr))
loop.run_until_complete(
asyncio.gather(*tasks, loop=loop))
srv.join()
assert CNT == TOTAL_CNT
run(client)
# run(client_2)
# run(client_3)
def test_create_unix_connection_2(loop):
with tempfile.NamedTemporaryFile() as tmp:
path = tmp.name
async def client():
reader, writer = await asyncio.open_unix_connection(
path,
loop=loop)
async def runner():
with pytest.raises(FileNotFoundError):
await client()
loop.run_until_complete(runner())
def test_create_unix_connection_3(loop):
CNT = 0
TOTAL_CNT = 100
def server():
data = yield tb.read(4)
assert data == b'AAAA'
yield tb.close()
async def client(addr):
reader, writer = await asyncio.open_unix_connection(
addr,
loop=loop)
# sock = writer._transport.get_extra_info('socket')
# assert sock.family == socket.AF_UNIX
writer.write(b'AAAA')
with pytest.raises(asyncio.IncompleteReadError):
await reader.readexactly(10)
writer.close()
nonlocal CNT
CNT += 1
def run(coro):
nonlocal CNT
CNT = 0
srv = tb.tcp_server(server,
family=socket.AF_UNIX,
max_clients=TOTAL_CNT,
backlog=TOTAL_CNT)
srv.start()
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(coro(srv.addr))
loop.run_until_complete(
asyncio.gather(*tasks, loop=loop))
srv.join()
assert CNT == TOTAL_CNT
run(client)
def test_create_unix_connection_4(loop):
sock = socket.socket(socket.AF_UNIX)
sock.close()
async def client():
reader, writer = await asyncio.open_unix_connection(
sock=sock,
loop=loop)
async def runner():
with pytest.raises(OSError) as excinfo:
await client()
excinfo.match('Bad file')
loop.run_until_complete(runner())
def test_create_unix_connection_5(loop):
s1, s2 = socket.socketpair(socket.AF_UNIX)
excs = []
class Proto(asyncio.Protocol):
def connection_lost(self, exc):
excs.append(exc)
proto = Proto()
async def client():
t, _ = await loop.create_unix_connection(
lambda: proto,
None,
sock=s2)
t.write(b'AAAAA')
s1.close()
t.write(b'AAAAA')
await asyncio.sleep(0.1, loop=loop)
loop.run_until_complete(client())
assert len(excs) == 1
assert excs[0].__class__ in (BrokenPipeError, ConnectionResetError)
def test_create_unix_connection_ssl_1(loop):
CNT = 0
TOTAL_CNT = 25
A_DATA = b'A' * 1024 * 1024
B_DATA = b'B' * 1024 * 1024
sslctx = create_server_ssl_context(ONLYCERT, ONLYKEY)
client_sslctx = create_client_ssl_context()
def server():
yield tb.starttls(
sslctx,
server_side=True)
data = yield tb.read(len(A_DATA))
assert data == A_DATA
yield tb.write(b'OK')
data = yield tb.read(len(B_DATA))
assert data == B_DATA
yield tb.write(b'SPAM')
yield tb.close()
async def client(addr):
reader, writer = await asyncio.open_unix_connection(
addr,
ssl=client_sslctx,
server_hostname='',
loop=loop)
writer.write(A_DATA)
assert await reader.readexactly(2) == b'OK'
writer.write(B_DATA)
assert await reader.readexactly(4) == b'SPAM'
nonlocal CNT
CNT += 1
writer.close()
def run(coro):
nonlocal CNT
CNT = 0
srv = tb.tcp_server(server,
family=socket.AF_UNIX,
max_clients=TOTAL_CNT,
backlog=TOTAL_CNT)
srv.start()
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(coro(srv.addr))
loop.run_until_complete(
asyncio.gather(*tasks, loop=loop))
srv.join()
assert CNT == TOTAL_CNT
run(client)
|
calvinextras/calvinsys/media/webcam/opencv/Webcam.py | gabrielcercel/calvin-base | 334 | 11067609 | # -*- coding: utf-8 -*-
# Copyright (c) 2017 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
from calvinextras.calvinsys.media.webcam import BaseWebcam
from calvin.runtime.south.async import threads
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class Webcam(BaseWebcam.BaseWebcam):
"""
Implementation of Webcam Calvinsys API
"""
def init(self, width, height, device=0, **kwargs):
self._webcam = cv2.VideoCapture(device)
self._webcam.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self._webcam.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
self._in_progress = None
self._b64image = None
def can_write(self):
return self._b64image is None and self._in_progress is None
def _read_image(self):
import base64
        status, frame = self._webcam.read()
        barr = None
        if status:
            status, image = cv2.imencode(".jpg", frame)
            if status:
                barr = image.tostring()
        if barr is None:
            return None
        return base64.b64encode(barr)
def _image_ready(self, image, *args, **kwargs):
self._b64image = image
self._in_progress = None
self.scheduler_wakeup()
def _image_error(self, *args, **kwargs):
self._in_progress = None
self.scheduler_wakeup()
def write(self, _):
self._in_progress = threads.defer_to_thread(self._read_image)
        self._in_progress.addCallback(self._image_ready)
        # Register the failure path as an errback so it only fires on errors.
        self._in_progress.addErrback(self._image_error)
def can_read(self):
return self._b64image is not None and self._in_progress is None
def read(self):
b64image = self._b64image
self._b64image = None
return b64image
def close(self):
if self._in_progress:
self._in_progress.cancel()
self._webcam.release()
self._webcam = None
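# --- Usage sketch (illustration only; not part of the original file) ---
# The calvinsys contract is poll-style: the runtime checks can_write(), calls
# write() to start an asynchronous capture in a worker thread, is woken via
# scheduler_wakeup() when the frame is ready, then checks can_read() and fetches
# the base64-encoded JPEG with read(). The call order below is a hypothetical
# stand-alone walk-through; in practice the Calvin scheduler drives these calls.
#
#   cam = Webcam()                          # normally constructed by the runtime
#   cam.init(width=640, height=480, device=0)
#   if cam.can_write():
#       cam.write(None)                     # defers _read_image() to a thread
#   ...                                     # wait for scheduler_wakeup()
#   if cam.can_read():
#       b64_jpeg = cam.read()
#   cam.close()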
|
training/transnet.py | uygnef/TransNetV2 | 155 | 11067666 | import gin
import h5py
import numpy as np
import tensorflow as tf
from models import ResNet18, ResNetBlock
@gin.configurable(blacklist=["name"])
class TransNetV2(tf.keras.Model):
def __init__(self, F=16, L=3, S=2, D=256,
use_resnet_features=False,
use_many_hot_targets=False,
use_frame_similarity=False,
use_mean_pooling=False,
use_convex_comb_reg=False,
dropout_rate=None,
use_resnet_like_top=False,
frame_similarity_on_last_layer=False,
use_color_histograms=False,
name="TransNet"):
super(TransNetV2, self).__init__(name=name)
self.resnet_layers = ResNetFeatures() if use_resnet_features else (lambda x, training=False: x / 255.)
self.blocks = [StackedDDCNNV2(n_blocks=S, filters=F, stochastic_depth_drop_prob=0., name="SDDCNN_1")]
self.blocks += [StackedDDCNNV2(n_blocks=S, filters=F * 2**i, name="SDDCNN_{:d}".format(i + 1)) for i in range(1, L)]
self.fc1 = tf.keras.layers.Dense(D, activation=tf.nn.relu)
self.cls_layer1 = tf.keras.layers.Dense(1, activation=None)
self.cls_layer2 = tf.keras.layers.Dense(1, activation=None) if use_many_hot_targets else None
self.frame_sim_layer = FrameSimilarity() if use_frame_similarity else None
self.color_hist_layer = ColorHistograms() if use_color_histograms else None
self.use_mean_pooling = use_mean_pooling
self.convex_comb_reg = ConvexCombinationRegularization() if use_convex_comb_reg else None
self.dropout = tf.keras.layers.Dropout(dropout_rate) if dropout_rate is not None else None
self.frame_similarity_on_last_layer = frame_similarity_on_last_layer
self.resnet_like_top = use_resnet_like_top
if self.resnet_like_top:
self.resnet_like_top_conv = tf.keras.layers.Conv3D(filters=32, kernel_size=(3, 7, 7), strides=(1, 2, 2),
padding="SAME", use_bias=False,
name="resnet_like_top/conv")
self.resnet_like_top_bn = tf.keras.layers.BatchNormalization(name="resnet_like_top/bn")
self.resnet_like_top_max_pool = tf.keras.layers.MaxPooling3D(pool_size=(1, 3, 3), strides=(1, 2, 2),
padding="SAME")
def call(self, inputs, training=False):
out_dict = {}
x = inputs
x = self.resnet_layers(x, training=training)
if self.resnet_like_top:
x = self.resnet_like_top_conv(x)
x = self.resnet_like_top_bn(x)
x = self.resnet_like_top_max_pool(x)
block_features = []
for block in self.blocks:
x = block(x, training=training)
block_features.append(x)
if self.convex_comb_reg is not None:
out_dict["alphas"], out_dict["comb_reg_loss"] = self.convex_comb_reg(inputs, x)
if self.use_mean_pooling:
x = tf.math.reduce_mean(x, axis=[2, 3])
else:
shape = [tf.shape(x)[0], tf.shape(x)[1], np.prod(x.get_shape().as_list()[2:])]
x = tf.reshape(x, shape=shape, name="flatten_3d")
if self.frame_sim_layer is not None and not self.frame_similarity_on_last_layer:
x = tf.concat([self.frame_sim_layer(block_features), x], 2)
if self.color_hist_layer is not None:
x = tf.concat([self.color_hist_layer(inputs), x], 2)
x = self.fc1(x)
if self.dropout is not None:
x = self.dropout(x, training=training)
if self.frame_sim_layer is not None and self.frame_similarity_on_last_layer:
x = tf.concat([self.frame_sim_layer(block_features), x], 2)
one_hot = self.cls_layer1(x)
if self.cls_layer2 is not None:
out_dict["many_hot"] = self.cls_layer2(x)
if len(out_dict) > 0:
return one_hot, out_dict
return one_hot
@gin.configurable(whitelist=["shortcut", "use_octave_conv", "pool_type", "stochastic_depth_drop_prob"])
class StackedDDCNNV2(tf.keras.layers.Layer):
def __init__(self, n_blocks, filters, shortcut=False, use_octave_conv=False, pool_type="max",
stochastic_depth_drop_prob=0., name="StackedDDCNN"):
super(StackedDDCNNV2, self).__init__(name=name)
assert pool_type == "max" or pool_type == "avg"
if use_octave_conv and pool_type == "max":
print("WARN: Octave convolution was designed with average pooling, not max pooling.")
self.shortcut = shortcut
# self.shortcut = None
# if shortcut:
# self.shortcut = tf.keras.layers.Conv3D(filters * 4, kernel_size=1, dilation_rate=1, padding="SAME",
# activation=None, use_bias=True, name="shortcut")
self.blocks = [DilatedDCNNV2(filters, octave_conv=use_octave_conv,
activation=tf.nn.relu if i != n_blocks else None,
name="DDCNN_{:d}".format(i)) for i in range(1, n_blocks + 1)]
self.pool = tf.keras.layers.MaxPool3D(pool_size=(1, 2, 2)) if pool_type == "max" else \
tf.keras.layers.AveragePooling3D(pool_size=(1, 2, 2))
self.octave = use_octave_conv
self.stochastic_depth_drop_prob = stochastic_depth_drop_prob
def call(self, inputs, training=False):
x = inputs
shortcut = None
if self.octave:
x = [self.pool(x), x]
for block in self.blocks:
x = block(x, training=training)
if shortcut is None:
shortcut = x
if self.octave:
x = tf.concat([x[0], self.pool(x[1])], -1)
x = tf.nn.relu(x)
if self.shortcut is not None:
# shortcut = self.shortcut(inputs)
if self.stochastic_depth_drop_prob != 0.:
if training:
x = tf.cond(tf.random.uniform([]) < self.stochastic_depth_drop_prob,
lambda: shortcut, lambda: x + shortcut)
else:
x = (1 - self.stochastic_depth_drop_prob) * x + shortcut
else:
x += shortcut
if not self.octave:
x = self.pool(x)
return x
@gin.configurable(whitelist=["batch_norm"])
class DilatedDCNNV2(tf.keras.layers.Layer):
def __init__(self, filters, batch_norm=False, activation=None, octave_conv=False, name="DilatedDCNN"):
super(DilatedDCNNV2, self).__init__(name=name)
assert not (octave_conv and batch_norm)
self.conv1 = Conv3DConfigurable(filters, 1, use_bias=not batch_norm, octave=octave_conv, name="Conv3D_1")
self.conv2 = Conv3DConfigurable(filters, 2, use_bias=not batch_norm, octave=octave_conv, name="Conv3D_2")
self.conv3 = Conv3DConfigurable(filters, 4, use_bias=not batch_norm, octave=octave_conv, name="Conv3D_4")
self.conv4 = Conv3DConfigurable(filters, 8, use_bias=not batch_norm, octave=octave_conv, name="Conv3D_8")
self.octave = octave_conv
self.batch_norm = tf.keras.layers.BatchNormalization(name="bn") if batch_norm else None
self.activation = activation
def call(self, inputs, training=False):
conv1 = self.conv1(inputs, training=training)
conv2 = self.conv2(inputs, training=training)
conv3 = self.conv3(inputs, training=training)
conv4 = self.conv4(inputs, training=training)
if self.octave:
x = [tf.concat([conv1[0], conv2[0], conv3[0], conv4[0]], axis=4),
tf.concat([conv1[1], conv2[1], conv3[1], conv4[1]], axis=4)]
else:
x = tf.concat([conv1, conv2, conv3, conv4], axis=4)
if self.batch_norm is not None:
x = self.batch_norm(x, training=training)
if self.activation is not None:
if self.octave:
x = [self.activation(x[0]), self.activation(x[1])]
else:
x = self.activation(x)
return x
@gin.configurable(whitelist=["separable", "kernel_initializer"])
class Conv3DConfigurable(tf.keras.layers.Layer):
def __init__(self,
filters,
dilation_rate,
separable=False,
octave=False,
use_bias=True,
kernel_initializer="glorot_uniform",
name="Conv3D"):
super(Conv3DConfigurable, self).__init__(name=name)
assert not (separable and octave)
if separable:
# (2+1)D convolution https://arxiv.org/pdf/1711.11248.pdf
conv1 = tf.keras.layers.Conv3D(2 * filters, kernel_size=(1, 3, 3), dilation_rate=(1, 1, 1),
padding="SAME", activation=None, use_bias=False,
name="conv_spatial", kernel_initializer=kernel_initializer)
conv2 = tf.keras.layers.Conv3D(filters, kernel_size=(3, 1, 1), dilation_rate=(dilation_rate, 1, 1),
padding="SAME", activation=None, use_bias=use_bias, name="conv_temporal",
kernel_initializer=kernel_initializer)
self.layers = [conv1, conv2]
elif octave:
conv = OctConv3D(filters, kernel_size=3, dilation_rate=(dilation_rate, 1, 1), use_bias=use_bias,
kernel_initializer=kernel_initializer)
self.layers = [conv]
else:
conv = tf.keras.layers.Conv3D(filters, kernel_size=3, dilation_rate=(dilation_rate, 1, 1),
padding="SAME", activation=None, use_bias=use_bias, name="conv",
kernel_initializer=kernel_initializer)
self.layers = [conv]
def call(self, inputs):
x = inputs
for layer in self.layers:
x = layer(x)
return x
@gin.configurable(whitelist=["alpha"])
class OctConv3D(tf.keras.layers.Layer):
def __init__(self, filters, kernel_size=3, dilation_rate=(1, 1, 1), alpha=0.25,
use_bias=True, kernel_initializer="glorot_uniform", name="OctConv3D"):
super(OctConv3D, self).__init__(name=name)
self.low_channels = int(filters * alpha)
self.high_channels = filters - self.low_channels
self.high_to_high = tf.keras.layers.Conv3D(self.high_channels, kernel_size=kernel_size, activation=None,
dilation_rate=dilation_rate, padding="SAME",
use_bias=use_bias, kernel_initializer=kernel_initializer,
name="high_to_high")
self.high_to_low = tf.keras.layers.Conv3D(self.low_channels, kernel_size=kernel_size, activation=None,
dilation_rate=dilation_rate, padding="SAME",
use_bias=False, kernel_initializer=kernel_initializer,
name="high_to_low")
self.low_to_high = tf.keras.layers.Conv3D(self.high_channels, kernel_size=kernel_size, activation=None,
dilation_rate=dilation_rate, padding="SAME",
use_bias=False, kernel_initializer=kernel_initializer,
name="low_to_high")
self.low_to_low = tf.keras.layers.Conv3D(self.low_channels, kernel_size=kernel_size, activation=None,
dilation_rate=dilation_rate, padding="SAME",
use_bias=use_bias, kernel_initializer=kernel_initializer,
name="low_to_low")
self.upsampler = tf.keras.layers.UpSampling3D(size=(1, 2, 2))
self.downsampler = tf.keras.layers.AveragePooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), padding="SAME")
@staticmethod
def pad_to(tensor, target_shape):
shape = tf.shape(tensor)
padding = [[0, tar - curr] for curr, tar in zip(shape, target_shape)]
return tf.pad(tensor, padding, "CONSTANT")
@staticmethod
def crop_to(tensor, target_width, target_height):
return tensor[:, :, :target_height, :target_width]
def call(self, inputs):
low_inputs, high_inputs = inputs
high_to_high = self.high_to_high(high_inputs)
high_to_low = self.high_to_low(self.downsampler(high_inputs))
low_to_high = self.upsampler(self.low_to_high(low_inputs))
low_to_low = self.low_to_low(low_inputs)
high_output = high_to_high[:, :, :tf.shape(low_to_high)[2], :tf.shape(low_to_high)[3]] + low_to_high
low_output = low_to_low + high_to_low[:, :, :tf.shape(low_to_low)[2], :tf.shape(low_to_low)[3]]
# print("OctConv3D:", low_inputs.shape, "->", low_output.shape, "|", high_inputs.shape, "->", high_output.shape)
return low_output, high_output
@gin.configurable(whitelist=["trainable"])
class ResNetFeatures(tf.keras.layers.Layer):
def __init__(self, trainable=False, name="ResNetFeatures"):
super(ResNetFeatures, self).__init__(trainable=trainable, name=name)
self.conv1 = tf.keras.layers.Conv2D(filters=64, kernel_size=(7, 7), strides=(2, 2),
padding="SAME", use_bias=False, name="conv1")
self.bn1 = tf.keras.layers.BatchNormalization(name="conv1/bn")
self.max_pool = tf.keras.layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="SAME")
self.layer2a = ResNetBlock(64, name="Block2a")
self.layer2b = ResNetBlock(64, name="Block2b")
self.mean = tf.constant(ResNet18.MEAN)
self.std = tf.constant(ResNet18.STD)
def call(self, inputs, training=False):
training = training if self.trainable else False
shape = tf.shape(inputs)
x = tf.reshape(inputs, [shape[0] * shape[1], shape[2], shape[3], shape[4]])
x = (x - self.mean) / self.std
x = self.conv1(x)
x = self.bn1(x, training=training)
x = tf.nn.relu(x)
x = self.max_pool(x)
x = self.layer2a(x, training=training)
x = self.layer2b(x, training=training)
new_shape = tf.shape(x)
x = tf.reshape(x, [shape[0], shape[1], new_shape[1], new_shape[2], new_shape[3]])
return x
def restore_me(self, checkpoint):
with h5py.File(checkpoint, "r") as f:
for v in self.variables:
name = v.name.split("/")[2:]
if name[0].startswith("Block"):
name = name[:1] + name
else:
name = name[:len(name) - 1] + name
name = "/".join(name)
v.assign(f[name][:])
@gin.configurable(whitelist=["similarity_dim", "lookup_window", "output_dim", "stop_gradient", "use_bias"])
class FrameSimilarity(tf.keras.layers.Layer):
def __init__(self,
similarity_dim=128,
lookup_window=101,
output_dim=128,
stop_gradient=False,
use_bias=False,
name="FrameSimilarity"):
super(FrameSimilarity, self).__init__(name=name)
self.projection = tf.keras.layers.Dense(similarity_dim, use_bias=use_bias, activation=None)
self.fc = tf.keras.layers.Dense(output_dim, activation=tf.nn.relu)
self.lookup_window = lookup_window
self.stop_gradient = stop_gradient
assert lookup_window % 2 == 1, "`lookup_window` must be odd integer"
def call(self, inputs):
x = tf.concat([
tf.math.reduce_mean(x, axis=[2, 3]) for x in inputs
], axis=2)
if self.stop_gradient:
x = tf.stop_gradient(x)
x = self.projection(x)
x = tf.nn.l2_normalize(x, axis=2)
batch_size, time_window = tf.shape(x)[0], tf.shape(x)[1]
similarities = tf.matmul(x, x, transpose_b=True) # [batch_size, time_window, time_window]
similarities_padded = tf.pad(similarities, [[0, 0], [0, 0], [(self.lookup_window - 1) // 2] * 2])
batch_indices = tf.tile(
tf.reshape(tf.range(batch_size), [batch_size, 1, 1]), [1, time_window, self.lookup_window]
)
time_indices = tf.tile(
tf.reshape(tf.range(time_window), [1, time_window, 1]), [batch_size, 1, self.lookup_window]
)
lookup_indices = tf.tile(
tf.reshape(tf.range(self.lookup_window), [1, 1, self.lookup_window]), [batch_size, time_window, 1]
) + time_indices
indices = tf.stack([batch_indices, time_indices, lookup_indices], -1)
similarities = tf.gather_nd(similarities_padded, indices)
return self.fc(similarities)
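# --- Worked example of the lookup-window gather above (added as illustration) ---
# With time_window = 4 and lookup_window = 3, the [T, T] similarity matrix is
# padded by (3 - 1) // 2 = 1 column on each side, and for every frame t the
# gather_nd picks the similarities to frames [t - 1, t, t + 1] (zeros where the
# window falls outside the clip). Each frame therefore ends up with a fixed-size
# vector of `lookup_window` similarities that the final Dense layer consumes.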
@gin.configurable(whitelist=["filters", "delta_scale", "loss_weight"])
class ConvexCombinationRegularization(tf.keras.layers.Layer):
def __init__(self, filters=32, delta_scale=10., loss_weight=0.01, name="ConvexCombinationRegularization"):
super(ConvexCombinationRegularization, self).__init__(name=name)
self.projection = tf.keras.layers.Conv3D(filters, kernel_size=1, dilation_rate=1, padding="SAME",
activation=tf.nn.relu, use_bias=True)
self.features = tf.keras.layers.Conv3D(filters * 2, kernel_size=(3, 3, 3), dilation_rate=1, padding="SAME",
activation=tf.nn.relu, use_bias=True)
self.dense = tf.keras.layers.Dense(1, activation=None, use_bias=True)
self.loss = tf.keras.losses.Huber(reduction=tf.keras.losses.Reduction.NONE)
self.delta_scale = delta_scale
self.loss_weight = loss_weight
def call(self, image_inputs, feature_inputs):
x = feature_inputs
x = self.projection(x)
batch_size = tf.shape(x)[0]
window_size = tf.shape(x)[1]
first_frame = tf.tile(x[:, :1], [1, window_size, 1, 1, 1])
last_frame = tf.tile(x[:, -1:], [1, window_size, 1, 1, 1])
x = tf.concat([x, first_frame, last_frame], -1)
x = self.features(x)
x = tf.math.reduce_mean(x, axis=[2, 3])
alpha = self.dense(x)
first_img = tf.tile(image_inputs[:, :1], [1, window_size, 1, 1, 1])
last_img = tf.tile(image_inputs[:, -1:], [1, window_size, 1, 1, 1])
alpha_ = tf.nn.sigmoid(alpha)
alpha_ = tf.reshape(alpha_, [batch_size, window_size, 1, 1, 1])
predictions_ = (alpha_ * first_img + (1 - alpha_) * last_img)
loss_ = self.loss(y_true=image_inputs / self.delta_scale, y_pred=predictions_ / self.delta_scale)
loss_ = self.loss_weight * tf.math.reduce_mean(loss_)
return alpha, loss_
@gin.configurable(whitelist=["lookup_window", "output_dim"])
class ColorHistograms(tf.keras.layers.Layer):
def __init__(self, lookup_window=101, output_dim=None, name="ColorHistograms"):
super(ColorHistograms, self).__init__(name=name)
self.fc = tf.keras.layers.Dense(output_dim, activation=tf.nn.relu) if output_dim is not None else None
self.lookup_window = lookup_window
assert lookup_window % 2 == 1, "`lookup_window` must be odd integer"
@staticmethod
def compute_color_histograms(frames):
frames = tf.cast(frames, tf.int32)
def get_bin(frames):
# returns 0 .. 511
R, G, B = frames[:, :, 0], frames[:, :, 1], frames[:, :, 2]
R, G, B = tf.bitwise.right_shift(R, 5), tf.bitwise.right_shift(G, 5), tf.bitwise.right_shift(B, 5)
return tf.bitwise.left_shift(R, 6) + tf.bitwise.left_shift(G, 3) + B
batch_size, time_window, height, width = tf.shape(frames)[0], tf.shape(frames)[1], tf.shape(frames)[2], \
tf.shape(frames)[3]
no_channels = frames.shape[-1]
assert no_channels == 3 or no_channels == 6
if no_channels == 3:
frames_flatten = tf.reshape(frames, [batch_size * time_window, height * width, 3])
else:
frames_flatten = tf.reshape(frames, [batch_size * time_window, height * width * 2, 3])
binned_values = get_bin(frames_flatten)
frame_bin_prefix = tf.bitwise.left_shift(tf.range(batch_size * time_window), 9)[:, tf.newaxis]
binned_values = binned_values + frame_bin_prefix
ones = tf.ones_like(binned_values, dtype=tf.int32)
histograms = tf.math.unsorted_segment_sum(ones, binned_values, batch_size * time_window * 512)
histograms = tf.reshape(histograms, [batch_size, time_window, 512])
histograms_normalized = tf.cast(histograms, tf.float32)
histograms_normalized = histograms_normalized / tf.linalg.norm(histograms_normalized, axis=2, keepdims=True)
return histograms_normalized
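    # --- Note on get_bin above (added as illustration) ---
    # Each channel keeps only its top 3 bits (value >> 5 maps 0..255 onto 0..7),
    # and the three 3-bit codes are packed as R*64 + G*8 + B, giving 512 possible
    # bins per frame. For example an RGB pixel (200, 16, 90) maps to
    # (6, 0, 2) -> 6*64 + 0*8 + 2 = bin 386.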
def call(self, inputs):
x = self.compute_color_histograms(inputs)
batch_size, time_window = tf.shape(x)[0], tf.shape(x)[1]
similarities = tf.matmul(x, x, transpose_b=True) # [batch_size, time_window, time_window]
similarities_padded = tf.pad(similarities, [[0, 0], [0, 0], [(self.lookup_window - 1) // 2] * 2])
batch_indices = tf.tile(
tf.reshape(tf.range(batch_size), [batch_size, 1, 1]), [1, time_window, self.lookup_window]
)
time_indices = tf.tile(
tf.reshape(tf.range(time_window), [1, time_window, 1]), [batch_size, 1, self.lookup_window]
)
lookup_indices = tf.tile(
tf.reshape(tf.range(self.lookup_window), [1, 1, self.lookup_window]), [batch_size, time_window, 1]
) + time_indices
indices = tf.stack([batch_indices, time_indices, lookup_indices], -1)
similarities = tf.gather_nd(similarities_padded, indices)
if self.fc is not None:
return self.fc(similarities)
return similarities
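# --- Shape sanity-check sketch (added for illustration; not in the original file) ---
# Runs the default network on a dummy batch shaped [batch, time, height, width, 3];
# the 27x48 RGB frames and 100-frame window used here are assumptions based on the
# defaults above, not values taken from this file.
if __name__ == "__main__":
    model = TransNetV2()
    dummy_clip = tf.random.uniform([2, 100, 27, 48, 3], maxval=255.)
    logits = model(dummy_clip, training=False)
    print(logits.shape)  # one shot-boundary logit per frame -> (2, 100, 1)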
|
homeassistant/components/buienradar/const.py | MrDelik/core | 30,023 | 11067679 | """Constants for buienradar component."""
DOMAIN = "buienradar"
DEFAULT_TIMEFRAME = 60
DEFAULT_DIMENSION = 700
DEFAULT_DELTA = 600
CONF_DELTA = "delta"
CONF_COUNTRY = "country_code"
CONF_TIMEFRAME = "timeframe"
SUPPORTED_COUNTRY_CODES = ["NL", "BE"]
DEFAULT_COUNTRY = "NL"
"""Schedule next call after (minutes)."""
SCHEDULE_OK = 10
"""When an error occurred, new call after (minutes)."""
SCHEDULE_NOK = 2
|
services/fuse/tests/test_exec.py | chlige/arvados | 222 | 11067680 | # Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: AGPL-3.0
from __future__ import absolute_import
from six import assertRegex
import arvados_fuse.command
import json
import multiprocessing
import os
from . import run_test_server
import tempfile
import unittest
from .integration_test import workerPool
try:
from shlex import quote
except:
from pipes import quote
def try_exec(mnt, cmd):
try:
os.environ['KEEP_LOCAL_STORE'] = tempfile.mkdtemp()
arvados_fuse.command.Mount(
arvados_fuse.command.ArgumentParser().parse_args([
'--read-write',
'--mount-tmp=zzz',
'--unmount-timeout=0.1',
mnt,
'--exec'] + cmd)).run()
except SystemExit:
pass
else:
raise AssertionError('should have exited')
class ExecMode(unittest.TestCase):
@classmethod
def setUpClass(cls):
run_test_server.run()
run_test_server.run_keep(blob_signing=True, num_servers=2)
run_test_server.authorize_with('active')
@classmethod
def tearDownClass(cls):
run_test_server.stop_keep(num_servers=2)
def setUp(self):
self.mnt = tempfile.mkdtemp()
_, self.okfile = tempfile.mkstemp()
def tearDown(self):
os.rmdir(self.mnt)
os.unlink(self.okfile)
def test_exec(self):
workerPool().apply(try_exec, (self.mnt, [
'sh', '-c',
'echo -n foo >{}; cp {} {}'.format(
quote(os.path.join(self.mnt, 'zzz', 'foo.txt')),
quote(os.path.join(self.mnt, 'zzz', '.arvados#collection')),
quote(os.path.join(self.okfile)))]))
with open(self.okfile) as f:
assertRegex(
self,
json.load(f)['manifest_text'],
r' 0:3:foo.txt\n')
|
models/plot_spam_scores.py | chihyunsong/oc-nn | 203 | 11067683 | import numpy as np
import pandas as pd
from sklearn import utils
import matplotlib.pyplot as plt
dataPath = './data/'
activations = ["Linear","Sigmoid"]
methods = ["Linear","RBF"]
def plot_decision_scores_SPAM(dataset,df_spam_scores):
# Four axes, returned as a 2-d array
#
f, axarr = plt.subplots(5, 2,figsize=(20,20))
st = f.suptitle("One Class NN: "+dataset, fontsize="x-large",fontweight='bold');
_=axarr[0, 0].hist(df_spam_scores["sklearn-OCSVM-Linear-Train"], bins = 25, label = 'Normal')
_=axarr[0, 0].hist(df_spam_scores["sklearn-OCSVM-Linear-Test"], bins = 25, label = 'Anomaly')
_=axarr[0, 0].set_title("sklearn-OCSVM : " + methods[0])
_=axarr[0, 1].hist(df_spam_scores["sklearn-OCSVM-RBF-Train"], bins = 25, label = 'Normal')
_=axarr[0, 1].hist(df_spam_scores["sklearn-OCSVM-RBF-Test"], bins = 25, label = 'Anomaly')
_=axarr[0, 1].set_title("sklearn-OCSVM : " + methods[1])
_=axarr[0, 1].legend(loc="upper right")
_=axarr[1, 0].hist(df_spam_scores["sklearn-OCSVM-explicit-Linear-Train"], bins = 25, label = 'Normal')
_=axarr[1, 0].hist(df_spam_scores["sklearn-OCSVM-explicit-Linear-Test"], bins = 25, label = 'Anomaly')
_=axarr[1, 0].set_title("sklearn-OCSVM-explicit : " + activations[0]);
_=axarr[1, 1].hist(df_spam_scores["sklearn-OCSVM-explicit-Sigmoid-Train"], bins = 25, label = 'Normal')
_=axarr[1, 1].hist(df_spam_scores["sklearn-OCSVM-explicit-Sigmoid-Test"], bins = 25, label = 'Anomaly')
_=axarr[1, 1].set_title("sklearn-OCSVM-explicit : " + activations[1]);
_=axarr[1, 1].legend(loc="upper right")
_=axarr[2, 0].hist(df_spam_scores["One_Class_NN_explicit-Linear-Train"], bins = 25, label = 'Normal')
_=axarr[2, 0].hist(df_spam_scores["One_Class_NN_explicit-Linear-Test"], bins = 25, label = 'Anomaly')
_=axarr[2, 0].set_title("One_Class_NN_explicit: " + activations[0]);
_=axarr[2, 1].hist(df_spam_scores["One_Class_NN_explicit-Sigmoid-Train"], bins = 25, label = 'Normal')
_=axarr[2, 1].hist(df_spam_scores["One_Class_NN_explicit-Sigmoid-Test"], bins = 25, label = 'Anomaly')
_=axarr[2, 1].set_title("One_Class_NN_explicit: " + activations[1]);
_=axarr[2, 1].legend(loc="upper right")
_=axarr[3, 0].hist(df_spam_scores["tf_OneClass_NN-Linear-Train"], bins = 25, label = 'Normal')
_=axarr[3, 0].hist(df_spam_scores["tf_OneClass_NN-Linear-Test"], bins = 25, label = 'Anomaly')
_=axarr[3, 0].set_title("tf_OneClass_NN: " + activations[0]);
_=axarr[3, 1].hist(df_spam_scores["tf_OneClass_NN-Sigmoid-Train"], bins = 25, label = 'Normal')
_=axarr[3, 1].hist(df_spam_scores["tf_OneClass_NN-Sigmoid-Test"], bins = 25, label = 'Anomaly')
_=axarr[3, 1].set_title("tf_OneClass_NN: " + activations[1]);
_=axarr[3, 1].legend(loc="upper right")
_=axarr[4, 0].hist(df_spam_scores["tflearn_OneClass_NN-Linear-Train"], bins = 25, label = 'Normal')
_=axarr[4, 0].hist(df_spam_scores["tflearn_OneClass_NN-Linear-Test"], bins = 25, label = 'Anomaly')
_=axarr[4, 0].set_title("tflearn_OneClass_NN: " + activations[0]);
_=axarr[4, 1].hist(df_spam_scores["tflearn_OneClass_NN-Sigmoid-Train"], bins = 25, label = 'Normal')
_=axarr[4, 1].hist(df_spam_scores["tflearn_OneClass_NN-Sigmoid-Test"], bins = 25, label = 'Anomaly')
_=axarr[4, 1].set_title("tflearn_OneClass_NN: " + activations[1]);
_=axarr[4, 1].legend(loc="upper right")
# Fine-tune figure; hide x ticks for top plots and y ticks for right plots
_=plt.setp([a.get_xticklabels() for a in axarr[0, :]], visible=False)
_=plt.setp([a.get_yticklabels() for a in axarr[:, 1]], visible=False)
_=plt.title("One Class NN: spam ");
_=plt.legend(loc = 'upper right');
return
|
napkin_ml/examples/logistic_regression.py | manasbedmutha98/NapkinML | 548 | 11067709 | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from napkin_ml import LogisticRegression, PCA
from napkin_ml.utils import Plot, load_iris, train_test_split
def main():
# Load dataset
data = load_iris()
X = data['data']
y = data['target']
# Reduce to two classes
X = X[y != 0]
y = y[y != 0]
y -= 1
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
clf = LogisticRegression()
clf.fit(X_train, y_train)
y_pred = np.rint(clf.predict(X_test))
accuracy = np.mean(y_pred == y_test)
print ("Accuracy:", accuracy)
Plot().plot_in_2d(X, y,
title="Logistic Regression",
accuracy=accuracy,
legend_labels=data['target_names'])
if __name__ == "__main__":
main()
|
tests/test_function_basis.py | vaishnavtv/neurodiffeq | 202 | 11067716 | import pytest
import torch
import numpy as np
import torch.nn as nn
from numpy import isclose
from neurodiffeq.function_basis import LegendrePolynomial
from neurodiffeq.function_basis import LegendreBasis
from neurodiffeq.function_basis import ZonalSphericalHarmonics
from neurodiffeq.function_basis import ZonalSphericalHarmonicsLaplacian
from neurodiffeq.neurodiffeq import safe_diff as diff
from scipy.special import legendre # legendre polynomials
from scipy.special import sph_harm # spherical harmonics
@pytest.fixture
def n_samples():
return 50
@pytest.fixture
def shape(n_samples):
return (n_samples, 1)
@pytest.fixture
def max_degree():
return 20
def test_legendre_polynomials(shape, max_degree):
x1 = np.random.rand(*shape)
x2 = torch.tensor(x1, requires_grad=True)
for d in range(max_degree):
p1 = legendre(d)(x1)
p2 = LegendrePolynomial(d)(x2)
assert p2.requires_grad, f"output seems detached from the graph"
p2 = p2.detach().cpu().numpy()
assert isclose(p2, p1).all(), f"p1 = {p1}, p2 = {p2}, delta = {p1 - p2}, max_delta = {np.max(abs(p1 - p2))}"
def test_legendre_basis(shape, max_degree):
x1 = np.random.rand(*shape)
x2 = torch.tensor(x1, requires_grad=True)
y1 = np.concatenate(
[legendre(d)(x1) for d in range(max_degree + 1)],
axis=1,
)
net = LegendreBasis(max_degree=max_degree)
y2 = net(x2)
assert y2.requires_grad, f"output seems detached from the graph"
y2 = y2.detach().cpu().numpy()
assert isclose(y2, y1).all(), f"y1 = {y1}, y2 = {y2}, delta = {y1 - y2}, max_delta = {np.max(abs(y1 - y2))}"
def test_zero_order_spherical_harmonics(shape, max_degree):
# note that in scipy, theta is azimuthal angle (0, 2 pi) while phi is polar angle (0, pi)
thetas1 = np.random.rand(*shape) * np.pi * 2
phis1 = np.random.rand(*shape) * np.pi
# in neurodiffeq, theta and phi should be exchanged
thetas2 = torch.tensor(phis1, requires_grad=True)
phis2 = torch.tensor(thetas1, requires_grad=True)
order = 0
y1 = np.concatenate(
[sph_harm(order, degree, thetas1, phis1) for degree in range(max_degree + 1)],
axis=1,
)
assert (np.imag(y1) == 0).all(), f"y1 has non-zero imaginary part: {y1}"
y1 = np.real(y1)
net = ZonalSphericalHarmonics(max_degree)
y2 = net(thetas2, phis2)
assert y2.requires_grad, f"output seems detached from the graph"
y2 = y2.detach().cpu().numpy()
assert isclose(y2, y1, atol=1e-5, rtol=1e-3).all(), \
f"y1 = {y1}, y2 = {y2}, delta = {y1 - y2}, max_delta = {np.max(abs(y1 - y2))}"
def test_zero_order_spherical_harmonics_laplacian(shape, max_degree):
# Somehow, if changing default dtype to float32, the test fails by a large margin
N_FLOAT = np.float64
T_FLOAT = torch.float64
THETA_EPS = 0.1
r_values = np.random.rand(*shape).astype(N_FLOAT) + 1.1
theta_values = np.random.uniform(THETA_EPS, np.pi - THETA_EPS, size=shape).astype(N_FLOAT)
phi_values = np.random.rand(*shape).astype(N_FLOAT) * np.pi * 2
net = nn.Sequential(
nn.Linear(1, 10),
nn.Tanh(),
nn.Linear(10, max_degree + 1),
).to(T_FLOAT)
harmonics = ZonalSphericalHarmonics(max_degree=max_degree)
r1 = torch.tensor(r_values, requires_grad=True)
theta1 = torch.tensor(theta_values, requires_grad=True)
phi1 = torch.tensor(phi_values, requires_grad=True)
coeffs1 = net(r1)
us = torch.sum(coeffs1 * harmonics(theta1, phi1), dim=1, keepdim=True)
def laplacian1(u, r, theta, phi):
r_lap = diff(u * r, r, order=2) / r
theta_lap = diff(diff(u, theta) * torch.sin(theta), theta) / (r ** 2) / torch.sin(theta)
phi_lap = diff(u, phi, order=2) / (r ** 2) / torch.sin(theta) ** 2
return r_lap + theta_lap + phi_lap
lap1 = laplacian1(us, r1, theta1, phi1)
assert lap1.requires_grad, "lap1 seems detached from graph"
r2 = torch.tensor(r_values, requires_grad=True)
theta2 = torch.tensor(theta_values, requires_grad=True)
phi2 = torch.tensor(phi_values, requires_grad=True)
coeffs2 = net(r2)
laplacian2 = ZonalSphericalHarmonicsLaplacian(max_degree=max_degree)
lap2 = laplacian2(coeffs2, r2, theta2, phi2)
assert lap2.requires_grad, "lap2 seems detached from graph"
assert torch.isclose(lap2, lap1, rtol=1e-3, atol=1e-5).all(), \
f"lap1 = {lap1}\nlap2 = {lap2}\ndelta = {lap1 - lap2}\nmax_delta = {(lap1 - lap2).abs().max().item()}"
|
scripts/conda/make-m2-proxy.py | Pahandrovich/omniscidb | 868 | 11067723 | #!/usr/bin/env python
'''
Generate ~/.m2/settings.xml specifying proxies
if such environment variables are set.
'''
import os
import sys
import re
import errno
def simple_xml(name, sections):
''' very simple xml generator for one-level depth items '''
result = ['<%s>' % name]
for sec_name, sec_value in sections:
result.append(' <{0}>{1}</{0}>'.format(sec_name, sec_value))
result.append('</%s>' % name)
return '\n'.join(' ' + line for line in result)
_made_ids = set()
def gen_proxy(var_name):
value = os.environ.get(var_name, '')
if not value:
return None
try:
parsed = re.search(r'''((?P<protocol>[^:]+)://)? # protocol followed by ://, optional
((?P<username>[^:]+)(:(?P<password>[^@]+))?@)? # user:password part, optional
(?P<host>[^@]+?) # hostname, which is basically everything but other known parts
(:(?P<port>\d+))? # port, optional
$''', value, re.VERBOSE).groupdict()
except AttributeError:
sys.stderr.write('WARNING: unexpected format, could not parse $%s=%s\n' % (var_name, value))
return None
if not parsed['host']:
return None
id_name = var_name.lower()
if id_name in _made_ids:
num = 0
while ('%s.%s' % (id_name, num)) in _made_ids:
num +=1
id_name = '%s.%s' % (id_name, num)
_made_ids.add(id_name)
sections = [('id', id_name), ('active', 'true')]
for param_name in ('protocol', 'host', 'port', 'username', 'password'):
if parsed[param_name]:
sections.append((param_name, parsed[param_name]))
return simple_xml('proxy', sections)
def make_settings(*var_names):
sections = []
for name in var_names:
value = gen_proxy(name)
if value:
sections.append(value)
if not sections:
return None
template = '''<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
https://maven.apache.org/xsd/settings-1.0.0.xsd">
<proxies>
%s
</proxies>
</settings>'''
return template % '\n'.join(sections)
def main():
settings = make_settings('http_proxy', 'HTTP_PROXY', 'https_proxy', 'HTTPS_PROXY')
target = os.path.expanduser('~/.m2/settings.xml')
if not settings:
try:
os.remove(target)
except OSError as ex:
if ex.errno != errno.ENOENT:
raise
return
try:
os.makedirs(os.path.dirname(target))
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
with open(target, 'w') as out:
out.write(settings)
if __name__ == '__main__':
main()
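# --- Example (added for illustration; values are made up) ---
# With e.g. HTTPS_PROXY=http://user:secret@proxy.example.com:3128 in the
# environment, make_settings() produces a ~/.m2/settings.xml whose <proxies>
# section contains an entry along these lines:
#
#   <proxy>
#     <id>https_proxy</id>
#     <active>true</active>
#     <protocol>http</protocol>
#     <host>proxy.example.com</host>
#     <port>3128</port>
#     <username>user</username>
#     <password>secret</password>
#   </proxy>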
|
tests/test_cpan.py | trathborne/nvchecker | 320 | 11067733 | <filename>tests/test_cpan.py<gh_stars>100-1000
# MIT licensed
# Copyright (c) 2013-2020 lilydjwg <<EMAIL>>, et al.
import pytest
pytestmark = [pytest.mark.asyncio, pytest.mark.needs_net]
async def test_cpan(get_version):
assert await get_version("POE-Component-Server-HTTPServer", {
"source": "cpan",
}) == "0.9.2"
|
office-plugin/windows-office/share/Scripts/python/HelloWorld.py | jerrykcode/kkFileView | 6,660 | 11067736 | # HelloWorld python script for the scripting framework
#
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
def HelloWorldPython():
"""Prints the string 'Hello World (in Python)' into the current document.
"""
# Get the doc from the scripting context which is made available to all
# scripts.
desktop = XSCRIPTCONTEXT.getDesktop()
model = desktop.getCurrentComponent()
# Check whether there's already an opened document.
# Otherwise, create a new one
if not hasattr(model, "Text"):
model = desktop.loadComponentFromURL(
"private:factory/swriter", "_blank", 0, ())
# get the XText interface
text = model.Text
# create an XTextRange at the end of the document
tRange = text.End
# and set the string
tRange.String = "Hello World (in Python)"
return None
# vim: set shiftwidth=4 softtabstop=4 expandtab:
|
atp-auto-core-open/atp/views/project.py | rebecca1202/testAuto | 130 | 11067749 | # -*- coding:utf-8 -*-
"""
File Name: `project`.py
Version:
Description:
增加接口:/project/add
入参格式:
{
"projectName":"XXX",
"simpleDec:"xxx"(非必填)
}
修改接口:/project/edit
入参格式:
{
"id":xx,
"projectName":"xxx",
"simpleDec:"xxx"(非必填)
}
删除接口:/project/delete
入参格式:
{
“id":xxx
}
查询接口:/project/list
入参格式:
{
"projectName":"xxx"(非必填)
}
"""
import json
import time
from flask import Blueprint
from flask_restful import Resource
from atp.api.comm_log import logger
from atp.api.mysql_manager import (
# ProjectInfoManager, SystemInfoManager, ModuleInfoManager, TestsuiteInfoManager,
# query_subtree, TestcaseInfoManager, query_subtree_with_case_id,
BaseSystemInfoManager, BaseModuleInfoManager, BaseProjectInfoManager,
UICasePageInfoManager, UiProjectInfoManager, UiSystemInfoManager, UiModuleInfoManager,
BaseTestcaseInfoManager)
from atp.views.wrappers import timer, login_check, developer_check
from atp.utils.common import get_request_json, make_response, username_to_nickname
from atp.api.redis_api import RedisManager
from flask import request
redis = RedisManager()
project = Blueprint('project_interface', __name__)
class Project(Resource):
def __init__(self):
self.data = get_request_json()
self.ucpim = UICasePageInfoManager()
self.username = redis.get_username(request.headers.get('X-Token'))
@timer
def post(self, action):
# if action == 'add':
# return self.add_project()
#
# elif action == 'edit':
# return self.edit_project()
#
# elif action == 'delete':
# return self.delete_project()
#
# elif action == 'detail':
# return self.detail()
#
# elif action == 'list':
# return self.project_list()
#
# elif action == 'subtree':
# return self.project_subtree()
#
# elif action == 'subtreeWithCase':
# return self.project_subtree_with_case()
if action == 'baseList':
return self.base_project_list()
elif action == 'baseSubtree':
return self.base_project_subtree()
elif action == 'uiSubtree':
return self.ui_project_subtree()
elif action == 'pageSubtree':
return self.page_tree()
elif action == 'uiList':
return self.ui_project_list()
else:
return make_response({"code": "100", "desc": "url错误,不存在的接口动作<{action}>".format(action=action)})
@login_check
def base_project_list(self):
try:
project_name = self.data.pop('projectName', None)
except KeyError:
return make_response({"code": "100", "desc": "入参校验失败"})
if project_name:
if not BaseProjectInfoManager.get_project(project_name=project_name):
return make_response({"code": "200", "desc": "项目名不存在,无法查询"})
result = BaseProjectInfoManager.base_project_info(project_name)
project_list = []
for obj in result:
project_list.append({
"id": obj.id,
"projectName": obj.project_name
})
return make_response({"code": "000", "desc": project_list})
@login_check
def ui_project_list(self):
try:
project_name = self.data.pop('projectName', None)
except KeyError:
return make_response({"code": "100", "desc": "入参校验失败"})
result = UiProjectInfoManager.ui_project_info(project_name)
project_list = []
for obj in result:
project_list.append({
"id": obj.id,
"projectName": obj.project_name
})
return make_response({"code": "000", "desc": project_list})
@login_check
def base_project_subtree(self):
"""根据业务用例的项目id查询配置在该项目下的系统-模块...-业务功能"""
try:
project_id = self.data.pop('id')
except KeyError:
return make_response({"code": "100", "desc": "入参校验失败"})
system_list = BaseSystemInfoManager.get_all_system(project_id)
subtree = []
for system_obj in system_list:
system_tree = []
res_all_modules = BaseModuleInfoManager.get_modules(system_id=system_obj.id)
if res_all_modules:
for module_obj in res_all_modules:
response = query_parent_module_id(module_obj.id)
if response:
system_tree.append({
"children": response,
"id": module_obj.id,
"label": module_obj.module_name,
"moduleId": module_obj.id
})
else:
system_tree.append({
"id": module_obj.id,
"label": module_obj.module_name,
"moduleId_last": module_obj.id
})
subtree.append({
"children": system_tree,
"id": system_obj.id,
"label": system_obj.system_name,
"systemId": system_obj.id
})
else:
subtree.append({
"id": system_obj.id,
"label": system_obj.system_name,
"systemId": system_obj.id
})
subtree = count_base_subtree(subtree)
return make_response({"code": "000", "data": subtree})
@login_check
def ui_project_subtree(self):
"""根据UI用例的项目id查询配置在该项目下的系统-模块"""
try:
project_id = self.data.pop('id')
except KeyError:
            return make_response({"code": "100", "desc": "Invalid request parameters"})
system_list = UiSystemInfoManager.get_all_system(project_id)
subtree = []
for system_obj in system_list:
system_tree = []
res_all_modules = UiModuleInfoManager.get_modules(system_id=system_obj.id)
print(res_all_modules)
if res_all_modules:
for module_obj in res_all_modules:
system_tree.append({
"id": module_obj.id,
"label": module_obj.module_name,
"moduleId": module_obj.id
})
subtree.append({
"children": system_tree,
"id": system_obj.id,
"label": system_obj.system_name,
"systemId": system_obj.id
})
else:
subtree.append({
"id": system_obj.id,
"label": system_obj.system_name,
"systemId": system_obj.id
})
return make_response({"code": "000", "data": subtree})
@login_check
def page_tree(self):
"""根据业务用例的项目id查询配置在该项目下的系统-模块...-业务功能"""
try:
project_id = self.data.pop('id')
except KeyError:
            return make_response({"code": "100", "desc": "Invalid request parameters"})
project_list = UiSystemInfoManager.get_all_system(project_id)
response = []
for project_obj in project_list:
system_obj = UiSystemInfoManager.query_system(id=project_obj.id)
if not system_obj:
return make_response({"code": "200", "desc": "系统不存在"})
page_list = []
page_info_list = self.ucpim.query_ui_pages(system_id=project_obj.id)
for page_info in page_info_list:
page_list.append({
"id": page_info.id,
"label": page_info.page_name,
"pageId": page_info.id
})
response.append({
"children": page_list,
"id": project_obj.id,
"label": system_obj.system_name,
"systemId": project_obj.id
})
return make_response({"code": "000", "data": response})
def query_parent_module_id(module_id):
module_id_list = []
objs = BaseModuleInfoManager.get_modules(parent_module_id=module_id)
if objs:
for obj in objs:
res = query_parent_module_id(obj.id)
if res:
module_id_list.append({
"children": res,
"id": obj.id,
"label": obj.module_name,
"moduleId": obj.id
})
else:
module_id_list.append({
"id": obj.id,
"label": obj.module_name,
"moduleId_last": obj.id
})
return module_id_list
else:
return None
def append_system(subtree, row, index_id):
system_dict = dict()
index_id += 1
system_dict['id'] = index_id
system_dict['label'] = row.system_name
system_dict['systemId'] = row[0]
system_dict['children'] = []
if row[2]:
index_id = append_module(system_dict, row, index_id)
subtree.append(system_dict)
return index_id
def append_testsuite(module, row, index_id):
index_id += 1
testsuite_dict = dict()
testsuite_dict['id'] = index_id
testsuite_dict['label'] = row.testsuite_name
testsuite_dict['testsuiteId'] = row[4]
module['children'].append(testsuite_dict)
return index_id
def append_module(system_dict, row, index_id):
index_id += 1
module_dict = dict()
module_dict['id'] = index_id
module_dict['label'] = row.module_name
module_dict['moduleId'] = row[2]
module_dict['children'] = []
if row[4]:
index_id = append_testsuite(module_dict, row, index_id)
system_dict['children'].append(module_dict)
return index_id
def append_system_with_case(subtree, row, index_id):
system_dict = dict()
index_id += 1
system_dict['id'] = index_id
system_dict['label'] = row.system_name
system_dict['systemId'] = row[0]
system_dict['children'] = []
if row[2]:
index_id = append_module_with_case(system_dict, row, index_id)
subtree.append(system_dict)
return index_id
def append_testsuite_with_case(module, row, index_id):
index_id += 1
testsuite_dict = dict()
testsuite_dict['id'] = index_id
testsuite_dict['label'] = row.testsuite_name
testsuite_dict['testsuiteId'] = row[4]
testsuite_dict['children'] = []
if row[6]:
index_id = append_testcase_with_case(testsuite_dict, row, index_id)
module['children'].append(testsuite_dict)
return index_id
def append_module_with_case(system_dict, row, index_id):
index_id += 1
module_dict = dict()
module_dict['id'] = index_id
module_dict['label'] = row.module_name
module_dict['moduleId'] = row[2]
module_dict['children'] = []
if row[4]:
index_id = append_testsuite_with_case(module_dict, row, index_id)
system_dict['children'].append(module_dict)
return index_id
def append_testcase_with_case(testsuite, row, index_id):
index_id += 1
testcase_dict = dict()
testcase_dict['id'] = index_id
testcase_dict['label'] = str(row[6]) + '_' + row[7]
testcase_dict['testcaseId'] = row[6]
testsuite['children'].append(testcase_dict)
return index_id
def count_subtree(subtree):
for s_dic in subtree:
s_case_count = 0
for m_dic in s_dic['children']:
m_case_count = 0
for ts_dic in m_dic['children']:
ts_case_count = len(ts_dic['children'])
ts_dic['label'] += ' ({})'.format(ts_case_count)
m_case_count += ts_case_count
m_dic['label'] += ' ({})'.format(m_case_count)
s_case_count += m_case_count
s_dic['label'] += ' ({})'.format(s_case_count)
return subtree
def count_base_subtree(subtree):
grouped_case_data = BaseTestcaseInfoManager.group_testcases_by_module_id()
def count_base_case_by_node(node_dic):
current_node_count = 0
if 'children' in node_dic:
for sub_node_dic in node_dic['children']:
current_node_count += count_base_case_by_node(sub_node_dic)
node_dic['label'] += ' ({})'.format(current_node_count)
else:
if 'moduleId_last' in node_dic:
for row in grouped_case_data:
if row[0] == node_dic['moduleId_last']:
current_node_count = row[1]
break
node_dic['label'] += ' ({})'.format(current_node_count)
return current_node_count
for s_dic in subtree:
count_base_case_by_node(s_dic)
return subtree
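# Illustrative sketch (not part of the original module): count_base_subtree walks
# the nested "children" lists and appends the aggregated testcase count to every
# label. With made-up ids/names, and assuming module 7 has 3 grouped testcases,
#
#   {"label": "Payments", "children": [{"label": "Refunds", "moduleId_last": 7}]}
#
# becomes
#
#   {"label": "Payments (3)", "children": [{"label": "Refunds (3)", "moduleId_last": 7}]}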
|
python/tvm/contrib/ethosu/cascader/plan_generator.py | shengxinhu/tvm | 4,640 | 11067751 | <reponame>shengxinhu/tvm<gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Algorithms to generate Plans for a CascaderGraph."""
from typing import List, Dict, Tuple
from tvm.contrib.ethosu.cascader.tensor_config import MemoryRegion, TensorConfig
from . import _ffi_api
from .cascader_options import CascaderOptions
from .plan import Plan
from .stripe_config import StripeConfig
from .graph import CascaderGraph, Part, Tensor
def _generate_output_stripe_configs(
part: Part, stripe_factors: int, enable_striping: bool, multi_dimensional: bool
) -> List[StripeConfig]:
return list(
_ffi_api.GenerateOutputStripeConfigs(
part, stripe_factors, enable_striping, multi_dimensional
)
)
def _generate_single_plans(
part: Part,
output_stripe_configs: List[StripeConfig],
home_map: Dict[Tensor, List[MemoryRegion]],
cascade_region: MemoryRegion,
) -> List[Plan]:
return list(_ffi_api.GenerateSinglePlans(part, output_stripe_configs, home_map, cascade_region))
def _generate_graph_plans(
graph: CascaderGraph,
home_map: Dict[Tensor, List[MemoryRegion]],
options: CascaderOptions,
):
return _ffi_api.GenerateGraphPlans(
graph,
home_map,
options,
)
def get_copy_cycles_hint(tensor_config: TensorConfig) -> Tuple[int, int]:
"""
Returns a hint estimating the number of cycles for the copy
specified by tensor_config.
Parameters
----------
tensor_config : TensorConfig
The tensor configuration to estimate.
Returns
-------
mem2mem_cycles : int
Total estimated cycles.
initial_mem2mem_cycles : int
Estimated cycles for the first block.
"""
return _ffi_api.GetCopyCyclesHint(tensor_config)
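# Illustrative usage sketch (not part of the original module): the TensorConfig is
# assumed to come from a Plan produced by the generator functions above.
#
#   total_cycles, initial_cycles = get_copy_cycles_hint(tensor_config)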
|
utest/resources/robotdata/libs/TestLib.py | guojiajiaok/RIDE | 775 | 11067767 | def testlib_keyword():
"""
"""
return True
def testlib_keyword_with_args(arg1, arg2='default value', *args):
"""This keyword requires one argument, has one optional argument and varargs.
This is some more documentation
"""
pass
def testlib_keyword_with_kwonlyargs(arg1, *args, namedarg1, namedarg2='default value', **kwargs):
pass
|
kitsune/kbadge/migrations/0004_auto_20200629_0826.py | AndrewDVXI/kitsune | 929 | 11067778 | <gh_stars>100-1000
# Generated by Django 2.2.13 on 2020-06-29 08:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kbadge', '0003_auto_20190816_1824'),
]
operations = [
migrations.AlterField(
model_name='award',
name='description',
field=models.TextField(blank=True, help_text='Explanation and evidence for the badge award'),
),
migrations.AlterField(
model_name='award',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='uploads/badges/'),
),
migrations.AlterField(
model_name='badge',
name='image',
field=models.ImageField(blank=True, help_text='Must be square. Recommended 256x256.', null=True, upload_to='uploads/badges/'),
),
migrations.AlterField(
model_name='badge',
name='unique',
field=models.BooleanField(default=True, help_text='Should awards of this badge be limited to one-per-person?'),
),
]
|
ceph/tests/test_e2e.py | vbarbaresi/integrations-core | 663 | 11067780 | # (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.ceph import Ceph
from .common import BASIC_CONFIG, EXPECTED_METRICS, EXPECTED_SERVICE_CHECKS, EXPECTED_SERVICE_TAGS
@pytest.mark.e2e
def test_check(dd_agent_check):
aggregator = dd_agent_check(BASIC_CONFIG, rate=True)
for metric in EXPECTED_METRICS:
aggregator.assert_metric(metric, at_least=1)
for sc in EXPECTED_SERVICE_CHECKS:
aggregator.assert_service_check(sc, status=Ceph.OK, tags=EXPECTED_SERVICE_TAGS)
aggregator.assert_service_check('ceph.overall_status', status=Ceph.OK, tags=EXPECTED_SERVICE_TAGS)
|
pandapower/control/controller/trafo/ContinuousTapControl.py | yougnen/pandapower | 104 | 11067811 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
from pandapower.control.controller.trafo_control import TrafoController
class ContinuousTapControl(TrafoController):
"""
Trafo Controller with local tap changer voltage control.
INPUT:
**net** (attrdict) - Pandapower struct
**tid** (int) - ID of the trafo that is controlled
**vm_set_pu** (float) - Maximum OLTC target voltage at bus in pu
OPTIONAL:
        **tol** (float, 0.001) - Voltage tolerance band at bus in per unit (default: 0.1% = 0.001 pu)
**side** (string, "lv") - Side of the transformer where the voltage is controlled
        **trafotype** (string, "2W") - Trafo type ("2W" or "3W")
**in_service** (bool, True) - Indicates if the controller is currently in_service
**check_tap_bounds** (bool, True) - In case of true the tap_bounds will be considered
**drop_same_existing_ctrl** (bool, False) - Indicates if already existing controllers of the same type and with the same matching parameters (e.g. at same element) should be dropped
"""
def __init__(self, net, tid, vm_set_pu, tol=1e-3, side="lv", trafotype="2W", in_service=True,
check_tap_bounds=True, level=0, order=0, drop_same_existing_ctrl=False,
matching_params=None, **kwargs):
if matching_params is None:
matching_params = {"tid": tid, 'trafotype': trafotype}
super().__init__(net, tid=tid, side=side, tol=tol, in_service=in_service,
trafotype=trafotype, level=level, order=order,
drop_same_existing_ctrl=drop_same_existing_ctrl,
matching_params=matching_params, **kwargs)
t = net[self.trafotable]
b = net.bus
if trafotype == "2W":
self.t_nom = t.at[tid, "vn_lv_kv"] / t.at[tid, "vn_hv_kv"] * \
b.at[net[self.trafotable].at[tid, "hv_bus"], "vn_kv"] / \
b.at[net[self.trafotable].at[tid, "lv_bus"], "vn_kv"]
elif side == "lv":
self.t_nom = t.at[tid, "vn_lv_kv"] / t.at[tid, "vn_hv_kv"] * \
b.at[net[self.trafotable].at[tid, "hv_bus"], "vn_kv"] / \
b.at[net[self.trafotable].at[tid, "lv_bus"], "vn_kv"]
elif side == "mv":
self.t_nom = t.at[tid, "vn_mv_kv"] / t.at[tid, "vn_hv_kv"] * \
b.at[net[self.trafotable].at[tid, "hv_bus"], "vn_kv"] / \
b.at[net[self.trafotable].at[tid, "mv_bus"], "vn_kv"]
self.check_tap_bounds = check_tap_bounds
self.vm_set_pu = vm_set_pu
self.trafotype = trafotype
self.tol = tol
def control_step(self, net):
"""
Implements one step of the ContinuousTapControl
"""
delta_vm_pu = net.res_bus.at[self.controlled_bus, "vm_pu"] - self.vm_set_pu
tc = delta_vm_pu / self.tap_step_percent * 100 / self.t_nom
self.tap_pos += tc * self.tap_side_coeff * self.tap_sign
if self.check_tap_bounds:
self.tap_pos = np.clip(self.tap_pos, self.tap_min, self.tap_max)
# WRITE TO NET
if net[self.trafotable].tap_pos.dtype != "float":
net[self.trafotable].tap_pos = net[self.trafotable].tap_pos.astype(float)
net[self.trafotable].at[self.tid, "tap_pos"] = self.tap_pos
def is_converged(self, net):
"""
The ContinuousTapControl is converged, when the difference of the voltage between control steps is smaller
than the Tolerance (tol).
"""
if not net[self.trafotable].at[self.tid, 'in_service']:
return True
vm_pu = net.res_bus.at[self.controlled_bus, "vm_pu"]
self.tap_pos = net[self.trafotable].at[self.tid, 'tap_pos']
difference = 1 - self.vm_set_pu / vm_pu
if self.check_tap_bounds:
if self.tap_side_coeff * self.tap_sign == 1:
if vm_pu < self.vm_set_pu and self.tap_pos == self.tap_min:
return True
elif vm_pu > self.vm_set_pu and self.tap_pos == self.tap_max:
return True
elif self.tap_side_coeff * self.tap_sign == -1:
if vm_pu > self.vm_set_pu and self.tap_pos == self.tap_min:
return True
elif vm_pu < self.vm_set_pu and self.tap_pos == self.tap_max:
return True
return abs(difference) < self.tol
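# Illustrative usage sketch (not part of the original class); the std_type name and
# network layout are assumptions, and runpp(..., run_control=True) assumes a
# pandapower version where controllers are applied inside the power flow:
#
#   import pandapower as pp
#   net = pp.create_empty_network()
#   hv = pp.create_bus(net, vn_kv=20.0)
#   lv = pp.create_bus(net, vn_kv=0.4)
#   pp.create_ext_grid(net, hv)
#   pp.create_load(net, lv, p_mw=0.1)
#   tid = pp.create_transformer(net, hv, lv, std_type="0.25 MVA 20/0.4 kV")
#   ContinuousTapControl(net, tid=tid, vm_set_pu=1.02)
#   pp.runpp(net, run_control=True)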
|
cifar/network-slimming/models/channel_selection.py | dukebw/rethinking-network-pruning | 1,410 | 11067826 | import numpy as np
import torch
import torch.nn as nn
class channel_selection(nn.Module):
"""
    Select channels from the output of a BatchNorm2d layer. It should be placed directly after a BatchNorm2d layer.
    The output shape of this layer is determined by the number of 1s in `self.indexes`.
"""
def __init__(self, num_channels):
"""
Initialize the `indexes` with all one vector with the length same as the number of channels.
        During pruning, the places in `indexes` which correspond to the channels to be pruned will be set to 0.
"""
super(channel_selection, self).__init__()
self.indexes = nn.Parameter(torch.ones(num_channels))
def forward(self, input_tensor):
"""
Parameter
---------
        input_tensor: (N,C,H,W). It should be the output of a BatchNorm2d layer.
"""
selected_index = np.squeeze(np.argwhere(self.indexes.data.cpu().numpy()))
if selected_index.size == 1:
selected_index = np.resize(selected_index, (1,))
output = input_tensor[:, selected_index, :, :]
return output |
jacweb/session/session.py | dapatil211/Jacinle | 114 | 11067867 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : session.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 10/23/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
import hmac
import uuid
import hashlib
__all__ = ['Session', 'SessionManagerBase']
class Session(dict):
def __init__(self, session_manager, request_handler):
self._session_manager = session_manager
self._request_handler = request_handler
session_id, session_data = self._session_manager.get(self._request_handler)
self._identifier = session_id
super().__init__(session_data)
@property
def identifier(self):
return self._identifier
def save(self):
self._session_manager.set(self._request_handler, self.identifier, dict(self))
class SessionManagerBase(object):
def __init__(self, secret):
self._secret = secret
super().__init__()
@property
def secret(self):
return self._secret
def new(self, request_handler):
return Session(self, request_handler)
def get(self, request_handler):
raise NotImplementedError()
def set(self, request_handler, session_id, data):
raise NotImplementedError()
def _generate_id(self):
new_id = hashlib.sha256((self.secret + str(uuid.uuid4())).encode('utf-8'))
return new_id.hexdigest()
def _generate_hmac(self, session_id):
if type(session_id) is not bytes:
session_id = session_id.encode('utf-8')
return hmac.new(session_id, self.secret.encode('utf-8'), hashlib.sha256).hexdigest()
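# Illustrative sketch (not part of the original module): a minimal in-memory
# SessionManagerBase subclass. The way the session id is attached to the request
# handler (the `session_id` attribute) is an assumption for demonstration
# purposes, not something this module defines.
class _InMemorySessionManager(SessionManagerBase):
    def __init__(self, secret):
        super().__init__(secret)
        self._store = {}

    def get(self, request_handler):
        # Reuse the handler's session id when it is known, otherwise mint a
        # fresh id with an empty data dict.
        session_id = getattr(request_handler, 'session_id', None)
        if session_id is None or session_id not in self._store:
            session_id = self._generate_id()
            self._store[session_id] = {}
        return session_id, self._store[session_id]

    def set(self, request_handler, session_id, data):
        self._store[session_id] = dict(data)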
|
core/utils.py | Alberdi/babybuddy | 922 | 11067888 | # -*- coding: utf-8 -*-
import random
from django.utils import timezone
from django.utils.translation import ngettext
random.seed()
COLORS = [
"#ff0000",
"#00ff00",
"#0000ff",
"#ff00ff",
"#ffff00",
"#00ffff",
"#ff7f7f",
"#7fff7f",
"#7f7fff",
"#ff7fff",
"#ffff7f",
"#7fffff",
"#7f0000",
"#007f00",
"#00007f",
"#7f007f",
"#7f7f00",
"#007f7f",
]
def duration_string(duration, precision="s"):
"""Format hours, minutes and seconds as a human-friendly string (e.g. "2
hours, 25 minutes, 31 seconds") with precision to h = hours, m = minutes or
s = seconds.
"""
h, m, s = duration_parts(duration)
duration = ""
if h > 0:
duration = ngettext("%(hours)s hour", "%(hours)s hours", h) % {"hours": h}
if m > 0 and precision != "h":
if duration != "":
duration += ", "
duration += ngettext("%(minutes)s minute", "%(minutes)s minutes", m) % {
"minutes": m
}
if s > 0 and precision != "h" and precision != "m":
if duration != "":
duration += ", "
duration += ngettext("%(seconds)s second", "%(seconds)s seconds", s) % {
"seconds": s
}
return duration
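# Illustrative examples (not part of the original module), assuming Django is
# configured with an active English locale:
#
#   >>> duration_string(timezone.timedelta(hours=2, minutes=25, seconds=31))
#   '2 hours, 25 minutes, 31 seconds'
#   >>> duration_string(timezone.timedelta(hours=2, minutes=25, seconds=31), precision="m")
#   '2 hours, 25 minutes'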
def duration_parts(duration):
"""Get hours, minutes and seconds from a timedelta."""
if not isinstance(duration, timezone.timedelta):
        raise TypeError("Duration provided must be a timedelta")
h, remainder = divmod(duration.seconds, 3600)
h += duration.days * 24
m, s = divmod(remainder, 60)
return h, m, s
def random_color():
return COLORS[random.randrange(0, len(COLORS))]
|
packs/webpagetest/actions/random_test.py | jonico/st2contrib | 164 | 11067893 | from lib.webpagetest import WebPageTestAction
__all__ = ['RandomTest']
class RandomTest(WebPageTestAction):
def run(self, domain):
return self.test_random_location(domain)
|
library/goldsrc/mdl_v4/mdl_file.py | anderlli0053/SourceIO | 199 | 11067916 | from typing import List
from ...shared.base import Base
from ...utils.byte_io_mdl import ByteIO
from .structs.bone import StudioBone
from .structs.model import StudioModel
from .structs.sequence import StudioSequence
from .structs.studioheader import StudioHeader
from .structs.bodypart import StudioBodypart
class Mdl(Base):
def __init__(self, filepath):
self.store_value("MDL", self)
self.reader = ByteIO(filepath)
self.header = StudioHeader()
self.bones: List[StudioBone] = []
self.bodyparts: List[StudioBodypart] = []
self.sequences: List[StudioSequence] = []
self.models: List[StudioModel] = []
def read(self):
header = self.header
reader = self.reader
header.read(reader)
for _ in range(header.bone_count):
bone = StudioBone()
bone.read(reader)
self.bones.append(bone)
for _ in range(header.sequence_count):
sequence = StudioSequence()
sequence.read(reader)
self.sequences.append(sequence)
total_model_count = 0
for _ in range(header.body_part_count):
bodypart = StudioBodypart()
bodypart.read(reader)
total_model_count += bodypart.model_count
self.bodyparts.append(bodypart)
assert total_model_count == header.unk_count, \
f'Total count of models should match unk_count, {total_model_count}!={header.unk_count}'
for sequence in self.sequences:
sequence.read_anim_values(reader, header.bone_count)
for _ in range(total_model_count):
model = StudioModel()
model.read(reader)
self.models.append(model)
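# Illustrative usage sketch (not part of the original module); the file path is
# hypothetical:
#
#   mdl = Mdl("models/scientist.mdl")
#   mdl.read()
#   print(len(mdl.bones), len(mdl.sequences), len(mdl.models))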
|
setup_app/installers/passport.py | duttarnab/community-edition-setup | 178 | 11067921 | <reponame>duttarnab/community-edition-setup
import os
import glob
import json
from setup_app import paths
from setup_app.static import AppType, InstallOption
from setup_app.config import Config
from setup_app.utils import base
from setup_app.installers.node import NodeInstaller
class PassportInstaller(NodeInstaller):
def __init__(self):
self.service_name = 'passport'
self.app_type = AppType.SERVICE
self.install_type = InstallOption.OPTONAL
self.install_var = 'installPassport'
self.register_progess()
passport_version = Config.oxVersion.replace('-SNAPSHOT','').replace('.Final','')
self.source_files = [
(os.path.join(Config.distGluuFolder, 'passport.tgz'), 'https://ox.gluu.org/npm/passport/passport-{}.tgz'.format(passport_version)),
(os.path.join(Config.distGluuFolder, 'passport-node_modules.tar.gz'), 'https://ox.gluu.org/npm/passport/passport-version_{}-node_modules.tar.gz'.format(passport_version))
]
self.gluu_passport_base = os.path.join(self.node_base, 'passport')
self.passport_initd_script = os.path.join(Config.install_dir, 'static/system/initd/passport')
self.passport_config = os.path.join(Config.configFolder, 'passport-config.json')
self.passport_templates_folder = os.path.join(Config.templateFolder, 'passport')
self.ldif_scripts_fn = os.path.join(Config.outputFolder, 'passport/scripts.ldif')
self.passport_oxtrust_config_fn = os.path.join(Config.outputFolder, 'passport/passport_oxtrust_config.son')
self.passport_central_config_json = os.path.join(Config.outputFolder, 'passport/passport-central-config.json')
self.ldif_passport_config = os.path.join(Config.outputFolder, 'passport/oxpassport-config.ldif')
self.ldif_passport = os.path.join(Config.outputFolder, 'passport/passport.ldif')
self.ldif_passport_clients = os.path.join(Config.outputFolder, 'passport/passport_clients.ldif')
self.passport_rs_client_jks_fn = os.path.join(Config.certFolder, 'passport-rs.jks')
self.passport_rp_client_jks_fn = os.path.join(Config.certFolder, 'passport-rp.jks')
self.passport_rp_client_cert_fn = os.path.join(Config.certFolder, 'passport-rp.pem')
self.passportSpTLSCACert = os.path.join(Config.certFolder, 'passport-sp.pem')
self.passportSpTLSCert = os.path.join(Config.certFolder, 'passport-sp.crt')
self.passportSpTLSKey = os.path.join(Config.certFolder, 'passport-sp.key')
self.passportSpJksFn = os.path.join(Config.certFolder, 'passport-sp.jks')
def install(self):
self.logIt("Preparing passport service base folders")
self.run([paths.cmd_mkdir, '-p', self.gluu_passport_base])
self.extract_passport()
self.extract_modules()
# Copy init.d script
self.copyFile(self.passport_initd_script, Config.gluuOptSystemFolder)
self.run([paths.cmd_chmod, '-R', "755", os.path.join(Config.gluuOptSystemFolder, 'passport')])
# Install passport system service script
self.installNodeService('passport')
self.run([paths.cmd_chown, '-R', 'node:node', self.gluu_passport_base])
# enable service at startup
self.enable()
def extract_passport(self):
# Extract package
try:
self.logIt("Extracting {} into {}".format(self.source_files[0][0], self.gluu_passport_base))
self.run([paths.cmd_tar, '--strip', '1', '-xzf', self.source_files[0][0], '-C', self.gluu_passport_base, '--no-xattrs', '--no-same-owner', '--no-same-permissions'])
except:
self.logIt("Error encountered while extracting archive {}".format(self.source_files[0][0]))
def extract_modules(self):
modules_target_dir = os.path.join(self.gluu_passport_base, 'node_modules')
modules_source_dir = os.path.dirname(self.source_files[1][0])
self.run([paths.cmd_mkdir, '-p', modules_target_dir])
node_modules_list = glob.glob(os.path.join(modules_source_dir, 'passport*node_modules*'))
if node_modules_list:
passport_modules_archive = max(node_modules_list)
self.logIt("Extracting passport node modules")
self.run([paths.cmd_tar, '--strip', '1', '-xzf', passport_modules_archive, '-C', modules_target_dir, '--no-xattrs', '--no-same-owner', '--no-same-permissions'])
else:
# Install dependencies
try:
self.logIt("Running npm install in %s" % self.gluu_passport_base)
nodeEnv = os.environ.copy()
nodeEnv['PATH'] = ':'.join((os.path.join(Config.node_home, 'bin'), nodeEnv['PATH']))
cmd_npm = os.path.join(Config.node_home, 'bin', 'npm')
self.run([cmd_npm, 'install', '-P'], self.gluu_passport_base, nodeEnv, True)
except:
self.logIt("Error encountered running npm install in {}".format(self.gluu_passport_base))
def installed(self):
return os.path.exists(self.gluu_passport_base)
def generate_configuration(self):
self.logIt("Generating Passport configuration")
if not Config.get('passportSpKeyPass'):
            Config.passportSpKeyPass = self.getPW()
Config.passportSpJksPass = self.getPW()
if not Config.get('passport_rp_client_cert_alg'):
Config.passport_rp_client_cert_alg = 'RS512'
if not Config.get('passport_rp_client_jks_pass'):
Config.passport_rp_client_jks_pass = '<PASSWORD>'
if not Config.get('passport_rs_client_jks_pass'):
            Config.passport_rs_client_jks_pass = self.getPW()
Config.passport_rs_client_jks_pass_encoded = self.obscure(Config.passport_rs_client_jks_pass)
client_var_id_list = (
('passport_rs_client_id', '1501.'),
('passport_rp_client_id', '1502.'),
('passport_rp_ii_client_id', '1503.'),
)
self.check_clients(client_var_id_list)
self.check_clients([('passport_resource_id', '1504.')], resource=True)
# backup existing files
for f in glob.glob(os.path.join(Config.certFolder, 'passport-*')):
if not f.endswith('~'):
self.backupFile(f, move=True)
# create certificates
self.gen_cert('passport-sp', Config.passportSpKeyPass, 'ldap', Config.ldap_hostname)
Config.passport_rs_client_jwks = self.gen_openid_jwks_jks_keys(self.passport_rs_client_jks_fn, Config.passport_rs_client_jks_pass)
Config.templateRenderingDict['passport_rs_client_base64_jwks'] = self.generate_base64_string(Config.passport_rs_client_jwks, 1)
Config.passport_rp_client_jwks = self.gen_openid_jwks_jks_keys(self.passport_rp_client_jks_fn, Config.passport_rp_client_jks_pass)
Config.templateRenderingDict['passport_rp_client_base64_jwks'] = self.generate_base64_string(Config.passport_rp_client_jwks, 1)
self.logIt("Preparing Passport OpenID RP certificate...")
passport_rp_client_jwks_json = json.loads(''.join(Config.passport_rp_client_jwks))
for jwks_key in passport_rp_client_jwks_json["keys"]:
if jwks_key["alg"] == Config.passport_rp_client_cert_alg:
Config.passport_rp_client_cert_alias = jwks_key["kid"]
break
self.export_openid_key(self.passport_rp_client_jks_fn, Config.passport_rp_client_jks_pass, Config.passport_rp_client_cert_alias, self.passport_rp_client_cert_fn)
# set owner and mode of certificate files
cert_files = glob.glob(os.path.join(Config.certFolder, 'passport*'))
for fn in cert_files:
self.run([paths.cmd_chmod, '440', fn])
self.run([paths.cmd_chown, 'root:gluu', fn])
def render_import_templates(self):
self.logIt("Rendering Passport templates")
output_folder = os.path.join(Config.outputFolder,'passport')
self.renderTemplateInOut(self.passport_config, self.passport_templates_folder, Config.configFolder)
self.renderTemplateInOut(self.passport_central_config_json, self.passport_templates_folder, output_folder)
Config.templateRenderingDict['passport_central_config_base64'] = self.generate_base64_ldap_file(self.passport_central_config_json)
scripts_template = os.path.join(self.passport_templates_folder, os.path.basename(self.ldif_scripts_fn))
extensions = base.find_script_names(scripts_template)
self.prepare_base64_extension_scripts(extensions=extensions)
for tmp in (
self.passport_oxtrust_config_fn,
self.ldif_scripts_fn,
self.passport_config,
self.ldif_passport,
self.ldif_passport_clients,
self.ldif_passport_config,
):
self.renderTemplateInOut(tmp, self.passport_templates_folder, output_folder)
ldif_files = (self.ldif_scripts_fn, self.ldif_passport, self.ldif_passport_config, self.ldif_passport_clients)
self.dbUtils.import_ldif(ldif_files)
def update_backend(self):
self.dbUtils.enable_service('gluuPassportEnabled')
for inum in ['2FDB-CF02', 'D40C-1CA4', '2DAF-F9A5']:
self.dbUtils.enable_script(inum)
passport_oxtrust_config = base.readJsonFile(self.passport_oxtrust_config_fn)
self.dbUtils.set_oxTrustConfApplication(passport_oxtrust_config)
self.dbUtils.set_configuration('gluuPassportEnabled', 'true')
self.dbUtils.add_client2script('2DAF-F9A5', Config.passport_rp_client_id)
self.dbUtils.add_client2script('2DAF-F995', Config.passport_rp_client_id)
def create_folders(self):
# Create logs folder
self.run([paths.cmd_mkdir, '-p', os.path.join(self.gluu_passport_base, 'logs')])
#create empty log file unless exists
log_file = os.path.join(self.gluu_passport_base, 'logs/start.log')
if not os.path.exists(log_file):
self.writeFile(log_file, '')
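# Illustrative usage sketch (not part of the original module), assuming Config and
# the distribution folders have already been initialized by the setup framework:
#
#   installer = PassportInstaller()
#   if not installer.installed():
#       installer.create_folders()
#       installer.install()
#       installer.generate_configuration()
#       installer.render_import_templates()
#       installer.update_backend()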
|
zella-graphics/draw-lines/main.py | whitmans-max/python-examples | 140 | 11067927 | #!/usr/bin/env python3
from graphics import *
STEP = 10
win = GraphWin('Example', 300, 300)
for x1 in range(0, 301, STEP):
x2 = x1 + STEP
Line(Point(x1, 0), Point(300, x2)).draw(win)
Line(Point(300-x1, 0), Point(0, x2)).draw(win)
Line(Point(0, x2), Point(x1, 300)).draw(win)
Line(Point(300, x2), Point(300-x1, 300)).draw(win)
win.getMouse()
win.close()
|
autobahn/wamp/test/test_protocol_peer.py | rapyuta-robotics/autobahn-python | 1,670 | 11067931 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import os
# we need to select a txaio subsystem because we're importing the base
# protocol classes here for testing purposes. "normally" yo'd import
# from autobahn.twisted.wamp or autobahn.asyncio.wamp explicitly.
import txaio
if os.environ.get('USE_TWISTED', False):
txaio.use_twisted()
else:
txaio.use_asyncio()
from autobahn import wamp
from autobahn.wamp import message
from autobahn.wamp import exception
from autobahn.wamp import protocol
import unittest
class TestPeerExceptions(unittest.TestCase):
def test_exception_from_message(self):
session = protocol.BaseSession()
@wamp.error("com.myapp.error1")
class AppError1(Exception):
pass
@wamp.error("com.myapp.error2")
class AppError2(Exception):
pass
session.define(AppError1)
session.define(AppError2)
# map defined errors to user exceptions
emsg = message.Error(message.Call.MESSAGE_TYPE, 123456, 'com.myapp.error1')
exc = session._exception_from_message(emsg)
self.assertIsInstance(exc, AppError1)
self.assertEqual(exc.args, ())
emsg = message.Error(message.Call.MESSAGE_TYPE, 123456, 'com.myapp.error2')
exc = session._exception_from_message(emsg)
self.assertIsInstance(exc, AppError2)
self.assertEqual(exc.args, ())
# map undefined error to (generic) exception
emsg = message.Error(message.Call.MESSAGE_TYPE, 123456, 'com.myapp.error3')
exc = session._exception_from_message(emsg)
self.assertIsInstance(exc, exception.ApplicationError)
self.assertEqual(exc.error, 'com.myapp.error3')
self.assertEqual(exc.args, ())
self.assertEqual(exc.kwargs, {})
emsg = message.Error(message.Call.MESSAGE_TYPE, 123456, 'com.myapp.error3', args=[1, 2, 'hello'])
exc = session._exception_from_message(emsg)
self.assertIsInstance(exc, exception.ApplicationError)
self.assertEqual(exc.error, 'com.myapp.error3')
self.assertEqual(exc.args, (1, 2, 'hello'))
self.assertEqual(exc.kwargs, {})
emsg = message.Error(message.Call.MESSAGE_TYPE, 123456, 'com.myapp.error3', args=[1, 2, 'hello'], kwargs={'foo': 23, 'bar': 'baz'})
exc = session._exception_from_message(emsg)
self.assertIsInstance(exc, exception.ApplicationError)
self.assertEqual(exc.error, 'com.myapp.error3')
self.assertEqual(exc.args, (1, 2, 'hello'))
self.assertEqual(exc.kwargs, {'foo': 23, 'bar': 'baz'})
def test_message_from_exception(self):
session = protocol.BaseSession()
@wamp.error("com.myapp.error1")
class AppError1(Exception):
pass
@wamp.error("com.myapp.error2")
class AppError2(Exception):
pass
session.define(AppError1)
session.define(AppError2)
exc = AppError1()
msg = session._message_from_exception(message.Call.MESSAGE_TYPE, 123456, exc)
self.assertEqual(msg.marshal(), [message.Error.MESSAGE_TYPE, message.Call.MESSAGE_TYPE, 123456, {}, "com.myapp.error1"])
|
tests/basic/kwargs2.py | MoonStarCZW/py2rb | 124 | 11067933 | def myfunc(a, b, *c, fuga='hoge', **d):
print(a)
print(b)
for i in c:
print(i)
print(fuga)
keys = list(d.keys())
keys.sort()
for i in keys:
print(i)
print(d[i])
myfunc(1, 2, bar='a', foo='c')
print()
myfunc(1, 2, 3, 4, bar='a', foo='c')
myfunc(1, 2, 3, 4, bar='a', fuga='hogehoge', foo='c')
|
src/model_fns_tf.py | TheodoreGalanos/DALLE-mtf | 385 | 11067941 | import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
from tensorflow.python.tpu import tpu_estimator
from .optimizers import get_optimizer
from .vae_tf import DiscreteVAE
from .utils import scalar_summary, mode_to_str, create_host_call
def vae_model_fn(features, labels, mode, params):
# Build mtf_features & seq length dict for getting number of microbatches
# We need to pack inputs into a dict to pass into serialize_training_step
H = W = params["dataset"]["image_size"] # TODO: check equal
mode_str = mode_to_str(mode)
batch_size = params[f"{mode_str}_batch_size"]
n_channels = params.get("input_channels", 3)
model = DiscreteVAE(
num_tokens=params["num_tokens"],
dim=params["n_embd"],
hidden_dim=params["hidden_dim"],
input_channels=n_channels,
convblocks=params.get("convblocks", [(3, 64), (3, 128), (3, 256)]),
recompute_grad=params.get("recompute_grad", False),
use_bf16=params.get("use_bf16", False),
stack_factor=params.get("stack_factor", 1),
dimensions=H
)
if mode == tf.estimator.ModeKeys.PREDICT:
raise NotImplementedError
train_gumbel = params.get("train_gumbel_hard", True)
eval_gumbel = params.get("eval_gumbel_hard", True)
# We're not predicting, so we better be training or evaluating
assert (mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL)
gumbel = train_gumbel if mode == tf.estimator.ModeKeys.TRAIN else eval_gumbel
if params.get("temp_anneal_steps", None):
warmup_frac = tf.cast(tf.train.get_global_step(), tf.float32) / params["temp_anneal_steps"]
warmup_frac = tf.minimum(warmup_frac, tf.constant(1.0))
temp = params["temp_start"] - warmup_frac * (params["temp_start"] - params["temp"])
else:
temp = params.get("temp", 1.0)
# TODO: add back in microbatching
if params.get("use_bf16", False):
with tf.tpu.bfloat16_scope():
with tf.variable_scope("vae"):
loss, reconstruction = model.forward(features, return_recon_loss=True, temperature=temp, hard_gumbel=gumbel)
loss = tf.cast(loss, tf.float32)
reconstruction = tf.cast(reconstruction, tf.float32)
else:
with tf.variable_scope("vae"):
loss, reconstruction = model.forward(features, return_recon_loss=True, temperature=temp, hard_gumbel=gumbel)
optimizer = tf.train.AdamOptimizer(
learning_rate=params["lr"]
)
optimizer = tf.tpu.CrossShardOptimizer(optimizer)
global_step = tf.train.get_or_create_global_step()
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss, global_step)
def host_call_fn(gs, loss, input, reconstruction):
gs = gs[0]
loss = tf.math.reduce_mean(loss)
denormalize = lambda x: (x + 1) / 2
with tf2.summary.create_file_writer(params['model_path']).as_default():
tf2.summary.scalar('loss', loss, step=gs)
tf2.summary.image('input_image', denormalize(input), step=gs)
tf2.summary.image('reconstruction_image', denormalize(reconstruction), step=gs)
return tf.summary.all_v2_summary_ops()
def metric_fn(gs, loss, input, reconstruction):
gs = gs[0]
loss = tf.math.reduce_mean(loss)
denormalize = lambda x: (x + 1) / 2
with tf2.summary.create_file_writer(params['model_path']).as_default():
loss_op = tf.metrics.mean(loss)
with tf2.summary.record_if(loss_op[0] < tf.constant(1e-9)):
tf2.summary.image('eval/input_image', denormalize(input), step=gs)
tf2.summary.image('eval/reconstruction_image', denormalize(reconstruction), step=gs)
with tf.control_dependencies(tf.summary.all_v2_summary_ops()):
dummy_op = tf.no_op()
return {"_loss": loss_op,
"zzz_dummy": (tf.constant(0), dummy_op)}
# To log the loss, current learning rate, and epoch for Tensorboard, the
# summary op needs to be run on the host CPU via host_call. host_call
# expects [batch_size, ...] Tensors, thus reshape to introduce a batch
# dimension. These Tensors are implicitly concatenated to
# [params['batch_size']].
gs_t = tf.reshape(global_step, [1])
loss_t = tf.reshape(loss, [1])
host_call = (host_call_fn, [gs_t, loss_t, features, reconstruction])
metric = (metric_fn, [gs_t, loss_t, features, reconstruction])
return tpu_estimator.TPUEstimatorSpec(
mode,
loss=loss,
host_call=host_call if mode == tf.estimator.ModeKeys.TRAIN else None,
train_op=train_op,
eval_metrics=metric)
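# Illustrative usage sketch (not part of the original module): vae_model_fn targets
# the TPUEstimator API; the run_config and params key names below are assumptions:
#
#   estimator = tf.estimator.tpu.TPUEstimator(
#       model_fn=vae_model_fn, config=run_config, params=params,
#       train_batch_size=params["train_batch_size"])
#   estimator.train(input_fn, max_steps=params["train_steps"])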
|
preprocess/ljspeech_wavenet.py | ishine/self-attention-tacotron | 111 | 11067946 | # ==============================================================================
# Copyright (c) 2018, Yamagishi Laboratory, National Institute of Informatics
# Author: <NAME> (<EMAIL>)
# All rights reserved.
# ==============================================================================
""" Preprocess for LJSpeech dataset. """
from pyspark import SparkContext, RDD
import numpy as np
import os
from collections import namedtuple
from utils.audio import Audio
class TextAndPath(namedtuple("TextAndPath", ["id", "key", "wav_path", "labels_path", "text"])):
pass
class LJSpeech:
def __init__(self, in_dir, mel_out_dir, wav_out_dir, hparams):
self.in_dir = in_dir
self.mel_out_dir = mel_out_dir
self.wav_out_dir = wav_out_dir
self.audio = Audio(hparams)
@property
def record_ids(self):
return map(lambda v: str(v), range(1, 13101))
def record_file_path(self, record_id, kind):
assert kind in ["source", "target"]
return os.path.join(self.mel_out_dir, f"ljspeech-{kind}-{int(record_id):05d}.tfrecord")
def text_and_path_rdd(self, sc: SparkContext):
return sc.parallelize(
self._extract_all_text_and_path())
def process_wav(self, rdd: RDD):
return rdd.mapValues(self._process_wav)
def _extract_text_and_path(self, line, index):
parts = line.strip().split('|')
key = parts[0]
text = parts[2]
wav_path = os.path.join(self.in_dir, 'wavs', '%s.wav' % key)
return TextAndPath(index, key, wav_path, None, text)
def _extract_all_text_and_path(self):
with open(os.path.join(self.in_dir, 'metadata.csv'), mode='r', encoding='utf-8') as f:
for index, line in enumerate(f):
extracted = self._extract_text_and_path(line, index)
if extracted is not None:
yield (index, extracted)
def _process_wav(self, paths: TextAndPath):
wav = self.audio.load_wav(paths.wav_path)
mel_spectrogram = self.audio.melspectrogram(wav).astype(np.float32).T
mel_spectrogram = self.audio.normalize_mel(mel_spectrogram)
mel_filepath = os.path.join(self.mel_out_dir, f"{paths.key}.mfbsp")
wav_filepath = os.path.join(self.wav_out_dir, f"{paths.key}.wav")
mel_spectrogram.tofile(mel_filepath, format="<f4")
self.audio.save_wav(wav, wav_filepath)
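# Illustrative usage sketch (not part of the original module); directory names and
# hparams are assumptions:
#
#   sc = SparkContext.getOrCreate()
#   ljspeech = LJSpeech("LJSpeech-1.1", "out/mel", "out/wav", hparams)
#   rdd = ljspeech.text_and_path_rdd(sc)
#   ljspeech.process_wav(rdd).count()  # triggers preprocessing of every record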
|
lib/python2.7/site-packages/jwt/algorithms.py | ervinpepic/E-commerce | 674 | 11067947 | <filename>lib/python2.7/site-packages/jwt/algorithms.py
import hashlib
import hmac
from .compat import constant_time_compare, string_types, text_type
from .exceptions import InvalidKeyError
from .utils import der_to_raw_signature, raw_to_der_signature
try:
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.serialization import (
load_pem_private_key, load_pem_public_key, load_ssh_public_key
)
from cryptography.hazmat.primitives.asymmetric.rsa import (
RSAPrivateKey, RSAPublicKey
)
from cryptography.hazmat.primitives.asymmetric.ec import (
EllipticCurvePrivateKey, EllipticCurvePublicKey
)
from cryptography.hazmat.primitives.asymmetric import ec, padding
from cryptography.hazmat.backends import default_backend
from cryptography.exceptions import InvalidSignature
has_crypto = True
except ImportError:
has_crypto = False
def get_default_algorithms():
"""
Returns the algorithms that are implemented by the library.
"""
default_algorithms = {
'none': NoneAlgorithm(),
'HS256': HMACAlgorithm(HMACAlgorithm.SHA256),
'HS384': HMACAlgorithm(HMACAlgorithm.SHA384),
'HS512': HMACAlgorithm(HMACAlgorithm.SHA512)
}
if has_crypto:
default_algorithms.update({
'RS256': RSAAlgorithm(RSAAlgorithm.SHA256),
'RS384': RSAAlgorithm(RSAAlgorithm.SHA384),
'RS512': RSAAlgorithm(RSAAlgorithm.SHA512),
'ES256': ECAlgorithm(ECAlgorithm.SHA256),
'ES384': ECAlgorithm(ECAlgorithm.SHA384),
'ES512': ECAlgorithm(ECAlgorithm.SHA512),
'PS256': RSAPSSAlgorithm(RSAPSSAlgorithm.SHA256),
'PS384': RSAPSSAlgorithm(RSAPSSAlgorithm.SHA384),
'PS512': RSAPSSAlgorithm(RSAPSSAlgorithm.SHA512)
})
return default_algorithms
class Algorithm(object):
"""
The interface for an algorithm used to sign and verify tokens.
"""
def prepare_key(self, key):
"""
Performs necessary validation and conversions on the key and returns
the key value in the proper format for sign() and verify().
"""
raise NotImplementedError
def sign(self, msg, key):
"""
Returns a digital signature for the specified message
using the specified key value.
"""
raise NotImplementedError
def verify(self, msg, key, sig):
"""
Verifies that the specified digital signature is valid
for the specified message and key values.
"""
raise NotImplementedError
class NoneAlgorithm(Algorithm):
"""
Placeholder for use when no signing or verification
operations are required.
"""
def prepare_key(self, key):
if key == '':
key = None
if key is not None:
raise InvalidKeyError('When alg = "none", key value must be None.')
return key
def sign(self, msg, key):
return b''
def verify(self, msg, key, sig):
return False
class HMACAlgorithm(Algorithm):
"""
Performs signing and verification operations using HMAC
and the specified hash function.
"""
SHA256 = hashlib.sha256
SHA384 = hashlib.sha384
SHA512 = hashlib.sha512
def __init__(self, hash_alg):
self.hash_alg = hash_alg
def prepare_key(self, key):
if not isinstance(key, string_types) and not isinstance(key, bytes):
raise TypeError('Expecting a string- or bytes-formatted key.')
if isinstance(key, text_type):
key = key.encode('utf-8')
invalid_strings = [
b'-----BEGIN PUBLIC KEY-----',
b'-----BEGIN CERTIFICATE-----',
b'ssh-rsa'
]
if any([string_value in key for string_value in invalid_strings]):
raise InvalidKeyError(
'The specified key is an asymmetric key or x509 certificate and'
' should not be used as an HMAC secret.')
return key
def sign(self, msg, key):
return hmac.new(key, msg, self.hash_alg).digest()
def verify(self, msg, key, sig):
return constant_time_compare(sig, self.sign(msg, key))
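# Illustrative examples (not part of the original module):
#
#   >>> alg = HMACAlgorithm(HMACAlgorithm.SHA256)
#   >>> key = alg.prepare_key('secret')
#   >>> signature = alg.sign(b'message', key)
#   >>> alg.verify(b'message', key, signature)
#   True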
if has_crypto:
class RSAAlgorithm(Algorithm):
"""
Performs signing and verification operations using
RSASSA-PKCS-v1_5 and the specified hash function.
"""
SHA256 = hashes.SHA256
SHA384 = hashes.SHA384
SHA512 = hashes.SHA512
def __init__(self, hash_alg):
self.hash_alg = hash_alg
def prepare_key(self, key):
if isinstance(key, RSAPrivateKey) or \
isinstance(key, RSAPublicKey):
return key
if isinstance(key, string_types):
if isinstance(key, text_type):
key = key.encode('utf-8')
try:
if key.startswith(b'ssh-rsa'):
key = load_ssh_public_key(key, backend=default_backend())
else:
key = load_pem_private_key(key, password=None, backend=default_backend())
except ValueError:
key = load_pem_public_key(key, backend=default_backend())
else:
raise TypeError('Expecting a PEM-formatted key.')
return key
def sign(self, msg, key):
signer = key.signer(
padding.PKCS1v15(),
self.hash_alg()
)
signer.update(msg)
return signer.finalize()
def verify(self, msg, key, sig):
verifier = key.verifier(
sig,
padding.PKCS1v15(),
self.hash_alg()
)
verifier.update(msg)
try:
verifier.verify()
return True
except InvalidSignature:
return False
class ECAlgorithm(Algorithm):
"""
Performs signing and verification operations using
ECDSA and the specified hash function
"""
SHA256 = hashes.SHA256
SHA384 = hashes.SHA384
SHA512 = hashes.SHA512
def __init__(self, hash_alg):
self.hash_alg = hash_alg
def prepare_key(self, key):
if isinstance(key, EllipticCurvePrivateKey) or \
isinstance(key, EllipticCurvePublicKey):
return key
if isinstance(key, string_types):
if isinstance(key, text_type):
key = key.encode('utf-8')
# Attempt to load key. We don't know if it's
# a Signing Key or a Verifying Key, so we try
# the Verifying Key first.
try:
key = load_pem_public_key(key, backend=default_backend())
except ValueError:
key = load_pem_private_key(key, password=None, backend=default_backend())
else:
raise TypeError('Expecting a PEM-formatted key.')
return key
def sign(self, msg, key):
signer = key.signer(ec.ECDSA(self.hash_alg()))
signer.update(msg)
der_sig = signer.finalize()
return der_to_raw_signature(der_sig, key.curve)
def verify(self, msg, key, sig):
try:
der_sig = raw_to_der_signature(sig, key.curve)
except ValueError:
return False
verifier = key.verifier(der_sig, ec.ECDSA(self.hash_alg()))
verifier.update(msg)
try:
verifier.verify()
return True
except InvalidSignature:
return False
class RSAPSSAlgorithm(RSAAlgorithm):
"""
Performs a signature using RSASSA-PSS with MGF1
"""
def sign(self, msg, key):
signer = key.signer(
padding.PSS(
mgf=padding.MGF1(self.hash_alg()),
salt_length=self.hash_alg.digest_size
),
self.hash_alg()
)
signer.update(msg)
return signer.finalize()
def verify(self, msg, key, sig):
verifier = key.verifier(
sig,
padding.PSS(
mgf=padding.MGF1(self.hash_alg()),
salt_length=self.hash_alg.digest_size
),
self.hash_alg()
)
verifier.update(msg)
try:
verifier.verify()
return True
except InvalidSignature:
return False
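# Illustrative usage sketch (not part of the original module), assuming a
# `cryptography` version compatible with this module; the generated key is for
# demonstration only:
#
#   from cryptography.hazmat.primitives.asymmetric import rsa
#   private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
#                                          backend=default_backend())
#   alg = RSAPSSAlgorithm(RSAPSSAlgorithm.SHA256)
#   sig = alg.sign(b'message', private_key)
#   alg.verify(b'message', private_key.public_key(), sig)  # -> True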
|
Unbias_LightGBM/examples/python-guide/simple_example.py | atul2804/Unbiased_LambdaMart | 212 | 11067950 | # coding: utf-8
# pylint: disable = invalid-name, C0111
import json
import lightgbm as lgb
import pandas as pd
from sklearn.metrics import mean_squared_error
# load or create your dataset
print('Load data...')
df_train = pd.read_csv('../regression/regression.train', header=None, sep='\t')
df_test = pd.read_csv('../regression/regression.test', header=None, sep='\t')
y_train = df_train[0].values
y_test = df_test[0].values
X_train = df_train.drop(0, axis=1).values
X_test = df_test.drop(0, axis=1).values
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
# specify your configurations as a dict
params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': {'l2', 'auc'},
'num_leaves': 31,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'verbose': 0
}
print('Start training...')
# train
gbm = lgb.train(params,
lgb_train,
num_boost_round=20,
valid_sets=lgb_eval,
early_stopping_rounds=5)
print('Save model...')
# save model to file
gbm.save_model('model.txt')
print('Start predicting...')
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
# eval
print('The rmse of prediction is:', mean_squared_error(y_test, y_pred) ** 0.5)
|
third_party/blink/tools/blinkpy/w3c/import_notifier.py | zealoussnow/chromium | 14,668 | 11067955 | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Sends notifications after automatic imports from web-platform-tests (WPT).
Automatically file bugs for new failures caused by WPT imports for opted-in
directories.
Design doc: https://docs.google.com/document/d/1W3V81l94slAC_rPcTKWXgv3YxRxtlSIAxi3yj6NsbBw/edit?usp=sharing
"""
from collections import defaultdict
import logging
import re
from blinkpy.common.net.luci_auth import LuciAuth
from blinkpy.common.path_finder import PathFinder
from blinkpy.w3c.common import WPT_GH_URL
from blinkpy.w3c.directory_owners_extractor import DirectoryOwnersExtractor
from blinkpy.w3c.monorail import MonorailAPI, MonorailIssue
from blinkpy.w3c.wpt_expectations_updater import WPTExpectationsUpdater
from blinkpy.web_tests.port.android import (
PRODUCTS, ANDROID_WEBLAYER)
_log = logging.getLogger(__name__)
GITHUB_COMMIT_PREFIX = WPT_GH_URL + 'commit/'
SHORT_GERRIT_PREFIX = 'https://crrev.com/c/'
class ImportNotifier(object):
def __init__(self, host, chromium_git, local_wpt):
self.host = host
self.git = chromium_git
self.local_wpt = local_wpt
self._monorail_api = MonorailAPI
self.default_port = host.port_factory.get()
self.finder = PathFinder(host.filesystem)
self.owners_extractor = DirectoryOwnersExtractor(host)
self.new_failures_by_directory = defaultdict(list)
self.components_for_product = {ANDROID_WEBLAYER: ["Internals>WebLayer"]}
self.labels_for_product = {
ANDROID_WEBLAYER: ["Project-WebLayer-WebPlatformSupport", "WL-WPT-Compat"]
}
def main(self,
wpt_revision_start,
wpt_revision_end,
rebaselined_tests,
test_expectations,
new_override_expectations,
issue,
patchset,
dry_run=True,
service_account_key_json=None):
"""Files bug reports for new failures.
Args:
wpt_revision_start: The start of the imported WPT revision range
(exclusive), i.e. the last imported revision.
wpt_revision_end: The end of the imported WPT revision range
(inclusive), i.e. the current imported revision.
rebaselined_tests: A list of test names that have been rebaselined.
test_expectations: A dictionary mapping names of tests that cannot
be rebaselined to a list of new test expectation lines.
issue: The issue number of the import CL (a string).
patchset: The patchset number of the import CL (a string).
dry_run: If True, no bugs will be actually filed to crbug.com.
service_account_key_json: The path to a JSON private key of a
service account for accessing Monorail. If None, try to get an
access token from luci-auth.
Note: "test names" are paths of the tests relative to web_tests.
"""
gerrit_url = SHORT_GERRIT_PREFIX + issue
gerrit_url_with_ps = gerrit_url + '/' + patchset + '/'
changed_test_baselines = self.find_changed_baselines_of_tests(
rebaselined_tests)
self.examine_baseline_changes(changed_test_baselines,
gerrit_url_with_ps)
self.examine_new_test_expectations(test_expectations)
bugs = self.create_bugs_from_new_failures(wpt_revision_start,
wpt_revision_end, gerrit_url)
self.file_bugs(bugs, dry_run, service_account_key_json)
for product, expectation_lines in new_override_expectations.items():
bugs = self.create_bugs_for_product(wpt_revision_start,
wpt_revision_end,
gerrit_url,
product,
expectation_lines)
self.file_bugs(bugs, dry_run, service_account_key_json)
def find_changed_baselines_of_tests(self, rebaselined_tests):
"""Finds the corresponding changed baselines of each test.
Args:
rebaselined_tests: A list of test names that have been rebaselined.
Returns:
A dictionary mapping test names to paths of their baselines changed
in this import CL (paths relative to the root of Chromium repo).
"""
test_baselines = {}
changed_files = self.git.changed_files()
for test_name in rebaselined_tests:
test_without_ext, _ = self.host.filesystem.splitext(test_name)
changed_baselines = []
# TODO(robertma): Refactor this into web_tests.port.base.
baseline_name = test_without_ext + '-expected.txt'
for changed_file in changed_files:
if changed_file.endswith(baseline_name):
changed_baselines.append(changed_file)
if changed_baselines:
test_baselines[test_name] = changed_baselines
return test_baselines
def examine_baseline_changes(self, changed_test_baselines,
gerrit_url_with_ps):
"""Examines all changed baselines to find new failures.
Args:
changed_test_baselines: A dictionary mapping test names to paths of
changed baselines.
gerrit_url_with_ps: Gerrit URL of this CL with the patchset number.
"""
for test_name, changed_baselines in changed_test_baselines.items():
directory = self.find_owned_directory(test_name)
if not directory:
_log.warning('Cannot find OWNERS of %s', test_name)
continue
for baseline in changed_baselines:
if self.more_failures_in_baseline(baseline):
self.new_failures_by_directory[directory].append(
TestFailure(
TestFailure.BASELINE_CHANGE,
test_name,
baseline_path=baseline,
gerrit_url_with_ps=gerrit_url_with_ps))
def more_failures_in_baseline(self, baseline):
"""Determines if a testharness.js baseline file has new failures.
The file is assumed to have been modified in the current git checkout,
and so has a diff we can parse.
We recognize two types of failures: FAIL lines, which are output for a
specific subtest failing, and harness errors, which indicate an uncaught
error in the test. Increasing numbers of either are considered new
failures - this includes going from FAIL to error or vice-versa.
"""
diff = self.git.run(['diff', '-U0', 'origin/main', '--', baseline])
delta_failures = 0
delta_harness_errors = 0
for line in diff.splitlines():
if line.startswith('+FAIL'):
delta_failures += 1
if line.startswith('-FAIL'):
delta_failures -= 1
if line.startswith('+Harness Error.'):
delta_harness_errors += 1
if line.startswith('-Harness Error.'):
delta_harness_errors -= 1
return delta_failures > 0 or delta_harness_errors > 0
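    # Illustrative sketch (not part of the original module): for a baseline diff
    # such as the following (subtest names are made up), delta_failures ends up
    # at +1 and delta_harness_errors at 0, so the method returns True:
    #
    #   -PASS subtest-a
    #   +FAIL subtest-a
    #   +FAIL subtest-b
    #   -FAIL subtest-c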
def examine_new_test_expectations(self, test_expectations):
"""Examines new test expectations to find new failures.
Args:
test_expectations: A dictionary mapping names of tests that cannot
be rebaselined to a list of new test expectation lines.
"""
for test_name, expectation_lines in test_expectations.items():
directory = self.find_owned_directory(test_name)
if not directory:
_log.warning('Cannot find OWNERS of %s', test_name)
continue
for expectation_line in expectation_lines:
self.new_failures_by_directory[directory].append(
TestFailure(
TestFailure.NEW_EXPECTATION,
test_name,
expectation_line=expectation_line))
def create_bugs_for_product(self, wpt_revision_start, wpt_revision_end,
gerrit_url, product, expectation_lines):
"""Files bug reports for new failures per product
Args:
wpt_revision_start: The start of the imported WPT revision range
(exclusive), i.e. the last imported revision.
wpt_revision_end: The end of the imported WPT revision range
(inclusive), i.e. the current imported revision.
gerrit_url: Gerrit URL of the CL.
product: the product for which to file bugs for.
expectation_lines: list of new expectations for this product
Return:
A MonorailIssue object that should be filed.
"""
bugs = []
summary = '[WPT] New failures introduced by import {}'.format(gerrit_url)
prologue = ('WPT import {} introduced new failures:\n\n'
'List of new failures:\n'.format(gerrit_url))
failure_list = ''
for _, failure in expectation_lines.items():
failure_list += str(failure) + '\n'
expectations_statement = (
'\nExpectations have been automatically added for '
'the failing results to keep the bots green. Please '
'investigate the new failures and triage as appropriate.\n')
range_statement = '\nThis import contains upstream changes from {} to {}:\n'.format(
wpt_revision_start, wpt_revision_end)
description = (prologue + failure_list + expectations_statement +
range_statement)
bug = MonorailIssue.new_chromium_issue(
summary,
description,
cc=[],
components=self.components_for_product[product],
labels=self.labels_for_product[product])
bugs.append(bug)
return bugs
def create_bugs_from_new_failures(self, wpt_revision_start,
wpt_revision_end, gerrit_url):
"""Files bug reports for new failures.
Args:
wpt_revision_start: The start of the imported WPT revision range
(exclusive), i.e. the last imported revision.
wpt_revision_end: The end of the imported WPT revision range
(inclusive), i.e. the current imported revision.
gerrit_url: Gerrit URL of the CL.
Return:
A list of MonorailIssue objects that should be filed.
"""
imported_commits = self.local_wpt.commits_in_range(
wpt_revision_start, wpt_revision_end)
bugs = []
for directory, failures in self.new_failures_by_directory.items():
summary = '[WPT] New failures introduced in {} by import {}'.format(
directory, gerrit_url)
full_directory = self.host.filesystem.join(
self.finder.web_tests_dir(), directory)
owners_file = self.host.filesystem.join(full_directory, 'OWNERS')
metadata_file = self.host.filesystem.join(full_directory,
'DIR_METADATA')
is_wpt_notify_enabled = False
try:
is_wpt_notify_enabled = self.owners_extractor.is_wpt_notify_enabled(
metadata_file)
except KeyError:
_log.info('KeyError when parsing %s' % metadata_file)
if not is_wpt_notify_enabled:
_log.info("WPT-NOTIFY disabled in %s." % full_directory)
continue
owners = self.owners_extractor.extract_owners(owners_file)
# owners may be empty but not None.
cc = owners
component = self.owners_extractor.extract_component(metadata_file)
# component could be None.
components = [component] if component else None
prologue = ('WPT import {} introduced new failures in {}:\n\n'
'List of new failures:\n'.format(
gerrit_url, directory))
failure_list = ''
for failure in failures:
failure_list += str(failure) + '\n'
expectations_statement = (
'\nExpectations or baseline files [0] have been automatically '
'added for the failing results to keep the bots green. Please '
'investigate the new failures and triage as appropriate.\n')
range_statement = '\nThis import contains upstream changes from {} to {}:\n'.format(
wpt_revision_start, wpt_revision_end)
commit_list = self.format_commit_list(imported_commits,
full_directory)
links_list = '\n[0]: https://chromium.googlesource.com/chromium/src/+/HEAD/docs/testing/web_test_expectations.md\n'
description = (prologue + failure_list + expectations_statement +
range_statement + commit_list + links_list)
bug = MonorailIssue.new_chromium_issue(summary,
description,
cc,
components,
labels=['Test-WebTest'])
_log.info(bug)
_log.info("WPT-NOTIFY enabled in %s; adding the bug to the pending list." % full_directory)
bugs.append(bug)
return bugs
def format_commit_list(self, imported_commits, directory):
"""Formats the list of imported WPT commits.
Imports affecting the given directory will be highlighted.
Args:
imported_commits: A list of (SHA, commit subject) pairs.
directory: An absolute path of a directory in the Chromium repo, for
which the list is formatted.
Returns:
A multi-line string.
"""
path_from_wpt = self.host.filesystem.relpath(
directory, self.finder.path_from_web_tests('external', 'wpt'))
commit_list = ''
for sha, subject in imported_commits:
# subject is a Unicode string and can contain non-ASCII characters.
line = u'{}: {}'.format(subject, GITHUB_COMMIT_PREFIX + sha)
if self.local_wpt.is_commit_affecting_directory(
sha, path_from_wpt):
line += ' [affecting this directory]'
commit_list += line + '\n'
return commit_list
def find_owned_directory(self, test_name):
"""Finds the lowest directory that contains the test and has OWNERS.
Args:
The name of the test (a path relative to web_tests).
Returns:
The path of the found directory relative to web_tests.
"""
# Always use non-virtual test names when looking up OWNERS.
if self.default_port.lookup_virtual_test_base(test_name):
test_name = self.default_port.lookup_virtual_test_base(test_name)
# find_owners_file takes either a relative path from the *root* of the
# repository, or an absolute path.
abs_test_path = self.finder.path_from_web_tests(test_name)
owners_file = self.owners_extractor.find_owners_file(
self.host.filesystem.dirname(abs_test_path))
if not owners_file:
return None
owned_directory = self.host.filesystem.dirname(owners_file)
short_directory = self.host.filesystem.relpath(
owned_directory, self.finder.web_tests_dir())
return short_directory
def file_bugs(self, bugs, dry_run, service_account_key_json=None):
"""Files a list of bugs to Monorail.
Args:
bugs: A list of MonorailIssue objects.
dry_run: A boolean, whether we are in dry run mode.
service_account_key_json: Optional, see docs for main().
"""
# TODO(robertma): Better error handling in this method.
if dry_run:
_log.info(
'[dry_run] Would have filed the %d bugs in the pending list.',
len(bugs))
return
_log.info('Filing %d bugs in the pending list to Monorail', len(bugs))
api = self._get_monorail_api(service_account_key_json)
for index, bug in enumerate(bugs, start=1):
response = api.insert_issue(bug)
_log.info('[%d] Filed bug: %s', index,
MonorailIssue.crbug_link(response['id']))
def _get_monorail_api(self, service_account_key_json):
if service_account_key_json:
return self._monorail_api(
service_account_key_json=service_account_key_json)
token = LuciAuth(self.host).get_access_token()
return self._monorail_api(access_token=token)
class TestFailure(object):
"""A simple abstraction of a new test failure for the notifier."""
# Failure types:
BASELINE_CHANGE = 1
NEW_EXPECTATION = 2
def __init__(self,
failure_type,
test_name,
expectation_line='',
baseline_path='',
gerrit_url_with_ps=''):
if failure_type == self.BASELINE_CHANGE:
assert baseline_path and gerrit_url_with_ps
else:
assert failure_type == self.NEW_EXPECTATION
assert expectation_line
self.failure_type = failure_type
self.test_name = test_name
self.expectation_line = expectation_line
self.baseline_path = baseline_path
self.gerrit_url_with_ps = gerrit_url_with_ps
def __str__(self):
if self.failure_type == self.BASELINE_CHANGE:
return self._format_baseline_change()
else:
return self._format_new_expectation()
def __eq__(self, other):
return (self.failure_type == other.failure_type
and self.test_name == other.test_name
and self.expectation_line == other.expectation_line
and self.baseline_path == other.baseline_path
and self.gerrit_url_with_ps == other.gerrit_url_with_ps)
def _format_baseline_change(self):
assert self.failure_type == self.BASELINE_CHANGE
result = ''
# TODO(robertma): Is there any better way than using regexp?
platform = re.search(r'/platform/([^/]+)/', self.baseline_path)
if platform:
result += '[ {} ] '.format(platform.group(1).capitalize())
result += '{} new failing tests: {}{}'.format(
self.test_name, self.gerrit_url_with_ps, self.baseline_path)
return result
def _format_new_expectation(self):
assert self.failure_type == self.NEW_EXPECTATION
# TODO(robertma): Are there saner ways to remove the link to the umbrella bug?
line = self.expectation_line
if line.startswith(WPTExpectationsUpdater.UMBRELLA_BUG):
line = line[len(WPTExpectationsUpdater.UMBRELLA_BUG):].lstrip()
return line
|
tests/utils.py | Patil2099/tangent | 2,379 | 11067960 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common testing utilities."""
from copy import deepcopy
from autograd import grad as ag_grad
from autograd import value_and_grad as ag_value_and_grad
from autograd.misc.flatten import flatten
import autograd.numpy as ag_np
import numpy as np
import tangent
# Autograd's NumPy implementation may be missing the definition for _NoValue.
if not hasattr(ag_np, '_NoValue'):
ag_np._NoValue = np._NoValue # pylint: disable=protected-access
def assert_forward_not_implemented(func, wrt):
try:
tangent.autodiff(func, mode='forward', preserve_result=False, wrt=wrt)
assert False, 'Remove this when implementing.'
except NotImplementedError:
pass
def _assert_allclose(a, b, tol=1e-5):
if isinstance(a, (tuple, list)) and isinstance(b, (tuple, list)):
for ia, ib in zip(a, b):
_assert_allclose(ia, ib, tol)
else:
try:
a = np.nan_to_num(a)
b = np.nan_to_num(b)
assert np.allclose(a, b, tol), ('Expected: %s\nGot: %s' % (b, a))
except TypeError:
raise TypeError('Could not compare values %s and %s' % (a, b))
def assert_result_matches_reference(
tangent_func,
reference_func,
backup_reference_func,
tolerance=1e-7):
"""Test Tangent functionality against reference implementation.
Args:
tangent_func: Returns the Tangent derivative.
reference_func: Returns the derivative calculated by the reference
implementation.
backup_reference_func: Returns the derivative calculated by a catch-all
implementation, should the reference be unavailable.
tolerance: Absolute tolerance override for FP comparisons.
"""
tangent_value = tangent_func()
try:
reference_value = reference_func()
except (ImportError, TypeError) as e:
if __debug__:
print('WARNING: Reference function call failed. The test will revert to '
'the backup reference.\nReason for failure: %s' % e)
# TODO: Try to narrow the exception handler.
reference_value = backup_reference_func()
_assert_allclose(tangent_value, reference_value, tolerance)
def numeric_grad(func, eps=1e-6):
"""Generate a finite-differences gradient of function `f`.
def f(x, *args):
...
return scalar
g = numeric_grad(f, eps=1e-4)
finite_difference_grad_of_x = g(x, *args)
Adapted from github.com/hips/autograd
"""
def g(x, *args):
fd_grad, unflatten_fd = flatten(tangent.init_grad(x))
y = func(deepcopy(x), *args)
seed = np.ones_like(y)
for d in range(fd_grad.size):
x_flat, unflatten_x = flatten(deepcopy(x))
x_flat[d] += eps / 2
a = np.array(func(unflatten_x(x_flat), *args))
x_flat, unflatten_x = flatten(deepcopy(x))
x_flat[d] -= eps / 2
b = np.array(func(unflatten_x(x_flat), *args))
fd_grad[d] = np.dot((a - b) / eps, seed)
return unflatten_fd(fd_grad)
return g
def test_reverse_array(func, motion, optimized, preserve_result, *args):
"""Test gradients of functions with NumPy-compatible signatures."""
def tangent_func():
y = func(*deepcopy(args))
if np.array(y).size > 1:
init_grad = np.ones_like(y)
else:
init_grad = 1
func.__globals__['np'] = np
df = tangent.autodiff(
func,
mode='reverse',
motion=motion,
optimized=optimized,
preserve_result=preserve_result,
verbose=1)
if motion == 'joint':
return df(*deepcopy(args) + (init_grad,))
return df(*deepcopy(args), init_grad=init_grad)
def reference_func():
func.__globals__['np'] = ag_np
if preserve_result:
val, gradval = ag_value_and_grad(func)(*deepcopy(args))
return gradval, val
else:
return ag_grad(func)(*deepcopy(args))
def backup_reference_func():
func.__globals__['np'] = np
df_num = numeric_grad(func)
gradval = df_num(*deepcopy(args))
if preserve_result:
val = func(*deepcopy(args))
return gradval, val
else:
return gradval
assert_result_matches_reference(tangent_func, reference_func,
backup_reference_func)
def test_forward_array(func, wrt, preserve_result, *args):
"""Test derivatives of functions with NumPy-compatible signatures."""
def tangent_func():
func.__globals__['np'] = np
df = tangent.autodiff(
func,
mode='forward',
preserve_result=preserve_result,
wrt=wrt,
optimized=True,
verbose=1)
args_ = args + (1.0,) # seed gradient
return df(*deepcopy(args_))
def reference_func():
func.__globals__['np'] = ag_np
if preserve_result:
# Note: ag_value_and_grad returns (val, grad) but we need (grad, val)
val, gradval = ag_value_and_grad(func)(*deepcopy(args))
return gradval, val
else:
return ag_grad(func)(*deepcopy(args))
def backup_reference_func():
func.__globals__['np'] = np
df_num = numeric_grad(func)
gradval = df_num(*deepcopy(args))
if preserve_result:
val = func(*deepcopy(args))
return gradval, val
else:
return gradval
assert_result_matches_reference(tangent_func, reference_func,
backup_reference_func)
|
tests/test_record.py | fshart/aesara | 111 | 11067966 | from io import StringIO
from aesara import function
from aesara.tensor.type import iscalar
from tests.record import MismatchError, Record, RecordMode
def test_record_good():
# Tests that when we record a sequence of events, then
# repeat it exactly, the Record class:
# 1) Records it correctly
# 2) Does not raise any errors
# Record a sequence of events
output = StringIO()
recorder = Record(file_object=output, replay=False)
num_lines = 10
for i in range(num_lines):
recorder.handle_line(str(i) + "\n")
# Make sure they were recorded correctly
output_value = output.getvalue()
assert output_value == "".join(str(i) + "\n" for i in range(num_lines))
# Make sure that the playback functionality doesn't raise any errors
# when we repeat them
output = StringIO(output_value)
playback_checker = Record(file_object=output, replay=True)
for i in range(num_lines):
playback_checker.handle_line(str(i) + "\n")
def test_record_bad():
# Tests that when we record a sequence of events, then
# do something different on playback, the Record class catches it.
# Record a sequence of events
output = StringIO()
recorder = Record(file_object=output, replay=False)
num_lines = 10
for i in range(num_lines):
recorder.handle_line(str(i) + "\n")
# Make sure that the playback functionality doesn't raise any errors
# when we repeat some of them
output_value = output.getvalue()
output = StringIO(output_value)
playback_checker = Record(file_object=output, replay=True)
for i in range(num_lines // 2):
playback_checker.handle_line(str(i) + "\n")
# Make sure it raises an error when we deviate from the recorded sequence
try:
playback_checker.handle_line("0\n")
except MismatchError:
return
raise AssertionError(
"Failed to detect mismatch between recorded sequence " " and repetition of it."
)
def test_record_mode_good():
# Like test_record_good, but some events are recorded by the
# aesara RecordMode. We don't attempt to check the
# exact string value of the record in this case.
# Record a sequence of events
output = StringIO()
recorder = Record(file_object=output, replay=False)
record_mode = RecordMode(recorder)
i = iscalar()
f = function([i], i, mode=record_mode, name="f")
num_lines = 10
for i in range(num_lines):
recorder.handle_line(str(i) + "\n")
f(i)
# Make sure that the playback functionality doesn't raise any errors
# when we repeat them
output_value = output.getvalue()
output = StringIO(output_value)
playback_checker = Record(file_object=output, replay=True)
playback_mode = RecordMode(playback_checker)
i = iscalar()
f = function([i], i, mode=playback_mode, name="f")
for i in range(num_lines):
playback_checker.handle_line(str(i) + "\n")
f(i)
def test_record_mode_bad():
# Like test_record_bad, but some events are recorded by the
# aesara RecordMode, as is the event that triggers the mismatch
# error.
# Record a sequence of events
output = StringIO()
recorder = Record(file_object=output, replay=False)
record_mode = RecordMode(recorder)
i = iscalar()
f = function([i], i, mode=record_mode, name="f")
num_lines = 10
for i in range(num_lines):
recorder.handle_line(str(i) + "\n")
f(i)
# Make sure that the playback functionality doesn't raise any errors
# when we repeat them
output_value = output.getvalue()
output = StringIO(output_value)
playback_checker = Record(file_object=output, replay=True)
playback_mode = RecordMode(playback_checker)
i = iscalar()
f = function([i], i, mode=playback_mode, name="f")
for i in range(num_lines // 2):
playback_checker.handle_line(str(i) + "\n")
f(i)
# Make sure a wrong event causes a MismatchError
try:
f(0)
except MismatchError:
return
raise AssertionError("Failed to detect a mismatch.")
|
LogKeyModel_predict.py | HankKung/DeepLog | 268 | 11067968 | import torch
import torch.nn as nn
import time
import argparse
# Device configuration
device = torch.device("cpu")
def generate(name):
    # If you want to replicate the DeepLog paper results (actually, I have obtained a better result than the paper),
    # you should use a 'list' rather than a 'set' to obtain the full dataset; I use 'set' here just for testing and acceleration.
hdfs = set()
# hdfs = []
with open('data/' + name, 'r') as f:
for ln in f.readlines():
ln = list(map(lambda n: n - 1, map(int, ln.strip().split())))
ln = ln + [-1] * (window_size + 1 - len(ln))
hdfs.add(tuple(ln))
# hdfs.append(tuple(ln))
print('Number of sessions({}): {}'.format(name, len(hdfs)))
return hdfs
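# Illustrative example (hypothetical log keys): with window_size = 10, a line
# "6 6 23 12" becomes zero-based keys [5, 5, 22, 11] and is padded with -1 up
# to window_size + 1 entries:
#     (5, 5, 22, 11, -1, -1, -1, -1, -1, -1, -1)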
class Model(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, num_keys):
super(Model, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
self.fc = nn.Linear(hidden_size, num_keys)
def forward(self, x):
h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
out, _ = self.lstm(x, (h0, c0))
out = self.fc(out[:, -1, :])
return out
if __name__ == '__main__':
# Hyperparameters
num_classes = 28
input_size = 1
model_path = 'model/Adam_batch_size=2048_epoch=300.pt'
parser = argparse.ArgumentParser()
parser.add_argument('-num_layers', default=2, type=int)
parser.add_argument('-hidden_size', default=64, type=int)
parser.add_argument('-window_size', default=10, type=int)
parser.add_argument('-num_candidates', default=9, type=int)
args = parser.parse_args()
num_layers = args.num_layers
hidden_size = args.hidden_size
window_size = args.window_size
num_candidates = args.num_candidates
model = Model(input_size, hidden_size, num_layers, num_classes).to(device)
model.load_state_dict(torch.load(model_path))
model.eval()
print('model_path: {}'.format(model_path))
test_normal_loader = generate('hdfs_test_normal')
test_abnormal_loader = generate('hdfs_test_abnormal')
TP = 0
FP = 0
# Test the model
start_time = time.time()
with torch.no_grad():
for line in test_normal_loader:
for i in range(len(line) - window_size):
seq = line[i:i + window_size]
label = line[i + window_size]
seq = torch.tensor(seq, dtype=torch.float).view(-1, window_size, input_size).to(device)
label = torch.tensor(label).view(-1).to(device)
output = model(seq)
predicted = torch.argsort(output, 1)[0][-num_candidates:]
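                # DeepLog's detection rule: a session is flagged as anomalous as
                # soon as the true next key is not among the top num_candidates
                # (top-g) predictions; on normal data that counts as a false positive.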
if label not in predicted:
FP += 1
break
with torch.no_grad():
for line in test_abnormal_loader:
for i in range(len(line) - window_size):
seq = line[i:i + window_size]
label = line[i + window_size]
seq = torch.tensor(seq, dtype=torch.float).view(-1, window_size, input_size).to(device)
label = torch.tensor(label).view(-1).to(device)
output = model(seq)
predicted = torch.argsort(output, 1)[0][-num_candidates:]
if label not in predicted:
TP += 1
break
elapsed_time = time.time() - start_time
print('elapsed_time: {:.3f}s'.format(elapsed_time))
# Compute precision, recall and F1-measure
FN = len(test_abnormal_loader) - TP
P = 100 * TP / (TP + FP)
R = 100 * TP / (TP + FN)
F1 = 2 * P * R / (P + R)
print('false positive (FP): {}, false negative (FN): {}, Precision: {:.3f}%, Recall: {:.3f}%, F1-measure: {:.3f}%'.format(FP, FN, P, R, F1))
print('Finished Predicting')
|
src/test/pythonFiles/folding/attach_server.py | ChaseKnowlden/vscode-jupyter | 615 | 11067982 | <reponame>ChaseKnowlden/vscode-jupyter
# Python Tools for Visual Studio
# Copyright(c) Microsoft Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
__author__ = "Microsoft Corporation <<EMAIL>>"
__version__ = "3.0.0.0"
__all__ = ['enable_attach', 'wait_for_attach', 'break_into_debugger', 'settrace', 'is_attached', 'AttachAlreadyEnabledError']
import atexit
import getpass
import os
import os.path
import platform
import socket
import struct
import sys
import threading
try:
import thread
except ImportError:
import _thread as thread
try:
import ssl
except ImportError:
ssl = None
import ptvsd.visualstudio_py_debugger as vspd
import ptvsd.visualstudio_py_repl as vspr
from ptvsd.visualstudio_py_util import to_bytes, read_bytes, read_int, read_string, write_bytes, write_int, write_string
# The server (i.e. the Python app) waits on a TCP port provided. Whenever anything connects to that port,
# it immediately sends the octet sequence 'PTVSDBG', followed by version number represented as int64,
# and then waits for the client to respond with the same exact byte sequence. After signatures are thereby
# exchanged and found to match, the client is expected to provide a string secret (in the usual debugger
# string format, None/ASCII/Unicode prefix + length + data), which can be an empty string to designate the
# lack of a specified secret.
#
# If the secret does not match the one expected by the server, it responds with 'RJCT', and then closes
# the connection. Otherwise, the server responds with 'ACPT', and awaits a 4-octet command. The following
# commands are recognized:
#
# 'INFO'
# Report information about the process. The server responds with the following information, in order:
# - Process ID (int64)
# - Executable name (string)
# - User name (string)
# - Implementation name (string)
# and then immediately closes connection. Note, all string fields can be empty or null strings.
#
# 'ATCH'
# Attach debugger to the process. If successful, the server responds with 'ACPT', followed by process ID
# (int64), and then the Python language version that the server is running represented by three int64s -
# major, minor, micro; From there on the socket is assumed to be using the normal PTVS debugging protocol.
# If attaching was not successful (which can happen if some other debugger is already attached), the server
# responds with 'RJCT' and closes the connection.
#
# 'REPL'
# Attach REPL to the process. If successful, the server responds with 'ACPT', and from there on the socket
# is assumed to be using the normal PTVS REPL protocol. If not successful (which can happen if there is
# no debugger attached), the server responds with 'RJCT' and closes the connection.
PTVS_VER = '2.2'
DEFAULT_PORT = 5678
PTVSDBG_VER = 6 # must be kept in sync with DebuggerProtocolVersion in PythonRemoteProcess.cs
PTVSDBG = to_bytes('PTVSDBG')
ACPT = to_bytes('ACPT')
RJCT = to_bytes('RJCT')
INFO = to_bytes('INFO')
ATCH = to_bytes('ATCH')
REPL = to_bytes('REPL')
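# Illustrative client-side sketch of the wire protocol described above. This
# helper is not part of the original module and is never called here; the
# host/port/secret defaults are placeholders, and the read_*/write_* helper
# signatures are assumed from their use further below.
def _example_attach_handshake(host='127.0.0.1', port=DEFAULT_PORT, secret=''):
    sock = socket.create_connection((host, port))
    try:
        # The server announces its signature and protocol version first.
        if read_bytes(sock, 7) != PTVSDBG or read_int(sock) != PTVSDBG_VER:
            raise RuntimeError('not a PTVS debug server')
        # The client echoes the signature/version back and supplies the secret.
        write_bytes(sock, PTVSDBG)
        write_int(sock, PTVSDBG_VER)
        write_string(sock, secret)
        if read_bytes(sock, 4) != ACPT:
            raise RuntimeError('rejected by server (wrong secret?)')
        # Ask for process information ('INFO' command) and read the reply fields.
        write_bytes(sock, INFO)
        pid = read_int(sock)
        exe = read_string(sock)
        user = read_string(sock)
        version = read_string(sock)
        return pid, exe, user, version
    finally:
        sock.close()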
PY_ROOT = os.path.normcase(__file__)
while os.path.basename(PY_ROOT) != 'pythonFiles':
PY_ROOT = os.path.dirname(PY_ROOT)
_attach_enabled = False
_attached = threading.Event()
class AttachAlreadyEnabledError(Exception):
"""`ptvsd.enable_attach` has already been called in this process."""
def enable_attach(secret, address = ('0.0.0.0', DEFAULT_PORT), certfile = None, keyfile = None, redirect_output = True):
"""Enables Python Tools for Visual Studio to attach to this process remotely
to debug Python code.
Parameters
----------
secret : str
Used to validate the clients - only those clients providing the valid
secret will be allowed to connect to this server. On client side, the
secret is prepended to the Qualifier string, separated from the
hostname by ``'@'``, e.g.: ``'<EMAIL>:5678'``. If
secret is ``None``, there's no validation, and any client can connect
freely.
address : (str, int), optional
Specifies the interface and port on which the debugging server should
listen for TCP connections. It is in the same format as used for
regular sockets of the `socket.AF_INET` family, i.e. a tuple of
``(hostname, port)``. On client side, the server is identified by the
Qualifier string in the usual ``'hostname:port'`` format, e.g.:
``'myhost.cloudapp.net:5678'``. Default is ``('0.0.0.0', 5678)``.
certfile : str, optional
Used to enable SSL. If not specified, or if set to ``None``, the
connection between this program and the debugger will be unsecure,
and can be intercepted on the wire. If specified, the meaning of this
parameter is the same as for `ssl.wrap_socket`.
keyfile : str, optional
Used together with `certfile` when SSL is enabled. Its meaning is the
same as for ``ssl.wrap_socket``.
redirect_output : bool, optional
Specifies whether any output (on both `stdout` and `stderr`) produced
by this program should be sent to the debugger. Default is ``True``.
Notes
-----
This function returns immediately after setting up the debugging server,
and does not block program execution. If you need to block until debugger
is attached, call `ptvsd.wait_for_attach`. The debugger can be detached
and re-attached multiple times after `enable_attach` is called.
This function can only be called once during the lifetime of the process.
On a second call, `AttachAlreadyEnabledError` is raised. In circumstances
where the caller does not control how many times the function will be
called (e.g. when a script with a single call is run more than once by
a hosting app or framework), the call should be wrapped in ``try..except``.
Only the thread on which this function is called, and any threads that are
created after it returns, will be visible in the debugger once it is
attached. Any threads that are already running before this function is
called will not be visible.
"""
if not ssl and (certfile or keyfile):
raise ValueError('could not import the ssl module - SSL is not supported on this version of Python')
if sys.platform == 'cli':
# Check that IronPython was launched with -X:Frames and -X:Tracing, since we can't register our trace
# func on the thread that calls enable_attach otherwise
import clr
x_tracing = clr.GetCurrentRuntime().GetLanguageByExtension('py').Options.Tracing
x_frames = clr.GetCurrentRuntime().GetLanguageByExtension('py').Options.Frames
if not x_tracing or not x_frames:
raise RuntimeError('IronPython must be started with -X:Tracing and -X:Frames options to support PTVS remote debugging.')
global _attach_enabled
if _attach_enabled:
raise AttachAlreadyEnabledError('ptvsd.enable_attach() has already been called in this process.')
_attach_enabled = True
atexit.register(vspd.detach_process_and_notify_debugger)
server = socket.socket(proto=socket.IPPROTO_TCP)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(address)
server.listen(1)
def server_thread_func():
while True:
client = None
raw_client = None
try:
client, addr = server.accept()
if certfile:
client = ssl.wrap_socket(client, server_side = True, ssl_version = ssl.PROTOCOL_TLSv1, certfile = certfile, keyfile = keyfile)
write_bytes(client, PTVSDBG)
write_int(client, PTVSDBG_VER)
response = read_bytes(client, 7)
if response != PTVSDBG:
continue
dbg_ver = read_int(client)
if dbg_ver != PTVSDBG_VER:
continue
client_secret = read_string(client)
if secret is None or secret == client_secret:
write_bytes(client, ACPT)
else:
write_bytes(client, RJCT)
continue
response = read_bytes(client, 4)
if response == INFO:
try:
pid = os.getpid()
except AttributeError:
pid = 0
write_int(client, pid)
exe = sys.executable or ''
write_string(client, exe)
try:
username = getpass.getuser()
except AttributeError:
username = ''
write_string(client, username)
try:
impl = platform.python_implementation()
except AttributeError:
try:
impl = sys.implementation.name
except AttributeError:
impl = 'Python'
major, minor, micro, release_level, serial = sys.version_info
os_and_arch = platform.system()
if os_and_arch == "":
os_and_arch = sys.platform
try:
if sys.maxsize > 2**32:
os_and_arch += ' 64-bit'
else:
os_and_arch += ' 32-bit'
except AttributeError:
pass
version = '%s %s.%s.%s (%s)' % (impl, major, minor, micro, os_and_arch)
write_string(client, version)
# Don't just drop the connection - let the debugger close it after it finishes reading.
client.recv(1)
elif response == ATCH:
debug_options = vspd.parse_debug_options(read_string(client))
debug_options.setdefault('rules', []).append({
'path': PY_ROOT,
'include': False,
})
if redirect_output:
debug_options.add('RedirectOutput')
if vspd.DETACHED:
write_bytes(client, ACPT)
try:
pid = os.getpid()
except AttributeError:
pid = 0
write_int(client, pid)
major, minor, micro, release_level, serial = sys.version_info
write_int(client, major)
write_int(client, minor)
write_int(client, micro)
vspd.attach_process_from_socket(client, debug_options, report = True)
vspd.mark_all_threads_for_break(vspd.STEPPING_ATTACH_BREAK)
_attached.set()
client = None
else:
write_bytes(client, RJCT)
elif response == REPL:
if not vspd.DETACHED:
write_bytes(client, ACPT)
vspd.connect_repl_using_socket(client)
client = None
else:
write_bytes(client, RJCT)
except (socket.error, OSError):
pass
finally:
if client is not None:
client.close()
server_thread = threading.Thread(target = server_thread_func)
server_thread.setDaemon(True)
server_thread.start()
frames = []
f = sys._getframe()
while True:
f = f.f_back
if f is None:
break
frames.append(f)
frames.reverse()
cur_thread = vspd.new_thread()
for f in frames:
cur_thread.push_frame(f)
def replace_trace_func():
for f in frames:
f.f_trace = cur_thread.trace_func
replace_trace_func()
sys.settrace(cur_thread.trace_func)
vspd.intercept_threads(for_attach = True)
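# Illustrative usage sketch (not executed here; the secret, port and timeout
# values are made up):
#
#     import ptvsd
#     ptvsd.enable_attach(secret='my_secret', address=('0.0.0.0', 5678))
#     ptvsd.wait_for_attach(timeout=30)    # optionally block until VS attaches
#     ...
#     ptvsd.break_into_debugger()          # programmatic breakpoint once attached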
# Alias for convenience of users of pydevd
settrace = enable_attach
def wait_for_attach(timeout = None):
"""If a PTVS remote debugger is attached, returns immediately. Otherwise,
blocks until a remote debugger attaches to this process, or until the
optional timeout occurs.
Parameters
----------
timeout : float, optional
The timeout for the operation in seconds (or fractions thereof).
"""
if vspd.DETACHED:
_attached.clear()
_attached.wait(timeout)
def break_into_debugger():
"""If a PTVS remote debugger is attached, pauses execution of all threads,
and breaks into the debugger with current thread as active.
"""
if not vspd.DETACHED:
vspd.SEND_BREAK_COMPLETE = thread.get_ident()
vspd.mark_all_threads_for_break()
def is_attached():
"""Returns ``True`` if debugger is attached, ``False`` otherwise."""
return not vspd.DETACHED
|
test/demo_bindings.py | hujiawei-sjtu/sdf_tools | 159 | 11067991 | <reponame>hujiawei-sjtu/sdf_tools<filename>test/demo_bindings.py
#! /usr/bin/env python
import numpy as np
from time import time
import matplotlib.pyplot as plt
from sdf_tools import utils_2d
def main():
res = 0.01
x_width = 100
y_height = 100
grid_world = np.zeros([y_height, x_width], dtype=np.uint8)
grid_world[0:10, 0:10] = 1
grid_world[90:100, 90:100] = 1
grid_world[20:30, 40:50] = 1
center_x = 0
center_y = 0
sdf_origin = [center_x - x_width / 2, center_y - y_height / 2]
t0 = time()
sdf, sdf_gradient = utils_2d.compute_sdf_and_gradient(grid_world, res, sdf_origin)
dt = time() - t0
print('time: {}s'.format(dt))
plt.figure()
plt.title("SDF")
plt.imshow(np.flipud(sdf))
plt.figure()
plt.title("Gradient")
xx, yy = np.meshgrid(range(x_width), range(y_height))
plt.quiver(xx, yy, sdf_gradient[:, :, 0], sdf_gradient[:, :, 1])
plt.axis("equal")
plt.show()
if __name__ == '__main__':
main()
|
src/products/admin.py | pv45412/Try-Django | 1,110 | 11067993 | <reponame>pv45412/Try-Django
from django.contrib import admin
from .models import Product
admin.site.register(Product) |
braintree/address_gateway.py | futureironman/braintree_python | 182 | 11068009 | import re
import braintree
from braintree.address import Address
from braintree.error_result import ErrorResult
from braintree.exceptions.not_found_error import NotFoundError
from braintree.resource import Resource
from braintree.successful_result import SuccessfulResult
class AddressGateway(object):
def __init__(self, gateway):
self.gateway = gateway
self.config = gateway.config
def create(self, params=None):
if params is None:
params = {}
Resource.verify_keys(params, Address.create_signature())
if "customer_id" not in params:
raise KeyError("customer_id must be provided")
if not re.search(r"\A[0-9A-Za-z_-]+\Z", params["customer_id"]):
raise KeyError("customer_id contains invalid characters")
response = self.config.http().post(self.config.base_merchant_path() + "/customers/" + params.pop("customer_id") + "/addresses", {"address": params})
if "address" in response:
return SuccessfulResult({"address": Address(self.gateway, response["address"])})
elif "api_error_response" in response:
return ErrorResult(self.gateway, response["api_error_response"])
def delete(self, customer_id, address_id):
self.config.http().delete(self.config.base_merchant_path() + "/customers/" + customer_id + "/addresses/" + address_id)
return SuccessfulResult()
def find(self, customer_id, address_id):
try:
if customer_id is None or customer_id.strip() == "" or address_id is None or address_id.strip() == "":
raise NotFoundError()
response = self.config.http().get(self.config.base_merchant_path() + "/customers/" + customer_id + "/addresses/" + address_id)
return Address(self.gateway, response["address"])
except NotFoundError:
raise NotFoundError("address for customer " + repr(customer_id) + " with id " + repr(address_id) + " not found")
def update(self, customer_id, address_id, params=None):
if params is None:
params = {}
Resource.verify_keys(params, Address.update_signature())
response = self.config.http().put(
self.config.base_merchant_path() + "/customers/" + customer_id + "/addresses/" + address_id,
{"address": params}
)
if "address" in response:
return SuccessfulResult({"address": Address(self.gateway, response["address"])})
elif "api_error_response" in response:
return ErrorResult(self.gateway, response["api_error_response"])
|
onnxruntime/test/testdata/transform/fusion/bias_softmax_gen.py | jamill/onnxruntime | 669 | 11068017 | import onnx
from onnx import OperatorSetIdProto, TensorProto, helper
add = helper.make_node("Add", ["input", "bias"], ["add_out"], "add")
reverseadd = helper.make_node("Add", ["bias", "input"], ["add_out"], "add")
softmax1 = helper.make_node("Softmax", ["add_out"], ["output"], "softmax", axis=1)
softmax3 = helper.make_node("Softmax", ["add_out"], ["output"], "softmax", axis=3)
softmax6 = helper.make_node("Softmax", ["add_out"], ["output"], "softmax", axis=6)
softmax_no_axis = helper.make_node("Softmax", ["add_out"], ["output"], "softmax")
onnxdomain = OperatorSetIdProto()
onnxdomain.version = 13
# The empty string ("") or absence of this field implies the operator set that is defined as part of the ONNX specification.
onnxdomain.domain = ""
msdomain = OperatorSetIdProto()
msdomain.version = 1
msdomain.domain = "com.microsoft"
opsets = [onnxdomain, msdomain]
onnx.save(
helper.make_model(
helper.make_graph(
[add, softmax_no_axis],
"Add_Softmax_Fusion",
[
helper.make_tensor_value_info("input", TensorProto.FLOAT, ["d_1", "d_2"]),
helper.make_tensor_value_info("bias", TensorProto.FLOAT, ["d_1", "d_2"]),
],
[
helper.make_tensor_value_info("output", TensorProto.FLOAT, ["d_1", "d_2"]),
],
[],
),
opset_imports=opsets,
),
r"bias_softmax_fusion_simple_no_axis_opset13.onnx",
)
onnx.save(
helper.make_model(
helper.make_graph(
[add, softmax1],
"Add_Softmax_Fusion",
[
helper.make_tensor_value_info("input", TensorProto.BFLOAT16, ["d_1", "d_2"]),
helper.make_tensor_value_info("bias", TensorProto.BFLOAT16, ["d_1", "d_2"]),
],
[
helper.make_tensor_value_info("output", TensorProto.BFLOAT16, ["d_1", "d_2"]),
],
[],
),
opset_imports=opsets,
),
r"bias_softmax_fusion_bfloat16.onnx",
)
onnx.save(
helper.make_model(
helper.make_graph(
[add, softmax1],
"Add_Softmax_Fusion",
[
helper.make_tensor_value_info("input", TensorProto.FLOAT, ["d_1", "d_2"]),
helper.make_tensor_value_info("bias", TensorProto.FLOAT, ["d_1", "d_2"]),
],
[
helper.make_tensor_value_info("output", TensorProto.FLOAT, ["d_1", "d_2"]),
],
[],
)
),
r"bias_softmax_fusion_simple.onnx",
)
onnx.save(
helper.make_model(
helper.make_graph(
[add, softmax6],
"Add_Softmax_Fusion",
[
helper.make_tensor_value_info(
"input",
TensorProto.FLOAT,
["d_0", "d_1", "d_2", "d_3", "d_4", "d_5", "d_6", "d_7", "d_8"],
),
helper.make_tensor_value_info(
"bias",
TensorProto.FLOAT,
["d_0", "d_1", "d_2", 1, 1, 1, "d_6", "d_7", "d_8"],
),
],
[
helper.make_tensor_value_info(
"output",
TensorProto.FLOAT,
["d_0", "d_1", "d_2", "d_3", "d_4", "d_5", "d_6", "d_7", "d_8"],
),
],
[],
)
),
r"bias_softmax_fusion_middleones.onnx",
)
onnx.save(
helper.make_model(
helper.make_graph(
[reverseadd, softmax6],
"Add_Softmax_Fusion",
[
helper.make_tensor_value_info(
"input",
TensorProto.FLOAT,
["d_0", "d_1", "d_2", "d_3", "d_4", "d_5", "d_6", "d_7", "d_8"],
),
helper.make_tensor_value_info(
"bias",
TensorProto.FLOAT,
["d_0", "d_1", "d_2", 1, 1, 1, "d_6", "d_7", "d_8"],
),
],
[
helper.make_tensor_value_info(
"output",
TensorProto.FLOAT,
["d_0", "d_1", "d_2", "d_3", "d_4", "d_5", "d_6", "d_7", "d_8"],
),
],
[],
)
),
r"bias_softmax_fusion_middleones_reversed.onnx",
)
# should NOT fuse
onnx.save(
helper.make_model(
helper.make_graph(
[add, softmax3],
"Add_Softmax_Fusion",
[
helper.make_tensor_value_info(
"input",
TensorProto.FLOAT,
["d_0", "d_1", "d_2", "d_3", "d_4", "d_5", "d_6", "d_7", "d_8"],
),
helper.make_tensor_value_info(
"bias",
TensorProto.FLOAT,
["d_0", "d_1", "d_2", 1, 1, 1, "d_6", "d_7", "d_8"],
),
],
[
helper.make_tensor_value_info(
"output",
TensorProto.FLOAT,
["d_0", "d_1", "d_2", "d_3", "d_4", "d_5", "d_6", "d_7", "d_8"],
),
],
[],
)
),
r"bias_softmax_fusion_middleones_badaxis.onnx",
)
onnx.save(
helper.make_model(
helper.make_graph(
[add, softmax6],
"Add_Softmax_Fusion",
[
helper.make_tensor_value_info(
"input",
TensorProto.FLOAT,
["d_0", "d_1", "d_2", "d_3", "d_4", "d_5", "d_6", "d_7", "d_8"],
),
helper.make_tensor_value_info("bias", TensorProto.FLOAT, [1, 1, 1, 1, 1, 1, "d_6", "d_7", "d_8"]),
],
[
helper.make_tensor_value_info(
"output",
TensorProto.FLOAT,
["d_0", "d_1", "d_2", "d_3", "d_4", "d_5", "d_6", "d_7", "d_8"],
),
],
[],
)
),
r"bias_softmax_fusion_allleadingones.onnx",
)
onnx.save(
helper.make_model(
helper.make_graph(
[add, softmax6],
"Add_Softmax_Fusion",
[
helper.make_tensor_value_info(
"input",
TensorProto.FLOAT,
["d_0", "d_1", "d_2", "d_3", "d_4", "d_5", "d_6", "d_7", "d_8"],
),
helper.make_tensor_value_info("bias", TensorProto.FLOAT, [1, 1, "d_6", "d_7", "d_8"]),
],
[
helper.make_tensor_value_info(
"output",
TensorProto.FLOAT,
["d_0", "d_1", "d_2", "d_3", "d_4", "d_5", "d_6", "d_7", "d_8"],
),
],
[],
)
),
r"bias_softmax_fusion_someleadingones.onnx",
)
onnx.save(
helper.make_model(
helper.make_graph(
[add, softmax6],
"Add_Softmax_Fusion",
[
helper.make_tensor_value_info(
"input",
TensorProto.FLOAT,
["d_0", "d_1", "d_2", "d_3", "d_4", "d_5", "d_6", "d_7", "d_8"],
),
helper.make_tensor_value_info("bias", TensorProto.FLOAT, ["d_6", "d_7", "d_8"]),
],
[
helper.make_tensor_value_info(
"output",
TensorProto.FLOAT,
["d_0", "d_1", "d_2", "d_3", "d_4", "d_5", "d_6", "d_7", "d_8"],
),
],
[],
)
),
r"bias_softmax_fusion_noleadingones.onnx",
)
|
main.py | rossning92/rpi-robot | 106 | 11068029 | <reponame>rossning92/rpi-robot<gh_stars>100-1000
from flask import Flask, request, jsonify
from flask_socketio import SocketIO, join_room, emit, send
from test_twowheel import test_controller
from twowheel import TwoWheelController
import signal
import sys
app = Flask(__name__, static_url_path="")
socketio = SocketIO(app)
@app.route("/")
def index():
return app.send_static_file("index.html")
@socketio.on("set_axis")
def set_axis(data):
print(data)
controller.set_axis(x=data["x"], y=data["y"])
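# Illustrative client-side sketch (not part of this app; the address is a
# placeholder): a joystick client can drive the robot by emitting the
# "set_axis" event with normalized x/y values, e.g. with python-socketio:
#
#     import socketio
#     sio = socketio.Client()
#     sio.connect("http://<robot-ip>:5000")
#     sio.emit("set_axis", {"x": 0.0, "y": 0.5})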
if __name__ == "__main__":
controller = TwoWheelController()
test_controller(controller)
socketio.run(app, debug=True, host="0.0.0.0")
|
tests/tello_qt/wifi.py | aaaaronlin/TelloPy | 591 | 11068045 | <reponame>aaaaronlin/TelloPy<gh_stars>100-1000
#!/usr/bin/env python
import subprocess
import time
def get_device_name():
proc = subprocess.Popen(['networksetup','-listallhardwareports'], stdout=subprocess.PIPE)
wifi = False
device_name = None
while True:
line = proc.stdout.readline()
if line == '':
break
words = line.rstrip().split()
if (2 < len(words) and
words[0] == 'Hardware' and words[1] == 'Port:' and words[2] == 'Wi-Fi'):
wifi = True
elif wifi and words[0] == 'Device:':
device_name = words[1]
else:
wifi = False
if device_name == None:
raise Exception('Wi-Fi device not found')
return device_name
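# Illustrative note: on macOS, `networksetup -listallhardwareports` prints
# blocks such as (values are examples only):
#     Hardware Port: Wi-Fi
#     Device: en0
#     Ethernet Address: aa:bb:cc:dd:ee:ff
# get_device_name() scans for the Wi-Fi block and returns its Device entry.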
def get_status(device_name = get_device_name()):
proc = subprocess.Popen(['ifconfig', device_name], stdout=subprocess.PIPE)
wifi = False
status = None
while True:
line = proc.stdout.readline()
if line == '':
break
words = line.rstrip().split()
if words[0] == 'status:':
status = words[1]
if status == None:
raise Exception('Unknown Wi-Fi status')
return status
def set_power(power, device_name = get_device_name()):
proc = subprocess.check_output(['networksetup', '-setairportpower', device_name, power])
def get_ssid(device_name = get_device_name()):
proc = subprocess.Popen(['networksetup', '-getairportnetwork', device_name],
stdout=subprocess.PIPE)
ssid = None
while True:
line = proc.stdout.readline()
if line == '':
break
words = line.rstrip().split()
if (4 <= len(words) and
words[0] == 'Current' and words[1] == 'Wi-Fi' and words[2] == 'Network:'):
ssid = words[3]
if ssid == None:
raise Exception('Wi-Fi not connected')
return ssid
def wait():
prev_status = get_status()
while True:
status = get_status()
if prev_status != status:
break
if status != 'active':
set_power('off')
set_power('on')
time.sleep(2)
if __name__ == '__main__':
    print('device name = %s' % get_device_name())
    print('status = %s' % get_status())
    while True:
        wait()
        if get_status() == 'active':
            print('connected to %s' % get_ssid())
|
venv/lib/python3.8/site-packages/textblob/sentiments.py | avrumnoor/NewsSummarizer | 6,608 | 11068068 | <reponame>avrumnoor/NewsSummarizer
# -*- coding: utf-8 -*-
'''Default sentiment analyzers are English for backwards compatibility, so
you can still do
>>> from textblob.sentiments import PatternAnalyzer
which is equivalent to
>>> from textblob.en.sentiments import PatternAnalyzer
'''
from __future__ import absolute_import
from textblob.base import BaseSentimentAnalyzer
from textblob.en.sentiments import (DISCRETE, CONTINUOUS,
PatternAnalyzer, NaiveBayesAnalyzer)
__all__ = [
'BaseSentimentAnalyzer',
'DISCRETE',
'CONTINUOUS',
'PatternAnalyzer',
'NaiveBayesAnalyzer',
]
|
lib/matplotlib/testing/image_util.py | jbbrokaw/matplotlib | 113 | 11068082 | # This module contains some functionality from the Python Imaging
# Library, that has been ported to use Numpy arrays rather than PIL
# Image objects.
# The Python Imaging Library is
# Copyright (c) 1997-2009 by Secret Labs AB
# Copyright (c) 1995-2009 by <NAME>
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
# Permission to use, copy, modify, and distribute this software and its
# associated documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appears in all
# copies, and that both that copyright notice and this permission notice
# appear in supporting documentation, and that the name of Secret Labs
# AB or the author not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import numpy as np
from matplotlib.cbook import deprecated, warn_deprecated
warn_deprecated('1.4.0', name='matplotlib.testing.image_util',
obj_type='module')
@deprecated('1.4.0')
def autocontrast(image, cutoff=0):
"""
Maximize image contrast, based on histogram. This completely
ignores the alpha channel.
"""
assert image.dtype == np.uint8
output_image = np.empty((image.shape[0], image.shape[1], 3), np.uint8)
for i in xrange(0, 3):
plane = image[:,:,i]
output_plane = output_image[:,:,i]
h = np.histogram(plane, bins=256)[0]
if cutoff:
# cut off pixels from both ends of the histogram
# get number of pixels
n = 0
for ix in xrange(256):
n = n + h[ix]
# remove cutoff% pixels from the low end
cut = n * cutoff / 100
for lo in range(256):
if cut > h[lo]:
cut = cut - h[lo]
h[lo] = 0
else:
h[lo] = h[lo] - cut
cut = 0
if cut <= 0:
break
# remove cutoff% samples from the hi end
cut = n * cutoff / 100
for hi in xrange(255, -1, -1):
if cut > h[hi]:
cut = cut - h[hi]
h[hi] = 0
else:
h[hi] = h[hi] - cut
cut = 0
if cut <= 0:
break
# find lowest/highest samples after preprocessing
for lo in xrange(256):
if h[lo]:
break
for hi in xrange(255, -1, -1):
if h[hi]:
break
if hi <= lo:
output_plane[:,:] = plane
else:
scale = 255.0 / (hi - lo)
offset = -lo * scale
lut = np.arange(256, dtype=np.float)
lut *= scale
lut += offset
lut = lut.clip(0, 255)
lut = lut.astype(np.uint8)
output_plane[:,:] = lut[plane]
return output_image
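# Illustrative usage sketch (hypothetical data; note the function is
# deprecated as of matplotlib 1.4.0):
#     img = (np.random.rand(32, 32, 4) * 255).astype(np.uint8)   # RGBA input
#     stretched = autocontrast(img, cutoff=2)                    # (32, 32, 3) uint8 output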
|
tests/schema/test_interface.py | otakuy/strawberry | 2,062 | 11068085 | <gh_stars>1000+
from dataclasses import dataclass
from typing import List
import pytest
import strawberry
def test_query_interface():
@strawberry.interface
class Cheese:
name: str
@strawberry.type
class Swiss(Cheese):
canton: str
@strawberry.type
class Italian(Cheese):
province: str
@strawberry.type
class Root:
@strawberry.field
def assortment(self) -> List[Cheese]:
return [
Italian(name="Asiago", province="Friuli"),
Swiss(name="Tomme", canton="Vaud"),
]
schema = strawberry.Schema(query=Root, types=[Swiss, Italian])
query = """{
assortment {
name
... on Italian { province }
... on Swiss { canton }
}
}"""
result = schema.execute_sync(query)
assert not result.errors
assert result.data["assortment"] == [
{"name": "Asiago", "province": "Friuli"},
{"canton": "Vaud", "name": "Tomme"},
]
def test_interfaces_can_implement_other_interfaces():
@strawberry.interface
class Error:
message: str
@strawberry.interface
class FieldError(Error):
message: str
field: str
@strawberry.type
class PasswordTooShort(FieldError):
message: str
field: str
fix: str
@strawberry.type
class Query:
@strawberry.field
def always_error(self) -> Error:
return PasswordTooShort(
message="Password Too Short",
field="Password",
fix="Choose more characters",
)
schema = strawberry.Schema(Query, types=[PasswordTooShort])
query = """{
alwaysError {
... on Error {
message
}
... on FieldError {
field
}
... on PasswordTooShort {
fix
}
}
}"""
result = schema.execute_sync(query)
assert not result.errors
assert result.data["alwaysError"] == {
"message": "Password Too Short",
"field": "Password",
"fix": "Choose more characters",
}
def test_interface_duck_typing():
@strawberry.interface
class Entity:
id: int
@strawberry.type
class Anime(Entity):
name: str
@dataclass
class AnimeORM:
id: int
name: str
@strawberry.type
class Query:
@strawberry.field
def anime(self) -> Anime:
return AnimeORM(id=1, name="One Piece") # type: ignore
schema = strawberry.Schema(query=Query)
query = """{
anime { name }
}"""
result = schema.execute_sync(query)
assert not result.errors
assert result.data == {"anime": {"name": "One Piece"}}
@pytest.mark.xfail(reason="We don't support returning dictionaries yet")
def test_interface_duck_typing_returning_dict():
@strawberry.interface
class Entity:
id: int
@strawberry.type
class Anime(Entity):
name: str
@dataclass
class AnimeORM:
id: int
name: str
@strawberry.type
class Query:
@strawberry.field
def anime(self) -> Anime:
return dict(id=1, name="One Piece") # type: ignore
schema = strawberry.Schema(query=Query)
query = """{
anime { name }
}"""
result = schema.execute_sync(query)
assert not result.errors
assert result.data == {"anime": {"name": "One Piece"}}
|
ppci/arch/arm/arch.py | kl4w3i/ppci | 161 | 11068091 | """ ARM architecture definition. """
import io
from ... import ir
from ...binutils.assembler import BaseAssembler
from ..arch import Architecture
from ..arch_info import ArchInfo, TypeInfo
from ..generic_instructions import Label, Alignment, RegisterUseDef
from ..data_instructions import Db, Dd, Dcd2, data_isa
from ..registers import RegisterClass
from ..stack import StackLocation
from .registers import ArmRegister, register_range, LowArmRegister, RegisterSet
from .registers import R0, R1, R2, R3, R4, all_registers
from .registers import R5, R6, R7, R8
from .registers import R9, R10, R11, LR, PC, SP
from .arm_instructions import LdrPseudo, arm_isa
from .thumb_instructions import thumb_isa
from . import thumb_instructions
from . import arm_instructions
class ArmCallingConvention:
pass
class ArmArch(Architecture):
""" Arm machine class. """
name = "arm"
option_names = ("thumb", "jazelle", "neon", "vfpv1", "vfpv2")
def __init__(self, options=None):
super().__init__(options=options)
if self.has_option("thumb"):
self.assembler = ThumbAssembler()
self.isa = thumb_isa + data_isa
# We use r7 as frame pointer (in case of thumb ;)):
self.fp = R7
self.callee_save = (R5, R6)
# Registers usable by register allocator:
register_classes = [
RegisterClass(
"reg",
[ir.i8, ir.i32, ir.ptr, ir.u8, ir.u32, ir.i16, ir.u16],
LowArmRegister,
[R0, R1, R2, R3, R4, R5, R6, R7],
)
]
else:
self.isa = arm_isa + data_isa
self.assembler = ArmAssembler()
self.fp = R11
self.callee_save = (R5, R6, R7, R8, R9, R10)
# Registers usable by register allocator:
register_classes = [
RegisterClass(
"loreg",
[],
LowArmRegister,
[R0, R1, R2, R3, R4, R5, R6, R7],
),
RegisterClass(
"reg",
[ir.i8, ir.i32, ir.u8, ir.u32, ir.i16, ir.u16, ir.ptr],
ArmRegister,
[R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11],
),
]
self.assembler.gen_asm_parser(self.isa)
self.gdb_registers = all_registers
self.gdb_pc = PC
self.info = ArchInfo(
type_infos={
ir.i8: TypeInfo(1, 1),
ir.u8: TypeInfo(1, 1),
ir.i16: TypeInfo(2, 2),
ir.u16: TypeInfo(2, 2),
ir.i32: TypeInfo(4, 4),
ir.u32: TypeInfo(4, 4),
ir.f32: TypeInfo(4, 4),
ir.f64: TypeInfo(8, 8),
"int": ir.i32,
"long": ir.i32,
"ptr": ir.u32,
ir.ptr: ir.u32,
},
register_classes=register_classes,
)
def get_runtime(self):
""" Implement compiler runtime functions """
from ...api import asm
if self.has_option("thumb"):
asm_src = ""
else:
asm_src = ARM_ASM_RT
return asm(io.StringIO(asm_src), self)
def move(self, dst, src):
""" Generate a move from src to dst """
if self.has_option("thumb"):
return thumb_instructions.Mov2(dst, src, ismove=True)
else:
return arm_instructions.Mov2(
dst, src, arm_instructions.NoShift(), ismove=True
)
def gen_prologue(self, frame):
"""Returns prologue instruction sequence.
Reserve stack for this calling frame for:
- local variables
- save registers
- parameters to called functions
"""
# Label indication function:
yield Label(frame.name)
# Save the link register and the frame pointer:
if self.has_option("thumb"):
yield thumb_instructions.Push({LR, R7})
else:
yield arm_instructions.Push(RegisterSet({LR, R11}))
# Setup frame pointer:
if self.has_option("thumb"):
yield thumb_instructions.Mov2(R7, SP)
else:
yield arm_instructions.Mov2(R11, SP, arm_instructions.NoShift())
# Reserve stack for this calling frame for:
# 1. local variables
# 2. save registers
# 3. parameters to called functions
if frame.stacksize:
ssize = round_up(frame.stacksize)
if self.has_option("thumb"):
# Reserve stack space:
# subSp cannot handle large numbers:
while ssize > 0:
inc = min(124, ssize)
yield thumb_instructions.SubSp(inc)
ssize -= inc
else:
yield arm_instructions.SubImm(SP, SP, ssize)
# Callee save registers:
callee_save = self.get_callee_saved(frame)
if callee_save:
if self.has_option("thumb"):
yield thumb_instructions.Push(callee_save)
else:
yield arm_instructions.Push(RegisterSet(callee_save))
# Allocate space for outgoing calls:
extras = max(frame.out_calls) if frame.out_calls else 0
if extras:
ssize = round_up(extras)
if self.has_option("thumb"):
raise NotImplementedError()
else:
yield arm_instructions.SubImm(SP, SP, ssize)
def gen_epilogue(self, frame):
"""Return epilogue sequence for a frame.
Adjust frame pointer and add constant pool.
Also free up space on stack for:
- Space for parameters passed to called functions.
- Space for save registers
- Space for local variables
"""
# Free space for outgoing calls:
extras = max(frame.out_calls) if frame.out_calls else 0
if extras:
ssize = round_up(extras)
if self.has_option("thumb"):
raise NotImplementedError()
else:
yield arm_instructions.AddImm(SP, SP, ssize)
# Callee save registers:
callee_save = self.get_callee_saved(frame)
if callee_save:
if self.has_option("thumb"):
yield thumb_instructions.Pop(callee_save)
else:
yield arm_instructions.Pop(RegisterSet(callee_save))
if frame.stacksize > 0:
ssize = round_up(frame.stacksize)
if self.has_option("thumb"):
# subSp cannot handle large numbers:
while ssize > 0:
inc = min(124, ssize)
yield thumb_instructions.AddSp(inc)
ssize -= inc
else:
yield arm_instructions.AddImm(SP, SP, ssize)
if self.has_option("thumb"):
yield thumb_instructions.Pop({PC, R7})
else:
yield arm_instructions.Pop(RegisterSet({PC, R11}))
# Add final literal pool
for instruction in self.litpool(frame):
yield instruction
if not self.has_option("thumb"):
yield Alignment(4) # Align at 4 bytes
def get_callee_saved(self, frame):
saved_registers = set()
for register in self.callee_save:
if register in frame.used_regs:
saved_registers.add(register)
return saved_registers
def gen_arm_memcpy(self, p1, p2, v3, size):
# Called before register allocation
# Major crappy memcpy, can be improved!
for idx in range(size):
yield arm_instructions.Ldrb(v3, p2, idx)
yield arm_instructions.Strb(v3, p1, idx)
# TODO: yield the below from time to time for really big stuff:
# yield arm_instructions.AddImm(p1, 1)
# yield arm_instructions.AddImm(p2, 1)
def gen_call(self, frame, label, args, rv):
arg_types = [a[0] for a in args]
arg_locs = self.determine_arg_locations(arg_types)
arg_regs = []
stack_size = 0
for arg_loc, arg2 in zip(arg_locs, args):
arg = arg2[1]
if isinstance(arg_loc, ArmRegister):
arg_regs.append(arg_loc)
yield self.move(arg_loc, arg)
elif isinstance(arg_loc, StackLocation):
stack_size += arg_loc.size
if isinstance(arg, ArmRegister):
# Store register on stack:
if self.has_option("thumb"):
yield thumb_instructions.Str1(arg, SP, arg_loc.offset)
else:
yield arm_instructions.Str1(arg, SP, arg_loc.offset)
elif isinstance(arg, StackLocation):
if self.has_option("thumb"):
raise NotImplementedError()
else:
# Generate memcpy now:
# print(arg2, arg_loc)
assert arg.size == arg_loc.size
# Now start a copy routine to copy some stack:
p1 = frame.new_reg(ArmRegister)
p2 = frame.new_reg(ArmRegister)
v3 = frame.new_reg(ArmRegister)
# Destination location:
# Remember that the LR and FP are pushed in between
# So hence -8:
yield arm_instructions.AddImm(
p1, SP, arg_loc.offset - 8
)
# Source location:
yield arm_instructions.SubImm(p2, self.fp, -arg.offset)
for instruction in self.gen_arm_memcpy(
p1, p2, v3, arg.size
):
yield instruction
else: # pragma: no cover
raise NotImplementedError(str(arg))
else: # pragma: no cover
raise NotImplementedError("Parameters in memory not impl")
# Record that certain amount of stack is required:
frame.add_out_call(stack_size)
yield RegisterUseDef(uses=arg_regs)
clobbers = [R0, R1, R2, R3, R4]
if self.has_option("thumb"):
if isinstance(label, ArmRegister):
# Ensure thumb mode!
yield thumb_instructions.AddImm(label, label, 1)
yield thumb_instructions.Blx(label, clobbers=clobbers)
else:
yield thumb_instructions.Bl(label, clobbers=clobbers)
else:
if isinstance(label, ArmRegister):
yield arm_instructions.Blx(label, clobbers=clobbers)
else:
yield arm_instructions.Bl(label, clobbers=clobbers)
if rv:
retval_loc = self.determine_rv_location(rv[0])
yield RegisterUseDef(defs=(retval_loc,))
yield self.move(rv[1], retval_loc)
def gen_function_enter(self, args):
arg_types = [a[0] for a in args]
arg_locs = self.determine_arg_locations(arg_types)
arg_regs = set(
arg_loc for arg_loc in arg_locs if isinstance(arg_loc, ArmRegister)
)
yield RegisterUseDef(defs=arg_regs)
for arg_loc, arg2 in zip(arg_locs, args):
arg = arg2[1]
if isinstance(arg_loc, ArmRegister):
yield self.move(arg, arg_loc)
elif isinstance(arg_loc, StackLocation):
pass
else: # pragma: no cover
raise NotImplementedError("Parameters in memory not impl")
def gen_function_exit(self, rv):
live_out = set()
if rv:
retval_loc = self.determine_rv_location(rv[0])
yield self.move(retval_loc, rv[1])
live_out.add(retval_loc)
yield RegisterUseDef(uses=live_out)
def litpool(self, frame):
""" Generate instruction for the current literals """
# Align at 4 bytes
if frame.constants:
yield Alignment(4)
# Add constant literals:
while frame.constants:
label, value = frame.constants.pop(0)
yield Label(label)
if isinstance(value, int):
yield Dd(value)
elif isinstance(value, str):
yield Dcd2(value)
elif isinstance(value, bytes):
for byte in value:
yield Db(byte)
yield Alignment(4) # Align at 4 bytes
else: # pragma: no cover
raise NotImplementedError("Constant of type {}".format(value))
def between_blocks(self, frame):
for instruction in self.litpool(frame):
yield instruction
def determine_arg_locations(self, arg_types):
"""
Given a set of argument types, determine location for argument
ABI:
pass arg1 in R1
pass arg2 in R2
pass arg3 in R3
pass arg4 in R4
return value in R0
"""
# TODO: what ABI to use?
# Perhaps follow the arm ABI spec?
locations = []
regs = [R1, R2, R3, R4]
offset = 8
for arg_ty in arg_types:
if arg_ty.is_blob:
r = StackLocation(offset, arg_ty.size)
offset += arg_ty.size
else:
# Pass non-blob values in registers if possible:
if regs:
r = regs.pop(0)
else:
arg_size = self.info.get_size(arg_ty)
r = StackLocation(offset, arg_size)
offset += arg_size
locations.append(r)
return locations
def determine_rv_location(self, ret_type):
rv = R0
return rv
class ArmAssembler(BaseAssembler):
""" Assembler for the arm instruction set """
def __init__(self):
super().__init__()
# self.parser.assembler = self
self.add_extra_rules()
self.lit_pool = []
self.lit_counter = 0
def add_extra_rules(self):
# Implement register list syntaxis:
reg_nt = "$reg_cls_armregister$"
self.typ2nt[RegisterSet] = "reg_list"
self.add_rule(
"reg_list", ["{", "reg_list_inner", "}"], lambda rhs: rhs[1]
)
self.add_rule("reg_list_inner", ["reg_or_range"], lambda rhs: rhs[0])
# self.add_rule(
# 'reg_list_inner',
# ['reg_or_range', ',', 'reg_list_inner'],
# lambda rhs: RegisterSet(rhs[0] | rhs[2]))
self.add_rule(
"reg_list_inner",
["reg_list_inner", ",", "reg_or_range"],
lambda rhs: RegisterSet(rhs[0] | rhs[2]),
)
self.add_rule(
"reg_or_range", [reg_nt], lambda rhs: RegisterSet([rhs[0]])
)
self.add_rule(
"reg_or_range",
[reg_nt, "-", reg_nt],
lambda rhs: RegisterSet(register_range(rhs[0], rhs[2])),
)
# Ldr pseudo instruction:
# TODO: fix the add_literal other way:
self.add_rule(
"instruction",
["ldr", reg_nt, ",", "=", "ID"],
lambda rhs: LdrPseudo(rhs[1], rhs[4].val, self.add_literal),
)
def flush(self):
assert not self.in_macro
while self.lit_pool:
i = self.lit_pool.pop(0)
self.emit(i)
def add_literal(self, v):
""" For use in the pseudo instruction LDR r0, =SOMESYM """
# Invent some label for the literal and store it.
assert isinstance(v, str)
self.lit_counter += 1
label_name = "_lit_{}".format(self.lit_counter)
self.lit_pool.append(Label(label_name))
self.lit_pool.append(Dcd2(v))
return label_name
class ThumbAssembler(BaseAssembler):
def __init__(self):
super().__init__()
self.parser.assembler = self
self.add_extra_rules()
def add_extra_rules(self):
# Implement register list syntaxis:
reg_nt = "$reg_cls_armregister$"
self.typ2nt[set] = "reg_list"
self.add_rule(
"reg_list", ["{", "reg_list_inner", "}"], lambda rhs: rhs[1]
)
self.add_rule("reg_list_inner", ["reg_or_range"], lambda rhs: rhs[0])
# For a left right parser, or right left parser, this is important:
self.add_rule(
"reg_list_inner",
["reg_list_inner", ",", "reg_or_range"],
lambda rhs: rhs[0] | rhs[2],
)
# self.add_rule(
# 'reg_list_inner',
# ['reg_or_range', ',', 'reg_list_inner'], lambda rhs: rhs[0] | rhs[2])
self.add_rule("reg_or_range", [reg_nt], lambda rhs: set([rhs[0]]))
self.add_rule(
"reg_or_range",
[reg_nt, "-", reg_nt],
lambda rhs: register_range(rhs[0], rhs[2]),
)
def round_up(s):
return s + (4 - s % 4)
ARM_ASM_RT = """
global __sdiv
__sdiv:
; Divide r1 by r2
; R4 is a work register.
; r0 is the quotient
push {r4}
mov r4, r2 ; mov divisor into temporary register.
 ; Scale the divisor up until it is larger than the dividend.
 cmp r4, r1, lsr 1 ; If r4 <= (r1 >> 1), shift it left once more.
__sdiv_inc:
movls r4, r4, lsl 1
cmp r4, r1, lsr 1
bls __sdiv_inc
mov r0, 0 ; Initialize the result
 ; Repeatedly subtract the shifted divisor
__sdiv_dec:
 cmp r1, r4 ; Can we subtract the current temp value?
 subcs r1, r1, r4 ; Subtract temp from the dividend if carry is set
adc r0, r0, r0 ; double (shift left) and add carry
mov r4, r4, lsr 1 ; Shift right one
cmp r4, r2 ; Is temp less than divisor?
bhs __sdiv_dec ; If so, repeat.
pop {r4}
mov pc, lr ; Return from function.
"""
|
stanza/server/ud_enhancer.py | asears/stanza | 3,633 | 11068118 |
import stanza
from stanza.protobuf import DependencyEnhancerRequest, Document, Language
from stanza.server.java_protobuf_requests import send_request, add_sentence, JavaProtobufContext
ENHANCER_JAVA = "edu.stanford.nlp.trees.ud.ProcessUniversalEnhancerRequest"
def build_enhancer_request(doc, language, pronouns_pattern):
if bool(language) == bool(pronouns_pattern):
raise ValueError("Should set exactly one of language and pronouns_pattern")
request = DependencyEnhancerRequest()
if pronouns_pattern:
request.setRelativePronouns(pronouns_pattern)
elif language.lower() in ("en", "english"):
request.language = Language.UniversalEnglish
elif language.lower() in ("zh", "zh-hans", "chinese"):
request.language = Language.UniversalChinese
else:
raise ValueError("Sorry, but language " + language + " is not supported yet. Either set a pronouns pattern or file an issue at https://stanfordnlp.github.io/stanza suggesting a mechanism for converting this language")
request_doc = request.document
request_doc.text = doc.text
num_tokens = 0
for sent_idx, sentence in enumerate(doc.sentences):
request_sentence = add_sentence(request_doc.sentence, sentence, num_tokens)
num_tokens = num_tokens + sum(len(token.words) for token in sentence.tokens)
graph = request_sentence.basicDependencies
nodes = []
word_index = 0
for token in sentence.tokens:
for word in token.words:
# TODO: refactor with the bit in java_protobuf_requests
word_index = word_index + 1
node = graph.node.add()
node.sentenceIndex = sent_idx
node.index = word_index
if word.head != 0:
edge = graph.edge.add()
edge.source = word.head
edge.target = word_index
edge.dep = word.deprel
return request
def process_doc(doc, language=None, pronouns_pattern=None):
request = build_enhancer_request(doc, language, pronouns_pattern)
return send_request(request, Document, ENHANCER_JAVA, "$CLASSPATH")
class UniversalEnhancer(JavaProtobufContext):
"""
UniversalEnhancer context window
This is a context window which keeps a process open. Should allow
for multiple requests without launching new java processes each time.
"""
def __init__(self, language=None, pronouns_pattern=None, classpath=None):
super(UniversalEnhancer, self).__init__(classpath, Document, ENHANCER_JAVA)
if bool(language) == bool(pronouns_pattern):
raise ValueError("Should set exactly one of language and pronouns_pattern")
self.language = language
self.pronouns_pattern = pronouns_pattern
def process(self, doc):
request = build_enhancer_request(doc, self.language, self.pronouns_pattern)
return self.process_request(request)
def main():
nlp = stanza.Pipeline('en',
processors='tokenize,pos,lemma,depparse')
with UniversalEnhancer(language="en") as enhancer:
doc = nlp("This is the car that I bought")
result = enhancer.process(doc)
print(result.sentence[0].enhancedDependencies)
if __name__ == '__main__':
main()
|
plugins/gcalc.py | daviddever/skybot | 114 | 11068121 | from __future__ import unicode_literals
from util import hook, http
@hook.command
def calc(inp):
""".calc <term> -- returns Google Calculator result"""
h = http.get_html("http://www.google.com/search", q=inp)
m = h.xpath('//h2[@class="r"]/text()')
if not m:
return "could not calculate " + inp
res = " ".join(m[0].split())
return res
|
imagenet_nv/multiproc.py | sanjeevm0/imagenet-fast | 298 | 11068122 | import argparse
import torch
import sys
import subprocess
from pathlib import Path
argslist = list(sys.argv)[1:]
world_size = torch.cuda.device_count()
if '--world-size' in argslist:
argslist[argslist.index('--world-size')+1] = str(world_size)
else:
argslist.append('--world-size')
argslist.append(str(world_size))
save_dir = Path.cwd()
if '--save-dir' in argslist:
save_dir = argslist[argslist.index('--save-dir')+1]
workers = []
for i in range(world_size):
if '--rank' in argslist:
argslist[argslist.index('--rank')+1] = str(i)
else:
argslist.append('--rank')
argslist.append(str(i))
stdout = None if i == 0 else open(f'{save_dir}/GPU_{i}.log', "w")
p = subprocess.Popen([str(sys.executable)]+argslist, stdout=stdout)
workers.append(p)
for p in workers: p.wait()
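# Illustrative effect on a 2-GPU machine (the training script name and extra
# flags below are hypothetical examples, not part of this repo):
#   python multiproc.py main.py --epochs 1
# roughly expands to:
#   python main.py --epochs 1 --world-size 2 --rank 0   # stdout shown on console
#   python main.py --epochs 1 --world-size 2 --rank 1   # stdout -> <save-dir>/GPU_1.log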
|
mmaction/core/post_processing/__init__.py | arpanmangal/coinaction | 1,929 | 11068136 | from .bbox_nms import multiclass_nms, singleclass_nms
from .merge_augs import (merge_aug_proposals, merge_aug_bboxes,
merge_aug_scores)
__all__ = [
'multiclass_nms', 'singleclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes',
'merge_aug_scores'
]
|
pyston/test/external/test_sqlalchemy.py | mananpal1997/pyston | 2,441 | 11068188 | <gh_stars>1000+
import os
import shutil
import subprocess
import sys
import tempfile
if __name__ == "__main__":
with tempfile.TemporaryDirectory() as tempdir:
# The log seems somewhat non-deterministic:
print("PYSTONTEST: no-log-check")
def rel(path):
return os.path.join(os.path.dirname(__file__), path)
# sqlalchemy currently has a bug where the test suite will fail
# if a parent directory is named "test". So copy it into the temp dir
# https://github.com/sqlalchemy/sqlalchemy/issues/7045
sqlalchemy_dir = os.path.join(tempdir, "sqlalchemy")
shutil.copytree(rel("sqlalchemy"), sqlalchemy_dir)
env_dir = os.path.abspath(os.path.join(tempdir, "env"))
subprocess.check_call([rel("../../../build/bootstrap_env/bin/virtualenv"), "-p", sys.executable, env_dir])
subprocess.check_call([os.path.join(env_dir, "bin/pip"), "install", "pytest", "tox"])
# r = subprocess.call([os.path.join(env_dir, "bin/pytest"), "--db", "sqlite"], cwd=rel("sqlalchemy"))
env = dict(os.environ)
env["TOX_WORKERS"] = "-n0"
r = subprocess.call([os.path.join(env_dir, "bin/tox"), "--parallel", "0", "-e", "py38-sqlite"], cwd=sqlalchemy_dir, env=env)
assert r in (0, 1), r
|
tests/unit/lib/test_aws.py | senstb/aws-elastic-beanstalk-cli | 110 | 11068227 | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import sys
import unittest
import botocore
import botocore.exceptions
import botocore.parsers
import mock
import pytest
from mock import patch, MagicMock
from ebcli import __version__ as current_ebcli_version
from ebcli.lib import aws
class TestAws(unittest.TestCase):
def setUp(self):
self.response_data = {
'ResponseMetadata': {
'HTTPStatusCode': 500
},
'Error': {
'Message': '500 Internal Server Error'
}
}
def test_user_agent(self):
aws.set_region('us-east-1')
client = aws._get_client('elasticbeanstalk')
user_agent = client._client_config.user_agent
self.assertTrue(
user_agent.startswith(
'eb-cli/{current_ebcli_version}'.format(
current_ebcli_version=current_ebcli_version
)
)
)
def test_handle_response_code__500x_code__max_attempts_reached(self):
aggregated_response_message = [r"""Received 5XX error during attempt #11
500 Internal Server Error
"""]
with self.assertRaises(aws.MaxRetriesError) as context_manager:
aws._handle_response_code(self.response_data, 11, aggregated_response_message)
self.assertEqual(context_manager.exception.message,
"Max retries exceeded for service error (5XX)\n" + ('\n').join(aggregated_response_message))
@pytest.mark.skipif(sys.version_info < (3,4),
reason="requires python3.4 or higher")
@patch('ebcli.lib.aws.LOG')
def test_handle_response_code__500x_code__max_attempts_not_reached(self, LOG):
aggregated_response_message = []
aws._handle_response_code(self.response_data, 10, aggregated_response_message)
calls = [
mock.call('Response: {\'Error\': {\'Message\': \'500 Internal Server Error\'}, \'ResponseMetadata\': {\'HTTPStatusCode\': 500}}'),
mock.call('API call finished, status = 500'),
mock.call('Received 5XX error')
]
LOG.debug.assert_has_calls(calls)
@pytest.mark.skipif(sys.version_info > (2,7,11),
reason="requires python2.7.11 or lower")
@patch('ebcli.lib.aws.LOG')
def test_handle_response_code__500x_code__max_attempts_not_reached(self, LOG):
aggregated_response_message = []
aws._handle_response_code(self.response_data, 10, aggregated_response_message)
calls = [
mock.call("Response: {'Error': {'Message': '500 Internal Server Error'}, 'ResponseMetadata': {'HTTPStatusCode': 500}}"),
mock.call('API call finished, status = 500'),
mock.call('Received 5XX error')
]
LOG.debug.assert_has_calls(calls)
@mock.patch('ebcli.lib.aws._get_delay')
@mock.patch('ebcli.lib.aws._set_operation')
@mock.patch('ebcli.lib.aws._sleep')
def test_make_api_call__failure__status_code_5xx(
self,
_sleep_mock,
_set_operation_mock,
_get_delay_mock
):
self.maxDiff = None
operation = MagicMock(
side_effect=botocore.exceptions.ClientError(
self.response_data,
'some_operation'
)
)
_set_operation_mock.return_value = operation
_get_delay_mock.side_effect = None
_sleep_mock.side_effect = None
with self.assertRaises(aws.MaxRetriesError) as cm:
aws.make_api_call('some_service', 'some_operation')
exception_message = r"""Max retries exceeded for service error (5XX)
Received 5XX error during attempt #1
500 Internal Server Error
Received 5XX error during attempt #2
500 Internal Server Error
Received 5XX error during attempt #3
500 Internal Server Error
Received 5XX error during attempt #4
500 Internal Server Error
Received 5XX error during attempt #5
500 Internal Server Error
Received 5XX error during attempt #6
500 Internal Server Error
Received 5XX error during attempt #7
500 Internal Server Error
Received 5XX error during attempt #8
500 Internal Server Error
Received 5XX error during attempt #9
500 Internal Server Error
Received 5XX error during attempt #10
500 Internal Server Error
Received 5XX error during attempt #11
500 Internal Server Error
"""
self.assertEqual(
exception_message,
str(cm.exception)
)
def test_handle_response_code__TooManyConfigurationTemplatesException_received(self):
self.response_data = {
'ResponseMetadata': {
'date': 'Tue, 20 Jun 2017 06:34:57 GMT',
'RetryAttempts': 0,
'HTTPStatusCode': 400,
'RequestId':
'93836311-5582-11e7-8c17-c11b3f8f545e'
},
'Error': {
'Code': 'TooManyConfigurationTemplatesException',
'Type': 'Sender'
}
}
exception_message = ' '.join([
'Your request cannot be completed. You have reached the maximum',
'number of saved configuration templates. Learn more about service',
'limits: http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html'
])
with self.assertRaises(aws.TooManyConfigurationTemplatesException) as context_manager:
aws._handle_response_code(self.response_data, 0, [])
self.assertEqual(context_manager.exception.message, exception_message)
def test_handle_response_code__botocore_client_exception_AccessDenied(self):
self.response_data = {
'Error': {
'Type': 'Sender',
'Code': 'AccessDenied',
'Message': 'User: arn:aws:iam::123123123123:user/permissionless_user is not authorized to perform: cloudformation:GetTemplate on resource: arn:aws:cloudformation:us-west-2:123123123123:stack/aws-yolo-stack/*'
},
'ResponseMetadata': {
'RequestId': '123123123-ddfg-sdff-qwee-123123123dsfsdf',
'HTTPStatusCode': 403,
'HTTPHeaders': {
'x-amzn-requestid': '123123123-ddfg-sdff-qwee-123123123dsfsdf',
'content-type': 'text/xml',
'content-length': '439',
'date': 'Wed, 08 Nov 2017 04:16:52 GMT'
},
'RetryAttempts': 0
}
}
exception_message = (
'Operation Denied. User: arn:aws:iam::123123123123:user/permissionless_user '
'is not authorized to perform: cloudformation:GetTemplate on resource: '
'arn:aws:cloudformation:us-west-2:123123123123:stack/aws-yolo-stack/*'
)
with self.assertRaises(aws.NotAuthorizedError) as context_manager:
aws._handle_response_code(self.response_data, 0, [])
self.assertEqual(context_manager.exception.message, exception_message)
@patch('ebcli.lib.aws._set_operation')
@patch('ebcli.lib.aws._get_delay')
def test_handle_botocore_response_parser_error(self, get_delay_mock, set_operation_mock):
get_delay_mock.return_value = 0
operation_mock = MagicMock(
side_effect=[
botocore.parsers.ResponseParserError(
os.linesep.join(
[
"Unable to parse response (no element found: line 1, column 0), invalid XML received:",
"b''"
]
)
),
botocore.parsers.ResponseParserError(
os.linesep.join(
[
"Unable to parse response (no element found: line 1, column 0), invalid XML received:",
"b''"
]
)
),
{
'Events': [],
'ResponseMetadata': {
'RequestId': 'd0ac21eb-c138-42cb-a679-eea1a6e56fe0',
'HTTPStatusCode': 200,
'date': 'Wed, 21 Feb 2018 09:09:16 GMT',
'RetryAttempts': 0
}
}
]
)
set_operation_mock.return_value = operation_mock
aws.make_api_call('some_aws_service', 'some_operation')
@patch('ebcli.lib.aws._set_operation')
@patch('ebcli.lib.aws._get_delay')
def test_handle_botocore_response_parser_error__max_attempts_reached(self, get_delay_mock, set_operation_mock):
self.maxDiff = None
get_delay_mock.return_value = 0
botocore_parse_errors = []
for i in range(1, 12):
botocore_parse_errors.append(
botocore.parsers.ResponseParserError(
os.linesep.join(
[
"Unable to parse response (no element found: line 1, column 0), invalid XML received:",
"b''"
]
)
)
)
operation_mock = MagicMock(side_effect=botocore_parse_errors)
set_operation_mock.return_value = operation_mock
with self.assertRaises(aws.MaxRetriesError) as context_manager:
aws.make_api_call('some_aws_service', 'some_operation')
self.assertEqual(
"""Max retries exceeded for ResponseParserErrorsUnable to parse response (no element found: line 1, column 0), invalid XML received:
b''
Unable to parse response (no element found: line 1, column 0), invalid XML received:
b''
Unable to parse response (no element found: line 1, column 0), invalid XML received:
b''
Unable to parse response (no element found: line 1, column 0), invalid XML received:
b''
Unable to parse response (no element found: line 1, column 0), invalid XML received:
b''
Unable to parse response (no element found: line 1, column 0), invalid XML received:
b''
Unable to parse response (no element found: line 1, column 0), invalid XML received:
b''
Unable to parse response (no element found: line 1, column 0), invalid XML received:
b''
Unable to parse response (no element found: line 1, column 0), invalid XML received:
b''
Unable to parse response (no element found: line 1, column 0), invalid XML received:
b''""",
str(context_manager.exception).replace('\r\n', '\n').replace(r'\s$', "")
)
|
tests/unittests/resources/worker_deps_path/common_module/__init__.py | anandagopal6/azure-functions-python-worker | 277 | 11068245 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
__version__: str = 'worker'
import os
# ./tests/unittests/resources/worker_deps_path/common_module
package_location: str = os.path.dirname(__file__)
|
davarocr/davarocr/davar_common/datasets/davar_multi_dataset.py | icedream2/DAVAR-Lab-OCR | 387 | 11068247 | <gh_stars>100-1000
"""
##################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : davar_multi_dataset.py
# Abstract : Implementation of the multiple dataset loading of davar group.
# Current Version: 1.0.0
# Date : 2021-05-01
##################################################################################################
"""
import bisect
from torch.utils.data import ConcatDataset
from torch.utils.data import Dataset
from mmdet.datasets import DATASETS
@DATASETS.register_module()
class DavarMultiDataset(Dataset):
"""MultiDataset: Support for different sample ratios from different dataset"""
CLASSES = None
def __init__(self,
batch_ratios,
dataset,
test_mode=False):
"""
        Davar multiple-dataset loading.
        Args:
            batch_ratios (int|list|str): sampling ratio applied to each dataset within a batch
            dataset (list[Dataset]): datasets to concatenate
            test_mode (bool): whether the dataset is used in test mode
"""
# parameter initialization
self.test_mode = test_mode
if isinstance(batch_ratios, (float, int)):
batch_ratios = [batch_ratios]
elif isinstance(batch_ratios, (tuple, list)):
pass
else:
batch_ratios = list(map(float, batch_ratios.split('|')))
self.batch_ratios = batch_ratios
self.datasets = list()
for _, dataset_ in enumerate(dataset):
print('number of samples:', len(dataset_))
self.datasets.append(dataset_)
# concat all the dataset
self.concated_dataset = ConcatDataset(self.datasets)
if not self.test_mode:
self._set_group_flag()
def __len__(self):
"""
Returns:
int: length of the dataset
"""
return len(self.concated_dataset)
def _set_group_flag(self):
"""
        Record the batch ratios and per-dataset sample counts in the group flag.
"""
group_samples = list()
for dataset in self.datasets:
group_samples.append(len(dataset))
self.flag = dict()
self.flag['batch_ratios'] = self.batch_ratios
self.flag['group_samples'] = group_samples
def prepare_train_img(self, idx):
"""
        Prepare a training sample.
        Args:
            idx (int): training sample index
        Returns:
            dict: the training sample at the given index
"""
data = self.concated_dataset.__getitem__(idx)
return data
def prepare_test_img(self, idx):
"""
        Prepare a test sample.
        Args:
            idx (int): test sample index
        Returns:
            dict: the test sample at the given index
"""
data = self.concated_dataset.__getitem__(idx)
return data
def __getitem__(self, idx):
"""
Args:
idx (int): sample index
Returns:
            dict: the sample at the given index
"""
assert idx < len(self), 'index range error'
if self.test_mode:
return self.prepare_test_img(idx)
return self.prepare_train_img(idx)
def get_ann_info(self, idx):
"""
        Get annotation information for a training sample.
        Args:
            idx (int): sample index
        Returns:
            dict: annotation information for the given sample index
"""
dataset_idx = bisect.bisect_right(self.concated_dataset.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.concated_dataset.cumulative_sizes[dataset_idx - 1]
return self.concated_dataset.datasets[dataset_idx].get_ann_info(sample_idx)
def evaluate(self,
results,
metric='accuracy',
logger=None,
**eval_kwargs):
"""
model evaluation api
Args:
results (list): model prediction results
metric (str): evaluation metric
logger (logging.Logger): Logger used for printing related information during evaluation. Default: None.
Returns:
dict: model evaluation metric
"""
validation_result = self.datasets[0].evaluate(results, metric, logger, **eval_kwargs)
return validation_result
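# A minimal usage sketch (assumption: `ds_a` and `ds_b` are map-style datasets
# built elsewhere; the ratios below are purely illustrative):
#
#   mixed = DavarMultiDataset(batch_ratios=[0.7, 0.3], dataset=[ds_a, ds_b])
#   len(mixed)                       # sum of the two dataset lengths
#   mixed.flag['batch_ratios']       # [0.7, 0.3], available to the batch sampler
#   sample = mixed[0]                # indexes into the concatenated datasets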
|
src/multiclass/KNNParameter.py | ocatak/malware_api_class | 172 | 11068267 |
class KNNParameter:
def __init__(self, n_neighbors, p, algorithm):
#Embedding
self.n_neighbors = n_neighbors
self.p = p
self.algorithm = algorithm
class KNNParameters:
def __init__(self):
# model
self.parameters = []
# 2019-07-06 09_09_39.367144
#1- 33 self.parameters.append(KNNParameter(3))
#2- 31 self.parameters.append(KNNParameter(5))
#3- 31 self.parameters.append(KNNParameter(9))
#4- 32 self.parameters.append(KNNParameter(15))
# 2019-07-06 13_51_03.456234
#5- 30 self.parameters.append(KNNParameter(21))
self.parameters.append(KNNParameter(3, 3, "auto"))
self.parameters.append(KNNParameter(3, 5, "auto"))
self.parameters.append(KNNParameter(3, 7, "auto"))
self.parameters.append(KNNParameter(3, 2, "ball_tree"))
self.parameters.append(KNNParameter(3, 2, "kd_tree"))
self.parameters.append(KNNParameter(3, 2, "brute"))
self.parameters.append(KNNParameter(5, 2, "ball_tree"))
self.parameters.append(KNNParameter(5, 2, "kd_tree"))
self.parameters.append(KNNParameter(5, 2, "brute"))
self.parameters.append(KNNParameter(7, 2, "brute"))
self.parameters.append(KNNParameter(3, 3, "auto"))
self.parameters.append(KNNParameter(3, 5, "auto"))
self.parameters.append(KNNParameter(3, 7, "auto"))
self.parameters.append(KNNParameter(3, 2, "ball_tree"))
self.parameters.append(KNNParameter(3, 2, "kd_tree"))
self.parameters.append(KNNParameter(3, 2, "brute"))
self.parameters.append(KNNParameter(5, 2, "ball_tree"))
self.parameters.append(KNNParameter(5, 2, "kd_tree"))
self.parameters.append(KNNParameter(5, 2, "brute"))
self.parameters.append(KNNParameter(7, 2, "brute"))
self.index = 0
|
helper.py | C00reNUT/Watermark-Removal-Pytorch | 189 | 11068278 | <gh_stars>100-1000
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import torch
from torchvision.utils import make_grid
def pil_to_np_array(pil_image):
ar = np.array(pil_image)
if len(ar.shape) == 3:
ar = ar.transpose(2,0,1)
else:
ar = ar[None, ...]
return ar.astype(np.float32) / 255.
def np_to_torch_array(np_array):
return torch.from_numpy(np_array)[None, :]
def torch_to_np_array(torch_array):
return torch_array.detach().cpu().numpy()[0]
def read_image(path, image_size = -1):
pil_image = Image.open(path)
return pil_image
def crop_image(image, crop_factor = 64):
shape = (image.size[0] - image.size[0] % crop_factor, image.size[1] - image.size[1] % crop_factor)
    bbox = [int((image.size[0] - shape[0])/2), int((image.size[1] - shape[1])/2), int((image.size[0] + shape[0])/2), int((image.size[1] + shape[1])/2)]
return image.crop(bbox)
def get_image_grid(images, nrow = 3):
torch_images = [torch.from_numpy(x) for x in images]
grid = make_grid(torch_images, nrow)
return grid.numpy()
def visualize_sample(*images_np, nrow = 3, size_factor = 10):
c = max(x.shape[0] for x in images_np)
images_np = [x if (x.shape[0] == c) else np.concatenate([x, x, x], axis = 0) for x in images_np]
grid = get_image_grid(images_np, nrow)
plt.figure(figsize = (len(images_np) + size_factor, 12 + size_factor))
plt.axis('off')
plt.imshow(grid.transpose(1, 2, 0))
plt.show()
def max_dimension_resize(image_pil, mask_pil, max_dim):
w, h = image_pil.size
aspect_ratio = w / h
if w > max_dim:
h = int((h / w) * max_dim)
w = max_dim
elif h > max_dim:
w = int((w / h) * max_dim)
h = max_dim
return image_pil.resize((w, h)), mask_pil.resize((w, h))
def preprocess_images(image_path, mask_path, max_dim):
image_pil = read_image(image_path).convert('RGB')
mask_pil = read_image(mask_path).convert('RGB')
image_pil, mask_pil = max_dimension_resize(image_pil, mask_pil, max_dim)
image_np = pil_to_np_array(image_pil)
mask_np = pil_to_np_array(mask_pil)
print('Visualizing mask overlap...')
visualize_sample(image_np, mask_np, image_np * mask_np, nrow = 3, size_factor = 10)
return image_np, mask_np |
src/gimelstudio/utils/drawing.py | iwoithe/GimelStudio | 134 | 11068280 | # ----------------------------------------------------------------------------
# Gimel Studio Copyright 2019-2022 by <NAME> and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import wx
def DrawCheckerBoard(dc, rect, checkcolor, box=1):
""" Draws a checkerboard pattern on a wx.DC. Useful for
Alpha channel backgrounds, etc.
NOTE: Seems to only work with the wx.DC
"""
y = rect.y
dc.SetPen(wx.Pen(checkcolor))
dc.SetBrush(wx.Brush(checkcolor))
dc.SetClippingRegion(rect)
while y < rect.height:
x = box * ((y // box) % 2) + 2
while x < rect.width:
dc.DrawRectangle(x, y, box, box)
x += box * 2
y += box
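# Minimal usage sketch (assumes it is called from a wx paint handler; the box
# size and colour below are illustrative, not project defaults):
#
#   def OnPaint(self, event):
#       dc = wx.PaintDC(self)
#       DrawCheckerBoard(dc, self.GetClientRect(), wx.Colour(112, 112, 112), box=8)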
|
src/genie/libs/parser/iosxe/tests/ShowPlatformSoftwareYangManagementProcessMonitor/cli/equal/golden_output_expected.py | balmasea/genieparser | 204 | 11068284 | <filename>src/genie/libs/parser/iosxe/tests/ShowPlatformSoftwareYangManagementProcessMonitor/cli/equal/golden_output_expected.py
expected_output = {
'pid': {
2357: {
'cpu': 0.4,
'elapsed': '21:44:31',
'mem': 2.8,
'command': 'confd',
'rss': 224620,
'state': 'S',
'vsz': 1969908
},
2361: {
'cpu': 0.0,
'elapsed': '21:44:31',
'mem': 0.0,
'command': 'confd-startup.s',
'rss': 4268,
'state': 'S',
'vsz': 6180
},
2383: {
'cpu': 0.0,
'elapsed': '21:44:31',
'mem': 0.0,
'command': 'confd-startup.s',
'rss': 3900,
'state': 'S',
'vsz': 6180
},
32428: {
'cpu': 0.0,
'elapsed': '21:44:33',
'mem': 0.0,
'command': 'confd-startup.s',
'rss': 5652,
'state': 'S',
'vsz': 6180
},
551: {
'cpu': 0.0,
'elapsed': '21:44:33',
'mem': 0.6,
'command': 'dmiauthd',
'rss': 49600,
'state': 'S',
'vsz': 376940
},
1503: {
'cpu': 0.0,
'elapsed': '21:44:32',
'mem': 0.2,
'command': 'ncsshd',
'rss': 17592,
'state': 'S',
'vsz': 301344
},
7890: {
'cpu': 0.0,
'elapsed': '24:11',
'mem': 0.2,
'command': 'ncsshd',
'rss': 16492,
'state': 'S',
'vsz': 301344
},
7934: {
'cpu': 0.0,
'elapsed': '24:11',
'mem': 0.1,
'command': 'ncsshd',
'rss': 8392,
'state': 'S',
'vsz': 301344
},
1013: {
'cpu': 0.0,
'elapsed': '21:44:32',
'mem': 0.1,
'command': 'ncsshd_bp',
'rss': 15240,
'state': 'S',
'vsz': 173748
},
32652: {
'cpu': 0.2,
'elapsed': '21:44:33',
'mem': 2.4,
'command': 'ndbmand',
'rss': 188488,
'state': 'S',
'vsz': 1552136
},
8050: {
'cpu': 0.0,
'elapsed': '1-01:47:38',
'mem': 0.1,
'command': 'nginx',
'rss': 13412,
'state': 'S',
'vsz': 106280
},
8060: {
'cpu': 0.0,
'elapsed': '1-01:47:38',
'mem': 0.0,
'command': 'nginx',
'rss': 6980,
'state': 'S',
'vsz': 106796
},
8061: {
'cpu': 0.0,
'elapsed': '1-01:47:38',
'mem': 0.0,
'command': 'nginx',
'rss': 6444,
'state': 'S',
'vsz': 57556
},
30464: {
'cpu': 0.0,
'elapsed': '1-01:48:15',
'mem': 2.6,
'command': 'pubd',
'rss': 208436,
'state': 'S',
'vsz': 1976156
}
}
}
|
lulujianjie/0001/0001.py | saurabh896/python-1 | 3,976 | 11068300 | <gh_stars>1000+
import uuid
uuids = []
for i in range(200):#0-199
uuids.append(uuid.uuid1())#append把value当作一个整体插入list
i = 0;
for i in range(200):
print uuids[i]
|
alipay/aop/api/domain/AlipayCommerceLogisticsWaybillInstantdeliverySettleModel.py | antopen/alipay-sdk-python-all | 213 | 11068308 | <reponame>antopen/alipay-sdk-python-all
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommerceLogisticsWaybillInstantdeliverySettleModel(object):
def __init__(self):
self._logistics_code = None
self._order_no = None
self._out_settle_request_no = None
self._waybill_no = None
self._waybill_status = None
@property
def logistics_code(self):
return self._logistics_code
@logistics_code.setter
def logistics_code(self, value):
self._logistics_code = value
@property
def order_no(self):
return self._order_no
@order_no.setter
def order_no(self, value):
self._order_no = value
@property
def out_settle_request_no(self):
return self._out_settle_request_no
@out_settle_request_no.setter
def out_settle_request_no(self, value):
self._out_settle_request_no = value
@property
def waybill_no(self):
return self._waybill_no
@waybill_no.setter
def waybill_no(self, value):
self._waybill_no = value
@property
def waybill_status(self):
return self._waybill_status
@waybill_status.setter
def waybill_status(self, value):
self._waybill_status = value
def to_alipay_dict(self):
params = dict()
if self.logistics_code:
if hasattr(self.logistics_code, 'to_alipay_dict'):
params['logistics_code'] = self.logistics_code.to_alipay_dict()
else:
params['logistics_code'] = self.logistics_code
if self.order_no:
if hasattr(self.order_no, 'to_alipay_dict'):
params['order_no'] = self.order_no.to_alipay_dict()
else:
params['order_no'] = self.order_no
if self.out_settle_request_no:
if hasattr(self.out_settle_request_no, 'to_alipay_dict'):
params['out_settle_request_no'] = self.out_settle_request_no.to_alipay_dict()
else:
params['out_settle_request_no'] = self.out_settle_request_no
if self.waybill_no:
if hasattr(self.waybill_no, 'to_alipay_dict'):
params['waybill_no'] = self.waybill_no.to_alipay_dict()
else:
params['waybill_no'] = self.waybill_no
if self.waybill_status:
if hasattr(self.waybill_status, 'to_alipay_dict'):
params['waybill_status'] = self.waybill_status.to_alipay_dict()
else:
params['waybill_status'] = self.waybill_status
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayCommerceLogisticsWaybillInstantdeliverySettleModel()
if 'logistics_code' in d:
o.logistics_code = d['logistics_code']
if 'order_no' in d:
o.order_no = d['order_no']
if 'out_settle_request_no' in d:
o.out_settle_request_no = d['out_settle_request_no']
if 'waybill_no' in d:
o.waybill_no = d['waybill_no']
if 'waybill_status' in d:
o.waybill_status = d['waybill_status']
return o
|
packages/pytea/pylib/torch/utils/data/__init__.py | Sehun0819/pytea | 241 | 11068320 | from .sampler import Sampler
from .dataset import Dataset, Subset, TensorDataset, random_split
from .dataloader import DataLoader
from . import distributed as distributed
|
lib/soup.py | ismisepaul/InSpy | 267 | 11068326 | <filename>lib/soup.py
import BeautifulSoup, json
def soupify(response):
try:
soupd = BeautifulSoup.BeautifulSoup(response)
return soupd
except (AttributeError, TypeError) as e:
pass
except Exception as e:
print "Error: {}".format(e)
def get_employees(soup):
try:
employees = {}
for n, t in zip(soup.findAll('a', {"class": "professional__name"}), soup.findAll("p", {"class" : "professional__headline"})):
name = n.getText().encode('ascii','ignore')
title = t.getText().encode('ascii','ignore')
if name and title:
employees[name] = title
return employees
except (AttributeError, TypeError) as e:
pass
except Exception as e:
print "Error: {}".format(e)
|
tests/conftest.py | simik13/tfs | 106 | 11068340 | <reponame>simik13/tfs
# -*- coding: utf-8 -*-
import os
import re
from urllib.parse import urlparse
import httpretty
import pytest
from tfs import TFSAPI
def request_callback_get(request, uri, headers):
# Map path from url to a file
path = urlparse(uri).path.split('DefaultCollection/')[1]
response_file = os.path.normpath('tests/resources/{}'.format(path))
response_file = os.path.join(response_file, 'response.json')
if os.path.exists(response_file):
code = 200
response = open(response_file, mode='r', encoding="utf-8-sig").read()
else:
code = 404
response = "Cannot find file {}".format(response_file)
return code, headers, response
@pytest.fixture(autouse=True)
def tfs_server_mock():
for method in (httpretty.GET, httpretty.POST, httpretty.PUT, httpretty.PATCH):
httpretty.register_uri(method, re.compile(r"http://.*/DefaultCollection/.*"),
body=request_callback_get,
content_type="application/json")
@pytest.fixture()
def tfsapi():
client = TFSAPI("http://tfs.tfs.ru/tfs", 'DefaultCollection/MyProject', 'username', 'password')
yield client
|
tests/server/middleware/test_ui.py | monosidev/monosi | 156 | 11068348 | import pytest
import server.middleware.ui
def test__build_path():
pass
def test__serve_ui():
pass
def test_init_ui_with_serve_ui_config_true():
pass
def test_init_ui_with_serve_ui_config_false():
pass
|
Src/StdLib/Lib/site-packages/win32comext/axscript/client/pyscript.py | cwensley/ironpython2 | 1,078 | 11068354 | """Python ActiveX Scripting Implementation
This module implements the Python ActiveX Scripting client.
To register the implementation, simply "run" this Python program - ie
either double-click on it, or run "python.exe pyscript.py" from the
command line.
"""
import winerror
import win32com
import win32api
import pythoncom
import sys
import traceback
import re
import win32com.client.dynamic
from win32com.axscript.client import framework, scriptdispatch
from win32com.axscript import axscript
import win32com.server.register
from win32com.axscript.client.framework import \
RaiseAssert, trace, Exception, SCRIPTTEXT_FORCEEXECUTION, \
SCRIPTTEXT_ISEXPRESSION, SCRIPTTEXT_ISPERSISTENT
PyScript_CLSID = "{DF630910-1C1D-11d0-AE36-8C0F5E000000}"
debugging_attr = 0
def debug_attr_print(*args):
if debugging_attr:
trace(*args)
def ExpandTabs(text):
return re.sub('\t',' ', text)
def AddCR(text):
return re.sub('\n','\r\n',text)
class AXScriptCodeBlock(framework.AXScriptCodeBlock):
def GetDisplayName(self):
return "PyScript - " + framework.AXScriptCodeBlock.GetDisplayName(self)
# There is only ever _one_ ax object - it exists in the global namespace
# for all script items.
# It performs a search from all global/visible objects
# down.
# This means that if 2 sub-objects of the same name are used
# then only one is ever reachable using the ax shortcut.
class AXScriptAttribute:
"An attribute in a scripts namespace."
def __init__(self, engine):
self.__dict__['_scriptEngine_'] = engine
def __getattr__(self, attr):
if attr[1]=="_" and attr[:-1]=="_":
raise AttributeError(attr)
rc = self._FindAttribute_(attr)
if rc is None:
raise AttributeError(attr)
return rc
def _Close_(self):
self.__dict__['_scriptEngine_'] = None
def _DoFindAttribute_(self, obj, attr):
try:
return obj.subItems[attr.lower()].attributeObject
except KeyError:
pass
# Check out the sub-items
for item in obj.subItems.itervalues():
try:
return self._DoFindAttribute_(item, attr)
except AttributeError:
pass
raise AttributeError(attr)
def _FindAttribute_(self, attr):
for item in self._scriptEngine_.subItems.itervalues():
try:
return self._DoFindAttribute_(item, attr)
except AttributeError:
pass
# All else fails, see if it is a global
# (mainly b/w compat)
return getattr(self._scriptEngine_.globalNameSpaceModule, attr)
# raise AttributeError(attr)
class NamedScriptAttribute:
"An explicitely named object in an objects namespace"
# Each named object holds a reference to one of these.
# Whenever a sub-item appears in a namespace, it is really one of these
# objects. Has a circular reference back to the item itself, which is
# closed via _Close_()
def __init__(self, scriptItem):
self.__dict__['_scriptItem_'] = scriptItem
def __repr__(self):
return "<NamedItemAttribute" + repr(self._scriptItem_) + ">"
def __getattr__(self, attr):
# If a known subitem, return it.
try:
return self._scriptItem_.subItems[attr.lower()].attributeObject
except KeyError:
# Otherwise see if the dispatch can give it to us
if self._scriptItem_.dispatchContainer:
return getattr(self._scriptItem_.dispatchContainer,attr)
raise AttributeError(attr)
def __setattr__(self, attr, value):
# XXX - todo - if a known item, then should call its default
# dispatch method.
attr=attr.lower()
if self._scriptItem_.dispatchContainer:
try:
return setattr(self._scriptItem_.dispatchContainer,attr, value)
except AttributeError:
pass
raise AttributeError(attr)
def _Close_(self):
self.__dict__['_scriptItem_'] = None
class ScriptItem(framework.ScriptItem):
def __init__(self, parentItem, name, dispatch, flags):
framework.ScriptItem.__init__(self, parentItem, name, dispatch, flags)
self.scriptlets = {}
self.attributeObject = None
def Reset(self):
framework.ScriptItem.Reset(self)
if self.attributeObject:
self.attributeObject._Close_()
self.attributeObject = None
def Close(self):
framework.ScriptItem.Close(self) # calls reset.
self.dispatchContainer = None
self.scriptlets = {}
def Register(self):
framework.ScriptItem.Register(self)
self.attributeObject = NamedScriptAttribute(self)
if self.dispatch:
# Need to avoid the new Python "lazy" dispatch behaviour.
try:
engine = self.GetEngine()
olerepr = clsid = None
typeinfo = self.dispatch.GetTypeInfo()
clsid = typeinfo.GetTypeAttr()[0]
try:
olerepr = engine.mapKnownCOMTypes[clsid]
except KeyError:
pass
except pythoncom.com_error:
typeinfo = None
if olerepr is None:
olerepr = win32com.client.dynamic.MakeOleRepr(self.dispatch, typeinfo, None)
if clsid is not None:
engine.mapKnownCOMTypes[clsid] = olerepr
self.dispatchContainer = win32com.client.dynamic.CDispatch(self.dispatch, olerepr, self.name)
# self.dispatchContainer = win32com.client.dynamic.Dispatch(self.dispatch, userName = self.name)
# self.dispatchContainer = win32com.client.dynamic.DumbDispatch(self.dispatch, userName = self.name)
# def Connect(self):
# framework.ScriptItem.Connect(self)
# def Disconnect(self):
# framework.ScriptItem.Disconnect(self)
class PyScript(framework.COMScript):
# Setup the auto-registration stuff...
_reg_verprogid_ = "Python.AXScript.2"
_reg_progid_ = "Python"
# _reg_policy_spec_ = default
_reg_catids_ = [axscript.CATID_ActiveScript,axscript.CATID_ActiveScriptParse]
_reg_desc_ = "Python ActiveX Scripting Engine"
_reg_clsid_ = PyScript_CLSID
_reg_class_spec_ = "win32com.axscript.client.pyscript.PyScript"
_reg_remove_keys_ = [(".pys",), ("pysFile",)]
_reg_threading_ = "both"
def __init__(self):
framework.COMScript.__init__(self)
self.globalNameSpaceModule = None
self.codeBlocks = []
self.scriptDispatch = None
def InitNew(self):
framework.COMScript.InitNew(self)
import imp
self.scriptDispatch = None
self.globalNameSpaceModule = imp.new_module("__ax_main__")
self.globalNameSpaceModule.__dict__['ax'] = AXScriptAttribute(self)
self.codeBlocks = []
self.persistedCodeBlocks = []
self.mapKnownCOMTypes = {} # Map of known CLSID to typereprs
self.codeBlockCounter = 0
def Stop(self):
# Flag every pending script as already done
for b in self.codeBlocks:
b.beenExecuted = 1
return framework.COMScript.Stop(self)
def Reset(self):
# Reset all code-blocks that are persistent, and discard the rest
oldCodeBlocks = self.codeBlocks[:]
self.codeBlocks = []
for b in oldCodeBlocks:
if b.flags & SCRIPTTEXT_ISPERSISTENT:
b.beenExecuted = 0
self.codeBlocks.append(b)
return framework.COMScript.Reset(self)
def _GetNextCodeBlockNumber(self):
self.codeBlockCounter = self.codeBlockCounter + 1
return self.codeBlockCounter
def RegisterNamedItem(self, item):
wasReg = item.isRegistered
framework.COMScript.RegisterNamedItem(self, item)
if not wasReg:
# Insert into our namespace.
# Add every item by name
if item.IsVisible():
self.globalNameSpaceModule.__dict__[item.name] = item.attributeObject
if item.IsGlobal():
# Global items means sub-items are also added...
for subitem in item.subItems.itervalues():
self.globalNameSpaceModule.__dict__[subitem.name] = subitem.attributeObject
# Also add all methods
for name, entry in item.dispatchContainer._olerepr_.mapFuncs.iteritems():
if not entry.hidden:
self.globalNameSpaceModule.__dict__[name] = getattr(item.dispatchContainer,name)
def DoExecutePendingScripts(self):
try:
globs = self.globalNameSpaceModule.__dict__
for codeBlock in self.codeBlocks:
if not codeBlock.beenExecuted:
if self.CompileInScriptedSection(codeBlock, "exec"):
self.ExecInScriptedSection(codeBlock, globs)
finally:
pass
def DoRun(self):
pass
def Close(self):
self.ResetNamespace()
self.globalNameSpaceModule = None
self.codeBlocks = []
self.scriptDispatch = None
framework.COMScript.Close(self)
def GetScriptDispatch(self, name):
# trace("GetScriptDispatch with", name)
# if name is not None: return None
if self.scriptDispatch is None:
self.scriptDispatch = scriptdispatch.MakeScriptDispatch(self, self.globalNameSpaceModule)
return self.scriptDispatch
def MakeEventMethodName(self, subItemName, eventName):
return subItemName[0].upper()+subItemName[1:] + "_" + eventName[0].upper()+eventName[1:]
def DoAddScriptlet(self, defaultName, code, itemName, subItemName, eventName, delimiter,sourceContextCookie, startLineNumber):
# Just store the code away - compile when called. (JIT :-)
item = self.GetNamedItem(itemName)
if itemName==subItemName: # Explicit handlers - eg <SCRIPT LANGUAGE="Python" for="TestForm" Event="onSubmit">
subItem = item
else:
subItem = item.GetCreateSubItem(item, subItemName, None, None)
funcName = self.MakeEventMethodName(subItemName, eventName)
codeBlock = AXScriptCodeBlock("Script Event %s" %funcName, code, sourceContextCookie, startLineNumber, 0)
self._AddScriptCodeBlock(codeBlock)
subItem.scriptlets[funcName] = codeBlock
def DoProcessScriptItemEvent(self, item, event, lcid, wFlags, args):
# trace("ScriptItemEvent", self, item, event, event.name, lcid, wFlags, args)
funcName = self.MakeEventMethodName(item.name, event.name)
codeBlock = function = None
try:
function = item.scriptlets[funcName]
if type(function)==type(self): # ie, is a CodeBlock instance
codeBlock = function
function = None
except KeyError:
pass
if codeBlock is not None:
realCode = "def %s():\n" % funcName
for line in framework.RemoveCR(codeBlock.codeText).split("\n"):
realCode = realCode + '\t' + line + '\n'
realCode = realCode + '\n'
if not self.CompileInScriptedSection(codeBlock, "exec", realCode):
return
dict = {}
self.ExecInScriptedSection(codeBlock, self.globalNameSpaceModule.__dict__, dict)
function = dict[funcName]
# cache back in scriptlets as a function.
item.scriptlets[funcName] = function
if function is None:
# still no function - see if in the global namespace.
try:
function = self.globalNameSpaceModule.__dict__[funcName]
except KeyError:
# Not there _exactly_ - do case ins search.
funcNameLook = funcName.lower()
for attr in self.globalNameSpaceModule.__dict__.iterkeys():
if funcNameLook==attr.lower():
function = self.globalNameSpaceModule.__dict__[attr]
# cache back in scriptlets, to avoid this overhead next time
item.scriptlets[funcName] = function
if function is None:
raise Exception(scode=winerror.DISP_E_MEMBERNOTFOUND)
return self.ApplyInScriptedSection(codeBlock, function, args)
def DoParseScriptText(self, code, sourceContextCookie, startLineNumber, bWantResult, flags):
code = framework.RemoveCR(code) + "\n"
if flags & SCRIPTTEXT_ISEXPRESSION:
name = "Script Expression"
exec_type = "eval"
else:
name = "Script Block"
exec_type = "exec"
num = self._GetNextCodeBlockNumber()
if num==1: num=""
name = "%s %s" % (name, num)
codeBlock = AXScriptCodeBlock(name, code, sourceContextCookie, startLineNumber, flags)
self._AddScriptCodeBlock(codeBlock)
globs = self.globalNameSpaceModule.__dict__
if bWantResult: # always immediate.
if self.CompileInScriptedSection(codeBlock, exec_type):
if flags & SCRIPTTEXT_ISEXPRESSION:
return self.EvalInScriptedSection(codeBlock, globs)
else:
return self.ExecInScriptedSection(codeBlock, globs)
# else compile failed, but user chose to keep running...
else:
if flags & SCRIPTTEXT_FORCEEXECUTION:
if self.CompileInScriptedSection(codeBlock, exec_type):
self.ExecInScriptedSection(codeBlock, globs)
else:
self.codeBlocks.append(codeBlock)
def GetNamedItemClass(self):
return ScriptItem
def ResetNamespace(self):
if self.globalNameSpaceModule is not None:
try:
self.globalNameSpaceModule.ax._Reset_()
except AttributeError:
pass # ???
globalNameSpaceModule = None
def DllRegisterServer():
klass=PyScript
win32com.server.register._set_subkeys(klass._reg_progid_ + "\\OLEScript", {}) # Just a CreateKey
# Basic Registration for wsh.
win32com.server.register._set_string(".pys", "pysFile")
win32com.server.register._set_string("pysFile\\ScriptEngine", klass._reg_progid_)
guid_wsh_shellex = "{60254CA5-953B-11CF-8C96-00AA00B8708C}"
win32com.server.register._set_string("pysFile\\ShellEx\\DropHandler", guid_wsh_shellex)
win32com.server.register._set_string("pysFile\\ShellEx\\PropertySheetHandlers\\WSHProps", guid_wsh_shellex)
def Register(klass=PyScript):
import sys
ret = win32com.server.register.UseCommandLine(klass,
finalize_register=DllRegisterServer)
return ret
if __name__=='__main__':
Register() |
d3pm/images/utils.py | xxdreck/google-research | 23,901 | 11068357 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for categorical diffusion and training loop."""
import functools
from absl import logging
import flax
import jax
import jax.numpy as jnp
import ml_collections
import numpy as onp
import PIL
import tensorflow.compat.v2 as tf
# for left-multiplication for RGB -> Y'PbPr
RGB_TO_YUV = onp.array([[0.29900, -0.16874, 0.50000],
[0.58700, -0.33126, -0.41869],
[0.11400, 0.50000, -0.08131]])
def normalize_data(x, mode=None):
if mode is None or mode == 'rgb':
return x / 127.5 - 1.
elif mode == 'rgb_unit_var':
return 2. * normalize_data(x, mode='rgb')
elif mode == 'yuv':
return (x / 127.5 - 1.).dot(RGB_TO_YUV)
else:
raise NotImplementedError(mode)
def log_min_exp(a, b, epsilon=1.e-6):
"""Computes the log(exp(a) - exp(b)) (b<a) in a numerically stable fashion."""
y = a + jnp.log1p(-jnp.exp(b - a) + epsilon)
return y
def sample_categorical(logits, uniform_noise):
"""Samples from a categorical distribution.
Args:
logits: logits that determine categorical distributions. Shape should be
broadcastable under addition with noise shape, and of the form (...,
num_classes).
uniform_noise: uniform noise in range [0, 1). Shape: (..., num_classes).
Returns:
samples: samples.shape == noise.shape, with samples.shape[-1] equal to
num_classes.
"""
# For numerical precision clip the noise to a minimum value
uniform_noise = jnp.clip(
uniform_noise, a_min=jnp.finfo(uniform_noise.dtype).tiny, a_max=1.)
gumbel_noise = -jnp.log(-jnp.log(uniform_noise))
sample = jnp.argmax(logits + gumbel_noise, axis=-1)
return jax.nn.one_hot(sample, num_classes=logits.shape[-1])
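# A minimal sketch of the Gumbel-max sampling above (shapes and the PRNG seed
# are illustrative):
#
#   key = jax.random.PRNGKey(0)
#   logits = jnp.zeros((2, 5))                        # batch of 2, 5 classes
#   noise = jax.random.uniform(key, logits.shape)     # U[0, 1) noise
#   onehot = sample_categorical(logits, noise)        # (2, 5) one-hot samples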
def categorical_kl_logits(logits1, logits2, eps=1.e-6):
"""KL divergence between categorical distributions.
Distributions parameterized by logits.
Args:
logits1: logits of the first distribution. Last dim is class dim.
logits2: logits of the second distribution. Last dim is class dim.
eps: float small number to avoid numerical issues.
Returns:
KL(C(logits1) || C(logits2)): shape: logits1.shape[:-1]
"""
out = (
jax.nn.softmax(logits1 + eps, axis=-1) *
(jax.nn.log_softmax(logits1 + eps, axis=-1) -
jax.nn.log_softmax(logits2 + eps, axis=-1)))
return jnp.sum(out, axis=-1)
def categorical_kl_probs(probs1, probs2, eps=1.e-6):
"""KL divergence between categorical distributions.
Distributions parameterized by logits.
Args:
probs1: probs of the first distribution. Last dim is class dim.
probs2: probs of the second distribution. Last dim is class dim.
eps: float small number to avoid numerical issues.
Returns:
KL(C(probs) || C(logits2)): shape: logits1.shape[:-1]
"""
out = probs1 * (jnp.log(probs1 + eps) - jnp.log(probs2 + eps))
return jnp.sum(out, axis=-1)
def categorical_log_likelihood(x, logits):
"""Log likelihood of a discretized Gaussian specialized for image data.
Assumes data `x` consists of integers [0, num_classes-1].
Args:
x: where to evaluate the distribution. shape = (bs, ...), dtype=int32/int64
logits: logits, shape = (bs, ..., num_classes)
Returns:
log likelihoods
"""
log_probs = jax.nn.log_softmax(logits)
x_onehot = jax.nn.one_hot(x, logits.shape[-1])
return jnp.sum(log_probs * x_onehot, axis=-1)
def meanflat(x):
"""Take the mean over all axes except the first batch dimension."""
return x.mean(axis=tuple(range(1, len(x.shape))))
def global_norm(pytree):
return jnp.sqrt(
jnp.sum(
jnp.asarray([jnp.sum(jnp.square(x)) for x in jax.tree_leaves(pytree)
])))
@functools.partial(jax.jit, static_argnums=(2,))
def _foldin_and_split(rng, foldin_data, num):
return jax.random.split(jax.random.fold_in(rng, foldin_data), num)
def jax_randint(key, minval=0, maxval=2**20):
return int(jax.random.randint(key, shape=(), minval=minval, maxval=maxval))
class RngGen(object):
"""Random number generator state utility for Jax."""
def __init__(self, init_rng):
self._base_rng = init_rng
self._counter = 0
def __iter__(self):
return self
def __next__(self):
return self.advance(1)
def advance(self, count):
self._counter += count
return jax.random.fold_in(self._base_rng, self._counter)
def split(self, num):
self._counter += 1
return _foldin_and_split(self._base_rng, self._counter, num)
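# Minimal usage sketch of RngGen (the seed value is illustrative):
#
#   rng = RngGen(jax.random.PRNGKey(0))
#   step_rng = next(rng)        # a fresh key derived via fold_in
#   k1, k2 = rng.split(2)       # two independent keys for the current step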
def clip_by_global_norm(pytree, clip_norm, use_norm=None):
if use_norm is None:
use_norm = global_norm(pytree)
# assert use_norm.shape == ()
assert not use_norm.shape
scale = clip_norm * jnp.minimum(1.0 / use_norm, 1.0 / clip_norm)
return jax.tree_map(lambda x: x * scale, pytree), use_norm
def apply_ema(decay, avg, new):
return jax.tree_multimap(lambda a, b: decay * a + (1. - decay) * b, avg, new)
def count_params(pytree):
return sum([x.size for x in jax.tree_leaves(pytree)])
def copy_pytree(pytree):
return jax.tree_map(jnp.array, pytree)
def dist(fn, accumulate, axis_name='batch'):
"""Wrap a function in pmap and device_get(unreplicate(.)) its return value."""
if accumulate == 'concat':
accumulate_fn = functools.partial(
allgather_and_reshape, axis_name=axis_name)
elif accumulate == 'mean':
accumulate_fn = functools.partial(jax.lax.pmean, axis_name=axis_name)
elif accumulate == 'none':
accumulate_fn = None
else:
raise NotImplementedError(accumulate)
@functools.partial(jax.pmap, axis_name=axis_name)
def pmapped_fn(*args, **kwargs):
out = fn(*args, **kwargs)
return out if accumulate_fn is None else jax.tree_map(accumulate_fn, out)
def wrapper(*args, **kwargs):
return jax.device_get(
flax.jax_utils.unreplicate(pmapped_fn(*args, **kwargs)))
return wrapper
def allgather_and_reshape(x, axis_name='batch'):
"""Allgather and merge the newly inserted axis w/ the original batch axis."""
y = jax.lax.all_gather(x, axis_name=axis_name)
assert y.shape[1:] == x.shape
return y.reshape(y.shape[0] * x.shape[0], *x.shape[1:])
def write_config_json(config, path):
if tf.io.gfile.exists(path):
return
with tf.io.gfile.GFile(path, 'w') as f:
f.write(config.to_json_best_effort(sort_keys=True, indent=4) + '\n')
def tf_to_numpy(tf_batch):
"""TF to NumPy, using ._numpy() to avoid copy."""
# pylint: disable=protected-access
return jax.tree_map(
lambda x: x._numpy() if hasattr(x, '_numpy') else x,
tf_batch)
def numpy_iter(tf_dataset):
return map(tf_to_numpy, iter(tf_dataset))
@functools.partial(jax.pmap, axis_name='batch')
def _check_synced(pytree):
mins = jax.lax.pmin(pytree, axis_name='batch')
equals = jax.tree_multimap(jnp.array_equal, pytree, mins)
return jnp.all(jnp.asarray(jax.tree_leaves(equals)))
def assert_synced(pytree):
"""Check that `pytree` is the same across all replicas.
Args:
pytree: the pytree to check (should be replicated)
Raises:
RuntimeError: if sync check failed
"""
# assert_synced.problem = pytree
# raise NotImplementedError()
equals = _check_synced(pytree)
assert equals.shape == (jax.local_device_count(),)
equals = all(jax.device_get(equals)) # no unreplicate
logging.info('Sync check result: %d', equals)
if not equals:
raise RuntimeError('Sync check failed!')
@functools.partial(jax.pmap, axis_name='i')
def _barrier(x):
return jax.lax.psum(x, axis_name='i')
def barrier():
"""MPI-like barrier."""
jax.device_get(_barrier(jnp.ones((jax.local_device_count(),))))
def np_tile_imgs(imgs, *, pad_pixels=1, pad_val=255, num_col=0):
"""NumPy utility: tile a batch of images into a single image.
Args:
imgs: np.ndarray: a uint8 array of images of shape [n, h, w, c]
pad_pixels: int: number of pixels of padding to add around each image
pad_val: int: padding value
num_col: int: number of columns in the tiling; defaults to a square
Returns:
np.ndarray: one tiled image: a uint8 array of shape [H, W, c]
"""
if pad_pixels < 0:
raise ValueError('Expected pad_pixels >= 0')
if not 0 <= pad_val <= 255:
raise ValueError('Expected pad_val in [0, 255]')
imgs = onp.asarray(imgs)
if imgs.dtype != onp.uint8:
raise ValueError('Expected uint8 input')
# if imgs.ndim == 3:
# imgs = imgs[..., None]
n, h, w, c = imgs.shape
if c not in [1, 3]:
raise ValueError('Expected 1 or 3 channels')
if num_col <= 0:
# Make a square
ceil_sqrt_n = int(onp.ceil(onp.sqrt(float(n))))
num_row = ceil_sqrt_n
num_col = ceil_sqrt_n
else:
# Make a B/num_per_row x num_per_row grid
assert n % num_col == 0
num_row = int(onp.ceil(n / num_col))
imgs = onp.pad(
imgs,
pad_width=((0, num_row * num_col - n), (pad_pixels, pad_pixels),
(pad_pixels, pad_pixels), (0, 0)),
mode='constant',
constant_values=pad_val)
h, w = h + 2 * pad_pixels, w + 2 * pad_pixels
imgs = imgs.reshape(num_row, num_col, h, w, c)
imgs = imgs.transpose(0, 2, 1, 3, 4)
imgs = imgs.reshape(num_row * h, num_col * w, c)
if pad_pixels > 0:
imgs = imgs[pad_pixels:-pad_pixels, pad_pixels:-pad_pixels, :]
if c == 1:
imgs = imgs[Ellipsis, 0]
return imgs
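# Shape sketch (illustrative numbers): 16 images of shape (32, 32, 3) with
# pad_pixels=1 tile into a 4x4 grid; each padded tile is 34x34, and trimming
# the outer border yields a single (134, 134, 3) uint8 image.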
def save_tiled_imgs(filename, imgs, pad_pixels=1, pad_val=255, num_col=0):
PIL.Image.fromarray(
np_tile_imgs(
imgs, pad_pixels=pad_pixels, pad_val=pad_val,
num_col=num_col)).save(filename)
|
scripts/flattening_thresh.py | clairechingching/ScaffCC | 158 | 11068365 | <reponame>clairechingching/ScaffCC
#!/usr/bin/env python
import math
import argparse
def genFlattenModules(benchName):
fn = benchName+'.out'
f = open(fn,'r')
r = f.read().split('\n')
f.close()
l = filter(lambda x: (len(x)>1), r)
#print l
m = map(lambda x: x.replace(':',''), l)
m = map(lambda x: x.split(), m)
#print m
vals = map(lambda x: int(x[1]), m)
#print vals
numVals = len(vals)
print '\t[flattening_thresh.py] Total Num of Functions = ',numVals
names = ['000k','001k','010k','125k','300k','1M','2M','25M']
buckets = [(0,0),(0,1000),(1000,10000),(10000,125000),(125000,300000),(300000,1000000),(1000000,2000000),(2000000,25000000)]
numBuckets = len(buckets)
histVals = []
for i in range(numBuckets):
n = filter(lambda x: (int(x[1])>=buckets[i][0]) and (int(x[1])<buckets[i][1]), m)
histVals.append(len(n))
sumFunc = 0
for i in range(numBuckets):
print '\t',buckets[i][0],'-',buckets[i][1],' : ',histVals[i]
sumFunc = sumFunc+histVals[i]
print '\t>',buckets[-1][1] ,': ', numVals - sumFunc
for i in range(numBuckets):
can1k = filter(lambda x: (int(x[1])>=0) and (int(x[1])<buckets[i][1]), m)
n1k = map(lambda x: x[0], can1k)
fn = benchName+'.flat'+names[i]+'.txt'
fout = open(fn,'w')
for e in n1k:
fout.write(e)
fout.write('\n')
fout.close()
parser = argparse.ArgumentParser(description='Generate flattened module list for this benchmark')
parser.add_argument("input")
args = parser.parse_args()
genFlattenModules(args.input)
|
projects/UniDet/unidet/data/datasets/mapillary.py | Othinus099/interior | 349 | 11068372 | <reponame>Othinus099/interior
from detectron2.data.datasets.register_coco import register_coco_instances
import os
'''
categories = [
{'id': 28, 'name': 'animal--bird'} ,
{'id': 29, 'name': 'animal--ground-animal'} ,
{'id': 30, 'name': 'construction--flat--crosswalk-plain'} ,
{'id': 31, 'name': 'human--person'} ,
{'id': 32, 'name': 'human--rider--bicyclist'} ,
{'id': 33, 'name': 'human--rider--motorcyclist'} ,
{'id': 34, 'name': 'human--rider--other-rider'} ,
{'id': 35, 'name': 'marking--crosswalk-zebra'} ,
{'id': 36, 'name': 'object--banner'} ,
{'id': 37, 'name': 'object--bench'} ,
{'id': 38, 'name': 'object--bike-rack'} ,
{'id': 39, 'name': 'object--billboard'} ,
{'id': 40, 'name': 'object--catch-basin'} ,
{'id': 41, 'name': 'object--cctv-camera'} ,
{'id': 42, 'name': 'object--fire-hydrant'} ,
{'id': 43, 'name': 'object--junction-box'} ,
{'id': 44, 'name': 'object--mailbox'} ,
{'id': 45, 'name': 'object--manhole'} ,
{'id': 46, 'name': 'object--phone-booth'} ,
{'id': 47, 'name': 'object--street-light'} ,
{'id': 48, 'name': 'object--support--pole'} ,
{'id': 49, 'name': 'object--support--traffic-sign-frame'} ,
{'id': 50, 'name': 'object--support--utility-pole'} ,
{'id': 51, 'name': 'object--traffic-light'} ,
{'id': 52, 'name': 'object--traffic-sign--back'} ,
{'id': 53, 'name': 'object--traffic-sign--front'} ,
{'id': 54, 'name': 'object--trash-can'} ,
{'id': 55, 'name': 'object--vehicle--bicycle'} ,
{'id': 56, 'name': 'object--vehicle--boat'} ,
{'id': 57, 'name': 'object--vehicle--bus'} ,
{'id': 58, 'name': 'object--vehicle--car'} ,
{'id': 59, 'name': 'object--vehicle--caravan'} ,
{'id': 60, 'name': 'object--vehicle--motorcycle'} ,
{'id': 61, 'name': 'object--vehicle--other-vehicle'} ,
{'id': 62, 'name': 'object--vehicle--trailer'} ,
{'id': 63, 'name': 'object--vehicle--truck'} ,
{'id': 64, 'name': 'object--vehicle--wheeled-slow'} ,
]
'''
categories = [
{'id': 1, 'name': 'animal--bird'},
{'id': 2, 'name': 'animal--ground-animal'},
{'id': 9, 'name': 'construction--flat--crosswalk-plain'},
{'id': 20, 'name': 'human--person'},
{'id': 21, 'name': 'human--rider--bicyclist'},
{'id': 22, 'name': 'human--rider--motorcyclist'},
{'id': 23, 'name': 'human--rider--other-rider'},
{'id': 24, 'name': 'marking--crosswalk-zebra'},
{'id': 33, 'name': 'object--banner'},
{'id': 34, 'name': 'object--bench'},
{'id': 35, 'name': 'object--bike-rack'},
{'id': 36, 'name': 'object--billboard'},
{'id': 37, 'name': 'object--catch-basin'},
{'id': 38, 'name': 'object--cctv-camera'},
{'id': 39, 'name': 'object--fire-hydrant'},
{'id': 40, 'name': 'object--junction-box'},
{'id': 41, 'name': 'object--mailbox'},
{'id': 42, 'name': 'object--manhole'},
{'id': 43, 'name': 'object--phone-booth'},
{'id': 45, 'name': 'object--street-light'},
{'id': 46, 'name': 'object--support--pole'},
{'id': 47, 'name': 'object--support--traffic-sign-frame'},
{'id': 48, 'name': 'object--support--utility-pole'},
{'id': 49, 'name': 'object--traffic-light'},
{'id': 50, 'name': 'object--traffic-sign--back'},
{'id': 51, 'name': 'object--traffic-sign--front'},
{'id': 52, 'name': 'object--trash-can'},
{'id': 53, 'name': 'object--vehicle--bicycle'},
{'id': 54, 'name': 'object--vehicle--boat'},
{'id': 55, 'name': 'object--vehicle--bus'},
{'id': 56, 'name': 'object--vehicle--car'},
{'id': 57, 'name': 'object--vehicle--caravan'},
{'id': 58, 'name': 'object--vehicle--motorcycle'},
{'id': 60, 'name': 'object--vehicle--other-vehicle'},
{'id': 61, 'name': 'object--vehicle--trailer'},
{'id': 62, 'name': 'object--vehicle--truck'},
{'id': 63, 'name': 'object--vehicle--wheeled-slow'},
]
def _get_builtin_metadata():
id_to_name = {x['id']: x['name'] for x in categories}
thing_dataset_id_to_contiguous_id = {categories[i]['id']: i for i in range(37)}
thing_classes = [id_to_name[k] for k in sorted(id_to_name)]
return {
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
"thing_classes": thing_classes}
_PREDEFINED_SPLITS = {
"mapillary_train": ("mapillary/training/images/", "mapillary/annotations/training_fix_id.json"),
# "mapillary_train": ("mapillary/training/images/", "mapillary/annotations/training.json"),
"mapillary_val": ("mapillary/validation/images/", "mapillary/annotations/validation_fix_id.json"),
# "mapillary_val": ("mapillary/validation/images/", "mapillary/annotations/validation.json"),
"mapillary_960_train": ("mapillary/training/images960/", "mapillary/annotations/training960_fix_id.json"),
'mapillary_test': ('mapillary/testing/images/', 'mapillary/annotations/test_image_info_fix_id.json')
}
for key, (image_root, json_file) in _PREDEFINED_SPLITS.items():
register_coco_instances(
key,
_get_builtin_metadata(),
os.path.join("datasets", json_file) if "://" not in json_file else json_file,
os.path.join("datasets", image_root),
)
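# Editor's note: illustrative sketch only, not part of the original file. Once the
# registrations above have run, the splits can be inspected through detectron2's
# catalogs, e.g.:
#
#     from detectron2.data import DatasetCatalog, MetadataCatalog
#     meta = MetadataCatalog.get("mapillary_val")      # exposes the 37 thing classes
#     dicts = DatasetCatalog.get("mapillary_val")      # loads the COCO-style records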
|
holoviews/tests/core/test_prettyprint.py | TheoMathurin/holoviews | 864 | 11068405 | <gh_stars>100-1000
"""
Test cases for the pretty printing system.
"""
from holoviews.element.comparison import ComparisonTestCase
from holoviews import Store, Element, Curve, Overlay, Layout
from holoviews.core.pprint import PrettyPrinter
from .test_dimensioned import CustomBackendTestCase, ExampleElement
class PrettyPrintTest(ComparisonTestCase):
def setUp(self):
self.element1 = Element(None, group='Value', label='Label')
self.element2 = Element(None, group='Value', label='')
def test_element_repr1(self):
r = PrettyPrinter.pprint(self.element1)
self.assertEqual(r, ':Element')
def test_overlay_repr1(self):
expected = ':Overlay\n .Value.Label :Element\n .Value.I :Element'
o = self.element1 * self.element2
r = PrettyPrinter.pprint(o)
self.assertEqual(r, expected)
def test_curve_pprint_repr(self):
# Ensure it isn't a bytes object with the 'b' prefix
expected = "':Curve [x] (y)'"
r = PrettyPrinter.pprint(Curve([1,2,3]))
self.assertEqual(repr(r), expected)
class PrettyPrintOptionsTest(CustomBackendTestCase):
def setUp(self):
super().setUp()
self.current_backend = Store.current_backend
self.pprinter = PrettyPrinter(show_options=True)
self.register_custom(ExampleElement, 'backend_1', ['plot_custom1'], ['style_custom1'])
self.register_custom(Overlay, 'backend_1', ['plot_custom1'])
self.register_custom(Layout, 'backend_1', ['plot_custom1'])
self.register_custom(ExampleElement, 'backend_2', ['plot_custom2'])
Store.current_backend = 'backend_1'
def test_element_options(self):
element = ExampleElement(None).opts(style_opt1='A', backend='backend_1')
r = self.pprinter.pprint(element)
self.assertEqual(r, ":ExampleElement\n | Options(style_opt1='A')")
def test_element_options_wrapping(self):
element = ExampleElement(None).opts(plot_opt1='A'*40, style_opt1='B'*40, backend='backend_1')
r = self.pprinter.pprint(element)
self.assertEqual(r, ":ExampleElement\n | Options(plot_opt1='AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA',\n | style_opt1='BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB')")
def test_overlay_options(self):
overlay = (ExampleElement(None) * ExampleElement(None)).opts(plot_opt1='A')
r = self.pprinter.pprint(overlay)
self.assertEqual(r, ":Overlay\n | Options(plot_opt1='A')\n .Element.I :ExampleElement\n .Element.II :ExampleElement")
def test_overlay_nested_options(self):
overlay = (ExampleElement(None) * ExampleElement(None)).opts('ExampleElement', plot_opt1='A', style_opt1='A')
r = self.pprinter.pprint(overlay)
self.assertEqual(r, ":Overlay\n .Element.I :ExampleElement\n | Options(plot_opt1='A', style_opt1='A')\n .Element.II :ExampleElement\n | Options(plot_opt1='A', style_opt1='A')")
def test_layout_options(self):
overlay = (ExampleElement(None) + ExampleElement(None)).opts(plot_opt1='A')
r = self.pprinter.pprint(overlay)
self.assertEqual(r, ":Layout\n | Options(plot_opt1='A')\n .Element.I :ExampleElement\n .Element.II :ExampleElement")
def test_layout_nested_options(self):
overlay = (ExampleElement(None) + ExampleElement(None)).opts('ExampleElement', plot_opt1='A', style_opt1='A')
r = self.pprinter.pprint(overlay)
self.assertEqual(r, ":Layout\n .Element.I :ExampleElement\n | Options(plot_opt1='A', style_opt1='A')\n .Element.II :ExampleElement\n | Options(plot_opt1='A', style_opt1='A')")
|
setup.py | mindspore-ai/serving | 157 | 11068416 | <reponame>mindspore-ai/serving
#!/usr/bin/env python3
# encoding: utf-8
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""setup package."""
import os
import stat
import platform
from setuptools import setup, find_packages
from setuptools.command.egg_info import egg_info
from setuptools.command.build_py import build_py
version = '1.6.0.B031'
backend_policy = os.getenv('BACKEND_POLICY')
commit_id = os.getenv('COMMIT_ID').replace("\n", "")
package_name = os.getenv('MS_PACKAGE_NAME').replace("\n", "")
pwd = os.path.dirname(os.path.realpath(__file__))
pkg_dir = os.path.join(pwd, 'build/package')
def _read_file(filename):
with open(os.path.join(pwd, filename), encoding='UTF-8') as f:
return f.read()
readme = _read_file('README.md')
release = _read_file('RELEASE.md')
def _write_version(file):
file.write("__version__ = '{}'\n".format(version))
def _write_config(file):
file.write("__backend__ = '{}'\n".format(backend_policy))
def _write_commit_file(file):
file.write("__commit_id__ = '{}'\n".format(commit_id))
def _write_package_name(file):
file.write("__package_name__ = '{}'\n".format(package_name))
def build_dependencies():
"""generate python file"""
version_file = os.path.join(pkg_dir, 'mindspore_serving', 'version.py')
with open(version_file, 'w') as f:
_write_version(f)
version_file = os.path.join(pwd, 'mindspore_serving', 'version.py')
with open(version_file, 'w') as f:
_write_version(f)
config_file = os.path.join(pkg_dir, 'mindspore_serving', 'default_config.py')
with open(config_file, 'w') as f:
_write_config(f)
config_file = os.path.join(pwd, 'mindspore_serving', 'default_config.py')
with open(config_file, 'w') as f:
_write_config(f)
package_info = os.path.join(pkg_dir, 'mindspore_serving', 'default_config.py')
with open(package_info, 'a') as f:
_write_package_name(f)
package_info = os.path.join(pwd, 'mindspore_serving', 'default_config.py')
with open(package_info, 'a') as f:
_write_package_name(f)
commit_file = os.path.join(pkg_dir, 'mindspore_serving', '.commit_id')
with open(commit_file, 'w') as f:
_write_commit_file(f)
commit_file = os.path.join(pwd, 'mindspore_serving', '.commit_id')
with open(commit_file, 'w') as f:
_write_commit_file(f)
build_dependencies()
required_package = [
'numpy >= 1.17.0',
'protobuf >= 3.13.0',
'grpcio>=1.36.0',
'psutil >= 5.6.1'
]
package_data = {
'': [
'*.so*',
'*.pyd',
'*.dll',
'lib/*.so*',
'lib/*.a',
'.commit_id',
'_mindspore_serving',
'proto/*.py'
]
}
def update_permissions(path):
"""
Update permissions.
Args:
path (str): Target directory path.
"""
if platform.system() == "Windows":
return
for dirpath, dirnames, filenames in os.walk(path):
for dirname in dirnames:
dir_fullpath = os.path.join(dirpath, dirname)
os.chmod(dir_fullpath, stat.S_IREAD | stat.S_IWRITE |
stat.S_IEXEC | stat.S_IRGRP | stat.S_IXGRP)
for filename in filenames:
file_fullpath = os.path.join(dirpath, filename)
os.chmod(file_fullpath, stat.S_IREAD)
def bin_files():
"""
Gets the binary files to be installed.
"""
data_files = []
binary_files = []
cache_server_bin = os.path.join('mindspore_serving', 'bin', 'cache_server')
if not os.path.exists(cache_server_bin):
return data_files
binary_files.append(cache_server_bin)
cache_admin_bin = os.path.join('mindspore_serving', 'bin', 'cache_admin')
if not os.path.exists(cache_admin_bin):
return data_files
binary_files.append(cache_admin_bin)
data_files.append(('bin', binary_files))
return data_files
class EggInfo(egg_info):
"""Egg info."""
def run(self):
super().run()
egg_info_dir = os.path.join(pkg_dir, 'mindspore_serving.egg-info')
update_permissions(egg_info_dir)
class BuildPy(build_py):
"""BuildPy."""
def run(self):
super().run()
mindspore_dir = os.path.join(pkg_dir, 'build', 'lib', 'mindspore_serving')
update_permissions(mindspore_dir)
mindspore_dir = os.path.join(pkg_dir, 'build', 'lib', 'akg')
update_permissions(mindspore_dir)
setup(
name=package_name,
version=version,
author='The MindSpore Authors',
author_email='<EMAIL>',
url='https://www.mindspore.cn',
download_url='https://gitee.com/mindspore/serving/tags',
project_urls={
'Sources': 'https://gitee.com/mindspore/serving',
'Issue Tracker': 'https://gitee.com/mindspore/serving/issues',
},
description='MindSpore is a new open source deep learning training/inference '
'framework that could be used for mobile, edge and cloud scenarios.',
# long_description="\n\n".join([readme, release]),
long_description="\n\n".join([readme]),
long_description_content_type="text/markdown",
data_files=bin_files(),
packages=find_packages(),
package_data=package_data,
include_package_data=True,
cmdclass={
'egg_info': EggInfo,
'build_py': BuildPy,
},
python_requires='>=3.7',
install_requires=required_package,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: C++',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache 2.0',
keywords='mindspore machine learning',
)
|
sdk/python/tests/unit/test_usage.py | MuhammadZeeshan34/feast | 2,258 | 11068427 | <reponame>MuhammadZeeshan34/feast<filename>sdk/python/tests/unit/test_usage.py
import datetime
import json
import time
from unittest.mock import patch
import pytest
from feast.usage import (
RatioSampler,
log_exceptions,
log_exceptions_and_usage,
set_usage_attribute,
tracing_span,
)
@pytest.fixture(scope="function")
def dummy_exporter():
event_log = []
with patch(
"feast.usage._export",
new=lambda e: event_log.append(json.loads(json.dumps(e))),
):
yield event_log
@pytest.fixture(scope="function", autouse=True)
def enabling_patch():
with patch("feast.usage._is_enabled") as p:
p.__bool__.return_value = True
yield p
def test_logging_disabled(dummy_exporter, enabling_patch):
enabling_patch.__bool__.return_value = False
@log_exceptions_and_usage(event="test-event")
def entrypoint():
pass
@log_exceptions(event="test-event")
def entrypoint2():
raise ValueError(1)
entrypoint()
with pytest.raises(ValueError):
entrypoint2()
assert not dummy_exporter
def test_global_context_building(dummy_exporter):
@log_exceptions_and_usage(event="test-event")
def entrypoint(provider):
if provider == "one":
provider_one()
if provider == "two":
provider_two()
@log_exceptions_and_usage(provider="provider-one")
def provider_one():
dummy_layer()
@log_exceptions_and_usage(provider="provider-two")
def provider_two():
set_usage_attribute("new-attr", "new-val")
@log_exceptions_and_usage
def dummy_layer():
redis_store()
@log_exceptions_and_usage(store="redis")
def redis_store():
set_usage_attribute("attr", "val")
entrypoint(provider="one")
entrypoint(provider="two")
scope_name = "test_usage.test_global_context_building.<locals>"
assert dummy_exporter
assert {
"event": "test-event",
"provider": "provider-one",
"store": "redis",
"attr": "val",
"entrypoint": f"{scope_name}.entrypoint",
}.items() <= dummy_exporter[0].items()
assert dummy_exporter[0]["calls"][0]["fn_name"] == f"{scope_name}.entrypoint"
assert dummy_exporter[0]["calls"][1]["fn_name"] == f"{scope_name}.provider_one"
assert dummy_exporter[0]["calls"][2]["fn_name"] == f"{scope_name}.dummy_layer"
assert dummy_exporter[0]["calls"][3]["fn_name"] == f"{scope_name}.redis_store"
assert (
not {"store", "attr"} & dummy_exporter[1].keys()
) # check that context was reset
assert {
"event": "test-event",
"provider": "provider-two",
"new-attr": "new-val",
}.items() <= dummy_exporter[1].items()
def test_exception_recording(dummy_exporter):
@log_exceptions_and_usage(event="test-event")
def entrypoint():
provider()
@log_exceptions_and_usage(provider="provider-one")
def provider():
raise ValueError(1)
with pytest.raises(ValueError):
entrypoint()
assert dummy_exporter
assert {
"event": "test-event",
"provider": "provider-one",
"exception": repr(ValueError(1)),
"entrypoint": "test_usage.test_exception_recording.<locals>.entrypoint",
}.items() <= dummy_exporter[0].items()
def test_only_exception_logging(dummy_exporter):
@log_exceptions(scope="exception-only")
def failing_fn():
raise ValueError(1)
@log_exceptions_and_usage(scope="usage-and-exception")
def entrypoint():
failing_fn()
with pytest.raises(ValueError):
failing_fn()
assert {
"exception": repr(ValueError(1)),
"scope": "exception-only",
"entrypoint": "test_usage.test_only_exception_logging.<locals>.failing_fn",
}.items() <= dummy_exporter[0].items()
with pytest.raises(ValueError):
entrypoint()
assert {
"exception": repr(ValueError(1)),
"scope": "usage-and-exception",
"entrypoint": "test_usage.test_only_exception_logging.<locals>.entrypoint",
}.items() <= dummy_exporter[1].items()
def test_ratio_based_sampling(dummy_exporter):
@log_exceptions_and_usage()
def entrypoint():
expensive_fn()
@log_exceptions_and_usage(sampler=RatioSampler(ratio=0.1))
def expensive_fn():
pass
for _ in range(100):
entrypoint()
assert len(dummy_exporter) == 10
def test_sampling_priority(dummy_exporter):
@log_exceptions_and_usage(sampler=RatioSampler(ratio=0.3))
def entrypoint():
expensive_fn()
@log_exceptions_and_usage(sampler=RatioSampler(ratio=0.01))
def expensive_fn():
other_fn()
@log_exceptions_and_usage(sampler=RatioSampler(ratio=0.1))
def other_fn():
pass
for _ in range(300):
entrypoint()
assert len(dummy_exporter) == 3
def test_time_recording(dummy_exporter):
@log_exceptions_and_usage()
def entrypoint():
time.sleep(0.1)
expensive_fn()
@log_exceptions_and_usage()
def expensive_fn():
time.sleep(0.5)
other_fn()
@log_exceptions_and_usage()
def other_fn():
time.sleep(0.2)
entrypoint()
assert dummy_exporter
calls = dummy_exporter[0]["calls"]
assert call_length_ms(calls[0]) >= 800
assert call_length_ms(calls[0]) > call_length_ms(calls[1]) >= 700
assert call_length_ms(calls[1]) > call_length_ms(calls[2]) >= 200
def test_profiling_decorator(dummy_exporter):
@log_exceptions_and_usage()
def entrypoint():
with tracing_span("custom_span"):
time.sleep(0.1)
entrypoint()
assert dummy_exporter
calls = dummy_exporter[0]["calls"]
assert len(calls)
assert call_length_ms(calls[0]) >= 100
assert call_length_ms(calls[1]) >= 100
assert (
calls[1]["fn_name"]
== "test_usage.test_profiling_decorator.<locals>.entrypoint.custom_span"
)
def call_length_ms(call):
return (
datetime.datetime.fromisoformat(call["end"])
- datetime.datetime.fromisoformat(call["start"])
).total_seconds() * 10 ** 3
|
scripts/dump-cli-help.py | paternal/lektor-website | 184 | 11068435 | <filename>scripts/dump-cli-help.py
import os
import click
from click.formatting import join_options
from lektor.cli import cli as root_command
OUT = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', 'content', 'docs', 'cli'))
def get_opts(param):
any_prefix_is_slash = []
def _write(opts):
rv, any_slashes = join_options(opts)
if any_slashes:
any_prefix_is_slash[:] = [True]
if not param.is_flag and not param.count:
rv += ' ' + param.make_metavar()
return rv
rv = [_write(param.opts)]
if param.secondary_opts:
rv.append(_write(param.secondary_opts))
return (any_prefix_is_slash and '; ' or ' / ').join(rv)
def write_page(data):
path = data['path'][1:]
if not path:
return
filename = os.path.join(OUT, *(path + ['contents.lr']))
dirname = os.path.dirname(filename)
try:
os.makedirs(dirname)
except OSError:
pass
args = [x['metavar'] for x in data['arguments']]
body = [
'`%s`' % ' '.join(data['path'] + args),
'',
data['help'] or '',
]
body.append('')
body.append('## Options')
body.append('')
for opt in data['options']:
prefix = '- `%s`: ' % opt['opt_string']
for line in click.wrap_text(
opt['help'] or '', 74, prefix, ' ').splitlines():
body.append(line)
body.append('- `--help`: print this help page.')
fields = [
('comment', 'This file is auto generated by dump-cli-help.py'),
('title', path[-1]),
('summary', data['summary']),
('type', 'cmdlet'),
('body', '\n'.join(body)),
]
with open(filename, 'w') as f:
f.write('\n---\n'.join('%s:%s%s' % (
k,
len(v.splitlines()) > 1 and '\n\n' or ' ',
v
) for k, v in fields))
def dump_command(cmd, path):
data = {
'path': path,
'summary': cmd.short_help,
'help': cmd.help.replace('\b', ''),
'options': [],
'arguments': [],
}
for param in cmd.params:
if isinstance(param, click.Option):
data['options'].append({
'opt_string': get_opts(param),
'help': param.help,
})
else:
data['arguments'].append({
'metavar': param.make_metavar(),
})
write_page(data)
if isinstance(cmd, click.Group):
        for child_name, child_cmd in cmd.commands.items():
dump_command(child_cmd, path + [child_name])
dump_command(root_command, ['lektor'])
|
recipes/ncurses/_run_test.py | Frmissjing/berryconda | 642 | 11068471 | import curses
import sys
if __name__ == '__main__':
if sys.stdout.isatty():
screen = curses.initscr()
try:
curses.cbreak()
pad = curses.newpad(10, 10)
size = screen.getmaxyx()
pad.refresh(0, 0, 0, 0, size[0] - 1, size[1] - 1)
finally:
curses.nocbreak()
curses.endwin()
|
Python/Tests/TestData/DjangoAnalysisTestApp/test_render/views.py | techkey/PTVS | 404 | 11068472 | from django.shortcuts import render, render_to_response
from django import template
register = template.Library()
def test_render_view(request):
return render(request, 'test_render.html', {'content': 'data'})
def test_render_to_response_view(request):
return render_to_response('test_render_to_response.html', {'content': 'data'})
@register.filter
def test_filter(f):
"""this is my filter"""
pass
@register.tag
def test_tag(*a, **kw):
pass
@register.filter('test_filter_2')
def test_filter_function(f):
"""this is my filter"""
pass
@register.tag('test_tag_2')
def test_tag_function(*a, **kw):
pass
@register.assignment_tag('test_assignment_tag')
def test_assignment_tag(*a, **kw):
pass
@register.simple_tag('test_simple_tag')
def test_simple_tag(*a, **kw):
pass
|
tests/syntax/simple_expression/test_attr_ref.py | PowerOlive/mindspore | 3,200 | 11068490 | <reponame>PowerOlive/mindspore
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test syntax for logic expression """
import mindspore.nn as nn
import mindspore
from mindspore import context
from mindspore.common.parameter import Parameter
from mindspore.common.tensor import Tensor
context.set_context(mode=context.GRAPH_MODE)
class Net(nn.Cell):
def __init__(self):
super().__init__()
self.weight = Parameter(Tensor(3, mindspore.float32), name="w")
self.m = 2
def construct(self, x, y):
self.weight = x
self.m = 3
#self.l = 1
#y.weight = x
print(self.weight)
return x
def test_attr_ref():
x = Tensor(4, mindspore.float32)
net_y = Net()
net = Net()
ret = net(x, net_y)
print(ret)
|
bert/macaron-scripts/test/generate_test_scripts.py | changhaowu/macaron-net | 140 | 11068494 | import os
import sys
import copy
import itertools
import inspect
def task(name, n_sentences, task, criterion, symmetric, n_classes, data_path):
return locals()
def params(*args):
keys = ["seed_list", "n_epoch_list", "batch_sz_list", "lr_list", "weight_decay_list"]
assert len(args) == len(keys)
values = itertools.product(*args)
return [{k: v for k, v in zip(keys, vs)} for vs in values]
cola = (
task("cola", 8551, "glue_single", "cross_entropy_classify_binary", "", 1, "CoLA"),
params(["100 200 300 400 500 600"], ["3 4 5"], ["16 32"], ["0.00005 0.00003"], ["0.00 0.01"])
) # 60s / epoch, 3h / search
mrpc = (
task("mrpc", 3668, "glue_pair", "cross_entropy_classify_binary", "--symmetric", 1, "MRPC"),
params(["100 200 300 400 500 600"], ["3 4 5"], ["16 32"], ["0.00005 0.00003"], ["0.00 0.01"])
) # 50s / epoch, 3h / search
sts = (
task("sts", 5749, "glue_pair", "mean_squared_error", "--symmetric", 1, "STS-B"),
params(["100 200 300 400 500 600"], ["3 4 5"], ["16 32"], ["0.00005 0.00003"], ["0.00 0.01"])
) # 50s / epoch, 4h / search
rte = (
task("rte", 2475, "glue_pair", "cross_entropy_classify", "", 2, "RTE"),
params(["100 200 300 400 500 600"], ["3 4 5"], ["16 32"], ["0.00005 0.00003"], ["0.00 0.01"])
) # 60s / epoch, 3h / search
mnli = (
task("mnli", 392702, "glue_pair", "cross_entropy_classify", "", 3, "MNLI"),
params(["100", "200", "300"], ["3 4 5"], ["16 24"], ["0.00005", "0.00003"], ["0.00", "0.01"])
) # 5000s / epoch, bs 32 oom
mnlimm = (
task("mnlimm", 392702, "glue_pair", "cross_entropy_classify", "", 3, "MNLI-mm"),
params(["100", "200", "300"], ["3 4 5"], ["16 24"], ["0.00005", "0.00003"], ["0.00", "0.01"])
) # 5000s / epoch, bs 32 oom
qnli = (
task("qnli", 108436, "glue_pair", "cross_entropy_classify", "", 2, "QNLI-new"),
params(["100", "200", "300"], ["3 4 5"], ["16 24"], ["0.00005", "0.00003"], ["0.00", "0.01"])
) # 1600s / epoch, bs 32 oom
qqp = (
task("qqp", 363849, "glue_pair", "cross_entropy_classify_binary", "--symmetric", 1, "QQP"),
params(["100", "200", "300"], ["3 4 5"], ["16 24"], ["0.00005", "0.00003"], ["0.00", "0.01"])
) # 4000s / epoch, bs 32 oom
sst = (
task("sst", 67349, "glue_single", "cross_entropy_classify", "", 2, "SST-2"),
params(["100", "200", "300", "400", "500", "600"], ["3 4 5"], ["16 32"], ["0.00005 0.00003"], ["0.00 0.01"])
) # 400s / epoch, 18h / search
task_list = [cola, mrpc, sts, rte, mnli, mnlimm, qnli, qqp, sst]
bert_model_config = {
"bert_model_name": "macaron_pretrained",
"bert_model_path": "log/bert/transformer_bert_base_macaron/checkpoint_pretrained.pt",
"bert_model_arch": "transformer_classifier_base_macaron",
}
script_dir = os.path.join("generated/", bert_model_config["bert_model_name"])
env_vars = """
PROBLEM={name}
BERT_MODEL_NAME={bert_model_name}
TASK={task}
BERT_MODEL_PATH={bert_model_path}
N_CLASSES={n_classes}
ARCH={bert_model_arch}
N_SENT={n_sentences}
CRITERION={criterion}
SYMMETRIC={symmetric}
DATA_PATH=data/glue/{data_path}
SEED_LIST="{seed_list}"
N_EPOCH_LIST="{n_epoch_list}"
BATCH_SZ_LIST="{batch_sz_list}"
LR_LIST="{lr_list}"
WEIGHT_DECAY_LIST="{weight_decay_list}"
"""
script_template = r"""
CODE_PATH=.
cd $CODE_PATH
export PYTHONPATH=$CODE_PATH:$PYTHONPATH
for SEED in $SEED_LIST
do
for N_EPOCH in $N_EPOCH_LIST
do
for BATCH_SZ in $BATCH_SZ_LIST
do
SENT_PER_GPU=$(( BATCH_SZ / 1 ))
N_UPDATES=$(( ((N_SENT + BATCH_SZ - 1) / BATCH_SZ) * N_EPOCH ))
WARMUP_UPDATES=$(( (N_UPDATES + 5) / 10 ))
echo $SENT_PER_GPU $N_UPDATES $WARMUP_UPDATES
for LR in $LR_LIST
do
for WEIGHT_DECAY in $WEIGHT_DECAY_LIST
do
OUTPUT_PATH=log/bert_downstream/$BERT_MODEL_NAME/$PROBLEM/${N_EPOCH}-${BATCH_SZ}-${LR}-${WEIGHT_DECAY}-$SEED
mkdir -p $OUTPUT_PATH
python train.py $DATA_PATH --task $TASK --load-bert $BERT_MODEL_PATH --load-type no_out \
--arch $ARCH --n-classes $N_CLASSES \
--optimizer adam --adam-betas '(0.9, 0.999)' --adam-eps 1e-6 --clip-norm 0.0 --weight-decay $WEIGHT_DECAY \
--lr $LR --lr-scheduler linear --warmup-init-lr 1e-07 --warmup-updates $WARMUP_UPDATES --min-lr 1e-09 \
--criterion $CRITERION $SYMMETRIC \
--max-sentences $SENT_PER_GPU --max-update $N_UPDATES --seed $SEED \
--save-dir $OUTPUT_PATH --no-progress-bar --log-interval 100 --no-epoch-checkpoints \
| tee -a $OUTPUT_PATH/train_log.txt
done
done
done
done
done
"""
os.makedirs(script_dir, exist_ok=True)
os.system('cp {} {}'.format(__file__, script_dir))
for task_dict, params_list in task_list:
for i, param_dict in enumerate(params_list):
result_dict = {}
result_dict.update(task_dict)
result_dict.update(bert_model_config)
result_dict.update(param_dict)
this_env_var = env_vars.format(**result_dict)
script = this_env_var + script_template
script_name = os.path.join(script_dir, ".".join([task_dict["name"], "%02d" % i, "sh"]))
print(script_name)
with open(script_name, "w") as f:
f.write(script)
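# Editor's note (not part of the original script): with the configuration above this
# writes one shell script per (task, hyper-parameter combination) pair under
# generated/macaron_pretrained/, e.g. cola.00.sh or mnli.17.sh, printing each path
# as it is written.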
|
pysparkling/samplers.py | ptallada/pysparkling | 260 | 11068502 | import math
import random
try:
import numpy
except ImportError:
numpy = None
def pysparkling_poisson(lambda_):
if lambda_ == 0.0:
return 0
n = 0
exp_neg_lambda = math.exp(-lambda_)
prod = 1.0
while True:
prod *= random.random()
if prod > exp_neg_lambda:
n += 1
else:
return n
def poisson(lambda_):
if numpy is not None:
return numpy.random.poisson(lambda_)
return pysparkling_poisson(lambda_)
class BernoulliSampler:
def __init__(self, expectation):
self.expectation = expectation
def __call__(self, sample):
return 1 if random.random() < self.expectation else 0
class PoissonSampler:
def __init__(self, expectation):
self.expectation = expectation
def __call__(self, sample):
return poisson(self.expectation)
class BernoulliSamplerPerKey:
def __init__(self, expectations):
self.expectations = expectations
def __call__(self, sample):
key = sample[0]
return 1 if random.random() < self.expectations.get(key, 0.0) else 0
class PoissonSamplerPerKey:
def __init__(self, expectations):
self.expectations = expectations
def __call__(self, sample):
key = sample[0]
return poisson(self.expectations.get(key, 0.0))
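# Editor's note: the demo below is an illustrative addition, not part of the original
# module. A sampler maps an element to the number of times it should be emitted, so a
# small with-replacement sample can be drawn like this:
if __name__ == "__main__":
    sampler = PoissonSampler(expectation=0.2)
    sample = [x for x in range(100) for _ in range(sampler(x))]
    print('drew {} elements from 100 candidates'.format(len(sample)))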
|
coding_interviews/elements_of_programming_interview/longest_subarray_length_with_same_integers.py | LeandroTk/Algorithms | 205 | 11068506 | <filename>coding_interviews/elements_of_programming_interview/longest_subarray_length_with_same_integers.py
'''
Write a program that takes an array of integers and finds
the length of a longest subarray all of whose entries are equal.
'''
def longest_subarray_length_with_same_integers(numbers):
if not numbers:
return 0
longest_subarray_length, counter, current_comparator = 0, 0, numbers[0]
for number in numbers:
if number == current_comparator:
counter += 1
else:
counter = 1
current_comparator = number
longest_subarray_length = max(longest_subarray_length, counter)
return longest_subarray_length
numbers = [260, 290, 290, 250, 250, 250]
prices = [310, 315, 275, 295, 260, 270, 290, 230, 255, 250]
print(longest_subarray_length_with_same_integers(numbers))
print(longest_subarray_length_with_same_integers(prices))
print(longest_subarray_length_with_same_integers([]))
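# Editor's note: for the three calls above the expected output is 3 (the run of three
# 250s in `numbers`), 1 (all adjacent `prices` differ) and 0 (empty input), respectively.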
|
src/python/nimbusml/cluster/__init__.py | michaelgsharp/NimbusML | 134 | 11068510 | from .kmeansplusplus import KMeansPlusPlus
__all__ = [
'KMeansPlusPlus'
]
|
sdk/resourcemanagerhybrid/update_root_pom.py | billwert/azure-sdk-for-java | 1,350 | 11068548 | <gh_stars>1000+
with open('pom.xml', 'r') as file:
filedata = file.read()
filedata = filedata.replace('<module>sdk/resourcemanager</module>', '<module>sdk/resourcemanagerhybrid</module>')
with open('pom.xml', 'w') as file:
file.write(filedata)
|
reinforcement_learning/rl_hvac_ray_energyplus/source/eplus/__init__.py | jerrypeng7773/amazon-sagemaker-examples | 2,610 | 11068554 | <reponame>jerrypeng7773/amazon-sagemaker-examples<gh_stars>1000+
from gym.envs.registration import register
register(
id="large-office-v0",
entry_point="eplus.envs:LargeOfficeEnv",
)
register(id="data-center-v0", entry_point="eplus.envs:DataCenterEnv")
|
blocks/model.py | KIKOcaoyue/blocks | 1,067 | 11068566 | """Model - heavily annotated computation graph.
A model in Blocks is simply an annotated computation graph. The class
:class:`Model` extends :class:`blocks.graph.ComputationGraph`,
which is able to handle annotations and roles in general, but is
deliberately made unaware of specific annotations that a Theano graph
created by Blocks typically has, such as bricks and application calls. The
:class:`Model` adds this functionality. Using :class:`Model` you can do
things like query all the bricks used to build the computation graph,
request "hierarchical names" of the parameters (a hierarchical name is a
path-like string which in addition to the parameter's name contains names
of the bricks on the path from a root brick to the brick that owns the
parameters, e.g. ``/mlp/linear/W``).
For more information, see :class:`Model` docstring.
"""
import logging
from collections import OrderedDict, Counter
from itertools import chain
from blocks.algorithms import GradientDescent
from blocks.graph import ComputationGraph
from blocks.filter import get_brick
logger = logging.getLogger(__name__)
class Model(ComputationGraph):
"""Handles annotations in Blocks-built computation graphs.
Use this class to handle your Blocks-created computation graph.
Examples
--------
>>> from theano import tensor
>>> from blocks.bricks import MLP, Tanh
>>> x = tensor.matrix('x')
>>> mlp = MLP([Tanh(), Tanh()], [10, 10, 10])
>>> y = mlp.apply(x)
>>> model = Model(y)
With :class:`Model` you can get access to the brick hierarchy. The
brick hierarchy is defined by ``children`` attributes that every brick
has. The bricks that are not children of other bricks are called top
bricks. It is often useful to have access to top bricks of a brick
hierarchy used to build a computation graph, and here is how you can do
it:
>>> model.get_top_bricks() #doctest: +ELLIPSIS
[<blocks.bricks.sequences.MLP object at ...]
You can also get "hierarchical" names for the parameters,
which encode the position of the owning brick in the
brick hierarchy.
>>> model.get_parameter_dict() #doctest: +NORMALIZE_WHITESPACE
OrderedDict([('/mlp/linear_1.b', b), ('/mlp/linear_0.b', b),
('/mlp/linear_0.W', W), ('/mlp/linear_1.W', W)])
"""
def __init__(self, *args, **kwargs):
super(Model, self).__init__(*args, **kwargs)
bricks = [get_brick(var) for var
in self.variables + self.scan_variables if get_brick(var)]
children = set(chain(*(brick.children for brick in bricks)))
# Quadratic complexity: we should not have thousands of
# top-level bricks.
self.top_bricks = []
for brick in bricks:
if brick not in children and brick not in self.top_bricks:
self.top_bricks.append(brick)
names = Counter([brick.name for brick in self.top_bricks])
repeated_names = [name for name, count in names.items() if count > 1]
if repeated_names:
raise ValueError("top bricks with the same name:"
" {}".format(', '.join(repeated_names)))
parameter_list = []
for parameter in self.parameters:
if get_brick(parameter):
parameter_list.append(
(get_brick(parameter).get_hierarchical_name(parameter),
parameter))
else:
parameter_list.append((parameter.name, parameter))
self._parameter_dict = OrderedDict(parameter_list)
def check_sanity(self, algorithm):
# Sanity check for the most common case
if self and isinstance(algorithm, GradientDescent):
if not (set(self.get_parameter_dict().values()) ==
set(algorithm.parameters)):
logger.warning("different parameters for model and algorithm")
def get_parameter_dict(self):
"""Returns parameters with their hierarchical names.
The parameter names are formed from positions of their owner bricks
in the bricks hierarchy. The variable names are used for the
parameters that do not belong to any brick.
Returns
-------
parameter_dict : dict
A dictionary of (hierarchical name, shared variable) pairs.
"""
return self._parameter_dict
def get_parameter_values(self):
"""Return the values of model parameters.
        The same hierarchical names as in :meth:`get_parameter_dict` are
used to uniquely identify parameters.
Returns
-------
parameter_values : OrderedDict
Dictionary of (hierarchical name, :class:`~numpy.ndarray`)
pairs.
"""
return OrderedDict(
(name, parameter.get_value())
for name, parameter in self.get_parameter_dict().items())
def set_parameter_values(self, parameter_values):
"""Set the values of model parameters.
        The same hierarchical names as in :meth:`get_parameter_dict` are
used to uniquely identify parameters.
Parameters
----------
parameter_values : OrderedDict
Dictionary of (hierarchical name, :class:`~numpy.ndarray`)
pairs.
"""
parameters = self.get_parameter_dict()
unknown = set(parameter_values) - set(parameters)
missing = set(parameters) - set(parameter_values)
if len(unknown):
logger.error("unknown parameter names: {}\n".format(unknown))
if len(missing):
logger.error("missing values for parameters: {}\n".format(missing))
for name, value in parameter_values.items():
if name in parameters:
model_shape = parameters[name].container.data.shape
if model_shape != value.shape:
raise ValueError("Shape mismatch for parameter: {}. "
"Expected {}, got {}."
.format(name, model_shape, value.shape))
parameters[name].set_value(value)
def get_top_bricks(self):
"""Get the bricks that do not have parents.
Returns
-------
bricks : list of :class:`~blocks.bricks.base.Brick`
"""
return self.top_bricks
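# Editor's note: illustrative sketch only, not part of the original module. Because
# get_parameter_values()/set_parameter_values() round-trip plain numpy arrays keyed by
# hierarchical names, a minimal checkpoint for a `model` built as in the class docstring
# above could be written with the standard library:
#
#     import pickle
#     with open('params.pkl', 'wb') as f:
#         pickle.dump(model.get_parameter_values(), f)
#     with open('params.pkl', 'rb') as f:
#         model.set_parameter_values(pickle.load(f))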
|
PhysicsTools/PatAlgos/python/tools/heavyIonTools.py | ckamtsikis/cmssw | 852 | 11068567 | <reponame>ckamtsikis/cmssw
from PhysicsTools.PatAlgos.tools.ConfigToolBase import *
from PhysicsTools.PatAlgos.tools.helpers import *
class ConfigureHeavyIons(ConfigToolBase):
""" Configure all defaults for heavy ions
"""
_label='configureHeavyIons'
_defaultParameters=dicttypes.SortedKeysDict()
def __init__(self):
ConfigToolBase.__init__(self)
self._parameters=copy.deepcopy(self._defaultParameters)
self._comment = ""
def getDefaultParameters(self):
return self._defaultParameters
def __call__(self,process):
self.apply(process)
def toolCode(self, process):
productionDefaults(process)
selectionDefaults(process)
configureHeavyIons=ConfigureHeavyIons()
class ProductionDefaults(ConfigToolBase):
""" Configure all relevant layer1 candidates for heavy ions
"""
_label='productionDefaults'
_defaultParameters=dicttypes.SortedKeysDict()
def __init__(self):
ConfigToolBase.__init__(self)
self._parameters=copy.deepcopy(self._defaultParameters)
self._comment = ""
def getDefaultParameters(self):
return self._defaultParameters
def __call__(self,process):
self.apply(process)
def toolCode(self, process):
## adapt jet defaults
patJets = getattr(process, jetCollectionString())
patJets.jetSource = cms.InputTag("iterativeConePu5CaloJets")
jetCors = getattr(process, 'patJetCorrFactors')
jetCors.jetSource = cms.InputTag("iterativeConePu5CaloJets")
jetCors.corrLevels = cms.PSet(L2Relative = cms.string("L2Relative_IC5Calo"),
L3Absolute = cms.string("L3Absolute_IC5Calo"),
L1Offset = cms.string('none'),
L4EMF = cms.string('none'),
L5Flavor = cms.string('none'),
L6UE = cms.string('none'),
L7Parton = cms.string('none')
)
partonMatch = getattr(process, 'patJetPartonMatch')
partonMatch.src = cms.InputTag("iterativeConePu5CaloJets")
partonMatch.matched = cms.InputTag("hiPartons")
jetMatch = getattr(process, 'patJetGenJetMatch')
jetMatch.src = cms.InputTag("iterativeConePu5CaloJets")
jetMatch.matched = cms.InputTag("heavyIonCleanedGenJets")
patJets.addBTagInfo = False
patJets.addTagInfos = False
patJets.addDiscriminators = False
patJets.addAssociatedTracks = False
patJets.addJetCharge = False
patJets.addJetID = False
patJets.getJetMCFlavour = False
patJets.addGenPartonMatch = True
patJets.addGenJetMatch = True
patJets.embedGenJetMatch = True
patJets.embedGenPartonMatch = True
## adapt muon defaults
muonMatch = getattr(process, 'muonMatch')
muonMatch.matched = cms.InputTag("hiGenParticles")
patMuons = getattr(process, 'patMuons')
patMuons.embedGenMatch = cms.bool(True)
process.patMuons.embedCaloMETMuonCorrs = cms.bool(False)
process.patMuons.embedTcMETMuonCorrs = cms.bool(False)
process.patMuons.embedPFCandidate = cms.bool(False)
process.patMuons.useParticleFlow = cms.bool(False)
process.patMuons.addEfficiencies = cms.bool(False)
process.patMuons.addResolutions = cms.bool(False)
process.patMuons.pvSrc = cms.InputTag("hiSelectedVertex")
## adapt photon defaults
photonMatch = getattr(process, 'photonMatch')
photonMatch.matched = cms.InputTag("hiGenParticles")
patPhotons = getattr(process, 'patPhotons')
patPhotons.addPhotonID = cms.bool(True)
patPhotons.addGenMatch = cms.bool(True)
patPhotons.embedGenMatch = cms.bool(True)
patPhotons.userData.userFloats.src = cms.VInputTag(
cms.InputTag( "isoCC1"),cms.InputTag( "isoCC2"),cms.InputTag( "isoCC3"),cms.InputTag( "isoCC4"),cms.InputTag("isoCC5"),
cms.InputTag( "isoCR1"),cms.InputTag( "isoCR2"),cms.InputTag( "isoCR3"),cms.InputTag( "isoCR4"),cms.InputTag("isoCR5"),
cms.InputTag( "isoT11"),cms.InputTag( "isoT12"),cms.InputTag( "isoT13"),cms.InputTag( "isoT14"),
cms.InputTag( "isoT21"),cms.InputTag( "isoT22"),cms.InputTag( "isoT23"),cms.InputTag( "isoT24"),
cms.InputTag( "isoT31"),cms.InputTag( "isoT32"),cms.InputTag( "isoT33"),cms.InputTag( "isoT34"),
cms.InputTag( "isoT41"),cms.InputTag( "isoT42"),cms.InputTag( "isoT43"),cms.InputTag( "isoT44"),
cms.InputTag("isoDR11"),cms.InputTag("isoDR12"),cms.InputTag("isoDR13"),cms.InputTag("isoDR14"),
cms.InputTag("isoDR21"),cms.InputTag("isoDR22"),cms.InputTag("isoDR23"),cms.InputTag("isoDR24"),
cms.InputTag("isoDR31"),cms.InputTag("isoDR32"),cms.InputTag("isoDR33"),cms.InputTag("isoDR34"),
cms.InputTag("isoDR41"),cms.InputTag("isoDR42"),cms.InputTag("isoDR43"),cms.InputTag("isoDR44")
)
patPhotons.photonIDSource = cms.InputTag("PhotonIDProd","PhotonCutBasedIDLoose")
del patPhotons.photonIDSources
productionDefaults=ProductionDefaults()
class SelectionDefaults(ConfigToolBase):
""" Configure all relevant selected layer1 candidates for heavy ions
"""
_label='selectionDefaults'
_defaultParameters=dicttypes.SortedKeysDict()
def __init__(self):
ConfigToolBase.__init__(self)
self._parameters=copy.deepcopy(self._defaultParameters)
self._comment = ""
def getDefaultParameters(self):
return self._defaultParameters
def __call__(self,process):
self.apply(process)
def toolCode(self, process):
selectedJets = getattr(process, jetCollectionString('selected'))
selectedJets.cut = cms.string('pt > 20.')
selectedMuons = getattr(process, 'selectedPatMuons')
selectedMuons.cut = cms.string('pt > 0. & abs(eta) < 12.')
selectedPhotons = getattr(process, 'selectedPatPhotons')
selectedPhotons.cut = cms.string('pt > 0. & abs(eta) < 12.')
selectionDefaults=SelectionDefaults()
class DisbaleMonteCarloDeps(ConfigToolBase):
""" Cut off all MC dependencies
"""
_label='disableMonteCarloDeps'
_defaultParameters=dicttypes.SortedKeysDict()
def __init__(self):
ConfigToolBase.__init__(self)
self._parameters=copy.deepcopy(self._defaultParameters)
self._comment = ""
def getDefaultParameters(self):
return self._defaultParameters
def __call__(self,process):
self.apply(process)
def toolCode(self, process):
## switch MC to false in heavyIon Producer
process.heavyIon.doMC = False
## remove MC matching from heavyIonJets
process.makeHeavyIonJets.remove(process.genPartons)
process.makeHeavyIonJets.remove(process.heavyIonCleanedGenJets)
process.makeHeavyIonJets.remove(process.hiPartons)
process.makeHeavyIonJets.remove(process.patJetGenJetMatch)
process.makeHeavyIonJets.remove(process.patJetPartonMatch)
process.patJets.addGenPartonMatch = False
process.patJets.embedGenPartonMatch = False
process.patJets.genPartonMatch = ''
process.patJets.addGenJetMatch = False
process.patJets.genJetMatch = ''
process.patJets.getJetMCFlavour = False
process.patJets.JetPartonMapSource = ''
## remove MC matching from heavyIonMuons
process.makeHeavyIonMuons.remove(process.muonMatch)
process.patMuons.addGenMatch = False
process.patMuons.embedGenMatch = False
disableMonteCarloDeps=DisbaleMonteCarloDeps()
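# Editor's note: illustrative sketch only, not part of the original tool file. The
# singletons above are callables taking a cms.Process, so in a PAT configuration they
# would typically be applied as:
#
#     from PhysicsTools.PatAlgos.tools.heavyIonTools import configureHeavyIons, disableMonteCarloDeps
#     configureHeavyIons(process)      # production + selection defaults for heavy ions
#     # disableMonteCarloDeps(process) # only when running on real data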
|
nnutils/nmr_pytorch.py | NVlabs/UMR | 184 | 11068573 | <gh_stars>100-1000
# -----------------------------------------------------------------------------------
# Code adapted from:
# https://github.com/akanazawa/cmr/blob/master/nnutils/nmr.py
#
# MIT License
#
# Copyright (c) 2018 akanazawa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.misc
import tqdm
import torch
import torch.nn as nn
import neural_renderer
from ..nnutils import geom_utils
class NMR(object):
def __init__(self, image_size, anti_aliasing, camera_mode, perspective):
renderer = neural_renderer.Renderer(image_size=image_size, anti_aliasing=anti_aliasing, camera_mode=camera_mode, perspective=perspective, background_color=[0,0,0])
self.renderer = renderer
def forward_mask(self, vertices, faces):
''' Renders masks.
Args:
vertices: B X N X 3 numpy array
faces: B X F X 3 numpy array
Returns:
masks: B X 256 X 256 numpy array
'''
masks = self.renderer.render_silhouettes(vertices, faces)
return masks
def forward_img(self, vertices, faces, textures):
        ''' Renders RGB images.
Args:
vertices: B X N X 3 numpy array
faces: B X F X 3 numpy array
textures: B X F X T X T X T X 3 numpy array
Returns:
images: B X 3 x 256 X 256 numpy array
'''
images = self.renderer.render_rgb(vertices, faces, textures)
return images
class Render(nn.Module):
def __init__(self, renderer):
super(Render, self).__init__()
self.renderer = renderer
def forward(self, vertices, faces, textures = None):
# B x N x 3
# Flipping the y-axis here to make it align with the image coordinate system!
vs = vertices
vs[:, :, 1] *= -1
fs = faces
if textures is None:
self.mask_only = True
masks = self.renderer.forward_mask(vs, fs)
return masks
else:
self.mask_only = False
ts = textures
imgs = self.renderer.forward_img(vs, fs, ts)
return imgs
class NeuralRenderer(nn.Module):
def __init__(self, img_size = 256):
super(NeuralRenderer, self).__init__()
self.renderer = NMR(image_size=img_size, anti_aliasing=True, camera_mode='look_at', perspective=False)
# Set a default camera to be at (0, 0, -2.732)
self.renderer.renderer.eye = [0, 0, -2.732]
# Make it a bit brighter for vis
self.renderer.renderer.light_intensity_ambient = 0.8
self.proj_fn = geom_utils.orthographic_proj_withz
self.offset_z = 5.
def ambient_light_only(self):
# Make light only ambient.
self.renderer.renderer.light_intensity_ambient = 1
self.renderer.renderer.light_intensity_directional = 0
def set_bgcolor(self, color):
self.renderer.renderer.background_color = color
def set_light_dir(self, direction, int_dir=0.8, int_amb=0.8):
renderer = self.renderer.renderer
renderer.light_direction = direction
renderer.light_intensity_directional = int_dir
renderer.light_intensity_ambient = int_amb
def project_points(self, verts, cams):
proj = self.proj_fn(verts, cams)
return proj[:, :, :2]
def forward(self, vertices, faces, cams, textures=None):
faces = faces.int()
verts = self.proj_fn(vertices, cams, offset_z=self.offset_z)
if textures is not None:
return Render(self.renderer)(verts, faces, textures)
else:
return Render(self.renderer)(verts, faces)
|
turnOffBarAfterRestart/turnOffBarAfterRestart.py | eifinger/appdaemon-scripts | 122 | 11068588 | import appdaemon.plugins.hass.hassapi as hass
from requests.exceptions import HTTPError
#
# Will turn the bar table green and then off when homeassistant restarts to indicate the restart went well
#
#
# Args:
#
# light: light. example: light.bar_table
#
# Release Notes
#
# Version 1.0:
# Initial Version
class TurnOffBarAfterRestart(hass.Hass):
def initialize(self):
self.timer_handle_list = []
self.listen_event_handle_list = []
self.listen_state_handle_list = []
self.light = self.args["light"]
self.timer_handle_list.append(self.run_in(self.turn_green_callback, 1))
def turn_off_callback(self, kwargs):
"""Turn off light"""
try:
self.log("Turning {} off".format(self.friendly_name(self.light)))
self.turn_off(self.light)
except HTTPError as exception:
self.log(
"Error trying to turn off entity. Will try again in 1s. Error: {}".format(
exception
),
level="WARNING",
)
self.timer_handle_list.append(self.run_in(self.turn_off_callback, 1))
def turn_green_callback(self, kwargs):
"""This is needed because the turn_on command can result in a HTTP 503 when homeassistant is restarting"""
try:
self.call_service(
"light/turn_on",
entity_id=self.light,
rgb_color=[0, 255, 0],
white_value=0,
)
self.log("Turning {} green".format(self.friendly_name(self.light)))
self.timer_handle_list.append(self.run_in(self.turn_off_callback, 5))
except HTTPError as exception:
self.log(
"Error trying to turn on entity. Will try again in 1s. Error: {}".format(
exception
),
level="WARNING",
)
self.timer_handle_list.append(self.run_in(self.turn_green_callback, 1))
def terminate(self):
for timer_handle in self.timer_handle_list:
self.cancel_timer(timer_handle)
for listen_event_handle in self.listen_event_handle_list:
self.cancel_listen_event(listen_event_handle)
for listen_state_handle in self.listen_state_handle_list:
self.cancel_listen_state(listen_state_handle)
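# Editor's note: illustrative sketch only, not part of the original app. An AppDaemon
# apps.yaml entry wiring this app up with the documented `light` argument might look
# like this (the entity name is a placeholder):
#
#     turn_off_bar_after_restart:
#       module: turnOffBarAfterRestart
#       class: TurnOffBarAfterRestart
#       light: light.bar_table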
|
gui/widgets/py_toggle/py_toggle.py | ZETOsfx/PyOneDark_Qt_Widgets_Modern_GUI | 318 | 11068598 | # ///////////////////////////////////////////////////////////////
#
# BY: <NAME>
# PROJECT MADE WITH: Qt Designer and PySide6
# V: 1.0.0
#
# This project can be used freely for all uses, as long as they maintain the
# respective credits only in the Python scripts; any information in the visual
# interface (GUI) can be modified without any implication.
#
# There are limitations on Qt licenses if you want to use your products
# commercially, I recommend reading them on the official website:
# https://doc.qt.io/qtforpython/licenses.html
#
# ///////////////////////////////////////////////////////////////
# IMPORT QT CORE
# ///////////////////////////////////////////////////////////////
from qt_core import *
class PyToggle(QCheckBox):
def __init__(
self,
width = 50,
bg_color = "#777",
circle_color = "#DDD",
active_color = "#00BCFF",
animation_curve = QEasingCurve.OutBounce
):
QCheckBox.__init__(self)
self.setFixedSize(width, 28)
self.setCursor(Qt.PointingHandCursor)
# COLORS
self._bg_color = bg_color
self._circle_color = circle_color
self._active_color = active_color
self._position = 3
self.animation = QPropertyAnimation(self, b"position")
self.animation.setEasingCurve(animation_curve)
self.animation.setDuration(500)
self.stateChanged.connect(self.setup_animation)
@Property(float)
def position(self):
return self._position
@position.setter
def position(self, pos):
self._position = pos
self.update()
# START STOP ANIMATION
def setup_animation(self, value):
self.animation.stop()
if value:
self.animation.setEndValue(self.width() - 26)
else:
self.animation.setEndValue(4)
self.animation.start()
def hitButton(self, pos: QPoint):
return self.contentsRect().contains(pos)
def paintEvent(self, e):
p = QPainter(self)
p.setRenderHint(QPainter.Antialiasing)
p.setFont(QFont("Segoe UI", 9))
# SET PEN
p.setPen(Qt.NoPen)
# DRAW RECT
rect = QRect(0, 0, self.width(), self.height())
if not self.isChecked():
p.setBrush(QColor(self._bg_color))
p.drawRoundedRect(0,0,rect.width(), 28, 14, 14)
p.setBrush(QColor(self._circle_color))
p.drawEllipse(self._position, 3, 22, 22)
else:
p.setBrush(QColor(self._active_color))
p.drawRoundedRect(0,0,rect.width(), 28, 14, 14)
p.setBrush(QColor(self._circle_color))
p.drawEllipse(self._position, 3, 22, 22)
        p.end()
|
froide/publicbody/migrations/0032_auto_20201221_1953.py | xenein/froide | 198 | 11068600 | # Generated by Django 3.1.4 on 2020-12-21 18:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("publicbody", "0031_publicbody_change_history"),
]
operations = [
migrations.AlterField(
model_name="category",
name="name",
field=models.CharField(max_length=100, unique=True, verbose_name="name"),
),
migrations.AlterField(
model_name="category",
name="slug",
field=models.SlugField(max_length=100, unique=True, verbose_name="slug"),
),
migrations.AlterField(
model_name="publicbodytag",
name="name",
field=models.CharField(max_length=100, unique=True, verbose_name="name"),
),
migrations.AlterField(
model_name="publicbodytag",
name="slug",
field=models.SlugField(max_length=100, unique=True, verbose_name="slug"),
),
]
|
tensornetwork/utils.py | khanhgithead/TensorNetwork | 1,681 | 11068605 | <filename>tensornetwork/utils.py
# Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import h5py
import numpy as np
from tensornetwork.component_factory import get_component
import tensornetwork.network_components as network_components
from tensornetwork.network_components import Edge, AbstractNode, Node
from tensornetwork.network_operations import reachable, get_all_edges
from typing import List, Union, BinaryIO
STRING_ENCODING = network_components.STRING_ENCODING
string_type = network_components.string_type
def save_nodes(nodes: List[AbstractNode], path: Union[str, BinaryIO]) -> None:
"""Save an iterable of nodes into hdf5 format.
Args:
nodes: An iterable of connected nodes. All nodes have to connect within
`nodes`.
path: path to file where network is saved.
"""
if reachable(nodes) > set(nodes):
raise ValueError(
"Some nodes in `nodes` are connected to nodes not contained in `nodes`."
" Saving not possible.")
if len(set(nodes)) < len(list(nodes)):
raise ValueError(
'Some nodes in `nodes` appear more than once. This is not supported')
#we need to iterate twice and order matters
edges = list(get_all_edges(nodes))
nodes = list(nodes)
old_edge_names = {n: edge.name for n, edge in enumerate(edges)}
old_node_names = {n: node.name for n, node in enumerate(nodes)}
#generate unique names for nodes and edges
#for saving them
for n, node in enumerate(nodes):
node.set_name('node{}'.format(n))
for e, edge in enumerate(edges):
edge.set_name('edge{}'.format(e))
with h5py.File(path, 'w') as net_file:
nodes_group = net_file.create_group('nodes')
node_names_group = net_file.create_group('node_names')
node_names_group.create_dataset(
'names',
dtype=string_type,
data=np.array(list(old_node_names.values()), dtype=object))
edges_group = net_file.create_group('edges')
edge_names_group = net_file.create_group('edge_names')
edge_names_group.create_dataset(
'names',
dtype=string_type,
data=np.array(list(old_edge_names.values()), dtype=object))
for n, node in enumerate(nodes):
node_group = nodes_group.create_group(node.name)
node._save_node(node_group)
for edge in node.edges:
if edge.node1 == node and edge in edges:
edge_group = edges_group.create_group(edge.name)
edge._save_edge(edge_group)
edges.remove(edge)
#name edges and nodes back to their original names
for n, node in enumerate(nodes):
nodes[n].set_name(old_node_names[n])
for n, edge in enumerate(edges):
edges[n].set_name(old_edge_names[n])
def load_nodes(path: str) -> List[AbstractNode]:
"""Load a set of nodes from disk.
Args:
path: path to file where network is saved.
Returns:
An iterable of `Node` objects
"""
nodes_list = []
edges_list = []
with h5py.File(path, 'r') as net_file:
nodes = list(net_file["nodes"].keys())
node_names = {
'node{}'.format(n): v for n, v in enumerate(
net_file["node_names"]['names'].asstr(STRING_ENCODING)[()])#pylint: disable=no-member
}
edge_names = {
'edge{}'.format(n): v for n, v in enumerate(
net_file["edge_names"]['names'].asstr(STRING_ENCODING)[()])#pylint: disable=no-member
}
edges = list(net_file["edges"].keys())
for node_name in nodes:
node_data = net_file["nodes/" + node_name]
node_type = get_component(node_data['type'].asstr()[()])
nodes_list.append(node_type._load_node(node_data=node_data))
nodes_dict = {node.name: node for node in nodes_list}
for edge in edges:
edge_data = net_file["edges/" + edge]
edges_list.append(Edge._load_edge(edge_data, nodes_dict))
for edge in edges_list:
edge.set_name(edge_names[edge.name])
for node in nodes_list:
node.set_name(node_names[node.name])
return nodes_list
def from_topology(topology, tensors, backend=None):
"""Create and connect new `tn.Node`s by the given einsum-like topology.
Example:
```
a, b, c = tn.from_topology("xy,yz,zx", [a, b, c])
```
Args:
topology: A string that defines the topology. Should be like
the left side of an einsum expression.
tensors: The tensors needed to create the nodes.
Returns:
A list of Nodes.
"""
edge_dict = {}
nodes = []
split_list = topology.split(",")
if len(split_list) != len(tensors):
raise ValueError("topology and number of tensors is mismatched")
for local_axes, tensor in zip(split_list, tensors):
local_axes_list = list(local_axes)
if len(local_axes_list) != len(tensor.shape):
raise ValueError(f"{local_axes} does not match shape {tensor.shape}")
new_node = Node(tensor, axis_names=local_axes_list, backend=backend)
for c in local_axes:
if c in edge_dict:
edge_dict[c] = edge_dict[c] ^ new_node[c]
else:
edge_dict[c] = new_node[c]
nodes.append(new_node)
return nodes
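# Editor's note: illustrative sketch only, not part of the original module. The helpers
# above compose naturally: build a small network with from_topology(), persist it with
# save_nodes() and restore it with load_nodes() (np is the module-level numpy import).
#
#     a = np.random.rand(2, 3)
#     b = np.random.rand(3, 2)
#     nodes = from_topology("xy,yx", [a, b])
#     save_nodes(nodes, "net.h5")
#     restored = load_nodes("net.h5")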
|
jitrebalance/test_jitrebalance.py | bit0fun/plugins | 173 | 11068612 | <filename>jitrebalance/test_jitrebalance.py
from pyln.client import RpcError
from pyln.testing.fixtures import * # noqa: F401, F403
from pyln.testing.utils import wait_for, DEVELOPER
import os
import time
import pytest
import unittest
currdir = os.path.dirname(__file__)
plugin = os.path.join(currdir, 'jitrebalance.py')
hold_plugin = os.path.join(currdir, 'tests/hold_htlcs.py')
reject_plugin = os.path.join(currdir, 'tests/refuse_htlcs.py')
@unittest.skipIf(not DEVELOPER, "gossip is too slow if we're not in developer mode")
def test_simple_rebalance(node_factory):
"""Simple rebalance that routes along a cycle to enable the original payment
l1 ---- l2 ---- l3 ----- l4
| /
| /
| /
l5
We are going to drain the channel (l2, l3) of most of its funds and then
ask l1 to route through [l1, l2, l3, l4]. Under normal circumstances
that'd fail since (l2, l3) doesn't have sufficient funds. l2 however will
attempt to rebalance (l2,l3) using a circular route (l2, l5, l3, l2) to
get the required funds back.
"""
print(plugin)
opts = [{}, {'plugin': plugin}, {}, {}, {}]
l1, l2, l3, l4, l5 = node_factory.get_nodes(5, opts=opts)
amt = 10**7
# Open the channels
channels = [(l1, l2), (l3, l2), (l3, l4), (l2, l5), (l5, l3)]
for src, dst in channels:
src.openchannel(dst, capacity=10**6)
# Drain (l2, l3) so that a larger payment fails later on
chan = l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]
# Send 9 million millisatoshis + reserve + a tiny fee allowance from l3 to
# l2 for the actual payment
inv = l2.rpc.invoice(
chan['our_channel_reserve_satoshis'] * 1000 + 9000000 + 100,
"imbalance", "imbalance"
)
time.sleep(1)
l3.rpc.pay(inv['bolt11'])
def no_pending_htlcs():
peer = l2.rpc.listpeers(l3.info['id'])['peers'][0]
return peer['channels'][0]['htlcs'] == []
wait_for(no_pending_htlcs)
chan = l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]
assert(chan['spendable_msatoshi'] < amt)
# Get (l2, l5) so we can exclude it when routing from l1 to l4
peer = l2.rpc.listpeers(l5.info['id'])['peers'][0]
scid = peer['channels'][0]['short_channel_id']
# The actual invoice that l1 will attempt to pay to l4, and that will be
# larger than the current capacity of (l2, l3) so it triggers a
# rebalancing.
inv = l4.rpc.invoice(amt, "test", "test")
# Now wait for gossip to settle and l1 to learn the topology so it can
# then route the payment. We do this now since we already did what we
# could without this info
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2 * len(channels))
route = l1.rpc.getroute(node_id=l4.info['id'], msatoshi=amt, riskfactor=1,
exclude=[scid + '/0', scid + '/1'])['route']
# This will succeed with l2 doing a rebalancing just-in-time !
l1.rpc.sendpay(route, inv['payment_hash'], payment_secret=inv.get('payment_secret'))
assert l1.rpc.waitsendpay(inv['payment_hash'])['status'] == 'complete'
assert l2.daemon.is_in_log('Succesfully re-filled outgoing capacity')
@unittest.skipIf(not DEVELOPER, "gossip is too slow if we're not in developer mode")
def test_rebalance_failure(node_factory):
"""Same setup as the first test :
l1 ---- l2 ---- l3 ----- l4
| /
| /
| /
l5
We now test failures (l5 rejects HTLCs, l3 takes too long to resolve it).
"""
# First, the "no route left" case.
opts = [{}, {'plugin': plugin, 'jitrebalance-try-timeout': 3}, {}, {},
{'plugin': reject_plugin}]
l1, l2, l3, l4, l5 = node_factory.get_nodes(5, opts=opts)
amt = 10**7
# Open the channels
channels = [(l1, l2), (l3, l2), (l3, l4), (l2, l5), (l5, l3)]
for src, dst in channels:
src.openchannel(dst, capacity=10**6)
# Drain (l2, l3) so that a larger payment fails later on
chan = l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]
# Send 9 million millisatoshis + reserve + a tiny fee allowance from l3 to
# l2 for the actual payment
inv = l2.rpc.invoice(
chan['our_channel_reserve_satoshis'] * 1000 + 9000000 + 100,
"imbalance", "imbalance"
)
time.sleep(1)
l3.rpc.pay(inv['bolt11'])
def no_pending_htlcs():
peer = l2.rpc.listpeers(l3.info['id'])['peers'][0]
return peer['channels'][0]['htlcs'] == []
wait_for(no_pending_htlcs)
chan = l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]
assert(chan['spendable_msatoshi'] < amt)
# Get (l2, l5) so we can exclude it when routing from l1 to l4
peer = l2.rpc.listpeers(l5.info['id'])['peers'][0]
scid = peer['channels'][0]['short_channel_id']
# The actual invoice that l1 will attempt to pay to l4, and that will be
# larger than the current capacity of (l2, l3) so it triggers a
# rebalancing.
inv = l4.rpc.invoice(amt, "test", "test")
# Now wait for gossip to settle and l1 to learn the topology so it can
# then route the payment. We do this now since we already did what we
# could without this info
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2 * len(channels))
route = l1.rpc.getroute(node_id=l4.info['id'], msatoshi=amt, riskfactor=1,
exclude=[scid + '/0', scid + '/1'])['route']
# This will exclude [l5, l3] and fail as there is no route left
l1.rpc.sendpay(route, inv['payment_hash'], payment_secret=inv.get('payment_secret'))
with pytest.raises(RpcError, match='WIRE_TEMPORARY_CHANNEL_FAILURE'):
l1.rpc.waitsendpay(inv['payment_hash'])
assert l2.daemon.is_in_log('Could not get a route, no remaining one?')
l5.rpc.plugin_stop(reject_plugin)
# Now test the timeout on number of attempts
l3.rpc.plugin_start(hold_plugin)
l1.rpc.sendpay(route, inv['payment_hash'], payment_secret=inv.get('payment_secret'))
    # l3 will hold on to the HTLC, and by the time it rejects it, l2 won't try
    # other routes as it will have exceeded its timeout
with pytest.raises(RpcError, match='WIRE_TEMPORARY_CHANNEL_FAILURE'):
l1.rpc.waitsendpay(inv['payment_hash'])
assert l2.daemon.is_in_log('Timed out while trying to rebalance')
@unittest.skipIf(not DEVELOPER, "gossip is too slow if we're not in developer mode")
def test_issue_88(node_factory):
"""Reproduce issue #88: crash due to unconfirmed channel.
    l2 has a channel open with l4 that is not confirmed yet and does not have a
    stable short_channel_id, which would make the plugin crash.
"""
l1, l2, l3 = node_factory.line_graph(3, opts=[{}, {'plugin': plugin}, {}], wait_for_announce=True)
l4 = node_factory.get_node()
l2.connect(l4)
l2.rpc.fundchannel(l4.info['id'], 10**5)
peers = l2.rpc.listpeers()['peers']
# We should have 3 peers...
assert(len(peers) == 3)
# ... but only 2 channels with a short_channel_id...
assert(sum([1 for p in peers if 'short_channel_id' in p['channels'][0]]) == 2)
# ... and one with l4, without a short_channel_id
assert('short_channel_id' not in l4.rpc.listpeers()['peers'][0]['channels'])
# Now if we send a payment l1 -> l2 -> l3, then l2 will stumble while
# attempting to access the short_channel_id on the l2 -> l4 channel:
inv = l3.rpc.invoice(1000, 'lbl', 'desc')['bolt11']
l1.rpc.pay(inv)
|
tests/ssg_test_suite/oscap.py | kmccarron-rh/content | 1,138 | 11068613 | <reponame>kmccarron-rh/content
#!/usr/bin/env python
from __future__ import print_function
import logging
import os.path
import re
import collections
import xml.etree.ElementTree
import json
import datetime
import socket
import sys
import time
from ssg.constants import OSCAP_PROFILE_ALL_ID
from ssg_test_suite.log import LogHelper
from ssg_test_suite import test_env
from ssg_test_suite import common
from ssg.shims import input_func
# Needed for compatibility as there is no TimeoutError in python2.
if sys.version_info[0] < 3:
TimeoutException = socket.timeout
else:
TimeoutException = TimeoutError
logging.getLogger(__name__).addHandler(logging.NullHandler())
_CONTEXT_RETURN_CODES = {'pass': 0,
'fail': 2,
'error': 1,
'notapplicable': 0,
'fixed': 0}
_ANSIBLE_TEMPLATE = 'urn:xccdf:fix:script:ansible'
_BASH_TEMPLATE = 'urn:xccdf:fix:script:sh'
_XCCDF_NS = 'http://checklists.nist.gov/xccdf/1.2'
PROFILE_ALL_ID_SINGLE_QUOTED = False
def analysis_to_serializable(analysis):
result = dict(analysis)
for key, value in analysis.items():
if type(value) == set:
result[key] = tuple(value)
return result
def save_analysis_to_json(analysis, output_fname):
analysis2 = analysis_to_serializable(analysis)
with open(output_fname, "w") as f:
json.dump(analysis2, f)
def triage_xml_results(fname):
tree = xml.etree.ElementTree.parse(fname)
all_xml_results = tree.findall(".//{%s}rule-result" % _XCCDF_NS)
triaged = collections.defaultdict(set)
for result in list(all_xml_results):
idref = result.get("idref")
status = result.find("{%s}result" % _XCCDF_NS).text
triaged[status].add(idref)
return triaged
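# Illustrative sketch of the mapping returned by triage_xml_results; the rule
# ids and statuses below are made-up examples, not the output of a real scan:
#
#   {
#       "pass": {"xccdf_org.ssgproject.content_rule_example_a"},
#       "fail": {"xccdf_org.ssgproject.content_rule_example_b"},
#   }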
def send_files_remote(verbose_path, remote_dir, domain_ip, *files):
"""Upload files to VM."""
# files is a list of absolute paths on the host
success = True
destination = 'root@{0}:{1}'.format(domain_ip, remote_dir)
files_string = ' '.join(files)
logging.debug('Uploading files {0} to {1}'.format(files_string,
destination))
command = ['scp'] + list(common.SSH_ADDITIONAL_OPTS) + list(files) + [destination]
if common.run_cmd_local(command, verbose_path)[0] != 0:
logging.error('Failed to upload files {0}'.format(files_string))
success = False
return success
def get_file_remote(test_env, verbose_path, local_dir, remote_path):
"""Download a file from VM."""
# remote_path is an absolute path of a file on remote machine
success = True
logging.debug('Downloading remote file {0} to {1}'
.format(remote_path, local_dir))
with open(verbose_path, "a") as log_file:
try:
test_env.scp_download_file(remote_path, local_dir, log_file)
except Exception:
logging.error('Failed to download file {0}'.format(remote_path))
success = False
return success
def find_result_id_in_output(output):
match = re.search('result id.*$', output, re.IGNORECASE | re.MULTILINE)
if match is None:
return None
# Return the right most word of the match which is the result id.
return match.group(0).split()[-1]
def get_result_id_from_arf(arf_path, verbose_path):
command = ['oscap', 'info', arf_path]
command_string = ' '.join(command)
returncode, output = common.run_cmd_local(command, verbose_path)
if returncode != 0:
raise RuntimeError('{0} returned {1} exit code'.
format(command_string, returncode))
res_id = find_result_id_in_output(output)
if res_id is None:
raise RuntimeError('Failed to find result ID in {0}'
.format(arf_path))
return res_id
def single_quote_string(input):
result = input
for char in "\"'":
result = result.replace(char, "")
return "'{}'".format(result)
def generate_fixes_remotely(test_env, formatting, verbose_path):
command_base = ['oscap', 'xccdf', 'generate', 'fix']
command_options = [
'--benchmark-id', formatting['benchmark_id'],
'--profile', formatting['profile'],
'--template', formatting['output_template'],
'--output', '/{output_file}'.format(** formatting),
]
command_operands = ['/{arf_file}'.format(** formatting)]
if 'result_id' in formatting:
command_options.extend(['--result-id', formatting['result_id']])
command_components = command_base + command_options + command_operands
command_string = ' '.join([single_quote_string(c) for c in command_components])
with open(verbose_path, "a") as log_file:
test_env.execute_ssh_command(command_string, log_file)
def run_stage_remediation_ansible(run_type, test_env, formatting, verbose_path):
"""
Returns False on error, or True in case of successful Ansible playbook
run."""
formatting['output_template'] = _ANSIBLE_TEMPLATE
send_arf_to_remote_machine_and_generate_remediations_there(
run_type, test_env, formatting, verbose_path)
if not get_file_remote(test_env, verbose_path, LogHelper.LOG_DIR,
'/' + formatting['output_file']):
return False
command = (
'ansible-playbook', '-v', '-i', '{0},'.format(formatting['domain_ip']),
'-u' 'root', '--ssh-common-args={0}'.format(' '.join(test_env.ssh_additional_options)),
formatting['playbook'])
command_string = ' '.join(command)
returncode, output = common.run_cmd_local(command, verbose_path)
# Appends output of ansible-playbook to the verbose_path file.
with open(verbose_path, 'ab') as f:
f.write('Stdout of "{}":'.format(command_string).encode("utf-8"))
f.write(output.encode("utf-8"))
if returncode != 0:
msg = (
'Ansible playbook remediation run has '
'exited with return code {} instead of expected 0'
.format(returncode))
LogHelper.preload_log(logging.ERROR, msg, 'fail')
return False
return True
def run_stage_remediation_bash(run_type, test_env, formatting, verbose_path):
"""
Returns False on error, or True in case of successful bash scripts
run."""
formatting['output_template'] = _BASH_TEMPLATE
send_arf_to_remote_machine_and_generate_remediations_there(
run_type, test_env, formatting, verbose_path)
if not get_file_remote(test_env, verbose_path, LogHelper.LOG_DIR,
'/' + formatting['output_file']):
return False
command_string = '/bin/bash -x /{output_file}'.format(** formatting)
with open(verbose_path, "a") as log_file:
try:
test_env.execute_ssh_command(command_string, log_file)
except Exception as exc:
msg = (
'Bash script remediation run has exited with return code {} '
'instead of expected 0'.format(exc.returncode))
LogHelper.preload_log(logging.ERROR, msg, 'fail')
return False
return True
def send_arf_to_remote_machine_and_generate_remediations_there(
run_type, test_env, formatting, verbose_path):
if run_type == 'rule':
try:
res_id = get_result_id_from_arf(formatting['arf'], verbose_path)
except Exception as exc:
logging.error(str(exc))
return False
formatting['result_id'] = res_id
with open(verbose_path, "a") as log_file:
try:
test_env.scp_upload_file(formatting["arf"], "/", log_file)
except Exception:
return False
try:
generate_fixes_remotely(test_env, formatting, verbose_path)
except Exception as exc:
logging.error(str(exc))
return False
def is_virtual_oscap_profile(profile):
""" Test if the profile belongs to the so called category virtual
from OpenSCAP available profiles. It can be (all) or other id we
might come up in the future, it just needs to be encapsulated
with parenthesis for example "(custom_profile)".
"""
if profile is not None:
if profile == OSCAP_PROFILE_ALL_ID:
return True
else:
if "(" == profile[:1] and ")" == profile[-1:]:
return True
return False
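# For example (illustrative values): is_virtual_oscap_profile("(all)") and
# is_virtual_oscap_profile("(custom_profile)") return True, while a concrete
# id such as "xccdf_org.ssgproject.content_profile_foo" returns False.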
def process_profile_id(profile):
# Detect if the profile is virtual and include single quotes if needed.
if is_virtual_oscap_profile(profile):
if PROFILE_ALL_ID_SINGLE_QUOTED:
return "'{}'".format(profile)
else:
return profile
else:
return profile
class GenericRunner(object):
def __init__(self, environment, profile, datastream, benchmark_id):
self.environment = environment
self.profile = profile
self.datastream = datastream
self.benchmark_id = benchmark_id
self.arf_file = ''
self.arf_path = ''
self.verbose_path = ''
self.report_path = ''
self.results_path = ''
self.stage = 'undefined'
self.clean_files = False
self.create_reports = True
self.manual_debug = False
self._filenames_to_clean_afterwards = set()
self.command_base = []
self.command_options = []
self.command_operands = []
        # number of seconds to sleep after the reboot of the VM to let
        # the system finish startup; there were problems with
        # temporary files created by Dracut during image generation interfering
        # with the scan
self.time_to_finish_startup = 30
def _make_arf_path(self):
self.arf_file = self._get_arf_file()
self.arf_path = os.path.join(LogHelper.LOG_DIR, self.arf_file)
def _get_arf_file(self):
raise NotImplementedError()
def _make_verbose_path(self):
verbose_file = self._get_verbose_file()
verbose_path = os.path.join(LogHelper.LOG_DIR, verbose_file)
self.verbose_path = LogHelper.find_name(verbose_path, '.verbose.log')
def _get_verbose_file(self):
raise NotImplementedError()
def _make_report_path(self):
report_file = self._get_report_file()
report_path = os.path.join(LogHelper.LOG_DIR, report_file)
self.report_path = LogHelper.find_name(report_path, '.html')
def _get_report_file(self):
raise NotImplementedError()
def _make_results_path(self):
results_file = self._get_results_file()
results_path = os.path.join(LogHelper.LOG_DIR, results_file)
self.results_path = LogHelper.find_name(results_path, '.xml')
def _get_results_file(self):
raise NotImplementedError()
def _generate_report_file(self):
self.command_options.extend([
'--report', self.report_path,
])
self._filenames_to_clean_afterwards.add(self.report_path)
def _wait_for_continue(self):
""" In case user requests to leave machine in failed state for hands
on debugging, ask for keypress to continue."""
input_func("Paused for manual debugging. Continue by pressing return.")
def prepare_online_scanning_arguments(self):
self.command_options.extend([
'--benchmark-id', self.benchmark_id,
'--profile', self.profile,
'--progress', '--oval-results',
])
self.command_operands.append(self.datastream)
def run_stage(self, stage):
self.stage = stage
self._make_verbose_path()
self._make_report_path()
self._make_arf_path()
self._make_results_path()
self.command_base = []
self.command_options = ['--verbose', 'DEVEL']
self.command_operands = []
result = None
if stage == 'initial':
result = self.initial()
elif stage == 'remediation':
result = self.remediation()
elif stage == 'final':
result = self.final()
else:
raise RuntimeError('Unknown stage: {}.'.format(stage))
if self.clean_files:
for fname in tuple(self._filenames_to_clean_afterwards):
try:
os.remove(fname)
except OSError:
logging.error(
"Failed to cleanup file '{0}'"
.format(fname))
finally:
self._filenames_to_clean_afterwards.remove(fname)
if result == 1:
LogHelper.log_preloaded('pass')
if self.clean_files:
files_to_remove = [self.verbose_path]
if stage in ['initial', 'final']:
files_to_remove.append(self.results_path)
for fname in tuple(files_to_remove):
try:
if os.path.exists(fname):
os.remove(fname)
except OSError:
logging.error(
"Failed to cleanup file '{0}'"
.format(fname))
elif result == 2:
LogHelper.log_preloaded('notapplicable')
else:
LogHelper.log_preloaded('fail')
if self.manual_debug:
self._wait_for_continue()
return result
@property
def get_command(self):
return self.command_base + self.command_options + self.command_operands
def make_oscap_call(self):
raise NotImplementedError()
def initial(self):
if self.create_reports:
self.command_options += ['--results', self.results_path]
result = self.make_oscap_call()
return result
def remediation(self):
raise NotImplementedError()
def final(self):
if self.create_reports:
self.command_options += ['--results', self.results_path]
result = self.make_oscap_call()
return result
def analyze(self, stage):
triaged_results = triage_xml_results(self.results_path)
triaged_results["stage"] = stage
triaged_results["runner"] = self.__class__.__name__
return triaged_results
def _get_formatting_dict_for_remediation(self):
formatting = {
'domain_ip': self.environment.domain_ip,
'profile': self.profile,
'datastream': self.datastream,
'benchmark_id': self.benchmark_id
}
formatting['arf'] = self.arf_path
formatting['arf_file'] = self.arf_file
return formatting
class ProfileRunner(GenericRunner):
def _get_arf_file(self):
return '{0}-initial-arf.xml'.format(self.profile)
def _get_verbose_file(self):
return '{0}-{1}'.format(self.profile, self.stage)
def _get_report_file(self):
return '{0}-{1}'.format(self.profile, self.stage)
def _get_results_file(self):
return '{0}-{1}-results'.format(self.profile, self.stage)
def final(self):
if self.environment.name == 'libvirt-based':
logging.info("Rebooting domain '{0}' before final scan."
.format(self.environment.domain_name))
self.environment.reboot()
logging.info("Waiting for {0} seconds to let the system finish startup."
.format(self.time_to_finish_startup))
time.sleep(self.time_to_finish_startup)
return GenericRunner.final(self)
def make_oscap_call(self):
self.prepare_online_scanning_arguments()
self._generate_report_file()
returncode, self._oscap_output = self.environment.scan(
self.command_options + self.command_operands, self.verbose_path)
if returncode not in [0, 2]:
logging.error(('Profile run should end with return code 0 or 2 '
'not "{0}" as it did!').format(returncode))
return False
return True
class RuleRunner(GenericRunner):
def __init__(
self, environment, profile, datastream, benchmark_id,
rule_id, script_name, dont_clean, no_reports, manual_debug):
super(RuleRunner, self).__init__(
environment, profile, datastream, benchmark_id,
)
self.rule_id = rule_id
self.context = None
self.script_name = script_name
self.clean_files = not dont_clean
self.create_reports = not no_reports
self.manual_debug = manual_debug
self._oscap_output = ''
def _get_arf_file(self):
return '{0}-initial-arf.xml'.format(self.rule_id)
def _get_verbose_file(self):
return '{0}-{1}-{2}'.format(self.rule_id, self.script_name, self.stage)
def _get_report_file(self):
return '{0}-{1}-{2}'.format(self.rule_id, self.script_name, self.stage)
def _get_results_file(self):
return '{0}-{1}-{2}-results-{3}'.format(
self.rule_id, self.script_name, self.profile, self.stage)
def make_oscap_call(self):
self.prepare_online_scanning_arguments()
if self.create_reports:
self._generate_report_file()
self.command_options.extend(
['--rule', self.rule_id])
returncode, self._oscap_output = self.environment.scan(
self.command_options + self.command_operands, self.verbose_path)
return self._analyze_output_of_oscap_call()
def final(self):
success = super(RuleRunner, self).final()
success = success and self._analyze_output_of_oscap_call()
return success
def _find_rule_result_in_output(self):
        # The oscap --progress option outputs rule results to stdout in the
        # following format:
# xccdf_org....rule_accounts_password_minlen_login_defs:pass
match = re.findall('{0}:(.*)$'.format(self.rule_id),
self._oscap_output,
re.MULTILINE)
if not match:
# When the rule is not selected, it won't match in output
return "notselected"
# When --remediation is executed, there will be two entries in
# progress output, one for fail, and one for fixed, e.g.
# xccdf_org....rule_accounts_password_minlen_login_defs:fail
# xccdf_org....rule_accounts_password_minlen_login_defs:fixed
# We are interested in the last one
return match[-1]
def _analyze_output_of_oscap_call(self):
local_success = 1
# check expected result
rule_result = self._find_rule_result_in_output()
if rule_result == "notapplicable":
msg = (
'Rule {0} evaluation resulted in {1}'
.format(self.rule_id, rule_result))
LogHelper.preload_log(logging.WARNING, msg, 'notapplicable')
local_success = 2
return local_success
if rule_result != self.context:
local_success = 0
if rule_result == 'notselected':
msg = (
'Rule {0} has not been evaluated! '
'Wrong profile selected in test scenario?'
.format(self.rule_id))
else:
msg = (
'Rule evaluation resulted in {0}, '
'instead of expected {1} during {2} stage '
.format(rule_result, self.context, self.stage)
)
LogHelper.preload_log(logging.ERROR, msg, 'fail')
return local_success
def _get_formatting_dict_for_remediation(self):
fmt = super(RuleRunner, self)._get_formatting_dict_for_remediation()
fmt['rule_id'] = self.rule_id
return fmt
def run_stage_with_context(self, stage, context):
self.context = context
return self.run_stage(stage)
class OscapProfileRunner(ProfileRunner):
def remediation(self):
self.command_options += ['--remediate']
return self.make_oscap_call()
class AnsibleProfileRunner(ProfileRunner):
def initial(self):
self.command_options += ['--results-arf', self.arf_path]
return super(AnsibleProfileRunner, self).initial()
def remediation(self):
formatting = self._get_formatting_dict_for_remediation()
formatting['output_file'] = '{0}.yml'.format(self.profile)
formatting['playbook'] = os.path.join(LogHelper.LOG_DIR,
formatting['output_file'])
return run_stage_remediation_ansible('profile', self.environment,
formatting,
self.verbose_path)
class BashProfileRunner(ProfileRunner):
def initial(self):
self.command_options += ['--results-arf', self.arf_path]
return super(BashProfileRunner, self).initial()
def remediation(self):
formatting = self._get_formatting_dict_for_remediation()
formatting['output_file'] = '{0}.sh'.format(self.profile)
return run_stage_remediation_bash('profile', self.environment, formatting, self.verbose_path)
class OscapRuleRunner(RuleRunner):
def remediation(self):
self.command_options += ['--remediate']
return self.make_oscap_call()
def final(self):
""" There is no need to run final scan again - result won't be different
to what we already have in remediation step."""
return True
class BashRuleRunner(RuleRunner):
def initial(self):
self.command_options += ['--results-arf', self.arf_path]
return super(BashRuleRunner, self).initial()
def remediation(self):
formatting = self._get_formatting_dict_for_remediation()
formatting['output_file'] = '{0}.sh'.format(self.rule_id)
success = run_stage_remediation_bash('rule', self.environment, formatting, self.verbose_path)
return success
class AnsibleRuleRunner(RuleRunner):
def initial(self):
self.command_options += ['--results-arf', self.arf_path]
return super(AnsibleRuleRunner, self).initial()
def remediation(self):
formatting = self._get_formatting_dict_for_remediation()
formatting['output_file'] = '{0}.yml'.format(self.rule_id)
formatting['playbook'] = os.path.join(LogHelper.LOG_DIR,
formatting['output_file'])
success = run_stage_remediation_ansible('rule', self.environment, formatting, self.verbose_path)
return success
class Checker(object):
def __init__(self, test_env):
self.test_env = test_env
self.executed_tests = 0
self.datastream = ""
self.benchmark_id = ""
self.remediate_using = ""
self.benchmark_cpes = set()
now = datetime.datetime.now()
self.test_timestamp_str = now.strftime("%Y-%m-%d %H:%M")
def test_target(self, target):
self.start()
try:
self._test_target(target)
except KeyboardInterrupt:
logging.info("Terminating the test run due to keyboard interrupt.")
except RuntimeError as exc:
logging.error("Terminating due to error: {msg}.".format(msg=str(exc)))
except TimeoutException as exc:
logging.error("Terminating due to timeout: {msg}".format(msg=str(exc)))
finally:
self.finalize()
def run_test_for_all_profiles(self, profiles, test_data=None):
if len(profiles) > 1:
with test_env.SavedState.create_from_environment(self.test_env, "prepared") as state:
args_list = [(p, test_data) for p in profiles]
state.map_on_top(self._run_test, args_list)
elif profiles:
self._run_test(profiles[0], test_data)
def _test_target(self, target):
raise NotImplementedError()
def _run_test(self, profile, test_data):
raise NotImplementedError()
def start(self):
self.executed_tests = 0
try:
self.test_env.start()
except Exception as exc:
msg = ("Failed to start test environment '{0}': {1}"
.format(self.test_env.name, str(exc)))
raise RuntimeError(msg)
def finalize(self):
if not self.executed_tests:
logging.warning("Nothing has been tested!")
try:
self.test_env.finalize()
except Exception as exc:
msg = ("Failed to finalize test environment '{0}': {1}"
.format(self.test_env.name, str(exc)))
raise RuntimeError(msg)
REMEDIATION_PROFILE_RUNNERS = {
'oscap': OscapProfileRunner,
'bash': BashProfileRunner,
'ansible': AnsibleProfileRunner,
}
REMEDIATION_RULE_RUNNERS = {
'oscap': OscapRuleRunner,
'bash': BashRuleRunner,
'ansible': AnsibleRuleRunner,
}
REMEDIATION_RUNNER_TO_REMEDIATION_MEANS = {
'oscap': 'bash',
'bash': 'bash',
'ansible': 'ansible',
}
|
python/28_binary_heap/heap.py | shipan3452/algo | 22,028 | 11068636 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import math
import random
class Heap:
def __init__(self, nums=None, capacity=100):
self._data = []
self._capacity = capacity
if type(nums) == list and len(nums) <= self._capacity:
for n in nums:
assert type(n) is int
self._data.append(n)
self._length = len(self._data)
self._heapify()
def _heapify(self):
if self._length <= 1:
return
# idx of the Last Parent node
lp = (self._length - 2) // 2
for i in range(lp, -1, -1):
self._heap_down(i)
def _heap_down(self, idx):
pass
def insert(self, num):
pass
def get_top(self):
if self._length <= 0:
return None
return self._data[0]
def remove_top(self):
if self._length <= 0:
return None
self._data[0], self._data[-1] = self._data[-1], self._data[0]
ret = self._data.pop()
self._length -= 1
self._heap_down(0)
return ret
def get_data(self):
return self._data
def get_length(self):
return self._length
@staticmethod
def _draw_heap(data):
"""
        Pretty-print the heap as a level-ordered string.
        :param data: list backing the heap
        :return: formatted string with one heap level per line
"""
length = len(data)
if length == 0:
return 'empty heap'
ret = ''
for i, n in enumerate(data):
ret += str(n)
            # newline after the last element of each level
if i == 2 ** int(math.log(i + 1, 2) + 1) - 2 or i == len(data) - 1:
ret += '\n'
else:
ret += ', '
return ret
def __repr__(self):
return self._draw_heap(self._data)
class MaxHeap(Heap):
def _heap_down(self, idx):
if self._length <= 1:
return
lp = (self._length - 2) // 2
while idx <= lp:
lc = 2 * idx + 1
rc = lc + 1
if rc <= self._length-1:
tmp = lc if self._data[lc] > self._data[rc] else rc
else:
tmp = lc
if self._data[tmp] > self._data[idx]:
self._data[tmp], self._data[idx] = self._data[idx], self._data[tmp]
idx = tmp
else:
break
def insert(self, num):
if self._length >= self._capacity:
return False
self._data.append(num)
self._length += 1
nn = self._length - 1
while nn > 0:
p = (nn-1) // 2
if self._data[nn] > self._data[p]:
self._data[nn], self._data[p] = self._data[p], self._data[nn]
nn = p
else:
break
return True
class MinHeap(Heap):
def _heap_down(self, idx):
if self._length <= 1:
return
lp = (self._length - 2) // 2
while idx <= lp:
lc = 2 * idx + 1
rc = lc + 1
if rc <= self._length-1:
tmp = lc if self._data[lc] < self._data[rc] else rc
else:
tmp = lc
if self._data[tmp] < self._data[idx]:
self._data[tmp], self._data[idx] = self._data[idx], self._data[tmp]
idx = tmp
else:
break
def insert(self, num):
if self._length >= self._capacity:
return False
self._data.append(num)
self._length += 1
nn = self._length - 1
while nn > 0:
p = (nn-1) // 2
if self._data[nn] < self._data[p]:
self._data[nn], self._data[p] = self._data[p], self._data[nn]
nn = p
else:
break
return True
if __name__ == '__main__':
nums = list(range(10))
random.shuffle(nums)
max_h = MaxHeap(nums)
print('--- max heap ---')
print(max_h)
print('--- min heap ---')
min_h = MinHeap(nums)
print(min_h)
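    # A small illustrative sketch: heap sort built on MinHeap by repeatedly
    # removing the top element; the input values are arbitrary.
    #
    #   def heap_sort(values):
    #       h = MinHeap(list(values))
    #       return [h.remove_top() for _ in range(h.get_length())]
    #
    #   heap_sort([5, 3, 8, 1])  # -> [1, 3, 5, 8]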
|
Experimental/Cartpole_Highest_Reward_mem.py | ProGamerCode/FitML | 171 | 11068654 | '''
CartPole solution by <NAME>
https://github.com/FitMachineLearning/FitML/
https://www.youtube.com/channel/UCi7_WxajoowBl4_9P0DhzzA/featured
Using Actor Critic
Note that I prefer the terms Action Predictor Network and Q/Reward Predictor Network
Update
Cleaned up variables and more readable memory
Improved hyper parameters for better performance
'''
import numpy as np
import keras
import gym
import os
import h5py
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Embedding
from keras.layers import LSTM
from keras import optimizers
num_env_variables = 4
num_env_actions = 1
num_initial_observation = 10
learning_rate = 0.001
apLearning_rate = 0.003
weigths_filename = "CartPole-HRM-v2-weights.h5"
apWeights_filename = "CartPole_HRM-QL-v2-weights.h5"
# range within which the SmartCrossEntropy action parameters will deviate from
# the remembered optimal policy
sce_range = 0.2
b_discount = 0.98
max_memory_len = 10000
starting_explore_prob = 0.05
training_epochs = 5
load_previous_weights = False
observe_and_train = True
save_weights = True
num_games_to_play = 500
#One hot encoding array
possible_actions = np.arange(0,num_env_actions)
actions_1_hot = np.zeros((num_env_actions,num_env_actions))
actions_1_hot[np.arange(num_env_actions),possible_actions] = 1
#Create testing environment
env = gym.make('CartPole-v0')
env.reset()
#Initialize the Reward predictor model
model = Sequential()
#model.add(Dense(num_env_variables+num_env_actions, activation='tanh', input_dim=dataX.shape[1]))
model.add(Dense(128, activation='relu', input_dim=num_env_variables+num_env_actions))
#outputs a reward value
model.add(Dense(1))
opt = optimizers.adam(lr=learning_rate)
model.compile(loss='mse', optimizer=opt, metrics=['accuracy'])
#initialize the action predictor model
action_predictor_model = Sequential()
#model.add(Dense(num_env_variables+num_env_actions, activation='tanh', input_dim=dataX.shape[1]))
action_predictor_model.add(Dense(128, activation='relu', input_dim=num_env_variables))
action_predictor_model.add(Dense(num_env_actions))
opt2 = optimizers.adam(lr=apLearning_rate)
action_predictor_model.compile(loss='mse', optimizer=opt2, metrics=['accuracy'])
# initialize the action state reward matcher
# action_sate_reward_matcher(s,R)->a
# remembers which action to take in order to get a specific reward
action_sate_reward_matcher = Sequential()
action_sate_reward_matcher.add(Dense(128, activation='relu', input_dim=num_env_variables+1))
action_sate_reward_matcher.add(Dense(num_env_actions))
opt2 = optimizers.adam(lr=learning_rate)
action_sate_reward_matcher.compile(loss='mse', optimizer=opt2, metrics=['accuracy'])
# initialize the highest reward memory model
# highest_reward_memory(s)->R
# it remembers the highest Reward (expected sum of discounted rewards) for this specific state
# This network will be initialized to remember only negative events. It will then update its weights based on experience
highest_reward_memory_model = Sequential()
#model.add(Dense(num_env_variables+num_env_actions, activation='tanh', input_dim=dataX.shape[1]))
highest_reward_memory_model.add(Dense(1024, activation='tanh', input_dim=num_env_variables))
highest_reward_memory_model.add(Dense(512, activation='tanh'))
highest_reward_memory_model.add(Dense(1))
opt2 = optimizers.adam(lr=apLearning_rate*3)
highest_reward_memory_model.compile(loss='mse', optimizer=opt2, metrics=['accuracy'])
#load previous model weights if they exist
if load_previous_weights:
dir_path = os.path.realpath(".")
fn = dir_path + "/"+weigths_filename
print("filepath ", fn)
if os.path.isfile(fn):
print("loading weights")
model.load_weights(weigths_filename)
else:
print("File ",weigths_filename," does not exis. Retraining... ")
#load previous action predictor model weights if they exist
if load_previous_weights:
dir_path = os.path.realpath(".")
fn = dir_path + "/"+ apWeights_filename
print("filepath ", fn)
if os.path.isfile(fn):
print("loading weights")
action_predictor_model.load_weights(apWeights_filename)
else:
print("File ",apWeights_filename," does not exis. Retraining... ")
#Record first 500 in a sequence and add them to the training sequence
total_steps = 0
#StateAction array
memorySA = np.zeros(shape=(1,num_env_variables+num_env_actions))
#State
memoryS = np.zeros(shape=(1,num_env_variables))
#StateHighestReward array
memorySHR = np.zeros(shape=(1,num_env_variables+1))
#Action array
memoryA = np.zeros(shape=(1,1))
#Value/Reward array
memoryR = np.zeros(shape=(1,1))
#Highest Value/Reward array
memoryHR = np.zeros(shape=(1,1))
#Best Action array
memoryBA = np.zeros(shape=(1,1))
mstats = []
def initilizeHighestRewardMemory():
dataS = np.random.rand(20,num_env_variables)
dataR = np.full((20,1),-1)
highest_reward_memory_model.fit(dataS,dataR, batch_size=32, epochs=5,verbose=0)
def predictTotalRewards(qstate, action):
qs_a = np.concatenate((qstate,action), axis=0)
predX = np.zeros(shape=(1,num_env_variables+num_env_actions))
predX[0] = qs_a
#print("trying to predict reward at qs_a", predX[0])
pred = model.predict(predX[0].reshape(1,predX.shape[1]))
remembered_total_reward = pred[0][0]
return remembered_total_reward
def GetActionForThisStateReward(qstate,R):
predX = np.zeros(shape=(1,num_env_variables+1))
#print("predX",predX)
predX[0] = np.concatenate( (qstate, np.array([R])) , axis=0)
pred = action_sate_reward_matcher.predict(predX[0].reshape(1,predX.shape[1]))
action4StateReward = pred[0][0]
return action4StateReward
def GetHighestRewardForState(qstate):
predX = np.zeros(shape=(1,num_env_variables))
predX[0] = qstate
#print("trying to predict reward at qs_a", predX[0])
pred = highest_reward_memory_model.predict(predX[0].reshape(1,predX.shape[1]))
highest_remembered_Reward = pred[0][0]
#print ("highest_remembered_Reward",highest_remembered_Reward)
return highest_remembered_Reward
def GetRememberedOptimalPolicy(qstate):
predX = np.zeros(shape=(1,num_env_variables))
predX[0] = qstate
#print("trying to predict reward at qs_a", predX[0])
pred = action_predictor_model.predict(predX[0].reshape(1,predX.shape[1]))
r_remembered_optimal_policy = pred[0]
return r_remembered_optimal_policy
def SmartCrossEntropy(current_optimal_policy):
sce = np.zeros(shape=(num_env_actions))
#print("current_optimal_policy", current_optimal_policy)
for i in range(num_env_actions):
sce[i] = current_optimal_policy[i] + sce_range * (np.random.rand(1)*2 - 1)
if sce[i] > 1:
sce[i] = 1.0
if sce[i] < -1:
sce[i] = -1
#print("current_optimal_policy", current_optimal_policy)
#print("sce", sce)
return sce
initilizeHighestRewardMemory()
'''
for i in range(10):
res = highest_reward_memory_model.predict(np.random.rand(1,num_env_variables))
print("highest_reward_memory_model", res)
for i in range(10):
dataS = np.random.rand(1,num_env_variables)
dataR = np.full((1,1),-10+i)
highest_reward_memory_model.fit(dataS,dataR, epochs=3,verbose=2)
res = highest_reward_memory_model.predict(dataS)
print("highest_reward_memory_model", res)
'''
if observe_and_train:
#Play the game 500 times
for game in range(num_games_to_play):
gameSA = np.zeros(shape=(1,num_env_variables+num_env_actions))
gameS = np.zeros(shape=(1,num_env_variables))
gameA = np.zeros(shape=(1,1))
gameBA = np.zeros(shape=(1,1))
gameR = np.zeros(shape=(1,1))
gameHR = np.zeros(shape=(1,1))
gameSHR = np.zeros(shape=(1,num_env_variables+1))
#Get the Q state
qs = env.reset()
#print("qs ", qs)
if game < num_initial_observation:
print("Observing game ", game)
else:
print("Learning & playing game ", game)
for step in range (500):
highest_reward = GetHighestRewardForState(qs)
best_action = 0
if game < num_initial_observation:
                #take a random action
a = np.array([env.action_space.sample()])
else:
prob = np.random.rand(1)
explore_prob = starting_explore_prob-(starting_explore_prob/num_games_to_play)*game
#Chose between prediction and chance
if prob < explore_prob:
#take a random action
a=np.array([env.action_space.sample()])
else:
# Get highest remembered reward for this state
action4StateReward = GetActionForThisStateReward(qs,highest_reward)
best_action = action4StateReward
action4StateReward = np.array([action4StateReward])
                    #Get Remembered optimal policy
remembered_optimal_policy = GetRememberedOptimalPolicy(qs)
#print("action4StateReward", action4StateReward,"remembered_optimal_policy", remembered_optimal_policy)
if predictTotalRewards(qs,remembered_optimal_policy) > predictTotalRewards(qs,action4StateReward):
action4StateReward = remembered_optimal_policy
randaction = np.array([env.action_space.sample()])
#print("highest_reward", highest_reward)
#mstatsR.append(highest_reward)
#Compare R for SmartCrossEntropy action with remembered_optimal_policy and select the best
#if predictTotalRewards(qs,remembered_optimal_policy) > utility_possible_actions[best_sce_i]:
if predictTotalRewards(qs,action4StateReward) > predictTotalRewards(qs,randaction):
a = action4StateReward
#print(" | selecting remembered_optimal_policy ",a)
else:
a = randaction
#print(" - selecting generated optimal policy ",a)
if a[0] <0:
a [0]= 0
if a[0] > 1:
a[0] = 1
env.render()
a = np.around(a)
a = a.astype(int)
qs_a = np.concatenate((qs,a), axis=0)
#get the target state and reward
s,r,done,info = env.step(a[0])
#record only the first x number of states
if done and step < 197:
r=-1
if step ==0:
gameSA[0] = qs_a
gameS[0] = qs
gameR[0] = np.array([r])
gameA[0] = np.array([r])
gameHR[0] = np.array([highest_reward])
gameSHR[0] = np.array(np.concatenate( (qs, np.array([highest_reward])), axis=0 ))
gameBA[0] = np.array([best_action])
else:
gameSA = np.vstack((gameSA, qs_a))
gameS = np.vstack((gameS, qs))
gameSHR = np.vstack((gameSHR, np.concatenate( (qs, np.array([highest_reward])), axis=0 ) ))
gameR = np.vstack((gameR, np.array([r])))
gameA = np.vstack((gameA, np.array([a])))
gameBA = np.vstack((gameBA, np.array([best_action])))
gameHR = np.vstack((gameHR, np.array([highest_reward])))
if done :
mstats.append(step)
#Calculate Q values from end to start of game
for i in range(0,gameR.shape[0]):
#print("Updating total_reward at game epoch ",(gameY.shape[0]-1) - i)
if i==0:
#print("reward at the last step ",gameY[(gameY.shape[0]-1)-i][0])
gameR[(gameR.shape[0]-1)-i][0] = gameR[(gameR.shape[0]-1)-i][0]
else:
#print("local error before Bellman", gameY[(gameY.shape[0]-1)-i][0],"Next error ", gameY[(gameY.shape[0]-1)-i+1][0])
gameR[(gameR.shape[0]-1)-i][0] = gameR[(gameR.shape[0]-1)-i][0]+b_discount*gameR[(gameR.shape[0]-1)-i+1][0]
#print("reward at step",i,"away from the end is",gameY[(gameY.shape[0]-1)-i][0])
if gameR[(gameR.shape[0]-1)-i][0] > gameHR[(gameHR.shape[0]-1)-i][0]:
#print ("Old HR",gameHR[(gameHR.shape[0]-1)-i][0], "New HR",gameR[(gameR.shape[0]-1)-i][0] )
gameHR[(gameR.shape[0]-1)-i][0] = gameR[(gameR.shape[0]-1)-i][0]
gameSHR[(gameR.shape[0]-1)-i] = np.concatenate( (qs, np.array([highest_reward])), axis=0 )
gameBA[(gameR.shape[0]-1)-i][0] = gameA[(gameR.shape[0]-1)-i][0]
if i==gameR.shape[0]-1:
print("Training Game #",game, " steps = ", step ,"last reward", r,"Highest reward",gameHR[(gameHR.shape[0]-1)-i][0] ," finished with headscore ", gameR[(gameR.shape[0]-1)-i][0])
if memoryR.shape[0] ==1:
memorySA = gameSA
memoryR = gameR
memoryA = gameA
memoryS = gameS
memoryHR = gameHR
memorySHR = gameSHR
memoryBA = gameBA
else:
#Add experience to memory
memorySA = np.concatenate((memorySA,gameSA),axis=0)
memoryS = np.concatenate((memoryS,gameS),axis=0)
memoryR = np.concatenate((memoryR,gameR),axis=0)
memoryA = np.concatenate((memoryA,gameA),axis=0)
memoryBA = np.concatenate((memoryBA,gameBA),axis=0)
memoryHR = np.concatenate((memoryHR,gameHR),axis=0)
memorySHR = np.concatenate((memorySHR,gameSHR),axis=0)
#if memory is full remove first element
if np.alen(memorySA) >= max_memory_len:
#print("memory full. mem len ", np.alen(memoryX))
for l in range(np.alen(gameR)):
memorySA = np.delete(memorySA, 0, axis=0)
memoryR = np.delete(memoryR, 0, axis=0)
memoryA = np.delete(memoryA, 0, axis=0)
memoryS = np.delete(memoryS, 0, axis=0)
memoryHR = np.delete(memoryHR, 0, axis=0)
memoryBA = np.delete(memoryBA, 0, axis=0)
memorySHR = np.delete(memorySHR, 0, axis=0)
#Update the states
qs=s
if step > 497:
done = True
#Retrain every X failures after num_initial_observation
if done and game >= num_initial_observation:
if game%3 == 0:
print("Training game# ", game,"momory size", memorySA.shape[0])
#training Reward predictor model
model.fit(memorySA,memoryR, batch_size=128,epochs=training_epochs,verbose=0)
highest_reward_memory_model.fit(memoryS,memoryHR,batch_size=128,epochs=training_epochs,verbose=0)
action_sate_reward_matcher.fit(memorySHR,memoryBA,batch_size=128,epochs=training_epochs,verbose=0)
#training action predictor model
action_predictor_model.fit(memoryS,memoryBA, batch_size=128, epochs=training_epochs,verbose=0)
if done and game >= num_initial_observation:
if save_weights and game%20 == 0:
#Save model
print("Saving weights")
model.save_weights(weigths_filename)
action_predictor_model.save_weights(apWeights_filename)
if done:
#Game won conditions
if step > 197:
print("Game ", game," WON *** " )
else:
print("Game ",game," ended with positive reward ")
#Game ended - Break
break
plt.plot(mstats)
plt.show()
#plt.plot(mstatsR)
#plt.show()
if save_weights:
#Save model
print("Saving weights")
model.save_weights(weigths_filename)
action_predictor_model.save_weights(apWeights_filename)
|
qiskit/transpiler/passes/layout/set_layout.py | Roshan-Thomas/qiskit-terra | 1,599 | 11068684 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2019
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Set the ``layout`` property to the given layout."""
from qiskit.transpiler.basepasses import AnalysisPass
class SetLayout(AnalysisPass):
"""Set the ``layout`` property to the given layout.
This pass associates a physical qubit (int) to each virtual qubit
of the circuit (Qubit) in increasing order.
"""
def __init__(self, layout):
"""SetLayout initializer.
Args:
layout (Layout): the layout to set.
"""
super().__init__()
self.layout = layout
def run(self, dag):
"""Run the SetLayout pass on `dag`.
Args:
dag (DAGCircuit): DAG to map.
Returns:
DAGCircuit: the original DAG.
"""
self.property_set["layout"] = None if self.layout is None else self.layout.copy()
return dag
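# A minimal usage sketch (import paths and the Layout constructor may differ
# between Qiskit versions; the circuit and mapping below are illustrative only):
#
#   from qiskit import QuantumCircuit
#   from qiskit.transpiler import Layout, PassManager
#   qc = QuantumCircuit(3)
#   layout = Layout({qc.qubits[0]: 2, qc.qubits[1]: 0, qc.qubits[2]: 1})
#   pm = PassManager([SetLayout(layout)])
#   pm.run(qc)  # afterwards pm.property_set["layout"] holds a copy of the layout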
|
tutel/parted/patterns.py | G-arj/tutel | 156 | 11068692 | <reponame>G-arj/tutel
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .solver import register_primitive
def is_replicas(dim):
return dim == -1
def is_partition(dim):
return dim >= 0
@register_primitive("BAR")
def primitive_pass_through(sess, node, output_dim, group_size, rank):
if not is_replicas(output_dim) and not is_partition(output_dim):
return
source_dims, num_partitions = node.parser.emit_dims_by_id(output_dim)
if is_replicas(output_dim) and num_partitions == 0:
yield (0, source_dims, {})
return
connectors = dict([(inp, sess.backend.link('$', -1, None, is_param=(node.inputs[inp].op_type == "param"))) for inp in source_dims if is_replicas(source_dims[inp])])
yield (0, source_dims, connectors)
@register_primitive("FAR")
def primitive_fwd_allreduce_sum(sess, node, output_dim, group_size, rank):
if not is_replicas(output_dim):
return
if node.parser.reduce_type != '+':
return
for i, ax in enumerate(node.parser.get_reduce_axes()):
if rank is not None and i != rank:
continue
try:
source_dims, num_partitions = node.parser.emit_dims_by_name(ax)
except NotImplementedError:
continue
        assert num_partitions > 0, "It is unexpected that no input is partitioned."
connectors = dict([(inp, sess.backend.link('$', -1, None, is_param=(node.inputs[inp].op_type == "param"))) for inp in source_dims if is_replicas(source_dims[inp])])
connectors[''] = sess.backend.link('$', None, -1)
yield (i, source_dims, connectors)
@register_primitive("RS")
def primitive_fwd_reduce_scatter_sum(sess, node, output_dim, group_size, rank):
if not is_partition(output_dim):
return
if node.parser.reduce_type != '+':
return
for i, ax in enumerate(node.parser.get_reduce_axes()):
if rank is not None and i != rank:
continue
try:
source_dims, num_partitions = node.parser.emit_dims_by_name(ax)
except NotImplementedError:
continue
        assert num_partitions > 0, "It is unexpected that no input is partitioned."
connectors = dict([(inp, sess.backend.link('$', -1, None, is_param=(node.inputs[inp].op_type == "param"))) for inp in source_dims if is_replicas(source_dims[inp])])
connectors[''] = sess.backend.link('$', None, output_dim)
yield (i, source_dims, connectors)
@register_primitive("SPLIT")
def primitive_fwd_spatial_split(sess, node, output_dim, group_size, rank):
if not is_partition(output_dim):
return
source_dims, num_partitions = node.parser.emit_dims_by_id(-1)
    assert num_partitions == 0, "It is unexpected that some input is partitioned."
connectors = dict([('', sess.backend.link('$', -1, output_dim))])
yield (0, source_dims, connectors)
@register_primitive("AG")
def primitive_fwd_all_gather(sess, node, output_dim, group_size, rank):
if not is_replicas(output_dim):
return
for i in range(len(node.shape)):
if rank is not None and i != rank:
continue
try:
if node.shape[i] % group_size != 0:
continue
source_dims, num_partitions = node.parser.emit_dims_by_id(i)
except NotImplementedError:
continue
if num_partitions == 0: # Handled by fwd_pass_through as well
continue
connectors = dict([(inp, sess.backend.link('$', -1, None, is_param=(node.inputs[inp].op_type == "param"))) for inp in source_dims if is_replicas(source_dims[inp])])
connectors[''] = sess.backend.link('$', rank, -1)
yield (i, source_dims, connectors)
@register_primitive("A2A")
def primitive_alltoall(sess, node, output_dim, group_size, rank):
if not is_partition(output_dim):
return
shape = node.shape
if len(shape) < 2 or shape[output_dim] % group_size != 0:
return
for i in range(len(node.shape)):
if rank is not None and i != rank:
continue
if shape[i] % group_size != 0 or output_dim == i:
continue
try:
source_dims, num_partitions = node.parser.emit_dims_by_id(i)
connectors = dict([(inp, sess.backend.link('$', -1, None, is_param=(node.inputs[inp].op_type == "param"))) for inp in source_dims if is_replicas(source_dims[inp])])
connectors[''] = sess.backend.link('$', i, output_dim)
yield (i, source_dims, connectors)
except NotImplementedError:
continue
@register_primitive("ZERO")
def primitive_zero(sess, node, output_dim, group_size, rank):
if not is_partition(output_dim):
return
source_dims, num_partitions = node.parser.emit_dims_by_id(output_dim)
if num_partitions == 0:
return
has_params, connectors = False, {}
for inp in source_dims:
if not is_replicas(source_dims[inp]):
continue
if node.inputs[inp].op_type == 'param':
source_dims[inp] = -2
has_params, connectors[inp] = True, sess.backend.link('$', -2, -1, output_shape=node.inputs[inp].shape)
else:
connectors[inp] = sess.backend.link('$', -1, None, is_param=(node.inputs[inp].op_type == "param"))
if not has_params:
return
yield (0, source_dims, connectors)
|
lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/optional/TestDataFormatterGenericOptional.py | LaudateCorpus1/llvm-project | 605 | 11068693 | <reponame>LaudateCorpus1/llvm-project
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
USE_LIBSTDCPP = "USE_LIBSTDCPP"
USE_LIBCPP = "USE_LIBCPP"
class GenericOptionalDataFormatterTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def do_test_with_run_command(self, stdlib_type):
"""Test that that file and class static variables display correctly."""
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd('type filter clear', check=False)
self.runCmd('type synth clear', check=False)
self.addTearDownHook(cleanup)
self.build(dictionary={stdlib_type: "1"})
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
bkpt = self.target().FindBreakpointByID(
lldbutil.run_break_set_by_source_regexp(
self, "break here"))
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
self.runCmd( "frame variable has_optional" )
output = self.res.GetOutput()
        ## The variable has_optional tells us if the test program
        ## detected we have a sufficient libc++ version to support optional;
        ## false means we do not and should therefore skip the test
if output.find("(bool) has_optional = false") != -1 :
self.skipTest( "Optional not supported" )
lldbutil.continue_to_breakpoint(self.process(), bkpt)
self.expect("frame variable number_not_engaged",
substrs=['Has Value=false'])
self.expect("frame variable number_engaged",
substrs=['Has Value=true',
'Value = 42',
'}'])
self.expect("frame var numbers",
substrs=['(optional_int_vect) numbers = Has Value=true {',
'Value = size=4 {',
'[0] = 1',
'[1] = 2',
'[2] = 3',
'[3] = 4',
'}',
'}'])
self.expect("frame var ostring",
substrs=['(optional_string) ostring = Has Value=true {',
'Value = "hello"',
'}'])
@add_test_categories(["libc++"])
## Clang 7.0 is the oldest Clang that can reliably parse newer libc++ versions
## with -std=c++17.
@skipIf(oslist=no_match(["macosx"]), compiler="clang", compiler_version=['<', '7.0'])
    ## We are skipping gcc versions less than 5.1 since this test requires -std=c++17
@skipIf(compiler="gcc", compiler_version=['<', '5.1'])
def test_with_run_command_libcpp(self):
self.do_test_with_run_command(USE_LIBCPP)
@add_test_categories(["libstdcxx"])
## Clang 7.0 is the oldest Clang that can reliably parse newer libc++ versions
## with -std=c++17.
@skipIf(compiler="clang", compiler_version=['<', '7.0'])
    ## We are skipping gcc versions less than 5.1 since this test requires -std=c++17
@skipIf(compiler="gcc", compiler_version=['<', '5.1'])
def test_with_run_command_libstdcpp(self):
self.do_test_with_run_command(USE_LIBSTDCPP)
|
tests/core/test_hook.py | tomekr/cement | 826 | 11068702 | """Tests for cement.core.hook."""
from unittest.mock import Mock
from pytest import raises
from cement.core.exc import FrameworkError
from cement.core.foundation import TestApp
# module tests
class TestHookManager(object):
pass
# app functionality and coverage tests
def test_define():
with TestApp() as app:
app.hook.define('test_hook')
# is it defined?
assert app.hook.defined('test_hook')
# registering again should throw exception
with raises(FrameworkError, match='Hook name .* already defined!'):
app.hook.define('test_hook')
def test_register_and_run():
def hook_one():
return 'kapla 1'
def hook_two():
return 'kapla 2'
def hook_three():
return 'kapla 3'
with TestApp() as app:
app.hook.define('test_hook')
app.hook.register('test_hook', hook_one, weight=99)
app.hook.register('test_hook', hook_two, weight=-1)
app.hook.register('test_hook', hook_three, weight=-99)
assert len(app.hook.__hooks__['test_hook']) == 3
# and run it... track results to verify weight run order
results = []
for res in app.hook.run('test_hook'):
results.append(res)
assert results == ['kapla 3', 'kapla 2', 'kapla 1']
def test_register_hook_name_not_defined():
with TestApp() as app:
ret = app.hook.register('bogus_hook', print)
assert ret is False
def test_run_bad_hook():
with TestApp() as app:
with raises(FrameworkError, match='Hook name .* is not defined!'):
for res in app.hook.run('some_bogus_hook'):
pass
def test_framework_hooks():
test_hook = Mock(return_value='bogus')
test_hook.__name__ = 'bogusname'
test_hook_again = Mock(return_value='fake')
test_hook_again.__name__ = 'bogusname'
class MyApp(TestApp):
class Meta:
hooks = [
('pre_setup', test_hook),
('post_setup', test_hook),
('pre_run', test_hook),
('post_run', test_hook),
('pre_argument_parsing', test_hook),
('post_argument_parsing', test_hook),
('pre_close', test_hook),
('post_close', test_hook),
('signal', test_hook),
('pre_render', test_hook),
('pre_render', test_hook_again),
('post_render', test_hook),
('post_render', test_hook),
]
with MyApp() as app:
# Pre- and post- setup
assert test_hook.call_count == 2
test_hook.reset_mock()
# Pre/post run (+ pre/post argparse)
# App has no controller, so it also parses args here
app.run()
assert test_hook.call_count == 4
test_hook.reset_mock()
# pre/post render
# two hooks each, one is test_hook_again
app.render({1: 'bogus'})
assert test_hook.call_count == 3
assert test_hook_again.call_count == 1
test_hook.reset_mock()
test_hook_again.reset_mock()
# TODO: Test that signal hook gets called properly
# pre/post close
assert test_hook.call_count == 2
def test_generate_type_hook():
def my_generator():
for i in [1, 1, 1]:
yield i
with TestApp() as app:
app.hook.define('test_hook')
app.hook.register('test_hook', my_generator)
app.run()
for res in app.hook.run('test_hook'):
assert res == 1
def test_list():
with TestApp() as app:
assert 'pre_setup' in app.hook.list()
|
posthog/migrations/0040_remove_event_ip.py | avoajaugochukwu/posthog | 7,409 | 11068712 | # Generated by Django 3.0.3 on 2020-04-04 11:45
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("posthog", "0039_populate_event_ip_property"),
]
operations = [
migrations.RemoveField(model_name="event", name="ip",),
]
|
TestSuites/RDP/RDPSUTControlAgent/Python/RDPSUTControlAgent.py | microsoft/WindowsProtocolTestSuites | 332 | 11068720 | #!/usr/bin/env python
import binascii
import datetime
import logging
import socket
import subprocess
import sys
import threading
from struct import Struct, unpack
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser # ver. < 3.0
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
message_type = {'SUT_CONTROL_REQUEST': 0x0000,
'SUT_CONTROL_RESPONSE': 0x0001}
command_id = {'START_RDP_CONNECTION': 0x0001,
'CLOSE_RDP_CONNECTION': 0x0002,
'AUTO_RECONNECT': 0x0003,
'BASIC_INPUT': 0x0004,
'SCREEN_SHOT': 0x0005,
'TOUCH_EVENT_SINGLE': 0x0101,
'TOUCH_EVENT_MULTIPLE': 0x0102,
'TOUCH_EVENT_DISMISS_HOVERING_CONTACT': 0x0103,
'DISPLAY_UPDATE_RESOLUTION': 0x0201,
'DISPLAY_UPDATE_MONITORS': 0x0202,
'DISPLAY_FULLSCREEN': 0x0203}
payload_type = {'RDP_FILE': 0x0000,
'PARAMETERS_STRUCT': 0x0001}
testsuite_id = {'RDP_TESTSUITE': 0x0001}
result_code = {'SUCCESS': 0x00000000}
basic_input_flag = {'KEYBOARD_EVENT': 0x00000001,
'UNICODE_KEYBOARD_EVENT': 0x00000002,
'MOUSE_EVENT': 0x00000004,
'EXTENDED_MOUSE_EVENT': 0x00000008,
'CLIENT_SYNCHRONIZE_EVENT': 0x00000010,
'CLIENT_REFRESH_RECT': 0x00000020,
'CLIENT_SUPPRESS_OUTPUT': 0x00000040}
screen_type = {'NORMAL': 0x0000,
'FULL_SCREEN': 0x0001}
connect_approach = {'NEGOTIATE': 0x0000,
'DIRECT': 0x0001}
monitor_action = {'ADD_MONITOR': 0x00000001,
'REMOVE_MONITOR': 0x00000002,
'MOVE_MONITOR_POSITION': 0x00000004}
class Payload:
def __init__(self):
self.type = ""
self.content = ""
self.port = 0
self.desktop_width = 0
self.desktop_height = 0
self.connect_approach = ""
self.screen_type = ""
self.address = ""
def encode(self):
pass
def decode(self, raw_payload):
self.type = unpack('<i', raw_payload[:4])
self.type = self.type[0]
logging.debug("payload type: %s", self.type)
if self.type == payload_type['RDP_FILE']:
content_length = len(raw_payload) - 4
self.content = unpack('<%sc' % content_length, raw_payload[4:])
logging.debug("content: %s", self.content)
elif self.type == payload_type['PARAMETERS_STRUCT']:
start = 4
self.port = unpack('<h', raw_payload[start:start + 2])
self.port = self.port[0]
logging.debug("port: %d", self.port)
start += 2
self.screen_type = unpack('<h', raw_payload[start:start + 2])
self.screen_type = self.screen_type[0]
logging.debug("screen_type: %s", self.screen_type)
start += 2
self.desktop_width = unpack('<h', raw_payload[start:start + 2])
self.desktop_width = self.desktop_width[0]
logging.debug("desktop_width: %d", self.desktop_width)
start += 2
self.desktop_height = unpack('<h', raw_payload[start:start + 2])
self.desktop_height = self.desktop_height[0]
logging.debug("desktop_height: %d", self.desktop_height)
start += 2
self.connect_approach = unpack('<h', raw_payload[start:start + 2])
self.connect_approach = self.connect_approach[0]
logging.debug("connect_approach: %s", self.connect_approach)
start += 2
address_length = unpack('<h', raw_payload[start:start + 2])
address_length = address_length[0]
if address_length:
start += 2
self.address = unpack(
'<%ss' % address_length, raw_payload[start:start + address_length])
self.address = self.address[0].decode('utf-8', errors="ignore")
logging.debug("address: %s", self.address)
else:
logging.error("wrong payload type")
class Message:
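    # One SUT control message (request or response) exchanged with the RDP test
    # suite; see the message_type/command_id tables above for the field values.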
def __init__(self):
self.type = 0
self.rc = result_code['SUCCESS']
self.request_id = 0
self.testsuite_id = 0
self.command_id = 0
self.testcase_name = ""
self.help_message = ""
self.payload = ""
self.error_message = ""
self.monitor_action = 0
def encode(self):
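        # Pack a little-endian response: message type, testsuite id, command id,
        # testcase-name length and bytes, request id, result code, and two
        # trailing zero-length fields (presumably the empty error message and
        # payload).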
packer = Struct('< h h h i %ss h i i i' %
len(self.testcase_name))
fields = (self.type, self.testsuite_id, self.command_id,
len(self.testcase_name), self.testcase_name,
self.request_id, self.rc, 0, 0)
packed_data = packer.pack(*fields)
logging.debug('sending "%s"' % binascii.hexlify(packed_data))
logging.debug('encoded!')
return packed_data
def decode(self, request):
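        # The fixed 10-byte header is message type (int16), testsuite id (int16),
        # command id (int16) and testcase-name length (int32), all little-endian;
        # the variable-length fields (name, help message, payload) follow it.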
header_length = 10
self.type, self.testsuite_id, self.command_id, name_length = \
unpack('<hhhi', request[:header_length])
if name_length:
self.testcase_name = unpack(
'<%sc' % name_length, request[header_length:name_length + header_length])
self.testcase_name = b''.join(self.testcase_name)
start = header_length + name_length
self.request_id, help_message_length = unpack(
'<hi', bytes(request[start:start + 6]))
if help_message_length:
start += 6
self.help_message = unpack(
'<%sc' % help_message_length, request[start:start + help_message_length])
self.help_message = b''.join(self.help_message)
start += help_message_length
payload_length = unpack('<i', request[start:start + 4])[0]
if payload_length:
start += 4
raw_payload = unpack('<%sc' % payload_length,
request[start:start + payload_length])
if self.command_id == command_id['START_RDP_CONNECTION']:
self.payload = Payload()
self.payload.decode(b''.join(raw_payload))
elif self.command_id == command_id['DISPLAY_UPDATE_MONITORS']:
                self.monitor_action = unpack(
                    '<i', request[start + payload_length:])[0]
elif self.command_id == command_id['DISPLAY_UPDATE_RESOLUTION']:
logging.debug("DISPLAY_UPDATE_RESOLUTION")
# TODO: parse payload structure
elif self.command_id == command_id['TOUCH_EVENT_MULTIPLE']:
logging.debug("TOUCH_EVENT_MULTIPLE")
# TODO: parse payload structure
elif self.command_id == command_id['TOUCH_EVENT_SINGLE']:
logging.debug("TOUCH_EVENT_SINGLE")
# TODO: parse payload structure
elif self.command_id == command_id['BASIC_INPUT']:
logging.debug("BASIC_INPUT")
# TODO: parse payload structure
else:
                self.payload = b''.join(raw_payload)
logging.debug(self.payload)
def build_client_cmd(cmd, ip_address=None, ip_port=None):
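    # Substitute the '{{ address }}' placeholder in the configured client command
    # with "ip" or "ip:port". Illustrative example (the command template is an
    # assumption, not taken from settings.ini):
    #   build_client_cmd('xfreerdp /v:{{ address }}', '10.0.0.1', 3389)
    #   -> 'xfreerdp /v:10.0.0.1:3389'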
    if ip_address is None:
return cmd
if ip_port == 0:
address = ip_address
else:
address = "%s:%s" % (ip_address, ip_port)
return cmd.replace('{{ address }}', address)
def handle_connection(client_socket, config):
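    # Serve SUT control requests on one accepted connection for up to 15 seconds:
    # decode each request, run the matching client command, and reply with a
    # SUT_CONTROL_RESPONSE carrying the same command and request ids.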
end = datetime.datetime.now() + datetime.timedelta(seconds=15)
while datetime.datetime.now() < end:
buffer_size = int(config.get('general', 'buffer_size'))
request = client_socket.recv(buffer_size)
        if len(request) == 0:
            # recv() returning no data means the peer closed the connection.
            break
msg = Message()
msg.decode(request)
logging.info("testcase: %s" % msg.testcase_name)
logging.debug("command: %s" % msg.command_id)
if msg.type != message_type['SUT_CONTROL_REQUEST'] or msg.testsuite_id != testsuite_id['RDP_TESTSUITE']:
logging.error("message is not a control request")
continue
response = Message()
response.type = message_type['SUT_CONTROL_RESPONSE']
response.testsuite_id = testsuite_id['RDP_TESTSUITE']
response.command_id = msg.command_id
response.testcase_name = msg.testcase_name
response.request_id = msg.request_id
logging.info("Command ID: %s", msg.command_id)
if msg.command_id == command_id['START_RDP_CONNECTION']:
cmd_key = "Negotiate"
if msg.payload.connect_approach == connect_approach['DIRECT']:
cmd_key = "DirectCredSSP"
if msg.payload.screen_type == screen_type['FULL_SCREEN']:
cmd_key += "FullScreen"
config_cmd = config.get('client', cmd_key)
cmd = build_client_cmd(
config_cmd, msg.payload.address, msg.payload.port)
logging.info("Executing client: %s" % cmd)
subprocess.Popen(cmd.split(' '))
elif msg.command_id == command_id['CLOSE_RDP_CONNECTION']:
config_cmd = config.get('client', 'StopRDP')
cmd = build_client_cmd(config_cmd)
logging.debug("Terminate all clients...")
proc = subprocess.Popen(cmd.split(' '))
proc.wait()
elif msg.command_id == command_id['AUTO_RECONNECT']:
# TODO
pass
elif msg.command_id == command_id['SCREEN_SHOT']:
# TODO
pass
elif msg.command_id == command_id['BASIC_INPUT']:
# TODO
pass
elif msg.command_id == command_id['TOUCH_EVENT_SINGLE']:
# TODO
pass
elif msg.command_id == command_id['TOUCH_EVENT_MULTIPLE']:
# TODO
pass
elif msg.command_id == command_id['TOUCH_EVENT_DISMISS_HOVERING_CONTACT']:
# TODO
pass
elif msg.command_id == command_id['DISPLAY_UPDATE_RESOLUTION']:
# TODO
pass
elif msg.command_id == command_id['DISPLAY_UPDATE_MONITORS']:
# TODO
pass
elif msg.command_id == command_id['DISPLAY_FULLSCREEN']:
config_cmd = config.get('client', 'NegotiateFullScreen')
if msg.payload.connect_approach == connect_approach['DIRECT']:
config_cmd = config.get('client', 'DirectCredSSPFullScreen')
cmd = build_client_cmd(
config_cmd, msg.payload.address, msg.payload.port)
logging.info("Executing client: %s" % cmd)
subprocess.Popen(cmd.split(' '))
# FIXME: response.request_message = ""
# FIXME: response.error_message = ""
client_socket.sendall(response.encode())
logging.debug("Response has been sent")
def main():
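    # Read the bind address/port from settings.ini, then accept connections in a
    # loop and hand each one to handle_connection on its own thread.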
CONFIG_PATH = 'settings.ini'
config = ConfigParser()
config.read(CONFIG_PATH)
ip_address = config.get('general', 'bind_ip_adress')
port = int(config.get('general', 'bind_port'))
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
while True:
try:
server.bind((ip_address, port))
except socket.error:
continue
break
    server.listen(5)  # backlog of up to 5 pending connections
logging.info("Listening on %s:%s" % (ip_address, port))
while True:
connection, address = server.accept()
logging.info("Accepted connection from %s:%s" %
(address[0], address[1]))
client_handler = threading.Thread(
target=handle_connection,
args=(connection, config)
)
client_handler.start()
if __name__ == '__main__':
sys.exit(main())
|
egs/yesno/ASR/transducer/beam_search.py | TIFOSI528/icefall | 173 | 11068752 | # Copyright 2021 Xiaomi Corp. (authors: <NAME>)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import torch
from transducer.model import Transducer
def greedy_search(model: Transducer, encoder_out: torch.Tensor) -> List[str]:
"""
Args:
model:
An instance of `Transducer`.
encoder_out:
A tensor of shape (N, T, C) from the encoder. Support only N==1 for now.
Returns:
Return the decoded result.
"""
assert encoder_out.ndim == 3
# support only batch_size == 1 for now
assert encoder_out.size(0) == 1, encoder_out.size(0)
blank_id = model.decoder.blank_id
device = model.device
sos = torch.tensor([blank_id], device=device).reshape(1, 1)
decoder_out, (h, c) = model.decoder(sos)
T = encoder_out.size(1)
t = 0
hyp = []
max_u = 1000 # terminate after this number of steps
u = 0
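    # Greedy decoding loop: consult the joiner at each step; a non-blank symbol is
    # emitted and fed back through the decoder (u advances), while a blank moves
    # on to the next encoder frame (t advances).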
while t < T and u < max_u:
# fmt: off
current_encoder_out = encoder_out[:, t:t+1, :]
# fmt: on
logits = model.joiner(current_encoder_out, decoder_out)
log_prob = logits.log_softmax(dim=-1)
# log_prob is (N, 1, 1)
# TODO: Use logits.argmax()
y = log_prob.argmax()
if y != blank_id:
hyp.append(y.item())
y = y.reshape(1, 1)
decoder_out, (h, c) = model.decoder(y, (h, c))
u += 1
else:
t += 1
id2word = {1: "YES", 2: "NO"}
hyp = [id2word[i] for i in hyp]
return hyp
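# Minimal usage sketch (illustrative; how `encoder_out` is produced depends on the
# trained model, so the encoder call below is an assumption, not this module's API):
#     model = ...                              # a trained Transducer on some device
#     encoder_out = model.encoder(features)    # expected shape: (1, T, C)
#     hyp = greedy_search(model, encoder_out)  # e.g. ["YES", "NO"]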
|