max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---|
test/likelihoods/test_multitask_gaussian_likelihood.py | llguo95/gpytorch | 2,673 | 32983 | <filename>test/likelihoods/test_multitask_gaussian_likelihood.py
#!/usr/bin/env python3
import unittest
import torch
from gpytorch.distributions import MultitaskMultivariateNormal
from gpytorch.lazy import KroneckerProductLazyTensor, RootLazyTensor
from gpytorch.likelihoods import MultitaskGaussianLikelihood
from gpytorch.test.base_likelihood_test_case import BaseLikelihoodTestCase
class TestMultitaskGaussianLikelihood(BaseLikelihoodTestCase, unittest.TestCase):
seed = 2
def _create_conditional_input(self, batch_shape=torch.Size([])):
return torch.randn(*batch_shape, 5, 4)
def _create_marginal_input(self, batch_shape=torch.Size([])):
mat = torch.randn(*batch_shape, 5, 5)
mat2 = torch.randn(*batch_shape, 4, 4)
covar = KroneckerProductLazyTensor(RootLazyTensor(mat), RootLazyTensor(mat2))
return MultitaskMultivariateNormal(torch.randn(*batch_shape, 5, 4), covar)
def _create_targets(self, batch_shape=torch.Size([])):
return torch.randn(*batch_shape, 5, 4)
def create_likelihood(self):
return MultitaskGaussianLikelihood(num_tasks=4, rank=2)
def test_setters(self):
likelihood = MultitaskGaussianLikelihood(num_tasks=3, rank=0)
a = torch.randn(3, 2)
mat = a.matmul(a.transpose(-1, -2))
# test rank 0 setters
likelihood.noise = 0.5
self.assertAlmostEqual(0.5, likelihood.noise.item())
likelihood.task_noises = torch.tensor([0.04, 0.04, 0.04])
for i in range(3):
self.assertAlmostEqual(0.04, likelihood.task_noises[i].item())
with self.assertRaises(AttributeError) as context:
likelihood.task_noise_covar = mat
self.assertTrue("task noises" in str(context.exception))
# test low rank setters
likelihood = MultitaskGaussianLikelihood(num_tasks=3, rank=2)
likelihood.noise = 0.5
self.assertAlmostEqual(0.5, likelihood.noise.item())
likelihood.task_noise_covar = mat
self.assertAllClose(mat, likelihood.task_noise_covar)
with self.assertRaises(AttributeError) as context:
likelihood.task_noises = torch.tensor([0.04, 0.04, 0.04])
self.assertTrue("task noises" in str(context.exception))
class TestMultitaskGaussianLikelihoodBatch(TestMultitaskGaussianLikelihood):
seed = 0
def create_likelihood(self):
return MultitaskGaussianLikelihood(num_tasks=4, rank=2, batch_shape=torch.Size([3]))
def test_nonbatch(self):
pass
|
modules/dbnd-airflow/src/dbnd_airflow/web/airflow_app.py | dmytrostriletskyi/dbnd | 224 | 33004 | import logging
def create_app(config=None, testing=False):
from airflow.www_rbac import app as airflow_app
app, appbuilder = airflow_app.create_app(config=config, testing=testing)
    # only now can we load the view;
    # this import might cause a circular dependency if placed above
from dbnd_airflow.airflow_override.dbnd_aiflow_webserver import (
use_databand_airflow_dagbag,
)
use_databand_airflow_dagbag()
logging.info("Airflow applications has been created")
return app, appbuilder
def cached_appbuilder(config=None, testing=False):
_, appbuilder = create_app(config, testing)
return appbuilder
|
runway/lookups/handlers/random_string.py | onicagroup/runway | 134 | 33005 | <gh_stars>100-1000
"""Generate a random string."""
# pyright: reportIncompatibleMethodOverride=none
from __future__ import annotations
import logging
import secrets
import string
from typing import TYPE_CHECKING, Any, Callable, List, Sequence, Union
from typing_extensions import Final, Literal
from ...utils import BaseModel
from .base import LookupHandler
if TYPE_CHECKING:
from ...context import CfnginContext, RunwayContext
LOGGER = logging.getLogger(__name__)
class ArgsDataModel(BaseModel):
"""Arguments data model."""
digits: bool = True
lowercase: bool = True
punctuation: bool = False
uppercase: bool = True
class RandomStringLookup(LookupHandler):
"""Random string lookup."""
TYPE_NAME: Final[Literal["random.string"]] = "random.string"
"""Name that the Lookup is registered as."""
@staticmethod
def calculate_char_set(args: ArgsDataModel) -> str:
"""Calculate character set from the provided arguments."""
char_set = ""
if args.digits:
char_set += string.digits
if args.lowercase:
char_set += string.ascii_lowercase
if args.punctuation:
char_set += string.punctuation
if args.uppercase:
char_set += string.ascii_uppercase
LOGGER.debug("character set: %s", char_set)
return char_set
@staticmethod
def generate_random_string(char_set: Sequence[str], length: int) -> str:
"""Generate a random string of a set length from a set of characters."""
return "".join(secrets.choice(char_set) for _ in range(length))
@staticmethod
def has_digit(value: str) -> bool:
"""Check if value contains a digit."""
return any(v.isdigit() for v in value)
@staticmethod
def has_lowercase(value: str) -> bool:
"""Check if value contains lowercase."""
return any(v.islower() for v in value)
@staticmethod
def has_punctuation(value: str) -> bool:
"""Check if value contains uppercase."""
return any(v in string.punctuation for v in value)
@staticmethod
def has_uppercase(value: str) -> bool:
"""Check if value contains uppercase."""
return any(v.isupper() for v in value)
@classmethod
def ensure_has_one_of(cls, args: ArgsDataModel, value: str) -> bool:
"""Ensure value has at least one of each required character.
Args:
args: Hook args.
value: Value to check.
"""
checks: List[Callable[[str], bool]] = []
if args.digits:
checks.append(cls.has_digit)
if args.lowercase:
checks.append(cls.has_lowercase)
if args.punctuation:
checks.append(cls.has_punctuation)
if args.uppercase:
checks.append(cls.has_uppercase)
return sum(c(value) for c in checks) == len(checks)
@classmethod
def handle( # pylint: disable=arguments-differ
cls,
value: str,
context: Union[CfnginContext, RunwayContext],
*__args: Any,
**__kwargs: Any,
) -> Any:
"""Generate a random string.
Args:
value: The value passed to the Lookup.
context: The current context object.
Raises:
ValueError: Unable to find a value for the provided query and
a default value was not provided.
"""
raw_length, raw_args = cls.parse(value)
length = int(raw_length)
args = ArgsDataModel.parse_obj(raw_args)
char_set = cls.calculate_char_set(args)
while True:
result = cls.generate_random_string(char_set, length)
if cls.ensure_has_one_of(args, result):
break
return cls.format_results(result, **raw_args)
|
venv/Lib/site-packages/win32comext/directsound/test/ds_record.py | ajayiagbebaku/NFL-Model | 150 | 33007 | <filename>venv/Lib/site-packages/win32comext/directsound/test/ds_record.py
import pywintypes
import struct
import win32event, win32api
import os
import win32com.directsound.directsound as ds
def wav_header_pack(wfx, datasize):
    # struct's "s" format code requires bytes objects under Python 3.
    return struct.pack(
        "<4sl4s4slhhllhh4sl",
        b"RIFF",
        36 + datasize,
        b"WAVE",
        b"fmt ",
        16,
        wfx.wFormatTag,
        wfx.nChannels,
        wfx.nSamplesPerSec,
        wfx.nAvgBytesPerSec,
        wfx.nBlockAlign,
        wfx.wBitsPerSample,
        b"data",
        datasize,
    )
d = ds.DirectSoundCaptureCreate(None, None)
sdesc = ds.DSCBUFFERDESC()
sdesc.dwBufferBytes = 352800 # 2 seconds
sdesc.lpwfxFormat = pywintypes.WAVEFORMATEX()
sdesc.lpwfxFormat.wFormatTag = pywintypes.WAVE_FORMAT_PCM
sdesc.lpwfxFormat.nChannels = 2
sdesc.lpwfxFormat.nSamplesPerSec = 44100
sdesc.lpwfxFormat.nAvgBytesPerSec = 176400
sdesc.lpwfxFormat.nBlockAlign = 4
sdesc.lpwfxFormat.wBitsPerSample = 16
print(sdesc)
print(d)
buffer = d.CreateCaptureBuffer(sdesc)
event = win32event.CreateEvent(None, 0, 0, None)
notify = buffer.QueryInterface(ds.IID_IDirectSoundNotify)
notify.SetNotificationPositions((ds.DSBPN_OFFSETSTOP, event))
buffer.Start(0)
win32event.WaitForSingleObject(event, -1)
data = buffer.Update(0, 352800)
fname = os.path.join(win32api.GetTempPath(), "test_directsound_record.wav")
f = open(fname, "wb")
f.write(wav_header_pack(sdesc.lpwfxFormat, 352800))
f.write(data)
f.close()
|
indy_node/test/rich_schema/test_rich_schemas_disabled_by_default.py | Rob-S/indy-node | 627 | 33020 | <reponame>Rob-S/indy-node
import json
import pytest
from indy_common.constants import JSON_LD_CONTEXT, RS_CONTEXT_TYPE_VALUE, RS_ID, GET_RICH_SCHEMA_OBJECT_BY_ID, \
GET_RICH_SCHEMA_OBJECT_BY_METADATA, RS_NAME, RS_VERSION, RS_TYPE
from indy_node.test.api.helper import sdk_build_rich_schema_request, sdk_write_rich_schema_object_and_check
from indy_node.test.helper import rich_schemas_enabled_scope
from indy_node.test.rich_schema.templates import W3C_BASE_CONTEXT
from indy_node.test.rich_schema.test_send_get_rich_schema_obj import PARAMS
from indy_node.test.state_proof.helper import sdk_submit_operation_and_get_result
from plenum.common.constants import TXN_TYPE
from plenum.common.exceptions import RequestNackedException
from plenum.common.util import randomString
from plenum.test.helper import sdk_sign_and_submit_req, sdk_get_and_check_replies
@pytest.fixture(scope='module')
def write_rich_schema(looper, sdk_pool_handle, sdk_wallet_endorser, tconf):
with rich_schemas_enabled_scope(tconf):
for txn_type, rs_type, content, rs_id, rs_name, rs_version in PARAMS:
sdk_write_rich_schema_object_and_check(looper, sdk_wallet_endorser, sdk_pool_handle,
txn_type=txn_type, rs_id=rs_id, rs_name=rs_name,
rs_version=rs_version, rs_type=rs_type, rs_content=content)
@pytest.mark.parametrize('txn_type, rs_type, content, rs_id',
[(JSON_LD_CONTEXT, RS_CONTEXT_TYPE_VALUE, W3C_BASE_CONTEXT, randomString())])
def test_send_rich_schema_obj_disabled_by_default(looper, sdk_pool_handle, sdk_wallet_endorser, txn_type, rs_type,
content, rs_id):
request = sdk_build_rich_schema_request(looper, sdk_wallet_endorser,
txn_type, rs_id=rs_id, rs_name=randomString(),
rs_version='1.0', rs_type=rs_type,
rs_content=json.dumps(content))
req = sdk_sign_and_submit_req(sdk_pool_handle, sdk_wallet_endorser, request)
with pytest.raises(RequestNackedException, match='RichSchema transactions are disabled'):
sdk_get_and_check_replies(looper, [req])
@pytest.mark.parametrize('txn_type, rs_type, content, rs_id, rs_name, rs_version', PARAMS)
def test_send_get_rich_schema_obj_by_id_disabled_by_default(looper, sdk_pool_handle, sdk_wallet_endorser, txn_type,
rs_type, content, rs_id, rs_name, rs_version,
write_rich_schema):
get_rich_schema_by_id_operation = {
TXN_TYPE: GET_RICH_SCHEMA_OBJECT_BY_ID,
RS_ID: rs_id,
}
with pytest.raises(RequestNackedException, match='RichSchema queries are disabled'):
sdk_submit_operation_and_get_result(looper, sdk_pool_handle,
sdk_wallet_endorser,
get_rich_schema_by_id_operation)
@pytest.mark.parametrize('txn_type, rs_type, content, rs_id, rs_name, rs_version', PARAMS)
def test_send_get_rich_schema_obj_by_metadata_disabled_by_default(looper, sdk_pool_handle, sdk_wallet_endorser,
txn_type, rs_type, content, rs_id, rs_name,
rs_version, write_rich_schema):
get_rich_schema_by_metadata_operation = {
TXN_TYPE: GET_RICH_SCHEMA_OBJECT_BY_METADATA,
RS_NAME: rs_name,
RS_VERSION: rs_version,
RS_TYPE: rs_type
}
with pytest.raises(RequestNackedException, match='RichSchema queries are disabled'):
sdk_submit_operation_and_get_result(looper, sdk_pool_handle,
sdk_wallet_endorser,
get_rich_schema_by_metadata_operation)
|
pycwr/__init__.py | 1271756664/study | 144 | 33029 | from . import configure, core, draw, io, interp, retrieve, qc
__all__ = ["configure", "core", "draw", "io", "interp", "qc", "retrieve"]
|
amplify/ext/phpfpm/util/version.py | dp92987/nginx-amplify-agent | 308 | 33050 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from amplify.agent.common.context import context
from amplify.agent.common.util import subp
__author__ = "<NAME>"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
VERSION_CMD = "%s --version"
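# Illustrative usage (hypothetical binary path): VERSION_PARSER('/usr/sbin/php5-fpm')
# is expected to return a tuple like
# ('5.5.9-1', 'PHP 5.5.9-1ubuntu4.17 (fpm-fcgi) (built: May 19 2016 19:08:26)'),
# matching the example output documented in the comments below.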
def VERSION_PARSER(bin_path):
try:
raw_stdout, _ = subp.call(VERSION_CMD % bin_path)
except Exception as e:
exc_name = e.__class__.__name__
# this is being logged as debug only since we will rely on bin_path
# collection error to tip off support as to what is going wrong with
# version detection
context.log.debug(
'failed to get version info from "%s" due to %s' %
(bin_path, exc_name)
)
context.log.debug('additional info:', exc_info=True)
else:
# first line is all that we are interested in::
# PHP 5.5.9-1ubuntu4.17 (fpm-fcgi) (built: May 19 2016 19:08:26)
raw_line = raw_stdout[0]
raw_version = raw_line.split()[1] # 5.5.9-1ubuntu4.17
version = []
for char in raw_version:
if char.isdigit() or char in ('.', '-'):
version.append(char)
else:
break
# version = ['5', '.', '5', '.', '9', '-', '1']
# '5.5.9-1',
# 'PHP 5.5.9-1ubuntu4.17 (fpm-fcgi) (built: May 19 2016 19:08:26)'
return ''.join(version), raw_line
|
open/core/scripts/writeup_profile_prompt_generate_view.py | awesome-archive/open | 105 | 33063 | # flake8: noqa
import json
import time
import requests
from django.conf import settings
from websocket import create_connection
from open.core.scripts.swarm_ml_services import get_random_prompt
from open.core.writeup.constants import TEXT_GENERATION_URL
"""
this script's design was to compare performance behind django channels
and how much overhead it added versus directly hitting the microservice
output:
1.8620352506637574 was the average time in seconds to run.
1.8132854890823364 was the average time in seconds to run directly.
amazingly enough, django channels ... has almost zero overhead wow.
"""
def run():
# dpy runscript writeup_profile_prompt_generate_view
url = f"wss://open.senrigan.io/ws/async/writeup/{TEXT_GENERATION_URL}/session/a-cool-test-session/"
ws = create_connection(url)
start = time.time()
intervals = 50
for _ in range(intervals):
data = get_random_prompt()
ws_msg = json.dumps(data)
ws.send(ws_msg)
result = ws.recv()
end = time.time()
websocket_difference = end - start
print(f"{websocket_difference/intervals} was the average time in seconds to run.")
url = settings.GPT2_MEDUM_API_ENDPOINT
token_key = f"Token {settings.ML_SERVICE_ENDPOINT_API_KEY}"
headers = {"Authorization": token_key}
api_start = time.time()
for _ in range(intervals):
data = get_random_prompt()
response = requests.post(url, json=data, headers=headers)
assert response.status_code == 200
api_end = time.time()
api_difference = api_end - api_start
print(
f"{api_difference / intervals} was the average time in seconds to run directly."
)
|
utils/make_package.py | j123123/llilc | 1,712 | 33068 | <filename>utils/make_package.py<gh_stars>1000+
#!/usr/bin/env python
import sys
import argparse
import os
import subprocess
import platform
import io
import string
try:
# For Python >= 3.0
from urllib.request import urlopen
except ImportError:
# For Python < 3.0
from urllib2 import urlopen
import shutil
import stat
def run(args):
nugetFolder = os.path.join(args.target, ".nuget")
print("\nEnsuring folder: %s" % nugetFolder )
if not os.path.exists(nugetFolder):
os.makedirs(nugetFolder)
nugetExe = os.path.join(nugetFolder, "nuget.exe")
if not os.path.exists(nugetExe):
nugetOrg = "http://nuget.org/nuget.exe"
print("Downloading... %s" % nugetOrg )
response = urlopen(nugetOrg)
output = open(nugetExe,'wb')
output.write(response.read())
output.close()
# Ensure it's executable
st = os.stat(nugetExe)
os.chmod(nugetExe, st.st_mode | stat.S_IEXEC)
if (sys.platform != "win32"):
# shutil.which can be used for python 3.3 or later, instead.
for mono in ["/usr/bin/mono", "/usr/local/bin/mono"]:
if os.path.exists(mono):
monopath = mono
if not monopath:
raise "mono is required to run nuget.exe"
nugetExe = monopath + " " + nugetExe
nugetSpec = os.path.join(nugetFolder, os.path.basename(args.nuspec))
if args.nuspec != nugetSpec:
print("\nCopying " + args.nuspec + " to " + nugetSpec)
shutil.copyfile(args.nuspec, nugetSpec)
if args.json != None:
nugetJson = os.path.join(nugetFolder, os.path.basename(args.json))
if args.json != nugetJson:
print("\nCopying " + args.json + " to " + nugetJson)
shutil.copyfile(args.json, nugetJson)
nugetCommand = nugetExe + " pack " + nugetSpec \
+ " -NoPackageAnalysis -NoDefaultExcludes" \
" -OutputDirectory %s" % nugetFolder
ret = os.system(nugetCommand)
return ret
def main(argv):
parser = argparse.ArgumentParser(description=
"Download nuget and run it to create a package using the given nuspec. " \
"Example: make_package.py " \
"--target f:\llilc-rel\\bin\Release " \
"--nuspec f:\llilc\lib\ObjWriter\.nuget\Microsoft.Dotnet.ObjectWriter.nuspec",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--target", metavar="PATH",
default=None,
help="path to a target directory that contains files that will " \
"packaged")
parser.add_argument("--nuspec", metavar="PATH",
default=None,
help="path to a nuspec file. This file is assumed to be under " \
"a child directory (.nuget) of the target by convetion")
parser.add_argument("--json", metavar="PATH",
default=None,
help="path to a json file. This file is used to create " \
"a redirection package")
args,unknown = parser.parse_known_args(argv)
if unknown:
print("Unknown argument(s): ", ", ".join(unknown))
return -3
returncode=0
if args.target == None:
print("--target is not specified.")
return -3
if args.nuspec == None:
print("--nuspec is not specified")
return -3
returncode = run(args)
return returncode
if __name__ == "__main__":
returncode = main(sys.argv[1:])
sys.exit(returncode)
|
tests/inception/test_inception_hardware_perf.py | ispras/avatar2 | 415 | 33074 | from avatar2 import *
import sys
import os
import logging
import serial
import time
import argparse
import pyudev
import struct
import ctypes
from random import randint
# For profiling
import pstats
logging.basicConfig(filename='/tmp/inception-tests.log', level=logging.INFO)
# ****************************************************************************
def single_step(target, nb_test):
print("[*] Single step target %d times" % nb_test)
for i in range(nb_test):
pc = target.protocols.execution.read_pc()
print(pc)
target.step()
print('stepped')
next_pc = target.protocols.execution.read_pc()
print(next_pc)
# ****************************************************************************
def read_full_mem(target, nb_test, raw=True, summary=True):
print(" - Read the full memory")
nb_test = 1
average_read = 0
for i in range(nb_test):
t0 = time.time()
target.read_memory(ram.address, 1, ram.size, raw=raw)
t1 = time.time()
average_read += t1 - t0
if summary:
average_read = average_read / nb_test
speed_read = ram.size / average_read / 1024
print(" -> On average raw read of %s bytes takes %.2f sec, speed: %.2f KB/sec" % (ram.size, average_read, speed_read))
# ****************************************************************************
def write_full_mem(target, nb_test, raw=True, summary=True):
print(" - Write the full memory")
nb_test = 1
average_write = 0
buf = ctypes.create_string_buffer(ram.size)
for i in range(int(ram.size / 4)):
struct.pack_into(">I", buf, i * 4, randint(0, 0xffffffff))
for i in range(nb_test):
t0 = time.time()
target.write_memory(ram.address, 1, buf, raw=raw)
t1 = time.time()
average_write += t1 - t0
if summary:
average_write = average_write / nb_test
speed_write = ram.size / average_write / 1024
print(" -> On average raw write of %s bytes takes %.2f sec, speed: %.2f KB/sec" % (ram.size, average_write, speed_write))
# ****************************************************************************
def read_write_full_mem(target, nb_test, raw=True, summary=True):
print(" - Read and write the full memory")
reads = []
average_read_write = 0
for i in range(nb_test):
if raw:
t0 = time.time()
reads.append(target.read_memory(ram.address, 1, ram.size, raw=raw))
target.write_memory(ram.address, 1, reads[i], raw=raw)
t1 = time.time()
else:
t0 = time.time()
reads.append(target.read_memory(ram.address, 1, ram.size, raw=raw))
target.write_memory(ram.address, 1, reads[i], len(reads[i]), raw=raw)
t1 = time.time()
average_read_write += t1 - t0
if summary:
average_read_write = average_read_write / nb_test
speed_read_write = ram.size / average_read_write / 1024
print(" -> On average raw read&write of %s bytes takes %.2f sec, speed: %.2f KB/sec" % (ram.size, average_read_write, speed_read_write))
# Verify all reads are identical
for i in range(len(reads) - 1):
assert(reads[i] == reads[i+1])
#print("[!] Multiple reads produce different values !")
# ****************************************************************************
def random_read_write(target, nb_test, raw=True):
print(" - Random read / writes of random size in the ram")
for i in range(0, nb_test):
size = randint(0, int(ram.size / 8)) * 8
#size = 2**4
# Reset the board and wait to reach the breakpoint
target.reset()
target.wait()
if raw:
m1 = ctypes.create_string_buffer(size)
for j in range(int(size / 4)):
struct.pack_into(">I", m1, j * 4, randint(0, 0xFFFFFFFF))
target.write_memory(ram.address, 1, m1, raw=True)
m2 = target.read_memory(ram.address, 1, size, raw=True)
n1, n2 = ([] for i in range(2))
for j in range(int(size / 4)):
n1.append(struct.unpack_from(">I", m1, j)[0])
n2.append(struct.unpack_from(">I", m2, j)[0])
assert(n1 == n2)
#print("i=%s m1: %s m2: %s" % (i, m1.raw, m2))
#print("[!] Multiple random reads produce different values !")
else:
m1 = []
for j in range(int(size / 4)):
m1.append(randint(0, 0xFFFFFFFF))
target.write_memory(ram.address, 1, m1, size, raw=False)
m2 = target.read_memory(ram.address, 1, size, raw=False)
for j in range(int(size / 4)):
assert(m1[j] == m2[j])
#print("[!] Multiple random reads produce different values !")
#print("i=%s j=%s m1[j]: %s m2[j]: %s" % (i, j, m1[j], m2[j]))
# ****************************************************************************
def random_4bytes_read_write(target, nb_test):
print(" - Random read / writes of 4 bytes in the ram")
for i in range(nb_test):
written_word = randint(0, 0xFFFFFFFF)
address = randint(ram.address, ram.address + ram.size - 4)
target.write_memory(address, 4, written_word, 1, raw=False)
read_word = target.read_memory(address, 4, 1, raw=False)
assert(written_word == read_word)
# ****************************************************************************
def read_write_registers(target, nb_test):
print(" - Read / write registers")
regs = ['R0', 'R1', 'R2', 'R3', 'R4', 'R5', 'R6', 'R7', 'R8', 'R9', 'R10',
'R11', 'R12', 'SP', 'LR', 'PC', 'CPSR']
for i in range(nb_test):
for j in range(17):
written_reg = randint(0, 0xFFFFFFFF)
saved_reg = target.read_register(regs[j])
target.write_register(regs[j], written_reg)
read_reg = target.read_register(regs[j])
'''
if read_reg != written_reg:
print(i)
print(j)
print(hex(read_reg))
print(hex(written_reg))
'''
target.write_register(regs[j], saved_reg)
# ****************************************************************************
def transfer_state(av, target_from, target_to, nb_test, summary=True):
print(" - Transfer state")
average = 0
for i in range(nb_test):
t0 = time.time()
av.transfer_state(target_from, target_to, synced_ranges=[ram])
t1 = time.time()
average += t1 - t0
if summary:
average = average / nb_test
speed = ram.size / average / 1024
print(" -> On average transfer state from %s to %s of %s bytes takes %.2f sec, speed: %.2f KB/sec" % (target_from.name, target_to.name, ram.size, average, speed))
if __name__ == '__main__':
# Number each test is repeated
n = 2
avatar = Avatar(arch=ARMV7M, output_directory='/tmp/inception-tests')
nucleo = avatar.add_target(InceptionTarget, name='nucleo')
dum = avatar.add_target(DummyTarget, name='dum')
#qemu = avatar.add_target(QemuTarget, gdb_port=1236)
# Memory mapping of NUCLEO-L152RE
rom = avatar.add_memory_range(0x08000000, 0x1000000, 'rom',
file=firmware)
ram = avatar.add_memory_range(0x20000000, 0x14000, 'ram')
mmio = avatar.add_memory_range(0x40000000, 0x1000000,
forwarded=True, forwarded_to=nucleo)
ram = avatar.get_memory_range(0x20000000)
avatar.init_targets()
print("Targets initialized")
nucleo.reset()
nucleo.cont()
nucleo.stop()
print("Targets stopped, start tests for n = %s" % n)
print("[*] Raw read / writes tests")
read_full_mem(nucleo, n)
write_full_mem(nucleo, n)
read_write_full_mem(nucleo, n)
random_read_write(nucleo, n)
print("[*] !raw read / writes tests")
read_full_mem(nucleo, n, raw=False, summary=False)
write_full_mem(nucleo, n, raw=False, summary=False)
read_write_full_mem(nucleo, n, raw=False, summary=False)
random_read_write(nucleo, n, raw=False)
random_4bytes_read_write(nucleo, 100 * n)
print("[*] Read / Write registers")
read_write_registers(nucleo, n)
print("[*] Transfer state to dummy target")
transfer_state(avatar, nucleo, dum, n)
#Stop all threads for the profiler
print("[*] Test completed")
avatar.stop()
|
paura_lite.py | 777AVY777/Clone_paura | 191 | 33082 | <filename>paura_lite.py
# paura_lite:
# An ultra-simple command-line audio recorder with real-time
# spectrogram visualization
import numpy as np
import pyaudio
import struct
import scipy.fftpack as scp
import termplotlib as tpl
import os
# get window's dimensions
rows, columns = os.popen('stty size', 'r').read().split()
buff_size = 0.2 # window size in seconds
wanted_num_of_bins = 40 # number of frequency bins to display
# initialize soundcard for recording:
fs = 8000
pa = pyaudio.PyAudio()
stream = pa.open(format=pyaudio.paInt16, channels=1, rate=fs,
input=True, frames_per_buffer=int(fs * buff_size))
while 1: # for each recorded window (until ctr+c) is pressed
# get current block and convert to list of short ints,
block = stream.read(int(fs * buff_size))
format = "%dh" % (len(block) / 2)
shorts = struct.unpack(format, block)
# then normalize and convert to numpy array:
x = np.double(list(shorts)) / (2**15)
seg_len = len(x)
# get total energy of the current window and compute a normalization
# factor (to be used for visualizing the maximum spectrogram value)
energy = np.mean(x ** 2)
max_energy = 0.01 # energy for which the bars are set to max
max_width_from_energy = int((energy / max_energy) * int(columns)) + 1
if max_width_from_energy > int(columns) - 10:
max_width_from_energy = int(columns) - 10
# get the magnitude of the FFT and the corresponding frequencies
X = np.abs(scp.fft(x))[0:int(seg_len/2)]
freqs = (np.arange(0, 1 + 1.0/len(X), 1.0 / len(X)) * fs / 2)
# ... and resample to a fix number of frequency bins (to visualize)
wanted_step = (int(freqs.shape[0] / wanted_num_of_bins))
freqs2 = freqs[0::wanted_step].astype('int')
X2 = np.mean(X.reshape(-1, wanted_step), axis=1)
# plot (freqs, fft) as horizontal histogram:
fig = tpl.figure()
fig.barh(X2, labels=[str(int(f)) + " Hz" for f in freqs2[0:-1]],
show_vals=False, max_width=max_width_from_energy)
fig.show()
# add exactly as many new lines as they are needed to
# fill clear the screen in the next iteration:
print("\n" * (int(rows) - freqs2.shape[0] - 1))
|
dataviva/apps/map/views.py | joelvisroman/dataviva-site | 126 | 33083 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from flask import Blueprint, render_template, g, request, make_response
from dataviva.apps.general.views import get_locale
from dataviva.translations.dictionary import dictionary
from dataviva import datavivadir
from config import GZIP_DATA
from dataviva.utils.cached_query import cached_query
from dataviva.utils.graphs_services import location_service
from dataviva.apps.title.views import get_title
from dataviva.utils.graphs_services import *
import urllib
import json
mod = Blueprint('map', __name__,
template_folder='templates',
url_prefix='/<lang_code>/map',
static_folder='static')
@mod.url_value_preprocessor
def pull_lang_code(endpoint, values):
g.locale = values.pop('lang_code')
@mod.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', get_locale())
@mod.before_request
def before_request():
g.page_type = mod.name
@mod.route('/<dataset>/<value>/', defaults={'id_ibge': ''})
@mod.route('/<dataset>/<value>/<id_ibge>')
def index(dataset, value, id_ibge):
filters = []
title_attrs = {}
services = {
'product': product_service,
'id_ibge': location_service,
'wld': wld_service,
'occupation': occupation_service,
'industry': industry_service,
'basic_course': sc_service,
}
for k, v in request.args.items():
if k not in ['values', 'filters', 'count', 'year']:
if v and k in services:
filters.append(services[k](v))
title_attrs[services[k](v)[0]] = services[k](v)[1]
else:
if k != 'colors':
filters.append((k, v))
title_attrs[k] = v
if id_ibge:
location = location_service(id_ibge)[0]
filters.append((location, id_ibge))
state = '' if location == 'region' else id_ibge[:2]
title_attrs[location] = id_ibge
else:
state = id_ibge
location = 'municipality'
filters = urllib.urlencode(filters)
title, subtitle = get_title(dataset, value, 'map', title_attrs)
return render_template('map/index.html',
dataset=dataset,
value=value,
state=state,
filters=filters,
title=title or '',
subtitle=subtitle or '',
dictionary=json.dumps(dictionary()))
@mod.route('/coords/', defaults={'id': 'all'})
@mod.route('/coords/<id>')
def coords(id):
if GZIP_DATA:
fileext = ".gz"
filetype = "gzip"
else:
fileext = ""
filetype = "json"
if id == "all":
file_name = "bra_all_states.json" + fileext
else:
file_name = ("coords-{0}.json" + fileext).format(id)
cached_q = cached_query(file_name)
if cached_q:
ret = make_response(cached_q)
else:
path = datavivadir + "/static/json/map/{0}".format(file_name)
gzip_file = open(path).read()
cached_query(file_name, gzip_file)
ret = make_response(gzip_file)
ret.headers['Content-Encoding'] = filetype
ret.headers['Content-Length'] = str(len(ret.data))
return ret
|
services/core/DNP3Agent/tests/unit_test_point_definitions.py | rmay-intwine/volttron | 406 | 33127 | <reponame>rmay-intwine/volttron<filename>services/core/DNP3Agent/tests/unit_test_point_definitions.py
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2018, 8minutenergy / Kisensum.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither 8minutenergy nor Kisensum, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by 8minutenergy or Kisensum.
# }}}
import pytest
try:
import dnp3
except ImportError:
pytest.skip("pydnp3 not found!", allow_module_level=True)
from dnp3.points import ArrayHeadPointDefinition, PointDefinitions, PointValue
from dnp3.mesa.agent import MesaAgent
from dnp3.mesa.functions import FunctionDefinitions
from test_mesa_agent import POINT_DEFINITIONS_PATH, FUNCTION_DEFINITIONS_PATH
def test_point_definition_load():
point_defs = PointDefinitions(point_definitions_path=POINT_DEFINITIONS_PATH)
import pprint
pprint.pprint(point_defs._points)
pprint.pprint(point_defs._point_name_dict)
print("_point_variations_dict")
pprint.pprint(point_defs._point_variation_dict)
def test_point_definition():
test_dict = {
"name": "CurveStart-X",
"type": "array", # Start of the curve's X/Y array
"array_times_repeated": 100,
"group": 40,
"variation": 1,
"index": 207,
"description": "Starting index for a curve of up to 99 X/Y points",
"array_points": [
{
"name": "Curve-X"
},
{
"name": "Curve-Y"
}
]
}
test_def = ArrayHeadPointDefinition(test_dict)
print(test_def)
def send_points(mesa_agent, some_points):
for name, value, index in some_points:
pdef = mesa_agent.point_definitions.get_point_named(name,index)
point_value = PointValue('Operate',
None,
value,
pdef,
pdef.index,
None) # What is op_type used for?
print(point_value)
mesa_agent._process_point_value(point_value)
def test_mesa_agent():
mesa_agent = MesaAgent(point_topic='points_foobar', local_ip='127.0.0.1', port=8999, outstation_config={},
function_topic='functions_foobar', outstation_status_topic='',
local_point_definitions_path=POINT_DEFINITIONS_PATH,
local_function_definitions_path=FUNCTION_DEFINITIONS_PATH)
mesa_agent._configure('', '', {})
point_definitions = mesa_agent.point_definitions
supported_pdef = point_definitions.get_point_named("Supports Charge/Discharge Mode")
mesa_agent.update_input_point(supported_pdef, True)
test_points = (
# ("DCHD.WinTms (out)", 1.0),
# ("DCHD.RmpTms (out)", 2.0),
# ("DCHD.RevtTms (out)", 3.0),
("CurveStart-X", 1.0, None),
("CurveStart-X", 2.0, 208),
)
send_points(mesa_agent, test_points)
def test_mesa_agent_2():
mesa_agent = MesaAgent(point_topic='points_foobar', local_ip='127.0.0.1', port=8999, outstation_config={},
function_topic='functions_foobar', outstation_status_topic='',
local_point_definitions_path=POINT_DEFINITIONS_PATH,
local_function_definitions_path=FUNCTION_DEFINITIONS_PATH)
mesa_agent._configure('', '', {})
point_definitions = mesa_agent.point_definitions
supported_pdef = point_definitions.get_point_named("Supports Charge/Discharge Mode")
mesa_agent.update_input_point(supported_pdef, True)
test_points = (
("DCHD.WinTms (out)", 1.0, None),
#("DCHD.RmpTms (out)", 2.0, None),
("DCHD.RevtTms (out)", 3.0, None),
)
send_points(mesa_agent, test_points)
def test_function_definitions():
point_definitions = PointDefinitions(point_definitions_path=POINT_DEFINITIONS_PATH)
fdefs = FunctionDefinitions(point_definitions, function_definitions_path=FUNCTION_DEFINITIONS_PATH)
fd = fdefs.function_for_id("curve")
print(fd)
pdef = point_definitions.get_point_named("DCHD.WinTms (out)")
print(pdef)
print(fdefs.step_definition_for_point(pdef))
def test_selector_block():
"""
Test send a Curve function / selector block (including an array of points) to MesaAgent.
Get MesaAgent's selector block and confirm that it has the correct contents.
Do this for a variety of Edit Selectors and array contents.
"""
def process_block_points(agt, block_points, edit_selector):
"""Send each point value in block_points to the MesaAgent."""
# print('Processing {}'.format(block_points))
for name, value, index in block_points:
point_definitions = agt.point_definitions
pdef = point_definitions.get_point_named(name, index)
point_value = PointValue('Operate', None, value, pdef, pdef.index, None)
agt._process_point_value(point_value)
returned_block = mesa_agent.get_selector_block('Curve Edit Selector', edit_selector)
# print('get_selector_block returned {}'.format(returned_block))
return returned_block
mesa_agent = MesaAgent(point_topic='points_foobar', local_ip='127.0.0.1', port=8999, outstation_config={},
function_topic='functions_foobar', outstation_status_topic='',
local_point_definitions_path=POINT_DEFINITIONS_PATH,
local_function_definitions_path=FUNCTION_DEFINITIONS_PATH)
mesa_agent._configure('', '', {})
block_1_points = [('Curve Edit Selector', 1, None), # index 191 - Function and SelectorBlock start
('CurveStart-X', 1.0, None), # Point #1-X: index 207 - Array start
('CurveStart-X', 2.0, 208), # Point #1-Y
('Curve Number of Points', 1, None)] # index 196 - Curve function end
block_2_points = [('Curve Edit Selector', 2, None), # index 191 - Function and SelectorBlock start
('CurveStart-X', 1.0, None), # Point #1-X: index 207 - Array start
('CurveStart-X', 2.0, 208), # Point #1-Y
('CurveStart-X', 3.0, 209), # Point #2-X
('CurveStart-X', 4.0, 210), # Point #2-Y
('Curve Number of Points', 2, None)] # index 196 - Curve function end
block_2a_points = [('Curve Edit Selector', 2, None), # index 191 - Function and SelectorBlock start
('CurveStart-X', 1.0, None), # Point #1-X: index 207 - Array start
('CurveStart-X', 2.0, 208), # Point #1-Y
('CurveStart-X', 5.0, 211), # Point #3-X
('CurveStart-X', 6.0, 212), # Point #3-Y
('Curve Number of Points', 3, None)] # index 196 - Curve function end
# Send block #1. Confirm that its array has a point with Y=2.0.
block = process_block_points(mesa_agent, block_1_points, 1)
assert block['CurveStart-X'][0]['Curve-Y'] == 2.0
# Send block #2. Confirm that its array has a point #2 with Y=4.0.
block = process_block_points(mesa_agent, block_2_points, 2)
assert block['CurveStart-X'][1]['Curve-Y'] == 4.0
# Send an updated block #2 with no point #2 and a new point #3.
block = process_block_points(mesa_agent, block_2a_points, 2)
# Confirm that its array still has a point #2 with Y=4.0, even though it wasn't just sent.
assert block['CurveStart-X'][1]['Curve-Y'] == 4.0
# Confirm that its array now has a point #3 with Y=6.0.
assert block['CurveStart-X'][2]['Curve-Y'] == 6.0
# Re-send block #1. Confirm that selector block initialization reset the point cache: the array has no second point.
block = process_block_points(mesa_agent, block_1_points, 1)
assert len(block['CurveStart-X']) == 1
if __name__ == "__main__":
# test_mesa_agent()
# test_mesa_agent_2()
# test_function_definitions()
# test_point_definition()
test_point_definition_load()
# test_selector_block()
|
tests/test_helper.py | swills/pykwalify | 275 | 33133 | <reponame>swills/pykwalify<filename>tests/test_helper.py
# -*- coding: utf-8 -*-
# python std lib
import logging
import logging.config
log = logging.getLogger()
# Set the root logger to be silent so all code that uses the python logger
# will not print anything unless we want it to; the desired level should be
# specified in each test and reset after that test
def _set_log_lv(level=1337, loggers=None):
""" If no level is set then level will be so high all logging is silenced
"""
if loggers is None:
# If no additional loggers is specified then only apply to root logger
log.setLevel(level)
for handler in log.handlers:
handler.level = level
else:
# If we have other logging instances specified apply to root logger and them
if log not in loggers:
loggers.append(log)
for log_instance in loggers:
log_instance.setLevel(level)
for handler in log_instance.handlers:
handler.level = level
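# Example usage (illustrative): a test that needs to see log output can call
# _set_log_lv(logging.DEBUG) at its start and _set_log_lv() again at the end to
# restore silence, optionally passing extra logger instances via `loggers`.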
# Initially silence all logging
_set_log_lv()
|
hanlp/utils/__init__.py | antfootAlex/HanLP | 27,208 | 33156 | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-08-24 22:12
from . import rules
def ls_resource_in_module(root) -> dict:
res = dict()
for k, v in root.__dict__.items():
if k.startswith('_') or v == root:
continue
if isinstance(v, str):
if v.startswith('http') and not v.endswith('/') and not v.endswith('#') and not v.startswith('_'):
res[k] = v
elif type(v).__name__ == 'module':
res.update(ls_resource_in_module(v))
if 'ALL' in root.__dict__ and isinstance(root.__dict__['ALL'], dict):
root.__dict__['ALL'].update(res)
return res
|
nonconformist/base.py | smazzanti/nonconformist | 301 | 33169 | <gh_stars>100-1000
#!/usr/bin/env python
"""
docstring
"""
# Authors: <NAME>
import abc
import numpy as np
from sklearn.base import BaseEstimator
class RegressorMixin(object):
def __init__(self):
super(RegressorMixin, self).__init__()
@classmethod
def get_problem_type(cls):
return 'regression'
class ClassifierMixin(object):
def __init__(self):
super(ClassifierMixin, self).__init__()
@classmethod
def get_problem_type(cls):
return 'classification'
class BaseModelAdapter(BaseEstimator):
__metaclass__ = abc.ABCMeta
def __init__(self, model, fit_params=None):
super(BaseModelAdapter, self).__init__()
self.model = model
self.last_x, self.last_y = None, None
self.clean = False
self.fit_params = {} if fit_params is None else fit_params
def fit(self, x, y):
"""Fits the model.
Parameters
----------
x : numpy array of shape [n_samples, n_features]
Inputs of examples for fitting the model.
y : numpy array of shape [n_samples]
Outputs of examples for fitting the model.
Returns
-------
None
"""
self.model.fit(x, y, **self.fit_params)
self.clean = False
def predict(self, x):
"""Returns the prediction made by the underlying model.
Parameters
----------
x : numpy array of shape [n_samples, n_features]
Inputs of test examples.
Returns
-------
y : numpy array of shape [n_samples]
Predicted outputs of test examples.
"""
if (
not self.clean or
self.last_x is None or
self.last_y is None or
not np.array_equal(self.last_x, x)
):
self.last_x = x
self.last_y = self._underlying_predict(x)
self.clean = True
return self.last_y.copy()
@abc.abstractmethod
def _underlying_predict(self, x):
"""Produces a prediction using the encapsulated model.
Parameters
----------
x : numpy array of shape [n_samples, n_features]
Inputs of test examples.
Returns
-------
y : numpy array of shape [n_samples]
Predicted outputs of test examples.
"""
pass
class ClassifierAdapter(BaseModelAdapter):
def __init__(self, model, fit_params=None):
super(ClassifierAdapter, self).__init__(model, fit_params)
def _underlying_predict(self, x):
return self.model.predict_proba(x)
class RegressorAdapter(BaseModelAdapter):
def __init__(self, model, fit_params=None):
super(RegressorAdapter, self).__init__(model, fit_params)
def _underlying_predict(self, x):
return self.model.predict(x)
class OobMixin(object):
def __init__(self, model, fit_params=None):
super(OobMixin, self).__init__(model, fit_params)
self.train_x = None
def fit(self, x, y):
super(OobMixin, self).fit(x, y)
self.train_x = x
def _underlying_predict(self, x):
# TODO: sub-sampling of ensemble for test patterns
oob = x == self.train_x
if hasattr(oob, 'all'):
oob = oob.all()
if oob:
return self._oob_prediction()
else:
return super(OobMixin, self)._underlying_predict(x)
class OobClassifierAdapter(OobMixin, ClassifierAdapter):
def __init__(self, model, fit_params=None):
super(OobClassifierAdapter, self).__init__(model, fit_params)
def _oob_prediction(self):
return self.model.oob_decision_function_
class OobRegressorAdapter(OobMixin, RegressorAdapter):
def __init__(self, model, fit_params=None):
super(OobRegressorAdapter, self).__init__(model, fit_params)
def _oob_prediction(self):
return self.model.oob_prediction_
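# Example usage (illustrative, assuming scikit-learn): the OOB adapters expect an
# ensemble fitted with out-of-bag estimates enabled, e.g.
#   from sklearn.ensemble import RandomForestRegressor
#   adapter = OobRegressorAdapter(RandomForestRegressor(n_estimators=100, oob_score=True))
# so that oob_prediction_ / oob_decision_function_ are available after fitting.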
|
ivi/agilent/agilent34410A.py | sacherjj/python-ivi | 161 | 33174 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import time
import struct
from .. import ivi
from .. import dmm
from .. import scpi
class agilent34410A(scpi.dmm.Base):
"Agilent 34410A IVI DMM driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '34410A')
super(agilent34410A, self).__init__(*args, **kwargs)
self._memory_size = 5
self._identity_description = "Agilent 34410A/11A IVI DMM driver"
self._identity_identifier = ""
self._identity_revision = ""
self._identity_vendor = ""
self._identity_instrument_manufacturer = "Agilent Technologies"
self._identity_instrument_model = ""
self._identity_instrument_firmware_revision = ""
self._identity_specification_major_version = 4
self._identity_specification_minor_version = 1
self._identity_supported_instrument_models = ['34410A', '34411A']
self._add_method('memory.save',
self._memory_save)
self._add_method('memory.recall',
self._memory_recall)
self._add_method('memory.set_name',
self._set_memory_name)
self._add_method('memory.get_name',
self._get_memory_name)
def _initialize(self, resource = None, id_query = False, reset = False, **keywargs):
"Opens an I/O session to the instrument."
super(agilent34410A, self)._initialize(resource, id_query, reset, **keywargs)
# interface clear
if not self._driver_operation_simulate:
self._clear()
# check ID
if id_query and not self._driver_operation_simulate:
id = self.identity.instrument_model
id_check = self._instrument_id
id_short = id[:len(id_check)]
if id_short != id_check:
raise Exception("Instrument ID mismatch, expecting %s, got %s", id_check, id_short)
# reset
if reset:
self.utility.reset()
def _memory_save(self, index):
index = int(index)
if index < 1 or index > self._memory_size:
            raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write("*sav %d" % index)
def _memory_recall(self, index):
index = int(index)
if index < 1 or index > self._memory_size:
            raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write("*rcl %d" % index)
def _get_memory_name(self, index):
index = int(index)
if index < 1 or index > self._memory_size:
            raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
return self._ask("memory:state:name? %d" % index).strip(' "')
def _set_memory_name(self, index, value):
index = int(index)
value = str(value)
if index < 1 or index > self._memory_size:
            raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write("memory:state:name %d, \"%s\"" % (index, value))
|
hummingbot/connector/exchange/bittrex/bittrex_utils.py | joedomino874/hummingbot | 3,027 | 33185 | <filename>hummingbot/connector/exchange/bittrex/bittrex_utils.py
from hummingbot.client.config.config_var import ConfigVar
from hummingbot.client.config.config_methods import using_exchange
CENTRALIZED = True
EXAMPLE_PAIR = "ZRX-ETH"
DEFAULT_FEES = [0.25, 0.25]
KEYS = {
"bittrex_api_key":
ConfigVar(key="bittrex_api_key",
prompt="Enter your Bittrex API key >>> ",
required_if=using_exchange("bittrex"),
is_secure=True,
is_connect_key=True),
"bittrex_secret_key":
ConfigVar(key="bittrex_secret_key",
prompt="Enter your Bittrex secret key >>> ",
required_if=using_exchange("bittrex"),
is_secure=True,
is_connect_key=True),
}
|
Algo and DSA/LeetCode-Solutions-master/Python/maximum-difference-between-node-and-ancestor.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 33235 | <filename>Algo and DSA/LeetCode-Solutions-master/Python/maximum-difference-between-node-and-ancestor.py
# Time: O(n)
# Space: O(h)
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
# iterative stack solution
class Solution(object):
def maxAncestorDiff(self, root):
"""
:type root: TreeNode
:rtype: int
"""
result = 0
stack = [(root, 0, float("inf"))]
while stack:
node, mx, mn = stack.pop()
if not node:
continue
result = max(result, mx-node.val, node.val-mn)
mx = max(mx, node.val)
mn = min(mn, node.val)
stack.append((node.left, mx, mn))
stack.append((node.right, mx, mn))
return result
# Time: O(n)
# Space: O(h)
# recursive solution
class Solution2(object):
def maxAncestorDiff(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def maxAncestorDiffHelper(node, mx, mn):
if not node:
return 0
result = max(mx-node.val, node.val-mn)
mx = max(mx, node.val)
mn = min(mn, node.val)
result = max(result, maxAncestorDiffHelper(node.left, mx, mn))
result = max(result, maxAncestorDiffHelper(node.right, mx, mn))
return result
return maxAncestorDiffHelper(root, 0, float("inf"))
|
blogs/migrations/0012_auto_20200601_1247.py | daaawx/bearblog | 657 | 33236 | <reponame>daaawx/bearblog<filename>blogs/migrations/0012_auto_20200601_1247.py
# Generated by Django 3.0.6 on 2020-06-01 12:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogs', '0011_auto_20200531_0915'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='tags',
),
migrations.AddField(
model_name='blog',
name='hashtags',
field=models.TextField(blank=True),
),
]
|
util/mach/mig.py | rovarma/crashpad | 2,151 | 33239 | #!/usr/bin/env python
# coding: utf-8
# Copyright 2014 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
import subprocess
import sys
def FixUserImplementation(implementation):
"""Rewrites a MIG-generated user implementation (.c) file.
Rewrites the file at |implementation| by adding “__attribute__((unused))” to
the definition of any structure typedefed as “__Reply” by searching for the
pattern unique to those structure definitions. These structures are in fact
unused in the user implementation file, and this will trigger a
-Wunused-local-typedefs warning in gcc unless removed or marked with the
“unused” attribute.
"""
file = open(implementation, 'r+')
contents = file.read()
pattern = re.compile('^(\t} __Reply);$', re.MULTILINE)
contents = pattern.sub(r'\1 __attribute__((unused));', contents)
file.seek(0)
file.truncate()
file.write(contents)
file.close()
def FixServerImplementation(implementation):
"""Rewrites a MIG-generated server implementation (.c) file.
Rewrites the file at |implementation| by replacing “mig_internal” with
“mig_external” on functions that begin with “__MIG_check__”. This makes these
functions available to other callers outside this file from a linkage
perspective. It then returns, as a list of lines, declarations that can be
added to a header file, so that other files that include that header file will
have access to these declarations from a compilation perspective.
"""
file = open(implementation, 'r+')
contents = file.read()
# Find interesting declarations.
declaration_pattern = \
re.compile('^mig_internal (kern_return_t __MIG_check__.*)$',
re.MULTILINE)
declarations = declaration_pattern.findall(contents)
# Remove “__attribute__((__unused__))” from the declarations, and call them
# “mig_external” or “extern” depending on whether “mig_external” is defined.
attribute_pattern = re.compile(r'__attribute__\(\(__unused__\)\) ')
declarations = ['#ifdef mig_external\nmig_external\n#else\nextern\n#endif\n' +
attribute_pattern.sub('', x) +
';\n' for x in declarations]
# Rewrite the declarations in this file as “mig_external”.
contents = declaration_pattern.sub(r'mig_external \1', contents);
# Crashpad never implements the mach_msg_server() MIG callouts. To avoid
# needing to provide stub implementations, set KERN_FAILURE as the RetCode
# and abort().
routine_callout_pattern = re.compile(
r'OutP->RetCode = (([a-zA-Z0-9_]+)\(.+\));')
routine_callouts = routine_callout_pattern.findall(contents)
for routine in routine_callouts:
contents = contents.replace(routine[0], 'KERN_FAILURE; abort()')
# Include the header for abort().
contents = '#include <stdlib.h>\n' + contents
file.seek(0)
file.truncate()
file.write(contents)
file.close()
return declarations
def FixHeader(header, declarations=[]):
"""Rewrites a MIG-generated header (.h) file.
Rewrites the file at |header| by placing it inside an “extern "C"” block, so
that it declares things properly when included by a C++ compilation unit.
|declarations| can be a list of additional declarations to place inside the
“extern "C"” block after the original contents of |header|.
"""
file = open(header, 'r+')
contents = file.read()
declarations_text = ''.join(declarations)
contents = '''\
#ifdef __cplusplus
extern "C" {
#endif
%s
%s
#ifdef __cplusplus
}
#endif
''' % (contents, declarations_text)
file.seek(0)
file.truncate()
file.write(contents)
file.close()
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('--developer-dir', help='Path to Xcode')
parser.add_argument('--sdk', help='Path to SDK')
parser.add_argument('--include',
default=[],
action='append',
help='Additional include directory')
parser.add_argument('defs')
parser.add_argument('user_c')
parser.add_argument('server_c')
parser.add_argument('user_h')
parser.add_argument('server_h')
parsed = parser.parse_args(args)
command = ['mig',
'-user', parsed.user_c,
'-server', parsed.server_c,
'-header', parsed.user_h,
'-sheader', parsed.server_h,
]
if parsed.developer_dir is not None:
os.environ['DEVELOPER_DIR'] = parsed.developer_dir
if parsed.sdk is not None:
command.extend(['-isysroot', parsed.sdk])
for include in parsed.include:
command.extend(['-I' + include])
command.append(parsed.defs)
subprocess.check_call(command)
FixUserImplementation(parsed.user_c)
server_declarations = FixServerImplementation(parsed.server_c)
FixHeader(parsed.user_h)
FixHeader(parsed.server_h, server_declarations)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
marketplaces/cron_report_daily_activity.py | diassor/CollectorCity-Market-Place | 135 | 33256 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import logging
import datetime
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from django.core.management import setup_environ
from django.core.mail import send_mail
#from django.db import transaction
import settings
setup_environ(settings)
"""
Daily Activity (Sign Up / Cancel)
Total Customers
Total Sign Ups This Month
Total Sign Ups This Today
Total Cancelations This Month
Total Cancelations This Today
"""
def report_daily_activity():
from django.core.mail import EmailMultiAlternatives, EmailMessage
from django.template import Context, loader
from reports.views import get_daily_activity_data
day = datetime.datetime.now()
try:
t_txt = loader.get_template("admin/mail/daily_activity_report.txt")
t_html = loader.get_template("admin/mail/daily_activity_report.html")
c = get_daily_activity_data(day)
subject, from_email, to = 'Daily Activity Report', "<EMAIL>", "<EMAIL>"
text_content = t_txt.render(Context(c))
html_content = t_html.render(Context(c))
msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
msg.attach_alternative(html_content, "text/html")
msg.send()
except Exception, e:
logging.info(e)
mail = EmailMessage(subject='Error when trying to generate Daily Activity Report',
body=e,
from_email=settings.EMAIL_FROM,
to=[mail for (name, mail) in settings.STAFF],
headers={'X-SMTPAPI': '{\"category\": \"Error\"}'})
mail.send(fail_silently=True)
# send_mail('Error when trying to generate Daily Activity Report', e , settings.EMAIL_FROM, [mail for (name, mail) in settings.STAFF], fail_silently=True)
if __name__ == "__main__":
    report_daily_activity()
|
utils/usergrid-util-python/usergrid_tools/general/queue_monitor.py | snoopdave/incubator-usergrid | 788 | 33257 | # */
# * Licensed to the Apache Software Foundation (ASF) under one
# * or more contributor license agreements. See the NOTICE file
# * distributed with this work for additional information
# * regarding copyright ownership. The ASF licenses this file
# * to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# * with the License. You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing,
# * software distributed under the License is distributed on an
# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# * KIND, either express or implied. See the License for the
# * specific language governing permissions and limitations
# * under the License.
# */
import argparse
import json
import datetime
import os
import time
import sys
import boto
from boto import sqs
### This monitors an SQS queue and measures the delta message count between polling intervals to infer the amount of time
### remaining to fully drain the queue
__author__ = '<EMAIL>'
def total_seconds(td):
return (td.microseconds + (td.seconds + td.days * 24.0 * 3600) * 10.0 ** 6) / 10.0 ** 6
def total_milliseconds(td):
return (td.microseconds + td.seconds * 1000000) / 1000
def get_time_remaining(count, rate):
if rate == 0:
return 'NaN'
seconds = count * 1.0 / rate
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return "%d:%02d:%02d" % (h, m, s)
def parse_args():
parser = argparse.ArgumentParser(description='Usergrid Loader - Queue Monitor')
parser.add_argument('-c', '--config',
help='The queue to load into',
type=str,
default='%s/.usergrid/queue_monitor.json' % os.getenv("HOME"))
parser.add_argument('-q', '--queue_name',
                        help='The name of the SQS queue to monitor (default: entities)',
default='entities',
type=str)
my_args = parser.parse_args(sys.argv[1:])
print str(my_args)
return vars(my_args)
def main():
args = parse_args()
queue_name = args.get('queue_name')
print 'queue_name=%s' % queue_name
start_time = datetime.datetime.utcnow()
first_start_time = start_time
print "first start: %s" % first_start_time
with open(args.get('config'), 'r') as f:
config = json.load(f)
sqs_config = config.get('sqs')
last_time = datetime.datetime.utcnow()
sqs_conn = boto.sqs.connect_to_region(**sqs_config)
queue = sqs_conn.get_queue(queue_name)
last_size = queue.count()
first_size = last_size
print 'Starting Size: %s' % last_size
sleep = 10
time.sleep(sleep)
rate_sum = 0
rate_count = 0
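    # Poll the queue size in a loop; the per-interval and cumulative deltas
    # give the drain rate used to estimate the time remaining.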
while True:
size = queue.count()
time_stop = datetime.datetime.utcnow()
time_delta = total_seconds(time_stop - last_time)
agg_time_delta = total_seconds(time_stop - first_start_time)
agg_size_delta = first_size - size
agg_messages_rate = 1.0 * agg_size_delta / agg_time_delta
size_delta = last_size - size
messages_rate = 1.0 * size_delta / time_delta
rate_sum += messages_rate
rate_count += 1
print '%s | %s | Size: %s | Processed: %s | Last: %s | Avg: %s | Count: %s | agg rate: %s | Remaining: %s' % (
datetime.datetime.utcnow(),
queue_name,
size, size_delta, round(messages_rate, 2),
round(rate_sum / rate_count, 2), rate_count,
round(agg_messages_rate, 2),
get_time_remaining(size, agg_messages_rate))
last_size = size
last_time = time_stop
time.sleep(sleep)
if __name__ == '__main__':
main()
|
tests/transformer/test_assert.py | rahulbahal7/restricted-python | 236 | 33269 | <reponame>rahulbahal7/restricted-python
from tests.helper import restricted_exec
def test_RestrictingNodeTransformer__visit_Assert__1():
"""It allows assert statements."""
restricted_exec('assert 1')
|
cli/src/pcluster/cli/middleware.py | enrico-usai/cfncluster | 415 | 33328 | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions and
# limitations under the License.
"""
This module defines middleware functions for command line operations.
A middleware function is registered under the name of an operation and can run
custom logic either before or after that operation by calling the function it
receives as its first argument with the provided **kwargs.
"""
import logging
import argparse
import boto3
import jmespath
from botocore.exceptions import WaiterError
import pcluster.cli.model
from pcluster.cli.exceptions import APIOperationException, ParameterException
LOGGER = logging.getLogger(__name__)
def _cluster_status(cluster_name):
controller = "cluster_operations_controller"
func_name = "describe_cluster"
full_func_name = f"pcluster.api.controllers.{controller}.{func_name}"
return pcluster.cli.model.call(full_func_name, cluster_name=cluster_name)
def add_additional_args(parser_map):
"""Add any additional arguments to parsers for individual operations.
NOTE: these additional arguments will also need to be removed before
calling the underlying function for the situation where they are not a part
of the specification.
"""
parser_map["create-cluster"].add_argument("--wait", action="store_true", help=argparse.SUPPRESS)
parser_map["delete-cluster"].add_argument("--wait", action="store_true", help=argparse.SUPPRESS)
parser_map["update-cluster"].add_argument("--wait", action="store_true", help=argparse.SUPPRESS)
def middleware_hooks():
"""Return a map and from operation to middleware functions.
The map has operation names as the keys and functions as values.
"""
return {"create-cluster": create_cluster, "delete-cluster": delete_cluster, "update-cluster": update_cluster}
def queryable(func):
def wrapper(dest_func, _body, kwargs):
query = kwargs.pop("query", None)
ret = func(dest_func, _body, kwargs)
try:
return jmespath.search(query, ret) if query else ret
except jmespath.exceptions.ParseError:
raise ParameterException({"message": "Invalid query string.", "query": query})
return wrapper
@queryable
def update_cluster(func, _body, kwargs):
wait = kwargs.pop("wait", False)
ret = func(**kwargs)
if wait and not kwargs.get("dryrun"):
cloud_formation = boto3.client("cloudformation")
waiter = cloud_formation.get_waiter("stack_update_complete")
try:
waiter.wait(StackName=kwargs["cluster_name"])
except WaiterError as e:
LOGGER.error("Failed when waiting for cluster update with error: %s", e)
raise APIOperationException(_cluster_status(kwargs["cluster_name"]))
ret = _cluster_status(kwargs["cluster_name"])
return ret
@queryable
def create_cluster(func, body, kwargs):
wait = kwargs.pop("wait", False)
ret = func(**kwargs)
if wait and not kwargs.get("dryrun"):
cloud_formation = boto3.client("cloudformation")
waiter = cloud_formation.get_waiter("stack_create_complete")
try:
waiter.wait(StackName=body["clusterName"])
except WaiterError as e:
LOGGER.error("Failed when waiting for cluster creation with error: %s", e)
raise APIOperationException(_cluster_status(body["clusterName"]))
ret = _cluster_status(body["clusterName"])
return ret
@queryable
def delete_cluster(func, _body, kwargs):
wait = kwargs.pop("wait", False)
ret = func(**kwargs)
if wait:
cloud_formation = boto3.client("cloudformation")
waiter = cloud_formation.get_waiter("stack_delete_complete")
try:
waiter.wait(StackName=kwargs["cluster_name"])
except WaiterError as e:
LOGGER.error("Failed when waiting for cluster deletion with error: %s", e)
raise APIOperationException({"message": f"Failed when deleting cluster '{kwargs['cluster_name']}'."})
return {"message": f"Successfully deleted cluster '{kwargs['cluster_name']}'."}
else:
return ret
|
tests/test_losses/test_mesh_losses.py | nightfuryyy/mmpose | 1,775 | 33331 | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from numpy.testing import assert_almost_equal
from mmpose.models import build_loss
from mmpose.models.utils.geometry import batch_rodrigues
def test_mesh_loss():
"""test mesh loss."""
loss_cfg = dict(
type='MeshLoss',
joints_2d_loss_weight=1,
joints_3d_loss_weight=1,
vertex_loss_weight=1,
smpl_pose_loss_weight=1,
smpl_beta_loss_weight=1,
img_res=256,
focal_length=5000)
loss = build_loss(loss_cfg)
smpl_pose = torch.zeros([1, 72], dtype=torch.float32)
smpl_rotmat = batch_rodrigues(smpl_pose.view(-1, 3)).view(-1, 24, 3, 3)
smpl_beta = torch.zeros([1, 10], dtype=torch.float32)
camera = torch.tensor([[1, 0, 0]], dtype=torch.float32)
vertices = torch.rand([1, 6890, 3], dtype=torch.float32)
joints_3d = torch.ones([1, 24, 3], dtype=torch.float32)
joints_2d = loss.project_points(joints_3d, camera) + (256 - 1) / 2
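    # Predictions identical to the ground truth should yield zero for every
    # loss term.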
fake_pred = {}
fake_pred['pose'] = smpl_rotmat
fake_pred['beta'] = smpl_beta
fake_pred['camera'] = camera
fake_pred['vertices'] = vertices
fake_pred['joints_3d'] = joints_3d
fake_gt = {}
fake_gt['pose'] = smpl_pose
fake_gt['beta'] = smpl_beta
fake_gt['vertices'] = vertices
fake_gt['has_smpl'] = torch.ones(1, dtype=torch.float32)
fake_gt['joints_3d'] = joints_3d
fake_gt['joints_3d_visible'] = torch.ones([1, 24, 1], dtype=torch.float32)
fake_gt['joints_2d'] = joints_2d
fake_gt['joints_2d_visible'] = torch.ones([1, 24, 1], dtype=torch.float32)
losses = loss(fake_pred, fake_gt)
assert torch.allclose(losses['vertex_loss'], torch.tensor(0.))
assert torch.allclose(losses['smpl_pose_loss'], torch.tensor(0.))
assert torch.allclose(losses['smpl_beta_loss'], torch.tensor(0.))
assert torch.allclose(losses['joints_3d_loss'], torch.tensor(0.))
assert torch.allclose(losses['joints_2d_loss'], torch.tensor(0.))
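    # Perturb the predictions by fixed offsets and check the expected non-zero
    # loss values.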
fake_pred = {}
fake_pred['pose'] = smpl_rotmat + 1
fake_pred['beta'] = smpl_beta + 1
fake_pred['camera'] = camera
fake_pred['vertices'] = vertices + 1
fake_pred['joints_3d'] = joints_3d.clone()
joints_3d_t = joints_3d.clone()
joints_3d_t[:, 0] = joints_3d_t[:, 0] + 1
fake_gt = {}
fake_gt['pose'] = smpl_pose
fake_gt['beta'] = smpl_beta
fake_gt['vertices'] = vertices
fake_gt['has_smpl'] = torch.ones(1, dtype=torch.float32)
fake_gt['joints_3d'] = joints_3d_t
fake_gt['joints_3d_visible'] = torch.ones([1, 24, 1], dtype=torch.float32)
fake_gt['joints_2d'] = joints_2d + (256 - 1) / 2
fake_gt['joints_2d_visible'] = torch.ones([1, 24, 1], dtype=torch.float32)
losses = loss(fake_pred, fake_gt)
assert torch.allclose(losses['vertex_loss'], torch.tensor(1.))
assert torch.allclose(losses['smpl_pose_loss'], torch.tensor(1.))
assert torch.allclose(losses['smpl_beta_loss'], torch.tensor(1.))
assert torch.allclose(losses['joints_3d_loss'], torch.tensor(0.5 / 24))
assert torch.allclose(losses['joints_2d_loss'], torch.tensor(0.5))
def test_gan_loss():
"""test gan loss."""
with pytest.raises(NotImplementedError):
loss_cfg = dict(
type='GANLoss',
gan_type='test',
real_label_val=1.0,
fake_label_val=0.0,
loss_weight=1)
_ = build_loss(loss_cfg)
input_1 = torch.ones(1, 1)
input_2 = torch.ones(1, 3, 6, 6) * 2
# vanilla
loss_cfg = dict(
type='GANLoss',
gan_type='vanilla',
real_label_val=1.0,
fake_label_val=0.0,
loss_weight=2.0)
gan_loss = build_loss(loss_cfg)
loss = gan_loss(input_1, True, is_disc=False)
assert_almost_equal(loss.item(), 0.6265233)
loss = gan_loss(input_1, False, is_disc=False)
assert_almost_equal(loss.item(), 2.6265232)
loss = gan_loss(input_1, True, is_disc=True)
assert_almost_equal(loss.item(), 0.3132616)
loss = gan_loss(input_1, False, is_disc=True)
assert_almost_equal(loss.item(), 1.3132616)
# lsgan
loss_cfg = dict(
type='GANLoss',
gan_type='lsgan',
real_label_val=1.0,
fake_label_val=0.0,
loss_weight=2.0)
gan_loss = build_loss(loss_cfg)
loss = gan_loss(input_2, True, is_disc=False)
assert_almost_equal(loss.item(), 2.0)
loss = gan_loss(input_2, False, is_disc=False)
assert_almost_equal(loss.item(), 8.0)
loss = gan_loss(input_2, True, is_disc=True)
assert_almost_equal(loss.item(), 1.0)
loss = gan_loss(input_2, False, is_disc=True)
assert_almost_equal(loss.item(), 4.0)
# wgan
loss_cfg = dict(
type='GANLoss',
gan_type='wgan',
real_label_val=1.0,
fake_label_val=0.0,
loss_weight=2.0)
gan_loss = build_loss(loss_cfg)
loss = gan_loss(input_2, True, is_disc=False)
assert_almost_equal(loss.item(), -4.0)
loss = gan_loss(input_2, False, is_disc=False)
assert_almost_equal(loss.item(), 4)
loss = gan_loss(input_2, True, is_disc=True)
assert_almost_equal(loss.item(), -2.0)
loss = gan_loss(input_2, False, is_disc=True)
assert_almost_equal(loss.item(), 2.0)
# hinge
loss_cfg = dict(
type='GANLoss',
gan_type='hinge',
real_label_val=1.0,
fake_label_val=0.0,
loss_weight=2.0)
gan_loss = build_loss(loss_cfg)
loss = gan_loss(input_2, True, is_disc=False)
assert_almost_equal(loss.item(), -4.0)
loss = gan_loss(input_2, False, is_disc=False)
assert_almost_equal(loss.item(), -4.0)
loss = gan_loss(input_2, True, is_disc=True)
assert_almost_equal(loss.item(), 0.0)
loss = gan_loss(input_2, False, is_disc=True)
assert_almost_equal(loss.item(), 3.0)
|
terraform/stacks/threat-intelligence/lambdas/python/cloud-sniper-threat-intelligence/cloud_sniper_threat_intelligence.py | houey/cloud-sniper | 160 | 33354 | <gh_stars>100-1000
import boto3
import json
import datetime
import logging
import os
import ipaddress
import requests
log = logging.getLogger()
log.setLevel(logging.INFO)
QUEUE_URL = os.environ['SQS_QUEUE_CLOUD_SNIPER']
DYNAMO_TABLE = os.environ['DYNAMO_TABLE_CLOUD_SNIPER']
WEBHOOK_URL = os.environ['WEBHOOK_URL_CLOUD_SNIPER']
HUB_ACCOUNT_ID = os.environ['HUB_ACCOUNT_ID_CLOUD_SNIPER']
ROLE_SPOKE = os.environ['ROLE_SPOKE_CLOUD_SNIPER']
BUCKET_NAME = os.environ['BUCKET_NAME']
IOCS_PATH = os.environ['IOCS_PATH']
TOPIC_ARN = os.environ['TOPIC_ARN']
message = []
json_a = []
# hub account
s = boto3.session.Session(region_name=os.environ['AWS_REGION'])
ec2 = s.client('ec2')
sqs = s.client('sqs')
iam = s.client('iam')
r_ec2 = s.resource('ec2')
dynamodb = s.resource('dynamodb')
sns = s.client('sns')
# spoke account
sts_connection = boto3.client('sts')
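# GuardDuty finding types handled below, grouped by the JSON structure that
# must be parsed to extract the attacker details.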
networkConnectionAction = [
"UnauthorizedAccess:EC2/SSHBruteForce",
]
portProbeAction = [
"Recon:EC2/PortProbeUnprotectedPort",
]
instanceDetails = [
"UnauthorizedAccess:EC2/TorIPCaller",
]
awsApiCallAction = [
"Recon:IAMUser/TorIPCaller",
]
def read_sqs():
log.info("Processing queue")
response = sqs.receive_message(
QueueUrl=QUEUE_URL,
MaxNumberOfMessages=10,
MessageAttributeNames=[
'All'
],
)
if 'Messages' in response:
return response['Messages']
else:
log.info("There is no new message in the queue")
return
def search_ioc():
log.info("Searching for IOC ...")
global json_a
for b in message:
body = b['Body']
data = json.loads(body)
try:
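            # Classify the finding: 1 = inbound network connection,
            # 2 = port probe, 3 = Tor caller hitting an EC2 instance,
            # 4 = AWS API call from a Tor exit node.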
flag = 0
for dt in networkConnectionAction:
if data["detail"]["type"] == dt:
flag = 1
break
for dt in portProbeAction:
if data["detail"]["type"] == dt:
flag = 2
break
for dt in instanceDetails:
if data["detail"]["type"] == dt:
flag = 3
break
for dt in awsApiCallAction:
if data["detail"]["type"] == dt:
flag = 4
break
if flag == 1:
ioc = []
src_ip = (json.dumps(
data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"][
"ipAddressV4"])).strip('"')
direction = data["detail"]["service"]["action"]["networkConnectionAction"]["connectionDirection"]
if ipaddress.ip_address(src_ip).is_private is False and direction == "INBOUND":
account_id = data["detail"]["accountId"]
region = data["detail"]["region"]
subnet_id = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["subnetId"]
instance_id = data["detail"]["resource"]["instanceDetails"]["instanceId"]
ttp = data["detail"]["type"]
asn = \
data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"][
"organization"][
"asn"]
asn_org = (
data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"][
"organization"][
"asnOrg"]).replace(",", " ")
isp = (
data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"][
"organization"][
"isp"]).replace(",", " ")
org = (
data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"][
"organization"][
"org"]).replace(",", " ")
country = \
data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"]["country"][
"countryName"]
city = (data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"]["city"][
"cityName"]).replace(",", " ")
nacl_id = get_netacl_id(subnet_id, account_id)
hits = str(data["detail"]["service"]["count"])
vpc_id = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["vpcId"]
sg_name = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["securityGroups"][0]["groupName"]
sg_id = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["securityGroups"][0]["groupId"]
tags = (str(data["detail"]["resource"]["instanceDetails"]["tags"])).replace(",", "")
account_alias = str(get_account_alias(account_id))
event_first_seen = str(data["detail"]["service"]["eventFirstSeen"])
ioc = ttp + "," + hits + "," + account_id + "," + account_alias + "," + region + "," + subnet_id + "," + src_ip + "," + instance_id + "," + nacl_id + "," + country + "," + city + "," + asn_org + "," + org + "," + isp + "," + asn + "," + vpc_id + "," + sg_name + "," + sg_id + "," + tags + "," + event_first_seen
log.info("IOCs: " + str(ioc))
put_to_s3(ioc)
if len(json_a) == 0:
json_a.append(ioc)
else:
for e in json_a:
if e != ioc:
json_a.append(ioc)
elif flag == 2:
ioc = []
src_ip = (json.dumps(
data["detail"]["service"]["action"]["portProbeAction"]["portProbeDetails"][0]["remoteIpDetails"][
"ipAddressV4"])).strip('"')
if ipaddress.ip_address(src_ip).is_private is False:
account_id = data["detail"]["accountId"]
region = data["detail"]["region"]
subnet_id = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["subnetId"]
instance_id = data["detail"]["resource"]["instanceDetails"]["instanceId"]
ttp = data["detail"]["type"]
country = \
data["detail"]["service"]["action"]["portProbeAction"]["portProbeDetails"][0][
"remoteIpDetails"][
"country"][
"countryName"]
city = (
data["detail"]["service"]["action"]["portProbeAction"]["portProbeDetails"][0][
"remoteIpDetails"][
"city"][
"cityName"]).replace(",", " ")
asn_org = (
data["detail"]["service"]["action"]["portProbeAction"]["portProbeDetails"][0][
"remoteIpDetails"][
"organization"][
"asnOrg"]).replace(",", " ")
org = (
data["detail"]["service"]["action"]["portProbeAction"]["portProbeDetails"][0][
"remoteIpDetails"][
"organization"][
"org"]).replace(",", " ")
isp = (
data["detail"]["service"]["action"]["portProbeAction"]["portProbeDetails"][0][
"remoteIpDetails"][
"organization"][
"isp"]).replace(",", " ")
asn = \
data["detail"]["service"]["action"]["portProbeAction"]["portProbeDetails"][0][
"remoteIpDetails"][
"organization"][
"asn"]
nacl_id = get_netacl_id(subnet_id, account_id)
hits = str(data["detail"]["service"]["count"])
vpc_id = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["vpcId"]
sg_name = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["securityGroups"][0]["groupName"]
sg_id = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["securityGroups"][0]["groupId"]
tags = (str(data["detail"]["resource"]["instanceDetails"]["tags"])).replace(",", "")
account_alias = str(get_account_alias(account_id))
event_first_seen = str(data["detail"]["service"]["eventFirstSeen"])
ioc = ttp + "," + hits + "," + account_id + "," + account_alias + "," + region + "," + subnet_id + "," + src_ip + "," + instance_id + "," + nacl_id + "," + country + "," + city + "," + asn_org + "," + org + "," + isp + "," + asn + "," + vpc_id + "," + sg_name + "," + sg_id + "," + tags + "," + event_first_seen
log.info("IOCs: " + str(ioc))
put_to_s3(ioc)
if len(json_a) == 0:
json_a.append(ioc)
else:
for e in json_a:
if e != ioc:
json_a.append(ioc)
elif flag == 3:
ioc = []
src_ip = (json.dumps(
data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"][
"ipAddressV4"])).strip('"')
direction = data["detail"]["service"]["action"]["networkConnectionAction"]["connectionDirection"]
if ipaddress.ip_address(src_ip).is_private is False and direction == "INBOUND":
account_id = data["detail"]["accountId"]
region = data["detail"]["region"]
subnet_id = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["subnetId"]
instance_id = data["detail"]["resource"]["instanceDetails"]["instanceId"]
ttp = data["detail"]["type"]
asn = \
data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"][
"organization"][
"asn"]
asn_org = (
data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"][
"organization"][
"asnOrg"]).replace(",", " ")
isp = (
data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"][
"organization"][
"isp"]).replace(",", " ")
org = (
data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"][
"organization"][
"org"]).replace(",", " ")
country = \
data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"]["country"][
"countryName"]
try:
city = str((data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"]["city"][
"cityName"]).replace(",", " "))
except Exception as e:
city = "NIA"
nacl_id = get_netacl_id(subnet_id, account_id)
hits = str(data["detail"]["service"]["count"])
vpc_id = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["vpcId"]
sg_name = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["securityGroups"][0]["groupName"]
sg_id = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["securityGroups"][0]["groupId"]
tags = (str(data["detail"]["resource"]["instanceDetails"]["tags"])).replace(",", "")
account_alias = str(get_account_alias(account_id))
event_first_seen = str(data["detail"]["service"]["eventFirstSeen"])
ioc = ttp + "," + hits + "," + account_id + "," + account_alias + "," + region + "," + subnet_id + "," + src_ip + "," + instance_id + "," + nacl_id + "," + country + "," + city + "," + asn_org + "," + org + "," + isp + "," + asn + "," + vpc_id + "," + sg_name + "," + sg_id + "," + tags + "," + event_first_seen
log.info("IOCs: " + str(ioc))
put_to_s3(ioc)
if len(json_a) == 0:
json_a.append(ioc)
else:
for e in json_a:
if e != ioc:
json_a.append(ioc)
elif flag == 4:
ioc = []
src_ip = (json.dumps(
data["detail"]["service"]["action"]["awsApiCallAction"]["remoteIpDetails"][
"ipAddressV4"])).strip('"')
account_id = data["detail"]["accountId"]
region = data["detail"]["region"]
ttp = data["detail"]["type"]
asn = \
data["detail"]["service"]["action"]["awsApiCallAction"]["remoteIpDetails"][
"organization"][
"asn"]
asn_org = (
data["detail"]["service"]["action"]["awsApiCallAction"]["remoteIpDetails"][
"organization"][
"asnOrg"]).replace(",", " ")
isp = (
data["detail"]["service"]["action"]["awsApiCallAction"]["remoteIpDetails"][
"organization"][
"isp"]).replace(",", " ")
org = (
data["detail"]["service"]["action"]["awsApiCallAction"]["remoteIpDetails"][
"organization"][
"org"]).replace(",", " ")
country = \
data["detail"]["service"]["action"]["awsApiCallAction"]["remoteIpDetails"]["country"][
"countryName"]
try:
city = str((data["detail"]["service"]["action"]["awsApiCallAction"]["remoteIpDetails"]["city"][
"cityName"]).replace(",", " "))
except Exception as e:
city = "NIA"
hits = str(data["detail"]["service"]["count"])
account_alias = str(get_account_alias(account_id))
event_first_seen = str(data["detail"]["service"]["eventFirstSeen"])
subnet_id = instance_id = nacl_id = vpc_id = sg_name = sg_id = tags = ""
principal_id = data["detail"]["resource"]["accessKeyDetails"]["principalId"]
user_name = data["detail"]["resource"]["accessKeyDetails"]["userName"]
ioc = ttp + "," + hits + "," + account_id + "," + account_alias + "," + region + "," + subnet_id + "," + src_ip + "," + instance_id + "," + nacl_id + "," + country + "," + city + "," + asn_org + "," + org + "," + isp + "," + asn + "," + vpc_id + "," + sg_name + "," + sg_id + "," + tags + "," + event_first_seen+ "," + principal_id + "," + user_name
log.info("IOCs: " + str(ioc))
put_to_s3(ioc)
if len(json_a) == 0:
json_a.append(ioc)
else:
for e in json_a:
if e != ioc:
json_a.append(ioc)
except Exception as e:
log.info("JSON could not be parsed:" + str(e))
def get_netacl_id(subnet_id, account_id):
log.info("Getting NACL id for subnet: " + str(subnet_id) + " account: " + str(account_id))
global HUB_ACCOUNT_ID
try:
nacl_id = ""
if account_id != HUB_ACCOUNT_ID:
client = assume_role(account_id, "client")
response = client.describe_network_acls(
Filters=[
{
'Name': 'association.subnet-id',
'Values': [
subnet_id,
]
}
]
)
else:
response = ec2.describe_network_acls(
Filters=[
{
'Name': 'association.subnet-id',
'Values': [
subnet_id,
]
}
]
)
nacls = response['NetworkAcls'][0]['Associations']
for n in nacls:
if n['SubnetId'] == subnet_id:
nacl_id = n['NetworkAclId']
log.info("NACL found:" + str(nacl_id))
return nacl_id
except Exception as e:
log.info("Failed to get NACL id:" + str(e))
def incident_and_response():
log.info("Incident and Response Automation ...")
ts = str(datetime.datetime.now())
ujsa = set(json_a)
for jsa in ujsa:
lst = jsa.split(",")
ioc = len(lst)
rule_no = "-1"
if ioc == 20:
ttp, hits, account_id, account_alias, region, subnet_id, src_ip, instance_id, nacl_id, country, city, asn_org, org, isp, asn, vpc_id, sg_name, sg_id, tags, event_first_seen = jsa.split(",")
lst_nacl = get_nacl_rule(nacl_id, account_id)
rule_no = int(lst_nacl.pop())
result = create_nacl_rule(nacl_id, src_ip, rule_no, account_id, set(lst_nacl))
if result:
update_ioc(src_ip, ts, ttp, hits, region, account_id, account_alias, nacl_id, subnet_id, instance_id, country, city, asn_org, org, isp, asn, rule_no, vpc_id, sg_name, sg_id, tags, event_first_seen)
message_to_slack(jsa)
elif ioc == 22:
ttp, hits, account_id, account_alias, region, subnet_id, src_ip, instance_id, nacl_id, country, city, asn_org, org, isp, asn, vpc_id, sg_name, sg_id, tags, event_first_seen, principal_id, user_name = jsa.split(",")
message_to_slack(jsa)
else:
country = city = asn_org = org = isp = asn = "NIA"
ttp, account_id, account_alias, region, subnet_id, src_ip, instance_id, nacl_id, vpc_id, sg_name, sg_id, tags, event_first_seen = jsa.split(",")
lst_nacl = get_nacl_rule(nacl_id, account_id)
rule_no = int(lst_nacl.pop())
result = create_nacl_rule(nacl_id, src_ip, rule_no, account_id, set(lst_nacl))
if result:
update_ioc(src_ip, ts, ttp, hits, region, account_id, account_alias, nacl_id, subnet_id, instance_id, country, city, asn_org, org, isp, asn, rule_no, vpc_id, sg_name, sg_id, tags, event_first_seen)
message_to_slack(jsa)
def get_nacl_rule(nacl_id, account_id):
rule = get_rules(nacl_id, account_id)
log.info("Getting rule number (entry) for NACL: " + str(nacl_id) + " account: " + str(account_id))
lst_no = []
lst_cidr = []
for r in rule:
no, cidr = r.split(",")
lst_no.append(int(no))
lst_cidr.append(cidr)
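    # Pick a free rule number below 100 so the deny entry is evaluated before
    # the (typically rule 100) allow entries.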
i = int(min(lst_no)) + 1
if int(min(lst_no)) == 100:
rule_no = 1
else:
count = 1
while count < 98:
count += 1
if i < 100 and i not in lst_no:
rule_no = i
break
else:
i += 1
log.info("Rule number (entry): " + str(rule_no))
log.info("CIDR already added: " + str(set(lst_cidr)))
lst_cidr.append(str(rule_no))
return lst_cidr
def get_rules(nacl_id, account_id):
log.info("Getting rules for NACL: " + str(nacl_id) + " account: " + str(account_id))
global HUB_ACCOUNT_ID
rules = []
if account_id != HUB_ACCOUNT_ID:
client = assume_role(account_id, "client")
response = client.describe_network_acls(
NetworkAclIds=[
nacl_id,
],
)
else:
response = ec2.describe_network_acls(
NetworkAclIds=[
nacl_id,
],
)
data = response['NetworkAcls'][0]['Entries']
for d in data:
entry = str(d['RuleNumber']) + "," + str(d['CidrBlock'])
rules.append(entry)
return rules
def create_nacl_rule(nacl_id, attacker_ip, rule_no, account_id, lst_nacl):
global HUB_ACCOUNT_ID
log.info("Creating NACL rule for attacker:" + str(attacker_ip))
if attacker_ip + '/32' not in lst_nacl and len(lst_nacl) <= 18:
if account_id != HUB_ACCOUNT_ID:
client = assume_role(account_id, "resource")
nacl = client.NetworkAcl(nacl_id)
else:
nacl = r_ec2.NetworkAcl(nacl_id)
response = nacl.create_entry(
CidrBlock=attacker_ip + '/32',
Egress=False,
PortRange={
'From': 0,
'To': 65535
},
Protocol='-1',
RuleAction='deny',
RuleNumber=rule_no
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
return True
else:
return False
elif len(lst_nacl) == 20:
log.info("NACL is full, no more than 18 entries can be added")
else:
log.info("Attacker is already blocked")
def get_account_alias(account_id):
log.info("Getting alias for account: " + str(account_id))
global HUB_ACCOUNT_ID
if account_id != HUB_ACCOUNT_ID:
client = assume_role(account_id, "iam")
response = client.list_account_aliases()
else:
response = iam.list_account_aliases()
alias = str(response['AccountAliases'])
result = alias[2:-2]
return result
def update_ioc(attacker_ip, timestamp, ttp, hits, region, account_id, account_alias, nacl_id, subnet_id, instance_id, country, city, asn_org, org, isp, asn, rule_no, vpc_id, sg_name, sg_id, tags, event_first_seen):
log.info("Sending IOCs to DynamoDB ...")
try:
table = dynamodb.Table(DYNAMO_TABLE)
scan = table.scan()
if scan['Items']:
updated = 0
for s in scan['Items']:
if s['attacker_ip'] == attacker_ip:
update_entry_attackers(attacker_ip, hits, rule_no, False)
updated = 1
if updated == 0:
create_entry_attackers(attacker_ip, timestamp, ttp, hits, region, account_id, account_alias, nacl_id, subnet_id,
instance_id, country, city, asn_org, org, isp, asn, rule_no, vpc_id, sg_name, sg_id, tags, event_first_seen)
else:
create_entry_attackers(attacker_ip, timestamp, ttp, hits, region, account_id, account_alias, nacl_id, subnet_id,
instance_id, country, city, asn_org, org, isp, asn, rule_no, vpc_id, sg_name, sg_id, tags, event_first_seen)
except Exception as e:
log.info("DynamoDB entry could not be updated" + str(e))
def update_entry_attackers(attacker_ip, hits, rule_no, deleted):
table = dynamodb.Table(DYNAMO_TABLE)
try:
        if not deleted:
            log.info("Updating new DynamoDB entry for attacker: " + str(attacker_ip))
        else:
            log.info("Updating cleaned (NACL) DynamoDB entry for attacker: " + str(attacker_ip))
        table.update_item(
            Key={
                'attacker_ip': attacker_ip
            },
            UpdateExpression="set hits = :h, rule_no = :r_no",
            ExpressionAttributeValues={
                ':h': hits,
                ':r_no': rule_no
            },
            ReturnValues="UPDATED_NEW"
        )
        return
except Exception as e:
log.info("DynamoDB could not be updated:" + str(e))
def create_entry_attackers(attacker_ip, timestamp, ttp, hits, region, account_id, account_alias, nacl_id, subnet_id, instance_id, country, city, asn_org, org, isp, asn, rule_no, vpc_id, sg_name, sg_id, tags, event_first_seen):
if not city:
city = "NIA"
log.info("Creating DynamoDB entry for attacker:" + str(attacker_ip))
try:
table = dynamodb.Table(DYNAMO_TABLE)
response = table.put_item(
Item={
'attacker_ip': str(attacker_ip),
'timestamp': str(timestamp),
'ttp': str(ttp),
'hits': str(hits),
'region': str(region),
'account_id': str(account_id),
'account_alias': str(account_alias),
'vpc_id': str(vpc_id),
'nacl_id': str(nacl_id),
'subnet_id': str(subnet_id),
'instance_id': str(instance_id),
'tags': str(tags),
'sg_name': str(sg_name),
'sg_id': str(sg_id),
'country': str(country),
'city': str(city),
'asn_org': str(asn_org),
'org': str(org),
'isp': str(isp),
'asn': str(asn),
'rule_no': str(rule_no),
'event_first_seen': str(event_first_seen)
}
)
except Exception as e:
log.info("DynamoDB entry could not be created" + str(e))
def assume_role(account_id, boto_type):
global ROLE_SPOKE
log.info("Assuming role: " + str(ROLE_SPOKE) + " account: " + str(account_id))
try:
sts = sts_connection.assume_role(
RoleArn="arn:aws:iam::" + account_id + ":role/" + ROLE_SPOKE,
RoleSessionName="cross_acct_lambda"
)
ACCESS_KEY = sts['Credentials']['AccessKeyId']
SECRET_KEY = sts['Credentials']['SecretAccessKey']
SESSION_TOKEN = sts['Credentials']['SessionToken']
if boto_type == "resource":
client = boto3.resource(
'ec2',
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
aws_session_token=SESSION_TOKEN,
)
elif boto_type == "client":
client = boto3.client(
'ec2',
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
aws_session_token=SESSION_TOKEN,
)
elif boto_type == "iam":
client = boto3.client(
'iam',
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
aws_session_token=SESSION_TOKEN,
)
return client
except Exception as e:
log.info("Role could not be assumed" + str(e))
def clean_nacls():
global HUB_ACCOUNT_ID
log.info("Cleaning old NACLs entries ... ")
try:
now = datetime.datetime.now()
table = dynamodb.Table(DYNAMO_TABLE)
response = table.scan()
for r in response['Items']:
if str(r['rule_no']) != "0":
t = r['timestamp']
account = r['account_id']
log.info("Searching for oldest entries in the account: " + str(account) + " attacker: " + str(r['attacker_ip']))
old = datetime.datetime.strptime(t, '%Y-%m-%d %H:%M:%S.%f')
difh = ((now - old).days * 24) + int((now - old).seconds / 3600)
log.info("Hours that remained blocked: " + str(difh))
if difh >= 6:
log.info("Cleaning NACL entry: " + str(r['rule_no']) + " account: " + str(account))
try:
if account != HUB_ACCOUNT_ID:
client = assume_role(account, "resource")
network_acl = client.NetworkAcl(r['nacl_id'])
else:
network_acl = r_ec2.NetworkAcl(r['nacl_id'])
response2 = network_acl.delete_entry(
Egress=False,
RuleNumber=int(r['rule_no'])
)
if response2['ResponseMetadata']['HTTPStatusCode'] == 200:
log.info("NACL rule deleted for attacker: " + str(r['attacker_ip']))
update_entry_attackers(str(r['attacker_ip']), str(r['hits']), "0", True)
return
else:
log.info("Failed to delete the entry")
except Exception as e:
log.info("Failed to instantiate resource NetworkAcl " + str(e))
log.info("Updating IOCs db to keep consistency ... " + str(e))
try:
update_entry_attackers(str(r['attacker_ip']), str(r['hits']), "0", True)
except Exception as e:
log.info("Updating IOCs db to keep consistency failed: " + str(e))
except Exception as e:
log.info("NACLs could not be deleted: " + str(e))
def message_to_slack(ioc):
lst = ioc.split(",")
ioc_len = len(lst)
try:
if ioc_len == 20:
ttp, hits, account_id, account_alias, region, subnet_id, src_ip, instance_id, nacl_id, country, city, asn_org, org, isp, asn, vpc_id, sg_name, sg_id, tags, event_first_seen = ioc.split(",")
nacl_url = "https://console.aws.amazon.com/vpc/home?region=" + region + "#acls:networkAclId=" + nacl_id + ";sort=networkAclId"
data = {
'text': '***************************************************************\n\n'
+ '*ATTACKER IP:* ' + src_ip + ' *HITS:* ' + hits + '\n'
+ '*TTP:* ' + ttp + '\n'
+ '*ACCOUNT ID:* ' + '`' + account_id + '`' + ' *ACCOUNT ALIAS:* ' + account_alias + ' *INSTANCE ID:* ' + '`' + instance_id + '`' + '\n'
+ '*TAGS:* ' + tags + '\n'
+ '*NACL:* ' + nacl_url + '\n'
+ '*VPC ID:* ' + '`' + vpc_id + '`' + ' *SUBNET ID:* ' + '`' + subnet_id + '`' + '\n'
+ '*COUNTRY:* ' + country + ' *CITY:* ' + city + '\n'
+ '*ASN ORG:* ' + asn_org + ' *ORG:* ' + org + ' *ISP:* ' + isp + '\n'
+ '*FIRST SEEN:* ' + event_first_seen + '\n'
+ '***************************************************************',
'username': 'CLOUD SNIPER BUDDY',
'icon_emoji': ':robot_face:'
}
else:
ttp, hits, account_id, account_alias, region, subnet_id, src_ip, instance_id, nacl_id, country, city, asn_org, org, isp, asn, vpc_id, sg_name, sg_id, tags, event_first_seen, principal_id, user_name = ioc.split(",")
data = {
'text': '***************************************************************\n\n'
+ '*ATTACKER IP:* ' + src_ip + ' *HITS:* ' + hits + '\n'
+ '*TTP:* ' + ttp + '\n'
+ '*ACCOUNT ID:* ' + '`' + account_id + '`' + ' *ACCOUNT ALIAS:* ' + account_alias + '\n'
+ '*COUNTRY:* ' + country + ' *CITY:* ' + city + '\n'
+ '*ASN ORG:* ' + asn_org + ' *ORG:* ' + org + ' *ISP:* ' + isp + '\n'
+ '*FIRST SEEN:* ' + event_first_seen + '\n'
+ '*USER NAME:* ' + user_name + ' *PRINCIPAL ID:* ' + principal_id + '\n'
+ '*DESCRIPTION:* API DescribeAlarms, commonly used in reconnaissance attacks, was invoked from a Tor exit node IP address. The threat intelligence feed does not provide resource details, so there is no automatic blocking. The user must be investigated' + '\n'
+ '***************************************************************',
'username': 'CLOUD SNIPER BUDDY',
'icon_emoji': ':robot_face:'
}
response = requests.post(WEBHOOK_URL, data=json.dumps(data), headers={'Content-Type': 'application/json'})
log.info('Sending message to Slack. Response: ' + str(response.text) + ' Response Code: ' + str(response.status_code))
except Exception as e:
log.info("Message could not be send to Slack: " + str(e))
def delete_sqs():
log.info("Deleting queue ...")
try:
for rh in message:
receipt_handle = rh['ReceiptHandle']
sqs.delete_message(
QueueUrl=QUEUE_URL,
ReceiptHandle=receipt_handle
)
log.info('Processed and deleted message: %s' % receipt_handle)
except Exception as e:
log.info("SQS queue could not be deleted" + str(e))
def put_to_s3(ioc):
log.info("Sending findings to S3 ...")
lst = ioc.split(",")
ioc_len = len(lst)
if ioc_len == 20:
ttp, hits, account_id, account_alias, region, subnet_id, src_ip, instance_id, nacl_id, country, city, asn_org, org, isp, asn, vpc_id, sg_name, sg_id, tags, event_first_seen = ioc.split(",")
dataset = {
'ttp': str(ttp),
'hits': str(hits),
'cloud.account.id': str(account_id),
'cloud.account.name': str(account_alias),
'cloud.region': str(region),
'interface.subnet.id': str(subnet_id),
'source.ip': str(src_ip),
'cloud.instance.id': str(instance_id),
'interface.nacl.id': str(nacl_id),
'country': str(country),
'city': str(city),
'asn_org': str(asn_org),
'org': str(org),
'isp': str(isp),
'asn': str(asn),
'interface.vpc.id': str(vpc_id),
'interface.security_group.name': str(sg_name),
'interface.security_group.id': str(sg_id),
'tags': str(tags),
'timestamp': str(event_first_seen),
'cloud.provider': 'aws'
}
else:
# 22
ttp, hits, account_id, account_alias, region, subnet_id, src_ip, instance_id, nacl_id, country, city, asn_org, org, isp, asn, vpc_id, sg_name, sg_id, tags, event_first_seen, principal_id, user_name = ioc.split(",")
dataset = {
'ttp': str(ttp),
'hits': str(hits),
'cloud.account.id': str(account_id),
'cloud.account.name': str(account_alias),
'cloud.region': str(region),
'source.ip': str(src_ip),
'country': str(country),
'city': str(city),
'asn_org': str(asn_org),
'org': str(org),
'isp': str(isp),
'asn': str(asn),
'timestamp': str(event_first_seen),
'cloud.provider': 'aws',
'cloud.principal_id': str(principal_id),
'cloud.user_name': str(user_name)
}
NOW = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
s3_resource = boto3.resource('s3')
bucket_name = BUCKET_NAME
iocs_path = IOCS_PATH
bucket = s3_resource.Bucket(name=bucket_name)
if iocs_path.startswith("/"):
iocs_path = iocs_path[1:]
if iocs_path.endswith("/"):
iocs_path = iocs_path[:-1]
try:
(bucket.Object(key=f"{iocs_path}/iocs_{NOW}.json")
.put(Body=bytes(json.dumps(dataset).encode('UTF-8'))))
except Exception as e:
log.info("Could not put the object to S3" + str(e))
def publish_to_sns():
publish_object = {"Message": "TOR"}
try:
response = sns.publish(
TopicArn=TOPIC_ARN,
Message=json.dumps(publish_object),
Subject="IR"
)
log.info("Publish to SNS: " + str(response['ResponseMetadata']['HTTPStatusCode']))
except Exception as e:
log.info("Could not publish to SNS " + str(e))
def cloud_sniper_threat_intelligence(event, context):
global message
log.info("Processing GuardDuty findings: %s" % json.dumps(event))
try:
clean_nacls()
message = read_sqs()
if message:
search_ioc()
incident_and_response()
delete_sqs()
log.info("Findings properly processed")
except Exception as e:
log.error('Failure to process finding ' + str(e))
|
pymoo/util/ref_dirs/energy_layer.py | jarreguit/pymoo | 762 | 33379 | import autograd.numpy as anp
import numpy as np
from autograd import value_and_grad
from pymoo.factory import normalize
from pymoo.util.ref_dirs.energy import squared_dist
from pymoo.util.ref_dirs.optimizer import Adam
from pymoo.util.reference_direction import ReferenceDirectionFactory, scale_reference_directions
class LayerwiseRieszEnergyReferenceDirectionFactory(ReferenceDirectionFactory):
def __init__(self,
n_dim,
partitions,
return_as_tuple=False,
n_max_iter=1000,
verbose=False,
X=None,
**kwargs):
super().__init__(n_dim, **kwargs)
self.scalings = None
self.n_max_iter = n_max_iter
self.verbose = verbose
self.return_as_tuple = return_as_tuple
self.X = X
self.partitions = partitions
def _step(self, optimizer, X, scalings):
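        # One optimization step: autograd returns the Riesz energy and its
        # gradient w.r.t. the layer scalings, which are updated with Adam and
        # rescaled so the largest scaling is 1.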
obj, grad = value_and_grad(calc_potential_energy)(scalings, X)
scalings = optimizer.next(scalings, np.array(grad))
scalings = normalize(scalings, xl=0, xu=scalings.max())
return scalings, obj
def _solve(self, X, scalings):
# initialize the optimizer for the run
optimizer = Adam()
# for each iteration of gradient descent
for i in range(self.n_max_iter):
# execute one optimization step
_scalings, _obj = self._step(optimizer, X, scalings)
# evaluate how much the points have moved
delta = np.abs(_scalings - scalings).sum()
if self.verbose:
print(i, "objective", _obj, "delta", delta)
# if there was only a little delta during the last iteration -> terminate
if delta < 1e-5:
scalings = _scalings
break
# otherwise use the new points for the next iteration
scalings = _scalings
self.scalings = scalings
return get_points(X, scalings)
def do(self):
X = []
scalings = []
for k, p in enumerate(self.partitions):
if p > 1:
val = np.linspace(0, 1, p + 1)[1:-1]
_X = []
for i in range(self.n_dim):
for j in range(i + 1, self.n_dim):
x = np.zeros((len(val), self.n_dim))
x[:, i] = val
x[:, j] = 1 - val
_X.append(x)
X.append(np.row_stack(_X + [np.eye(self.n_dim)]))
elif p == 1:
X.append(np.eye(self.n_dim))
else:
X.append(np.full(self.n_dim, 1 / self.n_dim)[None, :])
scalings.append(1 - k / len(self.partitions))
scalings = np.array(scalings)
X = self._solve(X, scalings)
return X
# ---------------------------------------------------------------------------------------------------------
# Energy Functions
# ---------------------------------------------------------------------------------------------------------
def get_points(X, scalings):
vals = []
for i in range(len(X)):
vals.append(scale_reference_directions(X[i], scalings[i]))
X = anp.row_stack(vals)
return X
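# Riesz s-energy (s = 2) of the scaled reference directions: the mean of the
# inverse squared pairwise distances; minimizing it spreads the points evenly.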
def calc_potential_energy(scalings, X):
X = get_points(X, scalings)
i, j = anp.triu_indices(len(X), 1)
D = squared_dist(X, X)[i, j]
if np.any(D < 1e-12):
return np.nan, np.nan
return (1 / D).mean()
|
semseg/datasets/celebamaskhq.py | Genevievekim/semantic-segmentation-1 | 196 | 33406 | import torch
from torch import Tensor
from torch.utils.data import Dataset
from torchvision import io
from pathlib import Path
from typing import Tuple
from torchvision import transforms as T
class CelebAMaskHQ(Dataset):
CLASSES = [
'background', 'skin', 'nose', 'eye_g', 'l_eye', 'r_eye', 'l_brow', 'r_brow', 'l_ear',
'r_ear', 'mouth', 'u_lip', 'l_lip', 'hair', 'hat', 'ear_r', 'neck_l', 'neck', 'cloth'
]
PALETTE = torch.tensor([
[0, 0, 0], [204, 0, 0], [76, 153, 0], [204, 204, 0], [51, 51, 255], [204, 0, 204], [0, 255, 255], [255, 204, 204], [102, 51, 0], [255, 0, 0],
[102, 204, 0], [255, 255, 0], [0, 0, 153], [0, 0, 204], [255, 51, 153], [0, 204, 204], [0, 51, 0], [255, 153, 51], [0, 204, 0]
])
def __init__(self, root: str, split: str = 'train', transform = None) -> None:
super().__init__()
assert split in ['train', 'val', 'test']
self.root = Path(root)
self.transform = transform
self.n_classes = len(self.CLASSES)
self.ignore_label = 255
self.resize = T.Resize((512, 512))
with open(self.root / f'{split}_list.txt') as f:
self.files = f.read().splitlines()
if not self.files:
raise Exception(f"No images found in {root}")
print(f"Found {len(self.files)} {split} images.")
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, index: int) -> Tuple[Tensor, Tensor]:
img_path = self.root / 'CelebA-HQ-img' / f"{self.files[index]}.jpg"
lbl_path = self.root / 'CelebAMask-HQ-label' / f"{self.files[index]}.png"
image = io.read_image(str(img_path))
image = self.resize(image)
label = io.read_image(str(lbl_path))
if self.transform:
image, label = self.transform(image, label)
return image, label.squeeze().long()
if __name__ == '__main__':
from semseg.utils.visualize import visualize_dataset_sample
visualize_dataset_sample(CelebAMaskHQ, '/home/sithu/datasets/CelebAMask-HQ') |
bleach__examples__remove_tags__clear_html/escape_protocols.py | DazEB2/SimplePyScripts | 117 | 33414 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://github.com/mozilla/bleach
# SOURCE: https://bleach.readthedocs.io/en/latest/clean.html#allowed-protocols-protocols
# pip install bleach
import bleach
# List of allowed protocols
print('List of allowed protocols:', bleach.sanitizer.ALLOWED_PROTOCOLS)
# ['http', 'https', 'mailto']
print(
bleach.clean(
'<a href="smb://more_text">allowed protocol</a>'
)
)
# <a>allowed protocol</a>
print(
bleach.clean(
'<a href="smb://more_text">allowed protocol</a>',
protocols=['http', 'https', 'smb']
)
)
# <a href="smb://more_text">allowed protocol</a>
print(
bleach.clean(
'<a href="smb://more_text">allowed protocol</a>',
protocols=bleach.ALLOWED_PROTOCOLS + ['smb']
)
)
# <a href="smb://more_text">allowed protocol</a>
|
mushroom_rl/utils/eligibility_trace.py | PuzeLiu/mushroom-rl | 344 | 33422 | <gh_stars>100-1000
from mushroom_rl.utils.table import Table
def EligibilityTrace(shape, name='replacing'):
"""
Factory method to create an eligibility trace of the provided type.
Args:
shape (list): shape of the eligibility trace table;
name (str, 'replacing'): type of the eligibility trace.
Returns:
The eligibility trace table of the provided shape and type.
"""
if name == 'replacing':
return ReplacingTrace(shape)
elif name == 'accumulating':
return AccumulatingTrace(shape)
else:
raise ValueError('Unknown type of trace.')
class ReplacingTrace(Table):
"""
Replacing trace.
"""
def reset(self):
self.table[:] = 0.
def update(self, state, action):
self.table[state, action] = 1.
class AccumulatingTrace(Table):
"""
Accumulating trace.
"""
def reset(self):
self.table[:] = 0.
def update(self, state, action):
self.table[state, action] += 1.
|
RaspberryPi/appMasterKey.py | colindembovsky/iot-central-firmware | 136 | 33448 | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.
import sys
import iotc
from iotc import IOTConnectType, IOTLogLevel
from random import randint
import base64
import hmac
import hashlib
gIsMicroPython = ('implementation' in dir(sys)) and ('name' in dir(sys.implementation)) and (sys.implementation.name == 'micropython')
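# Derive the per-device key for symmetric-key (group) enrollment: HMAC-SHA256
# of the device/registration id, keyed with the base64-decoded master key.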
def computeKey(secret, regId):
global gIsMicroPython
try:
secret = base64.b64decode(secret)
except:
print("ERROR: broken base64 secret => `" + secret + "`")
sys.exit()
if gIsMicroPython == False:
return base64.b64encode(hmac.new(secret, msg=regId.encode('utf8'), digestmod=hashlib.sha256).digest())
else:
return base64.b64encode(hmac.new(secret, msg=regId.encode('utf8'), digestmod=hashlib._sha256.sha256).digest())
deviceId = "DEVICE_ID"
scopeId = "SCOPE_ID"
masterKey = "PRIMARY/SECONDARY master Key"
deviceKey = computeKey(masterKey, deviceId)
iotc = iotc.Device(scopeId, deviceKey, deviceId, IOTConnectType.IOTC_CONNECT_SYMM_KEY)
iotc.setLogLevel(IOTLogLevel.IOTC_LOGGING_API_ONLY)
gCanSend = False
gCounter = 0
def onconnect(info):
global gCanSend
print("- [onconnect] => status:" + str(info.getStatusCode()))
if info.getStatusCode() == 0:
if iotc.isConnected():
gCanSend = True
def onmessagesent(info):
print("\t- [onmessagesent] => " + str(info.getPayload()))
def oncommand(info):
print("- [oncommand] => " + info.getTag() + " => " + str(info.getPayload()))
def onsettingsupdated(info):
print("- [onsettingsupdated] => " + info.getTag() + " => " + info.getPayload())
iotc.on("ConnectionStatus", onconnect)
iotc.on("MessageSent", onmessagesent)
iotc.on("Command", oncommand)
iotc.on("SettingsUpdated", onsettingsupdated)
iotc.connect()
while iotc.isConnected():
iotc.doNext() # do the async work needed to be done for MQTT
if gCanSend == True:
if gCounter % 20 == 0:
gCounter = 0
print("Sending telemetry..")
iotc.sendTelemetry("{ \
\"temp\": " + str(randint(20, 45)) + ", \
\"accelerometerX\": " + str(randint(2, 15)) + ", \
\"accelerometerY\": " + str(randint(3, 9)) + ", \
\"accelerometerZ\": " + str(randint(1, 4)) + "}")
gCounter += 1
|
attic/operator/dispatch.py | matteoshen/example-code | 5,651 | 33484 | """
Experiments with infix operator dispatch
>>> kadd = KnowsAdd()
>>> kadd + 1
(<KnowsAdd object>, 1)
>>> kadd * 1
Traceback (most recent call last):
  ...
TypeError: unsupported operand type(s) for *: 'KnowsAdd' and 'int'
"""
class KnowsAdd:
def __add__(self, other):
return self, other
def __repr__(self):
return '<{} object>'.format(type(self).__name__)
|
utest/resources/robotdata/datagenerator.py | guojiajiaok/RIDE | 775 | 33493 | #!/usr/bin/env python
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from getopt import getopt, GetoptError
from random import randint
import os
import sys
SUITE=\
"""*** Settings ***
Resource resource.txt
*** Test Cases ***
%TESTCASES%
*** Keywords ***
Test Keyword
Log jee
"""
RESOURCE=\
"""*** Variables ***
@{Resource Var} MOI
*** Keywords ***
%KEYWORDS%
"""
KEYWORD_TEMPLATE=\
"""My Keyword %KW_ID%
No Operation"""
TEST_CASE_TEMPLATE=\
"""My Test %TEST_ID%
My Keyword %KW_ID%
Log moi
Test Keyword
Log moi
Test Keyword
Log moi
Test Keyword
Log moi
Test Keyword
Log moi
Test Keyword
My Keyword %KW_ID%
Test Keyword
Log moi
Test Keyword
Log moi
Test Keyword
Log moi"""
def generate_tests(number_of_tests, number_of_keywords):
mytests = range(number_of_tests)
return '\n'.join(TEST_CASE_TEMPLATE.replace('%TEST_ID%', str(test_id))\
.replace('%KW_ID%', str(randint(0,number_of_keywords-1)))\
for test_id in mytests)
def generate_keywords(number_of_keywords):
mykeywords = range(number_of_keywords)
return '\n'.join(KEYWORD_TEMPLATE.replace('%KW_ID%', str(i)) for i in mykeywords)
def generate_suite(number_of_tests, number_of_keywords):
return SUITE.replace('%TESTCASES%', generate_tests(number_of_tests, number_of_keywords))\
.replace('%KEYWORDS%', generate_keywords(number_of_keywords))
def generate_resource(number_of_keywords):
return RESOURCE.replace('%KEYWORDS%', generate_keywords(number_of_keywords))
def generate(directory, suites, tests, keywords):
os.mkdir(directory)
mysuites = range(suites)
for suite_index in mysuites:
f = open(os.path.join('.', directory, 'suite%s.txt' % suite_index), 'w')
f.write(generate_suite(tests, keywords))
f.close()
r = open(os.path.join('.', directory, 'resource.txt'), 'w')
r.write(generate_resource(keywords))
r.close()
def usage():
print('datagenerator.py -d [directory] -s [NUMBER OF SUITES] -t [NUMBER OF TESTS IN SUITE] -k [NUMBER OF KEYWORDS]')
def main(args):
try:
opts, args = getopt(args, 'd:s:t:k:', [])
except GetoptError as e:
print(e)
usage()
sys.exit(2)
if len(opts) != 4:
if opts:
print(opts)
usage()
sys.exit(2)
for opt, arg in opts:
if opt == '-d':
directory = arg
if opt == '-s':
suites = int(arg)
if opt == '-t':
tests = int(arg)
if opt == '-k':
keywords = int(arg)
generate(directory, suites, tests, keywords)
if __name__ == '__main__':
    main(sys.argv[1:])
|
sahara/service/edp/oozie/engine.py | ksshanam/sahara | 161 | 33497 | <gh_stars>100-1000
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import os
import xml.dom.minidom as xml
from oslo_config import cfg
from oslo_utils import uuidutils
import six
from sahara import conductor as c
from sahara import context
from sahara.service.edp import base_engine
from sahara.service.edp import hdfs_helper as h
from sahara.service.edp.job_binaries import manager as jb_manager
from sahara.service.edp import job_utils
from sahara.service.edp.oozie import oozie as o
from sahara.service.edp.oozie.workflow_creator import workflow_factory
from sahara.service.validations.edp import job_execution as j
from sahara.utils import edp
from sahara.utils import remote
from sahara.utils import xmlutils as x
CONF = cfg.CONF
conductor = c.API
@six.add_metaclass(abc.ABCMeta)
class OozieJobEngine(base_engine.JobEngine):
def __init__(self, cluster):
self.cluster = cluster
self.plugin = job_utils.get_plugin(self.cluster)
def get_remote_client(self):
return o.RemoteOozieClient(self.get_oozie_server_uri(self.cluster),
self.get_oozie_server(self.cluster),
self.get_hdfs_user())
def get_client(self):
# by default engine will return standard oozie client implementation
return o.OozieClient(self.get_oozie_server_uri(self.cluster),
self.get_oozie_server(self.cluster))
def _get_oozie_job_params(self, hdfs_user, path_to_workflow,
oozie_params, use_hbase_lib,
scheduled_params=None, job_dir=None,
job_execution_type=None):
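        # Build the Oozie job properties: NameNode/ResourceManager URIs, an
        # optional HBase common lib on oozie.libpath, and either the workflow
        # application path or the coordinator parameters for scheduled jobs.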
oozie_libpath_key = "oozie.libpath"
oozie_libpath = ""
rm_path = self.get_resource_manager_uri(self.cluster)
nn_path = self.get_name_node_uri(self.cluster)
hbase_common_lib_path = "%s%s" % (nn_path, h.HBASE_COMMON_LIB_PATH)
if use_hbase_lib:
if oozie_libpath_key in oozie_params:
oozie_libpath = "%s,%s" % (oozie_params.get(oozie_libpath_key,
""), hbase_common_lib_path)
else:
oozie_libpath = hbase_common_lib_path
if job_execution_type == "scheduled":
app_path = "oozie.coord.application.path"
job_parameters = {
"start": scheduled_params.get('start'),
"end": scheduled_params.get('end'),
"frequency": scheduled_params.get('frequency'),
"workflowAppUri": "%s%s" % (nn_path, job_dir),
app_path: "%s%s" % (nn_path, job_dir)}
else:
app_path = "oozie.wf.application.path"
job_parameters = {
app_path: "%s%s" % (nn_path, path_to_workflow)}
job_parameters["nameNode"] = nn_path
job_parameters["user.name"] = hdfs_user
job_parameters["jobTracker"] = rm_path
job_parameters[oozie_libpath_key] = oozie_libpath
job_parameters["oozie.use.system.libpath"] = "true"
# Don't let the application path be overwritten, that can't
# possibly make any sense
if app_path in oozie_params:
del oozie_params[app_path]
if oozie_libpath_key in oozie_params:
del oozie_params[oozie_libpath_key]
job_parameters.update(oozie_params)
return job_parameters
def _upload_workflow_file(self, where, job_dir, wf_xml, hdfs_user):
with remote.get_remote(where) as r:
h.put_file_to_hdfs(r, wf_xml, "workflow.xml", job_dir, hdfs_user)
return "%s/workflow.xml" % job_dir
def _upload_coordinator_file(self, where, job_dir, wf_xml, hdfs_user):
with remote.get_remote(where) as r:
h.put_file_to_hdfs(r, wf_xml, "coordinator.xml", job_dir,
hdfs_user)
return "%s/coordinator.xml" % job_dir
def cancel_job(self, job_execution):
if job_execution.engine_job_id is not None:
client = self.get_client()
client.kill_job(job_execution)
return client.get_job_info(job_execution)
def get_job_status(self, job_execution):
if job_execution.engine_job_id is not None:
return self.get_client().get_job_info(job_execution)
def _prepare_run_job(self, job_execution):
ctx = context.ctx()
# This will be a dictionary of tuples, (native_url, runtime_url)
# keyed by data_source id
data_source_urls = {}
prepared_job_params = {}
job = conductor.job_get(ctx, job_execution.job_id)
input_source, output_source = job_utils.get_input_output_data_sources(
job_execution, job, data_source_urls, self.cluster)
# Updated_job_configs will be a copy of job_execution.job_configs with
# any name or uuid references to data_sources resolved to paths
# assuming substitution is enabled.
# If substitution is not enabled then updated_job_configs will
# just be a reference to job_execution.job_configs to avoid a copy.
# Additional_sources will be a list of any data_sources found.
additional_sources, updated_job_configs = (
job_utils.resolve_data_source_references(job_execution.job_configs,
job_execution.id,
data_source_urls,
self.cluster)
)
job_execution = conductor.job_execution_update(
ctx, job_execution,
{"data_source_urls": job_utils.to_url_dict(data_source_urls)})
# Now that we've recorded the native urls, we can switch to the
# runtime urls
data_source_urls = job_utils.to_url_dict(data_source_urls,
runtime=True)
data_sources = additional_sources + [input_source, output_source]
job_utils.prepare_cluster_for_ds(data_sources,
self.cluster, updated_job_configs,
data_source_urls)
proxy_configs = updated_job_configs.get('proxy_configs')
configs = updated_job_configs.get('configs', {})
use_hbase_lib = configs.get('edp.hbase_common_lib', {})
# Extract all the 'oozie.' configs so that they can be set in the
# job properties file. These are config values for Oozie itself,
# not the job code
oozie_params = {}
for k in list(configs):
if k.startswith('oozie.'):
oozie_params[k] = configs[k]
external_hdfs_urls = self._resolve_external_hdfs_urls(
job_execution.job_configs)
for url in external_hdfs_urls:
h.configure_cluster_for_hdfs(self.cluster, url)
hdfs_user = self.get_hdfs_user()
# TODO(tmckay): this should probably be "get_namenode"
# but that call does not exist in the oozie engine api now.
oozie_server = self.get_oozie_server(self.cluster)
wf_dir = self._create_hdfs_workflow_dir(oozie_server, job)
self._upload_job_files_to_hdfs(oozie_server, wf_dir, job, configs,
proxy_configs)
wf_xml = workflow_factory.get_workflow_xml(
job, self.cluster, updated_job_configs,
input_source, output_source,
hdfs_user, data_source_urls)
path_to_workflow = self._upload_workflow_file(oozie_server, wf_dir,
wf_xml, hdfs_user)
prepared_job_params['context'] = ctx
prepared_job_params['hdfs_user'] = hdfs_user
prepared_job_params['path_to_workflow'] = path_to_workflow
prepared_job_params['use_hbase_lib'] = use_hbase_lib
prepared_job_params['job_execution'] = job_execution
prepared_job_params['oozie_params'] = oozie_params
prepared_job_params['wf_dir'] = wf_dir
prepared_job_params['oozie_server'] = oozie_server
return prepared_job_params
def run_job(self, job_execution):
prepared_job_params = self._prepare_run_job(job_execution)
path_to_workflow = prepared_job_params['path_to_workflow']
hdfs_user = prepared_job_params['hdfs_user']
oozie_params = prepared_job_params['oozie_params']
use_hbase_lib = prepared_job_params['use_hbase_lib']
ctx = prepared_job_params['context']
job_execution = prepared_job_params['job_execution']
job_params = self._get_oozie_job_params(hdfs_user,
path_to_workflow,
oozie_params,
use_hbase_lib)
client = self.get_client()
oozie_job_id = client.add_job(x.create_hadoop_xml(job_params),
job_execution)
job_execution = conductor.job_execution_get(ctx, job_execution.id)
if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
return (None, edp.JOB_STATUS_KILLED, None)
conductor.job_execution_update(
context.ctx(), job_execution.id,
{'info': {'status': edp.JOB_STATUS_READYTORUN},
'engine_job_id': oozie_job_id})
client.run_job(job_execution, oozie_job_id)
try:
status = client.get_job_info(job_execution, oozie_job_id)['status']
except Exception:
status = None
return (oozie_job_id, status, None)
def run_scheduled_job(self, job_execution):
prepared_job_params = self._prepare_run_job(job_execution)
oozie_server = prepared_job_params['oozie_server']
wf_dir = prepared_job_params['wf_dir']
hdfs_user = prepared_job_params['hdfs_user']
oozie_params = prepared_job_params['oozie_params']
use_hbase_lib = prepared_job_params['use_hbase_lib']
ctx = prepared_job_params['context']
job_execution = prepared_job_params['job_execution']
coord_configs = {"jobTracker": "${jobTracker}",
"nameNode": "${nameNode}"}
coord_xml = self._create_coordinator_xml(coord_configs)
self._upload_coordinator_file(oozie_server, wf_dir, coord_xml,
hdfs_user)
job_params = self._get_oozie_job_params(
hdfs_user, None, oozie_params, use_hbase_lib,
job_execution.job_configs.job_execution_info, wf_dir,
"scheduled")
client = self.get_client()
oozie_job_id = client.add_job(x.create_hadoop_xml(job_params),
job_execution)
job_execution = conductor.job_execution_get(ctx, job_execution.id)
if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
return (None, edp.JOB_STATUS_KILLED, None)
try:
status = client.get_job_status(job_execution,
oozie_job_id)['status']
except Exception:
status = None
return (oozie_job_id, status, None)
@abc.abstractmethod
def get_hdfs_user(self):
pass
@abc.abstractmethod
def create_hdfs_dir(self, remote, dir_name):
pass
@abc.abstractmethod
def get_oozie_server_uri(self, cluster):
pass
@abc.abstractmethod
def get_oozie_server(self, cluster):
pass
@abc.abstractmethod
def get_name_node_uri(self, cluster):
pass
@abc.abstractmethod
def get_resource_manager_uri(self, cluster):
pass
def validate_job_execution(self, cluster, job, data):
# Shell job type requires no specific fields
if job.type == edp.JOB_TYPE_SHELL:
return
# All other types except Java require input and output
# objects and Java require main class
if job.type == edp.JOB_TYPE_JAVA:
j.check_main_class_present(data, job)
else:
j.check_data_sources(data, job)
job_type, subtype = edp.split_job_type(job.type)
if job_type == edp.JOB_TYPE_MAPREDUCE and (
subtype == edp.JOB_SUBTYPE_STREAMING):
j.check_streaming_present(data, job)
@staticmethod
def get_possible_job_config(job_type):
return workflow_factory.get_possible_job_config(job_type)
@staticmethod
def get_supported_job_types():
return [edp.JOB_TYPE_HIVE,
edp.JOB_TYPE_JAVA,
edp.JOB_TYPE_MAPREDUCE,
edp.JOB_TYPE_MAPREDUCE_STREAMING,
edp.JOB_TYPE_PIG,
edp.JOB_TYPE_SHELL]
def _prepare_job_binaries(self, job_binaries, r):
for jb in job_binaries:
jb_manager.JOB_BINARIES.get_job_binary_by_url(jb.url). \
prepare_cluster(jb, remote=r)
def _upload_job_files_to_hdfs(self, where, job_dir, job, configs,
proxy_configs=None):
mains = list(job.mains) if job.mains else []
libs = list(job.libs) if job.libs else []
builtin_libs = edp.get_builtin_binaries(job, configs)
uploaded_paths = []
hdfs_user = self.get_hdfs_user()
job_dir_suffix = 'lib' if job.type != edp.JOB_TYPE_SHELL else ''
lib_dir = os.path.join(job_dir, job_dir_suffix)
with remote.get_remote(where) as r:
job_binaries = mains + libs
self._prepare_job_binaries(job_binaries, r)
# upload mains
uploaded_paths.extend(self._upload_job_binaries(r, mains,
proxy_configs,
hdfs_user,
job_dir))
# upload libs
if len(libs) and job_dir_suffix:
# HDFS 2.2.0 fails to put file if the lib dir does not exist
self.create_hdfs_dir(r, lib_dir)
uploaded_paths.extend(self._upload_job_binaries(r, libs,
proxy_configs,
hdfs_user,
lib_dir))
            # upload builtin_libs
for lib in builtin_libs:
h.put_file_to_hdfs(r, lib['raw'], lib['name'], lib_dir,
hdfs_user)
                uploaded_paths.append(os.path.join(lib_dir, lib['name']))
return uploaded_paths
def _upload_job_binaries(self, r, job_binaries, proxy_configs,
hdfs_user, job_dir):
uploaded_paths = []
for jb in job_binaries:
path = jb_manager.JOB_BINARIES. \
get_job_binary_by_url(jb.url). \
copy_binary_to_cluster(jb, proxy_configs=proxy_configs,
remote=r, context=context.ctx())
h.copy_from_local(r, path, job_dir, hdfs_user)
uploaded_paths.append(path)
return uploaded_paths
def _create_hdfs_workflow_dir(self, where, job):
constructed_dir = '/user/%s/' % self.get_hdfs_user()
constructed_dir = self._add_postfix(constructed_dir)
constructed_dir += '%s/%s' % (job.name, uuidutils.generate_uuid())
with remote.get_remote(where) as r:
self.create_hdfs_dir(r, constructed_dir)
return constructed_dir
def _create_coordinator_xml(self, coord_configs, config_filter=None,
appname='coord'):
doc = xml.Document()
# Create the <coordinator-app> base element
coord = doc.createElement('coordinator-app')
coord.attributes['name'] = appname
coord.attributes['start'] = "${start}"
coord.attributes['end'] = "${end}"
coord.attributes['frequency'] = "${frequency}"
coord.attributes['timezone'] = 'UTC'
coord.attributes['xmlns'] = 'uri:oozie:coordinator:0.2'
doc.appendChild(coord)
action = doc.createElement('action')
workflow = doc.createElement('workflow')
coord.appendChild(action)
action.appendChild(workflow)
x.add_text_element_to_tag(doc, "workflow", 'app-path',
"${workflowAppUri}")
configuration = doc.createElement('configuration')
workflow.appendChild(configuration)
default_configs = []
if config_filter is not None:
default_configs = [cfg['name'] for cfg in config_filter]
for name in sorted(coord_configs):
if name in default_configs or config_filter is None:
x.add_property_to_configuration(doc, name, coord_configs[name])
# Return newly created XML
return doc.toprettyxml(indent=" ")
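        # For reference, the document built above renders roughly as:
        #   <coordinator-app name="coord" start="${start}" end="${end}"
        #                    frequency="${frequency}" timezone="UTC"
        #                    xmlns="uri:oozie:coordinator:0.2">
        #     <action><workflow>
        #       <app-path>${workflowAppUri}</app-path>
        #       <configuration>  <!-- one <property> per entry kept from coord_configs -->
        #       </configuration>
        #     </workflow></action>
        #   </coordinator-app>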
def _add_postfix(self, constructed_dir):
def _append_slash_if_needed(path):
if path[-1] != '/':
path += '/'
return path
constructed_dir = _append_slash_if_needed(constructed_dir)
if CONF.job_workflow_postfix:
constructed_dir = ''.join([str(constructed_dir),
str(CONF.job_workflow_postfix)])
return _append_slash_if_needed(constructed_dir)
def _resolve_external_hdfs_urls(self, job_configs):
external_hdfs_urls = []
for k, v in six.iteritems(job_configs.get('configs', {})):
if isinstance(v, six.string_types) and v.startswith("hdfs://"):
external_hdfs_urls.append(v)
for k, v in six.iteritems(job_configs.get('params', {})):
if isinstance(v, six.string_types) and v.startswith("hdfs://"):
external_hdfs_urls.append(v)
for v in job_configs.get('args', []):
if isinstance(v, six.string_types) and v.startswith("hdfs://"):
external_hdfs_urls.append(v)
return external_hdfs_urls
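        # Example (illustrative only): with job_configs like
        #   {'configs': {'mapred.input': 'hdfs://nn1/in'},
        #    'params': {'OUT': 'hdfs://nn2/out'},
        #    'args': ['hdfs://nn3/extra']}
        # this returns ['hdfs://nn1/in', 'hdfs://nn2/out', 'hdfs://nn3/extra'];
        # the caller then registers each of those namenodes with the cluster.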
def suspend_job(self, job_execution):
return self._manage_job(job_execution, edp.JOB_ACTION_SUSPEND)
def _manage_job(self, job_execution, action):
if job_execution.oozie_job_id is not None:
client = self.get_client()
if action == edp.JOB_ACTION_SUSPEND:
client.suspend_job(job_execution)
return client.get_job_status(job_execution)
|
source/Tutorials/Actions/client_0.py | dsauval/ros2_documentation | 291 | 33503 | import rclpy
from rclpy.action import ActionClient
from rclpy.node import Node
from action_tutorials_interfaces.action import Fibonacci
class FibonacciActionClient(Node):
def __init__(self):
super().__init__('fibonacci_action_client')
self._action_client = ActionClient(self, Fibonacci, 'fibonacci')
def send_goal(self, order):
goal_msg = Fibonacci.Goal()
goal_msg.order = order
self._action_client.wait_for_server()
return self._action_client.send_goal_async(goal_msg)
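    # Note: send_goal() returns the Future produced by send_goal_async(), which
    # completes when the server *accepts* the goal. Spinning until that future
    # completes (as main() does below) therefore waits for goal acceptance only,
    # not for the Fibonacci result itself.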
def main(args=None):
rclpy.init(args=args)
action_client = FibonacciActionClient()
future = action_client.send_goal(10)
rclpy.spin_until_future_complete(action_client, future)
if __name__ == '__main__':
main()
|
qcfractal/tests/test_server.py | MolSSI/dqm_server | 113 | 33512 | """
Tests the DQM Server class
"""
import json
import os
import threading
import pytest
import requests
import qcfractal.interface as ptl
from qcfractal import FractalServer, FractalSnowflake, FractalSnowflakeHandler
from qcfractal.testing import (
await_true,
find_open_port,
pristine_loop,
test_server,
using_geometric,
using_rdkit,
using_torsiondrive,
)
meta_set = {"errors", "n_inserted", "success", "duplicates", "error_description", "validation_errors"}
def test_server_information(test_server):
client = ptl.FractalClient(test_server)
server_info = client.server_information()
assert {"name", "heartbeat_frequency", "counts"} <= server_info.keys()
assert server_info["counts"].keys() >= {"molecule", "kvstore", "result", "collection"}
def test_storage_socket(test_server):
    storage_api_addr = test_server.get_address() + "collection"  # Targets an endpoint in the FractalServer
storage = {
"collection": "TorsionDriveRecord",
"name": "Torsion123",
"something": "else",
"array": ["54321"],
"visibility": True,
"view_available": False,
"group": "default",
}
# Cast collection type to lower since the server-side does it anyways
storage["collection"] = storage["collection"].lower()
r = requests.post(storage_api_addr, json={"meta": {}, "data": storage})
assert r.status_code == 200, r.reason
pdata = r.json()
assert pdata["meta"].keys() == meta_set
assert pdata["meta"]["n_inserted"] == 1
r = requests.get(
storage_api_addr, json={"meta": {}, "data": {"collection": storage["collection"], "name": storage["name"]}}
)
print(r.content)
assert r.status_code == 200, r.reason
pdata = r.json()
col_id = pdata["data"][0].pop("id")
# got a default values when created
pdata["data"][0].pop("tags", None)
pdata["data"][0].pop("tagline", None)
pdata["data"][0].pop("provenance", None)
pdata["data"][0].pop("view_url_hdf5", None)
pdata["data"][0].pop("view_url_plaintext", None)
pdata["data"][0].pop("view_metadata", None)
pdata["data"][0].pop("description", None)
assert pdata["data"][0] == storage
# Test collection id sub-resource
r = requests.get(f"{storage_api_addr}/{col_id}", json={"meta": {}, "data": {}}).json()
assert r["meta"]["success"] is True
assert len(r["data"]) == 1
assert r["data"][0]["id"] == col_id
r = requests.get(f"{storage_api_addr}/{col_id}", json={"meta": {}, "data": {"name": "wrong name"}}).json()
assert r["meta"]["success"] is True
assert len(r["data"]) == 0
def test_bad_collection_get(test_server):
for storage_api_addr in [
test_server.get_address() + "collection/1234/entry",
test_server.get_address() + "collection/1234/value",
test_server.get_address() + "collection/1234/list",
test_server.get_address() + "collection/1234/molecule",
]:
r = requests.get(storage_api_addr, json={"meta": {}, "data": {}})
assert r.status_code == 200, f"{r.reason} {storage_api_addr}"
assert r.json()["meta"]["success"] is False, storage_api_addr
def test_bad_collection_post(test_server):
storage = {
"collection": "TorsionDriveRecord",
"name": "Torsion123",
"something": "else",
"array": ["54321"],
"visibility": True,
"view_available": False,
}
# Cast collection type to lower since the server-side does it anyways
storage["collection"] = storage["collection"].lower()
for storage_api_addr in [
test_server.get_address() + "collection/1234",
test_server.get_address() + "collection/1234/value",
test_server.get_address() + "collection/1234/entry",
test_server.get_address() + "collection/1234/list",
test_server.get_address() + "collection/1234/molecule",
]:
r = requests.post(storage_api_addr, json={"meta": {}, "data": storage})
assert r.status_code == 200, r.reason
assert r.json()["meta"]["success"] is False
def test_bad_view_endpoints(test_server):
"""Tests that certain misspellings of the view endpoints result in 404s"""
addr = test_server.get_address()
assert requests.get(addr + "collection//value").status_code == 404
assert requests.get(addr + "collection/234/values").status_code == 404
assert requests.get(addr + "collections/234/value").status_code == 404
assert requests.get(addr + "collection/234/view/value").status_code == 404
assert requests.get(addr + "collection/value").status_code == 404
assert requests.get(addr + "collection/S22").status_code == 404
@pytest.mark.slow
def test_snowflakehandler_restart():
with FractalSnowflakeHandler() as server:
server.client()
proc1 = server._qcfractal_proc
server.restart()
server.client()
proc2 = server._qcfractal_proc
assert proc1 != proc2
assert proc1.poll() is not None
assert proc2.poll() is not None
def test_snowflakehandler_log():
with FractalSnowflakeHandler() as server:
proc = server._qcfractal_proc
assert "No SSL files passed in" in server.show_log(show=False, nlines=100)
assert "0 task" not in server.show_log(show=False, nlines=100)
assert proc.poll() is not None
@pytest.mark.slow
@using_geometric
@using_torsiondrive
@using_rdkit
def test_snowflake_service():
with FractalSnowflakeHandler() as server:
client = server.client()
hooh = ptl.data.get_molecule("hooh.json")
# Geometric options
tdinput = {
"initial_molecule": [hooh],
"keywords": {"dihedrals": [[0, 1, 2, 3]], "grid_spacing": [90]},
"optimization_spec": {"program": "geometric", "keywords": {"coordsys": "tric"}},
"qc_spec": {"driver": "gradient", "method": "UFF", "basis": None, "keywords": None, "program": "rdkit"},
}
ret = client.add_service([tdinput])
def geometric_await():
td = client.query_procedures(id=ret.ids)[0]
return td.status == "COMPLETE"
assert await_true(60, geometric_await, period=2), client.query_procedures(id=ret.ids)[0]
|
demo/python/horizon.py | ebraminio/astronomy-fork | 138 | 33562 | #!/usr/bin/env python3
#
# horizon.py - by <NAME> - 2019-12-18
#
# Example Python program for Astronomy Engine:
# https://github.com/cosinekitty/astronomy
#
# This is a more advanced example. It shows how to use coordinate
# transforms and a binary search to find the two azimuths where the
# ecliptic intersects with an observer's horizon at a given date and time.
#
# To execute, run the command:
#
# python3 horizon.py latitude longitude [yyyy-mm-ddThh:mm:ssZ]
#
import sys
import astronomy
from astro_demo_common import ParseArgs
NUM_SAMPLES = 4
def ECLIPLON(i):
return (360.0 * i) / NUM_SAMPLES
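# With NUM_SAMPLES = 4, the sampled ecliptic longitudes are 0, 90, 180 and 270
# degrees; adjacent samples whose horizon altitudes straddle zero bracket a
# crossing that Search() below refines.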
def HorizontalCoords(ecliptic_longitude, time, rot_ecl_hor):
eclip = astronomy.Spherical(
0.0, # being "on the ecliptic plane" means ecliptic latitude is zero.
ecliptic_longitude,
1.0 # any positive distance value will work fine.
)
# Convert ecliptic angular coordinates to ecliptic vector.
ecl_vec = astronomy.VectorFromSphere(eclip, time)
# Use the rotation matrix to convert ecliptic vector to horizontal vector.
hor_vec = astronomy.RotateVector(rot_ecl_hor, ecl_vec)
# Find horizontal angular coordinates, correcting for atmospheric refraction.
return astronomy.HorizonFromVector(hor_vec, astronomy.Refraction.Normal)
def Search(time, rot_ecl_hor, e1, e2):
tolerance = 1.0e-6 # one-millionth of a degree is close enough!
# Binary search: find the ecliptic longitude such that the horizontal altitude
# ascends through a zero value. The caller must pass e1, e2 such that the altitudes
# bound zero in ascending order.
while True:
e3 = (e1 + e2) / 2.0
h3 = HorizontalCoords(e3, time, rot_ecl_hor)
if abs(e2-e1) < tolerance:
return (e3, h3)
if h3.lat < 0.0:
e1 = e3
else:
e2 = e3
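# The bisection above halves the [e1, e2] interval (at most 90 degrees wide for
# NUM_SAMPLES = 4) until it is narrower than 1e-6 degrees, i.e. on the order of
# log2(90 / 1e-6) ~ 27 iterations per horizon crossing.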
def FindEclipticCrossings(observer, time):
# The ecliptic is a celestial circle that describes the mean plane of
# the Earth's orbit around the Sun. We use J2000 ecliptic coordinates,
# meaning the x-axis is defined to where the plane of the Earth's
# equator on January 1, 2000 at noon UTC intersects the ecliptic plane.
# The positive x-axis points toward the March equinox.
# Calculate a rotation matrix that converts J2000 ecliptic vectors
# to horizontal vectors for this observer and time.
rot = astronomy.Rotation_ECL_HOR(time, observer)
# Sample several points around the ecliptic.
# Remember the horizontal coordinates for each sample.
hor = [HorizontalCoords(ECLIPLON(i), time, rot) for i in range(NUM_SAMPLES)]
for i in range(NUM_SAMPLES):
a1 = hor[i].lat
a2 = hor[(i+1) % NUM_SAMPLES].lat
e1 = ECLIPLON(i)
e2 = ECLIPLON(i+1)
if a1 * a2 <= 0.0:
if a2 > a1:
(ex, h) = Search(time, rot, e1, e2)
else:
(ex, h) = Search(time, rot, e2, e1)
if h.lon > 0.0 and h.lon < 180.0:
direction = 'ascends'
else:
direction = 'descends'
print('Ecliptic longitude {:0.4f} {} through horizon az {:0.4f}, alt {:0.5g}'.format(ex, direction, h.lon, h.lat))
return 0
if __name__ == '__main__':
observer, time = ParseArgs(sys.argv)
sys.exit(FindEclipticCrossings(observer, time))
|
backup/socketbackend.py | bit0fun/plugins | 173 | 33564 | from collections import namedtuple
import json, logging, socket, re, struct, time
from typing import Tuple, Iterator
from urllib.parse import urlparse, parse_qs
from backend import Backend, Change
from protocol import PacketType, recvall, PKT_CHANGE_TYPES, change_from_packet, packet_from_change, send_packet, recv_packet
# Total number of reconnection tries
RECONNECT_TRIES=5
# Delay in seconds between reconnections (initial)
RECONNECT_DELAY=5
# Scale delay factor after each failure
RECONNECT_DELAY_BACKOFF=1.5
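# With the defaults above, the delays between reconnection attempts are
# 5, 7.5, 11.25, 16.875 and ~25.3 seconds: RECONNECT_DELAY scaled by
# RECONNECT_DELAY_BACKOFF after each failed attempt, for RECONNECT_TRIES tries.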
HostPortInfo = namedtuple('HostPortInfo', ['host', 'port', 'addrtype'])
SocketURLInfo = namedtuple('SocketURLInfo', ['target', 'proxytype', 'proxytarget'])
# Network address type.
class AddrType:
IPv4 = 0
IPv6 = 1
NAME = 2
# Proxy type. Only SOCKS5 supported at the moment as this is sufficient for Tor.
class ProxyType:
DIRECT = 0
SOCKS5 = 1
def parse_host_port(path: str) -> HostPortInfo:
'''Parse a host:port pair.'''
if path.startswith('['): # bracketed IPv6 address
eidx = path.find(']')
if eidx == -1:
raise ValueError('Unterminated bracketed host address.')
host = path[1:eidx]
addrtype = AddrType.IPv6
eidx += 1
if eidx >= len(path) or path[eidx] != ':':
raise ValueError('Port number missing.')
eidx += 1
else:
eidx = path.find(':')
if eidx == -1:
raise ValueError('Port number missing.')
host = path[0:eidx]
        if re.match(r'\d+\.\d+\.\d+\.\d+$', host):  # matches IPv4 address format
addrtype = AddrType.IPv4
else:
addrtype = AddrType.NAME
eidx += 1
try:
port = int(path[eidx:])
except ValueError:
raise ValueError('Invalid port number')
return HostPortInfo(host=host, port=port, addrtype=addrtype)
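# Examples (illustrative only; the hostname is made up):
#   parse_host_port('127.0.0.1:1234')          -> HostPortInfo('127.0.0.1', 1234, AddrType.IPv4)
#   parse_host_port('[::1]:1234')              -> HostPortInfo('::1', 1234, AddrType.IPv6)
#   parse_host_port('backup.example.org:1234') -> HostPortInfo('backup.example.org', 1234, AddrType.NAME)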
def parse_socket_url(destination: str) -> SocketURLInfo:
'''Parse a socket: URL to extract the information contained in it.'''
url = urlparse(destination)
if url.scheme != 'socket':
raise ValueError('Scheme for socket backend must be socket:...')
target = parse_host_port(url.path)
proxytype = ProxyType.DIRECT
proxytarget = None
# parse query parameters
# reject unknown parameters (currently all of them)
qs = parse_qs(url.query)
for (key, values) in qs.items():
if key == 'proxy': # proxy=socks5:127.0.0.1:9050
if len(values) != 1:
raise ValueError('Proxy can only have one value')
(ptype, ptarget) = values[0].split(':', 1)
if ptype != 'socks5':
raise ValueError('Unknown proxy type ' + ptype)
proxytype = ProxyType.SOCKS5
proxytarget = parse_host_port(ptarget)
else:
raise ValueError('Unknown query string parameter ' + key)
return SocketURLInfo(target=target, proxytype=proxytype, proxytarget=proxytarget)
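# Example (illustrative only): a destination such as
#   socket:127.0.0.1:1234?proxy=socks5:127.0.0.1:9050
# yields a target of 127.0.0.1:1234 reached through a local SOCKS5 proxy (the
# form used for Tor); without the query string, proxytype stays DIRECT.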
class SocketBackend(Backend):
def __init__(self, destination: str, create: bool):
self.version = None
self.prev_version = None
self.destination = destination
self.url = parse_socket_url(destination)
self.connect()
def connect(self):
if self.url.proxytype == ProxyType.DIRECT:
if self.url.target.addrtype == AddrType.IPv6:
self.sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else: # TODO NAME is assumed to be IPv4 for now
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
assert(self.url.proxytype == ProxyType.SOCKS5)
import socks
self.sock = socks.socksocket()
self.sock.set_proxy(socks.SOCKS5, self.url.proxytarget.host, self.url.proxytarget.port)
logging.info('Connecting to {}:{} (addrtype {}, proxytype {}, proxytarget {})...'.format(
self.url.target.host, self.url.target.port, self.url.target.addrtype,
self.url.proxytype, self.url.proxytarget))
self.sock.connect((self.url.target.host, self.url.target.port))
logging.info('Connected to {}'.format(self.destination))
def _send_packet(self, typ: int, payload: bytes) -> None:
send_packet(self.sock, typ, payload)
def _recv_packet(self) -> Tuple[int, bytes]:
return recv_packet(self.sock)
def initialize(self) -> bool:
'''
Initialize socket backend by request current metadata from server.
'''
logging.info('Initializing backend')
self._request_metadata()
logging.info('Initialized SocketBackend: protocol={}, version={}, prev_version={}, version_count={}'.format(
self.protocol, self.version, self.prev_version, self.version_count
))
return True
def _request_metadata(self) -> None:
self._send_packet(PacketType.REQ_METADATA, b'')
(typ, payload) = self._recv_packet()
assert(typ == PacketType.METADATA)
self.protocol, self.version, self.prev_version, self.version_count = struct.unpack("!IIIQ", payload)
def add_change(self, entry: Change) -> bool:
typ, payload = packet_from_change(entry)
base_version = self.version
retry = 0
retry_delay = RECONNECT_DELAY
need_connect = False
while True: # Retry loop
try:
if need_connect:
self.connect()
# Request metadata, to know where we stand
self._request_metadata()
if self.version == entry.version:
# If the current version at the server side matches the version of the
# entry, the packet was succesfully sent and processed and the error
# happened afterward. Nothing left to do.
return True
elif base_version == self.version:
# The other acceptable option is that the current version still matches
# that on the server side. Then we retry.
pass
else:
raise Exception('Unexpected backup version {} after reconnect'.format(self.version))
self._send_packet(typ, payload)
# Wait for change to be acknowledged before continuing.
(typ, _) = self._recv_packet()
assert(typ == PacketType.ACK)
except (BrokenPipeError, OSError):
pass
else:
break
if retry == RECONNECT_TRIES:
logging.error('Connection was lost while sending change (giving up after {} retries)'.format(retry))
raise IOError('Connection was lost while sending change')
retry += 1
logging.warning('Connection was lost while sending change (retry {} of {}, will try again after {} seconds)'.format(retry, RECONNECT_TRIES, retry_delay))
time.sleep(retry_delay)
retry_delay *= RECONNECT_DELAY_BACKOFF
need_connect = True
self.prev_version = self.version
self.version = entry.version
return True
def rewind(self) -> bool:
'''Rewind to previous version.'''
version = struct.pack("!I", self.prev_version)
self._send_packet(PacketType.REWIND, version)
# Wait for change to be acknowledged before continuing.
(typ, _) = self._recv_packet()
assert(typ == PacketType.ACK)
return True
def stream_changes(self) -> Iterator[Change]:
self._send_packet(PacketType.RESTORE, b'')
version = -1
while True:
(typ, payload) = self._recv_packet()
if typ in PKT_CHANGE_TYPES:
change = change_from_packet(typ, payload)
version = change.version
yield change
elif typ == PacketType.DONE:
break
else:
raise ValueError("Unknown entry type {}".format(typ))
if version != self.version:
raise ValueError("Versions do not match up: restored version {}, backend version {}".format(version, self.version))
assert(version == self.version)
def compact(self):
self._send_packet(PacketType.COMPACT, b'')
(typ, payload) = self._recv_packet()
assert(typ == PacketType.COMPACT_RES)
return json.loads(payload.decode())
|
recipes/onedpl/all/conanfile.py | dvirtz/conan-center-index | 562 | 33587 | <gh_stars>100-1000
import os
from conans import ConanFile, CMake, tools
required_conan_version = ">=1.28.0"
class OneDplConan(ConanFile):
name = "onedpl"
description = ("OneDPL (Formerly Parallel STL) is an implementation of "
"the C++ standard library algorithms"
"with support for execution policies, as specified in "
"ISO/IEC 14882:2017 standard, commonly called C++17")
license = ("Apache-2.0", "LLVM-exception")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/oneapi-src/oneDPL"
topics = ("stl", "parallelism")
settings = "os", "arch", "build_type", "compiler"
options = {"backend": ["tbb", "serial"]}
default_options = {"backend": "tbb"}
generators = ["cmake", "cmake_find_package"]
exports = ["CMakeLists.txt"]
no_copy_source = True
@property
def _source_subfolder(self):
return "source_subfolder"
def configure(self):
if self.settings.compiler.cppstd:
tools.check_min_cppstd(self, 11)
def requirements(self):
if self.options.backend == "tbb":
self.requires("tbb/2020.2")
def package_id(self):
self.info.header_only()
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename("oneDPL-" + self.version, self._source_subfolder)
def _configure_cmake(self):
cmake = CMake(self)
cmake.definitions["PARALLELSTL_BACKEND"] = self.options.backend
cmake.configure()
return cmake
def package(self):
cmake = self._configure_cmake()
cmake.install()
self.copy("*", src=os.path.join(self._source_subfolder, "stdlib"), dst=os.path.join("lib", "stdlib"))
self.copy("LICENSE.txt", src=self._source_subfolder, dst="licenses")
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
def package_info(self):
self.cpp_info.filenames["cmake_find_package"] = "ParallelSTL"
self.cpp_info.filenames["cmake_find_package_multi"] = "ParallelSTL"
self.cpp_info.names["cmake_find_package"] = "pstl"
self.cpp_info.names["cmake_find_package_multi"] = "pstl"
self.cpp_info.components["_onedpl"].names["cmake_find_package"] = "ParallelSTL"
self.cpp_info.components["_onedpl"].names["cmake_find_package_multi"] = "ParallelSTL"
self.cpp_info.components["_onedpl"].includedirs = ["include", os.path.join("lib", "stdlib")]
if self.options.backend == "tbb":
self.cpp_info.components["_onedpl"].requires = ["tbb::tbb"]
|
python/GafferTest/ExtensionAlgoTest.py | ddesmond/gaffer | 561 | 33598 | ##########################################################################
#
# Copyright (c) 2019, <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import sys
import unittest
import functools
import Gaffer
import GafferTest
class ExtensionAlgoTest( GafferTest.TestCase ) :
def setUp( self ) :
GafferTest.TestCase.setUp( self )
self.addCleanup(
functools.partial( setattr, sys, "path", sys.path[:] )
)
def testExport( self ) :
# Export
box = Gaffer.Box( "AddOne" )
box["__add"] = GafferTest.AddNode()
box["__add"]["op2"].setValue( 1 )
Gaffer.PlugAlgo.promote( box["__add"]["op1"] ).setName( "in" )
Gaffer.PlugAlgo.promote( box["__add"]["sum"] ).setName( "out" )
Gaffer.Metadata.registerValue( box, "description", "Test" )
Gaffer.Metadata.registerValue( box["in"], "description", "The input" )
Gaffer.Metadata.registerValue( box["out"], "description", "The output" )
Gaffer.Metadata.registerValue( box["in"], "test", 1 )
Gaffer.ExtensionAlgo.exportExtension( "TestExtension", [ box ], self.temporaryDirectory() )
self.assertTrue( os.path.exists( os.path.join( self.temporaryDirectory(), "python", "TestExtension" ) ) )
sys.path.append( os.path.join( self.temporaryDirectory(), "python" ) )
# Import and test
import TestExtension
script = Gaffer.ScriptNode()
script["node"] = TestExtension.AddOne()
script["node"]["in"].setValue( 2 )
self.assertEqual( script["node"]["out"].getValue(), 3 )
import TestExtensionUI
def assertExpectedMetadata( node ) :
self.assertEqual( Gaffer.Metadata.registeredValues( node, instanceOnly = True ), [] )
self.assertEqual( Gaffer.Metadata.registeredValues( node["in"], instanceOnly = True ), [] )
self.assertEqual( Gaffer.Metadata.registeredValues( node["out"], instanceOnly = True ), [] )
self.assertEqual( Gaffer.Metadata.value( node, "description" ), "Test" )
self.assertEqual( Gaffer.Metadata.value( node["in"], "description" ), "The input" )
self.assertEqual( Gaffer.Metadata.value( node["out"], "description" ), "The output" )
self.assertEqual( Gaffer.Metadata.value( node["in"], "test" ), 1 )
assertExpectedMetadata( script["node"] )
# Copy/paste and test
script.execute( script.serialise( filter = Gaffer.StandardSet( { script["node"] } ) ) )
self.assertEqual( script["node1"].keys(), script["node"].keys() )
self.assertEqual( script["node1"]["out"].getValue(), script["node"]["out"].getValue() )
assertExpectedMetadata( script["node1"] )
def testPlugTypes( self ) :
box = Gaffer.Box( "PlugTypes" )
box["int"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
box["float"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
box["string"] = Gaffer.StringPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
box["v2i"] = Gaffer.V2iPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
box["v3i"] = Gaffer.V3iPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
box["color4f"] = Gaffer.Color4fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
box["spline"] = Gaffer.SplinefColor3fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
Gaffer.ExtensionAlgo.exportExtension( "PlugTypesExtension", [ box ], self.temporaryDirectory() )
sys.path.append( os.path.join( self.temporaryDirectory(), "python" ) )
import PlugTypesExtension
node = PlugTypesExtension.PlugTypes()
for plug in Gaffer.Plug.Range( node ) :
self.assertIsInstance( plug, type( box[plug.getName() ] ) )
if hasattr( plug, "getValue" ) :
self.assertEqual( plug.getValue(), box[plug.getName()].getValue() )
for plug in Gaffer.Plug.RecursiveRange( node ) :
self.assertFalse( plug.getFlags( Gaffer.Plug.Flags.Dynamic ) )
def testInternalExpression( self ) :
box = Gaffer.Box( "AddOne" )
box["in"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
box["out"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
box["__expression"] = Gaffer.Expression()
box["__expression"].setExpression( """parent["out"] = parent["in"] + 1""" )
Gaffer.ExtensionAlgo.exportExtension( "TestExtensionWithExpression", [ box ], self.temporaryDirectory() )
sys.path.append( os.path.join( self.temporaryDirectory(), "python" ) )
import TestExtensionWithExpression
script = Gaffer.ScriptNode()
script["node"] = TestExtensionWithExpression.AddOne()
script["node"]["in"].setValue( 2 )
self.assertEqual( script["node"]["out"].getValue(), 3 )
# Test copy/paste
script.execute( script.serialise( filter = Gaffer.StandardSet( { script["node"] } ) ) )
self.assertEqual( script["node1"].keys(), script["node"].keys() )
self.assertEqual( script["node1"]["out"].getValue(), 3 )
if __name__ == "__main__":
unittest.main()
|
kuwala/common/python_utils/src/time_utils.py | bmahmoudyan/kuwala | 381 | 33602 | import time
from time import sleep
def print_elapsed_time(exit_event):
start_time = time.time()
while True:
if exit_event.is_set():
break
print(f'Running for {round(time.time() - start_time)} s', end='\r')
sleep(1)
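# Usage sketch (not part of the original module): run the timer in a background
# thread and stop it by setting the event, e.g.
#
#     import threading
#     exit_event = threading.Event()
#     timer = threading.Thread(target=print_elapsed_time, args=(exit_event,))
#     timer.start()
#     ...          # do the actual work here
#     exit_event.set()
#     timer.join()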
|
test/priors/test_half_cauchy_prior.py | noamsgl/gpytorch | 188 | 33611 | #!/usr/bin/env python3
import unittest
import torch
from torch.distributions import HalfCauchy
from gpytorch.priors import HalfCauchyPrior
from gpytorch.test.utils import least_used_cuda_device
class TestHalfCauchyPrior(unittest.TestCase):
def test_half_cauchy_prior_to_gpu(self):
if torch.cuda.is_available():
            prior = HalfCauchyPrior(1.0).cuda()
            self.assertEqual(prior.scale.device.type, "cuda")
def test_half_cauchy_prior_validate_args(self):
with self.assertRaises(ValueError):
HalfCauchyPrior(-1, validate_args=True)
with self.assertRaises(ValueError):
            HalfCauchyPrior(0, validate_args=True)
def test_half_cauchy_prior_log_prob(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
prior = HalfCauchyPrior(0.1)
dist = HalfCauchy(0.1)
t = torch.tensor(1.0, device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
t = torch.tensor([1.5, 0.5], device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
t = torch.tensor([[1.0, 0.5], [3.0, 0.25]], device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
def test_half_cauchy_prior_log_prob_cuda(self):
if torch.cuda.is_available():
with least_used_cuda_device():
                return self.test_half_cauchy_prior_log_prob(cuda=True)
def test_half_cauchy_prior_log_prob_log_transform(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
prior = HalfCauchyPrior(0.1, transform=torch.exp)
dist = HalfCauchy(0.1)
t = torch.tensor(0.0, device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t.exp())))
t = torch.tensor([-1, 0.5], device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t.exp())))
t = torch.tensor([[-1, 0.5], [0.1, -2.0]], device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t.exp())))
def test_half_cauchy_prior_log_prob_log_transform_cuda(self):
if torch.cuda.is_available():
with least_used_cuda_device():
return self.test_half_cauchy_prior_log_prob_log_transform(cuda=True)
def test_half_cauchy_prior_batch_log_prob(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
prior = HalfCauchyPrior(0.1)
dist = HalfCauchy(0.1)
t = torch.ones(2, device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
t = torch.ones(2, 2, device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
scale = torch.tensor([0.1, 1.0], device=device)
prior = HalfCauchyPrior(scale)
dist = HalfCauchy(scale)
t = torch.ones(2, device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
t = torch.ones(2, 2, device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
with self.assertRaises(ValueError):
prior.log_prob(torch.ones(3, device=device))
with self.assertRaises(ValueError):
prior.log_prob(torch.ones(2, 3, device=device))
def test_half_cauchy_prior_batch_log_prob_cuda(self):
if torch.cuda.is_available():
with least_used_cuda_device():
return self.test_half_cauchy_prior_batch_log_prob(cuda=True)
if __name__ == "__main__":
unittest.main()
|
Tools/decrypt_ulog.py | lgarciaos/Firmware | 4,224 | 33612 | #!/usr/bin/env python3
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
from Crypto.Cipher import ChaCha20
from Crypto.Hash import SHA256
import binascii
import argparse
#from pathlib import Path
import sys
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""CLI tool to decrypt an ulog file\n""")
parser.add_argument("ulog_file", help=".ulog file", nargs='?', default=None)
parser.add_argument("ulog_key", help=".ulogk, encrypted key", nargs='?', default=None)
parser.add_argument("rsa_key", help=".pem format key for decrypting the ulog key", nargs='?', default=None)
args = parser.parse_args()
    # All three arguments are required
if not args.ulog_file or not args.ulog_key or not args.rsa_key:
print('Need all arguments, the encrypted ulog file, the key and the key decryption key')
        sys.exit(1)
    # Read the private RSA key to decrypt the chacha key
with open(args.rsa_key, 'rb') as f:
r = RSA.importKey(f.read(), passphrase='')
# Read the encrypted xchacha key and the nonce
with open(args.ulog_key, 'rb') as f:
ulog_key_header = f.read(22)
# Parse the header
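    # Layout of the 22-byte header, as consumed below: bytes 0..6 magic
    # "ULogKey", byte 7 format version (must be 1), byte 16 key-exchange
    # algorithm id (4 = RSA_OAEP), bytes 18-19 and 20-21 the sizes of the
    # wrapped key and of the nonce (little-endian uint16), followed in the
    # file by the wrapped key and the nonce themselves.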
try:
# magic
if not ulog_key_header.startswith(bytearray("ULogKey".encode())):
raise Exception()
# version
if ulog_key_header[7] != 1:
raise Exception()
# expected key exchange algorithm (RSA_OAEP)
if ulog_key_header[16] != 4:
raise Exception()
key_size = ulog_key_header[19] << 8 | ulog_key_header[18];
nonce_size = ulog_key_header[21] << 8 | ulog_key_header[20];
ulog_key_cipher = f.read(key_size)
nonce = f.read(nonce_size)
except:
print("Keyfile format error")
        sys.exit(1)
# Decrypt the xchacha key
cipher_rsa = PKCS1_OAEP.new(r,SHA256)
ulog_key = cipher_rsa.decrypt(ulog_key_cipher)
#print(binascii.hexlify(ulog_key))
# Read and decrypt the .ulgc
cipher = ChaCha20.new(key=ulog_key, nonce=nonce)
with open(args.ulog_file, 'rb') as f:
        with open(args.ulog_file[:-1], 'wb') as out:  # drop the trailing 'c' of '.ulgc'
out.write(cipher.decrypt(f.read()))
|
alipay/aop/api/domain/ArInvoiceReceiptQueryOpenApiDTO.py | antopen/alipay-sdk-python-all | 213 | 33632 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
class ArInvoiceReceiptQueryOpenApiDTO(object):
def __init__(self):
self._arrangement_no = None
self._id = None
self._inst_id = None
self._inv_dt = None
self._inv_mode = None
self._invoice_amt = None
self._invoiced_amt = None
self._ip_id = None
self._ip_role_id = None
self._link_invoice_amt = None
self._out_biz_no = None
self._pd_code = None
self._settle_type = None
self._statement_bill_no = None
self._status = None
self._tax_rate = None
self._tax_type = None
@property
def arrangement_no(self):
return self._arrangement_no
@arrangement_no.setter
def arrangement_no(self, value):
self._arrangement_no = value
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def inst_id(self):
return self._inst_id
@inst_id.setter
def inst_id(self, value):
self._inst_id = value
@property
def inv_dt(self):
return self._inv_dt
@inv_dt.setter
def inv_dt(self, value):
self._inv_dt = value
@property
def inv_mode(self):
return self._inv_mode
@inv_mode.setter
def inv_mode(self, value):
self._inv_mode = value
@property
def invoice_amt(self):
return self._invoice_amt
@invoice_amt.setter
def invoice_amt(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._invoice_amt = value
else:
self._invoice_amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def invoiced_amt(self):
return self._invoiced_amt
@invoiced_amt.setter
def invoiced_amt(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._invoiced_amt = value
else:
self._invoiced_amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def ip_id(self):
return self._ip_id
@ip_id.setter
def ip_id(self, value):
self._ip_id = value
@property
def ip_role_id(self):
return self._ip_role_id
@ip_role_id.setter
def ip_role_id(self, value):
self._ip_role_id = value
@property
def link_invoice_amt(self):
return self._link_invoice_amt
@link_invoice_amt.setter
def link_invoice_amt(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._link_invoice_amt = value
else:
self._link_invoice_amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def pd_code(self):
return self._pd_code
@pd_code.setter
def pd_code(self, value):
self._pd_code = value
@property
def settle_type(self):
return self._settle_type
@settle_type.setter
def settle_type(self, value):
self._settle_type = value
@property
def statement_bill_no(self):
return self._statement_bill_no
@statement_bill_no.setter
def statement_bill_no(self, value):
self._statement_bill_no = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def tax_rate(self):
return self._tax_rate
@tax_rate.setter
def tax_rate(self, value):
self._tax_rate = value
@property
def tax_type(self):
return self._tax_type
@tax_type.setter
def tax_type(self, value):
self._tax_type = value
def to_alipay_dict(self):
params = dict()
if self.arrangement_no:
if hasattr(self.arrangement_no, 'to_alipay_dict'):
params['arrangement_no'] = self.arrangement_no.to_alipay_dict()
else:
params['arrangement_no'] = self.arrangement_no
if self.id:
if hasattr(self.id, 'to_alipay_dict'):
params['id'] = self.id.to_alipay_dict()
else:
params['id'] = self.id
if self.inst_id:
if hasattr(self.inst_id, 'to_alipay_dict'):
params['inst_id'] = self.inst_id.to_alipay_dict()
else:
params['inst_id'] = self.inst_id
if self.inv_dt:
if hasattr(self.inv_dt, 'to_alipay_dict'):
params['inv_dt'] = self.inv_dt.to_alipay_dict()
else:
params['inv_dt'] = self.inv_dt
if self.inv_mode:
if hasattr(self.inv_mode, 'to_alipay_dict'):
params['inv_mode'] = self.inv_mode.to_alipay_dict()
else:
params['inv_mode'] = self.inv_mode
if self.invoice_amt:
if hasattr(self.invoice_amt, 'to_alipay_dict'):
params['invoice_amt'] = self.invoice_amt.to_alipay_dict()
else:
params['invoice_amt'] = self.invoice_amt
if self.invoiced_amt:
if hasattr(self.invoiced_amt, 'to_alipay_dict'):
params['invoiced_amt'] = self.invoiced_amt.to_alipay_dict()
else:
params['invoiced_amt'] = self.invoiced_amt
if self.ip_id:
if hasattr(self.ip_id, 'to_alipay_dict'):
params['ip_id'] = self.ip_id.to_alipay_dict()
else:
params['ip_id'] = self.ip_id
if self.ip_role_id:
if hasattr(self.ip_role_id, 'to_alipay_dict'):
params['ip_role_id'] = self.ip_role_id.to_alipay_dict()
else:
params['ip_role_id'] = self.ip_role_id
if self.link_invoice_amt:
if hasattr(self.link_invoice_amt, 'to_alipay_dict'):
params['link_invoice_amt'] = self.link_invoice_amt.to_alipay_dict()
else:
params['link_invoice_amt'] = self.link_invoice_amt
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
if self.pd_code:
if hasattr(self.pd_code, 'to_alipay_dict'):
params['pd_code'] = self.pd_code.to_alipay_dict()
else:
params['pd_code'] = self.pd_code
if self.settle_type:
if hasattr(self.settle_type, 'to_alipay_dict'):
params['settle_type'] = self.settle_type.to_alipay_dict()
else:
params['settle_type'] = self.settle_type
if self.statement_bill_no:
if hasattr(self.statement_bill_no, 'to_alipay_dict'):
params['statement_bill_no'] = self.statement_bill_no.to_alipay_dict()
else:
params['statement_bill_no'] = self.statement_bill_no
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
if self.tax_rate:
if hasattr(self.tax_rate, 'to_alipay_dict'):
params['tax_rate'] = self.tax_rate.to_alipay_dict()
else:
params['tax_rate'] = self.tax_rate
if self.tax_type:
if hasattr(self.tax_type, 'to_alipay_dict'):
params['tax_type'] = self.tax_type.to_alipay_dict()
else:
params['tax_type'] = self.tax_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ArInvoiceReceiptQueryOpenApiDTO()
if 'arrangement_no' in d:
o.arrangement_no = d['arrangement_no']
if 'id' in d:
o.id = d['id']
if 'inst_id' in d:
o.inst_id = d['inst_id']
if 'inv_dt' in d:
o.inv_dt = d['inv_dt']
if 'inv_mode' in d:
o.inv_mode = d['inv_mode']
if 'invoice_amt' in d:
o.invoice_amt = d['invoice_amt']
if 'invoiced_amt' in d:
o.invoiced_amt = d['invoiced_amt']
if 'ip_id' in d:
o.ip_id = d['ip_id']
if 'ip_role_id' in d:
o.ip_role_id = d['ip_role_id']
if 'link_invoice_amt' in d:
o.link_invoice_amt = d['link_invoice_amt']
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
if 'pd_code' in d:
o.pd_code = d['pd_code']
if 'settle_type' in d:
o.settle_type = d['settle_type']
if 'statement_bill_no' in d:
o.statement_bill_no = d['statement_bill_no']
if 'status' in d:
o.status = d['status']
if 'tax_rate' in d:
o.tax_rate = d['tax_rate']
if 'tax_type' in d:
o.tax_type = d['tax_type']
return o
|
train_hg_seqnet.py | middleprince/fashionAi | 316 | 33649 | <filename>train_hg_seqnet.py
# Copyright 2018 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tensorflow as tf
import config
# scaffold related configuration
tf.app.flags.DEFINE_string(
'data_dir', '../Datasets/tfrecords',
'The directory where the dataset input data is stored.')
tf.app.flags.DEFINE_string(
'dataset_name', '{}_????', 'The pattern of the dataset name to load.')
tf.app.flags.DEFINE_string(
#'blouse', 'dress', 'outwear', 'skirt', 'trousers', '*'
'dataset_split_name', 'blouse', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'model_dir', './logs/',
'The parent directory where the model will be stored.')
tf.app.flags.DEFINE_integer(
'save_checkpoints_secs', 3600,
'The frequency with which the model is saved, in seconds.')
# model related configuration
tf.app.flags.DEFINE_integer(
'train_image_size', 256,
'The size of the input image for the model to use.')
tf.app.flags.DEFINE_integer(
'heatmap_size', 64,
'The size of the output heatmap of the model.')
tf.app.flags.DEFINE_float(
'heatmap_sigma', 1.,
'The sigma of Gaussian which generate the target heatmap.')
tf.app.flags.DEFINE_integer('feats_channals', 256, 'Number of features in the hourglass.')
tf.app.flags.DEFINE_integer('num_stacks', 8, 'Number of hourglasses to stack.')#8
tf.app.flags.DEFINE_integer('num_modules', 1, 'Number of residual modules at each location in the hourglass.')
tf.app.flags.DEFINE_float(
'bbox_border', 25.,
'The nearest distance of the crop border to al keypoints.')
tf.app.flags.DEFINE_integer(
'train_epochs', 5,
'The number of epochs to use for training.')
tf.app.flags.DEFINE_integer(
'epochs_per_eval', 1,
'The number of training epochs to run between evaluations.')
tf.app.flags.DEFINE_integer(
'batch_size', 6,
'Batch size for training and evaluation.')
tf.app.flags.DEFINE_string(
'data_format', 'channels_first', # 'channels_first' or 'channels_last'
'A flag to override the data format used in the model. channels_first '
'provides a performance boost on GPU but is not always compatible '
'with CPU. If left unspecified, the data format will be chosen '
'automatically based on whether TensorFlow was built for CPU or GPU.')
# optimizer related configuration
tf.app.flags.DEFINE_integer(
'tf_random_seed', 20180406, 'Random seed for TensorFlow initializers.')
tf.app.flags.DEFINE_float(
'weight_decay', 0.00000, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float(
'mse_weight', 1.0, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float(
'momentum', 0.0,#0.9
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('learning_rate', 2.5e-4, 'Initial learning rate.')#2.5e-4
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.000001,
'The minimal end learning rate used by a polynomial decay learning rate.')
tf.app.flags.DEFINE_float(
'warmup_learning_rate', 0.00001,
'The start warm-up learning rate to avoid NAN.')
tf.app.flags.DEFINE_integer(
'warmup_steps', 100,
'The total steps to warm-up.')
# for learning rate piecewise_constant decay
tf.app.flags.DEFINE_string(
'decay_boundaries', '2, 3',
'Learning rate decay boundaries by global_step (comma-separated list).')
tf.app.flags.DEFINE_string(
'lr_decay_factors', '1, 0.5, 0.1',
'The values of learning_rate decay factor for each segment between boundaries (comma-separated list).')
# checkpoint related configuration
tf.app.flags.DEFINE_string(
'checkpoint_path', None,
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'checkpoint_model_scope', None,
'Model scope in the checkpoint. None if the same as the trained model.')
tf.app.flags.DEFINE_string(
#'blouse', 'dress', 'outwear', 'skirt', 'trousers', 'all'
'model_scope', 'all',
'Model scope name used to replace the name_scope in checkpoint.')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', None,#'all/hg_heatmap',#
'Comma-separated list of scopes of variables to exclude when restoring from a checkpoint.')
tf.app.flags.DEFINE_boolean(
'run_on_cloud', True,
'Wether we will train on cloud.')
tf.app.flags.DEFINE_boolean(
'seq_train', True,
'Wether we will train a sequence model.')
tf.app.flags.DEFINE_string(
'model_to_train', 'all, blouse, dress, outwear, skirt, trousers', #'all, blouse, dress, outwear, skirt, trousers', 'skirt, dress, outwear, trousers',
'The sub-model to train (comma-separated list).')
FLAGS = tf.app.flags.FLAGS
total_params = {
'--data_dir': FLAGS.data_dir,
'--dataset_name': FLAGS.dataset_name,
#'blouse', 'dress', 'outwear', 'skirt', 'trousers', '*'
'--model_dir': FLAGS.model_dir,
'--save_checkpoints_secs': FLAGS.save_checkpoints_secs,
'--train_image_size': FLAGS.train_image_size,
'--heatmap_size': FLAGS.heatmap_size,
'--heatmap_sigma': FLAGS.heatmap_sigma,
'--feats_channals': FLAGS.feats_channals,
'--num_stacks': FLAGS.num_stacks,
'--num_modules': FLAGS.num_modules,
'--bbox_border': FLAGS.bbox_border,
'--train_epochs': FLAGS.train_epochs,
'--epochs_per_eval': FLAGS.epochs_per_eval,
'--batch_size': FLAGS.batch_size,
'--data_format': FLAGS.data_format,
'--tf_random_seed': FLAGS.tf_random_seed,
'--weight_decay': FLAGS.weight_decay,
'--mse_weight': FLAGS.mse_weight,
'--momentum': FLAGS.momentum,
'--learning_rate': FLAGS.learning_rate,
'--end_learning_rate': FLAGS.end_learning_rate,
'--warmup_learning_rate': FLAGS.warmup_learning_rate,
'--warmup_steps': FLAGS.warmup_steps,
'--decay_boundaries': FLAGS.decay_boundaries,
'--lr_decay_factors': FLAGS.lr_decay_factors,
'--checkpoint_path': FLAGS.checkpoint_path,
'--checkpoint_model_scope': FLAGS.checkpoint_model_scope,
'--model_scope': FLAGS.model_scope,
'--checkpoint_exclude_scopes': FLAGS.checkpoint_exclude_scopes,
'--run_on_cloud': FLAGS.run_on_cloud
}
if FLAGS.seq_train:
detail_params = {
'all': {
'model_dir' : os.path.join(FLAGS.model_dir, 'all'),
'train_epochs': 6,
'epochs_per_eval': 3,
'decay_boundaries': '3, 4',
'model_scope': 'all',
},
'blouse': {
'model_dir' : os.path.join(FLAGS.model_dir, 'blouse'),
'train_epochs': 50,
'epochs_per_eval': 20,
'decay_boundaries': '15, 30',
'model_scope': 'blouse',
'checkpoint_path': os.path.join(FLAGS.model_dir, 'all'),
'checkpoint_model_scope': 'all',
'checkpoint_exclude_scopes': 'blouse/hg_heatmap',
},
'dress': {
'model_dir' : os.path.join(FLAGS.model_dir, 'dress'),
'train_epochs': 50,
'epochs_per_eval': 20,
'decay_boundaries': '15, 30',
'model_scope': 'dress',
'checkpoint_path': os.path.join(FLAGS.model_dir, 'all'),
'checkpoint_model_scope': 'all',
'checkpoint_exclude_scopes': 'dress/hg_heatmap',
},
'outwear': {
'model_dir' : os.path.join(FLAGS.model_dir, 'outwear'),
'train_epochs': 50,
'epochs_per_eval': 20,
'decay_boundaries': '15, 30',
'model_scope': 'outwear',
'checkpoint_path': os.path.join(FLAGS.model_dir, 'all'),
'checkpoint_model_scope': 'all',
'checkpoint_exclude_scopes': 'outwear/hg_heatmap',
},
'skirt': {
'model_dir' : os.path.join(FLAGS.model_dir, 'skirt'),
'train_epochs': 50,
'epochs_per_eval': 20,
'decay_boundaries': '15, 30',
'model_scope': 'skirt',
'checkpoint_path': os.path.join(FLAGS.model_dir, 'all'),
'checkpoint_model_scope': 'all',
'checkpoint_exclude_scopes': 'skirt/hg_heatmap',
},
'trousers': {
'model_dir' : os.path.join(FLAGS.model_dir, 'trousers'),
'train_epochs': 50,
'epochs_per_eval': 20,
'decay_boundaries': '15, 30',
'model_scope': 'trousers',
'checkpoint_path': os.path.join(FLAGS.model_dir, 'all'),
'checkpoint_model_scope': 'all',
'checkpoint_exclude_scopes': 'trousers/hg_heatmap',
},
}
else:
detail_params = {
'blouse': {
'model_dir' : os.path.join(FLAGS.model_dir, 'blouse'),
'train_epochs': 60,
'epochs_per_eval': 20,
'decay_boundaries': '20, 40',
'model_scope': 'blouse',
},
'dress': {
'model_dir' : os.path.join(FLAGS.model_dir, 'dress'),
'train_epochs': 60,
'epochs_per_eval': 20,
'decay_boundaries': '20, 40',
'model_scope': 'dress',
},
'outwear': {
'model_dir' : os.path.join(FLAGS.model_dir, 'outwear'),
'train_epochs': 60,
'epochs_per_eval': 20,
'decay_boundaries': '20, 40',
'model_scope': 'outwear',
},
'skirt': {
'model_dir' : os.path.join(FLAGS.model_dir, 'skirt'),
'train_epochs': 60,
'epochs_per_eval': 20,
'decay_boundaries': '20, 40',
'model_scope': 'skirt',
},
'trousers': {
'model_dir' : os.path.join(FLAGS.model_dir, 'trousers'),
'train_epochs': 60,
'epochs_per_eval': 20,
'decay_boundaries': '20, 40',
'model_scope': 'trousers',
},
}
def parse_comma_list(args):
return [float(s.strip()) for s in args.split(',')]
def parse_str_comma_list(args):
return [s.strip() for s in args.split(',')]
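# Examples: parse_comma_list('1, 0.5, 0.1') -> [1.0, 0.5, 0.1];
# parse_str_comma_list('all, blouse') -> ['all', 'blouse'].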
def main(_):
import subprocess
import copy
#['skirt', 'dress', 'outwear', 'trousers']#
all_category = parse_str_comma_list(FLAGS.model_to_train)
for cat in all_category:
tf.gfile.MakeDirs(os.path.join(FLAGS.model_dir, cat))
for cat in all_category:
temp_params = copy.deepcopy(total_params)
for k, v in total_params.items():
if k[2:] in detail_params[cat]:
temp_params[k] = detail_params[cat][k[2:]]
params_str = []
for k, v in temp_params.items():
if v is not None:
params_str.append(k)
params_str.append(str(v))
print('params send: ', params_str)
train_process = subprocess.Popen(['python', './train_subnet.py'] + params_str, stdout=subprocess.PIPE, cwd=os.getcwd())
output, _ = train_process.communicate()
print(output)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
|
tests/unit/utils/test_instantiate.py | schiotz/nequip | 153 | 33658 | import pytest
import yaml
from nequip.utils import instantiate
simple_default = {"b": 1, "d": 31}
class SimpleExample:
def __init__(self, a, b=simple_default["b"], d=simple_default["d"]):
self.a = a
self.b = b
self.d = d
nested_default = {"d": 37}
class NestedExample:
def __init__(self, cls_c, a, cls_c_kwargs={}, d=nested_default["d"]):
self.c_obj = cls_c(**cls_c_kwargs)
self.a = a
self.d = d
def assert_dict(d):
for k, v in d.items():
if isinstance(v, dict):
assert_dict(v)
elif isinstance(v, str):
assert k == v
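# assert_dict() checks the key mapping returned by instantiate(...,
# return_args_only=True): every string leaf must map a name to itself,
# i.e. no prefixed or renamed keys remain after a round trip.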
@pytest.mark.parametrize("positional_args", [dict(a=3, b=4), dict(a=5), dict()])
@pytest.mark.parametrize("optional_args", [dict(a=3, b=4), dict(a=5), dict()])
@pytest.mark.parametrize("all_args", [dict(a=6, b=7), dict(a=8), dict()])
@pytest.mark.parametrize("prefix", [True, False])
def test_simple_init(positional_args, optional_args, all_args, prefix):
union = {}
union.update(all_args)
union.update(optional_args)
union.update(positional_args)
if "a" not in union:
return
# decorate test with prefix
_all_args = (
{"simple_example_" + k: v for k, v in all_args.items()} if prefix else all_args
)
# check key mapping is correct
km, params = instantiate(
builder=SimpleExample,
prefix="simple_example",
positional_args=positional_args,
optional_args=optional_args,
all_args=_all_args,
return_args_only=True,
)
for t in km:
for k, v in km[t].items():
assert k in locals()[t + "_args"]
if prefix and t == "all":
assert v == "simple_example_" + k
else:
assert v == k
km, _ = instantiate(
builder=SimpleExample,
prefix="simple_example",
positional_args=positional_args,
all_args=params,
return_args_only=True,
)
assert_dict(km)
# check whether it gets the priority right
a1, params = instantiate(
builder=SimpleExample,
prefix="simple_example",
positional_args=positional_args,
optional_args=optional_args,
all_args=_all_args,
)
assert a1.a == union["a"]
if "b" in union:
assert a1.b == union["b"]
else:
assert a1.b == simple_default["b"]
for k in params:
if k in simple_default:
assert params[k] == union.get(k, simple_default[k])
# check whether the return value is right
a2 = SimpleExample(**positional_args, **params)
assert a1.a == a2.a
assert a1.b == a2.b
def test_prefix_priority():
args = {"prefix_a": 3, "a": 4}
a, params = instantiate(
builder=SimpleExample,
prefix="prefix",
all_args=args,
)
assert a.a == 3
@pytest.mark.parametrize("optional_args", [dict(a=3, b=4), dict(a=5), dict()])
@pytest.mark.parametrize("all_args", [dict(a=6, b=7), dict(a=8), dict()])
@pytest.mark.parametrize("prefix", [True, False])
def test_nested_kwargs(optional_args, all_args, prefix):
union = {}
union.update(all_args)
union.update(optional_args)
if "a" not in union:
return
c, params = instantiate(
builder=NestedExample,
prefix="prefix",
positional_args={"cls_c": SimpleExample},
optional_args=optional_args,
all_args=all_args,
)
def test_default():
"""
    check that the default value does not contaminate the other class
"""
c, params = instantiate(
builder=NestedExample,
prefix="prefix",
positional_args={"cls_c": SimpleExample},
optional_args={"a": 11},
)
    assert c.d == nested_default["d"]
    assert c.c_obj.d == simple_default["d"]
class A:
def __init__(self, cls_a, cls_a_kwargs):
self.a_obj = cls_a(**cls_a_kwargs)
class B:
def __init__(self, cls_b, cls_b_kwargs):
self.b_obj = cls_b(**cls_b_kwargs)
class C:
def __init__(self, cls_c, cls_c_kwargs): # noqa
self.c_obj = c_cls(**c_cls_kwargs) # noqa
def test_deep_nests():
all_args = {"a": 101, "b": 103, "c": 107}
obj, params = instantiate(
builder=NestedExample,
optional_args={"cls_c": A, "cls_a": B, "cls_b": SimpleExample},
all_args=all_args,
)
print(yaml.dump(params))
assert obj.c_obj.a_obj.b_obj.a == all_args["a"]
assert obj.c_obj.a_obj.b_obj.b == all_args["b"]
assert obj.c_obj.a_obj.b_obj.d == simple_default["d"]
assert obj.d == nested_default["d"]
obj = NestedExample(**params)
assert obj.c_obj.a_obj.b_obj.a == all_args["a"]
assert obj.c_obj.a_obj.b_obj.b == all_args["b"]
assert obj.c_obj.a_obj.b_obj.d == simple_default["d"]
assert obj.d == nested_default["d"]
km, params = instantiate(
builder=NestedExample,
optional_args={"cls_c": A, "cls_a": B, "cls_b": SimpleExample},
all_args=all_args,
return_args_only=True,
)
print(yaml.dump(km))
    # check that the key mapping stays consistent when rebuilt from the returned params
km, _ = instantiate(
builder=NestedExample, optional_args=params, return_args_only=True
)
assert_dict(km)
def test_recursion_nests():
with pytest.raises(RuntimeError) as excinfo:
b, params = instantiate(
builder=A,
positional_args={"cls_a": B},
optional_args={"cls_b": A},
)
assert "cyclic" in str(excinfo.value)
print(excinfo)
def test_cyclic_nests():
with pytest.raises(RuntimeError) as excinfo:
c, params = instantiate(
builder=A,
positional_args={"cls_a": B},
optional_args={"cls_b": C},
all_args={"cls_c": A},
)
assert "cyclic" in str(excinfo.value)
print(excinfo, "hello")
class BadKwargs1:
def __init__(self, thing_kwargs={}):
pass
class BadKwargs2:
def __init__(self, thing="a string", thing_kwargs={}):
pass
def test_bad_kwargs():
with pytest.raises(KeyError):
_ = instantiate(BadKwargs1)
with pytest.raises(ValueError):
_ = instantiate(BadKwargs2)
|
querybook/server/datasources_socketio/connect.py | shivammmmm/querybook | 1,144 | 33664 | from flask_login import current_user
from flask_socketio import ConnectionRefusedError
from app.flask_app import socketio
from const.data_doc import DATA_DOC_NAMESPACE
from const.query_execution import QUERY_EXECUTION_NAMESPACE
def connect():
if not current_user.is_authenticated:
raise ConnectionRefusedError("User is not logged in, please refresh the page.")
socketio.on("connect", namespace=DATA_DOC_NAMESPACE)(connect)
socketio.on("connect", namespace=QUERY_EXECUTION_NAMESPACE)(connect)
|
Chapter 12/ch12_r03.py | PacktPublishing/Modern-Python-Cookbook | 107 | 33672 | """
{
"swagger": "2.0",
"info": {
"title": "Python Cookbook\\nChapter 12, recipe 3.",
"version": "1.0"
},
"schemes": "http",
"host": "127.0.0.1:5000",
"basePath": "/dealer",
"consumes": "application/json",
"produces": "application/json",
"paths": {
"/hands": {
"get": {
"parameters": [
{"name": "cards",
"in": "query",
"description": "number of cards in each hand",
"type": "array", "items": {"type": "integer"},
"collectionFormat": "multi",
"default": [13, 13, 13, 13]
}
],
"responses": {
"200": {
"description": "one hand of cards for each `hand` value in the query string"
}
}
}
},
"/hand": {
"get": {
"parameters": [
{"name": "cards", "in": "query", "type": "integer", "default": 5}
],
"responses": {
"200": {
"description": "One hand of cards with a size given by the `hand` value in the query string"
}
}
}
}
}
}
"""
import random
from ch12_r01 import Card, Deck
from flask import Flask, jsonify, request, abort
from http import HTTPStatus
dealer = Flask('dealer')
specification = {
'swagger': '2.0',
'info': {
'title': '''Python Cookbook\nChapter 12, recipe 3.''',
'version': '1.0'
},
'schemes': ['http'],
'host': '127.0.0.1:5000',
'basePath': '/dealer',
'consumes': ['application/json'],
'produces': ['application/json'],
'paths': {
'/hands': {
'get': {
'parameters': [
{
'name': 'cards',
'in': 'query',
'description': 'number of cards in each hand',
'type': 'array',
'items': {'type': 'integer'},
'collectionFormat': 'multi',
'default': [13, 13, 13, 13]
}
],
'responses': {
'200': {
'description': '''One hand of cards for each `hand` value in the query string'''
}
}
}
},
'/hand': {
'get': {
'parameters': [
{
'name': 'cards',
'in': 'query',
'type': 'integer',
'default': 5
}
],
'responses': {
'200': {
'description': '''One hand of cards with a size given by the `hand` value in the query string'''
}
}
}
}
}
}
import os
random.seed(os.environ.get('DEAL_APP_SEED'))
deck = Deck()
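# Every request must ask for JSON -- either through an Accept header containing
# "json" or an explicit $format=json query parameter; the swagger spec itself is
# exempt, and anything else is rejected with 400 BAD REQUEST.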
@dealer.before_request
def check_json():
if request.path == '/dealer/swagger.json':
return
if 'json' in request.headers.get('Accept', '*/*'):
return
if 'json' == request.args.get('$format', 'html'):
return
return abort(HTTPStatus.BAD_REQUEST)
from flask import send_file
# @dealer.route('/dealer/swagger.json')
def swagger1():
response = send_file('swagger.json', mimetype='application/json')
return response
from flask import make_response
# @dealer.route('/dealer/swagger.json')
def swagger2():
response = make_response(__doc__.encode('utf-8'))
response.headers['Content-Type'] = 'application/json'
return response
from flask import make_response
import json
@dealer.route('/dealer/swagger.json')
def swagger3():
response = make_response(json.dumps(specification, indent=2).encode('utf-8'))
response.headers['Content-Type'] = 'application/json'
return response
@dealer.route('/dealer/hand/')
def deal():
try:
hand_size = int(request.args.get('cards', 5))
assert 1 <= hand_size < 53
except Exception as ex:
abort(HTTPStatus.BAD_REQUEST)
cards = deck.deal(hand_size)
response = jsonify([card.to_json() for card in cards])
return response
@dealer.route('/dealer/hands/')
def multi_hand():
try:
hand_sizes = request.args.getlist('cards', type=int)
if len(hand_sizes) == 0:
hand_sizes = [13,13,13,13]
assert all(1 <= hand_size < 53 for hand_size in hand_sizes)
assert sum(hand_sizes) < 53
except Exception as ex:
dealer.logger.exception(ex)
abort(HTTPStatus.BAD_REQUEST)
hands = [deck.deal(hand_size) for hand_size in hand_sizes]
response = jsonify(
[
{'hand':i, 'cards':[card.to_json() for card in hand]}
for i, hand in enumerate(hands)
]
)
return response
if __name__ == "__main__":
dealer.run(use_reloader=True, threaded=False)
|
thespian/test/__init__.py | dendron2000/Thespian | 210 | 33704 | """Defines various classes and definitions that provide assistance for
unit testing Actors in an ActorSystem."""
import unittest
import pytest
import logging
import time
from thespian.actors import ActorSystem
def simpleActorTestLogging():
"""This function returns a logging dictionary that can be passed as
the logDefs argument for ActorSystem() initialization to get
simple stdout logging configuration. This is not necessary for
typical unit testing that uses the simpleActorSystemBase, but
it can be useful for multiproc.. ActorSystems where the
separate processes created should have a very simple logging
configuration.
"""
import sys
if sys.platform == 'win32':
# Windows will not allow sys.stdout to be passed to a child
# process, which breaks the startup/config for some of the
# tests.
handler = { 'class': 'logging.handlers.RotatingFileHandler',
'filename': 'nosetests.log',
'maxBytes': 256*1024,
'backupCount':3,
}
else:
handler = { 'class': 'logging.StreamHandler',
'stream': sys.stdout,
}
return {
'version' : 1,
'handlers': { #'discarder': {'class': 'logging.NullHandler' },
'testStream' : handler,
},
'root': { 'handlers': ['testStream'] },
'disable_existing_loggers': False,
}
class LocallyManagedActorSystem(object):
def setSystemBase(self, newBase='simpleSystemBase', systemCapabilities=None, logDefs='BestForBase'):
newBaseStr = str(newBase)
if not hasattr(self, 'currentBase') or self.currentBase != newBaseStr:
ldefs = logDefs if logDefs != 'BestForBase' else (simpleActorTestLogging() if newBase.startswith('multiproc') else False)
# In case the ActorSystem was *already* setup, break the singleton aspect and re-init
ActorSystem(logDefs = ldefs).shutdown()
ActorSystem(newBase, systemCapabilities, logDefs = ldefs)
self.currentBase = newBaseStr
class ActorSystemTestCase(unittest.TestCase, LocallyManagedActorSystem):
"""The ActorSystemTestCase is a wrapper for the unittest TestCase
class that will startup a default ActorSystem in the provided
setUp() and tearDown() any active ActorSystem after testing.
If a non-default ActorSystem is to be used, the setSystemBase()
method should be called with that system base.
It also provides some additional methods for assistance in testing Actors.
"""
def setUp(self):
if not hasattr(self, 'currentBase'):
self.setSystemBase()
def tearDown(self):
if hasattr(self, 'currentBase'):
ActorSystem().shutdown()
delattr(self, 'currentBase')
import time
time.sleep(0.02)
@staticmethod
def actualActorObject(actorClass):
"""Normally an Actor is only instantiated in the context of an
ActorSystem, and then only responds to messages delivered
via that system. For testing purposes *only*, it may be
           desirable to have the actual Actor instance to test
methods on that Actor directly. This method will return
that actual Actor instance after instantiating the actor in
an ActorSystem.
This method can ONLY be used with an ActorSystem that will
instantiate the Actor in the context of the current process
(e.g. simpleSystemBase) and the methods tested on the
resulting Actor CANNOT perform any Actor-related actions
(e.g. self.createActor(), self.send()).
This method is for TESTING only under very special
circumstances; if you're not sure you need this, then you
probably don't.
"""
# Create the Actor within the system.
aAddr = ActorSystem().createActor(actorClass)
# This depends on the internals of the systemBase
return ActorSystem()._systemBase.actorRegistry[aAddr.actorAddressString].instance
###
### pytest fixtures and helpers
###
testAdminPort = None
def get_free_admin_port_random():
global testAdminPort
if testAdminPort is None:
import random
# Reserved system ports are typically below 1024. Ephemeral
# ports typically start at either 32768 (Linux) or 49152
# (IANA), or range from 1024-5000 (older Windows). Pick
# something unused outside those ranges for the admin.
testAdminPort = random.randint(10000, 30000)
#testAdminPort = random.randint(5,60) * 1000
else:
testAdminPort = testAdminPort + 1
return testAdminPort
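# Preferred helper: probe random candidate ports by actually binding TCP and UDP
# sockets, falling back to the purely random scheme above if no free port is
# found within 100 attempts.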
def get_free_admin_port():
import socket
import random
for tries in range(100):
port = random.randint(5000, 30000)
try:
for m,p in [ (socket.SOCK_STREAM, socket.IPPROTO_TCP),
(socket.SOCK_DGRAM, socket.IPPROTO_UDP),
]:
s = socket.socket(socket.AF_INET, m, p)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('',port))
s.close()
return port
except Exception:
pass
return get_free_admin_port_random()
@pytest.fixture(params=['simpleSystemBase',
'multiprocQueueBase',
'multiprocUDPBase',
'multiprocTCPBase',
'multiprocTCPBase-AdminRouting',
'multiprocTCPBase-AdminRoutingTXOnly',
])
def asys(request):
caps = {'Foo Allowed': True,
'Cows Allowed': True,
'Dogs Allowed': True,
'dog': 'food'}
if request.param.startswith('multiprocTCP') or \
request.param.startswith('multiprocUDP'):
caps['Admin Port'] = get_free_admin_port()
caps['Convention Address.IPv4'] = '', caps['Admin Port']
if request.param.endswith('-AdminRouting'):
caps['Admin Routing'] = True
if request.param.endswith('-AdminRoutingTXOnly'):
caps['Admin Routing'] = True
caps['Outbound Only'] = True
asys = ActorSystem(systemBase=request.param.partition('-')[0],
capabilities=caps,
logDefs=(simpleActorTestLogging()
if request.param.startswith('multiproc')
else False),
transientUnique=True)
asys.base_name = request.param
asys.port_num = caps.get('Admin Port', None)
asys.txonly = request.param.endswith('-AdminRoutingTXOnly')
request.addfinalizer(lambda asys=asys: asys.shutdown())
return asys
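# Build a second ActorSystem of the same base type as `asys`; the multiproc
# TCP/UDP bases get their own admin port and, when in_convention is set, register
# with the first system's convention (optionally waiting briefly for the two
# systems to connect).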
def similar_asys(asys, in_convention=True, start_wait=True, capabilities=None):
caps = capabilities or {}
if asys.base_name.startswith('multiprocTCP') or \
asys.base_name.startswith('multiprocUDP'):
caps['Admin Port'] = get_free_admin_port()
if in_convention:
caps['Convention Address.IPv4'] = '', asys.port_num
if asys.base_name.endswith('-AdminRouting'):
caps['Admin Routing'] = True
asys2 = ActorSystem(systemBase=asys.base_name.partition('-')[0],
capabilities=caps,
logDefs=(simpleActorTestLogging()
if asys.base_name.startswith('multiproc')
else False),
transientUnique=True)
asys2.base_name = asys.base_name
asys2.port_num = caps.get('Admin Port', None)
if in_convention and start_wait:
time.sleep(0.25) # Wait for Actor Systems to start and connect together
return asys2
@pytest.fixture
def asys2(request, asys):
asys2 = similar_asys(asys, in_convention=False)
# n.b. shutdown the second actor system first:
# 1. Some tests ask asys1 to create an actor
# 2. That actor is actually supported by asys2
# 3. There is an external port the tester uses for each asys
# 4. When asys2 is shutdown, it will attempt to notify the
# parent of the actor that the actor is dead
# 5. This parent is the external port for asys1.
# 6. If asys1 is shutdown first, then asys2 must time out
# on the transmit attempt (usually 5 minutes) before
# it can exit.
# 7. If the test is re-run within this 5 minute period, it will fail
# because the old asys2 is still existing but in shutdown state
# (and will therefore rightfully refuse new actions).
# By shutting down asys2 first, the parent notification can be
# performed and subsequent runs don't encounter the lingering
# asys2.
request.addfinalizer(lambda asys=asys2: asys2.shutdown())
return asys2
@pytest.fixture
def asys_pair(request, asys):
asys2 = similar_asys(asys, in_convention=True)
# n.b. shutdown the second actor system first:
# 1. Some tests ask asys1 to create an actor
# 2. That actor is actually supported by asys2
# 3. There is an external port the tester uses for each asys
# 4. When asys2 is shutdown, it will attempt to notify the
# parent of the actor that the actor is dead
# 5. This parent is the external port for asys1.
# 6. If asys1 is shutdown first, then asys2 must time out
# on the transmit attempt (usually 5 minutes) before
# it can exit.
# 7. If the test is re-run within this 5 minute period, it will fail
# because the old asys2 is still existing but in shutdown state
# (and will therefore rightfully refuse new actions).
# By shutting down asys2 first, the parent notification can be
# performed and subsequent runs don't encounter the lingering
# asys2.
request.addfinalizer(lambda asys=asys2: asys2.shutdown())
return (asys, asys2)
@pytest.fixture
def run_unstable_tests(request):
return request.config.getoption('unstable', default=False)
def unstable_test(run_unstable_tests, asys, *unstable_bases):
if asys.base_name in unstable_bases and not run_unstable_tests:
pytest.skip("Test unstable for %s system base"%asys.base_name)
def actor_system_unsupported(asys, *unsupported_bases):
if asys.base_name in unsupported_bases:
pytest.skip("Functionality not supported for %s system base"%asys.base_name)
from thespian.system.timing import timePeriodSeconds
import time
inTestDelay = lambda period: time.sleep(timePeriodSeconds(period))
def delay_for_next_of_kin_notification(system):
if system.base_name == 'multiprocQueueBase':
# The multiprocQueueBase signal processor cannot interrupt a
# sleeping Queue.get(), so for this base it is necessary to
# wait for the timeout on the Queue.get() to allow it time to
# notice and process the child exit.
time.sleep(2.5)
elif system.base_name == 'multiprocUDPBase':
time.sleep(0.6)
else:
time.sleep(0.1)
|
alipay/aop/api/domain/AlipayUserApplepayMerchantauthtokenGetModel.py | antopen/alipay-sdk-python-all | 213 | 33725 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.OpenApiAppleRequestHeader import OpenApiAppleRequestHeader
class AlipayUserApplepayMerchantauthtokenGetModel(object):
def __init__(self):
self._amount = None
self._currency_code = None
self._partner_owned_merchant_identifier = None
self._provisioning_bundle_identifier = None
self._request_header = None
self._transaction_notification_identifier = None
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, value):
self._amount = value
@property
def currency_code(self):
return self._currency_code
@currency_code.setter
def currency_code(self, value):
self._currency_code = value
@property
def partner_owned_merchant_identifier(self):
return self._partner_owned_merchant_identifier
@partner_owned_merchant_identifier.setter
def partner_owned_merchant_identifier(self, value):
self._partner_owned_merchant_identifier = value
@property
def provisioning_bundle_identifier(self):
return self._provisioning_bundle_identifier
@provisioning_bundle_identifier.setter
def provisioning_bundle_identifier(self, value):
self._provisioning_bundle_identifier = value
@property
def request_header(self):
return self._request_header
@request_header.setter
def request_header(self, value):
if isinstance(value, OpenApiAppleRequestHeader):
self._request_header = value
else:
self._request_header = OpenApiAppleRequestHeader.from_alipay_dict(value)
@property
def transaction_notification_identifier(self):
return self._transaction_notification_identifier
@transaction_notification_identifier.setter
def transaction_notification_identifier(self, value):
self._transaction_notification_identifier = value
def to_alipay_dict(self):
params = dict()
if self.amount:
if hasattr(self.amount, 'to_alipay_dict'):
params['amount'] = self.amount.to_alipay_dict()
else:
params['amount'] = self.amount
if self.currency_code:
if hasattr(self.currency_code, 'to_alipay_dict'):
params['currency_code'] = self.currency_code.to_alipay_dict()
else:
params['currency_code'] = self.currency_code
if self.partner_owned_merchant_identifier:
if hasattr(self.partner_owned_merchant_identifier, 'to_alipay_dict'):
params['partner_owned_merchant_identifier'] = self.partner_owned_merchant_identifier.to_alipay_dict()
else:
params['partner_owned_merchant_identifier'] = self.partner_owned_merchant_identifier
if self.provisioning_bundle_identifier:
if hasattr(self.provisioning_bundle_identifier, 'to_alipay_dict'):
params['provisioning_bundle_identifier'] = self.provisioning_bundle_identifier.to_alipay_dict()
else:
params['provisioning_bundle_identifier'] = self.provisioning_bundle_identifier
if self.request_header:
if hasattr(self.request_header, 'to_alipay_dict'):
params['request_header'] = self.request_header.to_alipay_dict()
else:
params['request_header'] = self.request_header
if self.transaction_notification_identifier:
if hasattr(self.transaction_notification_identifier, 'to_alipay_dict'):
params['transaction_notification_identifier'] = self.transaction_notification_identifier.to_alipay_dict()
else:
params['transaction_notification_identifier'] = self.transaction_notification_identifier
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayUserApplepayMerchantauthtokenGetModel()
if 'amount' in d:
o.amount = d['amount']
if 'currency_code' in d:
o.currency_code = d['currency_code']
if 'partner_owned_merchant_identifier' in d:
o.partner_owned_merchant_identifier = d['partner_owned_merchant_identifier']
if 'provisioning_bundle_identifier' in d:
o.provisioning_bundle_identifier = d['provisioning_bundle_identifier']
if 'request_header' in d:
o.request_header = d['request_header']
if 'transaction_notification_identifier' in d:
o.transaction_notification_identifier = d['transaction_notification_identifier']
return o
|
test/hlt/pytest/python/com/huawei/iotplatform/client/dto/QueryDeviceCmdCancelTaskOutDTO.py | yuanyi-thu/AIOT- | 128 | 33727 | from com.huawei.iotplatform.client.dto.DeviceCommandCancelTaskRespV4 import DeviceCommandCancelTaskRespV4
from com.huawei.iotplatform.client.dto.Pagination import Pagination
class QueryDeviceCmdCancelTaskOutDTO(object):
pagination = Pagination()
data = DeviceCommandCancelTaskRespV4()
def __init__(self):
pass
def getPagination(self):
return self.pagination
def setPagination(self, pagination):
self.pagination = pagination
def getData(self):
return self.data
def setData(self, data):
self.data = data
|
exec/tests/unit/runners/test_evaluators.py | AndersonReyes/klio | 705 | 33735 | # Copyright 2021 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for KlioPubSubReadEvaluator
These were adapted from
apache_beam/io/gcp/pubsub_test.py::TestReadFromPubSub.
These validate that the original expected behavior from _PubSubReadEvaluator
was kept, as well checking that:
* Responses are not auto-acked
* MessageManager daemon threads started
* Messages added to MessageManager
* Messages handled one at a time, instead of 10 at a time
"""
import pytest
from apache_beam import transforms as beam_transforms
from apache_beam import utils as beam_utils
from apache_beam.io.gcp import pubsub as b_pubsub
from apache_beam.options import pipeline_options
from apache_beam.runners.direct import direct_runner
from apache_beam.runners.direct import transform_evaluator
from apache_beam.testing import test_pipeline as beam_test_pipeline
from apache_beam.testing import test_utils as beam_test_utils
from apache_beam.testing import util as beam_testing_util
from klio.message import pubsub_message_manager as pmm
from klio_core.proto import klio_pb2
from klio_exec.runners import evaluators
@pytest.fixture
def patch_msg_manager(mocker, monkeypatch):
p = mocker.Mock(name="patch_msg_manager")
monkeypatch.setattr(pmm, "MessageManager", p)
return p
@pytest.fixture
def patch_sub_client(mocker, monkeypatch):
# patch out network calls in SubscriberClient instantiation
c = mocker.Mock(name="patch_sub_client")
monkeypatch.setattr(evaluators.g_pubsub, "SubscriberClient", c)
return c.return_value
class KlioTestPubSubReadEvaluator(object):
"""Wrapper of _PubSubReadEvaluator that makes it bounded."""
_pubsub_read_evaluator = evaluators.KlioPubSubReadEvaluator
def __init__(self, *args, **kwargs):
self._evaluator = self._pubsub_read_evaluator(*args, **kwargs)
def start_bundle(self):
return self._evaluator.start_bundle()
def process_element(self, element):
return self._evaluator.process_element(element)
def finish_bundle(self):
result = self._evaluator.finish_bundle()
result.unprocessed_bundles = []
result.keyed_watermark_holds = {None: None}
return result
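# Route the DirectRunner's pub/sub read transform through the bounded wrapper
# above so these pipeline tests exercise KlioPubSubReadEvaluator end to end.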
transform_evaluator.TransformEvaluatorRegistry._test_evaluators_overrides = {
direct_runner._DirectReadFromPubSub: KlioTestPubSubReadEvaluator,
}
def test_klio_pubsub_read_eval_read_messages_success(
mocker, patch_sub_client, patch_msg_manager,
):
exp_entity_id = "entity_id"
kmsg = klio_pb2.KlioMessage()
kmsg.data.element = bytes(exp_entity_id, "utf-8")
data = kmsg.SerializeToString()
publish_time_secs = 1520861821
publish_time_nanos = 234567000
attributes = {"key": "value"}
ack_id = "ack_id"
pull_response = beam_test_utils.create_pull_response(
[
beam_test_utils.PullResponseMessage(
data, attributes, publish_time_secs, publish_time_nanos, ack_id
)
]
)
pmsg = b_pubsub.PubsubMessage(data, attributes)
expected_elements = [
beam_testing_util.TestWindowedValue(
pmsg,
beam_utils.timestamp.Timestamp(1520861821.234567),
[beam_transforms.window.GlobalWindow()],
)
]
patch_sub_client.pull.return_value = pull_response
options = pipeline_options.PipelineOptions([])
options.view_as(pipeline_options.StandardOptions).streaming = True
with beam_test_pipeline.TestPipeline(options=options) as p:
pcoll = p | b_pubsub.ReadFromPubSub(
"projects/fakeprj/topics/a_topic", None, None, with_attributes=True
)
beam_testing_util.assert_that(
pcoll,
beam_testing_util.equal_to(expected_elements),
reify_windows=True,
)
# Check overridden functionality:
# 1. Check that auto-acking is skipped
patch_sub_client.acknowledge.assert_not_called()
# 2. Check that MessageManager daemon threads were started
patch_msg_manager.assert_called_once_with(
patch_sub_client.subscription_path()
)
# 3. Check that messages were added to the MessageManager
patch_msg_manager.return_value.add.assert_called_once_with(ack_id, pmsg)
# 4. Check that one message is handled at a time, instead of the
# original 10
patch_sub_client.pull.assert_called_once_with(
mocker.ANY, max_messages=1, return_immediately=True
)
patch_sub_client.api.transport.channel.close.assert_called_once_with()
def test_read_messages_timestamp_attribute_milli_success(
mocker, patch_sub_client, patch_msg_manager,
):
exp_entity_id = "entity_id"
kmsg = klio_pb2.KlioMessage()
kmsg.data.element = bytes(exp_entity_id, "utf-8")
data = kmsg.SerializeToString()
attributes = {"time": "1337"}
publish_time_secs = 1520861821
publish_time_nanos = 234567000
ack_id = "ack_id"
pull_response = beam_test_utils.create_pull_response(
[
beam_test_utils.PullResponseMessage(
data, attributes, publish_time_secs, publish_time_nanos, ack_id
)
]
)
pmsg = b_pubsub.PubsubMessage(data, attributes)
expected_elements = [
beam_testing_util.TestWindowedValue(
pmsg,
beam_utils.timestamp.Timestamp(
micros=int(attributes["time"]) * 1000
),
[beam_transforms.window.GlobalWindow()],
),
]
patch_sub_client.pull.return_value = pull_response
options = pipeline_options.PipelineOptions([])
options.view_as(pipeline_options.StandardOptions).streaming = True
with beam_test_pipeline.TestPipeline(options=options) as p:
pcoll = p | b_pubsub.ReadFromPubSub(
"projects/fakeprj/topics/a_topic",
None,
None,
with_attributes=True,
timestamp_attribute="time",
)
# Check original functionality that was kept the same
beam_testing_util.assert_that(
pcoll,
beam_testing_util.equal_to(expected_elements),
reify_windows=True,
)
# Check overridden functionality:
# 1. Check that auto-acking is skipped
patch_sub_client.acknowledge.assert_not_called()
# 2. Check that MessageManager daemon threads were started
patch_msg_manager.assert_called_once_with(
patch_sub_client.subscription_path()
)
# 3. Check that messages were added to the MessageManager
patch_msg_manager.return_value.add.assert_called_once_with(ack_id, pmsg)
# 4. Check that one message is handled at a time, instead of the
# original 10
patch_sub_client.pull.assert_called_once_with(
mocker.ANY, max_messages=1, return_immediately=True
)
patch_sub_client.api.transport.channel.close.assert_called_once_with()
def test_read_messages_timestamp_attribute_rfc3339_success(
mocker, patch_sub_client, patch_msg_manager,
):
exp_entity_id = "entity_id"
kmsg = klio_pb2.KlioMessage()
kmsg.data.element = bytes(exp_entity_id, "utf-8")
data = kmsg.SerializeToString()
attributes = {"time": "2018-03-12T13:37:01.234567Z"}
publish_time_secs = 1337000000
publish_time_nanos = 133700000
ack_id = "ack_id"
pull_response = beam_test_utils.create_pull_response(
[
beam_test_utils.PullResponseMessage(
data, attributes, publish_time_secs, publish_time_nanos, ack_id
)
]
)
pmsg = b_pubsub.PubsubMessage(data, attributes)
expected_elements = [
beam_testing_util.TestWindowedValue(
pmsg,
beam_utils.timestamp.Timestamp.from_rfc3339(attributes["time"]),
[beam_transforms.window.GlobalWindow()],
),
]
patch_sub_client.pull.return_value = pull_response
options = pipeline_options.PipelineOptions([])
options.view_as(pipeline_options.StandardOptions).streaming = True
with beam_test_pipeline.TestPipeline(options=options) as p:
pcoll = p | b_pubsub.ReadFromPubSub(
"projects/fakeprj/topics/a_topic",
None,
None,
with_attributes=True,
timestamp_attribute="time",
)
# Check original functionality that was kept the same
beam_testing_util.assert_that(
pcoll,
beam_testing_util.equal_to(expected_elements),
reify_windows=True,
)
# Check overridden functionality:
# 1. Check that auto-acking is skipped
patch_sub_client.acknowledge.assert_not_called()
# 2. Check that MessageManager daemon threads were started
patch_msg_manager.assert_called_once_with(
patch_sub_client.subscription_path()
)
# 3. Check that messages were added to the MessageManager
patch_msg_manager.return_value.add.assert_called_once_with(ack_id, pmsg)
# 4. Check that one message is handled at a time, instead of the
# original 10
patch_sub_client.pull.assert_called_once_with(
mocker.ANY, max_messages=1, return_immediately=True
)
patch_sub_client.api.transport.channel.close.assert_called_once_with()
def test_read_messages_timestamp_attribute_missing(
mocker, patch_sub_client, patch_msg_manager,
):
exp_entity_id = "entity_id"
kmsg = klio_pb2.KlioMessage()
kmsg.data.element = bytes(exp_entity_id, "utf-8")
data = kmsg.SerializeToString()
attributes = {}
publish_time_secs = 1520861821
publish_time_nanos = 234567000
publish_time = "2018-03-12T13:37:01.234567Z"
ack_id = "ack_id"
pull_response = beam_test_utils.create_pull_response(
[
beam_test_utils.PullResponseMessage(
data, attributes, publish_time_secs, publish_time_nanos, ack_id
)
]
)
pmsg = b_pubsub.PubsubMessage(data, attributes)
expected_elements = [
beam_testing_util.TestWindowedValue(
pmsg,
beam_utils.timestamp.Timestamp.from_rfc3339(publish_time),
[beam_transforms.window.GlobalWindow()],
),
]
patch_sub_client.pull.return_value = pull_response
options = pipeline_options.PipelineOptions([])
options.view_as(pipeline_options.StandardOptions).streaming = True
with beam_test_pipeline.TestPipeline(options=options) as p:
pcoll = p | b_pubsub.ReadFromPubSub(
"projects/fakeprj/topics/a_topic",
None,
None,
with_attributes=True,
timestamp_attribute="nonexistent",
)
# Check original functionality that was kept the same
beam_testing_util.assert_that(
pcoll,
beam_testing_util.equal_to(expected_elements),
reify_windows=True,
)
# Check overridden functionality:
# 1. Check that auto-acking is skipped
patch_sub_client.acknowledge.assert_not_called()
# 2. Check that MessageManager daemon threads were started
patch_msg_manager.assert_called_once_with(
patch_sub_client.subscription_path()
)
# 3. Check that messages were added to the MessageManager
patch_msg_manager.return_value.add.assert_called_once_with(ack_id, pmsg)
# 4. Check that one message is handled at a time, instead of the
# original 10
patch_sub_client.pull.assert_called_once_with(
mocker.ANY, max_messages=1, return_immediately=True
)
patch_sub_client.api.transport.channel.close.assert_called_once_with()
def test_read_messages_timestamp_attribute_fail_parse(patch_sub_client):
exp_entity_id = "entity_id"
kmsg = klio_pb2.KlioMessage()
kmsg.data.element = bytes(exp_entity_id, "utf-8")
data = kmsg.SerializeToString()
attributes = {"time": "1337 unparseable"}
publish_time_secs = 1520861821
publish_time_nanos = 234567000
ack_id = "ack_id"
pull_response = beam_test_utils.create_pull_response(
[
beam_test_utils.PullResponseMessage(
data, attributes, publish_time_secs, publish_time_nanos, ack_id
)
]
)
patch_sub_client.pull.return_value = pull_response
options = pipeline_options.PipelineOptions([])
options.view_as(pipeline_options.StandardOptions).streaming = True
p = beam_test_pipeline.TestPipeline(options=options)
_ = p | b_pubsub.ReadFromPubSub(
"projects/fakeprj/topics/a_topic",
None,
None,
with_attributes=True,
timestamp_attribute="time",
)
with pytest.raises(ValueError, match=r"parse"):
p.run()
patch_sub_client.acknowledge.assert_not_called()
patch_sub_client.api.transport.channel.close.assert_called_with()
|
tests/components/geofency/__init__.py | domwillcode/home-assistant | 30,023 | 33757 | """Tests for the Geofency component."""
|
dialogue-engine/test/programytest/mappings/test_properties.py | cotobadesign/cotoba-agent-oss | 104 | 33761 | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
import os
from programy.mappings.properties import PropertiesCollection
from programy.storage.factory import StorageFactory
from programy.storage.stores.file.config import FileStorageConfiguration
from programy.storage.stores.file.engine import FileStorageEngine
from programy.storage.stores.file.config import FileStoreConfiguration
class PropertysTests(unittest.TestCase):
def test_initialise_collection(self):
collection = PropertiesCollection()
self.assertIsNotNone(collection)
def test_properties_operations(self):
collection = PropertiesCollection()
self.assertIsNotNone(collection)
collection.add_property("name", "KeiffBot 1.0")
collection.add_property("firstname", "Keiff")
collection.add_property("middlename", "AIML")
collection.add_property("lastname", "BoT")
collection.add_property("fullname", "KeiffBot")
self.assertTrue(collection.has_property("name"))
self.assertFalse(collection.has_property("age"))
self.assertEqual("KeiffBot 1.0", collection.property("name"))
self.assertIsNone(collection.property("age"))
def test_load_from_file(self):
storage_factory = StorageFactory()
file_store_config = FileStorageConfiguration()
file_store_config._properties_storage = FileStoreConfiguration(file=os.path.dirname(__file__) + os.sep + "test_files" + os.sep + "properties.txt",
format="text", extension="txt", encoding="utf-8", delete_on_start=False)
storage_engine = FileStorageEngine(file_store_config)
storage_factory._storage_engines[StorageFactory.PROPERTIES] = storage_engine
storage_factory._store_to_engine_map[StorageFactory.PROPERTIES] = storage_engine
collection = PropertiesCollection()
self.assertIsNotNone(collection)
collection.load(storage_factory)
self.assertTrue(collection.has_property("name"))
self.assertFalse(collection.has_property("age"))
self.assertEqual("KeiffBot 1.0", collection.property("name"))
self.assertIsNone(collection.property("age"))
def test_reload_from_file(self):
storage_factory = StorageFactory()
file_store_config = FileStorageConfiguration()
file_store_config._properties_storage = FileStoreConfiguration(file=os.path.dirname(__file__) + os.sep + "test_files" + os.sep + "properties.txt",
format="text", extension="txt", encoding="utf-8", delete_on_start=False)
storage_engine = FileStorageEngine(file_store_config)
storage_factory._storage_engines[StorageFactory.PROPERTIES] = storage_engine
storage_factory._store_to_engine_map[StorageFactory.PROPERTIES] = storage_engine
collection = PropertiesCollection()
self.assertIsNotNone(collection)
collection.load(storage_factory)
self.assertTrue(collection.has_property("name"))
self.assertFalse(collection.has_property("age"))
self.assertEqual("KeiffBot 1.0", collection.property("name"))
self.assertIsNone(collection.property("age"))
collection.remove()
collection.reload_file(storage_factory)
self.assertTrue(collection.has_property("name"))
self.assertFalse(collection.has_property("age"))
self.assertEqual("KeiffBot 1.0", collection.property("name"))
self.assertIsNone(collection.property("age"))
|
attic/concurrency/timer2.py | matteoshen/example-code | 5,651 | 33778 | import asyncio
import sys
import contextlib
@asyncio.coroutine
def show_remaining(dots_task):
remaining = 5
while remaining:
print('Remaining: ', remaining)
sys.stdout.flush()
yield from asyncio.sleep(1)
remaining -= 1
dots_task.cancel()
print()
@asyncio.coroutine
def dots():
while True:
print('.', sep='', end='')
sys.stdout.flush()
yield from asyncio.sleep(.1)
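# dots() prints a dot every 0.1 s until show_remaining() cancels it at the end of
# its 5-second countdown; main() runs both coroutines on a single event loop.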
def main():
with contextlib.closing(asyncio.get_event_loop()) as loop:
dots_task = asyncio.Task(dots())
coros = [show_remaining(dots_task), dots_task]
loop.run_until_complete(asyncio.wait(coros))
if __name__ == '__main__':
main()
|
tests/i18n/patterns/urls/path_unused.py | webjunkie/django | 790 | 33785 | from django.conf.urls import url
from django.conf.urls import patterns
from django.views.generic import TemplateView
view = TemplateView.as_view(template_name='dummy.html')
urlpatterns = patterns('',
url(r'^nl/foo/', view, name='not-translated'),
)
|
backend/profiles/serializers.py | stevethompsonstar/django-react-blog | 592 | 33786 | from rest_framework import serializers
from .models import Subscriber
class SubscriberSerializer(serializers.ModelSerializer):
class Meta:
model = Subscriber
fields = (
'email',
)
|
generate_imagery.py | AnthonyLapadula/pytorch-GANs | 278 | 33795 | import os
import shutil
import argparse
import enum
import torch
from torch import nn
from torchvision.utils import save_image, make_grid
import matplotlib.pyplot as plt
import numpy as np
import cv2 as cv
import utils.utils as utils
from utils.constants import *
class GenerationMode(enum.Enum):
SINGLE_IMAGE = 0,
INTERPOLATION = 1,
VECTOR_ARITHMETIC = 2
def postprocess_generated_img(generated_img_tensor):
assert isinstance(generated_img_tensor, torch.Tensor), f'Expected PyTorch tensor but got {type(generated_img_tensor)}.'
# Move the tensor from GPU to CPU, convert to numpy array, extract 0th batch, move the image channel
# from 0th to 2nd position (CHW -> HWC)
generated_img = np.moveaxis(generated_img_tensor.to('cpu').numpy()[0], 0, 2)
# If grayscale image repeat 3 times to get RGB image (for generators trained on MNIST)
if generated_img.shape[2] == 1:
generated_img = np.repeat(generated_img, 3, axis=2)
# Imagery is in the range [-1, 1] (generator has tanh as the output activation) move it into [0, 1] range
generated_img -= np.min(generated_img)
generated_img /= np.max(generated_img)
return generated_img
def generate_from_random_latent_vector(generator, cgan_digit=None):
with torch.no_grad():
latent_vector = utils.get_gaussian_latent_batch(1, next(generator.parameters()).device)
if cgan_digit is None:
generated_img = postprocess_generated_img(generator(latent_vector))
else: # condition and generate the digit specified by cgan_digit
ref_label = torch.tensor([cgan_digit], dtype=torch.int64)
ref_label_one_hot_encoding = torch.nn.functional.one_hot(ref_label, MNIST_NUM_CLASSES).type(torch.FloatTensor).to(next(generator.parameters()).device)
generated_img = postprocess_generated_img(generator(latent_vector, ref_label_one_hot_encoding))
return generated_img, latent_vector.to('cpu').numpy()[0]
def generate_from_specified_numpy_latent_vector(generator, latent_vector):
assert isinstance(latent_vector, np.ndarray), f'Expected latent vector to be numpy array but got {type(latent_vector)}.'
with torch.no_grad():
latent_vector_tensor = torch.unsqueeze(torch.tensor(latent_vector, device=next(generator.parameters()).device), dim=0)
return postprocess_generated_img(generator(latent_vector_tensor))
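# Latent-space interpolation helpers used by GenerationMode.INTERPOLATION:
# t sweeps from 0 (returns p0) to 1 (returns p1).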
def linear_interpolation(t, p0, p1):
return p0 + t * (p1 - p0)
def spherical_interpolation(t, p0, p1):
""" Spherical interpolation (slerp) formula: https://en.wikipedia.org/wiki/Slerp
Found inspiration here: https://github.com/soumith/ganhacks
but I didn't get any improvement using it compared to linear interpolation.
Args:
t (float): has [0, 1] range
p0 (numpy array): First n-dimensional vector
p1 (numpy array): Second n-dimensional vector
Result:
Returns spherically interpolated vector.
"""
if t <= 0:
return p0
elif t >= 1:
return p1
elif np.allclose(p0, p1):
return p0
# Convert p0 and p1 to unit vectors and find the angle between them (omega)
omega = np.arccos(np.dot(p0 / np.linalg.norm(p0), p1 / np.linalg.norm(p1)))
sin_omega = np.sin(omega) # syntactic sugar
return np.sin((1.0 - t) * omega) / sin_omega * p0 + np.sin(t * omega) / sin_omega * p1
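# Illustrative check: for unit vectors p0 = [1, 0], p1 = [0, 1] and t = 0.5,
# omega = pi/2, so the result is [sin(pi/4), sin(pi/4)] ~= [0.707, 0.707].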
def display_vector_arithmetic_results(imgs_to_display):
fig = plt.figure(figsize=(6, 6))
title_fontsize = 'x-small'
num_display_imgs = 7
titles = ['happy women', 'happy woman (avg)', 'neutral women', 'neutral woman (avg)', 'neutral men', 'neutral man (avg)', 'result - happy man']
ax = np.zeros(num_display_imgs, dtype=object)
assert len(imgs_to_display) == num_display_imgs, f'Expected {num_display_imgs} got {len(imgs_to_display)} images.'
gs = fig.add_gridspec(5, 4, left=0.02, right=0.98, wspace=0.05, hspace=0.3)
ax[0] = fig.add_subplot(gs[0, :3])
ax[1] = fig.add_subplot(gs[0, 3])
ax[2] = fig.add_subplot(gs[1, :3])
ax[3] = fig.add_subplot(gs[1, 3])
ax[4] = fig.add_subplot(gs[2, :3])
ax[5] = fig.add_subplot(gs[2, 3])
ax[6] = fig.add_subplot(gs[3:, 1:3])
for i in range(num_display_imgs):
ax[i].imshow(cv.resize(imgs_to_display[i], (0, 0), fx=3, fy=3, interpolation=cv.INTER_NEAREST))
ax[i].set_title(titles[i], fontsize=title_fontsize)
ax[i].tick_params(which='both', bottom=False, left=False, labelleft=False, labelbottom=False)
plt.show()
def generate_new_images(model_name, cgan_digit=None, generation_mode=True, slerp=True, a=None, b=None, should_display=True):
""" Generate imagery using pre-trained generator (using vanilla_generator_000000.pth by default)
Args:
model_name (str): model name you want to use (default lookup location is BINARIES_PATH).
cgan_digit (int): if specified generate that exact digit.
generation_mode (enum): generate a single image from a random vector, interpolate between the 2 chosen latent
vectors, or perform arithmetic over latent vectors (note: not every mode is supported for every model type)
slerp (bool): if True use spherical interpolation otherwise use linear interpolation.
a, b (numpy arrays): latent vectors, if set to None you'll be prompted to choose images you like,
and use corresponding latent vectors instead.
should_display (bool): Display the generated images before saving them.
"""
model_path = os.path.join(BINARIES_PATH, model_name)
assert os.path.exists(model_path), f'Could not find the model {model_path}. You first need to train your generator.'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Prepare the correct (vanilla, cGAN, DCGAN, ...) model, load the weights and put the model into evaluation mode
model_state = torch.load(model_path)
gan_type = model_state["gan_type"]
print(f'Found {gan_type} GAN!')
_, generator = utils.get_gan(device, gan_type)
generator.load_state_dict(model_state["state_dict"], strict=True)
generator.eval()
# Generate a single image, save it and potentially display it
if generation_mode == GenerationMode.SINGLE_IMAGE:
generated_imgs_path = os.path.join(DATA_DIR_PATH, 'generated_imagery')
os.makedirs(generated_imgs_path, exist_ok=True)
generated_img, _ = generate_from_random_latent_vector(generator, cgan_digit if gan_type == GANType.CGAN.name else None)
utils.save_and_maybe_display_image(generated_imgs_path, generated_img, should_display=should_display)
# Pick 2 images you like between which you'd like to interpolate (by typing 'y' into console)
elif generation_mode == GenerationMode.INTERPOLATION:
        assert gan_type == GANType.VANILLA.name or gan_type == GANType.DCGAN.name, f'Got {gan_type} but only VANILLA/DCGAN are supported for the interpolation mode.'
interpolation_name = "spherical" if slerp else "linear"
interpolation_fn = spherical_interpolation if slerp else linear_interpolation
grid_interpolated_imgs_path = os.path.join(DATA_DIR_PATH, 'interpolated_imagery') # combined results dir
decomposed_interpolated_imgs_path = os.path.join(grid_interpolated_imgs_path, f'tmp_{gan_type}_{interpolation_name}_dump') # dump separate results
if os.path.exists(decomposed_interpolated_imgs_path):
shutil.rmtree(decomposed_interpolated_imgs_path)
os.makedirs(grid_interpolated_imgs_path, exist_ok=True)
os.makedirs(decomposed_interpolated_imgs_path, exist_ok=True)
latent_vector_a, latent_vector_b = [None, None]
# If a and b were not specified loop until the user picked the 2 images he/she likes.
found_good_vectors_flag = False
if a is None or b is None:
while not found_good_vectors_flag:
generated_img, latent_vector = generate_from_random_latent_vector(generator)
plt.imshow(generated_img); plt.title('Do you like this image?'); plt.show()
user_input = input("Do you like this generated image? [y for yes]:")
if user_input == 'y':
if latent_vector_a is None:
latent_vector_a = latent_vector
print('Saved the first latent vector.')
elif latent_vector_b is None:
latent_vector_b = latent_vector
print('Saved the second latent vector.')
found_good_vectors_flag = True
else:
                    print("Well let's generate a new one!")
continue
else:
print('Skipping latent vectors selection section and using cached ones.')
latent_vector_a, latent_vector_b = [a, b]
# Cache latent vectors
if a is None or b is None:
np.save(os.path.join(grid_interpolated_imgs_path, 'a.npy'), latent_vector_a)
np.save(os.path.join(grid_interpolated_imgs_path, 'b.npy'), latent_vector_b)
        print(f"Let's do some {interpolation_name} interpolation!")
interpolation_resolution = 47 # number of images between the vectors a and b
num_interpolated_imgs = interpolation_resolution + 2 # + 2 so that we include a and b
generated_imgs = []
for i in range(num_interpolated_imgs):
t = i / (num_interpolated_imgs - 1) # goes from 0. to 1.
current_latent_vector = interpolation_fn(t, latent_vector_a, latent_vector_b)
generated_img = generate_from_specified_numpy_latent_vector(generator, current_latent_vector)
print(f'Generated image [{i+1}/{num_interpolated_imgs}].')
utils.save_and_maybe_display_image(decomposed_interpolated_imgs_path, generated_img, should_display=should_display)
# Move from channel last to channel first (CHW->HWC), PyTorch's save_image function expects BCHW format
generated_imgs.append(torch.tensor(np.moveaxis(generated_img, 2, 0)))
interpolated_block_img = torch.stack(generated_imgs)
interpolated_block_img = nn.Upsample(scale_factor=2.5, mode='nearest')(interpolated_block_img)
save_image(interpolated_block_img, os.path.join(grid_interpolated_imgs_path, utils.get_available_file_name(grid_interpolated_imgs_path)), nrow=int(np.sqrt(num_interpolated_imgs)))
elif generation_mode == GenerationMode.VECTOR_ARITHMETIC:
assert gan_type == GANType.DCGAN.name, f'Got {gan_type} but only DCGAN is supported for arithmetic mode.'
# Generate num_options face images and create a grid image from them
num_options = 100
generated_imgs = []
latent_vectors = []
padding = 2
for i in range(num_options):
generated_img, latent_vector = generate_from_random_latent_vector(generator)
generated_imgs.append(torch.tensor(np.moveaxis(generated_img, 2, 0))) # make_grid expects CHW format
latent_vectors.append(latent_vector)
stacked_tensor_imgs = torch.stack(generated_imgs)
final_tensor_img = make_grid(stacked_tensor_imgs, nrow=int(np.sqrt(num_options)), padding=padding)
display_img = np.moveaxis(final_tensor_img.numpy(), 0, 2)
# For storing latent vectors
num_of_vectors_per_category = 3
happy_woman_latent_vectors = []
neutral_woman_latent_vectors = []
neutral_man_latent_vectors = []
# Make it easy - by clicking on the plot you pick the image.
def onclick(event):
if event.dblclick:
pass
else: # single click
if event.button == 1: # left click
x_coord = event.xdata
y_coord = event.ydata
column = int(x_coord / (64 + padding))
row = int(y_coord / (64 + padding))
# Store latent vector corresponding to the image that the user clicked on.
if len(happy_woman_latent_vectors) < num_of_vectors_per_category:
happy_woman_latent_vectors.append(latent_vectors[10*row + column])
print(f'Picked image row={row}, column={column} as {len(happy_woman_latent_vectors)}. happy woman.')
elif len(neutral_woman_latent_vectors) < num_of_vectors_per_category:
neutral_woman_latent_vectors.append(latent_vectors[10*row + column])
print(f'Picked image row={row}, column={column} as {len(neutral_woman_latent_vectors)}. neutral woman.')
elif len(neutral_man_latent_vectors) < num_of_vectors_per_category:
neutral_man_latent_vectors.append(latent_vectors[10*row + column])
print(f'Picked image row={row}, column={column} as {len(neutral_man_latent_vectors)}. neutral man.')
else:
plt.close()
plt.figure(figsize=(10, 10))
plt.imshow(display_img)
# This is just an example you could also pick 3 neutral woman images with sunglasses, etc.
plt.title('Click on 3 happy women, 3 neutral women and \n 3 neutral men images (order matters!)')
cid = plt.gcf().canvas.mpl_connect('button_press_event', onclick)
plt.show()
plt.gcf().canvas.mpl_disconnect(cid)
print('Done choosing images.')
# Calculate the average latent vector for every category (happy woman, neutral woman, neutral man)
happy_woman_avg_latent_vector = np.mean(np.array(happy_woman_latent_vectors), axis=0)
neutral_woman_avg_latent_vector = np.mean(np.array(neutral_woman_latent_vectors), axis=0)
neutral_man_avg_latent_vector = np.mean(np.array(neutral_man_latent_vectors), axis=0)
# By subtracting neutral woman from the happy woman we capture the "vector of smiling". Adding that vector
# to a neutral man we get a happy man's latent vector! Our latent space has amazingly beautiful structure!
happy_man_latent_vector = neutral_man_avg_latent_vector + (happy_woman_avg_latent_vector - neutral_woman_avg_latent_vector)
# Generate images from these latent vectors
happy_women_imgs = np.hstack([generate_from_specified_numpy_latent_vector(generator, v) for v in happy_woman_latent_vectors])
neutral_women_imgs = np.hstack([generate_from_specified_numpy_latent_vector(generator, v) for v in neutral_woman_latent_vectors])
neutral_men_imgs = np.hstack([generate_from_specified_numpy_latent_vector(generator, v) for v in neutral_man_latent_vectors])
happy_woman_avg_img = generate_from_specified_numpy_latent_vector(generator, happy_woman_avg_latent_vector)
neutral_woman_avg_img = generate_from_specified_numpy_latent_vector(generator, neutral_woman_avg_latent_vector)
neutral_man_avg_img = generate_from_specified_numpy_latent_vector(generator, neutral_man_avg_latent_vector)
happy_man_img = generate_from_specified_numpy_latent_vector(generator, happy_man_latent_vector)
display_vector_arithmetic_results([happy_women_imgs, happy_woman_avg_img, neutral_women_imgs, neutral_woman_avg_img, neutral_men_imgs, neutral_man_avg_img, happy_man_img])
else:
raise Exception(f'Generation mode not yet supported.')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, help="Pre-trained generator model name", default=r'VANILLA_000000.pth')
parser.add_argument("--cgan_digit", type=int, help="Used only for cGAN - generate specified digit", default=3)
parser.add_argument("--generation_mode", type=bool, help="Pick between 3 generation modes", default=GenerationMode.SINGLE_IMAGE)
parser.add_argument("--slerp", type=bool, help="Should use spherical interpolation (default No)", default=False)
parser.add_argument("--should_display", type=bool, help="Display intermediate results", default=True)
args = parser.parse_args()
# The first time you start generation in the interpolation mode it will cache a and b
# which you'll choose the first time you run the it.
a_path = os.path.join(DATA_DIR_PATH, 'interpolated_imagery', 'a.npy')
b_path = os.path.join(DATA_DIR_PATH, 'interpolated_imagery', 'b.npy')
latent_vector_a = np.load(a_path) if os.path.exists(a_path) else None
latent_vector_b = np.load(b_path) if os.path.exists(b_path) else None
generate_new_images(
args.model_name,
args.cgan_digit,
generation_mode=args.generation_mode,
slerp=args.slerp,
a=latent_vector_a,
b=latent_vector_b,
should_display=args.should_display)
|
create_swag/lm/load_data.py | gauravkmr/swagaf | 182 | 33813 | # First make the vocabulary, etc.
import os
import pickle as pkl
import random
import simplejson as json
from allennlp.common.util import get_spacy_model
from allennlp.data import Instance
from allennlp.data import Token
from allennlp.data import Vocabulary
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.token_indexers.elmo_indexer import ELMoTokenCharactersIndexer
from torch.utils.data import Dataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
from raw_data.events import DATA_PATH
from pytorch_misc import pairwise
from create_swag.lm.config import NUM_FOLDS
def load_lm_data(fold=None, mode='train'):
"""
Turns the sequential data into instances.
:param split:
:return:
"""
# Get or make vocab
spacy_model = get_spacy_model("en_core_web_sm", pos_tags=False, parse=False, ner=False)
if os.path.exists('vocabulary'):
print("Loading cached vocab. caution if you're building the dataset again!!!!", flush=True)
vocab = Vocabulary.from_files('vocabulary')
with open(os.path.join(DATA_PATH, 'events-3.json'), 'r') as f:
lm_data = json.load(f)
lm_data = [data_item for s in ('train', 'val', 'test') for data_item in lm_data[s]]
else:
assert fold is None
with open(os.path.join(DATA_PATH, 'events-3.json'), 'r') as f:
lm_data = json.load(f)
lm_data = [data_item for s in ('train', 'val', 'test') for data_item in lm_data[s]]
# Manually doing this because I don't want to double count things
vocab = Vocabulary.from_instances(
[Instance({'story': TextField(
[Token(x) for x in ['@@bos@@'] + [x.orth_ for x in spacy_model(sent)] + ['@@eos@@']], token_indexers={
'tokens': SingleIdTokenIndexer(namespace='tokens', lowercase_tokens=True)})}) for data_item in
lm_data for sent in
data_item['sentences']], min_count={'tokens': 3})
vocab.get_index_to_token_vocabulary('tokens')
vocab.save_to_files('vocabulary')
print("VOCABULARY HAS {} ITEMS".format(vocab.get_vocab_size(namespace='tokens')))
if all([os.path.exists('lm-{}-of-{}.pkl'.format(i, NUM_FOLDS)) for i in range(NUM_FOLDS)]):
print("LOADING CACHED DATASET", flush=True)
if mode == 'val':
with open('lm-{}-of-{}.pkl'.format(fold, NUM_FOLDS), 'rb') as f:
print("Loading split{} for {}".format(fold, mode))
instances = pkl.load(f)
else:
instances = []
for other_fold in range(NUM_FOLDS):
if other_fold != fold:
with open('lm-{}-of-{}.pkl'.format(other_fold, NUM_FOLDS), 'rb') as f:
print("Loading split{} for {}".format(other_fold, mode))
instances += pkl.load(f)
return instances, vocab
print("MAKING THE DATASET", flush=True)
assert fold is None
for item in tqdm(lm_data):
item['sentences_tokenized'] = [[st.orth_ for st in spacy_model(sent)] for sent in item['sentences']]
def _to_instances(data):
# flatten this
instances = []
for item in data:
for s1, s2 in pairwise(item['sentences_tokenized']):
instances.append((
Instance({'story': TextField([Token(x) for x in ['@@bos@@'] + s1 + s2 + ['@@eos@@']],
token_indexers={
'tokens': SingleIdTokenIndexer(namespace='tokens',
lowercase_tokens=True)})}),
s1,
s2,
item,
))
return instances
random.seed(123456)
random.shuffle(lm_data)
all_sets = []
for fold_ in range(NUM_FOLDS):
val_set = _to_instances(lm_data[len(lm_data) * fold_ // NUM_FOLDS:len(lm_data) * (fold_ + 1) // NUM_FOLDS])
with open('lm-{}-of-{}.pkl'.format(fold_, NUM_FOLDS), 'wb') as f:
pkl.dump(val_set, f)
all_sets.extend(val_set)
return all_sets, vocab
class RawPassages(Dataset):
def __init__(self, fold, mode):
self.mode = mode
self.fold = fold
self.instances, self.vocab = load_lm_data(fold=self.fold, mode=self.mode)
self.dataloader = DataLoader(dataset=self, batch_size=32,
shuffle=self.mode == 'train', num_workers=0,
collate_fn=self.collate, drop_last=self.mode == 'train')
self.indexer = ELMoTokenCharactersIndexer()
def collate(self, instances_l):
batch = Batch([x[0] for x in instances_l])
batch.index_instances(self.vocab)
batch_dict = {k: v['tokens'] for k, v in batch.as_tensor_dict().items()}
batch_dict['story_tokens'] = [instance[0].fields['story'].tokens for instance in instances_l]
batch_dict['story_full'] = [x[1] + x[2] for x in instances_l]
batch_dict['items'] = [x[3] for x in instances_l]
return batch_dict
def __len__(self):
return len(self.instances)
def __getitem__(self, index):
"""
:param index:
        :return: the (Instance, s1_tokens, s2_tokens, item) tuple stored at this index.
"""
return self.instances[index]
@classmethod
def splits(cls, fold):
return cls(fold, mode='train'), cls(fold, mode='val')
if __name__ == '__main__':
instances, vocab = load_lm_data()
# train, val = RawPassages.splits()
# for item in train.dataloader:
# for story in item['story_tokens']:
# tok_text = [x.text.lower() for x in story]
# remapped_text = [vocab.get_token_from_index(vocab.get_token_index(x)) for x in tok_text]
# print('({}) {} -> {}'.format('D' if tok_text != remapped_text else ' ',
# ' '.join(tok_text), ' '.join(remapped_text)), flush=True)
|
sound_play/scripts/test/test_sound_client.py | iory/audio_common | 742 | 33819 | <filename>sound_play/scripts/test/test_sound_client.py
#!/usr/bin/env python
import unittest
import rospy
import rostest
from sound_play.libsoundplay import SoundClient
class TestCase(unittest.TestCase):
def test_soundclient_constructor(self):
s = SoundClient()
self.assertIsNotNone(s)
if __name__ == '__main__':
rostest.rosrun('sound_play', 'test_sound_client', TestCase)
__author__ = '<NAME>'
|
rdkit/DataStructs/UnitTestBitEnsemble.py | kazuyaujihara/rdkit | 1,609 | 33837 | <gh_stars>1000+
# $Id$
#
# Copyright (C) 2003-2006 <NAME> and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" unit testing code for BitEnsembles
"""
import os
import shutil
import tempfile
import unittest
from rdkit import RDConfig
from rdkit.DataStructs import SparseBitVect
# This import is important to initialize the BitEnsemble module
from rdkit.DataStructs import BitEnsembleDb
from rdkit.DataStructs.BitEnsemble import BitEnsemble
class TestCase(unittest.TestCase):
def test1(self):
ensemble = BitEnsemble()
ensemble.SetBits([1, 11, 21, 31])
self.assertEqual(ensemble.GetNumBits(), 4)
bv = SparseBitVect(100)
bv.SetBit(1)
bv.SetBit(11)
bv.SetBit(13)
score = ensemble.ScoreWithOnBits(bv)
assert score == 2, 'bad score: %d' % (score)
score = ensemble.ScoreWithIndex(bv)
assert score == 2, 'bad score: %d' % (score)
def test2(self):
ensemble = BitEnsemble([1, 11, 21, 31])
bv = SparseBitVect(100)
bv.SetBit(1)
bv.SetBit(11)
bv.SetBit(13)
score = ensemble.ScoreWithOnBits(bv)
assert score == 2, 'bad score: %d' % (score)
score = ensemble.ScoreWithIndex(bv)
assert score == 2, 'bad score: %d' % (score)
def test3(self):
ensemble = BitEnsemble()
for bit in [1, 11, 21, 31]:
ensemble.AddBit(bit)
bv = SparseBitVect(100)
bv.SetBit(1)
bv.SetBit(11)
bv.SetBit(13)
score = ensemble.ScoreWithOnBits(bv)
assert score == 2, 'bad score: %d' % (score)
score = ensemble.ScoreWithIndex(bv)
assert score == 2, 'bad score: %d' % (score)
def _setupDb(self):
from rdkit.Dbase.DbConnection import DbConnect
fName = RDConfig.RDTestDatabase
if RDConfig.useSqlLite:
_, tempName = tempfile.mkstemp(suffix='sqlt')
self.tempDbName = tempName
shutil.copyfile(fName, tempName)
else: # pragma: nocover
tempName = '::RDTests'
self.conn = DbConnect(tempName)
self.dbTblName = 'bit_ensemble_test'
return self.conn
def tearDown(self):
if hasattr(self, 'tempDbName') and RDConfig.useSqlLite and os.path.exists(self.tempDbName):
try:
os.unlink(self.tempDbName)
except: # pragma: nocover
import traceback
traceback.print_exc()
def testdb1(self):
""" test the sig - db functionality """
conn = self._setupDb()
ensemble = BitEnsemble()
for bit in [1, 3, 4]:
ensemble.AddBit(bit)
sigBs = [([0, 0, 0, 0, 0, 0], (0, 0, 0)),
([0, 1, 0, 1, 0, 0], (1, 1, 0)),
([0, 1, 0, 0, 1, 0], (1, 0, 1)),
([0, 1, 0, 0, 1, 1], (1, 0, 1)), ]
ensemble.InitScoreTable(conn, self.dbTblName)
for bs, tgt in sigBs:
ensemble.ScoreToDb(bs, conn)
conn.Commit()
d = conn.GetData(table=self.dbTblName)
assert len(d) == len(sigBs), 'bad number of results returned'
for i in range(len(sigBs)):
bs, tgt = tuple(sigBs[i])
dbRes = tuple(d[i])
assert dbRes == tgt, 'bad bits returned: %s != %s' % (str(dbRes), str(tgt))
d = None
self.conn = None
def testdb2(self):
""" test the sig - db functionality """
conn = self._setupDb()
ensemble = BitEnsemble()
for bit in [1, 3, 4]:
ensemble.AddBit(bit)
sigBs = [([0, 0, 0, 0, 0, 0], (0, 0, 0)),
([0, 1, 0, 1, 0, 0], (1, 1, 0)),
([0, 1, 0, 0, 1, 0], (1, 0, 1)),
([0, 1, 0, 0, 1, 1], (1, 0, 1)), ]
ensemble.InitScoreTable(conn, self.dbTblName, idInfo='id varchar(10)', actInfo='act int')
for bs, tgt in sigBs:
ensemble.ScoreToDb(bs, conn, id='foo', act=1)
conn.Commit()
d = conn.GetData(table=self.dbTblName)
assert len(d) == len(sigBs), 'bad number of results returned'
for i in range(len(sigBs)):
bs, tgt = tuple(sigBs[i])
dbRes = tuple(d[i])
assert dbRes[1:-1] == tgt, 'bad bits returned: %s != %s' % (str(dbRes[1:-1]), str(tgt))
d = None
self.conn = None
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
survae/tests/transforms/bijections/conditional/coupling/coupling_mixtures.py | alisiahkoohi/survae_flows | 262 | 33857 | <filename>survae/tests/transforms/bijections/conditional/coupling/coupling_mixtures.py
import numpy as np
import torch
import torch.nn as nn
import torchtestcase
import unittest
from survae.transforms.bijections.conditional.coupling import *
from survae.nn.layers import ElementwiseParams, ElementwiseParams2d, scale_fn
from survae.tests.transforms.bijections.conditional import ConditionalBijectionTest
class ConditionalGaussianMixtureCouplingBijectionTest(ConditionalBijectionTest):
def test_bijection_is_well_behaved(self):
num_mix = 8
batch_size = 10
elementwise_params = 3 * num_mix
self.eps = 5e-5
for shape in [(6,),
(6,4,4)]:
for num_condition in [None, 1]:
with self.subTest(shape=shape, num_condition=num_condition):
x = torch.randn(batch_size, *shape)
context = torch.randn(batch_size, *shape)
if num_condition is None:
if len(shape) == 1: net = nn.Sequential(nn.Linear(3+6,3*elementwise_params), ElementwiseParams(elementwise_params))
if len(shape) == 3: net = nn.Sequential(nn.Conv2d(3+6,3*elementwise_params, kernel_size=3, padding=1), ElementwiseParams2d(elementwise_params))
else:
if len(shape) == 1: net = nn.Sequential(nn.Linear(1+6,5*elementwise_params), ElementwiseParams(elementwise_params))
if len(shape) == 3: net = nn.Sequential(nn.Conv2d(1+6,5*elementwise_params, kernel_size=3, padding=1), ElementwiseParams2d(elementwise_params))
bijection = ConditionalGaussianMixtureCouplingBijection(net, num_mixtures=num_mix, num_condition=num_condition)
self.assert_bijection_is_well_behaved(bijection, x, context, z_shape=(batch_size, *shape))
z, _ = bijection.forward(x, context=context)
if num_condition is None:
self.assertEqual(x[:,:3], z[:,:3])
else:
self.assertEqual(x[:,:1], z[:,:1])
class ConditionalLogisticMixtureCouplingBijectionTest(ConditionalBijectionTest):
def test_bijection_is_well_behaved(self):
num_mix = 8
batch_size = 10
elementwise_params = 3 * num_mix
self.eps = 5e-5
for shape in [(6,),
(6,4,4)]:
for num_condition in [None, 1]:
with self.subTest(shape=shape, num_condition=num_condition):
x = torch.randn(batch_size, *shape)
context = torch.randn(batch_size, *shape)
if num_condition is None:
if len(shape) == 1: net = nn.Sequential(nn.Linear(3+6,3*elementwise_params), ElementwiseParams(elementwise_params))
if len(shape) == 3: net = nn.Sequential(nn.Conv2d(3+6,3*elementwise_params, kernel_size=3, padding=1), ElementwiseParams2d(elementwise_params))
else:
if len(shape) == 1: net = nn.Sequential(nn.Linear(1+6,5*elementwise_params), ElementwiseParams(elementwise_params))
if len(shape) == 3: net = nn.Sequential(nn.Conv2d(1+6,5*elementwise_params, kernel_size=3, padding=1), ElementwiseParams2d(elementwise_params))
bijection = ConditionalLogisticMixtureCouplingBijection(net, num_mixtures=num_mix, num_condition=num_condition)
self.assert_bijection_is_well_behaved(bijection, x, context, z_shape=(batch_size, *shape))
z, _ = bijection.forward(x, context=context)
if num_condition is None:
self.assertEqual(x[:,:3], z[:,:3])
else:
self.assertEqual(x[:,:1], z[:,:1])
class ConditionalCensoredLogisticMixtureCouplingBijectionTest(ConditionalBijectionTest):
def test_bijection_is_well_behaved(self):
num_bins = 16
num_mix = 8
batch_size = 10
elementwise_params = 3 * num_mix
self.eps = 1e-6
for shape in [(6,),
(6,4,4)]:
for num_condition in [None, 1]:
with self.subTest(shape=shape, num_condition=num_condition):
x = torch.rand(batch_size, *shape)
context = torch.randn(batch_size, *shape)
if num_condition is None:
if len(shape) == 1: net = nn.Sequential(nn.Linear(3+6,3*elementwise_params), ElementwiseParams(elementwise_params))
if len(shape) == 3: net = nn.Sequential(nn.Conv2d(3+6,3*elementwise_params, kernel_size=3, padding=1), ElementwiseParams2d(elementwise_params))
else:
if len(shape) == 1: net = nn.Sequential(nn.Linear(1+6,5*elementwise_params), ElementwiseParams(elementwise_params))
if len(shape) == 3: net = nn.Sequential(nn.Conv2d(1+6,5*elementwise_params, kernel_size=3, padding=1), ElementwiseParams2d(elementwise_params))
bijection = ConditionalCensoredLogisticMixtureCouplingBijection(net, num_mixtures=num_mix, num_bins=num_bins, num_condition=num_condition)
self.assert_bijection_is_well_behaved(bijection, x, context, z_shape=(batch_size, *shape))
z, _ = bijection.forward(x, context=context)
if num_condition is None:
self.assertEqual(x[:,:3], z[:,:3])
else:
self.assertEqual(x[:,:1], z[:,:1])
if __name__ == '__main__':
unittest.main()
|
utils/generate-sha256.py | dskrvk/anteater | 177 | 33869 | ##############################################################################
# Copyright (c) 2017 <NAME> <<EMAIL>>, Red Hat
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# python generate-sha256.py --project /home/user/opnfv/infra
# output made to working directory, file `output.yaml`
import os
import sys
import hashlib
import argparse
from binaryornot.check import is_binary
hasher = hashlib.sha256()
parser = argparse.ArgumentParser()
parser.add_argument('--project', help="Full path to project folder",
required=True)
args = parser.parse_args()
ignore_dirs = ['.git']
sys.stdout = open('output.yaml', 'w')
print("binaries:")
for root, dirs, files in os.walk(args.project):
dirs[:] = [d for d in dirs if d not in ignore_dirs]
for file in files:
full_path = os.path.join(root, file)
        if is_binary(full_path):
            hasher = hashlib.sha256()  # hash each binary separately so sums do not accumulate across files
            with open(full_path, 'rb') as afile:
buf = afile.read()
hasher.update(buf)
split_path = full_path.split(args.project + '/', 1)[-1]
print(" {}:".format(split_path))
sum = hasher.hexdigest()
print(" - {}".format(sum))
|
cli4/__main__.py | pygrigori/python-cloudflare | 465 | 33875 | #!/usr/bin/env python
"""Cloudflare API via command line"""
from __future__ import absolute_import
import sys
from .cli4 import cli4
def main(args=None):
"""Cloudflare API via command line"""
if args is None:
args = sys.argv[1:]
cli4(args)
if __name__ == '__main__':
    main()
# A typical invocation, assuming the installed `cli4` console script; /zones is
# just an example API path:
#
#     cli4 /zones
|
uq360/algorithms/blackbox_metamodel/metamodel_regression.py | Sclare87/UQ360 | 148 | 33877 | import inspect
from collections import namedtuple
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import train_test_split
from sklearn.exceptions import NotFittedError
from uq360.algorithms.posthocuq import PostHocUQ
class MetamodelRegression(PostHocUQ):
""" Extracts confidence scores from black-box regression models using a meta-model [2]_ .
References:
.. [2] Chen, Tongfei, et al. Confidence scoring using whitebox meta-models with linear classifier probes.
The 22nd International Conference on Artificial Intelligence and Statistics. PMLR, 2019.
"""
def _create_named_model(self, mdltype, config):
"""
Instantiates a model by name passed in 'mdltype'
        :param mdltype: string with name (must be supported)
:param config: dict with args passed in the instantiation call
:return: mdl instance
"""
assert (isinstance(mdltype, str))
if mdltype == 'gbr':
mdl = GradientBoostingRegressor(**config)
else:
raise NotImplementedError("ERROR: Requested model type unknown: \"%s\"" % mdltype)
return mdl
def _get_model_instance(self, model, config):
"""
Returns an instance of a model based on (a) a desired name or (b) passed in class, or
(c) passed in instance
:param model: string, class, or instance. Class and instance must have certain methods callable.
:param config: dict with args passed in during the instantiation
:return: model instance
"""
assert (model is not None and config is not None)
if isinstance(model, str): # 'model' is a name, create it
mdl = self._create_named_model(model, config)
elif inspect.isclass(model): # 'model' is a class, instantiate it
mdl = model(**config)
else: # 'model' is an instance, register it
mdl = model
if not all([hasattr(mdl, key) and callable(getattr(mdl, key)) for key in self.callable_keys]):
raise ValueError("ERROR: Passed model/method failed the interface test. Methods required: %s" %
','.join(self.callable_keys))
return mdl
def __init__(self, base_model=None, meta_model=None, base_config=None, meta_config=None, random_seed=42):
"""
:param base_model: Base model. Can be:
(1) None (default mdl will be set up),
(2) Named model (e.g., 'gbr'),
(3) Base model class declaration (e.g., sklearn.linear_model.LinearRegressor). Will instantiate.
(4) Model instance (instantiated outside). Will be re-used. Must have required callable methods.
Note: user-supplied classes and models must have certain callable methods ('predict', 'fit')
and be capable of raising NotFittedError.
:param meta_model: Meta model. Same values possible as with 'base_model'
:param base_config: None or a params dict to be passed to 'base_model' at instantiation
:param meta_config: None or a params dict to be passed to 'meta_model' at instantiation
:param random_seed: seed used in the various pipeline steps
"""
        super(MetamodelRegression, self).__init__()
self.random_seed = random_seed
self.callable_keys = ['predict', 'fit'] # required methods - must be present in models passed in
self.base_model_default = 'gbr'
self.meta_model_default = 'gbr'
self.base_config_default = {'loss': 'ls', 'n_estimators': 300, 'max_depth': 10, 'learning_rate': 0.001,
'min_samples_leaf': 10, 'min_samples_split': 10, 'random_state': self.random_seed}
self.meta_config_default = {'loss': 'quantile', 'alpha': 0.95, 'n_estimators': 300, 'max_depth': 10,
'learning_rate': 0.001, 'min_samples_leaf': 10, 'min_samples_split': 10,
'random_state': self.random_seed}
self.base_config = base_config if base_config is not None else self.base_config_default
self.meta_config = meta_config if meta_config is not None else self.meta_config_default
self.base_model = None
self.meta_model = None
self.base_model = self._get_model_instance(base_model if base_model is not None else self.base_model_default,
self.base_config)
self.meta_model = self._get_model_instance(meta_model if meta_model is not None else self.meta_model_default,
self.meta_config)
def get_params(self, deep=True):
return {"base_model": self.base_model, "meta_model": self.meta_model, "base_config": self.base_config,
"meta_config": self.meta_config, "random_seed": self.random_seed}
def fit(self, X, y, meta_fraction=0.2, randomize_samples=True, base_is_prefitted=False,
meta_train_data=(None, None)):
"""
Fit base and meta models.
:param X: input to the base model
:param y: ground truth for the base model
:param meta_fraction: float in [0,1] - a fractional size of the partition carved out to train the meta model
(complement will be used to train the base model)
:param randomize_samples: use shuffling when creating partitions
        :param base_is_prefitted: Setting True will skip fitting the base model (useful for base models that have
            been instantiated outside/by the user and are already fitted).
:param meta_train_data: User supplied data to train the meta model. Note that this option should only be used
with 'base_is_prefitted'==True. Pass a tuple meta_train_data=(X_meta, y_meta) to activate.
Note that (X,y,meta_fraction, randomize_samples) will be ignored in this mode.
:return: self
"""
X = np.asarray(X)
y = np.asarray(y)
assert(len(meta_train_data)==2)
if meta_train_data[0] is None:
X_base, X_meta, y_base, y_meta = train_test_split(X, y, shuffle=randomize_samples, test_size=meta_fraction,
random_state=self.random_seed)
else:
if not base_is_prefitted:
raise ValueError("ERROR: fit(): base model must be pre-fitted to use the 'meta_train_data' option")
X_base = y_base = None
X_meta = meta_train_data[0]
y_meta = meta_train_data[1]
# fit the base model
if not base_is_prefitted:
self.base_model.fit(X_base, y_base)
# get input for the meta model from the base
try:
y_hat_meta = self.base_model.predict(X_meta)
except NotFittedError as e:
raise RuntimeError("ERROR: fit(): The base model appears not pre-fitted (%s)" % repr(e))
# used base input and output as meta input
X_meta_in = self._process_pretrained_model(X_meta, y_hat_meta)
# train meta model to predict abs diff
self.meta_model.fit(X_meta_in, np.abs(y_hat_meta - y_meta))
return self
def _process_pretrained_model(self, X, y_hat):
"""
Given the original input features and the base output probabilities, generate input features
to train a meta model. Current implementation copies all input features and appends.
:param X: numpy [nsamples, dim]
:param y_hat: [nsamples,]
:return: array with new features [nsamples, newdim]
"""
y_hat_meta_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat
X_meta_in = np.hstack([X, y_hat_meta_prime])
return X_meta_in
def predict(self, X):
"""
Generate prediction and uncertainty bounds for data X.
:param X: input features
:return: namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
y_hat = self.base_model.predict(X)
y_hat_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat
X_meta_in = np.hstack([X, y_hat_prime])
z_hat = self.meta_model.predict(X_meta_in)
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_hat, y_hat - z_hat, y_hat + z_hat)
return res
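# A minimal usage sketch on synthetic data, assuming the default 'gbr' base and
# quantile meta models; the shapes, noise level and meta_fraction below are
# illustrative choices, not values prescribed by UQ360.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.uniform(-1.0, 1.0, size=(400, 4))          # synthetic features
    y_demo = X_demo.sum(axis=1) + 0.1 * rng.randn(400)      # noisy linear target
    uq = MetamodelRegression()                               # default base + meta models
    uq.fit(X_demo, y_demo, meta_fraction=0.2)                # 20% of data trains the meta model
    res = uq.predict(X_demo[:5])
    print(res.y_mean, res.y_lower, res.y_upper)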
|
es_sink/es_sink/flushing_buffer.py | avmi/community | 305 | 33884 | <reponame>avmi/community<gh_stars>100-1000
'''
Copyright 2020, Amazon Web Services Inc.
This code is licensed under MIT license (see LICENSE.txt for details)
Python 3
Provides a buffer object that holds log lines in Elasticsearch _bulk
format. As each line is added, the buffer stores the control line
as well as the log line.
Employs a line_buffer to hold log lines as they are added. Optionally
sends monitor information to an ES cluster. Set the flush_trigger to
control how many lines are buffered before each flush.
'''
import time
from es_sink.descriptor import ESDescriptor, SQSDescriptor
from es_sink.line_buffer import ESLineBuffer, SQSLineBuffer
from es_sink.es_transport import ESTransport
from es_sink.sqs_transport import SQSTransport
from es_sink.transport_exceptions import BadSink
class FlushingESBuffer():
'''Wraps an ESLineBuffer object to provide _bulk flushing when the
flush_trigger is hit.'''
def __init__(self, descriptor, flush_trigger=1):
        ''' descriptor must be an ESDescriptor'''
self.transport = ESTransport(descriptor)
self.target_descriptor = descriptor
self.flush_trigger = flush_trigger
self.buffer = ESLineBuffer(descriptor)
def add_log_line(self, log_line):
'''Add a single log line to the internal buffer. If the flush trigger
is hit, send the bulk request.'''
self.buffer.add_log_line(log_line)
if self.buffer.es_doc_count() >= self.flush_trigger:
self.flush() # swallows the result. Do something with it?
def flush(self):
'''Flushes the line_buffer, sending all to the _bulk API'''
if self.buffer.es_doc_count() > 0:
try:
url = self.target_descriptor.bulk_url()
print("Flushing {} documents {} to {}".format(
self.buffer.es_doc_count(),
time.time(),
url))
result = self.transport.send('post', url, body=str(self.buffer))
result = result._asdict()
result['docs'] = self.buffer.es_doc_count()
self.buffer.clear()
return result
except Exception as exc:
message = "Exception sending request '{}'"
print(message.format(str(exc)))
raise exc
return None
class FlushingSQSBuffer():
'''Use to send ES _bulk data to SQS in batches.'''
def __init__(self, descriptor, flush_trigger=1):
self.target_descriptor = descriptor
self.flush_trigger = flush_trigger
self.transport = SQSTransport(descriptor)
self.buffer = SQSLineBuffer()
def add_log_line(self, line):
'''Add a single log line to the internal buffer. If the flush trigger
is hit, send the bulk request.'''
self.buffer.add_log_line(line)
if self.buffer.es_doc_count() >= self.flush_trigger:
self.flush() # swallows the result. Do something with it?
def flush(self):
'''Flushes the line_buffer, sending all to the _bulk API'''
print("Flushing {} documents {}".format(self.buffer.es_doc_count(),
time.time()))
if self.buffer.es_doc_count() > 0:
result = self.transport.send(str(self.buffer))
result = result._asdict()
result['docs'] = self.buffer.es_doc_count()
self.buffer.clear()
print(result)
return result
return None
def flushing_buffer_factory(descriptor, flush_trigger=1):
'''Call with a descriptor to receive a buffer object.'''
if isinstance(descriptor, ESDescriptor):
return FlushingESBuffer(descriptor, flush_trigger)
if isinstance(descriptor, SQSDescriptor):
return FlushingSQSBuffer(descriptor, flush_trigger)
raise BadSink()
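# A minimal usage sketch of the factory above. How the descriptor itself is
# configured depends on es_sink.descriptor and is only assumed here.
#
#     descriptor = ...  # an ESDescriptor or SQSDescriptor from es_sink.descriptor
#     buffer = flushing_buffer_factory(descriptor, flush_trigger=100)
#     for line in log_lines:
#         buffer.add_log_line(line)  # triggers a _bulk flush every 100 docs
#     buffer.flush()                 # push whatever is still buffered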
|
matchzoo/utils/early_stopping.py | ChrisRBXiong/MatchZoo-py | 468 | 33888 | <gh_stars>100-1000
"""Early stopping."""
import typing
import torch
import numpy as np
class EarlyStopping:
"""
EarlyStopping stops training if no improvement after a given patience.
    :param patience: Number of epochs to wait without improvement before
        stopping the training.
    :param should_decrease: Whether a smaller metric value should be judged as better.
:param key: Key of metric to be compared.
"""
def __init__(
self,
patience: typing.Optional[int] = None,
should_decrease: bool = None,
key: typing.Any = None
):
"""Early stopping Constructor."""
self._patience = patience
self._key = key
self._best_so_far = 0
self._epochs_with_no_improvement = 0
self._is_best_so_far = False
self._early_stop = False
def state_dict(self) -> typing.Dict[str, typing.Any]:
"""A `Trainer` can use this to serialize the state."""
return {
'patience': self._patience,
'best_so_far': self._best_so_far,
'is_best_so_far': self._is_best_so_far,
'epochs_with_no_improvement': self._epochs_with_no_improvement,
}
def load_state_dict(
self,
state_dict: typing.Dict[str, typing.Any]
) -> None:
"""Hydrate a early stopping from a serialized state."""
self._patience = state_dict["patience"]
self._is_best_so_far = state_dict["is_best_so_far"]
self._best_so_far = state_dict["best_so_far"]
self._epochs_with_no_improvement = \
state_dict["epochs_with_no_improvement"]
def update(self, result: list):
"""Call function."""
score = result[self._key]
if score > self._best_so_far:
self._best_so_far = score
self._is_best_so_far = True
self._epochs_with_no_improvement = 0
else:
self._is_best_so_far = False
self._epochs_with_no_improvement += 1
@property
def best_so_far(self) -> bool:
"""Returns best so far."""
return self._best_so_far
@property
def is_best_so_far(self) -> bool:
"""Returns true if it is the best so far."""
return self._is_best_so_far
@property
def should_stop_early(self) -> bool:
"""Returns true if improvement has stopped for long enough."""
if not self._patience:
return False
else:
return self._epochs_with_no_improvement >= self._patience
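# A minimal usage sketch, assuming a metric dict keyed by 'map'; the patience
# value and scores below are made up purely to illustrate the loop.
if __name__ == '__main__':
    stopper = EarlyStopping(patience=2, key='map')
    for epoch, score in enumerate([0.51, 0.55, 0.54, 0.53, 0.52]):
        stopper.update({'map': score})
        print(epoch, stopper.best_so_far, stopper.is_best_so_far)
        if stopper.should_stop_early:
            print('no improvement for 2 epochs, stopping at epoch', epoch)
            break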
|
moderngl_window/timers/base.py | DavideRuzza/moderngl-window | 142 | 33900 | from typing import Tuple
class BaseTimer:
"""
    A timer controls the time passed into the render function.
    This can be used in creative ways to control the current time,
    such as basing it on the current location in an audio file.
All methods must be implemented.
"""
@property
def is_paused(self) -> bool:
"""bool: The pause state of the timer"""
raise NotImplementedError()
@property
def is_running(self) -> bool:
"""bool: Is the timer currently running?"""
raise NotImplementedError()
@property
def time(self) -> float:
"""Get or set the current time.
This can be used to jump around in the timeline.
Returns:
float: The current time in seconds
"""
raise NotImplementedError()
@time.setter
def time(self, value: float):
raise NotImplementedError()
def next_frame(self) -> Tuple[float, float]:
"""Get timer information for the next frame.
Returns:
Tuple[float, float]: The frametime and current time
"""
raise NotImplementedError()
def start(self):
"""Start the timer initially or resume after pause"""
raise NotImplementedError()
def pause(self):
"""Pause the timer"""
raise NotImplementedError()
def toggle_pause(self):
"""Toggle pause state"""
raise NotImplementedError()
def stop(self) -> Tuple[float, float]:
"""
Stop the timer. Should only be called once when stopping the timer.
Returns:
            Tuple[float, float]: Current position in the timer, actual running duration
"""
raise NotImplementedError()
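# A minimal concrete sketch built on time.perf_counter() to show how the
# interface above can be implemented; moderngl-window ships its own timer
# classes, so this subclass is purely illustrative.
import time as _time
class SketchClockTimer(BaseTimer):
    """Wall-clock timer with pause/resume support."""
    def __init__(self):
        self._offset = 0.0        # accumulated time before the latest (re)start
        self._start_time = None   # perf_counter value at the latest (re)start
        self._pause_time = None   # perf_counter value when paused, else None
        self._last_frame = 0.0    # time reported by the previous next_frame()
        self._running = False
    @property
    def is_paused(self) -> bool:
        return self._pause_time is not None
    @property
    def is_running(self) -> bool:
        return self._running and self._pause_time is None
    @property
    def time(self) -> float:
        if self._start_time is None:
            return self._offset
        reference = self._pause_time if self._pause_time is not None else _time.perf_counter()
        return self._offset + (reference - self._start_time)
    @time.setter
    def time(self, value: float):
        self._offset = value
        self._start_time = _time.perf_counter()
        if self._pause_time is not None:
            self._pause_time = self._start_time
    def next_frame(self) -> Tuple[float, float]:
        current = self.time
        frametime, self._last_frame = current - self._last_frame, current
        return frametime, current
    def start(self):
        if self._pause_time is not None:        # resume: bank the paused-at position
            self._offset += self._pause_time - self._start_time
            self._pause_time = None
        elif self._start_time is not None:      # restart while running: keep position
            self._offset += _time.perf_counter() - self._start_time
        self._start_time = _time.perf_counter()
        self._running = True
    def pause(self):
        if self._pause_time is None:
            self._pause_time = _time.perf_counter()
    def toggle_pause(self):
        if self.is_paused:
            self.start()
        else:
            self.pause()
    def stop(self) -> Tuple[float, float]:
        final = self.time                       # position and duration coincide here
        self._running = False
        return final, final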
|
alipay/aop/api/domain/AlipayOverseasRemitFundInitializeModel.py | antopen/alipay-sdk-python-all | 213 | 33904 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOverseasRemitFundInitializeModel(object):
def __init__(self):
self._bc_remit_id = None
self._compliance_mid = None
self._extend_info = None
self._quote_route_info = None
self._receiver_amount = None
self._receiver_currency = None
self._receiver_info = None
self._receiver_mid = None
self._remark = None
self._remit_purpose = None
self._send_date = None
self._sender_address = None
self._sender_amount = None
self._sender_currency = None
self._sender_id = None
self._sender_info = None
self._sender_mid = None
self._sender_nationality = None
self._trans_currency = None
@property
def bc_remit_id(self):
return self._bc_remit_id
@bc_remit_id.setter
def bc_remit_id(self, value):
self._bc_remit_id = value
@property
def compliance_mid(self):
return self._compliance_mid
@compliance_mid.setter
def compliance_mid(self, value):
self._compliance_mid = value
@property
def extend_info(self):
return self._extend_info
@extend_info.setter
def extend_info(self, value):
self._extend_info = value
@property
def quote_route_info(self):
return self._quote_route_info
@quote_route_info.setter
def quote_route_info(self, value):
self._quote_route_info = value
@property
def receiver_amount(self):
return self._receiver_amount
@receiver_amount.setter
def receiver_amount(self, value):
self._receiver_amount = value
@property
def receiver_currency(self):
return self._receiver_currency
@receiver_currency.setter
def receiver_currency(self, value):
self._receiver_currency = value
@property
def receiver_info(self):
return self._receiver_info
@receiver_info.setter
def receiver_info(self, value):
self._receiver_info = value
@property
def receiver_mid(self):
return self._receiver_mid
@receiver_mid.setter
def receiver_mid(self, value):
self._receiver_mid = value
@property
def remark(self):
return self._remark
@remark.setter
def remark(self, value):
self._remark = value
@property
def remit_purpose(self):
return self._remit_purpose
@remit_purpose.setter
def remit_purpose(self, value):
self._remit_purpose = value
@property
def send_date(self):
return self._send_date
@send_date.setter
def send_date(self, value):
self._send_date = value
@property
def sender_address(self):
return self._sender_address
@sender_address.setter
def sender_address(self, value):
self._sender_address = value
@property
def sender_amount(self):
return self._sender_amount
@sender_amount.setter
def sender_amount(self, value):
self._sender_amount = value
@property
def sender_currency(self):
return self._sender_currency
@sender_currency.setter
def sender_currency(self, value):
self._sender_currency = value
@property
def sender_id(self):
return self._sender_id
@sender_id.setter
def sender_id(self, value):
self._sender_id = value
@property
def sender_info(self):
return self._sender_info
@sender_info.setter
def sender_info(self, value):
self._sender_info = value
@property
def sender_mid(self):
return self._sender_mid
@sender_mid.setter
def sender_mid(self, value):
self._sender_mid = value
@property
def sender_nationality(self):
return self._sender_nationality
@sender_nationality.setter
def sender_nationality(self, value):
self._sender_nationality = value
@property
def trans_currency(self):
return self._trans_currency
@trans_currency.setter
def trans_currency(self, value):
self._trans_currency = value
def to_alipay_dict(self):
params = dict()
if self.bc_remit_id:
if hasattr(self.bc_remit_id, 'to_alipay_dict'):
params['bc_remit_id'] = self.bc_remit_id.to_alipay_dict()
else:
params['bc_remit_id'] = self.bc_remit_id
if self.compliance_mid:
if hasattr(self.compliance_mid, 'to_alipay_dict'):
params['compliance_mid'] = self.compliance_mid.to_alipay_dict()
else:
params['compliance_mid'] = self.compliance_mid
if self.extend_info:
if hasattr(self.extend_info, 'to_alipay_dict'):
params['extend_info'] = self.extend_info.to_alipay_dict()
else:
params['extend_info'] = self.extend_info
if self.quote_route_info:
if hasattr(self.quote_route_info, 'to_alipay_dict'):
params['quote_route_info'] = self.quote_route_info.to_alipay_dict()
else:
params['quote_route_info'] = self.quote_route_info
if self.receiver_amount:
if hasattr(self.receiver_amount, 'to_alipay_dict'):
params['receiver_amount'] = self.receiver_amount.to_alipay_dict()
else:
params['receiver_amount'] = self.receiver_amount
if self.receiver_currency:
if hasattr(self.receiver_currency, 'to_alipay_dict'):
params['receiver_currency'] = self.receiver_currency.to_alipay_dict()
else:
params['receiver_currency'] = self.receiver_currency
if self.receiver_info:
if hasattr(self.receiver_info, 'to_alipay_dict'):
params['receiver_info'] = self.receiver_info.to_alipay_dict()
else:
params['receiver_info'] = self.receiver_info
if self.receiver_mid:
if hasattr(self.receiver_mid, 'to_alipay_dict'):
params['receiver_mid'] = self.receiver_mid.to_alipay_dict()
else:
params['receiver_mid'] = self.receiver_mid
if self.remark:
if hasattr(self.remark, 'to_alipay_dict'):
params['remark'] = self.remark.to_alipay_dict()
else:
params['remark'] = self.remark
if self.remit_purpose:
if hasattr(self.remit_purpose, 'to_alipay_dict'):
params['remit_purpose'] = self.remit_purpose.to_alipay_dict()
else:
params['remit_purpose'] = self.remit_purpose
if self.send_date:
if hasattr(self.send_date, 'to_alipay_dict'):
params['send_date'] = self.send_date.to_alipay_dict()
else:
params['send_date'] = self.send_date
if self.sender_address:
if hasattr(self.sender_address, 'to_alipay_dict'):
params['sender_address'] = self.sender_address.to_alipay_dict()
else:
params['sender_address'] = self.sender_address
if self.sender_amount:
if hasattr(self.sender_amount, 'to_alipay_dict'):
params['sender_amount'] = self.sender_amount.to_alipay_dict()
else:
params['sender_amount'] = self.sender_amount
if self.sender_currency:
if hasattr(self.sender_currency, 'to_alipay_dict'):
params['sender_currency'] = self.sender_currency.to_alipay_dict()
else:
params['sender_currency'] = self.sender_currency
if self.sender_id:
if hasattr(self.sender_id, 'to_alipay_dict'):
params['sender_id'] = self.sender_id.to_alipay_dict()
else:
params['sender_id'] = self.sender_id
if self.sender_info:
if hasattr(self.sender_info, 'to_alipay_dict'):
params['sender_info'] = self.sender_info.to_alipay_dict()
else:
params['sender_info'] = self.sender_info
if self.sender_mid:
if hasattr(self.sender_mid, 'to_alipay_dict'):
params['sender_mid'] = self.sender_mid.to_alipay_dict()
else:
params['sender_mid'] = self.sender_mid
if self.sender_nationality:
if hasattr(self.sender_nationality, 'to_alipay_dict'):
params['sender_nationality'] = self.sender_nationality.to_alipay_dict()
else:
params['sender_nationality'] = self.sender_nationality
if self.trans_currency:
if hasattr(self.trans_currency, 'to_alipay_dict'):
params['trans_currency'] = self.trans_currency.to_alipay_dict()
else:
params['trans_currency'] = self.trans_currency
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOverseasRemitFundInitializeModel()
if 'bc_remit_id' in d:
o.bc_remit_id = d['bc_remit_id']
if 'compliance_mid' in d:
o.compliance_mid = d['compliance_mid']
if 'extend_info' in d:
o.extend_info = d['extend_info']
if 'quote_route_info' in d:
o.quote_route_info = d['quote_route_info']
if 'receiver_amount' in d:
o.receiver_amount = d['receiver_amount']
if 'receiver_currency' in d:
o.receiver_currency = d['receiver_currency']
if 'receiver_info' in d:
o.receiver_info = d['receiver_info']
if 'receiver_mid' in d:
o.receiver_mid = d['receiver_mid']
if 'remark' in d:
o.remark = d['remark']
if 'remit_purpose' in d:
o.remit_purpose = d['remit_purpose']
if 'send_date' in d:
o.send_date = d['send_date']
if 'sender_address' in d:
o.sender_address = d['sender_address']
if 'sender_amount' in d:
o.sender_amount = d['sender_amount']
if 'sender_currency' in d:
o.sender_currency = d['sender_currency']
if 'sender_id' in d:
o.sender_id = d['sender_id']
if 'sender_info' in d:
o.sender_info = d['sender_info']
if 'sender_mid' in d:
o.sender_mid = d['sender_mid']
if 'sender_nationality' in d:
o.sender_nationality = d['sender_nationality']
if 'trans_currency' in d:
o.trans_currency = d['trans_currency']
return o
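# A minimal round-trip sketch through the dict helpers above; the field values
# are placeholders, not real remittance data.
if __name__ == '__main__':
    model = AlipayOverseasRemitFundInitializeModel()
    model.bc_remit_id = '2020010100000001'      # placeholder id
    model.sender_currency = 'USD'
    model.receiver_currency = 'PHP'
    params = model.to_alipay_dict()
    clone = AlipayOverseasRemitFundInitializeModel.from_alipay_dict(params)
    print(json.dumps(params), clone.receiver_currency)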
|
problems/41/problem_41.py | r1cc4rdo/daily_coding_problem | 158 | 33920 | def coding_problem_41(flights_db, starting_airport):
"""
Given an unordered list of flights taken by someone, each represented as (origin, destination) pairs, and a
starting airport, compute the person's itinerary. If no such itinerary exists, return null. If there are multiple
possible itineraries, return the lexicographically smallest one. All flights must be used in the itinerary.
Examples:
>>> coding_problem_41([('SFO', 'HKO'), ('YYZ', 'SFO'), ('YUL', 'YYZ'), ('HKO', 'ORD')], 'YUL')
['YUL', 'YYZ', 'SFO', 'HKO', 'ORD']
>>> coding_problem_41([('SFO', 'COM'), ('COM', 'YYZ')], 'COM') # returns None
>>> coding_problem_41([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'A')], 'A')
['A', 'B', 'C', 'A', 'C']
The itinerary ['A', 'C', 'A', 'B', 'C'] is also a valid however the first one is lexicographically smaller.
"""
    def visit(airport, remaining):
        # Depth-first search over the unused flights, trying destinations in
        # lexicographic order so the first complete itinerary found is the
        # lexicographically smallest one.
        if not remaining:
            return [airport]
        for i in sorted(range(len(remaining)), key=lambda k: remaining[k][1]):
            origin, destination = remaining[i]
            if origin != airport:
                continue
            rest = visit(destination, remaining[:i] + remaining[i + 1:])
            if rest is not None:
                return [airport] + rest
        return None
    return visit(starting_airport, list(flights_db))
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=True)
|
paddlepalm/reader/match.py | baajur/PALM | 136 | 33922 | <filename>paddlepalm/reader/match.py
# -*- coding: UTF-8 -*-
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddlepalm.reader.base_reader import Reader
from paddlepalm.reader.utils.reader4ernie import ClassifyReader as CLSReader
class MatchReader(Reader):
"""
    This reader loads and processes datasets for matching-like tasks (e.g., query-query matching, question answering, text similarity, natural language inference). Supported file format: tsv.
    For the pointwise learning strategy, the training dataset file should contain three fields, i.e., `text_a`, `text_b` and `label`. For pairwise learning, it should contain three fields, i.e., `text_a`, `text_b` and `text_b_neg`. For prediction, only `text_a` and `text_b` are required.
A pointwise learning case shows as follows:
```
label [TAB] text_a [TAB] text_b
1 [TAB] Today is a good day. [TAB] what a nice day!
0 [TAB] Such a terriable day! [TAB] There is a dog.
1 [TAB] I feel lucky to meet you, dear. [TAB] You are my lucky, darling.
1 [TAB] He likes sunshine and I like him :). [TAB] I like him. He like sunshine.
0 [TAB] JUST! GO! OUT! [TAB] Come in please.
```
    A pairwise learning case shows as follows:
    ```
    text_a [TAB] text_b [TAB] text_b_neg
    Today is a good day. [TAB] what a nice day! [TAB] terriable day!
    Such a terriable day! [TAB] So terriable today! [TAB] There is a dog.
    I feel lucky to meet you, dear. [TAB] You are my lucky, darling. [TAB] Buy some bananas, okey?
    He likes sunshine and I like him :). [TAB] I like him. He like sunshine. [TAB] He has a dog.
    JUST! GO! OUT! [TAB] go out now! [TAB] Come in please.
    ```
    CAUTION: the HEADER is required for each dataset file, and fields (columns) should be split by Tab (\\t).
"""
def __init__(self, vocab_path, max_len, tokenizer='wordpiece', lang='en', seed=None, \
do_lower_case=False, learning_strategy='pointwise', phase='train', dev_count=1, print_prefix=''):
"""Create a new Reader for classification task data.
Args:
vocab_path: the vocab file path to do tokenization and token_ids generation.
max_len: The maximum length of the sequence (after word segmentation). The part exceeding max_len will be removed from right.
            tokenizer: string type. The name of the tokenizer to use. A tokenizer converts raw text into tokens. Available tokenizers: wordpiece.
lang: the language of dataset. Supported language: en (English), cn (Chinese). Default is en (English).
seed: int type. The random seed to shuffle dataset. Default is None, means no use of random seed.
do_lower_case: bool type. Whether to do lowercase on English text. Default is False. This argument only works on English text.
learning_strategy: string type. This only works for training phase. Available strategies: pointwise, pairwise.
phase: the running phase of this reader. Supported phase: train, predict. Default is train.
Return:
a Reader object for matching-like task.
"""
Reader.__init__(self, phase)
assert lang.lower() in ['en', 'cn', 'english', 'chinese'], "supported language: en (English), cn (Chinese)."
assert phase in ['train', 'predict'], "supported phase: train, predict."
for_cn = lang.lower() == 'cn' or lang.lower() == 'chinese'
self._register.add('token_ids')
if phase == 'train':
if learning_strategy == 'pointwise':
self._register.add('label_ids')
if learning_strategy == 'pairwise':
self._register.add('token_ids_neg')
self._register.add('position_ids_neg')
self._register.add('segment_ids_neg')
self._register.add('input_mask_neg')
self._register.add('task_ids_neg')
self._is_training = phase == 'train'
self._learning_strategy = learning_strategy
match_reader = CLSReader(vocab_path,
max_seq_len=max_len,
do_lower_case=do_lower_case,
for_cn=for_cn,
random_seed=seed,
learning_strategy = learning_strategy)
self._reader = match_reader
self._dev_count = dev_count
self._phase = phase
@property
def outputs_attr(self):
attrs = {"token_ids": [[-1, -1], 'int64'],
"position_ids": [[-1, -1], 'int64'],
"segment_ids": [[-1, -1], 'int64'],
"input_mask": [[-1, -1, 1], 'float32'],
"task_ids": [[-1, -1], 'int64'],
"label_ids": [[-1], 'int64'],
"token_ids_neg": [[-1, -1], 'int64'],
"position_ids_neg": [[-1, -1], 'int64'],
"segment_ids_neg": [[-1, -1], 'int64'],
"input_mask_neg": [[-1, -1, 1], 'float32'],
"task_ids_neg": [[-1, -1], 'int64']
}
return self._get_registed_attrs(attrs)
def load_data(self, input_file, batch_size, num_epochs=None, \
file_format='tsv', shuffle_train=True):
"""Load matching data into reader.
Args:
input_file: the dataset file path. File format should keep consistent with `file_format` argument.
            batch_size: number of examples yielded at once. CAUTION! If your environment has multiple GPU devices (marked as dev_count), the batch_size must be divisible by dev_count with no remainder!
            num_epochs: the number of traversals (epochs) over the input examples. Default is None, which means once for single-task learning and automatically calculated for multi-task learning. This argument only works in the train phase.
file_format: the file format of input file. Supported format: tsv. Default is tsv.
shuffle_train: whether to shuffle training dataset. Default is True. This argument only works on training phase.
"""
self._batch_size = batch_size
self._num_epochs = num_epochs
self._data_generator = self._reader.data_generator( \
input_file, batch_size, num_epochs if self._phase == 'train' else 1, \
shuffle=shuffle_train if self._phase == 'train' else False, \
phase=self._phase)
def _iterator(self):
names = ['token_ids', 'segment_ids', 'position_ids', 'task_ids', 'input_mask', 'label_ids', \
'token_ids_neg', 'segment_ids_neg', 'position_ids_neg', 'task_ids_neg', 'input_mask_neg']
if self._learning_strategy == 'pairwise':
names.remove('label_ids')
for batch in self._data_generator():
outputs = {n: i for n,i in zip(names, batch)}
ret = {}
# TODO: move runtime shape check here
for attr in self.outputs_attr.keys():
ret[attr] = outputs[attr]
yield ret
@property
def num_examples(self):
return self._reader.get_num_examples(phase=self._phase)
@property
def num_epochs(self):
return self._num_epochs
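# A minimal usage sketch; the vocab and dataset paths below are placeholders
# for files you would supply yourself, not files shipped with this module.
if __name__ == '__main__':
    reader = MatchReader('pretrain/ernie/vocab.txt', max_len=128, lang='cn',
                         learning_strategy='pointwise', phase='train')
    reader.load_data('data/match/train.tsv', batch_size=32, num_epochs=2)
    print('registered outputs:', list(reader.outputs_attr.keys()))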
|
tfx/tools/cli/commands/pipeline.py | avelez93/tfx | 1,813 | 33962 | <filename>tfx/tools/cli/commands/pipeline.py
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for pipeline group."""
import sys
from typing import Optional
import click
from tfx.tools.cli import labels
from tfx.tools.cli.cli_context import Context
from tfx.tools.cli.cli_context import pass_context
from tfx.tools.cli.handler import handler_factory
def _check_deprecated_image_build_flags(build_target_image=None,
skaffold_cmd=None,
pipeline_package_path=None):
"""Checks and exits if deprecated flags were used."""
if build_target_image is not None:
sys.exit(
'[Error] --build-target-image flag was DELETED. You should specify '
'the build target image at the `KubeflowDagRunnerConfig` class '
'instead, and use --build-image flag without argument to build a '
'container image when creating or updating a pipeline.')
if skaffold_cmd is not None:
sys.exit(
'[Error] --skaffold-cmd flag was DELETED. TFX doesn\'t use skaffold '
        'any more. You can delete --skaffold-cmd flag and the auto-generated '
'build.yaml file. You must specify --build-image to trigger an '
'image build when creating or updating a pipeline.')
if pipeline_package_path is not None:
sys.exit(
'[Error] --pipeline-package-path flag was DELETED. You can specify '
'the package location as `output_filename` and `output_dir` when '
        'creating a `KubeflowDagRunner` instance. CLI will read the package '
'path specified there.')
@click.group('pipeline')
def pipeline_group() -> None:
pass
# TODO(b/132286477): Add support for requirements file.
@pipeline_group.command('create', help='Create a pipeline')
@pass_context
@click.option(
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
@click.option(
'--pipeline_path',
'--pipeline-path',
required=True,
type=str,
help='Path to Python DSL.')
@click.option(
'--package_path',
'--package-path',
default=None,
type=str,
    help='[DEPRECATED] Package path specified in a KubeflowDagRunner instance '
'will be used.')
@click.option(
'--build_target_image',
'--build-target-image',
default=None,
type=str,
help='[DEPRECATED] Please specify target image to the '
'KubeflowDagRunnerConfig class directly. `KUBEFLOW_TFX_IMAGE` environment '
'variable is not used any more.')
@click.option(
'--build_base_image',
'--build-base-image',
default=None,
type=str,
help='Container image path to be used as the base image. If not specified, '
'official TFX image with the same version will be used. You need to '
'specify --build-image flag to trigger an image build.')
@click.option(
'--skaffold_cmd',
'--skaffold-cmd',
default=None,
type=str,
help='[DEPRECATED] Skaffold is not used any more. Do not use this flag.')
@click.option(
'--endpoint',
default=None,
type=str,
help='Endpoint of the KFP API service to connect.')
@click.option(
'--iap_client_id',
'--iap-client-id',
default=None,
type=str,
help='Client ID for IAP protected endpoint.')
@click.option(
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.')
@click.option(
'--build_image',
'--build-image',
is_flag=True,
default=False,
help='Build a container image for the pipeline using Dockerfile in the '
'current directory. If Dockerfile does not exist, a default Dockerfile '
'will be generated using --build-base-image.')
def create_pipeline(ctx: Context, engine: str, pipeline_path: str,
package_path: Optional[str],
build_target_image: Optional[str],
build_base_image: Optional[str],
skaffold_cmd: Optional[str], endpoint: Optional[str],
iap_client_id: Optional[str], namespace: str,
build_image: bool) -> None:
"""Command definition to create a pipeline."""
# TODO(b/179847638): Delete checks for deprecated flags.
_check_deprecated_image_build_flags(build_target_image, skaffold_cmd,
package_path)
if build_base_image is not None and not build_image:
sys.exit('--build-base-image used without --build-image. You have to use '
'--build-image flag to build a container image for the pipeline.')
# TODO(b/142358865): Add support for container building for Airflow and Beam
# runners when they support container executors.
click.echo('Creating pipeline')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.PIPELINE_DSL_PATH] = pipeline_path
ctx.flags_dict[labels.BASE_IMAGE] = build_base_image
ctx.flags_dict[labels.ENDPOINT] = endpoint
ctx.flags_dict[labels.IAP_CLIENT_ID] = iap_client_id
ctx.flags_dict[labels.NAMESPACE] = namespace
ctx.flags_dict[labels.BUILD_IMAGE] = build_image
handler_factory.create_handler(ctx.flags_dict).create_pipeline()
@pipeline_group.command('update', help='Update an existing pipeline.')
@pass_context
@click.option(
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
@click.option(
'--pipeline_path',
'--pipeline-path',
required=True,
type=str,
help='Path to Python DSL file')
@click.option(
'--package_path',
'--package-path',
type=str,
default=None,
    help='[DEPRECATED] Package path specified in a KubeflowDagRunner instance '
'will be used.')
@click.option(
'--skaffold_cmd',
'--skaffold-cmd',
default=None,
type=str,
help='[DEPRECATED] Skaffold is not used any more. Do not use this flag.')
@click.option(
'--endpoint',
default=None,
type=str,
help='Endpoint of the KFP API service to connect.')
@click.option(
'--iap_client_id',
'--iap-client-id',
default=None,
type=str,
help='Client ID for IAP protected endpoint.')
@click.option(
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.')
@click.option(
'--build_image',
'--build-image',
is_flag=True,
default=False,
help='Build a container image for the pipeline using Dockerfile in the '
'current directory.')
def update_pipeline(ctx: Context, engine: str, pipeline_path: str,
package_path: Optional[str], skaffold_cmd: Optional[str],
endpoint: Optional[str], iap_client_id: Optional[str],
namespace: str, build_image: bool) -> None:
"""Command definition to update a pipeline."""
# TODO(b/179847638): Delete checks for deprecated flags.
_check_deprecated_image_build_flags(None, skaffold_cmd, package_path)
click.echo('Updating pipeline')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.PIPELINE_DSL_PATH] = pipeline_path
ctx.flags_dict[labels.ENDPOINT] = endpoint
ctx.flags_dict[labels.IAP_CLIENT_ID] = iap_client_id
ctx.flags_dict[labels.NAMESPACE] = namespace
ctx.flags_dict[labels.BUILD_IMAGE] = build_image
handler_factory.create_handler(ctx.flags_dict).update_pipeline()
@pipeline_group.command('delete', help='Delete a pipeline')
@pass_context
@click.option(
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
@click.option(
'--pipeline_name',
'--pipeline-name',
required=True,
type=str,
help='Name of the pipeline')
@click.option(
'--endpoint',
default=None,
type=str,
help='Endpoint of the KFP API service to connect.')
@click.option(
'--iap_client_id',
'--iap-client-id',
default=None,
type=str,
help='Client ID for IAP protected endpoint.')
@click.option(
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.')
def delete_pipeline(ctx: Context, engine: str, pipeline_name: str,
endpoint: str, iap_client_id: str, namespace: str) -> None:
"""Command definition to delete a pipeline."""
click.echo('Deleting pipeline')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.PIPELINE_NAME] = pipeline_name
ctx.flags_dict[labels.ENDPOINT] = endpoint
ctx.flags_dict[labels.IAP_CLIENT_ID] = iap_client_id
ctx.flags_dict[labels.NAMESPACE] = namespace
handler_factory.create_handler(ctx.flags_dict).delete_pipeline()
@pipeline_group.command('list', help='List all the pipelines')
@pass_context
@click.option(
'--engine', default='auto', type=str, help='orchestrator for pipelines')
@click.option(
'--endpoint',
default=None,
type=str,
help='Endpoint of the KFP API service to connect.')
@click.option(
'--iap_client_id',
'--iap-client-id',
default=None,
type=str,
help='Client ID for IAP protected endpoint.')
@click.option(
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.')
def list_pipelines(ctx: Context, engine: str, endpoint: str, iap_client_id: str,
namespace: str) -> None:
"""Command definition to list pipelines."""
click.echo('Listing all pipelines')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.ENDPOINT] = endpoint
ctx.flags_dict[labels.IAP_CLIENT_ID] = iap_client_id
ctx.flags_dict[labels.NAMESPACE] = namespace
handler_factory.create_handler(ctx.flags_dict).list_pipelines()
@pipeline_group.command('compile', help='Compile a pipeline')
@pass_context
@click.option(
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
@click.option(
'--pipeline_path',
'--pipeline-path',
required=True,
type=str,
help='Path to Python DSL.')
@click.option(
'--package_path',
'--package-path',
default=None,
type=str,
    help='[DEPRECATED] Package path specified in a KubeflowDagRunner instance '
'will be used.')
def compile_pipeline(ctx: Context, engine: str, pipeline_path: str,
package_path: str) -> None:
"""Command definition to compile a pipeline."""
# TODO(b/179847638): Delete checks for deprecated flags.
_check_deprecated_image_build_flags(pipeline_package_path=package_path)
click.echo('Compiling pipeline')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.PIPELINE_DSL_PATH] = pipeline_path
handler_factory.create_handler(ctx.flags_dict).compile_pipeline()
@pipeline_group.command('schema', help='Obtain latest database schema.')
@pass_context
@click.option(
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
@click.option(
'--pipeline_name',
'--pipeline-name',
required=True,
type=str,
help='Name of the pipeline')
def get_schema(ctx: Context, engine: str, pipeline_name: str) -> None:
"""Command definition to infer latest schema."""
click.echo('Getting latest schema.')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.PIPELINE_NAME] = pipeline_name
handler_factory.create_handler(ctx.flags_dict).get_schema()
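# Typical invocations of the commands defined above, assuming the standard
# `tfx` console entry point; the engine, paths and names are illustrative.
#
#   tfx pipeline create --engine=kubeflow --pipeline-path=my_pipeline.py --build-image
#   tfx pipeline update --engine=kubeflow --pipeline-path=my_pipeline.py
#   tfx pipeline compile --engine=kubeflow --pipeline-path=my_pipeline.py
#   tfx pipeline list --engine=kubeflow --endpoint=<KFP endpoint>
#   tfx pipeline delete --engine=kubeflow --pipeline-name=my_pipeline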
|
alipay/aop/api/domain/ReduceInfo.py | snowxmas/alipay-sdk-python-all | 213 | 33975 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ReduceInfo(object):
def __init__(self):
self._brand_name = None
self._consume_amt = None
self._consume_store_name = None
self._payment_time = None
self._promo_amt = None
self._user_name = None
@property
def brand_name(self):
return self._brand_name
@brand_name.setter
def brand_name(self, value):
self._brand_name = value
@property
def consume_amt(self):
return self._consume_amt
@consume_amt.setter
def consume_amt(self, value):
self._consume_amt = value
@property
def consume_store_name(self):
return self._consume_store_name
@consume_store_name.setter
def consume_store_name(self, value):
self._consume_store_name = value
@property
def payment_time(self):
return self._payment_time
@payment_time.setter
def payment_time(self, value):
self._payment_time = value
@property
def promo_amt(self):
return self._promo_amt
@promo_amt.setter
def promo_amt(self, value):
self._promo_amt = value
@property
def user_name(self):
return self._user_name
@user_name.setter
def user_name(self, value):
self._user_name = value
def to_alipay_dict(self):
params = dict()
if self.brand_name:
if hasattr(self.brand_name, 'to_alipay_dict'):
params['brand_name'] = self.brand_name.to_alipay_dict()
else:
params['brand_name'] = self.brand_name
if self.consume_amt:
if hasattr(self.consume_amt, 'to_alipay_dict'):
params['consume_amt'] = self.consume_amt.to_alipay_dict()
else:
params['consume_amt'] = self.consume_amt
if self.consume_store_name:
if hasattr(self.consume_store_name, 'to_alipay_dict'):
params['consume_store_name'] = self.consume_store_name.to_alipay_dict()
else:
params['consume_store_name'] = self.consume_store_name
if self.payment_time:
if hasattr(self.payment_time, 'to_alipay_dict'):
params['payment_time'] = self.payment_time.to_alipay_dict()
else:
params['payment_time'] = self.payment_time
if self.promo_amt:
if hasattr(self.promo_amt, 'to_alipay_dict'):
params['promo_amt'] = self.promo_amt.to_alipay_dict()
else:
params['promo_amt'] = self.promo_amt
if self.user_name:
if hasattr(self.user_name, 'to_alipay_dict'):
params['user_name'] = self.user_name.to_alipay_dict()
else:
params['user_name'] = self.user_name
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ReduceInfo()
if 'brand_name' in d:
o.brand_name = d['brand_name']
if 'consume_amt' in d:
o.consume_amt = d['consume_amt']
if 'consume_store_name' in d:
o.consume_store_name = d['consume_store_name']
if 'payment_time' in d:
o.payment_time = d['payment_time']
if 'promo_amt' in d:
o.promo_amt = d['promo_amt']
if 'user_name' in d:
o.user_name = d['user_name']
return o
|
tests/scripts/negative_linenumber_offsets.py | andyfcx/py-spy | 8,112 | 33979 | import time
def f():
[
# Must be split over multiple lines to see the error.
# https://github.com/benfred/py-spy/pull/208
time.sleep(1)
for _ in range(1000)
]
f()
|
starfish/core/imagestack/parser/crop.py | haoxusci/starfish | 164 | 33980 | <filename>starfish/core/imagestack/parser/crop.py<gh_stars>100-1000
from collections import OrderedDict
from typing import Collection, List, Mapping, MutableSequence, Optional, Set, Tuple, Union
import numpy as np
from slicedimage import Tile, TileSet
from starfish.core.imagestack.parser import TileCollectionData, TileData, TileKey
from starfish.core.types import ArrayLike, Axes, Coordinates, Number
class CropParameters:
"""Parameters for cropping an ImageStack at load time."""
def __init__(
self,
*,
permitted_rounds: Optional[Collection[int]]=None,
permitted_chs: Optional[Collection[int]]=None,
permitted_zplanes: Optional[Collection[int]]=None,
x_slice: Optional[Union[int, slice]]=None,
y_slice: Optional[Union[int, slice]]=None,
):
"""
Parameters
----------
permitted_rounds : Optional[Collection[int]]
The rounds in the original dataset to load into the ImageStack. If this is not set,
then all rounds are loaded into the ImageStack.
permitted_chs : Optional[Collection[int]]
The channels in the original dataset to load into the ImageStack. If this is not set,
then all channels are loaded into the ImageStack.
permitted_zplanes : Optional[Collection[int]]
The z-layers in the original dataset to load into the ImageStack. If this is not set,
then all z-layers are loaded into the ImageStack.
x_slice : Optional[Union[int, slice]]
The x-range in the x-y tile that is loaded into the ImageStack. If this is not set,
then the entire x-y tile is loaded into the ImageStack.
y_slice : Optional[Union[int, slice]]
The y-range in the x-y tile that is loaded into the ImageStack. If this is not set,
then the entire x-y tile is loaded into the ImageStack.
"""
self._permitted_rounds = set(permitted_rounds) if permitted_rounds else None
self._permitted_chs = set(permitted_chs) if permitted_chs else None
self._permitted_zplanes = set(permitted_zplanes) if permitted_zplanes else None
self._x_slice = x_slice
self._y_slice = y_slice
def _add_permitted_axes(self, axis_type: Axes, permitted_axis: int) -> None:
"""
Add a value to one of the permitted axes sets.
"""
if axis_type == Axes.ROUND and self._permitted_rounds:
self._permitted_rounds.add(permitted_axis)
if axis_type == Axes.CH and self._permitted_chs:
self._permitted_chs.add(permitted_axis)
if axis_type == Axes.ZPLANE and self._permitted_zplanes:
self._permitted_zplanes.add(permitted_axis)
def filter_tilekeys(self, tilekeys: Collection[TileKey]) -> Collection[TileKey]:
"""
Filters tilekeys for those that should be included in the resulting ImageStack.
"""
results: MutableSequence[TileKey] = list()
for tilekey in tilekeys:
if self._permitted_rounds is not None and tilekey.round not in self._permitted_rounds:
continue
if self._permitted_chs is not None and tilekey.ch not in self._permitted_chs:
continue
if self._permitted_zplanes is not None and tilekey.z not in self._permitted_zplanes:
continue
results.append(tilekey)
return results
@staticmethod
def _crop_axis(size: int, crop: Optional[Union[int, slice]]) -> Tuple[int, int]:
"""
Given the size of along an axis, and an optional cropping, return the start index
(inclusive) and end index (exclusive) of the crop. If no crop is specified, then the
original size (0, size) is returned.
"""
# convert int crops to a slice operation.
if isinstance(crop, int):
if crop < 0 or crop >= size:
raise IndexError("crop index out of range")
return crop, crop + 1
# convert start and stop to absolute values.
start: int
if crop is None or crop.start is None:
start = 0
elif crop.start is not None and crop.start < 0:
start = max(0, size + crop.start)
else:
start = min(size, crop.start)
stop: int
if crop is None or crop.stop is None:
stop = size
elif crop.stop is not None and crop.stop < 0:
stop = max(0, size + crop.stop)
else:
stop = min(size, crop.stop)
return start, stop
@staticmethod
def parse_aligned_groups(tileset: TileSet,
rounds: Optional[Collection[int]] = None,
chs: Optional[Collection[int]] = None,
zplanes: Optional[Collection[int]] = None,
x: Optional[Union[int, slice]] = None,
y: Optional[Union[int, slice]] = None
) -> List["CropParameters"]:
"""Takes a tileset and any optional selected axes lists compares the physical coordinates on each
tile to create aligned coordinate groups (groups of tiles that have the same physical
coordinates)
Parameters
----------
tileset: TileSet
The TileSet to parse
rounds: Optional[Collection[int]]
The rounds in the tileset to include in the final aligned groups. If this is not set,
then all rounds are included.
chs: Optional[Collection[int]]
The chs in the tileset to include in the final aligned groups. If this is not set,
then all chs are included.
zplanes: Optional[Collection[int]]
The zplanes in the tileset to include in the final aligned groups. If this is not set,
then all zplanes are included.
x: Optional[Union[int, slice]]
The x-range in the x-y tile to include in the final aligned groups. If this is not set,
then the entire x-y tile is included.
y: Optional[Union[int, slice]]
The y-range in the x-y tile to include in the final aligned groups. If this is not set,
then the entire x-y tile is included.
Returns
-------
List["CropParameters"]
A list of CropParameters. Each entry describes the r/ch/z values of tiles that are
aligned (have matching coordinates) and are within the selected_axes if provided.
"""
coord_groups: OrderedDict[tuple, CropParameters] = OrderedDict()
for tile in tileset.tiles():
if CropParameters.tile_in_selected_axes(tile, rounds, chs, zplanes):
x_y_coords = (
tile.coordinates[Coordinates.X][0], tile.coordinates[Coordinates.X][1],
tile.coordinates[Coordinates.Y][0], tile.coordinates[Coordinates.Y][1]
)
# A tile with this (x, y) has already been seen, add tile's indices to
# CropParameters
if x_y_coords in coord_groups:
crop_params = coord_groups[x_y_coords]
crop_params._add_permitted_axes(Axes.CH, tile.indices[Axes.CH])
crop_params._add_permitted_axes(Axes.ROUND, tile.indices[Axes.ROUND])
if Axes.ZPLANE in tile.indices:
crop_params._add_permitted_axes(Axes.ZPLANE, tile.indices[Axes.ZPLANE])
else:
coord_groups[x_y_coords] = CropParameters(
permitted_chs=[tile.indices[Axes.CH]],
permitted_rounds=[tile.indices[Axes.ROUND]],
permitted_zplanes=[tile.indices[Axes.ZPLANE]]
if Axes.ZPLANE in tile.indices else None,
x_slice=x,
y_slice=y)
return list(coord_groups.values())
@staticmethod
def tile_in_selected_axes(tile: Tile,
rounds: Optional[Collection[int]] = None,
chs: Optional[Collection[int]] = None,
zplanes: Optional[Collection[int]] = None) -> bool:
"""
Return True if a tile belongs in a list of selected axes.
Parameters
----------
tile:
The tile in question
rounds: Optional[Collection[int]]
The allowed rounds.
chs: Optional[Collection[int]]
The allowed chs.
zplanes: Optional[Collection[int]]
The allowed zplanes.
Returns
-------
Boolean
True if tile belongs with selected axes, False if not.
"""
if rounds and tile.indices[Axes.ROUND] not in rounds:
return False
if chs and tile.indices[Axes.CH] not in chs:
return False
if zplanes and tile.indices[Axes.ZPLANE] not in zplanes:
return False
return True
def crop_shape(self, shape: Mapping[Axes, int]) -> Mapping[Axes, int]:
"""
Given the shape of the original tile, return the shape of the cropped tile.
"""
output_x_shape = CropParameters._crop_axis(shape[Axes.X], self._x_slice)
output_y_shape = CropParameters._crop_axis(shape[Axes.Y], self._y_slice)
width = output_x_shape[1] - output_x_shape[0]
height = output_y_shape[1] - output_y_shape[0]
return {Axes.Y: height, Axes.X: width}
def crop_image(self, image: np.ndarray) -> np.ndarray:
"""
Given the original image, return the cropped image.
"""
output_x_shape = CropParameters._crop_axis(image.shape[1], self._x_slice)
output_y_shape = CropParameters._crop_axis(image.shape[0], self._y_slice)
return image[output_y_shape[0]:output_y_shape[1], output_x_shape[0]:output_x_shape[1]]
def crop_coordinates(
self, coordinates: Mapping[Coordinates, ArrayLike[Number]],
) -> Mapping[Coordinates, ArrayLike[Number]]:
"""
Given a mapping of coordinate to coordinate values, return a mapping of the coordinate to
cropped coordinate values.
"""
output_x_shape = CropParameters._crop_axis(len(coordinates[Coordinates.X]), self._x_slice)
output_y_shape = CropParameters._crop_axis(len(coordinates[Coordinates.Y]), self._y_slice)
return_coords = {
Coordinates.X: coordinates[Coordinates.X][output_x_shape[0]:output_x_shape[1]],
Coordinates.Y: coordinates[Coordinates.Y][output_y_shape[0]:output_y_shape[1]],
}
if Coordinates.Z in coordinates:
return_coords[Coordinates.Z] = coordinates[Coordinates.Z]
return return_coords
class CroppedTileData(TileData):
"""Represent a cropped view of a TileData object."""
def __init__(self, tile_data: TileData, cropping_parameters: CropParameters):
self.backing_tile_data = tile_data
self.cropping_parameters = cropping_parameters
@property
def tile_shape(self) -> Mapping[Axes, int]:
return self.cropping_parameters.crop_shape(self.backing_tile_data.tile_shape)
@property
def numpy_array(self) -> np.ndarray:
return self.cropping_parameters.crop_image(self.backing_tile_data.numpy_array)
@property
def coordinates(self) -> Mapping[Coordinates, ArrayLike[Number]]:
return self.cropping_parameters.crop_coordinates(self.backing_tile_data.coordinates)
@property
def selector(self) -> Mapping[Axes, int]:
return self.backing_tile_data.selector
class CroppedTileCollectionData(TileCollectionData):
"""Represent a cropped view of a TileCollectionData object."""
def __init__(
self,
backing_tile_collection_data: TileCollectionData,
crop_parameters: CropParameters,
) -> None:
self.backing_tile_collection_data = backing_tile_collection_data
self.crop_parameters = crop_parameters
def __getitem__(self, tilekey: TileKey) -> dict:
return self.backing_tile_collection_data[tilekey]
def keys(self) -> Collection[TileKey]:
return self.crop_parameters.filter_tilekeys(self.backing_tile_collection_data.keys())
@property
def group_by(self) -> Set[Axes]:
"""Returns the axes to group by when we load the data."""
return self.backing_tile_collection_data.group_by
@property
def tile_shape(self) -> Mapping[Axes, int]:
return self.crop_parameters.crop_shape(self.backing_tile_collection_data.tile_shape)
@property
def extras(self) -> dict:
return self.backing_tile_collection_data.extras
def get_tile_by_key(self, tilekey: TileKey) -> TileData:
return CroppedTileData(
self.backing_tile_collection_data.get_tile_by_key(tilekey),
self.crop_parameters,
)
def get_tile(self, r: int, ch: int, z: int) -> TileData:
return CroppedTileData(
self.backing_tile_collection_data.get_tile(r, ch, z),
self.crop_parameters,
)
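# ---------------------------------------------------------------------------
# Editor's note: minimal sketch appended for illustration; it is not part of
# the original starfish module. It exercises only the image/shape cropping
# helpers of CropParameters, with arbitrary example sizes and slices, so it
# needs no TileSet.
if __name__ == "__main__":
    params = CropParameters(x_slice=slice(10, 20), y_slice=slice(0, 5))
    image = np.zeros((32, 32), dtype=np.float32)
    cropped = params.crop_image(image)
    print(cropped.shape)                                 # (5, 10): y rows 0..4, x cols 10..19
    print(params.crop_shape({Axes.Y: 32, Axes.X: 32}))   # {Axes.Y: 5, Axes.X: 10}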
|
mpc_ros/script/teleop_keyboard.py | NaokiTakahashi12/mpc_ros | 335 | 33981 | #!/usr/bin/python
# This is a modified version of turtlebot_teleop.py
# to fullfill the needs of HyphaROS MiniCar use case
# Copyright (c) 2018, HyphaROS Workshop
#
# The original license info are as below:
# Copyright (c) 2011, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys, select, termios, tty, math
import rospy
from ackermann_msgs.msg import AckermannDriveStamped
header_msg = """
Control HyphaROS Minicar!
-------------------------
Moving around:
i
j k l
,
w/x : increase/decrease throttle bounds by 10%
e/c : increase/decrease steering bounds by 10%
s : safety mode
space key, k : force stop
anything else : keep previous commands
CTRL-C to quit
"""
# Func for getting keyboard value
def getKey(safety_mode):
    if safety_mode: # block until a key is pressed
tty.setraw(sys.stdin.fileno())
select.select([sys.stdin], [], [], 0)
key = sys.stdin.read(1)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
return key
else: # pass if not detected
tty.setraw(sys.stdin.fileno())
rlist, _, _ = select.select([sys.stdin], [], [], 0.1)
if rlist:
key = sys.stdin.read(1)
else:
key = ''
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
return key
# Func for showing current bounds
def showInfo(speed_bound, angle_bound):
return "current bounds:\tspeed %s\tangle %s " % (speed_bound, angle_bound)
# Main Func
if __name__=="__main__":
settings = termios.tcgetattr(sys.stdin)
rospy.init_node('minicar_teleop')
pub_cmd = rospy.Publisher('/ackermann_cmd', AckermannDriveStamped, queue_size=5)
pub_safe = rospy.Publisher('/ackermann_safe', AckermannDriveStamped, queue_size=5)
safe_mode = bool(rospy.get_param('~safety_mode', False)) # true for safety cmds
speed_i = float(rospy.get_param('~speed_incremental', 0.1)) # m/s
angle_i = float(rospy.get_param('~angle_incremental', 5.0*math.pi/180.0)) # rad (=5 degree)
speed_bound = float(rospy.get_param('~speed_bound', 2.0))
angle_bound = float(rospy.get_param('~angle_bound', 30.0*math.pi/180.0))
if safe_mode:
print "Switched to Safety Mode !"
moveBindings = {
'i':(speed_i,0.0),
'j':(0.0,angle_i),
'l':(0.0,-angle_i),
',':(-speed_i,0.0),
}
boundBindings={
'w':(1.1,1),
'x':(.9,1),
'e':(1,1.1),
'c':(1,.9),
}
status = 0
acc = 0.1
target_speed = 0.0 # m/s
target_angle = 0.0 # rad
# Create AckermannDriveStamped msg object
ackermann_msg = AckermannDriveStamped()
    #ackermann_msg.header.frame_id = 'car_id' # for future multi-car applications
try:
print(header_msg)
print(showInfo(speed_bound, angle_bound))
while(1):
key = getKey(safe_mode)
if key in moveBindings.keys():
target_speed = target_speed + moveBindings[key][0]
target_angle = target_angle + moveBindings[key][1]
elif key in boundBindings.keys():
speed_bound = speed_bound * boundBindings[key][0]
angle_bound = angle_bound * boundBindings[key][1]
print(showInfo(speed_bound, angle_bound))
if (status == 14):
print(header_msg)
status = (status + 1) % 15
elif key == ' ' or key == 'k' :
target_speed = 0.0
target_angle = 0.0
elif key == 's' : # switch safety mode
safe_mode = not safe_mode
if safe_mode:
print "Switched to Safety Mode !"
else:
print "Back to Standard Mode !"
elif key == '\x03': # cltr + C
break
# Command constraints
if target_speed > speed_bound:
target_speed = speed_bound
if target_speed < -speed_bound:
target_speed = -speed_bound
if target_angle > angle_bound:
target_angle = angle_bound
if target_angle < -angle_bound:
target_angle = -angle_bound
# Publishing command
            #ackermann_msg.header.stamp = rospy.Time.now() # for future multi-car applications
ackermann_msg.drive.speed = target_speed
ackermann_msg.drive.steering_angle = target_angle
if safe_mode:
pub_safe.publish(ackermann_msg)
else:
pub_cmd.publish(ackermann_msg)
except Exception as e:
print(e)
finally:
ackermann_msg.drive.speed = 0
ackermann_msg.drive.steering_angle = 0
pub_cmd.publish(ackermann_msg)
pub_safe.publish(ackermann_msg)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
|
etl/parsers/etw/Microsoft_Windows_UAC_FileVirtualization.py | IMULMUL/etl-parser | 104 | 33984 | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-UAC-FileVirtualization
GUID : c02afc2b-e24e-4449-ad76-bcc2c2575ead
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2000, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2000_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2001, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2001_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2002, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2002_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2003, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2003_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2004, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2004_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2005, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2005_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2006, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2006_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2007, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2007_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2008, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2008_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2009, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2009_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2010, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2010_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2011, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2011_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2012, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2012_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2013, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2013_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2014, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2014_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2015, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2015_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2016, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2016_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2017, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2017_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2018, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2018_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2019, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2019_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=4000, version=0)
class Microsoft_Windows_UAC_FileVirtualization_4000_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"CreateOptions" / Int32ul,
"DesiredAccess" / Int32ul,
"IrpMajorFunction" / Int8ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=4001, version=0)
class Microsoft_Windows_UAC_FileVirtualization_4001_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"TargetFileNameLength" / Int16ul,
"TargetFileNameBuffer" / Bytes(lambda this: this.TargetFileNameLength)
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=4002, version=0)
class Microsoft_Windows_UAC_FileVirtualization_4002_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength)
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=5000, version=0)
class Microsoft_Windows_UAC_FileVirtualization_5000_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"CreateOptions" / Int32ul,
"DesiredAccess" / Int32ul,
"IrpMajorFunction" / Int8ul,
"Exclusions" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=5002, version=0)
class Microsoft_Windows_UAC_FileVirtualization_5002_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"CreateOptions" / Int32ul,
"DesiredAccess" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=5003, version=0)
class Microsoft_Windows_UAC_FileVirtualization_5003_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength)
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=5004, version=0)
class Microsoft_Windows_UAC_FileVirtualization_5004_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength)
)
|
simple/game_loop_process.py | loyalgarlic/snakepit-game | 124 | 34007 | import asyncio
from aiohttp import web
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from multiprocessing import Queue, Process
import os
from time import sleep
async def handle(request):
index = open("index.html", 'rb')
content = index.read()
return web.Response(body=content, content_type='text/html')
tick = asyncio.Condition()
async def wshandler(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
recv_task = None
tick_task = None
while 1:
if not recv_task:
recv_task = asyncio.ensure_future(ws.receive())
if not tick_task:
await tick.acquire()
tick_task = asyncio.ensure_future(tick.wait())
done, pending = await asyncio.wait(
[recv_task,
tick_task],
return_when=asyncio.FIRST_COMPLETED)
if recv_task in done:
msg = recv_task.result()
if msg.tp == web.MsgType.text:
print("Got message %s" % msg.data)
ws.send_str("Pressed key code: {}".format(msg.data))
elif msg.tp == web.MsgType.close or\
msg.tp == web.MsgType.error:
break
recv_task = None
if tick_task in done:
ws.send_str("game loop ticks")
tick.release()
tick_task = None
return ws
def game_loop(asyncio_loop):
# coroutine to run in main thread
async def notify():
await tick.acquire()
tick.notify_all()
tick.release()
queue = Queue()
# function to run in a different process
def worker():
while 1:
print("doing heavy calculation in process {}".format(os.getpid()))
sleep(1)
queue.put("calculation result")
Process(target=worker).start()
while 1:
# blocks this thread but not main thread with event loop
result = queue.get()
print("getting {} in process {}".format(result, os.getpid()))
task = asyncio.run_coroutine_threadsafe(notify(), asyncio_loop)
task.result()
asyncio_loop = asyncio.get_event_loop()
executor = ThreadPoolExecutor(max_workers=1)
asyncio_loop.run_in_executor(executor, game_loop, asyncio_loop)
app = web.Application()
app.router.add_route('GET', '/connect', wshandler)
app.router.add_route('GET', '/', handle)
web.run_app(app)
|
python/eggroll/core/datastructure/__init__.py | liszekei/eggroll | 209 | 34016 | <gh_stars>100-1000
# Copyright (c) 2019 - now, Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from importlib import import_module
from concurrent.futures import _base, ThreadPoolExecutor
from eggroll.core.datastructure.threadpool import ErThreadUnpooledExecutor
from eggroll.core.datastructure.queue import _PySimpleQueue
from eggroll.utils.log_utils import get_logger
L = get_logger()
try:
from queue import SimpleQueue
except ImportError:
SimpleQueue = _PySimpleQueue
def create_executor_pool(canonical_name: str = None, max_workers=None, thread_name_prefix=None, *args, **kwargs) -> _base.Executor:
if not canonical_name:
canonical_name = "concurrent.futures.ThreadPoolExecutor"
module_name, class_name = canonical_name.rsplit(".", 1)
_module = import_module(module_name)
_class = getattr(_module, class_name)
return _class(max_workers=max_workers, thread_name_prefix=thread_name_prefix, *args, **kwargs)
def create_simple_queue(*args, **kwargs):
return SimpleQueue()
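# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the original eggroll module.
# It shows create_executor_pool resolving a pool class from its dotted name;
# the worker count and thread-name prefix are arbitrary example values.
if __name__ == "__main__":
    pool = create_executor_pool(
        canonical_name="concurrent.futures.ThreadPoolExecutor",
        max_workers=2,
        thread_name_prefix="demo-pool")
    print(pool.submit(sum, [1, 2, 3]).result())  # 6
    pool.shutdown()
    q = create_simple_queue()
    q.put("hello")
    print(q.get())  # hello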
|
lib/dataset/utils.py | decisionforce/mmTransformer | 199 | 34057 | <reponame>decisionforce/mmTransformer<filename>lib/dataset/utils.py
import math
import numpy as np
from sklearn.linear_model import LinearRegression
def get_heading_angle(traj: np.ndarray):
"""
get the heading angle
traj: [N,2] N>=6
"""
# length == 6
# sort position
_traj = traj.copy()
traj = traj.copy()
traj = traj[traj[:, 0].argsort()]
traj = traj[traj[:, 1].argsort()]
if traj.T[0].max()-traj.T[0].min() > traj.T[1].max()-traj.T[1].min(): # * dominated by x
reg = LinearRegression().fit(traj[:, 0].reshape(-1, 1), traj[:, 1])
traj_dir = _traj[-2:].mean(0) - _traj[:2].mean(0)
reg_dir = np.array([1, reg.coef_[0]])
angle = np.arctan(reg.coef_[0])
else:
# using y as sample and x as the target to fit a line
reg = LinearRegression().fit(traj[:, 1].reshape(-1, 1), traj[:, 0])
traj_dir = _traj[-2:].mean(0) - _traj[:2].mean(0)
reg_dir = np.array([reg.coef_[0], 1])*np.sign(reg.coef_[0])
if reg.coef_[0] == 0:
import pdb
pdb.set_trace()
angle = np.arctan(1/reg.coef_[0])
if angle < 0:
angle = 2*np.pi + angle
if (reg_dir*traj_dir).sum() < 0: # not same direction
angle = (angle+np.pi) % (2*np.pi)
# angle from y
angle_to_y = angle-np.pi/2
angle_to_y = -angle_to_y
return angle_to_y
def transform_coord(coords, angle):
x = coords[..., 0]
y = coords[..., 1]
x_transform = np.cos(angle)*x-np.sin(angle)*y
y_transform = np.cos(angle)*y+np.sin(angle)*x
output_coords = np.stack((x_transform, y_transform), axis=-1)
return output_coords
def transform_coord_flip(coords, angle):
x = coords[:, 0]
y = coords[:, 1]
x_transform = math.cos(angle)*x-math.sin(angle)*y
y_transform = math.cos(angle)*y+math.sin(angle)*x
x_transform = -1*x_transform # flip
# y_transform = -1*y_transform # flip
output_coords = np.stack((x_transform, y_transform), axis=-1)
return output_coords
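# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the original mmTransformer
# module. The trajectory below is synthetic: it drifts slightly in x while
# moving along +y, so the heading angle should come out near arctan(0.1).
if __name__ == "__main__":
    traj = np.stack([np.linspace(0.0, 0.5, 6), np.linspace(0.0, 5.0, 6)], axis=-1)  # [6, 2]
    angle = get_heading_angle(traj)
    print(angle)  # ~0.0997 rad, i.e. the track leans ~arctan(0.1) off the +y axis
    print(transform_coord(traj, angle))  # the same trajectory rotated by that angle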
|
corehq/apps/translations/integrations/transifex/project_migrator.py | dimagilg/commcare-hq | 471 | 34079 | <filename>corehq/apps/translations/integrations/transifex/project_migrator.py<gh_stars>100-1000
import copy
import datetime
import tempfile
from collections import OrderedDict
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
import polib
from memoized import memoized
from corehq.apps.app_manager.dbaccessors import get_app
from corehq.apps.translations.integrations.transifex.client import (
TransifexApiClient,
)
from corehq.apps.translations.integrations.transifex.const import (
SOURCE_LANGUAGE_MAPPING,
TRANSIFEX_SLUG_PREFIX_MAPPING,
)
from corehq.apps.translations.integrations.transifex.exceptions import (
InvalidProjectMigration,
ResourceMissing,
)
from corehq.apps.translations.models import TransifexProject
class ProjectMigrator(object):
def __init__(self, domain, project_slug, source_app_id, target_app_id, resource_ids_mapping):
"""
Migrate a transifex project from one app to another by
1. updating slugs of resources to use new module/form ids
2. updating context of translations in "Menus_and_forms" sheet to use new module/form ids
:param resource_ids_mapping: tuple of type, old_id, new_id
"""
self.domain = domain
self.project_slug = project_slug
self.project = TransifexProject.objects.get(slug=project_slug)
self.client = TransifexApiClient(self.project.organization.get_api_token, self.project.organization,
project_slug)
self.source_app_id = source_app_id
self.target_app_id = target_app_id
self.resource_ids_mapping = resource_ids_mapping
self.id_mapping = {old_id: new_id for _, old_id, new_id in self.resource_ids_mapping}
def validate(self):
ProjectMigrationValidator(self).validate()
def migrate(self):
slug_update_responses = self._update_slugs()
menus_and_forms_sheet_update_responses = self._update_menus_and_forms_sheet()
return slug_update_responses, menus_and_forms_sheet_update_responses
def _update_slugs(self):
responses = {}
for resource_type, old_id, new_id in self.resource_ids_mapping:
slug_prefix = self._get_slug_prefix(resource_type)
if not slug_prefix:
continue
resource_slug = "%s_%s" % (slug_prefix, old_id)
new_resource_slug = "%s_%s" % (slug_prefix, new_id)
responses[old_id] = self.client.update_resource_slug(resource_slug, new_resource_slug)
return responses
@memoized
def _get_slug_prefix(self, resource_type):
return TRANSIFEX_SLUG_PREFIX_MAPPING.get(resource_type)
def _update_menus_and_forms_sheet(self):
langs = copy.copy(self.source_app_langs)
translations = OrderedDict()
for lang in langs:
try:
translations[lang] = self.client.get_translation("Menus_and_forms", lang, lock_resource=False)
except ResourceMissing:
# Probably a lang in app not present on Transifex, so skip
pass
self._update_context(translations)
return self._upload_new_translations(translations)
@cached_property
def source_app_langs(self):
return self._source_app.langs
@cached_property
def _source_app(self):
return get_app(self.domain, self.source_app_id)
def _update_context(self, translations):
"""
update msgctxt for all POEntry objects replacing ids
:param translations: dict of lang code mapped to it list of POEntries
"""
for po_entries in translations.values():
for po_entry in po_entries:
# make sure the format is as expected, if not skip
context_entries = po_entry.msgctxt.split(":")
if len(context_entries) == 3:
resource_id = context_entries[-1]
# replace if we have been asked to replace it
if resource_id in self.id_mapping:
po_entry.msgctxt = po_entry.msgctxt.replace(resource_id, self.id_mapping[resource_id])
def _upload_new_translations(self, translations):
responses = {}
# the project source lang, which is the app default language should be the first to update.
# HQ keeps the default lang on top and hence it should be the first one here
assert list(translations.keys())[0] == self.target_app_default_lang
for lang_code in translations:
responses[lang_code] = self._upload_translation(translations[lang_code], lang_code)
return responses
def _upload_translation(self, translations, lang_code):
po = polib.POFile()
po.check_for_duplicates = False
po.metadata = self.get_metadata()
po.extend(translations)
with tempfile.NamedTemporaryFile() as temp_file:
po.save(temp_file.name)
temp_file.seek(0)
if lang_code == self.target_app_default_lang:
return self.client.upload_resource(temp_file.name, "Menus_and_forms", "Menus_and_forms",
update_resource=True)
else:
return self.client.upload_translation(temp_file.name, "Menus_and_forms", "Menus_and_forms",
lang_code)
def get_metadata(self):
now = str(datetime.datetime.now())
return {
'App-Id': self.target_app_id,
'PO-Creation-Date': now,
'MIME-Version': '1.0',
'Content-Type': 'text/plain; charset=utf-8',
'Language': self.target_app_default_lang
}
@cached_property
def target_app_default_lang(self):
return self._target_app.default_language
@cached_property
def _target_app(self):
return get_app(self.domain, self.target_app_id)
@cached_property
def get_project_source_lang(self):
return self.client.project_details().json()['source_language_code']
@cached_property
def source_app_default_lang(self):
return self._source_app.default_language
class ProjectMigrationValidator(object):
def __init__(self, migrator):
self.migrator = migrator
self.source_app_default_lang = migrator.source_app_default_lang
self.target_app_default_lang = migrator.target_app_default_lang
def validate(self):
self._ensure_same_source_lang()
def _ensure_same_source_lang(self):
"""
ensure same source lang for source app, target app and on transifex project
"""
if not self.source_app_default_lang or (self.source_app_default_lang != self.target_app_default_lang):
raise InvalidProjectMigration(
_("Target app default language and the source app default language don't match"))
project_source_lang = self.migrator.get_project_source_lang
source_app_lang_code = SOURCE_LANGUAGE_MAPPING.get(self.source_app_default_lang,
self.source_app_default_lang)
if source_app_lang_code != project_source_lang:
raise InvalidProjectMigration(
_("Transifex project source lang and the source app default language don't match"))
target_app_lang_code = SOURCE_LANGUAGE_MAPPING.get(self.target_app_default_lang,
self.target_app_default_lang)
if target_app_lang_code != project_source_lang:
raise InvalidProjectMigration(
_("Transifex project source lang and the target app default language don't match"))
|
notebook/str_compare_re.py | vhn0912/python-snippets | 174 | 34101 | <filename>notebook/str_compare_re.py
import re
s = 'aaa-AAA-123'
print(re.search('aaa', s))
# <re.Match object; span=(0, 3), match='aaa'>
print(re.search('xxx', s))
# None
print(re.search('^aaa', s))
# <re.Match object; span=(0, 3), match='aaa'>
print(re.search('^123', s))
# None
print(re.search('aaa$', s))
# None
print(re.search('123$', s))
# <re.Match object; span=(8, 11), match='123'>
print(re.search('[A-Z]+', s))
# <re.Match object; span=(4, 7), match='AAA'>
s = '012-3456-7890'
print(re.fullmatch(r'\d{3}-\d{4}-\d{4}', s))
# <re.Match object; span=(0, 13), match='012-3456-7890'>
s = 'tel: 012-3456-7890'
print(re.fullmatch(r'\d{3}-\d{4}-\d{4}', s))
# None
s = '012-3456-7890'
print(re.search(r'^\d{3}-\d{4}-\d{4}$', s))
# <re.Match object; span=(0, 13), match='012-3456-7890'>
s = 'tel: 012-3456-7890'
print(re.search(r'^\d{3}-\d{4}-\d{4}$', s))
# None
s = 'ABC'
print(re.search('abc', s))
# None
print(re.search('abc', s, re.IGNORECASE))
# <re.Match object; span=(0, 3), match='ABC'>
|
model-optimizer/unit_tests/extensions/ops/sparse_reshape_test.py | monroid/openvino | 2,406 | 34110 | <reponame>monroid/openvino
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from extensions.ops.sparse_reshape import SparseReshape
from mo.front.common.partial_infer.utils import int64_array
from mo.graph.graph import Node
from unit_tests.utils.graph import build_graph
nodes_attributes = {'input_indices': {'shape': None, 'value': None, 'kind': 'data'},
'input_shape': {'shape': None, 'value': None, 'kind': 'data'},
'new_shape': {'shape': None, 'value': None, 'kind': 'data'},
'sparse_reshape_node': {'op': 'SparseReshape', 'kind': 'op'},
'output_indices': {'shape': None, 'value': None, 'kind': 'data'},
'output_shape': {'shape': None, 'value': None, 'kind': 'data'}}
# graph 1
edges1 = [('input_indices', 'sparse_reshape_node', {'in': 0}),
('input_shape', 'sparse_reshape_node', {'in': 1}),
('new_shape', 'sparse_reshape_node', {'in': 2}),
('sparse_reshape_node', 'output_indices', {'out': 0}),
('sparse_reshape_node', 'output_shape', {'out': 1})]
inputs1 = {'input_indices': {'shape': int64_array([5, 2]), 'value': None},
'input_shape': {'shape': int64_array([2]), 'value': int64_array([4, 5])},
'new_shape': {'shape': int64_array([3]), 'value': int64_array([5, -1, 2])}}
class TestSparseReshape(unittest.TestCase):
def test_partial_infer1(self):
graph = build_graph(nodes_attributes, edges1, inputs1)
sparse_reshape_node = Node(graph, 'sparse_reshape_node')
SparseReshape.infer(sparse_reshape_node)
# prepare reference results
ref_output_indices_shape = np.array([5, 3], dtype=np.int32)
ref_output_shape_value = np.array([5, 2, 2], dtype=np.int32)
# get the result
res_output_indices_shape = graph.node['output_indices']['shape']
res_output_shape_value = graph.node['output_shape']['value']
self.assertTrue(np.array_equal(ref_output_indices_shape, res_output_indices_shape),
'shapes do not match expected: {} and given: {}'.format(ref_output_indices_shape, res_output_indices_shape))
self.assertTrue(np.array_equal(ref_output_shape_value, res_output_shape_value),
'values do not match expected: {} and given: {}'.format(ref_output_shape_value, res_output_shape_value))
|
scripts/get_article.py | theblueskies/prose | 2,906 | 34135 | <reponame>theblueskies/prose
import os
from newspaper import Article
url = 'http://fox13now.com/2013/12/30/new-year-new-laws-obamacare-pot-guns-and-drones/'
article = Article(url)
article.download()
article.parse()
with open(os.path.join('testdata', 'article.txt'), 'w') as f:
f.write(article.text)
|
test/test_server.py | gndu91/wsproto | 179 | 34146 | <gh_stars>100-1000
from typing import cast, List, Optional, Tuple
import h11
import pytest
from wsproto import WSConnection
from wsproto.connection import SERVER
from wsproto.events import (
AcceptConnection,
Event,
RejectConnection,
RejectData,
Request,
)
from wsproto.extensions import Extension
from wsproto.typing import Headers
from wsproto.utilities import (
generate_accept_token,
generate_nonce,
normed_header_dict,
RemoteProtocolError,
)
from .helpers import FakeExtension
def _make_connection_request(request_headers: Headers, method: str = "GET") -> Request:
client = h11.Connection(h11.CLIENT)
server = WSConnection(SERVER)
server.receive_data(
client.send(h11.Request(method=method, target="/", headers=request_headers))
)
event = next(server.events())
assert isinstance(event, Request)
return event
def test_connection_request() -> None:
event = _make_connection_request(
[
(b"Host", b"localhost"),
(b"Connection", b"Keep-Alive, Upgrade"),
(b"Upgrade", b"WebSocket"),
(b"Sec-WebSocket-Version", b"13"),
(b"Sec-WebSocket-Key", generate_nonce()),
(b"X-Foo", b"bar"),
]
)
assert event.extensions == []
assert event.host == "localhost"
assert event.subprotocols == []
assert event.target == "/"
headers = normed_header_dict(event.extra_headers)
assert b"host" not in headers
assert b"sec-websocket-extensions" not in headers
assert b"sec-websocket-protocol" not in headers
assert headers[b"connection"] == b"Keep-Alive, Upgrade"
assert headers[b"sec-websocket-version"] == b"13"
assert headers[b"upgrade"] == b"WebSocket"
assert headers[b"x-foo"] == b"bar"
def test_connection_request_bad_method() -> None:
with pytest.raises(RemoteProtocolError) as excinfo:
_make_connection_request(
[
(b"Host", b"localhost"),
(b"Connection", b"Keep-Alive, Upgrade"),
(b"Upgrade", b"WebSocket"),
(b"Sec-WebSocket-Version", b"13"),
(b"Sec-WebSocket-Key", generate_nonce()),
],
method="POST",
)
assert str(excinfo.value) == "Request method must be GET"
def test_connection_request_bad_connection_header() -> None:
with pytest.raises(RemoteProtocolError) as excinfo:
_make_connection_request(
[
(b"Host", b"localhost"),
(b"Connection", b"Keep-Alive, No-Upgrade"),
(b"Upgrade", b"WebSocket"),
(b"Sec-WebSocket-Version", b"13"),
(b"Sec-WebSocket-Key", generate_nonce()),
]
)
assert str(excinfo.value) == "Missing header, 'Connection: Upgrade'"
def test_connection_request_bad_upgrade_header() -> None:
with pytest.raises(RemoteProtocolError) as excinfo:
_make_connection_request(
[
(b"Host", b"localhost"),
(b"Connection", b"Keep-Alive, Upgrade"),
(b"Upgrade", b"h2c"),
(b"Sec-WebSocket-Version", b"13"),
(b"Sec-WebSocket-Key", generate_nonce()),
]
)
assert str(excinfo.value) == "Missing header, 'Upgrade: WebSocket'"
@pytest.mark.parametrize("version", [b"12", b"not-a-digit"])
def test_connection_request_bad_version_header(version: bytes) -> None:
with pytest.raises(RemoteProtocolError) as excinfo:
_make_connection_request(
[
(b"Host", b"localhost"),
(b"Connection", b"Keep-Alive, Upgrade"),
(b"Upgrade", b"WebSocket"),
(b"Sec-WebSocket-Version", version),
(b"Sec-WebSocket-Key", generate_nonce()),
]
)
assert str(excinfo.value) == "Missing header, 'Sec-WebSocket-Version'"
assert excinfo.value.event_hint == RejectConnection(
headers=[(b"Sec-WebSocket-Version", b"13")], status_code=426
)
def test_connection_request_key_header() -> None:
with pytest.raises(RemoteProtocolError) as excinfo:
_make_connection_request(
[
(b"Host", b"localhost"),
(b"Connection", b"Keep-Alive, Upgrade"),
(b"Upgrade", b"WebSocket"),
(b"Sec-WebSocket-Version", b"13"),
]
)
assert str(excinfo.value) == "Missing header, 'Sec-WebSocket-Key'"
def test_upgrade_request() -> None:
server = WSConnection(SERVER)
server.initiate_upgrade_connection(
[
(b"Host", b"localhost"),
(b"Connection", b"Keep-Alive, Upgrade"),
(b"Upgrade", b"WebSocket"),
(b"Sec-WebSocket-Version", b"13"),
(b"Sec-WebSocket-Key", generate_nonce()),
(b"X-Foo", b"bar"),
],
"/",
)
event = next(server.events())
event = cast(Request, event)
assert event.extensions == []
assert event.host == "localhost"
assert event.subprotocols == []
assert event.target == "/"
headers = normed_header_dict(event.extra_headers)
assert b"host" not in headers
assert b"sec-websocket-extensions" not in headers
assert b"sec-websocket-protocol" not in headers
assert headers[b"connection"] == b"Keep-Alive, Upgrade"
assert headers[b"sec-websocket-version"] == b"13"
assert headers[b"upgrade"] == b"WebSocket"
assert headers[b"x-foo"] == b"bar"
def _make_handshake(
request_headers: Headers,
accept_headers: Optional[Headers] = None,
subprotocol: Optional[str] = None,
extensions: Optional[List[Extension]] = None,
) -> Tuple[h11.InformationalResponse, bytes]:
client = h11.Connection(h11.CLIENT)
server = WSConnection(SERVER)
nonce = generate_nonce()
server.receive_data(
client.send(
h11.Request(
method="GET",
target="/",
headers=[
(b"Host", b"localhost"),
(b"Connection", b"Keep-Alive, Upgrade"),
(b"Upgrade", b"WebSocket"),
(b"Sec-WebSocket-Version", b"13"),
(b"Sec-WebSocket-Key", nonce),
]
+ request_headers,
)
)
)
client.receive_data(
server.send(
AcceptConnection(
extra_headers=accept_headers or [],
subprotocol=subprotocol,
extensions=extensions or [],
)
)
)
event = client.next_event()
return event, nonce
def test_handshake() -> None:
response, nonce = _make_handshake([])
response.headers = sorted(response.headers) # For test determinism
assert response == h11.InformationalResponse(
status_code=101,
headers=[
(b"connection", b"Upgrade"),
(b"sec-websocket-accept", generate_accept_token(nonce)),
(b"upgrade", b"WebSocket"),
],
)
def test_handshake_extra_headers() -> None:
response, nonce = _make_handshake([], accept_headers=[(b"X-Foo", b"bar")])
response.headers = sorted(response.headers) # For test determinism
assert response == h11.InformationalResponse(
status_code=101,
headers=[
(b"connection", b"Upgrade"),
(b"sec-websocket-accept", generate_accept_token(nonce)),
(b"upgrade", b"WebSocket"),
(b"x-foo", b"bar"),
],
)
@pytest.mark.parametrize("accept_subprotocol", ["one", "two"])
def test_handshake_with_subprotocol(accept_subprotocol: str) -> None:
response, _ = _make_handshake(
[(b"Sec-Websocket-Protocol", b"one, two")], subprotocol=accept_subprotocol
)
headers = normed_header_dict(response.headers)
assert headers[b"sec-websocket-protocol"] == accept_subprotocol.encode("ascii")
def test_handshake_with_extension() -> None:
extension = FakeExtension(accept_response=True)
response, _ = _make_handshake(
[(b"Sec-Websocket-Extensions", extension.name.encode("ascii"))],
extensions=[extension],
)
headers = normed_header_dict(response.headers)
assert headers[b"sec-websocket-extensions"] == extension.name.encode("ascii")
def test_handshake_with_extension_params() -> None:
offered_params = "parameter1=value3; parameter2=value4"
accepted_params = "parameter1=value1; parameter2=value2"
extension = FakeExtension(accept_response=accepted_params)
response, _ = _make_handshake(
[
(
b"Sec-Websocket-Extensions",
(f"{extension.name}; {offered_params}").encode("ascii"),
)
],
extensions=[extension],
)
headers = normed_header_dict(response.headers)
assert extension.offered == f"{extension.name}; {offered_params}"
assert headers[b"sec-websocket-extensions"] == (
f"{extension.name}; {accepted_params}"
).encode("ascii")
def test_handshake_with_extra_unaccepted_extension() -> None:
extension = FakeExtension(accept_response=True)
response, _ = _make_handshake(
[
(
b"Sec-Websocket-Extensions",
b"pretend, %s" % extension.name.encode("ascii"),
)
],
extensions=[extension],
)
headers = normed_header_dict(response.headers)
assert headers[b"sec-websocket-extensions"] == extension.name.encode("ascii")
def test_protocol_error() -> None:
server = WSConnection(SERVER)
with pytest.raises(RemoteProtocolError) as excinfo:
server.receive_data(b"broken nonsense\r\n\r\n")
assert str(excinfo.value) == "Bad HTTP message"
def _make_handshake_rejection(
status_code: int, body: Optional[bytes] = None
) -> List[Event]:
client = h11.Connection(h11.CLIENT)
server = WSConnection(SERVER)
nonce = generate_nonce()
server.receive_data(
client.send(
h11.Request(
method="GET",
target="/",
headers=[
(b"Host", b"localhost"),
(b"Connection", b"Keep-Alive, Upgrade"),
(b"Upgrade", b"WebSocket"),
(b"Sec-WebSocket-Version", b"13"),
(b"Sec-WebSocket-Key", nonce),
],
)
)
)
if body is not None:
client.receive_data(
server.send(
RejectConnection(
headers=[(b"content-length", b"%d" % len(body))],
status_code=status_code,
has_body=True,
)
)
)
client.receive_data(server.send(RejectData(data=body)))
else:
client.receive_data(server.send(RejectConnection(status_code=status_code)))
events = []
while True:
event = client.next_event()
events.append(event)
if isinstance(event, h11.EndOfMessage):
return events
def test_handshake_rejection() -> None:
events = _make_handshake_rejection(400)
assert events == [
h11.Response(headers=[(b"content-length", b"0")], status_code=400),
h11.EndOfMessage(),
]
def test_handshake_rejection_with_body() -> None:
events = _make_handshake_rejection(400, body=b"Hello")
assert events == [
h11.Response(headers=[(b"content-length", b"5")], status_code=400),
h11.Data(data=b"Hello"),
h11.EndOfMessage(),
]
|
toolchain/riscv/MSYS/python/Lib/test/encoded_modules/__init__.py | zhiqiang-hu/bl_iot_sdk | 207 | 34160 | # -*- encoding: utf-8 -*-
# This is a package that contains a number of modules that are used to
# test import from the source files that have different encodings.
# This file (the __init__ module of the package), is encoded in utf-8
# and contains a list of strings from various unicode planes that are
# encoded differently to compare them to the same strings encoded
# differently in submodules. The following list, test_strings,
# contains a list of tuples. The first element of each tuple is the
# suffix that should be prepended with 'module_' to arrive at the
# encoded submodule name, the second item is the encoding and the last
# is the test string. The same string is assigned to the variable
# named 'test' inside the submodule. If the decoding of modules works
# correctly, from module_xyz import test should result in the same
# string as listed below in the 'xyz' entry.
# module, encoding, test string
test_strings = (
('iso_8859_1', 'iso-8859-1', "Les hommes ont oublié cette vérité, "
"dit le renard. Mais tu ne dois pas l'oublier. Tu deviens "
"responsable pour toujours de ce que tu as apprivoisé."),
('koi8_r', 'koi8-r', "Познание бесконечности требует бесконечного времени.")
)
|
python/ambassador/compile.py | Asher-Wang/ambassador | 3,438 | 34168 | <reponame>Asher-Wang/ambassador<filename>python/ambassador/compile.py
# Copyright 2020 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from typing import Any, Dict, Optional, Union
import logging
from .cache import Cache
from .config import Config
from .ir import IR
from .ir.ir import IRFileChecker
from .envoy import EnvoyConfig
from .fetch import ResourceFetcher
from .utils import SecretHandler, NullSecretHandler, Timer
def Compile(logger: logging.Logger, input_text: str,
cache: Optional[Cache]=None,
file_checker: Optional[IRFileChecker]=None,
secret_handler: Optional[SecretHandler]=None,
k8s=False, envoy_version="V2") -> Dict[str, Union[IR, EnvoyConfig]]:
"""
Compile is a helper function to take a bunch of YAML and compile it into an
IR and, optionally, an Envoy config.
The output is a dictionary:
{
"ir": the IR data structure
}
    The dictionary will also have a toplevel key named after the lowercased
    envoy_version (e.g. "v2") whose value is the generated Envoy config.
:param input_text: The input text (WATT snapshot JSON or K8s YAML per 'k8s')
:param k8s: If true, input_text is K8s YAML, otherwise it's WATT snapshot JSON
    :param cache: Optional Cache to use and update while compiling
    :param envoy_version: Envoy config version to generate (e.g. "V2"); the result
                          is stored under this key, lowercased
"""
if not file_checker:
file_checker = lambda path: True
if not secret_handler:
secret_handler = NullSecretHandler(logger, None, None, "fake")
aconf = Config()
fetcher = ResourceFetcher(logger, aconf)
if k8s:
fetcher.parse_yaml(input_text, k8s=True)
else:
fetcher.parse_watt(input_text)
aconf.load_all(fetcher.sorted())
ir = IR(aconf, cache=cache, file_checker=file_checker, secret_handler=secret_handler)
out: Dict[str, Union[IR, EnvoyConfig]] = { "ir": ir }
if ir:
out[envoy_version.lower()] = EnvoyConfig.generate(ir, envoy_version.upper(), cache=cache)
return out
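# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the original Ambassador
# module, showing the intended call pattern for Compile(). The Mapping YAML is
# a made-up minimal example, and only the result keys are inspected.
if __name__ == "__main__":
    example_yaml = """
---
apiVersion: getambassador.io/v2
kind: Mapping
metadata:
  name: example-mapping
spec:
  prefix: /example/
  service: example-service
"""
    logger = logging.getLogger("ambassador-compile-example")
    result = Compile(logger, example_yaml, k8s=True)
    print(sorted(result.keys()))  # ['ir', 'v2'] with the default envoy_version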
|
examples/color4.py | yang69can/pyngl | 125 | 34174 | #
# File:
# color4.py
#
# Synopsis:
# Draws sixteen sample color boxes with RGB labels.
#
# Category:
# Colors
#
# Author:
# <NAME>
#
# Date of initial publication:
# January, 2006
#
# Description:
# This example draws sixteen color boxes using the RGB
# values for named colors. The boxes are labeled with
# the color name and the associated RGB values.
#
# Effects illustrated:
# o Drawing lines and polygons in NDC space.
# o RGB equivalents for some named colors.
# o Converting integer RGB color specifications to floating point.
#
# Output:
# o One plot is produced with sixteen sample color boxes.
#
from __future__ import print_function
import Ngl
import numpy
#
# Define the colors and labels to be used.
#
colors_and_labels = \
[ \
[233, 150, 122], "DarkSalmon", \
[164, 42, 42], "Brown", \
[255, 127, 0], "DarkOrange1", \
[255, 0, 0], "Red", \
[255, 255, 0], "Yellow", \
[ 0, 255, 0], "Green", \
[ 34, 139, 34], "ForestGreen", \
[ 0, 255, 255], "Cyan", \
[ 79, 148, 205], "SteelBlue3", \
[ 0, 0, 255], "Blue", \
[148, 0, 211], "DarkViolet", \
  [255,   0, 255], "Magenta",      \
[255, 255, 255], "White", \
[153, 153, 153], "Gray60", \
[102, 102, 102], "Gray40", \
[ 0, 0, 0], "Black" \
]
#
# Open a workstation with a default color table having
# background color "black" and foreground color "white".
#
rlist = Ngl.Resources()
rlist.wkColorMap = "default"
rlist.wkForegroundColor = "White"
rlist.wkBackgroundColor = "Black"
wks_type = "png"
wks = Ngl.open_wks(wks_type,"color4",rlist)
#
# Extract the colors and labels.
#
colors = colors_and_labels[0:len(colors_and_labels):2]
labels = colors_and_labels[1:len(colors_and_labels):2]
#
# Set up arrays and resource lists for drawing the boxes.
# Select "Helvetica-Bold" for all text.
#
x = numpy.zeros(5,'f')
y = numpy.zeros(5,'f')
poly_res = Ngl.Resources()
text_res = Ngl.Resources()
text_res.txFont = "Helvetica-Bold"
#
# Draw the color boxes and titles.
#
for i in range(0,len(colors)):
#
# delx_0 - horizontal spacing between boxes.
# delx_1 - width of a box.
# dely_0 - vertical spacing between boxes.
# dely_1 - height of a box.
#
delx_0, delx_1, dely_0, dely_1 = 0.245, 0.235, 0.22, 0.15
x[0], y[0] = 0.015 + delx_0*(i%4), 0.90 - (i//4)*dely_0
x[1], y[1] = x[0] + delx_1 , y[0]
x[2], y[2] = x[1] , y[1] - dely_1
x[3], y[3] = x[0] , y[2]
x[4], y[4] = x[0] , y[0]
#
# Convert the integer color values obtained from the
# named color chart (as entered above) to floating
# point numbers in the range 0. to 1.
#
r, g, b = colors[i][0]/255., colors[i][1]/255., colors[i][2]/255.
poly_res.gsFillColor = [r,g,b] # Ngl.new_color(wks, r, g, b)
#
# Draw a white outline if the color is black, otherwise draw a colored box.
#
if (labels[i] == "Black"):
Ngl.polyline_ndc(wks, x, y, poly_res)
else:
Ngl.polygon_ndc(wks, x, y, poly_res)
#
# Label the boxes.
#
text_res.txFontHeightF = 0.017
Ngl.text_ndc(wks, labels[i], 0.5*(x[0]+x[1]), y[0] + 0.0125, text_res)
rgb_label = "R={:4.2f} G={:4.2f} B={:4.2f}".format(r, g, b)
text_res.txFontHeightF = 0.015
Ngl.text_ndc(wks, rgb_label, 0.5*(x[0]+x[1]), y[3] - 0.0125, text_res)
#
# Plot top and bottom labels.
#
text_res.txFontHeightF = 0.025
Ngl.text_ndc(wks, "Sixteen Sample Colors", 0.5, 0.96, text_res)
text_res.txFontHeightF = 0.018
Ngl.text_ndc(wks, "The titles below each box indicate Red, Green, and Blue intensity values.", 0.5, 0.035, text_res)
Ngl.frame(wks)
Ngl.end()
|
tools/code_coverage/package/oss/cov_json.py | deltabravozulu/pytorch | 206 | 34193 | <reponame>deltabravozulu/pytorch<filename>tools/code_coverage/package/oss/cov_json.py
from ..tool import clang_coverage
from ..util.setting import CompilerType, Option, TestList, TestPlatform
from ..util.utils import check_compiler_type
from .init import detect_compiler_type
from .run import clang_run, gcc_run
def get_json_report(test_list: TestList, options: Option):
cov_type = detect_compiler_type()
check_compiler_type(cov_type)
if cov_type == CompilerType.CLANG:
# run
if options.need_run:
clang_run(test_list)
# merge && export
if options.need_merge:
clang_coverage.merge(test_list, TestPlatform.OSS)
if options.need_export:
clang_coverage.export(test_list, TestPlatform.OSS)
elif cov_type == CompilerType.GCC:
# run
if options.need_run:
gcc_run(test_list)
|
src/visualize/visualize_checkpoint.py | Immocat/ACTOR | 164 | 34214 | <reponame>Immocat/ACTOR
import os
import matplotlib.pyplot as plt
import torch
from src.utils.get_model_and_data import get_model_and_data
from src.parser.visualize import parser
from .visualize import viz_epoch
import src.utils.fixseed # noqa
plt.switch_backend('agg')
def main():
# parse options
parameters, folder, checkpointname, epoch = parser()
model, datasets = get_model_and_data(parameters)
dataset = datasets["train"]
print("Restore weights..")
checkpointpath = os.path.join(folder, checkpointname)
state_dict = torch.load(checkpointpath, map_location=parameters["device"])
model.load_state_dict(state_dict)
# visualize_params
viz_epoch(model, dataset, epoch, parameters, folder=folder, writer=None)
if __name__ == '__main__':
main()
|
src/newt/db/_ook.py | bmjjr/db | 153 | 34221 | import relstorage.storage
import ZODB.Connection
from ZODB.POSException import ConnectionStateError  # used in _ex_get below
# Monkey patches, ook
def _ex_cursor(self, name=None):
if self._stale_error is not None:
raise self._stale_error
with self._lock:
self._before_load()
return self._load_conn.cursor(name)
relstorage.storage.RelStorage.ex_cursor = _ex_cursor
def _ex_connect(self):
return self._adapter.connmanager.open()
relstorage.storage.RelStorage.ex_connect = _ex_connect
def _ex_get(self, oid, ghost_pickle):
"""Return the persistent object with oid 'oid'."""
if self.opened is None:
raise ConnectionStateError("The database connection is closed")
obj = self._cache.get(oid, None)
if obj is not None:
return obj
obj = self._added.get(oid, None)
if obj is not None:
return obj
obj = self._pre_cache.get(oid, None)
if obj is not None:
return obj
obj = self._reader.getGhost(ghost_pickle) # New code
    # Avoid an infinite loop if obj tries to load its state before
    # it is added to the cache and its state refers to it.
# (This will typically be the case for non-ghostifyable objects,
# like persistent caches.)
self._pre_cache[oid] = obj
self._cache.new_ghost(oid, obj)
self._pre_cache.pop(oid)
return obj
ZODB.Connection.Connection.ex_get = _ex_get
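# Illustrative note (not part of the original module): importing this module is
# enough to install the patches above. Afterwards, assuming `storage` is an open
# RelStorage instance and `conn` is a ZODB connection, one could write e.g.:
#
#     cursor = storage.ex_cursor()            # raw cursor on the load connection
#     obj = conn.ex_get(oid, ghost_pickle)    # ghost built from a known pickle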
|
stream/migrations/0001_init_models.py | freejooo/vigilio | 137 | 34233 | <filename>stream/migrations/0001_init_models.py
# Generated by Django 3.1.5 on 2021-03-17 21:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Movie",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("imdb_id", models.CharField(max_length=10)),
("title", models.CharField(blank=True, max_length=120)),
("description", models.TextField(blank=True, null=True)),
("moviedb_popularity", models.FloatField(blank=True, null=True)),
(
"poster_path_big",
models.CharField(blank=True, max_length=255, null=True),
),
(
"poster_path_small",
models.CharField(blank=True, max_length=255, null=True),
),
(
"backdrop_path_big",
models.CharField(blank=True, max_length=255, null=True),
),
(
"backdrop_path_small",
models.CharField(blank=True, max_length=255, null=True),
),
("duration", models.IntegerField(default=0)),
("media_info_raw", models.JSONField(blank=True, default=dict)),
("imdb_score", models.FloatField(default=0.0)),
(
"original_language",
models.CharField(blank=True, max_length=2, null=True),
),
("release_date", models.DateField(blank=True, null=True)),
("is_adult", models.BooleanField(default=False)),
("is_ready", models.BooleanField(default=False)),
("updated_at", models.DateTimeField(auto_now=True)),
("created_at", models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name="MovieDBCategory",
fields=[
("moviedb_id", models.IntegerField(primary_key=True, serialize=False)),
("name", models.CharField(max_length=20)),
("updated_at", models.DateTimeField(auto_now=True)),
("created_at", models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name="MovieSubtitle",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("full_path", models.CharField(max_length=255)),
("relative_path", models.CharField(max_length=255)),
("file_name", models.CharField(max_length=255)),
("suffix", models.CharField(max_length=7)),
("updated_at", models.DateTimeField(auto_now=True)),
("created_at", models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name="UserMovieHistory",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("current_second", models.IntegerField(default=0)),
("remaining_seconds", models.IntegerField(default=0)),
("is_watched", models.BooleanField(default=False)),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
(
"movie",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="history",
to="stream.movie",
),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="MyList",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created_at", models.DateTimeField(auto_now_add=True)),
(
"movie",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="my_list",
to="stream.movie",
),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="MovieContent",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("torrent_source", models.TextField(null=True)),
("full_path", models.CharField(blank=True, max_length=255, null=True)),
(
"relative_path",
models.CharField(blank=True, max_length=255, null=True),
),
(
"main_folder",
models.CharField(blank=True, max_length=255, null=True),
),
("file_name", models.CharField(blank=True, max_length=255, null=True)),
(
"file_extension",
models.CharField(blank=True, max_length=255, null=True),
),
(
"source_file_name",
models.CharField(blank=True, max_length=255, null=True),
),
(
"source_file_extension",
models.CharField(blank=True, max_length=255, null=True),
),
("resolution_width", models.IntegerField(default=0)),
("resolution_height", models.IntegerField(default=0)),
("raw_info", models.TextField(blank=True, null=True)),
("is_ready", models.BooleanField(default=False)),
("updated_at", models.DateTimeField(auto_now=True)),
("created_at", models.DateTimeField(auto_now_add=True)),
(
"movie_subtitle",
models.ManyToManyField(blank=True, to="stream.MovieSubtitle"),
),
],
),
migrations.AddField(
model_name="movie",
name="movie_content",
field=models.ManyToManyField(to="stream.MovieContent"),
),
migrations.AddField(
model_name="movie",
name="moviedb_category",
field=models.ManyToManyField(blank=True, to="stream.MovieDBCategory"),
),
]
|
Javatar.py | evandrocoan/Javatar | 142 | 34247 | <reponame>evandrocoan/Javatar
from .commands import *
from .core.event_handler import *
from .utils import (
Constant
)
def plugin_loaded():
Constant.startup()
|
General Questions/Longest_Common_Prefix.py | siddhi-244/CompetitiveProgrammingQuestionBank | 931 | 34254 | # Longest Common Prefix in Python
# Implementation of a Python program to find the longest common prefix among a given list of strings.
# If there is no common prefix, 0 is returned.
# Define the function that evaluates the longest common prefix.
def longestCommonPrefix(s):
p = '' #declare an empty string
for i in range(len(min(s, key=len))):
f = s[0][i]
for j in s[1:]:
if j[i] != f:
return p
p += f
return p #return the longest common prefix
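# Illustrative example (not part of the original exercise):
# longestCommonPrefix(["flower", "flow", "flight"]) returns "fl",
# since the third string diverges from the others at index 2.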
n = int(input("Enter the number of strings in the list: "))
print("Enter the strings:")
s = [input() for i in range(n)]
prefix = longestCommonPrefix(s)
if prefix:
    print("The common prefix is:", prefix)
else:
    print("There is no common prefix for the given list of strings, hence the answer is:", 0) |
netbox/extras/migrations/0061_extras_change_logging.py | TheFlyingCorpse/netbox | 4,994 | 34256 | from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('extras', '0060_customlink_button_class'),
]
operations = [
migrations.AddField(
model_name='customfield',
name='created',
field=models.DateField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='customfield',
name='last_updated',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name='customlink',
name='created',
field=models.DateField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='customlink',
name='last_updated',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name='exporttemplate',
name='created',
field=models.DateField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='exporttemplate',
name='last_updated',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name='webhook',
name='created',
field=models.DateField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='webhook',
name='last_updated',
field=models.DateTimeField(auto_now=True, null=True),
),
]
|
testing/examples/import_error.py | dry-python/dependencies | 175 | 34270 | from astral import Vision # noqa: F401
|
nydus/db/base.py | Elec/nydus | 102 | 34282 | <reponame>Elec/nydus<gh_stars>100-1000
"""
nydus.db.base
~~~~~~~~~~~~~
:copyright: (c) 2011-2012 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
__all__ = ('LazyConnectionHandler', 'BaseCluster')
import collections
from nydus.db.map import DistributedContextManager
from nydus.db.routers import BaseRouter, routing_params
from nydus.utils import apply_defaults
def iter_hosts(hosts):
# this can either be a dictionary (with the key acting as the numeric
# index) or it can be a sorted list.
if isinstance(hosts, collections.Mapping):
return hosts.iteritems()
return enumerate(hosts)
def create_connection(Connection, num, host_settings, defaults):
# host_settings can be an iterable or a dictionary depending on the style
# of connection (some connections share options and simply just need to
# pass a single host, or a list of hosts)
if isinstance(host_settings, collections.Mapping):
return Connection(num, **apply_defaults(host_settings, defaults or {}))
elif isinstance(host_settings, collections.Iterable):
return Connection(num, *host_settings, **defaults or {})
return Connection(num, host_settings, **defaults or {})
class BaseCluster(object):
"""
Holds a cluster of connections.
"""
class MaxRetriesExceededError(Exception):
pass
def __init__(self, hosts, backend, router=BaseRouter, max_connection_retries=20, defaults=None):
self.hosts = dict(
(conn_number, create_connection(backend, conn_number, host_settings, defaults))
for conn_number, host_settings
in iter_hosts(hosts)
)
self.max_connection_retries = max_connection_retries
self.install_router(router)
def __len__(self):
return len(self.hosts)
def __getitem__(self, name):
return self.hosts[name]
def __getattr__(self, name):
return CallProxy(self, name)
def __iter__(self):
for name in self.hosts.iterkeys():
yield name
def install_router(self, router):
self.router = router(self)
def execute(self, path, args, kwargs):
connections = self.__connections_for(path, args=args, kwargs=kwargs)
results = []
for conn in connections:
for retry in xrange(self.max_connection_retries):
func = conn
for piece in path.split('.'):
func = getattr(func, piece)
try:
results.append(func(*args, **kwargs))
except tuple(conn.retryable_exceptions), e:
if not self.router.retryable:
raise e
elif retry == self.max_connection_retries - 1:
raise self.MaxRetriesExceededError(e)
else:
conn = self.__connections_for(path, retry_for=conn.num, args=args, kwargs=kwargs)[0]
else:
break
        # If we only had one db to query, we simply return that result
if len(results) == 1:
return results[0]
else:
return results
def disconnect(self):
"""Disconnects all connections in cluster"""
for connection in self.hosts.itervalues():
connection.disconnect()
def get_conn(self, *args, **kwargs):
"""
Returns a connection object from the router given ``args``.
Useful in cases where a connection cannot be automatically determined
during all steps of the process. An example of this would be
Redis pipelines.
"""
connections = self.__connections_for('get_conn', args=args, kwargs=kwargs)
        if len(connections) == 1:
return connections[0]
else:
return connections
def map(self, workers=None, **kwargs):
return DistributedContextManager(self, workers, **kwargs)
@routing_params
def __connections_for(self, attr, args, kwargs, **fkwargs):
return [self[n] for n in self.router.get_dbs(attr=attr, args=args, kwargs=kwargs, **fkwargs)]
class CallProxy(object):
"""
Handles routing function calls to the proper connection.
"""
def __init__(self, cluster, path):
self.__cluster = cluster
self.__path = path
def __call__(self, *args, **kwargs):
return self.__cluster.execute(self.__path, args, kwargs)
def __getattr__(self, name):
return CallProxy(self.__cluster, self.__path + '.' + name)
class LazyConnectionHandler(dict):
"""
Maps clusters of connections within a dictionary.
"""
def __init__(self, conf_callback):
self.conf_callback = conf_callback
self.conf_settings = {}
self.__is_ready = False
def __getitem__(self, key):
if not self.is_ready():
self.reload()
return super(LazyConnectionHandler, self).__getitem__(key)
def is_ready(self):
return self.__is_ready
def reload(self):
from nydus.db import create_cluster
for conn_alias, conn_settings in self.conf_callback().iteritems():
self[conn_alias] = create_cluster(conn_settings)
        self.__is_ready = True
def disconnect(self):
"""Disconnects all connections in cluster"""
for connection in self.itervalues():
connection.disconnect()
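# Illustrative usage (not part of the original module): a sketch of how a cluster
# is typically built through nydus.db.create_cluster. The backend/router dotted
# paths and host settings below are assumptions for the example, not a tested config.
#
#     from nydus.db import create_cluster
#
#     redis = create_cluster({
#         'backend': 'nydus.db.backends.redis.Redis',
#         'router': 'nydus.db.routers.keyvalue.PartitionRouter',
#         'hosts': {0: {'db': 0}, 1: {'db': 1}},
#     })
#     redis.set('key', 'value')    # routed to one host by the router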
|
sprocket/util/extfrm.py | zhouming-hfut/sprocket | 500 | 34290 | <reponame>zhouming-hfut/sprocket<gh_stars>100-1000
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
def extfrm(data, npow, power_threshold=-20):
"""Extract frame over the power threshold
Parameters
----------
data: array, shape (`T`, `dim`)
Array of input data
npow : array, shape (`T`)
Vector of normalized power sequence.
power_threshold : float, optional
Value of power threshold [dB]
Default set to -20
Returns
-------
data: array, shape (`T_ext`, `dim`)
Remaining data after extracting frame
`T_ext` <= `T`
"""
T = data.shape[0]
if T != len(npow):
raise("Length of two vectors is different.")
valid_index = np.where(npow > power_threshold)
extdata = data[valid_index]
assert extdata.shape[0] <= T
return extdata
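if __name__ == "__main__":
    # Illustrative check (not part of the original module): five 2-dimensional
    # frames, keeping only those whose normalized power exceeds -20 dB.
    demo_data = np.arange(10, dtype=np.float64).reshape(5, 2)
    demo_npow = np.array([-30.0, -10.0, -25.0, -5.0, -15.0])
    print(extfrm(demo_data, demo_npow))  # keeps frames 1, 3 and 4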
|
rj_gameplay/stp/coordinator.py | RoboJackets/robocup-software | 200 | 34321 | <gh_stars>100-1000
"""This module contains the implementation of the coordinator."""
from typing import Any, Dict, Optional, Type, List, Callable
import stp.play
import stp.rc as rc
import stp.role.assignment as assignment
import stp.situation
import stp.skill
from rj_msgs import msg
NUM_ROBOTS = 16
class Coordinator:
"""The coordinator is responsible for using SituationAnalyzer to select the best
play to run, calling tick() on the play to get the list of skills, then ticking
all of the resulting skills."""
__slots__ = [
"_play_selector",
"_prev_situation",
"_prev_play",
"_prev_role_results",
"_props",
"_debug_callback",
]
_play_selector: stp.situation.IPlaySelector
_prev_situation: Optional[stp.situation.ISituation]
_prev_play: Optional[stp.play.IPlay]
_prev_role_results: assignment.FlatRoleResults
_props: Dict[Type[stp.play.IPlay], Any]
# TODO(1585): Properly handle type annotations for props instead of using Any.
def __init__(
self,
play_selector: stp.situation.IPlaySelector,
        debug_callback: Optional[Callable[[stp.play.IPlay, List[stp.skill.ISkill]],
                                          None]] = None):
self._play_selector = play_selector
self._props = {}
self._prev_situation = None
self._prev_play = None
self._prev_role_results = {}
self._debug_callback = debug_callback
def tick(self, world_state: rc.WorldState) -> List[msg.RobotIntent]:
"""Performs 1 ticks of the STP system:
1. Selects the best play to run given the passed in world state.
2. Ticks the best play, collecting the list of skills to run.
3. Ticks the list of skills.
:param world_state: The current state of the world.
"""
# Call situational analysis to see which play should be running.
cur_situation, cur_play = self._play_selector.select(world_state)
cur_play_type: Type[stp.play.IPlay] = type(cur_play)
# Update the props.
cur_play_props = cur_play.compute_props(self._props.get(cur_play_type, None))
if isinstance(cur_play, type(
self._prev_play)) and not self._prev_play.is_done(world_state):
cur_play = self._prev_play
# This should be checked here or in the play selector, so we can restart a play easily
# Collect the list of skills from the play.
new_role_results, skills = cur_play.tick(
world_state, self._prev_role_results, cur_play_props
)
        if self._debug_callback is not None:
            self._debug_callback(cur_play, [entry.skill for entry in skills])
# Get the list of actions from the skills
intents = [msg.RobotIntent() for i in range(NUM_ROBOTS)]
intents_dict = {}
for skill in skills:
robot = new_role_results[skill][0].role.robot
intents_dict.update(skill.skill.tick(robot, world_state, intents[robot.id]))
# Get the list of robot intents from the actions
for i in range(NUM_ROBOTS):
if i in intents_dict.keys():
intents[i] = intents_dict[i]
else:
intents[i].motion_command.empty_command = [msg.EmptyMotionCommand()]
# Update _prev_*.
self._prev_situation = cur_situation
self._prev_play = cur_play
self._prev_role_results = new_role_results
self._props[cur_play_type] = cur_play_props
return intents
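# Illustrative usage (not part of the original module): a rough sketch of how a
# Coordinator might be driven. `MyPlaySelector` and the source of `world_state`
# are assumptions for this example only.
#
#     selector = MyPlaySelector()              # any stp.situation.IPlaySelector implementation
#     coordinator = Coordinator(selector)      # debug_callback is optional
#     intents = coordinator.tick(world_state)  # one msg.RobotIntent per robot slot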
|