ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py | 1a33a942ade463a5cd2d7a170f5b039fc6c19e75 | from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
def create_user(**params):
return get_user_model().objects.create_user(**params)
class PublicUserApiTests(TestCase):
"""Test the users API (public)"""
def setUp(self):
self.client = APIClient()
def test_create_valid_user_success(self):
"""Test creating user with valid payload is successful """
payload = {
'email': '[email protected]',
'password': 'test_pass',
'name': 'Test'
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', res.data)
def test_user_exists(self):
"""Test creating user already exist"""
payload = {
'email': '[email protected]',
'password': 'test_pass',
}
create_user(**payload)
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
"""Test password if is too short"""
payload = {
'email': '[email protected]',
'password': 'test',
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exists)
def test_create_token_for_user(self):
"""Test that a token is created for the user"""
payload = {
'email': '[email protected]',
'password': 'test',
}
create_user(**payload)
res = self.client.post(TOKEN_URL, payload)
self.assertIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_token_invalid_credentials(self):
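"""Test that a token is not created if invalid credentials are given"""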
create_user(email='[email protected]', password='test')
payload = {'email': '[email protected]', 'password': 'wrong'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_no_user(self):
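"""Test that a token is not created if the user does not exist"""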
payload = {
'email': '[email protected]',
'password': 'test',
}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field(self):
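"""Test that email and password are required to get a token"""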
res = self.client.post(TOKEN_URL, {'email': 'oee', 'password': ''})
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_user_unauthorized(self):
"""Test authentication is required for users"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
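"""Test API requests that require authentication"""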
def setUp(self):
self.user = create_user(
email='[email protected]',
password='Test123',
name='name',
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_profile_success(self):
"""Test retrieving profile for logged in used"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, {
'name': self.user.name,
'email': self.user.email
})
def test_post_me_not_allowed(self):
"""Test that POST is not allowed on the me url"""
res = self.client.post(ME_URL, {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_profile(self):
"""Test updating the user profile for authenticated user"""
payload = {'name': 'new_name', 'password': 'new_password123'}
res = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK)
|
py | 1a33a9b1fa53e16c7c3c7d94e867586607358f18 | import ast
import copy
from vyper.exceptions import (
ParserException,
InvalidLiteralException,
StructureException,
TypeMismatchException,
FunctionDeclarationException,
EventDeclarationException
)
from vyper.signatures.function_signature import (
FunctionSignature,
VariableRecord,
)
from vyper.signatures.event_signature import (
EventSignature,
)
from vyper.parser.stmt import Stmt
from vyper.parser.expr import Expr
from vyper.parser.context import Context, Constancy
from vyper.parser.global_context import GlobalContext
from vyper.parser.lll_node import LLLnode
from vyper.parser.pre_parser import pre_parse
from vyper.parser.parser_utils import (
make_setter,
base_type_conversion,
byte_array_to_num,
decorate_ast,
getpos,
make_byte_array_copier,
resolve_negative_literals,
unwrap_location,
)
from vyper.types import (
BaseType,
ByteArrayLike,
ListType,
)
from vyper.types import (
get_size_of_type,
is_base_type,
ceil32,
)
from vyper.utils import (
MemoryPositions,
LOADED_LIMIT_MAP,
string_to_bytes,
)
from vyper.utils import (
bytes_to_int,
calc_mem_gas,
)
if not hasattr(ast, 'AnnAssign'):
raise Exception("Requires python 3.6 or higher for annotation support")
# Converts code to parse tree
def parse_to_ast(code):
class_names, code = pre_parse(code)
if '\x00' in code:
raise ParserException('No null bytes (\\x00) allowed in the source code.')
o = ast.parse(code) # python ast
decorate_ast(o, code, class_names) # decorated python ast
o = resolve_negative_literals(o)
return o.body
# Header code
initializer_list = ['seq', ['mstore', 28, ['calldataload', 0]]]
# Store limit constants at fixed addresses in memory.
initializer_list += [['mstore', pos, limit_size] for pos, limit_size in LOADED_LIMIT_MAP.items()]
initializer_lll = LLLnode.from_list(initializer_list, typ=None)
# Is a function the initializer?
def is_initializer(code):
return code.name == '__init__'
# Is a function the default function?
def is_default_func(code):
return code.name == '__default__'
# Generate default argument function signatures.
def generate_default_arg_sigs(code, contracts, global_ctx):
# generate all sigs, and attach.
total_default_args = len(code.args.defaults)
if total_default_args == 0:
return [
FunctionSignature.from_definition(
code,
sigs=contracts,
custom_units=global_ctx._custom_units,
custom_structs=global_ctx._structs,
constants=global_ctx._constants
)
]
base_args = code.args.args[:-total_default_args]
default_args = code.args.args[-total_default_args:]
# Generate a list of default function combinations.
row = [False] * (total_default_args)
table = [row.copy()]
for i in range(total_default_args):
row[i] = True
table.append(row.copy())
default_sig_strs = []
sig_fun_defs = []
for truth_row in table:
new_code = copy.deepcopy(code)
new_code.args.args = copy.deepcopy(base_args)
new_code.args.defaults = []
# Add necessary default args.
for idx, val in enumerate(truth_row):
if val is True:
new_code.args.args.append(default_args[idx])
sig = FunctionSignature.from_definition(
new_code,
sigs=contracts,
custom_units=global_ctx._custom_units,
custom_structs=global_ctx._structs,
constants=global_ctx._constants
)
default_sig_strs.append(sig.sig)
sig_fun_defs.append(sig)
return sig_fun_defs
# Get ABI signature
def mk_full_signature(code, sig_formatter=None, interface_codes=None):
if sig_formatter is None:
# Use default JSON style output.
sig_formatter = lambda sig, custom_units_descriptions: sig.to_abi_dict(custom_units_descriptions)
o = []
global_ctx = GlobalContext.get_global_context(code, interface_codes=interface_codes)
# Produce event signatures.
for code in global_ctx._events:
sig = EventSignature.from_declaration(code, global_ctx)
o.append(sig_formatter(sig, global_ctx._custom_units_descriptions))
# Produce function signatures.
for code in global_ctx._defs:
sig = FunctionSignature.from_definition(code,
sigs=global_ctx._contracts,
custom_units=global_ctx._custom_units,
custom_structs=global_ctx._structs,
constants=global_ctx._constants
)
if not sig.private:
default_sigs = generate_default_arg_sigs(code, global_ctx._contracts, global_ctx)
for s in default_sigs:
o.append(sig_formatter(s, global_ctx._custom_units_descriptions))
return o
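# Get a map of function signature -> method ID (in hex) for all public functions.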
def mk_method_identifiers(code, interface_codes=None):
o = {}
global_ctx = GlobalContext.get_global_context(parse_to_ast(code), interface_codes=interface_codes)
for code in global_ctx._defs:
sig = FunctionSignature.from_definition(code, sigs=global_ctx._contracts, custom_units=global_ctx._custom_units, constants=global_ctx._constants)
if not sig.private:
default_sigs = generate_default_arg_sigs(code, global_ctx._contracts, global_ctx)
for s in default_sigs:
o[s.sig] = hex(s.method_id)
return o
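# Add event signatures from the global context to the signature map.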
def parse_events(sigs, global_ctx):
for event in global_ctx._events:
sigs[event.target.id] = EventSignature.from_declaration(event, global_ctx)
return sigs
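# Build signature maps for externally declared contract interfaces.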
def parse_external_contracts(external_contracts, _contracts, _structs, _constants):
for _contractname in _contracts:
_contract_defs = _contracts[_contractname]
_defnames = [_def.name for _def in _contract_defs]
contract = {}
if len(set(_defnames)) < len(_contract_defs):
raise FunctionDeclarationException("Duplicate function name: %s" % [name for name in _defnames if _defnames.count(name) > 1][0])
for _def in _contract_defs:
constant = False
# test for valid call type keyword.
if len(_def.body) == 1 and \
isinstance(_def.body[0], ast.Expr) and \
isinstance(_def.body[0].value, ast.Name) and \
_def.body[0].value.id in ('modifying', 'constant'):
constant = True if _def.body[0].value.id == 'constant' else False
else:
raise StructureException('constant or modifying call type must be specified', _def)
# Recognizes already-defined structs
sig = FunctionSignature.from_definition(_def, contract_def=True, constant=constant, custom_structs=_structs, constants=_constants)
contract[sig.name] = sig
external_contracts[_contractname] = contract
return external_contracts
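# Parse the regular functions and the default (fallback) function into the LLL output.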
def parse_other_functions(o, otherfuncs, sigs, external_contracts, origcode, global_ctx, default_function, runtime_only):
sub = ['seq', initializer_lll]
add_gas = initializer_lll.gas
for _def in otherfuncs:
sub.append(parse_func(_def, {**{'self': sigs}, **external_contracts}, origcode, global_ctx)) # noqa E999
sub[-1].total_gas += add_gas
add_gas += 30
for sig in generate_default_arg_sigs(_def, external_contracts, global_ctx):
sig.gas = sub[-1].total_gas
sigs[sig.sig] = sig
# Add fallback function
if default_function:
default_func = parse_func(default_function[0], {**{'self': sigs}, **external_contracts}, origcode, global_ctx)
sub.append(default_func)
else:
sub.append(LLLnode.from_list(['revert', 0, 0], typ=None, annotation='Default function'))
if runtime_only:
return sub
else:
o.append(['return', 0, ['lll', sub, 0]])
return o
# Main python parse tree => LLL method
def parse_tree_to_lll(code, origcode, runtime_only=False, interface_codes=None):
global_ctx = GlobalContext.get_global_context(code, interface_codes=interface_codes)
_names_def = [_def.name for _def in global_ctx._defs]
# Checks for duplicate function names
if len(set(_names_def)) < len(_names_def):
raise FunctionDeclarationException("Duplicate function name: %s" % [name for name in _names_def if _names_def.count(name) > 1][0])
_names_events = [_event.target.id for _event in global_ctx._events]
# Checks for duplicate event names
if len(set(_names_events)) < len(_names_events):
raise EventDeclarationException("Duplicate event name: %s" % [name for name in _names_events if _names_events.count(name) > 1][0])
# Initialization function
initfunc = [_def for _def in global_ctx._defs if is_initializer(_def)]
# Default function
defaultfunc = [_def for _def in global_ctx._defs if is_default_func(_def)]
# Regular functions
otherfuncs = [_def for _def in global_ctx._defs if not is_initializer(_def) and not is_default_func(_def)]
sigs = {}
external_contracts = {}
# Create the main statement
o = ['seq']
if global_ctx._events:
sigs = parse_events(sigs, global_ctx)
if global_ctx._contracts:
external_contracts = parse_external_contracts(external_contracts, global_ctx._contracts, global_ctx._structs, global_ctx._constants)
# If there is an init func...
if initfunc:
o.append(['seq', initializer_lll])
o.append(parse_func(initfunc[0], {**{'self': sigs}, **external_contracts}, origcode, global_ctx))
# If there are regular functions...
if otherfuncs or defaultfunc:
o = parse_other_functions(
o, otherfuncs, sigs, external_contracts, origcode, global_ctx, defaultfunc, runtime_only
)
# Check interface.
if global_ctx._interface:
funcs_left = global_ctx._interface.copy()
for sig, func_sig in sigs.items():
if isinstance(func_sig, FunctionSignature):
if sig in funcs_left and not func_sig.private:
del funcs_left[sig]
if isinstance(func_sig, EventSignature) and func_sig.sig in funcs_left:
del funcs_left[func_sig.sig]
if funcs_left:
error_message = 'Contract does not comply to supplied Interface(s).\n'
missing_functions = [sig_name for sig_name, func_sig in funcs_left.items() if isinstance(func_sig, FunctionSignature)]
missing_events = [sig_name for sig_name, func_sig in funcs_left.items() if isinstance(func_sig, EventSignature)]
if missing_functions:
error_message += 'Missing interface functions:\n\t{}'.format('\n\t'.join(missing_functions))
if missing_events:
error_message += 'Missing interface events:\n\t{}'.format('\n\t'.join(missing_events))
raise StructureException(error_message)
return LLLnode.from_list(o, typ=None)
# Checks that an input matches its type
def make_clamper(datapos, mempos, typ, is_init=False):
if not is_init:
data_decl = ['calldataload', ['add', 4, datapos]]
copier = lambda pos, sz: ['calldatacopy', mempos, ['add', 4, pos], sz]
else:
data_decl = ['codeload', ['add', '~codelen', datapos]]
copier = lambda pos, sz: ['codecopy', mempos, ['add', '~codelen', pos], sz]
# Numbers: make sure they're in range
if is_base_type(typ, 'int128'):
return LLLnode.from_list(['clamp', ['mload', MemoryPositions.MINNUM], data_decl, ['mload', MemoryPositions.MAXNUM]],
typ=typ, annotation='checking int128 input')
# Booleans: make sure they're zero or one
elif is_base_type(typ, 'bool'):
return LLLnode.from_list(['uclamplt', data_decl, 2], typ=typ, annotation='checking bool input')
# Addresses: make sure they're in range
elif is_base_type(typ, 'address'):
return LLLnode.from_list(['uclamplt', data_decl, ['mload', MemoryPositions.ADDRSIZE]], typ=typ, annotation='checking address input')
# Bytes: make sure they have the right size
elif isinstance(typ, ByteArrayLike):
return LLLnode.from_list(['seq',
copier(data_decl, 32 + typ.maxlen),
['assert', ['le', ['calldataload', ['add', 4, data_decl]], typ.maxlen]]],
typ=None, annotation='checking bytearray input')
# Lists: recurse
elif isinstance(typ, ListType):
o = []
for i in range(typ.count):
offset = get_size_of_type(typ.subtype) * 32 * i
o.append(make_clamper(datapos + offset, mempos + offset, typ.subtype, is_init))
return LLLnode.from_list(['seq'] + o, typ=None, annotation='checking list input')
# Otherwise don't make any checks
else:
return LLLnode.from_list('pass')
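# Get the signature comparison (method ID check for public functions) and jump label (for private functions).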
def get_sig_statements(sig, pos):
method_id_node = LLLnode.from_list(sig.method_id, pos=pos, annotation='%s' % sig.sig)
if sig.private:
sig_compare = 0
private_label = LLLnode.from_list(
['label', 'priv_{}'.format(sig.method_id)],
pos=pos, annotation='%s' % sig.sig
)
else:
sig_compare = ['eq', ['mload', 0], method_id_node]
private_label = ['pass']
return sig_compare, private_label
def get_arg_copier(sig, total_size, memory_dest, offset=4):
# Copy arguments.
# For private functions, MSTORE arguments and the callback pointer from the stack.
if sig.private:
copier = ['seq']
for pos in range(0, total_size, 32):
copier.append(['mstore', memory_dest + pos, 'pass'])
else:
copier = ['calldatacopy', memory_dest, offset, total_size]
return copier
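# Build LLL that unpacks a dynamic byte array from the stack into memory, one 32-byte word at a time.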
def make_unpacker(ident, i_placeholder, begin_pos):
start_label = 'dyn_unpack_start_' + ident
end_label = 'dyn_unpack_end_' + ident
return ['seq_unchecked',
['mstore', begin_pos, 'pass'], # get len
['mstore', i_placeholder, 0],
['label', start_label],
['if', ['ge', ['mload', i_placeholder], ['ceil32', ['mload', begin_pos]]], ['goto', end_label]], # break
['mstore', ['add', ['add', begin_pos, 32], ['mload', i_placeholder]], 'pass'], # pop into correct memory slot.
['mstore', i_placeholder, ['add', 32, ['mload', i_placeholder]]], # increment i
['goto', start_label],
['label', end_label]]
# Parses a function declaration
def parse_func(code, sigs, origcode, global_ctx, _vars=None):
if _vars is None:
_vars = {}
sig = FunctionSignature.from_definition(
code,
sigs=sigs,
custom_units=global_ctx._custom_units,
custom_structs=global_ctx._structs,
constants=global_ctx._constants
)
# Get base args for function.
total_default_args = len(code.args.defaults)
base_args = sig.args[:-total_default_args] if total_default_args > 0 else sig.args
default_args = code.args.args[-total_default_args:]
default_values = dict(zip([arg.arg for arg in default_args], code.args.defaults))
# __init__ function may not have defaults.
if sig.name == '__init__' and total_default_args > 0:
raise FunctionDeclarationException("__init__ function may not have default parameters.")
# Check for duplicate variables with globals
for arg in sig.args:
if arg.name in global_ctx._globals:
raise FunctionDeclarationException("Variable name duplicated between function arguments and globals: " + arg.name)
# Create a local (per function) context.
context = Context(
vars=_vars,
global_ctx=global_ctx,
sigs=sigs,
return_type=sig.output_type,
constancy=Constancy.Constant if sig.const else Constancy.Mutable,
is_payable=sig.payable,
origcode=origcode,
is_private=sig.private,
method_id=sig.method_id
)
# Copy calldata to memory for fixed-size arguments
max_copy_size = sum([32 if isinstance(arg.typ, ByteArrayLike) else get_size_of_type(arg.typ) * 32 for arg in sig.args])
base_copy_size = sum([32 if isinstance(arg.typ, ByteArrayLike) else get_size_of_type(arg.typ) * 32 for arg in base_args])
context.next_mem += max_copy_size
clampers = []
# Create callback_ptr, this stores a destination in the bytecode for a private
# function to jump to after a function has executed.
_post_callback_ptr = "{}_{}_post_callback_ptr".format(sig.name, sig.method_id)
if sig.private:
context.callback_ptr = context.new_placeholder(typ=BaseType('uint256'))
clampers.append(
LLLnode.from_list(['mstore', context.callback_ptr, 'pass'], annotation='pop callback pointer')
)
if total_default_args > 0:
clampers.append(['label', _post_callback_ptr])
# private functions without return types need to jump back to
# the calling function, as there is no return statement to handle the
# jump.
stop_func = [['stop']]
if sig.output_type is None and sig.private:
stop_func = [['jump', ['mload', context.callback_ptr]]]
if not len(base_args):
copier = 'pass'
elif sig.name == '__init__':
copier = ['codecopy', MemoryPositions.RESERVED_MEMORY, '~codelen', base_copy_size]
else:
copier = get_arg_copier(
sig=sig,
total_size=base_copy_size,
memory_dest=MemoryPositions.RESERVED_MEMORY
)
clampers.append(copier)
# Add asserts for payable and internal
# private never gets payable check.
if not sig.payable and not sig.private:
clampers.append(['assert', ['iszero', 'callvalue']])
# Fill variable positions
for i, arg in enumerate(sig.args):
if i < len(base_args) and not sig.private:
clampers.append(make_clamper(arg.pos, context.next_mem, arg.typ, sig.name == '__init__'))
if isinstance(arg.typ, ByteArrayLike):
context.vars[arg.name] = VariableRecord(arg.name, context.next_mem, arg.typ, False)
context.next_mem += 32 * get_size_of_type(arg.typ)
else:
context.vars[arg.name] = VariableRecord(arg.name, MemoryPositions.RESERVED_MEMORY + arg.pos, arg.typ, False)
# Private function copiers. No clamping for private functions.
dyn_variable_names = [a.name for a in base_args if isinstance(a.typ, ByteArrayLike)]
if sig.private and dyn_variable_names:
i_placeholder = context.new_placeholder(typ=BaseType('uint256'))
unpackers = []
for idx, var_name in enumerate(dyn_variable_names):
var = context.vars[var_name]
ident = "_load_args_%d_dynarg%d" % (sig.method_id, idx)
o = make_unpacker(ident=ident, i_placeholder=i_placeholder, begin_pos=var.pos)
unpackers.append(o)
if not unpackers:
unpackers = ['pass']
clampers.append(LLLnode.from_list(
['seq_unchecked'] + unpackers + [0], # [0] to complete full overarching 'seq' statement, see private_label.
typ=None, annotation='dynamic unpacker', pos=getpos(code))
)
# Create "clampers" (input well-formedness checkers)
# Return function body
if sig.name == '__init__':
o = LLLnode.from_list(['seq'] + clampers + [parse_body(code.body, context)], pos=getpos(code))
elif is_default_func(sig):
if len(sig.args) > 0:
raise FunctionDeclarationException('Default function may not receive any arguments.', code)
if sig.private:
raise FunctionDeclarationException('Default function may only be public.', code)
o = LLLnode.from_list(['seq'] + clampers + [parse_body(code.body, context)], pos=getpos(code))
else:
if total_default_args > 0: # Function with default parameters.
function_routine = "{}_{}".format(sig.name, sig.method_id)
default_sigs = generate_default_arg_sigs(code, sigs, global_ctx)
sig_chain = ['seq']
for default_sig in default_sigs:
sig_compare, private_label = get_sig_statements(default_sig, getpos(code))
# Populate unset default variables
populate_arg_count = len(sig.args) - len(default_sig.args)
set_defaults = []
if populate_arg_count > 0:
current_sig_arg_names = {x.name for x in default_sig.args}
missing_arg_names = [arg.arg for arg in default_args if arg.arg not in current_sig_arg_names]
for arg_name in missing_arg_names:
value = Expr(default_values[arg_name], context).lll_node
var = context.vars[arg_name]
left = LLLnode.from_list(var.pos, typ=var.typ, location='memory',
pos=getpos(code), mutable=var.mutable)
set_defaults.append(make_setter(left, value, 'memory', pos=getpos(code)))
current_sig_arg_names = {x.name for x in default_sig.args}
base_arg_names = {arg.name for arg in base_args}
if sig.private:
# Load all variables in default section, if private,
# because the stack is a linear pipe.
copier_arg_count = len(default_sig.args)
copier_arg_names = current_sig_arg_names
else:
copier_arg_count = len(default_sig.args) - len(base_args)
copier_arg_names = current_sig_arg_names - base_arg_names
# Order copier_arg_names, this is very important.
copier_arg_names = [x.name for x in default_sig.args if x.name in copier_arg_names]
# Variables to be populated from calldata/stack.
default_copiers = []
if copier_arg_count > 0:
# Get map of variables in calldata, with their offsets
offset = 4
calldata_offset_map = {}
for arg in default_sig.args:
calldata_offset_map[arg.name] = offset
offset += 32 if isinstance(arg.typ, ByteArrayLike) else get_size_of_type(arg.typ) * 32
# Copy set default parameters from calldata
dynamics = []
for arg_name in copier_arg_names:
var = context.vars[arg_name]
calldata_offset = calldata_offset_map[arg_name]
if sig.private:
_offset = calldata_offset
if isinstance(var.typ, ByteArrayLike):
_size = 32
dynamics.append(var.pos)
else:
_size = var.size * 32
default_copiers.append(get_arg_copier(sig=sig, memory_dest=var.pos, total_size=_size, offset=_offset))
else:
# Add clampers.
default_copiers.append(make_clamper(calldata_offset - 4, var.pos, var.typ))
# Add copying code.
if isinstance(var.typ, ByteArrayLike):
_offset = ['add', 4, ['calldataload', calldata_offset]]
else:
_offset = calldata_offset
default_copiers.append(get_arg_copier(sig=sig, memory_dest=var.pos, total_size=var.size * 32, offset=_offset))
# Unpack byte array if necessary.
if dynamics:
i_placeholder = context.new_placeholder(typ=BaseType('uint256'))
for idx, var_pos in enumerate(dynamics):
ident = 'unpack_default_sig_dyn_%d_arg%d' % (default_sig.method_id, idx)
default_copiers.append(
make_unpacker(ident=ident, i_placeholder=i_placeholder, begin_pos=var_pos)
)
default_copiers.append(0) # for overarching seq, POP
sig_chain.append([
'if', sig_compare,
['seq',
private_label,
LLLnode.from_list(['mstore', context.callback_ptr, 'pass'], annotation='pop callback pointer', pos=getpos(code)) if sig.private else ['pass'],
['seq'] + set_defaults if set_defaults else ['pass'],
['seq_unchecked'] + default_copiers if default_copiers else ['pass'],
['goto', _post_callback_ptr if sig.private else function_routine]]
])
# With private functions all variable loading occurs in the default
# function sub routine.
if sig.private:
_clampers = [['label', _post_callback_ptr]]
else:
_clampers = clampers
# Function with default parameters.
o = LLLnode.from_list(
['seq',
sig_chain,
['if', 0, # can only be jumped into
['seq',
['label', function_routine] if not sig.private else ['pass'],
['seq'] + _clampers + [parse_body(c, context) for c in code.body] + stop_func]]], typ=None, pos=getpos(code))
else:
# Function without default parameters.
sig_compare, private_label = get_sig_statements(sig, getpos(code))
o = LLLnode.from_list(
['if',
sig_compare,
['seq'] + [private_label] + clampers + [parse_body(c, context) for c in code.body] + stop_func], typ=None, pos=getpos(code))
# Check for at least one return statement if necessary.
if context.return_type and context.function_return_count == 0:
raise FunctionDeclarationException(
"Missing return statement in function '%s' " % sig.name, code
)
o.context = context
o.total_gas = o.gas + calc_mem_gas(o.context.next_mem)
o.func_name = sig.name
return o
# Parse a piece of code
def parse_body(code, context):
if not isinstance(code, list):
return parse_stmt(code, context)
o = []
for stmt in code:
lll = parse_stmt(stmt, context)
o.append(lll)
return LLLnode.from_list(['seq'] + o, pos=getpos(code[0]) if code else None)
# Parse an expression
def parse_expr(expr, context):
return Expr(expr, context).lll_node
# Parse a statement (usually one line of code but not always)
def parse_stmt(stmt, context):
return Stmt(stmt, context).lll_node
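# Pack the indexed arguments (topics) of a log statement.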
def pack_logging_topics(event_id, args, expected_topics, context, pos):
topics = [event_id]
code_pos = pos
for pos, expected_topic in enumerate(expected_topics):
expected_type = expected_topic.typ
arg = args[pos]
value = parse_expr(arg, context)
arg_type = value.typ
if isinstance(arg_type, ByteArrayLike) and isinstance(expected_type, ByteArrayLike):
if arg_type.maxlen > expected_type.maxlen:
raise TypeMismatchException("Topic input bytes are too big: %r %r" % (arg_type, expected_type), code_pos)
if isinstance(arg, ast.Str):
bytez, bytez_length = string_to_bytes(arg.s)
if len(bytez) > 32:
raise InvalidLiteralException("Can only log a maximum of 32 bytes at a time.", code_pos)
topics.append(bytes_to_int(bytez + b'\x00' * (32 - bytez_length)))
else:
if value.location == "memory":
size = ['mload', value]
elif value.location == "storage":
size = ['sload', ['sha3_32', value]]
topics.append(byte_array_to_num(value, arg, 'uint256', size))
else:
value = unwrap_location(value)
value = base_type_conversion(value, arg_type, expected_type, pos=code_pos)
topics.append(value)
return topics
def pack_args_by_32(holder, maxlen, arg, typ, context, placeholder,
dynamic_offset_counter=None, datamem_start=None, zero_pad_i=None, pos=None):
"""
Copy necessary variables to pre-allocated memory section.
:param holder: Complete holder for all args
:param maxlen: Total length in bytes of the full arg section (static + dynamic).
:param arg: Current arg to pack
:param context: Context of arg
:param placeholder: Static placeholder for static argument part.
:param dynamic_offset_counter: position counter stored in static args.
:param dynamic_placeholder: pointer to current position in memory to write dynamic values to.
:param datamem_start: position where the whole datamem section starts.
"""
if isinstance(typ, BaseType):
if isinstance(arg, LLLnode):
value = unwrap_location(arg)
else:
value = parse_expr(arg, context)
value = base_type_conversion(value, value.typ, typ, pos)
holder.append(LLLnode.from_list(['mstore', placeholder, value], typ=typ, location='memory'))
elif isinstance(typ, ByteArrayLike):
if isinstance(arg, LLLnode): # Is a preallocated variable.
source_lll = arg
else:
source_lll = parse_expr(arg, context)
# Set static offset, in arg slot.
holder.append(LLLnode.from_list(['mstore', placeholder, ['mload', dynamic_offset_counter]]))
# Get the beginning to write the ByteArray to.
dest_placeholder = LLLnode.from_list(
['add', datamem_start, ['mload', dynamic_offset_counter]],
typ=typ, location='memory', annotation="pack_args_by_32:dest_placeholder")
copier = make_byte_array_copier(dest_placeholder, source_lll, pos=pos)
holder.append(copier)
# Add zero padding.
new_maxlen = ceil32(source_lll.typ.maxlen)
holder.append(
['with', '_ceil32_end', ['ceil32', ['mload', dest_placeholder]],
['seq',
['with', '_bytearray_loc', dest_placeholder,
['seq',
['repeat', zero_pad_i, ['mload', '_bytearray_loc'], new_maxlen,
['seq',
['if', ['ge', ['mload', zero_pad_i], '_ceil32_end'], 'break'], # stay within allocated bounds
['mstore8', ['add', ['add', '_bytearray_loc', 32], ['mload', zero_pad_i]], 0]]]]]]]
)
# Increment offset counter.
increment_counter = LLLnode.from_list(
['mstore', dynamic_offset_counter,
['add', ['add', ['mload', dynamic_offset_counter], ['ceil32', ['mload', dest_placeholder]]], 32]],
annotation='Increment dynamic offset counter'
)
holder.append(increment_counter)
elif isinstance(typ, ListType):
maxlen += (typ.count - 1) * 32
typ = typ.subtype
def check_list_type_match(provided): # Check list types match.
if provided != typ:
raise TypeMismatchException(
"Log list type '%s' does not match provided, expected '%s'" % (provided, typ)
)
# List from storage
if isinstance(arg, ast.Attribute) and arg.value.id == 'self':
stor_list = context.globals[arg.attr]
check_list_type_match(stor_list.typ.subtype)
size = stor_list.typ.count
mem_offset = 0
for i in range(0, size):
storage_offset = i
arg2 = LLLnode.from_list(['sload', ['add', ['sha3_32', Expr(arg, context).lll_node], storage_offset]],
typ=typ)
holder, maxlen = pack_args_by_32(holder, maxlen, arg2, typ, context, placeholder + mem_offset, pos=pos)
mem_offset += get_size_of_type(typ) * 32
# List from variable.
elif isinstance(arg, ast.Name):
size = context.vars[arg.id].size
pos = context.vars[arg.id].pos
check_list_type_match(context.vars[arg.id].typ.subtype)
mem_offset = 0
for i in range(0, size):
arg2 = LLLnode.from_list(pos + mem_offset, typ=typ, location='memory')
# p_holder = context.new_placeholder(BaseType(32)) if i > 0 else placeholder
holder, maxlen = pack_args_by_32(holder, maxlen, arg2, typ, context, placeholder + mem_offset, pos=pos)
mem_offset += get_size_of_type(typ) * 32
# List from list literal.
else:
mem_offset = 0
for i, arg2 in enumerate(arg.elts):
holder, maxlen = pack_args_by_32(holder, maxlen, arg2, typ, context, placeholder + mem_offset, pos=pos)
mem_offset += get_size_of_type(typ) * 32
return holder, maxlen
# Pack logging data arguments
def pack_logging_data(expected_data, args, context, pos):
# Checks to see if there's any data
if not args:
return ['seq'], 0, None, 0
holder = ['seq']
maxlen = len(args) * 32 # total size of all packed args (upper limit)
# Unroll any function calls, to temp variables.
prealloacted = {}
for idx, (arg, expected_arg) in enumerate(zip(args, expected_data)):
if isinstance(arg, (ast.Str, ast.Call)):
expr = Expr(arg, context)
source_lll = expr.lll_node
typ = source_lll.typ
if isinstance(arg, ast.Str):
if len(arg.s) > typ.maxlen:
raise TypeMismatchException("Data input bytes are to big: %r %r" % (len(arg.s), typ), pos)
tmp_variable = context.new_variable('_log_pack_var_%i_%i' % (arg.lineno, arg.col_offset), source_lll.typ)
tmp_variable_node = LLLnode.from_list(
tmp_variable, typ=source_lll.typ,
pos=getpos(arg), location="memory", annotation='log_prealloacted %r' % source_lll.typ
)
# Store len.
# holder.append(['mstore', len_placeholder, ['mload', unwrap_location(source_lll)]])
# Copy bytes.
holder.append(
make_setter(tmp_variable_node, source_lll, pos=getpos(arg), location='memory')
)
prealloacted[idx] = tmp_variable_node
requires_dynamic_offset = any([isinstance(data.typ, ByteArrayLike) for data in expected_data])
if requires_dynamic_offset:
zero_pad_i = context.new_placeholder(BaseType('uint256')) # Iterator used to zero pad memory.
dynamic_offset_counter = context.new_placeholder(BaseType(32))
dynamic_placeholder = context.new_placeholder(BaseType(32))
else:
dynamic_offset_counter = None
zero_pad_i = None
# Create placeholder for static args. Note: order of new_*() is important.
placeholder_map = {}
for i, (arg, data) in enumerate(zip(args, expected_data)):
typ = data.typ
if not isinstance(typ, ByteArrayLike):
placeholder = context.new_placeholder(typ)
else:
placeholder = context.new_placeholder(BaseType(32))
placeholder_map[i] = placeholder
# Populate static placeholders.
for i, (arg, data) in enumerate(zip(args, expected_data)):
typ = data.typ
placeholder = placeholder_map[i]
if not isinstance(typ, ByteArrayLike):
holder, maxlen = pack_args_by_32(holder, maxlen, prealloacted.get(i, arg), typ, context, placeholder, zero_pad_i=zero_pad_i, pos=pos)
# Dynamic position starts right after the static args.
if requires_dynamic_offset:
holder.append(LLLnode.from_list(['mstore', dynamic_offset_counter, maxlen]))
# Calculate maximum dynamic offset placeholders, used for gas estimation.
for i, (arg, data) in enumerate(zip(args, expected_data)):
typ = data.typ
if isinstance(typ, ByteArrayLike):
maxlen += 32 + ceil32(typ.maxlen)
if requires_dynamic_offset:
datamem_start = dynamic_placeholder + 32
else:
datamem_start = placeholder_map[0]
# Copy necessary data into allocated dynamic section.
for i, (arg, data) in enumerate(zip(args, expected_data)):
typ = data.typ
if isinstance(typ, ByteArrayLike):
pack_args_by_32(
holder=holder,
maxlen=maxlen,
arg=prealloacted.get(i, arg),
typ=typ,
context=context,
placeholder=placeholder_map[i],
datamem_start=datamem_start,
dynamic_offset_counter=dynamic_offset_counter,
zero_pad_i=zero_pad_i,
pos=pos
)
return holder, maxlen, dynamic_offset_counter, datamem_start
def parse_to_lll(kode, runtime_only=False, interface_codes=None):
code = parse_to_ast(kode)
return parse_tree_to_lll(code, kode, runtime_only=runtime_only, interface_codes=interface_codes)
|
py | 1a33ab44432e313eeadc00f9ce52474bf113ee95 | import datajoint as dj
import numpy as np
from functools import reduce
from .common_session import Session # noqa: F401
schema = dj.schema('common_interval')
# TODO: ADD export to NWB function to save relevant intervals in an NWB file
@schema
class IntervalList(dj.Manual):
definition = """
# Time intervals used for analysis
-> Session
interval_list_name: varchar(200) # descriptive name of this interval list
---
valid_times: longblob # numpy array with start and end times for each interval
"""
@classmethod
def insert_from_nwbfile(cls, nwbf, *, nwb_file_name):
"""Add each entry in the NWB file epochs table to the IntervalList table.
The interval list name for each epoch is set to the first tag for the epoch.
If the epoch has no tags, then 'interval_x' will be used as the interval list name, where x is the index
(0-indexed) of the epoch in the epochs table.
The start time and stop time of the epoch are stored in the valid_times field as a numpy array of
[start time, stop time] for each epoch.
Parameters
----------
nwbf : pynwb.NWBFile
The source NWB file object.
nwb_file_name : str
The file name of the NWB file, used as a primary key to the Session table.
"""
if nwbf.epochs is None:
print('No epochs found in NWB file.')
return
epochs = nwbf.epochs.to_dataframe()
for epoch_index, epoch_data in epochs.iterrows():
epoch_dict = dict()
epoch_dict['nwb_file_name'] = nwb_file_name
if epoch_data.tags and epoch_data.tags[0]:
epoch_dict['interval_list_name'] = epoch_data.tags[0]
else:
epoch_dict['interval_list_name'] = 'interval_' + str(epoch_index)
epoch_dict['valid_times'] = np.asarray(
[[epoch_data.start_time, epoch_data.stop_time]])
cls.insert1(epoch_dict, skip_duplicates=True)
# TODO: make all of the functions below faster if possible
def intervals_by_length(interval_list, min_length=0.0, max_length=1e10):
"""Returns an interval list with only the intervals whose length is > min_length and < max_length
Args:
interval_list ((N,2) np.array): input interval list.
min_length (float, optional): minimum interval length in seconds. Defaults to 0.0.
max_length (float, optional): maximum interval length in seconds. Defaults to 1e10.
"""
# get the length of each interval
lengths = np.ravel(np.diff(interval_list))
# return only intervals of the appropriate lengths
return interval_list[np.logical_and(lengths > min_length, lengths < max_length)]
def interval_list_contains_ind(valid_times, timestamps):
"""Returns the indices for the timestamps that are contained within the valid_times intervals
:param valid_times: Array of [start, end] times
:type valid_times: numpy array
:param timestamps: list of timestamps
:type timestamps: numpy array or list
:return: indices of timestamps that are in one of the valid_times intervals
"""
ind = []
for valid_time in valid_times:
ind += np.ravel(np.argwhere(np.logical_and(timestamps >= valid_time[0],
timestamps <= valid_time[1]))).tolist()
return np.asarray(ind)
def interval_list_contains(valid_times, timestamps):
"""Returns the timestamps that are contained within the valid_times intervals
:param valid_times: Array of [start, end] times
:type valid_times: numpy array
:param timestamps: list of timestamps
:type timestamps: numpy array or list
:return: numpy array of timestamps that are in one of the valid_times intervals
"""
ind = []
for valid_time in valid_times:
ind += np.ravel(np.argwhere(np.logical_and(timestamps >= valid_time[0],
timestamps <= valid_time[1]))).tolist()
return timestamps[ind]
def interval_list_excludes_ind(valid_times, timestamps):
"""Returns the indices of the timestamps that are excluded from the valid_times intervals
:param valid_times: Array of [start, end] times
:type valid_times: numpy array
:param timestamps: list of timestamps
:type timestamps: numpy array or list
:return: indices of timestamps that are not contained in any of the valid_times intervals
"""
# add the first and last times to the list and create a list of invalid intervals
valid_times_list = np.ndarray.ravel(valid_times).tolist()
valid_times_list.insert(0, timestamps[0] - 0.00001)
valid_times_list.append(timestamps[-1] + 0.001)
invalid_times = np.array(valid_times_list).reshape(-1, 2)
# add the first and last timestamp indices
ind = []
for invalid_time in invalid_times:
ind += np.ravel(np.argwhere(np.logical_and(timestamps > invalid_time[0],
timestamps < invalid_time[1]))).tolist()
return np.asarray(ind)
def interval_list_excludes(valid_times, timestamps):
"""Returns the indices of the timestamps that are excluded from the valid_times intervals
:param valid_times: Array of [start, end] times
:type valid_times: numpy array
:param timestamps: list of timestamps
:type timestamps: numpy array or list
:return: numpy array of timestamps that are not contained in any of the valid_times intervals
"""
# add the first and last times to the list and create a list of invalid intervals
valid_times_list = np.ravel(valid_times).tolist()
valid_times_list.insert(0, timestamps[0] - 0.00001)
valid_times_list.append(timestamps[-1] + 0.00001)
invalid_times = np.array(valid_times_list).reshape(-1, 2)
# add the first and last timestamp indices
ind = []
for invalid_time in invalid_times:
ind += np.ravel(np.argwhere(np.logical_and(timestamps > invalid_time[0],
timestamps < invalid_time[1]))).tolist()
return timestamps[ind]
def interval_list_intersect(interval_list1, interval_list2, min_length=0):
"""Finds the intersections between two interval lists
Parameters
----------
interval_list1 : np.array, (N,2) where N = number of intervals
interval_list2 : np.array, (N,2) where N = number of intervals
min_length : float, optional.
Minimum length of intervals to include, default 0
Each interval is (start time, stop time)
Returns
-------
interval_list: np.array, (N,2)
"""
# first, consolidate interval lists to disjoint intervals by sorting and applying union
if interval_list1.ndim==1:
interval_list1 = np.expand_dims(interval_list1,0)
else:
interval_list1 = interval_list1[np.argsort(interval_list1[:,0])]
interval_list1 = reduce(_union_concat, interval_list1)
# the following check is needed in the case where the interval list is a single element (behavior of reduce)
if interval_list1.ndim==1:
interval_list1 = np.expand_dims(interval_list1,0)
if interval_list2.ndim==1:
interval_list2 = np.expand_dims(interval_list2,0)
else:
interval_list2 = interval_list2[np.argsort(interval_list2[:,0])]
interval_list2 = reduce(_union_concat, interval_list2)
# the following check is needed in the case where the interval list is a single element (behavior of reduce)
if interval_list2.ndim==1:
interval_list2 = np.expand_dims(interval_list2,0)
# then do pairwise comparison and collect intersections
intersecting_intervals = []
for interval2 in interval_list2:
for interval1 in interval_list1:
if _intersection(interval2, interval1) is not None:
intersecting_intervals.append(_intersection(interval1, interval2))
# if no intersection, then return an empty list
if not intersecting_intervals:
return []
else:
intersecting_intervals = np.asarray(intersecting_intervals)
intersecting_intervals = intersecting_intervals[np.argsort(intersecting_intervals[:,0])]
return intervals_by_length(intersecting_intervals, min_length=min_length)
def _intersection(interval1, interval2):
"Takes the (set-theoretic) intersection of two intervals"
intersection = np.array([max([interval1[0],interval2[0]]),
min([interval1[1],interval2[1]])])
if intersection[1]>intersection[0]:
return intersection
else:
return None
def _union(interval1, interval2):
"Takes the (set-theoretic) union of two intervals"
if _intersection(interval1, interval2) is None:
return np.array([interval1, interval2])
else:
return np.array([min([interval1[0],interval2[0]]),
max([interval1[1],interval2[1]])])
def _union_concat(interval_list, interval):
"""Compares the last interval of the interval list to the given interval and
* takes their union if overlapping
* concatenates the interval to the interval list if not
Recursively called with `reduce`.
"""
if interval_list.ndim==1:
interval_list = np.expand_dims(interval_list, 0)
if interval.ndim==1:
interval = np.expand_dims(interval, 0)
x = _union(interval_list[-1], interval[0])
if x.ndim==1:
x = np.expand_dims(x, 0)
return np.concatenate((interval_list[:-1], x), axis=0)
def union_adjacent_index(interval1, interval2):
"""unions two intervals that are adjacent in index
e.g. [a,b] and [b+1, c] is converted to [a,c]
if not adjacent, just concatenates interval2 at the end of interval1
Parameters
----------
interval1 : np.array
(N, 2) array of intervals; its last interval is checked for adjacency
interval2 : np.array
(M, 2) array of intervals; its first interval is checked for adjacency
"""
if interval1.ndim==1:
interval1 = np.expand_dims(interval1, 0)
if interval2.ndim==1:
interval2 = np.expand_dims(interval2, 0)
if interval1[-1][1]+1 == interval2[0][0] or interval2[0][1]+1 == interval1[-1][0]:
x = np.array([[np.min([interval1[-1][0],interval2[0][0]]),
np.max([interval1[-1][1],interval2[0][1]])]])
return np.concatenate((interval1[:-1], x), axis=0)
else:
return np.concatenate((interval1, interval2),axis=0)
# TODO: test interval_list_union code
def interval_list_union(interval_list1, interval_list2, min_length=0.0, max_length=1e10):
"""Finds the union (all times in one or both) for two interval lists
:param interval_list1: The first interval list
:type interval_list1: numpy array of intervals [start, stop]
:param interval_list2: The second interval list
:type interval_list2: numpy array of intervals [start, stop]
:param min_length: optional minimum length of interval for inclusion in output, default 0.0
:type min_length: float
:param max_length: optional maximum length of interval for inclusion in output, default 1e10
:type max_length: float
:return: interval_list
:rtype: numpy array of intervals [start, stop]
"""
# return np.array([min(interval_list1[0],interval_list2[0]),
# max(interval_list1[1],interval_list2[1])])
interval_list1 = np.ravel(interval_list1)
# create a parallel list where 1 indicates the start and -1 the end of an interval
interval_list1_start_end = np.ones(interval_list1.shape)
interval_list1_start_end[1::2] = -1
interval_list2 = np.ravel(interval_list2)
# create a parallel list for the second interval where 1 indicates the start and -1 the end of an interval
interval_list2_start_end = np.ones(interval_list2.shape)
interval_list2_start_end[1::2] = -1
# concatenate the two lists so we can resort the intervals and apply the same sorting to the start-end arrays
combined_intervals = np.concatenate((interval_list1, interval_list2))
ss = np.concatenate((interval_list1_start_end, interval_list2_start_end))
sort_ind = np.argsort(combined_intervals)
combined_intervals = combined_intervals[sort_ind]
# a cumulative sum of 1 indicates the beginning of a joint interval; a cumulative sum of 0 indicates the end
union_starts = np.ravel(np.array(np.where(np.cumsum(ss[sort_ind]) == 1)))
union_stops = np.ravel(np.array(np.where(np.cumsum(ss[sort_ind]) == 0)))
union = []
for start, stop in zip(union_starts, union_stops):
union.append([combined_intervals[start], combined_intervals[stop]])
return np.asarray(union)
def interval_list_censor(interval_list, timestamps):
"""returns a new interval list that starts and ends at the first and last timestamp
Args:
interval_list (numpy array of intervals [start, stop]): interval list from IntervalList valid times
timestamps (numpy array or list): timestamp list
Returns:
interval_list (numpy array of intervals [start, stop])
"""
# check that all timestamps are in the interval list
assert len(interval_list_contains_ind(interval_list, timestamps)) == len(timestamps), 'interval_list must contain all timestamps'
timestamps_interval = np.asarray([[timestamps[0], timestamps[-1]]])
return interval_list_intersect(interval_list, timestamps_interval)
|
py | 1a33abe6a42d53f3119d94bb9366e5c62fef94c6 | import matplotlib.pyplot as plt
import numpy as np
def plot_1d_model(m, *, data=None):
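"""Plot a 1D model's predictive mean and +/- 2 standard deviation bands, the data (if given), and the inducing point locations."""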
D = m.inducing_variable.Z.numpy().shape[1]
if data is not None:
X, Y = data[0], data[1]
plt.plot(X, Y, 'x')
data_inducingpts = np.vstack((X if data is not None else np.zeros((0, D)), m.inducing_variable.Z.numpy()))
pX = np.linspace(np.min(data_inducingpts) - 1.0, np.max(data_inducingpts) + 1.0, 300)[:, None]
pY, pYv = m.predict_y(pX)
line, = plt.plot(pX, pY, lw=1.5)
col = line.get_color()
plt.plot(pX, pY + 2 * pYv ** 0.5, col, lw=1.5)
plt.plot(pX, pY - 2 * pYv ** 0.5, col, lw=1.5)
plt.plot(m.inducing_variable.Z.numpy(), np.zeros(m.inducing_variable.Z.numpy().shape), 'k|', mew=2)
|
py | 1a33ac2154ad3425d86a61740fd56d7de25c3256 | '''
# file sub.py
# brief Set 'SERVER','CLIENT_ID'(this can be null),'IOT_pubTopic','IOT_UserName','IOT_PassWord'
# download onto a PC or Raspberry Pi and run the file
# You will receive messages from the server
# Copyright Copyright (c) 2010 DFRobot Co.Ltd (http://www.dfrobot.com)
# licence The MIT License (MIT)
# author [LuoYufeng]([email protected])
# version V1.0
# date 2019-10-8
'''
import siot
import time
SERVER = "127.0.0.1" #MQTT服务器IP
CLIENT_ID = "" #在SIoT上,CLIENT_ID可以留空
IOT_pubTopic = 'xzr/001' #“topic”为“项目名称/设备名称”
IOT_UserName ='siot' #用户名
IOT_PassWord ='dfrobot' #密码
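# Callback invoked for every message received on the subscribed topic.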
def sub_cb(client, userdata, msg):
print("\nTopic:" + str(msg.topic) + " Message:" + str(msg.payload))
siot.init(CLIENT_ID, SERVER, user=IOT_UserName, password=IOT_PassWord)
siot.connect()
siot.subscribe(IOT_pubTopic, sub_cb)
siot.loop()
try:
while True:
pass
except:
siot.stop()
print("disconnect seccused")
|
py | 1a33ac68e4d455b1d84aa9fe8c4c09076e7a2a10 | from .shared_testing_functions import generate_random_partition, generate_multilayer_intralayer_SBM
import igraph as ig
from math import log
from numpy import mean
from modularitypruning.louvain_utilities import repeated_louvain_from_gammas_omegas, \
check_multilayer_louvain_capabilities
from modularitypruning.parameter_estimation import iterative_multilayer_resolution_parameter_estimation
from modularitypruning.parameter_estimation_utilities import gamma_omega_estimate
from modularitypruning.partition_utilities import num_communities, all_degrees
from random import seed
import unittest
class TestTemporalAndMultilevelParameterEstimation(unittest.TestCase):
def generate_temporal_SBM(self, copying_probability, p_in, p_out, first_layer_membership, num_layers):
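"""Generate a multilayer SBM where each node is also connected to itself in the next layer (temporal coupling)."""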
G_intralayer, layer_membership = generate_multilayer_intralayer_SBM(copying_probability, p_in, p_out,
first_layer_membership, num_layers)
# connect each node to itself in the next layer
num_nodes_per_layer = len(first_layer_membership)
interlayer_edges = [(num_nodes_per_layer * layer + v, num_nodes_per_layer * layer + v + num_nodes_per_layer)
for layer in range(num_layers - 1) for v in range(num_nodes_per_layer)]
G_interlayer = ig.Graph(interlayer_edges, directed=True)
return G_intralayer, G_interlayer, layer_membership
def assert_temporal_SBM_correct_convergence(self, first_layer_membership, copying_probability=0.75, num_layers=25,
p_in=0.25, p_out=0.05):
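"""Check that gamma/omega estimation on a temporal SBM converges close to the ground-truth values."""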
if not check_multilayer_louvain_capabilities(fatal=False):
# just return since this version of louvain is unable to perform multilayer parameter estimation anyway
return
K = num_communities(first_layer_membership)
G_intralayer, G_interlayer, layer_membership = self.generate_temporal_SBM(copying_probability, p_in, p_out,
first_layer_membership,
num_layers)
# compute ground truth gamma
k = mean(all_degrees(G_intralayer))
true_theta_in = p_in * (2 * G_intralayer.ecount()) / (k * k) / num_layers
true_theta_out = p_out * (2 * G_intralayer.ecount()) / (k * k) / num_layers
true_gamma = (true_theta_in - true_theta_out) / (log(true_theta_in) - log(true_theta_out))
# compute ground truth omega. For some reason, Pamfil et al. scale this by 1/2 (perhaps due to the directedness
# of the interlayer edges), so we do the same here
true_omega = log(1 + copying_probability * K / (1 - copying_probability))
true_omega /= (2 * (log(true_theta_in) - log(true_theta_out)))
gamma, omega, _ = iterative_multilayer_resolution_parameter_estimation(G_intralayer, G_interlayer,
layer_membership, gamma=1.0, omega=1.0,
model='temporal')
# check we converged close to the ground truth "correct" values
self.assertLess(abs(true_gamma - gamma), 0.05)
self.assertLess(abs(true_omega - omega), 0.1)
# check multilevel parameter estimation as well
# we never use this model, but it is a slight generalization of the temporal one
gamma, omega, _ = iterative_multilayer_resolution_parameter_estimation(G_intralayer, G_interlayer,
layer_membership, gamma=1.0, omega=1.0,
model='multilevel')
self.assertLess(abs(true_gamma - gamma), 0.05)
self.assertLess(abs(true_omega - omega), 0.1)
def test_temporal_SBM_correct_convergence_varying_copying_probability(self):
for eta in [0.25, 0.5, 0.75, 0.9]:
membership = generate_random_partition(num_nodes=100, K=2)
self.assert_temporal_SBM_correct_convergence(copying_probability=eta, first_layer_membership=membership)
def test_temporal_SBM_correct_convergence_varying_p_in(self):
for p_in in [0.5, 0.4, 0.3, 0.2]:
membership = generate_random_partition(num_nodes=100, K=2)
self.assert_temporal_SBM_correct_convergence(p_in=p_in, p_out=0.025, first_layer_membership=membership)
def test_temporal_SBM_correct_convergence_varying_p_out(self):
for p_out in [0.05, 0.04, 0.03, 0.02]:
membership = generate_random_partition(num_nodes=100, K=2)
self.assert_temporal_SBM_correct_convergence(p_out=p_out, first_layer_membership=membership)
def test_temporal_SBM_correct_convergence_varying_num_communities(self):
for K in [2, 3, 4, 5]:
membership = generate_random_partition(num_nodes=250, K=K)
self.assert_temporal_SBM_correct_convergence(first_layer_membership=membership)
def test_temporal_SBM_correct_convergence_varying_num_layers(self):
for num_layers in [20, 30, 40]:
membership = generate_random_partition(num_nodes=100, K=2)
self.assert_temporal_SBM_correct_convergence(first_layer_membership=membership, num_layers=num_layers)
def test_directed_consistency_temporal_SBM_louvain(self):
"""Test parameter estimate consistency on a temporal SBM when the intralayer edges are directed."""
if not check_multilayer_louvain_capabilities(fatal=False):
# just return since this version of louvain is unable to perform multilayer parameter estimation anyway
return
membership = [0] * 25 + [1] * 25 + [2] * 25
G_intralayer, G_interlayer, layer_membership = self.generate_temporal_SBM(copying_probability=0.9,
p_in=0.25, p_out=0.05,
first_layer_membership=membership,
num_layers=25)
partitions = repeated_louvain_from_gammas_omegas(G_intralayer, G_interlayer, layer_membership,
gammas=[0.5, 1.0, 1.5], omegas=[0.5, 1.0, 1.5])
for partition in partitions:
# here, undirected/directed refers to the intralayer edges only
# in Pamfil et al.'s temporal networks, interlayer edges are taken to be directed
gamma_undirected, omega_undirected = gamma_omega_estimate(G_intralayer, G_interlayer, layer_membership,
partition, model="temporal")
G_intralayer.to_directed()
gamma_directed, omega_directed = gamma_omega_estimate(G_intralayer, G_interlayer, layer_membership,
partition, model="temporal")
self.assertAlmostEqual(gamma_undirected, gamma_directed, places=10)
self.assertAlmostEqual(omega_undirected, omega_directed, places=10)
# check multilevel parameter estimation as well
gamma_undirected, omega_undirected = gamma_omega_estimate(G_intralayer, G_interlayer, layer_membership,
partition, model="multilevel")
G_intralayer.to_directed()
gamma_directed, omega_directed = gamma_omega_estimate(G_intralayer, G_interlayer, layer_membership,
partition, model="multilevel")
self.assertAlmostEqual(gamma_undirected, gamma_directed, places=10)
self.assertAlmostEqual(omega_undirected, omega_directed, places=10)
if __name__ == "__main__":
seed(0)
unittest.main()
|
py | 1a33ad3ba4cd7c9dbdc82fe9f6c6f4cbdd538f40 | # Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import re
from azurelinuxagent.common.utils.textutil import parse_doc, find, findall
from tests.tools import load_bin_data, load_data, MagicMock, Mock
from azurelinuxagent.common.exception import HttpError, ResourceGoneError
from azurelinuxagent.common.future import httpclient
from azurelinuxagent.common.utils.cryptutil import CryptUtil
DATA_FILE = {
"version_info": "wire/version_info.xml",
"goal_state": "wire/goal_state.xml",
"hosting_env": "wire/hosting_env.xml",
"shared_config": "wire/shared_config.xml",
"certs": "wire/certs.xml",
"ext_conf": "wire/ext_conf.xml",
"manifest": "wire/manifest.xml",
"ga_manifest": "wire/ga_manifest.xml",
"trans_prv": "wire/trans_prv",
"trans_cert": "wire/trans_cert",
"test_ext": "ext/sample_ext-1.3.0.zip",
"remote_access": None,
"in_vm_artifacts_profile": None
}
DATA_FILE_IN_VM_ARTIFACTS_PROFILE = DATA_FILE.copy()
DATA_FILE_IN_VM_ARTIFACTS_PROFILE["ext_conf"] = "wire/ext_conf_in_vm_artifacts_profile.xml"
DATA_FILE_IN_VM_ARTIFACTS_PROFILE["in_vm_artifacts_profile"] = "wire/in_vm_artifacts_profile.json"
DATA_FILE_NO_EXT = DATA_FILE.copy()
DATA_FILE_NO_EXT["goal_state"] = "wire/goal_state_no_ext.xml"
DATA_FILE_NO_EXT["ext_conf"] = None
DATA_FILE_EXT_NO_SETTINGS = DATA_FILE.copy()
DATA_FILE_EXT_NO_SETTINGS["ext_conf"] = "wire/ext_conf_no_settings.xml"
DATA_FILE_EXT_NO_PUBLIC = DATA_FILE.copy()
DATA_FILE_EXT_NO_PUBLIC["ext_conf"] = "wire/ext_conf_no_public.xml"
DATA_FILE_EXT_AUTOUPGRADE = DATA_FILE.copy()
DATA_FILE_EXT_AUTOUPGRADE["ext_conf"] = "wire/ext_conf_autoupgrade.xml"
DATA_FILE_EXT_INTERNALVERSION = DATA_FILE.copy()
DATA_FILE_EXT_INTERNALVERSION["ext_conf"] = "wire/ext_conf_internalversion.xml"
DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION = DATA_FILE.copy()
DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION["ext_conf"] = "wire/ext_conf_autoupgrade_internalversion.xml"
DATA_FILE_EXT_ROLLINGUPGRADE = DATA_FILE.copy()
DATA_FILE_EXT_ROLLINGUPGRADE["ext_conf"] = "wire/ext_conf_upgradeguid.xml"
DATA_FILE_EXT_SEQUENCING = DATA_FILE.copy()
DATA_FILE_EXT_SEQUENCING["ext_conf"] = "wire/ext_conf_sequencing.xml"
DATA_FILE_EXT_DELETION = DATA_FILE.copy()
DATA_FILE_EXT_DELETION["manifest"] = "wire/manifest_deletion.xml"
DATA_FILE_EXT_SINGLE = DATA_FILE.copy()
DATA_FILE_EXT_SINGLE["manifest"] = "wire/manifest_deletion.xml"
DATA_FILE_MULTIPLE_EXT = DATA_FILE.copy()
DATA_FILE_MULTIPLE_EXT["ext_conf"] = "wire/ext_conf_multiple_extensions.xml"
DATA_FILE_NO_CERT_FORMAT = DATA_FILE.copy()
DATA_FILE_NO_CERT_FORMAT["certs"] = "wire/certs_no_format_specified.xml"
DATA_FILE_CERT_FORMAT_NOT_PFX = DATA_FILE.copy()
DATA_FILE_CERT_FORMAT_NOT_PFX["certs"] = "wire/certs_format_not_pfx.xml"
DATA_FILE_REMOTE_ACCESS = DATA_FILE.copy()
DATA_FILE_REMOTE_ACCESS["goal_state"] = "wire/goal_state_remote_access.xml"
DATA_FILE_REMOTE_ACCESS["remote_access"] = "wire/remote_access_single_account.xml"
DATA_FILE_PLUGIN_SETTINGS_MISMATCH = DATA_FILE.copy()
DATA_FILE_PLUGIN_SETTINGS_MISMATCH["ext_conf"] = "wire/ext_conf_plugin_settings_version_mismatch.xml"
class WireProtocolData(object):
def __init__(self, data_files=DATA_FILE):
self.emulate_stale_goal_state = False
self.call_counts = {
"comp=versions": 0,
"/versions": 0,
"/health": 0,
"/HealthService": 0,
"/vmAgentLog": 0,
"goalstate": 0,
"hostingenvuri": 0,
"sharedconfiguri": 0,
"certificatesuri": 0,
"extensionsconfiguri": 0,
"remoteaccessinfouri": 0,
"extensionArtifact": 0,
"manifest.xml": 0,
"manifest_of_ga.xml": 0,
"ExampleHandlerLinux": 0,
"in_vm_artifacts_profile": 0
}
self.data_files = data_files
self.version_info = None
self.goal_state = None
self.hosting_env = None
self.shared_config = None
self.certs = None
self.ext_conf = None
self.manifest = None
self.ga_manifest = None
self.trans_prv = None
self.trans_cert = None
self.ext = None
self.remote_access = None
self.in_vm_artifacts_profile = None
self.reload()
def reload(self):
self.version_info = load_data(self.data_files.get("version_info"))
self.goal_state = load_data(self.data_files.get("goal_state"))
self.hosting_env = load_data(self.data_files.get("hosting_env"))
self.shared_config = load_data(self.data_files.get("shared_config"))
self.certs = load_data(self.data_files.get("certs"))
self.ext_conf = self.data_files.get("ext_conf")
if self.ext_conf is not None:
self.ext_conf = load_data(self.ext_conf)
self.manifest = load_data(self.data_files.get("manifest"))
self.ga_manifest = load_data(self.data_files.get("ga_manifest"))
self.trans_prv = load_data(self.data_files.get("trans_prv"))
self.trans_cert = load_data(self.data_files.get("trans_cert"))
self.ext = load_bin_data(self.data_files.get("test_ext"))
remote_access_data_file = self.data_files.get("remote_access")
if remote_access_data_file is not None:
self.remote_access = load_data(remote_access_data_file)
in_vm_artifacts_profile_file = self.data_files.get("in_vm_artifacts_profile")
if in_vm_artifacts_profile_file is not None:
self.in_vm_artifacts_profile = load_data(in_vm_artifacts_profile_file)
def mock_http_get(self, url, *args, **kwargs):
content = None
resp = MagicMock()
resp.status = httpclient.OK
if "comp=versions" in url: # wire server versions
content = self.version_info
self.call_counts["comp=versions"] += 1
elif "/versions" in url: # HostPlugin versions
content = '["2015-09-01"]'
self.call_counts["/versions"] += 1
elif url.endswith("/health"): # HostPlugin health
content = ''
self.call_counts["/health"] += 1
elif "goalstate" in url:
content = self.goal_state
self.call_counts["goalstate"] += 1
elif "hostingenvuri" in url:
content = self.hosting_env
self.call_counts["hostingenvuri"] += 1
elif "sharedconfiguri" in url:
content = self.shared_config
self.call_counts["sharedconfiguri"] += 1
elif "certificatesuri" in url:
content = self.certs
self.call_counts["certificatesuri"] += 1
elif "extensionsconfiguri" in url:
content = self.ext_conf
self.call_counts["extensionsconfiguri"] += 1
elif "remoteaccessinfouri" in url:
content = self.remote_access
self.call_counts["remoteaccessinfouri"] += 1
elif ".vmSettings" in url or ".settings" in url:
content = self.in_vm_artifacts_profile
self.call_counts["in_vm_artifacts_profile"] += 1
else:
# A stale GoalState results in a 400 from the HostPlugin
# for which the HTTP handler in restutil raises ResourceGoneError
if self.emulate_stale_goal_state:
if "extensionArtifact" in url:
self.emulate_stale_goal_state = False
self.call_counts["extensionArtifact"] += 1
raise ResourceGoneError()
else:
raise HttpError()
# For HostPlugin requests, replace the URL with that passed
# via the x-ms-artifact-location header
if "extensionArtifact" in url:
self.call_counts["extensionArtifact"] += 1
if "headers" not in kwargs:
raise ValueError("HostPlugin request is missing the HTTP headers: {0}", kwargs)
if "x-ms-artifact-location" not in kwargs["headers"]:
raise ValueError("HostPlugin request is missing the x-ms-artifact-location header: {0}", kwargs)
url = kwargs["headers"]["x-ms-artifact-location"]
if "manifest.xml" in url:
content = self.manifest
self.call_counts["manifest.xml"] += 1
elif "manifest_of_ga.xml" in url:
content = self.ga_manifest
self.call_counts["manifest_of_ga.xml"] += 1
elif "ExampleHandlerLinux" in url:
content = self.ext
self.call_counts["ExampleHandlerLinux"] += 1
resp.read = Mock(return_value=content)
return resp
elif ".vmSettings" in url or ".settings" in url:
content = self.in_vm_artifacts_profile
self.call_counts["in_vm_artifacts_profile"] += 1
else:
raise Exception("Bad url {0}".format(url))
resp.read = Mock(return_value=content.encode("utf-8"))
return resp
def mock_http_post(self, url, *args, **kwargs):
content = None
resp = MagicMock()
resp.status = httpclient.OK
if url.endswith('/HealthService'):
self.call_counts['/HealthService'] += 1
content = ''
else:
raise Exception("Bad url {0}".format(url))
resp.read = Mock(return_value=content.encode("utf-8"))
return resp
def mock_http_put(self, url, *args, **kwargs):
content = None
resp = MagicMock()
resp.status = httpclient.OK
if url.endswith('/vmAgentLog'):
self.call_counts['/vmAgentLog'] += 1
content = ''
else:
raise Exception("Bad url {0}".format(url))
resp.read = Mock(return_value=content.encode("utf-8"))
return resp
def mock_crypt_util(self, *args, **kw):
        # Partially patch instance method of class CryptUtil
cryptutil = CryptUtil(*args, **kw)
cryptutil.gen_transport_cert = Mock(side_effect=self.mock_gen_trans_cert)
return cryptutil
def mock_gen_trans_cert(self, trans_prv_file, trans_cert_file):
with open(trans_prv_file, 'w+') as prv_file:
prv_file.write(self.trans_prv)
with open(trans_cert_file, 'w+') as cert_file:
cert_file.write(self.trans_cert)
def get_no_of_plugins_in_extension_config(self):
if self.ext_conf is None:
return 0
ext_config_doc = parse_doc(self.ext_conf)
plugins_list = find(ext_config_doc, "Plugins")
return len(findall(plugins_list, "Plugin"))
#
    # Having trouble reading the regular expressions below? You are not alone!
#
# For the use of "(?<=" "(?=" see 7.2.1 in https://docs.python.org/3.1/library/re.html
# For the use of "\g<1>" see backreferences in https://docs.python.org/3.1/library/re.html#re.sub
#
# Note that these regular expressions are not enough to parse all valid XML documents (e.g. they do
# not account for metacharacters like < or > in the values) but they are good enough for the test
    # data. There are some basic sanity checks, but the functions may fail to match valid XML, or may
    # produce invalid XML, if their input is too complex.
#
@staticmethod
def replace_xml_element_value(xml_document, element_name, element_value):
new_xml_document = re.sub(r'(?<=<{0}>).+(?=</{0}>)'.format(element_name), element_value, xml_document)
if new_xml_document == xml_document:
raise Exception("Could not match element '{0}'", element_name)
return new_xml_document
@staticmethod
def replace_xml_attribute_value(xml_document, element_name, attribute_name, attribute_value):
new_xml_document = re.sub(r'(?<=<{0} )(.*{1}=")[^"]+(?="[^>]*>)'.format(element_name, attribute_name), r'\g<1>{0}'.format(attribute_value), xml_document)
if new_xml_document == xml_document:
raise Exception("Could not match attribute '{0}' of element '{1}'".format(attribute_name, element_name))
return new_xml_document
def set_incarnation(self, incarnation):
'''
Sets the incarnation in the goal state, but not on its subcomponents (e.g. hosting env, shared config)
'''
self.goal_state = WireProtocolData.replace_xml_element_value(self.goal_state, "Incarnation", str(incarnation))
def set_container_id(self, container_id):
self.goal_state = WireProtocolData.replace_xml_element_value(self.goal_state, "ContainerId", container_id)
def set_role_config_name(self, role_config_name):
self.goal_state = WireProtocolData.replace_xml_element_value(self.goal_state, "ConfigName", role_config_name)
def set_hosting_env_deployment_name(self, deployment_name):
self.hosting_env = WireProtocolData.replace_xml_attribute_value(self.hosting_env, "Deployment", "name", deployment_name)
def set_shared_config_deployment_name(self, deployment_name):
self.shared_config = WireProtocolData.replace_xml_attribute_value(self.shared_config, "Deployment", "name", deployment_name)
def set_extensions_config_sequence_number(self, sequence_number):
'''
Sets the sequence number for *all* extensions
'''
self.ext_conf = WireProtocolData.replace_xml_attribute_value(self.ext_conf, "RuntimeSettings", "seqNo", str(sequence_number))
def set_extensions_config_version(self, version):
'''
Sets the version for *all* extensions
'''
self.ext_conf = WireProtocolData.replace_xml_attribute_value(self.ext_conf, "Plugin", "version", version)
def set_extensions_config_state(self, state):
'''
Sets the state for *all* extensions
'''
self.ext_conf = WireProtocolData.replace_xml_attribute_value(self.ext_conf, "Plugin", "state", state)
def set_manifest_version(self, version):
'''
Sets the version of the extension manifest
'''
self.manifest = WireProtocolData.replace_xml_element_value(self.manifest, "Version", version)
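# Illustrative sketch (not part of the original module): how the XML helpers above behave on a
# goal-state-like fragment. The sample document below is made up for demonstration purposes and
# running this requires the test package imports at the top of the module to resolve.
if __name__ == "__main__":
    sample = "<GoalState><Incarnation>1</Incarnation></GoalState>"
    bumped = WireProtocolData.replace_xml_element_value(sample, "Incarnation", "2")
    assert bumped == "<GoalState><Incarnation>2</Incarnation></GoalState>"
    print(bumped)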
|
py | 1a33ae84ae2ffb0e40a65f1b6cbe5e2483dc1a17 | import logging
import sys
import time
class ColorFormatter(logging.Formatter):
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
RESET_SEQ = '\033[0m'
COLOR_SEQ_TEMPLATE = '\033[1;{fore_color_int}m'
LEVELNO_TO_COLOR_INT_DICT = {
logging.WARNING: YELLOW,
logging.ERROR: RED,
}
def format(self, record):
message = logging.Formatter.format(self, record)
color_seq = ''
if record.levelno in self.LEVELNO_TO_COLOR_INT_DICT:
fore_color_int = 30 + self.LEVELNO_TO_COLOR_INT_DICT[record.levelno]
color_seq = self.COLOR_SEQ_TEMPLATE.format(fore_color_int=fore_color_int)
return '{0}{1}{2}'.format(color_seq, message, self.RESET_SEQ)
class NullHandler(logging.Handler):
def emit(self, record):
pass
class LessThanFilter(logging.Filter, object):
def __init__(self, exclusive_maximum, name=""):
super(LessThanFilter, self).__init__(name)
self.max_level = exclusive_maximum
def filter(self, record):
return 1 if record.levelno < self.max_level else 0
class DeferInfoToDebugFilter(logging.Filter, object):
def __init__(self, name=""):
super(DeferInfoToDebugFilter, self).__init__(name)
def filter(self, record):
if record.levelno == logging.INFO:
record.levelno = logging.DEBUG
record.levelname = 'DEBUG'
return 1
def register_tty_handler(stream, minlevel, maxlevel):
logging_handler = logging.StreamHandler(stream)
logging_handler.setFormatter(ColorFormatter('%(message)s'))
if minlevel is not None:
logging_handler.setLevel(minlevel)
else:
logging_handler.setLevel(logging.NOTSET)
if maxlevel is not None:
logging_handler.addFilter(LessThanFilter(maxlevel))
logging.getLogger().addHandler(logging_handler)
def register_file_handler(log_file_path, level=logging.DEBUG):
logging_handler = logging.FileHandler(log_file_path)
logging_handler.setFormatter(logging.Formatter('%(asctime)s.%(msecs)03dZ - %(levelname)7s - %(filename)30s:%(lineno)4d - %(message)s', '%Y-%m-%dT%H:%M:%S'))
logging_handler.formatter.converter = time.gmtime
logging_handler.setLevel(logging.DEBUG)
logging.getLogger().addHandler(logging_handler)
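# Illustrative usage sketch (not part of the original module): route DEBUG/INFO to stdout,
# WARNING and above to stderr, and everything to a file. The log file path is an assumption
# chosen for demonstration only.
if __name__ == "__main__":
    logging.getLogger().setLevel(logging.NOTSET)  # let the handlers decide what to keep
    register_tty_handler(sys.stdout, logging.DEBUG, logging.WARNING)
    register_tty_handler(sys.stderr, logging.WARNING, None)
    register_file_handler('/tmp/example.log')
    logging.info('shown on stdout and written to the file')
    logging.error('shown on stderr (in red) and written to the file')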
|
py | 1a33b09b48054b8eb062468c13ed59d6ee4ac7f2 | """ Module who handle updating """
import os
from dataclasses import dataclass
from pathlib import Path
from shutil import copyfile
from typing import Union
from packaging.version import InvalidVersion, Version
from inupdater.config import SettingsManager
from inupdater.ui import UserInterface
@dataclass(eq=False, order=False)
class Exefile:
path: Path
version: Version
def __eq__(self, o: object) -> bool:
return self.version == o.version
def __lt__(self, o: object) -> bool:
return self.version < o.version
def __le__(self, o: object) -> bool:
return self.version <= o.version
def __gt__(self, o: object) -> bool:
return self.version > o.version
def __ge__(self, o: object) -> bool:
return self.version >= o.version
class ExeUpdater:
def __init__(self, install_path: Path, ui: UserInterface) -> None:
self.install_path = install_path
self.ui = ui
settings_path = install_path / Path("settings.json")
# --------------------------------------
# Test Purpose only
# --------------------------------------
appexemple_path = Path().cwd() / Path("tests/appexemple")
test_settings = appexemple_path / Path("settings.json")
if not settings_path.exists() and test_settings.exists():
self.install_path = appexemple_path
settings_path = test_settings
# --------------------------------------
settingsmanager = SettingsManager(settings_path)
with settingsmanager as self.settings:
self.local = None
self.update = None
self.ui.show_message("Checking for updates...")
self.ui.set_state(2)
if self.update > self.local:
self.ui.show_message(f"We find a new update ! : {self.update.version}")
self.ui.set_state(4)
copyfile(self.update.path, self.local.path)
self.settings.version = self.update.version
self.ui.show_message("Update installed !")
self.ui.set_state(6)
@property
def local(self) -> Exefile:
exe_path = self.install_path / Path(
f"{self.settings._exe_name}.exe"
) # TODO EXE or not?!? check with no Admin
assert exe_path.exists()
exe_version = self.settings.version
return Exefile(exe_path, exe_version)
@local.setter
def local(self, _):
return
@property
def update(self) -> Exefile:
exe_path = self.check_for_latest_update(self.settings.dist_location)
exe_version = self.get_version(exe_path)
return Exefile(exe_path, exe_version)
@update.setter
def update(self, _) -> None:
return
@staticmethod
def get_version(pathver: Path) -> Version:
try:
return Version(pathver.stem.split("_")[1])
except IndexError as idx:
raise idx
except InvalidVersion as ive:
raise ive
@staticmethod
def get_exe_list(path: Path) -> list[Path]:
return [
f
for f in Path(path).iterdir()
if f.suffix == ".exe" and f.stem != "unins000"
]
def check_for_latest_update(self, path: Path) -> Path:
"""Check for latest update in a given path"""
exe_list = self.get_exe_list(path)
last = sorted(exe_list, key=self.get_version)[-1]
return last
def launch(self, *args):
command = [str(self.local.path), *args]
self.ui.show_message(f"Launching {self.settings._exe_name}")
self.ui.set_state(8)
self.ui.show_message("Please wait..")
self.ui.set_state(10)
self.ui.close()
os.system(" ".join([str(c) for c in command]))
|
py | 1a33b12b4b5ff975ba57ad394da80a4413f28c6c | import yaml
import json
from teuthology.test import fake_archive
from teuthology import report
class TestSerializer(object):
def setup(self):
self.archive = fake_archive.FakeArchive()
self.archive.setup()
self.archive_base = self.archive.archive_base
self.reporter = report.ResultsReporter(archive_base=self.archive_base)
def teardown(self):
self.archive.teardown()
def test_all_runs_one_run(self):
run_name = "test_all_runs"
yaml_path = "examples/3node_ceph.yaml"
job_count = 3
self.archive.create_fake_run(run_name, job_count, yaml_path)
assert [run_name] == self.reporter.serializer.all_runs
def test_all_runs_three_runs(self):
run_count = 3
runs = {}
for i in range(run_count):
run_name = "run #%s" % i
yaml_path = "examples/3node_ceph.yaml"
job_count = 3
job_ids = self.archive.create_fake_run(
run_name,
job_count,
yaml_path)
runs[run_name] = job_ids
assert sorted(runs.keys()) == sorted(self.reporter.serializer.all_runs)
def test_jobs_for_run(self):
run_name = "test_jobs_for_run"
yaml_path = "examples/3node_ceph.yaml"
job_count = 3
jobs = self.archive.create_fake_run(run_name, job_count, yaml_path)
job_ids = [str(job['job_id']) for job in jobs]
got_jobs = self.reporter.serializer.jobs_for_run(run_name)
assert sorted(job_ids) == sorted(got_jobs.keys())
def test_running_jobs_for_run(self):
run_name = "test_jobs_for_run"
yaml_path = "examples/3node_ceph.yaml"
job_count = 10
num_hung = 3
self.archive.create_fake_run(run_name, job_count, yaml_path,
num_hung=num_hung)
got_jobs = self.reporter.serializer.running_jobs_for_run(run_name)
assert len(got_jobs) == num_hung
def test_json_for_job(self):
run_name = "test_json_for_job"
yaml_path = "examples/3node_ceph.yaml"
job_count = 1
jobs = self.archive.create_fake_run(run_name, job_count, yaml_path)
job = jobs[0]
with open(yaml_path) as yaml_file:
obj_from_yaml = yaml.safe_load(yaml_file)
full_obj = obj_from_yaml.copy()
full_obj.update(job['info'])
full_obj.update(job['summary'])
out_json = self.reporter.serializer.json_for_job(
run_name, str(job['job_id']))
out_obj = json.loads(out_json)
assert full_obj == out_obj
|
py | 1a33b134f5e634609cee840b7c4bfb2609cd1381 | import sys
import pyportus as portus
class ConstFlow():
INIT_RATE = 1000000
def __init__(self, datapath, datapath_info):
self.datapath = datapath
self.datapath_info = datapath_info
self.rate = ConstFlow.INIT_RATE
self.datapath.set_program("default", [("Rate", self.rate)])
def on_report(self, r):
self.datapath.update_field("Rate", self.rate)
class Const(portus.AlgBase):
def datapath_programs(self):
return {
"default" : """\
(def (Report
(volatile acked 0)
(volatile loss 0)
(volatile rtt 0)
))
(when true
(:= Report.rtt Flow.rtt_sample_us)
(:= Report.acked (+ Report.acked Ack.bytes_acked))
(:= Report.loss Ack.lost_pkts_sample)
(report)
)
"""
}
def new_flow(self, datapath, datapath_info):
return ConstFlow(datapath, datapath_info)
alg = Const()
portus.start("unix", alg)
|
py | 1a33b1e426b83fb89ba91df601dab472b258192e | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.Element import Element
class AltGeneratingUnitMeas(Element):
"""A prioritized measurement to be used for the generating unit in the control area specificaiton.A prioritized measurement to be used for the generating unit in the control area specificaiton.
"""
def __init__(self, priority=0, AnalogValue=None, ControlAreaGeneratingUnit=None, *args, **kw_args):
"""Initialises a new 'AltGeneratingUnitMeas' instance.
@param priority: Priority of a measurement usage. Lower numbers have first priority.
@param AnalogValue: The specific analog value used as a source.
        @param ControlAreaGeneratingUnit: The control area generating unit to which the prioritized measurement assignment is applied.
"""
#: Priority of a measurement usage. Lower numbers have first priority.
self.priority = priority
self._AnalogValue = None
self.AnalogValue = AnalogValue
self._ControlAreaGeneratingUnit = None
self.ControlAreaGeneratingUnit = ControlAreaGeneratingUnit
super(AltGeneratingUnitMeas, self).__init__(*args, **kw_args)
_attrs = ["priority"]
_attr_types = {"priority": int}
_defaults = {"priority": 0}
_enums = {}
_refs = ["AnalogValue", "ControlAreaGeneratingUnit"]
_many_refs = []
def getAnalogValue(self):
"""The specific analog value used as a source.
"""
return self._AnalogValue
def setAnalogValue(self, value):
if self._AnalogValue is not None:
filtered = [x for x in self.AnalogValue.AltGeneratingUnit if x != self]
self._AnalogValue._AltGeneratingUnit = filtered
self._AnalogValue = value
if self._AnalogValue is not None:
if self not in self._AnalogValue._AltGeneratingUnit:
self._AnalogValue._AltGeneratingUnit.append(self)
AnalogValue = property(getAnalogValue, setAnalogValue)
def getControlAreaGeneratingUnit(self):
"""The control aread generating unit to which the prioritized measurement assignment is applied.
"""
return self._ControlAreaGeneratingUnit
def setControlAreaGeneratingUnit(self, value):
if self._ControlAreaGeneratingUnit is not None:
filtered = [x for x in self.ControlAreaGeneratingUnit.AltGeneratingUnitMeas if x != self]
self._ControlAreaGeneratingUnit._AltGeneratingUnitMeas = filtered
self._ControlAreaGeneratingUnit = value
if self._ControlAreaGeneratingUnit is not None:
if self not in self._ControlAreaGeneratingUnit._AltGeneratingUnitMeas:
self._ControlAreaGeneratingUnit._AltGeneratingUnitMeas.append(self)
ControlAreaGeneratingUnit = property(getControlAreaGeneratingUnit, setControlAreaGeneratingUnit)
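# Illustrative sketch (not part of the original module): the property setters above maintain the
# bidirectional association automatically. _FakeAnalogValue is a minimal stand-in, not a real
# CIM15 AnalogValue; it only provides the attributes the setter touches, and running this still
# assumes the CIM15 package itself is importable.
if __name__ == "__main__":
    class _FakeAnalogValue(object):
        def __init__(self):
            self.AltGeneratingUnit = []
            self._AltGeneratingUnit = self.AltGeneratingUnit
    av = _FakeAnalogValue()
    meas = AltGeneratingUnitMeas(priority=1, AnalogValue=av)
    assert meas in av.AltGeneratingUnit  # back-reference was added by setAnalogValue()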
|
py | 1a33b393845835b095dc992bd7d612e238791ae0 | import collections
from typing import Any, Iterable, Iterator, Optional, Tuple
from river.base.typing import Dataset
from river.metrics import RegressionMetric
from .base import Forecaster
from .metric import HorizonMetric
TimeSeries = Iterator[
Tuple[
Optional[dict], # x
Any, # y
Iterable[Optional[dict]], # x_horizon
Iterable[Any], # y_horizon
]
]
def _iter_with_horizon(dataset: Dataset, horizon: int) -> TimeSeries:
"""
Examples
--------
>>> from river import datasets
>>> from river.time_series.evaluate import _iter_with_horizon
>>> dataset = datasets.AirlinePassengers()
>>> for x, y, x_horizon, y_horizon in _iter_with_horizon(dataset.take(8), horizon=3):
... print(x['month'].strftime('%Y-%m-%d'), y)
... print([x['month'].strftime('%Y-%m-%d') for x in x_horizon])
... print(list(y_horizon))
... print('---')
1949-01-01 112
['1949-02-01', '1949-03-01', '1949-04-01']
[118, 132, 129]
---
1949-02-01 118
['1949-03-01', '1949-04-01', '1949-05-01']
[132, 129, 121]
---
1949-03-01 132
['1949-04-01', '1949-05-01', '1949-06-01']
[129, 121, 135]
---
1949-04-01 129
['1949-05-01', '1949-06-01', '1949-07-01']
[121, 135, 148]
---
1949-05-01 121
['1949-06-01', '1949-07-01', '1949-08-01']
[135, 148, 148]
---
"""
x_horizon = collections.deque(maxlen=horizon)
y_horizon = collections.deque(maxlen=horizon)
stream = iter(dataset)
for _ in range(horizon):
x, y = next(stream)
x_horizon.append(x)
y_horizon.append(y)
for x, y in stream:
x_now = x_horizon.popleft()
y_now = y_horizon.popleft()
x_horizon.append(x)
y_horizon.append(y)
yield x_now, y_now, x_horizon, y_horizon
def _evaluate(
dataset: Dataset,
model: Forecaster,
metric: RegressionMetric,
horizon: int,
grace_period: int,
) -> HorizonMetric:
horizon_metric = HorizonMetric(metric)
steps = _iter_with_horizon(dataset, horizon)
for _ in range(grace_period):
x, y, x_horizon, y_horizon = next(steps)
model.learn_one(y=y, x=x)
for x, y, x_horizon, y_horizon in steps:
y_pred = model.forecast(horizon, xs=x_horizon)
horizon_metric.update(y_horizon, y_pred)
model.learn_one(y=y, x=x)
yield y_pred, horizon_metric
def evaluate(
dataset: Dataset,
model: Forecaster,
metric: RegressionMetric,
horizon: int,
grace_period=1,
) -> HorizonMetric:
"""Evaluates the performance of a forecaster on a time series dataset.
To understand why this method is useful, it's important to understand the difference between
nowcasting and forecasting. Nowcasting is about predicting a value at the next time step. This
can be seen as a special case of regression, where the value to predict is the value at the
next time step. In this case, the `evaluate.progressive_val_score` function may be used to
evaluate a model via progressive validation.
Forecasting models can also be evaluated via progressive validation. This is the purpose of
this function. At each time step `t`, the forecaster is asked to predict the values at `t + 1`,
`t + 2`, ..., `t + horizon`. The performance at each time step is measured and returned.
Parameters
----------
dataset
A sequential time series.
model
A forecaster.
metric
A regression metric.
    horizon
        The number of time steps ahead to forecast at each step.
grace_period
Initial period during which the metric is not updated. This is to fairly evaluate models
which need a warming up period to start producing meaningful forecasts. The first forecast
is skipped by default.
"""
horizon_metric = None
steps = _evaluate(dataset, model, metric, horizon, grace_period)
for _, horizon_metric in steps:
pass
return horizon_metric
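# Illustrative usage sketch (not part of the original module). It assumes river's
# datasets.AirlinePassengers, time_series.HoltWinters and metrics.MAE are available, as in
# recent river releases; the hyperparameters are arbitrary. Because of the relative imports
# above, run it as a module (e.g. `python -m river.time_series.evaluate`).
if __name__ == "__main__":
    from river import datasets, metrics, time_series
    horizon_metric = evaluate(
        dataset=datasets.AirlinePassengers(),
        model=time_series.HoltWinters(alpha=0.3, beta=0.1, gamma=0.6, seasonality=12),
        metric=metrics.MAE(),
        horizon=12,
        grace_period=12,
    )
    print(horizon_metric)  # one MAE value per forecasting step 1..12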
|
py | 1a33b39ec629a3796c164b1a1e3c950dc6f3a4cc | import os
IMAGE_SIZE = 256
NUM_WORKERS = 4
TRAINING_BATCH_SIZE = 8
VAL_BATCH_SIZE = 8
EPOCH = 20
MILESTONES = [5, 10, 15]
SAVE_EPOCH = 5
WARM_EPOCH = 1
CHECKPOINTS_PATH = './checkpoints/'
LEARNING_RATE = 0.1
WEIGHT_DECAY = 5e-4
MOMENTUM = 0.9
GAMMA = 0.2
FRAME_SAMPLE = 10 |
py | 1a33b43b95a362bc7e9d4e0299efc79dc300191e | import os
from datetime import datetime, timedelta
import pandas as pd
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from sqlalchemy import create_engine
from generic_data_dag import GenericDataDag
class WikipediaDataDagHistorical(GenericDataDag):
"""
This is a sample dag for pulling wikipedia pageviews data on an hourly
basis.
"""
dag_id = "wikipedia-historical-data-dag"
dag_description = "Wikipedia historical pageviews data dag"
# TODO: update this to be the start date
start_date = datetime.now() - timedelta(days=1, hours=5)
end_date = datetime.now() - timedelta(days=1)
schedule_interval = "@hourly"
catchup = True
out_dir = "out"
table_name = "wikipedia_pageviews_data_historical"
analytics_postgres = "postgresql://postgres@postgres:5432/analytics"
engine = create_engine(analytics_postgres)
@classmethod
def download_from_wikipedia(cls, *args, **kwargs):
ts = kwargs["ts"]
yyyy = ts[0:4]
mm = ts[5:7]
dd = ts[8:10]
hh = ts[11:13]
# url format:
# https://dumps.wikimedia.org/other/pageviews/2021/2021-10/pageviews-20211001-010000.gz
url = (
f"https://dumps.wikimedia.org/other/pageviews/{yyyy}/{yyyy}-{mm}/"
f"pageviews-{yyyy}{mm}{dd}-{hh}0000.gz"
)
print("download from", url)
df = pd.read_csv(
url,
sep=" ",
header=None,
names=[
"domain_code",
"page_title",
"count_views",
"total_response_size",
],
)
df["dt"] = ts
df.to_csv(
os.path.join(cls.out_dir, f"pageviews-{yyyy}{mm}{dd}-{hh}.csv"),
index=False,
)
@classmethod
def upload_data_to_s3(cls, *args, **kwargs):
pass
@classmethod
def summarize_dataframe(cls, *args, **kwargs):
pass
def get_data(self):
return PythonOperator(
task_id="get-data-from-wikipedia",
python_callable=self.download_from_wikipedia,
provide_context=True,
)
def upload_data(self):
return PythonOperator(
task_id="upload-data",
python_callable=self.upload_data_to_s3,
provide_context=True,
)
def summarize_data(self):
return PythonOperator(
task_id="summarize-data",
python_callable=self.summarize_dataframe,
provide_context=True,
)
w = WikipediaDataDagHistorical()
w_dag = w.get_data_prep_dag()
|
py | 1a33b475c32c8a4b60d5a85548d06b3737789b8a | #!/usr/bin/env python3
# Publications markdown generator for academicpages
# Data format: JSON, see publications.json for examples
# Caution: Overwrites ../auto-publications.md
import json
JOURNAL_PUB = "journal"
CONFERENCE_PUB = "conference"
SHORT_PUB = "short"
ARXIV_PUB = "arxiv"
DISSERTATION_PUB = "dissertation"
PATENT_PUB = "patent"
POSTER_PUB = "poster"
def writeOutPrefix(handle):
handle.write("""---
layout: single
title: "Publications"
permalink: /publications/
author_profile: true
---
Here are the publications to which I have contributed.
To see them organized by project, see [here](/research).
""")
FILE_PATH = "{{ site.url }}/{{ site.baseurl }}/{{ site.filesurl }}/publications"
def makeLink(url):
if 'http' in url:
return url
else:
return "{}/{}".format(FILE_PATH, url)
def pub2md(pub):
links = []
if 'paperBasename' in pub and pub['paperBasename']:
links.append('<a href="{}"><i class="fas fa-file-pdf"></i></a>'.format(
makeLink(pub['paperBasename'])
))
if 'slidesBasename' in pub and pub['slidesBasename']:
links.append('<a href="{}"><i class="fas fa-file-powerpoint"></i></a>'.format(
makeLink(pub['slidesBasename'])
))
if 'artifactURL' in pub and pub['artifactURL']:
links.append('<a href="{}"><i class="fas fa-file-code"></i></a>'.format(
makeLink(pub['artifactURL'])
))
if 'videoURL' in pub and pub['videoURL']:
links.append('<a href="{}"><i class="fas fa-video"></i></a>'.format(
makeLink(pub['videoURL'])
))
if 'blogURL' in pub and pub['blogURL']:
links.append('<a href="{}"><i class="fab fa-medium"></i></a>'.format(
makeLink(pub['blogURL'])
))
if 'bestPaperAward' in pub and pub['bestPaperAward']:
links.append('[Best Paper Award](){: .btn}')
if len(pub['authors']) == 1:
authList = pub['authors'][0]
elif len(pub['authors']) == 2:
authList = ' and '.join(pub['authors'])
else:
authList = ', '.join(pub['authors'][:-1])
authList += ", and " + pub['authors'][-1]
cite = "*{}*. \n {}. \n {} {}. ".format(
pub['title'],
authList,
pub['venue'], pub['year'],
)
return cite + "\n " + ' '.join(links)
def writePubs(handle, headingTitle, pubs):
handle.write('\n## {}\n\n'.format(headingTitle))
for i, pub in enumerate(pubs):
handle.write("{}. {}\n".format(i+1, pub2md(pub)))
with open('publications.json', 'r') as infile, open('../auto-publications.md', 'w') as outfile:
writeOutPrefix(outfile)
pubs = json.load(infile)['publications']
pubs = sorted(pubs, key=lambda p: p['year'], reverse=True)
confPubs = [ pub for pub in pubs if pub['type'] == CONFERENCE_PUB ]
journalPubs = [ pub for pub in pubs if pub['type'] == JOURNAL_PUB ]
shortPubs = [ pub for pub in pubs if pub['type'] == SHORT_PUB ]
arxivPubs = [ pub for pub in pubs if pub['type'] == ARXIV_PUB ]
patentPubs = [ pub for pub in pubs if pub['type'] == PATENT_PUB ]
posterPubs = [ pub for pub in pubs if pub['type'] == POSTER_PUB ]
dissertationPubs = [ pub for pub in pubs if pub['type'] == DISSERTATION_PUB ]
if confPubs:
print("Writing the {} conference pubs".format(len(confPubs)))
writePubs(outfile, "Peer-reviewed conference papers", confPubs)
if journalPubs:
print("Writing the {} journal pubs".format(len(journalPubs)))
writePubs(outfile, "Peer-reviewed journal papers", journalPubs)
if shortPubs:
print("Writing the {} short pubs".format(len(shortPubs)))
writePubs(outfile, "Peer-reviewed short papers", shortPubs)
if arxivPubs:
print("Writing the {} arxiv pubs".format(len(arxivPubs)))
writePubs(outfile, "arXiv papers", arxivPubs)
if patentPubs:
print("Writing the {} patents".format(len(patentPubs)))
writePubs(outfile, "US Patents", patentPubs)
if posterPubs:
print("Writing the {} posters".format(len(posterPubs)))
writePubs(outfile, "Posters", posterPubs)
if dissertationPubs:
print("Writing the {} dissertations".format(len(dissertationPubs)))
writePubs(outfile, "Dissertation", dissertationPubs)
outfile.write('\n') |
py | 1a33b586e623edc3efec00a64c11803c01ef6dd7 | from django.shortcuts import render
from .models import Book
def index(request):
return render(request, 'template.html')
def store(request):
count = Book.objects.all().count()
context = {
'count': count,
}
return render(request, 'store.html', context)
|
py | 1a33b62a50a011e28574dca0430d3014611487d3 | print('\033[1;31;4m-=' * 6, 'DESAFIO 061 - PROGRESSÃO ARITIMÉTICA V2', '=-' * 6)
print('\033[0;1;33m-' * 29)
print('| \033[0;30;45m Digite o primeiro termo \033[0;1;33m |')
print('|', '-' * 25, '|')
t = int(input('| R: '))
print()
print('-' * 20)
print('| \033[0;30;45m Digite a razão \033[0;1;33m |')
print('|', '-' * 16, '|')
r = int(input("| R: "))
print()
pa = 0
print('-' * 70)
print('| \033[0;30;42m', end=' ')
while pa != 10:
print(t, end=' -> ')
t += r
pa += 1
print('Acabou! \033[0;33;1m |')
print('-' * 70)
|
py | 1a33b7dc08298cb4b88e5b1b6f75720da39150a8 | """
Provides functionality to emulate keyboard presses on host machine.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/keyboard/
"""
import voluptuous as vol
from homeassistant.const import (
SERVICE_MEDIA_NEXT_TRACK, SERVICE_MEDIA_PLAY_PAUSE,
SERVICE_MEDIA_PREVIOUS_TRACK, SERVICE_VOLUME_DOWN, SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_UP)
DOMAIN = "keyboard"
REQUIREMENTS = ['pyuserinput==0.1.9']
TAP_KEY_SCHEMA = vol.Schema({})
def volume_up(hass):
"""Press the keyboard button for volume up."""
hass.services.call(DOMAIN, SERVICE_VOLUME_UP)
def volume_down(hass):
"""Press the keyboard button for volume down."""
hass.services.call(DOMAIN, SERVICE_VOLUME_DOWN)
def volume_mute(hass):
"""Press the keyboard button for muting volume."""
hass.services.call(DOMAIN, SERVICE_VOLUME_MUTE)
def media_play_pause(hass):
"""Press the keyboard button for play/pause."""
hass.services.call(DOMAIN, SERVICE_MEDIA_PLAY_PAUSE)
def media_next_track(hass):
"""Press the keyboard button for next track."""
hass.services.call(DOMAIN, SERVICE_MEDIA_NEXT_TRACK)
def media_prev_track(hass):
"""Press the keyboard button for prev track."""
hass.services.call(DOMAIN, SERVICE_MEDIA_PREVIOUS_TRACK)
def setup(hass, config):
"""Listen for keyboard events."""
import pykeyboard
keyboard = pykeyboard.PyKeyboard()
keyboard.special_key_assignment()
hass.services.register(DOMAIN, SERVICE_VOLUME_UP,
lambda service:
keyboard.tap_key(keyboard.volume_up_key),
schema=TAP_KEY_SCHEMA)
hass.services.register(DOMAIN, SERVICE_VOLUME_DOWN,
lambda service:
keyboard.tap_key(keyboard.volume_down_key),
schema=TAP_KEY_SCHEMA)
hass.services.register(DOMAIN, SERVICE_VOLUME_MUTE,
lambda service:
keyboard.tap_key(keyboard.volume_mute_key),
schema=TAP_KEY_SCHEMA)
hass.services.register(DOMAIN, SERVICE_MEDIA_PLAY_PAUSE,
lambda service:
keyboard.tap_key(keyboard.media_play_pause_key),
schema=TAP_KEY_SCHEMA)
hass.services.register(DOMAIN, SERVICE_MEDIA_NEXT_TRACK,
lambda service:
keyboard.tap_key(keyboard.media_next_track_key),
schema=TAP_KEY_SCHEMA)
hass.services.register(DOMAIN, SERVICE_MEDIA_PREVIOUS_TRACK,
lambda service:
keyboard.tap_key(keyboard.media_prev_track_key),
schema=TAP_KEY_SCHEMA)
return True
|
py | 1a33b7e71b6b891589202bfe4df2279250171212 | """Unit tests of commenting managers."""
import pytest
from ..utilities.general import is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only
from dlkit.abstract_osid.osid import errors
from dlkit.abstract_osid.type.objects import TypeList as abc_type_list
from dlkit.primordium.id.primitives import Id
from dlkit.primordium.type.primitives import Type
from dlkit.runtime import PROXY_SESSION, proxy_example
from dlkit.runtime.managers import Runtime
REQUEST = proxy_example.SimpleRequest()
CONDITION = PROXY_SESSION.get_proxy_condition()
CONDITION.set_http_request(REQUEST)
PROXY = PROXY_SESSION.get_proxy(CONDITION)
DEFAULT_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT', 'authority': 'DEFAULT'})
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def commenting_profile_class_fixture(request):
request.cls.service_config = request.param
request.cls.mgr = Runtime().get_service_manager(
'COMMENTING',
proxy=PROXY,
implementation=request.cls.service_config)
@pytest.fixture(scope="function")
def commenting_profile_test_fixture(request):
pass
@pytest.mark.usefixtures("commenting_profile_class_fixture", "commenting_profile_test_fixture")
class TestCommentingProfile(object):
"""Tests for CommentingProfile"""
def test_supports_comment_lookup(self):
"""Tests supports_comment_lookup"""
assert isinstance(self.mgr.supports_comment_lookup(), bool)
def test_supports_comment_query(self):
"""Tests supports_comment_query"""
assert isinstance(self.mgr.supports_comment_query(), bool)
def test_supports_comment_admin(self):
"""Tests supports_comment_admin"""
assert isinstance(self.mgr.supports_comment_admin(), bool)
def test_supports_comment_book(self):
"""Tests supports_comment_book"""
assert isinstance(self.mgr.supports_comment_book(), bool)
def test_supports_comment_book_assignment(self):
"""Tests supports_comment_book_assignment"""
assert isinstance(self.mgr.supports_comment_book_assignment(), bool)
def test_supports_book_lookup(self):
"""Tests supports_book_lookup"""
assert isinstance(self.mgr.supports_book_lookup(), bool)
def test_supports_book_admin(self):
"""Tests supports_book_admin"""
assert isinstance(self.mgr.supports_book_admin(), bool)
def test_supports_book_hierarchy(self):
"""Tests supports_book_hierarchy"""
assert isinstance(self.mgr.supports_book_hierarchy(), bool)
def test_supports_book_hierarchy_design(self):
"""Tests supports_book_hierarchy_design"""
assert isinstance(self.mgr.supports_book_hierarchy_design(), bool)
def test_get_comment_record_types(self):
"""Tests get_comment_record_types"""
assert isinstance(self.mgr.get_comment_record_types(), abc_type_list)
def test_get_comment_search_record_types(self):
"""Tests get_comment_search_record_types"""
assert isinstance(self.mgr.get_comment_search_record_types(), abc_type_list)
def test_get_book_record_types(self):
"""Tests get_book_record_types"""
assert isinstance(self.mgr.get_book_record_types(), abc_type_list)
def test_get_book_search_record_types(self):
"""Tests get_book_search_record_types"""
assert isinstance(self.mgr.get_book_search_record_types(), abc_type_list)
class NotificationReceiver(object):
# Implemented from resource.ResourceManager
pass
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def commenting_manager_class_fixture(request):
# Implemented from resource.ResourceManager
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'COMMENTING',
implementation=request.cls.service_config)
if not is_never_authz(request.cls.service_config):
create_form = request.cls.svc_mgr.get_book_form_for_create([])
create_form.display_name = 'Test Book'
create_form.description = 'Test Book for commenting manager tests'
catalog = request.cls.svc_mgr.create_book(create_form)
request.cls.catalog_id = catalog.get_id()
request.cls.receiver = NotificationReceiver()
else:
request.cls.catalog_id = Id('resource.Resource%3A000000000000000000000000%40DLKIT.MIT.EDU')
def class_tear_down():
if not is_never_authz(request.cls.service_config):
request.cls.svc_mgr.delete_book(request.cls.catalog_id)
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def commenting_manager_test_fixture(request):
# Implemented from resource.ResourceManager
pass
@pytest.mark.usefixtures("commenting_manager_class_fixture", "commenting_manager_test_fixture")
class TestCommentingManager(object):
"""Tests for CommentingManager"""
def test_get_comment_lookup_session(self):
"""Tests get_comment_lookup_session"""
# From tests_templates/resource.py::ResourceManager::get_resource_lookup_session_template
if self.svc_mgr.supports_comment_lookup():
self.svc_mgr.get_comment_lookup_session()
def test_get_comment_lookup_session_for_book(self):
"""Tests get_comment_lookup_session_for_book"""
# From tests_templates/resource.py::ResourceManager::get_resource_lookup_session_for_bin_template
if self.svc_mgr.supports_comment_lookup():
self.svc_mgr.get_comment_lookup_session_for_book(self.catalog_id)
with pytest.raises(errors.NullArgument):
self.svc_mgr.get_comment_lookup_session_for_book()
def test_get_comment_query_session(self):
"""Tests get_comment_query_session"""
# From tests_templates/resource.py::ResourceManager::get_resource_lookup_session_template
if self.svc_mgr.supports_comment_query():
self.svc_mgr.get_comment_query_session()
def test_get_comment_query_session_for_book(self):
"""Tests get_comment_query_session_for_book"""
# From tests_templates/resource.py::ResourceManager::get_resource_lookup_session_for_bin_template
if self.svc_mgr.supports_comment_query():
self.svc_mgr.get_comment_query_session_for_book(self.catalog_id)
with pytest.raises(errors.NullArgument):
self.svc_mgr.get_comment_query_session_for_book()
def test_get_comment_admin_session(self):
"""Tests get_comment_admin_session"""
# From tests_templates/resource.py::ResourceManager::get_resource_admin_session_template
if self.svc_mgr.supports_comment_admin():
self.svc_mgr.get_comment_admin_session()
def test_get_comment_admin_session_for_book(self):
"""Tests get_comment_admin_session_for_book"""
# From tests_templates/resource.py::ResourceManager::get_resource_admin_session_for_bin_template
if self.svc_mgr.supports_comment_admin():
self.svc_mgr.get_comment_admin_session_for_book(self.catalog_id)
with pytest.raises(errors.NullArgument):
self.svc_mgr.get_comment_admin_session_for_book()
def test_get_comment_book_session(self):
"""Tests get_comment_book_session"""
# From tests_templates/resource.py::ResourceManager::get_resource_admin_session_template
if self.svc_mgr.supports_comment_book():
self.svc_mgr.get_comment_book_session()
def test_get_comment_book_assignment_session(self):
"""Tests get_comment_book_assignment_session"""
# From tests_templates/resource.py::ResourceManager::get_resource_admin_session_template
if self.svc_mgr.supports_comment_book_assignment():
self.svc_mgr.get_comment_book_assignment_session()
def test_get_book_lookup_session(self):
"""Tests get_book_lookup_session"""
# From tests_templates/resource.py::ResourceManager::get_resource_admin_session_template
if self.svc_mgr.supports_book_lookup():
self.svc_mgr.get_book_lookup_session()
def test_get_book_admin_session(self):
"""Tests get_book_admin_session"""
# From tests_templates/resource.py::ResourceManager::get_resource_admin_session_template
if self.svc_mgr.supports_book_admin():
self.svc_mgr.get_book_admin_session()
def test_get_book_hierarchy_session(self):
"""Tests get_book_hierarchy_session"""
# From tests_templates/resource.py::ResourceManager::get_resource_admin_session_template
if self.svc_mgr.supports_book_hierarchy():
self.svc_mgr.get_book_hierarchy_session()
def test_get_book_hierarchy_design_session(self):
"""Tests get_book_hierarchy_design_session"""
# From tests_templates/resource.py::ResourceManager::get_resource_admin_session_template
if self.svc_mgr.supports_book_hierarchy_design():
self.svc_mgr.get_book_hierarchy_design_session()
def test_get_commenting_batch_manager(self):
"""Tests get_commenting_batch_manager"""
# From tests_templates/resource.py::ResourceManager::get_resource_batch_manager_template
if self.svc_mgr.supports_commenting_batch():
self.svc_mgr.get_commenting_batch_manager()
class NotificationReceiver(object):
# Implemented from resource.ResourceProxyManager
pass
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def commenting_proxy_manager_class_fixture(request):
# Implemented from resource.ResourceProxyManager
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'COMMENTING',
proxy=PROXY,
implementation=request.cls.service_config)
if not is_never_authz(request.cls.service_config):
create_form = request.cls.svc_mgr.get_book_form_for_create([])
create_form.display_name = 'Test Book'
create_form.description = 'Test Book for commenting proxy manager tests'
catalog = request.cls.svc_mgr.create_book(create_form)
request.cls.catalog_id = catalog.get_id()
else:
request.cls.catalog_id = Id('resource.Resource%3A000000000000000000000000%40DLKIT.MIT.EDU')
request.cls.receiver = NotificationReceiver()
def class_tear_down():
if not is_never_authz(request.cls.service_config):
request.cls.svc_mgr.delete_book(request.cls.catalog_id)
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def commenting_proxy_manager_test_fixture(request):
# Implemented from resource.ResourceProxyManager
pass
@pytest.mark.usefixtures("commenting_proxy_manager_class_fixture", "commenting_proxy_manager_test_fixture")
class TestCommentingProxyManager(object):
"""Tests for CommentingProxyManager"""
def test_get_comment_lookup_session(self):
"""Tests get_comment_lookup_session"""
# From tests_templates/resource.py::ResourceProxyManager::get_resource_lookup_session_template
if self.svc_mgr.supports_comment_lookup():
self.svc_mgr.get_comment_lookup_session(PROXY)
with pytest.raises(errors.NullArgument):
self.svc_mgr.get_comment_lookup_session()
def test_get_comment_lookup_session_for_book(self):
"""Tests get_comment_lookup_session_for_book"""
# From tests_templates/resource.py::ResourceProxyManager::get_resource_lookup_session_for_bin_template
if self.svc_mgr.supports_comment_lookup():
self.svc_mgr.get_comment_lookup_session_for_book(self.catalog_id, PROXY)
with pytest.raises(errors.NullArgument):
self.svc_mgr.get_comment_lookup_session_for_book()
def test_get_comment_query_session(self):
"""Tests get_comment_query_session"""
# From tests_templates/resource.py::ResourceProxyManager::get_resource_lookup_session_template
if self.svc_mgr.supports_comment_query():
self.svc_mgr.get_comment_query_session(PROXY)
with pytest.raises(errors.NullArgument):
self.svc_mgr.get_comment_query_session()
def test_get_comment_query_session_for_book(self):
"""Tests get_comment_query_session_for_book"""
# From tests_templates/resource.py::ResourceProxyManager::get_resource_lookup_session_for_bin_template
if self.svc_mgr.supports_comment_query():
self.svc_mgr.get_comment_query_session_for_book(self.catalog_id, PROXY)
with pytest.raises(errors.NullArgument):
self.svc_mgr.get_comment_query_session_for_book()
def test_get_comment_admin_session(self):
"""Tests get_comment_admin_session"""
# From tests_templates/resource.py::ResourceProxyManager::get_resource_admin_session_template
if self.svc_mgr.supports_comment_admin():
self.svc_mgr.get_comment_admin_session(PROXY)
with pytest.raises(errors.NullArgument):
self.svc_mgr.get_comment_admin_session()
def test_get_comment_admin_session_for_book(self):
"""Tests get_comment_admin_session_for_book"""
# From tests_templates/resource.py::ResourceProxyManager::get_resource_admin_session_for_bin_template
if self.svc_mgr.supports_comment_admin():
self.svc_mgr.get_comment_admin_session_for_book(self.catalog_id, PROXY)
with pytest.raises(errors.NullArgument):
self.svc_mgr.get_comment_admin_session_for_book()
def test_get_comment_book_session(self):
"""Tests get_comment_book_session"""
# From tests_templates/resource.py::ResourceProxyManager::get_resource_admin_session_template
if self.svc_mgr.supports_comment_book():
self.svc_mgr.get_comment_book_session(PROXY)
with pytest.raises(errors.NullArgument):
self.svc_mgr.get_comment_book_session()
def test_get_comment_book_assignment_session(self):
"""Tests get_comment_book_assignment_session"""
# From tests_templates/resource.py::ResourceProxyManager::get_resource_admin_session_template
if self.svc_mgr.supports_comment_book_assignment():
self.svc_mgr.get_comment_book_assignment_session(PROXY)
with pytest.raises(errors.NullArgument):
self.svc_mgr.get_comment_book_assignment_session()
def test_get_book_lookup_session(self):
"""Tests get_book_lookup_session"""
# From tests_templates/resource.py::ResourceProxyManager::get_resource_admin_session_template
if self.svc_mgr.supports_book_lookup():
self.svc_mgr.get_book_lookup_session(PROXY)
with pytest.raises(errors.NullArgument):
self.svc_mgr.get_book_lookup_session()
def test_get_book_admin_session(self):
"""Tests get_book_admin_session"""
# From tests_templates/resource.py::ResourceProxyManager::get_resource_admin_session_template
if self.svc_mgr.supports_book_admin():
self.svc_mgr.get_book_admin_session(PROXY)
with pytest.raises(errors.NullArgument):
self.svc_mgr.get_book_admin_session()
def test_get_book_hierarchy_session(self):
"""Tests get_book_hierarchy_session"""
# From tests_templates/resource.py::ResourceProxyManager::get_resource_admin_session_template
if self.svc_mgr.supports_book_hierarchy():
self.svc_mgr.get_book_hierarchy_session(PROXY)
with pytest.raises(errors.NullArgument):
self.svc_mgr.get_book_hierarchy_session()
def test_get_book_hierarchy_design_session(self):
"""Tests get_book_hierarchy_design_session"""
# From tests_templates/resource.py::ResourceProxyManager::get_resource_admin_session_template
if self.svc_mgr.supports_book_hierarchy_design():
self.svc_mgr.get_book_hierarchy_design_session(PROXY)
with pytest.raises(errors.NullArgument):
self.svc_mgr.get_book_hierarchy_design_session()
def test_get_commenting_batch_proxy_manager(self):
"""Tests get_commenting_batch_proxy_manager"""
# From tests_templates/resource.py::ResourceProxyManager::get_resource_batch_proxy_manager_template
if self.svc_mgr.supports_commenting_batch():
self.svc_mgr.get_commenting_batch_proxy_manager()
|
py | 1a33b888231ce9176219e6582432d543065f9afe | import random
from datetime import datetime
import csv
import os
import sys
import struct
import argparse
import datetime
import paho.mqtt.client as mqtt
times = []
acc1 = []
acc2 = []
acc3 = []
ane1 = []
ane2 = []
ane3 = []
node = ""
# Callback invoked when the connection to the MQTT broker has been established.
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
    # subscribe to the channel/topic when on_connect() fires
client.subscribe("BAMS")
# Callback invoked when a PUBLISH message is received from the MQTT server.
def on_message(client, userdata, msg):
    n = 8  # split the payload into chunks of 8 characters
node = msg.payload[0:3].decode('ascii')
timestamp = datetime.datetime.now() - datetime.timedelta(seconds=3)
timestamp = timestamp.strftime("%H:%M:%S")
max_length=msg.payload[11:]
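    # Each sample is sent as 8 ASCII hex characters encoding one big-endian IEEE-754 float32;
    # the payload after the 11-character prefix is decoded in 8-character chunks below.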
sensor = [struct.unpack('!f', bytes.fromhex(msg.payload[i:i+n].decode('ascii')))[0]
for i in range(11, len(msg.payload[11:]) + n, n)]
if node == "sb1":
array = [{"node" : node, "acc1" : sensor[80], "acc2" : sensor[180], "acc3" : 0, "ane1" : sensor[-3], "ane2" : sensor[-2], "ane3" : sensor[-1], "timestamp" : timestamp},]
elif node == "sb2":
array = [{"node" : node, "acc1" : sensor[80], "acc2" : sensor[180], "acc3" : sensor[280], "ane1" : sensor[-3], "ane2" : sensor[-2], "ane3" : sensor[-1], "timestamp" : timestamp},]
else:
array = [{"node" : node, "acc1" : sensor[80], "acc2" : sensor[180], "acc3" : 0, "ane1" : 0, "ane2" : 0, "ane3" : 0, "timestamp" : timestamp},]
if len(times) < 5:
if node == "sb1":
acc1.append(sensor[80])
acc2.append(sensor[180])
acc3.append(0)
ane1.append(sensor[-3])
ane2.append(sensor[-2])
ane3.append(sensor[-1])
times.append(timestamp)
elif node == "sb2":
acc1.append(sensor[80])
acc2.append(sensor[180])
acc3.append(sensor[280])
ane1.append(sensor[-3])
ane2.append(sensor[-2])
ane3.append(sensor[-1])
times.append(timestamp)
else:
acc1.append(sensor[80])
acc2.append(sensor[180])
acc3.append(0)
ane1.append(0)
ane2.append(0)
ane3.append(0)
times.append(timestamp)
else:
if node == "sb1":
acc1[:-1] = acc1[1:]
acc1[-1] = sensor[80]
acc2[:-1] = acc2[1:]
acc2[-1] = sensor[180]
acc3[:-1] = acc3[1:]
acc3[-1] = 0
ane1[:-1] = ane1[1:]
ane1[-1] = sensor[-3]
ane2[:-1] = ane2[1:]
ane2[-1] = sensor[-2]
ane3[:-1] = ane3[1:]
ane3[-1] = sensor[-1]
times[:-1] = times[1:]
times[-1] = timestamp
elif node == "sb2":
acc1[:-1] = acc1[1:]
acc1[-1] = sensor[80]
acc2[:-1] = acc2[1:]
acc2[-1] = sensor[180]
acc3[:-1] = acc3[1:]
acc3[-1] = sensor[280]
ane1[:-1] = ane1[1:]
ane1[-1] = sensor[-3]
ane2[:-1] = ane2[1:]
ane2[-1] = sensor[-2]
ane3[:-1] = ane3[1:]
ane3[-1] = sensor[-1]
times[:-1] = times[1:]
times[-1] = timestamp
else:
acc1[:-1] = acc1[1:]
acc1[-1] = sensor[80]
acc2[:-1] = acc2[1:]
acc2[-1] = sensor[180]
acc3[:-1] = acc3[1:]
acc3[-1] = 0
ane1[:-1] = ane1[1:]
ane1[-1] = 0
ane2[:-1] = ane2[1:]
ane2[-1] = 0
ane3[:-1] = ane3[1:]
ane3[-1] = 0
times[:-1] = times[1:]
times[-1] = timestamp
# print(sensor)
print('\n',node, timestamp, len(sensor), len(max_length), len(max_length)/8)
# print(sensor)
print(acc1)
print(acc2)
print(acc3)
print(ane1)
print(ane2)
print(ane3)
print(times)
"""Menulis File CSV pada python"""
# array = [{"node" : node, "acc1" : sensor[80], "acc2" : sensor[180], "acc3" : sensor[280], "ane1" : sensor[-2], "ane2" : sensor[-3], "ane3" : sensor[-1], "timestamp" : timestamp},]
with open("csvfile/file.csv", "w") as csvfile:
fields = ["node", "acc1", "acc2", "acc3", "ane1", "ane2", "ane3", "timestamp"]
writer = csv.DictWriter(csvfile, fieldnames = fields)
writer.writeheader()
writer.writerows(array)
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
print("Melakukan koneksi ke Broker...")
client.connect("103.224.137.180", 9621)
client.username_pw_set('bams', 'bams.pwd')
client.loop_forever()
|
py | 1a33b99f731dde111109742eb27c2cca14b6d54b | import argparse
import asyncio
import datetime
import hashlib
import html
import logging
import re
import ssl
import tempfile
from argparse import Namespace
from base64 import b32encode
from collections import defaultdict
from time import time
from typing import Any
from typing import Dict
import irc.client
import irc.client_aio
import irc.connection
from jaraco.stream import buffer
from python_socks.async_.asyncio import Proxy
from heisenbridge import __version__
from heisenbridge.channel_room import ChannelRoom
from heisenbridge.command_parse import CommandManager
from heisenbridge.command_parse import CommandParser
from heisenbridge.command_parse import CommandParserError
from heisenbridge.irc import HeisenReactor
from heisenbridge.parser import IRCMatrixParser
from heisenbridge.plumbed_room import PlumbedRoom
from heisenbridge.private_room import parse_irc_formatting
from heisenbridge.private_room import PrivateRoom
from heisenbridge.private_room import unix_to_local
from heisenbridge.room import Room
def connected(f):
def wrapper(*args, **kwargs):
self = args[0]
if not self.conn or not self.conn.connected:
self.send_notice("Need to be connected to use this command.")
return asyncio.sleep(0)
return f(*args, **kwargs)
return wrapper
# forwards events to private and channel rooms
def ircroom_event(target_arg=None):
def outer(f):
def wrapper(self, conn, event):
if target_arg is not None:
# if we have target arg use that
target = event.arguments[target_arg].lower()
else:
# switch target around if it's targeted towards us directly
target = event.target.lower() if event.target != conn.real_nickname else event.source.nick.lower()
if target in self.rooms:
room = self.rooms[target]
try:
room_f = getattr(room, "on_" + event.type)
try:
return room_f(conn, event)
except Exception:
logging.exception(f"Calling on_{event.type} failed for {target}")
except AttributeError:
logging.warning(f"Expected {room} to have on_{event.type} but didn't")
return f(self, conn, event)
return wrapper
return outer
class NetworkRoom(Room):
# configuration stuff
name: str
connected: bool
nick: str
username: str
ircname: str
password: str
sasl_username: str
sasl_password: str
autocmd: str
pills_length: int
pills_ignore: list
autoquery: bool
tls_cert: str
rejoin_invite: bool
rejoin_kick: bool
# state
commands: CommandManager
conn: Any
rooms: Dict[str, Room]
connecting: bool
real_host: str
def init(self):
self.name = None
self.connected = False
self.nick = None
self.username = None
self.ircname = None
self.password = None
self.sasl_username = None
self.sasl_password = None
self.autocmd = None
self.pills_length = 2
self.pills_ignore = []
self.autoquery = True
self.allow_ctcp = False
self.tls_cert = None
self.rejoin_invite = True
self.rejoin_kick = False
self.commands = CommandManager()
self.conn = None
self.rooms = {}
self.connlock = asyncio.Lock()
self.disconnect = True
self.real_host = "?" * 63 # worst case default
self.keys = {} # temp dict of join channel keys
self.keepnick_task = None # async task
self.whois_data = defaultdict(dict) # buffer for keeping partial whois replies
cmd = CommandParser(
prog="NICK",
description="set/change nickname",
epilog=(
"You can always see your current nickname on the network without arguments.\n"
"If connected new nickname will be sent to the server immediately. It may be rejected and an underscore appended"
" to it automatically.\n"
),
)
cmd.add_argument("nick", nargs="?", help="new nickname")
self.commands.register(cmd, self.cmd_nick)
cmd = CommandParser(
prog="USERNAME",
description="set username",
epilog=(
"Setting a new username requires reconnecting to the network.\n"
"\n"
"Note: If identd is enabled this will be ignored and Matrix ID hash or admin set custom ident is used."
),
)
cmd.add_argument("username", nargs="?", help="new username")
cmd.add_argument("--remove", action="store_true", help="remove stored username")
self.commands.register(cmd, self.cmd_username)
cmd = CommandParser(
prog="IRCNAME",
description="set ircname (realname)",
epilog=("Setting a new ircname requires reconnecting to the network.\n"),
)
cmd.add_argument("ircname", nargs="?", help="new ircname")
cmd.add_argument("--remove", action="store_true", help="remove stored ircname")
self.commands.register(cmd, self.cmd_ircname)
cmd = CommandParser(
prog="PASSWORD",
description="set server password",
epilog=(
"You can store your network password using this command and it will be automatically offered on connect.\n"
"Some networks allow using this to identify with NickServ on connect without sending a separate message.\n"
"\n"
"Note: Bridge administrators can trivially see the stored password if they want to.\n"
),
)
cmd.add_argument("password", nargs="?", help="new password")
cmd.add_argument("--remove", action="store_true", help="remove stored password")
self.commands.register(cmd, self.cmd_password)
cmd = CommandParser(
prog="SASL",
description="set SASL PLAIN credentials",
epilog=(
"If the network supports SASL authentication you can configure them with this command.\n"
"\n"
"Note: Bridge administrators can trivially see the stored password if they want to.\n"
),
)
cmd.add_argument("--username", help="SASL username")
cmd.add_argument("--password", help="SASL password")
cmd.add_argument("--remove", action="store_true", help="remove stored credentials")
self.commands.register(cmd, self.cmd_sasl)
cmd = CommandParser(
prog="CERTFP",
description="configure CertFP authentication for this network",
epilog=(
"Using the set command requires you to paste a bundled PEM certificate (cert + key) on the next line"
" after the command within the same message. The certificate needs to include both the certificate and"
" the private key for it to be accepted.\n"
"\n"
"OpenSSL generation example (from Libera.Chat guides):\n"
"$ openssl req -x509 -new -newkey rsa:4096 -sha256 -days 1096 -nodes -out libera.pem -keyout libera.pem"
),
)
cmd.add_argument("--set", action="store_true", help="set X509 certificate bundle (PEM)")
cmd.add_argument("--remove", action="store_true", help="remove stored certificate")
self.commands.register(cmd, self.cmd_certfp)
cmd = CommandParser(
prog="AUTOCMD",
description="run commands on connect",
epilog=(
"If the network you are connecting to does not support server password to identify you automatically"
" can set this to send a command before joining channels.\n"
"\n"
'Example (QuakeNet): AUTOCMD "UMODE +x; MSG [email protected] auth foo bar"\n'
"Example (OFTC): AUTOCMD NICKSERV identify foo bar\n"
),
)
cmd.add_argument("command", nargs="*", help="commands separated with ';'")
cmd.add_argument("--remove", action="store_true", help="remove stored command")
self.commands.register(cmd, self.cmd_autocmd)
cmd = CommandParser(
prog="CONNECT",
description="connect to network",
epilog=(
"When this command is invoked the connection to this network will be persisted across disconnects and"
" bridge restart.\n"
"Only if the server KILLs your connection it will stay disconnected until CONNECT is invoked again.\n"
"\n"
"If you want to cancel automatic reconnect you need to issue the DISCONNECT command.\n"
),
)
self.commands.register(cmd, self.cmd_connect)
cmd = CommandParser(
prog="DISCONNECT",
description="disconnect from network",
epilog=(
"In addition to disconnecting from an active network connection this will also cancel any automatic"
"reconnection attempt.\n"
),
)
self.commands.register(cmd, self.cmd_disconnect)
cmd = CommandParser(prog="RECONNECT", description="reconnect to network")
self.commands.register(cmd, self.cmd_reconnect)
cmd = CommandParser(
prog="RAW",
description="send raw IRC commands",
epilog=(
"Arguments (text) are not quoted in any way so it's possible to send ANY command to the server.\n"
"This is meant as a last resort if the bridge does not have built-in support for some IRC command.\n"
"\n"
"Note: You may need to use colon (:) for multi-word arguments, see the IRC RFC for details.\n"
),
)
cmd.add_argument("text", nargs="+", help="raw text")
self.commands.register(cmd, self.cmd_raw)
cmd = CommandParser(
prog="QUERY",
description="start a private chat",
epilog=(
"Creates a new DM with the target nick. They do not need to be connected for this command to work.\n"
),
)
cmd.add_argument("nick", help="target nickname")
cmd.add_argument("message", nargs="*", help="optional message")
self.commands.register(cmd, self.cmd_query)
cmd = CommandParser(
prog="MSG",
description="send a message without opening a DM",
epilog=(
"If the target nick does not exist on the network an error reply may be generated by the server.\n"
),
)
cmd.add_argument("nick", help="target nickname")
cmd.add_argument("message", nargs="+", help="message")
self.commands.register(cmd, self.cmd_msg)
cmd = CommandParser(
prog="CTCP",
description="send a CTCP command",
epilog="You probably know what you are doing.",
)
cmd.add_argument("nick", help="target nickname")
cmd.add_argument("command", nargs="+", help="command and arguments")
self.commands.register(cmd, self.cmd_ctcp)
cmd = CommandParser(
prog="CTCPCFG",
description="enable/disable automatic CTCP replies",
)
cmd.add_argument("--enable", dest="enabled", action="store_true", help="Enable CTCP replies")
cmd.add_argument("--disable", dest="enabled", action="store_false", help="Disable CTCP replies")
cmd.set_defaults(enabled=None)
self.commands.register(cmd, self.cmd_ctcpcfg)
cmd = CommandParser(
prog="NICKSERV",
description="send a message to NickServ (if supported by network)",
epilog="Alias: NS",
)
cmd.add_argument("message", nargs="+", help="message")
self.commands.register(cmd, self.cmd_nickserv, ["NS"])
cmd = CommandParser(
prog="CHANSERV",
description="send a message to ChanServ (if supported by network)",
epilog="Alias: CS",
)
cmd.add_argument("message", nargs="+", help="message")
self.commands.register(cmd, self.cmd_chanserv, ["CS"])
cmd = CommandParser(
prog="JOIN",
description="join a channel",
epilog=(
"Any channels joined will be persisted between reconnects.\n"
"\n"
"Note: Bridge administrators can trivially see the stored channel key if they want to.\n"
),
)
cmd.add_argument("channel", help="target channel")
cmd.add_argument("key", nargs="?", help="channel key")
self.commands.register(cmd, self.cmd_join)
cmd = CommandParser(
prog="PLUMB",
description="plumb a room",
epilog=(
"Plumbs a channel in single-puppeted mode. This will make the bridge join the room and then join the"
" configured IRC channel.\n"
),
)
cmd.add_argument("room", help="target Matrix room ID (eg. !uniqueid:your-homeserver)")
cmd.add_argument("channel", help="target channel")
cmd.add_argument("key", nargs="?", help="channel key")
self.commands.register(cmd, self.cmd_plumb)
cmd = CommandParser(prog="UMODE", description="set user modes")
cmd.add_argument("flags", help="user mode flags")
self.commands.register(cmd, self.cmd_umode)
cmd = CommandParser(
prog="WAIT",
description="wait specified amount of time",
epilog=("Use with AUTOCMD to add delays between commands."),
)
cmd.add_argument("seconds", help="how many seconds to wait")
self.commands.register(cmd, self.cmd_wait)
cmd = CommandParser(
prog="PILLS",
description="configure automatic pills",
)
cmd.add_argument(
"--length", help="minimum length of nick to generate a pill, setting to 0 disables this feature", type=int
)
cmd.add_argument("--ignore", help="comma separated list of nicks to ignore for pills")
self.commands.register(cmd, self.cmd_pills)
cmd = CommandParser(
prog="AUTOQUERY",
description="enable or disable automatic room creation when getting a message",
)
cmd.add_argument("--enable", dest="enabled", action="store_true", help="Enable autoquery")
cmd.add_argument("--disable", dest="enabled", action="store_false", help="Disable autoquery")
cmd.set_defaults(enabled=None)
self.commands.register(cmd, self.cmd_autoquery)
cmd = CommandParser(prog="WHOIS", description="send a WHOIS(IS) command")
cmd.add_argument("nick", help="target nick")
self.commands.register(cmd, self.cmd_whois)
cmd = CommandParser(
prog="ROOM",
description="run a room command from network room",
epilog=(
"Try 'ROOM #foo' to get the list of commands for a room.",
"If a command generates IRC replies in a bouncer room they will appear in the room itself.",
),
)
cmd.add_argument("target", help="IRC channel or nick that has a room")
cmd.add_argument("command", help="Command and arguments", nargs=argparse.REMAINDER)
self.commands.register(cmd, self.cmd_room)
cmd = CommandParser(prog="REJOIN", description="configure rejoin behavior for channel rooms")
cmd.add_argument("--enable-invite", dest="invite", action="store_true", help="Enable rejoin on invite")
cmd.add_argument("--disable-invite", dest="invite", action="store_false", help="Disable rejoin on invite")
cmd.add_argument("--enable-kick", dest="kick", action="store_true", help="Enable rejoin on kick")
cmd.add_argument("--disable-kick", dest="kick", action="store_false", help="Disable rejoin on kick")
cmd.set_defaults(invite=None, kick=None)
self.commands.register(cmd, self.cmd_rejoin)
self.mx_register("m.room.message", self.on_mx_message)
@staticmethod
async def create(serv, name, user_id):
room_id = await serv.create_room(name, "Network room for {}".format(name), [user_id])
room = NetworkRoom(room_id, user_id, serv, [serv.user_id, user_id])
room.from_config({"name": name})
await room.save()
serv.register_room(room)
await room.show_help()
return room
def from_config(self, config: dict):
if "name" in config:
self.name = config["name"]
else:
raise Exception("No name key in config for NetworkRoom")
if "connected" in config:
self.connected = config["connected"]
if "nick" in config:
self.nick = config["nick"]
if "username" in config:
self.username = config["username"]
if "ircname" in config:
self.ircname = config["ircname"]
if "password" in config:
self.password = config["password"]
if "sasl_username" in config:
self.sasl_username = config["sasl_username"]
if "sasl_password" in config:
self.sasl_password = config["sasl_password"]
if "autocmd" in config:
self.autocmd = config["autocmd"]
if "pills_length" in config:
self.pills_length = config["pills_length"]
if "pills_ignore" in config:
self.pills_ignore = config["pills_ignore"]
if "autoquery" in config:
self.autoquery = config["autoquery"]
if "allow_ctcp" in config:
self.allow_ctcp = config["allow_ctcp"]
if "tls_cert" in config:
self.tls_cert = config["tls_cert"]
if "rejoin_invite" in config:
self.rejoin_invite = config["rejoin_invite"]
if "rejoin_kick" in config:
self.rejoin_kick = config["rejoin_kick"]
def to_config(self) -> dict:
return {
"name": self.name,
"connected": self.connected,
"nick": self.nick,
"username": self.username,
"ircname": self.ircname,
"password": self.password,
"sasl_username": self.sasl_username,
"sasl_password": self.sasl_password,
"autocmd": self.autocmd,
"allow_ctcp": self.allow_ctcp,
"tls_cert": self.tls_cert,
"pills_length": self.pills_length,
"pills_ignore": self.pills_ignore,
"rejoin_invite": self.rejoin_invite,
"rejoin_kick": self.rejoin_kick,
}
def is_valid(self) -> bool:
if self.name is None:
return False
# if user leaves network room and it's not connected we can clean it up
if not self.in_room(self.user_id) and not self.connected:
return False
return True
async def show_help(self):
self.send_notice_html(f"Welcome to the network room for <b>{html.escape(self.name)}</b>!")
try:
return await self.commands.trigger("HELP")
except CommandParserError as e:
return self.send_notice(str(e))
async def on_mx_message(self, event) -> None:
if event["content"]["msgtype"] != "m.text" or event["sender"] == self.serv.user_id:
return
# ignore edits
if "m.new_content" in event["content"]:
return
try:
if "formatted_body" in event["content"]:
lines = str(IRCMatrixParser.parse(event["content"]["formatted_body"])).split("\n")
else:
lines = event["content"]["body"].split("\n")
command = lines.pop(0)
tail = "\n".join(lines) if len(lines) > 0 else None
await self.commands.trigger(command, tail)
except CommandParserError as e:
self.send_notice(str(e))
async def cmd_connect(self, args) -> None:
await self.connect()
async def cmd_disconnect(self, args) -> None:
if not self.disconnect:
self.send_notice("Aborting connection attempt after backoff.")
self.disconnect = True
if self.connected:
self.connected = False
await self.save()
if self.conn:
self.send_notice("Disconnecting...")
self.conn.disconnect()
@connected
async def cmd_reconnect(self, args) -> None:
self.send_notice("Reconnecting...")
self.conn.disconnect()
await self.connect()
@connected
async def cmd_raw(self, args) -> None:
self.conn.send_raw(" ".join(args.text))
@connected
async def cmd_query(self, args) -> None:
# TODO: validate nick doesn't look like a channel
target = args.nick.lower()
message = " ".join(args.message)
if target in self.rooms:
room = self.rooms[target]
await self.serv.api.post_room_invite(room.id, self.user_id)
self.send_notice("Inviting back to private chat with {}.".format(args.nick))
else:
room = PrivateRoom.create(self, args.nick)
self.rooms[room.name] = room
self.send_notice("You have been invited to private chat with {}.".format(args.nick))
if len(message) > 0:
self.conn.privmsg(target, message)
self.send_notice(f"Sent out-of-room message to {target}: {message}")
@connected
async def cmd_msg(self, args) -> None:
message = " ".join(args.message)
self.conn.privmsg(args.nick, message)
self.send_notice(f"{self.conn.real_nickname} -> {args.nick}: {message}")
@connected
async def cmd_ctcp(self, args) -> None:
command = args.command[0].upper()
command_args = " ".join(args.command[1:])
self.conn.ctcp(command, args.nick, command_args)
self.send_notice_html(
f"{self.conn.real_nickname} -> <b>{args.nick}</b> CTCP <b>{html.escape(command)}</b> {html.escape(command_args)}"
)
async def cmd_ctcpcfg(self, args) -> None:
if args.enabled is not None:
self.allow_ctcp = args.enabled
await self.save()
self.send_notice(f"CTCP replies are {'enabled' if self.allow_ctcp else 'disabled'}")
@connected
async def cmd_nickserv(self, args) -> None:
message = " ".join(args.message)
self.send_notice(f"{self.conn.real_nickname} -> NickServ: {message}")
self.conn.send_raw("NICKSERV " + message)
@connected
async def cmd_chanserv(self, args) -> None:
message = " ".join(args.message)
self.send_notice(f"{self.conn.real_nickname} -> ChanServ: {message}")
self.conn.send_raw("CHANSERV " + message)
@connected
async def cmd_join(self, args) -> None:
channel = args.channel
if re.match(r"^[A-Za-z0-9]", channel):
channel = "#" + channel
# cache key so we can store later if join succeeds
self.keys[channel.lower()] = args.key
self.conn.join(channel, args.key)
@connected
async def cmd_plumb(self, args) -> None:
channel = args.channel
if re.match(r"^[A-Za-z0-9]", channel):
channel = "#" + channel
if not self.serv.is_admin(self.user_id):
self.send_notice("Plumbing is currently reserved for admins only.")
return
room = await PlumbedRoom.create(id=args.room, network=self, channel=channel, key=args.key)
self.conn.join(room.name, room.key)
@connected
async def cmd_umode(self, args) -> None:
self.conn.mode(self.conn.real_nickname, args.flags)
async def cmd_wait(self, args) -> None:
try:
seconds = float(args.seconds)
if seconds > 0 and seconds < 30:
await asyncio.sleep(seconds)
else:
self.send_notice(f"Unreasonable wait time: {args.seconds}")
except ValueError:
self.send_notice(f"Invalid wait time: {args.seconds}")
def get_nick(self):
if self.nick:
return self.nick
return self.user_id.split(":")[0][1:]
async def cmd_nick(self, args) -> None:
if args.nick is None:
nick = self.get_nick()
if self.conn and self.conn.connected:
self.send_notice(f"Current nickname: {self.conn.real_nickname} (configured: {nick})")
else:
self.send_notice(f"Configured nickname: {nick}")
return
self.nick = args.nick
await self.save()
self.send_notice("Nickname set to {}".format(self.nick))
if self.conn and self.conn.connected:
if self.keepnick_task:
self.keepnick_task.cancel()
self.keepnick_task = None
self.conn.nick(args.nick)
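    # ident sent to the IRC server: an admin-configured override if present, otherwise a short hash of the Matrix ID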
def get_ident(self):
idents = self.serv.config["idents"]
# use admin set override if exists
if self.user_id in idents:
return idents[self.user_id][:8]
# return mxid digest if no custom ident
return (
"m-"
+ b32encode(hashlib.sha1(self.user_id.encode("utf-8")).digest())
.decode("utf-8")
.replace("=", "")[:6]
.lower()
)
async def cmd_username(self, args) -> None:
if args.remove:
self.username = None
await self.save()
self.send_notice("Username removed.")
return
if args.username is None:
self.send_notice(f"Configured username: {str(self.username)}")
return
self.username = args.username
await self.save()
self.send_notice(f"Username set to {self.username}")
async def cmd_ircname(self, args) -> None:
if args.remove:
self.ircname = None
await self.save()
self.send_notice("Ircname removed.")
return
if args.ircname is None:
self.send_notice(f"Configured ircname: {str(self.ircname)}")
return
self.ircname = args.ircname
await self.save()
self.send_notice(f"Ircname set to {self.ircname}")
async def cmd_password(self, args) -> None:
if args.remove:
self.password = None
await self.save()
self.send_notice("Password removed.")
return
if args.password is None:
self.send_notice(f"Configured password: {self.password if self.password else ''}")
return
self.password = args.password
await self.save()
self.send_notice(f"Password set to {self.password}")
async def cmd_sasl(self, args) -> None:
if args.remove:
self.sasl_username = None
self.sasl_password = None
await self.save()
self.send_notice("SASL credentials removed.")
return
if args.username is None and args.password is None:
self.send_notice(f"SASL username: {self.sasl_username}")
self.send_notice(f"SASL password: {self.sasl_password}")
return
if args.username:
self.sasl_username = args.username
if args.password:
self.sasl_password = args.password
await self.save()
self.send_notice("SASL credentials updated.")
async def cmd_certfp(self, args) -> None:
if args.remove:
self.tls_cert = None
await self.save()
self.send_notice("CertFP certificate removed.")
elif args.set:
if args._tail is None:
example = (
"CERTFP --set\n"
"-----BEGIN CERTIFICATE-----\n"
"...\n"
"-----END CERTIFICATE-----\n"
"-----BEGIN PRIVATE KEY-----\n"
"...\n"
"-----END PRIVATE KEY-----\n"
)
self.send_notice_html(
f"<p>Expected the certificate to follow command. Certificate not updated.</p><pre><code>{example}</code></pre>"
)
return
            # simple sanity checks that this plausibly looks like a PEM certificate bundle
if not args._tail.startswith("-----"):
self.send_notice("This does not look like a PEM certificate.")
return
if "-----BEGIN CERTIFICATE----" not in args._tail:
self.send_notice("Certificate section is missing.")
return
if "-----BEGIN PRIVATE KEY----" not in args._tail:
self.send_notice("Private key section is missing.")
return
self.tls_cert = args._tail
await self.save()
self.send_notice("Client certificate saved.")
else:
if self.tls_cert:
self.send_notice("CertFP certificate exists.")
else:
self.send_notice("CertFP certificate does not exist.")
async def cmd_autocmd(self, args) -> None:
autocmd = " ".join(args.command)
if args.remove:
self.autocmd = None
await self.save()
self.send_notice("Autocmd removed.")
return
if autocmd == "":
self.send_notice(f"Configured autocmd: {self.autocmd if self.autocmd else ''}")
return
self.autocmd = autocmd
await self.save()
self.send_notice(f"Autocmd set to {self.autocmd}")
async def cmd_whois(self, args) -> None:
self.conn.whois(f"{args.nick} {args.nick}")
async def cmd_room(self, args) -> None:
target = args.target.lower()
if target not in self.rooms:
self.send_notice(f"No room for {args.target}")
return
room = self.rooms[target]
if len(args.command) == 0:
args.command = ["HELP"]
await room.commands.trigger_args(args.command, forward=True)
async def cmd_pills(self, args) -> None:
save = False
if args.length is not None:
self.pills_length = args.length
self.send_notice(f"Pills minimum length set to {self.pills_length}")
save = True
else:
self.send_notice(f"Pills minimum length is {self.pills_length}")
if args.ignore is not None:
self.pills_ignore = list(map(lambda x: x.strip(), args.ignore.split(",")))
self.send_notice(f"Pills ignore list set to {', '.join(self.pills_ignore)}")
save = True
else:
if len(self.pills_ignore) == 0:
self.send_notice("Pills ignore list is empty.")
else:
self.send_notice(f"Pills ignore list: {', '.join(self.pills_ignore)}")
if save:
await self.save()
async def cmd_autoquery(self, args) -> None:
if args.enabled is not None:
self.autoquery = args.enabled
await self.save()
self.send_notice(f"Autoquery is {'enabled' if self.autoquery else 'disabled'}")
async def cmd_rejoin(self, args) -> None:
if args.invite is not None:
self.rejoin_invite = args.invite
await self.save()
if args.kick is not None:
self.rejoin_kick = args.kick
await self.save()
self.send_notice(f"Rejoin on invite is {'enabled' if self.rejoin_invite else 'disabled'}")
self.send_notice(f"Rejoin on kick is {'enabled' if self.rejoin_kick else 'disabled'}")
async def connect(self) -> None:
if self.connlock.locked():
self.send_notice("Already connecting.")
return
async with self.connlock:
await self._connect()
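    # reconnection loop: attach any existing rooms, then try each configured server with backoff until connected or stopped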
async def _connect(self) -> None:
self.disconnect = False
if self.conn and self.conn.connected:
self.send_notice("Already connected.")
return
# attach loose sub-rooms to us
for room in self.serv.find_rooms(PrivateRoom, self.user_id):
if room.name not in self.rooms and room.network_name == self.name:
logging.debug(f"NetworkRoom {self.id} attaching PrivateRoom {room.id}")
room.network = self
self.rooms[room.name] = room
for room in self.serv.find_rooms(ChannelRoom, self.user_id):
if room.name not in self.rooms and room.network_name == self.name:
logging.debug(f"NetworkRoom {self.id} attaching ChannelRoom {room.id}")
room.network = self
self.rooms[room.name] = room
for room in self.serv.find_rooms(PlumbedRoom, self.user_id):
if room.name not in self.rooms and room.network_name == self.name:
logging.debug(f"NetworkRoom {self.id} attaching PlumbedRoom {room.id}")
room.network = self
self.rooms[room.name] = room
# force cleanup
if self.conn:
self.conn.close()
self.conn = None
network = self.serv.config["networks"][self.name]
# reset whois buffer
self.whois_data.clear()
backoff = 10
while not self.disconnect:
if self.name not in self.serv.config["networks"]:
self.send_notice("This network does not exist on this bridge anymore.")
return
if len(network["servers"]) == 0:
self.connected = False
self.send_notice("No servers to connect for this network.")
await self.save()
return
for i, server in enumerate(network["servers"]):
if i > 0:
await asyncio.sleep(10)
try:
with_tls = ""
ssl_ctx = False
server_hostname = None
if server["tls"] or ("tls_insecure" in server and server["tls_insecure"]):
ssl_ctx = ssl.create_default_context()
if "tls_insecure" in server and server["tls_insecure"]:
with_tls = " with insecure TLS"
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.CERT_NONE
else:
with_tls = " with TLS"
ssl_ctx.verify_mode = ssl.CERT_REQUIRED
if self.tls_cert:
with_tls += " and CertFP"
# do this awful hack to allow the SSL stack to load the cert and key
cert_file = tempfile.NamedTemporaryFile()
cert_file.write(self.tls_cert.encode("utf-8"))
cert_file.flush()
ssl_ctx.load_cert_chain(cert_file.name)
cert_file.close()
server_hostname = server["address"]
proxy = None
sock = None
address = server["address"]
port = server["port"]
with_proxy = ""
if "proxy" in server and server["proxy"] is not None and len(server["proxy"]) > 0:
proxy = Proxy.from_url(server["proxy"])
address = port = None
with_proxy = " through a SOCKS proxy"
self.send_notice(f"Connecting to {server['address']}:{server['port']}{with_tls}{with_proxy}...")
if proxy:
sock = await proxy.connect(dest_host=server["address"], dest_port=server["port"])
if self.sasl_username and self.sasl_password:
self.send_notice(f"Using SASL credentials for username {self.sasl_username}")
reactor = HeisenReactor(loop=asyncio.get_event_loop())
irc_server = reactor.server()
irc_server.buffer_class = buffer.LenientDecodingLineBuffer
factory = irc.connection.AioFactory(ssl=ssl_ctx, sock=sock, server_hostname=server_hostname)
self.conn = await irc_server.connect(
address,
port,
self.get_nick(),
self.password,
username=self.get_ident() if self.username is None else self.username,
ircname=self.ircname,
connect_factory=factory,
sasl_username=self.sasl_username,
sasl_password=self.sasl_password,
)
self.conn.add_global_handler("disconnect", self.on_disconnect)
self.conn.add_global_handler("welcome", self.on_welcome)
self.conn.add_global_handler("umodeis", self.on_umodeis)
self.conn.add_global_handler("channelmodeis", self.on_pass0)
self.conn.add_global_handler("channelcreate", self.on_pass0)
self.conn.add_global_handler("notopic", self.on_pass0)
self.conn.add_global_handler("currenttopic", self.on_pass0)
self.conn.add_global_handler("topicinfo", self.on_pass0)
self.conn.add_global_handler("namreply", self.on_pass1)
self.conn.add_global_handler("endofnames", self.on_pass0)
self.conn.add_global_handler("banlist", self.on_pass0)
self.conn.add_global_handler("endofbanlist", self.on_pass0)
# 400-599
self.conn.add_global_handler("nosuchnick", self.on_pass_if)
self.conn.add_global_handler("nosuchchannel", self.on_pass_if)
self.conn.add_global_handler("cannotsendtochan", self.on_pass0)
self.conn.add_global_handler("nicknameinuse", self.on_nicknameinuse)
self.conn.add_global_handler("erroneusnickname", self.on_erroneusnickname)
self.conn.add_global_handler("unavailresource", self.on_unavailresource)
self.conn.add_global_handler("usernotinchannel", self.on_pass1)
self.conn.add_global_handler("notonchannel", self.on_pass0)
self.conn.add_global_handler("useronchannel", self.on_pass1)
self.conn.add_global_handler("nologin", self.on_pass1)
self.conn.add_global_handler("keyset", self.on_pass)
self.conn.add_global_handler("channelisfull", self.on_pass)
self.conn.add_global_handler("inviteonlychan", self.on_pass)
self.conn.add_global_handler("bannedfromchan", self.on_pass)
self.conn.add_global_handler("badchannelkey", self.on_pass0)
self.conn.add_global_handler("badchanmask", self.on_pass)
self.conn.add_global_handler("nochanmodes", self.on_pass)
self.conn.add_global_handler("banlistfull", self.on_pass)
self.conn.add_global_handler("cannotknock", self.on_pass)
self.conn.add_global_handler("chanoprivsneeded", self.on_pass0)
# protocol
# FIXME: error
self.conn.add_global_handler("join", self.on_join)
self.conn.add_global_handler("join", self.on_join_update_host)
self.conn.add_global_handler("kick", self.on_pass)
self.conn.add_global_handler("mode", self.on_pass)
self.conn.add_global_handler("part", self.on_part)
self.conn.add_global_handler("privmsg", self.on_privmsg)
self.conn.add_global_handler("privnotice", self.on_privnotice)
self.conn.add_global_handler("pubmsg", self.on_pass)
self.conn.add_global_handler("pubnotice", self.on_pass)
self.conn.add_global_handler("quit", self.on_quit)
self.conn.add_global_handler("invite", self.on_invite)
self.conn.add_global_handler("wallops", self.on_wallops)
# FIXME: action
self.conn.add_global_handler("topic", self.on_pass)
self.conn.add_global_handler("nick", self.on_nick)
self.conn.add_global_handler("umode", self.on_umode)
self.conn.add_global_handler("kill", self.on_kill)
self.conn.add_global_handler("error", self.on_error)
# whois
self.conn.add_global_handler("whoisuser", self.on_whoisuser)
self.conn.add_global_handler("whoisserver", self.on_whoisserver)
self.conn.add_global_handler("whoischannels", self.on_whoischannels)
self.conn.add_global_handler("whoisidle", self.on_whoisidle)
self.conn.add_global_handler("whoisaccount", self.on_whoisaccount) # is logged in as
self.conn.add_global_handler("whoisoperator", self.on_whoisoperator)
self.conn.add_global_handler("338", self.on_whoisrealhost) # is actually using host
self.conn.add_global_handler("away", self.on_away)
self.conn.add_global_handler("endofwhois", self.on_endofwhois)
# generated
self.conn.add_global_handler("ctcp", self.on_ctcp)
self.conn.add_global_handler("ctcpreply", self.on_ctcpreply)
self.conn.add_global_handler("action", lambda conn, event: None)
# anything not handled above
self.conn.add_global_handler("unhandled_events", self.on_server_message)
if not self.connected:
self.connected = True
await self.save()
self.disconnect = False
# run connection registration (SASL, user, nick)
await self.conn.register()
return
except TimeoutError:
self.send_notice("Connection timed out.")
except irc.client.ServerConnectionError as e:
self.send_notice(str(e))
self.send_notice(f"Failed to connect: {str(e)}")
self.disconnect = True
except Exception as e:
self.send_notice(f"Failed to connect: {str(e)}")
if not self.disconnect:
self.send_notice(f"Tried all servers, waiting {backoff} seconds before trying again.")
await asyncio.sleep(backoff)
if backoff < 60:
backoff += 5
self.send_notice("Connection aborted.")
def on_disconnect(self, conn, event) -> None:
self.conn.disconnect()
self.conn.close()
self.conn = None
if self.connected and not self.disconnect:
self.send_notice("Disconnected, reconnecting...")
async def later():
await asyncio.sleep(10)
if not self.disconnect:
await self.connect()
asyncio.ensure_future(later())
else:
self.send_notice("Disconnected.")
@ircroom_event()
def on_pass(self, conn, event) -> None:
logging.warning(f"IRC room event '{event.type}' fell through, target was from command.")
source = self.source_text(conn, event)
args = " ".join(event.arguments)
target = str(event.target)
self.send_notice_html(f"<b>{source} {event.type} {target}</b> {html.escape(args)}")
@ircroom_event()
def on_pass_if(self, conn, event) -> None:
self.send_notice(" ".join(event.arguments))
@ircroom_event()
def on_pass_or_ignore(self, conn, event) -> None:
pass
@ircroom_event(target_arg=0)
def on_pass0(self, conn, event) -> None:
logging.warning(f"IRC room event '{event.type}' fell through, target was '{event.arguments[0]}'.")
self.send_notice(" ".join(event.arguments))
@ircroom_event(target_arg=1)
def on_pass1(self, conn, event) -> None:
logging.warning(f"IRC room event '{event.type}' fell through, target was '{event.arguments[1]}'.")
self.send_notice(" ".join(event.arguments))
def on_server_message(self, conn, event) -> None:
# test if the first argument is an ongoing whois target
if event.arguments[0].lower() in self.whois_data:
data = self.whois_data[event.arguments[0].lower()]
if "extra" not in data:
data["extra"] = []
data["extra"].append(" ".join(event.arguments[1:]))
else:
self.send_notice(" ".join(event.arguments))
def on_umodeis(self, conn, event) -> None:
self.send_notice(f"Your user mode is: {event.arguments[0]}")
def on_umode(self, conn, event) -> None:
self.send_notice(f"User mode changed for {event.target}: {event.arguments[0]}")
def source_text(self, conn, event) -> str:
source = None
if event.source is not None:
source = str(event.source.nick)
if event.source.user is not None and event.source.host is not None:
source += f" ({event.source.user}@{event.source.host})"
else:
source = conn.server
return source
@ircroom_event()
def on_privnotice(self, conn, event) -> None:
# show unhandled notices in server room
source = self.source_text(conn, event)
plain, formatted = parse_irc_formatting(event.arguments[0])
self.send_notice_html(f"Notice from <b>{source}:</b> {formatted if formatted else html.escape(plain)}")
@ircroom_event()
def on_ctcp(self, conn, event) -> None:
source = self.source_text(conn, event)
reply = None
if self.allow_ctcp:
if event.arguments[0] == "VERSION":
reply = f"VERSION Heisenbridge v{__version__}"
elif event.arguments[0] == "PING" and len(event.arguments) > 1:
reply = f"PING {event.arguments[1]}"
elif event.arguments[0] == "TIME":
reply = f"TIME {unix_to_local(time())}"
else:
self.send_notice_html(
f"<b>{source}</b> requested unknown <b>CTCP {html.escape(' '.join(event.arguments))}</b>"
)
if reply is not None:
self.conn.ctcp_reply(event.source.nick, reply)
self.send_notice_html(
f"<b>{source}</b> requested CTCP <b>{html.escape(event.arguments[0])}</b> -> {html.escape(reply)}"
)
else:
self.send_notice_html(f"<b>{source}</b> requested CTCP <b>{html.escape(event.arguments[0])}</b> (ignored)")
@ircroom_event()
def on_ctcpreply(self, conn, event) -> None:
command = event.arguments[0].upper()
reply = event.arguments[1]
self.send_notice_html(
f"CTCP <b>{html.escape(command)}</b> reply from <b>{event.source.nick}</b>: {html.escape(reply)}"
)
def on_welcome(self, conn, event) -> None:
self.on_server_message(conn, event)
async def later():
await asyncio.sleep(2)
if self.autocmd is not None:
self.send_notice("Executing autocmd and waiting a bit before joining channels...")
try:
await self.commands.trigger(
self.autocmd, allowed=["RAW", "MSG", "NICKSERV", "NS", "CHANSERV", "CS", "UMODE", "WAIT"]
)
except Exception as e:
self.send_notice(f"Autocmd failed: {str(e)}")
await asyncio.sleep(4)
# detect disconnect before we get to join
if not self.conn or not self.conn.connected:
return
channels = []
keyed_channels = []
for room in self.rooms.values():
if type(room) is ChannelRoom or type(room) is PlumbedRoom:
if room.key:
keyed_channels.append((room.name, room.key))
else:
channels.append(room.name)
if len(channels) > 0:
self.send_notice(f"Joining channels {', '.join(channels)}")
self.conn.join(",".join(channels))
if len(keyed_channels) > 0:
for channel, key in keyed_channels:
self.send_notice(f"Joining {channel} with a key")
self.conn.join(channel, key)
asyncio.ensure_future(later())
@ircroom_event()
def on_privmsg(self, conn, event) -> None:
        # direct messages are keyed by the sender's nick rather than the target
target = event.source.nick.lower()
if target not in self.rooms:
if self.autoquery:
async def later():
# reuse query command to create a room
await self.cmd_query(Namespace(nick=event.source.nick, message=[]))
# push the message
room = self.rooms[target]
room.on_privmsg(conn, event)
asyncio.ensure_future(later())
else:
source = self.source_text(conn, event)
self.send_notice_html(f"Message from <b>{source}:</b> {html.escape(event.arguments[0])}")
else:
room = self.rooms[target]
if not room.in_room(self.user_id):
asyncio.ensure_future(self.serv.api.post_room_invite(self.rooms[target].id, self.user_id))
@ircroom_event()
def on_join(self, conn, event) -> None:
target = event.target.lower()
logging.debug(f"Handling JOIN to {target} by {event.source.nick} (we are {self.conn.real_nickname})")
# create a ChannelRoom in response to JOIN
if event.source.nick == self.conn.real_nickname and target not in self.rooms:
logging.debug("Pre-flight check for JOIN ok, going to create it...")
self.rooms[target] = ChannelRoom.create(self, event.target)
# pass this event through
self.rooms[target].on_join(conn, event)
def on_join_update_host(self, conn, event) -> None:
        # update our visible host; it affects how long outgoing messages can be before they are split
if event.source.nick == self.conn.real_nickname and self.real_host != event.source.host:
self.real_host = event.source.host
logging.debug(f"Self host updated to '{self.real_host}'")
@ircroom_event()
def on_part(self, conn, event) -> None:
if conn.real_nickname == event.source.nick:
self.send_notice_html(f"You left <b>{html.escape(event.target)}</b>")
else:
# should usually never end up here
self.send_notice_html(f"<b>{html.escape(event.source.nick)}</b> left <b>{html.escape(event.target)}</b>")
def on_quit(self, conn, event) -> None:
irc_user_id = self.serv.irc_user_id(self.name, event.source.nick)
# leave channels
for room in self.rooms.values():
if type(room) is ChannelRoom or type(room) is PlumbedRoom:
room._remove_puppet(irc_user_id, f"Quit: {event.arguments[0]}")
def on_nick(self, conn, event) -> None:
# the IRC library changes real_nickname before running handlers
if event.target == self.conn.real_nickname:
logging.debug(f"Detected own nick change to {event.target}")
if event.target == self.get_nick():
self.send_notice(f"You're now known as {event.target}")
old_irc_user_id = self.serv.irc_user_id(self.name, event.source.nick)
new_irc_user_id = self.serv.irc_user_id(self.name, event.target)
        # special case where only the case changes; ensure the displayname gets updated eventually
if old_irc_user_id == new_irc_user_id:
asyncio.ensure_future(self.serv.ensure_irc_user_id(self.name, event.target))
# leave and join channels
for room in self.rooms.values():
if type(room) is ChannelRoom or type(room) is PlumbedRoom:
room.rename(event.source.nick, event.target)
def on_nicknameinuse(self, conn, event) -> None:
self.send_notice(f"Nickname {event.arguments[0]} is in use")
if self.conn.real_nickname == "":
newnick = event.arguments[0] + "_"
self.conn.nick(newnick)
self.keepnick()
def on_erroneusnickname(self, conn, event) -> None:
self.send_notice(f"Nickname {event.arguments[0]} is erroneus and was rejected by the server")
@ircroom_event()
def on_unavailresource(self, conn, event) -> None:
if event.arguments[0][0] not in ["#", "!", "&"]:
self.send_notice(f"Nickname {event.arguments[0]} is currently unavailable")
if self.conn.real_nickname == "":
newnick = event.arguments[0] + "_"
self.conn.nick(newnick)
self.keepnick()
else:
self.send_notice(f"Channel {event.arguments[0]} is currently unavailable")
def keepnick(self):
if self.keepnick_task:
self.keepnick_task.cancel()
self.send_notice(f"Trying to set nickname to {self.get_nick()} again after five minutes.")
def try_keepnick():
self.keepnick_task = None
if not self.conn or not self.conn.connected:
return
self.conn.nick(self.get_nick())
self.keepnick_task = asyncio.get_event_loop().call_later(300, try_keepnick)
def on_invite(self, conn, event) -> None:
rejoin = ""
target = event.arguments[0].lower()
if self.rejoin_invite and target in self.rooms:
self.conn.join(event.arguments[0])
rejoin = " (rejoin on invite is enabled, joining back)"
self.send_notice_html(
f"<b>{event.source.nick}</b> has invited you to <b>{html.escape(event.arguments[0])}</b>{rejoin}"
)
def on_wallops(self, conn, event) -> None:
plain, formatted = parse_irc_formatting(event.target)
self.send_notice_html(f"<b>WALLOPS {event.source.nick}</b>: {formatted if formatted else html.escape(plain)}")
@ircroom_event()
def on_kill(self, conn, event) -> None:
if event.target == conn.real_nickname:
source = self.source_text(conn, event)
self.send_notice_html(f"Killed by <b>{source}</b>: {html.escape(event.arguments[0])}")
# do not reconnect after KILL
self.connected = False
def on_error(self, conn, event) -> None:
self.send_notice_html(f"<b>ERROR</b>: {html.escape(event.target)}")
def on_whoisuser(self, conn, event) -> None:
data = self.whois_data[event.arguments[0].lower()]
data["nick"] = event.arguments[0]
data["host"] = f"{event.arguments[1]}@{event.arguments[2]}"
data["realname"] = event.arguments[4]
def on_whoisserver(self, conn, event) -> None:
data = self.whois_data[event.arguments[0].lower()]
data["server"] = f"{event.arguments[1]} ({event.arguments[2]})"
def on_whoischannels(self, conn, event) -> None:
data = self.whois_data[event.arguments[0].lower()]
data["channels"] = event.arguments[1]
def on_whoisidle(self, conn, event) -> None:
data = self.whois_data[event.arguments[0].lower()]
data["idle"] = str(datetime.timedelta(seconds=int(event.arguments[1])))
if len(event.arguments) > 2:
data["signon"] = unix_to_local(int(event.arguments[2]))
def on_whoisaccount(self, conn, event) -> None:
data = self.whois_data[event.arguments[0].lower()]
data["account"] = event.arguments[1]
def on_whoisoperator(self, conn, event) -> None:
data = self.whois_data[event.arguments[0].lower()]
data["ircop"] = event.arguments[1]
def on_whoisrealhost(self, conn, event) -> None:
data = self.whois_data[event.arguments[0].lower()]
data["realhost"] = event.arguments[1]
def on_away(self, conn, event) -> None:
if event.arguments[0].lower() in self.whois_data:
self.whois_data[event.arguments[0].lower()]["away"] = event.arguments[1]
else:
self.send_notice(f"{event.arguments[0]} is away: {event.arguments[1]}")
def on_endofwhois(self, conn, event) -> None:
nick = event.arguments[0].lower()
data = self.whois_data[nick]
del self.whois_data[nick]
reply = []
fallback = []
reply.append("<table>")
for k in [
"nick",
"host",
"realname",
"realhost",
"away",
"channels",
"server",
"ircop",
"idle",
"signon",
"account",
]:
if k in data:
reply.append(f"<tr><td>{k}</td><td>{html.escape(data[k])}</td>")
fallback.append(f"{k}: {data[k]}")
if "extra" in data:
for v in data["extra"]:
reply.append(f"<tr><td></td><td>{html.escape(v)}</td>")
fallback.append(f"{data['nick']} {v}")
reply.append("</table>")
# forward whois reply to a DM if exists
target = self
if nick in self.rooms:
target = self.rooms[nick]
target.send_notice(formatted="".join(reply), text="\n".join(fallback))
|
py | 1a33b9a1311657f95da8ae8efc3e8eb7f4cf0539 | import pymysql
# Open database connection
db = pymysql.connect(host="web101", user="root", password="D0n0th4ck", database="crypto")
# prepare a cursor object using cursor() method
cursor = db.cursor()
# Prepare SQL query to UPDATE required records
sql = "UPDATE EMPLOYEE SET AGE = AGE + 1
WHERE SEX = '%c'" % ('M')
try:
# Execute the SQL command
cursor.execute(sql)
# Commit your changes in the database
db.commit()
except:
# Rollback in case there is any error
db.rollback()
# disconnect from server
db.close() |
py | 1a33badb3d0022cd5d22e039d2b6ebc4635bde14 | # coding=utf-8
import os, re
import time
import string
# Count the memory used by all processes matching a given name (one name may map to several processes)
def countProcessMemoey(processName):
pattern = re.compile(r'([^\s]+)\s+(\d+)\s.*\s([^\s]+\sK)')
cmd = 'tasklist /fi "imagename eq ' + processName + '"' + ' | findstr.exe ' + processName
result = os.popen(cmd).read()
resultList = result.split("\n")
total=0
print("resultList ==",resultList)
for srcLine in resultList:
srcLine = "".join(srcLine.split('\n'))
if len(srcLine) == 0:
break
m = pattern.search(srcLine)
if m == None:
continue
        # since this may be measuring python processes, filter out this script itself by pid
if str(os.getpid()) == m.group(2):
continue
ori_mem = m.group(3).replace(',','')
ori_mem = ori_mem.replace(' K','')
ori_mem = ori_mem.replace(r'\sK','')
        memEach = int(ori_mem)
# print 'ProcessName:'+ m.group(1) + '\tPID:' + m.group(2) + '\tmemory size:%.2f'% (memEach * 1.0 /1024), 'M'
total += memEach
print(total)
print("*" * 58)
#if __name__ == '__main__':
# process name to monitor
processName = 'postgres.exe'
for i in range(1,1):
countProcessMemoey(processName)
time.sleep(5) |
py | 1a33bae6b2a90993cc0d6a33a8022a54f508775d | import pulumi
import pulumi.runtime
from ... import tables
class CertificateSigningRequestList(pulumi.CustomResource):
def __init__(self, __name__, __opts__=None, items=None, metadata=None):
if not __name__:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(__name__, str):
raise TypeError('Expected resource name to be a string')
if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
__props__['apiVersion'] = 'certificates.k8s.io/v1beta1'
__props__['kind'] = 'CertificateSigningRequestList'
if items is None:
raise TypeError('Missing required property items')
__props__['items'] = items
__props__['metadata'] = metadata
super(CertificateSigningRequestList, self).__init__(
"kubernetes:certificates.k8s.io/v1beta1:CertificateSigningRequestList",
__name__,
__props__,
__opts__)
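    # translate property names between the Kubernetes API casing and Python snake_case using the shared tables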
def translate_output_property(self, prop: str) -> str:
return tables._CASING_FORWARD_TABLE.get(prop) or prop
def translate_input_property(self, prop: str) -> str:
return tables._CASING_BACKWARD_TABLE.get(prop) or prop
|
py | 1a33bc0e9111c83423c52ab710e688664c3bddbd | import sys
if sys.version_info[0] < 3:
    import ConfigParser as configparser
else:
    import configparser
if __name__ == '__main__':
import re
CONFIG = [
{
'section_name': 'STARTUP',
'section_title': 'Startup Configuration',
'questions': [
{
'variable': 'STARTUP_MSG_DURATION',
'prompt': 'Startup mode duration (in seconds)?',
'help': 'Sets how long in seconds the startup mode will last',
'default': '5'
}
]
},
{
'section_name': 'DISPLAY',
'section_title': 'Display Configuration',
'questions': [
{
'prompt': 'Display type?',
'variable': 'DISPLAY_DRIVER',
'allowed': [ 'winstar_weg', 'hd44780', 'hd44780_i2c', 'hd44780_mcp23008', 'luma_i2c' ],
'help': 'Configures pydPiper for the display type you have installed',
'followup_questions': {
'^winstar_weg$|^hd44780$':
[
{ 'prompt': 'Register select pin?', 'variable': 'DISPLAY_PIN_RS', 'default': '7', 'help': 'What GPIO pin is the display\'s register select line connected to' },
{ 'prompt': 'Enable pin?', 'variable': 'DISPLAY_PIN_E', 'default': '8', 'help': 'What GPIO pin is the display\'s enable line connected to' },
{ 'prompt': 'Data 4 pin?', 'variable': 'DISPLAY_PIN_D4', 'default': '25', 'help': 'What GPIO pin is the display\'s data 4 line connected to' },
{ 'prompt': 'Data 5 pin?', 'variable': 'DISPLAY_PIN_D5', 'default': '24', 'help': 'What GPIO pin is the display\'s data 5 line connected to' },
{ 'prompt': 'Data 6 pin?', 'variable': 'DISPLAY_PIN_D6', 'default': '23', 'help': 'What GPIO pin is the display\'s data 6 line connected to' },
{ 'prompt': 'Data 7 pin?', 'variable': 'DISPLAY_PIN_D7', 'default': '27', 'help': 'What GPIO pin is the display\'s data 7 line connected to' }
],
'^hd44780_i2c$|^hd44780_mcp23008$|^luma_i2c$':
[
{ 'prompt': 'I2C Port?', 'variable': 'DISPLAY_I2C_PORT', 'default': '1', 'help': 'What I2C bus is the display connected to' },
{ 'prompt': 'I2C Address?', 'variable': 'DISPLAY_I2C_ADDRESS', 'default': '0x3d', 'help': 'What is the display\'s I2C address' }
],
'^luma_i2c$':
[
{ 'prompt': 'Type of Display?', 'variable': 'DISPLAY_DEVICETYPE', 'allowed': ['ssd1306', 'sh1106', 'ssd1322', 'ssd1325', 'ssd1331'], 'default': 'ssd1306', 'help': 'What is the display device type' },
{ 'prompt': 'Width of display (in pixels)?', 'variable': 'DISPLAY_WIDTH', 'default': '128', 'help': 'What is the horizontal resolution of the display in pixels' },
{ 'prompt': 'Height of display (in pixels)?', 'variable': 'DISPLAY_HEIGHT', 'default': '64', 'help': 'What is the vertical resolution of the display in pixels' },
],
'^winstar_weg$':
[
{ 'prompt': 'Width of display (in pixels)?', 'variable': 'DISPLAY_WIDTH', 'default': '80', 'help': 'What is the horizontal resolution of the display in pixels. Note: even if using the character version of the winstar, the value you enter should be in pixels. For reference, a 16x2 character display has a horizontal resolution of 80' },
{ 'prompt': 'Height of display (in pixels)?', 'variable': 'DISPLAY_HEIGHT', 'default': '16', 'help': 'What is the vertical resolution of the display in pixels. Note: even if using the character version of the winstar, the value you enter should be in pixels. For reference, a 16x2 character display has a vertical resolution of 16' },
{
'prompt': 'Enable pulse duration (in microseconds)?',
'variable': 'DISPLAY_ENABLE_DURATION',
'default': '0.1',
'help': 'Determines how long in microseconds the enable pulse should last. This should be set as low as possible but setting it too low may cause display instability. Recommended value is 1 ms for LCDs and 0.1 ms for OLEDs'
}, ],
'^hd44780$|^hd44780_i2c$|^hd44780_mcp23008$':
[
{ 'prompt': 'Width of display (in pixels)?', 'variable': 'DISPLAY_WIDTH', 'default': '80', 'help': 'What is the horizontal resolution of the display in pixels. Note: even though the hd44780 is a character device, the value you enter should be in pixels. For reference, a 16x2 character display has a horizontal resolution of 80' },
{ 'prompt': 'Height of display (in pixels)?', 'variable': 'DISPLAY_HEIGHT', 'default': '16', 'help': 'What is the vertical resolution of the display in pixels. Note: even though the hd44780 is a character device, the value you enter should be in pixels. For reference, a 16x2 character display has a vertical resolution of 16' },
{
'prompt': 'Enable pulse duration (in microseconds)?',
'variable': 'DISPLAY_ENABLE_DURATION',
'default': '1',
'help': 'Determines how long in microseconds the enable pulse should last. This should be set as low as possible but setting it too low may cause display instability. Recommended value is 1 ms for LCDs and 0.1 ms for OLEDs'
},
],
}
},
{
'prompt': 'Location of the pagefile?',
'variable': 'PAGEFILE',
'help': 'Sets which page file should be used to determine what and when to display content',
'default': 'pages_lcd_16x2.py'
},
{
'prompt': 'Animation Smoothing (in seconds)?',
'variable': 'ANIMATION_SMOOTHING',
'default': '0.15',
'help': 'Determines how often the display will attempt to update. This is used to smooth the animation effects'
}
]
},
{
'section_name': 'SYSTEM',
'section_title': 'System configuration',
'questions': [
{
'prompt': 'Location of log file?',
'variable': 'LOGFILE',
'default': '/var/log/pydPiper.log',
'help': 'Where should the log file be written to?'
},
{
'prompt': 'Logging Level?',
'variable': 'LOGLEVEL',
'allowed': ['debug', 'info', 'warning', 'error', 'critical'],
'casesensitive': False,
'default': 'info',
'help': 'Set logging level. Normal logging for the system is info. Setting to debug will provide much more information about how the system is operating which is useful for debugging'
},
{
'prompt': 'Time Zone?',
'variable': 'TIMEZONE',
'default': 'US/Eastern',
'help': 'Sets the time zone for the system. Use ISO 3166 compliant values. See https://en.wikipedia.org/wiki/List_of_tz_database_time_zones'
},
{
'prompt': '24-hour clock?',
'variable': 'TIME24HOUR',
'default': 'FALSE',
'casesensitive': False,
'allowed': ['true', 'false'],
'help': 'Determines whether the (deprecated) variable "current_time" is formatted as a 24 hour or 12 hour clock'
},
{
'prompt': 'Temperature Scale?',
'variable': 'TEMPERATURE',
'default': 'fahrenheit',
'casesensitive': False,
'allowed': ['fahrenheit', 'celsius'],
'help': 'Determines whether the temperature values will be shown in Fahrenheit or Celsius'
},
{
'prompt': 'Enable weather?',
'allowed': ['y','n','yes','no'],
'default': 'n',
'help': 'Do you want to enable the weather system? Requires an API key from a supported weather provider.',
'casesensitive': False,
'followup_questions': {
'^y$|^yes$':
[
{
'prompt': 'Weather service?',
'variable': 'WEATHER_SERVICE',
'default': 'accuweather',
'allowed': ['accuweather', 'wunderground', 'weerlive'],
'casesensitive': False,
'followup_questions': {
'^accuweather$|^wunderground$|^weerlive$':[
{
'prompt': 'API key?',
'variable': 'WEATHER_API',
'help': 'If using accuweather, an API can be requested from http://developer.accuweather.com. Note: Weather Underground no longer supports free API keys. weerlive.nl API key can be requested from http://weerlive.nl/delen.php'
},
{
'prompt': 'Location?',
'variable': 'WEATHER_LOCATION',
                                    'help': 'You must provide a valid location. If using Accuweather, these can be searched for using the API calls shown on https://developer.accuweather.com/accuweather-locations-api/apis'
}
]
}
},
]
}
}
]
},
{
'section_name': 'SOURCE',
'section_title': 'Music distribution',
'questions': [
{
'prompt': 'Name of distribution?',
'variable': 'SOURCE_TYPE',
'allowed': ['volumio', 'moode', 'rune', 'lms', 'mpd', 'spop'],
'casesensitive': False,
'mandatory': True,
'followup_questions': {
'^volumio$':
[
{
'prompt': 'Server address?',
'variable': 'VOLUMIO_SERVER',
'default': 'localhost'
},
{
'prompt': 'Port?',
'variable': 'VOLUMIO_PORT',
'default': '3000'
}
],
'^rune$':
[
{
'prompt': 'Server address?',
'variable': 'RUNE_SERVER',
'default': 'localhost'
},
{
'prompt': 'Port?',
'variable': 'RUNE_PORT',
'default': '6379'
}
],
'^lms$':
[
{
'prompt': 'Server address?',
'variable': 'LMS_SERVER',
'default': 'localhost'
},
{
'prompt': 'Port?',
'variable': 'LMS_PORT',
'default': '9090'
},
{
'prompt': 'Username?',
'variable': 'LMS_USER',
},
{
'prompt': 'Password?',
'variable': 'LMS_PASSWORD',
},
{
'prompt': 'LMS Player MAC address?',
'variable': 'LMS_PLAYER',
}
],
'^mpd$|^moode$':
[
{
'prompt': 'Server address?',
'variable': 'MPD_SERVER',
'default': 'localhost'
},
{
'prompt': 'Port?',
'variable': 'MPD_PORT',
'default': '6600'
},
{
'prompt': 'Password?',
'variable': 'MPD_Password',
}
],
'^spop$':
[
{
'prompt': 'Server address?',
'variable': 'SPOP_SERVER',
'default': 'localhost'
},
{
'prompt': 'Port?',
'variable': 'SPOP_PORT',
'default': '6602'
},
{
'prompt': 'Password?',
'variable': 'SPOP_Password',
}
]
}
}
]
}
]
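    # ensure the section exists in the config file, print its title and ask its questions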
def process_section(section, config):
# if section does not exist, add it
try:
config.add_section(section['section_name'])
except:
pass
print(('\n'+section['section_title'].upper()+'\n'))
process_questions(section['section_name'],section['questions'],config)
def process_questions(section_name, questions, config):
for question in questions:
# If an previous value is available in the config file make it the default answer
try:
question['default'] = config.get(section_name, question['variable'])
except:
pass
value = ask_question(question)
if value and 'variable' in question:
config.set(section_name, question['variable'], value)
if 'followup_questions' in question:
                for match, followup_questions in list(question['followup_questions'].items()):
                    if re.match(match, value):
process_questions(section_name, followup_questions, config)
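    # prompt for one question, handling defaults, '?'/help, allowed values and mandatory checks; returns the answer string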
def ask_question(question):
prompt = question['prompt'] + ' [' + question['default'] + ']: ' if 'default' in question else question['prompt'] + ': '
while True:
            if sys.version_info[0] < 3:
                value = raw_input(prompt)
            else:
                value = input(prompt)
if value == '':
value = question.get('default','')
if 'casesensitive' in question and not question['casesensitive']:
value = value.lower()
if 'allowed' in question:
question['allowed'] = [allowed_value.lower() for allowed_value in question['allowed']]
if value == '?' or value.lower() == 'help':
if 'help' in question:
print((question['help']))
if 'allowed' in question:
line = 'Possible values are: '
for possible in question['allowed']:
line += possible + ', '
line = line[:-2]
print (line)
continue
if 'allowed' in question:
if value not in question['allowed'] and value:
print(('{0} is not a valid value'.format(value)))
continue
if 'mandatory' in question and question['mandatory'] is True and not value:
print ('This value can not be blank. Please enter a valid value')
continue
return value
print ('\nCreating configuration file for pydPiper')
print ('----------------------------------------')
    config = configparser.RawConfigParser()
    serviceconfig = configparser.RawConfigParser()
serviceconfig.optionxform = str
config.read('pydPiper.cfg')
for section in CONFIG:
process_section(section,config)
print ('\nUPDATING pydPiper.cfg')
with open('pydPiper.cfg', 'w') as fp:
config.write(fp)
serviceconfig.add_section('Unit')
serviceconfig.add_section('Service')
serviceconfig.add_section('Install')
serviceconfig.set('Unit', 'Description', 'pydPiper')
serviceconfig.set('Service', 'Restart', 'always')
serviceconfig.set('Install', 'WantedBy', 'multi-user.target')
if config.get('SOURCE', 'source_type') == 'volumio':
serviceconfig.set('Unit', 'Requires', 'docker.service')
serviceconfig.set('Unit', 'After', 'volumio.service')
serviceconfig.set('Service', 'ExecStart', '/usr/bin/docker run --network=host --privileged -v /var/log:/var/log:rw -v /home/volumio/pydPiper:/app:rw dhrone/pydpiper:v0.31-alpha python /app/pydPiper.py')
elif config.get('SOURCE', 'source_type') == 'moode':
serviceconfig.set('Unit', 'Requires', 'docker.service')
serviceconfig.set('Unit', 'After', 'mpd.service docker.service')
serviceconfig.set('Service', 'ExecStart', '/usr/bin/docker run --network=host --privileged -v /var/log:/var/log:rw -v /home/pi/pydPiper:/app:rw dhrone/pydpiper:v0.31-alpha python /app/pydPiper.py')
elif config.get('SOURCE', 'source_type') == 'rune':
serviceconfig.set('Unit', 'After', 'network.target redis.target')
serviceconfig.set('Service', 'WorkingDirectory', '/root/pydPiper')
serviceconfig.set('Service', 'ExecStart', '/root/.local/bin/pipenv run python2 pydPiper.py')
if config.get('SOURCE', 'source_type') in ['volumio', 'moode', 'rune']:
print ('Creating pydpiper.service file\n')
with open('pydpiper.service', 'w') as fp:
serviceconfig.write(fp)
|
py | 1a33bc4f6391b4a5cc55b40c7a0c771dc34ed83e | import grpc
from concurrent import futures
import greeter_pb2
import greeter_pb2_grpc
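# servicer implementing the Greeter service from the generated greeter_pb2 / greeter_pb2_grpc modules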
class GreeterServicer(greeter_pb2_grpc.GreeterServicer):
def Echo(self, request, context):
response = greeter_pb2.HelloResponse()
response.name = request.name
return response
def Greet(self, request, context):
response = greeter_pb2.HelloResponse()
response.name = 'Result for ' + request.result.title + ' contains ' + request.result.snippets
return response
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
greeter_pb2_grpc.add_GreeterServicer_to_server(GreeterServicer(), server)
server.add_insecure_port('[::]:50051')
# start the server and block until terminated (not in the original snippet, but required for the server to serve)
server.start()
server.wait_for_termination()
|
py | 1a33bcfb331ffa38b4fc43c9c4113023ce2afdd2 | import torch
import pyredner.transform as transform
import redner
import math
import pyredner
from typing import Tuple, Optional, List
class Camera:
"""
Redner supports four types of cameras\: perspective, orthographic, fisheye, and panorama.
The camera takes a look-at transform or a cam_to_world matrix to
transform from camera local space to world space. It can also optionally
take an intrinsic matrix that models field of view and camera skew.
Args
====
position: Optional[torch.Tensor]
the origin of the camera, 1-d tensor with size 3 and type float32
look_at: Optional[torch.Tensor]
the point camera is looking at, 1-d tensor with size 3 and type float32
up: Optional[torch.tensor]
the up vector of the camera, 1-d tensor with size 3 and type float32
fov: Optional[torch.Tensor]
the field of view of the camera in degrees
no effect if the camera is a fisheye or panorama camera
1-d tensor with size 1 and type float32
clip_near: float
the near clipping plane of the camera; must be > 0
resolution: Tuple[int, int]
the size of the output image in (height, width)
cam_to_world: Optional[torch.Tensor]
overrides position, look_at, up vectors
4x4 matrix, optional
intrinsic_mat: Optional[torch.Tensor]
a matrix that transforms a point in camera space before the point
is projected to 2D screen space
used for modelling field of view and camera skewing
after the multiplication the point should be in
[-1, 1/aspect_ratio] x [1, -1/aspect_ratio] in homogeneous coordinates
the projection is then carried by the specific camera types
the perspective camera normalizes the homogeneous coordinates,
while the orthographic camera drops the Z coordinate.
ignored by fisheye or panorama cameras
overrides fov
3x3 matrix, optional
camera_type: render.camera_type
the type of the camera (perspective, orthographic, fisheye, or panorama)
fisheye: bool
whether the camera is a fisheye camera
(legacy parameter just to ensure compatibility).
"""
def __init__(self,
position: Optional[torch.Tensor] = None,
look_at: Optional[torch.Tensor] = None,
up: Optional[torch.Tensor] = None,
fov: Optional[torch.Tensor] = None,
clip_near: float = 1e-4,
resolution: Tuple[int, int] = (256, 256),
cam_to_world: Optional[torch.Tensor] = None,
intrinsic_mat: Optional[torch.Tensor] = None,
camera_type = pyredner.camera_type.perspective,
fisheye: bool = False):
if position is not None:
assert(position.dtype == torch.float32)
assert(len(position.shape) == 1 and position.shape[0] == 3)
if look_at is not None:
assert(look_at.dtype == torch.float32)
assert(len(look_at.shape) == 1 and look_at.shape[0] == 3)
if up is not None:
assert(up.dtype == torch.float32)
assert(len(up.shape) == 1 and up.shape[0] == 3)
if fov is not None:
assert(fov.dtype == torch.float32)
assert(len(fov.shape) == 1 and fov.shape[0] == 1)
assert(isinstance(clip_near, float))
if position is None and look_at is None and up is None:
assert(cam_to_world is not None)
self.position = position
self.look_at = look_at
self.up = up
self._fov = fov
self._cam_to_world = cam_to_world
if cam_to_world is not None:
self.world_to_cam = torch.inverse(self.cam_to_world).contiguous()
else:
self.world_to_cam = None
if intrinsic_mat is None:
if camera_type == redner.CameraType.perspective:
fov_factor = 1.0 / torch.tan(transform.radians(0.5 * fov))
o = torch.ones([1], dtype=torch.float32)
diag = torch.cat([fov_factor, fov_factor, o], 0)
self._intrinsic_mat = torch.diag(diag).contiguous()
else:
self._intrinsic_mat = torch.eye(3, dtype=torch.float32)
else:
self._intrinsic_mat = intrinsic_mat
self.intrinsic_mat_inv = torch.inverse(self.intrinsic_mat).contiguous()
self.clip_near = clip_near
self.resolution = resolution
self.camera_type = camera_type
if fisheye:
self.camera_type = pyredner.camera_type.fisheye
@property
def fov(self):
return self._fov
@fov.setter
def fov(self, value):
self._fov = value
fov_factor = 1.0 / torch.tan(transform.radians(0.5 * self._fov))
o = torch.ones([1], dtype=torch.float32)
diag = torch.cat([fov_factor, fov_factor, o], 0)
self._intrinsic_mat = torch.diag(diag).contiguous()
self.intrinsic_mat_inv = torch.inverse(self._intrinsic_mat).contiguous()
@property
def intrinsic_mat(self):
return self._intrinsic_mat
@intrinsic_mat.setter
def intrinsic_mat(self, value):
if value is not None:
self._intrinsic_mat = value
self.intrinsic_mat_inv = torch.inverse(self._intrinsic_mat).contiguous()
else:
assert(self.fov is not None)
self.fov = self._fov
@property
def cam_to_world(self):
return self._cam_to_world
@cam_to_world.setter
def cam_to_world(self, value):
if value is not None:
self._cam_to_world = value
self.world_to_cam = torch.inverse(self.cam_to_world).contiguous()
else:
self._cam_to_world = None
self.world_to_cam = None
def state_dict(self):
return {
'position': self._position,
'look_at': self._look_at,
'up': self._up,
'fov': self._fov,
'cam_to_world': self._cam_to_world,
'intrinsic_mat': self._intrinsic_mat,
'clip_near': self.clip_near,
'resolution': self.resolution,
'camera_type': self.camera_type
}
@classmethod
def load_state_dict(cls, state_dict):
out = cls.__new__(Camera)
out._position = state_dict['position']
out._look_at = state_dict['look_at']
out._up = state_dict['up']
out._fov = state_dict['fov']
out.cam_to_world = state_dict['cam_to_world']
out.intrinsic_mat = state_dict['intrinsic_mat']
out.clip_near = state_dict['clip_near']
out.resolution = state_dict['resolution']
out.camera_type = state_dict['camera_type']
return out
def automatic_camera_placement(shapes: List,
resolution: Tuple[int, int]):
"""
Given a list of objects or shapes, generates camera parameters automatically
using the bounding boxes of the shapes. Places the camera at some
distance from the shapes so that it can see all of them.
Inspired by https://github.com/mitsuba-renderer/mitsuba/blob/master/src/librender/scene.cpp#L286
Parameters
==========
shapes: List
a list of redner Shape or Object
resolution: Tuple[int, int]
the size of the output image in (height, width)
Returns
=======
pyredner.Camera
a camera that can see all the objects.
"""
aabb_min = torch.tensor((float('inf'), float('inf'), float('inf')))
aabb_max = -torch.tensor((float('inf'), float('inf'), float('inf')))
for shape in shapes:
v = shape.vertices
v_min = torch.min(v, 0)[0].cpu()
v_max = torch.max(v, 0)[0].cpu()
aabb_min = torch.min(aabb_min, v_min)
aabb_max = torch.max(aabb_max, v_max)
assert(torch.isfinite(aabb_min).all() and torch.isfinite(aabb_max).all())
center = (aabb_max + aabb_min) * 0.5
extents = aabb_max - aabb_min
max_extents_xy = torch.max(extents[0], extents[1])
distance = max_extents_xy / (2 * math.tan(45 * 0.5 * math.pi / 180.0))
max_extents_xyz = torch.max(extents[2], max_extents_xy)
return Camera(position = torch.tensor((center[0], center[1], aabb_min[2] - distance)),
look_at = center,
up = torch.tensor((0.0, 1.0, 0.0)),
fov = torch.tensor([45.0]),
clip_near = 0.001 * float(distance),
resolution = resolution)
def generate_intrinsic_mat(fx: torch.Tensor,
fy: torch.Tensor,
skew: torch.Tensor,
x0: torch.Tensor,
y0: torch.Tensor):
"""
| Generate the following 3x3 intrinsic matrix given the parameters.
| fx, skew, x0
| 0, fy, y0
| 0, 0, 1
Parameters
==========
fx: torch.Tensor
Focal length at x dimension. 1D tensor with size 1.
fy: torch.Tensor
Focal length at y dimension. 1D tensor with size 1.
skew: torch.Tensor
Axis skew parameter describing shearing transform. 1D tensor with size 1.
x0: torch.Tensor
Principal point offset at x dimension. 1D tensor with size 1.
y0: torch.Tensor
Principal point offset at y dimension. 1D tensor with size 1.
Returns
=======
torch.Tensor
3x3 intrinsic matrix
"""
z = torch.zeros_like(fx)
o = torch.ones_like(fx)
row0 = torch.cat([fx, skew, x0])
row1 = torch.cat([ z, fy, y0])
row2 = torch.cat([ z, z, o])
return torch.stack([row0, row1, row2]).contiguous()
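# Example usage (an illustrative sketch, not part of the original module):
# build an intrinsic matrix from pinhole parameters and construct a
# perspective camera with it. The numeric values are arbitrary.
#
#   intrinsic = generate_intrinsic_mat(
#       fx=torch.tensor([2.0]), fy=torch.tensor([2.0]),
#       skew=torch.tensor([0.0]),
#       x0=torch.tensor([0.0]), y0=torch.tensor([0.0]))
#   cam = Camera(position=torch.tensor([0.0, 0.0, -5.0]),
#                look_at=torch.tensor([0.0, 0.0, 0.0]),
#                up=torch.tensor([0.0, 1.0, 0.0]),
#                intrinsic_mat=intrinsic,
#                resolution=(256, 256))
#   # cam.world_to_cam and cam.intrinsic_mat_inv are derived automatically.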
|
py | 1a33be7202e45438b11eb12ce8ef8ccc5ea6b675 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def test_row_set_constructor():
from google.cloud.bigtable.row_set import RowSet
row_set = RowSet()
assert [] == row_set.row_keys
assert [] == row_set.row_ranges
def test_row_set__eq__():
from google.cloud.bigtable.row_set import RowRange
from google.cloud.bigtable.row_set import RowSet
row_key1 = b"row_key1"
row_key2 = b"row_key1"
row_range1 = RowRange(b"row_key4", b"row_key9")
row_range2 = RowRange(b"row_key4", b"row_key9")
row_set1 = RowSet()
row_set2 = RowSet()
row_set1.add_row_key(row_key1)
row_set2.add_row_key(row_key2)
row_set1.add_row_range(row_range1)
row_set2.add_row_range(row_range2)
assert row_set1 == row_set2
def test_row_set__eq__type_differ():
from google.cloud.bigtable.row_set import RowSet
row_set1 = RowSet()
row_set2 = object()
assert not (row_set1 == row_set2)
def test_row_set__eq__len_row_keys_differ():
from google.cloud.bigtable.row_set import RowSet
row_key1 = b"row_key1"
row_key2 = b"row_key1"
row_set1 = RowSet()
row_set2 = RowSet()
row_set1.add_row_key(row_key1)
row_set1.add_row_key(row_key2)
row_set2.add_row_key(row_key2)
assert not (row_set1 == row_set2)
def test_row_set__eq__len_row_ranges_differ():
from google.cloud.bigtable.row_set import RowRange
from google.cloud.bigtable.row_set import RowSet
row_range1 = RowRange(b"row_key4", b"row_key9")
row_range2 = RowRange(b"row_key4", b"row_key9")
row_set1 = RowSet()
row_set2 = RowSet()
row_set1.add_row_range(row_range1)
row_set1.add_row_range(row_range2)
row_set2.add_row_range(row_range2)
assert not (row_set1 == row_set2)
def test_row_set__eq__row_keys_differ():
from google.cloud.bigtable.row_set import RowSet
row_set1 = RowSet()
row_set2 = RowSet()
row_set1.add_row_key(b"row_key1")
row_set1.add_row_key(b"row_key2")
row_set1.add_row_key(b"row_key3")
row_set2.add_row_key(b"row_key1")
row_set2.add_row_key(b"row_key2")
row_set2.add_row_key(b"row_key4")
assert not (row_set1 == row_set2)
def test_row_set__eq__row_ranges_differ():
from google.cloud.bigtable.row_set import RowRange
from google.cloud.bigtable.row_set import RowSet
row_range1 = RowRange(b"row_key4", b"row_key9")
row_range2 = RowRange(b"row_key14", b"row_key19")
row_range3 = RowRange(b"row_key24", b"row_key29")
row_set1 = RowSet()
row_set2 = RowSet()
row_set1.add_row_range(row_range1)
row_set1.add_row_range(row_range2)
row_set1.add_row_range(row_range3)
row_set2.add_row_range(row_range1)
row_set2.add_row_range(row_range2)
assert not (row_set1 == row_set2)
def test_row_set__ne__():
from google.cloud.bigtable.row_set import RowRange
from google.cloud.bigtable.row_set import RowSet
row_key1 = b"row_key1"
row_key2 = b"row_key1"
row_range1 = RowRange(b"row_key4", b"row_key9")
row_range2 = RowRange(b"row_key5", b"row_key9")
row_set1 = RowSet()
row_set2 = RowSet()
row_set1.add_row_key(row_key1)
row_set2.add_row_key(row_key2)
row_set1.add_row_range(row_range1)
row_set2.add_row_range(row_range2)
assert row_set1 != row_set2
def test_row_set__ne__same_value():
from google.cloud.bigtable.row_set import RowRange
from google.cloud.bigtable.row_set import RowSet
row_key1 = b"row_key1"
row_key2 = b"row_key1"
row_range1 = RowRange(b"row_key4", b"row_key9")
row_range2 = RowRange(b"row_key4", b"row_key9")
row_set1 = RowSet()
row_set2 = RowSet()
row_set1.add_row_key(row_key1)
row_set2.add_row_key(row_key2)
row_set1.add_row_range(row_range1)
row_set2.add_row_range(row_range2)
assert not (row_set1 != row_set2)
def test_row_set_add_row_key():
from google.cloud.bigtable.row_set import RowSet
row_set = RowSet()
row_set.add_row_key("row_key1")
row_set.add_row_key("row_key2")
assert ["row_key1" == "row_key2"], row_set.row_keys
def test_row_set_add_row_range():
from google.cloud.bigtable.row_set import RowRange
from google.cloud.bigtable.row_set import RowSet
row_set = RowSet()
row_range1 = RowRange(b"row_key1", b"row_key9")
row_range2 = RowRange(b"row_key21", b"row_key29")
row_set.add_row_range(row_range1)
row_set.add_row_range(row_range2)
expected = [row_range1, row_range2]
assert expected == row_set.row_ranges
def test_row_set_add_row_range_from_keys():
from google.cloud.bigtable.row_set import RowSet
row_set = RowSet()
row_set.add_row_range_from_keys(
start_key=b"row_key1",
end_key=b"row_key9",
start_inclusive=False,
end_inclusive=True,
)
assert row_set.row_ranges[0].end_key == b"row_key9"
def test_row_set_add_row_range_with_prefix():
from google.cloud.bigtable.row_set import RowSet
row_set = RowSet()
row_set.add_row_range_with_prefix("row")
assert row_set.row_ranges[0].end_key == b"rox"
def test_row_set__update_message_request():
from google.cloud._helpers import _to_bytes
from google.cloud.bigtable.row_set import RowRange
from google.cloud.bigtable.row_set import RowSet
row_set = RowSet()
table_name = "table_name"
row_set.add_row_key("row_key1")
row_range1 = RowRange(b"row_key21", b"row_key29")
row_set.add_row_range(row_range1)
request = _ReadRowsRequestPB(table_name=table_name)
row_set._update_message_request(request)
expected_request = _ReadRowsRequestPB(table_name=table_name)
expected_request.rows.row_keys.append(_to_bytes("row_key1"))
expected_request.rows.row_ranges.append(row_range1.get_range_kwargs())
assert request == expected_request
def test_row_range_constructor():
from google.cloud.bigtable.row_set import RowRange
start_key = "row_key1"
end_key = "row_key9"
row_range = RowRange(start_key, end_key)
assert start_key == row_range.start_key
assert end_key == row_range.end_key
assert row_range.start_inclusive
assert not row_range.end_inclusive
def test_row_range___hash__set_equality():
from google.cloud.bigtable.row_set import RowRange
row_range1 = RowRange("row_key1", "row_key9")
row_range2 = RowRange("row_key1", "row_key9")
set_one = {row_range1, row_range2}
set_two = {row_range1, row_range2}
assert set_one == set_two
def test_row_range___hash__not_equals():
from google.cloud.bigtable.row_set import RowRange
row_range1 = RowRange("row_key1", "row_key9")
row_range2 = RowRange("row_key1", "row_key19")
set_one = {row_range1}
set_two = {row_range2}
assert set_one != set_two
def test_row_range__eq__():
from google.cloud.bigtable.row_set import RowRange
start_key = b"row_key1"
end_key = b"row_key9"
row_range1 = RowRange(start_key, end_key, True, False)
row_range2 = RowRange(start_key, end_key, True, False)
assert row_range1 == row_range2
def test_row_range___eq__type_differ():
from google.cloud.bigtable.row_set import RowRange
start_key = b"row_key1"
end_key = b"row_key9"
row_range1 = RowRange(start_key, end_key, True, False)
row_range2 = object()
assert row_range1 != row_range2
def test_row_range__ne__():
from google.cloud.bigtable.row_set import RowRange
start_key = b"row_key1"
end_key = b"row_key9"
row_range1 = RowRange(start_key, end_key, True, False)
row_range2 = RowRange(start_key, end_key, False, True)
assert row_range1 != row_range2
def test_row_range__ne__same_value():
from google.cloud.bigtable.row_set import RowRange
start_key = b"row_key1"
end_key = b"row_key9"
row_range1 = RowRange(start_key, end_key, True, False)
row_range2 = RowRange(start_key, end_key, True, False)
assert not (row_range1 != row_range2)
def test_row_range_get_range_kwargs_closed_open():
from google.cloud.bigtable.row_set import RowRange
start_key = b"row_key1"
end_key = b"row_key9"
expected_result = {"start_key_closed": start_key, "end_key_open": end_key}
row_range = RowRange(start_key, end_key)
actual_result = row_range.get_range_kwargs()
assert expected_result == actual_result
def test_row_range_get_range_kwargs_open_closed():
from google.cloud.bigtable.row_set import RowRange
start_key = b"row_key1"
end_key = b"row_key9"
expected_result = {"start_key_open": start_key, "end_key_closed": end_key}
row_range = RowRange(start_key, end_key, False, True)
actual_result = row_range.get_range_kwargs()
assert expected_result == actual_result
def _ReadRowsRequestPB(*args, **kw):
from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2
return messages_v2_pb2.ReadRowsRequest(*args, **kw)
|
py | 1a33beac78273ec7086ac96afdc709c3310790d6 | REQUEST_ERROR_STATUS_CODE = 503
REQUEST_ERROR_MESSAGE = "Request failed"
class APIError(Exception):
def __init__(self, response=None, message=None):
self.response = response
self._message = message
def __str__(self):
return "{} - {}".format(self.status_code, self.message)
@property
def message(self):
try:
return self.response.json().get('message', self.response.json().get('errors'))
except (TypeError, ValueError, AttributeError, KeyError):
return self._message or REQUEST_ERROR_MESSAGE
@property
def status_code(self):
try:
return self.response.status_code
except AttributeError:
return REQUEST_ERROR_STATUS_CODE
class HTTPError(APIError):
@staticmethod
def create(e):
error = HTTPError(e.response)
if error.status_code == 503:
error = HTTP503Error(e.response)
return error
class HTTP503Error(HTTPError):
"""Specific instance of HTTPError for 503 errors
Used for detecting whether failed requests should be retried.
"""
pass
class InvalidResponse(APIError):
pass
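# Illustrative usage (a sketch, not part of the original module): wrap a
# failed `requests` call and decide whether to retry. The `requests` import
# and the endpoint URL are assumptions made for the example.
#
#   try:
#       response = requests.get('https://api.example.com/things')
#       response.raise_for_status()
#   except requests.exceptions.HTTPError as e:
#       error = HTTPError.create(e)
#       should_retry = isinstance(error, HTTP503Error)
#       print(error)  # "<status_code> - <message>"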
|
py | 1a33bec9b1baf15c38265083e456b1c5dbd060e6 | # -*- coding: utf-8 -*-
from openprocurement.api.validation import (
validate_accreditation_level
)
from openprocurement.api.utils import (
get_resource_accreditation
)
def validate_change_ownership_accreditation(request, **kwargs): # pylint: disable=unused-argument
levels = get_resource_accreditation(request, 'lot', request.context, 'create')
err_msg = 'Broker Accreditation level does not permit ownership change'
validate_accreditation_level(request, request.validated['lot'], levels, err_msg)
|
py | 1a33bf14117b74ecbc2c9a3ca035b0482681204f | import asyncio
import zlib
from aiocache import Cache
from aiocache.serializers import BaseSerializer
class CompressionSerializer(BaseSerializer):
# This is needed because zlib works with bytes.
# this way the underlying backend knows how to
# store/retrieve values
DEFAULT_ENCODING = None
def dumps(self, value):
print("I've received:\n{}".format(value))
compressed = zlib.compress(value.encode())
print("But I'm storing:\n{}".format(compressed))
return compressed
def loads(self, value):
print("I've retrieved:\n{}".format(value))
decompressed = zlib.decompress(value).decode()
print("But I'm returning:\n{}".format(decompressed))
return decompressed
cache = Cache(Cache.REDIS, serializer=CompressionSerializer(), namespace="main")
async def serializer():
text = (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt"
"ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation"
"ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in"
"reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur"
"sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit"
"anim id est laborum."
)
await cache.set("key", text)
print("-----------------------------------")
real_value = await cache.get("key")
compressed_value = await cache.raw("get", "main:key")
assert len(compressed_value) < len(real_value.encode())
def test_serializer():
loop = asyncio.get_event_loop()
loop.run_until_complete(serializer())
loop.run_until_complete(cache.delete("key"))
loop.run_until_complete(cache.close())
if __name__ == "__main__":
test_serializer()
|
py | 1a33bfae7b7be41c30cce21f1dab2b63844483c0 | # Copyright 2016 Osvaldo Santana Neto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from requests import Session
from zeep import Client, Transport
logger = logging.getLogger(__name__)
class SoapClient(Client):
def __init__(self, wsdl, cert=None, verify=True, timeout=8, **kwargs):
session = Session()
session.cert = cert
session.verify = verify
session.timeout = timeout
session.headers.update({'Content-Type': 'text/xml;charset=UTF-8'})
transport = Transport(
operation_timeout=timeout,
session=session
)
super().__init__(wsdl=wsdl, transport=transport, **kwargs)
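# Illustrative usage (a sketch, not part of the original module): the WSDL
# URL, certificate paths, and the operation name are assumptions; zeep
# exposes SOAP operations through `client.service.<OperationName>`.
#
#   client = SoapClient('https://example.com/service?wsdl',
#                       cert=('client.pem', 'client.key'), timeout=8)
#   result = client.service.SomeOperation(param1='value')  # hypothetical op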
|
py | 1a33c05f50de75fc9e16aa572c2845d5b397bfe8 | # -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.characterisation.correction` module.
"""
from __future__ import division, unicode_literals
import numpy as np
import unittest
from itertools import permutations
from numpy.linalg import LinAlgError
from colour.characterisation.correction import (
augmented_matrix_Cheung2004, polynomial_expansion_Finlayson2015,
polynomial_expansion_Vandermonde, colour_correction_matrix_Cheung2004,
colour_correction_matrix_Finlayson2015,
colour_correction_matrix_Vandermonde, colour_correction_Cheung2004,
colour_correction_Finlayson2015, colour_correction_Vandermonde)
from colour.utilities import ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'M_T', 'M_R', 'TestAugmentedMatrixCheung2004',
'TestPolynomialExpansionFinlayson2015',
'TestPolynomialExpansionVandermonde',
'TestColourCorrectionMatrixCheung2004',
'TestColourCorrectionMatrixFinlayson2015',
'TestColourCorrectionMatrixVandermonde', 'TestColourCorrectionCheung2004',
'TestColourCorrectionFinlayson2015', 'TestColourCorrectionVandermonde'
]
M_T = np.array([
[0.17224810, 0.09170660, 0.06416938],
[0.49189645, 0.27802050, 0.21923399],
[0.10999751, 0.18658946, 0.29938611],
[0.11666120, 0.14327905, 0.05713804],
[0.18988879, 0.18227649, 0.36056247],
[0.12501329, 0.42223442, 0.37027445],
[0.64785606, 0.22396782, 0.03365194],
[0.06761093, 0.11076896, 0.39779139],
[0.49101797, 0.09448929, 0.11623839],
[0.11622386, 0.04425753, 0.14469986],
[0.36867946, 0.44545230, 0.06028681],
[0.61632937, 0.32323906, 0.02437089],
[0.03016472, 0.06153243, 0.29014596],
[0.11103655, 0.30553067, 0.08149137],
[0.41162190, 0.05816656, 0.04845934],
[0.73339206, 0.53075188, 0.02475212],
[0.47347718, 0.08834792, 0.30310315],
[0.00000000, 0.25187016, 0.35062450],
[0.76809639, 0.78486240, 0.77808297],
[0.53822392, 0.54307997, 0.54710883],
[0.35458526, 0.35318419, 0.35524431],
[0.17976704, 0.18000531, 0.17991488],
[0.09351417, 0.09510603, 0.09675027],
[0.03405071, 0.03295077, 0.03702047],
])
M_R = np.array([
[0.15579559, 0.09715755, 0.07514556],
[0.39113140, 0.25943419, 0.21266708],
[0.12824821, 0.18463570, 0.31508023],
[0.12028974, 0.13455659, 0.07408400],
[0.19368988, 0.21158946, 0.37955964],
[0.19957424, 0.36085439, 0.40678123],
[0.48896605, 0.20691688, 0.05816533],
[0.09775522, 0.16710693, 0.47147724],
[0.39358649, 0.12233400, 0.10526425],
[0.10780332, 0.07258529, 0.16151473],
[0.27502671, 0.34705454, 0.09728099],
[0.43980441, 0.26880559, 0.05430533],
[0.05887212, 0.11126272, 0.38552469],
[0.12705825, 0.25787860, 0.13566464],
[0.35612929, 0.07933258, 0.05118732],
[0.48131976, 0.42082843, 0.07120612],
[0.34665585, 0.15170714, 0.24969804],
[0.08261116, 0.24588716, 0.48707733],
[0.66054904, 0.65941137, 0.66376412],
[0.48051509, 0.47870296, 0.48230082],
[0.33045354, 0.32904184, 0.33228886],
[0.18001305, 0.17978567, 0.18004416],
[0.10283975, 0.10424680, 0.10384975],
[0.04742204, 0.04772203, 0.04914226],
])
class TestAugmentedMatrixCheung2004(unittest.TestCase):
"""
Defines :func:`colour.characterisation.correction.\
augmented_matrix_Cheung2004` definition unit tests methods.
"""
def test_augmented_matrix_Cheung2004(self):
"""
Tests :func:`colour.characterisation.correction.\
augmented_matrix_Cheung2004` definition.
"""
RGB = np.array([0.17224810, 0.09170660, 0.06416938])
polynomials = [
np.array([0.17224810, 0.09170660, 0.06416938]),
np.array(
[0.17224810, 0.09170660, 0.06416938, 0.00101364, 1.00000000]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.01579629, 0.01105305,
0.00588476, 1.00000000
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.01579629, 0.01105305,
0.00588476, 0.00101364, 1.00000000
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.01579629, 0.01105305,
0.00588476, 0.02966941, 0.00841010, 0.00411771, 1.00000000
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.01579629, 0.01105305,
0.00588476, 0.02966941, 0.00841010, 0.00411771, 0.00101364,
1.00000000
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.01579629, 0.01105305,
0.00588476, 0.02966941, 0.00841010, 0.00411771, 0.00101364,
0.00511050, 0.00077126, 0.00026423, 1.00000000
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.01579629, 0.01105305,
0.00588476, 0.02966941, 0.00841010, 0.00411771, 0.00101364,
0.00272088, 0.00053967, 0.00070927, 0.00511050, 0.00077126,
0.00026423
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.01579629, 0.01105305,
0.00588476, 0.02966941, 0.00841010, 0.00411771, 0.00101364,
0.00272088, 0.00053967, 0.00070927, 0.00511050, 0.00077126,
0.00026423, 1.00000000
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.01579629, 0.01105305,
0.00588476, 0.02966941, 0.00841010, 0.00411771, 0.00101364,
0.00272088, 0.00053967, 0.00070927, 0.00190387, 0.00144862,
0.00037762, 0.00511050, 0.00077126, 0.00026423
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.01579629, 0.01105305,
0.00588476, 0.02966941, 0.00841010, 0.00411771, 0.00101364,
0.00272088, 0.00053967, 0.00070927, 0.00190387, 0.00144862,
0.00037762, 0.00511050, 0.00077126, 0.00026423, 1.00000000
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.01579629, 0.01105305,
0.00588476, 0.02966941, 0.00841010, 0.00411771, 0.00101364,
0.00272088, 0.00053967, 0.00070927, 0.00190387, 0.00144862,
0.00037762, 0.00511050, 0.00077126, 0.00026423, 0.00017460,
0.00009296, 0.00006504
]),
]
for i, terms in enumerate([3, 5, 7, 8, 10, 11, 14, 16, 17, 19, 20,
22]):
np.testing.assert_almost_equal(
augmented_matrix_Cheung2004(RGB, terms),
polynomials[i],
decimal=7)
def test_raise_exception_augmented_matrix_Cheung2004(self):
"""
Tests :func:`colour.characterisation.correction.\
augmented_matrix_Cheung2004` definition raised exception.
"""
self.assertRaises(ValueError, augmented_matrix_Cheung2004,
np.array([0.17224810, 0.09170660, 0.06416938]), 4)
@ignore_numpy_errors
def test_nan_augmented_matrix_Cheung2004(self):
"""
Tests :func:`colour.characterisation.correction.\
augmented_matrix_Cheung2004` definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
augmented_matrix_Cheung2004(case)
class TestPolynomialExpansionFinlayson2015(unittest.TestCase):
"""
Defines :func:`colour.characterisation.correction.\
polynomial_expansion_Finlayson2015` definition unit tests methods.
"""
def test_polynomial_expansion_Finlayson2015(self):
"""
Tests :func:`colour.characterisation.correction.\
polynomial_expansion_Finlayson2015` definition.
"""
RGB = np.array([0.17224810, 0.09170660, 0.06416938])
polynomials = [
[
np.array([0.17224810, 0.09170660, 0.06416938]),
np.array([0.17224810, 0.09170660, 0.06416938])
],
[
np.array([
0.17224810, 0.09170660, 0.06416938, 0.02966941, 0.00841010,
0.00411771, 0.01579629, 0.00588476, 0.01105305
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.00789814, 0.00294238,
0.00552653
])
],
[
np.array([
0.17224810, 0.09170660, 0.06416938, 0.02966941, 0.00841010,
0.00411771, 0.01579629, 0.00588476, 0.01105305, 0.00511050,
0.00077126, 0.00026423, 0.00144862, 0.00037762, 0.00070927,
0.00272088, 0.00053967, 0.00190387, 0.00101364
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.00789814, 0.00294238,
0.00552653, 0.00048287, 0.00012587, 0.00023642, 0.00090696,
0.00017989, 0.00063462, 0.00033788
])
],
[
np.array([
0.17224810, 0.09170660, 0.06416938, 0.02966941, 0.00841010,
0.00411771, 0.01579629, 0.00588476, 0.01105305, 0.00511050,
0.00077126, 0.00026423, 0.00144862, 0.00037762, 0.00070927,
0.00272088, 0.00053967, 0.00190387, 0.00101364, 0.00088027,
0.00007073, 0.00001696, 0.00046867, 0.00032794, 0.00013285,
0.00004949, 0.00004551, 0.00002423, 0.00024952, 0.00003463,
0.00012217, 0.00017460, 0.00009296, 0.00006504
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.00789814, 0.00294238,
0.00552653, 0.00048287, 0.00012587, 0.00023642, 0.00090696,
0.00017989, 0.00063462, 0.00033788, 0.00011717, 0.00008198,
0.00003321, 0.00001237, 0.00001138, 0.00000606, 0.00004365,
0.00002324, 0.00001626
])
],
]
for i in range(4):
np.testing.assert_almost_equal(
polynomial_expansion_Finlayson2015(RGB, i + 1, False),
polynomials[i][0],
decimal=7)
np.testing.assert_almost_equal(
polynomial_expansion_Finlayson2015(RGB, i + 1, True),
polynomials[i][1],
decimal=7)
def test_raise_exception_polynomial_expansion_Finlayson2015(self):
"""
Tests :func:`colour.characterisation.correction.\
polynomial_expansion_Finlayson2015` definition raised exception.
"""
self.assertRaises(ValueError, polynomial_expansion_Finlayson2015,
np.array([0.17224810, 0.09170660, 0.06416938]), 5)
@ignore_numpy_errors
def test_nan_polynomial_expansion_Finlayson2015(self):
"""
Tests :func:`colour.characterisation.correction.\
polynomial_expansion_Finlayson2015` definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
polynomial_expansion_Finlayson2015(case)
class TestPolynomialExpansionVandermonde(unittest.TestCase):
"""
Defines :func:`colour.characterisation.correction.\
polynomial_expansion_Vandermonde` definition unit tests methods.
"""
def test_polynomial_expansion_Vandermonde(self):
"""
Tests :func:`colour.characterisation.correction.\
polynomial_expansion_Vandermonde` definition.
"""
RGB = np.array([0.17224810, 0.09170660, 0.06416938])
polynomials = [
np.array([0.17224810, 0.09170660, 0.06416938, 1.00000000]),
np.array([
0.02966941, 0.00841010, 0.00411771, 0.17224810, 0.09170660,
0.06416938, 1.00000000
]),
np.array([
0.00511050, 0.00077126, 0.00026423, 0.02966941, 0.00841010,
0.00411771, 0.17224810, 0.09170660, 0.06416938, 1.00000000
]),
np.array([
0.00088027, 0.00007073, 0.00001696, 0.00511050, 0.00077126,
0.00026423, 0.02966941, 0.00841010, 0.00411771, 0.17224810,
0.09170660, 0.06416938, 1.00000000
]),
]
for i in range(4):
np.testing.assert_almost_equal(
polynomial_expansion_Vandermonde(RGB, i + 1),
polynomials[i],
decimal=7)
@ignore_numpy_errors
def test_nan_polynomial_expansion_Vandermonde(self):
"""
Tests :func:`colour.characterisation.correction.\
polynomial_expansion_Vandermonde` definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
polynomial_expansion_Vandermonde(case)
class TestColourCorrectionMatrixCheung2004(unittest.TestCase):
"""
Defines :func:`colour.characterisation.correction.\
colour_correction_matrix_Cheung2004` definition unit tests methods.
"""
def test_colour_correction_matrix_Cheung2004(self):
"""
Tests :func:`colour.characterisation.correction.\
colour_correction_matrix_Cheung2004` definition.
"""
np.testing.assert_almost_equal(
colour_correction_matrix_Cheung2004(M_T, M_R),
np.array([
[0.69822661, 0.03071629, 0.16210422],
[0.06893498, 0.67579611, 0.16430385],
[-0.06314956, 0.09212471, 0.97134152],
]),
decimal=7)
np.testing.assert_almost_equal(
colour_correction_matrix_Cheung2004(M_T, M_R, terms=7),
np.array([
[
0.80512769, 0.04001012, -0.01255261, -0.41056170,
-0.28052094, 0.68417697, 0.02251728
],
[
0.03270288, 0.71452384, 0.17581905, -0.00897913,
0.04900199, -0.17162742, 0.01688472
],
[
-0.03973098, -0.07164767, 1.16401636, 0.29017859,
-0.88909018, 0.26675507, 0.02345109
],
]),
decimal=7)
@ignore_numpy_errors
def test_nan_colour_correction_matrix_Cheung2004(self):
"""
Tests :func:`colour.characterisation.correction.
colour_correction_matrix_Cheung2004` definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = list(set(permutations(cases * 3, r=3)))[0:4]
for case in cases:
try:
colour_correction_matrix_Cheung2004(
np.vstack([case, case, case]),
np.transpose(np.vstack([case, case, case])))
except LinAlgError:
pass
class TestColourCorrectionMatrixFinlayson2015(unittest.TestCase):
"""
Defines :func:`colour.characterisation.correction.\
colour_correction_matrix_Finlayson2015` definition unit tests methods.
"""
def test_colour_correction_matrix_Finlayson2015(self):
"""
Tests :func:`colour.characterisation.correction.\
colour_correction_matrix_Finlayson2015` definition.
"""
np.testing.assert_almost_equal(
colour_correction_matrix_Finlayson2015(M_T, M_R),
np.array([
[0.69822661, 0.03071629, 0.16210422],
[0.06893498, 0.67579611, 0.16430385],
[-0.06314956, 0.09212471, 0.97134152],
]),
decimal=7)
np.testing.assert_almost_equal(
colour_correction_matrix_Finlayson2015(M_T, M_R, degree=3),
np.array([
[
0.93425721, 0.09232769, 0.05549845, -2.63952984,
2.86647167, -1.26651818, 1.80161659, -7.98949718,
1.43041717, 1.22152586, -0.78696110, -2.38339785,
7.60116922
],
[
0.07475736, 0.82145139, 0.21412644, -0.90237144,
-0.59178127, 0.15651823, -0.17394854, -1.28009106,
0.75986460, 1.18878672, 0.04525564, -1.29992663, 2.03709465
],
[
0.04808036, -0.01907445, 1.30366104, 0.61580416,
3.28111635, -6.98478443, 0.50372818, -8.69862528,
6.23059150, -1.45987996, -3.43681053, 9.83593060,
0.84418438
],
]),
decimal=7)
@ignore_numpy_errors
def test_nan_colour_correction_matrix_Finlayson2015(self):
"""
Tests :func:`colour.characterisation.correction.
colour_correction_matrix_Finlayson2015` definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = list(set(permutations(cases * 3, r=3)))[0:4]
for case in cases:
try:
colour_correction_matrix_Finlayson2015(
np.vstack([case, case, case]),
np.transpose(np.vstack([case, case, case])))
except LinAlgError:
pass
class TestColourCorrectionMatrixVandermonde(unittest.TestCase):
"""
Defines :func:`colour.characterisation.correction.\
colour_correction_matrix_Vandermonde` definition unit tests methods.
"""
def test_colour_correction_matrix_Vandermonde(self):
"""
Tests :func:`colour.characterisation.correction.\
colour_correction_matrix_Vandermonde` definition.
"""
np.testing.assert_almost_equal(
colour_correction_matrix_Vandermonde(M_T, M_R),
np.array([
[0.66770040, 0.02514036, 0.12745797, 0.02485425],
[0.03155494, 0.66896825, 0.12187874, 0.03043460],
[-0.14502258, 0.07716975, 0.87841836, 0.06666049],
]),
decimal=7)
np.testing.assert_almost_equal(
colour_correction_matrix_Vandermonde(M_T, M_R, degree=3),
np.array([
[
-0.04328223, -1.87886146, 1.83369170, -0.10798116,
1.06608177, -0.87495813, 0.75525839, -0.08558123,
0.15919076, 0.02404598
],
[
0.00998152, 0.44525275, -0.53192490, 0.00904507,
-0.41034458, 0.36173334, 0.02904178, 0.78362950,
0.07894900, 0.01986479
],
[
-1.66921744, 3.62954420, -2.96789849, 2.31451409,
-3.10767297, 1.85975390, -0.98795093, 0.85962796,
0.63591240, 0.07302317
],
]),
decimal=7)
@ignore_numpy_errors
def test_nan_colour_correction_matrix_Vandermonde(self):
"""
Tests :func:`colour.characterisation.correction.
colour_correction_matrix_Vandermonde` definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = list(set(permutations(cases * 3, r=3)))[0:4]
for case in cases:
try:
colour_correction_matrix_Vandermonde(
np.vstack([case, case, case]),
np.transpose(np.vstack([case, case, case])))
except LinAlgError:
pass
class TestColourCorrectionCheung2004(unittest.TestCase):
"""
Defines :func:`colour.characterisation.correction.\
colour_correction_Cheung2004` definition unit tests methods.
"""
def test_colour_correction_Cheung2004(self):
"""
Tests :func:`colour.characterisation.correction.\
colour_correction_Cheung2004` definition.
"""
RGB = np.array([0.17224810, 0.09170660, 0.06416938])
np.testing.assert_almost_equal(
colour_correction_Cheung2004(RGB, M_T, M_R),
np.array([0.13348722, 0.08439216, 0.05990144]),
decimal=7)
np.testing.assert_almost_equal(
colour_correction_Cheung2004(RGB, M_T, M_R, terms=7),
np.array([0.15850295, 0.09871628, 0.08105752]),
decimal=7)
def test_n_dimensional_colour_correction_Cheung2004(self):
"""
Tests :func:`colour.characterisation.correction.\
colour_correction_Cheung2004` definition n-dimensional support.
"""
RGB = np.array([0.17224810, 0.09170660, 0.06416938])
RGB_c = colour_correction_Cheung2004(RGB, M_T, M_R)
RGB = np.tile(RGB, (6, 1))
RGB_c = np.tile(RGB_c, (6, 1))
np.testing.assert_almost_equal(
colour_correction_Cheung2004(RGB, M_T, M_R), RGB_c, decimal=7)
RGB = np.reshape(RGB, (2, 3, 3))
RGB_c = np.reshape(RGB_c, (2, 3, 3))
np.testing.assert_almost_equal(
colour_correction_Cheung2004(RGB, M_T, M_R), RGB_c, decimal=7)
@ignore_numpy_errors
def test_nan_colour_correction_Cheung2004(self):
"""
Tests :func:`colour.characterisation.correction.\
colour_correction_Cheung2004` definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = list(set(permutations(cases * 3, r=3)))[0:4]
for case in cases:
try:
colour_correction_Cheung2004(
case, np.vstack([case, case, case]),
np.transpose(np.vstack([case, case, case])))
except LinAlgError:
pass
class TestColourCorrectionFinlayson2015(unittest.TestCase):
"""
Defines :func:`colour.characterisation.correction.\
colour_correction_Finlayson2015` definition unit tests methods.
"""
def test_colour_correction_Finlayson2015(self):
"""
Tests :func:`colour.characterisation.correction.\
colour_correction_Finlayson2015` definition.
"""
RGB = np.array([0.17224810, 0.09170660, 0.06416938])
np.testing.assert_almost_equal(
colour_correction_Finlayson2015(RGB, M_T, M_R),
np.array([0.13348722, 0.08439216, 0.05990144]),
decimal=7)
np.testing.assert_almost_equal(
colour_correction_Finlayson2015(RGB, M_T, M_R, degree=3),
np.array([0.15576430, 0.09483056, 0.07131041]),
decimal=7)
def test_n_dimensional_colour_correction_Finlayson2015(self):
"""
Tests :func:`colour.characterisation.correction.\
colour_correction_Finlayson2015` definition n-dimensional support.
"""
RGB = np.array([0.17224810, 0.09170660, 0.06416938])
RGB_c = colour_correction_Finlayson2015(RGB, M_T, M_R)
RGB = np.tile(RGB, (6, 1))
RGB_c = np.tile(RGB_c, (6, 1))
np.testing.assert_almost_equal(
colour_correction_Finlayson2015(RGB, M_T, M_R), RGB_c, decimal=7)
RGB = np.reshape(RGB, (2, 3, 3))
RGB_c = np.reshape(RGB_c, (2, 3, 3))
np.testing.assert_almost_equal(
colour_correction_Finlayson2015(RGB, M_T, M_R), RGB_c, decimal=7)
@ignore_numpy_errors
def test_nan_colour_correction_Finlayson2015(self):
"""
Tests :func:`colour.characterisation.correction.\
colour_correction_Finlayson2015` definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = list(set(permutations(cases * 3, r=3)))[0:4]
for case in cases:
try:
colour_correction_Finlayson2015(
case, np.vstack([case, case, case]),
np.transpose(np.vstack([case, case, case])))
except LinAlgError:
pass
class TestColourCorrectionVandermonde(unittest.TestCase):
"""
Defines :func:`colour.characterisation.correction.\
colour_correction_Vandermonde` definition unit tests methods.
"""
def test_colour_correction_Vandermonde(self):
"""
Tests :func:`colour.characterisation.correction.\
colour_correction_Vandermonde` definition.
"""
RGB = np.array([0.17224810, 0.09170660, 0.06416938])
np.testing.assert_almost_equal(
colour_correction_Vandermonde(RGB, M_T, M_R),
np.array([0.15034881, 0.10503956, 0.10512517]),
decimal=7)
np.testing.assert_almost_equal(
colour_correction_Vandermonde(RGB, M_T, M_R, degree=3),
np.array([0.15747814, 0.10035799, 0.06616709]),
decimal=7)
def test_n_dimensional_colour_correction_Vandermonde(self):
"""
Tests :func:`colour.characterisation.correction.\
colour_correction_Vandermonde` definition n-dimensional support.
"""
RGB = np.array([0.17224810, 0.09170660, 0.06416938])
RGB_c = colour_correction_Vandermonde(RGB, M_T, M_R)
RGB = np.tile(RGB, (6, 1))
RGB_c = np.tile(RGB_c, (6, 1))
np.testing.assert_almost_equal(
colour_correction_Vandermonde(RGB, M_T, M_R), RGB_c, decimal=7)
RGB = np.reshape(RGB, (2, 3, 3))
RGB_c = np.reshape(RGB_c, (2, 3, 3))
np.testing.assert_almost_equal(
colour_correction_Vandermonde(RGB, M_T, M_R), RGB_c, decimal=7)
@ignore_numpy_errors
def test_nan_colour_correction_Vandermonde(self):
"""
Tests :func:`colour.characterisation.correction.\
colour_correction_Vandermonde` definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = list(set(permutations(cases * 3, r=3)))[0:4]
for case in cases:
try:
colour_correction_Vandermonde(
case,
np.vstack([case, case, case]),
np.transpose(np.vstack([case, case, case])),
)
except LinAlgError:
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a33c0a484d6e1632a25c6d502209f78284de201 | from rest_framework import serializers
from monitor.models import Site, Sensor, Parameter
class ParameterSerializer(serializers.ModelSerializer):
"""[summary]
Parameters
----------
serializers : [type]
[description]
"""
class Meta:
model = Parameter
fields = "__all__"
class SensorSerializer(serializers.ModelSerializer):
"""[summary]
Parameters
----------
serializers : [type]
[description]
"""
parameters = ParameterSerializer(many=True, read_only=True)
class Meta:
model = Sensor
fields = "__all__"
class SiteSerializer(serializers.ModelSerializer):
"""[summary]
Parameters
----------
serializers : [type]
[description]
"""
sensors = SensorSerializer(many=True, read_only=True)
class Meta:
model = Site
fields = "__all__"
|
py | 1a33c146b8be58f0554744042ee42a99ef0d869d | """This file is used for custom testing."""
import argparse
import os
import sys
import threading
import time
import requests
from google.protobuf import text_format
import Request_pb2
parser = argparse.ArgumentParser()
parser.add_argument('-st', '--start_port', help='Starting port number', type=int)
parser.add_argument('-c', '--count', help='Number of ports needed', type=int)
parser.add_argument('-f', '--filename', help='File name of text file')
parser.add_argument('-n', '--number', help='Number of times to repeat the request', type=int)
args = parser.parse_args()
port = args.start_port
def new_dummy_server():
global port
port = port + 1
os.system('python dummy_vm_server.py ' + str(port))
def master_server():
os.system('python master_server.py -d b')
def send_request():
"""Reads expected output and task request files from specified folder.
Then sends the request to Master server specified number of times."""
text_file = open('task_request.txt', 'r')
task_request = Request_pb2.TaskRequest()
text_format.Parse(text_file.read(), task_request)
text_file.close()
file_a = Request_pb2.TaskStatusResponse()
file_b = Request_pb2.TaskStatusResponse()
fil = open('initial_task_response.txt', 'r')
text_format.Parse(fil.read(), file_a)
fil.close()
fil = open('final_task_response.txt', 'r')
text_format.Parse(fil.read(), file_b)
fil.close()
for i in range(args.number):
RESPONSE = requests.post(url='http://127.0.0.1:5000/assign_task',
files={'file': task_request.SerializeToString()})
file_A = Request_pb2.TaskStatusResponse()
file_A.ParseFromString(RESPONSE.content)
if file_A.status == Request_pb2.TaskStatusResponse.ACCEPTED :
process = threading.Thread(target = response, args= (file_a, file_A,
file_b, task_request.timeout, task_request.number_of_retries))
process.start()
else:
print(file_A)
def response(file_a, file_A, file_b, timeout, number_of_retries):
"""Query the Master server about the previous request,we sent to Master server."""
timer = timeout * (number_of_retries + 10)
time.sleep(timer)
task_status_request = Request_pb2.TaskStatusRequest()
task_status_request.request_id = file_A.current_task_id
RESPONSE = requests.post(url= 'http://127.0.0.1:5000/get_status',
files = {'file': task_status_request.SerializeToString()})
file_B = Request_pb2.TaskStatusResponse()
file_B.ParseFromString(RESPONSE.content)
match_proto(file_a, file_A , file_b, file_B)
def match_proto(file_a, file_A ,file_b, file_B):
"""Match the expected and received files of the response."""
if file_b.status == file_B.status and file_b.task_response.status == file_B.task_response.status:
print('Task request ' + str(file_A.current_task_id) + ' matched successfully')
else:
print('Task request ' + str(file_A.current_task_id) + ' did not match successfully')
if __name__ == '__main__':
print( 'Starting_port {} Count {} filename {} number {} '.format(
args.start_port,
args.count,
args.filename,
args.number
))
process = threading.Thread(target = master_server)
process.start()
time.sleep(10)
count = args.count
for i in range(count):
time.sleep(2)
process = threading.Thread(target = new_dummy_server)
process.start()
time.sleep(5)
folder_list = args.filename.split(',')
for folder in folder_list:
os.chdir(os.path.join(os.getcwd(), folder))
send_request()
os.chdir('..')
|
py | 1a33c1cb75cfdb2fdd7ae8c16218de0dd4e5f39c | # pylint: disable=missing-docstring
from unittest.mock import Mock, patch
from peltak import testing
from peltak.core import conf
from peltak.core import context
from peltak.core import fs
from peltak.core import types
@patch('peltak.core.fs.filtered_walk')
@testing.patch_pelconf()
def test_calls_filtered_walk_with_paths_configured(p_filtered_walk: Mock):
files = types.FilesCollection.from_config({
'paths': ['path1', 'path2'],
})
fs.collect_files(files)
assert p_filtered_walk.call_count == 2
args, _ = p_filtered_walk.call_args_list[0]
expected = (conf.proj_path('path1'), files.whitelist(), files.blacklist())
assert tuple(args) == expected
args, _ = p_filtered_walk.call_args_list[1]
expected = (conf.proj_path('path2'), files.whitelist(), files.blacklist())
assert tuple(args) == expected
@patch('peltak.core.fs.filtered_walk', Mock(return_value=[]))
@patch('peltak.core.shell.cprint')
@testing.patch_pelconf()
def test_prints_debug_info_if_verbose_lvl_ge_3(p_cprint):
# type: (Mock) -> None
files = types.FilesCollection.from_config({
'paths': ['path1', 'path2'],
})
context.RunContext().set('verbose', 3)
fs.collect_files(files)
context.RunContext().set('verbose', 0)
assert next(
(True for x in p_cprint.call_args_list if 'only_staged: ' in x[0][0]),
False
)
assert next(
(True for x in p_cprint.call_args_list if 'untracked: ' in x[0][0]),
False
)
assert next(
(True for x in p_cprint.call_args_list if 'whitelist: ' in x[0][0]),
False
)
assert next(
(True for x in p_cprint.call_args_list if 'blacklist: ' in x[0][0]),
False
)
@patch('peltak.core.git.staged', Mock(return_value=['file1.txt', 'file2.yml']))
@testing.patch_pelconf()
def test_return_empty_list_if_none_of_the_whitelisted_files_are_staged():
"""
GIVEN files collection has a non-empty whitelist and only_staged == True
WHEN no staged files match the whitelist
THEN return empty list.
"""
files = types.FilesCollection.from_config({
'paths': ['path1'],
'include': ['*.py'],
'only_staged': True,
})
assert fs.collect_files(files) == []
|
py | 1a33c2de357e6231fe8fe7cb6010412b595d7536 | #!/bin/env python
# -*- coding=utf-8 -*-
__author__ = u'Rex Kang'
__description__ = u'根据需求,调用墨迹API生成一句话简介。'
__license__ = u'GPL - http://www.fsf.org/licenses/gpl.txt';
__history__ = {
u'1.0': [u'2017/05/19', u'调用墨迹API,完成基本功能。'],
u'1.1': [u'2017/06/08', u'增加图标,'],
u'1.2': [u'2018/09/19', u'获取白天天气替代实况天气,增加图标。'],
u'1.3': [u'2018/09/27', u'修正没有对应天气图标报错的问题。']
}
import urllib, urllib2, sys, json
def mojiAPI(apiDict, cityID, appCode):
method = 'POST'
querys = ''
bodys = {}
url = apiDict['host'] + apiDict['path']
# City IDs come from https://github.com/IceblueSakura/GetWeather/blob/master/Cityid.xml
bodys['cityId'] = cityID
bodys['token'] = apiDict['token']
post_data = urllib.urlencode(bodys)
request = urllib2.Request(url, post_data)
request.add_header('Authorization', 'APPCODE ' + appCode)
# Set the Content-Type header required by the API
request.add_header('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8')
response = urllib2.urlopen(request)
content = response.read()
if (content):
contentDict = {}
try:
contentDict = json.loads(content)
except Exception, err:
pass
finally:
return contentDict
def getCityID(city=u'海口市'):
cities = {
u'海口市': '1020',
u'三亚市': '1022',
u'乌鲁木齐市': '2505',
u'西安市': '2182',
}
return cities.get(city, '1020')  # fall back to Haikou's city ID
# v1.1 Modified Start
def getWetaherIcon(w=u'晴'):
weatherIcon = {
u'晴': u'☀️',
u'阴': u'☁️',
u'多云': u'⛅',
u'阵雨': u'🌦',
u'雨': u'🌧',
# v1.3 Modified
u'雷阵雨': u'🌦',
u'中雨': u'⛈',
u'大雨': u'⛈',
u'暴雨': u'⛈'
}
# v1.3 Modified
return weatherIcon.get(w, u'☁️')
def getAOIIcon(aqi=40):
icon = u'🌱'
if int(aqi) > 150:
icon = u'🍂'
elif int(aqi) > 75:
icon = u'🍃'
return icon
# v1.1 Modified end
def main():
API = {
'BriefForecast': {
'name': u'精简预报3天',
'host': 'http://freecityid.market.alicloudapi.com',
'path': '/whapi/json/alicityweather/briefforecast3days',
'token': '677282c2f1b3d718152c4e25ed434bc4'
},
'BriefCondition': {
'name': u'精简预报实况',
'host': 'http://freecityid.market.alicloudapi.com',
'path': '/whapi/json/alicityweather/briefcondition',
'token': '46e13b7aab9bb77ee3358c3b672a2ae4'
},
'AQI': {
'name': u'精简AQI',
'host': 'http://freecityid.market.alicloudapi.com',
'path': '/whapi/json/alicityweather/briefaqi',
'token': '4dc41ae4c14189b47b2dc00c85b9d124'
}
}
city = u'海口市'
appCode = '86a53c38ddb546878deab2f87f106e7c'
strList = [''] * 8
try:
resultOfCondition = mojiAPI(API['BriefCondition'], getCityID(city), appCode)
resultOfForecast = mojiAPI(API['BriefForecast'], getCityID(city), appCode)
resultOfAQI = mojiAPI(API['AQI'], getCityID(city), appCode)
if resultOfCondition and 'data' in resultOfCondition:
cond = resultOfCondition['data']['condition']
# v1.2 Deleted
# strList[0] = getWetaherIcon(cond['condition'])
strList[5] = cond['humidity']
if resultOfForecast and 'data' in resultOfForecast:
fore = resultOfForecast['data']['forecast'][0]
# v1.2 Modified
strList[0] = getWetaherIcon(fore['conditionDay'])
strList[1] = fore['tempNight']
strList[2] = fore['tempDay']
strList[3] = fore['windDirDay']
strList[4] = fore['windLevelDay']
if resultOfAQI and 'data' in resultOfAQI:
strList[7] = resultOfAQI['data']['aqi']['value']
strList[6] = getAOIIcon(strList[7]) # v1.1 Modified
except Exception, err:
# print err
pass
finally:
if len(set(strList)) > 4:
# v1.2 Modified
str = u'%s,%s ~ %s℃,🌪%s%s级,💧%s%%,%s%s' % tuple(strList)
print str.encode('utf-8')
sys.exit(0)
else:
sys.exit(1)
main()
|
py | 1a33c30d89cdc6005b987510342c1717a33375ca | """Return True if two arrays are element-wise equal within a tolerance."""
from __future__ import annotations
import numpy
import numpoly
from ..baseclass import PolyLike
from ..dispatch import implements
@implements(numpy.allclose)
def allclose(
a: PolyLike,
b: PolyLike,
rtol: float = 1e-5,
atol: float = 1e-8,
equal_nan: bool = False,
) -> bool:
"""
Return True if two arrays are element-wise equal within a tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
If either array contains one or more NaNs, False is returned.
Infs are treated as equal if they are in the same place and of the same
sign in both arrays.
Args:
a, b:
Input arrays to compare.
rtol:
The relative tolerance parameter (see Notes).
atol:
The absolute tolerance parameter (see Notes).
equal_nan:
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
Returns:
Returns True if the two arrays are equal within the given tolerance;
False otherwise.
Notes:
If the following equation is element-wise True, then allclose returns
True.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
``allclose(a, b)`` might be different from ``allclose(b, a)`` in some
rare cases.
The comparison of `a` and `b` uses standard broadcasting, which means
that `a` and `b` need not have the same shape in order for
``allclose(a, b)`` to evaluate to True. The same is true for `equal`
but not `array_equal`.
Examples:
>>> q0, q1 = numpoly.variable(2)
>>> numpoly.allclose([1e9*q0, 1e-7], [1.00001e9*q0, 1e-8])
False
>>> numpoly.allclose([1e9*q0, 1e-8], [1.00001e9*q0, 1e-9])
True
>>> numpoly.allclose([1e9*q0, 1e-8], [1.00001e9*q1, 1e-9])
False
>>> numpoly.allclose([q0, numpy.nan],
... [q0, numpy.nan], equal_nan=True)
True
"""
a, b = numpoly.align_polynomials(a, b)
for coeff1, coeff2 in zip(a.coefficients, b.coefficients):
if not numpy.allclose(
coeff1, coeff2, atol=atol, rtol=rtol, equal_nan=equal_nan):
return False
return True
|
py | 1a33c34047258292fc807d0008191fa54ebf56f2 | from report_vitals import report_battery_vitals
from filter_values import filterOut_safe_vitals
from process_battery_data import process_data
from controller_actions import get_actions
def is_battery_ok(bms_attributes):
data = process_data(bms_attributes)
report_battery_vitals(data)
get_actions(data)
value = list(filter(filterOut_safe_vitals,data))
return len(value) == 0
if __name__ == '__main__':
assert(is_battery_ok({'temperature': 25,'Soc': 70, 'Charge_rate': 0.7}) is True) #all values in limit
assert(is_battery_ok({'Temperature': 46,'soc': 23, 'Charge_rate': 0.77}) is False) #high temp warning,low soc warning,charge_rate high warnings
|
py | 1a33c42f92372e165762e9f8a6d5cf07f6b2d9d1 | import asyncio
import logging
import time
from typing import Set, List, Tuple, Optional
import ray
from ray.experimental.workflow import workflow_storage
from ray.experimental.workflow.common import (Workflow, WorkflowStatus,
WorkflowMetaData)
from ray.experimental.workflow.step_executor import commit_step
from ray.experimental.workflow.storage import get_global_storage
from ray.experimental.workflow.workflow_access import (
MANAGEMENT_ACTOR_NAME, flatten_workflow_output,
get_or_create_management_actor)
logger = logging.getLogger(__name__)
def run(entry_workflow: Workflow,
workflow_id: Optional[str] = None) -> ray.ObjectRef:
"""Run a workflow asynchronously. See "api.run()" for details."""
store = get_global_storage()
assert ray.is_initialized()
if workflow_id is None:
# Workflow ID format: {Entry workflow UUID}.{Unix time to nanoseconds}
workflow_id = f"{entry_workflow.id}.{time.time():.9f}"
logger.info(f"Workflow job created. [id=\"{workflow_id}\", storage_url="
f"\"{store.storage_url}\"].")
# checkpoint the workflow
ws = workflow_storage.get_workflow_storage(workflow_id)
commit_step(ws, "", entry_workflow)
workflow_manager = get_or_create_management_actor()
# NOTE: It is important to 'ray.get' the returned output. This
# ensures caller of 'run()' holds the reference to the workflow
# result. Otherwise if the actor removes the reference of the
# workflow output, the caller may fail to resolve the result.
output = ray.get(workflow_manager.run_or_resume.remote(workflow_id))
return flatten_workflow_output(workflow_id, output)
# TODO(suquark): support recovery with ObjectRef inputs.
def resume(workflow_id: str) -> ray.ObjectRef:
"""Resume a workflow asynchronously. See "api.resume()" for details.
"""
storage = get_global_storage()
logger.info(f"Resuming workflow [id=\"{workflow_id}\", storage_url="
f"\"{storage.storage_url}\"].")
workflow_manager = get_or_create_management_actor()
# NOTE: It is important to 'ray.get' the returned output. This
# ensures caller of 'run()' holds the reference to the workflow
# result. Otherwise if the actor removes the reference of the
# workflow output, the caller may fail to resolve the result.
output = ray.get(workflow_manager.run_or_resume.remote(workflow_id))
direct_output = flatten_workflow_output(workflow_id, output)
logger.info(f"Workflow job {workflow_id} resumed.")
return direct_output
def get_output(workflow_id: str) -> ray.ObjectRef:
"""Get the output of a running workflow.
See "api.get_output()" for details.
"""
assert ray.is_initialized()
try:
workflow_manager = ray.get_actor(MANAGEMENT_ACTOR_NAME)
except ValueError as e:
raise ValueError(
"Failed to connect to the workflow management "
"actor. The workflow could have already failed. You can use "
"workflow.resume() to resume the workflow.") from e
output = ray.get(workflow_manager.get_output.remote(workflow_id))
return flatten_workflow_output(workflow_id, output)
def cancel(workflow_id: str) -> None:
try:
workflow_manager = ray.get_actor(MANAGEMENT_ACTOR_NAME)
ray.get(workflow_manager.cancel_workflow.remote(workflow_id))
except ValueError:
wf_store = workflow_storage.get_workflow_storage(workflow_id)
wf_store.save_workflow_meta(WorkflowMetaData(WorkflowStatus.CANCELED))
def get_status(workflow_id: str) -> Optional[WorkflowStatus]:
try:
workflow_manager = ray.get_actor(MANAGEMENT_ACTOR_NAME)
running = ray.get(
workflow_manager.is_workflow_running.remote(workflow_id))
except Exception:
running = False
if running:
return WorkflowStatus.RUNNING
store = workflow_storage.get_workflow_storage(workflow_id)
meta = store.load_workflow_meta()
if meta is None:
raise ValueError(f"No such workflow_id {workflow_id}")
return meta.status
def list_all(status_filter: Set[WorkflowStatus]
) -> List[Tuple[str, WorkflowStatus]]:
try:
workflow_manager = ray.get_actor(MANAGEMENT_ACTOR_NAME)
except ValueError:
workflow_manager = None
if workflow_manager is None:
runnings = []
else:
runnings = ray.get(workflow_manager.list_running_workflow.remote())
if WorkflowStatus.RUNNING in status_filter and len(status_filter) == 1:
return [(r, WorkflowStatus.RUNNING) for r in runnings]
runnings = set(runnings)
# Here we don't have a workflow id, so use an empty one instead
store = workflow_storage.get_workflow_storage("")
ret = []
for (k, s) in store.list_workflow():
if s == WorkflowStatus.RUNNING and k not in runnings:
s = WorkflowStatus.RESUMABLE
if s in status_filter:
ret.append((k, s))
return ret
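# Illustrative sketch: list_all({WorkflowStatus.RESUMABLE}) would return pairs
# such as [("job-1", WorkflowStatus.RESUMABLE)] for workflows recorded as
# RUNNING in storage but no longer tracked by the management actor.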
def resume_all(with_failed: bool) -> List[Tuple[str, ray.ObjectRef]]:
filter_set = {WorkflowStatus.RESUMABLE}
if with_failed:
filter_set.add(WorkflowStatus.FAILED)
all_failed = list_all(filter_set)
try:
workflow_manager = ray.get_actor(MANAGEMENT_ACTOR_NAME)
except Exception as e:
raise RuntimeError("Failed to get management actor") from e
async def _resume_one(wid: str) -> Tuple[str, Optional[ray.ObjectRef]]:
try:
obj = await workflow_manager.run_or_resume.remote(wid)
return (wid, flatten_workflow_output(wid, obj))
except Exception:
logger.error(f"Failed to resume workflow {wid}")
return (wid, None)
ret = workflow_storage.asyncio_run(
asyncio.gather(*[_resume_one(wid) for (wid, _) in all_failed]))
return [(wid, obj) for (wid, obj) in ret if obj is not None]
|
py | 1a33c7372a31eb9e9f710f8340250ddf37beb292 | import random
import requests
import json
import time
import os.path
import tempfile
import subprocess
from firebase import firebase, jsonutil
from copy import deepcopy
from threading import Thread
TOKEN = ''
def doGet(path, args={}):
resp = json.loads(requests.get('https://api.digitalocean.com/v2%s' % path, params=args, headers={'Authorization':'Bearer %s' % TOKEN}).text)
return resp
def doDelete(path):
resp = requests.delete('https://api.digitalocean.com/v2%s' % path, headers={'Authorization':'Bearer %s' % TOKEN}).text
return True
def doPost(path, args={}):
resp = json.loads(requests.post('https://api.digitalocean.com/v2%s' % path, data=json.dumps(args), headers={'Authorization':'Bearer %s' % TOKEN, 'Content-Type':'application/json'}).text)
return resp
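# Usage sketch (paths assumed from the DigitalOcean v2 API calls used below):
#   doGet('/droplets/<id>') -> parsed JSON describing a droplet
#   doPost('/droplets', payload) -> parsed JSON for a newly created droplet
# All three helpers require TOKEN to be set to a valid API token.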
def doDeploy(project_id, fals):
projects = firebase.get('/projects', name=None, params=None)
p = None
for project in projects:
if project_id in projects[project]:
p = projects[project][project_id]
if not p:
print("p doesn't exist")
return False
if 'droplet_id' not in p:
print("No droplet id")
return False
droplet = doGet('/droplets/%s' % p['droplet_id'])['droplet']
files = fals
tmpdir = tempfile.mkdtemp()
for f in files:
direct, name = os.path.split(files[f]['filepath'])
if not os.path.exists(os.path.join(tmpdir, direct)):
os.makedirs(os.path.join(tmpdir, direct))
fi = open(os.path.join(tmpdir, files[f]['filepath']), 'w')
fi.write(files[f]['text'])
fi.close()
net = droplet['networks']['v4'][0]['ip_address']
po = subprocess.Popen('/usr/bin/rsync -aze "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" --delete %s/ web@%s:web' % (tmpdir, net), shell=True)
print(po.wait())
print('rsync done!')
if p['type'] == 'flask':
po = subprocess.Popen("/usr/bin/ssh %s -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -l web 'kill `cat /home/web/.app_pid`; sleep 3; kill -9 `cat /home/web/.app_pid`; '" % net, shell=True)
print(po.wait())
po = subprocess.Popen('/usr/bin/ssh %s -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -l web \'screen -m -d /usr/bin/python /home/web/web/app.py; echo `pidof SCREEN` > /home/web/.app_pid\'' % net, shell=True)
print(po.wait())
elif p['type'] == 'sinatra':
po = subprocess.Popen("/usr/bin/ssh %s -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -l web 'kill `cat /home/web/.app_pid`; sleep 3; kill -9 `cat /home/web/.app_pid`; '" % net, shell=True)
print(po.wait())
po = subprocess.Popen('/usr/bin/ssh %s -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -l web \'screen -m -d /usr/bin/ruby /home/web/web/app.rb; echo `pidof SCREEN` > /home/web/.app_pid\'' % net, shell=True)
print(po.wait())
elif p['type'] == 'php':
print('PHP needs no reload')
else:
print('Unsupported type!')
return False
if __name__ == '__main__':
firebase = firebase.FirebaseApplication('https://dep10y.firebaseio.com', authentication=None)
while True:
deploys = firebase.get('/files', name=None, params=None)
if deploys:
for i in deploys:
val = deploys[i]
print(i,val)
firebase.delete('/files', i)
thread = Thread(target=doDeploy, args=(i, val))
thread.start()
time.sleep(3)
|
py | 1a33c793b47d1a7f1a783d82e13b05c6df5c4948 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Modified from go/bootstrap.py in Chromium infrastructure's repository to patch
# out everything but the core toolchain.
#
# https://chromium.googlesource.com/infra/infra/
"""Prepares a local hermetic Go installation.
- Downloads and unpacks the Go toolset in ../golang.
"""
import contextlib
import logging
import os
import platform
import shutil
import stat
import subprocess
import sys
import tarfile
import tempfile
import urllib
import zipfile
# TODO(vadimsh): Migrate to new golang.org/x/ paths once Golang moves to
# git completely.
LOGGER = logging.getLogger(__name__)
# /path/to/util/bot
ROOT = os.path.dirname(os.path.abspath(__file__))
# Where to install Go toolset to. GOROOT would be <TOOLSET_ROOT>/go.
TOOLSET_ROOT = os.path.join(os.path.dirname(ROOT), 'golang')
# Default workspace with infra go code.
WORKSPACE = os.path.join(ROOT, 'go')
# Platform depended suffix for executable files.
EXE_SFX = '.exe' if sys.platform == 'win32' else ''
# Pinned version of Go toolset to download.
TOOLSET_VERSION = 'go1.11.4'
# Platform dependent portion of a download URL. See http://golang.org/dl/.
TOOLSET_VARIANTS = {
('darwin', 'x86-64'): 'darwin-amd64.tar.gz',
('linux2', 'x86-32'): 'linux-386.tar.gz',
('linux2', 'x86-64'): 'linux-amd64.tar.gz',
('win32', 'x86-32'): 'windows-386.zip',
('win32', 'x86-64'): 'windows-amd64.zip',
}
# Download URL root.
DOWNLOAD_URL_PREFIX = 'https://storage.googleapis.com/golang'
class Failure(Exception):
"""Bootstrap failed."""
def get_toolset_url():
"""URL of a platform specific Go toolset archive."""
# TODO(vadimsh): Support toolset for cross-compilation.
arch = {
'amd64': 'x86-64',
'x86_64': 'x86-64',
'i386': 'x86-32',
'x86': 'x86-32',
}.get(platform.machine().lower())
variant = TOOLSET_VARIANTS.get((sys.platform, arch))
if not variant:
# TODO(vadimsh): Compile go lang from source.
raise Failure('Unrecognized platform')
return '%s/%s.%s' % (DOWNLOAD_URL_PREFIX, TOOLSET_VERSION, variant)
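# For example, on 64-bit Linux (sys.platform == 'linux2', machine 'x86_64')
# this evaluates to:
#   https://storage.googleapis.com/golang/go1.11.4.linux-amd64.tar.gz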
def read_file(path):
"""Returns contents of a given file or None if not readable."""
assert isinstance(path, (list, tuple))
try:
with open(os.path.join(*path), 'r') as f:
return f.read()
except IOError:
return None
def write_file(path, data):
"""Writes |data| to a file."""
assert isinstance(path, (list, tuple))
with open(os.path.join(*path), 'w') as f:
f.write(data)
def remove_directory(path):
"""Recursively removes a directory."""
assert isinstance(path, (list, tuple))
p = os.path.join(*path)
if not os.path.exists(p):
return
LOGGER.info('Removing %s', p)
# Crutch to remove read-only file (.git/* in particular) on Windows.
def onerror(func, path, _exc_info):
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
shutil.rmtree(p, onerror=onerror if sys.platform == 'win32' else None)
def install_toolset(toolset_root, url):
"""Downloads and installs Go toolset.
GOROOT would be <toolset_root>/go/.
"""
if not os.path.exists(toolset_root):
os.makedirs(toolset_root)
pkg_path = os.path.join(toolset_root, url[url.rfind('/')+1:])
LOGGER.info('Downloading %s...', url)
download_file(url, pkg_path)
LOGGER.info('Extracting...')
if pkg_path.endswith('.zip'):
with zipfile.ZipFile(pkg_path, 'r') as f:
f.extractall(toolset_root)
elif pkg_path.endswith('.tar.gz'):
with tarfile.open(pkg_path, 'r:gz') as f:
f.extractall(toolset_root)
else:
raise Failure('Unrecognized archive format')
LOGGER.info('Validating...')
if not check_hello_world(toolset_root):
raise Failure('Something is not right, test program doesn\'t work')
def download_file(url, path):
"""Fetches |url| to |path|."""
last_progress = [0]
def report(a, b, c):
progress = int(a * b * 100.0 / c)
if progress != last_progress[0]:
print >> sys.stderr, 'Downloading... %d%%' % progress
last_progress[0] = progress
# TODO(vadimsh): Use something less crippled, something that validates SSL.
urllib.urlretrieve(url, path, reporthook=report)
@contextlib.contextmanager
def temp_dir(path):
"""Creates a temporary directory, then deletes it."""
tmp = tempfile.mkdtemp(dir=path)
try:
yield tmp
finally:
remove_directory([tmp])
def check_hello_world(toolset_root):
"""Compiles and runs 'hello world' program to verify that toolset works."""
with temp_dir(toolset_root) as tmp:
path = os.path.join(tmp, 'hello.go')
write_file([path], r"""
package main
func main() { println("hello, world\n") }
""")
out = subprocess.check_output(
[get_go_exe(toolset_root), 'run', path],
env=get_go_environ(toolset_root, tmp),
stderr=subprocess.STDOUT)
if out.strip() != 'hello, world':
LOGGER.error('Failed to run sample program:\n%s', out)
return False
return True
def ensure_toolset_installed(toolset_root):
"""Installs or updates Go toolset if necessary.
Returns True if new toolset was installed.
"""
installed = read_file([toolset_root, 'INSTALLED_TOOLSET'])
available = get_toolset_url()
if installed == available:
LOGGER.debug('Go toolset is up-to-date: %s', TOOLSET_VERSION)
return False
LOGGER.info('Installing Go toolset.')
LOGGER.info(' Old toolset is %s', installed)
LOGGER.info(' New toolset is %s', available)
remove_directory([toolset_root])
install_toolset(toolset_root, available)
LOGGER.info('Go toolset installed: %s', TOOLSET_VERSION)
write_file([toolset_root, 'INSTALLED_TOOLSET'], available)
return True
def get_go_environ(
toolset_root,
workspace=None):
"""Returns a copy of os.environ with added GO* environment variables.
Overrides GOROOT, GOPATH and GOBIN. Keeps everything else. Idempotent.
Args:
toolset_root: GOROOT would be <toolset_root>/go.
workspace: main workspace directory or None if compiling in GOROOT.
"""
env = os.environ.copy()
env['GOROOT'] = os.path.join(toolset_root, 'go')
if workspace:
env['GOBIN'] = os.path.join(workspace, 'bin')
else:
env.pop('GOBIN', None)
all_go_paths = []
if workspace:
all_go_paths.append(workspace)
env['GOPATH'] = os.pathsep.join(all_go_paths)
# New PATH entries.
paths_to_add = [
os.path.join(env['GOROOT'], 'bin'),
env.get('GOBIN'),
]
# Make sure not to add duplicate entries to PATH over and over again when
# get_go_environ is invoked multiple times.
path = env['PATH'].split(os.pathsep)
paths_to_add = [p for p in paths_to_add if p and p not in path]
env['PATH'] = os.pathsep.join(paths_to_add + path)
return env
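# Sketch of the result: with toolset_root=<TOOLSET_ROOT> and workspace=<WORKSPACE>,
# the returned mapping contains GOROOT=<TOOLSET_ROOT>/go, GOPATH=<WORKSPACE> and
# GOBIN=<WORKSPACE>/bin, and PATH gains <GOROOT>/bin and GOBIN entries
# (only when they are not already present).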
def get_go_exe(toolset_root):
"""Returns path to go executable."""
return os.path.join(toolset_root, 'go', 'bin', 'go' + EXE_SFX)
def bootstrap(logging_level):
"""Installs all dependencies in default locations.
Supposed to be called at the beginning of some script (it modifies logger).
Args:
logging_level: logging level of bootstrap process.
"""
logging.basicConfig()
LOGGER.setLevel(logging_level)
ensure_toolset_installed(TOOLSET_ROOT)
def prepare_go_environ():
"""Returns dict with environment variables to set to use Go toolset.
Installs or updates the toolset if necessary.
"""
bootstrap(logging.INFO)
return get_go_environ(TOOLSET_ROOT, WORKSPACE)
def find_executable(name, workspaces):
"""Returns full path to an executable in some bin/ (in GOROOT or GOBIN)."""
basename = name
if EXE_SFX and basename.endswith(EXE_SFX):
basename = basename[:-len(EXE_SFX)]
roots = [os.path.join(TOOLSET_ROOT, 'go', 'bin')]
for path in workspaces:
roots.extend([
os.path.join(path, 'bin'),
])
for root in roots:
full_path = os.path.join(root, basename + EXE_SFX)
if os.path.exists(full_path):
return full_path
return name
def main(args):
if args:
print >> sys.stderr, sys.modules[__name__].__doc__,
return 2
bootstrap(logging.DEBUG)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
py | 1a33c7df1cae1ab8e1cb99e3a748ceb7e6c59310 | # -*- coding: utf-8 -*-
"""This file contains the Task Scheduler Registry keys plugins."""
from __future__ import unicode_literals
from dfdatetime import filetime as dfdatetime_filetime
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.lib import errors
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import dtfabric_plugin
from plaso.parsers.winreg_plugins import interface
class TaskCacheEventData(events.EventData):
"""Task Cache event data.
Attributes:
key_path (str): Windows Registry key path.
task_name (str): name of the task.
task_identifier (str): identifier of the task.
"""
DATA_TYPE = 'task_scheduler:task_cache:entry'
def __init__(self):
"""Initializes event data."""
super(TaskCacheEventData, self).__init__(data_type=self.DATA_TYPE)
self.key_path = None
self.task_name = None
self.task_identifier = None
class TaskCacheWindowsRegistryPlugin(
dtfabric_plugin.DtFabricBaseWindowsRegistryPlugin):
"""Plugin that parses a Task Cache key."""
NAME = 'windows_task_cache'
DESCRIPTION = 'Parser for Task Scheduler cache Registry data.'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows NT\\'
'CurrentVersion\\Schedule\\TaskCache')])
_DEFINITION_FILE = 'task_scheduler.yaml'
def _GetIdValue(self, registry_key):
"""Retrieves the Id value from Task Cache Tree key.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Yields:
tuple: containing:
dfwinreg.WinRegistryKey: Windows Registry key.
dfwinreg.WinRegistryValue: Windows Registry value.
"""
id_value = registry_key.GetValueByName('Id')
if id_value:
yield registry_key, id_value
for sub_key in registry_key.GetSubkeys():
for value_key, id_value in self._GetIdValue(sub_key):
yield value_key, id_value
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
"""
dynamic_info_size_error_reported = False
tasks_key = registry_key.GetSubkeyByName('Tasks')
tree_key = registry_key.GetSubkeyByName('Tree')
if not tasks_key or not tree_key:
parser_mediator.ProduceExtractionWarning(
'Task Cache is missing a Tasks or Tree sub key.')
return
task_guids = {}
for sub_key in tree_key.GetSubkeys():
for value_key, id_value in self._GetIdValue(sub_key):
# TODO: improve this check to a regex.
# The GUID is in the form {%GUID%} and stored as a UTF-16 little-endian
# string and should be 78 bytes in size.
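# For example, '{01234567-89ab-cdef-0123-456789abcdef}' is 38 characters,
# which as a UTF-16 little-endian string with a 2-byte terminator is
# 38 * 2 + 2 = 78 bytes.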
id_value_data_size = len(id_value.data)
if id_value_data_size != 78:
parser_mediator.ProduceExtractionWarning(
'unsupported Id value data size: {0:d}.'.format(
id_value_data_size))
continue
guid_string = id_value.GetDataAsObject()
task_guids[guid_string] = value_key.name
dynamic_info_map = self._GetDataTypeMap('dynamic_info_record')
dynamic_info2_map = self._GetDataTypeMap('dynamic_info2_record')
dynamic_info_size = dynamic_info_map.GetByteSize()
dynamic_info2_size = dynamic_info2_map.GetByteSize()
for sub_key in tasks_key.GetSubkeys():
dynamic_info_value = sub_key.GetValueByName('DynamicInfo')
if not dynamic_info_value:
continue
dynamic_info_record_map = None
dynamic_info_value_data_size = len(dynamic_info_value.data)
if dynamic_info_value_data_size == dynamic_info_size:
dynamic_info_record_map = dynamic_info_map
elif dynamic_info_value_data_size == dynamic_info2_size:
dynamic_info_record_map = dynamic_info2_map
else:
if not dynamic_info_size_error_reported:
parser_mediator.ProduceExtractionWarning(
'unsupported DynamicInfo value data size: {0:d}.'.format(
dynamic_info_value_data_size))
dynamic_info_size_error_reported = True
continue
try:
dynamic_info_record = self._ReadStructureFromByteStream(
dynamic_info_value.data, 0, dynamic_info_record_map)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse DynamicInfo record with error: {0!s}.'.format(
exception))
continue
name = task_guids.get(sub_key.name, sub_key.name)
event_data = TaskCacheEventData()
event_data.key_path = (registry_key.path).replace("\\", "/")
event_data.task_name = name
event_data.task_identifier = sub_key.name
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
last_registered_time = dynamic_info_record.last_registered_time
if last_registered_time:
# Note this is likely either the last registered time or
# the update time.
date_time = dfdatetime_filetime.Filetime(timestamp=last_registered_time)
event = time_events.DateTimeValuesEvent(
date_time, 'Last registered time')
parser_mediator.ProduceEventWithEventData(event, event_data)
launch_time = dynamic_info_record.launch_time
if launch_time:
# Note this is likely the launch time.
date_time = dfdatetime_filetime.Filetime(timestamp=launch_time)
event = time_events.DateTimeValuesEvent(
date_time, 'Launch time')
parser_mediator.ProduceEventWithEventData(event, event_data)
unknown_time = getattr(dynamic_info_record, 'unknown_time', None)
if unknown_time:
date_time = dfdatetime_filetime.Filetime(timestamp=unknown_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_UNKNOWN)
parser_mediator.ProduceEventWithEventData(event, event_data)
# TODO: Add support for the Triggers value.
winreg.WinRegistryParser.RegisterPlugin(TaskCacheWindowsRegistryPlugin)
|
py | 1a33c81d779e1322233fbbf6244d1d545a70f3c6 | import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from MoG_valid import *
#learing rate = 0.1, K = 3, D=2, epoch=150
logging, indices = runMoG(0.1,5,2,300)
trainData,validData = loadData()
#Loss on validation set
plt.figure(1)
plt.plot(logging[:,1],marker='+',label='K=5')
plt.legend(loc='upper right')
plt.title('Plot of Valid_Loss vs. Iterations')
plt.xlabel('Iterations')
plt.ylabel('Valid_Loss')
plt.figure(2)
plt.scatter(validData[:,0],validData[:,1],c= indices,label='K=5')
plt.legend(loc='upper right')
plt.title('Scatter Plot of ValidData')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
|
py | 1a33c95d106552b554ea508378f9f2f55235e0ac | #!/usr/bin/env python3
# Removes old files in media root in order to keep your storage requirements low
from alexacloud import settings
import datetime
import shutil
import os
media_root = settings.MEDIA_ROOT
# Delete directories whose modification time is more than 30 minutes old
now = datetime.datetime.now()
ago = now - datetime.timedelta(minutes=30)
folders = [os.path.join(media_root, f) for f in os.listdir(media_root)]
folders = list(filter(os.path.isdir, folders))
for folder in folders:
st = os.stat(folder)
mtime = datetime.datetime.fromtimestamp(st.st_mtime)
if mtime < ago:
shutil.rmtree(folder)
|
py | 1a33cb7e5a286989055c19d5111a94ffe864214d | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class traffic_engineering(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/traffic-engineering. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines ISIS TE.
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "traffic-engineering"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"traffic-engineering",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/config (container)
YANG Description: This container defines ISIS TE configuration.
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: This container defines ISIS TE configuration.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state (container)
YANG Description: This container defines ISIS TE state information.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: This container defines ISIS TE state information.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
from . import config
from . import state
class traffic_engineering(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/traffic-engineering. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines ISIS TE.
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "traffic-engineering"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"traffic-engineering",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/config (container)
YANG Description: This container defines ISIS TE configuration.
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: This container defines ISIS TE configuration.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state (container)
YANG Description: This container defines ISIS TE state information.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: This container defines ISIS TE state information.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
|
py | 1a33cb955c343ce80562b0e8ef1d75a6cacdb07b | import functools
import operator
from collections import defaultdict
from contextlib import suppress
from typing import TYPE_CHECKING, Any, Dict, Hashable, Mapping, Optional, Tuple, Union
import numpy as np
import pandas as pd
from . import dtypes, utils
from .indexing import get_indexer_nd
from .utils import is_dict_like, is_full_slice
from .variable import IndexVariable, Variable
if TYPE_CHECKING:
from .dataarray import DataArray
from .dataset import Dataset
def _get_joiner(join):
if join == "outer":
return functools.partial(functools.reduce, operator.or_)
elif join == "inner":
return functools.partial(functools.reduce, operator.and_)
elif join == "left":
return operator.itemgetter(0)
elif join == "right":
return operator.itemgetter(-1)
elif join == "exact":
# We cannot return a function to "align" in this case, because it needs
# access to the dimension name to give a good error message.
return None
elif join == "override":
# We rewrite all indexes and then use join='left'
return operator.itemgetter(0)
else:
raise ValueError("invalid value for join: %s" % join)
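# Illustration (sketch, assuming the pandas index set semantics this code relies on):
#   _get_joiner("outer")([pd.Index([1, 2]), pd.Index([2, 3])])
# reduces the indexes with union, giving an index over [1, 2, 3];
# join="inner" would instead give the intersection [2].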
def _override_indexes(objects, all_indexes, exclude):
for dim, dim_indexes in all_indexes.items():
if dim not in exclude:
lengths = {index.size for index in dim_indexes}
if len(lengths) != 1:
raise ValueError(
"Indexes along dimension %r don't have the same length."
" Cannot use join='override'." % dim
)
objects = list(objects)
for idx, obj in enumerate(objects[1:]):
new_indexes = {}
for dim in obj.dims:
if dim not in exclude:
new_indexes[dim] = all_indexes[dim][0]
objects[idx + 1] = obj._overwrite_indexes(new_indexes)
return objects
def align(
*objects,
join="inner",
copy=True,
indexes=None,
exclude=frozenset(),
fill_value=dtypes.NA,
):
"""
Given any number of Dataset and/or DataArray objects, returns new
objects with aligned indexes and dimension sizes.
Arrays from the aligned objects are suitable as input to mathematical
operators, because along each dimension they have the same index and size.
Missing values (if ``join != 'inner'``) are filled with ``fill_value``.
The default fill value is NaN.
Parameters
----------
*objects : Dataset or DataArray
Objects to align.
join : {'outer', 'inner', 'left', 'right', 'exact', 'override'}, optional
Method for joining the indexes of the passed objects along each
dimension:
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- 'override': if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
copy : bool, optional
If ``copy=True``, data in the return values is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed with
only slice operations, then the output may share memory with the input.
In either case, new xarray objects are always returned.
indexes : dict-like, optional
Any indexes explicitly provided with the `indexes` argument should be
used in preference to the aligned indexes.
exclude : sequence of str, optional
Dimensions that must be excluded from alignment
fill_value : scalar, optional
Value to use for newly missing values
Returns
-------
aligned : same as *objects
Tuple of objects with aligned coordinates.
Raises
------
ValueError
If any dimensions without labels on the arguments have different sizes,
or a different size than the size of the aligned dimension labels.
Examples
--------
>>> import xarray as xr
>>> x = xr.DataArray([[25, 35], [10, 24]], dims=('lat', 'lon'),
... coords={'lat': [35., 40.], 'lon': [100., 120.]})
>>> y = xr.DataArray([[20, 5], [7, 13]], dims=('lat', 'lon'),
... coords={'lat': [35., 42.], 'lon': [100., 120.]})
>>> x
<xarray.DataArray (lat: 2, lon: 2)>
array([[25, 35],
[10, 24]])
Coordinates:
* lat (lat) float64 35.0 40.0
* lon (lon) float64 100.0 120.0
>>> y
<xarray.DataArray (lat: 2, lon: 2)>
array([[20, 5],
[ 7, 13]])
Coordinates:
* lat (lat) float64 35.0 42.0
* lon (lon) float64 100.0 120.0
>>> a, b = xr.align(x, y)
>>> a
<xarray.DataArray (lat: 1, lon: 2)>
array([[25, 35]])
Coordinates:
* lat (lat) float64 35.0
* lon (lon) float64 100.0 120.0
>>> b
<xarray.DataArray (lat: 1, lon: 2)>
array([[20, 5]])
Coordinates:
* lat (lat) float64 35.0
* lon (lon) float64 100.0 120.0
>>> a, b = xr.align(x, y, join='outer')
>>> a
<xarray.DataArray (lat: 3, lon: 2)>
array([[25., 35.],
[10., 24.],
[nan, nan]])
Coordinates:
* lat (lat) float64 35.0 40.0 42.0
* lon (lon) float64 100.0 120.0
>>> b
<xarray.DataArray (lat: 3, lon: 2)>
array([[20., 5.],
[nan, nan],
[ 7., 13.]])
Coordinates:
* lat (lat) float64 35.0 40.0 42.0
* lon (lon) float64 100.0 120.0
>>> a, b = xr.align(x, y, join='outer', fill_value=-999)
>>> a
<xarray.DataArray (lat: 3, lon: 2)>
array([[ 25, 35],
[ 10, 24],
[-999, -999]])
Coordinates:
* lat (lat) float64 35.0 40.0 42.0
* lon (lon) float64 100.0 120.0
>>> b
<xarray.DataArray (lat: 3, lon: 2)>
array([[ 20, 5],
[-999, -999],
[ 7, 13]])
Coordinates:
* lat (lat) float64 35.0 40.0 42.0
* lon (lon) float64 100.0 120.0
>>> a, b = xr.align(x, y, join='left')
>>> a
<xarray.DataArray (lat: 2, lon: 2)>
array([[25, 35],
[10, 24]])
Coordinates:
* lat (lat) float64 35.0 40.0
* lon (lon) float64 100.0 120.0
>>> b
<xarray.DataArray (lat: 2, lon: 2)>
array([[20., 5.],
[nan, nan]])
Coordinates:
* lat (lat) float64 35.0 40.0
* lon (lon) float64 100.0 120.0
>>> a, b = xr.align(x, y, join='right')
>>> a
<xarray.DataArray (lat: 2, lon: 2)>
array([[25., 35.],
[nan, nan]])
Coordinates:
* lat (lat) float64 35.0 42.0
* lon (lon) float64 100.0 120.0
>>> b
<xarray.DataArray (lat: 2, lon: 2)>
array([[20, 5],
[ 7, 13]])
Coordinates:
* lat (lat) float64 35.0 42.0
* lon (lon) float64 100.0 120.0
>>> a, b = xr.align(x, y, join='exact')
Traceback (most recent call last):
...
"indexes along dimension {!r} are not equal".format(dim)
ValueError: indexes along dimension 'lat' are not equal
>>> a, b = xr.align(x, y, join='override')
>>> a
<xarray.DataArray (lat: 2, lon: 2)>
array([[25, 35],
[10, 24]])
Coordinates:
* lat (lat) float64 35.0 40.0
* lon (lon) float64 100.0 120.0
>>> b
<xarray.DataArray (lat: 2, lon: 2)>
array([[20, 5],
[ 7, 13]])
Coordinates:
* lat (lat) float64 35.0 40.0
* lon (lon) float64 100.0 120.0
"""
if indexes is None:
indexes = {}
if not indexes and len(objects) == 1:
# fast path for the trivial case
obj, = objects
return (obj.copy(deep=copy),)
all_indexes = defaultdict(list)
unlabeled_dim_sizes = defaultdict(set)
for obj in objects:
for dim in obj.dims:
if dim not in exclude:
try:
index = obj.indexes[dim]
except KeyError:
unlabeled_dim_sizes[dim].add(obj.sizes[dim])
else:
all_indexes[dim].append(index)
if join == "override":
objects = _override_indexes(objects, all_indexes, exclude)
# We don't reindex over dimensions with all equal indexes for two reasons:
# - It's faster for the usual case (already aligned objects).
# - It ensures it's possible to do operations that don't require alignment
# on indexes with duplicate values (which cannot be reindexed with
# pandas). This is useful, e.g., for overwriting such duplicate indexes.
joiner = _get_joiner(join)
joined_indexes = {}
for dim, matching_indexes in all_indexes.items():
if dim in indexes:
index = utils.safe_cast_to_index(indexes[dim])
if (
any(not index.equals(other) for other in matching_indexes)
or dim in unlabeled_dim_sizes
):
joined_indexes[dim] = index
else:
if (
any(
not matching_indexes[0].equals(other)
for other in matching_indexes[1:]
)
or dim in unlabeled_dim_sizes
):
if join == "exact":
raise ValueError(f"indexes along dimension {dim!r} are not equal")
index = joiner(matching_indexes)
joined_indexes[dim] = index
else:
index = matching_indexes[0]
if dim in unlabeled_dim_sizes:
unlabeled_sizes = unlabeled_dim_sizes[dim]
labeled_size = index.size
if len(unlabeled_sizes | {labeled_size}) > 1:
raise ValueError(
"arguments without labels along dimension %r cannot be "
"aligned because they have different dimension size(s) %r "
"than the size of the aligned dimension labels: %r"
% (dim, unlabeled_sizes, labeled_size)
)
for dim in unlabeled_dim_sizes:
if dim not in all_indexes:
sizes = unlabeled_dim_sizes[dim]
if len(sizes) > 1:
raise ValueError(
"arguments without labels along dimension %r cannot be "
"aligned because they have different dimension sizes: %r"
% (dim, sizes)
)
result = []
for obj in objects:
valid_indexers = {k: v for k, v in joined_indexes.items() if k in obj.dims}
if not valid_indexers:
# fast path for no reindexing necessary
new_obj = obj.copy(deep=copy)
else:
new_obj = obj.reindex(copy=copy, fill_value=fill_value, **valid_indexers)
new_obj.encoding = obj.encoding
result.append(new_obj)
return tuple(result)
def deep_align(
objects,
join="inner",
copy=True,
indexes=None,
exclude=frozenset(),
raise_on_invalid=True,
fill_value=dtypes.NA,
):
"""Align objects for merging, recursing into dictionary values.
This function is not public API.
"""
from .dataarray import DataArray
from .dataset import Dataset
if indexes is None:
indexes = {}
def is_alignable(obj):
return isinstance(obj, (DataArray, Dataset))
positions = []
keys = []
out = []
targets = []
no_key = object()
not_replaced = object()
for position, variables in enumerate(objects):
if is_alignable(variables):
positions.append(position)
keys.append(no_key)
targets.append(variables)
out.append(not_replaced)
elif is_dict_like(variables):
current_out = {}
for k, v in variables.items():
if is_alignable(v) and k not in indexes:
# Skip variables in indexes for alignment, because these
# should be overwritten instead:
# https://github.com/pydata/xarray/issues/725
# https://github.com/pydata/xarray/issues/3377
# TODO(shoyer): doing this here feels super-hacky -- can we
# move it explicitly into merge instead?
positions.append(position)
keys.append(k)
targets.append(v)
current_out[k] = not_replaced
else:
current_out[k] = v
out.append(current_out)
elif raise_on_invalid:
raise ValueError(
"object to align is neither an xarray.Dataset, "
"an xarray.DataArray nor a dictionary: {!r}".format(variables)
)
else:
out.append(variables)
aligned = align(
*targets,
join=join,
copy=copy,
indexes=indexes,
exclude=exclude,
fill_value=fill_value,
)
for position, key, aligned_obj in zip(positions, keys, aligned):
if key is no_key:
out[position] = aligned_obj
else:
out[position][key] = aligned_obj
# something went wrong: we should have replaced all sentinel values
for arg in out:
assert arg is not not_replaced
if is_dict_like(arg):
assert all(value is not not_replaced for value in arg.values())
return out
def reindex_like_indexers(
target: "Union[DataArray, Dataset]", other: "Union[DataArray, Dataset]"
) -> Dict[Hashable, pd.Index]:
"""Extract indexers to align target with other.
Not public API.
Parameters
----------
target : Dataset or DataArray
Object to be aligned.
other : Dataset or DataArray
Object to be aligned with.
Returns
-------
Dict[Hashable, pandas.Index] providing indexes for reindex keyword
arguments.
Raises
------
ValueError
If any dimensions without labels have different sizes.
"""
indexers = {k: v for k, v in other.indexes.items() if k in target.dims}
for dim in other.dims:
if dim not in indexers and dim in target.dims:
other_size = other.sizes[dim]
target_size = target.sizes[dim]
if other_size != target_size:
raise ValueError(
"different size for unlabeled "
"dimension on argument %r: %r vs %r"
% (dim, other_size, target_size)
)
return indexers
def reindex_variables(
variables: Mapping[Any, Variable],
sizes: Mapping[Any, int],
indexes: Mapping[Any, pd.Index],
indexers: Mapping,
method: Optional[str] = None,
tolerance: Any = None,
copy: bool = True,
fill_value: Optional[Any] = dtypes.NA,
) -> Tuple[Dict[Hashable, Variable], Dict[Hashable, pd.Index]]:
"""Conform a dictionary of aligned variables onto a new set of variables,
filling in missing values with NaN.
Not public API.
Parameters
----------
variables : dict-like
Dictionary of xarray.Variable objects.
sizes : dict-like
Dictionary from dimension names to integer sizes.
indexes : dict-like
Dictionary of indexes associated with variables.
indexers : dict
Dictionary with keys given by dimension names and values given by
arrays of coordinates tick labels. Any mis-matched coordinate values
will be filled in with NaN, and any mis-matched dimension names will
simply be ignored.
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for filling index values in ``indexers`` not found in
this dataset:
* None (default): don't fill gaps
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value
tolerance : optional
Maximum distance between original and new labels for inexact matches.
The values of the index at the matching locations must satisfy the
equation ``abs(index[indexer] - target) <= tolerance``.
copy : bool, optional
If ``copy=True``, data in the return values is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed
with only slice operations, then the output may share memory with
the input. In either case, new xarray objects are always returned.
fill_value : scalar, optional
Value to use for newly missing values
Returns
-------
reindexed : dict
Dict of reindexed variables.
new_indexes : dict
Dict of indexes associated with the reindexed variables.
"""
from .dataarray import DataArray
# create variables for the new dataset
reindexed: Dict[Hashable, Variable] = {}
# build up indexers for assignment along each dimension
int_indexers = {}
new_indexes = dict(indexes)
masked_dims = set()
unchanged_dims = set()
for dim, indexer in indexers.items():
if isinstance(indexer, DataArray) and indexer.dims != (dim,):
raise ValueError(
"Indexer has dimensions {:s} that are different "
"from that to be indexed along {:s}".format(str(indexer.dims), dim)
)
target = new_indexes[dim] = utils.safe_cast_to_index(indexers[dim])
if dim in indexes:
index = indexes[dim]
if not index.is_unique:
raise ValueError(
"cannot reindex or align along dimension %r because the "
"index has duplicate values" % dim
)
int_indexer = get_indexer_nd(index, target, method, tolerance)
# We use negative values from get_indexer_nd to signify
# values that are missing in the index.
if (int_indexer < 0).any():
masked_dims.add(dim)
elif np.array_equal(int_indexer, np.arange(len(index))):
unchanged_dims.add(dim)
int_indexers[dim] = int_indexer
if dim in variables:
var = variables[dim]
args: tuple = (var.attrs, var.encoding)
else:
args = ()
reindexed[dim] = IndexVariable((dim,), target, *args)
for dim in sizes:
if dim not in indexes and dim in indexers:
existing_size = sizes[dim]
new_size = indexers[dim].size
if existing_size != new_size:
raise ValueError(
"cannot reindex or align along dimension %r without an "
"index because its size %r is different from the size of "
"the new index %r" % (dim, existing_size, new_size)
)
for name, var in variables.items():
if name not in indexers:
key = tuple(
slice(None) if d in unchanged_dims else int_indexers.get(d, slice(None))
for d in var.dims
)
needs_masking = any(d in masked_dims for d in var.dims)
if needs_masking:
new_var = var._getitem_with_mask(key, fill_value=fill_value)
elif all(is_full_slice(k) for k in key):
# no reindexing necessary
# here we need to manually deal with copying data, since
# we neither created a new ndarray nor used fancy indexing
new_var = var.copy(deep=copy)
else:
new_var = var[key]
reindexed[name] = new_var
return reindexed, new_indexes
def _get_broadcast_dims_map_common_coords(args, exclude):
common_coords = {}
dims_map = {}
for arg in args:
for dim in arg.dims:
if dim not in common_coords and dim not in exclude:
dims_map[dim] = arg.sizes[dim]
if dim in arg.coords:
common_coords[dim] = arg.coords[dim].variable
return dims_map, common_coords
def _broadcast_helper(arg, exclude, dims_map, common_coords):
from .dataarray import DataArray
from .dataset import Dataset
def _set_dims(var):
# Add excluded dims to a copy of dims_map
var_dims_map = dims_map.copy()
for dim in exclude:
with suppress(ValueError):
# ignore dim not in var.dims
var_dims_map[dim] = var.shape[var.dims.index(dim)]
return var.set_dims(var_dims_map)
def _broadcast_array(array):
data = _set_dims(array.variable)
coords = dict(array.coords)
coords.update(common_coords)
return DataArray(data, coords, data.dims, name=array.name, attrs=array.attrs)
def _broadcast_dataset(ds):
data_vars = {k: _set_dims(ds.variables[k]) for k in ds.data_vars}
coords = dict(ds.coords)
coords.update(common_coords)
return Dataset(data_vars, coords, ds.attrs)
if isinstance(arg, DataArray):
return _broadcast_array(arg)
elif isinstance(arg, Dataset):
return _broadcast_dataset(arg)
else:
raise ValueError("all input must be Dataset or DataArray objects")
def broadcast(*args, exclude=None):
"""Explicitly broadcast any number of DataArray or Dataset objects against
one another.
xarray objects automatically broadcast against each other in arithmetic
operations, so this function should not be necessary for normal use.
If no change is needed, the input data is returned to the output without
being copied.
Parameters
----------
*args : DataArray or Dataset objects
Arrays to broadcast against each other.
exclude : sequence of str, optional
Dimensions that must not be broadcasted
Returns
-------
broadcast : tuple of xarray objects
The same data as the input arrays, but with additional dimensions
inserted so that all data arrays have the same dimensions and shape.
Examples
--------
Broadcast two data arrays against one another to fill out their dimensions:
>>> a = xr.DataArray([1, 2, 3], dims='x')
>>> b = xr.DataArray([5, 6], dims='y')
>>> a
<xarray.DataArray (x: 3)>
array([1, 2, 3])
Coordinates:
* x (x) int64 0 1 2
>>> b
<xarray.DataArray (y: 2)>
array([5, 6])
Coordinates:
* y (y) int64 0 1
>>> a2, b2 = xr.broadcast(a, b)
>>> a2
<xarray.DataArray (x: 3, y: 2)>
array([[1, 1],
[2, 2],
[3, 3]])
Coordinates:
* x (x) int64 0 1 2
* y (y) int64 0 1
>>> b2
<xarray.DataArray (x: 3, y: 2)>
array([[5, 6],
[5, 6],
[5, 6]])
Coordinates:
* y (y) int64 0 1
* x (x) int64 0 1 2
Fill out the dimensions of all data variables in a dataset:
>>> ds = xr.Dataset({'a': a, 'b': b})
>>> ds2, = xr.broadcast(ds) # use tuple unpacking to extract one dataset
>>> ds2
<xarray.Dataset>
Dimensions: (x: 3, y: 2)
Coordinates:
* x (x) int64 0 1 2
* y (y) int64 0 1
Data variables:
a (x, y) int64 1 1 2 2 3 3
b (x, y) int64 5 6 5 6 5 6
"""
if exclude is None:
exclude = set()
args = align(*args, join="outer", copy=False, exclude=exclude)
dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude)
result = []
for arg in args:
result.append(_broadcast_helper(arg, exclude, dims_map, common_coords))
return tuple(result)
|
py | 1a33ccbd63bce34f6e4c6b7663b059592072a60f | import cv2
#Global_vars.cap1 = cv2.VideoCapture("rtsp://10.24.72.33:554/0")
cap1 = cv2.VideoCapture(0)
cap2 = cv2.VideoCapture("rtsp://admin:[email protected]:6461")
#Global_vars.cap1 = cv2.VideoCapture("rtsp://admin:[email protected]:554/Streaming/Channel/101")
## rtsp://192.168.2.109:554/user=admin&password=mammaloe&channel=1&stream=0.sdp?
## rtsp://89.239.192.188:553/ucast/11
#Global_vars.cap2 = cv2.VideoCapture("rtsp://viewer:[email protected]:80")
print("cap1 init done")
cv2.namedWindow("cam1", cv2.WINDOW_NORMAL)
cv2.namedWindow("cam2", cv2.WINDOW_NORMAL)
while 1:
try:
k = cv2.waitKey(1) & 0xff
if k == ord('q') or k == 27:
break
cap1_grab = cap1.grab()
_, stream_buffer1 = cap1.retrieve(cap1_grab)
cap2_grab = cap2.grab()
_, stream_buffer2 = cap2.retrieve(cap2_grab)
cv2.imshow("cam1", stream_buffer1)
cv2.imshow("cam2", stream_buffer2)
except:
pass
cap1.release()
cap2.release()
cv2.destroyAllWindows() |
py | 1a33cd4a9fa69e3d461e3e4aa5e45672b7a74fe3 | # 単純な出力
print('hello')
# hello
# Escaping a single quote
print('I don\'t know')
# I don't know
# Multi-line output
print("""\
line1
line2
line3\
""")
# line1
# line2
# line3
# String repetition
print('Hi,' * 3 + 'Mike.')
# Hi,Hi,Hi,Mike.
# Literal concatenation
print('Py''thon')
# Python
# Concatenating a string stored in a variable
prefix = 'Py'
print(prefix + 'thon')
# Python
# Without a plus sign, the literal stays readable even when it spans multiple lines
s = ('aaaaaaaaaaaaaaaa'
'bbbbbbbbbbbbbbbb')
print(s)
# aaaaaaaaaaaaaaaabbbbbbbbbbbbbbbb
# Lines can also be joined with a backslash, but some people dislike it because it is hard to read
s = 'aaaaaaaaaaaaaaaa'\
'bbbbbbbbbbbbbbbb'
print(s)
# aaaaaaaaaaaaaaaabbbbbbbbbbbbbbbb
# Print part of a literal
word = 'python'
print(word[0])
print(word[1])
print(word[-1])
# >>> print(word[0])
# p
# >>> print(word[1])
# y
# >>> print(word[-1])
# n
# Slicing
print(word[0:2])
# >>> print(word[0:2])
# py
print(word[:2])
# >>> print(word[:2])
# py
print(word[2:])
# >>> print(word[2:])
# thon
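# A negative step reverses the string
print(word[::-1])
# >>> print(word[::-1])
# nohtyp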
# Replace the first character of a literal
word = 'python'
word = 'j' + word[1:]
print(word)
# jython
# Length of a literal
n = len(word)
print(n)
# >>> print(n)
# 6 |
py | 1a33cd713c18893ab8dcda3bfde5813829084e3a | #!/usr/bin/env python
# Copyright (C) 2009-2017 Wander Lairson Costa
# Copyright (C) 2017-2018 Robert Wlodarczyk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
import usb
setup(
name='pyusb',
version=usb.__version__,
description='Python USB access module',
author='Robert Wlodarczyk',
author_email='[email protected]',
license='Apache',
url='http://pyusb.github.io',
packages=['usb', 'usb.backend'],
long_description=
"""
PyUSB offers easy USB devices communication in Python.
It should work without additional code in any environment with
Python >= 2.4, ctypes and a pre-built usb backend library
(currently, libusb 0.1.x, libusb 1.x, and OpenUSB).
""",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Manufacturing', # USB automation, or mfg USB devs
'Intended Audience :: Science/Research', # interface with instruments
'Intended Audience :: System Administrators', # integrate strange devs
'Intended Audience :: Telecommunications Industry', # telecomm devs
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
# try to union the OSes that can build any of the backend libraries...
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows :: Windows Vista',
'Operating System :: Microsoft :: Windows :: Windows 7',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: BSD :: NetBSD',
'Operating System :: POSIX :: BSD :: OpenBSD',
'Operating System :: POSIX :: Linux',
'Operating System :: POSIX :: SunOS/Solaris',
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
# source(CPython,Jython,IronPython,PyPy): "The Long Term" section of
# http://ojs.pythonpapers.org/index.php/tpp/article/viewFile/23/23
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: IronPython',
'Programming Language :: Python :: Implementation :: Jython',
'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Hardware :: Hardware Drivers'
]
)
|
py | 1a33d025088d73bf0b59fa39afe84d4bb7e88d54 | # Generated by Django 2.0.5 on 2018-09-07 00:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('owner', '0020_auto_20180905_1312'),
('team', '0122_auto_20180821_1319'),
]
operations = [
migrations.AddField(
model_name='historicalmanualbookingsummary',
name='vehicle',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='owner.Vehicle'),
),
migrations.AddField(
model_name='manualbookingsummary',
name='vehicle',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='owner.Vehicle'),
),
migrations.AlterField(
model_name='manualbookingsummary',
name='user',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
py | 1a33d0c24afe2b7b33783150fd85880c16b5a4ea | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import DDoSProtection
class lbank (Exchange):
def describe(self):
return self.deep_extend(super(lbank, self).describe(), {
'id': 'lbank',
'name': 'LBank',
'countries': ['CN'],
'version': 'v1',
'has': {
'fetchTickers': True,
'fetchOHLCV': True,
'fetchOrder': True,
'fetchOrders': True,
'fetchOpenOrders': False, # status 0 API doesn't work
'fetchClosedOrders': True,
},
'timeframes': {
'1m': 'minute1',
'5m': 'minute5',
'15m': 'minute15',
'30m': 'minute30',
'1h': 'hour1',
'2h': 'hour2',
'4h': 'hour4',
'6h': 'hour6',
'8h': 'hour8',
'12h': 'hour12',
'1d': 'day1',
'1w': 'week1',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/38063602-9605e28a-3302-11e8-81be-64b1e53c4cfb.jpg',
'api': 'https://api.lbank.info',
'www': 'https://www.lbank.info',
'doc': 'https://github.com/LBank-exchange/lbank-official-api-docs',
'fees': 'https://lbankinfo.zendesk.com/hc/zh-cn/articles/115002295114--%E8%B4%B9%E7%8E%87%E8%AF%B4%E6%98%8E',
},
'api': {
'public': {
'get': [
'currencyPairs',
'ticker',
'depth',
'trades',
'kline',
],
},
'private': {
'post': [
'user_info',
'create_order',
'cancel_order',
'orders_info',
'orders_info_history',
],
},
},
'fees': {
'trading': {
'maker': 0.1 / 100,
'taker': 0.1 / 100,
},
'funding': {
'withdraw': {
'BTC': None,
'ZEC': 0.01,
'ETH': 0.01,
'ETC': 0.01,
# 'QTUM': amount => max(0.01, amount * (0.1 / 100)),
'VEN': 10.0,
'BCH': 0.0002,
'SC': 50.0,
'BTM': 20.0,
'NAS': 1.0,
'EOS': 1.0,
'XWC': 5.0,
'BTS': 1.0,
'INK': 10.0,
'BOT': 3.0,
'YOYOW': 15.0,
'TGC': 10.0,
'NEO': 0.0,
'CMT': 20.0,
'SEER': 2000.0,
'FIL': None,
'BTG': None,
},
},
},
})
async def fetch_markets(self):
markets = await self.publicGetCurrencyPairs()
result = []
for i in range(0, len(markets)):
id = markets[i]
baseId, quoteId = id.split('_')
base = self.common_currency_code(baseId.upper())
quote = self.common_currency_code(quoteId.upper())
symbol = base + '/' + quote
precision = {
'amount': 8,
'price': 8,
}
lot = math.pow(10, -precision['amount'])
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'lot': lot,
'precision': precision,
'limits': {
'amount': {
'min': lot,
'max': None,
},
'price': {
'min': math.pow(10, -precision['price']),
'max': math.pow(10, precision['price']),
},
'cost': {
'min': None,
'max': None,
},
},
'info': id,
})
return result
def parse_ticker(self, ticker, market=None):
symbol = market['symbol']
timestamp = self.safe_integer(ticker, 'timestamp')
info = ticker
ticker = info['ticker']
last = self.safe_float(ticker, 'latest')
percentage = self.safe_float(ticker, 'change')
relativeChange = percentage / 100
open = last / self.sum(1, relativeChange)
change = last - open
average = self.sum(last, open) / 2
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': self.safe_float(ticker, 'vol'),
'quoteVolume': self.safe_float(ticker, 'turnover'),
'info': info,
}
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetTicker(self.extend({
'symbol': market['id'],
}, params))
return self.parse_ticker(response, market)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
tickers = await self.publicGetTicker(self.extend({
'symbol': 'all',
}, params))
result = {}
for i in range(0, len(tickers)):
ticker = tickers[i]
id = ticker['symbol']
if id in self.marketsById:
market = self.marketsById[id]
symbol = market['symbol']
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_order_book(self, symbol, limit=60, params={}):
await self.load_markets()
response = await self.publicGetDepth(self.extend({
'symbol': self.market_id(symbol),
'size': min(limit, 60),
}, params))
return self.parse_order_book(response)
def parse_trade(self, trade, market=None):
symbol = market['symbol']
timestamp = int(trade['date_ms'])
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'amount')
cost = self.cost_to_precision(symbol, price * amount)
return {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': self.safe_string(trade, 'tid'),
'order': None,
'type': None,
'side': trade['type'],
'price': price,
'amount': amount,
'cost': float(cost),
'fee': None,
'info': self.safe_value(trade, 'info', trade),
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'size': 100,
}
if since is not None:
request['time'] = int(since / 1000)
if limit is not None:
request['size'] = limit
response = await self.publicGetTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return [
ohlcv[0] * 1000,
ohlcv[1],
ohlcv[2],
ohlcv[3],
ohlcv[4],
ohlcv[5],
]
async def fetch_ohlcv(self, symbol, timeframe='5m', since=None, limit=1000, params={}):
await self.load_markets()
market = self.market(symbol)
if since is None:
raise ExchangeError(self.id + ' fetchOHLCV requires a since argument')
if limit is None:
raise ExchangeError(self.id + ' fetchOHLCV requires a limit argument')
request = {
'symbol': market['id'],
'type': self.timeframes[timeframe],
'size': limit,
'time': int(since / 1000),
}
response = await self.publicGetKline(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privatePostUserInfo(params)
result = {'info': response}
ids = list(self.extend(response['info']['free'], response['info']['freeze']).keys())
for i in range(0, len(ids)):
id = ids[i]
code = id
if id in self.currencies_by_id:
code = self.currencies_by_id[id]['code']
free = self.safe_float(response['info']['free'], id, 0.0)
used = self.safe_float(response['info']['freeze'], id, 0.0)
account = {
'free': free,
'used': used,
'total': 0.0,
}
account['total'] = self.sum(account['free'], account['used'])
result[code] = account
return self.parse_balance(result)
def parse_order_status(self, status):
statuses = {
'-1': 'cancelled', # cancelled
'0': 'open', # not traded
'1': 'open', # partial deal
'2': 'closed', # complete deal
'4': 'closed', # disposal processing
}
return self.safe_string(statuses, status)
def parse_order(self, order, market=None):
symbol = None
responseMarket = self.safe_value(self.marketsById, order['symbol'])
if responseMarket is not None:
symbol = responseMarket['symbol']
elif market is not None:
symbol = market['symbol']
timestamp = self.safe_integer(order, 'create_time')
# Limit Order Request Returns: Order Price
# Market Order Returns: cny amount of market order
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'amount', 0.0)
filled = self.safe_float(order, 'deal_amount', 0.0)
av_price = self.safe_float(order, 'avg_price')
cost = None
if av_price is not None:
cost = filled * av_price
status = self.parse_order_status(self.safe_string(order, 'status'))
return {
'id': self.safe_string(order, 'order_id'),
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': self.safe_string(order, 'order_type'),
'side': order['type'],
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': amount - filled,
'trades': None,
'fee': None,
'info': self.safe_value(order, 'info', order),
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
order = {
'symbol': market['id'],
'type': side,
'amount': amount,
}
if type == 'market':
order['type'] += '_market'
else:
order['price'] = price
response = await self.privatePostCreateOrder(self.extend(order, params))
order = self.omit(order, 'type')
order['order_id'] = response['order_id']
order['type'] = side
order['order_type'] = type
order['create_time'] = self.milliseconds()
order['info'] = response
order = self.parse_order(order, market)
id = order['id']
self.orders[id] = order
return order
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.privatePostCancelOrder(self.extend({
'symbol': market['id'],
'order_id': id,
}, params))
return response
async def fetch_order(self, id, symbol=None, params={}):
# Id can be a list of ids delimited by a comma
await self.load_markets()
market = self.market(symbol)
response = await self.privatePostOrdersInfo(self.extend({
'symbol': market['id'],
'order_id': id,
}, params))
orders = self.parse_orders(response['orders'], market)
if len(orders) == 1:
return orders[0]
else:
return orders
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
if limit is None:
limit = 100
market = self.market(symbol)
response = await self.privatePostOrdersInfoHistory(self.extend({
'symbol': market['id'],
'current_page': 1,
'page_length': limit,
}, params))
return self.parse_orders(response['orders'], None, since, limit)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
orders = await self.fetch_orders(symbol, since, limit, params)
closed = self.filter_by(orders, 'status', 'closed')
cancelled = self.filter_by(orders, 'status', 'cancelled') # cancelled orders may be partially filled
return closed + cancelled
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = self.urls['api'] + '/' + self.version + '/' + self.implode_params(path, params)
# Every endpoint ends with ".do"
url += '.do'
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
query = self.keysort(self.extend({
'api_key': self.apiKey,
}, params))
queryString = self.rawencode(query) + '&secret_key=' + self.secret
query['sign'] = self.hash(self.encode(queryString)).upper()
body = self.urlencode(query)
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
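    # Note (added): for private endpoints the request is signed by key-sorting the params,
    # appending '&secret_key=<secret>' and hashing the resulting string (ccxt's Exchange.hash
    # defaults to an MD5 hex digest), then upper-casing the digest into the 'sign' field.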
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
success = self.safe_string(response, 'result')
if success == 'false':
errorCode = self.safe_string(response, 'error_code')
message = self.safe_string({
'10000': 'Internal error',
'10001': 'The required parameters can not be empty',
'10002': 'verification failed',
'10003': 'Illegal parameters',
'10004': 'User requests are too frequent',
'10005': 'Key does not exist',
'10006': 'user does not exist',
'10007': 'Invalid signature',
'10008': 'This currency pair is not supported',
'10009': 'Limit orders can not be missing orders and the number of orders',
'10010': 'Order price or order quantity must be greater than 0',
'10011': 'Market orders can not be missing the amount of the order',
'10012': 'market sell orders can not be missing orders',
'10013': 'is less than the minimum trading position 0.001',
'10014': 'Account number is not enough',
'10015': 'The order type is wrong',
'10016': 'Account balance is not enough',
'10017': 'Abnormal server',
'10018': 'order inquiry can not be more than 50 less than one',
'10019': 'withdrawal orders can not be more than 3 less than one',
'10020': 'less than the minimum amount of the transaction limit of 0.001',
}, errorCode, self.json(response))
ErrorClass = self.safe_value({
'10002': AuthenticationError,
'10004': DDoSProtection,
'10005': AuthenticationError,
'10006': AuthenticationError,
'10007': AuthenticationError,
'10009': InvalidOrder,
'10010': InvalidOrder,
'10011': InvalidOrder,
'10012': InvalidOrder,
'10013': InvalidOrder,
'10014': InvalidOrder,
'10015': InvalidOrder,
'10016': InvalidOrder,
}, errorCode, ExchangeError)
raise ErrorClass(message)
return response
|
py | 1a33d1d62cfce9520b42fd074256cfe18c18a94c | from floodsystem.station import inconsistent_typical_range_stations
from floodsystem.stationdata import build_station_list
def run():
stations = build_station_list()
x = inconsistent_typical_range_stations(stations)
station_names = []
for inconsistent_typical_range_station in x:
station_names.append(inconsistent_typical_range_station.name)
station_names = sorted(station_names)
print(station_names)
if __name__ == "__main__":
print("*** Task 1F: CUED Part IA Flood Warning System ***")
run()
|
py | 1a33d24da77dcf1316bbc26eec7ca4bd7dd8c6cf | import json
import logging
import os
from pathlib import Path
import psutil
import time
import signal
from collections import namedtuple
from firexapp.events.model import ADDITIONAL_CHILDREN_KEY
from firexapp.submit.uid import Uid
logger = logging.getLogger(__name__)
DEFAULT_FLAME_TIMEOUT = 60 * 60 * 24 * 2
# This structure contains an index by UUID for both ancestors and descendants. This is memory inefficient,
# but makes queries that would involve multiple graph traversals very fast.
# TODO: If further performance enhancements are sought, this structure could be maintained during event receiving
# so that it isn't re-calculated per task query.
FlameTaskGraph = namedtuple('FlameTaskGraph', ['tasks_by_uuid', 'ancestors_by_uuid', 'descendants_by_uuid'])
def get_flame_redirect_file_path(root_logs_dir):
return os.path.join(root_logs_dir, 'flame.html')
def get_flame_debug_dir(root_logs_dir):
return os.path.join(root_logs_dir, Uid.debug_dirname, 'flame')
def get_flame_pid_file_path(root_logs_dir):
return os.path.join(get_flame_debug_dir(root_logs_dir), 'flame.pid')
def get_flame_pid(root_logs_dir):
return int(Path(get_flame_pid_file_path(root_logs_dir)).read_text().strip())
def wait_until(predicate, timeout, sleep_for, *args, **kwargs):
max_time = time.time() + timeout
while time.time() < max_time:
pred_result = predicate(*args, **kwargs)
if pred_result:
return pred_result
time.sleep(sleep_for)
return predicate(*args, **kwargs)
def wait_until_pid_not_exist(pid, timeout=7, sleep_for=1):
return wait_until(lambda p: not psutil.pid_exists(p), timeout, sleep_for, pid)
def web_request_ok(url):
import requests
try:
return requests.get(url).ok
except requests.exceptions.ConnectionError:
return False
def wait_until_web_request_ok(url, timeout=10, sleep_for=1):
return wait_until(web_request_ok, timeout, sleep_for, url)
def wait_until_path_exist(path, timeout=7, sleep_for=1):
return wait_until(os.path.exists, timeout, sleep_for, path)
def json_file_fn(json_file_path, fn):
if not os.path.isfile(json_file_path):
return False
try:
file_data = json.loads(Path(json_file_path).read_text())
except (json.decoder.JSONDecodeError, OSError):
return False
else:
return fn(file_data)
def get_rec_file(log_dir):
return os.path.join(get_flame_debug_dir(log_dir), 'flame.rec')
def find_rec_file(log_dir):
    # Formerly used for backwards compatibility; now an alias for get_rec_file
return get_rec_file(log_dir)
def get_flame_url(port, hostname=None):
if hostname is None:
from socket import gethostname
hostname = gethostname()
return 'http://%s:%d' % (hostname, int(port))
class PathNotFoundException(Exception):
pass
def find(keys, input_dict, raise_error=False):
result = input_dict
for key in keys:
try:
result = result[key]
except Exception:
if raise_error:
raise PathNotFoundException()
return None
return result
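# Illustrative behaviour (added): find(['a', 'b'], {'a': {'b': 3}}) returns 3; a missing path
# returns None unless raise_error=True, in which case PathNotFoundException is raised.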
def filter_paths(input_dict, paths_to_values):
results = {}
for in_key, in_vals in input_dict.items():
results[in_key] = []
for in_val in in_vals:
matches_all = all(to_equal == find(p, in_val) for p, to_equal in paths_to_values.items())
if matches_all:
results[in_key].append(in_val)
return results
def kill_flame(log_dir, sig=signal.SIGKILL, timeout=10):
flame_pid = get_flame_pid(log_dir)
kill_and_wait(flame_pid, sig, timeout)
return flame_pid
def kill_and_wait(pid, sig=signal.SIGKILL, timeout=10):
if psutil.pid_exists(pid):
os.kill(pid, sig)
wait_until_pid_not_exist(pid, timeout=timeout)
return not psutil.pid_exists(pid)
def create_rel_symlink(existing_path, symlink, target_is_directory=False):
rel_new_file = os.path.relpath(existing_path, start=os.path.dirname(symlink))
os.symlink(rel_new_file, symlink, target_is_directory=target_is_directory)
class BrokerConsumerConfig:
def __init__(self, max_retry_attempts, receiver_ready_file, terminate_on_complete):
self.max_retry_attempts = max_retry_attempts
self.receiver_ready_file = receiver_ready_file
self.terminate_on_complete = terminate_on_complete
def is_json_file(file_path):
try:
json.loads(Path(file_path).read_text())
except json.decoder.JSONDecodeError:
return False
else:
return True
def _both_instance(o1, o2, _type):
return isinstance(o1, _type) and isinstance(o2, _type)
def deep_merge(dict1, dict2):
result = dict(dict1)
for d2_key in dict2:
if d2_key in dict1:
v1 = dict1[d2_key]
v2 = dict2[d2_key]
if _both_instance(v1, v2, dict):
result[d2_key] = deep_merge(v1, v2)
elif _both_instance(v1, v2, list):
result[d2_key] = v1 + v2
elif _both_instance(v1, v2, set):
result[d2_key] = v1.union(v2)
elif v1 == v2:
# already the same value in both dicts, take from either.
result[d2_key] = v1
else:
# Both d1 and d2 have entries for d2_key, both entries are not dicts or lists or sets,
# and the values are not the same. This is a conflict.
# Overwrite d1's value to simulate dict.update() behaviour.
result[d2_key] = v2
else:
# New key for d1, just add it.
result[d2_key] = dict2[d2_key]
return result
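# Illustrative behaviour (added): nested dicts are merged recursively, e.g.
# deep_merge({'a': {'x': 1}}, {'a': {'y': 2}}) == {'a': {'x': 1, 'y': 2}}, while conflicting
# scalar values take the second dict's value, mirroring dict.update().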
def _validate_task_queries(task_representation):
if not isinstance(task_representation, list):
return False
missing_criterias = [r for r in task_representation
if 'matchCriteria' not in r or not isinstance(r['matchCriteria'], dict)]
if missing_criterias:
return False
# TODO: validate matchCriteria themselves
return True
def _normalize_criteria_key(k):
return k[1:] if k.startswith('?') else k
def task_matches_criteria(task: dict, criteria: dict):
if criteria['type'] == 'all':
return True
if criteria['type'] == 'always-select-fields':
# always-select-fields doesn't cause matches (tasks to be included), but paths here are always included
# in results.
return False
if criteria['type'] == 'equals':
criteria_val = criteria['value']
# TODO: if more adjusting qualifiers are added, this needs to be reworked.
required_keys = {k for k in criteria_val.keys() if not k.startswith('?')}
optional_keys = {_normalize_criteria_key(k) for k in criteria_val.keys() if k.startswith('?')}
present_required_keys = required_keys.intersection(task.keys())
if len(required_keys) != len(present_required_keys):
return False
present_optional_keys = optional_keys.intersection(task.keys())
normalized_criteria = {_normalize_criteria_key(k): v for k, v in criteria_val.items()}
for k in present_required_keys.union(present_optional_keys):
if task[k] != normalized_criteria[k]:
return False
return True
return False
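# Illustrative criteria (added, values assumed): {'type': 'equals', 'value': {'name': 'RootTask', '?state': 'success'}}
# matches tasks whose 'name' equals 'RootTask' and whose 'state', when present, equals 'success';
# keys prefixed with '?' are optional.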
def _create_dict_with_path_val(path_list, val):
r = {}
lastest_dict = r
for i, e in enumerate(path_list):
is_last = i == len(path_list) - 1
if is_last:
lastest_dict[e] = val
else:
lastest_dict[e] = {}
lastest_dict = lastest_dict[e]
return r
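# For example (added): _create_dict_with_path_val(['a', 'b', 'c'], 1) returns {'a': {'b': {'c': 1}}},
# which deep_merge then folds into the accumulated selection result.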
def _get_paths_from_task(paths, task):
r = {}
for path in paths:
try:
path_list = path.split('.')
val = find(path_list, task, raise_error=True)
except PathNotFoundException:
# Don't update the results dict if the current task doesn't have the path.
pass
else:
r = deep_merge(r, _create_dict_with_path_val(path_list, val))
return r
def _get_child_tasks_by_uuid(parent_uuid, all_tasks_by_uuid):
return {u: t for u, t in all_tasks_by_uuid.items() if t['parent_id'] == parent_uuid}
def _get_descendants(uuid, all_tasks_by_uuid):
descendants_by_uuid = _get_child_tasks_by_uuid(uuid, all_tasks_by_uuid)
uuids_to_check = list(descendants_by_uuid.keys())
while uuids_to_check:
cur_descendant_uuid = uuids_to_check.pop()
cur_descendant_children_by_uuid = _get_child_tasks_by_uuid(cur_descendant_uuid, all_tasks_by_uuid)
descendants_by_uuid.update(cur_descendant_children_by_uuid)
uuids_to_check += list(cur_descendant_children_by_uuid.keys())
return descendants_by_uuid
def _get_descendants_for_criteria(select_paths, descendant_criteria, ancestor_uuid, task_graph: FlameTaskGraph):
ancestor_descendants = task_graph.descendants_by_uuid[ancestor_uuid]
matched_descendants_by_uuid = {}
for criteria in descendant_criteria:
for descendant in ancestor_descendants:
if task_matches_criteria(descendant, criteria):
# Need no_descendants=True to prevent infinite loops.
# The fields that are selected for each descendant are determined by all queries, except
# descendant descendants are never included.
matched_descendants_by_uuid[descendant['uuid']] = select_from_task(
select_paths,
[], # Never include descendants in descendant queries to avoid infinite loop.
descendant,
task_graph)
return matched_descendants_by_uuid
def select_from_task(select_paths, select_descendants, task, task_graph: FlameTaskGraph):
selected_dict = {}
paths_update_dict = _get_paths_from_task(select_paths, task)
selected_dict.update(paths_update_dict)
selected_descendants_by_uuid = _get_descendants_for_criteria(select_paths, select_descendants, task['uuid'],
task_graph)
if selected_descendants_by_uuid:
selected_dict.update({'descendants': selected_descendants_by_uuid})
return selected_dict
def flatten(l):
return [item for sublist in l for item in sublist]
def get_always_select_fields(task_queries):
return flatten([q.get('selectPaths', []) for q in task_queries
if q['matchCriteria']['type'] == 'always-select-fields'])
def select_ancestor_of_task_descendant_match(uuid, query, select_paths, task_graph: FlameTaskGraph):
# Should the current task be included in the result because it matches some descendant criteria?
task = task_graph.tasks_by_uuid[uuid]
matching_criteria = [criteria for criteria in query.get('selectDescendants', [])
if task_matches_criteria(task, criteria)]
if matching_criteria:
# The current task matches some descendant criteria. Confirm that some ancestor matches the top-level
# criteria.
ancestor = next((a for a in task_graph.ancestors_by_uuid[uuid]
if task_matches_criteria(a, query['matchCriteria'])), None)
if ancestor:
# The current task and its ancestor should be included in the result.
return ancestor['uuid'], select_from_task(select_paths, matching_criteria, ancestor, task_graph)
return None, {}
def _get_children_by_uuid(tasks_by_uuid):
children_by_uuid = {}
for u, t in tasks_by_uuid.items():
if u not in children_by_uuid:
# Ensure every UUID has an entry in the result, even UUIDs with no children.
children_by_uuid[u] = []
# TODO: consider handling tasks with no 'parent_id' differently from tasks with None 'parent_id',
# since the latter case is the root task and the former seems inexplicable.
parent_id = t.get('parent_id')
if parent_id is not None:
if parent_id not in children_by_uuid:
children_by_uuid[parent_id] = []
children_by_uuid[parent_id].append(t)
return children_by_uuid
def _create_task_graph(tasks_by_uuid):
children_by_uuid = _get_children_by_uuid(tasks_by_uuid)
descendant_uuids_by_uuid = {}
ancestor_uuids_by_uuid = {}
root_task = next((t for t in tasks_by_uuid.values() if t['parent_id'] is None), None)
if root_task:
tasks_to_check = [root_task]
while tasks_to_check:
cur_task = tasks_to_check.pop()
if cur_task['uuid'] not in ancestor_uuids_by_uuid:
ancestor_uuids_by_uuid[cur_task['uuid']] = set()
cur_task_ancestor_uuids = ancestor_uuids_by_uuid[cur_task['uuid']]
# The task tree is being walked top-down, so it's safe to expect ancestors to be populated.
if cur_task.get('parent_id') is not None and cur_task['parent_id'] in ancestor_uuids_by_uuid:
# This task's ancestors are its parent's ancestors plus its parent.
ancestor_uuids = ancestor_uuids_by_uuid[cur_task['parent_id']].union([cur_task['parent_id']])
cur_task_ancestor_uuids.update(ancestor_uuids)
# Update ancestors of additional children.
additional_children_uuids = cur_task.get(ADDITIONAL_CHILDREN_KEY, [])
for additional_child_uuid in additional_children_uuids:
if additional_child_uuid not in ancestor_uuids_by_uuid:
ancestor_uuids_by_uuid[additional_child_uuid] = set()
ancestor_uuids_by_uuid[additional_child_uuid].update(cur_task_ancestor_uuids)
descendant_uuids_by_uuid[cur_task['uuid']] = set(additional_children_uuids)
for ancestor_uuid in cur_task_ancestor_uuids:
descendant_uuids_by_uuid[ancestor_uuid].add(cur_task['uuid'])
descendant_uuids_by_uuid[ancestor_uuid].update(additional_children_uuids)
# traverse the graph via real children only, not additional_children.
tasks_to_check.extend(children_by_uuid[cur_task['uuid']])
ancestors_by_uuid = {u: [tasks_by_uuid[au] for au in ancestor_uuids if au in tasks_by_uuid]
for u, ancestor_uuids in ancestor_uuids_by_uuid.items()}
descendants_by_uuid = {u: [tasks_by_uuid[du] for du in descendant_uuids if du in tasks_by_uuid]
for u, descendant_uuids in descendant_uuids_by_uuid.items()}
return FlameTaskGraph(tasks_by_uuid, ancestors_by_uuid, descendants_by_uuid)
def select_data_for_matches(task_uuid, task_queries, task_graph: FlameTaskGraph, match_descendant_criteria):
result_tasks_by_uuid = {}
always_select_fields = get_always_select_fields(task_queries)
for query in task_queries:
task = task_graph.tasks_by_uuid[task_uuid]
matches_criteria = task_matches_criteria(task, query['matchCriteria'])
select_paths = always_select_fields + query.get('selectPaths', [])
updates_by_uuid = {}
if matches_criteria:
updates_by_uuid[task_uuid] = select_from_task(select_paths, query.get('selectDescendants', []), task,
task_graph)
if match_descendant_criteria:
uuid, task_update = select_ancestor_of_task_descendant_match(task_uuid, query, select_paths, task_graph)
if uuid:
updates_by_uuid[uuid] = task_update
if updates_by_uuid:
result_tasks_by_uuid = deep_merge(result_tasks_by_uuid, updates_by_uuid)
return result_tasks_by_uuid
def _query_flame_tasks(task_uuids_to_query, task_queries, all_tasks_by_uuid, match_descendant_criteria):
if not _validate_task_queries(task_queries):
return {}
task_graph = _create_task_graph(all_tasks_by_uuid)
result_tasks_by_uuid = {}
for uuid in task_uuids_to_query:
selected_tasks_by_uuid = select_data_for_matches(uuid, task_queries, task_graph, match_descendant_criteria)
result_tasks_by_uuid = deep_merge(result_tasks_by_uuid, selected_tasks_by_uuid)
return result_tasks_by_uuid
def query_full_tasks(all_tasks_by_uuid, task_queries):
# When querying a full set of tasks, descendants will be included when their ancestors are matched.
return _query_flame_tasks(all_tasks_by_uuid.keys(), task_queries, all_tasks_by_uuid,
match_descendant_criteria=False)
def query_partial_tasks(task_uuids_to_query, task_queries, all_tasks_by_uuid):
# When querying a partial set of tasks, count descendants as matches to be included in the result.
return _query_flame_tasks(task_uuids_to_query, task_queries, all_tasks_by_uuid, match_descendant_criteria=True)
def get_dict_json_md5(query_config):
import hashlib
return hashlib.md5(json.dumps(query_config, sort_keys=True).encode('utf-8')).hexdigest()
|
py | 1a33d35cfde7c84c59de3d5695503b9aecb0f98c | import torch
from torch import nn
from utils.operator import gradient
def activation_name(activation: nn.Module) -> str:
if activation is nn.Tanh:
return "tanh"
elif activation is nn.ReLU or activation is nn.ELU or activation is nn.GELU:
return "relu"
elif activation is nn.SELU:
return "selu"
elif activation is nn.LeakyReLU:
return "leaky_relu"
elif activation is nn.Sigmoid:
return "sigmoid"
return "linear"
def linear_layer_with_init(width, height, init=nn.init.xavier_uniform_, activation=None) -> nn.Linear:
linear = nn.Linear(width, height)
if init is None or activation is None:
return linear
init(linear.weight, gain=nn.init.calculate_gain(activation_name(activation)))
return linear
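# Illustrative usage (added, values assumed): linear_layer_with_init(3, 64, activation=nn.Tanh)
# builds nn.Linear(3, 64) with Xavier-uniform weights scaled by the gain recommended for tanh.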
class Base(nn.Module):
@torch.no_grad()
def predict(self, x):
return self(x)
@torch.no_grad()
def test(self, x, true_sdf):
sdf_predict = self(x)
        return nn.MSELoss()(sdf_predict, true_sdf)  # mean squared error between prediction and ground truth
def test_norm_gradient(self, x, true_norm_grad):
x.requires_grad_(True)
y = self(x)
norm_grad = torch.linalg.norm(gradient(y, x, create_graph=False), dim=1)
x.requires_grad_(False)
with torch.no_grad():
return nn.MSELoss()(norm_grad, true_norm_grad)
def test_residual(self, x):
x.requires_grad_(True)
y = self(x)
norm_grad = torch.linalg.norm(gradient(y, x, create_graph=False), dim=1)
x.requires_grad_(False)
with torch.no_grad():
return torch.mean((norm_grad - 1).abs())
def print_loss(self, verbose=False) -> None:
keys = [
"_loss",
"_loss_SDF",
"_loss_residual",
"_loss_residual_constraint",
"_loss_normal",
"_loss_cosine_similarity",
]
_loss_str = "Loss: "
for key in keys:
if hasattr(self, key):
_loss_str += f"{getattr(self, key):.6f} "
else:
_loss_str += "na "
if verbose:
print(_loss_str)
return _loss_str
# Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations
# Raissi, Maziar, Paris Perdikaris, and George E. Karniadakis
class PINN(Base):
def loss_residual(self, p):
"""
Calculate residual from gradients, :attr:`p`
Args:
- :attr:`p`: tensor of gradient
Example:
```
y = model(x)
p = gradient(y, x)
model.loss_residual(p)
```
"""
norm_p = torch.linalg.norm(p, dim=1)
self._loss_residual = torch.mean((norm_p - 1) ** 2)
return self._loss_residual
def loss_residual_constraint(self, p):
"""
Calculate loss from gradient, :attr:`p`
`ReLU(norm(p) - 1)`
Args:
- :attr:`p`: tensor of gradient
Example:
```
y = model(x)
p = gradient(y, x)
model.loss_residual_constraint(p)
```
"""
norm_p = torch.linalg.norm(p, dim=1)
self._loss_residual_constraint = torch.mean(torch.nn.ReLU()(norm_p - 1))
return self._loss_residual_constraint
def loss_cosine_similarity(self, p, grad):
"""
Calculate loss from gradient of model (:attr:`p`) and training data (:attr:`grad`)
`torch.dot(p,grad)/(norm(p)*norm(grad))`
Args:
- :attr:`p`: tensor of gradient
- :attr:`grad`: tensor of target gradient
Example:
```
y = model(x)
p = gradient(y, x)
model.loss_cosine_similarity(p, grad)
```
"""
norm_p = torch.linalg.norm(p, dim=1)
norm_g = torch.linalg.norm(grad, dim=1)
self._loss_cosine_similarity = torch.mean(-torch.einsum("ij,ij->i", p, grad) / norm_p / norm_g)
return self._loss_cosine_similarity
def loss_SDF(self, y, sdf):
"""
Calculate loss from predicted SDF from model (:attr:`y`)
and SDF from training data (:attr:`sdf`)
`MSE(y, sdf)`
Args:
- :attr:`y`: predicted SDF
- :attr:`sdf`: target SDF
Example:
```
y = model(x)
model.loss_SDF(y, sdf)
```
"""
self._loss_SDF = torch.nn.MSELoss()(y, sdf)
return self._loss_SDF
def loss_normal(self, p, grad):
"""
Calculate loss from gradient of model (:attr:`p`) and training data (:attr:`grad`)
`MSE(p, (grad / norm(grad)))`
Args:
- :attr:`p`: predicted gradient
- :attr:`grad`: target gradient
Example:
```
y = model(x)
p = gradient(y, x)
model.loss_normal(p, grad)
```
"""
norm_grad = torch.linalg.norm(grad, dim=1)
normal = grad / norm_grad
self._loss_normal = torch.nn.MSELoss()(p, normal)
return self._loss_normal
|
py | 1a33d45f3b567928aa33e525fe19940d9911d137 | # GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import RescanIndicator
|
bzl | 1a33d4fb7a7ac4eeab6c27c0e412442428e16f0a | """Some macros for building go test data."""
load("//testlib:expose_genfile.bzl", "expose_genfile")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
def pb_go_proto_library(name, proto, genfile, visibility = None):
go_proto_library(
name = name,
proto = proto,
importpath = native.package_name() + "/" + name,
visibility = visibility,
)
native.filegroup(
name = name + "_src",
srcs = [":" + name],
output_group = "go_generated_srcs",
)
expose_genfile(
name = name + "_exposed_src",
genfile = genfile,
genfile_orig = name + "/" + genfile,
deps = [":" + name + "_src"],
)
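# Illustrative BUILD usage of the macro above (target names are assumptions, not from this repo):
#   pb_go_proto_library(
#       name = "example_go_proto",
#       proto = ":example_proto",
#       genfile = "example.pb.go",
#   )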
def pb_go_library(**kwargs):
importpath = native.package_name() + "/" + kwargs["name"]
go_library(importpath = importpath, **kwargs)
def resources_package_name():
name = native.package_name()
if not name.endswith("/resources"):
name = name + "/resources"
return name
def resources_import_prefix():
return ""
|
py | 1a33d561002a6b407c4a0a41de782a01119d8199 | from datetime import date, datetime
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from dotenv import load_dotenv
import json
import os
import requests
from requests.auth import HTTPBasicAuth
import signal
import sys
from webexteamsbot import TeamsBot
from webexteamsbot.models import Response
load_dotenv()
# Retrieve required details from environment variables
bot_email = os.getenv("PSA_BOT_EMAIL")
teams_token = os.getenv("PSA_BOT_TOKEN")
bot_url = os.getenv("PSA_BOT_URL")
bot_app_name = os.getenv("PSA_BOT_APP_NAME")
bot_scripts_api_user = os.getenv("PSA_BOT_SCRIPTS_API_USER")
bot_scripts_api_pass = os.getenv("PSA_BOT_SCRIPTS_API_PASS")
# If any of the bot environment variables are missing, terminate the app
if not bot_email or not teams_token or not bot_url or not bot_app_name:
print(
"sample.py - Missing Environment Variable. Please see the 'Usage'"
" section in the README."
)
    if not bot_email:
        print("PSA_BOT_EMAIL")
    if not teams_token:
        print("PSA_BOT_TOKEN")
    if not bot_url:
        print("PSA_BOT_URL")
    if not bot_app_name:
        print("PSA_BOT_APP_NAME")
sys.exit()
# Create a Bot Object
bot = TeamsBot(
bot_app_name,
teams_bot_token=teams_token,
teams_bot_url=bot_url,
teams_bot_email=bot_email,
debug=True,
webhook_resource_event=[
{"resource": "messages", "event": "created"},
{"resource": "attachmentActions", "event": "created"},
],
)
def create_message_with_attachment(rid, msgtxt, attachment):
headers = {
"content-type": "application/json; charset=utf-8",
"authorization": "Bearer " + teams_token,
}
url = "https://api.ciscospark.com/v1/messages"
data = {"roomId": rid, "attachments": [attachment], "markdown": msgtxt}
response = requests.post(url, json=data, headers=headers)
return response.json()
def get_attachment_actions(attachmentid):
headers = {
"content-type": "application/json; charset=utf-8",
"authorization": "Bearer " + teams_token,
}
url = "https://api.ciscospark.com/v1/attachment/actions/" + attachmentid
response = requests.get(url, headers=headers)
return response.json()
def create_outlook_meeting(reminder_info):
headers = {"Authorization": "Bearer " + o365_token}
url = 'https://graph.microsoft.com/v1.0/me/events'
if reminder_info["reminder_type"] == "days":
reminder_date = date.today() + relativedelta(days=+int(reminder_info["reminder_num"]))
elif reminder_info["reminder_type"] == "weeks":
reminder_date = date.today() + relativedelta(weeks=+int(reminder_info["reminder_num"]))
elif reminder_info["reminder_type"] == "months":
reminder_date = date.today() + relativedelta(months=+int(reminder_info["reminder_num"]))
payload = {}
payload["subject"] = reminder_info["account"] + ": " + reminder_info["purpose"]
startDateTime = reminder_date.strftime("%Y-%m-%d") + "T00:00:00.000"
payload["start"] = {"dateTime": startDateTime, "timeZone": "America/New_York"}
endDateTime = (reminder_date + relativedelta(days=+1)).strftime("%Y-%m-%d") + "T00:00:00.000"
payload["end"] = {"dateTime": endDateTime, "timeZone": "America/New_York"}
payload["isAllDay"] = True
payload["showAs"] = "free"
r = requests.post(url, json=payload, headers=headers)
return r.status_code
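# Illustrative input (added, values assumed): reminder_info = {"account": "Acme Corp",
# "purpose": "SO12345 - renewal", "reminder_num": "2", "reminder_type": "weeks"} creates an
# all-day, show-as-free Outlook event two weeks from today via the Microsoft Graph /me/events API.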
def handle_cards(api, incoming_msg):
m = get_attachment_actions(incoming_msg["data"]["id"])
card_type = m["inputs"]["card_type"]
if card_type == "add_reminder":
print("Reminder info sent: ")
print(m["inputs"])
status_code = create_outlook_meeting(m["inputs"])
print(status_code)
if status_code == 201:
return "Reminder scheduled successfully!"
else:
return "Error occurred during scheduling."
# Create a custom bot greeting function returned when no command is given.
# The default behavior of the bot is to return the '/help' command response
def greeting(incoming_msg):
    # Look up details about sender
sender = bot.teams.people.get(incoming_msg.personId)
# Create a Response object and craft a reply in Markdown.
response = Response()
response.markdown = "Hello {}, I'm a chat bot. ".format(sender.firstName)
response.markdown += "See what I can do by asking for **/help**."
return response
def show_reminder_card(incoming_msg):
attachment = """
{
"contentType": "application/vnd.microsoft.card.adaptive",
"content": {
"type": "AdaptiveCard",
"$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
"version": "1.2",
"body": [
{
"type": "ColumnSet",
"columns": [
{
"type": "Column",
"width": "stretch",
"items": [
{
"type": "TextBlock",
"size": "Large",
"text": "Add a Reminder"
}
]
}
]
},
{
"type": "Container",
"items": [
{
"type": "TextBlock",
"text": "Account",
"size": "Small"
},
{
"type": "Input.Text",
"placeholder": "Customer Name",
"id": "account"
}
]
},
{
"type": "Container",
"items": [
{
"type": "TextBlock",
"text": "Purpose",
"size": "Small"
},
{
"type": "Input.Text",
"placeholder": "DID/SO# - Description",
"id": "purpose"
}
]
},
{
"type": "Container",
"items": [
{
"type": "TextBlock",
"text": "Remind Me",
"size": "Small"
},
{
"type": "ColumnSet",
"columns": [
{
"type": "Column",
"width": 20,
"items": [
{
"type": "Input.Number",
"max": 999,
"min": 1,
"value": 30,
"id": "reminder_num"
}
]
},
{
"type": "Column",
"width": 30,
"items": [
{
"type": "Input.ChoiceSet",
"choices": [
{
"title": "Day(s)",
"value": "days"
},
{
"title": "Week(s)",
"value": "weeks"
},
{
"title": "Month(s)",
"value": "months"
}
],
"placeholder": "days",
"value": "days",
"id": "reminder_type"
}
]
},
{
"type": "Column",
"width": 40
}
]
}
]
},
{
"type": "Input.Text",
"isVisible": false,
"id": "card_type",
"value": "add_reminder"
},
{
"type": "ActionSet",
"actions": [
{
"type": "Action.Submit",
"title": "Submit"
}
]
}
]
}
}
"""
backupmessage = "This is an example using Adaptive Cards."
c = create_message_with_attachment(
incoming_msg.roomId, msgtxt=backupmessage, attachment=json.loads(attachment)
)
print(c)
return ""
def show_case_info_card(incoming_msg, case_info):
    ####################################################
    # REDACTED - contact [email protected]
    ####################################################
    pass  # body redacted in the source; placeholder keeps the module syntactically valid
def get_case_info(srid):
    ####################################################
    # REDACTED - contact [email protected]
    ####################################################
    pass  # body redacted in the source; placeholder keeps the module syntactically valid
def case_status(incoming_msg):
    ####################################################
    # REDACTED - contact [email protected]
    ####################################################
    pass  # body redacted in the source; placeholder keeps the module syntactically valid
# Set the bot greeting.
bot.set_greeting(greeting)
# Add commands
bot.add_command("attachmentActions", "*", handle_cards)
bot.add_command("/reminder", "Schedule a reminder", show_reminder_card)
bot.add_command("/status", "/status <SR#> to get the case status of that SR#", case_status)
# Every bot includes a default "/echo" command. You can remove it, or any
# other command with the remove_command(command) method.
bot.remove_command("/echo")
if __name__ == "__main__":
# Run Bot
bot.run(host="0.0.0.0", port=5000) |
py | 1a33d685bdef176de44744b8274c22452b4f258b | import numpy as np
import copy
from supervised.algorithms.registry import AlgorithmsRegistry
from supervised.algorithms.registry import BINARY_CLASSIFICATION
class HillClimbing:
"""
Example params are in JSON format:
{
"booster": ["gbtree", "gblinear"],
"objective": ["binary:logistic"],
"eval_metric": ["auc", "logloss"],
"eta": [0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.075, 0.1]
}
"""
@staticmethod
def get(params, ml_task, seed=1):
np.random.seed(seed)
keys = list(params.keys())
if "num_class" in keys:
keys.remove("num_class")
keys.remove("model_type")
keys.remove("seed")
keys.remove("ml_task")
model_type = params["model_type"]
if model_type == "Baseline":
return [None, None]
model_info = AlgorithmsRegistry.registry[ml_task][model_type]
model_params = model_info["params"]
permuted_keys = np.random.permutation(keys)
key_to_update = None
for key_to_update in permuted_keys:
values = model_params[key_to_update]
if len(values) > 1:
break
left, right = None, None
for i, v in enumerate(values):
if v == params[key_to_update]:
if i + 1 < len(values):
right = values[i + 1]
if i - 1 >= 0:
left = values[i - 1]
params_1, params_2 = None, None
if left is not None:
params_1 = copy.deepcopy(params)
params_1[key_to_update] = left
if right is not None:
params_2 = copy.deepcopy(params)
params_2[key_to_update] = right
return [params_1, params_2]
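# Illustrative behaviour (added, values assumed): if the randomly chosen key is "eta" with value 0.01
# from the grid above, get() returns two copies of params with "eta" set to the neighbouring grid
# values 0.0075 and 0.025; at a grid edge the missing side is returned as None.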
|
py | 1a33d6c9870c903178338f2ce2c2f57f83ddecba | from setuptools import find_packages, setup
setup(
name='the_keyspy',
packages=find_packages(include=('the_keyspy*',)),
version='0.0.9',
description='The Keys Api',
author='Kevin Bonnoron',
author_email='[email protected]',
url='https://github.com/KevinBonnoron',
download_url='https://github.com/KevinBonnoron/the_keyspy/archive/refs/tags/v0.0.9.tar.gz',
license='MIT',
install_requires=['dataclasses_json', 'requests'],
setup_requires=['pytest-runner'],
tests_require=['pytest==6.2.4'],
test_suite='tests',
)
|
py | 1a33d9f2ab26bf45f649cfa93ef339981de91e76 | from __future__ import absolute_import
# Copyright (c) 2010-2017 openpyxl
# Simplified implementation of headers and footers: let worksheets have separate items
import re
from warnings import warn
from openpyxl.descriptors import (
Alias,
Bool,
Strict,
String,
Integer,
MatchPattern,
Typed,
)
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.compat import unicode
from openpyxl.xml.functions import Element
from openpyxl.utils.escape import escape, unescape
FONT_PATTERN = '&"(?P<font>.+)"'
COLOR_PATTERN = "&K(?P<color>[A-F0-9]{6})"
SIZE_REGEX = r"&(?P<size>\d+\s?)"
FORMAT_REGEX = re.compile("{0}|{1}|{2}".format(FONT_PATTERN, COLOR_PATTERN,
SIZE_REGEX)
)
def _split_string(text):
"""
Split the combined (decoded) string into left, center and right parts
# See http://stackoverflow.com/questions/27711175/regex-with-multiple-optional-groups for discussion
"""
ITEM_REGEX = re.compile("""
(&L(?P<left>.+?))?
(&C(?P<center>.+?))?
(&R(?P<right>.+?))?
$""", re.VERBOSE | re.DOTALL)
m = ITEM_REGEX.match(text)
try:
parts = m.groupdict()
except AttributeError:
warn("""Cannot parse header or footer so it will be ignored""")
parts = {'left':'', 'right':'', 'center':''}
return parts
class _HeaderFooterPart(Strict):
"""
Individual left/center/right header/footer part
Do not use directly.
Header & Footer ampersand codes:
* &A Inserts the worksheet name
* &B Toggles bold
* &D or &[Date] Inserts the current date
* &E Toggles double-underline
* &F or &[File] Inserts the workbook name
* &I Toggles italic
* &N or &[Pages] Inserts the total page count
* &S Toggles strikethrough
* &T Inserts the current time
* &[Tab] Inserts the worksheet name
* &U Toggles underline
* &X Toggles superscript
* &Y Toggles subscript
* &P or &[Page] Inserts the current page number
* &P+n Inserts the page number incremented by n
* &P-n Inserts the page number decremented by n
* &[Path] Inserts the workbook path
* && Escapes the ampersand character
* &"fontname" Selects the named font
* &nn Selects the specified 2-digit font point size
Colours are in RGB Hex
"""
text = String(allow_none=True)
font = String(allow_none=True)
size = Integer(allow_none=True)
RGB = ("^[A-Fa-f0-9]{6}$")
color = MatchPattern(allow_none=True, pattern=RGB)
def __init__(self, text=None, font=None, size=None, color=None):
self.text = text
self.font = font
self.size = size
self.color = color
def __str__(self):
"""
Convert to Excel HeaderFooter miniformat minus position
"""
fmt = []
if self.font:
fmt.append(u'&"{0}"'.format(self.font))
if self.size:
fmt.append("&{0} ".format(self.size))
if self.color:
fmt.append("&K{0}".format(self.color))
return u"".join(fmt + [self.text])
def __bool__(self):
return bool(self.text)
__nonzero__ = __bool__
@classmethod
def from_str(cls, text):
"""
Convert from miniformat to object
"""
keys = ('font', 'color', 'size')
kw = dict((k, v) for match in FORMAT_REGEX.findall(text)
for k, v in zip(keys, match) if v)
kw['text'] = FORMAT_REGEX.sub('', text)
return cls(**kw)
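    # Illustrative parse (added, input assumed): _HeaderFooterPart.from_str('&"Arial"&14Total')
    # yields a part with font='Arial', size=14 and text='Total'.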
class HeaderFooterItem(Strict):
"""
Header or footer item
"""
left = Typed(expected_type=_HeaderFooterPart)
center = Typed(expected_type=_HeaderFooterPart)
centre = Alias("center")
right = Typed(expected_type=_HeaderFooterPart)
__keys = ('L', 'C', 'R')
def __init__(self, left=None, right=None, center=None):
if left is None:
left = _HeaderFooterPart()
self.left = left
if center is None:
center = _HeaderFooterPart()
self.center = center
if right is None:
right = _HeaderFooterPart()
self.right = right
def __str__(self):
"""
Pack parts into a single string
"""
TRANSFORM = {'&[Tab]': '&A', '&[Pages]': '&N', '&[Date]': '&D',
'&[Path]': '&Z', '&[Page]': '&P', '&[Time]': '&T', '&[File]': '&F',
'&[Picture]': '&G'}
# escape keys and create regex
SUBS_REGEX = re.compile("|".join(["({0})".format(re.escape(k))
for k in TRANSFORM]))
def replace(match):
"""
Callback for re.sub
Replace expanded control with mini-format equivalent
"""
sub = match.group(0)
return TRANSFORM[sub]
txt = []
for key, part in zip(
self.__keys, [self.left, self.center, self.right]):
if part.text is not None:
txt.append(u"&{0}{1}".format(key, unicode(part)))
txt = "".join(txt)
txt = SUBS_REGEX.sub(replace, txt)
return escape(txt)
def __bool__(self):
return any([self.left, self.center, self.right])
__nonzero__ = __bool__
def to_tree(self, tagname):
"""
Return as XML node
"""
el = Element(tagname)
el.text = unicode(self)
return el
@classmethod
def from_tree(cls, node):
if node.text:
text = unescape(node.text)
parts = _split_string(text)
for k, v in parts.items():
if v is not None:
parts[k] = _HeaderFooterPart.from_str(v)
self = cls(**parts)
return self
class HeaderFooter(Serialisable):
tagname = "headerFooter"
differentOddEven = Bool(allow_none=True)
differentFirst = Bool(allow_none=True)
scaleWithDoc = Bool(allow_none=True)
alignWithMargins = Bool(allow_none=True)
oddHeader = Typed(expected_type=HeaderFooterItem, allow_none=True)
oddFooter = Typed(expected_type=HeaderFooterItem, allow_none=True)
evenHeader = Typed(expected_type=HeaderFooterItem, allow_none=True)
evenFooter = Typed(expected_type=HeaderFooterItem, allow_none=True)
firstHeader = Typed(expected_type=HeaderFooterItem, allow_none=True)
firstFooter = Typed(expected_type=HeaderFooterItem, allow_none=True)
__elements__ = ("oddHeader", "oddFooter", "evenHeader", "evenFooter", "firstHeader", "firstFooter")
def __init__(self,
differentOddEven=None,
differentFirst=None,
scaleWithDoc=None,
alignWithMargins=None,
oddHeader=None,
oddFooter=None,
evenHeader=None,
evenFooter=None,
firstHeader=None,
firstFooter=None,
):
self.differentOddEven = differentOddEven
self.differentFirst = differentFirst
self.scaleWithDoc = scaleWithDoc
self.alignWithMargins = alignWithMargins
if oddHeader is None:
oddHeader = HeaderFooterItem()
self.oddHeader = oddHeader
if oddFooter is None:
oddFooter = HeaderFooterItem()
self.oddFooter = oddFooter
if evenHeader is None:
evenHeader = HeaderFooterItem()
self.evenHeader = evenHeader
if evenFooter is None:
evenFooter = HeaderFooterItem()
self.evenFooter = evenFooter
if firstHeader is None:
firstHeader = HeaderFooterItem()
self.firstHeader = firstHeader
if firstFooter is None:
firstFooter = HeaderFooterItem()
self.firstFooter = firstFooter
def __bool__(self):
parts = [getattr(self, attr) for attr in self.__attrs__ + self.__elements__]
return any(parts)
__nonzero__ = __bool__
|
py | 1a33da13d072947b36cbe55cc5105059b836bcd0 | import os
from django.core.management.base import BaseCommand, CommandError
from feeds.utils import import_feed
from feeds.models import Source
class Command(BaseCommand):
help = 'Imports an RSS feed file'
def add_arguments(self, parser):
parser.add_argument('source', default='')
parser.add_argument('feed', default='')
def handle(self, source, feed, *args, **options):
source = Source.objects.get(id=int(source))
feed_path = feed
assert os.path.isfile(feed_path)
with open(feed_path, 'r') as fin:
feed_body = fin.read()
import_feed(source, feed_body=feed_body, content_type="xml", output=self.stdout)
self.stdout.write(self.style.SUCCESS('Finished'))
|
py | 1a33dc8a157b5911d6041010e317f5280e6e48ad | # -*- coding: utf-8 -*
# Gather the information needed to crawl Disasters Accidents tweets
import json
from Config_Disasters_Accidents_od import get_noau_config
from datetime import datetime, timedelta
_, db, r = get_noau_config()  # database configuration
def get_date(date):
    # Get the set of dates
dates = date.split('\n')
date_list = []
for i in dates:
if not len(i.strip()) == 0:
date_list.append(i.strip())
return list(set(date_list))
def get_location(location, gpe):
    # Get the set of locations
locations = location.split('\n')
gpes = gpe.split('\n')
location_list = []
for i in locations:
if not len(i.strip()) == 0:
location_list.append(i.strip())
for j in gpes:
if not len(j.strip()) == 0:
if j.strip not in location_list:
location_list.append(j.strip())
return list(set(location_list))
def get_gpe(gpe):
    # Get the set of GPEs
gpes = gpe.split('\n')
gpe_list = []
for i in gpes:
if not len(i.strip()) == 0:
gpe_list.append(i.strip())
return list(set(gpe_list))
def get_person(person):
    # Get the set of persons
persons = person.split('\n')
person_list = []
for i in persons:
if not len(i.strip()) == 0:
person_list.append(i.strip())
return list(set(person_list))
def get_triggers(trigger):
    # Get the set of event trigger words
triggers = trigger.split('\n')
trigger_list = []
for i in triggers:
if not len(i.strip()) == 0:
trigger_list.append(i.strip())
return list(set(trigger_list))
def get_query_str(event):
    # Build the Twitter query string
trigger = get_triggers(event['event']['trigger'])
date = event['event']['date']
date = date.strip()
temp = datetime.strptime(date, "%Y-%m-%d")
date_since = (temp - timedelta(days=7)).strftime('%Y-%m-%d')
date_until = (temp + timedelta(days=7)).strftime('%Y-%m-%d')
    # Note: the query must look like (xxx OR xxx) (xxx OR xxx) since:yyyy-mm-dd until:yyyy-mm-dd  # location not included for now
    return '(' + ' OR '.join(trigger) + ')' + ' since:' + date_since + ' ' + 'until:' + date_until
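    # For example (added, values assumed): an event dated 2021-06-15 with triggers "flood" and
    # "landslide" yields: (flood OR landslide) since:2021-06-08 until:2021-06-22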
def get_task():
events = db.event_list.find({})
for event in events:
q = get_query_str(event)
message = {'q': q, 'f': ['&f=news', '', '&f=tweets'], 'num': 10000, 'event_id': event['_id']}
print(message)
        # Push the tweet-fetch task info into the Redis database
r.rpush('Disasters_Accidents_od', json.dumps(message))
print('master_Disasters_Accidents_od done!')
if __name__ == '__main__':
get_task()
|
py | 1a33de33d8f47c54a0321055104641bb3de002b3 | from logging import getLogger
from typing import Any, Dict, List, NamedTuple, Optional, Set, Tuple
from django.db import IntegrityError
from django.utils import timezone
from eth_account import Account
from packaging.version import Version
from redis import Redis
from web3.exceptions import BadFunctionCallOutput
from gnosis.eth import EthereumClient, EthereumClientProvider
from gnosis.eth.constants import NULL_ADDRESS
from gnosis.safe import ProxyFactory, Safe
from gnosis.safe.exceptions import InvalidMultisigTx, SafeServiceException
from gnosis.safe.signatures import signatures_to_bytes
from safe_relay_service.gas_station.gas_station import (GasStation,
GasStationProvider)
from safe_relay_service.tokens.models import Token
from safe_relay_service.tokens.price_oracles import CannotGetTokenPriceFromApi
from ..models import (BannedSigner, EthereumBlock, EthereumTx, SafeContract,
SafeMultisigTx)
from ..repositories.redis_repository import EthereumNonceLock, RedisRepository
logger = getLogger(__name__)
class TransactionServiceException(Exception):
pass
class SafeDoesNotExist(TransactionServiceException):
pass
class RefundMustBeEnabled(TransactionServiceException):
pass
class InvalidGasToken(TransactionServiceException):
pass
class SignaturesNotFound(TransactionServiceException):
pass
class SignaturesNotSorted(TransactionServiceException):
pass
class SafeMultisigTxExists(TransactionServiceException):
pass
class NotEnoughFundsForMultisigTx(TransactionServiceException):
pass
class InvalidOwners(TransactionServiceException):
pass
class InvalidMasterCopyAddress(TransactionServiceException):
pass
class InvalidProxyContract(TransactionServiceException):
pass
class InvalidRefundReceiver(TransactionServiceException):
pass
class InvalidGasEstimation(TransactionServiceException):
pass
class GasPriceTooLow(TransactionServiceException):
pass
class SignerIsBanned(TransactionServiceException):
pass
class TransactionEstimationWithNonce(NamedTuple):
safe_tx_gas: int
base_gas: int # For old versions it will equal to `data_gas`
data_gas: int # DEPRECATED
operational_gas: int # DEPRECATED
gas_price: int
gas_token: str
last_used_nonce: int
refund_receiver: str
class TransactionGasTokenEstimation(NamedTuple):
base_gas: int # For old versions it will equal to `data_gas`
gas_price: int
gas_token: str
class TransactionEstimationWithNonceAndGasTokens(NamedTuple):
last_used_nonce: int
safe_tx_gas: int
operational_gas: int # DEPRECATED
estimations: List[TransactionGasTokenEstimation]
class TransactionServiceProvider:
def __new__(cls):
if not hasattr(cls, 'instance'):
from django.conf import settings
cls.instance = TransactionService(GasStationProvider(),
EthereumClientProvider(),
RedisRepository().redis,
settings.SAFE_VALID_CONTRACT_ADDRESSES,
settings.SAFE_PROXY_FACTORY_ADDRESS,
settings.SAFE_TX_SENDER_PRIVATE_KEY)
return cls.instance
@classmethod
def del_singleton(cls):
if hasattr(cls, "instance"):
del cls.instance
class TransactionService:
def __init__(self, gas_station: GasStation, ethereum_client: EthereumClient, redis: Redis,
safe_valid_contract_addresses: Set[str], proxy_factory_address: str, tx_sender_private_key: str):
self.gas_station = gas_station
self.ethereum_client = ethereum_client
self.redis = redis
self.safe_valid_contract_addresses = safe_valid_contract_addresses
self.proxy_factory = ProxyFactory(proxy_factory_address, self.ethereum_client)
self.tx_sender_account = Account.from_key(tx_sender_private_key)
def _check_refund_receiver(self, refund_receiver: str) -> bool:
"""
Support tx.origin or relay tx sender as refund receiver.
        This prevents anybody from front-running our service
:param refund_receiver: Payment refund receiver as Ethereum checksummed address
:return: True if refund_receiver is ok, False otherwise
"""
return refund_receiver in (NULL_ADDRESS, self.tx_sender_account.address)
@staticmethod
    def _is_valid_gas_token(address: Optional[str]) -> bool:
"""
:param address: Token address
        :return: True if the address is ether (NULL_ADDRESS) or a registered gas token, False otherwise
"""
address = address or NULL_ADDRESS
if address == NULL_ADDRESS:
return True
try:
Token.objects.get(address=address, gas=True)
return True
except Token.DoesNotExist:
logger.warning('Cannot retrieve gas token from db: Gas token %s not valid', address)
return False
def _check_safe_gas_price(self, gas_token: Optional[str], safe_gas_price: int) -> bool:
"""
Check that `safe_gas_price` is not too low, so that the relay gets a full refund
        for the tx. Gas price must always be > 0; if not, refunding would be disabled
If a `gas_token` is used we need to calculate the `gas_price` in Eth
Gas price must be at least >= _minimum_gas_price_ > 0
:param gas_token: Address of token is used, `NULL_ADDRESS` or `None` if it's ETH
:return:
:exception GasPriceTooLow
:exception InvalidGasToken
"""
if safe_gas_price < 1:
raise RefundMustBeEnabled('Tx internal gas price cannot be 0 or less, it was %d' % safe_gas_price)
minimum_accepted_gas_price = self._get_minimum_gas_price()
estimated_gas_price = self._estimate_tx_gas_price(minimum_accepted_gas_price, gas_token)
if safe_gas_price < estimated_gas_price:
raise GasPriceTooLow('Required gas-price>=%d with gas-token=%s' % (estimated_gas_price, gas_token))
return True
def _estimate_tx_gas_price(self, base_gas_price: int, gas_token: Optional[str] = None) -> int:
if gas_token and gas_token != NULL_ADDRESS:
try:
gas_token_model = Token.objects.get(address=gas_token, gas=True)
estimated_gas_price = gas_token_model.calculate_gas_price(base_gas_price)
except Token.DoesNotExist:
raise InvalidGasToken('Gas token %s not found' % gas_token)
else:
estimated_gas_price = base_gas_price
# FIXME Remove 2 / 3, workaround to prevent frontrunning
return int(estimated_gas_price * 2 / 3)
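# Illustration of the adjustment above (made-up numbers): with a base_gas_price of
# 30 gwei (30_000_000_000 wei) and no gas token, this returns
# int(30_000_000_000 * 2 / 3) == 20_000_000_000 wei (20 gwei). With a gas token,
# `calculate_gas_price` (assumed here to convert the Eth gas price into gas-token
# units) is applied first and the same 2/3 factor is then applied to its result.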
def _get_configured_gas_price(self) -> int:
"""
:return: Gas price for txs
"""
return self.gas_station.get_gas_prices().fast
def _get_minimum_gas_price(self) -> int:
"""
:return: Minimum gas price accepted for txs set by the user
"""
return self.gas_station.get_gas_prices().standard
def get_last_used_nonce(self, safe_address: str) -> Optional[int]:
safe = Safe(safe_address, self.ethereum_client)
last_used_nonce = SafeMultisigTx.objects.get_last_nonce_for_safe(safe_address)
last_used_nonce = last_used_nonce if last_used_nonce is not None else -1
try:
blockchain_nonce = safe.retrieve_nonce()
last_used_nonce = max(last_used_nonce, blockchain_nonce - 1)
if last_used_nonce < 0: # There's no last_used_nonce
last_used_nonce = None
return last_used_nonce
except BadFunctionCallOutput: # If Safe does not exist
raise SafeDoesNotExist(f'Safe={safe_address} does not exist')
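# Example of the logic above: if the DB knows txs up to nonce 4 but the Safe's
# on-chain nonce is already 7, last_used_nonce becomes max(4, 7 - 1) == 6.
# If neither source knows of any executed tx, None is returned.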
def estimate_tx(self, safe_address: str, to: str, value: int, data: bytes, operation: int,
gas_token: Optional[str]) -> TransactionEstimationWithNonce:
"""
:return: TransactionEstimation with costs using the provided gas token and last used nonce of the Safe
:raises: InvalidGasToken: If Gas Token is not valid
"""
if not self._is_valid_gas_token(gas_token):
raise InvalidGasToken(gas_token)
last_used_nonce = self.get_last_used_nonce(safe_address)
safe = Safe(safe_address, self.ethereum_client)
safe_tx_gas = safe.estimate_tx_gas(to, value, data, operation)
safe_tx_base_gas = safe.estimate_tx_base_gas(to, value, data, operation, gas_token, safe_tx_gas)
# For Safe contracts v1.0.0 operational gas is not used (`base_gas` has all the related costs already)
safe_version = safe.retrieve_version()
if Version(safe_version) >= Version('1.0.0'):
safe_tx_operational_gas = 0
else:
safe_tx_operational_gas = safe.estimate_tx_operational_gas(len(data) if data else 0)
# Can throw RelayServiceException
gas_price = self._estimate_tx_gas_price(self._get_configured_gas_price(), gas_token)
return TransactionEstimationWithNonce(safe_tx_gas, safe_tx_base_gas, safe_tx_base_gas, safe_tx_operational_gas,
gas_price, gas_token or NULL_ADDRESS, last_used_nonce,
self.tx_sender_account.address)
def estimate_tx_for_all_tokens(self, safe_address: str, to: str, value: int, data: bytes,
operation: int) -> TransactionEstimationWithNonceAndGasTokens:
"""
:return: TransactionEstimation with costs using ether and every gas token supported by the service,
with the last used nonce of the Safe
:raises: InvalidGasToken: If Gas Token is not valid
"""
safe = Safe(safe_address, self.ethereum_client)
last_used_nonce = self.get_last_used_nonce(safe_address)
safe_tx_gas = safe.estimate_tx_gas(to, value, data, operation)
safe_version = safe.retrieve_version()
if Version(safe_version) >= Version('1.0.0'):
safe_tx_operational_gas = 0
else:
safe_tx_operational_gas = safe.estimate_tx_operational_gas(len(data) if data else 0)
# Calculate `base_gas` for ether and calculate for tokens using the ether token price
ether_safe_tx_base_gas = safe.estimate_tx_base_gas(to, value, data, operation, NULL_ADDRESS, safe_tx_gas)
base_gas_price = self._get_configured_gas_price()
gas_price = self._estimate_tx_gas_price(base_gas_price, NULL_ADDRESS)
gas_token_estimations = [TransactionGasTokenEstimation(ether_safe_tx_base_gas, gas_price, NULL_ADDRESS)]
token_gas_difference = 50000 # 50K gas more expensive than ether
for token in Token.objects.gas_tokens():
try:
gas_price = self._estimate_tx_gas_price(base_gas_price, token.address)
gas_token_estimations.append(
TransactionGasTokenEstimation(ether_safe_tx_base_gas + token_gas_difference,
gas_price, token.address)
)
except CannotGetTokenPriceFromApi:
logger.error('Cannot get price for token=%s', token.address)
return TransactionEstimationWithNonceAndGasTokens(last_used_nonce, safe_tx_gas, safe_tx_operational_gas,
gas_token_estimations)
def create_multisig_tx(self,
safe_address: str,
to: str,
value: int,
data: bytes,
operation: int,
safe_tx_gas: int,
base_gas: int,
gas_price: int,
gas_token: str,
refund_receiver: str,
safe_nonce: int,
signatures: List[Dict[str, int]]) -> SafeMultisigTx:
"""
:return: Database model of SafeMultisigTx
:raises: SafeMultisigTxExists: If Safe Multisig Tx with nonce already exists
:raises: InvalidGasToken: If Gas Token is not valid
:raises: TransactionServiceException: If Safe Tx is not valid (not sorted owners, bad signature, bad nonce...)
"""
safe_contract, _ = SafeContract.objects.get_or_create(address=safe_address,
defaults={'master_copy': NULL_ADDRESS})
created = timezone.now()
if SafeMultisigTx.objects.not_failed().filter(safe=safe_contract, nonce=safe_nonce).exists():
raise SafeMultisigTxExists(f'Tx with safe-nonce={safe_nonce} for safe={safe_address} already exists in DB')
signature_pairs = [(s['v'], s['r'], s['s']) for s in signatures]
signatures_packed = signatures_to_bytes(signature_pairs)
try:
tx_hash, safe_tx_hash, tx = self._send_multisig_tx(
safe_address,
to,
value,
data,
operation,
safe_tx_gas,
base_gas,
gas_price,
gas_token,
refund_receiver,
safe_nonce,
signatures_packed
)
except SafeServiceException as exc:
raise TransactionServiceException(str(exc)) from exc
ethereum_tx = EthereumTx.objects.create_from_tx_dict(tx, tx_hash)
try:
return SafeMultisigTx.objects.create(
created=created,
safe=safe_contract,
ethereum_tx=ethereum_tx,
to=to,
value=value,
data=data,
operation=operation,
safe_tx_gas=safe_tx_gas,
data_gas=base_gas,
gas_price=gas_price,
gas_token=None if gas_token == NULL_ADDRESS else gas_token,
refund_receiver=refund_receiver,
nonce=safe_nonce,
signatures=signatures_packed,
safe_tx_hash=safe_tx_hash,
)
except IntegrityError as exc:
raise SafeMultisigTxExists(f'Tx with safe_tx_hash={safe_tx_hash.hex()} already exists in DB') from exc
def _send_multisig_tx(self,
safe_address: str,
to: str,
value: int,
data: bytes,
operation: int,
safe_tx_gas: int,
base_gas: int,
gas_price: int,
gas_token: str,
refund_receiver: str,
safe_nonce: int,
signatures: bytes,
block_identifier='latest') -> Tuple[bytes, bytes, Dict[str, Any]]:
"""
This function calls the `send_multisig_tx` of the Safe, but adds some restrictions to prevent
abuse of the relay
:return: Tuple(tx_hash, safe_tx_hash, tx)
:raises: InvalidMultisigTx: If user tx cannot go through the Safe
"""
safe = Safe(safe_address, self.ethereum_client)
data = data or b''
gas_token = gas_token or NULL_ADDRESS
refund_receiver = refund_receiver or NULL_ADDRESS
to = to or NULL_ADDRESS
# Make sure refund receiver is 0x0 (gas costs refunded to tx.origin) or the relay tx sender, so refunds cannot be redirected
if not self._check_refund_receiver(refund_receiver):
raise InvalidRefundReceiver(refund_receiver)
self._check_safe_gas_price(gas_token, gas_price)
# Make sure proxy contract is ours
if not self.proxy_factory.check_proxy_code(safe_address):
raise InvalidProxyContract(safe_address)
# Make sure master copy is valid
safe_master_copy_address = safe.retrieve_master_copy_address()
if safe_master_copy_address not in self.safe_valid_contract_addresses:
raise InvalidMasterCopyAddress(safe_master_copy_address)
# Check enough funds to pay for the gas
if not safe.check_funds_for_tx_gas(safe_tx_gas, base_gas, gas_price, gas_token):
raise NotEnoughFundsForMultisigTx
threshold = safe.retrieve_threshold()
number_signatures = len(signatures) // 65 # One signature = 65 bytes
if number_signatures < threshold:
raise SignaturesNotFound('Need at least %d signatures' % threshold)
safe_tx_gas_estimation = safe.estimate_tx_gas(to, value, data, operation)
safe_base_gas_estimation = safe.estimate_tx_base_gas(to, value, data, operation, gas_token,
safe_tx_gas_estimation)
if safe_tx_gas < safe_tx_gas_estimation or base_gas < safe_base_gas_estimation:
raise InvalidGasEstimation("Gas should be at least equal to safe-tx-gas=%d and base-gas=%d. Current is "
"safe-tx-gas=%d and base-gas=%d" %
(safe_tx_gas_estimation, safe_base_gas_estimation, safe_tx_gas, base_gas))
# We use the fast tx gas price, otherwise txs could get stuck
tx_gas_price = self._get_configured_gas_price()
tx_sender_private_key = self.tx_sender_account.key
tx_sender_address = Account.from_key(tx_sender_private_key).address
safe_tx = safe.build_multisig_tx(
to,
value,
data,
operation,
safe_tx_gas,
base_gas,
gas_price,
gas_token,
refund_receiver,
signatures,
safe_nonce=safe_nonce,
safe_version=safe.retrieve_version()
)
owners = safe.retrieve_owners()
signers = safe_tx.signers
if set(signers) - set(owners): # All the signers must be owners
raise InvalidOwners('Signers=%s are not valid owners of the safe. Owners=%s' % (safe_tx.signers, owners))
if signers != safe_tx.sorted_signers:
raise SignaturesNotSorted('Safe-tx-hash=%s - Signatures are not sorted by owner: %s' %
(safe_tx.safe_tx_hash.hex(), safe_tx.signers))
if banned_signers := BannedSigner.objects.filter(address__in=signers):
raise SignerIsBanned(f'Signers {list(banned_signers)} are banned')
logger.info('Safe=%s safe-nonce=%d Check `call()` before sending transaction', safe_address, safe_nonce)
# Set `gasLimit` for `call()`. It will use the same gas that will be used later for execution
tx_gas = safe_tx.recommended_gas()
safe_tx.call(tx_sender_address=tx_sender_address, tx_gas=tx_gas, block_identifier=block_identifier)
with EthereumNonceLock(self.redis, self.ethereum_client, self.tx_sender_account.address,
lock_timeout=60 * 2) as tx_nonce:
logger.info('Safe=%s safe-nonce=%d `call()` was successful', safe_address, safe_nonce)
tx_hash, tx = safe_tx.execute(tx_sender_private_key, tx_gas=tx_gas, tx_gas_price=tx_gas_price,
tx_nonce=tx_nonce, block_identifier=block_identifier)
logger.info('Safe=%s, Sent transaction with nonce=%d tx-hash=%s for safe-tx-hash=%s safe-nonce=%d',
safe_address, tx_nonce, tx_hash.hex(), safe_tx.safe_tx_hash.hex(), safe_tx.safe_nonce)
return tx_hash, safe_tx.safe_tx_hash, tx
def resend(self, gas_price: int, multisig_tx: SafeMultisigTx) -> Optional[EthereumTx]:
"""
Resend the transaction with `gas_price` if it's higher than or equal to the transaction gas price.
An equal `gas_price` is allowed, as a transaction can sometimes drop out of the mempool and
`gas_price` does not need to be increased when resending it
:param gas_price: New gas price for the transaction. Must be >= old gas price
:param multisig_tx: Multisig Tx not mined to be sent again
:return: The new `EthereumTx` if a transaction was sent, `None` if not
"""
assert multisig_tx.ethereum_tx.block_id is None, 'Block is present!'
transaction_receipt = self.ethereum_client.get_transaction_receipt(multisig_tx.ethereum_tx_id)
if transaction_receipt and transaction_receipt['blockNumber']:
logger.info(
'%s tx was already mined on block %d',
multisig_tx.ethereum_tx_id, transaction_receipt['blockNumber']
)
return None
if multisig_tx.ethereum_tx.gas_price > gas_price:
logger.info(
'%s tx gas price is %d > %d. Tx should be mined soon',
multisig_tx.ethereum_tx_id, multisig_tx.ethereum_tx.gas_price, gas_price
)
return None
safe = Safe(multisig_tx.safe_id, self.ethereum_client)
try:
safe_nonce = safe.retrieve_nonce()
if safe_nonce > multisig_tx.nonce:
logger.info(
'%s tx safe nonce is %d and current safe nonce is %d. Transaction is not valid anymore. Deleting',
multisig_tx.ethereum_tx_id, multisig_tx.nonce, safe_nonce
)
multisig_tx.delete() # Transaction is not valid anymore
return None
except (ValueError, BadFunctionCallOutput):
logger.error('Something is wrong with Safe %s, cannot retrieve nonce', multisig_tx.safe_id,
exc_info=True)
return None
logger.info(
'%s tx gas price was %d. Resending with new gas price %d',
multisig_tx.ethereum_tx_id, multisig_tx.ethereum_tx.gas_price, gas_price
)
safe_tx = multisig_tx.get_safe_tx(self.ethereum_client)
tx_gas = safe_tx.recommended_gas()
try:
tx_hash, tx = safe_tx.execute(self.tx_sender_account.key, tx_gas=tx_gas, tx_gas_price=gas_price,
tx_nonce=multisig_tx.ethereum_tx.nonce)
except ValueError as exc:
if exc.args and isinstance(exc.args[0], dict) and 'nonce' in exc.args[0].get('message', ''):
# ValueError({'code': -32010, 'message': 'Transaction nonce is too low. Try incrementing the nonce.'})
try:
# Check that transaction is still valid
safe_tx.call(tx_sender_address=self.tx_sender_account.address, tx_gas=tx_gas)
except InvalidMultisigTx:
# Maybe there's a transaction with a lower nonce that must be mined before
# It doesn't matter, as soon as a transaction with a newer nonce is added it will be deleted
return None
# Send transaction again with a new nonce
with EthereumNonceLock(self.redis, self.ethereum_client, self.tx_sender_account.address,
lock_timeout=60 * 2) as tx_nonce:
tx_hash, tx = safe_tx.execute(self.tx_sender_account.key, tx_gas=tx_gas, tx_gas_price=gas_price,
tx_nonce=tx_nonce)
else:
logger.error('Problem resending transaction', exc_info=True)
return None
multisig_tx.ethereum_tx = EthereumTx.objects.create_from_tx_dict(tx, tx_hash)
multisig_tx.full_clean(validate_unique=False)
multisig_tx.save(update_fields=['ethereum_tx'])
return multisig_tx.ethereum_tx
# TODO Refactor and test
def create_or_update_ethereum_tx(self, tx_hash: str) -> Optional[EthereumTx]:
try:
ethereum_tx = EthereumTx.objects.get(tx_hash=tx_hash)
if ethereum_tx.block is None:
tx_receipt = self.ethereum_client.get_transaction_receipt(tx_hash)
if tx_receipt:
ethereum_tx.block = self.get_or_create_ethereum_block(tx_receipt.blockNumber)
ethereum_tx.gas_used = tx_receipt['gasUsed']
ethereum_tx.status = tx_receipt.get('status')
ethereum_tx.transaction_index = tx_receipt['transactionIndex']
ethereum_tx.save(update_fields=['block', 'gas_used', 'status', 'transaction_index'])
return ethereum_tx
except EthereumTx.DoesNotExist:
tx = self.ethereum_client.get_transaction(tx_hash)
tx_receipt = self.ethereum_client.get_transaction_receipt(tx_hash)
if tx:
if tx_receipt:
ethereum_block = self.get_or_create_ethereum_block(tx_receipt.blockNumber)
return EthereumTx.objects.create_from_tx_dict(tx, tx_hash,
tx_receipt=tx_receipt.gasUsed,
ethereum_block=ethereum_block)
return EthereumTx.objects.create_from_tx_dict(tx, tx_hash)
# TODO Refactor and test
def get_or_create_ethereum_block(self, block_number: int):
try:
return EthereumBlock.objects.get(number=block_number)
except EthereumBlock.DoesNotExist:
block = self.ethereum_client.get_block(block_number)
return EthereumBlock.objects.create_from_block(block)
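# Minimal usage sketch of the service defined above (addresses, values and
# signatures are placeholders, not real data):
#
#   service = TransactionServiceProvider()
#   estimation = service.estimate_tx(safe_address, to, value, data, operation=0, gas_token=None)
#   safe_nonce = 0 if estimation.last_used_nonce is None else estimation.last_used_nonce + 1
#   multisig_tx = service.create_multisig_tx(
#       safe_address, to, value, data, operation=0,
#       safe_tx_gas=estimation.safe_tx_gas, base_gas=estimation.base_gas,
#       gas_price=estimation.gas_price, gas_token=estimation.gas_token,
#       refund_receiver=estimation.refund_receiver, safe_nonce=safe_nonce,
#       signatures=[{'v': v, 'r': r, 's': s}],  # owner signatures over the safe tx hash
#   )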
|
py | 1a33df8de301d9047b3a0a635b341282828d1415 | #!/usr/bin/env python3
import json
from pathlib import Path
from urllib.parse import urlparse
from tests.mocks.categories import CATEGORIES
from tests.mocks.kit_info import KIT_INFO
from tests.mocks.kit_sha1 import KIT_SHA1
file_path = Path(__file__).resolve()
api_mocks = file_path.parent.joinpath("apis")
ebuilds_mocks = file_path.parent.joinpath("ebuilds")
async def stub_get_page(uri, session, **kwargs):
o = urlparse(uri)
if o.path.endswith("kit-info.json"):
return json.dumps(KIT_INFO)
if o.path.endswith("kit-sha1.json"):
return json.dumps(KIT_SHA1)
if o.path.endswith("categories"):
return "\n".join(CATEGORIES)
if o.path.endswith("firefox-72.0.2.ebuild"):
with open(ebuilds_mocks / "firefox-72.0.2.ebuild") as f:
result = f.read()
return result
if o.netloc == "api.github.com":
return github_api_stub(o, **kwargs)
if o.netloc == "code.funtoo.org":
return funtoo_stash_api_stub(o, **kwargs)
def github_api_stub(o, **kwargs):
headers = kwargs["headers"] if "headers" in kwargs else None
if o.path.endswith("/5932b921ba48f44e9c19d19301ae9448bb3fd912"):
with open(
api_mocks / "github_5932b921ba48f44e9c19d19301ae9448bb3fd912.json"
) as f:
result = f.read()
return result
if o.path.endswith("/04eb725f50c46031116df312c634eb767ba1b718"):
with open(
api_mocks / "github_04eb725f50c46031116df312c634eb767ba1b718.json"
) as f:
result = f.read()
return result
if o.path.endswith("/ba2ec9cdda1ab7d29185777d5d9f7b2488ae7390"):
with open(
api_mocks / "github_ba2ec9cdda1ab7d29185777d5d9f7b2488ae7390.json"
) as f:
result = f.read()
return result
if o.path.endswith("/789bfa81a335ab23accbd0da7d0808b499227510"):
if headers is not None and headers["accept"] == "application/vnd.github.v3.raw":
with open(ebuilds_mocks / "firefox-72.0.2.ebuild") as f:
result = f.read()
else:
with open(
api_mocks / "github_789bfa81a335ab23accbd0da7d0808b499227510.json"
) as f:
result = f.read()
return result
raise ValueError("unsupported path")
def funtoo_stash_api_stub(o, **kwargs):
if o.query.endswith("=5932b921ba48f44e9c19d19301ae9448bb3fd912"):
with open(
api_mocks / "funtoo_stash_5932b921ba48f44e9c19d19301ae9448bb3fd912.json"
) as f:
result = f.read()
return result
if o.query.endswith("=04eb725f50c46031116df312c634eb767ba1b718"):
with open(
api_mocks / "funtoo_stash_04eb725f50c46031116df312c634eb767ba1b718.json"
) as f:
result = f.read()
return result
if o.query.endswith("=ba2ec9cdda1ab7d29185777d5d9f7b2488ae7390"):
with open(
api_mocks / "funtoo_stash_ba2ec9cdda1ab7d29185777d5d9f7b2488ae7390.json"
) as f:
result = f.read()
return result
raise ValueError("unsupported path")
|
py | 1a33dfe6c39969fc1f20b5934ce04e4db66357cd | # Generated by Django 3.1.5 on 2021-02-08 06:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ad', '0009_advertisement_status'),
]
operations = [
migrations.AlterField(
model_name='advertisement',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ads', to='ad.category'),
),
]
|
py | 1a33e116e463f4ed5b338c6d051319963e3b0ece | """Pairwise genome alignment
src: {ensemblgenomes.prefix}/fasta/{species}/*.fa.gz
dst: ./pairwise/{target}/{query}/{chromosome}/sing.maf
https://lastz.github.io/lastz/
"""
import concurrent.futures as confu
import gzip
import logging
import os
import shutil
from pathlib import Path
from ..db import ensemblgenomes, phylo
from ..util import cli, fs, subp
_log = logging.getLogger(__name__)
_executor = confu.ThreadPoolExecutor()
def main(argv: list[str] = []):
parser = cli.logging_argparser()
parser.add_argument("-n", "--dry-run", action="store_true")
parser.add_argument("-j", "--jobs", type=int, default=os.cpu_count())
parser.add_argument("--quick", action="store_true")
parser.add_argument("-c", "--clade", choices=phylo.newicks.keys())
parser.add_argument("target", choices=ensemblgenomes.species_names())
parser.add_argument("query", nargs="*")
args = parser.parse_args(argv or None)
cli.logging_config(args.loglevel)
cli.dry_run = args.dry_run
if args.clade:
assert not args.query
run(args.target, args.clade, args.jobs, args.quick)
else:
_run(args.target, args.query, args.jobs, args.quick)
def run(target: str, clade: str, jobs: int, quick: bool = False):
tree = phylo.newicks[clade]
_run(target, phylo.extract_names(tree), jobs, quick)
return Path("pairwise") / target
def _run(target: str, queries: list[str], jobs: int, quick: bool):
queries = ensemblgenomes.sanitize_queries(target, queries)
_executor._max_workers = jobs
futures: list[confu.Future[Path]] = []
for query in queries:
pa = PairwiseAlignment(target, query, quick=quick)
futures.extend(pa.run())
for future in confu.as_completed(futures):
if (sing_maf := future.result()).exists():
print(sing_maf)
class PairwiseAlignment:
def __init__(self, target: str, query: str, quick: bool):
self._target = target
self._query = query
self._quick = quick
self._target_sizes = ensemblgenomes.get_file("fasize.chrom.sizes", target)
self._query_sizes = ensemblgenomes.get_file("fasize.chrom.sizes", query)
self._outdir = Path("pairwise") / target / query
def run(self):
if not cli.dry_run:
self._outdir.mkdir(0o755, parents=True, exist_ok=True)
patt = "*.chromosome.*.2bit"
it = ensemblgenomes.rglob(patt, [self._target])
target_chromosomes = fs.sorted_naturally(it)
it = ensemblgenomes.rglob(patt, [self._query])
query_chromosomes = fs.sorted_naturally(it)
subexe = confu.ThreadPoolExecutor(max_workers=len(target_chromosomes))
waiters: list[confu.Future[list[Path]]] = []
for t in target_chromosomes:
futures = [
_executor.submit(self.align_chr, t, q) for q in query_chromosomes
]
waiters.append(subexe.submit(wait_results, futures))
return [
_executor.submit(self.integrate, future.result())
for future in confu.as_completed(waiters)
]
def align_chr(self, target_2bit: Path, query_2bit: Path):
axtgz = self.lastz(target_2bit, query_2bit)
chain = self.axt_chain(target_2bit, query_2bit, axtgz)
return chain
def integrate(self, chains: list[Path]):
pre_chain = self.merge_sort_pre(chains)
syntenic_net = self.chain_net_syntenic(pre_chain)
sing_maf = self.net_axt_maf(syntenic_net, pre_chain)
return sing_maf
def lastz(self, target_2bit: Path, query_2bit: Path):
target_label = target_2bit.stem.rsplit("dna_sm.", 1)[1]
query_label = query_2bit.stem.rsplit("dna_sm.", 1)[1]
subdir = self._outdir / target_label
if not cli.dry_run:
subdir.mkdir(0o755, exist_ok=True)
axtgz = subdir / f"{query_label}.axt.gz"
args = f"lastz {target_2bit} {query_2bit} --format=axt --inner=2000 --step=7"
if self._quick:
args += " --notransition --nogapped"
is_to_run = fs.is_outdated(axtgz, [target_2bit, query_2bit])
lastz = subp.run_if(is_to_run, args, stdout=subp.PIPE)
if is_to_run and not cli.dry_run:
with gzip.open(axtgz, "wb") as fout:
fout.write(lastz.stdout)
return axtgz
def axt_chain(self, target_2bit: Path, query_2bit: Path, axtgz: Path):
chain = axtgz.with_suffix("").with_suffix(".chain")
cmd = "axtChain -minScore=5000 -linearGap=medium stdin"
cmd += f" {target_2bit} {query_2bit} {chain}"
is_to_run = fs.is_outdated(chain, axtgz)
p = subp.popen_if(is_to_run, cmd, stdin=subp.PIPE)
if is_to_run and not cli.dry_run:
assert p.stdin
with gzip.open(axtgz, "rb") as fin:
shutil.copyfileobj(fin, p.stdin)
p.stdin.close()
p.communicate()
return chain
def merge_sort_pre(self, chains: list[Path]):
parent = set(x.parent for x in chains)
subdir = parent.pop()
assert not parent, "all chains must be in the same directory"
pre_chain = subdir / "pre.chain.gz"
is_to_run = fs.is_outdated(pre_chain, chains)
merge_cmd = ["chainMergeSort"] + [str(x) for x in chains]
merge = subp.popen_if(is_to_run, merge_cmd, stdout=subp.PIPE)
assert merge.stdout
pre_cmd = f"chainPreNet stdin {self._target_sizes} {self._query_sizes} stdout"
pre = subp.popen_if(is_to_run, pre_cmd, stdin=merge.stdout, stdout=subp.PIPE)
merge.stdout.close()
if is_to_run and not cli.dry_run:
(stdout, _stderr) = pre.communicate()
with gzip.open(pre_chain, "wb") as fout:
fout.write(stdout)
return pre_chain
def chain_net_syntenic(self, pre_chain: Path):
syntenic_net = pre_chain.parent / "syntenic.net"
is_to_run = fs.is_outdated(syntenic_net, pre_chain)
cn_cmd = (
f"chainNet stdin {self._target_sizes} {self._query_sizes} stdout /dev/null"
)
cn = subp.popen_if(is_to_run, cn_cmd, stdin=subp.PIPE, stdout=subp.PIPE)
assert cn.stdin
assert cn.stdout
if is_to_run and not cli.dry_run:
with gzip.open(pre_chain, "rb") as fout:
shutil.copyfileobj(fout, cn.stdin)
cn.stdin.close()
sn = subp.popen_if(
is_to_run, f"netSyntenic stdin {syntenic_net}", stdin=cn.stdout
)
cn.stdout.close()
sn.communicate()
return syntenic_net
def net_axt_maf(self, syntenic_net: Path, pre_chain: Path):
sing_maf = syntenic_net.parent / "sing.maf"
target_2bit = ensemblgenomes.get_file("*.genome.2bit", self._target)
query_2bit = ensemblgenomes.get_file("*.genome.2bit", self._query)
is_to_run = fs.is_outdated(sing_maf, [syntenic_net, pre_chain])
toaxt_cmd = f"netToAxt {syntenic_net} stdin {target_2bit} {query_2bit} stdout"
toaxt = subp.popen_if(is_to_run, toaxt_cmd, stdin=subp.PIPE, stdout=subp.PIPE)
assert toaxt.stdin
assert toaxt.stdout
if is_to_run and not cli.dry_run:
with gzip.open(pre_chain, "rb") as fout:
shutil.copyfileobj(fout, toaxt.stdin)
toaxt.stdin.close()
sort = subp.popen_if(
is_to_run, "axtSort stdin stdout", stdin=toaxt.stdout, stdout=subp.PIPE
)
toaxt.stdout.close()
assert sort.stdout
tprefix = phylo.shorten(self._target)
qprefix = phylo.shorten(self._query)
axttomaf_cmd = (
f"axtToMaf -tPrefix={tprefix}. -qPrefix={qprefix}. stdin"
f" {self._target_sizes} {self._query_sizes} {sing_maf}"
)
atm = subp.popen_if(is_to_run, axttomaf_cmd, stdin=sort.stdout)
sort.stdout.close()
atm.communicate()
return sing_maf
def wait_results(futures: list[confu.Future[Path]]):
return [f.result() for f in futures]
if __name__ == "__main__":
main()
|
py | 1a33e17f7fd45c81d80dd83d057d79a618411933 | from cProfile import label
from IMLearn.learners.regressors import linear_regression
from IMLearn.learners.regressors import PolynomialFitting
from IMLearn.utils import split_train_test
import os
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str) -> pd.DataFrame:
"""
Load city daily temperature dataset and preprocess data.
Parameters
----------
filename: str
Path to city daily temperature dataset
Returns
-------
Design matrix and response vector (Temp)
"""
full_data = pd.read_csv(filename, parse_dates=["Date"]).dropna().drop_duplicates()
#change date to the day in the year
full_data = full_data.apply(lambda x : [obj.timetuple().tm_yday for obj in x] if x.name == "Date" else x)
#delete samples with Temp< -10
full_data = full_data.drop(full_data[full_data.Temp < -10].index)
return full_data
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of city temperature dataset
data = load_data("/home/alonbentzi/IML.HUJI/datasets/City_Temperature.csv")
# Question 2 - Exploring data for specific country
Israel_data = data.loc[data['Country'] == "Israel"]
#convert "YEAR" to string for px.scatter function
Israel_data["Year"] = Israel_data["Year"].astype(str)
#plot Israel temp as function of Day of the year
fig = px.scatter(Israel_data, x="Date", y="Temp", color="Year",
title="Temp as a function of Day of the year | Israel")
fig.write_image(os.path.join("/home/alonbentzi/IML.HUJI/exercises/.plots", "Israel_data.png"))
# grouping by 'Month'
IL_by_month = Israel_data.groupby(['Month']).Temp.agg(std='std')
fig = px.bar(IL_by_month, x=IL_by_month.index, y="std",
title="STD Temp as a function of Month in Israel")
fig.write_image(os.path.join("/home/alonbentzi/IML.HUJI/exercises/.plots", "Month_tmp.png"))
# Question 3 - Exploring differences between countries
# grouping by 'Country & 'Month'
grouped_by_month_and_country = data.groupby(['Month','Country']).Temp.agg([np.mean, np.std])
grouped_by_month_and_country = grouped_by_month_and_country.reset_index('Country')
print(grouped_by_month_and_country.shape)
print(grouped_by_month_and_country.columns)
fig = px.line(grouped_by_month_and_country, y='mean' ,color='Country',
error_y= 'std',
labels={'x': "Month",
'Temp': "Temp (Avg)"},
title="std Temp as a function of Month")
fig.write_image(os.path.join("/home/alonbentzi/IML.HUJI/exercises/.plots", "Month_tmp_with_err.png"))
# Question 4 - Fitting model for different values of `k`
train_x, train_y, test_x, test_y = split_train_test(Israel_data['Date'], Israel_data['Temp'])
losses_array = np.empty((0, 2))
for k in range(1,11):
model = PolynomialFitting(k)
model._fit(train_x, train_y)
temp_loss = round(model._loss(test_x, test_y), 2)
losses_array = np.append(losses_array, [[k, temp_loss]], axis=0)
fig = px.bar(losses_array, x=losses_array[:,0], y=losses_array[:,1],
labels={'x': "K", "y": "Temp_loss"})
fig.write_image(os.path.join("/home/alonbentzi/IML.HUJI/exercises/.plots", "error_for_each_k.png"))
# Question 5 - Evaluating fitted model on different countries
BEST_K = 5
countries = []
loss_countries = []
model_5 = PolynomialFitting(BEST_K)
model_5.fit(Israel_data["Date"], Israel_data["Temp"])
for country in data["Country"].unique():
if country == "Israel": continue
df_country = data[data["Country"] == country]
loss = model_5.loss(df_country['Date'], df_country['Temp'])
countries.append(country)
loss_countries.append(loss)
#convert arrays to np.array
countries = np.array(countries)
loss_countries = np.array(loss_countries)
fig = px.bar(x=countries, y=loss_countries,
labels={'x': "Countries", "y": "Temp_loss"})
fig.write_image(os.path.join("/home/alonbentzi/IML.HUJI/exercises/.plots", "Q5.png"))
|
py | 1a33e1aeea05df564c1b61a9e4f3d68b9c63dc53 | # Copyright (C) 2003-2007 Robey Pointer <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Core protocol implementation
"""
import os
import socket
import sys
import threading
import time
import weakref
from hashlib import md5, sha1
import paramiko
from paramiko import util
from paramiko.auth_handler import AuthHandler
from paramiko.channel import Channel
from paramiko.common import xffffffff, cMSG_CHANNEL_OPEN, cMSG_IGNORE, \
cMSG_GLOBAL_REQUEST, DEBUG, MSG_KEXINIT, MSG_IGNORE, MSG_DISCONNECT, \
MSG_DEBUG, ERROR, WARNING, cMSG_UNIMPLEMENTED, INFO, cMSG_KEXINIT, \
cMSG_NEWKEYS, MSG_NEWKEYS, cMSG_REQUEST_SUCCESS, cMSG_REQUEST_FAILURE, \
CONNECTION_FAILED_CODE, OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, \
OPEN_SUCCEEDED, cMSG_CHANNEL_OPEN_FAILURE, cMSG_CHANNEL_OPEN_SUCCESS, \
MSG_GLOBAL_REQUEST, MSG_REQUEST_SUCCESS, MSG_REQUEST_FAILURE, \
MSG_CHANNEL_OPEN_SUCCESS, MSG_CHANNEL_OPEN_FAILURE, MSG_CHANNEL_OPEN, \
MSG_CHANNEL_SUCCESS, MSG_CHANNEL_FAILURE, MSG_CHANNEL_DATA, \
MSG_CHANNEL_EXTENDED_DATA, MSG_CHANNEL_WINDOW_ADJUST, MSG_CHANNEL_REQUEST, \
MSG_CHANNEL_EOF, MSG_CHANNEL_CLOSE
from paramiko.compress import ZlibCompressor, ZlibDecompressor
from paramiko.dsskey import DSSKey
from paramiko.kex_gex import KexGex
from paramiko.kex_group1 import KexGroup1
from paramiko.message import Message
from paramiko.packet import Packetizer, NeedRekeyException
from paramiko.primes import ModulusPack
from paramiko.py3compat import string_types, long, byte_ord, b
from paramiko.rsakey import RSAKey
from paramiko.ecdsakey import ECDSAKey
from paramiko.server import ServerInterface
from paramiko.sftp_client import SFTPClient
from paramiko.ssh_exception import (SSHException, BadAuthenticationType,
ChannelException, ProxyCommandFailure)
from paramiko.util import retry_on_signal
from Crypto.Cipher import Blowfish, AES, DES3, ARC4
try:
from Crypto.Util import Counter
except ImportError:
from paramiko.util import Counter
# for thread cleanup
_active_threads = []
def _join_lingering_threads():
for thr in _active_threads:
thr.stop_thread()
import atexit
atexit.register(_join_lingering_threads)
class Transport (threading.Thread):
"""
An SSH Transport attaches to a stream (usually a socket), negotiates an
encrypted session, authenticates, and then creates stream tunnels, called
`channels <.Channel>`, across the session. Multiple channels can be
multiplexed across a single session (and often are, in the case of port
forwardings).
"""
_PROTO_ID = '2.0'
_CLIENT_ID = 'paramiko_%s' % paramiko.__version__
_preferred_ciphers = ('aes128-ctr', 'aes256-ctr', 'aes128-cbc', 'blowfish-cbc',
'aes256-cbc', '3des-cbc', 'arcfour128', 'arcfour256')
_preferred_macs = ('hmac-sha1', 'hmac-md5', 'hmac-sha1-96', 'hmac-md5-96')
_preferred_keys = ('ssh-rsa', 'ssh-dss', 'ecdsa-sha2-nistp256')
_preferred_kex = ('diffie-hellman-group1-sha1', 'diffie-hellman-group-exchange-sha1')
_preferred_compression = ('none',)
_cipher_info = {
'aes128-ctr': {'class': AES, 'mode': AES.MODE_CTR, 'block-size': 16, 'key-size': 16},
'aes256-ctr': {'class': AES, 'mode': AES.MODE_CTR, 'block-size': 16, 'key-size': 32},
'blowfish-cbc': {'class': Blowfish, 'mode': Blowfish.MODE_CBC, 'block-size': 8, 'key-size': 16},
'aes128-cbc': {'class': AES, 'mode': AES.MODE_CBC, 'block-size': 16, 'key-size': 16},
'aes256-cbc': {'class': AES, 'mode': AES.MODE_CBC, 'block-size': 16, 'key-size': 32},
'3des-cbc': {'class': DES3, 'mode': DES3.MODE_CBC, 'block-size': 8, 'key-size': 24},
'arcfour128': {'class': ARC4, 'mode': None, 'block-size': 8, 'key-size': 16},
'arcfour256': {'class': ARC4, 'mode': None, 'block-size': 8, 'key-size': 32},
}
_mac_info = {
'hmac-sha1': {'class': sha1, 'size': 20},
'hmac-sha1-96': {'class': sha1, 'size': 12},
'hmac-md5': {'class': md5, 'size': 16},
'hmac-md5-96': {'class': md5, 'size': 12},
}
_key_info = {
'ssh-rsa': RSAKey,
'ssh-dss': DSSKey,
'ecdsa-sha2-nistp256': ECDSAKey,
}
_kex_info = {
'diffie-hellman-group1-sha1': KexGroup1,
'diffie-hellman-group-exchange-sha1': KexGex,
}
_compression_info = {
# [email protected] is just zlib, but only turned on after a successful
# authentication. openssh servers may only offer this type because
# they've had troubles with security holes in zlib in the past.
'[email protected]': (ZlibCompressor, ZlibDecompressor),
'zlib': (ZlibCompressor, ZlibDecompressor),
'none': (None, None),
}
_modulus_pack = None
def __init__(self, sock):
"""
Create a new SSH session over an existing socket, or socket-like
object. This only creates the `.Transport` object; it doesn't begin the
SSH session yet. Use `connect` or `start_client` to begin a client
session, or `start_server` to begin a server session.
If the object is not actually a socket, it must have the following
methods:
- ``send(str)``: Writes from 1 to ``len(str)`` bytes, and returns an
int representing the number of bytes written. Returns
0 or raises ``EOFError`` if the stream has been closed.
- ``recv(int)``: Reads from 1 to ``int`` bytes and returns them as a
string. Returns 0 or raises ``EOFError`` if the stream has been
closed.
- ``close()``: Closes the socket.
- ``settimeout(n)``: Sets a (float) timeout on I/O operations.
For ease of use, you may also pass in an address (as a tuple) or a host
string as the ``sock`` argument. (A host string is a hostname with an
optional port (separated by ``":"``) which will be converted into a
tuple of ``(hostname, port)``.) A socket will be connected to this
address and used for communication. Exceptions from the ``socket``
call may be thrown in this case.
:param socket sock:
a socket or socket-like object to create the session over.
"""
self.active = False
if isinstance(sock, string_types):
# convert "host:port" into (host, port)
hl = sock.split(':', 1)
if len(hl) == 1:
sock = (hl[0], 22)
else:
sock = (hl[0], int(hl[1]))
if type(sock) is tuple:
# connect to the given (host, port)
hostname, port = sock
reason = 'No suitable address family'
for (family, socktype, proto, canonname, sockaddr) in socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
if socktype == socket.SOCK_STREAM:
af = family
addr = sockaddr
sock = socket.socket(af, socket.SOCK_STREAM)
try:
retry_on_signal(lambda: sock.connect((hostname, port)))
except socket.error as e:
reason = str(e)
else:
break
else:
raise SSHException(
'Unable to connect to %s: %s' % (hostname, reason))
# okay, normal socket-ish flow here...
threading.Thread.__init__(self)
self.setDaemon(True)
self.sock = sock
# Python < 2.3 doesn't have the settimeout method - RogerB
try:
# we set the timeout so we can check self.active periodically to
# see if we should bail. socket.timeout exception is never
# propagated.
self.sock.settimeout(0.1)
except AttributeError:
pass
# negotiated crypto parameters
self.packetizer = Packetizer(sock)
self.local_version = 'SSH-' + self._PROTO_ID + '-' + self._CLIENT_ID
self.remote_version = ''
self.local_cipher = self.remote_cipher = ''
self.local_kex_init = self.remote_kex_init = None
self.local_mac = self.remote_mac = None
self.local_compression = self.remote_compression = None
self.session_id = None
self.host_key_type = None
self.host_key = None
# state used during negotiation
self.kex_engine = None
self.H = None
self.K = None
self.initial_kex_done = False
self.in_kex = False
self.authenticated = False
self._expected_packet = tuple()
self.lock = threading.Lock() # synchronization (always higher level than write_lock)
# tracking open channels
self._channels = ChannelMap()
self.channel_events = {} # (id -> Event)
self.channels_seen = {} # (id -> True)
self._channel_counter = 1
self.window_size = 65536
self.max_packet_size = 34816
self._forward_agent_handler = None
self._x11_handler = None
self._tcp_handler = None
self.saved_exception = None
self.clear_to_send = threading.Event()
self.clear_to_send_lock = threading.Lock()
self.clear_to_send_timeout = 30.0
self.log_name = 'paramiko.transport'
self.logger = util.get_logger(self.log_name)
self.packetizer.set_log(self.logger)
self.auth_handler = None
self.global_response = None # response Message from an arbitrary global request
self.completion_event = None # user-defined event callbacks
self.banner_timeout = 15 # how long (seconds) to wait for the SSH banner
# server mode:
self.server_mode = False
self.server_object = None
self.server_key_dict = {}
self.server_accepts = []
self.server_accept_cv = threading.Condition(self.lock)
self.subsystem_table = {}
def __repr__(self):
"""
Returns a string representation of this object, for debugging.
"""
out = '<paramiko.Transport at %s' % hex(long(id(self)) & xffffffff)
if not self.active:
out += ' (unconnected)'
else:
if self.local_cipher != '':
out += ' (cipher %s, %d bits)' % (self.local_cipher,
self._cipher_info[self.local_cipher]['key-size'] * 8)
if self.is_authenticated():
out += ' (active; %d open channel(s))' % len(self._channels)
elif self.initial_kex_done:
out += ' (connected; awaiting auth)'
else:
out += ' (connecting)'
out += '>'
return out
def atfork(self):
"""
Terminate this Transport without closing the session. On posix
systems, if a Transport is open during process forking, both parent
and child will share the underlying socket, but only one process can
use the connection (without corrupting the session). Use this method
to clean up a Transport object without disrupting the other process.
.. versionadded:: 1.5.3
"""
self.close()
def get_security_options(self):
"""
Return a `.SecurityOptions` object which can be used to tweak the
encryption algorithms this transport will permit (for encryption,
digest/hash operations, public keys, and key exchanges) and the order
of preference for them.
"""
return SecurityOptions(self)
def start_client(self, event=None):
"""
Negotiate a new SSH2 session as a client. This is the first step after
creating a new `.Transport`. A separate thread is created for protocol
negotiation.
If an event is passed in, this method returns immediately. When
negotiation is done (successful or not), the given ``Event`` will
be triggered. On failure, `is_active` will return ``False``.
(Since 1.4) If ``event`` is ``None``, this method will not return until
negotiation is done. On success, the method returns normally.
Otherwise an SSHException is raised.
After a successful negotiation, you will usually want to authenticate,
calling `auth_password <Transport.auth_password>` or
`auth_publickey <Transport.auth_publickey>`.
.. note:: `connect` is a simpler method for connecting as a client.
.. note:: After calling this method (or `start_server` or `connect`),
you should no longer directly read from or write to the original
socket object.
:param .threading.Event event:
an event to trigger when negotiation is complete (optional)
:raises SSHException: if negotiation fails (and no ``event`` was passed
in)
"""
self.active = True
if event is not None:
# async, return immediately and let the app poll for completion
self.completion_event = event
self.start()
return
# synchronous, wait for a result
self.completion_event = event = threading.Event()
self.start()
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if event.isSet():
break
def start_server(self, event=None, server=None):
"""
Negotiate a new SSH2 session as a server. This is the first step after
creating a new `.Transport` and setting up your server host key(s). A
separate thread is created for protocol negotiation.
If an event is passed in, this method returns immediately. When
negotiation is done (successful or not), the given ``Event`` will
be triggered. On failure, `is_active` will return ``False``.
(Since 1.4) If ``event`` is ``None``, this method will not return until
negotiation is done. On success, the method returns normally.
Otherwise an SSHException is raised.
After a successful negotiation, the client will need to authenticate.
Override the methods `get_allowed_auths
<.ServerInterface.get_allowed_auths>`, `check_auth_none
<.ServerInterface.check_auth_none>`, `check_auth_password
<.ServerInterface.check_auth_password>`, and `check_auth_publickey
<.ServerInterface.check_auth_publickey>` in the given ``server`` object
to control the authentication process.
After a successful authentication, the client should request to open a
channel. Override `check_channel_request
<.ServerInterface.check_channel_request>` in the given ``server``
object to allow channels to be opened.
.. note::
After calling this method (or `start_client` or `connect`), you
should no longer directly read from or write to the original socket
object.
:param .threading.Event event:
an event to trigger when negotiation is complete.
:param .ServerInterface server:
an object used to perform authentication and create `channels
<.Channel>`
:raises SSHException: if negotiation fails (and no ``event`` was passed
in)
"""
if server is None:
server = ServerInterface()
self.server_mode = True
self.server_object = server
self.active = True
if event is not None:
# async, return immediately and let the app poll for completion
self.completion_event = event
self.start()
return
# synchronous, wait for a result
self.completion_event = event = threading.Event()
self.start()
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if event.isSet():
break
def add_server_key(self, key):
"""
Add a host key to the list of keys used for server mode. When behaving
as a server, the host key is used to sign certain packets during the
SSH2 negotiation, so that the client can trust that we are who we say
we are. Because this is used for signing, the key must contain private
key info, not just the public half. Only one key of each type (RSA or
DSS) is kept.
:param .PKey key:
the host key to add, usually an `.RSAKey` or `.DSSKey`.
"""
self.server_key_dict[key.get_name()] = key
def get_server_key(self):
"""
Return the active host key, in server mode. After negotiating with the
client, this method will return the negotiated host key. If only one
type of host key was set with `add_server_key`, that's the only key
that will ever be returned. But in cases where you have set more than
one type of host key (for example, an RSA key and a DSS key), the key
type will be negotiated by the client, and this method will return the
key of the type agreed on. If the host key has not been negotiated
yet, ``None`` is returned. In client mode, the behavior is undefined.
:return:
host key (`.PKey`) of the type negotiated by the client, or
``None``.
"""
try:
return self.server_key_dict[self.host_key_type]
except KeyError:
pass
return None
def load_server_moduli(filename=None):
"""
(optional)
Load a file of prime moduli for use in doing group-exchange key
negotiation in server mode. It's a rather obscure option and can be
safely ignored.
In server mode, the remote client may request "group-exchange" key
negotiation, which asks the server to send a random prime number that
fits certain criteria. These primes are pretty difficult to compute,
so they can't be generated on demand. But many systems contain a file
of suitable primes (usually named something like ``/etc/ssh/moduli``).
If you call `load_server_moduli` and it returns ``True``, then this
file of primes has been loaded and we will support "group-exchange" in
server mode. Otherwise server mode will just claim that it doesn't
support that method of key negotiation.
:param str filename:
optional path to the moduli file, if you happen to know that it's
not in a standard location.
:return:
True if a moduli file was successfully loaded; False otherwise.
.. note:: This has no effect when used in client mode.
"""
Transport._modulus_pack = ModulusPack()
# places to look for the openssh "moduli" file
file_list = ['/etc/ssh/moduli', '/usr/local/etc/moduli']
if filename is not None:
file_list.insert(0, filename)
for fn in file_list:
try:
Transport._modulus_pack.read_file(fn)
return True
except IOError:
pass
# none succeeded
Transport._modulus_pack = None
return False
load_server_moduli = staticmethod(load_server_moduli)
def close(self):
"""
Close this session, and any open channels that are tied to it.
"""
if not self.active:
return
self.stop_thread()
for chan in list(self._channels.values()):
chan._unlink()
self.sock.close()
def get_remote_server_key(self):
"""
Return the host key of the server (in client mode).
.. note::
Previously this call returned a tuple of ``(key type, key
string)``. You can get the same effect by calling `.PKey.get_name`
for the key type, and ``str(key)`` for the key string.
:raises SSHException: if no session is currently active.
:return: public key (`.PKey`) of the remote server
"""
if (not self.active) or (not self.initial_kex_done):
raise SSHException('No existing session')
return self.host_key
def is_active(self):
"""
Return true if this session is active (open).
:return:
True if the session is still active (open); False if the session is
closed
"""
return self.active
def open_session(self):
"""
Request a new channel to the server, of type ``"session"``. This is
just an alias for calling `open_channel` with an argument of
``"session"``.
:return: a new `.Channel`
:raises SSHException: if the request is rejected or the session ends
prematurely
"""
return self.open_channel('session')
def open_x11_channel(self, src_addr=None):
"""
Request a new channel to the client, of type ``"x11"``. This
is just an alias for ``open_channel('x11', src_addr=src_addr)``.
:param tuple src_addr:
the source address (``(str, int)``) of the x11 server (port is the
x11 port, ie. 6010)
:return: a new `.Channel`
:raises SSHException: if the request is rejected or the session ends
prematurely
"""
return self.open_channel('x11', src_addr=src_addr)
def open_forward_agent_channel(self):
"""
Request a new channel to the client, of type
``"[email protected]"``.
This is just an alias for ``open_channel('[email protected]')``.
:return: a new `.Channel`
:raises SSHException:
if the request is rejected or the session ends prematurely
"""
return self.open_channel('[email protected]')
def open_forwarded_tcpip_channel(self, src_addr, dest_addr):
"""
Request a new channel back to the client, of type ``"forwarded-tcpip"``.
This is used after a client has requested port forwarding, for sending
incoming connections back to the client.
:param src_addr: originator's address
:param dest_addr: local (server) connected address
"""
return self.open_channel('forwarded-tcpip', dest_addr, src_addr)
def open_channel(self, kind, dest_addr=None, src_addr=None):
"""
Request a new channel to the server. `Channels <.Channel>` are
socket-like objects used for the actual transfer of data across the
session. You may only request a channel after negotiating encryption
(using `connect` or `start_client`) and authenticating.
:param str kind:
the kind of channel requested (usually ``"session"``,
``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``)
:param tuple dest_addr:
the destination address (address + port tuple) of this port
forwarding, if ``kind`` is ``"forwarded-tcpip"`` or
``"direct-tcpip"`` (ignored for other channel types)
:param src_addr: the source address of this port forwarding, if
``kind`` is ``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``
:return: a new `.Channel` on success
:raises SSHException: if the request is rejected or the session ends
prematurely
"""
if not self.active:
raise SSHException('SSH session not active')
self.lock.acquire()
try:
chanid = self._next_channel()
m = Message()
m.add_byte(cMSG_CHANNEL_OPEN)
m.add_string(kind)
m.add_int(chanid)
m.add_int(self.window_size)
m.add_int(self.max_packet_size)
if (kind == 'forwarded-tcpip') or (kind == 'direct-tcpip'):
m.add_string(dest_addr[0])
m.add_int(dest_addr[1])
m.add_string(src_addr[0])
m.add_int(src_addr[1])
elif kind == 'x11':
m.add_string(src_addr[0])
m.add_int(src_addr[1])
chan = Channel(chanid)
self._channels.put(chanid, chan)
self.channel_events[chanid] = event = threading.Event()
self.channels_seen[chanid] = True
chan._set_transport(self)
chan._set_window(self.window_size, self.max_packet_size)
finally:
self.lock.release()
self._send_user_message(m)
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is None:
e = SSHException('Unable to open channel.')
raise e
if event.isSet():
break
chan = self._channels.get(chanid)
if chan is not None:
return chan
e = self.get_exception()
if e is None:
e = SSHException('Unable to open channel.')
raise e
def request_port_forward(self, address, port, handler=None):
"""
Ask the server to forward TCP connections from a listening port on
the server, across this SSH session.
If a handler is given, that handler is called from a different thread
whenever a forwarded connection arrives. The handler parameters are::
handler(channel, (origin_addr, origin_port), (server_addr, server_port))
where ``server_addr`` and ``server_port`` are the address and port that
the server was listening on.
If no handler is set, the default behavior is to send new incoming
forwarded connections into the accept queue, to be picked up via
`accept`.
:param str address: the address to bind when forwarding
:param int port:
the port to forward, or 0 to ask the server to allocate any port
:param callable handler:
optional handler for incoming forwarded connections, of the form
``func(Channel, (str, int), (str, int))``.
:return: the port number (`int`) allocated by the server
:raises SSHException: if the server refused the TCP forward request
"""
if not self.active:
raise SSHException('SSH session not active')
port = int(port)
response = self.global_request('tcpip-forward', (address, port), wait=True)
if response is None:
raise SSHException('TCP forwarding request denied')
if port == 0:
port = response.get_int()
if handler is None:
def default_handler(channel, src_addr, dest_addr_port):
#src_addr, src_port = src_addr_port
#dest_addr, dest_port = dest_addr_port
self._queue_incoming_channel(channel)
handler = default_handler
self._tcp_handler = handler
return port
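# Sketch of a reverse forward using the default handler (values are placeholders):
#
#   port = transport.request_port_forward('', 8080)   # port 0 lets the server pick one
#   chan = transport.accept(timeout=60)               # next forwarded connection, or None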
def cancel_port_forward(self, address, port):
"""
Ask the server to cancel a previous port-forwarding request. No more
connections to the given address & port will be forwarded across this
ssh connection.
:param str address: the address to stop forwarding
:param int port: the port to stop forwarding
"""
if not self.active:
return
self._tcp_handler = None
self.global_request('cancel-tcpip-forward', (address, port), wait=True)
def open_sftp_client(self):
"""
Create an SFTP client channel from an open transport. On success, an
SFTP session will be opened with the remote host, and a new
`.SFTPClient` object will be returned.
:return:
a new `.SFTPClient` referring to an sftp session (channel) across
this transport
"""
return SFTPClient.from_transport(self)
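# Example use (a sketch; paths are placeholders):
#   sftp = transport.open_sftp_client()
#   sftp.get('/remote/file', 'local_file')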
def send_ignore(self, byte_count=None):
"""
Send a junk packet across the encrypted link. This is sometimes used
to add "noise" to a connection to confuse would-be attackers. It can
also be used as a keep-alive for long lived connections traversing
firewalls.
:param int byte_count:
the number of random bytes to send in the payload of the ignored
packet -- defaults to a random number from 10 to 41.
"""
m = Message()
m.add_byte(cMSG_IGNORE)
if byte_count is None:
byte_count = (byte_ord(os.urandom(1)) % 32) + 10
m.add_bytes(os.urandom(byte_count))
self._send_user_message(m)
def renegotiate_keys(self):
"""
Force this session to switch to new keys. Normally this is done
automatically after the session hits a certain number of packets or
bytes sent or received, but this method gives you the option of forcing
new keys whenever you want. Negotiating new keys causes a pause in
traffic both ways as the two sides swap keys and do computations. This
method returns when the session has switched to new keys.
:raises SSHException: if the key renegotiation failed (which causes the
session to end)
"""
self.completion_event = threading.Event()
self._send_kex_init()
while True:
self.completion_event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if self.completion_event.isSet():
break
return
def set_keepalive(self, interval):
"""
Turn on/off keepalive packets (default is off). If this is set, after
``interval`` seconds without sending any data over the connection, a
"keepalive" packet will be sent (and ignored by the remote host). This
can be useful to keep connections alive over a NAT, for example.
:param int interval:
seconds to wait before sending a keepalive packet (or
0 to disable keepalives).
"""
self.packetizer.set_keepalive(interval,
lambda x=weakref.proxy(self): x.global_request('[email protected]', wait=False))
def global_request(self, kind, data=None, wait=True):
"""
Make a global request to the remote host. These are normally
extensions to the SSH2 protocol.
:param str kind: name of the request.
:param tuple data:
an optional tuple containing additional data to attach to the
request.
:param bool wait:
``True`` if this method should not return until a response is
received; ``False`` otherwise.
:return:
a `.Message` containing possible additional data if the request was
successful (or an empty `.Message` if ``wait`` was ``False``);
``None`` if the request was denied.
"""
if wait:
self.completion_event = threading.Event()
m = Message()
m.add_byte(cMSG_GLOBAL_REQUEST)
m.add_string(kind)
m.add_boolean(wait)
if data is not None:
m.add(*data)
self._log(DEBUG, 'Sending global request "%s"' % kind)
self._send_user_message(m)
if not wait:
return None
while True:
self.completion_event.wait(0.1)
if not self.active:
return None
if self.completion_event.isSet():
break
return self.global_response
def accept(self, timeout=None):
"""
Return the next channel opened by the client over this transport, in
server mode. If no channel is opened before the given timeout, ``None``
is returned.
:param int timeout:
seconds to wait for a channel, or ``None`` to wait forever
:return: a new `.Channel` opened by the client
"""
self.lock.acquire()
try:
if len(self.server_accepts) > 0:
chan = self.server_accepts.pop(0)
else:
self.server_accept_cv.wait(timeout)
if len(self.server_accepts) > 0:
chan = self.server_accepts.pop(0)
else:
# timeout
chan = None
finally:
self.lock.release()
return chan
def connect(self, hostkey=None, username='', password=None, pkey=None):
"""
Negotiate an SSH2 session, and optionally verify the server's host key
and authenticate using a password or private key. This is a shortcut
for `start_client`, `get_remote_server_key`, and
`Transport.auth_password` or `Transport.auth_publickey`. Use those
methods if you want more control.
You can use this method immediately after creating a Transport to
negotiate encryption with a server. If it fails, an exception will be
thrown. On success, the method will return cleanly, and an encrypted
session exists. You may immediately call `open_channel` or
`open_session` to get a `.Channel` object, which is used for data
transfer.
.. note::
If you fail to supply a password or private key, this method may
succeed, but a subsequent `open_channel` or `open_session` call may
fail because you haven't authenticated yet.
:param .PKey hostkey:
the host key expected from the server, or ``None`` if you don't
want to do host key verification.
:param str username: the username to authenticate as.
:param str password:
a password to use for authentication, if you want to use password
authentication; otherwise ``None``.
:param .PKey pkey:
a private key to use for authentication, if you want to use private
key authentication; otherwise ``None``.
:raises SSHException: if the SSH2 negotiation fails, the host key
supplied by the server is incorrect, or authentication fails.
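        A rough client-side sketch (the host, port, and credentials below are
        placeholders, not values defined anywhere in this module)::
            t = Transport(('ssh.example.com', 22))
            t.connect(username='alice', password='secret')
            chan = t.open_session()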
"""
if hostkey is not None:
self._preferred_keys = [hostkey.get_name()]
self.start_client()
# check host key if we were given one
if hostkey is not None:
key = self.get_remote_server_key()
if (key.get_name() != hostkey.get_name()) or (key.asbytes() != hostkey.asbytes()):
self._log(DEBUG, 'Bad host key from server')
self._log(DEBUG, 'Expected: %s: %s' % (hostkey.get_name(), repr(hostkey.asbytes())))
self._log(DEBUG, 'Got : %s: %s' % (key.get_name(), repr(key.asbytes())))
raise SSHException('Bad host key from server')
self._log(DEBUG, 'Host key verified (%s)' % hostkey.get_name())
if (pkey is not None) or (password is not None):
if password is not None:
self._log(DEBUG, 'Attempting password auth...')
self.auth_password(username, password)
else:
self._log(DEBUG, 'Attempting public-key auth...')
self.auth_publickey(username, pkey)
return
def get_exception(self):
"""
Return any exception that happened during the last server request.
This can be used to fetch more specific error information after using
calls like `start_client`. The exception (if any) is cleared after
this call.
:return:
an exception, or ``None`` if there is no stored exception.
.. versionadded:: 1.1
"""
self.lock.acquire()
try:
e = self.saved_exception
self.saved_exception = None
return e
finally:
self.lock.release()
def set_subsystem_handler(self, name, handler, *larg, **kwarg):
"""
Set the handler class for a subsystem in server mode. If a request
for this subsystem is made on an open ssh channel later, this handler
will be constructed and called -- see `.SubsystemHandler` for more
detailed documentation.
Any extra parameters (including keyword arguments) are saved and
passed to the `.SubsystemHandler` constructor later.
:param str name: name of the subsystem.
:param class handler:
subclass of `.SubsystemHandler` that handles this subsystem.
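        For example, a server offering SFTP would typically register the
        bundled `.SFTPServer` handler; the interface class below is a
        placeholder for your own `.SFTPServerInterface` subclass::
            transport.set_subsystem_handler('sftp', SFTPServer, MySFTPServerInterface)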
"""
try:
self.lock.acquire()
self.subsystem_table[name] = (handler, larg, kwarg)
finally:
self.lock.release()
def is_authenticated(self):
"""
Return true if this session is active and authenticated.
:return:
True if the session is still open and has been authenticated
successfully; False if authentication failed and/or the session is
closed.
"""
return self.active and (self.auth_handler is not None) and self.auth_handler.is_authenticated()
def get_username(self):
"""
Return the username this connection is authenticated for. If the
session is not authenticated (or authentication failed), this method
returns ``None``.
:return: username that was authenticated (a `str`), or ``None``.
"""
if not self.active or (self.auth_handler is None):
return None
return self.auth_handler.get_username()
def get_banner(self):
"""
Return the banner supplied by the server upon connect. If no banner is
        supplied, this method returns ``None``.
        :return: server-supplied banner, or ``None``, as a `str`.
"""
if not self.active or (self.auth_handler is None):
return None
return self.auth_handler.banner
def auth_none(self, username):
"""
Try to authenticate to the server using no authentication at all.
This will almost always fail. It may be useful for determining the
list of authentication types supported by the server, by catching the
`.BadAuthenticationType` exception raised.
:param str username: the username to authenticate as
:return:
`list` of auth types permissible for the next stage of
authentication (normally empty)
:raises BadAuthenticationType: if "none" authentication isn't allowed
by the server for this user
:raises SSHException: if the authentication failed due to a network
error
.. versionadded:: 1.5
"""
if (not self.active) or (not self.initial_kex_done):
raise SSHException('No existing session')
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_none(username, my_event)
return self.auth_handler.wait_for_response(my_event)
def auth_password(self, username, password, event=None, fallback=True):
"""
Authenticate to the server using a password. The username and password
are sent over an encrypted link.
If an ``event`` is passed in, this method will return immediately, and
the event will be triggered once authentication succeeds or fails. On
success, `is_authenticated` will return ``True``. On failure, you may
use `get_exception` to get more detailed error information.
Since 1.1, if no event is passed, this method will block until the
authentication succeeds or fails. On failure, an exception is raised.
Otherwise, the method simply returns.
Since 1.5, if no event is passed and ``fallback`` is ``True`` (the
default), if the server doesn't support plain password authentication
but does support so-called "keyboard-interactive" mode, an attempt
will be made to authenticate using this interactive mode. If it fails,
the normal exception will be thrown as if the attempt had never been
made. This is useful for some recent Gentoo and Debian distributions,
which turn off plain password authentication in a misguided belief
that interactive authentication is "more secure". (It's not.)
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
:param str username: the username to authenticate as
:param basestring password: the password to authenticate with
:param .threading.Event event:
an event to trigger when the authentication attempt is complete
(whether it was successful or not)
:param bool fallback:
``True`` if an attempt at an automated "interactive" password auth
should be made if the server doesn't support normal password auth
:return:
`list` of auth types permissible for the next stage of
authentication (normally empty)
:raises BadAuthenticationType: if password authentication isn't
allowed by the server for this user (and no event was passed in)
:raises AuthenticationException: if the authentication failed (and no
event was passed in)
:raises SSHException: if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to send the password unless we're on a secure link
raise SSHException('No existing session')
if event is None:
my_event = threading.Event()
else:
my_event = event
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_password(username, password, my_event)
if event is not None:
# caller wants to wait for event themselves
return []
try:
return self.auth_handler.wait_for_response(my_event)
except BadAuthenticationType as e:
# if password auth isn't allowed, but keyboard-interactive *is*, try to fudge it
if not fallback or ('keyboard-interactive' not in e.allowed_types):
raise
try:
def handler(title, instructions, fields):
if len(fields) > 1:
raise SSHException('Fallback authentication failed.')
if len(fields) == 0:
# for some reason, at least on os x, a 2nd request will
# be made with zero fields requested. maybe it's just
# to try to fake out automated scripting of the exact
# type we're doing here. *shrug* :)
return []
return [password]
return self.auth_interactive(username, handler)
except SSHException:
# attempt failed; just raise the original exception
raise e
def auth_publickey(self, username, key, event=None):
"""
Authenticate to the server using a private key. The key is used to
sign data from the server, so it must include the private part.
If an ``event`` is passed in, this method will return immediately, and
the event will be triggered once authentication succeeds or fails. On
success, `is_authenticated` will return ``True``. On failure, you may
use `get_exception` to get more detailed error information.
Since 1.1, if no event is passed, this method will block until the
authentication succeeds or fails. On failure, an exception is raised.
Otherwise, the method simply returns.
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
:param str username: the username to authenticate as
:param .PKey key: the private key to authenticate with
:param .threading.Event event:
an event to trigger when the authentication attempt is complete
(whether it was successful or not)
:return:
`list` of auth types permissible for the next stage of
authentication (normally empty)
:raises BadAuthenticationType: if public-key authentication isn't
allowed by the server for this user (and no event was passed in)
:raises AuthenticationException: if the authentication failed (and no
event was passed in)
:raises SSHException: if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException('No existing session')
if event is None:
my_event = threading.Event()
else:
my_event = event
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_publickey(username, key, my_event)
if event is not None:
# caller wants to wait for event themselves
return []
return self.auth_handler.wait_for_response(my_event)
def auth_interactive(self, username, handler, submethods=''):
"""
Authenticate to the server interactively. A handler is used to answer
arbitrary questions from the server. On many servers, this is just a
dumb wrapper around PAM.
This method will block until the authentication succeeds or fails,
        periodically calling the handler asynchronously to get answers to
authentication questions. The handler may be called more than once
if the server continues to ask questions.
The handler is expected to be a callable that will handle calls of the
form: ``handler(title, instructions, prompt_list)``. The ``title`` is
meant to be a dialog-window title, and the ``instructions`` are user
instructions (both are strings). ``prompt_list`` will be a list of
prompts, each prompt being a tuple of ``(str, bool)``. The string is
the prompt and the boolean indicates whether the user text should be
echoed.
A sample call would thus be:
``handler('title', 'instructions', [('Password:', False)])``.
The handler should return a list or tuple of answers to the server's
questions.
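        A minimal handler, sketched here only for illustration, might answer
        every prompt with a password obtained elsewhere::
            def handler(title, instructions, prompt_list):
                return [my_password for _ in prompt_list]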
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
:param str username: the username to authenticate as
:param callable handler: a handler for responding to server questions
:param str submethods: a string list of desired submethods (optional)
:return:
`list` of auth types permissible for the next stage of
authentication (normally empty).
        :raises BadAuthenticationType: if interactive authentication isn't
allowed by the server for this user
:raises AuthenticationException: if the authentication failed
:raises SSHException: if there was a network error
.. versionadded:: 1.5
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException('No existing session')
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_interactive(username, handler, my_event, submethods)
return self.auth_handler.wait_for_response(my_event)
def set_log_channel(self, name):
"""
Set the channel for this transport's logging. The default is
``"paramiko.transport"`` but it can be set to anything you want. (See
the `.logging` module for more info.) SSH Channels will log to a
sub-channel of the one specified.
:param str name: new channel name for logging
.. versionadded:: 1.1
"""
self.log_name = name
self.logger = util.get_logger(name)
self.packetizer.set_log(self.logger)
def get_log_channel(self):
"""
Return the channel name used for this transport's logging.
:return: channel name as a `str`
.. versionadded:: 1.2
"""
return self.log_name
def set_hexdump(self, hexdump):
"""
Turn on/off logging a hex dump of protocol traffic at DEBUG level in
the logs. Normally you would want this off (which is the default),
but if you are debugging something, it may be useful.
:param bool hexdump:
            ``True`` to log protocol traffic (in hex) to the log; ``False``
otherwise.
"""
self.packetizer.set_hexdump(hexdump)
def get_hexdump(self):
"""
Return ``True`` if the transport is currently logging hex dumps of
protocol traffic.
:return: ``True`` if hex dumps are being logged, else ``False``.
.. versionadded:: 1.4
"""
return self.packetizer.get_hexdump()
def use_compression(self, compress=True):
"""
        Turn on/off compression. This will only have an effect before starting
the transport (ie before calling `connect`, etc). By default,
compression is off since it negatively affects interactive sessions.
:param bool compress:
``True`` to ask the remote client/server to compress traffic;
``False`` to refuse compression
.. versionadded:: 1.5.2
"""
if compress:
            self._preferred_compression = ('zlib@openssh.com', 'zlib', 'none')
else:
self._preferred_compression = ('none',)
def getpeername(self):
"""
Return the address of the remote side of this Transport, if possible.
This is effectively a wrapper around ``'getpeername'`` on the underlying
socket. If the socket-like object has no ``'getpeername'`` method,
then ``("unknown", 0)`` is returned.
:return:
the address of the remote host, if known, as a ``(str, int)``
tuple.
"""
gp = getattr(self.sock, 'getpeername', None)
if gp is None:
return 'unknown', 0
return gp()
def stop_thread(self):
self.active = False
self.packetizer.close()
while self.isAlive():
self.join(10)
### internals...
def _log(self, level, msg, *args):
if issubclass(type(msg), list):
for m in msg:
self.logger.log(level, m)
else:
self.logger.log(level, msg, *args)
def _get_modulus_pack(self):
"""used by KexGex to find primes for group exchange"""
return self._modulus_pack
def _next_channel(self):
"""you are holding the lock"""
chanid = self._channel_counter
while self._channels.get(chanid) is not None:
self._channel_counter = (self._channel_counter + 1) & 0xffffff
chanid = self._channel_counter
self._channel_counter = (self._channel_counter + 1) & 0xffffff
return chanid
def _unlink_channel(self, chanid):
"""used by a Channel to remove itself from the active channel list"""
self._channels.delete(chanid)
def _send_message(self, data):
self.packetizer.send_message(data)
def _send_user_message(self, data):
"""
send a message, but block if we're in key negotiation. this is used
for user-initiated requests.
"""
start = time.time()
while True:
self.clear_to_send.wait(0.1)
if not self.active:
self._log(DEBUG, 'Dropping user packet because connection is dead.')
return
self.clear_to_send_lock.acquire()
if self.clear_to_send.isSet():
break
self.clear_to_send_lock.release()
if time.time() > start + self.clear_to_send_timeout:
raise SSHException('Key-exchange timed out waiting for key negotiation')
try:
self._send_message(data)
finally:
self.clear_to_send_lock.release()
def _set_K_H(self, k, h):
"""used by a kex object to set the K (root key) and H (exchange hash)"""
self.K = k
self.H = h
if self.session_id is None:
self.session_id = h
def _expect_packet(self, *ptypes):
"""used by a kex object to register the next packet type it expects to see"""
self._expected_packet = tuple(ptypes)
def _verify_key(self, host_key, sig):
key = self._key_info[self.host_key_type](Message(host_key))
if key is None:
raise SSHException('Unknown host key type')
if not key.verify_ssh_sig(self.H, Message(sig)):
raise SSHException('Signature verification (%s) failed.' % self.host_key_type)
self.host_key = key
def _compute_key(self, id, nbytes):
"""id is 'A' - 'F' for the various keys used by ssh"""
m = Message()
m.add_mpint(self.K)
m.add_bytes(self.H)
m.add_byte(b(id))
m.add_bytes(self.session_id)
out = sofar = sha1(m.asbytes()).digest()
while len(out) < nbytes:
m = Message()
m.add_mpint(self.K)
m.add_bytes(self.H)
m.add_bytes(sofar)
digest = sha1(m.asbytes()).digest()
out += digest
sofar += digest
return out[:nbytes]
def _get_cipher(self, name, key, iv):
if name not in self._cipher_info:
raise SSHException('Unknown client cipher ' + name)
if name in ('arcfour128', 'arcfour256'):
# arcfour cipher
cipher = self._cipher_info[name]['class'].new(key)
# as per RFC 4345, the first 1536 bytes of keystream
# generated by the cipher MUST be discarded
cipher.encrypt(" " * 1536)
return cipher
elif name.endswith("-ctr"):
# CTR modes, we need a counter
counter = Counter.new(nbits=self._cipher_info[name]['block-size'] * 8, initial_value=util.inflate_long(iv, True))
return self._cipher_info[name]['class'].new(key, self._cipher_info[name]['mode'], iv, counter)
else:
return self._cipher_info[name]['class'].new(key, self._cipher_info[name]['mode'], iv)
def _set_forward_agent_handler(self, handler):
if handler is None:
def default_handler(channel):
self._queue_incoming_channel(channel)
self._forward_agent_handler = default_handler
else:
self._forward_agent_handler = handler
def _set_x11_handler(self, handler):
# only called if a channel has turned on x11 forwarding
if handler is None:
# by default, use the same mechanism as accept()
def default_handler(channel, src_addr_port):
self._queue_incoming_channel(channel)
self._x11_handler = default_handler
else:
self._x11_handler = handler
def _queue_incoming_channel(self, channel):
self.lock.acquire()
try:
self.server_accepts.append(channel)
self.server_accept_cv.notify()
finally:
self.lock.release()
def run(self):
# (use the exposed "run" method, because if we specify a thread target
# of a private method, threading.Thread will keep a reference to it
# indefinitely, creating a GC cycle and not letting Transport ever be
# GC'd. it's a bug in Thread.)
# Hold reference to 'sys' so we can test sys.modules to detect
# interpreter shutdown.
self.sys = sys
# active=True occurs before the thread is launched, to avoid a race
_active_threads.append(self)
if self.server_mode:
self._log(DEBUG, 'starting thread (server mode): %s' % hex(long(id(self)) & xffffffff))
else:
self._log(DEBUG, 'starting thread (client mode): %s' % hex(long(id(self)) & xffffffff))
try:
try:
self.packetizer.write_all(b(self.local_version + '\r\n'))
self._check_banner()
self._send_kex_init()
self._expect_packet(MSG_KEXINIT)
while self.active:
if self.packetizer.need_rekey() and not self.in_kex:
self._send_kex_init()
try:
ptype, m = self.packetizer.read_message()
except NeedRekeyException:
continue
if ptype == MSG_IGNORE:
continue
elif ptype == MSG_DISCONNECT:
self._parse_disconnect(m)
self.active = False
self.packetizer.close()
break
elif ptype == MSG_DEBUG:
self._parse_debug(m)
continue
if len(self._expected_packet) > 0:
if ptype not in self._expected_packet:
raise SSHException('Expecting packet from %r, got %d' % (self._expected_packet, ptype))
self._expected_packet = tuple()
if (ptype >= 30) and (ptype <= 39):
self.kex_engine.parse_next(ptype, m)
continue
if ptype in self._handler_table:
self._handler_table[ptype](self, m)
elif ptype in self._channel_handler_table:
chanid = m.get_int()
chan = self._channels.get(chanid)
if chan is not None:
self._channel_handler_table[ptype](chan, m)
elif chanid in self.channels_seen:
self._log(DEBUG, 'Ignoring message for dead channel %d' % chanid)
else:
self._log(ERROR, 'Channel request for unknown channel %d' % chanid)
self.active = False
self.packetizer.close()
elif (self.auth_handler is not None) and (ptype in self.auth_handler._handler_table):
self.auth_handler._handler_table[ptype](self.auth_handler, m)
else:
self._log(WARNING, 'Oops, unhandled type %d' % ptype)
msg = Message()
msg.add_byte(cMSG_UNIMPLEMENTED)
msg.add_int(m.seqno)
self._send_message(msg)
except SSHException as e:
self._log(ERROR, 'Exception: ' + str(e))
self._log(ERROR, util.tb_strings())
self.saved_exception = e
except EOFError as e:
self._log(DEBUG, 'EOF in transport thread')
#self._log(DEBUG, util.tb_strings())
self.saved_exception = e
except socket.error as e:
if type(e.args) is tuple:
if e.args:
emsg = '%s (%d)' % (e.args[1], e.args[0])
else: # empty tuple, e.g. socket.timeout
emsg = str(e) or repr(e)
else:
emsg = e.args
self._log(ERROR, 'Socket exception: ' + emsg)
self.saved_exception = e
except Exception as e:
self._log(ERROR, 'Unknown exception: ' + str(e))
self._log(ERROR, util.tb_strings())
self.saved_exception = e
_active_threads.remove(self)
for chan in list(self._channels.values()):
chan._unlink()
if self.active:
self.active = False
self.packetizer.close()
if self.completion_event is not None:
self.completion_event.set()
if self.auth_handler is not None:
self.auth_handler.abort()
for event in self.channel_events.values():
event.set()
try:
self.lock.acquire()
self.server_accept_cv.notify()
finally:
self.lock.release()
self.sock.close()
except:
# Don't raise spurious 'NoneType has no attribute X' errors when we
# wake up during interpreter shutdown. Or rather -- raise
# everything *if* sys.modules (used as a convenient sentinel)
# appears to still exist.
if self.sys.modules is not None:
raise
### protocol stages
def _negotiate_keys(self, m):
# throws SSHException on anything unusual
self.clear_to_send_lock.acquire()
try:
self.clear_to_send.clear()
finally:
self.clear_to_send_lock.release()
if self.local_kex_init is None:
# remote side wants to renegotiate
self._send_kex_init()
self._parse_kex_init(m)
self.kex_engine.start_kex()
def _check_banner(self):
# this is slow, but we only have to do it once
for i in range(100):
# give them 15 seconds for the first line, then just 2 seconds
# each additional line. (some sites have very high latency.)
if i == 0:
timeout = self.banner_timeout
else:
timeout = 2
try:
buf = self.packetizer.readline(timeout)
except ProxyCommandFailure:
raise
except Exception as e:
                raise SSHException('Error reading SSH protocol banner: ' + str(e))
if buf[:4] == 'SSH-':
break
self._log(DEBUG, 'Banner: ' + buf)
if buf[:4] != 'SSH-':
raise SSHException('Indecipherable protocol version "' + buf + '"')
# save this server version string for later
self.remote_version = buf
# pull off any attached comment
comment = ''
i = buf.find(' ')
if i >= 0:
comment = buf[i+1:]
buf = buf[:i]
# parse out version string and make sure it matches
segs = buf.split('-', 2)
if len(segs) < 3:
raise SSHException('Invalid SSH banner')
version = segs[1]
client = segs[2]
if version != '1.99' and version != '2.0':
raise SSHException('Incompatible version (%s instead of 2.0)' % (version,))
self._log(INFO, 'Connected (version %s, client %s)' % (version, client))
def _send_kex_init(self):
"""
announce to the other side that we'd like to negotiate keys, and what
kind of key negotiation we support.
"""
self.clear_to_send_lock.acquire()
try:
self.clear_to_send.clear()
finally:
self.clear_to_send_lock.release()
self.in_kex = True
if self.server_mode:
if (self._modulus_pack is None) and ('diffie-hellman-group-exchange-sha1' in self._preferred_kex):
# can't do group-exchange if we don't have a pack of potential primes
pkex = list(self.get_security_options().kex)
pkex.remove('diffie-hellman-group-exchange-sha1')
self.get_security_options().kex = pkex
available_server_keys = list(filter(list(self.server_key_dict.keys()).__contains__,
self._preferred_keys))
else:
available_server_keys = self._preferred_keys
m = Message()
m.add_byte(cMSG_KEXINIT)
m.add_bytes(os.urandom(16))
m.add_list(self._preferred_kex)
m.add_list(available_server_keys)
m.add_list(self._preferred_ciphers)
m.add_list(self._preferred_ciphers)
m.add_list(self._preferred_macs)
m.add_list(self._preferred_macs)
m.add_list(self._preferred_compression)
m.add_list(self._preferred_compression)
m.add_string(bytes())
m.add_string(bytes())
m.add_boolean(False)
m.add_int(0)
# save a copy for later (needed to compute a hash)
self.local_kex_init = m.asbytes()
self._send_message(m)
def _parse_kex_init(self, m):
cookie = m.get_bytes(16)
kex_algo_list = m.get_list()
server_key_algo_list = m.get_list()
client_encrypt_algo_list = m.get_list()
server_encrypt_algo_list = m.get_list()
client_mac_algo_list = m.get_list()
server_mac_algo_list = m.get_list()
client_compress_algo_list = m.get_list()
server_compress_algo_list = m.get_list()
client_lang_list = m.get_list()
server_lang_list = m.get_list()
kex_follows = m.get_boolean()
unused = m.get_int()
self._log(DEBUG, 'kex algos:' + str(kex_algo_list) + ' server key:' + str(server_key_algo_list) +
' client encrypt:' + str(client_encrypt_algo_list) +
' server encrypt:' + str(server_encrypt_algo_list) +
' client mac:' + str(client_mac_algo_list) +
' server mac:' + str(server_mac_algo_list) +
' client compress:' + str(client_compress_algo_list) +
' server compress:' + str(server_compress_algo_list) +
' client lang:' + str(client_lang_list) +
' server lang:' + str(server_lang_list) +
' kex follows?' + str(kex_follows))
# as a server, we pick the first item in the client's list that we support.
# as a client, we pick the first item in our list that the server supports.
if self.server_mode:
agreed_kex = list(filter(self._preferred_kex.__contains__, kex_algo_list))
else:
agreed_kex = list(filter(kex_algo_list.__contains__, self._preferred_kex))
if len(agreed_kex) == 0:
raise SSHException('Incompatible ssh peer (no acceptable kex algorithm)')
self.kex_engine = self._kex_info[agreed_kex[0]](self)
if self.server_mode:
available_server_keys = list(filter(list(self.server_key_dict.keys()).__contains__,
self._preferred_keys))
agreed_keys = list(filter(available_server_keys.__contains__, server_key_algo_list))
else:
agreed_keys = list(filter(server_key_algo_list.__contains__, self._preferred_keys))
if len(agreed_keys) == 0:
raise SSHException('Incompatible ssh peer (no acceptable host key)')
self.host_key_type = agreed_keys[0]
if self.server_mode and (self.get_server_key() is None):
raise SSHException('Incompatible ssh peer (can\'t match requested host key type)')
if self.server_mode:
agreed_local_ciphers = list(filter(self._preferred_ciphers.__contains__,
server_encrypt_algo_list))
agreed_remote_ciphers = list(filter(self._preferred_ciphers.__contains__,
client_encrypt_algo_list))
else:
agreed_local_ciphers = list(filter(client_encrypt_algo_list.__contains__,
self._preferred_ciphers))
agreed_remote_ciphers = list(filter(server_encrypt_algo_list.__contains__,
self._preferred_ciphers))
if (len(agreed_local_ciphers) == 0) or (len(agreed_remote_ciphers) == 0):
raise SSHException('Incompatible ssh server (no acceptable ciphers)')
self.local_cipher = agreed_local_ciphers[0]
self.remote_cipher = agreed_remote_ciphers[0]
self._log(DEBUG, 'Ciphers agreed: local=%s, remote=%s' % (self.local_cipher, self.remote_cipher))
if self.server_mode:
agreed_remote_macs = list(filter(self._preferred_macs.__contains__, client_mac_algo_list))
agreed_local_macs = list(filter(self._preferred_macs.__contains__, server_mac_algo_list))
else:
agreed_local_macs = list(filter(client_mac_algo_list.__contains__, self._preferred_macs))
agreed_remote_macs = list(filter(server_mac_algo_list.__contains__, self._preferred_macs))
if (len(agreed_local_macs) == 0) or (len(agreed_remote_macs) == 0):
raise SSHException('Incompatible ssh server (no acceptable macs)')
self.local_mac = agreed_local_macs[0]
self.remote_mac = agreed_remote_macs[0]
if self.server_mode:
agreed_remote_compression = list(filter(self._preferred_compression.__contains__, client_compress_algo_list))
agreed_local_compression = list(filter(self._preferred_compression.__contains__, server_compress_algo_list))
else:
agreed_local_compression = list(filter(client_compress_algo_list.__contains__, self._preferred_compression))
agreed_remote_compression = list(filter(server_compress_algo_list.__contains__, self._preferred_compression))
if (len(agreed_local_compression) == 0) or (len(agreed_remote_compression) == 0):
raise SSHException('Incompatible ssh server (no acceptable compression) %r %r %r' % (agreed_local_compression, agreed_remote_compression, self._preferred_compression))
self.local_compression = agreed_local_compression[0]
self.remote_compression = agreed_remote_compression[0]
self._log(DEBUG, 'using kex %s; server key type %s; cipher: local %s, remote %s; mac: local %s, remote %s; compression: local %s, remote %s' %
(agreed_kex[0], self.host_key_type, self.local_cipher, self.remote_cipher, self.local_mac,
self.remote_mac, self.local_compression, self.remote_compression))
# save for computing hash later...
# now wait! openssh has a bug (and others might too) where there are
# actually some extra bytes (one NUL byte in openssh's case) added to
# the end of the packet but not parsed. turns out we need to throw
# away those bytes because they aren't part of the hash.
self.remote_kex_init = cMSG_KEXINIT + m.get_so_far()
def _activate_inbound(self):
"""switch on newly negotiated encryption parameters for inbound traffic"""
block_size = self._cipher_info[self.remote_cipher]['block-size']
if self.server_mode:
IV_in = self._compute_key('A', block_size)
key_in = self._compute_key('C', self._cipher_info[self.remote_cipher]['key-size'])
else:
IV_in = self._compute_key('B', block_size)
key_in = self._compute_key('D', self._cipher_info[self.remote_cipher]['key-size'])
engine = self._get_cipher(self.remote_cipher, key_in, IV_in)
mac_size = self._mac_info[self.remote_mac]['size']
mac_engine = self._mac_info[self.remote_mac]['class']
# initial mac keys are done in the hash's natural size (not the potentially truncated
# transmission size)
if self.server_mode:
mac_key = self._compute_key('E', mac_engine().digest_size)
else:
mac_key = self._compute_key('F', mac_engine().digest_size)
self.packetizer.set_inbound_cipher(engine, block_size, mac_engine, mac_size, mac_key)
compress_in = self._compression_info[self.remote_compression][1]
        if (compress_in is not None) and ((self.remote_compression != 'zlib@openssh.com') or self.authenticated):
self._log(DEBUG, 'Switching on inbound compression ...')
self.packetizer.set_inbound_compressor(compress_in())
def _activate_outbound(self):
"""switch on newly negotiated encryption parameters for outbound traffic"""
m = Message()
m.add_byte(cMSG_NEWKEYS)
self._send_message(m)
block_size = self._cipher_info[self.local_cipher]['block-size']
if self.server_mode:
IV_out = self._compute_key('B', block_size)
key_out = self._compute_key('D', self._cipher_info[self.local_cipher]['key-size'])
else:
IV_out = self._compute_key('A', block_size)
key_out = self._compute_key('C', self._cipher_info[self.local_cipher]['key-size'])
engine = self._get_cipher(self.local_cipher, key_out, IV_out)
mac_size = self._mac_info[self.local_mac]['size']
mac_engine = self._mac_info[self.local_mac]['class']
# initial mac keys are done in the hash's natural size (not the potentially truncated
# transmission size)
if self.server_mode:
mac_key = self._compute_key('F', mac_engine().digest_size)
else:
mac_key = self._compute_key('E', mac_engine().digest_size)
sdctr = self.local_cipher.endswith('-ctr')
self.packetizer.set_outbound_cipher(engine, block_size, mac_engine, mac_size, mac_key, sdctr)
compress_out = self._compression_info[self.local_compression][0]
        if (compress_out is not None) and ((self.local_compression != 'zlib@openssh.com') or self.authenticated):
self._log(DEBUG, 'Switching on outbound compression ...')
self.packetizer.set_outbound_compressor(compress_out())
if not self.packetizer.need_rekey():
self.in_kex = False
# we always expect to receive NEWKEYS now
self._expect_packet(MSG_NEWKEYS)
def _auth_trigger(self):
self.authenticated = True
# delayed initiation of compression
        if self.local_compression == 'zlib@openssh.com':
compress_out = self._compression_info[self.local_compression][0]
self._log(DEBUG, 'Switching on outbound compression ...')
self.packetizer.set_outbound_compressor(compress_out())
        if self.remote_compression == 'zlib@openssh.com':
compress_in = self._compression_info[self.remote_compression][1]
self._log(DEBUG, 'Switching on inbound compression ...')
self.packetizer.set_inbound_compressor(compress_in())
def _parse_newkeys(self, m):
self._log(DEBUG, 'Switch to new keys ...')
self._activate_inbound()
# can also free a bunch of stuff here
self.local_kex_init = self.remote_kex_init = None
self.K = None
self.kex_engine = None
if self.server_mode and (self.auth_handler is None):
# create auth handler for server mode
self.auth_handler = AuthHandler(self)
if not self.initial_kex_done:
# this was the first key exchange
self.initial_kex_done = True
# send an event?
if self.completion_event is not None:
self.completion_event.set()
# it's now okay to send data again (if this was a re-key)
if not self.packetizer.need_rekey():
self.in_kex = False
self.clear_to_send_lock.acquire()
try:
self.clear_to_send.set()
finally:
self.clear_to_send_lock.release()
return
def _parse_disconnect(self, m):
code = m.get_int()
desc = m.get_text()
self._log(INFO, 'Disconnect (code %d): %s' % (code, desc))
def _parse_global_request(self, m):
kind = m.get_text()
self._log(DEBUG, 'Received global request "%s"' % kind)
want_reply = m.get_boolean()
if not self.server_mode:
self._log(DEBUG, 'Rejecting "%s" global request from server.' % kind)
ok = False
elif kind == 'tcpip-forward':
address = m.get_text()
port = m.get_int()
ok = self.server_object.check_port_forward_request(address, port)
if ok:
ok = (ok,)
elif kind == 'cancel-tcpip-forward':
address = m.get_text()
port = m.get_int()
self.server_object.cancel_port_forward_request(address, port)
ok = True
else:
ok = self.server_object.check_global_request(kind, m)
extra = ()
if type(ok) is tuple:
extra = ok
ok = True
if want_reply:
msg = Message()
if ok:
msg.add_byte(cMSG_REQUEST_SUCCESS)
msg.add(*extra)
else:
msg.add_byte(cMSG_REQUEST_FAILURE)
self._send_message(msg)
def _parse_request_success(self, m):
self._log(DEBUG, 'Global request successful.')
self.global_response = m
if self.completion_event is not None:
self.completion_event.set()
def _parse_request_failure(self, m):
self._log(DEBUG, 'Global request denied.')
self.global_response = None
if self.completion_event is not None:
self.completion_event.set()
def _parse_channel_open_success(self, m):
chanid = m.get_int()
server_chanid = m.get_int()
server_window_size = m.get_int()
server_max_packet_size = m.get_int()
chan = self._channels.get(chanid)
if chan is None:
self._log(WARNING, 'Success for unrequested channel! [??]')
return
self.lock.acquire()
try:
chan._set_remote_channel(server_chanid, server_window_size, server_max_packet_size)
self._log(INFO, 'Secsh channel %d opened.' % chanid)
if chanid in self.channel_events:
self.channel_events[chanid].set()
del self.channel_events[chanid]
finally:
self.lock.release()
return
def _parse_channel_open_failure(self, m):
chanid = m.get_int()
reason = m.get_int()
reason_str = m.get_text()
lang = m.get_text()
reason_text = CONNECTION_FAILED_CODE.get(reason, '(unknown code)')
self._log(INFO, 'Secsh channel %d open FAILED: %s: %s' % (chanid, reason_str, reason_text))
self.lock.acquire()
try:
self.saved_exception = ChannelException(reason, reason_text)
if chanid in self.channel_events:
self._channels.delete(chanid)
if chanid in self.channel_events:
self.channel_events[chanid].set()
del self.channel_events[chanid]
finally:
self.lock.release()
return
def _parse_channel_open(self, m):
kind = m.get_text()
chanid = m.get_int()
initial_window_size = m.get_int()
max_packet_size = m.get_int()
reject = False
        if (kind == 'auth-agent@openssh.com') and (self._forward_agent_handler is not None):
self._log(DEBUG, 'Incoming forward agent connection')
self.lock.acquire()
try:
my_chanid = self._next_channel()
finally:
self.lock.release()
elif (kind == 'x11') and (self._x11_handler is not None):
origin_addr = m.get_text()
origin_port = m.get_int()
self._log(DEBUG, 'Incoming x11 connection from %s:%d' % (origin_addr, origin_port))
self.lock.acquire()
try:
my_chanid = self._next_channel()
finally:
self.lock.release()
elif (kind == 'forwarded-tcpip') and (self._tcp_handler is not None):
server_addr = m.get_text()
server_port = m.get_int()
origin_addr = m.get_text()
origin_port = m.get_int()
self._log(DEBUG, 'Incoming tcp forwarded connection from %s:%d' % (origin_addr, origin_port))
self.lock.acquire()
try:
my_chanid = self._next_channel()
finally:
self.lock.release()
elif not self.server_mode:
self._log(DEBUG, 'Rejecting "%s" channel request from server.' % kind)
reject = True
reason = OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
else:
self.lock.acquire()
try:
my_chanid = self._next_channel()
finally:
self.lock.release()
if kind == 'direct-tcpip':
                # handle direct-tcpip requests coming from the client
dest_addr = m.get_text()
dest_port = m.get_int()
origin_addr = m.get_text()
origin_port = m.get_int()
reason = self.server_object.check_channel_direct_tcpip_request(
my_chanid, (origin_addr, origin_port), (dest_addr, dest_port))
else:
reason = self.server_object.check_channel_request(kind, my_chanid)
if reason != OPEN_SUCCEEDED:
self._log(DEBUG, 'Rejecting "%s" channel request from client.' % kind)
reject = True
if reject:
msg = Message()
msg.add_byte(cMSG_CHANNEL_OPEN_FAILURE)
msg.add_int(chanid)
msg.add_int(reason)
msg.add_string('')
msg.add_string('en')
self._send_message(msg)
return
chan = Channel(my_chanid)
self.lock.acquire()
try:
self._channels.put(my_chanid, chan)
self.channels_seen[my_chanid] = True
chan._set_transport(self)
chan._set_window(self.window_size, self.max_packet_size)
chan._set_remote_channel(chanid, initial_window_size, max_packet_size)
finally:
self.lock.release()
m = Message()
m.add_byte(cMSG_CHANNEL_OPEN_SUCCESS)
m.add_int(chanid)
m.add_int(my_chanid)
m.add_int(self.window_size)
m.add_int(self.max_packet_size)
self._send_message(m)
self._log(INFO, 'Secsh channel %d (%s) opened.', my_chanid, kind)
        if kind == 'auth-agent@openssh.com':
self._forward_agent_handler(chan)
elif kind == 'x11':
self._x11_handler(chan, (origin_addr, origin_port))
elif kind == 'forwarded-tcpip':
chan.origin_addr = (origin_addr, origin_port)
self._tcp_handler(chan, (origin_addr, origin_port), (server_addr, server_port))
else:
self._queue_incoming_channel(chan)
def _parse_debug(self, m):
always_display = m.get_boolean()
msg = m.get_string()
lang = m.get_string()
self._log(DEBUG, 'Debug msg: ' + util.safe_string(msg))
def _get_subsystem_handler(self, name):
try:
self.lock.acquire()
if name not in self.subsystem_table:
return None, [], {}
return self.subsystem_table[name]
finally:
self.lock.release()
_handler_table = {
MSG_NEWKEYS: _parse_newkeys,
MSG_GLOBAL_REQUEST: _parse_global_request,
MSG_REQUEST_SUCCESS: _parse_request_success,
MSG_REQUEST_FAILURE: _parse_request_failure,
MSG_CHANNEL_OPEN_SUCCESS: _parse_channel_open_success,
MSG_CHANNEL_OPEN_FAILURE: _parse_channel_open_failure,
MSG_CHANNEL_OPEN: _parse_channel_open,
MSG_KEXINIT: _negotiate_keys,
}
_channel_handler_table = {
MSG_CHANNEL_SUCCESS: Channel._request_success,
MSG_CHANNEL_FAILURE: Channel._request_failed,
MSG_CHANNEL_DATA: Channel._feed,
MSG_CHANNEL_EXTENDED_DATA: Channel._feed_extended,
MSG_CHANNEL_WINDOW_ADJUST: Channel._window_adjust,
MSG_CHANNEL_REQUEST: Channel._handle_request,
MSG_CHANNEL_EOF: Channel._handle_eof,
MSG_CHANNEL_CLOSE: Channel._handle_close,
}
class SecurityOptions (object):
"""
Simple object containing the security preferences of an ssh transport.
These are tuples of acceptable ciphers, digests, key types, and key
exchange algorithms, listed in order of preference.
Changing the contents and/or order of these fields affects the underlying
`.Transport` (but only if you change them before starting the session).
If you try to add an algorithm that paramiko doesn't recognize,
``ValueError`` will be raised. If you try to assign something besides a
tuple to one of the fields, ``TypeError`` will be raised.
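    A typical use is narrowing the cipher list before starting the session;
    the names below are examples of ciphers paramiko recognizes::
        opts = transport.get_security_options()
        opts.ciphers = ('aes256-ctr', 'aes128-ctr')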
"""
#__slots__ = [ 'ciphers', 'digests', 'key_types', 'kex', 'compression', '_transport' ]
__slots__ = '_transport'
def __init__(self, transport):
self._transport = transport
def __repr__(self):
"""
Returns a string representation of this object, for debugging.
"""
return '<paramiko.SecurityOptions for %s>' % repr(self._transport)
def _get_ciphers(self):
return self._transport._preferred_ciphers
def _get_digests(self):
return self._transport._preferred_macs
def _get_key_types(self):
return self._transport._preferred_keys
def _get_kex(self):
return self._transport._preferred_kex
def _get_compression(self):
return self._transport._preferred_compression
def _set(self, name, orig, x):
if type(x) is list:
x = tuple(x)
if type(x) is not tuple:
raise TypeError('expected tuple or list')
possible = list(getattr(self._transport, orig).keys())
forbidden = [n for n in x if n not in possible]
if len(forbidden) > 0:
raise ValueError('unknown cipher')
setattr(self._transport, name, x)
def _set_ciphers(self, x):
self._set('_preferred_ciphers', '_cipher_info', x)
def _set_digests(self, x):
self._set('_preferred_macs', '_mac_info', x)
def _set_key_types(self, x):
self._set('_preferred_keys', '_key_info', x)
def _set_kex(self, x):
self._set('_preferred_kex', '_kex_info', x)
def _set_compression(self, x):
self._set('_preferred_compression', '_compression_info', x)
ciphers = property(_get_ciphers, _set_ciphers, None,
"Symmetric encryption ciphers")
digests = property(_get_digests, _set_digests, None,
"Digest (one-way hash) algorithms")
key_types = property(_get_key_types, _set_key_types, None,
"Public-key algorithms")
kex = property(_get_kex, _set_kex, None, "Key exchange algorithms")
compression = property(_get_compression, _set_compression, None,
"Compression algorithms")
class ChannelMap (object):
def __init__(self):
# (id -> Channel)
self._map = weakref.WeakValueDictionary()
self._lock = threading.Lock()
def put(self, chanid, chan):
self._lock.acquire()
try:
self._map[chanid] = chan
finally:
self._lock.release()
def get(self, chanid):
self._lock.acquire()
try:
return self._map.get(chanid, None)
finally:
self._lock.release()
def delete(self, chanid):
self._lock.acquire()
try:
try:
del self._map[chanid]
except KeyError:
pass
finally:
self._lock.release()
def values(self):
self._lock.acquire()
try:
return list(self._map.values())
finally:
self._lock.release()
def __len__(self):
self._lock.acquire()
try:
return len(self._map)
finally:
self._lock.release()
|
py | 1a33e304c772a2821c1151227ffbb0401af82cbc | from functools import partial, wraps, update_wrapper
import copy
import re
import clize.errors
from .core import _accepts_context, _call_fragment_body, collect, DROP, many as _many
from .objects import Context
__all__ = ['accumulate', 'callable', 'filter', 'many', 'format', 'regex', 'keywords',
'focus', 'magnify', 'try_except']
decorators = []
def _get_lenses():
global lenses, _get_lenses
try:
import lenses
except ImportError: # pragma: no cover
lenses = None
_get_lenses = lambda: lenses
return lenses
def _get_formatter():
global _get_formatter
import string
formatter = string.Formatter()
_get_formatter = lambda: formatter
return formatter
def decorator(*names, doc=None, takes_string=False, prep=None, dec_args=()):
if prep is None:
if len(dec_args) == 1:
prep = lambda _, a: a
elif len(dec_args) > 1:
prep = lambda _, *a: a
def wrapperer(_spy_decorator):
@wraps(_spy_decorator)
def wrapper(fn, dec_args=()):
is_decorator = getattr(fn, '_spy_decorated', None)
if is_decorator:
xfn = fn
elif _accepts_context(fn):
xfn = partial(_call_fragment_body, fn)
else:
xfn = partial(_drop_context, fn)
if prep:
opaque = prep(fn, *dec_args)
def wrapped(v, context=None):
_spy_callable = fn # noqa: F841
_spy_value = v # noqa: F841
return _spy_decorator(xfn, v, context, opaque)
else:
def wrapped(v, context=None):
_spy_callable = fn # noqa: F841
_spy_value = v # noqa: F841
return _spy_decorator(xfn, v, context)
update_wrapper(wrapped, fn)
wrapped._spy_decorated = True
return wrapped
if dec_args:
orig_wrapper = wrapper
def wrapper(*a):
return partial(orig_wrapper, dec_args=a)
wrapper.decorator_names = names
wrapper.decorator_help = doc
wrapper.takes_string = takes_string
wrapper.dec_args = dec_args
decorators.append(wrapper)
return wrapper
return wrapperer
def _drop_context(fn, v, context):
return _call_fragment_body(fn, v)
@decorator('--accumulate', '-a', doc='Pass an iterator of yielded values to this fragment')
def accumulate(fn, v, context):
return fn(collect(context), context)
@decorator('--callable', '-c', doc='Call the result of this fragment')
def callable(fn, v, context):
result = fn(v, context)
return result(v)
@decorator('--filter', '-f', doc='Treat this fragment as a predicate to filter data')
def filter(fn, v, context):
result = fn(v, context)
return v if result else DROP
@decorator('--many', '-m', doc='Iterate over this fragment')
def many(fn, v, context):
result = fn(v, context)
return _many(result)
@decorator('--format', '-i', doc='Interpolate argument as a format string', takes_string=True, prep=lambda _: _get_formatter())
def format(fn, v, context, formatter):
env, x = fn(v, context)
return formatter.vformat(x, v, env)
@decorator('--regex', '--regexp', '-R', doc='Match argument as a regexp', takes_string=True)
def regex(fn, v, context):
env, x = fn(v, context)
return re.match(x, v)
def _kw_prep(fn):
base = fn
while hasattr(base, '__wrapped__'):
base = base.__wrapped__
if not hasattr(base, '_spy_setenv'):
raise ValueError("inappropriate function")
return base._spy_setenv
@decorator('--keywords', '-k', doc='Execute with the input value as the scope', prep=_kw_prep)
def keywords(fn, v, context, setenv):
setenv(v)
return fn(v, context)
def _convert_focus(s):
lenses = _get_lenses()
if lenses is not None and s.startswith('_'):
context = Context()
context['_'] = lenses.lens
return eval(s, context, {})
if s.startswith('.'):
return s[1:]
if s[:1] in '0123456789-' and (len(s) == 1 or s[1:].isdigit()):
return int(s)
if ':' in s:
if lenses is None:
raise clize.errors.ArgumentError("slice focusing requires `lenses`")
sbits = s.split(':')
bits = []
for x in sbits:
if x == '':
bits.append(None)
elif x.isdigit() or x[:1] == '-' and x[1:].isdigit():
bits.append(int(x))
else:
break
else:
if len(bits) in (2,3):
return lenses.lens[slice(*bits)].Each()
return s
_convert_focus.usage_name = 'ITEM'
def _focus_prep(fn, focus):
lenses = _get_lenses()
if lenses is None:
def apply(f, v):
v_ = copy.copy(v)
v_[focus] = f(v_[focus])
return v_
return apply
if not isinstance(focus, lenses.UnboundLens):
focus = lenses.lens[focus]
return lambda f, v: focus.modify(f)(v)
@decorator('--focus', '-o', doc='Operate on an item of the input in-place',
prep=_focus_prep, dec_args=[_convert_focus])
def focus(fn, v, context, f):
fn = partial(fn, context=context)
return f(fn, v)
def _magnify_prep(fn, focus):
lenses = _get_lenses()
if lenses is None:
def apply(f, v):
return f(v[focus])
return apply
if not isinstance(focus, lenses.UnboundLens):
focus = lenses.lens[focus]
return lambda f, v: f(focus.get()(v))
@decorator('--magnify', '-O', doc='Operate on and return an item of the input',
prep=_magnify_prep, dec_args=[_convert_focus])
def magnify(fn, v, context, f):
fn = partial(fn, context=context)
return f(fn, v)
@decorator('--try', '-t', doc='Filter out input that causes the fragment to raise an exception')
def try_except(fn, v, context):
try:
return fn(v, context)
except:
pass
return DROP
|
py | 1a33e4cacf6da32d0fea895ef4bf0176cbf0a4a4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.CardFundInfo import CardFundInfo
from alipay.aop.api.domain.CardCreditInfo import CardCreditInfo
class AlipayAssetCardNewtemplateCreateModel(object):
def __init__(self):
self._account_model = None
self._assets_code = None
self._biz_from = None
self._card_fund_infos = None
self._card_model = None
self._card_name = None
self._creator = None
self._credit_info = None
self._extend_info = None
self._operator = None
self._out_biz_no = None
self._partner_id = None
self._period_type = None
self._product_code = None
self._settle_user_id = None
@property
def account_model(self):
return self._account_model
@account_model.setter
def account_model(self, value):
self._account_model = value
@property
def assets_code(self):
return self._assets_code
@assets_code.setter
def assets_code(self, value):
self._assets_code = value
@property
def biz_from(self):
return self._biz_from
@biz_from.setter
def biz_from(self, value):
self._biz_from = value
@property
def card_fund_infos(self):
return self._card_fund_infos
@card_fund_infos.setter
def card_fund_infos(self, value):
if isinstance(value, list):
self._card_fund_infos = list()
for i in value:
if isinstance(i, CardFundInfo):
self._card_fund_infos.append(i)
else:
self._card_fund_infos.append(CardFundInfo.from_alipay_dict(i))
@property
def card_model(self):
return self._card_model
@card_model.setter
def card_model(self, value):
self._card_model = value
@property
def card_name(self):
return self._card_name
@card_name.setter
def card_name(self, value):
self._card_name = value
@property
def creator(self):
return self._creator
@creator.setter
def creator(self, value):
self._creator = value
@property
def credit_info(self):
return self._credit_info
@credit_info.setter
def credit_info(self, value):
if isinstance(value, CardCreditInfo):
self._credit_info = value
else:
self._credit_info = CardCreditInfo.from_alipay_dict(value)
@property
def extend_info(self):
return self._extend_info
@extend_info.setter
def extend_info(self, value):
self._extend_info = value
@property
def operator(self):
return self._operator
@operator.setter
def operator(self, value):
self._operator = value
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def partner_id(self):
return self._partner_id
@partner_id.setter
def partner_id(self, value):
self._partner_id = value
@property
def period_type(self):
return self._period_type
@period_type.setter
def period_type(self, value):
self._period_type = value
@property
def product_code(self):
return self._product_code
@product_code.setter
def product_code(self, value):
self._product_code = value
@property
def settle_user_id(self):
return self._settle_user_id
@settle_user_id.setter
def settle_user_id(self, value):
self._settle_user_id = value
def to_alipay_dict(self):
params = dict()
if self.account_model:
if hasattr(self.account_model, 'to_alipay_dict'):
params['account_model'] = self.account_model.to_alipay_dict()
else:
params['account_model'] = self.account_model
if self.assets_code:
if hasattr(self.assets_code, 'to_alipay_dict'):
params['assets_code'] = self.assets_code.to_alipay_dict()
else:
params['assets_code'] = self.assets_code
if self.biz_from:
if hasattr(self.biz_from, 'to_alipay_dict'):
params['biz_from'] = self.biz_from.to_alipay_dict()
else:
params['biz_from'] = self.biz_from
if self.card_fund_infos:
if isinstance(self.card_fund_infos, list):
for i in range(0, len(self.card_fund_infos)):
element = self.card_fund_infos[i]
if hasattr(element, 'to_alipay_dict'):
self.card_fund_infos[i] = element.to_alipay_dict()
if hasattr(self.card_fund_infos, 'to_alipay_dict'):
params['card_fund_infos'] = self.card_fund_infos.to_alipay_dict()
else:
params['card_fund_infos'] = self.card_fund_infos
if self.card_model:
if hasattr(self.card_model, 'to_alipay_dict'):
params['card_model'] = self.card_model.to_alipay_dict()
else:
params['card_model'] = self.card_model
if self.card_name:
if hasattr(self.card_name, 'to_alipay_dict'):
params['card_name'] = self.card_name.to_alipay_dict()
else:
params['card_name'] = self.card_name
if self.creator:
if hasattr(self.creator, 'to_alipay_dict'):
params['creator'] = self.creator.to_alipay_dict()
else:
params['creator'] = self.creator
if self.credit_info:
if hasattr(self.credit_info, 'to_alipay_dict'):
params['credit_info'] = self.credit_info.to_alipay_dict()
else:
params['credit_info'] = self.credit_info
if self.extend_info:
if hasattr(self.extend_info, 'to_alipay_dict'):
params['extend_info'] = self.extend_info.to_alipay_dict()
else:
params['extend_info'] = self.extend_info
if self.operator:
if hasattr(self.operator, 'to_alipay_dict'):
params['operator'] = self.operator.to_alipay_dict()
else:
params['operator'] = self.operator
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
if self.partner_id:
if hasattr(self.partner_id, 'to_alipay_dict'):
params['partner_id'] = self.partner_id.to_alipay_dict()
else:
params['partner_id'] = self.partner_id
if self.period_type:
if hasattr(self.period_type, 'to_alipay_dict'):
params['period_type'] = self.period_type.to_alipay_dict()
else:
params['period_type'] = self.period_type
if self.product_code:
if hasattr(self.product_code, 'to_alipay_dict'):
params['product_code'] = self.product_code.to_alipay_dict()
else:
params['product_code'] = self.product_code
if self.settle_user_id:
if hasattr(self.settle_user_id, 'to_alipay_dict'):
params['settle_user_id'] = self.settle_user_id.to_alipay_dict()
else:
params['settle_user_id'] = self.settle_user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayAssetCardNewtemplateCreateModel()
if 'account_model' in d:
o.account_model = d['account_model']
if 'assets_code' in d:
o.assets_code = d['assets_code']
if 'biz_from' in d:
o.biz_from = d['biz_from']
if 'card_fund_infos' in d:
o.card_fund_infos = d['card_fund_infos']
if 'card_model' in d:
o.card_model = d['card_model']
if 'card_name' in d:
o.card_name = d['card_name']
if 'creator' in d:
o.creator = d['creator']
if 'credit_info' in d:
o.credit_info = d['credit_info']
if 'extend_info' in d:
o.extend_info = d['extend_info']
if 'operator' in d:
o.operator = d['operator']
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
if 'partner_id' in d:
o.partner_id = d['partner_id']
if 'period_type' in d:
o.period_type = d['period_type']
if 'product_code' in d:
o.product_code = d['product_code']
if 'settle_user_id' in d:
o.settle_user_id = d['settle_user_id']
return o
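# Minimal illustrative usage of this model; every field value below is a
# placeholder rather than a real Alipay code.
if __name__ == '__main__':
    demo = AlipayAssetCardNewtemplateCreateModel()
    demo.card_name = 'demo card'
    demo.assets_code = 'DEMO_ASSETS_CODE'
    demo.product_code = 'DEMO_PRODUCT'
    print(json.dumps(demo.to_alipay_dict()))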
|
py | 1a33e5b45985601108b0d117593dae16fb630219 | import pathlib
import requests
import json
import argparse
from folioclient.FolioClient import FolioClient
parser = argparse.ArgumentParser()
parser.add_argument("operation", help="backup or restore")
parser.add_argument("path", help="result file path (backup); take data from this file (restore)")
parser.add_argument("okapi_url", help="url of your FOLIO OKAPI endpoint.")
parser.add_argument("tenant_id", help="id of the FOLIO tenant")
parser.add_argument("username", help=("the api user"))
parser.add_argument("password", help=("the api users password"))
args = parser.parse_args()
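# Example invocation (the script name, URL, tenant, and credentials below are
# placeholders for illustration only):
#   python calendar_periods_backup.py backup periods.json \
#       https://okapi.example.org diku diku_admin secret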
folio_client = FolioClient(args.okapi_url, args.tenant_id, args.username, args.password)
okapiHeaders = folio_client.okapi_headers
if str(args.operation) == 'backup':
periods_query = "?withOpeningDays=true&showPast=true&showExceptional"
periods_path = "/calendar/periods/{}/period{}"
sp_request = requests.get(args.okapi_url + '/service-points',
headers=okapiHeaders)
sp_json = json.loads(sp_request.text)
service_points_ids = [sp['id'] for sp
in sp_json['servicepoints']]
periods_to_save = {}
for sp_id in service_points_ids:
query = periods_path.format(sp_id, periods_query)
period_req = requests.get(args.okapi_url + query,
headers=okapiHeaders)
periods_resp = json.loads(period_req.text)
periods_to_save[sp_id] = periods_resp
with open(args.path, 'w+') as settings_file:
settings_file.write(json.dumps(periods_to_save))
if args.operation == 'restore':
with open(args.path) as settings_file:
js = json.load(settings_file)
for sp_id, periods in js.items():
if any(periods['openingPeriods']):
period = periods['openingPeriods'][0]
periods_path = "/calendar/periods/{}/period".format(sp_id)
# print("{}, {}".format(sp_id, period['openingPeriods'][0]))
req = requests.post(args.okapi_url + periods_path,
data=json.dumps(period),
headers=okapiHeaders)
print(req.status_code)
print(req.text)
if str(req.status_code).startswith('4'):
print(req.text)
|
py | 1a33e611daabd97f81df5a0219737376cae9f246 | #Modules
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
#Accuracy Threshold
ACCURACY_THRESHOLD = 0.95
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
        if logs.get('accuracy', 0) > ACCURACY_THRESHOLD:
print("\nReached %2.2f%% accuracy, so stopping training!!" %(ACCURACY_THRESHOLD*100))
self.model.stop_training = True
callbacks = myCallback()
#Dividing the Dataset
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer','dog', 'frog', 'horse', 'ship', 'truck']
#Sample Plotting of Dataset
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i][0]])
plt.show()
#Convolutional Layer, Pooling Layer
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.summary()
#Flatten and Dense (classifier) layers
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))
model.summary()
#Compiling the model
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'])
#Number of training epochs
epochs = 50
#Training the model (the accuracy-threshold callback is passed in so it can stop training early)
history = model.fit(train_images, train_labels, epochs=epochs, validation_data=(test_images, test_labels), callbacks=[callbacks])
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(0,epochs)
#Plotting accuracy
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.show()
#Final Accuracy
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print(test_acc)
#Saving the model
model.save('dataset.h5')
print("Saving the model as dataset.h5")
|
py | 1a33e6937cccf0806c4bf1218f00250f6f92c3d6 | import json
import sys
import iotbx.phil
import dials.util
from dials.algorithms.spot_finding import per_image_analysis
from dials.util import tabulate
from dials.util.options import OptionParser, reflections_and_experiments_from_files
help_message = """
Reports the number of strong spots and computes an estimate of the resolution
limit for each image, given the results of dials.find_spots. Optionally
generates a plot of the per-image statistics (plot=image.png).
Examples::
dials.spot_counts_per_image imported.expt strong.refl
dials.spot_counts_per_image imported.expt strong.refl plot=per_image.png
"""
phil_scope = iotbx.phil.parse(
"""\
resolution_analysis = True
.type = bool
plot = None
.type = path
json = None
.type = path
split_json = False
.type = bool
joint_json = True
.type = bool
id = None
.type = int(value_min=0)
"""
)
@dials.util.show_mail_handle_errors()
def run(args=None):
usage = "dials.spot_counts_per_image [options] imported.expt strong.refl"
parser = OptionParser(
usage=usage,
read_reflections=True,
read_experiments=True,
phil=phil_scope,
check_format=False,
epilog=help_message,
)
params, options = parser.parse_args(args, show_diff_phil=False)
reflections, experiments = reflections_and_experiments_from_files(
params.input.reflections, params.input.experiments
)
if not reflections and not experiments:
parser.print_help()
return
# FIXME may want to change this to allow many to be passed i.e.
# from parallel runs
if len(reflections) != 1:
sys.exit("Only one reflection list may be passed")
reflections = reflections[0]
if "miller_index" in reflections:
sys.exit("Only unindexed reflections are currently supported")
if any(experiments.crystals()):
sys.exit("Only unindexed experiments are currently supported")
reflections.centroid_px_to_mm(experiments)
reflections.map_centroids_to_reciprocal_space(experiments)
if params.id is not None:
reflections = reflections.select(reflections["id"] == params.id)
all_stats = []
for i, expt in enumerate(experiments):
refl = reflections.select(reflections["id"] == i)
stats = per_image_analysis.stats_per_image(
expt, refl, resolution_analysis=params.resolution_analysis
)
all_stats.append(stats)
# transpose stats
summary_table = {}
for s in all_stats:
for k, value in s._asdict().items():
summary_table.setdefault(k, [])
summary_table[k].extend(value)
stats = per_image_analysis.StatsMultiImage(**summary_table)
print(stats)
overall_stats = per_image_analysis.stats_for_reflection_table(
reflections, resolution_analysis=params.resolution_analysis
)
rows = [
("Overall statistics", ""),
("#spots", "%i" % overall_stats.n_spots_total),
("#spots_no_ice", "%i" % overall_stats.n_spots_no_ice),
("d_min", f"{overall_stats.estimated_d_min:.2f}"),
(
"d_min (distl method 1)",
"%.2f (%.2f)"
% (overall_stats.d_min_distl_method_1, overall_stats.noisiness_method_1),
),
(
"d_min (distl method 2)",
"%.2f (%.2f)"
% (overall_stats.d_min_distl_method_2, overall_stats.noisiness_method_2),
),
]
print(tabulate(rows, headers="firstrow"))
if params.json:
if params.split_json:
for k, v in stats._asdict().items():
start, end = params.json.split(".")
with open(f"{start}_{k}.{end}", "w") as fp:
json.dump(v, fp)
if params.joint_json:
with open(params.json, "w") as fp:
json.dump(stats._asdict(), fp)
if params.plot:
import matplotlib
matplotlib.use("Agg")
per_image_analysis.plot_stats(stats, filename=params.plot)
if __name__ == "__main__":
run()
|
py | 1a33e70079b0e98775c4e88224007dfab5347452 | # -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2017 Vic Chan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import paddle.v2 as paddle
import gzip
import sys
import data_provider
import numpy as np
def param():
return paddle.attr.Param(
initial_std=0.01,
initial_mean=0
)
def encoder(x_):
x_ = paddle.layer.fc(
input=x_,
size=512,
act=paddle.activation.Sigmoid(),
param_attr=param(),
bias_attr=param()
)
x_ = paddle.layer.fc(
input=x_,
size=256,
act=paddle.activation.Relu(),
param_attr=param(),
bias_attr=param()
)
x_ = paddle.layer.fc(
input=x_,
size=128,
act=paddle.activation.Relu(),
param_attr=param(),
bias_attr=param()
)
return x_
def decoder(x_):
x_ = paddle.layer.fc(
input=x_,
size=128,
act=paddle.activation.Sigmoid(),
param_attr=param(),
bias_attr=param()
)
x_ = paddle.layer.fc(
input=x_,
size=256,
act=paddle.activation.Relu(),
param_attr=param(),
bias_attr=param()
)
x_ = paddle.layer.fc(
input=x_,
size=512,
act=paddle.activation.Relu(),
param_attr=param(),
bias_attr=param()
)
return x_
def output(x_):
return paddle.layer.fc(
input=x_,
size=784,
act=paddle.activation.Relu(),
param_attr=param(),
bias_attr=param()
)
paddle.init(use_gpu=False, trainer_count=1)
x = paddle.layer.data(
name='x',
type=paddle.data_type.dense_vector(784)
)
y = encoder(x)
y = decoder(y)
y = output(y)
def train():
optimizer = paddle.optimizer.RMSProp(
learning_rate=1e-3,
regularization=paddle.optimizer.L2Regularization(rate=8e-4)
)
loss = paddle.layer.mse_cost(label=x, input=y)
parameters = paddle.parameters.create(loss)
trainer = paddle.trainer.SGD(
cost=loss,
parameters=parameters,
update_equation=optimizer
)
feeding = {'x': 0}
def event_handler(event):
if isinstance(event, paddle.event.EndIteration):
if event.batch_id % 50 == 0:
print ("\n pass %d, Batch: %d cost: %f"
% (event.pass_id, event.batch_id, event.cost))
else:
sys.stdout.write('.')
sys.stdout.flush()
if isinstance(event, paddle.event.EndPass):
with gzip.open('output/params_pass_%d.tar.gz' % event.pass_id, 'w') as f:
parameters.to_tar(f)
reader = data_provider.create_reader('train', 60000)
trainer.train(
paddle.batch(
reader=reader,
batch_size=128
),
feeding=feeding,
num_passes=20,
event_handler=event_handler
)
def test(model_path):
with gzip.open(model_path, 'r') as openFile:
parameters = paddle.parameters.Parameters.from_tar(openFile)
testset = [[x] for x in data_provider.fetch_testingset()['images'][:10]]
    # Run prediction on the test samples with paddle.infer
result = paddle.infer(
input=testset,
parameters=parameters,
output_layer=y,
feeding={'x': 0}
)
return result, np.array(testset)
if __name__ == '__main__':
    result, origin = test('output/params_pass_19.tar.gz')
np.save('origin.dat', origin)
np.save('result.dat', result)
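    # Minimal sketch (illustrative only) of reloading the saved arrays (np.save appends .npy):
    #   origin = np.load('origin.dat.npy')   # original test images
    #   result = np.load('result.dat.npy')   # autoencoder reconstructions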
|
py | 1a33e757fbe850f631809a5752875262b095d59e | from django.core.management.base import BaseCommand
from pretalx.common.tasks import regenerate_css
from pretalx.event.models.event import Event
class Command(BaseCommand):
help = "Rebuild static files and language files"
def add_arguments(self, parser):
parser.add_argument("--event", type=str)
parser.add_argument(
"-s",
"--silent",
action="store_true",
dest="silent",
help="Silence most of the build output.",
)
def handle_regeneration(self, event, silent=False):
regenerate_css.apply_async(args=(event.pk,))
if not silent:
self.stdout.write(
self.style.SUCCESS(
f"[{event.slug}] Event style was successfully regenerated."
)
)
def handle(self, *args, **options):
event = options.get("event")
silent = 1 if options.get("silent") else 0
if event:
try:
event = Event.objects.get(slug__iexact=event)
except Event.DoesNotExist:
self.stdout.write(self.style.ERROR("This event does not exist."))
return
self.handle_regeneration(event, silent=silent)
else:
for event in Event.objects.all():
self.handle_regeneration(event, silent=silent)
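# Example invocation (assumption: this module file is named rebuild.py, so the command is "rebuild";
# the actual command name depends on the file name, which is not shown here):
#   python manage.py rebuild --event my-event --silent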
|
py | 1a33e806c028b46354d08974a82de6a0a263b1f2 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dj_main.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
py | 1a33e915d5ae505b46538d3214a4668a7ae8741a | """ Tensorflow implementation of the face detection / alignment algorithm found at
https://github.com/kpzhang93/MTCNN_face_detection_alignment
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import string_types, iteritems
import numpy as np
import tensorflow as tf
#from math import floor
import cv2
import os
def layer(op):
"""Decorator for composable network layers."""
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
# Figure out the layer inputs.
if len(self.terminals) == 0:
raise RuntimeError('No input variables found for layer %s.' % name)
elif len(self.terminals) == 1:
layer_input = self.terminals[0]
else:
layer_input = list(self.terminals)
# Perform the operation and get the output.
layer_output = op(self, layer_input, *args, **kwargs)
# Add to layer LUT.
self.layers[name] = layer_output
# This output is now the input for the next layer.
self.feed(layer_output)
# Return self for chained calls.
return self
return layer_decorated
class Network(object):
def __init__(self, inputs, trainable=True):
# The input nodes for this network
self.inputs = inputs
# The current list of terminal nodes
self.terminals = []
# Mapping from layer names to layers
self.layers = dict(inputs)
# If true, the resulting variables are set as trainable
self.trainable = trainable
self.setup()
def setup(self):
"""Construct the network. """
raise NotImplementedError('Must be implemented by the subclass.')
def load(self, data_path, session, ignore_missing=False):
"""Load network weights.
data_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
"""
# print("@@@@@@@@@@@@",data_path)
data_dict = np.load(data_path, encoding='latin1',allow_pickle=True).item() #pylint: disable=no-member
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
for param_name, data in iteritems(data_dict[op_name]):
try:
var = tf.get_variable(param_name)
session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise
def feed(self, *args):
"""Set the input(s) for the next operation by replacing the terminal nodes.
The arguments can be either layer names or the actual layers.
"""
assert len(args) != 0
self.terminals = []
for fed_layer in args:
if isinstance(fed_layer, string_types):
try:
fed_layer = self.layers[fed_layer]
except KeyError:
raise KeyError('Unknown layer name fed: %s' % fed_layer)
self.terminals.append(fed_layer)
return self
def get_output(self):
"""Returns the current network output."""
return self.terminals[-1]
def get_unique_name(self, prefix):
"""Returns an index-suffixed unique name for the given prefix.
This is used for auto-generating layer names based on the type-prefix.
"""
ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
return '%s_%d' % (prefix, ident)
def make_var(self, name, shape):
"""Creates a new TensorFlow variable."""
return tf.get_variable(name, shape, trainable=self.trainable)
def validate_padding(self, padding):
"""Verifies that the padding is one of the supported ones."""
assert padding in ('SAME', 'VALID')
@layer
def conv(self,
inp,
k_h,
k_w,
c_o,
s_h,
s_w,
name,
relu=True,
padding='SAME',
group=1,
biased=True):
# Verify that the padding is acceptable
self.validate_padding(padding)
# Get the number of channels in the input
c_i = int(inp.get_shape()[-1])
# Verify that the grouping parameter is valid
assert c_i % group == 0
assert c_o % group == 0
# Convolution for a given input and kernel
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
with tf.variable_scope(name) as scope:
kernel = self.make_var('weights', shape=[k_h, k_w, c_i // group, c_o])
# This is the common-case. Convolve the input without any further complications.
output = convolve(inp, kernel)
# Add the biases
if biased:
biases = self.make_var('biases', [c_o])
output = tf.nn.bias_add(output, biases)
if relu:
# ReLU non-linearity
output = tf.nn.relu(output, name=scope.name)
return output
@layer
def prelu(self, inp, name):
with tf.variable_scope(name):
i = int(inp.get_shape()[-1])
alpha = self.make_var('alpha', shape=(i,))
output = tf.nn.relu(inp) + tf.multiply(alpha, -tf.nn.relu(-inp))
return output
@layer
def max_pool(self, inp, k_h, k_w, s_h, s_w, name, padding='SAME'):
self.validate_padding(padding)
return tf.nn.max_pool(inp,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def fc(self, inp, num_out, name, relu=True):
with tf.variable_scope(name):
input_shape = inp.get_shape()
if input_shape.ndims == 4:
# The input is spatial. Vectorize it first.
dim = 1
for d in input_shape[1:].as_list():
dim *= int(d)
feed_in = tf.reshape(inp, [-1, dim])
else:
feed_in, dim = (inp, input_shape[-1].value)
weights = self.make_var('weights', shape=[dim, num_out])
biases = self.make_var('biases', [num_out])
op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
fc = op(feed_in, weights, biases, name=name)
return fc
"""
Multi dimensional softmax,
refer to https://github.com/tensorflow/tensorflow/issues/210
compute softmax along the dimension of target
the native softmax only supports batch_size x dimension
"""
@layer
def softmax(self, target, axis, name=None):
max_axis = tf.reduce_max(target, axis, keepdims=True)
target_exp = tf.exp(target-max_axis)
normalize = tf.reduce_sum(target_exp, axis, keepdims=True)
softmax = tf.div(target_exp, normalize, name)
return softmax
class PNet(Network):
def setup(self):
(self.feed('data') #pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 10, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='PReLU1')
.max_pool(2, 2, 2, 2, name='pool1')
.conv(3, 3, 16, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='PReLU2')
.conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='PReLU3')
.conv(1, 1, 2, 1, 1, relu=False, name='conv4-1')
.softmax(3,name='prob1'))
(self.feed('PReLU3') #pylint: disable=no-value-for-parameter
.conv(1, 1, 4, 1, 1, relu=False, name='conv4-2'))
class RNet(Network):
def setup(self):
(self.feed('data') #pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 28, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='prelu1')
.max_pool(3, 3, 2, 2, name='pool1')
.conv(3, 3, 48, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='prelu2')
.max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
.conv(2, 2, 64, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='prelu3')
.fc(128, relu=False, name='conv4')
.prelu(name='prelu4')
.fc(2, relu=False, name='conv5-1')
.softmax(1,name='prob1'))
(self.feed('prelu4') #pylint: disable=no-value-for-parameter
.fc(4, relu=False, name='conv5-2'))
class ONet(Network):
def setup(self):
(self.feed('data') #pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='prelu1')
.max_pool(3, 3, 2, 2, name='pool1')
.conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='prelu2')
.max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
.conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='prelu3')
.max_pool(2, 2, 2, 2, name='pool3')
.conv(2, 2, 128, 1, 1, padding='VALID', relu=False, name='conv4')
.prelu(name='prelu4')
.fc(256, relu=False, name='conv5')
.prelu(name='prelu5')
.fc(2, relu=False, name='conv6-1')
.softmax(1, name='prob1'))
(self.feed('prelu5') #pylint: disable=no-value-for-parameter
.fc(4, relu=False, name='conv6-2'))
(self.feed('prelu5') #pylint: disable=no-value-for-parameter
.fc(10, relu=False, name='conv6-3'))
def create_mtcnn(sess, model_path):
if not model_path:
model_path,_ = os.path.split(os.path.realpath(__file__))
# print("!!!!!!!!!!!!!!!!!!!",model_path)
with tf.variable_scope('pnet'):
data = tf.placeholder(tf.float32, (None,None,None,3), 'input')
pnet = PNet({'data':data})
pnet.load(os.path.join(model_path, 'det1.npy'), sess)
with tf.variable_scope('rnet'):
data = tf.placeholder(tf.float32, (None,24,24,3), 'input')
rnet = RNet({'data':data})
rnet.load(os.path.join(model_path, 'det2.npy'), sess)
with tf.variable_scope('onet'):
data = tf.placeholder(tf.float32, (None,48,48,3), 'input')
onet = ONet({'data':data})
onet.load(os.path.join(model_path, 'det3.npy'), sess)
pnet_fun = lambda img : sess.run(('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), feed_dict={'pnet/input:0':img})
rnet_fun = lambda img : sess.run(('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), feed_dict={'rnet/input:0':img})
onet_fun = lambda img : sess.run(('onet/conv6-2/conv6-2:0', 'onet/conv6-3/conv6-3:0', 'onet/prob1:0'), feed_dict={'onet/input:0':img})
return pnet_fun, rnet_fun, onet_fun
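# Minimal usage sketch (illustrative only; 'face.jpg', the minsize of 20, the thresholds and the
# scale factor are example values, not prescribed by this module):
#   with tf.Graph().as_default():
#       sess = tf.Session()
#       pnet, rnet, onet = create_mtcnn(sess, None)
#   img = cv2.cvtColor(cv2.imread('face.jpg'), cv2.COLOR_BGR2RGB)
#   bounding_boxes, points = detect_face(img, 20, pnet, rnet, onet, [0.6, 0.7, 0.7], 0.709)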
def detect_face(img, minsize, pnet, rnet, onet, threshold, factor):
"""Detects faces in an image, and returns bounding boxes and points for them.
img: input image
minsize: minimum faces' size
pnet, rnet, onet: caffemodel
threshold: threshold=[th1, th2, th3], th1-3 are three steps's threshold
factor: the factor used to create a scaling pyramid of face sizes to detect in the image.
"""
factor_count=0
total_boxes=np.empty((0,9))
points=np.empty(0)
h=img.shape[0]
w=img.shape[1]
minl=np.amin([h, w])
m=12.0/minsize
minl=minl*m
# create scale pyramid
scales=[]
while minl>=12:
scales += [m*np.power(factor, factor_count)]
minl = minl*factor
factor_count += 1
# first stage
for scale in scales:
hs=int(np.ceil(h*scale))
ws=int(np.ceil(w*scale))
im_data = imresample(img, (hs, ws))
im_data = (im_data-127.5)*0.0078125
img_x = np.expand_dims(im_data, 0)
img_y = np.transpose(img_x, (0,2,1,3))
out = pnet(img_y)
out0 = np.transpose(out[0], (0,2,1,3))
out1 = np.transpose(out[1], (0,2,1,3))
boxes, _ = generateBoundingBox(out1[0,:,:,1].copy(), out0[0,:,:,:].copy(), scale, threshold[0])
# inter-scale nms
pick = nms(boxes.copy(), 0.5, 'Union')
if boxes.size>0 and pick.size>0:
boxes = boxes[pick,:]
total_boxes = np.append(total_boxes, boxes, axis=0)
numbox = total_boxes.shape[0]
if numbox>0:
pick = nms(total_boxes.copy(), 0.7, 'Union')
total_boxes = total_boxes[pick,:]
regw = total_boxes[:,2]-total_boxes[:,0]
regh = total_boxes[:,3]-total_boxes[:,1]
qq1 = total_boxes[:,0]+total_boxes[:,5]*regw
qq2 = total_boxes[:,1]+total_boxes[:,6]*regh
qq3 = total_boxes[:,2]+total_boxes[:,7]*regw
qq4 = total_boxes[:,3]+total_boxes[:,8]*regh
total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:,4]]))
total_boxes = rerec(total_boxes.copy())
total_boxes[:,0:4] = np.fix(total_boxes[:,0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
numbox = total_boxes.shape[0]
if numbox>0:
# second stage
tempimg = np.zeros((24,24,3,numbox))
for k in range(0,numbox):
tmp = np.zeros((int(tmph[k]),int(tmpw[k]),3))
tmp[dy[k]-1:edy[k],dx[k]-1:edx[k],:] = img[y[k]-1:ey[k],x[k]-1:ex[k],:]
if tmp.shape[0]>0 and tmp.shape[1]>0 or tmp.shape[0]==0 and tmp.shape[1]==0:
tempimg[:,:,:,k] = imresample(tmp, (24, 24))
else:
                return np.empty(0)
tempimg = (tempimg-127.5)*0.0078125
tempimg1 = np.transpose(tempimg, (3,1,0,2))
out = rnet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1,:]
ipass = np.where(score>threshold[1])
total_boxes = np.hstack([total_boxes[ipass[0],0:4].copy(), np.expand_dims(score[ipass].copy(),1)])
mv = out0[:,ipass[0]]
if total_boxes.shape[0]>0:
pick = nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[pick,:]
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:,pick]))
total_boxes = rerec(total_boxes.copy())
numbox = total_boxes.shape[0]
if numbox>0:
# third stage
total_boxes = np.fix(total_boxes).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
tempimg = np.zeros((48,48,3,numbox))
for k in range(0,numbox):
tmp = np.zeros((int(tmph[k]),int(tmpw[k]),3))
tmp[dy[k]-1:edy[k],dx[k]-1:edx[k],:] = img[y[k]-1:ey[k],x[k]-1:ex[k],:]
if tmp.shape[0]>0 and tmp.shape[1]>0 or tmp.shape[0]==0 and tmp.shape[1]==0:
tempimg[:,:,:,k] = imresample(tmp, (48, 48))
else:
                return np.empty(0)
tempimg = (tempimg-127.5)*0.0078125
tempimg1 = np.transpose(tempimg, (3,1,0,2))
out = onet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1,:]
points = out1
ipass = np.where(score>threshold[2])
points = points[:,ipass[0]]
total_boxes = np.hstack([total_boxes[ipass[0],0:4].copy(), np.expand_dims(score[ipass].copy(),1)])
mv = out0[:,ipass[0]]
w = total_boxes[:,2]-total_boxes[:,0]+1
h = total_boxes[:,3]-total_boxes[:,1]+1
points[0:5,:] = np.tile(w,(5, 1))*points[0:5,:] + np.tile(total_boxes[:,0],(5, 1))-1
points[5:10,:] = np.tile(h,(5, 1))*points[5:10,:] + np.tile(total_boxes[:,1],(5, 1))-1
if total_boxes.shape[0]>0:
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv))
pick = nms(total_boxes.copy(), 0.7, 'Min')
total_boxes = total_boxes[pick,:]
points = points[:,pick]
return total_boxes, points
def bulk_detect_face(images, detection_window_size_ratio, pnet, rnet, onet, threshold, factor):
"""Detects faces in a list of images
images: list containing input images
detection_window_size_ratio: ratio of minimum face size to smallest image dimension
pnet, rnet, onet: caffemodel
threshold: threshold=[th1 th2 th3], th1-3 are three steps's threshold [0-1]
factor: the factor used to create a scaling pyramid of face sizes to detect in the image.
"""
all_scales = [None] * len(images)
images_with_boxes = [None] * len(images)
for i in range(len(images)):
images_with_boxes[i] = {'total_boxes': np.empty((0, 9))}
# create scale pyramid
for index, img in enumerate(images):
all_scales[index] = []
h = img.shape[0]
w = img.shape[1]
minsize = int(detection_window_size_ratio * np.minimum(w, h))
factor_count = 0
minl = np.amin([h, w])
if minsize <= 12:
minsize = 12
m = 12.0 / minsize
minl = minl * m
while minl >= 12:
all_scales[index].append(m * np.power(factor, factor_count))
minl = minl * factor
factor_count += 1
# # # # # # # # # # # # #
# first stage - fast proposal network (pnet) to obtain face candidates
# # # # # # # # # # # # #
images_obj_per_resolution = {}
# TODO: use some type of rounding to number module 8 to increase probability that pyramid images will have the same resolution across input images
for index, scales in enumerate(all_scales):
h = images[index].shape[0]
w = images[index].shape[1]
for scale in scales:
hs = int(np.ceil(h * scale))
ws = int(np.ceil(w * scale))
if (ws, hs) not in images_obj_per_resolution:
images_obj_per_resolution[(ws, hs)] = []
im_data = imresample(images[index], (hs, ws))
im_data = (im_data - 127.5) * 0.0078125
img_y = np.transpose(im_data, (1, 0, 2)) # caffe uses different dimensions ordering
images_obj_per_resolution[(ws, hs)].append({'scale': scale, 'image': img_y, 'index': index})
for resolution in images_obj_per_resolution:
images_per_resolution = [i['image'] for i in images_obj_per_resolution[resolution]]
outs = pnet(images_per_resolution)
for index in range(len(outs[0])):
scale = images_obj_per_resolution[resolution][index]['scale']
image_index = images_obj_per_resolution[resolution][index]['index']
out0 = np.transpose(outs[0][index], (1, 0, 2))
out1 = np.transpose(outs[1][index], (1, 0, 2))
boxes, _ = generateBoundingBox(out1[:, :, 1].copy(), out0[:, :, :].copy(), scale, threshold[0])
# inter-scale nms
pick = nms(boxes.copy(), 0.5, 'Union')
if boxes.size > 0 and pick.size > 0:
boxes = boxes[pick, :]
images_with_boxes[image_index]['total_boxes'] = np.append(images_with_boxes[image_index]['total_boxes'],
boxes,
axis=0)
for index, image_obj in enumerate(images_with_boxes):
numbox = image_obj['total_boxes'].shape[0]
if numbox > 0:
h = images[index].shape[0]
w = images[index].shape[1]
pick = nms(image_obj['total_boxes'].copy(), 0.7, 'Union')
image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
regw = image_obj['total_boxes'][:, 2] - image_obj['total_boxes'][:, 0]
regh = image_obj['total_boxes'][:, 3] - image_obj['total_boxes'][:, 1]
qq1 = image_obj['total_boxes'][:, 0] + image_obj['total_boxes'][:, 5] * regw
qq2 = image_obj['total_boxes'][:, 1] + image_obj['total_boxes'][:, 6] * regh
qq3 = image_obj['total_boxes'][:, 2] + image_obj['total_boxes'][:, 7] * regw
qq4 = image_obj['total_boxes'][:, 3] + image_obj['total_boxes'][:, 8] * regh
image_obj['total_boxes'] = np.transpose(np.vstack([qq1, qq2, qq3, qq4, image_obj['total_boxes'][:, 4]]))
image_obj['total_boxes'] = rerec(image_obj['total_boxes'].copy())
image_obj['total_boxes'][:, 0:4] = np.fix(image_obj['total_boxes'][:, 0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(image_obj['total_boxes'].copy(), w, h)
numbox = image_obj['total_boxes'].shape[0]
tempimg = np.zeros((24, 24, 3, numbox))
if numbox > 0:
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = images[index][y[k] - 1:ey[k], x[k] - 1:ex[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
tempimg[:, :, :, k] = imresample(tmp, (24, 24))
else:
                        return np.empty(0)
tempimg = (tempimg - 127.5) * 0.0078125
image_obj['rnet_input'] = np.transpose(tempimg, (3, 1, 0, 2))
# # # # # # # # # # # # #
# second stage - refinement of face candidates with rnet
# # # # # # # # # # # # #
bulk_rnet_input = np.empty((0, 24, 24, 3))
for index, image_obj in enumerate(images_with_boxes):
if 'rnet_input' in image_obj:
bulk_rnet_input = np.append(bulk_rnet_input, image_obj['rnet_input'], axis=0)
out = rnet(bulk_rnet_input)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1, :]
i = 0
for index, image_obj in enumerate(images_with_boxes):
if 'rnet_input' not in image_obj:
continue
rnet_input_count = image_obj['rnet_input'].shape[0]
score_per_image = score[i:i + rnet_input_count]
out0_per_image = out0[:, i:i + rnet_input_count]
ipass = np.where(score_per_image > threshold[1])
image_obj['total_boxes'] = np.hstack([image_obj['total_boxes'][ipass[0], 0:4].copy(),
np.expand_dims(score_per_image[ipass].copy(), 1)])
mv = out0_per_image[:, ipass[0]]
if image_obj['total_boxes'].shape[0] > 0:
h = images[index].shape[0]
w = images[index].shape[1]
pick = nms(image_obj['total_boxes'], 0.7, 'Union')
image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
image_obj['total_boxes'] = bbreg(image_obj['total_boxes'].copy(), np.transpose(mv[:, pick]))
image_obj['total_boxes'] = rerec(image_obj['total_boxes'].copy())
numbox = image_obj['total_boxes'].shape[0]
if numbox > 0:
tempimg = np.zeros((48, 48, 3, numbox))
image_obj['total_boxes'] = np.fix(image_obj['total_boxes']).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(image_obj['total_boxes'].copy(), w, h)
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = images[index][y[k] - 1:ey[k], x[k] - 1:ex[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
tempimg[:, :, :, k] = imresample(tmp, (48, 48))
else:
                        return np.empty(0)
tempimg = (tempimg - 127.5) * 0.0078125
image_obj['onet_input'] = np.transpose(tempimg, (3, 1, 0, 2))
i += rnet_input_count
# # # # # # # # # # # # #
# third stage - further refinement and facial landmarks positions with onet
# # # # # # # # # # # # #
bulk_onet_input = np.empty((0, 48, 48, 3))
for index, image_obj in enumerate(images_with_boxes):
if 'onet_input' in image_obj:
bulk_onet_input = np.append(bulk_onet_input, image_obj['onet_input'], axis=0)
out = onet(bulk_onet_input)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1, :]
points = out1
i = 0
ret = []
for index, image_obj in enumerate(images_with_boxes):
if 'onet_input' not in image_obj:
ret.append(None)
continue
onet_input_count = image_obj['onet_input'].shape[0]
out0_per_image = out0[:, i:i + onet_input_count]
score_per_image = score[i:i + onet_input_count]
points_per_image = points[:, i:i + onet_input_count]
ipass = np.where(score_per_image > threshold[2])
points_per_image = points_per_image[:, ipass[0]]
image_obj['total_boxes'] = np.hstack([image_obj['total_boxes'][ipass[0], 0:4].copy(),
np.expand_dims(score_per_image[ipass].copy(), 1)])
mv = out0_per_image[:, ipass[0]]
w = image_obj['total_boxes'][:, 2] - image_obj['total_boxes'][:, 0] + 1
h = image_obj['total_boxes'][:, 3] - image_obj['total_boxes'][:, 1] + 1
points_per_image[0:5, :] = np.tile(w, (5, 1)) * points_per_image[0:5, :] + np.tile(
image_obj['total_boxes'][:, 0], (5, 1)) - 1
points_per_image[5:10, :] = np.tile(h, (5, 1)) * points_per_image[5:10, :] + np.tile(
image_obj['total_boxes'][:, 1], (5, 1)) - 1
if image_obj['total_boxes'].shape[0] > 0:
image_obj['total_boxes'] = bbreg(image_obj['total_boxes'].copy(), np.transpose(mv))
pick = nms(image_obj['total_boxes'].copy(), 0.7, 'Min')
image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
points_per_image = points_per_image[:, pick]
ret.append((image_obj['total_boxes'], points_per_image))
else:
ret.append(None)
i += onet_input_count
return ret
# function [boundingbox] = bbreg(boundingbox,reg)
def bbreg(boundingbox,reg):
"""Calibrate bounding boxes"""
if reg.shape[1]==1:
reg = np.reshape(reg, (reg.shape[2], reg.shape[3]))
w = boundingbox[:,2]-boundingbox[:,0]+1
h = boundingbox[:,3]-boundingbox[:,1]+1
b1 = boundingbox[:,0]+reg[:,0]*w
b2 = boundingbox[:,1]+reg[:,1]*h
b3 = boundingbox[:,2]+reg[:,2]*w
b4 = boundingbox[:,3]+reg[:,3]*h
boundingbox[:,0:4] = np.transpose(np.vstack([b1, b2, b3, b4 ]))
return boundingbox
def generateBoundingBox(imap, reg, scale, t):
"""Use heatmap to generate bounding boxes"""
stride=2
cellsize=12
imap = np.transpose(imap)
dx1 = np.transpose(reg[:,:,0])
dy1 = np.transpose(reg[:,:,1])
dx2 = np.transpose(reg[:,:,2])
dy2 = np.transpose(reg[:,:,3])
y, x = np.where(imap >= t)
if y.shape[0]==1:
dx1 = np.flipud(dx1)
dy1 = np.flipud(dy1)
dx2 = np.flipud(dx2)
dy2 = np.flipud(dy2)
score = imap[(y,x)]
reg = np.transpose(np.vstack([ dx1[(y,x)], dy1[(y,x)], dx2[(y,x)], dy2[(y,x)] ]))
if reg.size==0:
reg = np.empty((0,3))
bb = np.transpose(np.vstack([y,x]))
q1 = np.fix((stride*bb+1)/scale)
q2 = np.fix((stride*bb+cellsize-1+1)/scale)
boundingbox = np.hstack([q1, q2, np.expand_dims(score,1), reg])
return boundingbox, reg
# function pick = nms(boxes,threshold,type)
def nms(boxes, threshold, method):
if boxes.size==0:
return np.empty((0,3))
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
s = boxes[:,4]
area = (x2-x1+1) * (y2-y1+1)
I = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
while I.size>0:
i = I[-1]
pick[counter] = i
counter += 1
idx = I[0:-1]
xx1 = np.maximum(x1[i], x1[idx])
yy1 = np.maximum(y1[i], y1[idx])
xx2 = np.minimum(x2[i], x2[idx])
yy2 = np.minimum(y2[i], y2[idx])
w = np.maximum(0.0, xx2-xx1+1)
h = np.maximum(0.0, yy2-yy1+1)
inter = w * h
        if method == 'Min':
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
I = I[np.where(o<=threshold)]
pick = pick[0:counter]
return pick
# function [dy edy dx edx y ey x ex tmpw tmph] = pad(total_boxes,w,h)
def pad(total_boxes, w, h):
"""Compute the padding coordinates (pad the bounding boxes to square)"""
tmpw = (total_boxes[:,2]-total_boxes[:,0]+1).astype(np.int32)
tmph = (total_boxes[:,3]-total_boxes[:,1]+1).astype(np.int32)
numbox = total_boxes.shape[0]
dx = np.ones((numbox), dtype=np.int32)
dy = np.ones((numbox), dtype=np.int32)
edx = tmpw.copy().astype(np.int32)
edy = tmph.copy().astype(np.int32)
x = total_boxes[:,0].copy().astype(np.int32)
y = total_boxes[:,1].copy().astype(np.int32)
ex = total_boxes[:,2].copy().astype(np.int32)
ey = total_boxes[:,3].copy().astype(np.int32)
tmp = np.where(ex>w)
edx.flat[tmp] = np.expand_dims(-ex[tmp]+w+tmpw[tmp],1)
ex[tmp] = w
tmp = np.where(ey>h)
edy.flat[tmp] = np.expand_dims(-ey[tmp]+h+tmph[tmp],1)
ey[tmp] = h
tmp = np.where(x<1)
dx.flat[tmp] = np.expand_dims(2-x[tmp],1)
x[tmp] = 1
tmp = np.where(y<1)
dy.flat[tmp] = np.expand_dims(2-y[tmp],1)
y[tmp] = 1
return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph
# function [bboxA] = rerec(bboxA)
def rerec(bboxA):
"""Convert bboxA to square."""
h = bboxA[:,3]-bboxA[:,1]
w = bboxA[:,2]-bboxA[:,0]
l = np.maximum(w, h)
bboxA[:,0] = bboxA[:,0]+w*0.5-l*0.5
bboxA[:,1] = bboxA[:,1]+h*0.5-l*0.5
bboxA[:,2:4] = bboxA[:,0:2] + np.transpose(np.tile(l,(2,1)))
return bboxA
def imresample(img, sz):
im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA) #@UndefinedVariable
return im_data
# This method is kept for debugging purpose
# h=img.shape[0]
# w=img.shape[1]
# hs, ws = sz
# dx = float(w) / ws
# dy = float(h) / hs
# im_data = np.zeros((hs,ws,3))
# for a1 in range(0,hs):
# for a2 in range(0,ws):
# for a3 in range(0,3):
# im_data[a1,a2,a3] = img[int(floor(a1*dy)),int(floor(a2*dx)),a3]
# return im_data
|
py | 1a33e94bc9805a65f9bcf27ea5778dbea0cf3cbc | # SPDX-FileCopyrightText: 2020 The Magma Authors.
# SPDX-FileCopyrightText: 2022 Open Networking Foundation <[email protected]>
#
# SPDX-License-Identifier: BSD-3-Clause
import time
from abc import ABC, abstractmethod
from collections import namedtuple
from typing import Any, Optional
import metrics
from configuration.service_configs import load_service_config
from data_models.data_model import InvalidTrParamPath
from data_models.data_model_parameters import ParameterName
from device_config.configuration_init import build_desired_config
from exceptions import ConfigurationError, Tr069Error
from logger import EnodebdLogger as logger
from state_machines.acs_state_utils import (
does_inform_have_event,
get_all_objects_to_add,
get_all_objects_to_delete,
get_all_param_values_to_set,
get_obj_param_values_to_set,
get_object_params_to_get,
get_optional_param_to_check,
get_param_values_to_set,
get_params_to_get,
parse_get_parameter_values_response,
process_inform_message,
)
from state_machines.enb_acs import EnodebAcsStateMachine
from state_machines.timer import StateMachineTimer
from tr069 import models
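# AcsMsgAndTransition pairs an outgoing TR-069 message with the name of the state to
# transition to after sending it (None means stay in the current state).
# AcsReadMsgResult records whether an incoming message was handled and, if so, which
# state to transition to next.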
AcsMsgAndTransition = namedtuple(
'AcsMsgAndTransition', ['msg', 'next_state'],
)
AcsReadMsgResult = namedtuple(
'AcsReadMsgResult', ['msg_handled', 'next_state'],
)
class EnodebAcsState(ABC):
"""
State class for the Enodeb state machine
States can transition after reading a message from the eNB, sending a
message out to the eNB, or when a timer completes. As such, some states
are only responsible for message sending, and others are only responsible
for reading incoming messages.
In the constructor, set up state transitions.
"""
def __init__(self):
self._acs = None
def enter(self) -> None:
"""
Set up your timers here. Call transition(..) on the ACS when the timer
completes or throw an error
"""
pass
def exit(self) -> None:
"""Destroy timers here"""
pass
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
Args: message: tr069 message
Returns: name of the next state, if transition required
"""
raise ConfigurationError(
'%s should implement read_msg() if it '
'needs to handle message reading' % self.__class__.__name__,
)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
"""
Produce a message to send back to the eNB.
Args:
message: TR-069 message which was already processed by read_msg
Returns: Message and possible transition
"""
raise ConfigurationError(
'%s should implement get_msg() if it '
'needs to produce messages' % self.__class__.__name__,
)
@property
def acs(self) -> EnodebAcsStateMachine:
return self._acs
@acs.setter
def acs(self, val: EnodebAcsStateMachine) -> None:
self._acs = val
@abstractmethod
def state_description(self) -> str:
""" Provide a few words about what the state represents """
pass
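# Minimal sketch of a concrete state (illustrative only, not part of enodebd): it acknowledges
# any incoming message, replies with a DummyInput, and then moves to the configured next state.
#
#   class ExampleNoOpState(EnodebAcsState):
#       def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
#           super().__init__()
#           self.acs = acs
#           self.done_transition = when_done
#
#       def read_msg(self, message: Any) -> AcsReadMsgResult:
#           return AcsReadMsgResult(msg_handled=True, next_state=None)
#
#       def get_msg(self, message: Any) -> AcsMsgAndTransition:
#           return AcsMsgAndTransition(models.DummyInput(), self.done_transition)
#
#       def state_description(self) -> str:
#           return 'Example no-op state'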
class WaitInformState(EnodebAcsState):
"""
This state indicates that no Inform message has been received yet, or
that no Inform message has been received for a long time.
This state is used to handle an Inform message that arrived when enodebd
already believes that the eNB is connected. As such, it is unclear to
enodebd whether the eNB is just sending another Inform, or if a different
eNB was plugged into the same interface.
"""
def __init__(
self,
acs: EnodebAcsStateMachine,
when_done: str,
when_boot: Optional[str] = None,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.boot_transition = when_boot
self.has_enb_just_booted = False
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
Args:
message: models.Inform Tr069 Inform message
"""
if not isinstance(message, models.Inform):
return AcsReadMsgResult(False, None)
process_inform_message(
message, self.acs.data_model,
self.acs.device_cfg,
)
# Switch enodeb status to connected
metrics.set_enb_status(
self.acs.device_cfg.get_parameter("Serial number"),
status="connected"
)
if does_inform_have_event(message, '1 BOOT'):
return AcsReadMsgResult(True, self.boot_transition)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
""" Reply with InformResponse """
response = models.InformResponse()
# Set maxEnvelopes to 1, as per TR-069 spec
response.MaxEnvelopes = 1
return AcsMsgAndTransition(response, self.done_transition)
def state_description(self) -> str:
return 'Waiting for an Inform'
class GetRPCMethodsState(EnodebAcsState):
"""
After the first Inform message from boot, it is expected that the eNB
will try to learn the RPC methods of the ACS.
"""
def __init__(self, acs: EnodebAcsStateMachine, when_done: str, when_skip: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.skip_transition = when_skip
def read_msg(self, message: Any) -> AcsReadMsgResult:
# If this is a regular Inform, not after a reboot we'll get an empty
if isinstance(message, models.DummyInput):
return AcsReadMsgResult(True, self.skip_transition)
if not isinstance(message, models.GetRPCMethods):
return AcsReadMsgResult(False, self.done_transition)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
resp = models.GetRPCMethodsResponse()
resp.MethodList = models.MethodList()
RPC_METHODS = ['Inform', 'GetRPCMethods', 'TransferComplete']
resp.MethodList.arrayType = 'xsd:string[%d]' \
% len(RPC_METHODS)
resp.MethodList.string = RPC_METHODS
return AcsMsgAndTransition(resp, self.done_transition)
def state_description(self) -> str:
return 'Waiting for incoming GetRPC Methods after boot'
class BaicellsRemWaitState(EnodebAcsState):
"""
We've already received an Inform message. This state is to handle a
Baicells eNodeB issue.
After eNodeB is rebooted, hold off configuring it for some time to give
time for REM to run. This is a BaiCells eNodeB issue that doesn't support
enabling the eNodeB during initial REM.
In this state, just hang at responding to Inform, and then ending the
TR-069 session.
"""
CONFIG_DELAY_AFTER_BOOT = 600
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.rem_timer = None
def enter(self):
self.rem_timer = StateMachineTimer(self.CONFIG_DELAY_AFTER_BOOT)
logger.info(
'Holding off of eNB configuration for %s seconds. '
'Will resume after eNB REM process has finished. ',
self.CONFIG_DELAY_AFTER_BOOT,
)
def exit(self):
self.rem_timer = None
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.Inform):
return AcsReadMsgResult(False, None)
process_inform_message(
message, self.acs.data_model,
self.acs.device_cfg,
)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
if self.rem_timer.is_done():
return AcsMsgAndTransition(
models.DummyInput(),
self.done_transition,
)
return AcsMsgAndTransition(models.DummyInput(), None)
def state_description(self) -> str:
remaining = self.rem_timer.seconds_remaining()
return 'Waiting for eNB REM to run for %d more seconds before ' \
'resuming with configuration.' % remaining
class WaitEmptyMessageState(EnodebAcsState):
def __init__(
self,
acs: EnodebAcsStateMachine,
when_done: str,
when_missing: Optional[str] = None,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.unknown_param_transition = when_missing
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
It's expected that we transition into this state right after receiving
an Inform message and replying with an InformResponse. At that point,
the eNB sends an empty HTTP request (aka DummyInput) to initiate the
rest of the provisioning process
"""
if not isinstance(message, models.DummyInput):
logger.debug("Ignoring message %s", str(type(message)))
return AcsReadMsgResult(msg_handled=False, next_state=None)
if self.unknown_param_transition:
if get_optional_param_to_check(self.acs.data_model):
return AcsReadMsgResult(
msg_handled=True,
next_state=self.unknown_param_transition,
)
return AcsReadMsgResult(
msg_handled=True,
next_state=self.done_transition,
)
def get_msg(self, message: Any) -> AcsReadMsgResult:
"""
Return a dummy message waiting for the empty message from CPE
"""
request = models.DummyInput()
return AcsMsgAndTransition(msg=request, next_state=None)
def state_description(self) -> str:
return 'Waiting for empty message from eNodeB'
class CheckOptionalParamsState(EnodebAcsState):
def __init__(
self,
acs: EnodebAcsStateMachine,
when_done: str,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.optional_param = None
def get_msg(self, message: Any) -> AcsMsgAndTransition:
self.optional_param = get_optional_param_to_check(self.acs.data_model)
if self.optional_param is None:
raise Tr069Error('Invalid State')
# Generate the request
request = models.GetParameterValues()
request.ParameterNames = models.ParameterNames()
request.ParameterNames.arrayType = 'xsd:string[1]'
request.ParameterNames.string = []
path = self.acs.data_model.get_parameter(self.optional_param).path
request.ParameterNames.string.append(path)
return AcsMsgAndTransition(request, None)
def read_msg(self, message: Any) -> AcsReadMsgResult:
""" Process either GetParameterValuesResponse or a Fault """
if type(message) == models.Fault:
self.acs.data_model.set_parameter_presence(
self.optional_param,
False,
)
elif type(message) == models.GetParameterValuesResponse:
name_to_val = parse_get_parameter_values_response(
self.acs.data_model,
message,
)
logger.debug(
'Received CPE parameter values: %s',
str(name_to_val),
)
for name, val in name_to_val.items():
self.acs.data_model.set_parameter_presence(
self.optional_param,
True,
)
magma_val = self.acs.data_model.transform_for_magma(name, val)
self.acs.device_cfg.set_parameter(name, magma_val)
else:
return AcsReadMsgResult(False, None)
if get_optional_param_to_check(self.acs.data_model) is not None:
return AcsReadMsgResult(True, None)
return AcsReadMsgResult(True, self.done_transition)
def state_description(self) -> str:
return 'Checking if some optional parameters exist in data model'
class SendGetTransientParametersState(EnodebAcsState):
"""
Periodically read eNodeB status. Note: keep frequency low to avoid
backing up large numbers of read operations if enodebd is busy.
Some eNB parameters are read only and updated by the eNB itself.
"""
PARAMETERS = [
ParameterName.OP_STATE,
ParameterName.RF_TX_STATUS,
ParameterName.GPS_STATUS,
ParameterName.PTP_STATUS,
ParameterName.MME_STATUS,
ParameterName.GPS_LAT,
ParameterName.GPS_LONG,
]
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.DummyInput):
return AcsReadMsgResult(False, None)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
request = models.GetParameterValues()
request.ParameterNames = models.ParameterNames()
request.ParameterNames.string = []
for name in self.PARAMETERS:
# Not all data models have these parameters
if self.acs.data_model.is_parameter_present(name):
path = self.acs.data_model.get_parameter(name).path
request.ParameterNames.string.append(path)
request.ParameterNames.arrayType = \
'xsd:string[%d]' % len(request.ParameterNames.string)
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Getting transient read-only parameters'
class WaitGetTransientParametersState(EnodebAcsState):
"""
Periodically read eNodeB status. Note: keep frequency low to avoid
backing up large numbers of read operations if enodebd is busy
"""
def __init__(
self,
acs: EnodebAcsStateMachine,
when_get: str,
when_get_obj_params: str,
when_delete: str,
when_add: str,
when_set: str,
when_skip: str,
):
super().__init__()
self.acs = acs
self.done_transition = when_get
self.get_obj_params_transition = when_get_obj_params
self.rm_obj_transition = when_delete
self.add_obj_transition = when_add
self.set_transition = when_set
self.skip_transition = when_skip
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.GetParameterValuesResponse):
return AcsReadMsgResult(False, None)
# Current values of the fetched parameters
name_to_val = parse_get_parameter_values_response(
self.acs.data_model,
message,
)
logger.debug('Fetched Transient Params: %s', str(name_to_val))
# Update device configuration
for name in name_to_val:
magma_val = \
self.acs.data_model.transform_for_magma(
name,
name_to_val[name],
)
self.acs.device_cfg.set_parameter(name, magma_val)
return AcsReadMsgResult(True, self.get_next_state())
def get_next_state(self) -> str:
should_get_params = \
len(
get_params_to_get(
self.acs.device_cfg,
self.acs.data_model,
),
) > 0
if should_get_params:
return self.done_transition
should_get_obj_params = \
len(
get_object_params_to_get(
self.acs.desired_cfg,
self.acs.device_cfg,
self.acs.data_model,
),
) > 0
if should_get_obj_params:
return self.get_obj_params_transition
elif len(
get_all_objects_to_delete(
self.acs.desired_cfg,
self.acs.device_cfg,
),
) > 0:
return self.rm_obj_transition
elif len(
get_all_objects_to_add(
self.acs.desired_cfg,
self.acs.device_cfg,
),
) > 0:
return self.add_obj_transition
return self.skip_transition
def state_description(self) -> str:
return 'Getting transient read-only parameters'
class GetParametersState(EnodebAcsState):
"""
Get the value of most parameters of the eNB that are defined in the data
model. Object parameters are excluded.
"""
def __init__(
self,
acs: EnodebAcsStateMachine,
when_done: str,
request_all_params: bool = False,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
# Set to True if we want to request values of all parameters, even if
# the ACS state machine already has recorded values of them.
self.request_all_params = request_all_params
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
It's expected that we transition into this state right after receiving
an Inform message and replying with an InformResponse. At that point,
the eNB sends an empty HTTP request (aka DummyInput) to initiate the
rest of the provisioning process
"""
if not isinstance(message, models.DummyInput):
return AcsReadMsgResult(False, None)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
"""
Respond with GetParameterValuesRequest
Get the values of all parameters defined in the data model.
Also check which addable objects are present, and what the values of
parameters for those objects are.
"""
# Get the names of regular parameters
names = get_params_to_get(
self.acs.device_cfg, self.acs.data_model,
self.request_all_params,
)
# Generate the request
request = models.GetParameterValues()
request.ParameterNames = models.ParameterNames()
request.ParameterNames.arrayType = 'xsd:string[%d]' \
% len(names)
request.ParameterNames.string = []
for name in names:
path = self.acs.data_model.get_parameter(name).path
if path is not InvalidTrParamPath:
# Only get data elements backed by tr69 path
request.ParameterNames.string.append(path)
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Getting non-object parameters'
class WaitGetParametersState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def read_msg(self, message: Any) -> AcsReadMsgResult:
""" Process GetParameterValuesResponse """
if not isinstance(message, models.GetParameterValuesResponse):
return AcsReadMsgResult(False, None)
name_to_val = parse_get_parameter_values_response(
self.acs.data_model,
message,
)
logger.debug('Received CPE parameter values: %s', str(name_to_val))
for name, val in name_to_val.items():
magma_val = self.acs.data_model.transform_for_magma(name, val)
self.acs.device_cfg.set_parameter(name, magma_val)
return AcsReadMsgResult(True, self.done_transition)
def state_description(self) -> str:
return 'Getting non-object parameters'
class GetObjectParametersState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def get_msg(self, message: Any) -> AcsMsgAndTransition:
""" Respond with GetParameterValuesRequest """
names = get_object_params_to_get(
self.acs.desired_cfg,
self.acs.device_cfg,
self.acs.data_model,
)
# Generate the request
request = models.GetParameterValues()
request.ParameterNames = models.ParameterNames()
request.ParameterNames.arrayType = 'xsd:string[%d]' \
% len(names)
request.ParameterNames.string = []
for name in names:
path = self.acs.data_model.get_parameter(name).path
request.ParameterNames.string.append(path)
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Getting object parameters'
class WaitGetObjectParametersState(EnodebAcsState):
def __init__(
self,
acs: EnodebAcsStateMachine,
when_delete: str,
when_add: str,
when_set: str,
when_skip: str,
):
super().__init__()
self.acs = acs
self.rm_obj_transition = when_delete
self.add_obj_transition = when_add
self.set_params_transition = when_set
self.skip_transition = when_skip
def read_msg(self, message: Any) -> AcsReadMsgResult:
""" Process GetParameterValuesResponse """
if not isinstance(message, models.GetParameterValuesResponse):
return AcsReadMsgResult(False, None)
path_to_val = {}
if hasattr(message.ParameterList, 'ParameterValueStruct') and \
message.ParameterList.ParameterValueStruct is not None:
for param_value_struct in message.ParameterList.ParameterValueStruct:
path_to_val[param_value_struct.Name] = \
param_value_struct.Value.Data
logger.debug('Received object parameters: %s', str(path_to_val))
# Number of PLMN objects reported can be incorrect. Let's count them
num_plmns = 0
obj_to_params = self.acs.data_model.get_numbered_param_names()
while True:
obj_name = ParameterName.PLMN_N % (num_plmns + 1)
if obj_name not in obj_to_params or len(obj_to_params[obj_name]) == 0:
logger.warning(
"eNB has PLMN %s but not defined in model",
obj_name,
)
break
param_name_list = obj_to_params[obj_name]
obj_path = self.acs.data_model.get_parameter(param_name_list[0]).path
if obj_path not in path_to_val:
break
if not self.acs.device_cfg.has_object(obj_name):
self.acs.device_cfg.add_object(obj_name)
num_plmns += 1
for name in param_name_list:
path = self.acs.data_model.get_parameter(name).path
value = path_to_val[path]
magma_val = \
self.acs.data_model.transform_for_magma(name, value)
self.acs.device_cfg.set_parameter_for_object(
name, magma_val,
obj_name,
)
num_plmns_reported = \
int(self.acs.device_cfg.get_parameter(ParameterName.NUM_PLMNS))
if num_plmns != num_plmns_reported:
logger.warning(
"eNB reported %d PLMNs but found %d",
num_plmns_reported, num_plmns,
)
self.acs.device_cfg.set_parameter(
ParameterName.NUM_PLMNS,
num_plmns,
)
# Now we can have the desired state
if self.acs.desired_cfg is None:
self.acs.desired_cfg = build_desired_config(
self.acs.mconfig,
self.acs.service_config,
self.acs.device_cfg,
self.acs.data_model,
self.acs.config_postprocessor,
)
if len(
get_all_objects_to_delete(
self.acs.desired_cfg,
self.acs.device_cfg,
),
) > 0:
return AcsReadMsgResult(True, self.rm_obj_transition)
elif len(
get_all_objects_to_add(
self.acs.desired_cfg,
self.acs.device_cfg,
),
) > 0:
return AcsReadMsgResult(True, self.add_obj_transition)
elif len(
get_all_param_values_to_set(
self.acs.desired_cfg,
self.acs.device_cfg,
self.acs.data_model,
),
) > 0:
return AcsReadMsgResult(True, self.set_params_transition)
return AcsReadMsgResult(True, self.skip_transition)
def state_description(self) -> str:
return 'Getting object parameters'
class DeleteObjectsState(EnodebAcsState):
def __init__(
self,
acs: EnodebAcsStateMachine,
when_add: str,
when_skip: str,
):
super().__init__()
self.acs = acs
self.deleted_param = None
self.add_obj_transition = when_add
self.skip_transition = when_skip
def get_msg(self, message: Any) -> AcsMsgAndTransition:
"""
Send DeleteObject message to TR-069 and poll for response(s).
Input:
- Object name (string)
"""
request = models.DeleteObject()
self.deleted_param = get_all_objects_to_delete(
self.acs.desired_cfg,
self.acs.device_cfg,
)[0]
request.ObjectName = \
self.acs.data_model.get_parameter(self.deleted_param).path
return AcsMsgAndTransition(request, None)
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
Send DeleteObject message to TR-069 and poll for response(s).
Input:
- Object name (string)
"""
if type(message) == models.DeleteObjectResponse:
if message.Status != 0:
raise Tr069Error(
'Received DeleteObjectResponse with '
'Status=%d' % message.Status,
)
elif type(message) == models.Fault:
raise Tr069Error(
'Received Fault in response to DeleteObject '
'(faultstring = %s)' % message.FaultString,
)
else:
return AcsReadMsgResult(False, None)
self.acs.device_cfg.delete_object(self.deleted_param)
obj_list_to_delete = get_all_objects_to_delete(
self.acs.desired_cfg,
self.acs.device_cfg,
)
if len(obj_list_to_delete) > 0:
return AcsReadMsgResult(True, None)
if len(
get_all_objects_to_add(
self.acs.desired_cfg,
self.acs.device_cfg,
),
) == 0:
return AcsReadMsgResult(True, self.skip_transition)
return AcsReadMsgResult(True, self.add_obj_transition)
def state_description(self) -> str:
return 'Deleting objects'
class AddObjectsState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.added_param = None
def get_msg(self, message: Any) -> AcsMsgAndTransition:
request = models.AddObject()
self.added_param = get_all_objects_to_add(
self.acs.desired_cfg,
self.acs.device_cfg,
)[0]
desired_param = self.acs.data_model.get_parameter(self.added_param)
desired_path = desired_param.path
path_parts = desired_path.split('.')
# If adding enumerated object, ie. XX.N. we should add it to the
# parent object XX. so strip the index
if len(path_parts) > 2 and \
path_parts[-1] == '' and path_parts[-2].isnumeric():
logger.debug('Stripping index from path=%s', desired_path)
desired_path = '.'.join(path_parts[:-2]) + '.'
request.ObjectName = desired_path
return AcsMsgAndTransition(request, None)
def read_msg(self, message: Any) -> AcsReadMsgResult:
if type(message) == models.AddObjectResponse:
if message.Status != 0:
raise Tr069Error(
'Received AddObjectResponse with '
'Status=%d' % message.Status,
)
elif type(message) == models.Fault:
raise Tr069Error(
'Received Fault in response to AddObject '
'(faultstring = %s)' % message.FaultString,
)
else:
return AcsReadMsgResult(False, None)
instance_n = message.InstanceNumber
self.acs.device_cfg.add_object(self.added_param % instance_n)
obj_list_to_add = get_all_objects_to_add(
self.acs.desired_cfg,
self.acs.device_cfg,
)
if len(obj_list_to_add) > 0:
return AcsReadMsgResult(True, None)
return AcsReadMsgResult(True, self.done_transition)
def state_description(self) -> str:
return 'Adding objects'
class SetParameterValuesState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def get_msg(self, message: Any) -> AcsMsgAndTransition:
request = models.SetParameterValues()
request.ParameterList = models.ParameterValueList()
param_values = get_all_param_values_to_set(
self.acs.desired_cfg,
self.acs.device_cfg,
self.acs.data_model,
)
request.ParameterList.arrayType = 'cwmp:ParameterValueStruct[%d]' \
% len(param_values)
request.ParameterList.ParameterValueStruct = []
logger.debug(
'Sending TR069 request to set CPE parameter values: %s',
str(param_values),
)
# TODO: Match key response when we support having multiple outstanding
# calls.
if self.acs.has_version_key:
request.ParameterKey = models.ParameterKeyType()
request.ParameterKey.Data =\
"SetParameter-{:10.0f}".format(self.acs.parameter_version_key)
request.ParameterKey.type = 'xsd:string'
for name, value in param_values.items():
param_info = self.acs.data_model.get_parameter(name)
type_ = param_info.type
name_value = models.ParameterValueStruct()
name_value.Value = models.anySimpleType()
name_value.Name = param_info.path
enb_value = self.acs.data_model.transform_for_enb(name, value)
if type_ in ('int', 'unsignedInt'):
name_value.Value.type = 'xsd:%s' % type_
name_value.Value.Data = str(enb_value)
elif type_ == 'boolean':
# Boolean values have integral representations in spec
name_value.Value.type = 'xsd:boolean'
name_value.Value.Data = str(int(enb_value))
elif type_ == 'string':
name_value.Value.type = 'xsd:string'
name_value.Value.Data = str(enb_value)
else:
raise Tr069Error(
'Unsupported type for %s: %s' %
(name, type_),
)
if param_info.is_invasive:
self.acs.are_invasive_changes_applied = False
request.ParameterList.ParameterValueStruct.append(name_value)
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Setting parameter values'
class SetParameterValuesNotAdminState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def get_msg(self, message: Any) -> AcsMsgAndTransition:
request = models.SetParameterValues()
request.ParameterList = models.ParameterValueList()
param_values = get_all_param_values_to_set(
self.acs.desired_cfg,
self.acs.device_cfg,
self.acs.data_model,
exclude_admin=True,
)
request.ParameterList.arrayType = 'cwmp:ParameterValueStruct[%d]' \
% len(param_values)
request.ParameterList.ParameterValueStruct = []
logger.debug(
'Sending TR069 request to set CPE parameter values: %s',
str(param_values),
)
for name, value in param_values.items():
param_info = self.acs.data_model.get_parameter(name)
type_ = param_info.type
name_value = models.ParameterValueStruct()
name_value.Value = models.anySimpleType()
name_value.Name = param_info.path
enb_value = self.acs.data_model.transform_for_enb(name, value)
if type_ in ('int', 'unsignedInt'):
name_value.Value.type = 'xsd:%s' % type_
name_value.Value.Data = str(enb_value)
elif type_ == 'boolean':
# Boolean values have integral representations in spec
name_value.Value.type = 'xsd:boolean'
name_value.Value.Data = str(int(enb_value))
elif type_ == 'string':
name_value.Value.type = 'xsd:string'
name_value.Value.Data = str(enb_value)
else:
raise Tr069Error(
'Unsupported type for %s: %s' %
(name, type_),
)
if param_info.is_invasive:
self.acs.are_invasive_changes_applied = False
request.ParameterList.ParameterValueStruct.append(name_value)
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Setting parameter values excluding Admin Enable'
class WaitSetParameterValuesState(EnodebAcsState):
def __init__(
self,
acs: EnodebAcsStateMachine,
when_done: str,
when_apply_invasive: str,
status_non_zero_allowed: bool = False,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.apply_invasive_transition = when_apply_invasive
# Set Params can legally return zero and non zero status
# Per tr-196, if there are errors the method should return a fault.
# Make flag optional to compensate for existing radios returning non
# zero on error.
self.status_non_zero_allowed = status_non_zero_allowed
def read_msg(self, message: Any) -> AcsReadMsgResult:
if type(message) == models.SetParameterValuesResponse:
if not self.status_non_zero_allowed:
if message.Status != 0:
raise Tr069Error(
'Received SetParameterValuesResponse with '
'Status=%d' % message.Status,
)
self._mark_as_configured()
metrics.set_enb_last_configured_time(
self.acs.device_cfg.get_parameter("Serial number"),
self.acs.device_cfg.get_parameter("ip_address"),
int(time.time())
)
# Switch enodeb status to configured
metrics.set_enb_status(
self.acs.device_cfg.get_parameter("Serial number"),
status="configured"
)
if not self.acs.are_invasive_changes_applied:
return AcsReadMsgResult(True, self.apply_invasive_transition)
return AcsReadMsgResult(True, self.done_transition)
elif type(message) == models.Fault:
logger.error(
'Received Fault in response to SetParameterValues, '
'Code (%s), Message (%s)', message.FaultCode,
message.FaultString,
)
if message.SetParameterValuesFault is not None:
for fault in message.SetParameterValuesFault:
logger.error(
'SetParameterValuesFault Param: %s, '
'Code: %s, String: %s', fault.ParameterName,
fault.FaultCode, fault.FaultString,
)
return AcsReadMsgResult(False, None)
def _mark_as_configured(self) -> None:
"""
A successful attempt at setting parameter values means that we need to
update what we think the eNB's configuration is to match what we just
set the parameter values to.
"""
# Values of parameters
name_to_val = get_param_values_to_set(
self.acs.desired_cfg,
self.acs.device_cfg,
self.acs.data_model,
)
for name, val in name_to_val.items():
magma_val = self.acs.data_model.transform_for_magma(name, val)
self.acs.device_cfg.set_parameter(name, magma_val)
# Values of object parameters
obj_to_name_to_val = get_obj_param_values_to_set(
self.acs.desired_cfg,
self.acs.device_cfg,
self.acs.data_model,
)
for obj_name, name_to_val in obj_to_name_to_val.items():
for name, val in name_to_val.items():
logger.debug(
'Set obj: %s, name: %s, val: %s', str(obj_name),
str(name), str(val),
)
magma_val = self.acs.data_model.transform_for_magma(name, val)
self.acs.device_cfg.set_parameter_for_object(
name, magma_val,
obj_name,
)
logger.info('Successfully configured CPE parameters!')
def state_description(self) -> str:
return 'Setting parameter values'
class EndSessionState(EnodebAcsState):
""" To end a TR-069 session, send an empty HTTP response """
def __init__(self, acs: EnodebAcsStateMachine):
super().__init__()
self.acs = acs
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
No message is expected after enodebd sends the eNodeB
an empty HTTP response.
If a device sends an empty HTTP request, we can just
ignore it and send another empty response.
"""
if isinstance(message, models.DummyInput):
return AcsReadMsgResult(True, None)
return AcsReadMsgResult(False, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
# Switch enodeb status to disconnected
metrics.set_enb_status(
self.acs.device_cfg.get_parameter("Serial number"),
status="disconnected"
)
request = models.DummyInput()
return AcsMsgAndTransition(request, None)
def state_description(self) -> str:
return 'Completed provisioning eNB. Awaiting new Inform.'
class EnbSendRebootState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.prev_msg_was_inform = False
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
This state can be transitioned into through user command.
All messages received by enodebd will be ignored in this state.
"""
if self.prev_msg_was_inform \
and not isinstance(message, models.DummyInput):
return AcsReadMsgResult(False, None)
elif isinstance(message, models.Inform):
self.prev_msg_was_inform = True
process_inform_message(
message, self.acs.data_model,
self.acs.device_cfg,
)
return AcsReadMsgResult(True, None)
self.prev_msg_was_inform = False
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
if self.prev_msg_was_inform:
response = models.InformResponse()
# Set maxEnvelopes to 1, as per TR-069 spec
response.MaxEnvelopes = 1
return AcsMsgAndTransition(response, None)
logger.info('Sending reboot request to eNB')
request = models.Reboot()
request.CommandKey = ''
self.acs.are_invasive_changes_applied = True
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Rebooting eNB'
class SendRebootState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.prev_msg_was_inform = False
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
This state can be transitioned into through user command.
All messages received by enodebd will be ignored in this state.
"""
if self.prev_msg_was_inform \
and not isinstance(message, models.DummyInput):
return AcsReadMsgResult(False, None)
elif isinstance(message, models.Inform):
self.prev_msg_was_inform = True
process_inform_message(
message, self.acs.data_model,
self.acs.device_cfg,
)
return AcsReadMsgResult(True, None)
self.prev_msg_was_inform = False
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
if self.prev_msg_was_inform:
response = models.InformResponse()
# Set maxEnvelopes to 1, as per TR-069 spec
response.MaxEnvelopes = 1
return AcsMsgAndTransition(response, None)
logger.info('Sending reboot request to eNB')
request = models.Reboot()
request.CommandKey = ''
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Rebooting eNB'
class WaitRebootResponseState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.RebootResponse):
return AcsReadMsgResult(False, None)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
""" Reply with empty message """
return AcsMsgAndTransition(models.DummyInput(), self.done_transition)
def state_description(self) -> str:
return 'Rebooting eNB'
class WaitInformMRebootState(EnodebAcsState):
"""
After sending a reboot request, we expect an Inform request with a
specific 'inform event code'
"""
# Time to wait for eNodeB reboot. The measured time
# (on BaiCells indoor eNodeB)
# is ~110secs, so add healthy padding on top of this.
REBOOT_TIMEOUT = 300 # In seconds
# We expect that the Inform we receive tells us the eNB has rebooted
INFORM_EVENT_CODE = 'M Reboot'
def __init__(
self,
acs: EnodebAcsStateMachine,
when_done: str,
when_timeout: str,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.timeout_transition = when_timeout
self.timeout_timer = None
self.timer_handle = None
def enter(self):
self.timeout_timer = StateMachineTimer(self.REBOOT_TIMEOUT)
def check_timer() -> None:
if self.timeout_timer.is_done():
self.acs.transition(self.timeout_transition)
raise Tr069Error(
'Did not receive Inform response after '
'rebooting',
)
self.timer_handle = \
self.acs.event_loop.call_later(
self.REBOOT_TIMEOUT,
check_timer,
)
def exit(self):
self.timer_handle.cancel()
self.timeout_timer = None
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.Inform):
return AcsReadMsgResult(False, None)
if not does_inform_have_event(message, self.INFORM_EVENT_CODE):
raise Tr069Error(
'Did not receive M Reboot event code in '
'Inform',
)
process_inform_message(
message, self.acs.data_model,
self.acs.device_cfg,
)
return AcsReadMsgResult(True, self.done_transition)
def state_description(self) -> str:
return 'Waiting for M Reboot code from Inform'
class WaitRebootDelayState(EnodebAcsState):
"""
After receiving the Inform notifying us that the eNodeB has successfully
rebooted, wait a short duration to prevent unspecified race conditions
that may occur w.r.t reboot
"""
# Short delay timer to prevent race conditions w.r.t. reboot
SHORT_CONFIG_DELAY = 10
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.config_timer = None
self.timer_handle = None
def enter(self):
self.config_timer = StateMachineTimer(self.SHORT_CONFIG_DELAY)
def check_timer() -> None:
if self.config_timer.is_done():
self.acs.transition(self.done_transition)
self.timer_handle = \
self.acs.event_loop.call_later(
self.SHORT_CONFIG_DELAY,
check_timer,
)
def exit(self):
self.timer_handle.cancel()
self.config_timer = None
def read_msg(self, message: Any) -> AcsReadMsgResult:
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
return AcsMsgAndTransition(models.DummyInput(), None)
def state_description(self) -> str:
return 'Waiting after eNB reboot to prevent race conditions'
class DownloadState(EnodebAcsState):
"""
The eNB handler will enter this state when firmware version is older than desired version.
"""
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def get_msg(self, message: Any) -> AcsMsgAndTransition:
# Switch enodeb status to firmware upgrading
metrics.set_enb_status(
self.acs.device_cfg.get_parameter("Serial number"),
status="firmware_upgrading"
)
request = models.Download()
request.CommandKey = "20220206215200"
request.FileType = "1 Firmware Upgrade Image"
request.URL = "http://10.128.250.131/firmware/Qproject_TEST3918_2102241222.ffw"
request.Username = ""
request.Password = ""
request.FileSize = 57208579
request.TargetFileName = "Qproject_TEST3918_2102241222.ffw"
request.DelaySeconds = 0
request.SuccessURL = ""
request.FailureURL = ""
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Upgrading the firmware to the desired version'
class WaitDownloadResponseState(EnodebAcsState):
"""
The eNB handler will enter this state after the Download command sent.
"""
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.DownloadResponse):
return AcsReadMsgResult(False, None)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
""" Reply with empty message """
logger.info("Received Download Response from eNodeB")
return AcsMsgAndTransition(models.DummyInput(), self.done_transition)
def state_description(self) -> str:
return "Wait DownloadResponse message"
class WaitInformTransferCompleteState(EnodebAcsState):
"""
The eNB handler will enter this state after firmware upgraded and rebooted
"""
REBOOT_TIMEOUT = 300 # In seconds
INFORM_EVENT_CODE = "7 TRANSFER COMPLETE"
PERIODIC_EVENT_CODE = "2 PERIODIC"
def __init__(self, acs: EnodebAcsStateMachine, when_done: str, when_periodic: str, when_timeout: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.periodic_update_transition = when_periodic
self.timeout_transition = when_timeout
self.timeout_timer = None
self.timer_handle = None
def enter(self):
print("Get into the TransferComplete State")
self.timeout_timer = StateMachineTimer(self.REBOOT_TIMEOUT)
def check_timer() -> None:
if self.timeout_timer.is_done():
self.acs.transition(self.timeout_transition)
raise Tr069Error("Didn't receive Inform response after rebooting")
self.timer_handle = self.acs.event_loop.call_later(
self.REBOOT_TIMEOUT,
check_timer,
)
def exit(self):
self.timer_handle.cancel()
self.timeout_timer = None
def get_msg(self, message: Any) -> AcsMsgAndTransition:
return AcsMsgAndTransition(models.DummyInput(), None)
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.Inform):
return AcsReadMsgResult(False, None)
if does_inform_have_event(message, self.PERIODIC_EVENT_CODE):
logger.info("Received periodic Inform from eNodeB")
return AcsReadMsgResult(True, self.periodic_update_transition)
if does_inform_have_event(message, self.INFORM_EVENT_CODE):
logger.info("Received TransferComplete Inform from eNodeB")
return AcsReadMsgResult(True, self.done_transition)
# Unhandled situation
return AcsReadMsgResult(False, None)
def state_description(self) -> str:
return "Wait DownloadResponse message"
class CheckStatusState(EnodebAcsState):
"""
Send a request to the eNodeB to get basic status from the device
"""
def __init__(
self,
acs: EnodebAcsStateMachine,
when_done: str,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
def get_msg(self, message: Any) -> AcsMsgAndTransition:
"""
Send a GetParameterValues request
"""
self.PARAMETERS = [
ParameterName.RF_TX_STATUS,
ParameterName.GPS_STATUS,
ParameterName.GPS_LAT,
ParameterName.GPS_LONG,
]
request = models.GetParameterValues()
request.ParameterNames = models.ParameterNames()
request.ParameterNames.arrayType = 'xsd:string[1]'
request.ParameterNames.string = []
for name in self.PARAMETERS:
if self.acs.data_model.is_parameter_present(name):
path = self.acs.data_model.get_parameter(name).path
request.ParameterNames.string.append(path)
request.ParameterNames.arrayType = \
'xsd:string[%d]' % len(request.ParameterNames.string)
return AcsMsgAndTransition(request, self.done_transition)
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.GetParameterValuesResponse):
return AcsReadMsgResult(msg_handled=False, next_state=None)
name_to_val = parse_get_parameter_values_response(self.acs.data_model, message)
logger.info("CheckStatusState: %s", str(name_to_val))
# Call set_enb_gps_status to update the parameter in prometheus api
metrics.set_enb_gps_status(
self.acs.device_cfg.get_parameter("Serial number"),
name_to_val["GPS lat"], name_to_val["GPS long"],
name_to_val["gps_status"]
)
# Call set_enb_op_status to update the parameter in prometheus api
metrics.set_enb_op_status(
self.acs.device_cfg.get_parameter("Serial number"),
name_to_val["Opstate"]
)
# Sleep 1 minute and check status again
time.sleep(60)
return AcsReadMsgResult(msg_handled=True, next_state=self.done_transition)
def state_description(self) -> str:
return 'Getting eNB basic status'
class ErrorState(EnodebAcsState):
"""
The eNB handler will enter this state when an unhandled Fault is received.
If the inform_transition_target constructor parameter is non-null, this
state will attempt to autoremediate by transitioning to the specified
target state when an Inform is received.
"""
def __init__(
self, acs: EnodebAcsStateMachine,
inform_transition_target: Optional[str] = None,
):
super().__init__()
self.acs = acs
self.inform_transition_target = inform_transition_target
def read_msg(self, message: Any) -> AcsReadMsgResult:
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
if not self.inform_transition_target:
return AcsMsgAndTransition(models.DummyInput(), None)
if isinstance(message, models.Inform):
return AcsMsgAndTransition(
models.DummyInput(),
self.inform_transition_target,
)
return AcsMsgAndTransition(models.DummyInput(), None)
def state_description(self) -> str:
return 'Error state - awaiting manual restart of enodebd service or ' \
'an Inform to be received from the eNB' |
py | 1a33e96ba5a2eebbc07e4959e287260f51923a85 | from struct import Struct
from types import new_class
# class init => python type to obj, value = python type
# obj encode: obj to bytes/array
# classmethod decode: bytes array (array('B', [0, 2, 255, ..])) to python type
# str obj to str, mostly str(value)
# self.value is bytes on Base class
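# Illustrative round-trip, assuming the numeric subclasses defined below behave as
# described above (the values here are examples, not taken from any spec):
#   FormatUint16(0x0201).encode()            -> b'\x01\x02'   (little-endian)
#   FormatUint16.decode(bytes([0x01, 0x02])) == FormatUint16(0x0201)   # True
#   str(FormatUint16(513))                   -> '513'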
class MetaBluezFormat(type):
def __str__(self):
return "{}".format(self.__name__)
class MetaBluezFormatInt(type):
def __str__(self):
return "{}(len={},exponent={})".format(self.__name__, self.len, self.exponent)
class FormatBase(object, metaclass=MetaBluezFormat):
# __metaclass__ = MetaFormatInt
# 0 means variable length
len = 0
# for all numeric
exponent = 0
native_types = bytes
# init takes native python type as arg (depends on formatBase, base is 'bytes' type)
def __init__(self, value):
if not isinstance(value, self.native_types):
raise TypeError(
"{}, wrong type: {}, expected: {}".format(
self.__class__.__name__, type(value), self.native_types
)
)
self.value = value
try:
_ = self.encode()
except Exception as ex:
# keep exception raised by 'encode', but add this one
raise ValueError(f"{self.__class__.__name__}: {str(ex)}")
@classmethod
def decode(cls, value):
return cls(bytes(value))
def encode(self):
return self.value
def __str__(self):
return str(self.value)
def __eq__(self, other):
if isinstance(other, FormatBase):
return self.value == other.value
return self.value == other
# alias
class FormatRaw(FormatBase):
pass
# base only for non-power two uints
class FormatUint(FormatBase):
exponent = 0
len = 1
native_types = (int, float)
@classmethod
def decode(cls, value):
acc = 0
for idx, v in enumerate(value):
if idx == cls.len:
break
acc += int(v) * pow(2, 8 * idx)
if cls.exponent:
n = float(acc) * pow(10, cls.exponent)
if cls.exponent:
n = round(n, cls.exponent * -1)
return cls(n)
return cls(acc)
def encode(self):
if self.exponent:
v = int(self.value / pow(10, self.exponent))
else:
v = self.value
b = []
for idx in range(0, self.len):
b.append(v % 256)
v = int(v / 256)
return bytes(b)
class FormatUint24(FormatUint):
len = 3
class FormatUint40(FormatUint):
len = 5
class FormatUint48(FormatUint):
len = 6
_endian = "<"
# works only as base for powers of 2 sints
class FormatPacked(FormatBase):
exponent = 0
len = 1
# adds float to the native types (self.value), but pack/unpack is always to/from int
native_types = (int, float)
pck_fmt = Struct(_endian + "B")
@classmethod
def decode(cls, value):
v = bytes(value)
if len(v) < cls.len:
v = bytes(value) + bytes([0] * (cls.len - len(v)))
# acc = unpack(cls.endian + cls.pck_fmt, v)
acc = cls.pck_fmt.unpack(v)
if cls.exponent:
return cls(round(float(acc[0]) * pow(10, cls.exponent), cls.exponent * -1))
return cls(acc[0])
def encode(self):
if self.exponent:
v = int(self.value / pow(10, self.exponent))
else:
v = int(self.value)
return self.pck_fmt.pack(v)
def __int__(self):
return int(self.value)
def __float__(self):
return float(self.value)
class FormatUint8(FormatPacked):
pck_fmt = Struct(_endian + "B")
class FormatUint8Enum(FormatUint8):
pass
class FormatUint16(FormatPacked):
len = 2
pck_fmt = Struct(_endian + "H")
class FormatUint32(FormatPacked):
len = 4
pck_fmt = Struct(_endian + "I")
class FormatUint64(FormatPacked):
len = 8
pck_fmt = Struct(_endian + "Q")
class FormatSint8(FormatPacked):
pck_fmt = Struct(_endian + "b")
class FormatSint16(FormatPacked):
len = 2
pck_fmt = Struct(_endian + "h")
class FormatSint32(FormatPacked):
len = 4
pck_fmt = Struct(_endian + "i")
class FormatSint64(FormatPacked):
len = 8
pck_fmt = Struct(_endian + "q")
class FormatFloat32(FormatPacked):
len = 4
pck_fmt = Struct(_endian + "f")
class FormatFloat64(FormatPacked):
len = 8
pck_fmt = Struct(_endian + "d")
class FormatUtf8s(FormatBase):
# native 'value' format is unicode string
native_types = str
@classmethod
def decode(cls, value):
s = bytes(value).decode("utf-8")
l = len(s)
# remove trailing NUL
if l > 0 and s[l - 1] == "\x00":
s = s[:-1]
return cls(s)
def encode(self):
return self.value.encode("utf-8")
class FormatBitfield(FormatUint8):
len = 1
native_types = (int,)
def __str__(self):
return "0b{:08b}".format(self.value)
class FormatBitfield16(FormatUint16):
len = 2
def __str__(self):
return "0b{:016b}".format(self.value)
class FormatTuple(FormatBase):
sub_cls = []
sub_cls_names = []
native_types = (list, tuple)
# here we have a list/tuple as value
def __init__(self, value):
try:
if len(self.sub_cls) != len(value):
raise ValueError(
f"Expected {len(self.sub_cls)} number of values for format: "
f"{self.__class__.__name__} ({self._sub_str()})"
)
except TypeError:
raise TypeError(
"Expected iterable with {} number of values for format: {} ({})".format(
len(self.sub_cls), self.__class__.__name__, self._sub_str()
)
) from None
self.value = value
def _sub_str(self):
scn = self.sub_cls_names if self._is_named() else None
if scn and len(scn) == len(self):
d = {}
for idx, n in enumerate(scn):
d[n] = self.sub_cls[idx]
return str(d)
return "({})".format(",".join([sub_c.__name__ for sub_c in self.sub_cls]))
def _is_named(self):
try:
_ = self.sub_cls_names
except AttributeError:
return False
return bool(self.sub_cls_names)
# del not supported, wonder if we need it
# def __delitem__(self, key):
# self.__delattr__(key)
def __len__(self):
return len(self.sub_cls)
def __getitem__(self, key):
if isinstance(key, int):
return self.value[key]
elif isinstance(key, str):
if not self._is_named():
raise TypeError("index must be int")
try:
idx = self.sub_cls_names.index(key)
except ValueError:
raise KeyError(key)
return self.value[idx]
raise TypeError("index must be str or int")
def __setitem__(self, key, sub_value):
if isinstance(key, int):
try:
# create sub class instance for type checking (raises Type/Value)
# resulting value should be original sub_value on success
self.value[key] = self.sub_cls[key](sub_value).value
except IndexError:
raise IndexError(
f"{self.__class__.__name__} assignment index out of range"
)
elif isinstance(key, str):
if not self._is_named():
raise TypeError("index must be int")
try:
idx = self.sub_cls_names.index(key)
except ValueError:
raise KeyError(key)
self.value[idx] = self.sub_cls[idx](sub_value).value
else:
raise TypeError("index must be str or int")
def keys(self):
if not self._is_named():
return []
return self.sub_cls_names
def values(self):
return self.value
def items(self):
if not self._is_named():
return []
return [
(self.sub_cls_names[idx], value) for idx, value in enumerate(self.value)
]
@classmethod
def decode(cls, value):
dec_vals = []
for sub in cls.sub_cls:
# consume bytes suitable for class, or all
len_get = len(value) if sub.len == 0 else sub.len
v = value[:len_get]
value = value[len_get:]
dec_vals.append(sub.decode(v))
return cls(cls.native_types[0](dec_vals))
def encode(self):
enc_vals = b""
for idx, val in enumerate(self.value):
# add bytes for all classes in order, or all
if isinstance(val, FormatBase):
enc_vals += val.encode()
else:
enc_vals += self.sub_cls[idx](val).encode()
return enc_vals
def __str__(self):
return "(" + ",".join([str(v) for v in self.value]) + ")"
def __eq__(self, other):
if isinstance(other, FormatTuple):
if len(other) != len(self):
return False
for idx, value in enumerate(self.values()):
if value != other[idx]:
return False
return True
elif not isinstance(other, FormatBase):
for idx, value in enumerate(self.values()):
if value != other[idx]:
return False
return True
return False
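# A minimal sketch of a concrete FormatTuple subclass (hypothetical names, not part
# of any Bluetooth characteristic definition):
#
#   class FormatLatLon(FormatTuple):
#       sub_cls = [FormatSint32, FormatSint32]
#       sub_cls_names = ["latitude", "longitude"]
#
#   loc = FormatLatLon([514742000, -1278000])
#   loc["latitude"]                            -> 514742000
#   loc.encode()                               -> 8 bytes, two little-endian signed 32-bit ints
#   FormatLatLon.decode(loc.encode()) == loc   # True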
__all__ = (
"FormatBase",
"FormatRaw",
"FormatUint",
"FormatUint8",
"FormatUint8Enum",
"FormatUint16",
"FormatUint24",
"FormatUint32",
"FormatUint40",
"FormatUint48",
"FormatUint64",
"FormatSint8",
"FormatSint16",
"FormatSint32",
"FormatSint64",
"FormatUtf8s",
"FormatBitfield",
"FormatTuple",
)
|
py | 1a33ea239e6e281808bedb7e1cc4956de3b60f49 | # These are very task specifc calcfuntions which would be used
# to extract and calculate the separation performance descriptors.
# I normally apply them on the output_dict of hts workchain during
# the query.
from aiida.engine import calcfunction
from aiida.orm import Dict
@calcfunction
def get_spd_xe_exhaled(components, wc_output):
"""
Extracting the separation performance descriptors from the
output_dict of a GCMC simulation for Xenon recovery from exhaled anesthetic
gas.
"""
wc_dict = wc_output.get_dict()
comp1 = components['comp1']['name']
comp2 = components['comp2']['name']
comp3 = components['comp3']['name']
comp4 = components['comp4']['name']
y1 = wc_dict['mol_fraction'][comp1]
y2 = wc_dict['mol_fraction'][comp2]
y3 = wc_dict['mol_fraction'][comp3]
y4 = wc_dict['mol_fraction'][comp4]
n_1_des = wc_dict["isotherm"]["loading_absolute_average"][comp1][0]
n_1_ads = wc_dict["isotherm"]["loading_absolute_average"][comp1][1]
n_2_des = wc_dict["isotherm"]["loading_absolute_average"][comp2][0]
n_2_ads = wc_dict["isotherm"]["loading_absolute_average"][comp2][1]
s_1_2_ads = (n_1_ads / n_2_ads) * (y2 / y1)
s_1_2_des = (n_1_des / n_2_des) * (y2 / y1)
wc1 = n_1_ads - n_1_des
wc2 = n_2_ads - n_2_des
regen = (wc1 / n_1_ads) * 100
afm = wc1 * ((s_1_2_ads ** 2) / s_1_2_des)
output_dict = {
'S_1_2': s_1_2_ads,
'wc1': wc1,
}
return Dict(dict=output_dict)
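# Rough usage sketch during a query (the workchain/node handles here are assumptions,
# not defined in this module):
#   spd_node = get_spd_xe_exhaled(components=components_dict, wc_output=hts_wc.outputs.output_parameters)
#   spd_node.get_dict()['S_1_2']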
|
py | 1a33ec8285c78dad0d5007c8931e6445ef596a8a | import unittest
import luigi.target
class TargetTest(unittest.TestCase):
def test_cannot_instantiate(self):
def instantiate_target():
luigi.target.Target()
self.assertRaises(TypeError, instantiate_target)
def test_abstract_subclass(self):
class ExistsLessTarget(luigi.target.Target):
pass
def instantiate_target():
ExistsLessTarget()
self.assertRaises(TypeError, instantiate_target)
def test_instantiate_subclass(self):
class GoodTarget(luigi.target.Target):
def exists(self):
return True
def open(self, mode):
return None
GoodTarget()
|
py | 1a33ed32d18f5c622a6c6094df5bb8bfc78915c4 | # -*- coding: utf-8 -*-
# Copyright (C) 2015 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from collections import abc
import itertools
import six
# Used for values that don't matter in sets backed by dicts...
_sentinel = object()
def _merge_in(target, iterable=None, sentinel=_sentinel):
"""Merges iterable into the target and returns the target."""
if iterable is not None:
for value in iterable:
target.setdefault(value, sentinel)
return target
class OrderedSet(abc.Set, abc.Hashable):
"""A read-only hashable set that retains insertion/initial ordering.
It should work in all existing places that ``frozenset`` is used.
See: https://mail.python.org/pipermail/python-ideas/2009-May/004567.html
for an idea thread that *may* eventually (*someday*) result in this (or
similar) code being included in the mainline python codebase (although
the end result of that thread is somewhat discouraging in that regard).
"""
__slots__ = ['_data']
def __init__(self, iterable=None):
self._data = _merge_in(collections.OrderedDict(), iterable)
def __hash__(self):
return self._hash()
def __contains__(self, value):
return value in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
for value in six.iterkeys(self._data):
yield value
def __setstate__(self, items):
self.__init__(iterable=iter(items))
def __getstate__(self):
return tuple(self)
def __repr__(self):
return "%s(%s)" % (type(self).__name__, list(self))
def copy(self):
"""Return a shallow copy of a set."""
return self._from_iterable(iter(self))
def intersection(self, *sets):
"""Return the intersection of two or more sets as a new set.
(i.e. elements that are common to all of the sets.)
"""
def absorb_it(sets):
for value in iter(self):
matches = 0
for s in sets:
if value in s:
matches += 1
else:
break
if matches == len(sets):
yield value
return self._from_iterable(absorb_it(sets))
def issuperset(self, other):
"""Report whether this set contains another set."""
for value in other:
if value not in self:
return False
return True
def issubset(self, other):
"""Report whether another set contains this set."""
for value in iter(self):
if value not in other:
return False
return True
def difference(self, *sets):
"""Return the difference of two or more sets as a new set.
(i.e. all elements that are in this set but not the others.)
"""
def absorb_it(sets):
for value in iter(self):
seen = False
for s in sets:
if value in s:
seen = True
break
if not seen:
yield value
return self._from_iterable(absorb_it(sets))
def union(self, *sets):
"""Return the union of sets as a new set.
(i.e. all elements that are in either set.)
"""
return self._from_iterable(itertools.chain(iter(self), *sets))
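# Minimal usage sketch (values are illustrative):
#   s = OrderedSet([3, 1, 2, 1])
#   list(s)                    -> [3, 1, 2]   insertion order kept, duplicates dropped
#   s.intersection([1, 2, 5])  -> OrderedSet([1, 2])
#   s.issubset([1, 2, 3, 4])   -> True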
|
py | 1a33ef00497f000cf8574efd26cf3f5c41f730c8 | from PythonOfficial import fibo
from PythonOfficial.fibo import fib,fib2
fibo.fib2(10)
# fibo.fib(4) |
py | 1a33ef5ff514b23958818e6297e81c6c7b922998 | #!/usr/bin/env python2
# coding=utf-8
# ^^^^^^^^^^^^ TODO remove when supporting only Python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Hierarchical Deterministic wallet function."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletHDTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
self.nodes = start_nodes(2, self.options.tmpdir, [['-usehd=0'], ['-usehd=1', '-keypool=0']])
self.is_network_split = False
connect_nodes_bi(self.nodes, 0, 1)
self.is_network_split=False
self.sync_all()
def run_test (self):
tmpdir = self.options.tmpdir
# Make sure can't switch off usehd after wallet creation
stop_node(self.nodes[1],1)
try:
start_node(1, self.options.tmpdir, ['-usehd=0'])
raise AssertionError("Must not allow to turn off HD on an already existing HD wallet")
except Exception as e:
assert("bitozd exited with status 1 during initialization" in str(e))
# assert_start_raises_init_error(1, self.options.tmpdir, ['-usehd=0'], 'already existing HD wallet')
# self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1])
self.nodes[1] = start_node(1, self.options.tmpdir, ['-usehd=1', '-keypool=0'])
connect_nodes_bi(self.nodes, 0, 1)
# Make sure we use hd, keep chainid
chainid = self.nodes[1].getwalletinfo()['hdchainid']
assert_equal(len(chainid), 64)
# create an internal key
change_addr = self.nodes[1].getrawchangeaddress()
change_addrV = self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/44'/1'/0'/1/0") #first internal child key
# Import a non-HD private key in the HD wallet
non_hd_add = self.nodes[0].getnewaddress()
self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
# This should be enough to keep the master key and the non-HD key
self.nodes[1].backupwallet(tmpdir + "/hd.bak")
#self.nodes[1].dumpwallet(tmpdir + "/hd.dump")
# Derive some HD addresses and remember the last
# Also send funds to each add
self.nodes[0].generate(101)
hd_add = None
num_hd_adds = 300
for i in range(num_hd_adds):
hd_add = self.nodes[1].getnewaddress()
hd_info = self.nodes[1].validateaddress(hd_add)
assert_equal(hd_info["hdkeypath"], "m/44'/1'/0'/0/"+str(i+1))
assert_equal(hd_info["hdchainid"], chainid)
self.nodes[0].sendtoaddress(hd_add, 1)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(non_hd_add, 1)
self.nodes[0].generate(1)
# create an internal key (again)
change_addr = self.nodes[1].getrawchangeaddress()
change_addrV = self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/44'/1'/0'/1/1") #second internal child key
self.sync_all()
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
print("Restore backup ...")
stop_node(self.nodes[1],1)
os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat")
self.nodes[1] = start_node(1, self.options.tmpdir, ['-usehd=1', '-keypool=0'])
#connect_nodes_bi(self.nodes, 0, 1)
# Assert that derivation is deterministic
hd_add_2 = None
for _ in range(num_hd_adds):
hd_add_2 = self.nodes[1].getnewaddress()
hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
assert_equal(hd_info_2["hdkeypath"], "m/44'/1'/0'/0/"+str(_+1))
assert_equal(hd_info_2["hdchainid"], chainid)
assert_equal(hd_add, hd_add_2)
# Needs rescan
stop_node(self.nodes[1],1)
self.nodes[1] = start_node(1, self.options.tmpdir, ['-usehd=1', '-keypool=0', '-rescan'])
#connect_nodes_bi(self.nodes, 0, 1)
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
# send a tx and make sure its using the internal chain for the changeoutput
txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout'];
keypath = ""
for out in outs:
if out['value'] != 1:
keypath = self.nodes[1].validateaddress(out['scriptPubKey']['addresses'][0])['hdkeypath']
assert_equal(keypath[0:13], "m/44'/1'/0'/1")
if __name__ == '__main__':
WalletHDTest().main ()
|
py | 1a33f0bda32f38503a541eaebc56ea240f3f4699 | from math import log
def tfidf(t, d, d_list):
return tf(t, d) * idf(t, d_list)
def tf(t, d):
r = 0
for term in d.split():
if t == term:
r += 1
return r
def idf(t, d_list):
d_with_t = 0
for d in d_list:
if t in d.split():
d_with_t += 1
return log(len(d_list) / d_with_t)
if __name__ == "__main__":
d1 = "snow in my shoe abandoned sparrow's nest"
d2 = "whitecaps on the bay a broken signboard banging in the April wind"
d3 = "lily out of the water out of itself bass picking bugs off the moon"
d4 = "an aging willow its image unsteady in the flowing stream"
d5 = "just friends he watches my gauze dress blowing on the line"
d6 = "little spider will you outlive me"
d7 = "meteor shower a gentle wave wets our sandals"
d_list = [d1, d2, d3, d4, d5, d6, d7]
print(tfidf("a", d2, d_list))
print(tfidf("out", d1, d_list))
print(tfidf("out", d3, d_list))
|
py | 1a33f11c03a9bbd02f567c19a1290b996641be3c | # pylint: disable=line-too-long, no-member
from __future__ import print_function
import pytz
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db.models import Q
from django.utils import timezone
from ...decorators import handle_lock
from ...models import DataPoint, DataSource, DataSourceAlert, DataSourceReference, DataGeneratorDefinition
GENERATOR = 'pdk-remote-nudge'
CRITICAL_LEVEL = 12 * 60 * 60
WARNING_LEVEL = 6 * 60 * 60
class Command(BaseCommand):
help = 'Determines if mobile devices are receiving silent push notifications.'
@handle_lock
def handle(self, *args, **options): # pylint: disable=too-many-branches, too-many-statements, too-many-locals
try:
if (GENERATOR in settings.PDK_ENABLED_CHECKS) is False:
DataSourceAlert.objects.filter(generator_identifier=GENERATOR, active=True).update(active=False)
return
except AttributeError:
print('Did not find PDK_ENABLED_CHECKS in Django settings. Please define with a list of generators with status checks to enable.')
print('Example: PDK_ENABLED_CHECKS = (\'' + GENERATOR + '\',)')
here_tz = pytz.timezone(settings.TIME_ZONE)
for source in DataSource.objects.all(): # pylint: disable=too-many-nested-blocks
now = timezone.now()
source_reference = DataSourceReference.reference_for_source(source.identifier)
generator_definition = DataGeneratorDefinition.definition_for_identifier('pdk-app-event')
if source.should_suppress_alerts():
DataSourceAlert.objects.filter(data_source=source, generator_identifier=GENERATOR, active=True).update(active=False)
else:
secondary_query = Q(secondary_identifier='app_recv_remote_notification') | Q(secondary_identifier='pdk-received-firebase-message')
last_event = DataPoint.objects.filter(source_reference=source_reference, generator_definition=generator_definition).filter(secondary_query).order_by('-created').first()
last_alert = DataSourceAlert.objects.filter(data_source=source, generator_identifier=GENERATOR, active=True).order_by('-created').first()
alert_name = None
alert_details = {}
alert_level = 'info'
if last_event is not None:
delta = now - last_event.created
when = last_event.created.astimezone(here_tz)
if delta.total_seconds() > CRITICAL_LEVEL:
alert_name = 'Push Notifications Delayed'
alert_details['message'] = 'Device not received push notifications since ' + when.strftime('%H:%M on %b %d, %Y') + '.'
alert_level = 'critical'
elif delta.total_seconds() > WARNING_LEVEL:
alert_name = 'Push Notifications Delayed'
alert_details['message'] = 'Device not received push notifications since ' + when.strftime('%H:%M on %b %d, %Y') + '.'
alert_level = 'warning'
else:
alert_name = 'Push Notifications Never Received'
alert_details['message'] = 'Device has never received push notifications.'
if alert_name is not None:
if last_alert is None or last_alert.alert_name != alert_name or last_alert.alert_level != alert_level:
if last_alert is not None:
last_alert.active = False
last_alert.updated = timezone.now()
last_alert.save()
new_alert = DataSourceAlert(alert_name=alert_name, data_source=source, generator_identifier=GENERATOR)
new_alert.alert_level = alert_level
new_alert.update_alert_details(alert_details)
new_alert.created = timezone.now()
new_alert.updated = timezone.now()
new_alert.active = True
new_alert.save()
else:
last_alert.updated = timezone.now()
last_alert.update_alert_details(alert_details)
last_alert.save()
elif last_alert is not None:
last_alert.updated = timezone.now()
last_alert.active = False
last_alert.save()
|
py | 1a33f140daebadcff530a29ca74d88aa0e7a3b96 | #!/usr/bin/env python
"""Tests for `workshop_schedules` package."""
from typer.testing import CliRunner
from workshop_schedules import cli
def test_version():
import workshop_schedules
assert workshop_schedules.__version__
def test_command_line_interface():
"""Test the CLI."""
from workshop_schedules.cli import app
runner = CliRunner()
result = runner.invoke(app)
assert result.exit_code != 0
assert 'Usage: workshop-schedules' in result.output
help_result = runner.invoke(app, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
|
py | 1a33f287ca68646ea6c44654a5a08a4c7a7dab97 | # model settings
model = dict(
type='TTFNet',
pretrained='modelzoo://resnet18',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_eval=False,
style='pytorch'),
neck=None,
bbox_head=dict(
type='TTFLevelHead',
inplanes=(64, 128, 256, 512),
planes=(256, 128, 32),
down_ratio_b1=8,
down_ratio_b2=4,
hm_head_channels=(128, 128),
wh_head_channels=(64, 64),
hm_head_conv_num=(2, 2),
wh_head_conv_num=(1, 1),
num_classes=81,
wh_scale_factor_b1=16.,
wh_scale_factor_b2=16.,
shortcut_cfg=(1, 2, 3),
alpha=0.54,
beta=0.54,
max_objs=128,
hm_weight_b1=1.,
wh_weight_b1=5.,
hm_weight_b2=1.,
wh_weight_b2=5.,
b1_min_length=48,
b2_max_length=64,
mdcn_before_s8=True,
mdcn_before_s8_bn=False,
inf_branch=['b1', 'b2'],
use_simple_nms=False,
conv_cfg=None,
norm_cfg=dict(type='BN')))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(debug=False)
test_cfg = dict(score_thr=0.01, max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=16,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.002,
momentum=0.9,
weight_decay=0.0004,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[9, 11])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=9)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ttfv2net_r18_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
py | 1a33f363bf7e4f532d22d9decbe8653217877b03 | """Task I/O specifications."""
import attr
from pathlib import Path
import typing as ty
import inspect
import re
from glob import glob
from .helpers_file import template_update_single
def attr_fields(spec, exclude_names=()):
return [field for field in spec.__attrs_attrs__ if field.name not in exclude_names]
def attr_fields_dict(spec, exclude_names=()):
return {
field.name: field
for field in spec.__attrs_attrs__
if field.name not in exclude_names
}
class File:
"""An :obj:`os.pathlike` object, designating a file."""
class Directory:
"""An :obj:`os.pathlike` object, designating a folder."""
class MultiInputObj:
"""A ty.List[ty.Any] object, converter changes a single values to a list"""
@classmethod
def converter(cls, value):
from .helpers import ensure_list
if value == attr.NOTHING:
return value
else:
return ensure_list(value)
class MultiOutputObj:
"""A ty.List[ty.Any] object, converter changes an 1-el list to the single value"""
@classmethod
def converter(cls, value):
if isinstance(value, list) and len(value) == 1:
return value[0]
else:
return value
class MultiInputFile(MultiInputObj):
"""A ty.List[File] object, converter changes a single file path to a list"""
class MultiOutputFile(MultiOutputObj):
"""A ty.List[File] object, converter changes an 1-el list to the single value"""
@attr.s(auto_attribs=True, kw_only=True)
class SpecInfo:
"""Base data structure for metadata of specifications."""
name: str
"""A name for the specification."""
fields: ty.List[ty.Tuple] = attr.ib(factory=list)
"""List of names of fields (can be inputs or outputs)."""
bases: ty.Tuple[ty.Type] = attr.ib(factory=tuple)
"""Keeps track of specification inheritance.
Should be a tuple containing at least one BaseSpec """
@attr.s(auto_attribs=True, kw_only=True)
class BaseSpec:
"""The base dataclass specs for all inputs and outputs."""
def __attrs_post_init__(self):
self.files_hash = {
field.name: {}
for field in attr_fields(
self, exclude_names=("_graph_checksums", "bindings", "files_hash")
)
if field.metadata.get("output_file_template") is None
}
def __setattr__(self, name, value):
"""changing settatr, so the converter and validator is run
if input is set after __init__
"""
if inspect.stack()[1][3] == "__init__" or name in [
"inp_hash",
"changed",
"files_hash",
]:
super().__setattr__(name, value)
else:
tp = attr.fields_dict(self.__class__)[name].type
# if the type has a converter, e.g., MultiInputObj
if hasattr(tp, "converter"):
value = tp.converter(value)
self.files_hash[name] = {}
super().__setattr__(name, value)
# validate all fields that have set a validator
attr.validate(self)
def collect_additional_outputs(self, inputs, output_dir, outputs):
"""Get additional outputs."""
return {}
@property
def hash(self):
"""Compute a basic hash for any given set of fields."""
from .helpers import hash_value, hash_function
inp_dict = {}
for field in attr_fields(
self, exclude_names=("_graph_checksums", "bindings", "files_hash")
):
if field.metadata.get("output_file_template"):
continue
# removing values that are not set from hash calculation
if getattr(self, field.name) is attr.NOTHING:
continue
value = getattr(self, field.name)
inp_dict[field.name] = hash_value(
value=value,
tp=field.type,
metadata=field.metadata,
precalculated=self.files_hash[field.name],
)
inp_hash = hash_function(inp_dict)
if hasattr(self, "_graph_checksums"):
inp_hash = hash_function((inp_hash, self._graph_checksums))
return inp_hash
def retrieve_values(self, wf, state_index=None):
"""Get values contained by this spec."""
temp_values = {}
for field in attr_fields(self):
value = getattr(self, field.name)
if isinstance(value, LazyField):
value = value.get_value(wf, state_index=state_index)
temp_values[field.name] = value
for field, value in temp_values.items():
setattr(self, field, value)
def check_fields_input_spec(self):
"""
Check fields from input spec based on the metadata.
e.g., if xor, requires are fulfilled, if value provided when mandatory.
"""
fields = attr_fields(self)
names = []
require_to_check = {}
for fld in fields:
mdata = fld.metadata
# checking if the mandatory field is provided
if getattr(self, fld.name) is attr.NOTHING:
if mdata.get("mandatory"):
raise AttributeError(
f"{fld.name} is mandatory, but no value provided"
)
else:
continue
names.append(fld.name)
# checking if fields meet the xor and requires are
if "xor" in mdata:
if [el for el in mdata["xor"] if (el in names and el != fld.name)]:
raise AttributeError(
f"{fld.name} is mutually exclusive with {mdata['xor']}"
)
if "requires" in mdata:
if [el for el in mdata["requires"] if el not in names]:
# will check after adding all fields to names
require_to_check[fld.name] = mdata["requires"]
if (
fld.type in [File, Directory]
or "pydra.engine.specs.File" in str(fld.type)
or "pydra.engine.specs.Directory" in str(fld.type)
):
self._file_check_n_bindings(fld)
for nm, required in require_to_check.items():
required_notfound = [el for el in required if el not in names]
if required_notfound:
raise AttributeError(f"{nm} requires {required_notfound}")
def _file_check_n_bindings(self, field):
"""for tasks without container, this is simple check if the file exists"""
if isinstance(getattr(self, field.name), list):
# if value is a list and type is a list of Files/Directory, checking all elements
if field.type in [ty.List[File], ty.List[Directory]]:
for el in getattr(self, field.name):
file = Path(el)
if not file.exists() and field.type in [File, Directory]:
raise FileNotFoundError(
f"the file {file} from the {field.name} input does not exist"
)
else:
file = Path(getattr(self, field.name))
# error should be raised only if the type is strictly File or Directory
if not file.exists() and field.type in [File, Directory]:
raise FileNotFoundError(
f"the file {file} from the {field.name} input does not exist"
)
def check_metadata(self):
"""Check contained metadata."""
def template_update(self):
"""Update template."""
def copyfile_input(self, output_dir):
"""Copy the file pointed by a :class:`File` input."""
@attr.s(auto_attribs=True, kw_only=True)
class Runtime:
"""Represent run time metadata."""
rss_peak_gb: ty.Optional[float] = None
"""Peak in consumption of physical RAM."""
vms_peak_gb: ty.Optional[float] = None
"""Peak in consumption of virtual memory."""
cpu_peak_percent: ty.Optional[float] = None
"""Peak in cpu consumption."""
@attr.s(auto_attribs=True, kw_only=True)
class Result:
"""Metadata regarding the outputs of processing."""
output: ty.Optional[ty.Any] = None
runtime: ty.Optional[Runtime] = None
errored: bool = False
def __getstate__(self):
state = self.__dict__.copy()
if state["output"] is not None:
fields = tuple((el.name, el.type) for el in attr_fields(state["output"]))
state["output_spec"] = (state["output"].__class__.__name__, fields)
state["output"] = attr.asdict(state["output"], recurse=False)
return state
def __setstate__(self, state):
if "output_spec" in state:
spec = list(state["output_spec"])
del state["output_spec"]
klass = attr.make_class(
spec[0], {k: attr.ib(type=v) for k, v in list(spec[1])}
)
state["output"] = klass(**state["output"])
self.__dict__.update(state)
def get_output_field(self, field_name):
"""Used in get_values in Workflow
Parameters
----------
field_name : `str`
Name of field in LazyField object
"""
if field_name == "all_":
return attr.asdict(self.output, recurse=False)
else:
return getattr(self.output, field_name)
@attr.s(auto_attribs=True, kw_only=True)
class RuntimeSpec:
"""
Specification for a task.
From CWL::
InlineJavascriptRequirement
SchemaDefRequirement
DockerRequirement
SoftwareRequirement
InitialWorkDirRequirement
EnvVarRequirement
ShellCommandRequirement
ResourceRequirement
InlineScriptRequirement
"""
outdir: ty.Optional[str] = None
container: ty.Optional[str] = "shell"
network: bool = False
@attr.s(auto_attribs=True, kw_only=True)
class FunctionSpec(BaseSpec):
"""Specification for a process invoked from a shell."""
def check_metadata(self):
"""
Check the metadata for fields in input_spec and fields.
Also sets the default values when available and needed.
"""
supported_keys = {
"allowed_values",
"copyfile",
"help_string",
"mandatory",
# "readonly", #likely not needed
# "output_field_name", #likely not needed
# "output_file_template", #likely not needed
"requires",
"keep_extension",
"xor",
"sep",
}
for fld in attr_fields(self, exclude_names=("_func", "_graph_checksums")):
mdata = fld.metadata
# checking keys from metadata
if set(mdata.keys()) - supported_keys:
raise AttributeError(
f"only these keys are supported {supported_keys}, but "
f"{set(mdata.keys()) - supported_keys} provided"
)
# checking if the help string is provided (required field)
if "help_string" not in mdata:
raise AttributeError(f"{fld.name} doesn't have help_string field")
# not allowing for default if the field is mandatory
if not fld.default == attr.NOTHING and mdata.get("mandatory"):
raise AttributeError(
"default value should not be set when the field is mandatory"
)
# setting default if value not provided and default is available
if getattr(self, fld.name) is None:
if not fld.default == attr.NOTHING:
setattr(self, fld.name, fld.default)
@attr.s(auto_attribs=True, kw_only=True)
class ShellSpec(BaseSpec):
"""Specification for a process invoked from a shell."""
executable: ty.Union[str, ty.List[str]] = attr.ib(
metadata={
"help_string": "the first part of the command, can be a string, "
"e.g. 'ls', or a list, e.g. ['ls', '-l', 'dirname']"
}
)
args: ty.Union[str, ty.List[str], None] = attr.ib(
None,
metadata={
"help_string": "the last part of the command, can be a string, "
"e.g. <file_name>, or a list"
},
)
def retrieve_values(self, wf, state_index=None):
"""Parse output results."""
temp_values = {}
for field in attr_fields(self):
# retrieving values that do not have templates
if not field.metadata.get("output_file_template"):
value = getattr(self, field.name)
if isinstance(value, LazyField):
value = value.get_value(wf, state_index=state_index)
temp_values[field.name] = value
for field, value in temp_values.items():
value = path_to_string(value)
setattr(self, field, value)
def check_metadata(self):
"""
Check the metadata for fields in input_spec and fields.
Also sets the default values when available and needed.
"""
supported_keys = {
"allowed_values",
"argstr",
"container_path",
"copyfile",
"help_string",
"mandatory",
"readonly",
"output_field_name",
"output_file_template",
"position",
"requires",
"keep_extension",
"xor",
"sep",
"formatter",
}
for fld in attr_fields(self, exclude_names=("_func", "_graph_checksums")):
mdata = fld.metadata
# checking keys from metadata
if set(mdata.keys()) - supported_keys:
raise AttributeError(
f"only these keys are supported {supported_keys}, but "
f"{set(mdata.keys()) - supported_keys} provided"
)
# checking if the help string is provided (required field)
if "help_string" not in mdata:
raise AttributeError(f"{fld.name} doesn't have help_string field")
# assuming that fields with output_file_template shouldn't have default
if fld.default not in [attr.NOTHING, True, False] and mdata.get(
"output_file_template"
):
raise AttributeError(
"default value should not be set together with output_file_template"
)
# not allowing for default if the field is mandatory
if not fld.default == attr.NOTHING and mdata.get("mandatory"):
raise AttributeError(
"default value should not be set when the field is mandatory"
)
# setting default if value not provided and default is available
if getattr(self, fld.name) is None:
if not fld.default == attr.NOTHING:
setattr(self, fld.name, fld.default)
@attr.s(auto_attribs=True, kw_only=True)
class ShellOutSpec:
"""Output specification of a generic shell process."""
return_code: int
"""The process' exit code."""
stdout: ty.Union[File, str]
"""The process' standard output."""
stderr: ty.Union[File, str]
"""The process' standard input."""
def collect_additional_outputs(self, inputs, output_dir, outputs):
"""Collect additional outputs from shelltask output_spec."""
additional_out = {}
for fld in attr_fields(self, exclude_names=("return_code", "stdout", "stderr")):
if fld.type not in [
File,
MultiOutputFile,
Directory,
int,
float,
bool,
str,
list,
]:
raise Exception("not implemented (collect_additional_output)")
# assuming that field should have either default or metadata, but not both
if (
fld.default is None or fld.default == attr.NOTHING
) and not fld.metadata: # TODO: is it right?
raise AttributeError("File has to have default value or metadata")
elif fld.default != attr.NOTHING:
additional_out[fld.name] = self._field_defaultvalue(fld, output_dir)
elif fld.metadata:
if (
fld.type in [int, float, bool, str, list]
and "callable" not in fld.metadata
):
raise AttributeError(
f"{fld.type} has to have a callable in metadata"
)
additional_out[fld.name] = self._field_metadata(
fld, inputs, output_dir, outputs
)
return additional_out
def generated_output_names(self, inputs, output_dir):
"""Returns a list of all outputs that will be generated by the task.
Takes into account the task input and the requires list for the output fields.
TODO: should be in all Output specs?
"""
# checking the input (if all mandatory fields are provided, etc.)
inputs.check_fields_input_spec()
output_names = ["return_code", "stdout", "stderr"]
for fld in attr_fields(self, exclude_names=("return_code", "stdout", "stderr")):
if fld.type is not File:
raise Exception("not implemented (collect_additional_output)")
# assuming that field should have either default or metadata, but not both
if (
fld.default in (None, attr.NOTHING) and not fld.metadata
): # TODO: is it right?
raise AttributeError("File has to have default value or metadata")
elif fld.default != attr.NOTHING:
output_names.append(fld.name)
elif (
fld.metadata
and self._field_metadata(
fld, inputs, output_dir, outputs=None, check_existance=False
)
!= attr.NOTHING
):
output_names.append(fld.name)
return output_names
def _field_defaultvalue(self, fld, output_dir):
"""Collect output file if the default value specified."""
if not isinstance(fld.default, (str, Path)):
raise AttributeError(
f"{fld.name} is a File, so default value "
f"should be a string or a Path, "
f"{fld.default} provided"
)
default = fld.default
if isinstance(default, str):
default = Path(default)
default = output_dir / default
if "*" not in str(default):
if default.exists():
return default
else:
raise AttributeError(f"file {default} does not exist")
else:
all_files = [Path(el) for el in glob(str(default.expanduser()))]
if len(all_files) > 1:
return all_files
elif len(all_files) == 1:
return all_files[0]
else:
raise AttributeError(f"no file matches {default.name}")
def _field_metadata(
self, fld, inputs, output_dir, outputs=None, check_existance=True
):
"""Collect output file if metadata specified."""
if self._check_requires(fld, inputs) is False:
return attr.NOTHING
if "value" in fld.metadata:
return output_dir / fld.metadata["value"]
# this block is only run if "output_file_template" is provided in output_spec
# if the field is set in input_spec with output_file_template,
        # then the field should already have a value
elif "output_file_template" in fld.metadata:
value = template_update_single(
fld, inputs=inputs, output_dir=output_dir, spec_type="output"
)
if fld.type is MultiOutputFile and type(value) is list:
# TODO: how to deal with mandatory list outputs
ret = []
for val in value:
val = Path(val)
if check_existance and not val.exists():
ret.append(attr.NOTHING)
else:
ret.append(val)
return ret
else:
val = Path(value)
# checking if the file exists
if check_existance and not val.exists():
# if mandatory raise exception
if "mandatory" in fld.metadata:
if fld.metadata["mandatory"]:
raise Exception(
f"mandatory output for variable {fld.name} does not exist"
)
return attr.NOTHING
return val
elif "callable" in fld.metadata:
            call_args = inspect.getfullargspec(fld.metadata["callable"])
call_args_val = {}
for argnm in call_args.args:
if argnm == "field":
call_args_val[argnm] = fld
elif argnm == "output_dir":
call_args_val[argnm] = output_dir
elif argnm == "inputs":
call_args_val[argnm] = inputs
elif argnm == "stdout":
call_args_val[argnm] = outputs["stdout"]
elif argnm == "stderr":
call_args_val[argnm] = outputs["stderr"]
else:
try:
call_args_val[argnm] = getattr(inputs, argnm)
except AttributeError:
raise AttributeError(
f"arguments of the callable function from {fld.name} "
f"has to be in inputs or be field or output_dir, "
f"but {argnm} is used"
)
return fld.metadata["callable"](**call_args_val)
else:
raise Exception("(_field_metadata) is not a current valid metadata key.")
def _check_requires(self, fld, inputs):
"""checking if all fields from the requires and template are set in the input
if requires is a list of list, checking if at least one list has all elements set
"""
from .helpers import ensure_list
if "requires" in fld.metadata:
# if requires is a list of list it is treated as el[0] OR el[1] OR...
if all([isinstance(el, list) for el in fld.metadata["requires"]]):
field_required_OR = fld.metadata["requires"]
# if requires is a list of tuples/strings - I'm creating a 1-el nested list
elif all([isinstance(el, (str, tuple)) for el in fld.metadata["requires"]]):
field_required_OR = [fld.metadata["requires"]]
else:
raise Exception(
f"requires field can be a list of list, or a list "
f"of strings/tuples, but {fld.metadata['requires']} "
f"provided for {fld.name}"
)
else:
field_required_OR = [[]]
for field_required in field_required_OR:
# if the output has output_file_template field,
# adding all input fields from the template to requires
if "output_file_template" in fld.metadata:
inp_fields = re.findall(r"{\w+}", fld.metadata["output_file_template"])
field_required += [
el[1:-1] for el in inp_fields if el[1:-1] not in field_required
]
        # it's a flag; if a field from the list is not set in the input it will be changed to False
required_found = True
for field_required in field_required_OR:
required_found = True
# checking if the input fields from requires have set values
for inp in field_required:
if isinstance(inp, str): # name of the input field
if not hasattr(inputs, inp):
raise Exception(
f"{inp} is not a valid input field, can't be used in requires"
)
elif getattr(inputs, inp) in [attr.NOTHING, None]:
required_found = False
break
elif isinstance(inp, tuple): # (name, allowed values)
inp, allowed_val = inp[0], ensure_list(inp[1])
if not hasattr(inputs, inp):
raise Exception(
f"{inp} is not a valid input field, can't be used in requires"
)
elif getattr(inputs, inp) not in allowed_val:
required_found = False
break
else:
raise Exception(
f"each element of the requires element should be a string or a tuple, "
f"but {inp} is found in {field_required}"
)
# if the specific list from field_required_OR has all elements set, no need to check more
if required_found:
break
if required_found:
return True
else:
return False
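# Illustrative example (field names are hypothetical, not from the spec above): with
#     requires=[["in_file", ("method", "linear")], ["alt_file"]]
# the output is collected when EITHER in_file is set AND method == "linear", OR
# alt_file is set. A flat list such as ["in_file", "out_prefix"] is wrapped into a
# single inner list and therefore acts as one AND group.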
@attr.s(auto_attribs=True, kw_only=True)
class ContainerSpec(ShellSpec):
"""Refine the generic command-line specification to container execution."""
image: ty.Union[File, str] = attr.ib(
metadata={"help_string": "image", "mandatory": True}
)
"""The image to be containerized."""
container: ty.Union[File, str, None] = attr.ib(
metadata={"help_string": "container"}
)
"""The container."""
container_xargs: ty.Optional[ty.List[str]] = attr.ib(
default=None, metadata={"help_string": "todo"}
)
"""Execution arguments to run the image."""
bindings: ty.Optional[
ty.List[
ty.Tuple[
Path, # local path
Path, # container path
ty.Optional[str], # mount mode
]
]
] = attr.ib(default=None, metadata={"help_string": "bindings"})
"""Mount points to be bound into the container."""
def _file_check_n_bindings(self, field):
if field.name == "image":
return
file = Path(getattr(self, field.name))
if field.metadata.get("container_path"):
# if the path is in a container the input should be treated as a str (hash as a str)
# field.type = "str"
# setattr(self, field.name, str(file))
pass
# if this is a local path, checking if the path exists
elif file.exists():
if self.bindings is None:
self.bindings = []
self.bindings.append((file.parent, f"/pydra_inp_{field.name}", "ro"))
# error should be raised only if the type is strictly File or Directory
elif field.type in [File, Directory]:
raise FileNotFoundError(
f"the file {file} from {field.name} input does not exist, "
f"if the file comes from the container, "
f"use field.metadata['container_path']=True"
)
@attr.s(auto_attribs=True, kw_only=True)
class DockerSpec(ContainerSpec):
"""Particularize container specifications to the Docker engine."""
container: str = attr.ib("docker", metadata={"help_string": "container"})
@attr.s(auto_attribs=True, kw_only=True)
class SingularitySpec(ContainerSpec):
"""Particularize container specifications to Singularity."""
container: str = attr.ib("singularity", metadata={"help_string": "container type"})
class LazyField:
"""Lazy fields implement promises."""
def __init__(self, node, attr_type):
"""Initialize a lazy field."""
self.name = node.name
if attr_type == "input":
self.fields = [field[0] for field in node.input_spec.fields]
elif attr_type == "output":
self.fields = node.output_names
else:
raise ValueError(f"LazyField: Unknown attr_type: {attr_type}")
self.attr_type = attr_type
self.field = None
def __getattr__(self, name):
if name in self.fields or name == "all_":
self.field = name
return self
if name in dir(self):
return self.__getattribute__(name)
raise AttributeError(
f"Task {self.name} has no {self.attr_type} attribute {name}"
)
def __getstate__(self):
state = self.__dict__.copy()
state["name"] = self.name
state["fields"] = self.fields
state["field"] = self.field
return state
def __setstate__(self, state):
self.__dict__.update(state)
def __repr__(self):
return f"LF('{self.name}', '{self.field}')"
def get_value(self, wf, state_index=None):
"""Return the value of a lazy field."""
if self.attr_type == "input":
return getattr(wf.inputs, self.field)
elif self.attr_type == "output":
node = getattr(wf, self.name)
result = node.result(state_index=state_index)
if isinstance(result, list):
if len(result) and isinstance(result[0], list):
results_new = []
for res_l in result:
res_l_new = []
for res in res_l:
if res.errored:
raise ValueError("Error from get_value")
else:
res_l_new.append(res.get_output_field(self.field))
results_new.append(res_l_new)
return results_new
else:
results_new = []
for res in result:
if res.errored:
raise ValueError("Error from get_value")
else:
results_new.append(res.get_output_field(self.field))
return results_new
else:
if result.errored:
raise ValueError("Error from get_value")
return result.get_output_field(self.field)
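# Rough usage sketch (assumed workflow API, not defined in this module): a LazyField is
# handed out when one task's output is wired into another before anything has run,
# e.g. something like wf.preprocess.lzout.out_file. Attribute access records the field
# name, and get_value() resolves it against the producing task's Result once the
# workflow executes. The task and field names here are illustrative only.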
def donothing(*args, **kwargs):
return None
@attr.s(auto_attribs=True, kw_only=True)
class TaskHook:
"""Callable task hooks."""
pre_run_task: ty.Callable = donothing
post_run_task: ty.Callable = donothing
pre_run: ty.Callable = donothing
post_run: ty.Callable = donothing
def __setattr__(cls, attr, val):
if attr not in ["pre_run_task", "post_run_task", "pre_run", "post_run"]:
raise AttributeError("Cannot set unknown hook")
super().__setattr__(attr, val)
def reset(self):
for val in ["pre_run_task", "post_run_task", "pre_run", "post_run"]:
setattr(self, val, donothing)
def path_to_string(value):
"""Convert paths to strings."""
if isinstance(value, Path):
value = str(value)
elif isinstance(value, list) and len(value) and isinstance(value[0], Path):
value = [str(val) for val in value]
return value
|
py | 1a33f3d3038311daec2c163f0c2f49cbb1456acd | """
Decorator usage examples
"""
import time
import functools
import decorator
def cost(func):
@functools.wraps(func)
def wapper(*args):
t1 = time.time()
res = func(*args)
t2 = time.time()
        print(f'Elapsed time: {str(t2 - t1)}')
return res
    # return wapper() would return the call's result, so no parentheses here: return the wrapper itself
return wapper
@cost
def excuteBll(a: int, b: int) -> int:
'''
    Return the sum of the two numbers
    :param a: the first number
    :param b: the second number
:return:
'''
time.sleep(1)
print(a + b)
def retry(retry_count=3, sleep_time=1):
'''
    Retry decorator
    :param retry_count: number of retries, default 3
    :param sleep_time: seconds to wait between retries, default 1
:return:
'''
def inner(func):
        print('Step 1')
@functools.wraps(func)
def wapper(*args, **kwargs):
            print('Step 2')
for i in range(retry_count):
                print('Step 3')
try:
                    print('Step 6')
res = func(*args, **kwargs)
                    print('Final step')
return res
except:
                    print('Step 7')
time.sleep(sleep_time)
continue
return None
return wapper
return inner
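# Note: stacking the two decorators below is equivalent to applying them by hand,
# innermost first:
#   requestNameHttp = cost(retry(retry_count=2, sleep_time=3)(requestNameHttp))
# retry(...) is called first and returns `inner`, which then wraps the function; the
# "Step" prints above trace that order at decoration time and at call time.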
@cost
@retry(retry_count=2, sleep_time=3)
def requestNameHttp(ip, address):
    print('Step 4')
    print('Sending request...')
time.sleep(1)
    print('Request succeeded')
return
class Cust(object):
'''
    Class-based decorator; the key point is the __call__ method
'''
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
        print('Decorating...')
f = self.func(*args, **kwargs)
        print('Decorating finished')
return f
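# Note: @Cust below replaces test02 with a Cust instance, so function metadata such as
# __name__ and __doc__ is not preserved (there is no functools.wraps equivalent here);
# that is why the test02.__name__ / test02.__doc__ lines in __main__ stay commented out.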
@Cust
def test02(a: int, b: int) -> int:
'''
    Return the sum of the two numbers
:param a:
:param b:
:return:
'''
    print('Inside the function')
print(f'a+b={a + b}')
@decorator.decorator
def costss(func, time_sleep=3, *args, **kw):
    print('Started')
f = func(*args, **kw)
    print('Finished')
return f
@costss
def costssTest(a, b):
print(f'a+b={a + b}')
if __name__ == '__main__':
# excuteBll(3, 4)
# print(excuteBll.__name__)
# print(excuteBll.__doc__)
print('=====================')
requestNameHttp('', '')
print('=====================')
test02(3, 4)
# print(test02.__name__)
# print(test02.__doc__)
print('=====================')
costssTest(3, 4)
|
py | 1a33f41ab294558ed2448b6b24fab06b5a7a62a3 | # -*- coding: utf-8 -*-
"""
LaunchDarkly REST API
# Overview ## Authentication All REST API resources are authenticated with either [personal or service access tokens](https://docs.launchdarkly.com/home/account-security/api-access-tokens), or session cookies. Other authentication mechanisms are not supported. You can manage personal access tokens on your [Account settings](https://app.launchdarkly.com/settings/tokens) page. LaunchDarkly also has SDK keys, mobile keys, and client-side IDs that are used by our server-side SDKs, mobile SDKs, and client-side SDKs, respectively. **These keys cannot be used to access our REST API**. These keys are environment-specific, and can only perform read-only operations (fetching feature flag settings). | Auth mechanism | Allowed resources | Use cases | | ----------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | -------------------------------------------------- | | [Personal access tokens](https://docs.launchdarkly.com/home/account-security/api-access-tokens) | Can be customized on a per-token basis | Building scripts, custom integrations, data export | | SDK keys | Can only access read-only SDK-specific resources and the firehose, restricted to a single environment | Server-side SDKs, Firehose API | | Mobile keys | Can only access read-only mobile SDK-specific resources, restricted to a single environment | Mobile SDKs | | Client-side ID | Single environment, only flags marked available to client-side | Client-side JavaScript | > #### Keep your access tokens and SDK keys private > > Access tokens should _never_ be exposed in untrusted contexts. Never put an access token in client-side JavaScript, or embed it in a mobile application. LaunchDarkly has special mobile keys that you can embed in mobile apps. If you accidentally expose an access token or SDK key, you can reset it from your [Account Settings](https://app.launchdarkly.com/settings#/tokens) page. > > The client-side ID is safe to embed in untrusted contexts. It's designed for use in client-side JavaScript. ### Via request header The preferred way to authenticate with the API is by adding an `Authorization` header containing your access token to your requests. The value of the `Authorization` header must be your access token. Manage personal access tokens from the [Account Settings](https://app.launchdarkly.com/settings/tokens) page. ### Via session cookie For testing purposes, you can make API calls directly from your web browser. If you're logged in to the application, the API will use your existing session to authenticate calls. If you have a [role](https://docs.launchdarkly.com/home/team/built-in-roles) other than Admin, or have a [custom role](https://docs.launchdarkly.com/home/team/custom-roles) defined, you may not have permission to perform some API calls. You will receive a `401` response code in that case. > ### Modifying the Origin header causes an error > > LaunchDarkly validates that the Origin header for any API request authenticated by a session cookie matches the expected Origin header. The expected Origin header is `https://app.launchdarkly.com`. > > If the Origin header does not match what's expected, LaunchDarkly returns an error. This error can prevent the LaunchDarkly app from working correctly. > > Any browser extension that intentionally changes the Origin header can cause this problem. 
For example, the `Allow-Control-Allow-Origin: *` Chrome extension changes the Origin header to `http://evil.com` and causes the app to fail. > > To prevent this error, do not modify your Origin header. > > LaunchDarkly does not require origin matching when authenticating with an access token, so this issue does not affect normal API usage. ## Representations All resources expect and return JSON response bodies. Error responses will also send a JSON body. Read [Errors](#section/Errors) for a more detailed description of the error format used by the API. In practice this means that you always get a response with a `Content-Type` header set to `application/json`. In addition, request bodies for `PUT`, `POST`, `REPORT` and `PATCH` requests must be encoded as JSON with a `Content-Type` header set to `application/json`. ### Summary and detailed representations When you fetch a list of resources, the response includes only the most important attributes of each resource. This is a _summary representation_ of the resource. When you fetch an individual resource (for example, a single feature flag), you receive a _detailed representation_ containing all of the attributes of the resource. The best way to find a detailed representation is to follow links. Every summary representation includes a link to its detailed representation. ### Links and addressability The best way to navigate the API is by following links. These are attributes in representations that link to other resources. The API always uses the same format for links: - Links to other resources within the API are encapsulated in a `_links` object. - If the resource has a corresponding link to HTML content on the site, it is stored in a special `_site` link. Each link has two attributes: an href (the URL) and a type (the content type). For example, a feature resource might return the following: ```json { \"_links\": { \"parent\": { \"href\": \"/api/features\", \"type\": \"application/json\" }, \"self\": { \"href\": \"/api/features/sort.order\", \"type\": \"application/json\" } }, \"_site\": { \"href\": \"/features/sort.order\", \"type\": \"text/html\" } } ``` From this, you can navigate to the parent collection of features by following the `parent` link, or navigate to the site page for the feature by following the `_site` link. Collections are always represented as a JSON object with an `items` attribute containing an array of representations. Like all other representations, collections have `_links` defined at the top level. Paginated collections include `first`, `last`, `next`, and `prev` links containing a URL with the respective set of elements in the collection. ## Updates Resources that accept partial updates use the `PATCH` verb, and support the [JSON Patch](https://datatracker.ietf.org/doc/html/rfc6902) format. Some resources also support the [JSON Merge Patch](https://datatracker.ietf.org/doc/html/rfc7386) format. In addition, some resources support optional comments that can be submitted with updates. Comments appear in outgoing webhooks, the audit log, and other integrations. ### Updates via JSON Patch [JSON Patch](https://datatracker.ietf.org/doc/html/rfc6902) is a way to specify the modifications to perform on a resource. For example, in this feature flag representation: ```json { \"name\": \"New recommendations engine\", \"key\": \"engine.enable\", \"description\": \"This is the description\", ... 
} ``` You can change the feature flag's description with the following patch document: ```json [{ \"op\": \"replace\", \"path\": \"/description\", \"value\": \"This is the new description\" }] ``` JSON Patch documents are always arrays. You can specify multiple modifications to perform in a single request. You can also test that certain preconditions are met before applying the patch: ```json [ { \"op\": \"test\", \"path\": \"/version\", \"value\": 10 }, { \"op\": \"replace\", \"path\": \"/description\", \"value\": \"The new description\" } ] ``` The above patch request tests whether the feature flag's `version` is `10`, and if so, changes the feature flag's description. Attributes that aren't editable, like a resource's `_links`, have names that start with an underscore. ### Updates via JSON Merge Patch The API also supports the [JSON Merge Patch](https://datatracker.ietf.org/doc/html/rfc7386) format, as well as the [Update feature flag](/tag/Feature-flags#operation/patchFeatureFlag) resource. JSON Merge Patch is less expressive than JSON Patch but in many cases, it is simpler to construct a merge patch document. For example, you can change a feature flag's description with the following merge patch document: ```json { \"description\": \"New flag description\" } ``` ### Updates with comments You can submit optional comments with `PATCH` changes. The [Update feature flag](/tag/Feature-flags#operation/patchFeatureFlag) resource supports comments. To submit a comment along with a JSON Patch document, use the following format: ```json { \"comment\": \"This is a comment string\", \"patch\": [{ \"op\": \"replace\", \"path\": \"/description\", \"value\": \"The new description\" }] } ``` To submit a comment along with a JSON Merge Patch document, use the following format: ```json { \"comment\": \"This is a comment string\", \"merge\": { \"description\": \"New flag description\" } } ``` ### Updates via semantic patches The API also supports the Semantic patch format. A semantic `PATCH` is a way to specify the modifications to perform on a resource as a set of executable instructions. JSON Patch uses paths and a limited set of operations to describe how to transform the current state of the resource into a new state. Semantic patch allows you to be explicit about intent using precise, custom instructions. In many cases, semantic patch instructions can also be defined independently of the current state of the resource. This can be useful when defining a change that may be applied at a future date. For example, in this feature flag configuration in environment Production: ```json { \"name\": \"Alternate sort order\", \"kind\": \"boolean\", \"key\": \"sort.order\", ... \"environments\": { \"production\": { \"on\": true, \"archived\": false, \"salt\": \"c29ydC5vcmRlcg==\", \"sel\": \"8de1085cb7354b0ab41c0e778376dfd3\", \"lastModified\": 1469131558260, \"version\": 81, \"targets\": [ { \"values\": [ \"[email protected]\" ], \"variation\": 0 }, { \"values\": [ \"1461797806429-33-861961230\", \"438580d8-02ee-418d-9eec-0085cab2bdf0\" ], \"variation\": 1 } ], \"rules\": [], \"fallthrough\": { \"variation\": 0 }, \"offVariation\": 1, \"prerequisites\": [], \"_site\": { \"href\": \"/default/production/features/sort.order\", \"type\": \"text/html\" } } } } ``` You can add a date you want a user to be removed from the feature flag's user targets. 
For example, “remove user 1461797806429-33-861961230 from the user target for variation 0 on the Alternate sort order flag in the production environment on Wed Jul 08 2020 at 15:27:41 pm”. This is done using the following: ```json { \"comment\": \"update expiring user targets\", \"instructions\": [ { \"kind\": \"removeExpireUserTargetDate\", \"userKey\": \"userKey\", \"variationId\": \"978d53f9-7fe3-4a63-992d-97bcb4535dc8\" }, { \"kind\": \"updateExpireUserTargetDate\", \"userKey\": \"userKey2\", \"variationId\": \"978d53f9-7fe3-4a63-992d-97bcb4535dc8\", \"value\": 1587582000000 }, { \"kind\": \"addExpireUserTargetDate\", \"userKey\": \"userKey3\", \"variationId\": \"978d53f9-7fe3-4a63-992d-97bcb4535dc8\", \"value\": 1594247266386 } ] } ``` Here is another example. In this feature flag configuration: ```json { \"name\": \"New recommendations engine\", \"key\": \"engine.enable\", \"environments\": { \"test\": { \"on\": true } } } ``` You can change the feature flag's description with the following patch document as a set of executable instructions. For example, “add user X to targets for variation Y and remove user A from targets for variation B for test flag”: ```json { \"comment\": \"\", \"instructions\": [ { \"kind\": \"removeUserTargets\", \"values\": [\"438580d8-02ee-418d-9eec-0085cab2bdf0\"], \"variationId\": \"852cb784-54ff-46b9-8c35-5498d2e4f270\" }, { \"kind\": \"addUserTargets\", \"values\": [\"438580d8-02ee-418d-9eec-0085cab2bdf0\"], \"variationId\": \"1bb18465-33b6-49aa-a3bd-eeb6650b33ad\" } ] } ``` > ### Supported semantic patch API endpoints > > - [Update feature flag](/tag/Feature-flags#operation/patchFeatureFlag) > - [Update expiring user targets on feature flag](/tag/Feature-flags#operation/patchExpiringUserTargets) > - [Update expiring user target for flags](/tag/User-settings#operation/patchExpiringFlagsForUser) > - [Update expiring user targets on segment](/tag/Segments#operation/patchExpiringUserTargetsForSegment) ## Errors The API always returns errors in a common format. Here's an example: ```json { \"code\": \"invalid_request\", \"message\": \"A feature with that key already exists\", \"id\": \"30ce6058-87da-11e4-b116-123b93f75cba\" } ``` The general class of error is indicated by the `code`. The `message` is a human-readable explanation of what went wrong. The `id` is a unique identifier. Use it when you're working with LaunchDarkly support to debug a problem with a specific API call. ### HTTP Status - Error Response Codes | Code | Definition | Desc. | Possible Solution | | ---- | ----------------- | ------------------------------------------------------------------------------------------- | ---------------------------------------------------------------- | | 400 | Bad Request | A request that fails may return this HTTP response code. | Ensure JSON syntax in request body is correct. | | 401 | Unauthorized | User doesn't have permission to an API call. | Ensure your SDK key is good. | | 403 | Forbidden | User does not have permission for operation. | Ensure that the user or access token has proper permissions set. | | 409 | Conflict | The API request could not be completed because it conflicted with a concurrent API request. | Retry your request. | | 429 | Too many requests | See [Rate limiting](/#section/Rate-limiting). | Wait and try again later. | ## CORS The LaunchDarkly API supports Cross Origin Resource Sharing (CORS) for AJAX requests from any origin. If an `Origin` header is given in a request, it will be echoed as an explicitly allowed origin. 
Otherwise, a wildcard is returned: `Access-Control-Allow-Origin: *`. For more information on CORS, see the [CORS W3C Recommendation](http://www.w3.org/TR/cors). Example CORS headers might look like: ```http Access-Control-Allow-Headers: Accept, Content-Type, Content-Length, Accept-Encoding, Authorization Access-Control-Allow-Methods: OPTIONS, GET, DELETE, PATCH Access-Control-Allow-Origin: * Access-Control-Max-Age: 300 ``` You can make authenticated CORS calls just as you would make same-origin calls, using either [token or session-based authentication](#section/Authentication). If you’re using session auth, you should set the `withCredentials` property for your `xhr` request to `true`. You should never expose your access tokens to untrusted users. ## Rate limiting We use several rate limiting strategies to ensure the availability of our APIs. Rate-limited calls to our APIs will return a `429` status code. Calls to our APIs will include headers indicating the current rate limit status. The specific headers returned depend on the API route being called. The limits differ based on the route, authentication mechanism, and other factors. Routes that are not rate limited may not contain any of the headers described below. > ### Rate limiting and SDKs > > LaunchDarkly SDKs are never rate limited and do not use the API endpoints defined here. LaunchDarkly uses a different set of approaches, including streaming/server-sent events and a global CDN, to ensure availability to the routes used by LaunchDarkly SDKs. > > The client-side ID is safe to embed in untrusted contexts. It's designed for use in client-side JavaScript. ### Global rate limits Authenticated requests are subject to a global limit. This is the maximum number of calls that can be made to the API per ten seconds. All personal access tokens on the account share this limit, so exceeding the limit with one access token will impact other tokens. Calls that are subject to global rate limits will return the headers below: | Header name | Description | | ------------------------------ | -------------------------------------------------------------------------------- | | `X-Ratelimit-Global-Remaining` | The maximum number of requests the account is permitted to make per ten seconds. | | `X-Ratelimit-Reset` | The time at which the current rate limit window resets in epoch milliseconds. | We do not publicly document the specific number of calls that can be made globally. This limit may change, and we encourage clients to program against the specification, relying on the two headers defined above, rather than hardcoding to the current limit. ### Route-level rate limits Some authenticated routes have custom rate limits. These also reset every ten seconds. Any access tokens hitting the same route share this limit, so exceeding the limit with one access token may impact other tokens. Calls that are subject to route-level rate limits will return the headers below: | Header name | Description | | ----------------------------- | ----------------------------------------------------------------------------------------------------- | | `X-Ratelimit-Route-Remaining` | The maximum number of requests to the current route the account is permitted to make per ten seconds. | | `X-Ratelimit-Reset` | The time at which the current rate limit window resets in epoch milliseconds. | A _route_ represents a specific URL pattern and verb. 
For example, the [Delete environment](/tag/Environments#operation/deleteEnvironment) endpoint is considered a single route, and each call to delete an environment counts against your route-level rate limit for that route. We do not publicly document the specific number of calls that can be made to each endpoint per ten seconds. These limits may change, and we encourage clients to program against the specification, relying on the two headers defined above, rather than hardcoding to the current limits. ### IP-based rate limiting We also employ IP-based rate limiting on some API routes. If you hit an IP-based rate limit, your API response will include a `Retry-After` header indicating how long to wait before re-trying the call. Clients must wait at least `Retry-After` seconds before making additional calls to our API, and should employ jitter and backoff strategies to avoid triggering rate limits again. ## OpenAPI (Swagger) We have a [complete OpenAPI (Swagger) specification](https://app.launchdarkly.com/api/v2/openapi.json) for our API. You can use this specification to generate client libraries to interact with our REST API in your language of choice. This specification is supported by several API-based tools such as Postman and Insomnia. In many cases, you can directly import our specification to ease use in navigating the APIs in the tooling. ## Client libraries We auto-generate multiple client libraries based on our OpenAPI specification. To learn more, visit [GitHub](https://github.com/search?q=topic%3Alaunchdarkly-api+org%3Alaunchdarkly&type=Repositories). ## Method Overriding Some firewalls and HTTP clients restrict the use of verbs other than `GET` and `POST`. In those environments, our API endpoints that use `PUT`, `PATCH`, and `DELETE` verbs will be inaccessible. To avoid this issue, our API supports the `X-HTTP-Method-Override` header, allowing clients to \"tunnel\" `PUT`, `PATCH`, and `DELETE` requests via a `POST` request. For example, if you wish to call one of our `PATCH` resources via a `POST` request, you can include `X-HTTP-Method-Override:PATCH` as a header. ## Beta resources We sometimes release new API resources in **beta** status before we release them with general availability. Resources that are in beta are still undergoing testing and development. They may change without notice, including becoming backwards incompatible. We try to promote resources into general availability as quickly as possible. This happens after sufficient testing and when we're satisfied that we no longer need to make backwards-incompatible changes. We mark beta resources with a \"Beta\" callout in our documentation, pictured below: > ### This feature is in beta > > To use this feature, pass in a header including the `LD-API-Version` key with value set to `beta`. Use this header with each call. To learn more, read [Beta resources](/#section/Beta-resources). ### Using beta resources To use a beta resource, you must include a header in the request. If you call a beta resource without this header, you'll receive a `403` response. Use this header: ``` LD-API-Version: beta ``` ## Versioning We try hard to keep our REST API backwards compatible, but we occasionally have to make backwards-incompatible changes in the process of shipping new features. These breaking changes can cause unexpected behavior if you don't prepare for them accordingly. Updates to our REST API include support for the latest features in LaunchDarkly. 
We also release a new version of our REST API every time we make a breaking change. We provide simultaneous support for multiple API versions so you can migrate from your current API version to a new version at your own pace. ### Setting the API version per request You can set the API version on a specific request by sending an `LD-API-Version` header, as shown in the example below: ``` LD-API-Version: 20191212 ``` The header value is the version number of the API version you'd like to request. The number for each version corresponds to the date the version was released. In the example above the version `20191212` corresponds to December 12, 2019. ### Setting the API version per access token When creating an access token, you must specify a specific version of the API to use. This ensures that integrations using this token cannot be broken by version changes. Tokens created before versioning was released have their version set to `20160426` (the version of the API that existed before versioning) so that they continue working the same way they did before versioning. If you would like to upgrade your integration to use a new API version, you can explicitly set the header described above. > ### Best practice: Set the header for every client or integration > > We recommend that you set the API version header explicitly in any client or integration you build. > > Only rely on the access token API version during manual testing. # noqa: E501
The version of the OpenAPI document: 2.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from launchdarkly_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from launchdarkly_api.exceptions import ApiAttributeError
def lazy_import():
from launchdarkly_api.model.access_rep import AccessRep
from launchdarkly_api.model.flag_listing_rep import FlagListingRep
from launchdarkly_api.model.link import Link
from launchdarkly_api.model.segment_metadata import SegmentMetadata
from launchdarkly_api.model.user_segment_rule import UserSegmentRule
globals()['AccessRep'] = AccessRep
globals()['FlagListingRep'] = FlagListingRep
globals()['Link'] = Link
globals()['SegmentMetadata'] = SegmentMetadata
globals()['UserSegmentRule'] = UserSegmentRule
class UserSegment(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'name': (str,), # noqa: E501
'tags': ([str],), # noqa: E501
'creation_date': (int,), # noqa: E501
'key': (str,), # noqa: E501
'links': ({str: (Link,)},), # noqa: E501
'rules': ([UserSegmentRule],), # noqa: E501
'version': (int,), # noqa: E501
'deleted': (bool,), # noqa: E501
'generation': (int,), # noqa: E501
'description': (str,), # noqa: E501
'included': ([str],), # noqa: E501
'excluded': ([str],), # noqa: E501
'access': (AccessRep,), # noqa: E501
'flags': ([FlagListingRep],), # noqa: E501
'unbounded': (bool,), # noqa: E501
'unbounded_metadata': (SegmentMetadata,), # noqa: E501
'external': (str,), # noqa: E501
'external_link': (str,), # noqa: E501
'import_in_progress': (bool,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'name': 'name', # noqa: E501
'tags': 'tags', # noqa: E501
'creation_date': 'creationDate', # noqa: E501
'key': 'key', # noqa: E501
'links': '_links', # noqa: E501
'rules': 'rules', # noqa: E501
'version': 'version', # noqa: E501
'deleted': 'deleted', # noqa: E501
'generation': 'generation', # noqa: E501
'description': 'description', # noqa: E501
'included': 'included', # noqa: E501
'excluded': 'excluded', # noqa: E501
'access': '_access', # noqa: E501
'flags': '_flags', # noqa: E501
'unbounded': 'unbounded', # noqa: E501
'unbounded_metadata': '_unboundedMetadata', # noqa: E501
'external': '_external', # noqa: E501
'external_link': '_externalLink', # noqa: E501
'import_in_progress': '_importInProgress', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, name, tags, creation_date, key, links, rules, version, deleted, generation, *args, **kwargs): # noqa: E501
"""UserSegment - a model defined in OpenAPI
Args:
name (str): A human-friendly name for the segment
tags ([str]): Tags for the segment
creation_date (int):
key (str): A unique key used to reference the segment
links ({str: (Link,)}):
rules ([UserSegmentRule]):
version (int):
deleted (bool):
generation (int):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
description (str): A description of the segment's purpose. [optional] # noqa: E501
included ([str]): Included users are always segment members, regardless of segment rules. For Big Segments this array is either empty or omitted entirely.. [optional] # noqa: E501
excluded ([str]): Segment rules bypass excluded users, so they will never be included based on rules. Excluded users may still be included explicitly. This value is omitted for Big Segments.. [optional] # noqa: E501
access (AccessRep): [optional] # noqa: E501
flags ([FlagListingRep]): [optional] # noqa: E501
unbounded (bool): [optional] # noqa: E501
unbounded_metadata (SegmentMetadata): [optional] # noqa: E501
external (str): [optional] # noqa: E501
external_link (str): [optional] # noqa: E501
import_in_progress (bool): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name = name
self.tags = tags
self.creation_date = creation_date
self.key = key
self.links = links
self.rules = rules
self.version = version
self.deleted = deleted
self.generation = generation
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, name, tags, creation_date, key, links, rules, version, deleted, generation, *args, **kwargs): # noqa: E501
"""UserSegment - a model defined in OpenAPI
Args:
name (str): A human-friendly name for the segment
tags ([str]): Tags for the segment
creation_date (int):
key (str): A unique key used to reference the segment
links ({str: (Link,)}):
rules ([UserSegmentRule]):
version (int):
deleted (bool):
generation (int):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
description (str): A description of the segment's purpose. [optional] # noqa: E501
included ([str]): Included users are always segment members, regardless of segment rules. For Big Segments this array is either empty or omitted entirely.. [optional] # noqa: E501
excluded ([str]): Segment rules bypass excluded users, so they will never be included based on rules. Excluded users may still be included explicitly. This value is omitted for Big Segments.. [optional] # noqa: E501
access (AccessRep): [optional] # noqa: E501
flags ([FlagListingRep]): [optional] # noqa: E501
unbounded (bool): [optional] # noqa: E501
unbounded_metadata (SegmentMetadata): [optional] # noqa: E501
external (str): [optional] # noqa: E501
external_link (str): [optional] # noqa: E501
import_in_progress (bool): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name = name
self.tags = tags
self.creation_date = creation_date
self.key = key
self.links = links
self.rules = rules
self.version = version
self.deleted = deleted
self.generation = generation
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
py | 1a33f453015bf5f097b53861e7be032d99bea28c | # Solution to Problem 0011
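# Approach: for each cell in the 20x20 grid, take the product of four numbers in
# each of the eight directions (out-of-range lookups fall into the except branches
# and contribute a 0) and keep the running maximum over all of them.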
def right(mat, i, j):
nums = []
for k in range(4):
try:
nums.append(mat[i][j+k])
except:
nums.append(0)
return nums
def down_right(mat, i, j):
nums = []
for k in range(4):
try:
nums.append(mat[i+k][j+k])
except:
nums.append(0)
return nums
def down(mat, i, j):
nums = []
for k in range(4):
try:
nums.append(mat[i+k][j])
except:
nums.append(0)
return nums
def down_left(mat, i, j):
nums = []
for k in range(4):
try:
nums.append(mat[i+k][j-k])
except:
nums.append(0)
return nums
def left(mat, i, j):
nums = []
for k in range(4):
try:
nums.append(mat[i][j-k])
except:
nums.append(0)
return nums
def up_left(mat, i, j):
nums = []
for k in range(4):
try:
nums.append(mat[i-k][j-k])
except:
nums.append(0)
return nums
def up(mat, i, j):
nums = []
for k in range(4):
try:
nums.append(mat[i-k][j])
except:
nums.append(0)
return nums
def up_right(mat, i, j):
nums = []
for k in range(4):
try:
nums.append(mat[i-k][j+k])
except:
nums.append(0)
return nums
def product(vec):
result = 1
for x in vec:
result = result * x
return result
def solution():
biggest = 0
grid = """
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
"""
grid = [row.strip() for row in grid.split("\n")]
grid = [[int(cell) for cell in row.split(" ")] for row in grid if row != ""]
v = []
for i in range(20):
for j in range(20):
v.append(product(right(grid, i, j)))
v.append(product(down_right(grid, i, j)))
v.append(product(down(grid, i, j)))
v.append(product(down_left(grid, i, j)))
v.append(product(left(grid, i, j)))
v.append(product(up_left(grid, i, j)))
v.append(product(up(grid, i, j)))
v.append(product(up_right(grid, i, j)))
if max(v) >= biggest:
biggest = max(v)
v = []
else:
v = []
return biggest
if __name__ == "__main__":
print(solution()) |
py | 1a33f4a7fd3a0ed9356efc09813562c3aabe1a6c | import tensorflow as tf
hello = tf.constant('Hello, TensorFlow')
sess = tf.Session()
print(sess.run(hello)) |
py | 1a33f4fa6823998ee412bd301a20f20e8dcd5089 | from .todo import Todo
__red_end_user_data_statement__ = "Todo lists are stored."
def setup(bot):
bot.add_cog(Todo(bot))
pass
|
py | 1a33f56f86251eb4d9f1f11e6847fee307de9e90 | import datetime
from dataclasses import dataclass
from typing import Dict, List, Tuple
import appdaemon.plugins.hass.hassapi as hass
@dataclass
class Preferences:
input_time: str
input_temperature: str
target_area: str
@classmethod
def from_args(cls, prefs: Dict[str, Dict[str, str]]) -> Dict[str, "Preferences"]:
ret = {}
for k, v in prefs.items():
ret[k] = cls(**v)
return ret
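# Illustrative shape of the `preferences` app argument consumed above (the keys and
# entity ids are hypothetical examples, not taken from a real config):
#   {"morning": {"input_time": "input_datetime.morning_start",
#                "input_temperature": "input_number.morning_temp",
#                "target_area": "living_room"}}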
class Climate(hass.Hass):
"""Hacs class."""
def initialize(self):
try:
self.thermostat = self.args["thermostat"]
except KeyError:
self.log("missing required argument: thermostat")
raise
self.mode_switching_enabled = self.args.get("mode_switching_enabled", False)
try:
self.prefs = Preferences.from_args(self.args["preferences"])
except KeyError:
self.log("missing required argument: preferences")
raise
self.log(f"preferences: {self.prefs}")
self.time_pref = self.create_pref_time_dict()
try:
self._outside_temperature_sensor = self.args["weather_sensor"]
except KeyError:
self.log("missing required argument: weather_sensor")
raise
self.run_minutely(self.temperature_check, datetime.time(0, 0, 0))
@property
def outside_temperature(self) -> float:
return float(self.get_state(self._outside_temperature_sensor))
@property
def max_temperature(self) -> int:
return int(self.args.get("max_temperature", 80))
@property
def min_temperature(self) -> int:
return int(self.args.get("min_temperature", 60))
@property
def thermostat_temperature(self) -> int:
return int(self.get_state(
self.thermostat, attribute="current_temperature"
))
def temperature_check(self, kwargs):
self.log("Checking temperature")
pref = self.nearest(self.time_pref.keys(), self.get_now())
preference = self.time_pref.get(pref)
self.log(f"using preference: {preference}")
self._set_temp(preference)
def _set_temp(self, preference: Preferences):
temp_to_set = float(self.get_state(preference.input_temperature))
current_outside_temp = self.outside_temperature
current_state = self.get_state(self.thermostat)
thermostat_temp = self.thermostat_temperature
sensors = self.args.get("inside_temperature_sensors", {})
current_temps = self.get_current_temperatures(sensors)
target_area = preference.target_area
if target_area in current_temps:
target_area_temp = current_temps[target_area]
self.log(
f"Target area: {target_area} adjusted temperature: {target_area_temp}, actual: {current_temps[target_area]}"
)
else:
self.log("Target area not currently in current temperatures")
target_area_temp = thermostat_temp
try:
adjustment = thermostat_temp - current_temps[target_area]
except KeyError:
self.log(
f"Could not find target area: {target_area} in current temperatures"
)
adjustment = 0
temp_to_set += adjustment
if temp_to_set > self.max_temperature:
self.log(f"temp: {temp_to_set} was too high, using max temperature: {self.max_temperature}")
temp_to_set = self.max_temperature
elif temp_to_set < self.min_temperature:
self.log(f"temp: {temp_to_set} was too low, using min temperature: {self.min_temperature}")
temp_to_set = self.min_temperature
else:
self.log(f"temp_to_set: {temp_to_set} within temperature boundaries")
self.log(
f"adj_temp: {temp_to_set}, thermostat_temp: {thermostat_temp}, current_outside_temp: {current_outside_temp}"
)
if target_area_temp > current_outside_temp:
mode = "heat"
else:
mode = "cool"
self.log(f"Current mode: {current_state}, desired mode: {mode}")
if mode == "cool" and self.min_temperature == temp_to_set and self.mode_switching_enabled and current_state == "heat":
self.log(f"Changing climate mode from {current_state} to {mode}")
self.call_service(
"climate/set_hvac_mode", hvac_mode=mode, entity_id=self.thermostat
)
if current_state != mode and self.mode_switching_enabled:
self.log(f"Changing climate mode from {current_state} to {mode}")
self.call_service(
"climate/set_hvac_mode", hvac_mode=mode, entity_id=self.thermostat
)
self.log(
f"Current Temp Outside: {current_outside_temp}, current indoor temp: {thermostat_temp} setting indoor temp to: {temp_to_set}, using mode: {mode}"
)
self.call_service(
"climate/set_temperature", entity_id=self.thermostat, temperature=temp_to_set
)
def get_current_temperatures(self, sensors):
current_temps = {}
for k, v in sensors.items():
temps = []
for x in v["sensors"]:
inside_temp = self.get_state(x)
try:
temps.append(float(inside_temp))
except ValueError:
self.log(f"could not parse {inside_temp}")
if temps:
current_temps[k] = sum(temps) / len(temps)
self.log(f"Current temperature: {k} {current_temps[k]}")
return current_temps
def nearest(self, items, pivot):
date_items = [
datetime.datetime.combine(datetime.date.today(), x, tzinfo=pivot.tzinfo)
for x in items
]
date_items = [x for x in date_items if x < pivot]
if not date_items:
return min(items)
return min(date_items, key=lambda x: abs(x - pivot)).time()
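# nearest() worked example (illustrative times): with preference times of
# 06:00 and 22:00 and a current time of 14:30, only 06:00 has already passed
# today, so 06:00 is returned; before 06:00 nothing has passed yet and the
# earliest preference time, min(items), is used as the fallback.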
def create_pref_time_dict(self) -> Dict[datetime.time, Preferences]:
ret = {}
for val in self.prefs.values():
state = self.get_state(val.input_time)
try:
ret[self.parse_time(state, aware=True)] = val
except TypeError:
self.log(f"Error parsing: {state}")
return ret |
py | 1a33f5b842cc03d5fdcd8f16f3d5075828adf70f | from torch import nn
from transformers import BertModel, BertTokenizer, BertConfig
import json
from typing import List, Dict, Optional
import os
import torch
from collections import OrderedDict
import numpy as np
import logging
class BioBERT(nn.Module):
"""Huggingface AutoModel to generate token embeddings.
Loads the correct class, e.g. BERT / RoBERTa etc.
"""
def __init__(self, max_seq_length: int = 128, model_args: Dict = {}, cache_dir: Optional[str] = None ):
super(BioBERT, self).__init__()
self.config_keys = ['max_seq_length']
self.max_seq_length = max_seq_length
config = BertConfig.from_json_file('/mnt/nas2/jaimeen/COVID/BioBERT/config.json')
self.auto_model = BertModel(config=config)
self.vocab = self.load_bert_vocab('/mnt/nas2/jaimeen/COVID/BioBERT/vocab.txt')
self.tokenizer = BertTokenizer(vocab_file='/mnt/nas2/jaimeen/COVID/BioBERT/vocab.txt', max_length=max_seq_length)
def load_bert_vocab(self, vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def load_pretrained(self, config):
state_dict = torch.load('/mnt/nas2/jaimeen/COVID/BioBERT/pytorch_model.bin')
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if k.startswith('bert.'):
k = k.replace('bert.', '')
new_state_dict[k] = v
elif k.startswith('cls.'):
continue
else:
new_state_dict[k] = v
self.model = BertModel(config)
self.model.load_state_dict(new_state_dict)
def forward(self, features):
"""Returns token_embeddings, cls_token"""
output_states = self.auto_model(**features)
output_tokens = output_states[0]
cls_tokens = output_tokens[:, 0, :] # CLS token is first token
features.update({'token_embeddings': output_tokens, 'cls_token_embeddings': cls_tokens, 'attention_mask': features['attention_mask']})
if self.auto_model.config.output_hidden_states:
all_layer_idx = 2
if len(output_states) < 3: #Some models only output last_hidden_states and all_hidden_states
all_layer_idx = 1
hidden_states = output_states[all_layer_idx]
features.update({'all_layer_embeddings': hidden_states})
return features
def get_word_embedding_dimension(self) -> int:
return self.auto_model.config.hidden_size
def tokenize(self, text: str) -> List[int]:
"""
Tokenizes a text and maps tokens to token-ids
"""
return self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))
def get_sentence_features(self, tokens: List[int], pad_seq_length: int):
"""
Convert tokenized sentence in its embedding ids, segment ids and mask
:param tokens:
a tokenized sentence
:param pad_seq_length:
the maximal length of the sequence. Cannot be greater than self.sentence_transformer_config.max_seq_length
:return: embedding ids, segment ids and mask for the sentence
"""
pad_seq_length = min(pad_seq_length, self.max_seq_length) + 3 #Add space for special tokens
return self.tokenizer.prepare_for_model(tokens, max_length=pad_seq_length, pad_to_max_length=True, return_tensors='pt')
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path: str):
self.auto_model.save_pretrained(output_path)
self.tokenizer.save_pretrained(output_path)
with open(os.path.join(output_path, '/mnt/nas2/jaimeen/COVID/BioBERT/config.json'), 'w') as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
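# A minimal usage sketch (assumptions: the hard-coded /mnt/nas2/... BioBERT
# checkpoint files exist on the host and the installed transformers version
# matches the one this wrapper was written against; the sentence is illustrative):
#
#   model = BioBERT(max_seq_length=128)
#   ids = model.tokenize("Coronaviruses are enveloped RNA viruses.")
#   feats = model.get_sentence_features(ids, pad_seq_length=128)
#   out = model(feats)  # adds 'token_embeddings' and 'cls_token_embeddings'
#   print(model.get_word_embedding_dimension())
#
# Note: because os.path.join drops output_path when its second argument is an
# absolute path, save() as written points the config dump at the original
# BioBERT directory rather than output_path.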
|
py | 1a33f60e7ba514d369beea83a2e7667812334348 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-07-18 18:01
from __future__ import unicode_literals
import common.blocks.collapsebox
import common.blocks.columns
import common.blocks.table
import common.blocks.tabs
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.contrib.taggit
import modelcluster.fields
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtailembeds.blocks
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('wagtailcore', '0030_index_on_pagerevision_created_at'),
('common', '0052_auto_20170331_1949'),
]
operations = [
migrations.CreateModel(
name='PageTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content_object', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagged_keywords', to='wagtailcore.Page')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='common_pagetag_items', to='taggit.Tag')),
],
options={
'abstract': False,
},
),
migrations.AlterField(
model_name='custompage',
name='content',
field=wagtail.wagtailcore.fields.StreamField((('appeal', wagtail.wagtailcore.blocks.StructBlock((('icon', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[('none', 'none'), ('flask', 'flask'), ('group', 'group'), ('laptop', 'laptop'), ('sitemap', 'sitemap'), ('user', 'user'), ('book', 'book'), ('download', 'download')])), ('topic', wagtail.wagtailcore.blocks.CharBlock(max_length=35, required=True)), ('content', wagtail.wagtailcore.blocks.RichTextBlock(required=True))), classname='appeal', icon='tick', template='common/blocks/appeal.html')), ('heading', wagtail.wagtailcore.blocks.CharBlock(classname='full title')), ('statement', wagtail.wagtailcore.blocks.CharBlock()), ('paragraph', wagtail.wagtailcore.blocks.RichTextBlock()), ('imagechooser', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('column', common.blocks.columns.RowBlock()), ('tabs', wagtail.wagtailcore.blocks.StructBlock((('tab_list', common.blocks.tabs.TabListBlock()), ('tabs_style', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[('vertical', 'Vertical'), ('horizontal', 'Horizontal')], default='horizontal'))))), ('image', wagtail.wagtailcore.blocks.StructBlock((('main_image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('style', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[('max-width:225px;max-height:145px', 'Small'), ('max-width:225px;max-height:145px;padding-top:20px', 'Small Pushed Down 20px'), ('max_width:250px;max-height:250px', 'Medium'), ('max_width:250px;max-height:250px;padding-top:20px', 'Medium Pushed Down 20px'), ('height:auto', 'Shrink to Fit')], default='height:auto')), ('url', wagtail.wagtailcore.blocks.CharBlock(max_length=250, required=False))))), ('customImage', wagtail.wagtailcore.blocks.StructBlock((('css_style', wagtail.wagtailcore.blocks.CharBlock(required=False)), ('main_image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('url', wagtail.wagtailcore.blocks.CharBlock(max_length=250, required=False))))), ('rich_text', wagtail.wagtailcore.blocks.RichTextBlock()), ('raw_html', wagtail.wagtailcore.blocks.RawHTMLBlock(help_text='With great power comes great responsibility. This HTML is unescaped. \nBe careful!')), ('people_block', wagtail.wagtailcore.blocks.StructBlock((('css_style', wagtail.wagtailcore.blocks.CharBlock(required=False)), ('displayStyle', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[('concise-team', 'concise-team'), ('concise-alum', 'concise-alum'), ('concise-ambassador', 'concise-ambassador'), ('detailed', 'detailed')], default='concise-team')), ('tag', wagtail.wagtailcore.blocks.CharBlock(max_length=20))))), ('centered_text', wagtail.wagtailcore.blocks.StructBlock((('css_style', wagtail.wagtailcore.blocks.CharBlock(required=False)), ('text', wagtail.wagtailcore.blocks.RichTextBlock())))), ('hero_block', wagtail.wagtailcore.blocks.StructBlock((('css_style', wagtail.wagtailcore.blocks.CharBlock(required=False)), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=True)), ('description', wagtail.wagtailcore.blocks.RawHTMLBlock(required=True)), ('image_display_setting', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[('background', 'Cover the whole Hero as a background'), ('icon', 'Center the image in the middle of the hero block')])), ('text_color', wagtail.wagtailcore.blocks.CharBlock(help_text='Enter a color for the text.'))))), ('spotlight_block', wagtail.wagtailcore.blocks.StructBlock((('css_style', wagtail.wagtailcore.blocks.CharBlock(required=False)), ('bubbles', wagtail.wagtailcore.blocks.StreamBlock((('bubble_block', wagtail.wagtailcore.blocks.StructBlock((('css_style', wagtail.wagtailcore.blocks.CharBlock(required=False)), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('title', wagtail.wagtailcore.blocks.CharBlock(max_length=35, required=True)), ('description', wagtail.wagtailcore.blocks.RichTextBlock(required=True))))),)))))), ('job_whole_block', wagtail.wagtailcore.blocks.StructBlock((('css_style', wagtail.wagtailcore.blocks.CharBlock(required=False)),))), ('embed_block', wagtail.wagtailembeds.blocks.EmbedBlock()), ('whitespaceblock', wagtail.wagtailcore.blocks.StructBlock((('css_style', wagtail.wagtailcore.blocks.CharBlock(required=False)), ('height', wagtail.wagtailcore.blocks.IntegerBlock())))), ('clear_fixblock', wagtail.wagtailcore.blocks.StructBlock((('css_style', wagtail.wagtailcore.blocks.CharBlock(required=False)),))), ('code_block', wagtail.wagtailcore.blocks.StructBlock((('css_style', wagtail.wagtailcore.blocks.CharBlock(required=False)), ('language', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[('python', 'python'), ('css', 'css'), ('sql', 'sql'), ('javascript', 'javascript'), ('clike', 'clike'), ('markup', 'markup'), ('java', 'java')], default='python')), ('codes', wagtail.wagtailcore.blocks.TextBlock())))), ('table_block', common.blocks.table.CustomTableBlock()), ('calender_block', wagtail.wagtailcore.blocks.StructBlock((('css_style', wagtail.wagtailcore.blocks.CharBlock(required=False)), ('source', wagtail.wagtailcore.blocks.CharBlock(help_text='Such as: [email protected]. \nYou can also change the style of calendar block.', max_length=255, required=True))))), ('journal_block', wagtail.wagtailcore.blocks.StructBlock((('css_style', wagtail.wagtailcore.blocks.CharBlock(required=False)), ('displayStyle', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[('participating journals', 'participating journals'), ('eligible journals', 'eligible journals'), ('journals signatory', 'journals signatory')], default='participating journals'))))), ('render_file', wagtail.wagtailcore.blocks.StructBlock((('css_style', wagtail.wagtailcore.blocks.CharBlock(required=False)), ('file_link', wagtail.wagtailcore.blocks.CharBlock(help_text='Full link to the file on the OSF', max_length=255, required=True))))), ('sponsor_partner_block', wagtail.wagtailcore.blocks.StructBlock((('displayChoice', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[('sponsors', 'sponsors'), ('partners', 'partners')], default='sponsors')),))), ('collapse_block', wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock()), ('list', common.blocks.collapsebox.CollapseBoxListBlock())))), ('button', wagtail.wagtailcore.blocks.StructBlock((('css_style', wagtail.wagtailcore.blocks.CharBlock(required=False)), ('description', wagtail.wagtailcore.blocks.CharBlock(max_length=255, required=True)), ('link', wagtail.wagtailcore.blocks.CharBlock(max_length=255, required=True)), ('on_click', wagtail.wagtailcore.blocks.CharBlock(max_length=255, required=False)))))), blank=True, null=True),
),
migrations.AddField(
model_name='custompage',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='common.PageTag', to='taggit.Tag', verbose_name='Tags'),
),
migrations.AddField(
model_name='newsarticle',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='common.PageTag', to='taggit.Tag', verbose_name='Tags'),
),
migrations.AddField(
model_name='newsindexpage',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='common.PageTag', to='taggit.Tag', verbose_name='Tags'),
),
migrations.AddField(
model_name='pagealias',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='common.PageTag', to='taggit.Tag', verbose_name='Tags'),
),
]
|
py | 1a33f62f1d43cedfe8ac61903d3b0f9435bbb992 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import random
import sys
from inky import InkyWHAT
from PIL import Image, ImageFont, ImageDraw
from font_source_serif_pro import SourceSerifProSemibold
from font_source_sans_pro import SourceSansProSemibold
print("""Inky wHAT: Quotes
Display quotes on Inky wHAT.
""")
try:
import wikiquotes
except ImportError:
print("""This script requires the wikiquotes module.
Install with:
sudo apt install python-lxml
sudo pip install wikiquotes
""")
sys.exit(1)
# Command line argument to set the ePaper display colour
parser = argparse.ArgumentParser()
parser.add_argument('--colour', '-c', type=str, required=True, choices=["red", "black", "yellow"], help="ePaper display colour")
args = parser.parse_args()
colour = args.colour
# This function will take a quote as a string, a width to fit
# it into, and a font (one that's been loaded) and then reflow
# that quote with newlines to fit into the space required.
def reflow_quote(quote, width, font):
words = quote.split(" ")
reflowed = '"'
line_length = 0
for i in range(len(words)):
word = words[i] + " "
word_length = font.getsize(word)[0]
line_length += word_length
if line_length < width:
reflowed += word
else:
line_length = word_length
reflowed = reflowed[:-1] + "\n " + word
reflowed = reflowed.rstrip() + '"'
return reflowed
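# reflow_quote() worked example (illustrative widths): with width=300 and a font
# where each word measures roughly 80px, reflow_quote("so much depends upon", 300, font)
# would come back as '"so much depends\n upon"' -- a word is pushed onto a new
# line once the running line length exceeds the target width.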
# Set up the correct display and scaling factors
inky_display = InkyWHAT(colour)
inky_display.set_border(inky_display.WHITE)
# inky_display.set_rotation(180)
w = inky_display.WIDTH
h = inky_display.HEIGHT
# Create a new canvas to draw on
img = Image.new("P", (inky_display.WIDTH, inky_display.HEIGHT))
draw = ImageDraw.Draw(img)
# Load the fonts
font_size = 24
author_font = ImageFont.truetype(SourceSerifProSemibold, font_size)
quote_font = ImageFont.truetype(SourceSansProSemibold, font_size)
# A list of famous scientists to search for quotes from
# on https://en.wikiquote.org. Change them to your
# favourite people, if you like!
people = [
"Ada Lovelace",
"Carl Sagan",
"Charles Darwin",
"Dorothy Hodgkin",
"Edith Clarke",
"Grace Hopper",
"Hedy Lamarr",
"Isaac Newton",
"James Clerk Maxwell",
"Margaret Hamilton",
"Marie Curie",
"Michael Faraday",
"Niels Bohr",
"Nikola Tesla",
"Rosalind Franklin",
"Stephen Hawking"
]
# The amount of padding around the quote. Note that
# a value of 30 means 15 pixels padding left and 15
# pixels padding right.
#
# Also define the max width and height for the quote.
padding = 50
max_width = w - padding
max_height = h - padding - author_font.getsize("ABCD ")[1]
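# The height of "ABCD " in the author font approximates one line of text,
# reserving vertical space below the quote for the author credit.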
below_max_length = False
# Only pick a quote that will fit in our defined area
# once rendered in the font and size defined.
while not below_max_length:
person = random.choice(people) # Pick a random person from our list
quote = wikiquotes.random_quote(person, "english")
reflowed = reflow_quote(quote, max_width, quote_font)
p_w, p_h = quote_font.getsize(reflowed) # Width and height of quote
p_h = p_h * (reflowed.count("\n") + 1) # Multiply through by number of lines
if p_h < max_height:
below_max_length = True # The quote fits! Break out of the loop.
else:
continue
# x- and y-coordinates for the top left of the quote
quote_x = (w - max_width) / 2
quote_y = ((h - max_height) + (max_height - p_h - author_font.getsize("ABCD ")[1])) / 2
# x- and y-coordinates for the top left of the author
author_x = quote_x
author_y = quote_y + p_h
author = "- " + person
# Draw red rectangles top and bottom to frame quote
draw.rectangle((padding / 4, padding / 4, w - (padding / 4), quote_y - (padding / 4)), fill=inky_display.RED)
draw.rectangle((padding / 4, author_y + author_font.getsize("ABCD ")[1] + (padding / 4) + 5, w - (padding / 4), h - (padding / 4)), fill=inky_display.RED)
# Add some white hatching to the red rectangles to make
# it look a bit more interesting
hatch_spacing = 12
for x in range(0, 2 * w, hatch_spacing):
draw.line((x, 0, x - w, h), fill=inky_display.WHITE, width=3)
# Write our quote and author to the canvas
draw.multiline_text((quote_x, quote_y), reflowed, fill=inky_display.BLACK, font=quote_font, align="left")
draw.multiline_text((author_x, author_y), author, fill=inky_display.RED, font=author_font, align="left")
print(reflowed + "\n" + author + "\n")
# Display the completed canvas on Inky wHAT
inky_display.set_image(img)
inky_display.show()
|