filename | text
---|---|
the-stack_0_14825 | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool acceptance of raw transactions."""
from io import BytesIO
from test_framework.test_framework import SthcoinTestFramework
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
COIN,
COutPoint,
CTransaction,
CTxOut,
MAX_BLOCK_BASE_SIZE,
)
from test_framework.script import (
hash160,
CScript,
OP_0,
OP_EQUAL,
OP_HASH160,
OP_RETURN,
)
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
bytes_to_hex_str,
hex_str_to_bytes,
wait_until,
)
class MempoolAcceptanceTest(SthcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
'-txindex',
'-reindex', # Need reindex for txindex
'-acceptnonstdtxn=0', # Try to mimic main-net
]] * self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def check_mempool_result(self, result_expected, *args, **kwargs):
"""Wrapper to check result of testmempoolaccept on node_0's mempool"""
result_test = self.nodes[0].testmempoolaccept(*args, **kwargs)
assert_equal(result_expected, result_test)
assert_equal(self.nodes[0].getmempoolinfo()['size'], self.mempool_size) # Must not change mempool state
def run_test(self):
node = self.nodes[0]
self.log.info('Start with empty mempool, and 200 blocks')
self.mempool_size = 0
wait_until(lambda: node.getblockcount() == 200)
assert_equal(node.getmempoolinfo()['size'], self.mempool_size)
self.log.info('Should not accept garbage to testmempoolaccept')
assert_raises_rpc_error(-3, 'Expected type array, got string', lambda: node.testmempoolaccept(rawtxs='ff00baar'))
assert_raises_rpc_error(-8, 'Array must contain exactly one raw transaction for now', lambda: node.testmempoolaccept(rawtxs=['ff00baar', 'ff22']))
assert_raises_rpc_error(-22, 'TX decode failed', lambda: node.testmempoolaccept(rawtxs=['ff00baar']))
self.log.info('A transaction already in the blockchain')
coin = node.listunspent()[0] # Pick a random coin(base) to spend
raw_tx_in_block = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': coin['txid'], 'vout': coin['vout']}],
outputs=[{node.getnewaddress(): 0.3}, {node.getnewaddress(): 49}],
))['hex']
txid_in_block = node.sendrawtransaction(hexstring=raw_tx_in_block, allowhighfees=True)
node.generate(1)
self.check_mempool_result(
result_expected=[{'txid': txid_in_block, 'allowed': False, 'reject-reason': '18: txn-already-known'}],
rawtxs=[raw_tx_in_block],
)
self.log.info('A transaction not in the mempool')
fee = 0.00000700
raw_tx_0 = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{"txid": txid_in_block, "vout": 0, "sequence": BIP125_SEQUENCE_NUMBER}], # RBF is used later
outputs=[{node.getnewaddress(): 0.3 - fee}],
))['hex']
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
txid_0 = tx.rehash()
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': True}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction in the mempool')
node.sendrawtransaction(hexstring=raw_tx_0)
self.mempool_size = 1
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': False, 'reject-reason': '18: txn-already-in-mempool'}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction that replaces a mempool transaction')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vout[0].nValue -= int(fee * COIN) # Double the fee
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER + 1 # Now, opt out of RBF
raw_tx_0 = node.signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize()))['hex']
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
txid_0 = tx.rehash()
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': True}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction that conflicts with an unconfirmed tx')
# Send the transaction that replaces the mempool transaction and opts out of replaceability
node.sendrawtransaction(hexstring=bytes_to_hex_str(tx.serialize()), allowhighfees=True)
# take original raw_tx_0
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vout[0].nValue -= int(4 * fee * COIN) # Set more fee
# skip re-signing the tx
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '18: txn-mempool-conflict'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
allowhighfees=True,
)
self.log.info('A transaction with missing inputs, that never existed')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vin[0].prevout = COutPoint(hash=int('ff' * 32, 16), n=14)
# skip re-signing the tx
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with missing inputs, that existed once in the past')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vin[0].prevout.n = 1 # Set vout to 1, to spend the other outpoint (49 coins) of the in-chain-tx we want to double spend
raw_tx_1 = node.signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize()))['hex']
txid_1 = node.sendrawtransaction(hexstring=raw_tx_1, allowhighfees=True)
# Now spend both to "clearly hide" the outputs, ie. remove the coins from the utxo set by spending them
raw_tx_spend_both = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[
{'txid': txid_0, 'vout': 0},
{'txid': txid_1, 'vout': 0},
],
outputs=[{node.getnewaddress(): 0.1}]
))['hex']
txid_spend_both = node.sendrawtransaction(hexstring=raw_tx_spend_both, allowhighfees=True)
node.generate(1)
self.mempool_size = 0
# Now see if we can add the coins back to the utxo set by sending the exact txs again
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[raw_tx_0],
)
self.check_mempool_result(
result_expected=[{'txid': txid_1, 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[raw_tx_1],
)
self.log.info('Create a signed "reference" tx for later use')
raw_tx_reference = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': txid_spend_both, 'vout': 0}],
outputs=[{node.getnewaddress(): 0.05}],
))['hex']
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
# Reference tx should be valid on itself
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': True}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with no outputs')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout = []
# Skip re-signing the transaction for context independent checks from now on
# tx.deserialize(BytesIO(hex_str_to_bytes(node.signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize()))['hex'])))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-vout-empty'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A really large transaction')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin = [tx.vin[0]] * (MAX_BLOCK_BASE_SIZE // len(tx.vin[0].serialize()))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-oversize'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with negative output value')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].nValue *= -1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-vout-negative'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with too large output value')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].nValue = 21000000 * COIN + 1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-vout-toolarge'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with too large sum of output values')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout = [tx.vout[0]] * 2
tx.vout[0].nValue = 21000000 * COIN
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-txouttotal-toolarge'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with duplicate inputs')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin = [tx.vin[0]] * 2
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-inputs-duplicate'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A coinbase transaction')
# Pick the input of the first tx we signed, so it has to be a coinbase tx
raw_tx_coinbase_spent = node.getrawtransaction(txid=node.decoderawtransaction(hexstring=raw_tx_in_block)['vin'][0]['txid'])
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_coinbase_spent)))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: coinbase'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('Some nonstandard transactions')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.nVersion = 3 # A version currently non-standard
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: version'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].scriptPubKey = CScript([OP_0]) # Some non-standard script
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: scriptpubkey'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].scriptSig = CScript([OP_HASH160]) # Some not-pushonly scriptSig
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: scriptsig-not-pushonly'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
output_p2sh_burn = CTxOut(nValue=540, scriptPubKey=CScript([OP_HASH160, hash160(b'burn'), OP_EQUAL]))
num_scripts = 100000 // len(output_p2sh_burn.serialize()) # Use enough outputs to make the tx too large for our policy
tx.vout = [output_p2sh_burn] * num_scripts
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: tx-size'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0] = output_p2sh_burn
tx.vout[0].nValue -= 1 # Make output smaller, such that it is dust for our policy
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: dust'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'\xff'])
tx.vout = [tx.vout[0]] * 2
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: multi-op-return'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A timelocked transaction')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].nSequence -= 1 # Should be non-max, so locktime is not ignored
tx.nLockTime = node.getblockcount() + 1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: non-final'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction that is locked by BIP68 sequence logic')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].nSequence = 2 # We could include it in the second block mined from now, but not the very next one
# Can skip re-signing the tx because of early rejection
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: non-BIP68-final'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
allowhighfees=True,
)
if __name__ == '__main__':
MempoolAcceptanceTest().main()
|
the-stack_0_14826 | from collections import Counter
from collections import OrderedDict
players = ['Mike', 'Chris', 'Arnold']
standings = OrderedDict([(player, Counter()) for player in players])
print('standings:', standings)
standings['Mike']['game_played'] += 1
standings['Mike']['score'] = 2
standings['Mike']['game_played'] += 1
standings['Mike']['score'] = 3
standings['Arnold']['game_played'] += 1
standings['Arnold']['score'] = 5
standings['Chris']['game_played'] += 1
standings['Chris']['score'] = 5
rank = 1
print("standings.items:", standings.items())
standings_with_index = enumerate(standings.items())
print("standings.items.enum:", standings_with_index)
ranks = [(-counter['score'], counter['game_played'], i, name)
for i, (name, counter) in enumerate(standings.items())]
print("ranks", ranks)
print("Winner: {}".format(sorted(ranks)[rank - 1][3]))
|
the-stack_0_14829 | import io
import re
import six
from boto3.session import Session
from botocore.config import Config
AWS_ACCESS_KEY = 'AKIAJXFC3JRVYNIHX2UA'
AWS_ACCESS_SECRET_KEY = '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'
S3_BUCKET_NAME_PATTERN = re.compile(r'^[a-z0-9][a-z0-9\-]{1,61}[a-z0-9]$')
S3_KEY_PATTERN = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9\-./_]{3,253}[a-zA-Z0-9]$')
class S3Error(AssertionError):
pass
def get_client():
session = Session(aws_access_key_id=AWS_ACCESS_KEY, aws_secret_access_key=AWS_ACCESS_SECRET_KEY)
return session.client('s3', config=Config(signature_version='s3v4'))
def validate_bucket_name(bucket):
if not S3_BUCKET_NAME_PATTERN.match(bucket) or '--' in bucket:
raise S3Error('invalid bucket name {}'.format(bucket))
def validate_key_name(key):
if not S3_KEY_PATTERN.match(key):
raise S3Error('invalid s3 key name {}'.format(key))
def validate_content(content):
if content is None:
raise S3Error('no content to upload')
if not isinstance(content, bytes) and not hasattr(content, 'read'):
raise S3Error('content is neither a string nor a file like object, content={}'.format(content))
def download(bucket, key):
"""
always returns a byte string
"""
validate_bucket_name(bucket)
validate_key_name(key)
client = get_client()
# do a buffered download
bytes_io = io.BytesIO()
client.download_fileobj(bucket, key, bytes_io)
# hope that stuff is not too big, and just return content
return bytes_io.getvalue()
def download_file(bucket, key, filename):
"""
always returns a byte string
"""
validate_bucket_name(bucket)
validate_key_name(key)
client = get_client()
client.download_file(bucket, key, filename)
def upload(bucket, key, content, extra_args):
"""replace if key exists"""
# validate_content(content)
validate_bucket_name(bucket)
validate_key_name(key)
client = get_client()
    if extra_args:
        client.put_object(Body=content, Bucket=bucket, Key=key, ContentType=extra_args['ContentType'])
else:
client.put_object(Body=content, Bucket=bucket, Key=key)
def delete(bucket, key):
validate_bucket_name(bucket)
validate_key_name(key)
client = get_client()
client.delete_object(Bucket=bucket, Key=key)
def modify_metadata():
from api.models import S3Upload
client = get_client()
for s3_upload in S3Upload.objects.filter(folder='uploads/pod').filter(id__gte=34783).order_by('-id'):
try:
s3_obj = client.get_object(Bucket=s3_upload.bucket, Key=s3_upload.key())
client.put_object(Body=s3_obj['Body'].read(), Bucket=s3_upload.bucket, Key=s3_upload.key(),
ContentType='image/jpeg')
print(s3_upload.id)
except:
print(s3_upload.filename)
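# A quick, illustrative sanity check of the validators above. This block is a
# sketch and not part of the original module; the bucket and key names are made up.
if __name__ == '__main__':
    validate_bucket_name('my-example-bucket')      # silently passes
    validate_key_name('uploads/pod/example.jpg')   # silently passes
    try:
        validate_bucket_name('Invalid--Bucket')    # uppercase letters and '--' are rejected
    except S3Error as error:
        print(error)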
|
the-stack_0_14830 | """Declarative scaffolding for frameworks"""
import collections
import uuid
import warnings
__all__ = ["ModelMetaclass", "Field", "TypeDefinition",
"TypeEngine", "DeclareException"]
__version__ = "0.9.12"
missing = object()
# These engines can't be cleared
_fixed_engines = collections.ChainMap()
class DeclareException(Exception):
"""
Custom exception for cases where raising a built-in exception
would be ambiguous (whether it was thrown by declare or a bound function)
"""
pass
class TypeEngineMeta(type):
"""
Factory for :class:`~TypeEngine` so that each engine is init'd only once.
This is necessary since if :meth:`~TypeEngine.__new__` returns an instance
of the class, the :meth:`~TypeEngine.__init__` method will be called.
"""
engines = _fixed_engines.new_child()
def __call__(cls, namespace, *args, **kwargs):
engine = TypeEngineMeta.engines.get(namespace)
if engine is None:
engine = cls.__new__(cls)
TypeEngineMeta.engines[namespace] = engine
cls.__init__(engine, namespace, *args, **kwargs)
return engine
@classmethod
def clear_engines(metaclass):
"""Clear all non-fixed engines"""
metaclass.engines.clear()
class TypeEngine(object, metaclass=TypeEngineMeta):
"""
Collection of bound :class:`~TypeDefinition` for a given namespace.
TypeEngines are unique by namespace::
assert TypeEngine("foo") is TypeEngine("foo")
This makes it easier for groups of components to use a single engine to
translate values by type. By default :meth:`~TypeEngine.load` and
:meth:`~TypeEngine.dump` require a reference to the typedef used to convert
values. A custom Engine could use the :class:`~TypeDefinition` attributes
``python_type`` and ``backing_type`` to find the correct typedef from the
set of available typedefs and automatically convert to the necessary
format.
"""
def __init__(self, namespace="global", *args, **kwargs):
self.namespace = namespace
self.unbound_types = set()
self.bound_types = {}
@classmethod
def unique(cls):
"""Return a unique type engine (using uuid4)"""
namespace = str(uuid.uuid4())
return TypeEngine(namespace)
def register(self, typedef):
"""
Add the typedef to this engine if it is compatible.
After registering a :class:`~TypeDefinition`, it will not be bound
until :meth:`~TypeEngine.bind` is next called.
Nothing will happen when register is called with a typedef that is
pending binding or already bound. Otherwise, the engine will ensure it
is compatible with the type using :meth:`~TypeEngine.is_compatible`
before adding it to the set of unbound types.
Parameters
----------
typedef : :class:`~TypeDefinition`
The typedef to register with this engine
Raises
------
exc : :class:`ValueError`
If :meth:`~TypeEngine.is_compatible` is falsey
"""
if typedef in self.bound_types:
return
if not self.is_compatible(typedef):
raise ValueError("Incompatible type {} for engine {}".format(
typedef, self))
if typedef not in self.unbound_types:
self.unbound_types.add(typedef)
typedef._register(self)
def bind(self, **config):
"""
Bind all unbound types to the engine.
Bind each unbound typedef to the engine, passing in the engine and
:attr:`config`. The resulting ``load`` and ``dump`` functions can
be found under ``self.bound_types[typedef]["load"]`` and
``self.bound_types[typedef]["dump"], respectively.
Parameters
----------
config : dict, optional
Engine-binding configuration to pass to each typedef that will be
bound. Examples include floating-point precision values, maximum
lengths for strings, or any other translation constraints/settings
that a typedef needs to construct a load/dump function pair.
"""
while self.unbound_types:
typedef = self.unbound_types.pop()
try:
load, dump = typedef.bind(self, **config)
self.bound_types[typedef] = {
"load": load, "dump": dump
}
except Exception:
self.unbound_types.add(typedef)
raise
def load(self, typedef, value, **kwargs):
"""
Return the result of the bound load method for a typedef
Looks up the load function that was bound to the engine for a typedef,
and return the result of passing the given `value` and any `context`
to that function.
Parameters
----------
typedef : :class:`~TypeDefinition`
The typedef whose bound load method should be used
value : object
The value to be passed into the bound load method
**kwargs : kwargs
Context for the value being loaded
Returns
-------
loaded_value : object
The return value of the load function for the input value
Raises
------
exc : :class:`KeyError`
If the input typedef is not bound to this engine
Example
-------
.. code-block:: python
class Account(TypeDefinition):
prefix = "::account"
def load(self, value, **context):
return value + Account.prefix
def dump(self, value, **context):
return value[:-len(Account.prefix)]
typedef = Account()
engine = TypeEngine("accounts")
engine.register(typedef)
engine.bind()
assert engine.dump(typedef, "Jill::account") == "Jill"
"""
try:
bound_type = self.bound_types[typedef]
except KeyError:
raise DeclareException(
"Can't load unknown type {}".format(typedef))
else:
# Don't need to try/catch since load/dump are bound together
return bound_type["load"](value, **kwargs)
def dump(self, typedef, value, **kwargs):
"""
Return the result of the bound dump method for a typedef
Looks up the dump function that was bound to the engine for a typedef,
and return the result of passing the given `value` and any `context`
to that function.
Parameters
----------
typedef : :class:`~TypeDefinition`
The typedef whose bound dump method should be used
value : object
The value to be passed into the bound dump method
**kwargs : kwargs
Context for the value being dumped
Returns
-------
dumped_value : object
The return value of the dump function for the input value
Raises
------
exc : :class:`KeyError`
If the input typedef is not bound to this engine
Example
-------
.. code-block:: python
class Account(TypeDefinition):
prefix = "::account"
def load(self, value, context):
return value + Account.prefix
def dump(self, value, context):
return value[:-len(Account.prefix)]
typedef = Account()
engine = TypeEngine("accounts")
engine.register(typedef)
engine.bind()
assert engine.load(typedef, "Jill") == "Jill::account"
"""
try:
bound_type = self.bound_types[typedef]
except KeyError:
raise DeclareException(
"Can't dump unknown type {}".format(typedef))
else:
# Don't need to try/catch since load/dump are bound together
return bound_type["dump"](value, **kwargs)
def is_compatible(self, typedef): # pragma: no cover
"""
Returns ``true`` if the typedef is compatible with this engine.
This function should return ``False`` otherwise. The default
implementation will always return ``True``.
"""
return True
def __contains__(self, typedef):
return typedef in self.bound_types
_fixed_engines["global"] = TypeEngine("global")
class TypeDefinition:
"""
Translates between python types and backend/storage/transport types
A single TypeDefinition can be used for multiple TypeEngines, by
implementing :meth:`~TypeDefinition.bind` and returning different
(load, dump) function tuples for each engine.
For TypeDefinitions that are loaded/dumped the same for every engine,
just implement :meth:`~TypeDefinition._load` and
:meth:`~TypeDefinition._dump`.
"""
python_type = None
backing_type = None
def bind(self, engine, **config):
"""
Return a pair of (load, dump) functions for a specific engine.
Some Types will load and dump values depending on certain config, or
for different :class:`~TypeEngine`.
By default, this function will return the functions
        :meth:`~TypeDefinition._load` and :meth:`~TypeDefinition._dump`.
        The default :meth:`~TypeDefinition._load` and
        :meth:`~TypeDefinition._dump` functions simply return the input value.
Parameters
----------
engine : :class:`~TypeEngine`
The engine that will save these load, dump functions
config : dictionary
Optional configuration for creating the functions.
Returns
-------
(load, dump) : (func, func) tuple
Each function takes a value and context, and returns a single value
"""
return self._load, self._dump
def _register(self, engine):
"""Called when the type is registered with an engine."""
pass
def _load(self, value, **kwargs):
"""
Engine-agnostic load function. Implement this method for any
TypeDefinition whose load function does not depend on the TypeEngine
being used to load it.
NOTE: This will not be available at runtime -
TypeDefinitionMetaclass hides the reference at runtime to reduce the
chance of incorrectly using an engine-agnostic load method when the
TypeDefinition prefers an engine-specific load method.
By default, returns :attr:`value` unchanged.
"""
return value
def _dump(self, value, **kwargs):
"""
Engine-agnostic dump function. Implement this method for any
TypeDefinition whose dump function does not depend on the TypeEngine
being used to dump it.
NOTE: This will not be available at runtime -
TypeDefinitionMetaclass hides the reference at runtime to reduce the
chance of incorrectly using an engine-agnostic dump method when the
TypeDefinition prefers an engine-specific dump method.
By default, returns :attr:`value` unchanged.
"""
return value
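# Usage sketch (not part of the original module): a TypeDefinition that converts
# values the same way for every engine, so it only overrides ``_load``/``_dump``.
# The ``Celsius`` name and the "units" namespace are made up for illustration.
#
#   class Celsius(TypeDefinition):
#       python_type = float
#       backing_type = str
#
#       def _load(self, value, **kwargs):
#           return float(value)
#
#       def _dump(self, value, **kwargs):
#           return str(value)
#
#   engine = TypeEngine("units")
#   typedef = Celsius()
#   engine.register(typedef)
#   engine.bind()
#   assert engine.load(typedef, "21.5") == 21.5
#   assert engine.dump(typedef, 21.5) == "21.5"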
def subclassof(obj, classinfo):
"""Wrap issubclass to only return True/False"""
try:
return issubclass(obj, classinfo)
except TypeError:
return False
def instanceof(obj, classinfo):
"""Wrap isinstance to only return True/False"""
try:
return isinstance(obj, classinfo)
except TypeError: # pragma: no cover
# No coverage since we never call this without a class,
# type, or tuple of classes, types, or such typles.
return False
class Field:
def __init__(self, *, typedef=None, **kwargs):
self._model_name = None
if typedef is None:
self.typedef = typedef
else:
if subclassof(typedef, TypeDefinition):
typedef = typedef()
if instanceof(typedef, TypeDefinition):
self.typedef = typedef
else:
raise TypeError(("Expected {} to be None, instance of "
"TypeDefinition, or subclass of"
"TypeDefinition".format(typedef)))
super().__init__(**kwargs)
@property
def model_name(self):
"""Name of the model's attr that references self"""
return self._model_name
@model_name.setter
def model_name(self, value):
if self._model_name is not None:
raise AttributeError("{} model_name already set to '{}'".format(
self.__class__.__name__, self._model_name))
self._model_name = value
def set(self, obj, value):
if self._model_name is None:
raise AttributeError("Can't set field without binding to model")
obj.__dict__[self._model_name] = value
def get(self, obj):
if self._model_name is None:
raise AttributeError("Can't get field without binding to model")
try:
return obj.__dict__[self._model_name]
except KeyError:
raise AttributeError("'{}' has no attribute '{}'".format(
obj.__class__, self._model_name))
def delete(self, obj):
if self._model_name is None:
raise AttributeError("Can't delete field without binding to model")
try:
del obj.__dict__[self._model_name]
except KeyError:
raise AttributeError("'{}' has no attribute '{}'".format(
obj.__class__, self._model_name))
# Descriptor Protocol
# To override, use set, get, delete above
# https://docs.python.org/3.4/howto/descriptor.html
def __set__(self, obj, value):
self.set(obj, value)
def __get__(self, obj, type=None):
if obj is None:
return self
return self.get(obj)
def __delete__(self, obj):
self.delete(obj)
def index(objects, attr):
"""
Generate a mapping of a list of objects indexed by the given attr.
Parameters
----------
objects : :class:`list`, iterable
attr : string
The attribute to index the list of objects by
Returns
-------
dictionary : dict
keys are the value of each object's attr, and values are from objects
Example
-------
class Person(object):
def __init__(self, name, email, age):
self.name = name
self.email = email
self.age = age
people = [
Person('one', '[email protected]', 1),
Person('two', '[email protected]', 2),
Person('three', '[email protected]', 3)
]
by_email = index(people, 'email')
by_name = index(people, 'name')
assert by_name['one'] is people[0]
assert by_email['[email protected]'] is people[1]
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return {getattr(obj, attr): obj for obj in objects}
class ModelMetaclass(type, TypeDefinition):
"""
Track the order that ``Field`` attributes are declared, and
insert a Meta object (class) in the class
"""
@classmethod
def __prepare__(mcs, name, bases):
"""Returns an OrderedDict so attribute order is preserved"""
return collections.OrderedDict()
def __new__(mcs, name, bases, attrs):
"""Add a container class `Meta` to the class"""
Meta = attrs.get('Meta', missing)
if Meta is missing:
class Meta:
pass
attrs['Meta'] = Meta
if not isinstance(Meta, type):
raise TypeError("Expected `Meta` to be a class object")
cls = super().__new__(mcs, name, bases, attrs)
# Load and index fields by name
# ----------------------------------------------------------
fields = []
for name, attr in attrs.items():
if isinstance(attr, Field):
fields.append(attr)
# This will raise AttributeError if the field's
# name is already set
with warnings.catch_warnings():
warnings.simplefilter("ignore")
attr.model_name = name
Meta.fields_by_model_name = index(fields, 'model_name')
Meta.fields = fields
return cls
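# Usage sketch (not part of the original module): a minimal model built with
# ModelMetaclass, showing that Field attributes are collected in declaration
# order onto the generated ``Meta`` class. The names below are made up.
if __name__ == "__main__":
    class Person(metaclass=ModelMetaclass):
        name = Field()
        age = Field()
    assert [f.model_name for f in Person.Meta.fields] == ["name", "age"]
    person = Person()
    person.name = "Jill"
    assert person.name == "Jill"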
|
the-stack_0_14831 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RequestContext: context for requests that persist through all of nova."""
import copy
import uuid
from nova.openstack.common import local
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import policy
LOG = logging.getLogger(__name__)
def generate_request_id():
return 'req-' + str(uuid.uuid4())
class RequestContext(object):
"""Security context and request information.
Represents the user taking a given action within the system.
"""
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
roles=None, remote_address=None, timestamp=None,
request_id=None, auth_token=None, overwrite=True,
quota_class=None, user_name=None, project_name=None,
service_catalog=None, instance_lock_checked=False, **kwargs):
"""
:param read_deleted: 'no' indicates deleted records are hidden, 'yes'
indicates deleted records are visible, 'only' indicates that
*only* deleted records are visible.
:param overwrite: Set to False to ensure that the greenthread local
copy of the index is not overwritten.
:param kwargs: Extra arguments that might be present, but we ignore
because they possibly came in from older rpc messages.
"""
if kwargs:
LOG.warn(_('Arguments dropped when creating context: %s') %
str(kwargs))
self.user_id = user_id
self.project_id = project_id
self.roles = roles or []
self.is_admin = is_admin
if self.is_admin is None:
self.is_admin = policy.check_is_admin(self.roles)
self.read_deleted = read_deleted
self.remote_address = remote_address
if not timestamp:
timestamp = timeutils.utcnow()
if isinstance(timestamp, basestring):
timestamp = timeutils.parse_strtime(timestamp)
self.timestamp = timestamp
if not request_id:
request_id = generate_request_id()
self.request_id = request_id
self.auth_token = auth_token
self.service_catalog = service_catalog
self.instance_lock_checked = instance_lock_checked
# NOTE(markmc): this attribute is currently only used by the
# rs_limits turnstile pre-processor.
# See https://lists.launchpad.net/openstack/msg12200.html
self.quota_class = quota_class
self.user_name = user_name
self.project_name = project_name
if overwrite or not hasattr(local.store, 'context'):
self.update_store()
def _get_read_deleted(self):
return self._read_deleted
def _set_read_deleted(self, read_deleted):
if read_deleted not in ('no', 'yes', 'only'):
raise ValueError(_("read_deleted can only be one of 'no', "
"'yes' or 'only', not %r") % read_deleted)
self._read_deleted = read_deleted
def _del_read_deleted(self):
del self._read_deleted
read_deleted = property(_get_read_deleted, _set_read_deleted,
_del_read_deleted)
def update_store(self):
local.store.context = self
def to_dict(self):
return {'user_id': self.user_id,
'project_id': self.project_id,
'is_admin': self.is_admin,
'read_deleted': self.read_deleted,
'roles': self.roles,
'remote_address': self.remote_address,
'timestamp': timeutils.strtime(self.timestamp),
'request_id': self.request_id,
'auth_token': self.auth_token,
'quota_class': self.quota_class,
'user_name': self.user_name,
'service_catalog': self.service_catalog,
'project_name': self.project_name,
'instance_lock_checked': self.instance_lock_checked,
'tenant': self.tenant,
'user': self.user}
@classmethod
def from_dict(cls, values):
return cls(**values)
def elevated(self, read_deleted=None, overwrite=False):
"""Return a version of this context with admin flag set."""
context = copy.copy(self)
context.is_admin = True
if 'admin' not in context.roles:
context.roles.append('admin')
if read_deleted is not None:
context.read_deleted = read_deleted
return context
# NOTE(sirp): the openstack/common version of RequestContext uses
# tenant/user whereas the Nova version uses project_id/user_id. We need
# this shim in order to use context-aware code from openstack/common, like
# logging, until we make the switch to using openstack/common's version of
# RequestContext.
@property
def tenant(self):
return self.project_id
@property
def user(self):
return self.user_id
def get_admin_context(read_deleted="no"):
return RequestContext(user_id=None,
project_id=None,
is_admin=True,
read_deleted=read_deleted,
overwrite=False)
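# Usage sketch (illustrative only; the ids and roles below are made up):
#
#   ctxt = RequestContext(user_id='fake-user', project_id='fake-project',
#                         roles=['member'], is_admin=False)
#   admin_ctxt = ctxt.elevated()
#   assert admin_ctxt.is_admin and 'admin' in admin_ctxt.roles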
|
the-stack_0_14832 | #import dependencies
from bs4 import BeautifulSoup as bs
from splinter import Browser
import os
import pandas as pd
import time
import requests
import urllib
from urllib.request import urlopen, urlretrieve
from urllib.parse import urljoin
from urllib.parse import urlsplit
from splinter import Browser
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
import time
from bs4 import BeautifulSoup as yourVariable
#preparation steps:
import pymongo
#install flask
from flask import Flask, render_template
# setup mongo connection (MongoDB Compass to python)
conn = "mongodb://localhost:27017" # the default port for MongoDB
client = pymongo.MongoClient(conn) #to connect to Mongo database via db = client.name_of_database (it'll be created if absent)
# connect to mongo db and collection
db = client.hemispheresDB
collection = db.collection
### NASA Mars News
##Connecting to Mars Space News Site site
url_space = "https://spacenews.com/segment/news"
# Retrieve page with the requests module
response = requests.get(url_space)
def scrape():
from webdriver_manager.chrome import ChromeDriverManager
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=True)
# Create BeautifulSoup object; parse with 'lxml'
from bs4 import BeautifulSoup as bs
soup = bs(response.text, 'lxml')
mars_dict = {}
#find the latest articles, search for a title
results = soup.find_all('div', class_='article-item__top')
for result in results:
title = result.find('a', class_='title').text
# Extract title text, save it into variable
news_title = soup.title.text
mars_dict['a_title'] = news_title
paragraphs = soup.find_all("div", class_="article-meta")
for paragraph in paragraphs:
news_paragraph = paragraph.find('p', class_='post-excerpt').text
mars_dict['b_paragraph'] = news_paragraph
from webdriver_manager.chrome import ChromeDriverManager
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=True)
#Visit the url for the Featured Space Image site (https://spaceimages-mars.com), assign the url string to a variable
space_image = "https://spaceimages-mars.com"
browser.visit(space_image)
time.sleep(2)
from urllib.parse import urlsplit
base_url = "{0.scheme}://{0.netloc}/".format(urlsplit(space_image))
#get image url using BeautifulSoup
html_image = browser.html
soup = bs(html_image, "html.parser")
# Create BeautifulSoup object; parse with 'parser'
from bs4 import BeautifulSoup as bs
#get image url using BeautifulSoup
html_image = browser.html
soup = bs(html_image, 'html.parser')
soup = bs(urlopen(space_image))
for img in soup.find_all('img'):
featured_image_url = urljoin(space_image, img['src'])
file_name = img['src'].split('/')[-1]
urlretrieve(featured_image_url, file_name)
mars_dict['c_featured_image'] = featured_image_url
mars_dict['d_featured_image_name'] = file_name
### Mars Facts
url_facts = 'https://galaxyfacts-mars.com/'
time.sleep(2)
table = pd.read_html(url_facts)
facts_table = table[0]
facts_table.columns = ["Description", "Mars", "Earth"]
facts_table.set_index("Description", inplace=True)
mars_dict["e_Mars_data_table"] = facts_table.to_html()
### Mars Hemispheres
mars_hemispheres_list = []
#Visit the url for Mars Hemispheres site (https://marshemispheres.com/), assign the url string to a variable
hemisphere_images = "https://marshemispheres.com/"
browser.visit(hemisphere_images)
from urllib.parse import urlsplit
base_url = "{0.scheme}://{0.netloc}/".format(urlsplit(space_image))
#get image url using BeautifulSoup
html_image = browser.html
soup = bs(html_image, "html.parser")
# Create BeautifulSoup object; parse with 'parser'
from bs4 import BeautifulSoup as bs
#get image url using BeautifulSoup
html_image = browser.html
soup = bs(html_image, 'html.parser')
#Visit the url for Mars Hemispheres site (https://marshemispheres.com/), assign the url string to a variable
hemisphere_images = "https://marshemispheres.com/"
browser.visit(hemisphere_images)
mars_hemispheres_list = []
soup = bs(urlopen(hemisphere_images))
for i in range (4):
time.sleep(5) #to create a loop
# locate tag h3 (corresponding hemispheres)
images = browser.find_by_tag("h3")
# click on each image to get url
images[i].click()
# separate url
html = browser.html
soup = bs(html, "html.parser")
# search for HD image
url_hemisphere = soup.find("img", class_="wide-image")["src"]
# looking for image title
img_title = soup.find("h2",class_="title").text
# get image url
img_url = "https://marshemispheres.com/"+ url_hemisphere
# store the results into dictionary
dictionary={"title":img_title,"img_url":img_url}
# append the dictionary into mars hemisheres list
mars_hemispheres_list.append(dictionary)
browser.back()
mars_dict['f_Mars_hemispheres_list'] = mars_hemispheres_list
return mars_dict
|
the-stack_0_14834 | # -*- coding: utf-8 -*-
"""
flask-rstblog
~~~~~~~~~~~~~
:copyright: (c) 2011 by Christoph Heer.
:license: BSD, see LICENSE for more details.
"""
import os
from datetime import date
from jinja2 import FileSystemLoader
from flask import Flask, render_template
from flaskrst.modules import manager
from flaskrst.templating import inject_navigation
class Flask(Flask):
def create_global_jinja_loader(self):
template_path = os.path.join(self.config.get('SOURCE', ''),
"_templates")
builtin_templates = os.path.join(self.root_path, self.template_folder)
return FileSystemLoader([template_path, builtin_templates])
def create_app(source=None, config=None):
app = Flask("flaskrst")
# Set default config values
app.config.setdefault('MODULES', {})
app.config.setdefault('STYLESHEETS', [])
app.config.setdefault('FEEDS', [])
# Load config
if config:
app.config.from_pyfile(config)
config_loaded = True
# maybe there is a file declared by env
elif 'FLASK_RST_CONFIG' in os.environ:
app.config.from_envvar('FLASK_RST_CONFIG')
config_loaded = True
# no config loaded try again later after source setting
else:
config_loaded = False
# Set source path
if source:
app.config['SOURCE'] = source
elif 'FLASK_RST_SOURCE' in os.environ:
app.config['SOURCE'] = os.environ['FLASK_RST_SOURCE']
else:
# Use current working directory as source
app.config['SOURCE'] = os.getcwd()
# If no config already loaded than is a config maybe in source path
if not config_loaded:
config_path = os.path.join(app.config['SOURCE'], 'config.py')
app.config.from_pyfile(config_path, silent=True)
# Set path of static folder
if 'STATIC_FOLDER' in app.config:
app.static_folder = app.config['STATIC_FOLDER']
else:
# Is a static folder called _static in source path?
source_static_folder = os.path.join(app.config['SOURCE'], "_static")
if os.path.isdir(source_static_folder):
app.static_folder = source_static_folder
# Load flask-rst modules
manager.init_app(app)
manager.load_from_config()
# Add some jinja globals and context processors
app.jinja_env.globals['date'] = date
app.context_processor(inject_navigation)
@app.errorhandler(404)
def page_not_found(error):
return render_template('404.html'), 404
    return app
|
the-stack_0_14836 | from copy import deepcopy
from typing import Optional
from warnings import warn
import numpy
from catsim import cat
from catsim.simulation import Estimator, Selector
from sklearn.linear_model import LogisticRegression
def _fit_log_reg(
items,
administered_items,
response_vector,
use_discriminations=True,
# use_guess_slip=True,
log_reg=None,
):
if log_reg is None:
log_reg = LogisticRegression(C=float("inf"))
X = items[administered_items][:, 1, numpy.newaxis]
sample_weight = None
if use_discriminations:
sample_weight = items[administered_items, 0]
# if use_guess_slip:
# response_vector = [
# slip if resp else guess
# for resp, (guess, slip) in zip(
# response_vector, items[administered_items, 2:4]
# )
# ]
log_reg.fit(X, response_vector, sample_weight=sample_weight)
return log_reg
"""
def _set_log_reg(mean, scale):
coef = 1 / scale
_log_reg.intercept_ = -mean * coef
_log_reg.coef_ = coef
"""
def _log_reg_scale(log_reg):
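    # _fit_log_reg regresses response correctness on item difficulty, so the fitted
    # slope is typically negative (harder items are answered correctly less often);
    # -1 / coef then yields a positive spread ("scale") for the logistic curve.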
return -1 / log_reg.coef_[0, 0]
class LogisticEstimator(Estimator):
"""Estimator that uses a hill-climbing algorithm to maximize the likelihood function
:param precision: number of decimal points of precision
:param verbose: verbosity level of the maximization method
"""
def __init__(
self, use_discriminations=True # , return_scale=False, # , use_guess_slip=True
):
super().__init__()
self._use_discriminations = use_discriminations
# self._use_guess_slip = use_guess_slip
# self._return_scale = return_scale
def estimate(
self,
index: int = None,
items: numpy.ndarray = None,
administered_items: list = None,
response_vector: list = None,
est_theta: float = None,
**kwargs,
) -> float:
"""Returns the theta value that minimizes the negative log-likelihood function, given the current state of the
test for the given examinee.
:param index: index of the current examinee in the simulator
:param items: a matrix containing item parameters in the format that `catsim` understands
(see: :py:func:`catsim.cat.generate_item_bank`)
:param administered_items: a list containing the indexes of items that were already administered
:param response_vector: a boolean list containing the examinee's answers to the administered items
:param est_theta: a float containing the current estimated proficiency
:returns: the current :math:`\\hat\\theta`
"""
items, administered_items, response_vector, est_theta = self._prepare_args(
return_items=True,
return_response_vector=True,
return_est_theta=True,
index=index,
items=items,
administered_items=administered_items,
response_vector=response_vector,
est_theta=est_theta,
**kwargs,
)
assert items is not None
assert administered_items is not None
assert response_vector is not None
assert est_theta is not None
if len(set(response_vector)) == 1:
return cat.dodd(est_theta, items, response_vector[-1])
log_reg = _fit_log_reg(
items,
administered_items,
response_vector,
use_discriminations=self._use_discriminations,
# use_guess_slip=self._use_guess_slip,
)
# y = mx + c, max entropy when y = 0 => x = -c / m
theta = -log_reg.intercept_[0] / log_reg.coef_[0, 0]
return theta
# return theta, _log_reg_scale(log_reg)
def _all_future_scales(
log_reg, items, administered_items, response_vector, next_choice
):
res = numpy.zeros((items.shape[0],))
for item in items[:, 1].argsort():
log_reg = _fit_log_reg(
items,
administered_items + [item],
response_vector + [next_choice],
use_discriminations=True,
log_reg=log_reg,
)
scale = abs(_log_reg_scale(log_reg))
res[item] = scale
return res
class MinExpectedScaleSelector(Selector):
"""
Owens 1977,
"""
def select(
self,
index: int = None,
items: numpy.ndarray = None,
administered_items: list = None,
est_theta: float = None,
response_vector: list = None,
**kwargs,
) -> Optional[int]:
"""Returns the index of the next item to be administered.
:param index: the index of the current examinee in the simulator.
:param items: a matrix containing item parameters in the format that `catsim` understands
(see: :py:func:`catsim.cat.generate_item_bank`)
:param administered_items: a list containing the indexes of items that were already administered
:param est_theta: a float containing the current estimated proficiency
:returns: index of the next item to be applied or `None` if there are no more items in the item bank.
"""
items, administered_items, response_vector, est_theta = self._prepare_args(
return_items=True,
return_response_vector=True,
return_est_theta=True,
index=index,
items=items,
administered_items=administered_items,
response_vector=response_vector,
est_theta=est_theta,
**kwargs,
)
assert items is not None
assert administered_items is not None
assert response_vector is not None
assert est_theta is not None
def default():
# Fall back to max info
ordered_items = self._sort_by_info(items, est_theta)
valid_indexes = self._get_non_administered(
ordered_items, administered_items
)
return valid_indexes[0]
if len(administered_items) > 0 and len(set(response_vector)) >= 2:
log_reg = LogisticRegression(C=float("inf"), warm_start=True)
log_reg_before = _fit_log_reg(
items,
administered_items,
response_vector,
use_discriminations=True,
log_reg=log_reg,
)
if _log_reg_scale(log_reg_before) <= 0:
return default()
log_reg.tol = 0.05
neg_prob, pos_prob = log_reg_before.predict_proba(
items[:, 1, numpy.newaxis]
).T
else:
return default()
# TODO: Can instead use Dodd's like logic to find expected scale even when there is only one class
# min_theta = min(items[:, 1])
# max_theta = max(items[:, 1])
# _set_log_reg(
# est_theta, min(max_theta - est_theta, est_theta - min_theta)
# )
working_log_reg = deepcopy(log_reg)
false_scales = _all_future_scales(
working_log_reg, items, administered_items, response_vector, False
)
working_log_reg = deepcopy(log_reg)
true_scales = _all_future_scales(
working_log_reg, items, administered_items, response_vector, True
)
organized_items = [
x
for x in numpy.array(
[
pp * ts + np * fs
for np, pp, fs, ts in zip(
neg_prob, pos_prob, false_scales, true_scales
)
]
).argsort()
if x not in administered_items
]
if len(organized_items) == 0:
warn("There are no more items to apply.")
return None
return organized_items[0]
|
the-stack_0_14837 | # weather.py
'''
# Configuration
The weather module reads from the weather.yaml file stored in bobbit's
configuration directory and expects the following values:
default: This is the default zip code
'''
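# A minimal example of the weather.yaml described above (the value shown is just
# an illustration; any default zip code works):
#
#   default: 46556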
import logging
import re
import aiohttp.client_exceptions
# Metadata
NAME = 'weather'
ENABLE = True
USAGE = '''Usage: ![weather|forecast] <zipcode>
Given a zipcode, this returns the current weather or the daily forecast for
that location.
Examples:
> !weather # Default location
> !forecast 46556 # Specific zip code
'''
WEATHER_RX = r'^!weather\s*(?P<zipcode>\d{5})*$'
FORECAST_RX = r'^!forecast\s*(?P<zipcode>\d{5})*$'
# Constants
ZIPCODE = {
'#nd-cse' : 46556, # Notre Dame, IN
'#ndlug' : 46556, # Notre Dame, IN
'#lug' : 46556, # Notre Dame, IN
'#uwec-cs': 54702, # Eau Claire, WI
}
DEFAULT_ZIPCODE = None
WEATHER_GOV_URL = 'https://forecast.weather.gov'
# Functions
async def retrieve_weather_data(bot, zipcode):
url = WEATHER_GOV_URL + '/zipcity.php'
params = {
'inputstring': zipcode
}
async with bot.http_client.get(url, params=params) as response:
try:
text = await response.text()
xml_url = re.findall(r'<a href="(MapClick[^"]+dwml)"', text)[0]
json_url = WEATHER_GOV_URL + '/' + xml_url.replace('dwml', 'json')
logging.debug('JSON URL: %s', json_url)
except IndexError as e:
logging.warning('Unable to get weather data: %s', e)
return {}
async with bot.http_client.get(json_url) as response:
try:
return await response.json()
except aiohttp.client_exceptions.ContentTypeError:
logging.warning('Unable to get weather data: %s', response.text)
return {}
def get_location(data):
location = data['location']['areaDescription']
for prefix in re.findall(r'(\d+ Miles [ENSW]+)', location):
location = location.replace(prefix, '')
return location.strip()[:-3] + ", " + location.strip()[-2:]
# Commands
async def weather(bot, message, zipcode=None):
zipcode = zipcode or ZIPCODE.get(message.channel, DEFAULT_ZIPCODE)
data = await retrieve_weather_data(bot, zipcode)
if not data:
return message.with_body('No results')
location = get_location(data)
current = data['currentobservation']
return message.with_body(bot.client.format_text(
'{bold}Weather{bold} for {bold}{location}{bold}: {temp}°F, {weather}',
location = location,
temp = current['Temp'].strip(),
weather = current['Weather'].strip(),
))
async def forecast(bot, message, zipcode=None):
zipcode = zipcode or ZIPCODE.get(message.channel, DEFAULT_ZIPCODE)
data = await retrieve_weather_data(bot, zipcode)
if not data:
return message.with_body('No results')
location = get_location(data)
text = data['data']['text']
return message.with_body(bot.client.format_text(
'{bold}Forecast{bold} for {bold}{location}{bold}: {bold}Today{bold}: {today} {bold}Tonight{bold}: {tonight}',
location = location,
today = text[0].strip(),
tonight = text[1].strip(),
))
# Register
def register(bot):
global DEFAULT_ZIPCODE
config = bot.config.load_module_config('weather')
DEFAULT_ZIPCODE = config.get('default', ZIPCODE['#lug'])
return (
('command', WEATHER_RX , weather),
('command', FORECAST_RX, forecast),
)
# vim: set sts=4 sw=4 ts=8 expandtab ft=python:
|
the-stack_0_14841 | '''
The actual saltkey functional code
'''
# Import python modules
import os
import shutil
import sys
import logging
# Import salt modules
import salt.crypt
import salt.utils
import salt.utils.event
log = logging.getLogger(__name__)
class Key(object):
'''
The object that encapsulates saltkey actions
'''
def __init__(self, opts):
self.opts = opts
self.event = salt.utils.event.SaltEvent(opts['sock_dir'], 'master')
self.colors = salt.utils.get_colors(
not bool(self.opts.get('no_color', False))
)
def _keys(self, key_type, full_path=False):
'''
Safely return the names of the unaccepted keys, pass True to return
the full key paths. Returns a set.
'''
ret = set()
subdir = ''
if key_type == 'pre':
subdir = 'minions_pre'
elif key_type == 'rej':
subdir = 'minions_rejected'
elif key_type == 'acc':
subdir = 'minions'
dir_ = os.path.join(self.opts['pki_dir'], subdir)
if not os.path.isdir(dir_):
err = ('The {0} directory is not present, ensure that '
'the master server has been started').format(subdir)
self._log(err, level='error')
sys.exit(42)
keys = os.listdir(dir_)
if full_path:
for key in keys:
ret.add(os.path.join(dir_, key))
else:
ret = set(keys)
return ret
def _log(self, message, level=''):
if hasattr(log, level):
log_msg = getattr(log, level)
log_msg(message)
if not self.opts['quiet']:
print(message)
def _list_pre(self, header=True, printer=None):
'''
List the unaccepted keys
'''
if header == True:
self._log('{0}Unaccepted Keys:{1}'.format(
self.colors['LIGHT_RED'], self.colors['ENDC']
))
keys = self._keys('pre')
if printer is None:
for key in sorted(keys):
output = '{0}{1}{2}'.format(
self.colors['RED'],
key,
self.colors['ENDC']
)
self._log(output)
else:
printer(list(keys))
def _list_accepted(self, header=True, printer=None):
'''
List the accepted public keys
'''
if header == True:
self._log('{0}Accepted Keys:{1}'.format(
self.colors['LIGHT_GREEN'], self.colors['ENDC']
))
keys = self._keys('acc')
if printer is None:
for key in sorted(keys):
self._log('{0}{1}{2}'.format(
self.colors['GREEN'], key, self.colors['ENDC']
))
else:
printer(list(keys))
def _list_rejected(self, header=True, printer=None):
'''
List the unaccepted keys
'''
if header == True:
self._log('{0}Rejected:{1}'.format(
self.colors['LIGHT_BLUE'], self.colors['ENDC']
))
keys = self._keys('rej')
if printer is None:
for key in sorted(keys):
self._log('{0}{1}{2}'.format(
self.colors['BLUE'], key, self.colors['ENDC']
))
else:
printer(list(keys))
def _list(self, name):
'''
List keys
'''
printout = self._get_outputter()
if 'json_out' in self.opts and self.opts['json_out']:
printout.indent = 2
if name in ('pre', 'un', 'unaccept', 'unaccepted'):
self._list_pre(header=False, printer=printout)
elif name in ('acc', 'accept', 'accepted'):
self._list_accepted(header=False, printer=printout)
elif name in ('rej', 'reject', 'rejected'):
self._list_rejected(header=False, printer=printout)
elif name in ('all',):
if printout is not None:
keys = {
'rejected': list(self._keys('rej')),
'accepted': list(self._keys('acc')),
'unaccepted': list(self._keys('pre')),
}
printout(keys)
else:
self._list_pre(printer=printout)
self._list_accepted(printer=printout)
self._list_rejected(printer=printout)
else:
err = ('Unrecognized key type "{0}". Run with -h for options.'
).format(name)
self._log(err, level='error')
def _get_outputter(self):
get_outputter = salt.output.get_outputter
if self.opts['raw_out']:
printout = get_outputter('raw')
elif self.opts['json_out']:
printout = get_outputter('json')
elif self.opts['yaml_out']:
printout = get_outputter('yaml')
else:
printout = None # use default color output
return printout
def _print_key(self, name):
'''
Print out the specified public key
'''
keys = self._keys('pre', True).union(self._keys('acc', True))
for key in sorted(keys):
if key.endswith(name):
with open(key, 'r') as kfn:
self._log(kfn.read())
def _print_all(self):
'''
Print out the public keys, all of em'
'''
self._log('{0}Unaccepted keys:{1}'.format(
self.colors['LIGHT_RED'], self.colors['ENDC']
))
for key in sorted(self._keys('pre', True)):
self._log(' {0}{1}{2}'.format(
self.colors['RED'],
os.path.basename(key),
self.colors['ENDC']
))
with open(key, 'r') as kfn:
self._log(kfn.read())
self._log('{0}Accepted keys:{1}'.format(
self.colors['LIGHT_GREEN'], self.colors['ENDC']
))
for key in sorted(self._keys('acc', True)):
self._log(' {0}{1}{2}'.format(
self.colors['GREEN'],
os.path.basename(key),
self.colors['ENDC']
))
with open(key, 'r') as kfn:
self._log(kfn.read())
self._log('{0}Rejected keys:{1}'.format(
self.colors['LIGHT_BLUE'], self.colors['ENDC']
))
for key in sorted(self._keys('pre', True)):
self._log(' {0}{1}{2}'.format(
self.colors['BLUE'],
os.path.basename(key),
self.colors['ENDC']))
with open(key, 'r') as kfn:
self._log(kfn.read())
def _accept(self, key):
'''
Accept a specified host's public key
'''
(minions_accepted,
minions_pre,
minions_rejected) = self._check_minions_directories()
pre = os.listdir(minions_pre)
if key not in pre:
err = ('The key named {0} does not exist, please accept an '
'available key').format(key)
#log.error(err)
self._log(err, level='error')
sys.exit(43)
shutil.move(os.path.join(minions_pre, key),
os.path.join(minions_accepted, key))
eload = {'result': True,
'act': 'accept',
'id': key}
self.event.fire_event(eload, 'key')
self._log(
'Key for {0} accepted.'.format(key),
level='info'
)
def _accept_all(self):
'''
Accept all keys in pre
'''
(minions_accepted,
minions_pre,
minions_rejected) = self._check_minions_directories()
for key in os.listdir(minions_pre):
self._accept(key)
def _delete_key(self, delete=None):
'''
Delete a key
'''
(minions_accepted,
minions_pre,
minions_rejected) = self._check_minions_directories()
if delete is None:
delete = self.opts['delete']
pre = os.path.join(minions_pre, delete)
acc = os.path.join(minions_accepted, delete)
rej = os.path.join(minions_rejected, delete)
if os.path.exists(pre):
os.remove(pre)
self._log('Removed pending key {0}'.format(delete),
level='info')
if os.path.exists(acc):
os.remove(acc)
self._log('Removed accepted key {0}'.format(delete),
level='info')
if os.path.exists(rej):
os.remove(rej)
self._log('Removed rejected key {0}'.format(delete),
level='info')
def _delete_all(self):
'''
Delete all keys
'''
for dir in ("acc", "rej", "pre"):
for key in self._keys(dir):
self._delete_key(key)
def _reject(self, key):
'''
Reject a specified host's public key
'''
(minions_accepted,
minions_pre,
minions_rejected) = self._check_minions_directories()
pre = os.listdir(minions_pre)
if key not in pre:
err = ('The host named {0} is unavailable, please accept an '
'available key').format(key)
self._log(err, level='error')
sys.exit(43)
shutil.move(os.path.join(minions_pre, key),
os.path.join(minions_rejected, key))
self._log('{0} key rejected.'.format(key), level='info')
def _reject_all(self):
'''
Reject all keys in pre
'''
(minions_accepted,
minions_pre,
minions_rejected) = self._check_minions_directories()
for key in os.listdir(minions_pre):
self._reject(key)
def _check_minions_directories(self):
minions_accepted = os.path.join(self.opts['pki_dir'], 'minions')
minions_pre = os.path.join(self.opts['pki_dir'], 'minions_pre')
minions_rejected = os.path.join(self.opts['pki_dir'],
'minions_rejected')
for dir_ in [minions_accepted, minions_pre, minions_rejected]:
if not os.path.isdir(dir_):
err = ('The minions directory {0} is not present, ensure '
'that the master server has been started'.format(dir_))
self._log(err, level='error')
sys.exit(42)
return minions_accepted, minions_pre, minions_rejected
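
    # The three directories returned above correspond to the master's pki_dir
    # layout (illustrative):
    #   {pki_dir}/minions/           accepted minion public keys
    #   {pki_dir}/minions_pre/       pending keys awaiting acceptance
    #   {pki_dir}/minions_rejected/  rejected keys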
def run(self):
'''
Run the logic for saltkey
'''
if self.opts['gen_keys']:
salt.crypt.gen_keys(
self.opts['gen_keys_dir'],
self.opts['gen_keys'],
self.opts['keysize'])
return
if self.opts['list']:
self._list(self.opts['list'])
elif self.opts['list_all']:
self._list('all')
elif self.opts['print']:
self._print_key(self.opts['print'])
elif self.opts['print_all']:
self._print_all()
elif self.opts['accept']:
self._accept(self.opts['accept'])
elif self.opts['accept_all']:
self._accept_all()
elif self.opts['reject']:
self._reject(self.opts['reject'])
elif self.opts['reject_all']:
self._reject_all()
elif self.opts['delete']:
self._delete_key()
elif self.opts['delete_all']:
self._delete_all()
else:
self._list('all')
|
the-stack_0_14842 | # Copyright (c) AIRBUS and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import random
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import numpy as np
from skdecide.discrete_optimization.generic_tools.do_mutation import Mutation
from skdecide.discrete_optimization.generic_tools.ea.deap_wrappers import (
generic_mutate_wrapper,
)
from skdecide.discrete_optimization.generic_tools.ea.ga import (
DeapCrossover,
DeapMutation,
DeapSelection,
ObjectiveHandling,
)
from skdecide.discrete_optimization.generic_tools.result_storage.result_storage import (
ResultStorage,
)
class ParametersGa:
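    """Hyper-parameter bundle for a single-encoding genetic algorithm run.

    Groups the DEAP mutation/crossover/selection operators, the encoding name,
    the objective handling settings and the usual GA budgets (population size,
    maximum evaluations, rates) so they can be passed around as one object.
    """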
mutation: Union[Mutation, DeapMutation] = None
crossover: DeapCrossover = None
selection: DeapSelection = None
encoding: str = None
objective_handling: ObjectiveHandling = None
objectives: Union[str, List[str]] = None
objective_weights: List[float] = None
pop_size: int = None
max_evals: int = None
mut_rate: float = None
crossover_rate: float = None
tournament_size: float = None
deap_verbose: bool = False
def __init__(
self,
mutation,
crossover,
selection,
encoding,
objective_handling,
objectives,
objective_weights,
pop_size,
max_evals,
mut_rate,
crossover_rate,
tournament_size,
deap_verbose,
):
self.mutation = mutation
self.crossover = crossover
self.selection = selection
self.encoding = encoding
self.objective_handling = objective_handling
self.objectives = objectives
self.objective_weights = objective_weights
self.pop_size = pop_size
self.max_evals = max_evals
self.mut_rate = mut_rate
self.crossover_rate = crossover_rate
self.tournament_size = tournament_size
self.deap_verbose = deap_verbose
@staticmethod
def default_rcpsp():
return ParametersGa(
mutation=DeapMutation.MUT_SHUFFLE_INDEXES,
crossover=DeapCrossover.CX_PARTIALY_MATCHED,
selection=DeapSelection.SEL_TOURNAMENT,
encoding="rcpsp_permutation",
objective_handling=ObjectiveHandling.AGGREGATE,
objectives=["makespan"],
objective_weights=[-1],
pop_size=100,
max_evals=10000,
mut_rate=0.1,
crossover_rate=0.9,
tournament_size=5,
deap_verbose=False,
)
class ParametersAltGa:
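    """Hyper-parameter bundle for an alternating genetic algorithm.

    Unlike ParametersGa it holds one mutation, crossover and encoding per
    sub-GA, plus ``sub_evals``, the evaluation budget allotted to each
    encoding.
    """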
mutations: List[Union[Mutation, DeapMutation]] = None
crossovers: List[DeapCrossover] = None
selection: DeapSelection = None
encodings: List[str] = None
objective_handling: ObjectiveHandling = None
objectives: Union[str, List[str]] = None
objective_weights: List[float] = None
pop_size: int = None
max_evals: int = None
mut_rate: float = None
crossover_rate: float = None
tournament_size: float = None
deap_verbose: bool = False
sub_evals: List[int] = None
def __init__(
self,
mutations,
crossovers,
selection,
encodings,
objective_handling,
objectives,
objective_weights,
pop_size,
max_evals,
mut_rate,
crossover_rate,
tournament_size,
deap_verbose,
sub_evals,
):
self.mutations = mutations
self.crossovers = crossovers
self.selection = selection
self.encodings = encodings
self.objective_handling = objective_handling
self.objectives = objectives
self.objective_weights = objective_weights
self.pop_size = pop_size
self.max_evals = max_evals
self.mut_rate = mut_rate
self.crossover_rate = crossover_rate
self.tournament_size = tournament_size
self.deap_verbose = deap_verbose
self.sub_evals = sub_evals
@staticmethod
def default_mrcpsp():
return ParametersAltGa(
mutations=[DeapMutation.MUT_UNIFORM_INT, DeapMutation.MUT_SHUFFLE_INDEXES],
crossovers=[DeapCrossover.CX_ONE_POINT, DeapCrossover.CX_PARTIALY_MATCHED],
selection=DeapSelection.SEL_TOURNAMENT,
encodings=["rcpsp_modes_arrity_fix", "rcpsp_permutation"],
objective_handling=ObjectiveHandling.AGGREGATE,
objectives=["makespan"],
objective_weights=[-1],
pop_size=100,
max_evals=10000,
mut_rate=0.1,
crossover_rate=0.9,
tournament_size=5,
deap_verbose=False,
sub_evals=[1000, 1000],
)
@staticmethod
def default_msrcpsp():
return ParametersAltGa(
mutations=[
DeapMutation.MUT_UNIFORM_INT,
DeapMutation.MUT_SHUFFLE_INDEXES,
DeapMutation.MUT_SHUFFLE_INDEXES,
],
crossovers=[
DeapCrossover.CX_ONE_POINT,
DeapCrossover.CX_PARTIALY_MATCHED,
DeapCrossover.CX_PARTIALY_MATCHED,
],
selection=DeapSelection.SEL_TOURNAMENT,
# encodings=['modes_arrity_fix', 'priority_list_task', 'priority_worker_per_task_perm'],
encodings=[
"modes_arrity_fix_from_0",
"priority_list_task",
"priority_worker_per_task_perm",
],
objective_handling=ObjectiveHandling.AGGREGATE,
objectives=["makespan"],
objective_weights=[-1],
pop_size=100,
max_evals=10000,
mut_rate=0.1,
crossover_rate=0.9,
tournament_size=5,
deap_verbose=False,
sub_evals=[500, 500, 500],
)
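
# Usage sketch (illustrative only; the solver wiring below is an assumption and
# not part of this module): the factory methods above provide ready-made
# settings, e.g.
#
#     params = ParametersGa.default_rcpsp()
#     # ga_solver = Ga(problem=rcpsp_model,
#     #                encoding=params.encoding,
#     #                mutation=params.mutation,
#     #                crossover=params.crossover,
#     #                selection=params.selection,
#     #                pop_size=params.pop_size,
#     #                max_evals=params.max_evals)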
|
the-stack_0_14843 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure
short_description: create or terminate a virtual machine in azure
description:
    - Creates or terminates azure instances. When creating an instance, optionally waits for it to be 'running'.
version_added: "1.7"
options:
name:
description:
- name of the virtual machine and associated cloud service.
required: true
default: null
location:
description:
- the azure location to use (e.g. 'East US')
required: true
default: null
subscription_id:
description:
- azure subscription id. Overrides the AZURE_SUBSCRIPTION_ID environment variable.
required: false
default: null
management_cert_path:
description:
- path to an azure management certificate associated with the subscription id. Overrides the AZURE_CERT_PATH environment variable.
required: false
default: null
storage_account:
description:
- the azure storage account in which to store the data disks.
required: true
image:
description:
- system image for creating the virtual machine
(e.g., b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB)
required: true
default: null
role_size:
description:
      - azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6). Note that instances of type G and DS are not
        available in all regions (locations), so make sure the size and type you select are available in your chosen location.
required: false
default: Small
endpoints:
description:
- a comma-separated list of TCP ports to expose on the virtual machine (e.g., "22,80")
required: false
default: 22
user:
description:
- the unix username for the new virtual machine.
required: false
default: null
password:
description:
- the unix password for the new virtual machine.
required: false
default: null
ssh_cert_path:
description:
- path to an X509 certificate containing the public ssh key to install in the virtual machine.
See http://www.windowsazure.com/en-us/manage/linux/tutorials/intro-to-linux/ for more details.
- if this option is specified, password-based ssh authentication will be disabled.
required: false
default: null
virtual_network_name:
description:
- Name of virtual network.
required: false
default: null
hostname:
description:
- hostname to write /etc/hostname. Defaults to <name>.cloudapp.net.
required: false
default: null
wait:
description:
- wait for the instance to be in state 'running' before returning
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 600
aliases: []
wait_timeout_redirects:
description:
- how long before wait gives up for redirects, in seconds
default: 300
aliases: []
state:
description:
- create or terminate instances
required: false
default: 'present'
aliases: []
auto_updates:
description:
- Enable Auto Updates on Windows Machines
required: false
version_added: "2.0"
default: "no"
choices: [ "yes", "no" ]
enable_winrm:
description:
- Enable winrm on Windows Machines
required: false
version_added: "2.0"
default: "yes"
choices: [ "yes", "no" ]
os_type:
description:
      - The type of the OS that is getting provisioned
required: false
version_added: "2.0"
default: "linux"
choices: [ "windows", "linux" ]
requirements:
- "python >= 2.6"
- "azure >= 0.7.1"
author: "John Whitbeck (@jwhitbeck)"
'''
EXAMPLES = '''
# Note: None of these examples set subscription_id or management_cert_path
# It is assumed that their matching environment variables are set.
- name: Provision virtual machine example
azure:
name: my-virtual-machine
role_size: Small
image: b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB
location: East US
user: ubuntu
ssh_cert_path: /path/to/azure_x509_cert.pem
storage_account: my-storage-account
wait: True
state: present
delegate_to: localhost
- name: Terminate virtual machine example
azure:
name: my-virtual-machine
state: absent
delegate_to: localhost
- name: Create windows machine
azure:
    name: ben-Windows-23
hostname: win123
os_type: windows
enable_winrm: True
subscription_id: '{{ azure_sub_id }}'
management_cert_path: '{{ azure_cert_path }}'
role_size: Small
image: bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-v13.5
location: East Asia
password: xxx
storage_account: benooytes
user: admin
wait: True
state: present
virtual_network_name: '{{ vnet_name }}'
delegate_to: localhost
'''
import base64
import datetime
import os
import signal
import time
from urlparse import urlparse
from ansible.module_utils.facts import * # TimeoutError
AZURE_LOCATIONS = ['South Central US',
'Central US',
'East US 2',
'East US',
'West US',
'North Central US',
'North Europe',
'West Europe',
'East Asia',
'Southeast Asia',
'Japan West',
'Japan East',
'Brazil South']
AZURE_ROLE_SIZES = ['ExtraSmall',
'Small',
'Medium',
'Large',
'ExtraLarge',
'A5',
'A6',
'A7',
'A8',
'A9',
'Basic_A0',
'Basic_A1',
'Basic_A2',
'Basic_A3',
'Basic_A4',
'Standard_D1',
'Standard_D2',
'Standard_D3',
'Standard_D4',
'Standard_D11',
'Standard_D12',
'Standard_D13',
'Standard_D14',
'Standard_D1_v2',
'Standard_D2_v2',
'Standard_D3_v2',
'Standard_D4_v2',
'Standard_D5_v2',
'Standard_D11_v2',
'Standard_D12_v2',
'Standard_D13_v2',
'Standard_D14_v2',
'Standard_DS1',
'Standard_DS2',
'Standard_DS3',
'Standard_DS4',
'Standard_DS11',
'Standard_DS12',
'Standard_DS13',
'Standard_DS14',
'Standard_G1',
'Standard_G2',
'Standard_G3',
'Standard_G4',
'Standard_G5']
from distutils.version import LooseVersion
try:
import azure as windows_azure
if hasattr(windows_azure, '__version__') and LooseVersion(windows_azure.__version__) <= "0.11.1":
from azure import WindowsAzureError as AzureException
from azure import WindowsAzureMissingResourceError as AzureMissingException
else:
from azure.common import AzureException as AzureException
from azure.common import AzureMissingResourceHttpError as AzureMissingException
from azure.servicemanagement import (ServiceManagementService, OSVirtualHardDisk, SSH, PublicKeys,
PublicKey, LinuxConfigurationSet, ConfigurationSetInputEndpoints,
ConfigurationSetInputEndpoint, Listener, WindowsConfigurationSet)
HAS_AZURE = True
except ImportError:
HAS_AZURE = False
from types import MethodType
import json
def _wait_for_completion(azure, promise, wait_timeout, msg):
if not promise:
return
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time():
operation_result = azure.get_operation_status(promise.request_id)
time.sleep(5)
if operation_result.status == "Succeeded":
return
raise AzureException('Timed out waiting for async operation ' + msg + ' "' + str(promise.request_id) + '" to complete.')
def _delete_disks_when_detached(module, azure, wait_timeout, disk_names):
def _handle_timeout(signum, frame):
raise TimeoutError("Timeout reached while waiting for disks to become detached.")
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(wait_timeout)
try:
while len(disk_names) > 0:
for disk_name in disk_names:
disk = azure.get_disk(disk_name)
if disk.attached_to is None:
azure.delete_disk(disk.name, True)
disk_names.remove(disk_name)
except AzureException as e:
module.fail_json(msg="failed to get or delete disk %s, error was: %s" % (disk_name, str(e)))
finally:
signal.alarm(0)
def get_ssh_certificate_tokens(module, ssh_cert_path):
"""
Returns the sha1 fingerprint and a base64-encoded PKCS12 version of the certificate.
"""
# This returns a string such as SHA1 Fingerprint=88:60:0B:13:A9:14:47:DA:4E:19:10:7D:34:92:2B:DF:A1:7D:CA:FF
rc, stdout, stderr = module.run_command(['openssl', 'x509', '-in', ssh_cert_path, '-fingerprint', '-noout'])
if rc != 0:
module.fail_json(msg="failed to generate the key fingerprint, error was: %s" % stderr)
fingerprint = stdout.strip()[17:].replace(':', '')
rc, stdout, stderr = module.run_command(['openssl', 'pkcs12', '-export', '-in', ssh_cert_path, '-nokeys', '-password', 'pass:'])
if rc != 0:
module.fail_json(msg="failed to generate the pkcs12 signature from the certificate, error was: %s" % stderr)
pkcs12_base64 = base64.b64encode(stdout.strip())
return (fingerprint, pkcs12_base64)
def create_virtual_machine(module, azure):
"""
Create new virtual machine
module : AnsibleModule object
azure: authenticated azure ServiceManagementService object
Returns:
True if a new virtual machine and/or cloud service was created, false otherwise
"""
name = module.params.get('name')
os_type = module.params.get('os_type')
hostname = module.params.get('hostname') or name + ".cloudapp.net"
endpoints = module.params.get('endpoints').split(',')
ssh_cert_path = module.params.get('ssh_cert_path')
user = module.params.get('user')
password = module.params.get('password')
location = module.params.get('location')
role_size = module.params.get('role_size')
storage_account = module.params.get('storage_account')
image = module.params.get('image')
virtual_network_name = module.params.get('virtual_network_name')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
# Check if a deployment with the same name already exists
cloud_service_name_available = azure.check_hosted_service_name_availability(name)
if cloud_service_name_available.result:
# cloud service does not exist; create it
try:
result = azure.create_hosted_service(service_name=name, label=name, location=location)
_wait_for_completion(azure, result, wait_timeout, "create_hosted_service")
changed = True
except AzureException as e:
module.fail_json(msg="failed to create the new service, error was: %s" % str(e))
try:
# check to see if a vm with this name exists; if so, do nothing
azure.get_role(name, name, name)
except AzureMissingException:
# vm does not exist; create it
if os_type == 'linux':
# Create linux configuration
disable_ssh_password_authentication = not password
vm_config = LinuxConfigurationSet(hostname, user, password, disable_ssh_password_authentication)
else:
#Create Windows Config
vm_config = WindowsConfigurationSet(hostname, password, None, module.params.get('auto_updates'), None, user)
vm_config.domain_join = None
if module.params.get('enable_winrm'):
listener = Listener('Http')
vm_config.win_rm.listeners.listeners.append(listener)
else:
vm_config.win_rm = None
# Add ssh certificates if specified
if ssh_cert_path:
fingerprint, pkcs12_base64 = get_ssh_certificate_tokens(module, ssh_cert_path)
# Add certificate to cloud service
result = azure.add_service_certificate(name, pkcs12_base64, 'pfx', '')
_wait_for_completion(azure, result, wait_timeout, "add_service_certificate")
# Create ssh config
ssh_config = SSH()
ssh_config.public_keys = PublicKeys()
authorized_keys_path = u'/home/%s/.ssh/authorized_keys' % user
ssh_config.public_keys.public_keys.append(PublicKey(path=authorized_keys_path, fingerprint=fingerprint))
# Append ssh config to linux machine config
vm_config.ssh = ssh_config
# Create network configuration
network_config = ConfigurationSetInputEndpoints()
network_config.configuration_set_type = 'NetworkConfiguration'
network_config.subnet_names = []
network_config.public_ips = None
for port in endpoints:
network_config.input_endpoints.append(ConfigurationSetInputEndpoint(name='TCP-%s' % port,
protocol='TCP',
port=port,
local_port=port))
# First determine where to store disk
today = datetime.date.today().strftime('%Y-%m-%d')
disk_prefix = u'%s-%s' % (name, name)
media_link = u'http://%s.blob.core.windows.net/vhds/%s-%s.vhd' % (storage_account, disk_prefix, today)
# Create system hard disk
os_hd = OSVirtualHardDisk(image, media_link)
# Spin up virtual machine
try:
result = azure.create_virtual_machine_deployment(service_name=name,
deployment_name=name,
deployment_slot='production',
label=name,
role_name=name,
system_config=vm_config,
network_config=network_config,
os_virtual_hard_disk=os_hd,
role_size=role_size,
role_type='PersistentVMRole',
virtual_network_name=virtual_network_name)
_wait_for_completion(azure, result, wait_timeout, "create_virtual_machine_deployment")
changed = True
except AzureException as e:
module.fail_json(msg="failed to create the new virtual machine, error was: %s" % str(e))
try:
deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
return (changed, urlparse(deployment.url).hostname, deployment)
except AzureException as e:
module.fail_json(msg="failed to lookup the deployment information for %s, error was: %s" % (name, str(e)))
def terminate_virtual_machine(module, azure):
"""
Terminates a virtual machine
module : AnsibleModule object
azure: authenticated azure ServiceManagementService object
Returns:
True if a new virtual machine was deleted, false otherwise
"""
# Whether to wait for termination to complete before returning
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
name = module.params.get('name')
delete_empty_services = module.params.get('delete_empty_services')
changed = False
deployment = None
public_dns_name = None
disk_names = []
try:
deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
except AzureMissingException as e:
pass # no such deployment or service
except AzureException as e:
module.fail_json(msg="failed to find the deployment, error was: %s" % str(e))
# Delete deployment
if deployment:
changed = True
try:
# gather disk info
results = []
for role in deployment.role_list:
role_props = azure.get_role(name, deployment.name, role.role_name)
if role_props.os_virtual_hard_disk.disk_name not in disk_names:
disk_names.append(role_props.os_virtual_hard_disk.disk_name)
except AzureException as e:
module.fail_json(msg="failed to get the role %s, error was: %s" % (role.role_name, str(e)))
try:
result = azure.delete_deployment(name, deployment.name)
_wait_for_completion(azure, result, wait_timeout, "delete_deployment")
except AzureException as e:
module.fail_json(msg="failed to delete the deployment %s, error was: %s" % (deployment.name, str(e)))
# It's unclear when disks associated with terminated deployment get detached.
# Thus, until the wait_timeout is reached, we continue to delete disks as they
# become detached by polling the list of remaining disks and examining the state.
try:
            _delete_disks_when_detached(module, azure, wait_timeout, disk_names)
except (AzureException, TimeoutError) as e:
module.fail_json(msg=str(e))
try:
# Now that the vm is deleted, remove the cloud service
result = azure.delete_hosted_service(service_name=name)
_wait_for_completion(azure, result, wait_timeout, "delete_hosted_service")
except AzureException as e:
module.fail_json(msg="failed to delete the service %s, error was: %s" % (name, str(e)))
public_dns_name = urlparse(deployment.url).hostname
return changed, public_dns_name, deployment
def get_azure_creds(module):
# Check module args for credentials, then check environment vars
subscription_id = module.params.get('subscription_id')
if not subscription_id:
subscription_id = os.environ.get('AZURE_SUBSCRIPTION_ID', None)
if not subscription_id:
module.fail_json(msg="No subscription_id provided. Please set 'AZURE_SUBSCRIPTION_ID' or use the 'subscription_id' parameter")
management_cert_path = module.params.get('management_cert_path')
if not management_cert_path:
management_cert_path = os.environ.get('AZURE_CERT_PATH', None)
if not management_cert_path:
module.fail_json(msg="No management_cert_path provided. Please set 'AZURE_CERT_PATH' or use the 'management_cert_path' parameter")
return subscription_id, management_cert_path
def main():
module = AnsibleModule(
argument_spec=dict(
ssh_cert_path=dict(),
name=dict(),
hostname=dict(),
os_type=dict(default='linux', choices=['linux', 'windows']),
location=dict(choices=AZURE_LOCATIONS),
role_size=dict(choices=AZURE_ROLE_SIZES),
subscription_id=dict(no_log=True),
storage_account=dict(),
management_cert_path=dict(),
endpoints=dict(default='22'),
user=dict(),
password=dict(no_log=True),
image=dict(),
virtual_network_name=dict(default=None),
state=dict(default='present'),
wait=dict(type='bool', default=False),
wait_timeout=dict(default=600),
wait_timeout_redirects=dict(default=300),
auto_updates=dict(type='bool', default=False),
enable_winrm=dict(type='bool', default=True),
)
)
if not HAS_AZURE:
module.fail_json(msg='azure python module required for this module')
# create azure ServiceManagementService object
subscription_id, management_cert_path = get_azure_creds(module)
wait_timeout_redirects = int(module.params.get('wait_timeout_redirects'))
if hasattr(windows_azure, '__version__') and LooseVersion(windows_azure.__version__) <= "0.8.0":
# wrapper for handling redirects which the sdk <= 0.8.0 is not following
azure = Wrapper(ServiceManagementService(subscription_id, management_cert_path), wait_timeout_redirects)
else:
azure = ServiceManagementService(subscription_id, management_cert_path)
cloud_service_raw = None
if module.params.get('state') == 'absent':
(changed, public_dns_name, deployment) = terminate_virtual_machine(module, azure)
elif module.params.get('state') == 'present':
# Changed is always set to true when provisioning new instances
if not module.params.get('name'):
module.fail_json(msg='name parameter is required for new instance')
if not module.params.get('image'):
module.fail_json(msg='image parameter is required for new instance')
if not module.params.get('user'):
module.fail_json(msg='user parameter is required for new instance')
if not module.params.get('location'):
module.fail_json(msg='location parameter is required for new instance')
if not module.params.get('storage_account'):
module.fail_json(msg='storage_account parameter is required for new instance')
if not (module.params.get('password') or module.params.get('ssh_cert_path')):
module.fail_json(msg='password or ssh_cert_path parameter is required for new instance')
(changed, public_dns_name, deployment) = create_virtual_machine(module, azure)
module.exit_json(changed=changed, public_dns_name=public_dns_name, deployment=json.loads(json.dumps(deployment, default=lambda o: o.__dict__)))
class Wrapper(object):
def __init__(self, obj, wait_timeout):
self.other = obj
self.wait_timeout = wait_timeout
def __getattr__(self, name):
if hasattr(self.other, name):
func = getattr(self.other, name)
return lambda *args, **kwargs: self._wrap(func, args, kwargs)
raise AttributeError(name)
def _wrap(self, func, args, kwargs):
if isinstance(func, MethodType):
result = self._handle_temporary_redirects(lambda: func(*args, **kwargs))
else:
result = self._handle_temporary_redirects(lambda: func(self.other, *args, **kwargs))
return result
def _handle_temporary_redirects(self, f):
wait_timeout = time.time() + self.wait_timeout
while wait_timeout > time.time():
try:
return f()
except AzureException as e:
                if "temporary redirect" in str(e).lower():
                    # Azure sometimes answers with a temporary redirect; back off and retry.
                    time.sleep(5)
                else:
                    raise e
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
the-stack_0_14844 | import os
import keywords as kw
import mechanism_names as mn
import mechanism
from util import ParseUtil, make_readable_list_of_strings
# All parameters and their defaults.
PD = {kw.BEHAVIORS: set(), # set of (restricted) strings , REQ
kw.STIMULUS_ELEMENTS: set(), # set of (restricted) strings , REQ
kw.MECHANISM_NAME: '', # One of the available ones REQ
kw.START_V: 0, # Scalar or list of se->b:val or default:val ,
kw.START_VSS: 0, # Scalar or list of se->se:val or default:val ,
kw.ALPHA_V: 1, # -"- ,
kw.ALPHA_VSS: 1, # Scalar or list of se->se:val or default:val ,
kw.BETA: 1, # -"- ,
kw.MU: 0, # -"- ,
kw.DISCOUNT: 1, # Scalar
kw.TRACE: 0, # Scalar (number between 0 and 1)
kw.U: 0, # Scalar or list of se:val or default:val ,
kw.LAMBDA: 0, # Scalar or list of se:val or default:val ,
kw.START_W: 0, # -"- ,
kw.ALPHA_W: 1, # -"- ,
kw.BEHAVIOR_COST: 0, # Scalar or list of b:val or default:val ,
kw.RESPONSE_REQUIREMENTS: dict(), # List of b:se or b:(se1,se2,...) ,
kw.BIND_TRIALS: 'off', # on or off
kw.N_SUBJECTS: 1, # Positive integer
kw.TITLE: '', # String (,)
kw.SUBPLOTTITLE: '', # String (,)
kw.RUNLABEL: '', # String (restricted), for postrocessing only (,)
kw.SUBJECT: 'average', # average, all or zero-based index
kw.XSCALE: 'all', # all or s1->b1->s2->..., s=se1,se2,...
kw.XSCALE_MATCH: 'subset', # subset or exact
kw.EVAL_PHASES: 'all', # @post: all or list of phase labels ,
kw.CUMULATIVE: 'on', # on or off
kw.MATCH: 'subset', # subset or exact
kw.FILENAME: ''} # valid path REQ
def is_parameter_name(name):
return name in PD
def check_is_parameter_name(name):
if not is_parameter_name(name):
return f"Internal error: Invalid parameter name '{name}'."
return None
class Parameters():
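    """
    Holds the value of every simulation parameter (initialized to the defaults
    in PD) and parses parameter values given as strings, returning an error
    message (or None) from each parse method.
    """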
def __init__(self):
        # All parameters and their values
self.val = dict(PD)
# self.got = dict.fromkeys(PD, False)
def str_append(self, prop, v_str, variables, phases, all_run_labels, to_be_continued):
err = check_is_parameter_name(prop)
if err:
return err
if not self.is_csv(prop):
return f"Internal error: Parameter '{prop}' is not of type list."
return self.str_set(prop, v_str, variables, phases, all_run_labels,
to_be_continued, True)
def str_set(self, prop, v_str, variables, phases, all_run_labels, to_be_continued,
is_appending=False):
"""
Parses the specified value (as a string) of the specified parameter and sets the resulting
value. The input variables is a Variables object.
Returns error message if parsing failed.
"""
err = check_is_parameter_name(prop)
if err:
return err
# all_phase_labels = phases.labels_set()
if prop == kw.BEHAVIORS:
return self._parse_behaviors(v_str, variables, is_appending)
elif prop == kw.STIMULUS_ELEMENTS:
return self._parse_stimulus_elements(v_str, variables, is_appending)
elif prop == kw.MECHANISM_NAME:
return self._parse_mechanism_name(v_str)
elif prop in (kw.START_VSS, kw.ALPHA_VSS):
return self._parse_alphastart_vss(prop, v_str, variables, to_be_continued,
is_appending)
elif prop in (kw.START_W, kw.ALPHA_W, kw.U, kw.LAMBDA):
return self._parse_stimulus_values(prop, v_str, variables, to_be_continued,
is_appending)
elif prop in (kw.BETA, kw.MU, kw.START_V, kw.ALPHA_V):
return self._parse_stimulus_response_values(prop, v_str, variables,
to_be_continued, is_appending)
# Float
elif prop in (kw.DISCOUNT, kw.TRACE):
v, err = ParseUtil.evaluate(v_str, variables)
if err:
return err
if (v < 0) or (v > 1):
return f"Parameter '{prop}' must be a number >=0 and <=1."
self.val[prop] = v
return None
elif prop == kw.BEHAVIOR_COST:
return self._parse_behavior_cost(v_str, variables, to_be_continued, is_appending)
elif prop == kw.RESPONSE_REQUIREMENTS:
return self._parse_response_requirements(v_str, to_be_continued, is_appending)
# 'on' or 'off'
elif prop in (kw.BIND_TRIALS, kw.CUMULATIVE):
v_str_lower = v_str.lower()
if v_str_lower not in ('on', 'off'):
return "Parameter '{}' must be 'on' or 'off'.".format(prop)
self.val[prop] = v_str_lower
return None
# Positive integer
elif prop == kw.N_SUBJECTS:
v, err = ParseUtil.parse_posint(v_str, variables)
if err:
return err
if not v:
return "Parameter {} must be a positive integer.".format(kw.N_SUBJECTS)
self.val[kw.N_SUBJECTS] = v
return None
# Any nonempty (after strip) string
elif prop in (kw.TITLE, kw.SUBPLOTTITLE):
if to_be_continued: # Add the removed comma
v_str = v_str + ","
self.val[prop] = v_str
return None
# 'average', 'all' or 1-based index
elif prop == kw.SUBJECT:
return self._parse_subject(v_str, variables)
# 'all' or s1->b1->s2->..., s=se1,se2,...
elif prop == kw.XSCALE:
return self._parse_xscale(v_str, phases)
# 'subset' or 'exact'
elif prop in (kw.MATCH, kw.XSCALE_MATCH):
if v_str.lower() not in ('subset', 'exact'):
return "Parameter {} must be 'subset' or 'exact'.".format(prop)
self.val[prop] = v_str
return None
# 'all' or cs-list of phase labels
elif prop == kw.PHASES:
return self._parse_phases(v_str) # , all_phase_labels)
# String (@run-labels) (for postprocessing)
elif prop == kw.RUNLABEL:
if v_str not in all_run_labels:
return "Invalid @RUN-label {}".format(v_str)
self.val[kw.RUNLABEL] = v_str
return None
# Valid path to writable file
elif prop == kw.FILENAME:
filename = v_str
try:
file = open(filename, 'w', newline='')
except Exception as ex:
return str(ex)
finally:
file.close()
try:
os.remove(filename)
except FileNotFoundError:
pass
self.val[kw.FILENAME] = filename
return None
def make_mechanism_obj(self):
"""
        Returns a Mechanism object (None if error) and an error message (None if no error).
GA = 'ga'
SR = 'sr'
ES = 'es'
QL = 'ql'
AC = 'ac'
RW = 'rw'
MECHANISM_NAMES = (GA, SR, ES, QL, AC, RW)
"""
mechanism_name = self.val[kw.MECHANISM_NAME]
if not mechanism_name:
return None, "Parameter 'mechanism' is not specified."
self.scalar_expand()
if mechanism_name in mn.SR:
mechanism_obj = mechanism.StimulusResponse(self)
elif mechanism_name in mn.QL:
mechanism_obj = mechanism.Qlearning(self)
# elif mechanism_name == SARSA:
# mechanism_obj = LsMechanism.SARSA(**self.parameters)
elif mechanism_name in mn.ES:
mechanism_obj = mechanism.EXP_SARSA(self)
elif mechanism_name in mn.AC:
mechanism_obj = mechanism.ActorCritic(self)
elif mechanism_name in mn.GA:
mechanism_obj = mechanism.Enquist(self)
elif mechanism_name in mn.RW:
mechanism_obj = mechanism.OriginalRescorlaWagner(self)
else:
raise Exception(f"Internal error. Unknown mechanism {mechanism_name}.")
return mechanism_obj, None
def _parse_behaviors(self, behaviors_str, variables, is_appending):
"""
Parse the string behaviors_str with comma-separated behavior names and return the
        corresponding set of strings.
Example: "B1, B2,B123" returns {'B1', 'B2', 'B123'}
"""
if not is_appending:
self.val[kw.BEHAVIORS] = set()
behaviors_list = behaviors_str.split(',')
for b in behaviors_list:
b = b.strip()
if len(b) == 0:
return "Found empty behavior name."
if b in self.val[kw.BEHAVIORS]:
return f"The behavior name '{b}' occurs more than once."
if b in self.val[kw.STIMULUS_ELEMENTS]:
return f"The behavior name '{b}' is invalid, since it is a stimulus element."
if variables.contains(b):
return f"The behavior name '{b}' is invalid, since it is a variable name."
if not b.isidentifier():
return f"Behavior name '{b}' is not a valid identifier."
self.val[kw.BEHAVIORS].add(b)
return None # No error
def _parse_stimulus_elements(self, stimulus_elements_str, variables, is_appending):
"""
Parse the string stimulus_elements_str with comma-separated stimulus element names and
        return the corresponding set of strings.
Example: "E1, E2,E123" returns {'E1', 'E2', 'E123'}
"""
if not is_appending:
self.val[kw.STIMULUS_ELEMENTS] = set()
stimulus_elements_list = stimulus_elements_str.split(',')
for e in stimulus_elements_list:
e = e.strip()
if len(e) == 0:
return "Found empty stimulus element name."
if e in self.val[kw.STIMULUS_ELEMENTS]:
return f"The stimulus element name '{e}' occurs more than once."
if e in self.val[kw.BEHAVIORS]:
return f"The stimulus element name '{e}' is invalid, since it is a behavior name."
if variables.contains(e):
return f"The stimulus element name '{e}' is invalid, since it is a variable name."
if not e.isidentifier():
return f"Stimulus element name '{e}' is not a valid identifier."
self.val[kw.STIMULUS_ELEMENTS].add(e)
return None # No error
def _parse_mechanism_name(self, mechanism_name):
"""
        Parse the string mechanism_name with a mechanism name and return the corresponding string.
"""
mn_lower = mechanism_name.lower()
if mn_lower not in mn.MECHANISM_NAMES:
cs_valid_names = ', '.join(sorted(mn.MECHANISM_NAMES))
return "Invalid mechanism name '{}'. ".format(mechanism_name) + \
"Mechanism name must be one of the following: {}.".format(cs_valid_names)
self.val[kw.MECHANISM_NAME] = mn_lower
return None
def _parse_phases(self, v_str):
if v_str == 'all':
self.val[kw.PHASES] = v_str # list(all_phase_labels)
else:
phase_labels = ParseUtil.comma_split_strip(v_str)
for phase_label in phase_labels:
if len(phase_label) == 0:
return "Expected comma-separated list of phase labels, found {}".format(phase_labels)
# else:
# if phase_label not in all_phase_labels:
# return "Undefined phase label '{}'.".format(phase_label)
self.val[kw.PHASES] = phase_labels
return None
def _parse_behavior_cost(self, behavior_cost_str, variables, to_be_continued, is_appending):
if not self.val[kw.BEHAVIORS]:
return f"The parameter 'behaviors' must be assigned before the parameter '{kw.BEHAVIOR_COST}'."
# Create and populate the struct with None values
if not is_appending:
self.val[kw.BEHAVIOR_COST] = dict()
for e in self.val[kw.BEHAVIORS]:
self.val[kw.BEHAVIOR_COST][e] = None
self.val[kw.BEHAVIOR_COST][kw.DEFAULT] = None
single_c, _ = ParseUtil.evaluate(behavior_cost_str, variables)
if single_c is not None:
if is_appending:
return "A single value for '{}' cannot follow other values.".format(kw.BEHAVIOR_COST)
elif to_be_continued:
return "A single value for '{}' cannot be followed by other values.".format(kw.BEHAVIOR_COST)
else:
for key in self.val[kw.BEHAVIOR_COST]:
self.val[kw.BEHAVIOR_COST][key] = single_c
self.val[kw.BEHAVIOR_COST].pop(kw.DEFAULT)
else:
cs = ParseUtil.comma_split(behavior_cost_str)
cs = [x.strip() for x in cs]
for bc_str in cs: # bc_str is 'e:value' or 'default:value'
if bc_str.count(':') != 1:
return "Expected 'element:value' or 'default:value' in '{}', got '{}'.".format(kw.BEHAVIOR_COST, bc_str)
b, c_str = bc_str.split(':')
b = b.strip()
c_str = c_str.strip()
c, err = ParseUtil.evaluate(c_str, variables)
if err:
return f"Invalid value '{c_str}' for '{b}' in parameter '{kw.BEHAVIOR_COST}'."
if b == kw.DEFAULT:
if self.val[kw.BEHAVIOR_COST][kw.DEFAULT] is not None:
return "Default value for '{}' can only be stated once.".format(kw.BEHAVIOR_COST)
elif b not in self.val[kw.BEHAVIORS]:
return f"Error in parameter '{kw.BEHAVIOR_COST}': '{b}' is an invalid behavior name."
if self.val[kw.BEHAVIOR_COST][b] is not None:
return "Duplicate of {} in '{}'.".format(b, kw.BEHAVIOR_COST)
self.val[kw.BEHAVIOR_COST][b] = c
if not to_be_continued:
# Set the default value for non-set behaviors
err = self._set_default_values(kw.BEHAVIOR_COST)
if err:
return err
return None # No error
def _parse_stimulus_response_values(self, NAME, sr_str, variables, to_be_continued,
is_appending):
"""
Parse the string sr_str with a value for stimulus-response pairs.
Example: "S1->R1: 1.23, S2->R1:3.45, default:1" sets the parameter to
{('S1','R1'):1.23, ('S1','R2'):1, ('S2','R1'):3.45, ('S2','R2'):1}
under the assumption that
behaviors = {'R1', 'R2'} and
stimulus_elements = {'S1', 'S2'}
"""
if not self.val[kw.STIMULUS_ELEMENTS]:
return f"The parameter 'stimulus_elements' must be assigned before the parameter '{NAME}'."
if not self.val[kw.BEHAVIORS]:
return f"The parameter 'behaviors' must be assigned before the parameter '{NAME}'."
# Create and populate the struct with None values
if not is_appending:
self.val[NAME] = dict()
for e in self.val[kw.STIMULUS_ELEMENTS]:
for b in self.val[kw.BEHAVIORS]:
self.val[NAME][(e, b)] = None
self.val[NAME][kw.DEFAULT] = None
single_v, _ = ParseUtil.evaluate(sr_str, variables)
if single_v is not None:
if is_appending:
return f"A single value for '{NAME}' cannot follow other values."
elif to_be_continued:
return f"A single value for '{NAME}' cannot be followed by other values."
else:
for key in self.val[NAME]:
self.val[NAME][key] = single_v
self.val[NAME].pop(kw.DEFAULT)
else:
vs = ParseUtil.comma_split(sr_str)
vs = [x.strip() for x in vs]
for eb_v_str in vs: # eb_v_str is 'e->b:value' or 'default:value'
if eb_v_str.count(':') != 1:
return f"Expected 'x->y:value' or 'default:value' in '{NAME}', got '{eb_v_str}'."
eb, v_str = eb_v_str.split(':')
eb = eb.strip()
v_str = v_str.strip()
v, err = ParseUtil.evaluate(v_str, variables)
if err:
return f"Invalid value '{v_str}' for '{eb}' in parameter '{NAME}'."
if eb == kw.DEFAULT:
if self.val[NAME][kw.DEFAULT] is not None:
return f"Default value for '{NAME}' can only be stated once."
self.val[NAME][kw.DEFAULT] = v
elif eb.count('->') == 1:
e, b = eb.split('->')
if e not in self.val[kw.STIMULUS_ELEMENTS]:
return f"Error in parameter '{NAME}': '{e}' is an invalid stimulus element."
if b not in self.val[kw.BEHAVIORS]:
return f"Error in parameter '{NAME}': '{b}' is an invalid behavior name."
if self.val[NAME][(e, b)] is not None:
return f"Duplicate of {e}->{b} in '{NAME}'."
self.val[NAME][(e, b)] = v
else:
return f"Invalid string '{eb}' in parameter '{NAME}'."
if not to_be_continued:
# Set the default value for non-set stimulus-behavior pairs
err = self._set_default_values(NAME)
if err:
return err
return None # No error
def _parse_alphastart_vss(self, NAME, vss_str, variables, to_be_continued,
is_appending):
"""
Parse the string vss_str with a start_vss/alpha_vss specification.
Example: "S1->S2: 1.23, S2->S1:3.45, default:1" sets the parameter to
{('S1','S2'):1.23, ('S2','S1'):3.45, ('S1','S1'):1, ('S2','S2'):1}
under the assumption that stimulus_elements = {'S1', 'S2'}
"""
if not self.val[kw.STIMULUS_ELEMENTS]:
return f"The parameter 'stimulus_elements' must be assigned before the parameter '{NAME}'."
# Create and populate the struct with None values
if not is_appending:
self.val[NAME] = dict()
for e1 in self.val[kw.STIMULUS_ELEMENTS]:
for e2 in self.val[kw.STIMULUS_ELEMENTS]:
self.val[NAME][(e1, e2)] = None
self.val[NAME][kw.DEFAULT] = None
single_vss, _ = ParseUtil.evaluate(vss_str, variables)
if single_vss is not None:
if is_appending:
return f"A single value for '{NAME}' cannot follow other values."
elif to_be_continued:
return f"A single value for '{NAME}' cannot be followed by other values."
else:
for key in self.val[NAME]:
self.val[NAME][key] = single_vss
self.val[NAME].pop(kw.DEFAULT)
else:
vs = ParseUtil.comma_split(vss_str)
vs = [x.strip() for x in vs]
for ee_str in vs: # eb_v_str is 'e1->e2:value' or 'default:value'
if ee_str.count(':') != 1:
return f"Expected 'x->y:value' or 'default:value' in '{NAME}', got '{ee_str}'."
ee, v_str = ee_str.split(':')
ee = ee.strip()
v_str = v_str.strip()
v, err = ParseUtil.evaluate(v_str, variables)
if err:
return f"Invalid value '{v_str}' for '{ee}' in parameter '{NAME}'."
if ee == kw.DEFAULT:
if self.val[NAME][kw.DEFAULT] is not None:
return f"Default value for '{NAME}' can only be stated once."
self.val[NAME][kw.DEFAULT] = v
elif ee.count('->') == 1:
e1, e2 = ee.split('->')
if e1 not in self.val[kw.STIMULUS_ELEMENTS]:
return f"Error in parameter '{NAME}': '{e1}' is an invalid stimulus element."
if e2 not in self.val[kw.STIMULUS_ELEMENTS]:
return f"Error in parameter '{NAME}': '{e2}' is an invalid stimulus element."
if self.val[NAME][(e1, e2)] is not None:
return f"Duplicate of {e1}->{e2} in '{NAME}'."
self.val[NAME][(e1, e2)] = v
else:
return f"Invalid string '{ee}' in parameter '{NAME}'."
if not to_be_continued:
# Set the default value for non-set stimulus-stimulus pairs
err = self._set_default_values(NAME)
if err:
return err
return None # No error
def _parse_response_requirements(self, v_str, to_be_continued, is_appending):
if not self.val[kw.STIMULUS_ELEMENTS]:
return f"The parameter 'stimulus_elements' must be assigned before the parameter '{kw.RESPONSE_REQUIREMENTS}'."
if not self.val[kw.BEHAVIORS]:
return f"The parameter 'behaviors' must be assigned before the parameter '{kw.RESPONSE_REQUIREMENTS}'."
if not is_appending:
self.val[kw.RESPONSE_REQUIREMENTS] = dict()
for b in self.val[kw.BEHAVIORS]:
self.val[kw.RESPONSE_REQUIREMENTS][b] = None
rrs = ParseUtil.comma_split_sq(v_str)
for rr in rrs:
if rr.count(':') != 1:
return "Expected 'behavior:stimulus_element', got '{}'.".format(rr)
b, s = rr.split(':')
b = b.strip()
s = s.strip()
if len(b) == 0 or len(s) == 0:
return "Expected 'behavior:stimulus_element', got '{}'.".format(rr)
if b not in self.val[kw.BEHAVIORS]:
return "Unknown behavior name '{}'.".format(b)
if self.val[kw.RESPONSE_REQUIREMENTS][b] is not None:
return "Duplication of behavior '{}' in {}.".format(b, kw.RESPONSE_REQUIREMENTS)
if '[' in s or ']' in s:
if s.count('[') != 1 or s.count(']') != 1 or s[0] != '[' or s[-1] != ']':
return "Malformed expression '{}'.".format(s)
s = s[1:-1] # Strip the '['and the ']'
es = s.split(',')
for e in es:
e = e.strip()
if e not in self.val[kw.STIMULUS_ELEMENTS]:
return "Unknown stimulus element '{}'.".format(e)
self._response_requirements_add_element(b, e)
else:
if s not in self.val[kw.STIMULUS_ELEMENTS]:
return "Unknown stimulus element '{}'.".format(s)
self._response_requirements_add_element(b, s)
if not to_be_continued:
# For the unrestricted behaviors, add all stimulus elements
for b in self.val[kw.RESPONSE_REQUIREMENTS]:
if self.val[kw.RESPONSE_REQUIREMENTS][b] is None:
self.val[kw.RESPONSE_REQUIREMENTS][b] = set(self.val[kw.STIMULUS_ELEMENTS])
# Check that each stimulus element has at least one feasible response
stimulus_elements_in_rr = []
for stimulus_list in self.val[kw.RESPONSE_REQUIREMENTS].values():
stimulus_elements_in_rr.extend(stimulus_list)
if set(stimulus_elements_in_rr) != set(self.val[kw.STIMULUS_ELEMENTS]):
elements_without_response = set(self.val[kw.STIMULUS_ELEMENTS]) - set(stimulus_elements_in_rr)
elements_without_response = list(elements_without_response)
elements_without_response.sort() # To make error message testable
elements_without_response_str = make_readable_list_of_strings(elements_without_response)
err = f"Invalid {kw.RESPONSE_REQUIREMENTS}: "
if len(elements_without_response) == 1:
return err + f"Stimulus element {elements_without_response_str} has no possible responses."
else:
return err + f"Stimulus elements {elements_without_response_str} have no possible responses."
return None # No error
def _response_requirements_add_element(self, b, e):
if self.val[kw.RESPONSE_REQUIREMENTS][b] is None:
self.val[kw.RESPONSE_REQUIREMENTS][b] = {e}
else:
self.val[kw.RESPONSE_REQUIREMENTS][b].add(e)
def _parse_stimulus_values(self, NAME, stimulus_values, variables, to_be_continued,
is_appending):
if not self.val[kw.STIMULUS_ELEMENTS]:
return f"The parameter 'stimulus_elements' must be assigned before the parameter '{NAME}'."
# Create and populate the struct with None values
if not is_appending:
self.val[NAME] = dict()
for e in self.val[kw.STIMULUS_ELEMENTS]:
self.val[NAME][e] = None
self.val[NAME][kw.DEFAULT] = None
single_w, _ = ParseUtil.evaluate(stimulus_values, variables)
if single_w is not None:
if is_appending:
return "A single value for '{}' cannot follow other values.".format(NAME)
elif to_be_continued:
return "A single value for '{}' cannot be followed by other values.".format(NAME)
else:
for key in self.val[NAME]:
self.val[NAME][key] = single_w
self.val[NAME].pop(kw.DEFAULT)
else:
ws = ParseUtil.comma_split(stimulus_values)
ws = [x.strip() for x in ws]
for e_w_str in ws: # eb_w_str is 'e:value' or 'default:value'
if e_w_str.count(':') != 1:
return "Expected 'element:value' or 'default:value' in '{}', got '{}'.".format(NAME, e_w_str)
e, w_str = e_w_str.split(':')
e = e.strip()
w_str = w_str.strip()
w, err = ParseUtil.evaluate(w_str, variables)
if err:
return "Invalid value '{}' for '{}' in parameter '{}'.".format(w_str, e, NAME)
if e == kw.DEFAULT:
if self.val[NAME][kw.DEFAULT] is not None:
return "Default value for '{}' can only be stated once.".format(NAME)
elif e not in self.val[kw.STIMULUS_ELEMENTS]:
return f"Error in parameter '{NAME}': '{e}' is an invalid stimulus element."
if self.val[NAME][e] is not None:
return "Duplicate of {} in '{}'.".format(e, NAME)
self.val[NAME][e] = w
if not to_be_continued:
# Set the default value for non-set stimulus elements
err = self._set_default_values(NAME)
if err:
return err
return None # No error
def _parse_subject(self, v_str, variables):
err = f"Parameter {kw.SUBJECT} must be 'average', 'all', or a positive integer."
if v_str.lower() in ('average', 'all'):
self.val[kw.SUBJECT] = v_str.lower()
else:
v, interr = ParseUtil.parse_posint(v_str, variables)
if interr: # Parsing error
return err + " " + interr
if v is None: # Parsing worked, but negative integer
return err
self.val[kw.SUBJECT] = v - 1 # Zero-based index internally
return None
def _parse_xscale(self, xscale, phases):
if not self.val[kw.STIMULUS_ELEMENTS]:
return f"The parameter 'stimulus_elements' must be assigned before the parameter '{kw.XSCALE}'."
if not self.val[kw.BEHAVIORS] and self.val[kw.MECHANISM_NAME] not in mn.RW:
return f"The parameter 'behaviors' must be assigned before the parameter '{kw.XSCALE}'."
if phases.is_phase_label(xscale):
pass
elif xscale == 'all':
pass
else:
xscale, err = ParseUtil.parse_chain(xscale, self.val[kw.STIMULUS_ELEMENTS],
self.val[kw.BEHAVIORS])
if err:
return err
self.val[kw.XSCALE] = xscale
return None
def _set_default_values(self, NAME):
default_needed = False
for key in self.val[NAME]:
            if key != kw.DEFAULT and self.val[NAME][key] is None:
default_needed = True
break
if default_needed and self.val[NAME][kw.DEFAULT] is None:
return f"Missing default value for parameter '{NAME}'."
else:
for key in self.val[NAME]:
if self.val[NAME][key] is None:
self.val[NAME][key] = self.val[NAME][kw.DEFAULT]
self.val[NAME].pop(kw.DEFAULT)
def get(self, prop):
return self.val[prop]
def may_end_with_comma(self, prop):
return self.is_csv(prop) or prop in (kw.TITLE, kw.SUBPLOTTITLE, kw.RUNLABEL)
def is_csv(self, prop):
return prop in (kw.BEHAVIORS, kw.STIMULUS_ELEMENTS, kw.BETA, kw.MU, kw.LAMBDA, kw.START_V,
kw.START_VSS, kw.START_W, kw.ALPHA_V, kw.ALPHA_VSS, kw.ALPHA_W,
kw.BEHAVIOR_COST, kw.U, kw.RESPONSE_REQUIREMENTS, kw.PHASES)
def scalar_expand(self):
"""
Expand dict-parameters that are defined by scalar. If defined as dict, check that keys are
compatible with stimulus elements and behaviors.
"""
behaviors = self.val[kw.BEHAVIORS]
stimulus_elements = self.val[kw.STIMULUS_ELEMENTS]
# Check START_VSS and ALPHA_VSS
expected_ss_keys = set()
for stimulus_element1 in stimulus_elements:
for stimulus_element2 in stimulus_elements:
key = (stimulus_element1, stimulus_element2)
expected_ss_keys.add(key)
for param_name in [kw.START_VSS, kw.ALPHA_VSS]:
start_vss = self.val[param_name]
if type(start_vss) is dict:
if set(start_vss.keys()) != expected_ss_keys:
self._raise_match_err(param_name, kw.STIMULUS_ELEMENTS)
else: # scalar expand
self.val[param_name] = dict()
scalar = start_vss
for stimulus_element1 in stimulus_elements:
for stimulus_element2 in stimulus_elements:
key = (stimulus_element1, stimulus_element2)
self.val[param_name][key] = scalar
expected_sb_keys = set()
for stimulus_element in stimulus_elements:
for behavior in behaviors:
key = (stimulus_element, behavior)
expected_sb_keys.add(key)
# Check START_V
self._scalar_expand_element_behavior(kw.START_V, stimulus_elements, behaviors,
expected_sb_keys)
# Check ALPHA_V
self._scalar_expand_element_behavior(kw.ALPHA_V, stimulus_elements, behaviors,
expected_sb_keys)
# Check BETA
self._scalar_expand_element_behavior(kw.BETA, stimulus_elements, behaviors,
expected_sb_keys)
# Check MU
self._scalar_expand_element_behavior(kw.MU, stimulus_elements, behaviors,
expected_sb_keys)
expected_s_keys = set()
for stimulus_element in stimulus_elements:
expected_s_keys.add(stimulus_element)
# Check U
self._scalar_expand_element(kw.U, stimulus_elements, expected_s_keys)
# Check START_W
self._scalar_expand_element(kw.START_W, stimulus_elements, expected_s_keys)
# Check ALPHA_W
self._scalar_expand_element(kw.ALPHA_W, stimulus_elements, expected_s_keys)
# Check LAMBDA
self._scalar_expand_element(kw.LAMBDA, stimulus_elements, expected_s_keys)
# Check BEHAVIOR_COST
expected_b_keys = set()
for behavior in behaviors:
expected_b_keys.add(behavior)
behavior_cost = self.val[kw.BEHAVIOR_COST]
if type(behavior_cost) is dict:
if set(behavior_cost.keys()) != expected_b_keys:
self._raise_match_err(kw.BEHAVIOR_COST, kw.BEHAVIORS)
else: # scalar expand
self.val[kw.BEHAVIOR_COST] = dict()
scalar = behavior_cost
for behavior in behaviors:
self.val[kw.BEHAVIOR_COST][behavior] = scalar
def _scalar_expand_element_behavior(self, param_name, stimulus_elements, behaviors,
expected_sb_keys):
val = self.val[param_name]
if type(val) is dict:
if set(val.keys()) != expected_sb_keys:
self._raise_match_err(param_name, kw.STIMULUS_ELEMENTS, kw.BEHAVIORS)
else: # scalar expand
self.val[param_name] = dict()
scalar = val
for stimulus_element in stimulus_elements:
for behavior in behaviors:
key = (stimulus_element, behavior)
self.val[param_name][key] = scalar
def _scalar_expand_element(self, param_name, stimulus_elements, expected_s_keys):
val = self.val[param_name]
if type(val) is dict:
if set(val.keys()) != expected_s_keys:
self._raise_match_err(param_name, kw.STIMULUS_ELEMENTS)
else: # scalar expand
self.val[param_name] = dict()
scalar = val
for stimulus_element in stimulus_elements:
self.val[param_name][stimulus_element] = scalar
@staticmethod
def _raise_match_err(param1, param2, param3=None):
if param3:
err = f"The parameter '{param1}' does not match '{param2}' and '{param3}'."
else:
err = f"The parameter '{param1}' does not match '{param2}'."
raise Exception(err)
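
# Worked example of scalar expansion (sketch): with stimulus_elements = {'s1', 's2'}
# and behaviors = {'b'}, a scalar setting alpha_v = 0.5 is expanded by
# scalar_expand() into {('s1', 'b'): 0.5, ('s2', 'b'): 0.5}; a dict value must
# instead supply exactly those stimulus-behavior keys.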
|
the-stack_0_14845 | import socket
import threading
import codecs
from scapy.all import *
contentTable = ['porn', 'guns', 'torrent', 'skype']
firstIface = 'eth0'
firstIfaceFlows = ['52:54:00:42:84:65']
secondIface = 'eth1'
secondIfaceFlows = ['52:54:00:a1:54:c0']
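
# Overview: this script bridges traffic between firstIface and secondIface at
# layer 2. Frames whose source MAC is in firstIfaceFlows are inspected; HTTP
# requests (TCP port 80) containing any keyword from contentTable get that
# keyword replaced with '*' before being forwarded out secondIface. Frames from
# secondIfaceFlows are forwarded back to firstIface unchanged.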
def inOutServer():
global contentTable
global firstIface
global secondIface
inSocket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(3))
inSocket.bind((firstIface, 0))
outSocket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
outSocket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1514)
outSocket.bind((secondIface, 0))
for index in range(len(contentTable)):
contentTable[index] = contentTable[index].encode()
while True:
pkt = inSocket.recvfrom(1514)
try:
et = Ether(bytes(pkt[0]))
if not et.src in firstIfaceFlows:
continue
except:
continue
if TCP in et and Raw in et:
if et[IP][TCP].dport == 80:
data = et[Raw].load
for content in contentTable:
if not content in data:
continue
index = data.find(content)
et[Raw].load = data[:index] + b'*' + data[index+len(content):]
del et[TCP].chksum
del et[IP].ihl
del et[IP].len
del et[IP].chksum
et.show2(dump=True)
break
outSocket.send(bytes(et))
def outInServer():
global firstIface
global secondIface
inSocket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(3))
inSocket.bind((secondIface, 0))
outSocket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
outSocket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1514)
outSocket.bind((firstIface, 0))
while True:
pkt = inSocket.recvfrom(1514)
try:
et = Ether(bytes(pkt[0]))
if not et.src in secondIfaceFlows:
continue
except:
continue
outSocket.send(bytes(et))
inOut = threading.Thread(target=inOutServer,args=())
outIn = threading.Thread(target=outInServer,args=())
outIn.start()
inOut.start()
inOut.join()
|
the-stack_0_14848 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class IvyResolveIntegrationTest(PantsRunIntegrationTest):
def test_ivy_resolve_gives_correct_exception_on_cycles(self):
with temporary_dir(root_dir=self.workdir_root()) as workdir:
pants_run = self.run_pants_with_workdir([
'compile', 'testprojects/src/java/org/pantsbuild/testproject/cycle1'], workdir)
self.assert_failure(pants_run)
self.assertIn('Cycle detected', pants_run.stderr_data)
def test_java_compile_with_ivy_report(self):
# Ensure the ivy report file gets generated
with temporary_dir(root_dir=self.workdir_root()) as workdir:
ivy_report_dir = '{workdir}/ivy-report'.format(workdir=workdir)
pants_run = self.run_pants_with_workdir([
'compile',
'testprojects/src/java/org/pantsbuild/testproject/unicode/main',
'--resolve-ivy-report',
'--resolve-ivy-outdir={reportdir}'.format(reportdir=ivy_report_dir)],
workdir)
self.assert_success(pants_run)
# Find the ivy report
found = False
      pattern = re.compile(r'internal-[a-f0-9]+-default\.html$')
for f in os.listdir(ivy_report_dir):
if os.path.isfile(os.path.join(ivy_report_dir, f)):
if pattern.match(f):
found = True
break
self.assertTrue(found,
msg="Couldn't find ivy report in {report_dir}"
.format(report_dir=ivy_report_dir))
def test_ivy_args(self):
pants_run = self.run_pants([
'resolve',
'--resolve-ivy-args=-blablabla',
'examples/src/scala::'
])
self.assert_failure(pants_run)
self.assertIn('Unrecognized option: -blablabla', pants_run.stdout_data)
def test_ivy_confs_success(self):
pants_run = self.run_pants([
'resolve',
'--resolve-ivy-confs=default',
'--resolve-ivy-confs=sources',
'--resolve-ivy-confs=javadoc',
'3rdparty:junit'
])
self.assert_success(pants_run)
def test_ivy_confs_failure(self):
pants_run = self.run_pants([
'resolve',
'--resolve-ivy-confs=parampampam',
'3rdparty:junit'
])
self.assert_failure(pants_run)
def test_ivy_confs_ini_failure(self):
pants_ini_config = {'resolve.ivy': {'confs': 'parampampam'}}
pants_run = self.run_pants([
'resolve',
'3rdparty:junit'
], config=pants_ini_config)
self.assert_failure(pants_run)
|
the-stack_0_14850 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# Copyright 2021 RT Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import rospy
from geometry_msgs.msg import Twist
from thouzer_msgs.msg import Vel
from std_srvs.srv import Trigger, TriggerResponse
class CmdVelTwist(object):
def __init__(self):
rospy.loginfo("cmd_vel remapper start")
        self._twist_sub = rospy.Subscriber('/cmd_vel', Twist, self.cmd_vel_callback, queue_size=1)
self._vel_pub = rospy.Publisher('/thouzer/vel', Vel, queue_size=1)
    def cmd_vel_callback(self, msg):
vel = Vel()
vel.v_mps = msg.linear.x
vel.w_degps = math.degrees(msg.angular.z)
print(vel)
self._vel_pub.publish(vel)
if __name__ == '__main__':
rospy.wait_for_service('/motor_on')
rospy.wait_for_service('/motor_off')
rospy.on_shutdown(rospy.ServiceProxy('/motor_off', Trigger).call)
rospy.ServiceProxy('/motor_on', Trigger).call()
rospy.init_node('thouzer_cmd_vel')
logicool_cmd_vel = CmdVelTwist()
rospy.spin()
|
the-stack_0_14856 | # qubit number=3
# total number=12
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.cx(input_qubit[0],input_qubit[2]) # number=9
prog.x(input_qubit[2]) # number=10
prog.cx(input_qubit[0],input_qubit[2]) # number=11
prog.x(input_qubit[2]) # number=6
prog.cx(input_qubit[1],input_qubit[0]) # number=7
prog.cx(input_qubit[1],input_qubit[0]) # number=8
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =3962
writefile = open("../data/startQiskit_QC87.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_5_yorktown")
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
the-stack_0_14858 | # Copyright 2019 The TensorTrade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import tensortrade.orders.create as create
from typing import Union, List, Tuple
from itertools import product
from gym.spaces import Discrete
from tensortrade.actions import ActionScheme
from tensortrade.orders import TradeSide, TradeType, Order, OrderListener, risk_managed_order
class ManagedRiskOrders(ActionScheme):
"""A discrete action scheme that determines actions based on managing risk,
through setting a follow-up stop loss and take profit on every order.
"""
def __init__(self,
stop_loss_percentages: Union[List[float], float] = [0.02, 0.04, 0.06],
take_profit_percentages: Union[List[float], float] = [0.01, 0.02, 0.03],
trade_sizes: Union[List[float], int] = 10,
trade_type: TradeType = TradeType.MARKET,
ttl_in_seconds: int = None,
ttl_in_steps: int = None,
order_listener: OrderListener = None):
"""
Arguments:
pairs: A list of trading pairs to select from when submitting an order.
(e.g. TradingPair(BTC, USD), TradingPair(ETH, BTC), etc.)
stop_loss_percentages: A list of possible stop loss percentages for each order.
take_profit_percentages: A list of possible take profit percentages for each order.
trade_sizes: A list of trade sizes to select from when submitting an order.
(e.g. '[1, 1/3]' = 100% or 33% of balance is tradable. '4' = 25%, 50%, 75%, or 100% of balance is tradable.)
order_listener (optional): An optional listener for order events executed by this action scheme.
"""
self.stop_loss_percentages = self.default('stop_loss_percentages', stop_loss_percentages)
self.take_profit_percentages = self.default(
'take_profit_percentages', take_profit_percentages)
self.trade_sizes = self.default('trade_sizes', trade_sizes)
self.trade_type = self.default('trade_type', trade_type)
self.ttl_in_seconds = self.default('ttl_in_seconds', ttl_in_seconds)
self.ttl_in_steps = self.default('ttl_in_steps', ttl_in_steps)
self._order_listener = self.default('order_listener', order_listener)
generator = product(self.stop_loss_percentages,
self.take_profit_percentages,
self.trade_sizes,
[TradeSide.BUY, TradeSide.SELL])
self.actions = list(generator)
@property
def action_space(self) -> Discrete:
"""The discrete action space produced by the action scheme."""
return Discrete(len(self.actions))
@property
def stop_loss_percentages(self) -> List[float]:
"""A list of order percentage losses to select a stop loss from when submitting an order.
(e.g. 0.01 = sell if price drops 1%, 0.15 = 15%, etc.)
"""
return self._stop_loss_percentages
@stop_loss_percentages.setter
def stop_loss_percentages(self, stop_loss_percentages: Union[List[float], float]):
self._stop_loss_percentages = stop_loss_percentages if isinstance(
stop_loss_percentages, list) else [stop_loss_percentages]
@property
def take_profit_percentages(self) -> List[float]:
"""A list of order percentage gains to select a take profit from when submitting an order.
(e.g. 0.01 = sell if price rises 1%, 0.15 = 15%, etc.)
"""
return self._take_profit_percentages
@take_profit_percentages.setter
def take_profit_percentages(self, take_profit_percentages: Union[List[float], float]):
self._take_profit_percentages = take_profit_percentages if isinstance(
take_profit_percentages, list) else [take_profit_percentages]
@property
def trade_sizes(self) -> List[float]:
"""A list of trade sizes to select from when submitting an order.
(e.g. '[1, 1/3]' = 100% or 33% of balance is tradable. '4' = 25%, 50%, 75%, or 100% of balance is tradable.)
"""
return self._trade_sizes
@trade_sizes.setter
def trade_sizes(self, trade_sizes: Union[List[float], int]):
self._trade_sizes = trade_sizes if isinstance(trade_sizes, list) else [
(x + 1) / trade_sizes for x in range(trade_sizes)]
def get_order(self, action: int, portfolio: 'Portfolio') -> Order:
if action == 0:
return None
((exchange, pair), (stop_loss, take_profit, size, side)) = self.actions[action]
price = exchange.quote_price(pair)
wallet_instrument = pair.base if side == TradeSide.BUY else pair.quote
wallet = portfolio.get_wallet(exchange.id, instrument=wallet_instrument)
size = (wallet.balance.size * size)
size = min(wallet.balance.size, size)
if size < 10 ** -pair.base.precision:
return None
params = {
'step': exchange.clock.step,
'side': side,
'pair': pair,
'price': price,
'size': size,
'down_percent': stop_loss,
'up_percent': take_profit,
'portfolio': portfolio,
'trade_type': self.trade_type,
'ttl_in_seconds': self.ttl_in_seconds,
'ttl_in_steps': self.ttl_in_steps,
}
order = risk_managed_order(**params)
if self._order_listener is not None:
            order.attach(self._order_listener)
        return order
def reset(self):
pass
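# Illustrative sketch (not part of the original module): the discrete action space is the
# cartesian product built in __init__. With the default constructor arguments this yields
# 3 stop losses x 3 take profits x 10 trade sizes x 2 sides = 180 actions, i.e. Discrete(180).
# The helper name below is hypothetical.
def _example_action_grid():
    stop_losses = [0.02, 0.04, 0.06]
    take_profits = [0.01, 0.02, 0.03]
    trade_sizes = [(x + 1) / 10 for x in range(10)]     # trade_sizes=10 -> 10%, 20%, ..., 100%
    sides = [TradeSide.BUY, TradeSide.SELL]
    return len(list(product(stop_losses, take_profits, trade_sizes, sides)))   # 180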
|
the-stack_0_14860 | import _plotly_utils.basevalidators
class HighlightcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="highlightcolor", parent_name="surface.contours.y", **kwargs
):
super(HighlightcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
the-stack_0_14866 | from pathlib import Path
import requests
from lxml import etree
headers = {
'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36"
}
meta_url = "https://pypi.org/"
search_url = "https://pypi.org/search/?q"
def init_save_path(save_path="."):
""" 初始化下载环境 """
source_path = Path(save_path)
save_path = source_path / 'whls'
if not save_path.exists():
save_path.mkdir()
return save_path
def load_whl_info(packages_path: str):
with open(packages_path, "r") as fr:
whl_info = fr.read()
return whl_info
def init_download_packages(whl_info: str):
""" 处理输入 """
need_packages = []
package_info = [i.strip() for i in whl_info.split("\n") if i.strip()]
whl_name = ""
version = ""
for i in package_info:
whl_name = i
if "==" in i:
whl_name, version = i.split("==")
need_packages.append((whl_name, version))
return need_packages
def pypi_spider(save_path, need_packages: list, error_package: list = []):
""" pypi镜像包爬虫
need_packages: 需要下载的包
error_package: 下载中出错的包
"""
for idx, package_info in enumerate(need_packages, 1):
search_content = package_info[0]
version = package_info[1]
        print('Package to download:', package_info)
response = requests.get(
f'{search_url}={search_content}', headers=headers)
html_str = response.content.decode('utf-8')
html = etree.HTML(html_str)
search_results = html.xpath(
'//*[@id="content"]/div/div/div[2]/form/div[3]/ul/*')
result_url = ''
for result in search_results:
result_href = result.xpath('./a/@href')[0]
result_name = result.xpath('./a/h3/span[1]')[0].text
result_version = result.xpath('./a/h3/span[2]')[0].text
if result_name == search_content:
result_url = f'{meta_url}{result_href}#files'
break
elif result_name == search_content.capitalize() and len(result_name) == len(search_content):
result_url = f'{meta_url}{result_href}#files'
break
elif '-' in search_content and search_content.replace('-', '_') == result_name and len(result_name) == len(search_content):
result_url = f'{meta_url}{result_href}#files'
break
if version:
result_url = f'{meta_url}{result_href}{version}/#files'
                print(f'Preparing to download {result_name} {version}')
else:
                print(f'Preparing to download {result_name} {result_version}')
if not result_url:
error_package.append(search_content)
continue
# get download url
response = requests.get(result_url, headers=headers)
result_html_str = response.content.decode('utf-8')
result_html = etree.HTML(result_html_str)
result_download_nodes = result_html.xpath(
'//*[@id="files"]/table/tbody/tr')
        win32_info = None # win32 wheel with the same version
for result_download in result_download_nodes:
file_type = result_download.xpath(
'./td[1]/text()')[1].replace(" ", '').replace('\n', '')
download_version = result_download.xpath(
'./td[2]/text()')[1].replace(" ", '').replace('\n', '')
download_href = result_download.xpath('./th/a/@href')[0]
whl_name = result_download.xpath(
'./th/a/text()')[0].replace(" ", '').replace('\n', '')
whl_size = result_download.xpath(
'./th/text()')[2].replace(" ", '').replace('\n', '')
            # decide which build of the wheel to download
            if download_version == 'cp37' and 'win32' in whl_name:
                win32_info = (whl_name, download_href)
            if download_version == 'py2.py3' and 'py2.py3-none-any' in whl_name: # universal py2/py3 wheel
                break
            elif download_version == 'cp37' and 'win_amd64' in whl_name: # cp37 win_amd64 wheel for Python 3.7
                # also download the win32 wheel of the same version, if one was found
                if win32_info:
                    print(f'win32 download link for {search_content}:', win32_info)
file_name = save_path / win32_info[0]
file_content = requests.get(win32_info[1], headers=headers)
with open(file_name.absolute(), 'wb') as f:
f.write(file_content.content)
break
            elif 'py3' in download_version or download_version == 'None': # fall back to a generic build
break
        # download the selected wheel
file_name = save_path / whl_name
file_content = requests.get(download_href, headers=headers)
with open(file_name.absolute(), 'wb') as f:
f.write(file_content.content)
        print(f'{search_content} {whl_size} version {download_version} type {file_type} -- downloaded successfully')
if len(need_packages) == idx:
            print('This run is finished')
if error_package:
                print('The following packages failed to download:')
for idx, error_ in enumerate(error_package, 1):
print(f'{idx}: {error_}')
return error_package
def show_help():
print("choose which source you need to download")
url_info = """
+++++++++COMMANDS++++++++++
1:\t\tpypi.org
2:\t\tdouban
3:\t\taliyun
+++++++++++++++++++++++++++
"""
print(url_info)
def main_loop():
packages_path = input(">>> input packages path: ").strip()
if not packages_path:
print("not found")
return
whl_info = load_whl_info(packages_path)
need_packages = init_download_packages(whl_info)
input_path = input(">>> input save path: ").strip()
input_path = "." if not input_path else input_path
save_path = init_save_path(input_path)
show_help()
choose = input(">>> ")
if choose == "1":
pypi_spider(save_path, need_packages)
if __name__ == "__main__":
main_loop()
|
the-stack_0_14867 | from seleniumbase import BaseCase
from ..page_objects.main_page import MainPage as PageObjects
class HappyPathTest(BaseCase):
def common_actions(self):
        # Fill the amount to invest with 20,00
self.update_text(PageObjects.input_valor_aplicar, '20,00')
        # Fill the amount you want to save with 20,00
self.update_text(PageObjects.input_valor_investir, '20,00')
        # For how long you want to save: 20
self.update_text(PageObjects.input_quanto_tempo, '20')
self.click(PageObjects.btn_simular)
self.assert_element(PageObjects.table)
self.click(PageObjects.btn_repeat_simulation)
def test_happy_path_para_voce(self):
self.open(PageObjects.url)
self.common_actions()
        # Test the business ("empresa") flow
self.click(PageObjects.radio_btn_empresa)
self.common_actions()
|
the-stack_0_14868 |
import sys
import csv_data
import my_perceptron
# TWEAKING VARIABLES
max_perceptron_iterations = 100
def printResults( data_name, result_unrounded ):
print( "RESULTS FOR", data_name.upper() )
print( "{:.2f}% correct prediction on {}\n".format( round( result_unrounded, 2 ), data_name.lower() ) )
def main( argv ):
if len( argv ) != 3:
print( "Usage: \"python3 perceptron.py <train> <test> <model>\"" ); exit()
# Read Data
Training_Data = csv_data.Data( argv[ 0 ] )
Testing_Data = csv_data.Data( argv[ 1 ] )
# Create Perceptron
perceptron = my_perceptron.Perceptron()
print( "\n\nPredictions results with", max_perceptron_iterations, "iterations of learning:\n" )
perceptron.perceptronTrain( Training_Data, max_perceptron_iterations )
resultsPercentage = perceptron.perceptronPredictionResults( Training_Data )
printResults( "Training Data", resultsPercentage )
resultsPercentage = perceptron.perceptronPredictionResultsAndPrintActivations( Testing_Data )
printResults( "Testing Data", resultsPercentage )
perceptron.outputModelToFile( argv[ 2 ] )
print( "Weights and bias recorded for", max_perceptron_iterations, "iterations in", argv[ 2 ], "\n" );
print( "\n\nAlso predictiong results with 50 iterations of learning because I get better percentages:\n" )
perceptron.perceptronTrain( Training_Data, 50 )
resultsPercentage = perceptron.perceptronPredictionResults( Training_Data )
printResults( "Training Data", resultsPercentage )
resultsPercentage = perceptron.perceptronPredictionResults( Testing_Data )
printResults( "Testing Data", resultsPercentage )
print( "Weights and bias of output file will still reflect the", max_perceptron_iterations, "iteration test above.\n\n" )
if __name__=='__main__':
main( sys.argv[1:] )
|
the-stack_0_14871 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from abc import ABC, abstractmethod
from typing import Any, Callable, Optional, Union
from collections.abc import Mapping, Sequence
from collections import namedtuple
from copy import deepcopy
from distutils.version import LooseVersion
import os
import torch
from torch import nn
from pytorch_lightning.utilities.apply_func import apply_to_collection
from pytorch_lightning.utilities.distributed import gather_all_tensors
from pytorch_lightning.metrics.utils import _flatten, dim_zero_cat, dim_zero_mean, dim_zero_sum
class Metric(nn.Module, ABC):
"""
Base class for all metrics present in the Metrics API.
Implements ``add_state()``, ``forward()``, ``reset()`` and a few other things to
handle distributed synchronization and per-step metric computation.
Override ``update()`` and ``compute()`` functions to implement your own metric. Use
``add_state()`` to register metric state variables which keep track of state on each
call of ``update()`` and are synchronized across processes when ``compute()`` is called.
Note:
        Metric state variables can either be ``torch.Tensors`` or an empty list which can be used
        to store ``torch.Tensors``.
Note:
Different metrics only override ``update()`` and not ``forward()``. A call to ``update()``
is valid, but it won't return the metric value at the current step. A call to ``forward()``
automatically calls ``update()`` and also returns the metric value at the current step.
Args:
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False. default: True
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step.
process_group:
Specify the process group on which synchronization is called. default: None (which selects the entire world)
dist_sync_fn:
Callback that performs the allgather operation on the metric state. When `None`, DDP
will be used to perform the allgather. default: None
"""
def __init__(
self,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
dist_sync_fn: Callable = None,
):
super().__init__()
self.dist_sync_on_step = dist_sync_on_step
self.compute_on_step = compute_on_step
self.process_group = process_group
self.dist_sync_fn = dist_sync_fn
self._to_sync = True
self.update = self._wrap_update(self.update)
self.compute = self._wrap_compute(self.compute)
self._computed = None
self._forward_cache = None
# initialize state
self._reductions = {}
self._defaults = {}
def add_state(
self, name: str, default, dist_reduce_fx: Optional[Union[str, Callable]] = None, persistent: bool = True
):
"""
Adds metric state variable. Only used by subclasses.
Args:
name: The name of the state variable. The variable will then be accessible at ``self.name``.
default: Default value of the state; can either be a ``torch.Tensor`` or an empty list. The state will be
reset to this value when ``self.reset()`` is called.
            dist_reduce_fx (Optional): Function to reduce state across multiple processes in distributed mode.
If value is ``"sum"``, ``"mean"``, or ``"cat"``, we will use ``torch.sum``, ``torch.mean``,
and ``torch.cat`` respectively, each with argument ``dim=0``. The user can also pass a custom
function in this parameter.
persistent (Optional): whether the state will be saved as part of the modules ``state_dict``.
Note:
Setting ``dist_reduce_fx`` to None will return the metric state synchronized across different processes.
However, there won't be any reduction function applied to the synchronized metric state.
The metric states would be synced as follows
- If the metric state is ``torch.Tensor``, the synced value will be a stacked ``torch.Tensor`` across
the process dimension if the metric state was a ``torch.Tensor``. The original ``torch.Tensor`` metric
state retains dimension and hence the synchronized output will be of shape ``(num_process, ...)``.
- If the metric state is a ``list``, the synced value will be a ``list`` containing the
combined elements from all processes.
Note:
When passing a custom function to ``dist_reduce_fx``, expect the synchronized metric state to follow
the format discussed in the above note.
"""
if (
not isinstance(default, torch.Tensor)
and not isinstance(default, list) # noqa: W503
or (isinstance(default, list) and len(default) != 0) # noqa: W503
):
raise ValueError(
"state variable must be a tensor or any empty list (where you can append tensors)"
)
if dist_reduce_fx == "sum":
dist_reduce_fx = dim_zero_sum
elif dist_reduce_fx == "mean":
dist_reduce_fx = dim_zero_mean
elif dist_reduce_fx == "cat":
dist_reduce_fx = dim_zero_cat
elif dist_reduce_fx is not None and not isinstance(dist_reduce_fx, Callable):
raise ValueError(
"`dist_reduce_fx` must be callable or one of ['mean', 'sum', 'cat', None]"
)
if isinstance(default, torch.Tensor):
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
# persistent keyword is only supported in torch >= 1.6.0
self.register_buffer(name, default, persistent=persistent)
else:
self.register_buffer(name, default)
else:
setattr(self, name, default)
self._defaults[name] = deepcopy(default)
self._reductions[name] = dist_reduce_fx
@torch.jit.unused
def forward(self, *args, **kwargs):
"""
Automatically calls ``update()``. Returns the metric value over inputs if ``compute_on_step`` is True.
"""
# add current step
with torch.no_grad():
self.update(*args, **kwargs)
self._forward_cache = None
if self.compute_on_step:
self._to_sync = self.dist_sync_on_step
# save context before switch
self._cache = {attr: getattr(self, attr) for attr in self._defaults.keys()}
# call reset, update, compute, on single batch
self.reset()
self.update(*args, **kwargs)
self._forward_cache = self.compute()
# restore context
for attr, val in self._cache.items():
setattr(self, attr, val)
self._to_sync = True
self._computed = None
return self._forward_cache
def _sync_dist(self, dist_sync_fn=gather_all_tensors):
input_dict = {attr: getattr(self, attr) for attr in self._reductions.keys()}
output_dict = apply_to_collection(
input_dict,
torch.Tensor,
dist_sync_fn,
group=self.process_group,
)
for attr, reduction_fn in self._reductions.items():
# pre-processing ops (stack or flatten for inputs)
if isinstance(output_dict[attr][0], torch.Tensor):
output_dict[attr] = torch.stack(output_dict[attr])
elif isinstance(output_dict[attr][0], list):
output_dict[attr] = _flatten(output_dict[attr])
assert isinstance(reduction_fn, (Callable)) or reduction_fn is None
reduced = reduction_fn(output_dict[attr]) if reduction_fn is not None else output_dict[attr]
setattr(self, attr, reduced)
def _wrap_update(self, update):
@functools.wraps(update)
def wrapped_func(*args, **kwargs):
self._computed = None
return update(*args, **kwargs)
return wrapped_func
def _wrap_compute(self, compute):
@functools.wraps(compute)
def wrapped_func(*args, **kwargs):
# return cached value
if self._computed is not None:
return self._computed
dist_sync_fn = self.dist_sync_fn
if (dist_sync_fn is None
and torch.distributed.is_available()
and torch.distributed.is_initialized()):
# User provided a bool, so we assume DDP if available
dist_sync_fn = gather_all_tensors
if self._to_sync and dist_sync_fn is not None:
self._sync_dist(dist_sync_fn)
self._computed = compute(*args, **kwargs)
self.reset()
return self._computed
return wrapped_func
@abstractmethod
def update(self) -> None: # pylint: disable=E0202
"""
Override this method to update the state variables of your metric class.
"""
pass
@abstractmethod
def compute(self): # pylint: disable=E0202
"""
Override this method to compute the final metric value from state variables
synchronized across the distributed backend.
"""
pass
def reset(self):
"""
This method automatically resets the metric state variables to their default value.
"""
for attr, default in self._defaults.items():
current_val = getattr(self, attr)
if isinstance(current_val, torch.Tensor):
setattr(self, attr, deepcopy(default).to(current_val.device))
else:
setattr(self, attr, deepcopy(default))
def __getstate__(self):
# ignore update and compute functions for pickling
return {k: v for k, v in self.__dict__.items() if k not in ["update", "compute"]}
def __setstate__(self, state):
# manually restore update and compute functions for pickling
self.__dict__.update(state)
self.update = self._wrap_update(self.update)
self.compute = self._wrap_compute(self.compute)
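# Illustrative sketch (not part of the original module): a minimal Metric subclass that
# follows the pattern described in the class docstring -- register state with add_state(),
# accumulate it in update(), and reduce it in compute(). The class name is hypothetical.
class _ExampleSumMetric(Metric):
    def __init__(self):
        super().__init__()
        # running total; synchronized across processes with a sum reduction
        self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum")

    def update(self, values: torch.Tensor):
        self.total += values.sum()

    def compute(self):
        return self.total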
|
the-stack_0_14872 | #!/usr/bin/python3
#import time
import random
import imp
modl = imp.load_source('ppFunctions', '../00/ppFunctions.py')
import os
from ppFunctions import *
from termcolor import colored, cprint
# sleep because of loading midi modules
print("Are you ready?")
time.sleep(1)
print_status = lambda x: cprint(x, 'white', 'on_blue')
print_help = lambda x: cprint(x, 'red')
hit = 0
rounde = 1
done = False
generatedList = []
for i in range(stringToMidiNum("c"), stringToMidiNum("c'")+1):
if i%12 in blackTonesBase:
generatedList.append(i)
while True:
try:
os.system('clear')
print_status("Status: round=" + str(rounde) + ", hit=" + str(hit))
print_help("Help: rEPEAT sKIP")
playHarmonicNotes(stringToMidiNum("f a c'"))
randomNote = random.choice(generatedList)
playNote(randomNote)
while not done:
guessedNote = input("Your input:")
if guessedNote == "r":
print("Repeating...")
playHarmonicNotes(stringToMidiNum("f a c'"))
playNote(randomNote)
elif guessedNote == "s":
print("Skiping...")
done = True
elif guessedNote not in lilypondTones:
print("What? Syntax error!")
else:
if (lilypondTones[guessedNote] == randomNote%12):
print("Yea!")
hit += 1
rounde += 1
done = True
else:
print("Almost!")
hit = 0
done = False
except (KeyboardInterrupt):
print('...Program Stopped Manually!')
raise
|
the-stack_0_14873 | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Mypy plugin to provide support for schema objects."""
from __future__ import annotations
from typing import *
from mypy import exprtotype
import mypy.plugin as mypy_plugin
from mypy import nodes
from mypy import types
from mypy import semanal
from mypy.plugins import common as mypy_helpers
from mypy.server import trigger as mypy_trigger
METADATA_KEY = 'edbplugin'
AST_BASE_CLASSES = {
'edb.common.ast.base.AST',
}
STRUCT_BASE_METACLASSES = {
'edb.common.struct.StructMeta',
}
SCHEMA_BASE_METACLASSES = {
'edb.schema.objects.ObjectMeta',
'edb.schema.types.SchemaCollectionMeta',
}
def plugin(version: str):
return EDBPlugin
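# Illustrative note (not part of the original module): mypy discovers this entry point via
# its standard plugin mechanism, along the lines of
#
#   [mypy]
#   plugins = edb/tools/mypy/plugin.py
#
# The module path above is an assumption for illustration; the project's own mypy
# configuration is authoritative.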
class EDBPlugin(mypy_plugin.Plugin):
def get_base_class_hook(self, fullname: str):
if fullname.startswith('edb.'):
return self.handle_schema_class
def handle_schema_class(self, ctx: mypy_plugin.ClassDefContext):
mro = ctx.cls.info.mro
mcls = ctx.cls.info.metaclass_type
mcls_mro = mcls.type.mro if mcls else []
transformers: List[BaseTransformer] = []
if any(c.fullname in SCHEMA_BASE_METACLASSES for c in mcls_mro):
transformers.append(
SchemaClassTransformer(
ctx,
field_makers={'edb.schema.objects.SchemaField'},
)
)
transformers.append(
StructTransformer(
ctx,
field_makers={'edb.schema.objects.Field'},
)
)
elif any(c.fullname in STRUCT_BASE_METACLASSES for c in mcls_mro):
transformers.append(
StructTransformer(
ctx,
field_makers={'edb.common.struct.Field'},
)
)
elif any(c.fullname in AST_BASE_CLASSES for c in mro):
transformers.append(
ASTClassTransformer(
ctx,
)
)
for transformer in transformers:
transformer.transform()
class DeferException(Exception):
pass
class Field(NamedTuple):
name: str
has_explicit_accessor: bool
has_default: bool
line: int
column: int
type: types.Type
def to_argument(self) -> nodes.Argument:
result = nodes.Argument(
variable=self.to_var(),
type_annotation=self.type,
initializer=None,
kind=nodes.ARG_NAMED_OPT if self.has_default else nodes.ARG_NAMED,
)
return result
def to_var(self) -> nodes.Var:
return nodes.Var(self.name, self.type)
def serialize(self) -> nodes.JsonDict:
return {
'name': self.name,
'has_explicit_accessor': self.has_explicit_accessor,
'has_default': self.has_default,
'line': self.line,
'column': self.column,
'type': self.type.serialize(),
}
@classmethod
def deserialize(
cls,
api,
data: nodes.JsonDict,
) -> Field:
return cls(
name=data['name'],
has_explicit_accessor=data['has_explicit_accessor'],
has_default=data['has_default'],
line=data['line'],
column=data['column'],
type=mypy_helpers.deserialize_and_fixup_type(data['type'], api),
)
class BaseTransformer:
def __init__(
self,
ctx: mypy_plugin.ClassDefContext,
) -> None:
self._ctx = ctx
def transform(self):
ctx = self._ctx
metadata_key = self._get_metadata_key()
metadata = ctx.cls.info.metadata.get(metadata_key)
if not metadata:
ctx.cls.info.metadata[metadata_key] = metadata = {}
metadata['processing'] = True
if metadata.get('processed'):
return
try:
fields = self._transform()
except DeferException:
ctx.api.defer()
return None
metadata['fields'] = {f.name: f.serialize() for f in fields}
metadata['processed'] = True
def _transform(self) -> List[Field]:
raise NotImplementedError
def _field_from_field_def(
self,
stmt: nodes.AssignmentStmt,
name: nodes.NameExpr,
sym: nodes.SymbolTableNode,
) -> Optional[Field]:
raise NotImplementedError
def _collect_fields(self) -> List[Field]:
"""Collect all fields declared in a class and its ancestors."""
cls = self._ctx.cls
fields: List[Field] = []
known_fields: Set[str] = set()
for stmt in cls.defs.body:
if not isinstance(stmt, nodes.AssignmentStmt):
continue
lhs = stmt.lvalues[0]
if not isinstance(lhs, nodes.NameExpr):
continue
sym = cls.info.names.get(lhs.name)
if sym is None or isinstance(sym.node, nodes.PlaceholderNode):
# Not resolved yet?
continue
node = sym.node
assert isinstance(node, nodes.Var)
if node.is_classvar:
# Disregard ClassVar stuff
continue
field = self._field_from_field_def(stmt, lhs, sym)
if field is not None:
fields.append(field)
known_fields.add(field.name)
return self._get_inherited_fields(known_fields) + fields
def _lookup_type(self, fullname: str) -> types.Type:
ctx = self._ctx
type_sym = ctx.api.lookup_fully_qualified_or_none(fullname)
if type_sym is None:
raise DeferException
t: types.Type
if isinstance(type_sym.node, nodes.TypeInfo):
from mypy.typevars import fill_typevars
t = fill_typevars(type_sym.node)
elif type_sym.type:
t = type_sym.type
else:
ctx.api.fail(f'cannot find {fullname}', ctx.cls)
return t
def _get_metadata_key(self) -> str:
return f'{METADATA_KEY}%%{type(self).__name__}'
def _has_explicit_field_accessor(self, fieldname: str) -> bool:
cls = self._ctx.cls
accessor = cls.info.names.get(f'get_{fieldname}')
return accessor is not None and not accessor.plugin_generated
def _get_inherited_fields(self, self_fields: Set[str]) -> List[Field]:
ctx = self._ctx
cls = ctx.cls
all_fields: List[Field] = []
known_fields = set(self_fields)
for ancestor_info in cls.info.mro[1:-1]:
metadata = ancestor_info.metadata.get(self._get_metadata_key())
if metadata is None:
continue
elif not metadata.get('processed'):
raise DeferException
ancestor_fields = []
ctx.api.add_plugin_dependency(
mypy_trigger.make_wildcard_trigger(ancestor_info.fullname))
for name, data in metadata['fields'].items():
if name not in known_fields:
if self._has_explicit_field_accessor(name):
data = dict(data)
data['has_explicit_accessor'] = True
field = Field.deserialize(ctx.api, data)
known_fields.add(name)
ancestor_fields.append(field)
all_fields = ancestor_fields + all_fields
return all_fields
def _synthesize_init(self, fields: List[Field]) -> None:
ctx = self._ctx
cls_info = ctx.cls.info
# If our self type has placeholders (probably because of type
# var bounds), defer. If we skip deferring and stick something
# in our symbol table anyway, we'll get in trouble. (Arguably
# plugins.common ought to help us with this, but oh well.)
self_type = mypy_helpers.fill_typevars(cls_info)
if semanal.has_placeholder(self_type):
raise DeferException
if (
(
'__init__' not in cls_info.names
or cls_info.names['__init__'].plugin_generated
) and fields
):
mypy_helpers.add_method(
ctx,
'__init__',
self_type=self_type,
args=[field.to_argument() for field in fields],
return_type=types.NoneType(),
)
class BaseStructTransformer(BaseTransformer):
def __init__(
self,
ctx: mypy_plugin.ClassDefContext,
field_makers: AbstractSet[str],
) -> None:
super().__init__(ctx)
self._field_makers = field_makers
def _field_from_field_def(
self,
stmt: nodes.AssignmentStmt,
name: nodes.NameExpr,
sym: nodes.SymbolTableNode,
) -> Optional[Field]:
ctx = self._ctx
rhs = stmt.rvalue
if isinstance(rhs, nodes.CastExpr):
rhs = rhs.expr
if not isinstance(rhs, nodes.CallExpr):
return None
fdef = rhs.callee
ftype = None
if (
isinstance(fdef, nodes.IndexExpr)
and isinstance(fdef.analyzed, nodes.TypeApplication)
):
# Explicitly typed Field declaration
ctor = fdef.analyzed.expr
if len(fdef.analyzed.types) > 1:
ctx.api.fail('too many type arguments to Field', fdef)
ftype = fdef.analyzed.types[0]
else:
ctor = fdef
ftype = None
if (
not isinstance(ctor, nodes.RefExpr)
or ctor.fullname not in self._field_makers
):
return None
type_arg = rhs.args[0]
deflt = self._get_default(rhs)
if ftype is None:
try:
un_type = exprtotype.expr_to_unanalyzed_type(type_arg)
except exprtotype.TypeTranslationError:
ctx.api.fail('Cannot resolve schema field type', type_arg)
else:
ftype = ctx.api.anal_type(un_type)
if ftype is None:
raise DeferException
is_optional = (
isinstance(deflt, nodes.NameExpr)
and deflt.fullname == 'builtins.None'
)
if is_optional:
ftype = types.UnionType.make_union(
[ftype, types.NoneType()],
line=ftype.line,
column=ftype.column,
)
assert isinstance(name.node, nodes.Var)
name.node.type = ftype
return Field(
name=name.name,
has_explicit_accessor=self._has_explicit_field_accessor(name.name),
has_default=deflt is not None,
line=stmt.line,
column=stmt.column,
type=ftype,
)
def _get_default(self, call) -> Optional[nodes.Expression]:
for (n, v) in zip(call.arg_names, call.args):
if n == 'default':
return v
else:
return None
class StructTransformer(BaseStructTransformer):
def _transform(self) -> List[Field]:
fields = self._collect_fields()
self._synthesize_init(fields)
return fields
def _field_from_field_def(
self,
stmt: nodes.AssignmentStmt,
name: nodes.NameExpr,
sym: nodes.SymbolTableNode,
):
field = super()._field_from_field_def(stmt, name, sym)
if field is None:
return None
else:
assert isinstance(sym.node, nodes.Var)
sym.node.is_initialized_in_class = False
name.is_inferred_def = False
rhs = stmt.rvalue
if not isinstance(rhs, nodes.CastExpr):
stmt.rvalue = nodes.CastExpr(
typ=field.type,
expr=rhs,
)
stmt.rvalue.line = rhs.line
stmt.rvalue.column = rhs.column
return field
class SchemaClassTransformer(BaseStructTransformer):
def _transform(self) -> List[Field]:
ctx = self._ctx
fields = self._collect_fields()
schema_t = self._lookup_type('edb.schema.schema.Schema')
for f in fields:
if f.has_explicit_accessor:
continue
mypy_helpers.add_method(
ctx,
name=f'get_{f.name}',
args=[
nodes.Argument(
variable=nodes.Var(
name='schema',
type=schema_t,
),
type_annotation=schema_t,
initializer=None,
kind=nodes.ARG_POS,
),
],
return_type=f.type,
)
return fields
class ASTClassTransformer(BaseTransformer):
def _transform(self) -> List[Field]:
fields = self._collect_fields()
# NB: __init__ synthesis below brings up a vast number of
# typing errors which require AST definitions to be
# annotated with defaults properly and the code adjusted
# to handle Optional fields (historically we've been
# initializing container fields with empty lists/tuples).
# self._synthesize_init(fields)
return fields
def _field_from_field_def(
self,
stmt: nodes.AssignmentStmt,
name: nodes.NameExpr,
sym: nodes.SymbolTableNode,
) -> Optional[Field]:
if sym.type is None:
# No type annotation?
return None
else:
has_default = not isinstance(stmt.rvalue, nodes.TempNode)
if not has_default:
sym.implicit = True
return Field(
name=name.name,
has_default=has_default,
line=stmt.line,
column=stmt.column,
type=sym.type,
has_explicit_accessor=False,
)
|
the-stack_0_14875 | # code is borrowed from the original repo and fit into our training framework
# https://github.com/HuCaoFighting/Swin-Unet/tree/4375a8d6fa7d9c38184c5d3194db990a00a3e912
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from einops import rearrange
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
import copy
import logging
import math
from os.path import join as pjoin
import numpy as np
from torch.nn import CrossEntropyLoss, Dropout, Softmax, Linear, Conv2d, LayerNorm
from torch.nn.modules.utils import _pair
from scipy import ndimage
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both shifted and non-shifted windows.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def extra_repr(self) -> str:
return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
def flops(self, N):
# calculate flops for 1 window with token length of N
flops = 0
# qkv = self.qkv(x)
flops += N * self.dim * 3 * self.dim
# attn = (q @ k.transpose(-2, -1))
flops += self.num_heads * N * (self.dim // self.num_heads) * N
# x = (attn @ v)
flops += self.num_heads * N * N * (self.dim // self.num_heads)
# x = self.proj(x)
flops += N * self.dim * self.dim
return flops
class SwinTransformerBlock(nn.Module):
r""" Swin Transformer Block.
Args:
dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if self.shift_size > 0:
# calculate attention mask for SW-MSA
H, W = self.input_resolution
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
def forward(self, x):
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
def flops(self):
flops = 0
H, W = self.input_resolution
# norm1
flops += self.dim * H * W
# W-MSA/SW-MSA
nW = H * W / self.window_size / self.window_size
flops += nW * self.attn.flops(self.window_size * self.window_size)
# mlp
flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
# norm2
flops += self.dim * H * W
return flops
class PatchMerging(nn.Module):
r""" Patch Merging Layer.
Args:
input_resolution (tuple[int]): Resolution of input feature.
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."
x = x.view(B, H, W, C)
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
def extra_repr(self) -> str:
return f"input_resolution={self.input_resolution}, dim={self.dim}"
def flops(self):
H, W = self.input_resolution
flops = H * W * self.dim
flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
return flops
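# Illustrative sketch (not part of the original module): PatchMerging halves each spatial
# dimension and doubles the channel dimension. The sizes below are arbitrary examples.
def _demo_patch_merging():
    merge = PatchMerging(input_resolution=(8, 8), dim=16)
    x = torch.randn(2, 8 * 8, 16)           # (B, H*W, C)
    y = merge(x)                             # (2, 16, 32): 8x8 tokens -> 4x4 tokens, 16 -> 32 channels
    return y.shape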
class PatchExpand(nn.Module):
def __init__(self, input_resolution, dim, dim_scale=2, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.expand = nn.Linear(dim, 2*dim, bias=False) if dim_scale==2 else nn.Identity()
self.norm = norm_layer(dim // dim_scale)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
x = self.expand(x)
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
x = x.view(B, H, W, C)
x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=2, p2=2, c=C//4)
x = x.view(B,-1,C//4)
x= self.norm(x)
return x
class FinalPatchExpand_X4(nn.Module):
def __init__(self, input_resolution, dim, dim_scale=4, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.dim_scale = dim_scale
self.expand = nn.Linear(dim, 16*dim, bias=False)
self.output_dim = dim
self.norm = norm_layer(self.output_dim)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
x = self.expand(x)
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
x = x.view(B, H, W, C)
x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=self.dim_scale, p2=self.dim_scale, c=C//(self.dim_scale**2))
x = x.view(B,-1,self.output_dim)
x= self.norm(x)
return x
class BasicLayer(nn.Module):
""" A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop, attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer)
for i in range(depth)])
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x):
for blk in self.blocks:
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if self.downsample is not None:
x = self.downsample(x)
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
def flops(self):
flops = 0
for blk in self.blocks:
flops += blk.flops()
if self.downsample is not None:
flops += self.downsample.flops()
return flops
class BasicLayer_up(nn.Module):
""" A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        upsample (nn.Module | None, optional): Upsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, upsample=None, use_checkpoint=False):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop, attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer)
for i in range(depth)])
# patch merging layer
if upsample is not None:
self.upsample = PatchExpand(input_resolution, dim=dim, dim_scale=2, norm_layer=norm_layer)
else:
self.upsample = None
def forward(self, x):
for blk in self.blocks:
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if self.upsample is not None:
x = self.upsample(x)
return x
class PatchEmbed(nn.Module):
r""" Image to Patch Embedding
Args:
img_size (int): Image size. Default: 224.
patch_size (int): Patch token size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
norm_layer (nn.Module, optional): Normalization layer. Default: None
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
self.img_size = img_size
self.patch_size = patch_size
self.patches_resolution = patches_resolution
self.num_patches = patches_resolution[0] * patches_resolution[1]
self.in_chans = in_chans
self.embed_dim = embed_dim
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = None
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C
if self.norm is not None:
x = self.norm(x)
return x
def flops(self):
Ho, Wo = self.patches_resolution
flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
if self.norm is not None:
flops += Ho * Wo * self.embed_dim
return flops
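# Illustrative sketch (not part of the original module): with the default settings a
# 224x224 RGB image is split into 56x56 patch tokens of dimension embed_dim.
def _demo_patch_embed():
    embed = PatchEmbed(img_size=224, patch_size=4, in_chans=3, embed_dim=96)
    x = torch.randn(1, 3, 224, 224)
    tokens = embed(x)                        # (1, 56 * 56, 96)
    return tokens.shape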
class SwinTransformerSys(nn.Module):
r""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=[2, 2, 2, 2], depths_decoder=[1, 2, 2, 2], num_heads=[3, 6, 12, 24],
window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, final_upsample="expand_first", **kwargs):
super().__init__()
print("SwinTransformerSys expand initial----depths:{};depths_decoder:{};drop_path_rate:{};num_classes:{}".format(depths,
depths_decoder,drop_path_rate,num_classes))
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.num_features_up = int(embed_dim * 2)
self.mlp_ratio = mlp_ratio
self.final_upsample = final_upsample
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
# absolute position embedding
if self.ape:
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.absolute_pos_embed, std=.02)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build encoder and bottleneck layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint)
self.layers.append(layer)
# build decoder layers
self.layers_up = nn.ModuleList()
self.concat_back_dim = nn.ModuleList()
for i_layer in range(self.num_layers):
concat_linear = nn.Linear(2*int(embed_dim*2**(self.num_layers-1-i_layer)),
int(embed_dim*2**(self.num_layers-1-i_layer))) if i_layer > 0 else nn.Identity()
            if i_layer == 0:
layer_up = PatchExpand(input_resolution=(patches_resolution[0] // (2 ** (self.num_layers-1-i_layer)),
patches_resolution[1] // (2 ** (self.num_layers-1-i_layer))), dim=int(embed_dim * 2 ** (self.num_layers-1-i_layer)), dim_scale=2, norm_layer=norm_layer)
else:
layer_up = BasicLayer_up(dim=int(embed_dim * 2 ** (self.num_layers-1-i_layer)),
input_resolution=(patches_resolution[0] // (2 ** (self.num_layers-1-i_layer)),
patches_resolution[1] // (2 ** (self.num_layers-1-i_layer))),
depth=depths[(self.num_layers-1-i_layer)],
num_heads=num_heads[(self.num_layers-1-i_layer)],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:(self.num_layers-1-i_layer)]):sum(depths[:(self.num_layers-1-i_layer) + 1])],
norm_layer=norm_layer,
upsample=PatchExpand if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint)
self.layers_up.append(layer_up)
self.concat_back_dim.append(concat_linear)
self.norm = norm_layer(self.num_features)
self.norm_up= norm_layer(self.embed_dim)
if self.final_upsample == "expand_first":
print("---final upsample expand_first---")
self.up = FinalPatchExpand_X4(input_resolution=(img_size//patch_size,img_size//patch_size),dim_scale=4,dim=embed_dim)
self.output = nn.Conv2d(in_channels=embed_dim,out_channels=self.num_classes,kernel_size=1,bias=False)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'relative_position_bias_table'}
#Encoder and Bottleneck
def forward_features(self, x):
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
x_downsample = []
for layer in self.layers:
x_downsample.append(x)
x = layer(x)
x = self.norm(x) # B L C
return x, x_downsample
    # Decoder and skip connection
def forward_up_features(self, x, x_downsample):
for inx, layer_up in enumerate(self.layers_up):
if inx == 0:
x = layer_up(x)
else:
x = torch.cat([x,x_downsample[3-inx]],-1)
x = self.concat_back_dim[inx](x)
x = layer_up(x)
x = self.norm_up(x) # B L C
return x
def up_x4(self, x):
H, W = self.patches_resolution
B, L, C = x.shape
        assert L == H*W, "input features have the wrong size"
if self.final_upsample=="expand_first":
x = self.up(x)
x = x.view(B,4*H,4*W,-1)
x = x.permute(0,3,1,2) #B,C,H,W
x = self.output(x)
return x
def forward(self, x):
x, x_downsample = self.forward_features(x)
x = self.forward_up_features(x,x_downsample)
x = self.up_x4(x)
return x
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
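# Rough shape flow of SwinTransformerSys for a 224x224 input with the defaults above
# (patch_size=4, embed_dim=96, four stages); a sketch with placeholder names, not a test:
#
#   model = SwinTransformerSys(img_size=224, num_classes=4)
#   x = torch.randn(1, 3, 224, 224)
#   # encoder: (1, 3136, 96) -> (1, 784, 192) -> (1, 196, 384) -> (1, 49, 768)
#   # the decoder reverses these resolutions via PatchExpand plus concatenated skips,
#   # and up_x4() restores the spatial size, so
#   logits = model(x)                     # (1, num_classes, 224, 224)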
logger = logging.getLogger(__name__)
class SwinUnet_config():
def __init__(self):
self.patch_size = 4
self.in_chans = 3
self.num_classes = 4
self.embed_dim = 96
self.depths = [2, 2, 6, 2]
self.num_heads = [3, 6, 12, 24]
self.window_size = 7
self.mlp_ratio = 4.
self.qkv_bias = True
self.qk_scale = None
self.drop_rate = 0.
self.drop_path_rate = 0.1
self.ape = False
self.patch_norm = True
self.use_checkpoint = False
class SwinUnet(nn.Module):
def __init__(self, config, img_size=224, num_classes=21843, zero_head=False, vis=False):
super(SwinUnet, self).__init__()
self.num_classes = num_classes
self.zero_head = zero_head
self.config = config
self.swin_unet = SwinTransformerSys(img_size=img_size,
patch_size=config.patch_size,
in_chans=config.in_chans,
num_classes=self.num_classes,
embed_dim=config.embed_dim,
depths=config.depths,
num_heads=config.num_heads,
window_size=config.window_size,
mlp_ratio=config.mlp_ratio,
qkv_bias=config.qkv_bias,
qk_scale=config.qk_scale,
drop_rate=config.drop_rate,
drop_path_rate=config.drop_path_rate,
ape=config.ape,
patch_norm=config.patch_norm,
use_checkpoint=config.use_checkpoint)
def forward(self, x):
# print(x.size())
# if x.size()[1] == 1:
# x = x.repeat(1,3,1,1)
logits = self.swin_unet(x)
return logits
def load_from(self, pretrained_path):
if pretrained_path is not None:
print("pretrained_path:{}".format(pretrained_path))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
pretrained_dict = torch.load(pretrained_path, map_location=device)
if "model" not in pretrained_dict:
print("---start load pretrained modle by splitting---")
pretrained_dict = {k[17:]:v for k,v in pretrained_dict.items()}
for k in list(pretrained_dict.keys()):
if "output" in k:
print("delete key:{}".format(k))
del pretrained_dict[k]
msg = self.swin_unet.load_state_dict(pretrained_dict,strict=False)
# print(msg)
return
pretrained_dict = pretrained_dict['model']
print("---start load pretrained modle of swin encoder---")
model_dict = self.swin_unet.state_dict()
full_dict = copy.deepcopy(pretrained_dict)
for k, v in pretrained_dict.items():
if "layers." in k:
current_layer_num = 3-int(k[7:8])
current_k = "layers_up." + str(current_layer_num) + k[8:]
full_dict.update({current_k:v})
for k in list(full_dict.keys()):
if k in model_dict:
if full_dict[k].shape != model_dict[k].shape:
print("delete:{};shape pretrain:{};shape model:{}".format(k,v.shape,model_dict[k].shape))
del full_dict[k]
msg = self.swin_unet.load_state_dict(full_dict, strict=False)
# print(msg)
else:
print("none pretrain")
|
the-stack_0_14877 | #!/usr/bin/env python3
import unittest
from unittest.mock import patch
import numpy as np
import pandas as pd
from tmc import points
from tmc.utils import load, get_stdout, patch_helper
module_name="src.subsetting_by_positions"
subsetting_by_positions = load(module_name, "subsetting_by_positions")
main = load(module_name, "main")
ph = patch_helper(module_name)
@points('p04-08.1')
class SubsettingByPositions(unittest.TestCase):
def test_shape_and_columns(self):
df = subsetting_by_positions()
self.assertEqual(df.shape, (10,2), msg="The returned DataFrame had wrong shape!")
#np.testing.assert_array_equal(df.index, range(10), err_msg="Incorrect index")
np.testing.assert_array_equal(df.columns, ["Title", "Artist"],
err_msg="Incorrect column names")
def test_called(self):
with patch(ph("subsetting_by_positions"), wraps=subsetting_by_positions) as psbp,\
patch(ph("pd.read_csv"), wraps=pd.read_csv) as prc:
main()
psbp.assert_called()
prc.assert_called()
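# For orientation, a solution satisfying these tests might look like the sketch below; the
# input file name and the column positions are assumptions, not taken from the exercise:
#
#   def subsetting_by_positions():
#       df = pd.read_csv("<some_chart_data>.csv")                 # hypothetical file
#       return df.iloc[:10, [<title_col>, <artist_col>]]          # 10 rows, Title and Artist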
if __name__ == '__main__':
unittest.main()
|
the-stack_0_14878 | # Import python libs
import new
import sys
# Import Salt Testing libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# wmi and pythoncom modules are platform specific...
wmi = new.module('wmi')
sys.modules['wmi'] = wmi
pythoncom = new.module('pythoncom')
sys.modules['pythoncom'] = pythoncom
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, Mock, patch, call, ANY
if NO_MOCK is False:
WMI = Mock()
wmi.WMI = Mock(return_value=WMI)
pythoncom.CoInitialize = Mock()
pythoncom.CoUninitialize = Mock()
# This is imported late so mock can do its job
import bonneville.modules.win_status as status
@skipIf(NO_MOCK, NO_MOCK_REASON)
class TestProcsBase(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
self.__processes = []
def add_process(
self,
pid=100,
cmd='cmd',
name='name',
user='user',
user_domain='domain',
get_owner_result=0):
process = Mock()
process.GetOwner = Mock(
return_value=(user_domain, get_owner_result, user)
)
process.ProcessId = pid
process.CommandLine = cmd
process.Name = name
self.__processes.append(process)
def call_procs(self):
WMI.win32_process = Mock(return_value=self.__processes)
self.result = status.procs()
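# Given the mocks above, status.procs() should come back as a dict keyed by PID whose values
# describe each process, roughly (layout inferred from the assertions in the tests below):
#
#   {100: {'cmd': 'cmd', 'name': 'name', 'user': 'user', 'user_domain': 'domain'}, ...}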
class TestProcsCount(TestProcsBase):
def setUp(self):
self.add_process(pid=100)
self.add_process(pid=101)
self.call_procs()
def test_process_count(self):
self.assertEqual(len(self.result), 2)
def test_process_key_is_pid(self):
self.assertSetEqual(set(self.result.keys()), set([100, 101]))
class TestProcsAttributes(TestProcsBase):
def setUp(self):
self._expected_name = 'name'
self._expected_cmd = 'cmd'
self._expected_user = 'user'
self._expected_domain = 'domain'
pid = 100
self.add_process(
pid=pid,
cmd=self._expected_cmd,
user=self._expected_user,
user_domain=self._expected_domain,
get_owner_result=0)
self.call_procs()
self.proc = self.result[pid]
def test_process_cmd_is_set(self):
self.assertEqual(self.proc['cmd'], self._expected_cmd)
def test_process_name_is_set(self):
self.assertEqual(self.proc['name'], self._expected_name)
def test_process_user_is_set(self):
self.assertEqual(self.proc['user'], self._expected_user)
def test_process_user_domain_is_set(self):
self.assertEqual(self.proc['user_domain'], self._expected_domain)
class TestProcsUnicodeAttributes(TestProcsBase):
def setUp(self):
unicode_str = u'\xc1'
self.utf8str = unicode_str.encode('utf8')
pid = 100
self.add_process(
pid=pid,
user=unicode_str,
user_domain=unicode_str,
cmd=unicode_str,
name=unicode_str)
self.call_procs()
self.proc = self.result[pid]
def test_process_cmd_is_utf8(self):
self.assertEqual(self.proc['cmd'], self.utf8str)
def test_process_name_is_utf8(self):
self.assertEqual(self.proc['name'], self.utf8str)
def test_process_user_is_utf8(self):
self.assertEqual(self.proc['user'], self.utf8str)
def test_process_user_domain_is_utf8(self):
self.assertEqual(self.proc['user_domain'], self.utf8str)
class TestProcsWMIGetOwnerAccessDeniedWorkaround(TestProcsBase):
def setUp(self):
self.expected_user = 'SYSTEM'
self.expected_domain = 'NT AUTHORITY'
self.add_process(pid=0, get_owner_result=2)
self.add_process(pid=4, get_owner_result=2)
self.call_procs()
def test_user_is_set(self):
self.assertEqual(self.result[0]['user'], self.expected_user)
self.assertEqual(self.result[4]['user'], self.expected_user)
def test_process_user_domain_is_set(self):
self.assertEqual(self.result[0]['user_domain'], self.expected_domain)
self.assertEqual(self.result[4]['user_domain'], self.expected_domain)
class TestProcsWMIGetOwnerErrorsAreLogged(TestProcsBase):
def setUp(self):
self.expected_error_code = 8
self.add_process(get_owner_result=self.expected_error_code)
def test_error_logged_if_process_get_owner_fails(self):
with patch('salt.modules.win_status.log') as log:
self.call_procs()
log.warning.assert_called_once_with(ANY)
self.assertIn(
str(self.expected_error_code),
log.warning.call_args[0][0]
)
class TestEmptyCommandLine(TestProcsBase):
def setUp(self):
self.expected_error_code = 8
pid = 100
self.add_process(pid=pid, cmd=None)
self.call_procs()
self.proc = self.result[pid]
def test_cmd_is_empty_string(self):
self.assertEqual(self.proc['cmd'], '')
#class TestProcsComInitialization(TestProcsBase):
# def setUp(self):
# call_count = 5
# for _ in range(call_count):
# self.call_procs()
# self.expected_calls = [call()] * call_count
#
# def test_initialize_and_uninitialize_called(self):
# pythoncom.CoInitialize.assert_has_calls(self.expected_calls)
# pythoncom.CoUninitialize.assert_has_calls(self.expected_calls)
if __name__ == '__main__':
from integration import run_tests
run_tests(
[
TestProcsCount,
TestProcsAttributes,
TestProcsUnicodeAttributes,
TestProcsWMIGetOwnerErrorsAreLogged,
TestProcsWMIGetOwnerAccessDeniedWorkaround,
],
needs_daemon=False
)
|
the-stack_0_14880 | from PyQt5 import QtWidgets
from PyQt5.QtCore import qWarning, Qt
from PyQt5.QtWidgets import QWidget, QSplitter
from candy_editor.qt.controls.ToolWindowManager.ToolWindowManagerArea import ToolWindowManagerArea
class ToolWindowManagerWrapper ( QWidget ):
def __init__ ( self, manager ):
super ( ToolWindowManagerWrapper, self ).__init__ ( manager )
self.manager = manager
self.setWindowFlags ( self.windowFlags () | Qt.Tool )
self.setWindowTitle ( '' )
mainLayout = QtWidgets.QVBoxLayout ( self )
mainLayout.setContentsMargins ( 0, 0, 0, 0 )
self.manager.wrappers.append ( self )
def closeEvent ( self, event ):
'''
        Handle all ToolWindowManagerArea instances owned by this wrapper when it is closed.
'''
from .ToolWindowManager import ToolWindowManager
toolWindows = []
for widget in self.findChildren ( ToolWindowManagerArea ):
toolWindows += widget.toolWindows ()
self.manager.moveToolWindows ( toolWindows, ToolWindowManager.NoArea )
def saveState ( self ):
result = {}
if self.layout ().count () > 1:
qWarning ('too many children for wrapper')
return result
if self.isWindow () and self.layout ().count () == 0:
qWarning ('empty top level wrapper')
return result
# result[ 'geometry' ] = str ( self.saveGeometry () )
splitter = self.findChild ( QSplitter )
if splitter:
result[ 'splitter' ] = self.manager.saveSplitterState ( splitter )
else:
area = self.findChild ( ToolWindowManagerArea )
if area:
result[ 'area' ] = area.saveState ()
elif self.layout ().count () > 0:
qWarning ('unknown child')
return {}
return result
def restoreState ( self, data ):
if 'geometry' in data:
self.restoreGeometry ( data['geometry'] )
if self.layout ().count () > 0:
qWarning ('wrapper is not empty')
return
if 'splitter' in data:
self.layout ().addWidget (
self.manager.restoreSplitterState ( data[ 'splitter' ].toMap () )
)
elif 'area' in data:
area = self.manager.createArea ()
area.restoreState ( data[ 'area' ] )
self.layout ().addWidget ( area )
def isOccupied ( self ):
return self.layout ().count () > 0
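# saveState()/restoreState() exchange a plain dict; based on the code above it has roughly
# this shape, where only one of 'splitter' or 'area' is present and 'geometry' is currently
# only honoured by restoreState():
#
#   {'geometry': <saved geometry bytes>,
#    'splitter': <manager.saveSplitterState(...) result>,
#    'area': <ToolWindowManagerArea.saveState() result>}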
|
the-stack_0_14881 | #Claire Williams & Matthew Rasmussen
#1/26/2021
#Moves info from one CSV file to other CSV files
#dictionary full of info
import csv
def make_athletes_table():
    '''Build a dictionary of athletes keyed by name and write it to athletes.csv.'''
athlete_dict = {}
with open('athlete_events.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
headers = next(csv_reader)
for row in csv_reader:
name = row[1]
sex = row[2]
height = row[4]
weight = row[5]
if name not in athlete_dict:
athlete_dict[name] = [len(athlete_dict) + 1, sex, height, weight]
with open('athletes.csv', 'w', newline='') as new_csv_file:
writer = csv.writer(new_csv_file, delimiter=',')
for key in athlete_dict:
writer.writerow([athlete_dict[key][0], key, athlete_dict[key][1], athlete_dict[key][2], athlete_dict[key][3]])
return athlete_dict
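# The returned dictionary maps an athlete's name to [row id, sex, height, weight]; an
# illustrative entry (values are examples, not taken from the real file) would be:
#
#   athlete_dict['A Dijiang'] = [1, 'M', '180', '80']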
def make_nations_table():
nations_dict = {}
with open ('noc_regions.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
headers = next(csv_reader)
for row in csv_reader:
noc = row[0]
region = row[1]
nations_dict[noc] = [len(nations_dict) + 1, region]
with open ('athlete_events.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
headers = next(csv_reader)
for row in csv_reader:
noc = row[7]
team = row[6]
if noc not in nations_dict:
nations_dict[noc] = [len(nations_dict) + 1, team]
with open('nations.csv', 'w', newline='') as new_csv_file:
writer = csv.writer(new_csv_file, delimiter=',')
for nation in nations_dict:
writer.writerow([nations_dict[nation][0], nation, nations_dict[nation][1]])
return nations_dict
def make_games_table():
games_dict = {}
with open ('athlete_events.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
headers = next(csv_reader)
for row in csv_reader:
game = row[8]
year = row[9]
season = row[10]
city = row[11]
if game not in games_dict:
games_dict[game] = [len(games_dict) + 1, year, season, city]
with open('games.csv', 'w', newline='') as new_csv_file:
writer = csv.writer(new_csv_file, delimiter=',')
for key in games_dict:
writer.writerow([games_dict[key][0], games_dict[key][1], games_dict[key][2], games_dict[key][3]])
return games_dict
def make_contests_table():
contest_dict = {}
with open ('athlete_events.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
headers = next(csv_reader)
for row in csv_reader:
contest = row[13]
sport = row[12]
if contest not in contest_dict:
contest_dict[contest] = [len(contest_dict) + 1, sport]
with open('contests.csv', 'w', newline='') as new_csv_file:
writer = csv.writer(new_csv_file, delimiter=',')
for key in contest_dict:
writer.writerow([contest_dict[key][0], key, contest_dict[key][1]])
return contest_dict
def make_athletes_games(athelete_dict, nations_dict, games_dict):
athletes_games_dict = {}
with open ('athlete_events.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
headers = next(csv_reader)
for row in csv_reader:
athlete = row[1]
game_name = row[8]
noc = row[7]
if (athlete, game_name) not in athletes_games_dict:
athletes_games_dict[(athlete, game_name)] = [len(athletes_games_dict) + 1, athelete_dict[athlete][0], nations_dict[noc][0], games_dict[game_name][0]]
with open('athletes_games.csv', 'w', newline='') as new_csv_file:
writer = csv.writer(new_csv_file, delimiter=',')
for key in athletes_games_dict:
writer.writerow(athletes_games_dict[key])
return athletes_games_dict
def make_contests_medals(athletes_games_dict, contests_dict):
contests_medals_dict = {}
with open ('athlete_events.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
headers = next(csv_reader)
for row in csv_reader:
athlete = row[1]
game_name = row[8]
contest = row[13]
medal = row[14]
contests_medals_dict[len(contests_medals_dict) + 1] = [athletes_games_dict[(athlete, game_name)][0], contests_dict[contest][0], medal]
with open('contests_medals.csv', 'w', newline='') as new_csv_file:
writer = csv.writer(new_csv_file, delimiter=',')
for key in contests_medals_dict:
writer.writerow([key, contests_medals_dict[key][0], contests_medals_dict[key][1], contests_medals_dict[key][2]])
def main():
athelete_dict = make_athletes_table()
nations_dict = make_nations_table()
games_dict = make_games_table()
contests_dict = make_contests_table()
athletes_games_dict = make_athletes_games(athelete_dict, nations_dict, games_dict)
make_contests_medals(athletes_games_dict, contests_dict)
main() |
the-stack_0_14883 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.seq2seq.python.ops.attention_wrapper."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import collections
import functools
import numpy as np
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.contrib.seq2seq.python.ops import attention_wrapper as wrapper
from tensorflow.contrib.seq2seq.python.ops import helper as helper_py
from tensorflow.contrib.seq2seq.python.ops import basic_decoder
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variables
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import test
from tensorflow.python.util import nest
# pylint: enable=g-import-not-at-top
# for testing
AttentionWrapperState = wrapper.AttentionWrapperState # pylint: disable=invalid-name
LSTMStateTuple = rnn_cell.LSTMStateTuple # pylint: disable=invalid-name
BasicDecoderOutput = basic_decoder.BasicDecoderOutput # pylint: disable=invalid-name
float32 = np.float32
int32 = np.int32
array = np.array
dtype = np.dtype
class ResultSummary(
collections.namedtuple('ResultSummary', ('shape', 'dtype', 'mean'))):
pass
def get_result_summary(x):
if isinstance(x, np.ndarray):
return ResultSummary(x.shape, x.dtype, x.mean())
return x
class AttentionWrapperTest(test.TestCase):
def assertAllCloseOrEqual(self, x, y, **kwargs):
if isinstance(x, np.ndarray) or isinstance(x, float):
return super(AttentionWrapperTest, self).assertAllClose(
x, y, atol=1e-3, **kwargs)
else:
self.assertAllEqual(x, y, **kwargs)
def testAttentionWrapperState(self):
num_fields = len(wrapper.AttentionWrapperState._fields) # pylint: disable=protected-access
state = wrapper.AttentionWrapperState(*([None] * num_fields))
new_state = state.clone(time=1)
self.assertEqual(state.time, None)
self.assertEqual(new_state.time, 1)
  def testAttentionWrapperStateShapePropagation(self):
batch_size = 5
max_time = 5
num_units = 5
memory = random_ops.random_uniform(
[batch_size, max_time, num_units], seed=1)
mechanism = wrapper.LuongAttention(num_units, memory)
cell = wrapper.AttentionWrapper(rnn_cell.LSTMCell(num_units), mechanism)
# Create zero state with static batch size.
static_state = cell.zero_state(batch_size, dtypes.float32)
# Create zero state without static batch size.
state = cell.zero_state(array_ops.shape(memory)[0], dtypes.float32)
state = static_state.clone(
cell_state=state.cell_state, attention=state.attention)
self.assertEqual(state.cell_state.c.shape, static_state.cell_state.c.shape)
self.assertEqual(state.cell_state.h.shape, static_state.cell_state.h.shape)
self.assertEqual(state.attention.shape, static_state.attention.shape)
def _testWithAttention(self,
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=3,
alignment_history=False,
expected_final_alignment_history=None,
attention_layer_size=6,
attention_layer=None,
name=''):
attention_layer_sizes = (
[attention_layer_size] if attention_layer_size is not None else None)
attention_layers = (
[attention_layer] if attention_layer is not None else None)
self._testWithMaybeMultiAttention(
is_multi=False,
create_attention_mechanisms=[create_attention_mechanism],
expected_final_output=expected_final_output,
expected_final_state=expected_final_state,
attention_mechanism_depths=[attention_mechanism_depth],
alignment_history=alignment_history,
expected_final_alignment_history=expected_final_alignment_history,
attention_layer_sizes=attention_layer_sizes,
attention_layers=attention_layers,
name=name)
def _testWithMaybeMultiAttention(self,
is_multi,
create_attention_mechanisms,
expected_final_output,
expected_final_state,
attention_mechanism_depths,
alignment_history=False,
expected_final_alignment_history=None,
attention_layer_sizes=None,
attention_layers=None,
name=''):
# Allow is_multi to be True with a single mechanism to enable test for
# passing in a single mechanism in a list.
assert len(create_attention_mechanisms) == 1 or is_multi
encoder_sequence_length = [3, 2, 3, 1, 1]
decoder_sequence_length = [2, 0, 1, 2, 3]
batch_size = 5
encoder_max_time = 8
decoder_max_time = 4
input_depth = 7
encoder_output_depth = 10
cell_depth = 9
if attention_layer_sizes is not None:
# Compute sum of attention_layer_sizes. Use encoder_output_depth if None.
attention_depth = sum(attention_layer_size or encoder_output_depth
for attention_layer_size in attention_layer_sizes)
elif attention_layers is not None:
# Compute sum of attention_layers output depth.
attention_depth = sum(
attention_layer.compute_output_shape(
[batch_size, cell_depth + encoder_output_depth]).dims[-1].value
for attention_layer in attention_layers)
else:
attention_depth = encoder_output_depth * len(create_attention_mechanisms)
decoder_inputs = array_ops.placeholder_with_default(
np.random.randn(batch_size, decoder_max_time,
input_depth).astype(np.float32),
shape=(None, None, input_depth))
encoder_outputs = array_ops.placeholder_with_default(
np.random.randn(batch_size, encoder_max_time,
encoder_output_depth).astype(np.float32),
shape=(None, None, encoder_output_depth))
attention_mechanisms = [
creator(num_units=depth,
memory=encoder_outputs,
memory_sequence_length=encoder_sequence_length)
for creator, depth in zip(create_attention_mechanisms,
attention_mechanism_depths)]
with self.session(use_gpu=True) as sess:
with vs.variable_scope(
'root',
initializer=init_ops.random_normal_initializer(stddev=0.01, seed=3)):
attention_layer_size = attention_layer_sizes
attention_layer = attention_layers
if not is_multi:
if attention_layer_size is not None:
attention_layer_size = attention_layer_size[0]
if attention_layer is not None:
attention_layer = attention_layer[0]
cell = rnn_cell.LSTMCell(cell_depth)
cell = wrapper.AttentionWrapper(
cell,
attention_mechanisms if is_multi else attention_mechanisms[0],
attention_layer_size=attention_layer_size,
alignment_history=alignment_history,
attention_layer=attention_layer)
helper = helper_py.TrainingHelper(decoder_inputs,
decoder_sequence_length)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dtypes.float32, batch_size=batch_size))
final_outputs, final_state, _ = decoder.dynamic_decode(my_decoder)
self.assertTrue(
isinstance(final_outputs, basic_decoder.BasicDecoderOutput))
self.assertTrue(
isinstance(final_state, wrapper.AttentionWrapperState))
self.assertTrue(
isinstance(final_state.cell_state, rnn_cell.LSTMStateTuple))
self.assertEqual((batch_size, None, attention_depth),
tuple(final_outputs.rnn_output.get_shape().as_list()))
self.assertEqual((batch_size, None),
tuple(final_outputs.sample_id.get_shape().as_list()))
self.assertEqual((batch_size, attention_depth),
tuple(final_state.attention.get_shape().as_list()))
self.assertEqual((batch_size, cell_depth),
tuple(final_state.cell_state.c.get_shape().as_list()))
self.assertEqual((batch_size, cell_depth),
tuple(final_state.cell_state.h.get_shape().as_list()))
if alignment_history:
if is_multi:
state_alignment_history = []
for history_array in final_state.alignment_history:
history = history_array.stack()
self.assertEqual(
(None, batch_size, None),
tuple(history.get_shape().as_list()))
state_alignment_history.append(history)
state_alignment_history = tuple(state_alignment_history)
else:
state_alignment_history = final_state.alignment_history.stack()
self.assertEqual(
(None, batch_size, None),
tuple(state_alignment_history.get_shape().as_list()))
nest.assert_same_structure(
cell.state_size,
cell.zero_state(batch_size, dtypes.float32))
# Remove the history from final_state for purposes of the
# remainder of the tests.
final_state = final_state._replace(alignment_history=()) # pylint: disable=protected-access
else:
state_alignment_history = ()
sess.run(variables.global_variables_initializer())
sess_results = sess.run({
'final_outputs': final_outputs,
'final_state': final_state,
'state_alignment_history': state_alignment_history,
})
final_output_info = nest.map_structure(get_result_summary,
sess_results['final_outputs'])
final_state_info = nest.map_structure(get_result_summary,
sess_results['final_state'])
print(name)
print('Copy/paste:\nexpected_final_output = %s' % str(final_output_info))
print('expected_final_state = %s' % str(final_state_info))
nest.map_structure(self.assertAllCloseOrEqual, expected_final_output,
final_output_info)
nest.map_structure(self.assertAllCloseOrEqual, expected_final_state,
final_state_info)
if alignment_history: # by default, the wrapper emits attention as output
final_alignment_history_info = nest.map_structure(
get_result_summary, sess_results['state_alignment_history'])
print('expected_final_alignment_history = %s' %
str(final_alignment_history_info))
nest.map_structure(
self.assertAllCloseOrEqual,
# outputs are batch major but the stacked TensorArray is time major
expected_final_alignment_history,
final_alignment_history_info)
def testBahdanauNormalizedDType(self):
for dtype in [np.float16, np.float32, np.float64]:
num_units = 128
encoder_outputs = array_ops.placeholder(dtype, shape=[64, None, 256])
encoder_sequence_length = array_ops.placeholder(dtypes.int32, shape=[64])
decoder_inputs = array_ops.placeholder(dtype, shape=[64, None, 128])
decoder_sequence_length = array_ops.placeholder(dtypes.int32, shape=[64])
batch_size = 64
attention_mechanism = wrapper.BahdanauAttention(
num_units=num_units,
memory=encoder_outputs,
memory_sequence_length=encoder_sequence_length,
normalize=True,
dtype=dtype,
)
cell = rnn_cell.LSTMCell(num_units)
cell = wrapper.AttentionWrapper(cell, attention_mechanism)
helper = helper_py.TrainingHelper(decoder_inputs,
decoder_sequence_length)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dtype, batch_size=batch_size))
final_outputs, final_state, _ = decoder.dynamic_decode(my_decoder)
self.assertTrue(
isinstance(final_outputs, basic_decoder.BasicDecoderOutput))
self.assertEqual(final_outputs.rnn_output.dtype, dtype)
self.assertTrue(
isinstance(final_state, wrapper.AttentionWrapperState))
self.assertTrue(
isinstance(final_state.cell_state, rnn_cell.LSTMStateTuple))
def testBahdanauNotNormalized(self):
create_attention_mechanism = wrapper.BahdanauAttention
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0052250605),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.4))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040092287),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0020015112)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-0.0052052638),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=dtype('float32'), mean=0.12500001)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testBahdanauNotNormalized')
def testBahdanauNormalized(self):
create_attention_mechanism = functools.partial(
wrapper.BahdanauAttention, normalize=True)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.00597103),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.4))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040052128),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019996136)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-0.00595117),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
alignment_history=())
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
name='testBahdanauNormalized')
def testLuongNotNormalized(self):
create_attention_mechanism = wrapper.LuongAttention
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0052615386),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.4))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.004009536),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0020016613)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-0.0051812846),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
alignment_history=())
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=9,
name='testLuongNotNormalized')
def testLuongScaledDType(self):
# Test case for GitHub issue 18099
for dt in [np.float16, np.float32, np.float64]:
num_units = 128
encoder_outputs = array_ops.placeholder(dt, shape=[64, None, 256])
encoder_sequence_length = array_ops.placeholder(dtypes.int32, shape=[64])
decoder_inputs = array_ops.placeholder(dt, shape=[64, None, 128])
decoder_sequence_length = array_ops.placeholder(dtypes.int32, shape=[64])
batch_size = 64
attention_mechanism = wrapper.LuongAttention(
num_units=num_units,
memory=encoder_outputs,
memory_sequence_length=encoder_sequence_length,
scale=True,
dtype=dt,
)
cell = rnn_cell.LSTMCell(num_units)
cell = wrapper.AttentionWrapper(cell, attention_mechanism)
helper = helper_py.TrainingHelper(decoder_inputs,
decoder_sequence_length)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dt, batch_size=batch_size))
final_outputs, final_state, _ = decoder.dynamic_decode(my_decoder)
self.assertTrue(
isinstance(final_outputs, basic_decoder.BasicDecoderOutput))
self.assertEqual(final_outputs.rnn_output.dtype, dt)
self.assertTrue(
isinstance(final_state, wrapper.AttentionWrapperState))
self.assertTrue(
isinstance(final_state.cell_state, rnn_cell.LSTMStateTuple))
def testLuongScaled(self):
create_attention_mechanism = functools.partial(
wrapper.LuongAttention, scale=True)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0052615386),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.4))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.004009536),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0020016613)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-0.0051812846),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
alignment_history=())
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=9,
name='testLuongScaled')
def testNotUseAttentionLayer(self):
create_attention_mechanism = wrapper.BahdanauAttention
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 10), dtype=dtype('float32'), mean=0.117389656),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=4.5999999999999996))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0063607907),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.00323448)),
attention=ResultSummary(
shape=(5, 10), dtype=dtype('float32'), mean=0.117389656,),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
alignment_history=())
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_layer_size=None,
name='testNotUseAttentionLayer')
def test_safe_cumprod(self):
# Create some random test input
test_input = np.random.uniform(size=(10, 20))
for axis in [0, 1]:
for exclusive in [True, False]:
with self.cached_session():
# Compute cumprod with regular tf.cumprod
cumprod_output = math_ops.cumprod(
test_input, axis=axis, exclusive=exclusive).eval()
# Compute cumprod with safe_cumprod
safe_cumprod_output = wrapper.safe_cumprod(
test_input, axis=axis, exclusive=exclusive).eval()
for x, y in zip(cumprod_output.shape, safe_cumprod_output.shape):
self.assertEqual(x, y)
for x, y in zip(cumprod_output.flatten(),
safe_cumprod_output.flatten()):
# Use assertAlmostEqual for the actual values due to floating point
self.assertAlmostEqual(x, y, places=5)
def test_monotonic_attention(self):
def monotonic_attention_explicit(p_choose_i, previous_attention):
"""Explicitly compute monotonic attention distribution using numpy."""
# Base case for recurrence relation
out = [previous_attention[0]]
# Explicitly follow the recurrence relation
for j in range(1, p_choose_i.shape[0]):
out.append((1 - p_choose_i[j - 1])*out[j - 1] + previous_attention[j])
return p_choose_i*np.array(out)
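    # In equation form, the explicit computation above is
    #   q_0 = a_0,    q_j = (1 - p_{j-1}) * q_{j-1} + a_j    for j >= 1,
    #   attention_j = p_j * q_j,
    # where p is p_choose_i and a is previous_attention.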
# Generate a random batch of choosing probabilities for seq. len. 20
p_choose_i = np.random.uniform(size=(10, 20)).astype(np.float32)
# Generate random previous attention distributions
previous_attention = np.random.uniform(size=(10, 20)).astype(np.float32)
previous_attention /= previous_attention.sum(axis=1).reshape((-1, 1))
# Create the output to test against
explicit_output = np.array([
monotonic_attention_explicit(p, a)
for p, a in zip(p_choose_i, previous_attention)])
# Compute output with TensorFlow function, for both calculation types
with self.cached_session():
recursive_output = wrapper.monotonic_attention(
p_choose_i, previous_attention, 'recursive').eval()
self.assertEqual(recursive_output.ndim, explicit_output.ndim)
for x, y in zip(recursive_output.shape, explicit_output.shape):
self.assertEqual(x, y)
for x, y in zip(recursive_output.flatten(), explicit_output.flatten()):
# Use assertAlmostEqual for the actual values due to floating point
self.assertAlmostEqual(x, y, places=5)
# Generate new p_choose_i for parallel, which is unstable when p_choose_i[n]
# is close to 1
p_choose_i = np.random.uniform(0, 0.9, size=(10, 20)).astype(np.float32)
# Create new output to test against
explicit_output = np.array([
monotonic_attention_explicit(p, a)
for p, a in zip(p_choose_i, previous_attention)])
# Compute output with TensorFlow function, for both calculation types
with self.cached_session():
parallel_output = wrapper.monotonic_attention(
p_choose_i, previous_attention, 'parallel').eval()
self.assertEqual(parallel_output.ndim, explicit_output.ndim)
for x, y in zip(parallel_output.shape, explicit_output.shape):
self.assertEqual(x, y)
for x, y in zip(parallel_output.flatten(), explicit_output.flatten()):
# Use assertAlmostEqual for the actual values due to floating point
self.assertAlmostEqual(x, y, places=5)
# Now, test hard mode, where probabilities must be 0 or 1
p_choose_i = np.random.choice(np.array([0, 1], np.float32), (10, 20))
previous_attention = np.zeros((10, 20), np.float32)
# Randomly choose input sequence indices at each timestep
random_idx = np.random.randint(0, previous_attention.shape[1],
previous_attention.shape[0])
previous_attention[np.arange(previous_attention.shape[0]), random_idx] = 1
# Create the output to test against
explicit_output = np.array([
monotonic_attention_explicit(p, a)
for p, a in zip(p_choose_i, previous_attention)])
# Compute output with TensorFlow function, for both calculation types
with self.cached_session():
hard_output = wrapper.monotonic_attention(
# TensorFlow is unhappy when these are not wrapped as tf.constant
constant_op.constant(p_choose_i),
constant_op.constant(previous_attention),
'hard').eval()
self.assertEqual(hard_output.ndim, explicit_output.ndim)
for x, y in zip(hard_output.shape, explicit_output.shape):
self.assertEqual(x, y)
for x, y in zip(hard_output.flatten(), explicit_output.flatten()):
# Use assertAlmostEqual for the actual values due to floating point
self.assertAlmostEqual(x, y, places=5)
# Now, test recursively computing attention distributions vs. sampling
def sample(p_choose_i):
"""Generate a sequence of emit-ingest decisions from p_choose_i."""
output = np.zeros(p_choose_i.shape)
t_im1 = 0
for i in range(p_choose_i.shape[0]):
for j in range(t_im1, p_choose_i.shape[1]):
if np.random.uniform() <= p_choose_i[i, j]:
output[i, j] = 1
t_im1 = j
break
else:
t_im1 = p_choose_i.shape[1]
return output
# Now, the first axis is output timestep and second is input timestep
p_choose_i = np.random.uniform(size=(4, 5)).astype(np.float32)
# Generate the average of a bunch of samples
n_samples = 100000
sampled_output = np.mean(
[sample(p_choose_i) for _ in range(n_samples)], axis=0)
# Create initial previous_attention base case
recursive_output = [np.array([1] + [0]*(p_choose_i.shape[1] - 1),
np.float32)]
# Compute output with TensorFlow function, for both calculation types
with self.cached_session():
for j in range(p_choose_i.shape[0]):
# Compute attention distribution for this output time step
recursive_output.append(wrapper.monotonic_attention(
# newaxis is for adding the expected batch dimension
p_choose_i[j][np.newaxis],
recursive_output[-1][np.newaxis], 'recursive').eval()[0])
# Stack together distributions; remove basecase
recursive_output = np.array(recursive_output[1:])
self.assertEqual(recursive_output.ndim, sampled_output.ndim)
for x, y in zip(recursive_output.shape, sampled_output.shape):
self.assertEqual(x, y)
for x, y in zip(recursive_output.flatten(), sampled_output.flatten()):
# Use a very forgiving threshold since we are sampling
self.assertAlmostEqual(x, y, places=2)
def testBahdanauMonotonicNotNormalized(self):
create_attention_mechanism = functools.partial(
wrapper.BahdanauMonotonicAttention, sigmoid_noise=1.0,
sigmoid_noise_seed=3)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.002122893),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.7333333333333334))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040002423),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019968653)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-5.9313523e-05),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.032228071),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.032228071),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=dtype('float32'), mean=0.050430927)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testBahdanauMonotonicNotNormalized')
def testBahdanauMonotonicNormalized(self):
create_attention_mechanism = functools.partial(
wrapper.BahdanauMonotonicAttention, normalize=True,
sigmoid_noise=1.0, sigmoid_noise_seed=3)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0025896581),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.73333333))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040013152),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019973689)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-0.00069823361),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.029914695),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.029914695),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=dtype('float32'), mean=0.0465225502849)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testBahdanauMonotonicNormalized')
def testBahdanauMonotonicHard(self):
# Run attention mechanism with mode='hard', make sure probabilities are hard
b, t, u, d = 10, 20, 30, 40
with self.session(use_gpu=True) as sess:
a = wrapper.BahdanauMonotonicAttention(
d,
random_ops.random_normal((b, t, u)),
mode='hard')
# Just feed previous attention as [1, 0, 0, ...]
attn, unused_state = a(
random_ops.random_normal((b, d)), array_ops.one_hot([0]*b, t))
sess.run(variables.global_variables_initializer())
attn_out = attn.eval()
# All values should be 0 or 1
self.assertTrue(np.all(np.logical_or(attn_out == 0, attn_out == 1)))
# Sum of distributions should be 0 or 1 (0 when all p_choose_i are 0)
self.assertTrue(np.all(np.logical_or(attn_out.sum(axis=1) == 1,
attn_out.sum(axis=1) == 0)))
def testLuongMonotonicNotNormalized(self):
create_attention_mechanism = functools.partial(
wrapper.LuongMonotonicAttention, sigmoid_noise=1.0,
sigmoid_noise_seed=3)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0021257224),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.7333333333333334))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040003359),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.001996913)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-5.2024145e-05),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.032198936),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.032198936),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=dtype('float32'), mean=0.050387777)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=9,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testLuongMonotonicNotNormalized')
def testLuongMonotonicScaled(self):
create_attention_mechanism = functools.partial(
wrapper.LuongMonotonicAttention, scale=True, sigmoid_noise=1.0,
sigmoid_noise_seed=3)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0021257224),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.7333333333333334))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040003359),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.001996913)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-5.2024145e-05),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.032198936),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.032198936),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=dtype('float32'), mean=0.050387777)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=9,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testLuongMonotonicScaled')
def testMultiAttention(self):
create_attention_mechanisms = (
wrapper.BahdanauAttention, wrapper.LuongAttention)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 7), dtype=dtype('float32'), mean=0.0011709079),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=3.2000000000000002))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0038725811),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019329828)),
attention=ResultSummary(
shape=(5, 7), dtype=dtype('float32'), mean=0.001174294),
time=3,
alignments=(
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125)),
attention_state=(
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125)),
alignment_history=())
expected_final_alignment_history = (
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125))
self._testWithMaybeMultiAttention(
True,
create_attention_mechanisms,
expected_final_output,
expected_final_state,
attention_mechanism_depths=[9, 9],
attention_layer_sizes=[3, 4],
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testMultiAttention')
def testMultiAttentionWithLayerInstances(self):
create_attention_mechanisms = (
wrapper.BahdanauAttention, wrapper.LuongAttention)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 7), dtype=dtype('float32'), mean=0.0011709079),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=3.2000000000000002))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0038725811),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019329828)),
attention=ResultSummary(
shape=(5, 7), dtype=dtype('float32'), mean=0.001174294),
time=3,
alignments=(
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125)),
attention_state=(
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125)),
alignment_history=())
expected_final_alignment_history = (
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125))
self._testWithMaybeMultiAttention(
True,
create_attention_mechanisms,
expected_final_output,
expected_final_state,
attention_mechanism_depths=[9, 9],
attention_layers=[layers_core.Dense(3, use_bias=False),
layers_core.Dense(4, use_bias=False)],
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testMultiAttention')
def testLuongMonotonicHard(self):
# Run attention mechanism with mode='hard', make sure probabilities are hard
b, t, u, d = 10, 20, 30, 40
with self.session(use_gpu=True) as sess:
a = wrapper.LuongMonotonicAttention(
d,
random_ops.random_normal((b, t, u)),
mode='hard')
# Just feed previous attention as [1, 0, 0, ...]
attn, unused_state = a(
random_ops.random_normal((b, d)), array_ops.one_hot([0]*b, t))
sess.run(variables.global_variables_initializer())
attn_out = attn.eval()
# All values should be 0 or 1
self.assertTrue(np.all(np.logical_or(attn_out == 0, attn_out == 1)))
# Sum of distributions should be 0 or 1 (0 when all p_choose_i are 0)
self.assertTrue(np.all(np.logical_or(attn_out.sum(axis=1) == 1,
attn_out.sum(axis=1) == 0)))
def testMultiAttentionNoAttentionLayer(self):
create_attention_mechanisms = (
wrapper.BahdanauAttention, wrapper.LuongAttention)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 20), dtype=dtype('float32'), mean=0.115853324533),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=8.6))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.003545674),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0018327223)),
attention=ResultSummary(
shape=(5, 20), dtype=dtype('float32'), mean=0.11462739855),
time=3,
alignments=(ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125)),
alignment_history=(),
attention_state=(ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125)))
expected_final_alignment_history = (
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125))
self._testWithMaybeMultiAttention(
is_multi=True,
create_attention_mechanisms=create_attention_mechanisms,
expected_final_output=expected_final_output,
expected_final_state=expected_final_state,
attention_mechanism_depths=[9, 9],
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testMultiAttention')
def testSingleAttentionAsList(self):
create_attention_mechanisms = [wrapper.BahdanauAttention]
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 3), dtype=dtype('float32'), mean=-0.0098485695),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.8))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040023471),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019979973)),
attention=ResultSummary(
shape=(5, 3), dtype=dtype('float32'), mean=-0.0098808752),
time=3,
alignments=(
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125),),
attention_state=(
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125),),
alignment_history=())
expected_final_alignment_history = (
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125),)
self._testWithMaybeMultiAttention(
is_multi=True, # pass the AttentionMechanism wrapped in a list
create_attention_mechanisms=create_attention_mechanisms,
expected_final_output=expected_final_output,
expected_final_state=expected_final_state,
attention_mechanism_depths=[9],
attention_layer_sizes=[3],
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testMultiAttention')
def testCustomizedAttention(self):
batch_size = 2
max_time = 3
num_units = 2
memory = constant_op.constant([[[1., 1.], [2., 2.], [3., 3.]],
[[4., 4.], [5., 5.], [6., 6.]]])
memory_sequence_length = constant_op.constant([3, 2])
attention_mechanism = wrapper.BahdanauAttention(num_units, memory,
memory_sequence_length)
# Sets all returned values to be all ones.
def _customized_attention(unused_attention_mechanism, unused_cell_output,
unused_attention_state, unused_attention_layer):
"""Customized attention.
Returns:
attention: `Tensor` of shape [batch_size, num_units], attention output.
alignments: `Tensor` of shape [batch_size, max_time], sigma value for
each input memory (prob. function of input keys).
next_attention_state: A `Tensor` representing the next state for the
attention.
"""
attention = array_ops.ones([batch_size, num_units])
alignments = array_ops.ones([batch_size, max_time])
next_attention_state = alignments
return attention, alignments, next_attention_state
attention_cell = wrapper.AttentionWrapper(
rnn_cell.LSTMCell(2),
attention_mechanism,
attention_layer_size=None, # don't use attention layer.
output_attention=False,
alignment_history=(),
attention_fn=_customized_attention,
name='attention')
self.assertEqual(num_units, attention_cell.output_size)
initial_state = attention_cell.zero_state(
batch_size=2, dtype=dtypes.float32)
source_input_emb = array_ops.ones([2, 3, 2])
source_input_length = constant_op.constant([3, 2])
# 'state' is a tuple of
# (cell_state, h, attention, alignments, alignment_history, attention_state)
output, state = rnn.dynamic_rnn(
attention_cell,
inputs=source_input_emb,
sequence_length=source_input_length,
initial_state=initial_state,
dtype=dtypes.float32)
with self.session() as sess:
sess.run(variables.global_variables_initializer())
output_value, state_value = sess.run([output, state], feed_dict={})
self.assertAllEqual(np.array([2, 3, 2]), output_value.shape)
self.assertAllClose(np.array([[1., 1.], [1., 1.]]), state_value.attention)
self.assertAllClose(
np.array([[1., 1., 1.], [1., 1., 1.]]), state_value.alignments)
self.assertAllClose(
np.array([[1., 1., 1.], [1., 1., 1.]]), state_value.attention_state)
if __name__ == '__main__':
test.main()
|
the-stack_0_14884 | # Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Training/decoding definition for the speech translation task."""
import itertools
import json
import logging
import os
from chainer import training
from chainer.training import extensions
import numpy as np
import torch
from espnet.asr.asr_utils import adadelta_eps_decay
from espnet.asr.asr_utils import adam_lr_decay
from espnet.asr.asr_utils import add_results_to_json
from espnet.asr.asr_utils import CompareValueTrigger
from espnet.asr.asr_utils import restore_snapshot
from espnet.asr.asr_utils import snapshot_object
from espnet.asr.asr_utils import torch_load
from espnet.asr.asr_utils import torch_resume
from espnet.asr.asr_utils import torch_snapshot
from espnet.asr.pytorch_backend.asr_init import load_trained_model
from espnet.asr.pytorch_backend.asr_init import load_trained_modules
from espnet.nets.pytorch_backend.e2e_asr import pad_list
from espnet.nets.st_interface import STInterface
from espnet.utils.dataset import ChainerDataLoader
from espnet.utils.dataset import TransformDataset
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.io_utils import LoadInputsAndTargets
from espnet.utils.training.batchfy import make_batchset
from espnet.utils.training.iterators import ShufflingEnabler
from espnet.utils.training.tensorboard_logger import TensorboardLogger
from espnet.utils.training.train_utils import check_early_stop
from espnet.utils.training.train_utils import set_early_stop
from espnet.asr.pytorch_backend.asr import CustomConverter as ASRCustomConverter
from espnet.asr.pytorch_backend.asr import CustomEvaluator
from espnet.asr.pytorch_backend.asr import CustomUpdater
class CustomConverter(ASRCustomConverter):
"""Custom batch converter for Pytorch.
Args:
subsampling_factor (int): The subsampling factor.
dtype (torch.dtype): Data type to convert.
use_source_text (bool): use source transcription.
"""
def __init__(
self, subsampling_factor=1, dtype=torch.float32, use_source_text=False
):
"""Construct a CustomConverter object."""
super().__init__(subsampling_factor=subsampling_factor, dtype=dtype)
self.use_source_text = use_source_text
def __call__(self, batch, device=torch.device("cpu")):
"""Transform a batch and send it to a device.
Args:
batch (list): The batch to transform.
device (torch.device): The device to send to.
Returns:
tuple(torch.Tensor, torch.Tensor, torch.Tensor)
"""
        # the incoming batch is wrapped in a single-element list
assert len(batch) == 1
xs, ys, ys_src = batch[0]
# get batch of lengths of input sequences
ilens = np.array([x.shape[0] for x in xs])
ilens = torch.from_numpy(ilens).to(device)
xs_pad = pad_list([torch.from_numpy(x).float() for x in xs], 0).to(
device, dtype=self.dtype
)
ys_pad = pad_list(
[torch.from_numpy(np.array(y, dtype=np.int64)) for y in ys],
self.ignore_id,
).to(device)
if self.use_source_text:
ys_pad_src = pad_list(
[torch.from_numpy(np.array(y, dtype=np.int64)) for y in ys_src],
self.ignore_id,
).to(device)
else:
ys_pad_src = None
return xs_pad, ilens, ys_pad, ys_pad_src
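    # Illustrative usage sketch (not from the original ESPnet source): the converter
    # receives one pre-built minibatch and pads it into tensors. The feature
    # dimension (83) and token ids below are made-up values for demonstration only:
    #
    #   converter = CustomConverter(subsampling_factor=1, dtype=torch.float32)
    #   xs = [np.random.randn(7, 83).astype(np.float32)]  # one utterance, 83-dim feats
    #   ys = [np.array([5, 2, 9])]                         # target token ids
    #   xs_pad, ilens, ys_pad, ys_pad_src = converter([(xs, ys, [None])])
    #   # xs_pad: (1, 7, 83), ilens: (1,), ys_pad: (1, 3), ys_pad_src: None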
def train(args):
"""Train with the given args.
Args:
args (namespace): The program arguments.
"""
set_deterministic_pytorch(args)
# check cuda availability
if not torch.cuda.is_available():
logging.warning("cuda is not available")
# get input and output dimension info
with open(args.valid_json, "rb") as f:
valid_json = json.load(f)["utts"]
utts = list(valid_json.keys())
idim = int(valid_json[utts[0]]["input"][0]["shape"][-1])
odim = int(valid_json[utts[0]]["output"][0]["shape"][-1])
logging.info("#input dims : " + str(idim))
logging.info("#output dims: " + str(odim))
# Initialize with pre-trained ASR encoder and MT decoder
if args.enc_init is not None or args.dec_init is not None:
model = load_trained_modules(idim, odim, args, interface=STInterface)
else:
model_class = dynamic_import(args.model_module)
model = model_class(idim, odim, args)
assert isinstance(model, STInterface)
total_subsampling_factor = model.get_total_subsampling_factor()
# write model config
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
model_conf = args.outdir + "/model.json"
with open(model_conf, "wb") as f:
logging.info("writing a model config file to " + model_conf)
f.write(
json.dumps(
(idim, odim, vars(args)), indent=4, ensure_ascii=False, sort_keys=True
).encode("utf_8")
)
for key in sorted(vars(args).keys()):
logging.info("ARGS: " + key + ": " + str(vars(args)[key]))
reporter = model.reporter
# check the use of multi-gpu
if args.ngpu > 1:
if args.batch_size != 0:
logging.warning(
"batch size is automatically increased (%d -> %d)"
% (args.batch_size, args.batch_size * args.ngpu)
)
args.batch_size *= args.ngpu
# set torch device
device = torch.device("cuda" if args.ngpu > 0 else "cpu")
if args.train_dtype in ("float16", "float32", "float64"):
dtype = getattr(torch, args.train_dtype)
else:
dtype = torch.float32
model = model.to(device=device, dtype=dtype)
logging.warning(
"num. model params: {:,} (num. trained: {:,} ({:.1f}%))".format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
sum(p.numel() for p in model.parameters() if p.requires_grad)
* 100.0
/ sum(p.numel() for p in model.parameters()),
)
)
# Setup an optimizer
if args.opt == "adadelta":
optimizer = torch.optim.Adadelta(
model.parameters(), rho=0.95, eps=args.eps, weight_decay=args.weight_decay
)
elif args.opt == "adam":
optimizer = torch.optim.Adam(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
elif args.opt == "noam":
from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt
optimizer = get_std_opt(
model.parameters(),
args.adim,
args.transformer_warmup_steps,
args.transformer_lr,
)
else:
raise NotImplementedError("unknown optimizer: " + args.opt)
# setup apex.amp
if args.train_dtype in ("O0", "O1", "O2", "O3"):
try:
from apex import amp
except ImportError as e:
logging.error(
f"You need to install apex for --train-dtype {args.train_dtype}. "
"See https://github.com/NVIDIA/apex#linux"
)
raise e
if args.opt == "noam":
model, optimizer.optimizer = amp.initialize(
model, optimizer.optimizer, opt_level=args.train_dtype
)
else:
model, optimizer = amp.initialize(
model, optimizer, opt_level=args.train_dtype
)
use_apex = True
else:
use_apex = False
# FIXME: TOO DIRTY HACK
setattr(optimizer, "target", reporter)
setattr(optimizer, "serialize", lambda s: reporter.serialize(s))
# Setup a converter
converter = CustomConverter(
subsampling_factor=model.subsample[0],
dtype=dtype,
use_source_text=args.asr_weight > 0 or args.mt_weight > 0,
)
# read json data
with open(args.train_json, "rb") as f:
train_json = json.load(f)["utts"]
with open(args.valid_json, "rb") as f:
valid_json = json.load(f)["utts"]
use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0
# make minibatch list (variable length)
train = make_batchset(
train_json,
args.batch_size,
args.maxlen_in,
args.maxlen_out,
args.minibatches,
min_batch_size=args.ngpu if args.ngpu > 1 else 1,
shortest_first=use_sortagrad,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout,
iaxis=0,
oaxis=0,
)
valid = make_batchset(
valid_json,
args.batch_size,
args.maxlen_in,
args.maxlen_out,
args.minibatches,
min_batch_size=args.ngpu if args.ngpu > 1 else 1,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout,
iaxis=0,
oaxis=0,
)
load_tr = LoadInputsAndTargets(
mode="asr",
load_output=True,
preprocess_conf=args.preprocess_conf,
preprocess_args={"train": True}, # Switch the mode of preprocessing
)
load_cv = LoadInputsAndTargets(
mode="asr",
load_output=True,
preprocess_conf=args.preprocess_conf,
preprocess_args={"train": False}, # Switch the mode of preprocessing
)
    # hack: keep the DataLoader batch_size argument at 1, because the actual
    # batch size is already encoded in each pre-built minibatch.
    # The default collate function would convert numpy arrays to pytorch tensors;
    # here collate_fn simply unwraps the single-element list instead.
train_iter = ChainerDataLoader(
dataset=TransformDataset(train, lambda data: converter([load_tr(data)])),
batch_size=1,
num_workers=args.n_iter_processes,
shuffle=not use_sortagrad,
collate_fn=lambda x: x[0],
)
valid_iter = ChainerDataLoader(
dataset=TransformDataset(valid, lambda data: converter([load_cv(data)])),
batch_size=1,
shuffle=False,
collate_fn=lambda x: x[0],
num_workers=args.n_iter_processes,
)
# Set up a trainer
updater = CustomUpdater(
model,
args.grad_clip,
{"main": train_iter},
optimizer,
device,
args.ngpu,
args.grad_noise,
args.accum_grad,
use_apex=use_apex,
)
trainer = training.Trainer(updater, (args.epochs, "epoch"), out=args.outdir)
if use_sortagrad:
trainer.extend(
ShufflingEnabler([train_iter]),
trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, "epoch"),
)
# Resume from a snapshot
if args.resume:
logging.info("resumed from %s" % args.resume)
torch_resume(args.resume, trainer)
# Evaluate the model with the test dataset for each epoch
if args.save_interval_iters > 0:
trainer.extend(
CustomEvaluator(model, {"main": valid_iter}, reporter, device, args.ngpu),
trigger=(args.save_interval_iters, "iteration"),
)
else:
trainer.extend(
CustomEvaluator(model, {"main": valid_iter}, reporter, device, args.ngpu)
)
# Save attention weight at each epoch
if args.num_save_attention > 0:
data = sorted(
list(valid_json.items())[: args.num_save_attention],
key=lambda x: int(x[1]["input"][0]["shape"][1]),
reverse=True,
)
if hasattr(model, "module"):
att_vis_fn = model.module.calculate_all_attentions
plot_class = model.module.attention_plot_class
else:
att_vis_fn = model.calculate_all_attentions
plot_class = model.attention_plot_class
att_reporter = plot_class(
att_vis_fn,
data,
args.outdir + "/att_ws",
converter=converter,
transform=load_cv,
device=device,
subsampling_factor=total_subsampling_factor,
)
trainer.extend(att_reporter, trigger=(1, "epoch"))
else:
att_reporter = None
# Save CTC prob at each epoch
if (args.asr_weight > 0 and args.mtlalpha > 0) and args.num_save_ctc > 0:
# NOTE: sort it by output lengths
data = sorted(
list(valid_json.items())[: args.num_save_ctc],
key=lambda x: int(x[1]["output"][0]["shape"][0]),
reverse=True,
)
if hasattr(model, "module"):
ctc_vis_fn = model.module.calculate_all_ctc_probs
plot_class = model.module.ctc_plot_class
else:
ctc_vis_fn = model.calculate_all_ctc_probs
plot_class = model.ctc_plot_class
ctc_reporter = plot_class(
ctc_vis_fn,
data,
args.outdir + "/ctc_prob",
converter=converter,
transform=load_cv,
device=device,
subsampling_factor=total_subsampling_factor,
)
trainer.extend(ctc_reporter, trigger=(1, "epoch"))
else:
ctc_reporter = None
# Make a plot for training and validation values
trainer.extend(
extensions.PlotReport(
[
"main/loss",
"validation/main/loss",
"main/loss_asr",
"validation/main/loss_asr",
"main/loss_mt",
"validation/main/loss_mt",
"main/loss_st",
"validation/main/loss_st",
],
"epoch",
file_name="loss.png",
)
)
trainer.extend(
extensions.PlotReport(
[
"main/acc",
"validation/main/acc",
"main/acc_asr",
"validation/main/acc_asr",
"main/acc_mt",
"validation/main/acc_mt",
],
"epoch",
file_name="acc.png",
)
)
trainer.extend(
extensions.PlotReport(
["main/bleu", "validation/main/bleu"], "epoch", file_name="bleu.png"
)
)
# Save best models
trainer.extend(
snapshot_object(model, "model.loss.best"),
trigger=training.triggers.MinValueTrigger("validation/main/loss"),
)
trainer.extend(
snapshot_object(model, "model.acc.best"),
trigger=training.triggers.MaxValueTrigger("validation/main/acc"),
)
# save snapshot which contains model and optimizer states
if args.save_interval_iters > 0:
trainer.extend(
torch_snapshot(filename="snapshot.iter.{.updater.iteration}"),
trigger=(args.save_interval_iters, "iteration"),
)
else:
trainer.extend(torch_snapshot(), trigger=(1, "epoch"))
# epsilon decay in the optimizer
if args.opt == "adadelta":
if args.criterion == "acc":
trainer.extend(
restore_snapshot(
model, args.outdir + "/model.acc.best", load_fn=torch_load
),
trigger=CompareValueTrigger(
"validation/main/acc",
lambda best_value, current_value: best_value > current_value,
),
)
trainer.extend(
adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
"validation/main/acc",
lambda best_value, current_value: best_value > current_value,
),
)
elif args.criterion == "loss":
trainer.extend(
restore_snapshot(
model, args.outdir + "/model.loss.best", load_fn=torch_load
),
trigger=CompareValueTrigger(
"validation/main/loss",
lambda best_value, current_value: best_value < current_value,
),
)
trainer.extend(
adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
"validation/main/loss",
lambda best_value, current_value: best_value < current_value,
),
)
elif args.opt == "adam":
if args.criterion == "acc":
trainer.extend(
restore_snapshot(
model, args.outdir + "/model.acc.best", load_fn=torch_load
),
trigger=CompareValueTrigger(
"validation/main/acc",
lambda best_value, current_value: best_value > current_value,
),
)
trainer.extend(
adam_lr_decay(args.lr_decay),
trigger=CompareValueTrigger(
"validation/main/acc",
lambda best_value, current_value: best_value > current_value,
),
)
elif args.criterion == "loss":
trainer.extend(
restore_snapshot(
model, args.outdir + "/model.loss.best", load_fn=torch_load
),
trigger=CompareValueTrigger(
"validation/main/loss",
lambda best_value, current_value: best_value < current_value,
),
)
trainer.extend(
adam_lr_decay(args.lr_decay),
trigger=CompareValueTrigger(
"validation/main/loss",
lambda best_value, current_value: best_value < current_value,
),
)
# Write a log of evaluation statistics for each epoch
trainer.extend(
extensions.LogReport(trigger=(args.report_interval_iters, "iteration"))
)
report_keys = [
"epoch",
"iteration",
"main/loss",
"main/loss_st",
"main/loss_asr",
"validation/main/loss",
"validation/main/loss_st",
"validation/main/loss_asr",
"main/acc",
"validation/main/acc",
]
if args.asr_weight > 0:
report_keys.append("main/acc_asr")
report_keys.append("validation/main/acc_asr")
report_keys += ["elapsed_time"]
if args.opt == "adadelta":
trainer.extend(
extensions.observe_value(
"eps",
lambda trainer: trainer.updater.get_optimizer("main").param_groups[0][
"eps"
],
),
trigger=(args.report_interval_iters, "iteration"),
)
report_keys.append("eps")
elif args.opt in ["adam", "noam"]:
trainer.extend(
extensions.observe_value(
"lr",
lambda trainer: trainer.updater.get_optimizer("main").param_groups[0][
"lr"
],
),
trigger=(args.report_interval_iters, "iteration"),
)
report_keys.append("lr")
if args.asr_weight > 0:
if args.mtlalpha > 0:
report_keys.append("main/cer_ctc")
report_keys.append("validation/main/cer_ctc")
if args.mtlalpha < 1:
if args.report_cer:
report_keys.append("validation/main/cer")
if args.report_wer:
report_keys.append("validation/main/wer")
if args.report_bleu:
report_keys.append("main/bleu")
report_keys.append("validation/main/bleu")
trainer.extend(
extensions.PrintReport(report_keys),
trigger=(args.report_interval_iters, "iteration"),
)
trainer.extend(extensions.ProgressBar(update_interval=args.report_interval_iters))
set_early_stop(trainer, args)
if args.tensorboard_dir is not None and args.tensorboard_dir != "":
from torch.utils.tensorboard import SummaryWriter
trainer.extend(
TensorboardLogger(
SummaryWriter(args.tensorboard_dir),
att_reporter=att_reporter,
ctc_reporter=ctc_reporter,
),
trigger=(args.report_interval_iters, "iteration"),
)
# Run the training
trainer.run()
check_early_stop(trainer, args.epochs)
def trans(args):
"""Decode with the given args.
Args:
args (namespace): The program arguments.
"""
set_deterministic_pytorch(args)
model, train_args = load_trained_model(args.model)
assert isinstance(model, STInterface)
model.trans_args = args
# gpu
if args.ngpu == 1:
gpu_id = list(range(args.ngpu))
logging.info("gpu id: " + str(gpu_id))
model.cuda()
# read json data
with open(args.trans_json, "rb") as f:
js = json.load(f)["utts"]
new_js = {}
load_inputs_and_targets = LoadInputsAndTargets(
mode="asr",
load_output=False,
sort_in_input_length=False,
preprocess_conf=train_args.preprocess_conf
if args.preprocess_conf is None
else args.preprocess_conf,
preprocess_args={"train": False},
)
if args.batchsize == 0:
with torch.no_grad():
for idx, name in enumerate(js.keys(), 1):
logging.info("(%d/%d) decoding " + name, idx, len(js.keys()))
batch = [(name, js[name])]
feat = load_inputs_and_targets(batch)[0][0]
nbest_hyps = model.translate(
feat,
args,
train_args.char_list,
)
new_js[name] = add_results_to_json(
js[name], nbest_hyps, train_args.char_list
)
else:
def grouper(n, iterable, fillvalue=None):
kargs = [iter(iterable)] * n
return itertools.zip_longest(*kargs, fillvalue=fillvalue)
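        # grouper batches an iterable into fixed-size tuples, padding the tail, e.g.
        #   list(grouper(2, ["utt1", "utt2", "utt3"])) == [("utt1", "utt2"), ("utt3", None)]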
# sort data if batchsize > 1
keys = list(js.keys())
if args.batchsize > 1:
feat_lens = [js[key]["input"][0]["shape"][0] for key in keys]
sorted_index = sorted(range(len(feat_lens)), key=lambda i: -feat_lens[i])
keys = [keys[i] for i in sorted_index]
with torch.no_grad():
for names in grouper(args.batchsize, keys, None):
names = [name for name in names if name]
batch = [(name, js[name]) for name in names]
feats = load_inputs_and_targets(batch)[0]
nbest_hyps = model.translate_batch(
feats,
args,
train_args.char_list,
)
for i, nbest_hyp in enumerate(nbest_hyps):
name = names[i]
new_js[name] = add_results_to_json(
js[name], nbest_hyp, train_args.char_list
)
with open(args.result_label, "wb") as f:
f.write(
json.dumps(
{"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
).encode("utf_8")
)
|
the-stack_0_14886 | import os
import os.path as osp
import argparse
import pickle
import numpy as np
from operator import itemgetter
import re
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-appstr', type=str, default='unknown')
parser.add_argument('-ntask', type=int, default=1, help='number of tasks')
parser.add_argument("-bmin", type=int, default=1, help ='minimum value for bandit budget')
parser.add_argument("-bmax", type=int, default=8, help ='maximum value for bandit budget')
parser.add_argument("-eta", type=int, default=2, help ='base value for bandit structure')
parser.add_argument("-Nloop", type=int, default=1, help ='number of bandit loops')
parser.add_argument('-expid', type=str, default='0')
return parser.parse_args()
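# Illustrative invocation (the script name and argument values are hypothetical):
#   python parse_results.py -appstr demo_app -ntask 1 -bmin 1 -bmax 8 -eta 2 -Nloop 1 -expid 0
# With these arguments, main() reads ./demo_app_ntask1_bandit1-8-2_Nloop1_expid0.txt
# and writes the parsed summary to the matching .pkl file.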
def main(args):
summary = []
my_source = f'./{args.appstr}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt'
save_path = f'./{args.appstr}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.pkl'
GPTuneBand_source = f'./{args.appstr}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}_parsed.pkl'
with open(my_source, "r") as f:
line = f.readline()
while line:
info = line.split()
if (info[0] == 'Tuner:' and info[1] == "GPTuneBand"):
results = []
tunername = info[1]
results.append(tunername)
line = f.readline()
line = f.readline().split()
for _ in range(int(args.ntask)):
tid = int(line[1])
line = f.readline().split()
line = f.readline()
result = pickle.load(open(GPTuneBand_source, "rb"))
results.append(result)
if int(args.ntask) > 1:
line = f.readline().split()
summary.append(results)
line = f.readline()
elif (info[0] == 'Tuner:' and info[1] == "hpbandster"):
results = []
tunername = info[1]
results.append(tunername)
line = f.readline()
line = f.readline().split()
for _ in range(int(args.ntask)):
tid = int(line[1])
line = f.readline().split()
task = line[0][7:]
line = f.readline().strip(" Os ")
                    data = [[float(y) for y in x.split(", ")] for x in re.split(r'\[\[|\]\]|\), \(|\(|\)', line) if len(x) > 2]
data = [y for y in data if y[1] < float("Inf")]
x = []
y = []
pre_fix = 0
max_num = -999
for info in data:
if info[0] > max_num:
max_num = info[0]
for info in data:
pre_fix += info[0]/max_num
if np.isclose(info[0], max_num):
x.append(pre_fix)
y.append(info[1])
results.append([tid, task, [x, y]])
if int(args.ntask) > 1:
line = f.readline().split()
summary.append(results)
line = f.readline()
else: # GPTune OpenTuner and TPE
results = []
tunername = info[1]
results.append(tunername)
line = f.readline()
line = f.readline().split()
for _ in range(int(args.ntask)):
tid = int(line[1])
line = f.readline().split()
task = [x for x in line]
line = f.readline().strip(' Os [ ]\n')
                    history = [float(x) for x in re.split(r'\], \[', line)]
x = list(np.arange(1,len(history)+1))
results.append([tid, task, [x,history]])
if int(args.ntask) > 1:
line = f.readline().split()
summary.append(results)
line = f.readline()
print(summary[0])
print(summary[1])
print("Results saved to", save_path)
pickle.dump(summary, open(save_path, "wb"))
if __name__ == "__main__":
main(parse_args())
|
the-stack_0_14887 | from api.models.base import Base
db = Base.db
class Activity(Base):
"""Model activities available for points."""
__tablename__ = 'activities'
activity_type_id = db.Column(
db.String,
db.ForeignKey('activity_types.uuid'),
nullable=False
)
added_by_id = db.Column(
db.String,
db.ForeignKey('users.uuid'),
nullable=False
)
activity_date = db.Column(db.Date)
logged_activities = db.relationship(
'LoggedActivity',
back_populates='activity',
lazy='dynamic',
order_by='desc(LoggedActivity.created_at)'
)
activity_type = db.relationship(
'ActivityType',
back_populates='activities',
uselist=False
)
|
the-stack_0_14890 | import cv2
from time import sleep
cap = cv2.VideoCapture(0)
while True:
ret,frame = cap.read()
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Display the resulting frame
cv2.imshow('frame',gray)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
sleep(1)
cap.release()
cv2.destroyAllWindows() |
the-stack_0_14893 | # gpl author: Ryan Inch (Imaginer)
import bpy
from bpy.types import Menu
from . import utils_core
class DynTopoMenu(Menu):
bl_label = "Dyntopo"
bl_idname = "VIEW3D_MT_sv3_dyntopo"
@classmethod
def poll(self, context):
return utils_core.get_mode() == 'SCULPT'
def draw(self, context):
layout = self.layout
if context.object.use_dynamic_topology_sculpting:
layout.row().operator("sculpt.dynamic_topology_toggle",
text="Disable Dynamic Topology")
layout.row().separator()
layout.row().menu(DynDetailMenu.bl_idname)
layout.row().menu(DetailMethodMenu.bl_idname)
layout.row().separator()
layout.row().operator("sculpt.optimize")
if context.tool_settings.sculpt.detail_type_method == 'CONSTANT':
layout.row().operator("sculpt.detail_flood_fill")
layout.row().menu(SymmetrizeMenu.bl_idname)
layout.row().prop(context.tool_settings.sculpt,
"use_smooth_shading", toggle=True)
else:
row = layout.row()
row.operator_context = 'INVOKE_DEFAULT'
row.operator("sculpt.dynamic_topology_toggle",
text="Enable Dynamic Topology")
class DynDetailMenu(Menu):
bl_label = "Detail Size"
bl_idname = "VIEW3D_MT_sv3_dyn_detail"
def init(self):
settings = (("40", 40),
("30", 30),
("20", 20),
("10", 10),
("5", 5),
("1", 1))
if bpy.context.tool_settings.sculpt.detail_type_method == 'RELATIVE':
datapath = "tool_settings.sculpt.detail_size"
slider_setting = "detail_size"
elif bpy.context.tool_settings.sculpt.detail_type_method == 'CONSTANT':
datapath = "tool_settings.sculpt.constant_detail_resolution"
slider_setting = "constant_detail_resolution"
else:
datapath = "tool_settings.sculpt.detail_percent"
slider_setting = "detail_percent"
settings = (("100", 100),
("75", 75),
("50", 50),
("25", 25),
("10", 10),
("5", 5))
return settings, datapath, slider_setting
def draw(self, context):
settings, datapath, slider_setting = self.init()
layout = self.layout
# add the top slider
layout.row().prop(context.tool_settings.sculpt,
slider_setting, slider=True)
layout.row().separator()
# add the rest of the menu items
for i in range(len(settings)):
utils_core.menuprop(
layout.row(), settings[i][0], settings[i][1], datapath,
icon='RADIOBUT_OFF', disable=True,
disable_icon='RADIOBUT_ON'
)
class DetailMethodMenu(Menu):
bl_label = "Detail Method"
bl_idname = "VIEW3D_MT_sv3_detail_method_menu"
def draw(self, context):
layout = self.layout
refine_path = "tool_settings.sculpt.detail_refine_method"
type_path = "tool_settings.sculpt.detail_type_method"
refine_items = (("Subdivide Edges", 'SUBDIVIDE'),
("Collapse Edges", 'COLLAPSE'),
("Subdivide Collapse", 'SUBDIVIDE_COLLAPSE'))
type_items = (("Relative Detail", 'RELATIVE'),
("Constant Detail", 'CONSTANT'),
("Brush Detail", 'BRUSH'))
layout.row().label(text="Refine")
layout.row().separator()
# add the refine menu items
for item in refine_items:
utils_core.menuprop(
layout.row(), item[0], item[1],
refine_path, disable=True,
icon='RADIOBUT_OFF',
disable_icon='RADIOBUT_ON'
)
layout.row().label(text="")
layout.row().label(text="Type")
layout.row().separator()
# add the type menu items
for item in type_items:
utils_core.menuprop(
layout.row(), item[0], item[1],
type_path, disable=True,
icon='RADIOBUT_OFF', disable_icon='RADIOBUT_ON'
)
class SymmetrizeMenu(Menu):
bl_label = "Symmetrize"
bl_idname = "VIEW3D_MT_sv3_symmetrize_menu"
def draw(self, context):
layout = self.layout
path = "tool_settings.sculpt.symmetrize_direction"
        # add the symmetrize operator to the menu
layout.row().operator("sculpt.symmetrize")
layout.row().separator()
# add the rest of the menu items
for item in context.tool_settings.sculpt. \
bl_rna.properties['symmetrize_direction'].enum_items:
utils_core.menuprop(
layout.row(), item.name, item.identifier,
path, disable=True,
icon='RADIOBUT_OFF', disable_icon='RADIOBUT_ON'
)
classes = (
DynTopoMenu,
DynDetailMenu,
DetailMethodMenu,
SymmetrizeMenu
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
|
the-stack_0_14894 | #
# Copyright (c) 2021, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import pytest
from PIL import Image
from e2e_tests.base import AVAILABLE_CONTAINERS, BaseE2ETest, fake
from e2e_tests.utils import generate_image, image_to_png, tmp_context
from neptune.new.metadata_containers import MetadataContainer
class TestSeries(BaseE2ETest):
@pytest.mark.parametrize("container", AVAILABLE_CONTAINERS, indirect=True)
def test_log_numbers(self, container: MetadataContainer):
key = self.gen_key()
values = [random.random() for _ in range(50)]
container[key].log(values[0])
container[key].log(values[1:])
container.sync()
assert container[key].fetch_last() == values[-1]
fetched_values = container[key].fetch_values()
assert list(fetched_values["value"]) == values
@pytest.mark.parametrize("container", AVAILABLE_CONTAINERS, indirect=True)
def test_log_strings(self, container: MetadataContainer):
key = self.gen_key()
values = [fake.word() for _ in range(50)]
container[key].log(values[0])
container[key].log(values[1:])
container.sync()
assert container[key].fetch_last() == values[-1]
fetched_values = container[key].fetch_values()
assert list(fetched_values["value"]) == values
@pytest.mark.parametrize("container", AVAILABLE_CONTAINERS, indirect=True)
def test_log_images(self, container: MetadataContainer):
key = self.gen_key()
# images with size between 200KB - 12MB
images = list(generate_image(size=2**n) for n in range(8, 12))
container[key].log(images[0])
container[key].log(images[1:])
container.sync()
with tmp_context():
container[key].download_last("last")
container[key].download("all")
with Image.open("last/3.png") as img:
assert img == image_to_png(image=images[-1])
for i in range(4):
with Image.open(f"all/{i}.png") as img:
assert img == image_to_png(image=images[i])
|
the-stack_0_14898 | # Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from adapt.intent import IntentBuilder
from mycroft.skills.core import MycroftSkill, intent_handler
from mycroft.util.log import getLogger
from mycroft.skills.context import *
import os
import string
import re
import math
__author__ = 'TREE'
LOGGER = getLogger(__name__)
class MathSkill(MycroftSkill):
def __init__(self):
super(MathSkill, self).__init__(name="MathSkill")
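    # Illustrative conversation flow for the context-chained handlers below
    # (utterances and vocab matches are hypothetical examples):
    #   "start a math calculation" -> MathContext set, first number requested
    #   "7"                        -> Num1 stored, FirstNumberContext set
    #   "5"                        -> Num2 stored, SecondNumberContext set
    #   "add"                      -> speaks 12.0, CalculateContext set
    #   "multiply 3"               -> speaks 36.0 via NextCalculationIntent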
@intent_handler(IntentBuilder("MathStartIntent").require("MathStart").build())
@adds_context('MathContext')
def handle_math_start(self, message):
self.speak('Please provide the first number.', expect_response=True)
@intent_handler(IntentBuilder("FirstNumberIntent").require("Num1").require("MathContext").build())
@adds_context('FirstNumberContext')
def handle_first_number(self, message):
#utterance = message.data.get('utterance')
self.num1 = message.data.get("Num1")
self.speak('Please provide the second number.', expect_response=True)
print(self.num1)
@intent_handler(IntentBuilder("SecondNumberIntent").require("Num2").require("FirstNumberContext").build())
@adds_context('SecondNumberContext')
@removes_context('FirstNumberContext')
def handle_second_number(self, message):
#utterance = message.data.get('utterance')
self.num2 = message.data.get("Num2")
self.speak('What operation would you like to do', expect_response=True)
print(self.num2)
@intent_handler(IntentBuilder('CalculateIntent').require('Calculate').require('SecondNumberContext').build())
@adds_context('CalculateContext')
@removes_context('SecondNumberContext')
def handle_calculate(self, message):
utterance = message.data.get('utterance')
#print(utterance)
if "add" in utterance:
self.answer = float(self.num1) + float(self.num2)
self.speak('The answer is {}.'.format(self.answer))
elif "multiply" in utterance:
self.answer = float(self.num1) * float(self.num2)
self.speak('The answer is {}.'.format(self.answer))
elif "divide" in utterance:
self.answer = float(self.num1) / float(self.num2)
self.speak('The answer is {}.'.format(self.answer))
elif "subtract" in utterance:
self.answer = float(self.num1) - float(self.num2)
            self.speak('The answer is {}.'.format(self.answer))
self.speak('Would you like to perform another operation?', expect_response=True)
@intent_handler(IntentBuilder('NextCalculationIntent').require('Calculate').require('Num').require('CalculateContext').build())
def handle_next_calculation(self, message):
utterance = message.data.get('utterance')
self.num = message.data.get("Num")
print(utterance)
print(self.num)
if "add" in utterance:
self.answer = float(self.answer) + float(self.num)
self.speak('The answer is {}.'.format(self.answer))
elif "multiply" in utterance:
self.answer = float(self.answer) * float(self.num)
self.speak('The answer is {}.'.format(self.answer))
elif "x" in utterance:
self.answer = float(self.answer) * float(self.num)
self.speak('The answer is {}.'.format(self.answer))
elif "divide" in utterance:
self.answer = float(self.answer) / float(self.num)
self.speak('The answer is {}.'.format(self.answer))
elif "subtract" in utterance:
self.answer = float(self.answer) - float(self.num)
self.speak('The answer is {}.'.format(self.answer))
elif "square root" in utterance:
self.answer = math.sqrt(self.answer)
self.speak('The answer is {}.'.format(self.answer))
self.speak('Would you like to perform another operation?', expect_response=True)
@intent_handler(IntentBuilder("TangentIntent").require("Tangent").require("Num").build())
def handle_tangent(self, message):
utterance = message.data.get('utterance')
self.num = message.data.get("Num")
number = float(self.num)
if "degrees" in utterance:
self.answer = math.tan(math.radians(number))
self.speak('The answer is {:f} degrees.'.format(self.answer))
elif "radians" in utterance:
self.answer = math.tan(number)
self.speak('The answer is {:f} radians.'.format(self.answer))
else:
self.answer = math.tan(number)
self.speak('The answer is {:f} radians.'.format(self.answer))
def create_skill():
return MathSkill()
|
the-stack_0_14899 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for building example regressor Estimator models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Standard Imports
import tensorflow as tf
from tensorflow_model_analysis.eval_saved_model import export
from tensorflow_model_analysis.eval_saved_model import util
def make_regressor_input_fn(feature_spec):
"""Train input function.
Args:
feature_spec: a dictionary mapping feature_name to Tensor or SparseTensor.
Returns:
A function.
"""
def _input_fn():
"""Example-based input function."""
serialized_examples = [
x.SerializeToString() for x in [
util.make_example(age=1.0, language='english', label=4.0),
util.make_example(age=2.0, language='english', label=7.0),
util.make_example(age=3.0, language='english', label=10.0),
util.make_example(age=4.0, language='english', label=13.0),
util.make_example(age=1.0, language='chinese', label=3.0),
util.make_example(age=2.0, language='chinese', label=6.0),
util.make_example(age=3.0, language='chinese', label=9.0),
util.make_example(age=4.0, language='chinese', label=12.0),
util.make_example(age=10.0, language='english', label=31.0),
util.make_example(age=20.0, language='english', label=61.0),
util.make_example(age=30.0, language='english', label=91.0),
util.make_example(age=40.0, language='english', label=121.0),
util.make_example(age=10.0, language='chinese', label=30.0),
util.make_example(age=20.0, language='chinese', label=60.0),
util.make_example(age=30.0, language='chinese', label=90.0),
util.make_example(age=40.0, language='chinese', label=120.0)
]
]
features = tf.io.parse_example(
serialized=serialized_examples, features=feature_spec)
labels = features.pop('label')
return features, labels
return _input_fn
def make_classifier_input_fn(feature_spec, n_classes=2, label_vocabulary=None):
"""Train input function.
Args:
feature_spec: a dictionary mapping feature_name to Tensor or SparseTensor.
n_classes: set for multiclass.
label_vocabulary: (Optional) Label vocabulary to use for labels.
Returns:
A function.
"""
def _input_fn():
"""Example-based input function."""
english_label = label_vocabulary[1] if label_vocabulary else 1.0
chinese_label = label_vocabulary[0] if label_vocabulary else 0.0
if n_classes > 2:
# For multi-class labels, English is class 0, Chinese is class 1.
chinese_label = label_vocabulary[1] if label_vocabulary else 1
english_label = label_vocabulary[0] if label_vocabulary else 0
serialized_examples = [
x.SerializeToString() for x in [
util.make_example(age=1.0, language='english', label=english_label),
util.make_example(age=2.0, language='english', label=english_label),
util.make_example(age=3.0, language='chinese', label=chinese_label),
util.make_example(age=4.0, language='chinese', label=chinese_label)
]
]
features = tf.io.parse_example(
serialized=serialized_examples, features=feature_spec)
labels = features.pop('label')
if n_classes > 2 and not label_vocabulary:
labels = tf.sparse.to_dense(labels, default_value=-1)
return features, labels
return _input_fn
def make_example(age, language, label=None):
example = tf.train.Example()
example.features.feature['age'].float_list.value.append(age)
example.features.feature['language'].bytes_list.value.append(language)
if label:
if isinstance(label, list):
example.features.feature['label'].int64_list.value.extend(label)
else:
example.features.feature['label'].float_list.value.append(label)
return example
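# Illustrative sketch (not part of the original helpers): make_example builds the
# tf.train.Example protos that the input_fns above parse back with a feature spec.
# Note that the language value must be bytes, e.g.:
#   ex = make_example(age=3.0, language=b'english', label=10.0)
#   serialized = ex.SerializeToString()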
def linear_columns(include_label_column=False):
"""Return feature_columns for linear model."""
language = tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(
key='language', vocabulary_list=('english', 'chinese')))
age = tf.feature_column.numeric_column(key='age', default_value=0.0)
features = [age, language]
if include_label_column:
label = tf.feature_column.numeric_column(key='label', default_value=0.0)
features.append(label)
return features
def dnn_columns(include_label_column=False, n_classes=2):
"""Return feature_columns for DNN model."""
language = tf.feature_column.embedding_column(
tf.feature_column.categorical_column_with_vocabulary_list(
key='language', vocabulary_list=('english', 'chinese')),
dimension=1)
age = tf.feature_column.numeric_column(key='age', default_value=0.0)
features = [age, language]
if include_label_column:
label = tf.feature_column.numeric_column(key='label', default_value=0.0)
if n_classes > 2:
label = tf.feature_column.categorical_column_with_identity(
key='label', num_buckets=n_classes)
features.append(label)
return features
def regressor_extra_metrics(features, labels, predictions):
return {
'my_mean_prediction':
tf.compat.v1.metrics.mean(predictions['predictions']),
'my_mean_age':
tf.compat.v1.metrics.mean(features['age']),
'my_mean_label':
tf.compat.v1.metrics.mean(labels),
'my_mean_age_times_label':
tf.compat.v1.metrics.mean(labels * features['age']),
}
def classifier_extra_metrics(features, labels, predictions):
"""Returns extra metrics to use with classifier."""
if 'logistic' in predictions:
metrics = {
'my_mean_prediction':
tf.compat.v1.metrics.mean(predictions['logistic']),
'my_mean_age':
tf.compat.v1.metrics.mean(features['age']),
}
if labels.dtype != tf.string:
metrics.update({
'my_mean_label':
tf.compat.v1.metrics.mean(labels),
'my_mean_age_times_label':
tf.compat.v1.metrics.mean(labels * features['age']),
})
return metrics
# Logistic won't be present in multiclass cases.
return {
'mean_english_prediction':
tf.compat.v1.metrics.mean(predictions['probabilities'][0]),
'my_mean_age':
tf.compat.v1.metrics.mean(features['age']),
}
def export_model_and_eval_model(estimator,
serving_input_receiver_fn=None,
eval_input_receiver_fn=None,
export_path=None,
eval_export_path=None):
"""Export SavedModel and EvalSavedModel.
Args:
estimator: Estimator to export.
serving_input_receiver_fn: Serving input receiver function.
eval_input_receiver_fn: Eval input receiver function.
export_path: Export path. If None, inference model is not exported.
eval_export_path: Eval export path. If None, EvalSavedModel is not exported.
Returns:
Tuple of (path to the export directory, path to eval export directory).
"""
export_path_result = None
eval_export_path_result = None
if export_path and serving_input_receiver_fn:
export_path_result = estimator.export_saved_model(
export_dir_base=export_path,
serving_input_receiver_fn=serving_input_receiver_fn)
if eval_export_path and eval_input_receiver_fn:
eval_export_path_result = export.export_eval_savedmodel(
estimator=estimator,
export_dir_base=eval_export_path,
eval_input_receiver_fn=eval_input_receiver_fn,
serving_input_receiver_fn=serving_input_receiver_fn)
return export_path_result, eval_export_path_result
|
the-stack_0_14900 | # -*- coding:utf-8 -*-
# Copyright (c) 2015, Roger Duran. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import division
import os
from . import base
class DF(base.ThreadedPollText):
"""Disk Free Widget
By default the widget only displays if the space is less than warn_space.
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('partition', '/', 'the partition to check space'),
('warn_color', 'ff0000', 'Warning color'),
('warn_space', 2, 'Warning space in scale defined by the ``measure`` option.'),
('visible_on_warn', True, 'Only display if warning'),
('measure', "G", "Measurement (G, M, B)"),
('format', '{p} ({uf}{m}|{r:.0f}%)',
         'String format (p: partition, s: size, '
         'f: free space, uf: user free space, m: measure, r: percentage of the partition used)'),
('update_interval', 60, 'The update interval.'),
]
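    # Illustrative qtile config snippet (hypothetical values) overriding the
    # defaults above inside a bar's widget list:
    #   widget.DF(partition='/home', warn_space=5, visible_on_warn=False,
    #             format='{uf}{m} free ({r:.0f}%)')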
measures = {"G": 1024 * 1024 * 1024,
"M": 1024 * 1024,
"B": 1024}
def __init__(self, **config):
base.ThreadedPollText.__init__(self, **config)
self.add_defaults(DF.defaults)
self.user_free = 0
self.calc = self.measures[self.measure]
def draw(self):
if self.user_free <= self.warn_space:
self.layout.colour = self.warn_color
else:
self.layout.colour = self.foreground
base.ThreadedPollText.draw(self)
def poll(self):
statvfs = os.statvfs(self.partition)
size = statvfs.f_frsize * statvfs.f_blocks // self.calc
free = statvfs.f_frsize * statvfs.f_bfree // self.calc
self.user_free = statvfs.f_frsize * statvfs.f_bavail // self.calc
if self.visible_on_warn and self.user_free >= self.warn_space:
text = ""
else:
text = self.format.format(p=self.partition, s=size, f=free,
uf=self.user_free, m=self.measure,
r=(size - self.user_free) / size * 100)
return text
|
the-stack_0_14901 | # pylint: disable=no-self-use,invalid-name
from __future__ import with_statement
from __future__ import absolute_import
from __future__ import print_function
from collections import defaultdict
import pytest
import numpy
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary
from allennlp.data.fields import TextField, SequenceLabelField
from allennlp.data.token_indexers import SingleIdTokenIndexer
class TestSequenceLabelField(AllenNlpTestCase):
def setUp(self):
super(TestSequenceLabelField, self).setUp()
self.text = TextField([Token(t) for t in [u"here", u"are", u"some", u"words", u"."]],
{u"words": SingleIdTokenIndexer(u"words")})
def test_tag_length_mismatch_raises(self):
with pytest.raises(ConfigurationError):
wrong_tags = [u"B", u"O", u"O"]
_ = SequenceLabelField(wrong_tags, self.text)
def test_count_vocab_items_correctly_indexes_tags(self):
tags = [u"B", u"I", u"O", u"O", u"O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace=u"labels")
counter = defaultdict(lambda: defaultdict(int))
sequence_label_field.count_vocab_items(counter)
assert counter[u"labels"][u"B"] == 1
assert counter[u"labels"][u"I"] == 1
assert counter[u"labels"][u"O"] == 3
assert set(counter.keys()) == set([u"labels"])
def test_index_converts_field_correctly(self):
vocab = Vocabulary()
b_index = vocab.add_token_to_namespace(u"B", namespace=u'*labels')
i_index = vocab.add_token_to_namespace(u"I", namespace=u'*labels')
o_index = vocab.add_token_to_namespace(u"O", namespace=u'*labels')
tags = [u"B", u"I", u"O", u"O", u"O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace=u"*labels")
sequence_label_field.index(vocab)
# pylint: disable=protected-access
assert sequence_label_field._indexed_labels == [b_index, i_index, o_index, o_index, o_index]
# pylint: enable=protected-access
def test_as_tensor_produces_integer_targets(self):
vocab = Vocabulary()
vocab.add_token_to_namespace(u"B", namespace=u'*labels')
vocab.add_token_to_namespace(u"I", namespace=u'*labels')
vocab.add_token_to_namespace(u"O", namespace=u'*labels')
tags = [u"B", u"I", u"O", u"O", u"O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace=u"*labels")
sequence_label_field.index(vocab)
padding_lengths = sequence_label_field.get_padding_lengths()
tensor = sequence_label_field.as_tensor(padding_lengths).detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(tensor, numpy.array([0, 1, 2, 2, 2]))
def test_sequence_label_field_raises_on_incorrect_type(self):
with pytest.raises(ConfigurationError):
_ = SequenceLabelField([[], [], [], [], []], self.text)
def test_class_variables_for_namespace_warnings_work_correctly(self):
# pylint: disable=protected-access
tags = [u"B", u"I", u"O", u"O", u"O"]
assert u"text" not in SequenceLabelField._already_warned_namespaces
with self.assertLogs(logger=u"allennlp.data.fields.sequence_label_field", level=u"WARNING"):
_ = SequenceLabelField(tags, self.text, label_namespace=u"text")
# We've warned once, so we should have set the class variable to False.
assert u"text" in SequenceLabelField._already_warned_namespaces
with pytest.raises(AssertionError):
with self.assertLogs(logger=u"allennlp.data.fields.sequence_label_field", level=u"WARNING"):
_ = SequenceLabelField(tags, self.text, label_namespace=u"text")
# ... but a new namespace should still log a warning.
assert u"text2" not in SequenceLabelField._already_warned_namespaces
with self.assertLogs(logger=u"allennlp.data.fields.sequence_label_field", level=u"WARNING"):
_ = SequenceLabelField(tags, self.text, label_namespace=u"text2")
def test_printing_doesnt_crash(self):
tags = [u"B", u"I", u"O", u"O", u"O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace=u"labels")
print(sequence_label_field)
|
the-stack_0_14903 | from __future__ import annotations
from collections import Counter
import re
import string
import numpy as np
import pandas as pd
import torch
from torch.nn.init import xavier_uniform_
from torch.nn import Module, Embedding, Sequential, ELU, Conv1d, Linear, CrossEntropyLoss
from torch.nn.functional import avg_pool1d, dropout, relu, softmax
from torch.optim import Adam
from nlp_pytorch.data.base_dataset import SplitDataset
from nlp_pytorch.data.vocab import Vocabulary
from nlp_pytorch.train import train, make_train_state
def preprocess_text(text):
text = " ".join(word.lower() for word in text.split(" "))
text = re.sub(r"([.,!?])", r" \1 ", text)
text = re.sub(r"[^a-zA-Z.,!?]+", r" ", text)
return text
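# Illustrative behaviour of the helper above (spacing shown approximately):
#   preprocess_text("Hello, World!")  ->  "hello , world ! "
# i.e. words are lower-cased, punctuation becomes its own token, and any other
# non-letter characters collapse into single spaces.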
class NewsVectorizer(object):
def __init__(self, title_vocab, category_vocab, max_title):
self.title_vocab = title_vocab
self.category_vocab = category_vocab
self.max_vector_len = max_title + 2
def vectorize(self, title, vector_length: int = -1) -> np.array:
indices = [self.title_vocab.begin_seq_index]
indices.extend(self.title_vocab.lookup_token(token) for token in title.split(" "))
indices.append(self.title_vocab.end_seq_index)
        if vector_length < 0:
            vector_length = self.max_vector_len
        # pad to the requested length (defaults to the fixed maximum title length)
        out_vector = np.zeros(vector_length, dtype=np.int64)
out_vector[: len(indices)] = indices
out_vector[len(indices) :] = self.title_vocab.mask_index
return out_vector
@classmethod
def from_dataframe(cls, news_df, cutoff=25):
category_vocab = Vocabulary()
for category in sorted(set(news_df.category)):
category_vocab.add_token(category)
max_title = 0
word_counts = Counter()
for title in news_df.title:
title_tokens = title.split(" ")
max_title = max(max_title, len(title_tokens))
for token in title_tokens:
if token not in string.punctuation:
word_counts[token] += 1
title_vocab = Vocabulary()
for word, word_count in word_counts.items():
if word_count >= cutoff:
title_vocab.add_token(word)
return cls(title_vocab, category_vocab, max_title)
class NewsDataset(SplitDataset):
def __init__(self, news_df: pd.DataFrame, vectorizer) -> None:
super().__init__(news_df, vectorizer)
@classmethod
def load_dataset_and_make_vectorizer(cls, csv_file: str) -> NewsDataset:
news_df = pd.read_csv(csv_file)
return cls(news_df, NewsVectorizer.from_dataframe(news_df))
def __getitem__(self, index: int):
row = self._target_df.iloc[index]
title_vector = self.vectorizer.vectorize(row.title)
category_index = self.vectorizer.category_vocab.lookup_token(row.category)
return {
"x_data": title_vector,
"y_target": category_index,
}
def load_glove_from_file(glove_filepath):
word_to_index = {}
embeddings = []
with open(glove_filepath, "r") as fp:
for index, line in enumerate(fp):
line = line.split(" ")
word_to_index[line[0]] = index
embedding_i = np.array([float(val) for val in line[1:]])
embeddings.append(embedding_i)
return word_to_index, np.stack(embeddings)
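# Illustrative note: each line of a GloVe text file is "<token> v1 v2 ... vd", so
# the loader above maps tokens to row indices of the stacked embedding matrix:
#   word_to_idx, emb = load_glove_from_file("data/glove.6B.100d.txt")
#   emb[word_to_idx["the"]]  # 100-dimensional numpy vector for "the"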
def make_embedding_matrix(glove_filepath, words):
word_to_idx, glove_embeddings = load_glove_from_file(glove_filepath)
embedding_size = glove_embeddings.shape[1]
final_embeddings = np.zeros((len(words), embedding_size))
for i, word in enumerate(words):
if word in word_to_idx:
final_embeddings[i, :] = glove_embeddings[word_to_idx[word]]
else:
embedding_i = torch.ones(1, embedding_size)
xavier_uniform_(embedding_i)
final_embeddings[i, :] = embedding_i
return final_embeddings
class NewsClassifier(Module):
def __init__(
self,
embedding_size,
num_embeddings,
num_channels,
hidden_dim,
num_classes,
dropout_p,
pretrained_embeddings=None,
padding_idx=0,
):
super().__init__()
if pretrained_embeddings is None:
self.emb = Embedding(
embedding_dim=embedding_size, num_embeddings=num_embeddings, padding_idx=padding_idx
)
else:
self.emb = Embedding(
embedding_dim=embedding_size,
num_embeddings=num_embeddings,
padding_idx=padding_idx,
_weight=pretrained_embeddings,
)
self.convnet = Sequential(
Conv1d(in_channels=embedding_size, out_channels=num_channels, kernel_size=3),
ELU(),
Conv1d(in_channels=num_channels, out_channels=num_channels, kernel_size=3, stride=2),
ELU(),
Conv1d(in_channels=num_channels, out_channels=num_channels, kernel_size=3, stride=2),
ELU(),
Conv1d(in_channels=num_channels, out_channels=num_channels, kernel_size=3),
ELU(),
)
self._dropout_p = dropout_p
self.fc1 = Linear(num_channels, hidden_dim)
self.fc2 = Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_activator: bool = False):
x_embedded = self.emb(x_in).permute(0, 2, 1)
features = self.convnet(x_embedded)
remaining_size = features.size(dim=2)
features = avg_pool1d(features, remaining_size).squeeze(dim=2)
features = dropout(features, p=self._dropout_p)
intermediate_vector = relu(dropout(self.fc1(features), p=self._dropout_p))
prediction_vector = self.fc2(intermediate_vector)
if apply_activator:
prediction_vector = softmax(prediction_vector, dim=1)
return prediction_vector
def predict_category(title, classifer, vectorizer, max_length, device):
title = preprocess_text(title)
vectorized_title = torch.tensor(vectorizer.vectorize(title, vector_length=max_length)).to(device)
result = classifer(vectorized_title.unsqueeze(0), apply_activator=True)
probability_values, indices = result.max(dim=1)
predicated_category = vectorizer.category_vocab.lookup_index(indices.item())
return {"category": predicated_category, "probability": probability_values.item()}
def compute_accuracy(y_pred, y_target):
_, y_pred_indices = y_pred.max(dim=1)
n_correct = torch.eq(y_pred_indices, y_target).sum().item()
return n_correct / len(y_pred_indices) * 100
def main(num_epochs: int = 100, batch_size: int = 128):
args = {
"news_csv": "data/news_with_splits.csv",
"save_dir": "model_storage/yelp/",
"model_state_file": "model.pth",
"glove_filepath": "data/glove.6B.100d.txt",
"vectorizer_file": "vectorizer.json",
"use_glove": False,
"embedding_size": 100,
"hidden_dim": 100,
"num_channels": 100,
"learning_rate": 0.001,
"num_epochs": num_epochs,
"batch_size": batch_size,
"early_stopping_criteria": 5,
"frequency_cutoff": 25,
"dropout_p": 0.1,
"cuda": False,
}
train_state = make_train_state()
if torch.cuda.is_available():
args["cuda"] = True
args["device"] = torch.device("cuda:0" if args["cuda"] else "cpu")
print(args)
dataset = NewsDataset.load_dataset_and_make_vectorizer(args["news_csv"])
vectorizer = dataset.vectorizer
words = vectorizer.title_vocab._token_to_idx.keys()
embeddings = make_embedding_matrix(glove_filepath=args["glove_filepath"], words=words)
classifier = NewsClassifier(
embedding_size=args["embedding_size"],
num_embeddings=len(vectorizer.title_vocab),
num_channels=args["num_channels"],
hidden_dim=args["hidden_dim"],
        num_classes=len(vectorizer.category_vocab),
dropout_p=args["dropout_p"],
pretrained_embeddings=torch.from_numpy(embeddings),
)
classifier = classifier.to(args["device"])
classifier.double()
loss_func = CrossEntropyLoss()
optimizer = Adam(classifier.parameters(), lr=args["learning_rate"])
train(args, train_state, dataset, classifier, optimizer, loss_func, compute_accuracy)
return {
"train_state": train_state,
"args": args,
"dataset": dataset,
"classifier": classifier,
"loss_func": loss_func,
"optimizer": optimizer,
}
|
the-stack_0_14904 | import os
import logging
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] - %(message)s')
ld = logging.debug
class PartialObservabilityProblem:
def __init__(self,
dataset_path,
num_nodes,
T,
num_examples_to_generate,
test_fraction,
validation_fraction,
hidden_power_bus_id_list,
hidden_voltage_bus_id_list,
target_power_bus_id_list,
target_voltage_bus_id_list,
reference_bus_id,
Ns,
Nv):
"""
A data structure that captures the complete description of the partial observability
problem in power grids.
:param dataset_path: a string specifying the path to the dataset containing the
power and voltage recordings (4 csv files).
:param num_nodes: number of buses in the grid
:param T: number of time-steps in the scope of a single train/test example.
The last time step is partially observable
:param num_examples_to_generate: number of examples to be drawn at random from the
power-voltage records.
:param test_fraction: fractional number in [0.0,1.0]. Out of the dataset provided,
which fraction of the examples will be put aside as a test
set.
:param validation_fraction: fractional number in [0.0,1.0]. Out of the training set
provided, which fraction of the examples will be put aside as
a validation set.
:param hidden_power_bus_id_list: a list of bus ids, whose power is not observable at
the last time step.
:param hidden_voltage_bus_id_list:a list of bus ids, whose voltage is not observable at
the last time step.
:param target_power_bus_id_list: a list of bus ids, whose power is to be predicted
the last time step.
:param target_voltage_bus_id_list:a list of bus ids, whose voltage is to be predicted
the last time step.
:param reference_bus_id: an integer, id of the bus defined as a "slack" or aa reference bus.
:param Ns: Number of observable power measurements in the last time step
:param Nv: Number of observable voltage measurements in the last time step
"""
# TODO: add slack bus id as a data member.
self.dataset_path = dataset_path
self.num_nodes = num_nodes
self.T = T
self.num_examples_to_generate = num_examples_to_generate
self.test_fraction = test_fraction
self.validation_fraction = validation_fraction
self.hidden_power_bus_id_list = hidden_power_bus_id_list
self.hidden_voltage_bus_id_list = hidden_voltage_bus_id_list
self.visible_power_bus_id_list = list(sorted(set(range(num_nodes)) - set(hidden_power_bus_id_list)))# complementary to the hidden one
self.visible_voltage_bus_id_list = list(sorted(set(range(num_nodes)) - set(hidden_voltage_bus_id_list)))# complementary to the hidden one
self.target_power_bus_id_list = target_power_bus_id_list
self.target_voltage_bus_id_list = target_voltage_bus_id_list
self.reference_bus_id = reference_bus_id
self.Ns = Ns
self.Nv = Nv
# Measurement counts
self.num_phasors_per_bus = 2 # there are 2 phasors per bus (S,V)
self.num_measurements_per_phasor = 2 # phasors are complex (2 values)
self.num_hidden_power_measurements = len(self.hidden_power_bus_id_list) * self.num_measurements_per_phasor
self.num_hidden_voltage_measurements = len(self.hidden_voltage_bus_id_list) * self.num_measurements_per_phasor
self.num_hidden_measurements = self.num_hidden_power_measurements + self.num_hidden_voltage_measurements
self.num_target_power_measurements = len(self.target_power_bus_id_list) * self.num_measurements_per_phasor
self.num_target_voltage_measurements = len(self.target_voltage_bus_id_list) * self.num_measurements_per_phasor
self.num_target_measurements = self.num_target_voltage_measurements + self.num_target_power_measurements
        self.num_target_buses = self.num_target_measurements // self.num_phasors_per_bus if self.num_target_power_measurements == 0 else -1  # this value is only meaningful when no power measurements are sought.
self.num_visible_power_measurements = len(self.visible_power_bus_id_list) * self.num_measurements_per_phasor
self.num_visible_voltage_measurements = len(self.visible_voltage_bus_id_list) * self.num_measurements_per_phasor
self.num_all_measurements = self.num_nodes * self.num_phasors_per_bus * self.num_measurements_per_phasor
self.num_remaining_measurements = self.num_all_measurements - self.num_hidden_measurements # what is left after all the hidden measurements are removed.
assert(Ns * self.num_measurements_per_phasor == self.num_visible_power_measurements)
assert(Nv * self.num_measurements_per_phasor == self.num_visible_voltage_measurements)
assert(self.num_all_measurements == self.num_visible_voltage_measurements + self.num_visible_power_measurements + self.num_hidden_voltage_measurements + self.num_hidden_power_measurements)
def set_hidden_measurement_lists_from_Ns_Nv(num_nodes, Ns, Nv, list_bus_id_power_hiding_priority=None, list_bus_id_voltage_hiding_priority=None):
"""
Returns the list of the hidden power bus ids and a list of hidden voltage ids
:param num_nodes: number of buses in the grid
:param Ns: Number of observable power measurements in the last time step
:param Nv: Number of observable voltage measurements in the last time step
:param list_bus_id_power_hiding_priority: list of bus indices which was sorted according to the preferred
order of hiding. Index 0 of this list corresponds to the most likely bus to be hidden.
:param list_bus_id_voltage_hiding_priority: list of bus indices which was sorted according to the preferred
order of hiding. Index 0 of this list corresponds to the most likely bus to be hidden.
:return:
"""
if list_bus_id_power_hiding_priority is None:
list_bus_id_power_hiding_priority = list(range(num_nodes))
if list_bus_id_voltage_hiding_priority is None:
list_bus_id_voltage_hiding_priority = list(range(num_nodes))
hidden_power_bus_id_list = []
next_busid_to_hide = 0
for bus_id in range(Ns, num_nodes):
hidden_power_bus_id_list.append(list_bus_id_power_hiding_priority[next_busid_to_hide])
next_busid_to_hide += 1
hidden_voltage_bus_id_list = []
next_busid_to_hide = 0
for bus_id in range(Nv, num_nodes):
hidden_voltage_bus_id_list.append(list_bus_id_voltage_hiding_priority[next_busid_to_hide])
next_busid_to_hide += 1
hidden_power_bus_id_list.sort()
hidden_voltage_bus_id_list.sort()
return hidden_power_bus_id_list, hidden_voltage_bus_id_list
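# Illustrative sketch (not part of the original module): with the default
# hiding priority [0, 1, 2, 3] on a 4-bus grid, keeping Ns=2 powers and
# Nv=1 voltage observable hides powers on buses [0, 1] and voltages on
# buses [0, 1, 2]:
#
#   set_hidden_measurement_lists_from_Ns_Nv(num_nodes=4, Ns=2, Nv=1)
#   # -> ([0, 1], [0, 1, 2])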
def set_hidden_measurement_lists_from_observability(num_nodes, observability, list_bus_id_hiding_priority=None):
"""
Returns the list of the hidden power bus ids and a list of hidden voltage ids
:param num_nodes: number of buses in the grid
:param observability: a fractional number in [0.0, 1.0] which
sets the observability degree considered
in the problem.
:param list_bus_id_hiding_priority: list of bus indices which was sorted according to the preferred
order of hiding. Index 0 of this list corresponds to the most likely bus to be hidden.
:return:
"""
if list_bus_id_hiding_priority is None:
list_bus_id_hiding_priority = list(range(num_nodes))
observability_step_size = 1 / float(2 * num_nodes)
hidden_power_bus_id_list = []
next_busid_to_hide = 0
for observability_step in range(1,num_nodes+1):
threshold_for_current_measurement = observability_step * observability_step_size
if threshold_for_current_measurement >= observability:
hidden_power_bus_id_list.append(list_bus_id_hiding_priority[next_busid_to_hide])
next_busid_to_hide += 1
hidden_voltage_bus_id_list = []
next_busid_to_hide = 0
for observability_step in range(1,num_nodes+1):
threshold_for_current_measurement = 0.5 + observability_step * observability_step_size
if threshold_for_current_measurement >= observability:
hidden_voltage_bus_id_list.append(list_bus_id_hiding_priority[next_busid_to_hide])
next_busid_to_hide += 1
hidden_power_bus_id_list.sort()
hidden_voltage_bus_id_list.sort()
return hidden_power_bus_id_list, hidden_voltage_bus_id_list
def make_str_for_pretty_print_int_list(lst):
"""
    Produce a string which neatly prints a list of integers.
    This is done by compacting the integers into contiguous ranges.
    For example, [0,1,2,3,4,10,11,12] will become "[0..4, 10..12]"
:param lst: list of integers
:return: string
"""
stri="["
prev=None
seq_start_num = None
for i,num in enumerate(lst):
if prev is None:
# Warmup
seq_start_num = num
stri = stri + str(num)
elif prev != num - 1:
if seq_start_num != prev:
# Previous sequence contained more than 1 number.
if seq_start_num == prev-1:
stri = stri + ", " + str(prev)
else:
stri = stri + ".." + str(prev)
# Start new sequence
stri = stri + ", " + str(num)
seq_start_num = num
elif i==len(lst)-1:
if seq_start_num != num:
# Previous sequence contained more than 1 number.
if seq_start_num == prev:
stri = stri + ", " + str(num)
else:
stri = stri + ".." + str(num)
prev = num
stri = stri +"]"
return stri
def create_partial_observability_problem(dataset_dir, dataset_name, T, Ns, Nv, verbose=True, reverse_bus_hiding_order=False):
"""
Constructs a setting of a partial observability problem.
This function mainly determines the number of nodes and
sets the concrete bus ids for being hidden, targeted,
    etc. All in accordance with the well-known data sets and
    with the requested numbers of observable power (Ns) and
    voltage (Nv) measurements at the last time step.
:param dataset_dir: a directory that contains all the datasets
:param dataset_name: a directory name of the dataset
:param T: Number of time steps to be observed at
:param Ns: Number of observable power measurements in the last time step
:param Nv: Number of observable voltage measurements in the last time step
:param verbose: boolean - if true then upon the creation of the pop
object - its attributes will be printed.
:return:
"""
# Common setting:
dataset_path = os.path.join(dataset_dir, dataset_name)
if dataset_name == 'solar_smooth_ord_60_downsampling_factor_60':
# 4-nodes grid with 10080 recorded time steps
num_nodes = 4
reference_bus_id = 3
num_examples_to_generate = 9000 # how many examples will be generated from the existing CSV files (generation is carried out via random T-long time series).
test_fraction = 0.1 # fraction of the generated examples that will become a test set. The splitting between the training and test time series is leakage-safe. Namely, no training time series overlaps with test time series.
        validation_fraction = 0.0  # fraction of the train examples that will become a validation set. Warning: the current way of splitting the training set into a validation set creates data leakage between training and validation since the time series overlap!
# Set the observed bus id lists according to the "observability" parameter in a contiguous manner.
# TODO: Make sure that custom id list is synchronized with the following processing (in the neural net etc)
bus_hiding_priority_list = [0, 1, 2, 3]
bus_hiding_priority_list = list(reversed(bus_hiding_priority_list)) if reverse_bus_hiding_order else bus_hiding_priority_list
hidden_power_bus_id_list, hidden_voltage_bus_id_list = set_hidden_measurement_lists_from_Ns_Nv(num_nodes, Ns, Nv,
bus_hiding_priority_list,
bus_hiding_priority_list)
# Target bus ids:
# We assume that we only want to estimate all the voltage and none of the powers
# as the powers are easily recoverable once the voltages are estimated
target_power_bus_id_list = []
target_voltage_bus_id_list = list(range(num_nodes))
        # Example for observability=0.45 in the 4-node grid:
# hidden_power_bus_id_list = [0] # hidden from input in T-1 (last) time-step
# hidden_voltage_bus_id_list = [0, 1, 2, 3] # hidden from input in T-1 (last) time-step
# target_power_bus_id_list = []
# target_voltage_bus_id_list = [0, 1, 2, 3]
elif dataset_name == 'ieee37_smooth_ord_60_downsampling_factor_60':
# 36-nodes grid with 10080 recorded time steps
num_nodes = 36
reference_bus_id = 0
num_examples_to_generate = 9000 # how many examples will be generated from the existing CSV files (generation is carried out via random T-long time series).
test_fraction = 0.1 # fraction of the generated examples that will become a test set. The splitting between the training and test time series is leakage-safe. Namely, no training time series overlaps with test time series.
        validation_fraction = 0.0  # fraction of the train examples that will become a validation set. Warning: the current way of splitting the training set into a validation set creates data leakage between training and validation since the time series overlap!
# Set the observed bus id lists according to the "observability" parameter in a contiguous manner.
# TODO: Make sure that custom id list is synchronized with the following processing (in the neural net etc)
bus_hiding_priority_list = list(reversed(range(num_nodes))) # This creates a topological ordering of the nodes, such that the reference bus (slack bus) is the last to be hidden.
bus_hiding_priority_list = list(reversed(bus_hiding_priority_list[:-1]))+[bus_hiding_priority_list[-1]] if reverse_bus_hiding_order else bus_hiding_priority_list
hidden_power_bus_id_list, hidden_voltage_bus_id_list = set_hidden_measurement_lists_from_Ns_Nv(num_nodes, Ns, Nv,
bus_hiding_priority_list,
bus_hiding_priority_list)
# Target bus ids:
# We assume that we only want to estimate all the voltage and none of the powers
# as the powers are easily recoverable once the voltages are estimated
target_power_bus_id_list = []
target_voltage_bus_id_list = list(range(num_nodes))
else:
raise NameError("Unknown dataset required \"{}\"".format(dataset_name))
pop = PartialObservabilityProblem(dataset_path, num_nodes, T, num_examples_to_generate, test_fraction,
validation_fraction, hidden_power_bus_id_list, hidden_voltage_bus_id_list,
target_power_bus_id_list, target_voltage_bus_id_list, reference_bus_id,
Ns, Nv)
if verbose:
ld("Created PartialObservabilityProblem scenario:")
ld(" Dataset name: {}".format(dataset_name))
ld(" num_nodes: {}".format(num_nodes))
ld(" T: {}".format(T))
ld(" (Ns) number of observable bus powers at time=T-1: {}".format(Ns))
ld(" (Nv) number of observable bus voltages at time=T-1: {}".format(Nv))
ld(" num_examples_to_generate: {}".format(num_examples_to_generate))
ld(" test_fraction: {}".format(test_fraction))
ld(" validation_fraction: {}".format(validation_fraction))
ld(" hidden_power_bus_id_list: {}".format(make_str_for_pretty_print_int_list(hidden_power_bus_id_list)))
ld(" hidden_voltage_bus_id_list: {}".format(make_str_for_pretty_print_int_list(hidden_voltage_bus_id_list)))
ld(" target_power_bus_id_list: {}".format(make_str_for_pretty_print_int_list(target_power_bus_id_list)))
ld(" target_voltage_bus_id_list: {}".format(make_str_for_pretty_print_int_list(target_voltage_bus_id_list)))
return pop
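# Usage sketch (hypothetical dataset_dir; the dataset name must match one of
# the branches above): a 4-bus scenario observed over T=5 time steps with
# 2 observable bus powers and 1 observable bus voltage at the last step.
#
#   pop = create_partial_observability_problem(
#       dataset_dir="data",
#       dataset_name="solar_smooth_ord_60_downsampling_factor_60",
#       T=5, Ns=2, Nv=1)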
|
the-stack_0_14907 | import sys
import uuid
from dataclasses import dataclass
from dataclasses import field
from dataclasses import fields
from dataclasses import make_dataclass
from typing import Dict
from typing import get_type_hints
from typing import Iterator
from typing import List
from typing import Union
from unittest import mock
from unittest import TestCase
from xml.etree.ElementTree import QName
from tests.fixtures.artists import Artist
from tests.fixtures.books import BookForm
from tests.fixtures.models import ChoiceType
from tests.fixtures.models import Parent
from tests.fixtures.models import TypeA
from tests.fixtures.models import TypeB
from tests.fixtures.models import UnionType
from tests.fixtures.series import Country
from tests.fixtures.submodels import ChoiceTypeChild
from xsdata.exceptions import XmlContextError
from xsdata.formats.dataclass.compat import class_types
from xsdata.formats.dataclass.models.builders import XmlMetaBuilder
from xsdata.formats.dataclass.models.builders import XmlVarBuilder
from xsdata.formats.dataclass.models.elements import XmlType
from xsdata.models.datatype import XmlDate
from xsdata.utils import text
from xsdata.utils.constants import return_input
from xsdata.utils.constants import return_true
from xsdata.utils.namespaces import build_qname
from xsdata.utils.testing import FactoryTestCase
from xsdata.utils.testing import XmlMetaFactory
from xsdata.utils.testing import XmlVarFactory
class XmlMetaBuilderTests(FactoryTestCase):
def setUp(self):
super().setUp()
self.builder = XmlMetaBuilder(
class_type=class_types.get_type("dataclasses"),
element_name_generator=return_input,
attribute_name_generator=return_input,
)
@mock.patch.object(XmlMetaBuilder, "build_vars")
def test_build(self, mock_build_vars):
var = XmlVarFactory.create(
xml_type=XmlType.ELEMENT, name="foo", qname="{foo}bar", types=(int,)
)
mock_build_vars.return_value = [var]
result = self.builder.build(Artist, None)
expected = XmlMetaFactory.create(
clazz=Artist,
qname="{http://musicbrainz.org/ns/mmd-2.0#}artist",
elements={var.qname: [var]},
)
self.assertEqual(expected, result)
mock_build_vars.assert_called_once_with(
Artist, "http://musicbrainz.org/ns/mmd-2.0#", return_input, return_input
)
@mock.patch.object(XmlMetaBuilder, "build_vars", return_value=[])
def test_build_with_parent_namespace(self, mock_build_vars):
result = self.builder.build(Country, "http://xsdata")
self.assertEqual(build_qname("http://xsdata", "country"), result.qname)
mock_build_vars.assert_called_once_with(
Country, "http://xsdata", return_input, return_input
)
@mock.patch.object(XmlMetaBuilder, "build_vars", return_value=[])
def test_build_with_no_meta_name_and_name_generator(self, *args):
self.builder.element_name_generator = text.snake_case
result = self.builder.build(BookForm, None)
self.assertEqual("book_form", result.qname)
def test_build_block_meta_inheritance(self):
@dataclass
class Bar:
class Meta:
name = "bar"
@dataclass
class Foo(Bar):
pass
@dataclass
class Thug(Bar):
class Meta:
name = "thug"
result = self.builder.build(Foo, None)
self.assertEqual("Foo", result.qname)
result = self.builder.build(Thug, None)
self.assertEqual("thug", result.qname)
def test_build_with_no_dataclass_raises_exception(self, *args):
with self.assertRaises(XmlContextError) as cm:
self.builder.build(int, None)
self.assertEqual(f"Type '{int}' is not a dataclass.", str(cm.exception))
def test_build_locates_globalns_per_field(self):
actual = self.builder.build(ChoiceTypeChild, None)
self.assertEqual(1, len(actual.choices))
self.assertEqual(9, len(actual.choices[0].elements))
self.assertIsNone(self.builder.find_globalns(object, "foo"))
def test_build_inner_type_has_no_target_qname(self):
actual = self.builder.build(Parent.Inner, None)
self.assertIsNone(actual.target_qname)
def test_target_namespace(self):
class Meta:
namespace = "bar"
target_namespace = "foo"
self.assertEqual("foo", self.builder.target_namespace(None, Meta))
del Meta.target_namespace
self.assertEqual("bar", self.builder.target_namespace(None, Meta))
class Module:
__NAMESPACE__ = "gl"
self.assertEqual("gl", self.builder.target_namespace(Module, Meta))
def test_build_vars(self):
result = self.builder.build_vars(BookForm, None, text.pascal_case, str.upper)
self.assertIsInstance(result, Iterator)
expected = [
XmlVarFactory.create(
xml_type=XmlType.ELEMENT,
index=1,
name="author",
qname="Author",
types=(str,),
required=True,
),
XmlVarFactory.create(
xml_type=XmlType.ELEMENT,
index=2,
name="title",
qname="Title",
types=(str,),
required=True,
),
XmlVarFactory.create(
xml_type=XmlType.ELEMENT,
index=3,
name="genre",
qname="Genre",
types=(str,),
required=True,
),
XmlVarFactory.create(
xml_type=XmlType.ELEMENT,
index=4,
name="price",
qname="Price",
types=(float,),
required=True,
),
XmlVarFactory.create(
xml_type=XmlType.ELEMENT,
index=5,
name="pub_date",
qname="PubDate",
types=(XmlDate,),
required=True,
),
XmlVarFactory.create(
xml_type=XmlType.ELEMENT,
index=6,
name="review",
qname="Review",
types=(str,),
required=True,
),
XmlVarFactory.create(
xml_type=XmlType.ATTRIBUTE, index=7, name="id", qname="ID", types=(str,)
),
XmlVarFactory.create(
xml_type=XmlType.ATTRIBUTE,
index=8,
name="lang",
qname="LANG",
types=(str,),
init=False,
default="en",
),
]
result = list(result)
self.assertEqual(expected, result)
for var in result:
self.assertIsNone(var.clazz)
def test_build_vars_with_ignore_types(self):
result = self.builder.build_vars(TypeB, None, return_input, return_input)
self.assertIsInstance(result, Iterator)
actual = list(result)
self.assertEqual(2, len(actual))
def test_default_xml_type(self):
cls = make_dataclass("a", [("x", int)])
self.assertEqual(XmlType.TEXT, self.builder.default_xml_type(cls))
cls = make_dataclass("b", [("x", int), ("y", int)])
self.assertEqual(XmlType.ELEMENT, self.builder.default_xml_type(cls))
cls = make_dataclass(
"c", [("x", int), ("y", int, field(metadata=dict(type="Text")))]
)
self.assertEqual(XmlType.ELEMENT, self.builder.default_xml_type(cls))
cls = make_dataclass(
"d", [("x", int), ("y", int, field(metadata=dict(type="Element")))]
)
self.assertEqual(XmlType.TEXT, self.builder.default_xml_type(cls))
with self.assertRaises(XmlContextError) as cm:
cls = make_dataclass(
"e",
[
("x", int, field(metadata=dict(type="Text"))),
("y", int, field(metadata=dict(type="Text"))),
],
)
self.builder.default_xml_type(cls)
self.assertEqual(
"Dataclass `e` includes more than one text node!", str(cm.exception)
)
class XmlVarBuilderTests(TestCase):
def setUp(self) -> None:
self.builder = XmlVarBuilder(
class_type=class_types.get_type("dataclasses"),
parent_ns=None,
default_xml_type=XmlType.ELEMENT,
element_name_generator=return_input,
attribute_name_generator=return_input,
)
super().setUp()
self.maxDiff = None
def test_build_with_choice_field(self):
globalns = sys.modules[ChoiceType.__module__].__dict__
type_hints = get_type_hints(ChoiceType)
class_field = fields(ChoiceType)[0]
self.builder.parent_ns = "bar"
self.maxDiff = None
actual = self.builder.build(
66,
"choice",
type_hints["choice"],
class_field.metadata,
True,
list,
globalns,
)
expected = XmlVarFactory.create(
index=67,
name="choice",
types=(object,),
factory=list,
any_type=True,
default=list,
xml_type=XmlType.ELEMENTS,
elements={
"{bar}a": XmlVarFactory.create(
index=1,
name="choice",
qname="{bar}a",
types=(TypeA,),
clazz=TypeA,
factory=list,
namespaces=("bar",),
),
"{bar}b": XmlVarFactory.create(
index=2,
name="choice",
qname="{bar}b",
types=(TypeB,),
clazz=TypeB,
factory=list,
namespaces=("bar",),
),
"{bar}int": XmlVarFactory.create(
index=3,
name="choice",
qname="{bar}int",
types=(int,),
factory=list,
namespaces=("bar",),
),
"{bar}int2": XmlVarFactory.create(
index=4,
name="choice",
qname="{bar}int2",
types=(int,),
derived=True,
nillable=True,
factory=list,
namespaces=("bar",),
),
"{bar}float": XmlVarFactory.create(
index=5,
name="choice",
qname="{bar}float",
types=(float,),
factory=list,
namespaces=("bar",),
),
"{bar}qname": XmlVarFactory.create(
index=6,
name="choice",
qname="{bar}qname",
types=(QName,),
factory=list,
namespaces=("bar",),
),
"{bar}tokens": XmlVarFactory.create(
index=7,
name="choice",
qname="{bar}tokens",
types=(int,),
tokens_factory=list,
derived=True,
factory=list,
default=return_true,
namespaces=("bar",),
),
"{foo}union": XmlVarFactory.create(
index=8,
name="choice",
qname="{foo}union",
types=(UnionType,),
clazz=UnionType,
factory=list,
namespaces=("foo",),
),
"{bar}p": XmlVarFactory.create(
index=9,
name="choice",
qname="{bar}p",
types=(float,),
derived=True,
factory=list,
default=1.1,
namespaces=("bar",),
),
},
wildcards=[
XmlVarFactory.create(
index=10,
name="choice",
xml_type=XmlType.WILDCARD,
qname="{http://www.w3.org/1999/xhtml}any",
types=(object,),
factory=list,
default=None,
namespaces=("http://www.w3.org/1999/xhtml",),
),
],
)
self.assertEqual(expected, actual)
def test_build_validates_result(self):
with self.assertRaises(XmlContextError) as cm:
self.builder.build(
1, "foo", List[int], {"type": "Attributes"}, True, None, None
)
self.assertEqual(
"Xml type 'Attributes' does not support typing: typing.List[int]",
str(cm.exception),
)
def test_resolve_namespaces(self):
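        # XSD wildcard namespace tokens used below: ##any matches any
        # namespace, ##other anything except the target namespace, ##local
        # the empty namespace, and ##targetNamespace the parent namespace
        # itself.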
func = self.builder.resolve_namespaces
self.builder.parent_ns = "bar"
self.assertEqual(("foo",), func(XmlType.ELEMENT, "foo"))
self.assertEqual((), func(XmlType.ELEMENT, ""))
self.assertEqual(("bar",), func(XmlType.ELEMENT, None))
self.assertEqual((), func(XmlType.ATTRIBUTE, None))
self.assertEqual(("bar",), func(XmlType.WILDCARD, None))
self.assertEqual(("##any",), func(XmlType.WILDCARD, "##any"))
self.builder.parent_ns = ""
self.assertEqual(("##any",), func(XmlType.WILDCARD, "##targetNamespace"))
self.builder.parent_ns = None
self.assertEqual(("##any",), func(XmlType.WILDCARD, "##targetNamespace"))
self.builder.parent_ns = "p"
self.assertEqual(("p",), func(XmlType.WILDCARD, "##targetNamespace"))
self.assertEqual(("",), func(XmlType.WILDCARD, "##local"))
self.assertEqual(("!p",), func(XmlType.WILDCARD, "##other"))
self.assertEqual(
("", "!p"), tuple(sorted(func(XmlType.WILDCARD, "##other ##local")))
)
self.assertEqual(
("foo", "p"),
tuple(sorted(func(XmlType.WILDCARD, "##targetNamespace foo"))),
)
def test_analyze_types(self):
actual = self.builder.analyze_types(List[List[Union[str, int]]], None)
self.assertEqual((list, list, (int, str)), actual)
actual = self.builder.analyze_types(Union[str, int], None)
self.assertEqual((None, None, (int, str)), actual)
actual = self.builder.analyze_types(Dict[str, int], None)
self.assertEqual((dict, None, (int, str)), actual)
with self.assertRaises(XmlContextError) as cm:
self.builder.analyze_types(List[List[List[int]]], None)
self.assertEqual(
"Unsupported typing: typing.List[typing.List[typing.List[int]]]",
str(cm.exception),
)
def test_is_valid(self):
# Attributes need origin dict
self.assertFalse(
self.builder.is_valid(XmlType.ATTRIBUTES, None, None, (), False, True)
)
# Attributes don't support any origin
self.assertFalse(
self.builder.is_valid(XmlType.ATTRIBUTES, dict, list, (), False, True)
)
# Attributes don't support xs:NMTOKENS
self.assertFalse(
self.builder.is_valid(XmlType.ATTRIBUTES, dict, None, (), True, True)
)
self.assertTrue(
self.builder.is_valid(
XmlType.ATTRIBUTES, dict, None, (str, str), False, True
)
)
# xs:NMTOKENS need origin list
self.assertFalse(
self.builder.is_valid(XmlType.TEXT, dict, None, (), True, True)
)
# xs:NMTOKENS need origin list
self.assertFalse(self.builder.is_valid(XmlType.TEXT, set, None, (), True, True))
# Any type object is a superset, it's only supported alone
self.assertFalse(
self.builder.is_valid(
XmlType.ELEMENT, None, None, (object, int), False, True
)
)
# Type is not registered in converter.
self.assertFalse(
self.builder.is_valid(
XmlType.TEXT, None, None, (int, uuid.UUID), False, True
)
)
# init false vars are ignored!
self.assertTrue(
self.builder.is_valid(
XmlType.TEXT, None, None, (int, uuid.UUID), False, False
)
)
|
the-stack_0_14908 | import flask
import git
import local_system
import update
api_blueprint = flask.Blueprint('api', __name__, url_prefix='/api')
@api_blueprint.route('/shutdown', methods=['POST'])
def shutdown_post():
try:
local_system.shutdown()
return _json_success()
except local_system.Error as e:
return _json_error(str(e)), 200
@api_blueprint.route('/restart', methods=['POST'])
def restart_post():
try:
local_system.restart()
return _json_success()
except local_system.Error as e:
return _json_error(str(e)), 200
@api_blueprint.route('/update', methods=['POST'])
def update_post():
"""Updates TinyPilot to the latest version available.
This is a slow endpoint, as it is expected to take 2~4 minutes to
complete.
Returns:
A JSON string with two keys: success and error.
success: true if successful.
error: null if successful, str otherwise.
Example of success:
{
'success': true,
'error': null,
}
Example of error:
{
'success': false,
'error': 'sudo: /opt/tinypilot-privileged/update: command not found'
}
"""
try:
update.update()
except update.Error as e:
return _json_error(str(e)), 200
return _json_success()
@api_blueprint.route('/version', methods=['GET'])
def version_get():
    Retrieves the currently installed version of TinyPilot.
Returns:
A JSON string with three keys when successful and two otherwise:
success, error and version (if successful).
success: true if successful.
error: null if successful, str otherwise.
version: str.
Example of success:
{
'success': true,
'error': null,
'version': 'bf07bfe72941457cf068ca0a44c6b0d62dd9ef05',
}
Example of error:
{
'success': false,
'error': 'git rev-parse HEAD failed.',
}
"""
try:
return _json_success({"version": git.local_head_commit_id()})
except git.Error as e:
return _json_error(str(e)), 200
@api_blueprint.route('/latestRelease', methods=['GET'])
def latest_release_get():
"""Retrieves the latest version of TinyPilot.
Returns:
A JSON string with three keys when successful and two otherwise:
success, error and version (if successful).
success: true if successful.
error: null if successful, str otherwise.
version: str.
Example of success:
{
'success': true,
'error': null,
'version': 'bf07bfe72941457cf068ca0a44c6b0d62dd9ef05',
}
Example of error:
{
'success': false,
'error': 'git rev-parse origin/master failed.',
}
"""
try:
return _json_success({"version": git.remote_head_commit_id()})
except git.Error as e:
return _json_error(str(e)), 200
def _json_success(fields=None):
    # Avoid a mutable default argument; treat None as "no extra fields".
    response = {
        'success': True,
        'error': None,
    }
    for k, v in (fields or {}).items():
        response[k] = v
    return flask.jsonify(response)
def _json_error(message):
return flask.jsonify({
'success': False,
'error': message,
})
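# Registration sketch (hypothetical application module, not part of this
# file):
#
#   app = flask.Flask(__name__)
#   app.register_blueprint(api_blueprint)
#
# after which the endpoints are served under /api, e.g. POST /api/shutdown.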
|
the-stack_0_14910 | # -*- coding: utf-8 -*-
import os
import types
import logging
from socket import AF_INET
from socket import AF_INET6
from socket import AF_UNSPEC
from itertools import chain
from functools import partial
from pr2modules import config
from pr2modules.config import AF_BRIDGE
from pr2modules.netlink import NLMSG_ERROR
from pr2modules.netlink import NLM_F_ATOMIC
from pr2modules.netlink import NLM_F_ROOT
from pr2modules.netlink import NLM_F_REPLACE
from pr2modules.netlink import NLM_F_REQUEST
from pr2modules.netlink import NLM_F_ACK
from pr2modules.netlink import NLM_F_DUMP
from pr2modules.netlink import NLM_F_CREATE
from pr2modules.netlink import NLM_F_EXCL
from pr2modules.netlink import NLM_F_APPEND
from pr2modules.netlink.rtnl import RTM_NEWADDR
from pr2modules.netlink.rtnl import RTM_GETADDR
from pr2modules.netlink.rtnl import RTM_DELADDR
from pr2modules.netlink.rtnl import RTM_NEWLINK
from pr2modules.netlink.rtnl import RTM_NEWLINKPROP
from pr2modules.netlink.rtnl import RTM_DELLINKPROP
from pr2modules.netlink.rtnl import RTM_GETLINK
from pr2modules.netlink.rtnl import RTM_DELLINK
from pr2modules.netlink.rtnl import RTM_NEWQDISC
from pr2modules.netlink.rtnl import RTM_GETQDISC
from pr2modules.netlink.rtnl import RTM_DELQDISC
from pr2modules.netlink.rtnl import RTM_NEWTFILTER
from pr2modules.netlink.rtnl import RTM_GETTFILTER
from pr2modules.netlink.rtnl import RTM_DELTFILTER
from pr2modules.netlink.rtnl import RTM_NEWTCLASS
from pr2modules.netlink.rtnl import RTM_GETTCLASS
from pr2modules.netlink.rtnl import RTM_DELTCLASS
from pr2modules.netlink.rtnl import RTM_NEWRULE
from pr2modules.netlink.rtnl import RTM_GETRULE
from pr2modules.netlink.rtnl import RTM_DELRULE
from pr2modules.netlink.rtnl import RTM_NEWROUTE
from pr2modules.netlink.rtnl import RTM_GETROUTE
from pr2modules.netlink.rtnl import RTM_DELROUTE
from pr2modules.netlink.rtnl import RTM_NEWNEIGH
from pr2modules.netlink.rtnl import RTM_GETNEIGH
from pr2modules.netlink.rtnl import RTM_DELNEIGH
from pr2modules.netlink.rtnl import RTM_SETLINK
from pr2modules.netlink.rtnl import RTM_GETNEIGHTBL
from pr2modules.netlink.rtnl import RTM_GETNSID
from pr2modules.netlink.rtnl import RTM_NEWNETNS
from pr2modules.netlink.rtnl import RTM_GETSTATS
from pr2modules.netlink.rtnl import TC_H_ROOT
from pr2modules.netlink.rtnl import rt_type
from pr2modules.netlink.rtnl import rt_scope
from pr2modules.netlink.rtnl import rt_proto
from pr2modules.netlink.rtnl.req import IPLinkRequest
from pr2modules.netlink.rtnl.req import IPBridgeRequest
from pr2modules.netlink.rtnl.req import IPBrPortRequest
from pr2modules.netlink.rtnl.req import IPRouteRequest
from pr2modules.netlink.rtnl.req import IPRuleRequest
from pr2modules.netlink.rtnl.req import IPAddrRequest
from pr2modules.netlink.rtnl.tcmsg import plugins as tc_plugins
from pr2modules.netlink.rtnl.tcmsg import tcmsg
from pr2modules.netlink.rtnl.rtmsg import rtmsg
from pr2modules.netlink.rtnl import ndmsg
from pr2modules.netlink.rtnl.ndtmsg import ndtmsg
from pr2modules.netlink.rtnl.fibmsg import fibmsg
from pr2modules.netlink.rtnl.ifinfmsg import ifinfmsg
from pr2modules.netlink.rtnl.ifinfmsg import IFF_NOARP
from pr2modules.netlink.rtnl.ifaddrmsg import ifaddrmsg
from pr2modules.netlink.rtnl.ifstatsmsg import ifstatsmsg
from pr2modules.netlink.rtnl.iprsocket import IPRSocket
from pr2modules.netlink.rtnl.iprsocket import IPBatchSocket
from pr2modules.netlink.rtnl.riprsocket import RawIPRSocket
from pr2modules.netlink.rtnl.nsidmsg import nsidmsg
from pr2modules.netlink.rtnl.nsinfmsg import nsinfmsg
from pr2modules.netlink.exceptions import SkipInode
from pr2modules.netlink.exceptions import NetlinkError
from pr2modules.common import AF_MPLS
from pr2modules.common import basestring
from pr2modules.common import getbroadcast
DEFAULT_TABLE = 254
log = logging.getLogger(__name__)
def transform_handle(handle):
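    '''
    Convert a traffic control handle given as a "major:minor" hex string
    into its integer form, e.g. "1:0" -> 0x10000; non-string input is
    returned unchanged.
    '''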
if isinstance(handle, basestring):
(major, minor) = [int(x if x else '0', 16) for x in handle.split(':')]
handle = (major << 8 * 2) | minor
return handle
class RTNL_API(object):
'''
`RTNL_API` should not be instantiated by itself. It is intended
to be used as a mixin class. Following classes use `RTNL_API`:
* `IPRoute` -- RTNL API to the current network namespace
* `NetNS` -- RTNL API to another network namespace
* `IPBatch` -- RTNL compiler
* `ShellIPR` -- RTNL via standard I/O, runs IPRoute in a shell
    It is an old-school API that provides access to rtnetlink as is.
    It helps you to retrieve and change almost all the data available
    through rtnetlink::
from pyroute2 import IPRoute
ipr = IPRoute()
# create an interface
ipr.link('add', ifname='brx', kind='bridge')
# lookup the index
dev = ipr.link_lookup(ifname='brx')[0]
# bring it down
ipr.link('set', index=dev, state='down')
# change the interface MAC address and rename it just for fun
ipr.link('set', index=dev,
address='00:11:22:33:44:55',
ifname='br-ctrl')
# add primary IP address
ipr.addr('add', index=dev,
address='10.0.0.1', mask=24,
broadcast='10.0.0.255')
# add secondary IP address
ipr.addr('add', index=dev,
address='10.0.0.2', mask=24,
broadcast='10.0.0.255')
# bring it up
ipr.link('set', index=dev, state='up')
'''
def __init__(self, *argv, **kwarg):
if 'netns_path' in kwarg:
self.netns_path = kwarg['netns_path']
else:
self.netns_path = config.netns_path
super(RTNL_API, self).__init__(*argv, **kwarg)
if not self.nlm_generator:
def _match(*argv, **kwarg):
return tuple(self._genmatch(*argv, **kwarg))
self._genmatch = self._match
self._match = _match
def _match(self, match, msgs):
# filtered results, the generator version
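        # `match` may be a callable predicate or a dict; dict values are
        # compared against message fields / NLA values and may themselves
        # be callables applied to the corresponding value.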
for msg in msgs:
if hasattr(match, '__call__'):
if match(msg):
yield msg
elif isinstance(match, dict):
matches = []
for key in match:
KEY = msg.name2nla(key)
if isinstance(match[key], types.FunctionType):
if msg.get(key) is not None:
matches.append(match[key](msg.get(key)))
elif msg.get_attr(KEY) is not None:
matches.append(match[key](msg.get_attr(KEY)))
else:
matches.append(False)
else:
matches.append(msg.get(key) == match[key] or
msg.get_attr(KEY) ==
match[key])
if all(matches):
yield msg
# 8<---------------------------------------------------------------
#
def dump(self):
'''
Iterate all the objects -- links, routes, addresses etc.
'''
##
# Well, it's the Linux API, why OpenBSD / FreeBSD here?
#
# 'Cause when you run RemoteIPRoute, it uses this class,
# and the code may be run on BSD systems as well, though
# BSD systems have only subset of the API
#
if self.uname[0] == 'OpenBSD':
methods = (self.get_links,
self.get_addr,
self.get_neighbours,
self.get_routes)
else:
methods = (self.get_links,
self.get_addr,
self.get_neighbours,
self.get_routes,
self.get_vlans,
partial(self.fdb, 'dump'),
partial(self.get_rules, family=AF_INET),
partial(self.get_rules, family=AF_INET6))
for method in methods:
for msg in method():
yield msg
# 8<---------------------------------------------------------------
#
# Listing methods
#
def get_qdiscs(self, index=None):
'''
Get all queue disciplines for all interfaces or for specified
one.
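        Example::
            # dump all the qdiscs
            ip.get_qdiscs()
            # only the qdiscs of the interface with index 2
            # (filtered in userspace)
            ip.get_qdiscs(index=2)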
'''
msg = tcmsg()
msg['family'] = AF_UNSPEC
ret = self.nlm_request(msg, RTM_GETQDISC)
if index is None:
return ret
else:
return [x for x in ret if x['index'] == index]
def get_filters(self, index=0, handle=0, parent=0):
'''
Get filters for specified interface, handle and parent.
'''
msg = tcmsg()
msg['family'] = AF_UNSPEC
msg['index'] = index
msg['handle'] = handle
msg['parent'] = parent
return self.nlm_request(msg, RTM_GETTFILTER)
def get_classes(self, index=0):
'''
Get classes for specified interface.
'''
msg = tcmsg()
msg['family'] = AF_UNSPEC
msg['index'] = index
return self.nlm_request(msg, RTM_GETTCLASS)
def get_vlans(self, **kwarg):
'''
Dump available vlan info on bridge ports
'''
# IFLA_EXT_MASK, extended info mask
#
# include/uapi/linux/rtnetlink.h
# 1 << 0 => RTEXT_FILTER_VF
# 1 << 1 => RTEXT_FILTER_BRVLAN
# 1 << 2 => RTEXT_FILTER_BRVLAN_COMPRESSED
# 1 << 3 => RTEXT_FILTER_SKIP_STATS
#
# maybe place it as mapping into ifinfomsg.py?
#
match = kwarg.get('match', None) or kwarg or None
return self.link('dump',
family=AF_BRIDGE,
ext_mask=2,
match=match)
def get_links(self, *argv, **kwarg):
'''
Get network interfaces.
By default returns all interfaces. Arguments vector
can contain interface indices or a special keyword
'all'::
ip.get_links()
ip.get_links('all')
ip.get_links(1, 2, 3)
interfaces = [1, 2, 3]
ip.get_links(*interfaces)
'''
result = []
links = argv or [0]
if links[0] == 'all': # compat syntax
links = [0]
if links[0] == 0:
cmd = 'dump'
else:
cmd = 'get'
for index in links:
kwarg['index'] = index
result.extend(self.link(cmd, **kwarg))
return result
def get_neighbours(self, family=AF_UNSPEC, match=None, **kwarg):
'''
Dump ARP cache records.
The `family` keyword sets the family for the request:
e.g. `AF_INET` or `AF_INET6` for arp cache, `AF_BRIDGE`
for fdb.
If other keyword arguments not empty, they are used as
filter. Also, one can explicitly set filter as a function
with the `match` parameter.
Examples::
# get neighbours on the 3rd link:
ip.get_neighbours(ifindex=3)
# get a particular record by dst:
ip.get_neighbours(dst='172.16.0.1')
# get fdb records:
ip.get_neighbours(AF_BRIDGE)
# and filter them by a function:
ip.get_neighbours(AF_BRIDGE, match=lambda x: x['state'] == 2)
'''
return self.neigh('dump', family=family, match=match or kwarg)
def get_ntables(self, family=AF_UNSPEC):
'''
Get neighbour tables
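        Example::
            # dump all the neighbour tables
            ip.get_ntables()
            # only the IPv4 tables
            ip.get_ntables(family=AF_INET)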
'''
msg = ndtmsg()
msg['family'] = family
return self.nlm_request(msg, RTM_GETNEIGHTBL)
def get_addr(self, family=AF_UNSPEC, match=None, **kwarg):
'''
Dump addresses.
If family is not specified, both AF_INET and AF_INET6 addresses
will be dumped::
# get all addresses
ip.get_addr()
It is possible to apply filters on the results::
# get addresses for the 2nd interface
ip.get_addr(index=2)
# get addresses with IFA_LABEL == 'eth0'
ip.get_addr(label='eth0')
# get all the subnet addresses on the interface, identified
# by broadcast address (should be explicitly specified upon
# creation)
ip.get_addr(index=2, broadcast='192.168.1.255')
A custom predicate can be used as a filter::
ip.get_addr(match=lambda x: x['index'] == 1)
'''
return self.addr('dump', family=family, match=match or kwarg)
def get_rules(self, family=AF_UNSPEC, match=None, **kwarg):
'''
Get all rules. By default return all rules. To explicitly
request the IPv4 rules use `family=AF_INET`.
Example::
ip.get_rules() # get all the rules for all families
ip.get_rules(family=AF_INET6) # get only IPv6 rules
'''
return self.rule((RTM_GETRULE,
NLM_F_REQUEST | NLM_F_ROOT | NLM_F_ATOMIC),
family=family,
match=match or kwarg)
def get_routes(self, family=255, match=None, **kwarg):
'''
Get all routes. You can specify the table. There
are 255 routing classes (tables), and the kernel
returns all the routes on each request. So the
routine filters routes from full output.
Example::
ip.get_routes() # get all the routes for all families
ip.get_routes(family=AF_INET6) # get only IPv6 routes
ip.get_routes(table=254) # get routes from 254 table
The default family=255 is a hack. Despite the specs,
the kernel returns only IPv4 routes for AF_UNSPEC family.
But it returns all the routes for all the families if one
uses an invalid value here. Hack but true. And let's hope
the kernel team will not fix this bug.
'''
# get a particular route?
if isinstance(kwarg.get('dst'), basestring):
return self.route('get', dst=kwarg['dst'])
else:
return self.route('dump',
family=family,
match=match or kwarg)
# 8<---------------------------------------------------------------
# 8<---------------------------------------------------------------
#
# List NetNS info
#
def _dump_one_ns(self, path, registry):
item = nsinfmsg()
item['netnsid'] = 0xffffffff # default netnsid "unknown"
nsfd = 0
info = nsidmsg()
msg = nsidmsg()
try:
nsfd = os.open(path, os.O_RDONLY)
item['inode'] = os.fstat(nsfd).st_ino
#
# if the inode is registered, skip it
#
if item['inode'] in registry:
raise SkipInode()
registry.add(item['inode'])
#
# request NETNSA_NSID
#
# may not work on older kernels ( <4.20 ?)
#
msg['attrs'] = [('NETNSA_FD', nsfd)]
try:
for info in self.nlm_request(msg,
RTM_GETNSID,
NLM_F_REQUEST):
# response to nlm_request() is a list or a generator,
# that's why loop
item['netnsid'] = info.get_attr('NETNSA_NSID')
break
except Exception:
pass
item['attrs'] = [('NSINFO_PATH', path)]
except OSError:
raise SkipInode()
finally:
if nsfd > 0:
os.close(nsfd)
item['header']['type'] = RTM_NEWNETNS
item['header']['target'] = self.target
item['event'] = 'RTM_NEWNETNS'
return item
def _dump_dir(self, path, registry):
for name in os.listdir(path):
# strictly speaking, there is no need to use os.sep,
# since the code is not portable outside of Linux
nspath = '%s%s%s' % (path, os.sep, name)
try:
yield self._dump_one_ns(nspath, registry)
except SkipInode:
pass
def _dump_proc(self, registry):
for name in os.listdir('/proc'):
try:
int(name)
except ValueError:
continue
try:
yield self._dump_one_ns('/proc/%s/ns/net' % name, registry)
except SkipInode:
pass
def get_netns_info(self, list_proc=False):
'''
A prototype method to list available netns and associated
interfaces. A bit weird to have it here and not under
`pr2modules.netns`, but it uses RTNL to get all the info.
'''
#
# register all the ns inodes, not to repeat items in the output
#
registry = set()
#
# fetch veth peers
#
peers = {}
for peer in self.get_links():
netnsid = peer.get_attr('IFLA_LINK_NETNSID')
if netnsid is not None:
if netnsid not in peers:
peers[netnsid] = []
peers[netnsid].append(peer.get_attr('IFLA_IFNAME'))
#
# chain iterators:
#
# * one iterator for every item in self.path
# * one iterator for /proc/<pid>/ns/net
#
views = []
for path in self.netns_path:
views.append(self._dump_dir(path, registry))
if list_proc:
views.append(self._dump_proc(registry))
#
# iterate all the items
#
for view in views:
try:
for item in view:
#
# remove uninitialized 'value' field
#
del item['value']
#
# fetch peers for that ns
#
for peer in peers.get(item['netnsid'], []):
item['attrs'].append(('NSINFO_PEER', peer))
yield item
except OSError:
pass
# 8<---------------------------------------------------------------
# 8<---------------------------------------------------------------
#
# Shortcuts
#
def get_default_routes(self, family=AF_UNSPEC, table=DEFAULT_TABLE):
'''
Get default routes
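        Example::
            # default routes from the main table (254)
            ip.get_default_routes()
            # only the IPv6 default routes
            ip.get_default_routes(family=AF_INET6)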
'''
# according to iproute2/ip/iproute.c:print_route()
return [x for x in self.get_routes(family, table=table)
if (x.get_attr('RTA_DST', None) is None and
x['dst_len'] == 0)]
def link_lookup(self, **kwarg):
'''
        Lookup interface index (indices) by first level NLA
        value.
Example::
ip.link_lookup(address="52:54:00:9d:4e:3d")
ip.link_lookup(ifname="lo")
ip.link_lookup(operstate="UP")
        Please note that link_lookup() returns a list, not a single
        value.
'''
if set(kwarg) in ({'index', }, {'ifname', }, {'index', 'ifname'}):
# shortcut for index and ifname
try:
for link in self.link('get', **kwarg):
return [link['index']]
except NetlinkError:
return []
else:
# otherwise fallback to the userspace filter
return [link['index'] for link in self.get_links(match=kwarg)]
# 8<---------------------------------------------------------------
# 8<---------------------------------------------------------------
#
# Shortcuts to flush RTNL objects
#
def flush_routes(self, *argv, **kwarg):
'''
Flush routes -- purge route records from a table.
Arguments are the same as for `get_routes()`
routine. Actually, this routine implements a pipe from
`get_routes()` to `nlm_request()`.
'''
ret = []
for route in self.get_routes(*argv, **kwarg):
self.put(route, msg_type=RTM_DELROUTE, msg_flags=NLM_F_REQUEST)
ret.append(route)
return ret
def flush_addr(self, *argv, **kwarg):
'''
Flush IP addresses.
Examples::
# flush all addresses on the interface with index 2:
ipr.flush_addr(index=2)
# flush all addresses with IFA_LABEL='eth0':
ipr.flush_addr(label='eth0')
'''
flags = NLM_F_CREATE | NLM_F_EXCL | NLM_F_REQUEST
ret = []
for addr in self.get_addr(*argv, **kwarg):
self.put(addr, msg_type=RTM_DELADDR, msg_flags=flags)
ret.append(addr)
return ret
def flush_rules(self, *argv, **kwarg):
'''
        Flush rules. Please keep in mind that by default the function
operates on **all** rules of **all** families. To work only on
IPv4 rules, one should explicitly specify `family=AF_INET`.
Examples::
# flush all IPv4 rule with priorities above 5 and below 32000
ipr.flush_rules(family=AF_INET, priority=lambda x: 5 < x < 32000)
# flush all IPv6 rules that point to table 250:
ipr.flush_rules(family=socket.AF_INET6, table=250)
'''
flags = NLM_F_CREATE | NLM_F_EXCL | NLM_F_REQUEST
ret = []
for rule in self.get_rules(*argv, **kwarg):
self.put(rule, msg_type=RTM_DELRULE, msg_flags=flags)
ret.append(rule)
return ret
# 8<---------------------------------------------------------------
# 8<---------------------------------------------------------------
#
# Extensions to low-level functions
#
def brport(self, command, **kwarg):
'''
Set bridge port parameters. Example::
idx = ip.link_lookup(ifname='eth0')
ip.brport("set", index=idx, unicast_flood=0, cost=200)
ip.brport("show", index=idx)
Possible keywords are NLA names for the `protinfo_bridge` class,
        without the prefix and in lowercase.
'''
if (command in ('dump', 'show')) and ('match' not in kwarg):
match = kwarg
else:
match = kwarg.pop('match', None)
flags_dump = NLM_F_REQUEST | NLM_F_DUMP
flags_req = NLM_F_REQUEST | NLM_F_ACK
commands = {'set': (RTM_SETLINK, flags_req),
'dump': (RTM_GETLINK, flags_dump),
'show': (RTM_GETLINK, flags_dump)}
(command, msg_flags) = commands.get(command, command)
msg = ifinfmsg()
if command == RTM_GETLINK:
msg['index'] = kwarg.get('index', 0)
else:
msg['index'] = kwarg.pop('index', 0)
msg['family'] = AF_BRIDGE
protinfo = IPBrPortRequest(kwarg)
msg['attrs'].append(('IFLA_PROTINFO', protinfo, 0x8000))
ret = self.nlm_request(msg,
msg_type=command,
msg_flags=msg_flags)
if match is not None:
ret = self._match(match, ret)
if not (command == RTM_GETLINK and self.nlm_generator):
ret = tuple(ret)
return ret
def vlan_filter(self, command, **kwarg):
'''
Vlan filters is another approach to support vlans in Linux.
Before vlan filters were introduced, there was only one way
to bridge vlans: one had to create vlan interfaces and
then add them as ports::
+------+ +----------+
net --> | eth0 | <--> | eth0.500 | <---+
+------+ +----------+ |
v
+------+ +-----+
net --> | eth1 | | br0 |
+------+ +-----+
^
+------+ +----------+ |
net --> | eth2 | <--> | eth2.500 | <---+
+------+ +----------+
        It means that one has to create as many bridges as there are
        vlans. Vlan filters make it possible to bridge the underlying
        interfaces together and to create vlans directly on the bridge::
# v500 label shows which interfaces have vlan filter
+------+ v500
net --> | eth0 | <-------+
+------+ |
v
+------+ +-----+ +---------+
net --> | eth1 | <--> | br0 |<-->| br0v500 |
+------+ +-----+ +---------+
^
+------+ v500 |
net --> | eth2 | <-------+
+------+
In this example vlan 500 will be allowed only on ports `eth0` and
`eth2`, though all three eth nics are bridged.
Some example code::
# create bridge
ip.link("add",
ifname="br0",
kind="bridge")
# attach a port
ip.link("set",
index=ip.link_lookup(ifname="eth0")[0],
master=ip.link_lookup(ifname="br0")[0])
# set vlan filter
ip.vlan_filter("add",
index=ip.link_lookup(ifname="eth0")[0],
vlan_info={"vid": 500})
# create vlan interface on the bridge
ip.link("add",
ifname="br0v500",
kind="vlan",
link=ip.link_lookup(ifname="br0")[0],
vlan_id=500)
# set all UP
ip.link("set",
index=ip.link_lookup(ifname="br0")[0],
state="up")
ip.link("set",
index=ip.link_lookup(ifname="br0v500")[0],
state="up")
ip.link("set",
index=ip.link_lookup(ifname="eth0")[0],
state="up")
# set IP address
ip.addr("add",
index=ip.link_lookup(ifname="br0v500")[0],
address="172.16.5.2",
mask=24)
Now all the traffic to the network 172.16.5.2/24 will go
        to vlan 500 only via ports that have such a vlan filter.
Required arguments for `vlan_filter()` -- `index` and `vlan_info`.
Vlan info struct::
{"vid": uint16,
"flags": uint16}
More details:
* kernel:Documentation/networking/switchdev.txt
* pr2modules.netlink.rtnl.ifinfmsg:... vlan_info
One can specify `flags` as int or as a list of flag names:
* `master` == 0x1
* `pvid` == 0x2
* `untagged` == 0x4
* `range_begin` == 0x8
* `range_end` == 0x10
* `brentry` == 0x20
E.g.::
{"vid": 20,
"flags": ["pvid", "untagged"]}
# is equal to
{"vid": 20,
"flags": 6}
Commands:
**add**
Add vlan filter to a bridge port. Example::
ip.vlan_filter("add", index=2, vlan_info={"vid": 200})
**del**
Remove vlan filter from a bridge port. Example::
ip.vlan_filter("del", index=2, vlan_info={"vid": 200})
'''
flags_req = NLM_F_REQUEST | NLM_F_ACK
commands = {'add': (RTM_SETLINK, flags_req),
'del': (RTM_DELLINK, flags_req)}
kwarg['family'] = AF_BRIDGE
kwarg['kwarg_filter'] = IPBridgeRequest
(command, flags) = commands.get(command, command)
return tuple(self.link((command, flags), **kwarg))
def fdb(self, command, **kwarg):
'''
Bridge forwarding database management.
More details:
* kernel:Documentation/networking/switchdev.txt
* pr2modules.netlink.rtnl.ndmsg
**add**
Add a new FDB record. Works in the same way as ARP cache
management, but some additional NLAs can be used::
# simple FDB record
#
ip.fdb('add',
ifindex=ip.link_lookup(ifname='br0')[0],
lladdr='00:11:22:33:44:55',
dst='10.0.0.1')
# specify vlan
# NB: vlan should exist on the device, use
# `vlan_filter()`
#
ip.fdb('add',
ifindex=ip.link_lookup(ifname='br0')[0],
lladdr='00:11:22:33:44:55',
dst='10.0.0.1',
vlan=200)
# specify vxlan id and port
# NB: works only for vxlan devices, use
# `link("add", kind="vxlan", ...)`
#
# if port is not specified, the default one is used
# by the kernel.
#
# if vni (vxlan id) is equal to the device vni,
# the kernel doesn't report it back
#
ip.fdb('add',
ifindex=ip.link_lookup(ifname='vx500')[0]
lladdr='00:11:22:33:44:55',
dst='10.0.0.1',
port=5678,
vni=600)
**append**
Append a new FDB record. The same syntax as for **add**.
**del**
Remove an existing FDB record. The same syntax as for **add**.
**dump**
Dump all the FDB records. If any `**kwarg` is provided,
results will be filtered::
# dump all the records
ip.fdb('dump')
# show only specific lladdr, dst, vlan etc.
ip.fdb('dump', lladdr='00:11:22:33:44:55')
ip.fdb('dump', dst='10.0.0.1')
ip.fdb('dump', vlan=200)
'''
kwarg['family'] = AF_BRIDGE
# nud -> state
if 'nud' in kwarg:
kwarg['state'] = kwarg.pop('nud')
if (command in ('add', 'del', 'append')) and \
not (kwarg.get('state', 0) & ndmsg.states['noarp']):
# state must contain noarp in add / del / append
kwarg['state'] = kwarg.pop('state', 0) | ndmsg.states['noarp']
# other assumptions
if not kwarg.get('state', 0) & (ndmsg.states['permanent'] |
ndmsg.states['reachable']):
# permanent (default) or reachable
kwarg['state'] |= ndmsg.states['permanent']
if not kwarg.get('flags', 0) & (ndmsg.flags['self'] |
ndmsg.flags['master']):
# self (default) or master
kwarg['flags'] = kwarg.get('flags', 0) | ndmsg.flags['self']
#
return self.neigh(command, **kwarg)
# 8<---------------------------------------------------------------
#
# General low-level configuration methods
#
def neigh(self, command, **kwarg):
'''
        Neighbour operations, same as `ip neigh` or `bridge fdb`
**add**
Add a neighbour record, e.g.::
from pyroute2 import IPRoute
from pr2modules.netlink.rtnl import ndmsg
# add a permanent record on veth0
idx = ip.link_lookup(ifname='veth0')[0]
ip.neigh('add',
dst='172.16.45.1',
lladdr='00:11:22:33:44:55',
ifindex=idx,
state=ndmsg.states['permanent'])
**set**
Set an existing record or create a new one, if it doesn't exist.
The same as above, but the command is "set"::
ip.neigh('set',
dst='172.16.45.1',
lladdr='00:11:22:33:44:55',
ifindex=idx,
state=ndmsg.states['permanent'])
**change**
Change an existing record. If the record doesn't exist, fail.
**del**
Delete an existing record.
**dump**
Dump all the records in the NDB::
ip.neigh('dump')
**get**
Get specific record (dst and ifindex are mandatory). Available
only on recent kernel::
ip.neigh('get',
dst='172.16.45.1',
ifindex=idx)
'''
if (command == 'dump') and ('match' not in kwarg):
match = kwarg
else:
match = kwarg.pop('match', None)
flags_dump = NLM_F_REQUEST | NLM_F_DUMP
flags_base = NLM_F_REQUEST | NLM_F_ACK
flags_make = flags_base | NLM_F_CREATE | NLM_F_EXCL
flags_append = flags_base | NLM_F_CREATE | NLM_F_APPEND
flags_change = flags_base | NLM_F_REPLACE
flags_replace = flags_change | NLM_F_CREATE
commands = {'add': (RTM_NEWNEIGH, flags_make),
'set': (RTM_NEWNEIGH, flags_replace),
'replace': (RTM_NEWNEIGH, flags_replace),
'change': (RTM_NEWNEIGH, flags_change),
'del': (RTM_DELNEIGH, flags_make),
'remove': (RTM_DELNEIGH, flags_make),
'delete': (RTM_DELNEIGH, flags_make),
'dump': (RTM_GETNEIGH, flags_dump),
'get': (RTM_GETNEIGH, flags_base),
'append': (RTM_NEWNEIGH, flags_append)}
(command, flags) = commands.get(command, command)
if 'nud' in kwarg:
kwarg['state'] = kwarg.pop('nud')
msg = ndmsg.ndmsg()
for field in msg.fields:
msg[field[0]] = kwarg.pop(field[0], 0)
msg['family'] = msg['family'] or AF_INET
msg['attrs'] = []
# fix nud kwarg
if isinstance(msg['state'], basestring):
msg['state'] = ndmsg.states_a2n(msg['state'])
for key in kwarg:
nla = ndmsg.ndmsg.name2nla(key)
if kwarg[key] is not None:
msg['attrs'].append([nla, kwarg[key]])
ret = self.nlm_request(msg,
msg_type=command,
msg_flags=flags)
if match:
ret = self._match(match, ret)
if not (command == RTM_GETNEIGH and self.nlm_generator):
ret = tuple(ret)
return ret
def link(self, command, **kwarg):
'''
Link operations.
Keywords to set up ifinfmsg fields:
* index -- interface index
* family -- AF_BRIDGE for bridge operations, otherwise 0
* flags -- device flags
* change -- change mask
All other keywords will be translated to NLA names, e.g.
`mtu -> IFLA_MTU`, `af_spec -> IFLA_AF_SPEC` etc. You can
provide a complete NLA structure or let filters do it for
you. E.g., these pairs show equal statements::
# set device MTU
ip.link("set", index=x, mtu=1000)
ip.link("set", index=x, IFLA_MTU=1000)
# add vlan device
ip.link("add", ifname="test", kind="dummy")
ip.link("add", ifname="test",
IFLA_LINKINFO={'attrs': [['IFLA_INFO_KIND', 'dummy']]})
Filters are implemented in the `pr2modules.netlink.rtnl.req` module.
You can contribute your own if you miss shortcuts.
Commands:
**add**
To create an interface, one should specify the interface kind::
ip.link("add",
ifname="test",
kind="dummy")
The kind can be any of those supported by kernel. It can be
`dummy`, `bridge`, `bond` etc. On modern kernels one can specify
even interface index::
ip.link("add",
ifname="br-test",
kind="bridge",
index=2345)
Specific type notes:
► geneve
Create GENEVE tunnel::
ip.link("add",
ifname="genx",
kind="geneve",
geneve_id=42,
geneve_remote="172.16.0.101")
Support for GENEVE over IPv6 is also included; use `geneve_remote6`
to configure a remote IPv6 address.
► gre
Create GRE tunnel::
ip.link("add",
ifname="grex",
kind="gre",
gre_local="172.16.0.1",
gre_remote="172.16.0.101",
gre_ttl=16)
The keyed GRE requires explicit iflags/oflags specification::
ip.link("add",
ifname="grex",
kind="gre",
gre_local="172.16.0.1",
gre_remote="172.16.0.101",
gre_ttl=16,
gre_ikey=10,
gre_okey=10,
gre_iflags=32,
gre_oflags=32)
Support for GRE over IPv6 is also included; use `kind=ip6gre` and
`ip6gre_` as the prefix for its values.
► ipip
Create ipip tunnel::
ip.link("add",
ifname="tun1",
kind="ipip",
ipip_local="172.16.0.1",
ipip_remote="172.16.0.101",
ipip_ttl=16)
Support for sit and ip6tnl is also included; use `kind=sit` and `sit_`
as prefix for sit tunnels, and `kind=ip6tnl` and `ip6tnl_` prefix for
ip6tnl tunnels.
► macvlan
        Macvlan interfaces act like VLANs within the OS. The macvlan driver
        provides the ability to add several MAC addresses on one interface,
        where every MAC address is reflected with a virtual interface in
        the system.
        In some setups macvlan interfaces can replace bridge interfaces,
        providing a simpler and at the same time high-performance
        solution::
ip.link("add",
ifname="mvlan0",
kind="macvlan",
link=ip.link_lookup(ifname="em1")[0],
macvlan_mode="private").commit()
Several macvlan modes are available: "private", "vepa", "bridge",
"passthru". Ususally the default is "vepa".
► macvtap
Almost the same as macvlan, but creates also a character tap device::
ip.link("add",
ifname="mvtap0",
kind="macvtap",
link=ip.link_lookup(ifname="em1")[0],
macvtap_mode="vepa").commit()
Will create a device file `"/dev/tap%s" % index`
► tuntap
Possible `tuntap` keywords:
- `mode` — "tun" or "tap"
- `uid` — integer
- `gid` — integer
- `ifr` — dict of tuntap flags (see ifinfmsg:... tuntap_data)
Create a tap interface::
ip.link("add",
ifname="tap0",
kind="tuntap",
mode="tap")
Tun/tap interfaces are created using `ioctl()`, but the library
provides a transparent way to manage them using netlink API.
► veth
To properly create `veth` interface, one should specify
`peer` also, since `veth` interfaces are created in pairs::
# simple call
ip.link("add", ifname="v1p0", kind="veth", peer="v1p1")
# set up specific veth peer attributes
ip.link("add",
ifname="v1p0",
kind="veth",
peer={"ifname": "v1p1",
"net_ns_fd": "test_netns"})
► vlan
VLAN interfaces require additional parameters, `vlan_id` and
`link`, where `link` is a master interface to create VLAN on::
ip.link("add",
ifname="v100",
kind="vlan",
link=ip.link_lookup(ifname="eth0")[0],
vlan_id=100)
There is a possibility to create also 802.1ad interfaces::
# create external vlan 802.1ad, s-tag
ip.link("add",
ifname="v100s",
kind="vlan",
link=ip.link_lookup(ifname="eth0")[0],
vlan_id=100,
vlan_protocol=0x88a8)
# create internal vlan 802.1q, c-tag
ip.link("add",
ifname="v200c",
kind="vlan",
link=ip.link_lookup(ifname="v100s")[0],
vlan_id=200,
vlan_protocol=0x8100)
► vrf
VRF interfaces (see linux/Documentation/networking/vrf.txt)::
ip.link("add",
ifname="vrf-foo",
kind="vrf",
vrf_table=42)
► vxlan
VXLAN interfaces are like VLAN ones, but require a few more
parameters::
ip.link("add",
ifname="vx101",
kind="vxlan",
vxlan_link=ip.link_lookup(ifname="eth0")[0],
vxlan_id=101,
vxlan_group='239.1.1.1',
vxlan_ttl=16)
All possible vxlan parameters are listed in the module
`pr2modules.netlink.rtnl.ifinfmsg:... vxlan_data`.
► ipoib
The IPoIB driver provides the ability to create several IP interfaces
on one InfiniBand interface.
IPoIB interfaces require the following parameter:
`link` : The master interface to create IPoIB on.
The following parameters can also be provided:
`pkey` : InfiniBand partition key the IP interface is associated with
`mode` : Underlying InfiniBand transport mode.
One of: ['datagram', 'connected']
`umcast` : If set (1), multicast group membership for this interface is
handled by user space.
Example::
ip.link("add",
ifname="ipoib1",
kind="ipoib",
link=ip.link_lookup(ifname="ib0")[0],
pkey=10)
**set**
Set interface attributes::
# get interface index
x = ip.link_lookup(ifname="eth0")[0]
# put link down
ip.link("set", index=x, state="down")
# rename and set MAC addr
ip.link("set", index=x, address="00:11:22:33:44:55", name="bala")
# set MTU and TX queue length
ip.link("set", index=x, mtu=1000, txqlen=2000)
# bring link up
ip.link("set", index=x, state="up")
Keyword "state" is reserved. State can be "up" or "down",
it is a shortcut::
state="up": flags=1, mask=1
state="down": flags=0, mask=1
SR-IOV virtual function setup::
# get PF index
x = ip.link_lookup(ifname="eth0")[0]
# setup macaddr
ip.link("set",
index=x, # PF index
vf={"vf": 0, # VF index
"mac": "00:11:22:33:44:55"}) # address
# setup vlan
ip.link("set",
index=x, # PF index
vf={"vf": 0, # VF index
"vlan": 100}) # the simplest case
# setup QinQ
ip.link("set",
index=x, # PF index
vf={"vf": 0, # VF index
"vlan": [{"vlan": 100, # vlan id
"proto": 0x88a8}, # 802.1ad
{"vlan": 200, # vlan id
"proto": 0x8100}]}) # 802.1q
**update**
Almost the same as `set`, except that it uses different flags
and a different message type. Mostly it does the same, but in some
cases it differs. If you're not sure which to use, use `set`.
**del**
Destroy the interface::
ip.link("del", index=ip.link_lookup(ifname="dummy0")[0])
**dump**
Dump info for all interfaces
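Example (a minimal sketch)::
for link in ip.link("dump"):
print(link.get_attr("IFLA_IFNAME"))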
**get**
Get specific interface info::
ip.link("get", index=ip.link_lookup(ifname="br0")[0])
Get extended attributes like SR-IOV setup::
ip.link("get", index=3, ext_mask=1)
'''
if (command == 'dump') and ('match' not in kwarg):
match = kwarg
else:
match = kwarg.pop('match', None)
if command[:4] == 'vlan':
log.warning('vlan filters are managed via `vlan_filter()`')
log.warning('this compatibility hack will be removed soon')
return self.vlan_filter(command[5:], **kwarg)
flags_dump = NLM_F_REQUEST | NLM_F_DUMP
flags_req = NLM_F_REQUEST | NLM_F_ACK
flags_create = flags_req | NLM_F_CREATE | NLM_F_EXCL
flag_append = flags_create | NLM_F_APPEND
commands = {'set': (RTM_NEWLINK, flags_req),
'update': (RTM_SETLINK, flags_create),
'add': (RTM_NEWLINK, flags_create),
'del': (RTM_DELLINK, flags_create),
'property_add': (RTM_NEWLINKPROP, flag_append),
'property_del': (RTM_DELLINKPROP, flags_req),
'remove': (RTM_DELLINK, flags_create),
'delete': (RTM_DELLINK, flags_create),
'dump': (RTM_GETLINK, flags_dump),
'get': (RTM_GETLINK, NLM_F_REQUEST)}
msg = ifinfmsg()
# ifinfmsg fields
#
# ifi_family
# ifi_type
# ifi_index
# ifi_flags
# ifi_change
#
msg['family'] = kwarg.pop('family', 0)
lrq = kwarg.pop('kwarg_filter', IPLinkRequest)
(command, msg_flags) = commands.get(command, command)
# index
msg['index'] = kwarg.pop('index', 0)
# flags
flags = kwarg.pop('flags', 0) or 0
# change
mask = kwarg.pop('mask', 0) or kwarg.pop('change', 0) or 0
# UP/DOWN shortcut
if 'state' in kwarg:
mask = 1 # IFF_UP mask
if kwarg['state'].lower() == 'up':
flags = 1 # 0 (down) or 1 (up)
del kwarg['state']
# arp on/off shortcut
if 'arp' in kwarg:
mask |= IFF_NOARP
if not kwarg.pop('arp'):
flags |= IFF_NOARP
msg['flags'] = flags
msg['change'] = mask
if 'altname' in kwarg:
altname = kwarg.pop("altname")
if command in (RTM_NEWLINKPROP, RTM_DELLINKPROP):
if not isinstance(altname, (list, tuple, set)):
altname = [altname]
kwarg["IFLA_PROP_LIST"] = {"attrs": [
("IFLA_ALT_IFNAME", alt_ifname)
for alt_ifname in altname
]}
else:
kwarg["IFLA_ALT_IFNAME"] = altname
# apply filter
kwarg = lrq(kwarg)
# attach NLA
for key in kwarg:
nla = type(msg).name2nla(key)
if kwarg[key] is not None:
msg['attrs'].append([nla, kwarg[key]])
ret = self.nlm_request(msg,
msg_type=command,
msg_flags=msg_flags)
if match is not None:
ret = self._match(match, ret)
if not (command == RTM_GETLINK and self.nlm_generator):
ret = tuple(ret)
return ret
def addr(self, command, index=None, address=None, mask=None,
family=None, scope=None, match=None, **kwarg):
'''
Address operations
* command -- add, delete, replace, dump
* index -- device index
* address -- IPv4 or IPv6 address
* mask -- address mask
* family -- socket.AF_INET for IPv4 or socket.AF_INET6 for IPv6
* scope -- the address scope, see /etc/iproute2/rt_scopes
* kwarg -- dictionary, any ifaddrmsg field or NLA
Later the method signature will be changed to::
def addr(self, command, match=None, **kwarg):
# the method body
So only keyword arguments (except for the command) will be accepted.
The reason for this change is the unification of the API.
Example::
idx = 62
ip.addr('add', index=idx, address='10.0.0.1', mask=24)
ip.addr('add', index=idx, address='10.0.0.2', mask=24)
With more NLAs::
# explicitly set broadcast address
ip.addr('add', index=idx,
address='10.0.0.3',
broadcast='10.0.0.255',
prefixlen=24)
# make the secondary address visible to ifconfig: add label
ip.addr('add', index=idx,
address='10.0.0.4',
broadcast='10.0.0.255',
prefixlen=24,
label='eth0:1')
Configure p2p address on an interface::
ip.addr('add', index=idx,
address='10.1.1.2',
mask=24,
local='10.1.1.1')
'''
if command in ('get', 'set'):
return
lrq = kwarg.pop('kwarg_filter', IPAddrRequest)
flags_dump = NLM_F_REQUEST | NLM_F_DUMP
flags_base = NLM_F_REQUEST | NLM_F_ACK
flags_create = flags_base | NLM_F_CREATE | NLM_F_EXCL
flags_replace = flags_base | NLM_F_REPLACE | NLM_F_CREATE
commands = {'add': (RTM_NEWADDR, flags_create),
'del': (RTM_DELADDR, flags_create),
'remove': (RTM_DELADDR, flags_create),
'delete': (RTM_DELADDR, flags_create),
'replace': (RTM_NEWADDR, flags_replace),
'dump': (RTM_GETADDR, flags_dump)}
(command, flags) = commands.get(command, command)
# fetch args
index = index or kwarg.pop('index', 0)
family = family or kwarg.pop('family', None)
prefixlen = mask or kwarg.pop('mask', 0) or kwarg.pop('prefixlen', 0)
scope = scope or kwarg.pop('scope', 0)
# move address to kwarg
# FIXME: add deprecation notice
if address:
kwarg['address'] = address
# try to guess family, if it is not forced
if kwarg.get('address') and family is None:
if address.find(":") > -1:
family = AF_INET6
mask = mask or 128
else:
family = AF_INET
mask = mask or 32
# setup the message
msg = ifaddrmsg()
msg['index'] = index
msg['family'] = family or 0
msg['prefixlen'] = prefixlen
msg['scope'] = scope
kwarg = lrq(kwarg)
try:
kwarg.sync_cacheinfo()
except AttributeError:
pass
# inject IFA_LOCAL, if family is AF_INET and IFA_LOCAL is not set
if family == AF_INET and \
kwarg.get('address') and \
kwarg.get('local') is None:
kwarg['local'] = kwarg['address']
# patch broadcast, if needed
if kwarg.get('broadcast') is True:
kwarg['broadcast'] = getbroadcast(address, mask, family)
# work on NLA
for key in kwarg:
nla = ifaddrmsg.name2nla(key)
if kwarg[key] not in (None, ''):
msg['attrs'].append([nla, kwarg[key]])
ret = self.nlm_request(msg,
msg_type=command,
msg_flags=flags,
terminate=lambda x: x['header']['type'] ==
NLMSG_ERROR)
if match:
ret = self._match(match, ret)
if not (command == RTM_GETADDR and self.nlm_generator):
ret = tuple(ret)
return ret
def tc(self, command, kind=None, index=0, handle=0, **kwarg):
'''
"Swiss knife" for traffic control. With the method you can
add, delete or modify qdiscs, classes and filters.
* command -- add or delete qdisc, class, filter.
* kind -- a string identifier -- "sfq", "htb", "u32" and so on.
* handle -- integer or string
Command can be one of ("add", "del", "add-class", "del-class",
"add-filter", "del-filter") (see `commands` dict in the code).
Handle notice: traditional iproute2 notation, like "1:0", actually
represents two parts in one four-bytes integer::
1:0 -> 0x10000
1:1 -> 0x10001
ff:0 -> 0xff0000
ffff:1 -> 0xffff0001
Target notice: if your target is a class/qdisc that applies an
algorithm that can only apply to an upstream traffic profile, but your
keys variable explicitly references a match that is only relevant for
upstream traffic, the kernel will reject the filter, unless you're
dealing with devices like IMQs.
For pyroute2 tc() you can use both forms: an integer like 0xffff0000
or a string like 'ffff:0000'. By default, the handle is 0, so you can
add simple classless queues without the need to specify a handle. An
ingress queue causes the handle to be 0xffff0000.
So, to set up an sfq queue on interface 1, the function call
will look like this::
ip = IPRoute()
ip.tc("add", "sfq", 1)
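An ingress qdisc is added the same way; its handle is then fixed to
0xffff0000 as noted above (a sketch)::
ip.tc("add", "ingress", 1)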
Instead of string commands ("add", "del"...), you can also use the
module constants, `RTM_NEWQDISC`, `RTM_DELQDISC` and so on::
ip = IPRoute()
flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE | NLM_F_EXCL
ip.tc((RTM_NEWQDISC, flags), "sfq", 1)
It should be noted that "change", "change-class" and
"change-filter" work like "replace", "replace-class" and
"replace-filter", except they will fail if the node doesn't
exist (while it would have been created by "replace"). This is
not the same behaviour as with "tc" where "change" can be used
to modify the value of some options while leaving the others
unchanged. However, as not all entities support this
operation, we believe the "change" commands as implemented
here are more useful.
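A minimal sketch of the difference, reusing the sfq example above::
# fails if interface 1 has no sfq qdisc attached yet
ip.tc("change", "sfq", 1)
# creates the qdisc if missing, modifies it otherwise
ip.tc("replace", "sfq", 1)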
Also available "modules" (returns tc plugins dict) and "help"
commands::
help(ip.tc("modules")["htb"])
print(ip.tc("help", "htb"))
'''
if command == 'set':
return
if command == 'modules':
return tc_plugins
if command == 'help':
p = tc_plugins.get(kind)
if p is not None and hasattr(p, '__doc__'):
return p.__doc__
else:
return 'No help available'
flags_base = NLM_F_REQUEST | NLM_F_ACK
flags_make = flags_base | NLM_F_CREATE | NLM_F_EXCL
flags_change = flags_base | NLM_F_REPLACE
flags_replace = flags_change | NLM_F_CREATE
commands = {'add': (RTM_NEWQDISC, flags_make),
'del': (RTM_DELQDISC, flags_make),
'remove': (RTM_DELQDISC, flags_make),
'delete': (RTM_DELQDISC, flags_make),
'change': (RTM_NEWQDISC, flags_change),
'replace': (RTM_NEWQDISC, flags_replace),
'add-class': (RTM_NEWTCLASS, flags_make),
'del-class': (RTM_DELTCLASS, flags_make),
'change-class': (RTM_NEWTCLASS, flags_change),
'replace-class': (RTM_NEWTCLASS, flags_replace),
'add-filter': (RTM_NEWTFILTER, flags_make),
'del-filter': (RTM_DELTFILTER, flags_make),
'change-filter': (RTM_NEWTFILTER, flags_change),
'replace-filter': (RTM_NEWTFILTER, flags_replace)}
if isinstance(command, int):
command = (command, flags_make)
if command == 'del':
if index == 0:
index = [x['index'] for x in self.get_links()
if x['index'] != 1]
if isinstance(index, (list, tuple, set)):
return list(chain(*(self.tc('del', index=x) for x in index)))
command, flags = commands.get(command, command)
msg = tcmsg()
# transform handle, parent and target, if needed:
handle = transform_handle(handle)
for item in ('parent', 'target', 'default'):
if item in kwarg and kwarg[item] is not None:
kwarg[item] = transform_handle(kwarg[item])
msg['index'] = index
msg['handle'] = handle
opts = kwarg.get('opts', None)
##
#
#
if kind in tc_plugins:
p = tc_plugins[kind]
msg['parent'] = kwarg.pop('parent', getattr(p, 'parent', 0))
if hasattr(p, 'fix_msg'):
p.fix_msg(msg, kwarg)
if kwarg:
if command in (RTM_NEWTCLASS, RTM_DELTCLASS):
opts = p.get_class_parameters(kwarg)
else:
opts = p.get_parameters(kwarg)
else:
msg['parent'] = kwarg.get('parent', TC_H_ROOT)
if kind is not None:
msg['attrs'].append(['TCA_KIND', kind])
if opts is not None:
msg['attrs'].append(['TCA_OPTIONS', opts])
return tuple(self.nlm_request(msg, msg_type=command, msg_flags=flags))
def route(self, command, **kwarg):
'''
Route operations.
Keywords to set up rtmsg fields:
* dst_len, src_len -- destination and source mask (see `dst` below)
* tos -- type of service
* table -- routing table
* proto -- `redirect`, `boot`, `static` (see `rt_proto`)
* scope -- routing realm
* type -- `unicast`, `local`, etc. (see `rt_type`)
`pr2modules/netlink/rtnl/rtmsg.py` rtmsg.nla_map:
* table -- routing table to use (default: 254)
* gateway -- via address
* prefsrc -- preferred source IP address
* dst -- the same as `prefix`
* iif -- incoming traffic interface
* oif -- outgoing traffic interface
etc.
One can specify mask not as `dst_len`, but as a part of `dst`,
e.g.: `dst="10.0.0.0/24"`.
Commands:
**add**
Example::
ip.route("add", dst="10.0.0.0/24", gateway="192.168.0.1")
It is also possible to set route metrics. There are two ways
to do so. The first is to use 'raw' NLA notation::
ip.route("add",
dst="10.0.0.0",
mask=24,
gateway="192.168.0.1",
metrics={"attrs": [["RTAX_MTU", 1400],
["RTAX_HOPLIMIT", 16]]})
The second way is to use shortcuts, provided by `IPRouteRequest`
class, which is applied to `**kwarg` automatically::
ip.route("add",
dst="10.0.0.0/24",
gateway="192.168.0.1",
metrics={"mtu": 1400,
"hoplimit": 16})
...
More `route()` examples. Blackhole route::
ip.route("add",
dst="10.0.0.0/24",
type="blackhole")
Create a route with metrics::
ip.route('add',
dst='172.16.0.0/24',
gateway='10.0.0.10',
metrics={'mtu': 1400,
'hoplimit': 16})
Multipath route::
ip.route("add",
dst="10.0.0.0/24",
multipath=[{"gateway": "192.168.0.1", "hops": 2},
{"gateway": "192.168.0.2", "hops": 1},
{"gateway": "192.168.0.3"}])
MPLS lwtunnel on eth0::
idx = ip.link_lookup(ifname='eth0')[0]
ip.route("add",
dst="10.0.0.0/24",
oif=idx,
encap={"type": "mpls",
"labels": "200/300"})
Create MPLS route: push label::
# $ sudo modprobe mpls_router
# $ sudo sysctl net.mpls.platform_labels=1024
ip.route('add',
family=AF_MPLS,
oif=idx,
dst=0x200,
newdst=[0x200, 0x300])
MPLS multipath::
idx = ip.link_lookup(ifname='eth0')[0]
ip.route("add",
dst="10.0.0.0/24",
table=20,
multipath=[{"gateway": "192.168.0.1",
"encap": {"type": "mpls",
"labels": 200}},
{"ifindex": idx,
"encap": {"type": "mpls",
"labels": 300}}])
MPLS target can be int, string, dict or list::
"labels": 300 # simple label
"labels": "300" # the same
"labels": (200, 300) # stacked
"labels": "200/300" # the same
# explicit label definition
"labels": {"bos": 1,
"label": 300,
"tc": 0,
"ttl": 16}
Create SEG6 tunnel encap mode (kernel >= 4.10)::
ip.route('add',
dst='2001:0:0:10::2/128',
oif=idx,
encap={'type': 'seg6',
'mode': 'encap',
'segs': '2000::5,2000::6'})
Create SEG6 tunnel inline mode (kernel >= 4.10)::
ip.route('add',
dst='2001:0:0:10::2/128',
oif=idx,
encap={'type': 'seg6',
'mode': 'inline',
'segs': ['2000::5', '2000::6']})
Create SEG6 tunnel inline mode with hmac (kernel >= 4.10)::
ip.route('add',
dst='2001:0:0:22::2/128',
oif=idx,
encap={'type': 'seg6',
'mode': 'inline',
'segs':'2000::5,2000::6,2000::7,2000::8',
'hmac':0xf})
Create SEG6 tunnel with ip4ip6 encapsulation (kernel >= 4.14)::
ip.route('add',
dst='172.16.0.0/24',
oif=idx,
encap={'type': 'seg6',
'mode': 'encap',
'segs': '2000::5,2000::6'})
Create SEG6LOCAL tunnel End.DX4 action (kernel >= 4.14)::
ip.route('add',
dst='2001:0:0:10::2/128',
oif=idx,
encap={'type': 'seg6local',
'action': 'End.DX4',
'nh4': '172.16.0.10'})
Create SEG6LOCAL tunnel End.DT6 action (kernel >= 4.14)::
ip.route('add',
dst='2001:0:0:10::2/128',
oif=idx,
encap={'type': 'seg6local',
'action': 'End.DT6',
'table':'10'})
Create SEG6LOCAL tunnel End.B6 action (kernel >= 4.14)::
ip.route('add',
dst='2001:0:0:10::2/128',
oif=idx,
encap={'type': 'seg6local',
'action': 'End.B6',
'srh':{'segs': '2000::5,2000::6'}})
Create SEG6LOCAL tunnel End.B6 action with hmac (kernel >= 4.14)::
ip.route('add',
dst='2001:0:0:10::2/128',
oif=idx,
encap={'type': 'seg6local',
'action': 'End.B6',
'srh': {'segs': '2000::5,2000::6',
'hmac':0xf}})
**change**, **replace**, **append**
Commands `change`, `replace` and `append` have the same meanings
as in ip-route(8): `change` modifies only an existing route, while
`replace` creates a new one if there is no such route yet.
`append` allows creating an IPv6 multipath route.
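For example (a sketch)::
# modify the route above, or create it if it does not exist yet
ip.route("replace",
dst="10.0.0.0/24",
gateway="192.168.0.2")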
**del**
Remove the route. The same syntax as for **add**.
**get**
Get route by spec.
**dump**
Dump all routes.
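Example (a minimal sketch; AF_INET comes from the socket module)::
# dump only IPv4 routes
ip.route("dump", family=AF_INET)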
'''
# 8<----------------------------------------------------
# FIXME
# flags should be moved to some more general place
flags_dump = NLM_F_DUMP | NLM_F_REQUEST
flags_base = NLM_F_REQUEST | NLM_F_ACK
flags_make = flags_base | NLM_F_CREATE | NLM_F_EXCL
flags_change = flags_base | NLM_F_REPLACE
flags_replace = flags_change | NLM_F_CREATE
flags_append = flags_base | NLM_F_CREATE | NLM_F_APPEND
# 8<----------------------------------------------------
# transform kwarg
if command in ('add', 'set', 'replace', 'change', 'append'):
kwarg['proto'] = kwarg.get('proto', 'static') or 'static'
kwarg['type'] = kwarg.get('type', 'unicast') or 'unicast'
kwarg = IPRouteRequest(kwarg)
if 'match' not in kwarg and command in ('dump', 'show'):
match = kwarg
else:
match = kwarg.pop('match', None)
callback = kwarg.pop('callback', None)
commands = {'add': (RTM_NEWROUTE, flags_make),
'set': (RTM_NEWROUTE, flags_replace),
'replace': (RTM_NEWROUTE, flags_replace),
'change': (RTM_NEWROUTE, flags_change),
'append': (RTM_NEWROUTE, flags_append),
'del': (RTM_DELROUTE, flags_make),
'remove': (RTM_DELROUTE, flags_make),
'delete': (RTM_DELROUTE, flags_make),
'get': (RTM_GETROUTE, NLM_F_REQUEST),
'show': (RTM_GETROUTE, flags_dump),
'dump': (RTM_GETROUTE, flags_dump)}
(command, flags) = commands.get(command, command)
msg = rtmsg()
# table is mandatory; by default == 254
# if table is not defined in kwarg, save it there
# also for nla_attr:
table = kwarg.get('table', 254)
msg['table'] = table if table <= 255 else 252
msg['family'] = kwarg.pop('family', AF_INET)
msg['scope'] = kwarg.pop('scope', rt_scope['universe'])
msg['dst_len'] = kwarg.pop('dst_len', None) or kwarg.pop('mask', 0)
msg['src_len'] = kwarg.pop('src_len', 0)
msg['tos'] = kwarg.pop('tos', 0)
msg['flags'] = kwarg.pop('flags', 0)
msg['type'] = kwarg.pop('type', rt_type['unspec'])
msg['proto'] = kwarg.pop('proto', rt_proto['unspec'])
msg['attrs'] = []
if msg['family'] == AF_MPLS:
for key in tuple(kwarg):
if key not in ('dst', 'newdst', 'via', 'multipath', 'oif'):
kwarg.pop(key)
for key in kwarg:
nla = rtmsg.name2nla(key)
if nla == 'RTA_DST' and not kwarg[key]:
continue
if kwarg[key] is not None:
msg['attrs'].append([nla, kwarg[key]])
# fix IP family, if needed
if msg['family'] in (AF_UNSPEC, 255):
if key in ('dst', 'src', 'gateway', 'prefsrc', 'newdst') \
and isinstance(kwarg[key], basestring):
msg['family'] = AF_INET6 if kwarg[key].find(':') >= 0 \
else AF_INET
elif key == 'multipath' and len(kwarg[key]) > 0:
hop = kwarg[key][0]
attrs = hop.get('attrs', [])
for attr in attrs:
if attr[0] == 'RTA_GATEWAY':
msg['family'] = AF_INET6 if \
attr[1].find(':') >= 0 else AF_INET
break
ret = self.nlm_request(msg,
msg_type=command,
msg_flags=flags,
callback=callback)
if match:
ret = self._match(match, ret)
if not (command == RTM_GETROUTE and self.nlm_generator):
ret = tuple(ret)
return ret
def rule(self, command, *argv, **kwarg):
'''
Rule operations
- command — add, delete, dump
- table — 0 < table id < 253
- priority — 0 < rule's priority < 32766
- action — type of rule, default 'FR_ACT_NOP' (see fibmsg.py)
- rtscope — routing scope, default RT_SCOPE_UNIVERSE
`(RT_SCOPE_UNIVERSE|RT_SCOPE_SITE|\
RT_SCOPE_LINK|RT_SCOPE_HOST|RT_SCOPE_NOWHERE)`
- family — rule's family (socket.AF_INET (default) or
socket.AF_INET6)
- src — IP source for Source Based (Policy Based) routing's rule
- dst — IP for Destination Based (Policy Based) routing's rule
- src_len — Mask for Source Based (Policy Based) routing's rule
- dst_len — Mask for Destination Based (Policy Based) routing's
rule
- iifname — Input interface for Interface Based (Policy Based)
routing's rule
- oifname — Output interface for Interface Based (Policy Based)
routing's rule
- uid_range — Range of user identifiers, as a string like "1000:1234"
- dport_range — Range of destination ports, as a string like "80-120"
- sport_range — Range of source ports, as a string like "80-120"
Route all packets via table 10::
# 32000: from all lookup 10
# ...
ip.rule('add', table=10, priority=32000)
Default action::
# 32001: from all lookup 11 unreachable
# ...
iproute.rule('add',
table=11,
priority=32001,
action='FR_ACT_UNREACHABLE')
Use source address to choose a routing table::
# 32004: from 10.64.75.141 lookup 14
# ...
iproute.rule('add',
table=14,
priority=32004,
src='10.64.75.141')
Use dst address to choose a routing table::
# 32005: from 10.64.75.141/24 lookup 15
# ...
iproute.rule('add',
table=15,
priority=32005,
dst='10.64.75.141',
dst_len=24)
Match fwmark::
# 32006: from 10.64.75.141 fwmark 0xa lookup 15
# ...
iproute.rule('add',
table=15,
priority=32006,
dst='10.64.75.141',
fwmark=10)
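Select a table by input interface (a sketch; the interface name is
illustrative)::
# 32007: from all iif eth1 lookup 16
# ...
iproute.rule('add',
table=16,
priority=32007,
iifname='eth1')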
'''
if command == 'set':
return
flags_base = NLM_F_REQUEST | NLM_F_ACK
flags_make = flags_base | NLM_F_CREATE | NLM_F_EXCL
flags_dump = NLM_F_REQUEST | NLM_F_ROOT | NLM_F_ATOMIC
commands = {'add': (RTM_NEWRULE, flags_make),
'del': (RTM_DELRULE, flags_make),
'remove': (RTM_DELRULE, flags_make),
'delete': (RTM_DELRULE, flags_make),
'dump': (RTM_GETRULE, flags_dump)}
if isinstance(command, int):
command = (command, flags_make)
command, flags = commands.get(command, command)
if argv:
# this code block will be removed in some release
log.error('rule(): positional parameters are deprecated')
names = ['table', 'priority', 'action', 'family',
'src', 'src_len', 'dst', 'dst_len', 'fwmark',
'iifname', 'oifname']
kwarg.update(dict(zip(names, argv)))
kwarg = IPRuleRequest(kwarg)
msg = fibmsg()
table = kwarg.get('table', 0)
msg['table'] = table if table <= 255 else 252
for key in ('family',
'src_len',
'dst_len',
'action',
'tos',
'flags'):
msg[key] = kwarg.pop(key, 0)
msg['attrs'] = []
for key in kwarg:
nla = fibmsg.name2nla(key)
if kwarg[key] is not None:
msg['attrs'].append([nla, kwarg[key]])
ret = self.nlm_request(msg,
msg_type=command,
msg_flags=flags)
if 'match' in kwarg:
ret = self._match(kwarg['match'], ret)
if not (command == RTM_GETRULE and self.nlm_generator):
ret = tuple(ret)
return ret
def stats(self, command, **kwarg):
'''
Stats prototype.
'''
if (command == 'dump') and ('match' not in kwarg):
match = kwarg
else:
match = kwarg.pop('match', None)
commands = {'dump': (RTM_GETSTATS, NLM_F_REQUEST | NLM_F_DUMP),
'get': (RTM_GETSTATS, NLM_F_REQUEST | NLM_F_ACK)}
command, flags = commands.get(command, command)
msg = ifstatsmsg()
msg['filter_mask'] = kwarg.get('filter_mask', 31)
msg['ifindex'] = kwarg.get('ifindex', 0)
ret = self.nlm_request(msg,
msg_type=command,
msg_flags=flags)
if match is not None:
ret = self._match(match, ret)
if not (command == RTM_GETSTATS and self.nlm_generator):
ret = tuple(ret)
return ret
# 8<---------------------------------------------------------------
class IPBatch(RTNL_API, IPBatchSocket):
'''
Netlink requests compiler. Does not send any requests, but
instead stores them in the internal binary buffer. The
contents of the buffer can be used to send batch requests,
to test custom netlink parsers and so on.
Uses `RTNL_API` and provides all the same API as normal
`IPRoute` objects::
# create the batch compiler
ipb = IPBatch()
# compile requests into the internal buffer
ipb.link("add", index=550, ifname="test", kind="dummy")
ipb.link("set", index=550, state="up")
ipb.addr("add", index=550, address="10.0.0.2", mask=24)
# save the buffer
data = ipb.batch
# reset the buffer
ipb.reset()
...
# send the buffer
IPRoute().sendto(data, (0, 0))
'''
pass
class IPRoute(RTNL_API, IPRSocket):
'''
Regular ordinary utility class, see RTNL API for the list of methods.
'''
pass
class RawIPRoute(RTNL_API, RawIPRSocket):
'''
The same as `IPRoute`, but does not use the netlink proxy.
Thus it can not manage e.g. tun/tap interfaces.
'''
pass
|
the-stack_0_14912 | import numpy as np
import time
from rllab.misc import logger
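# Roll out a single trajectory of at most `path_length` steps in `env`
# using `policy`, and return the transitions as aligned numpy arrays.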
def rollout(env, policy, path_length, render=False, speedup=None):
Da = env.action_space.flat_dim
Do = env.observation_space.flat_dim
observation = env.reset()
policy.reset()
observations = np.zeros((path_length + 1, Do))
actions = np.zeros((path_length, Da))
terminals = np.zeros((path_length, ))
rewards = np.zeros((path_length, ))
agent_infos = []
env_infos = []
t = 0
for t in range(path_length):
action, agent_info = policy.get_action(observation)
next_obs, reward, terminal, env_info = env.step(action)
agent_infos.append(agent_info)
env_infos.append(env_info)
actions[t] = action
terminals[t] = terminal
rewards[t] = reward
observations[t] = observation
observation = next_obs
if render:
env.render()
time_step = 0.05
time.sleep(time_step / (speedup or 1))  # guard against speedup=None
if terminal:
break
observations[t + 1] = observation
path = {
'observations': observations[:t + 1],
'actions': actions[:t + 1],
'rewards': rewards[:t + 1],
'terminals': terminals[:t + 1],
'next_observations': observations[1:t + 2],
'agent_infos': agent_infos,
'env_infos': env_infos
}
return path
def rollouts(env, policy, path_length, n_paths):
paths = list()
for i in range(n_paths):
paths.append(rollout(env, policy, path_length))
return paths
class Sampler(object):
def __init__(self, max_path_length, min_pool_size, batch_size):
self._max_path_length = max_path_length
self._min_pool_size = min_pool_size
self._batch_size = batch_size
self.env = None
self.policy = None
self.pool = None
def initialize(self, env, policy, pool):
self.env = env
self.policy = policy
self.pool = pool
def sample(self):
raise NotImplementedError
def batch_ready(self):
enough_samples = self.pool.size >= self._min_pool_size
return enough_samples
def random_batch(self):
return self.pool.random_batch(self._batch_size)
def terminate(self):
self.env.terminate()
def log_diagnostics(self):
logger.record_tabular('pool-size', self.pool.size)
class SimpleSampler(Sampler):
def __init__(self, **kwargs):
super(SimpleSampler, self).__init__(**kwargs)
self._path_length = 0
self._path_return = 0
self._last_path_return = 0
self._max_path_return = -np.inf
self._n_episodes = 0
self._current_observation = None
self._total_samples = 0
def sample(self):
if self._current_observation is None:
self._current_observation = self.env.reset()
action, _ = self.policy.get_action(self._current_observation)
next_observation, reward, terminal, info = self.env.step(action)
self._path_length += 1
self._path_return += reward
self._total_samples += 1
self.pool.add_sample(
observation=self._current_observation,
action=action,
reward=reward,
terminal=terminal,
next_observation=next_observation)
if terminal or self._path_length >= self._max_path_length:
self.policy.reset()
self._current_observation = self.env.reset()
self._path_length = 0
self._max_path_return = max(self._max_path_return,
self._path_return)
self._last_path_return = self._path_return
self._path_return = 0
self._n_episodes += 1
else:
self._current_observation = next_observation
def log_diagnostics(self):
super(SimpleSampler, self).log_diagnostics()
logger.record_tabular('max-path-return', self._max_path_return)
logger.record_tabular('last-path-return', self._last_path_return)
logger.record_tabular('episodes', self._n_episodes)
logger.record_tabular('total-samples', self._total_samples)
class DummySampler(Sampler):
def __init__(self, batch_size, max_path_length):
super(DummySampler, self).__init__(
max_path_length=max_path_length,
min_pool_size=0,
batch_size=batch_size)
def sample(self):
pass
|
the-stack_0_14913 | import os
from flask import Flask, request, abort, jsonify, json
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import exc
from flask_cors import CORS
import random
from flask_migrate import Migrate
from models import setup_db, Movies, Actors, db
from auth.auth import AuthError, requires_auth
def create_app(test_config=None):
# create and configure the app
app = Flask(__name__)
setup_db(app)
CORS(app, resources={r'/*': {'origins': '*'}})
migrate = Migrate(app, db)
# CORS Headers
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Headers',
'Content-Type, Authorization, true')
response.headers.add('Access-Control-Allow-Methods',
'GET, PUT, POST, DELETE, OPTIONS')
return response
def get_all_movies():
movies = []
all_movies = Movies.query.all()
for movie in all_movies:
movies.append(movie.format())
return movies
def get_all_actors():
actors = []
all_actors = Actors.query.all()
for actor in all_actors:
actors.append(actor.format())
return actors
# Error Handler
@app.errorhandler(401)
def unauthorized(error):
"""
:error handler for error 401
:param error: Unauthorized
:return: error: HTTP status code, message: Error description
"""
return jsonify({
'success': False,
'error': 401,
'message': 'Unauthorized ' + str(error)
}), 401
@app.errorhandler(404)
def not_found(error):
return jsonify({
'success': False,
'error': 404,
'message': 'Not Found'
}), 404
@app.errorhandler(500)
def internal_server_error(error):
return jsonify({
'success': False,
'error': 500,
'message': 'Token Expired or Internal Server error'
}), 500
@app.errorhandler(422)
def unprocessable_entity(error):
return jsonify({
'success': False,
'error': 422,
'message': 'Unprocessable Entity'
}), 422
@app.route('/')
def get_greeting():
excited = os.environ['EXCITED']
greeting = "Hello, you are in public land"
if excited == 'true':
greeting = greeting + "!!!!!"
return greeting
@app.route('/actors', methods=['GET'])
@requires_auth('get:actors')
def getactors(payload):
formated_actors = []
all_actors = Actors.query.all()
for actor in all_actors:
formated_actors.append(actor.format())
return jsonify({
'actors': formated_actors,
'total_actors': len(formated_actors),
'success': True
})
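# POST /actors expects a JSON body with the new actor's attributes,
# e.g. (illustrative values): {"actor_name": "Jane Doe",
# "actor_age": 35, "actor_gender": "female"}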
@app.route('/actors', methods=['POST'])
@requires_auth('add:actors')
def add_actors(payload):
if request.get_json().get('actor_age'):
body = request.get_json()
actor_age = body.get('actor_age')
actor_gender = body.get('actor_gender')
actor_name = body.get('actor_name')
actor = Actors(name=actor_name, age=actor_age, gender=actor_gender)
actor.insert()
actor_id = actor.id
actor_added = Actors.query.filter_by(id=actor_id).first()
return jsonify({
'success': True,
'added_actors': actor_added.format()
})
@app.route('/movies', methods=['GET'])
@requires_auth('get:movies')
def movies_get(payload):
formated_movies = []
dictionarize = {}
all_movies_with_actor_name = Movies.query.with_entities(
Movies.id,
Movies.title,
Movies.release_date,
Actors.name).join(Actors, (Movies.actor_id == Actors.id)).all()
for movies in all_movies_with_actor_name:
dictionarize['id'] = movies[0]
dictionarize['movie_name'] = movies[1]
dictionarize['release_date'] = movies[2]
dictionarize['actor_name'] = movies[3]
formated_movies.append(dict(dictionarize))
return jsonify({
'movies': formated_movies,
'total_movies': len(formated_movies),
'success': True
})
@app.route('/movies', methods=['POST'])
@requires_auth('add:movies')
def movies(payload):
if request.get_json().get('movie_name'):
body = request.get_json()
movie_name = body.get('movie_name')
release_date = body.get('release_date')
id_actor = body.get('actor_id')
movie = Movies(title=movie_name, release_date=release_date,
actor_id=id_actor)
movie.insert()
movie_id = movie.id
movie_added = Movies.query.filter_by(id=movie_id).first()
return jsonify({
'success': True,
'added_movie': movie_added.format()
})
@app.route('/movies/<int:movie_id>', methods=['PATCH'])
@requires_auth('patch:movies')
def update_movie_by_id(payload, movie_id):
movie_by_id = Movies.query.filter_by(id=movie_id).first()
if movie_by_id is None:
abort(404)
try:
if request.get_json().get('new_movie_name') and request.get_json()\
.get('new_release_date'):
body = request.get_json()
new_title = body.get('new_movie_name')
new_release_date = body.get('new_release_date')
movie_by_id.title = new_title
movie_by_id.release_date = new_release_date
except ValueError:
try:
if request.get_json().get('new_movie_name'):
body = request.get_json()
new_title = body.get('new_movie_name')
movie_by_id.title = new_title
except ValueError:
try:
if request.get_json().get('new_release_date'):
body = request.get_json()
new_release_date = body.get('new_release_date')
movie_by_id.release_date = new_release_date
except ValueError:
abort(404)
movie_by_id.update()
all_movies = get_all_movies()
return jsonify({
'success': True,
'all_movies': all_movies
})
@app.route('/actors/<int:actor_id>', methods=['PATCH'])
@requires_auth('patch:actors')
def update_actor_by_id(payload, actor_id):
actor_by_id = Actors.query.filter_by(id=actor_id).first()
if actor_by_id is None:
abort(404)
try:
if request.get_json().get('new_actor_name') and request.get_json()\
.get('new_actor_age'):
body = request.get_json()
new_actor_name = body.get('new_actor_name')
new_actor_age = body.get('new_actor_age')
actor_by_id.name = new_actor_name
actor_by_id.age = new_actor_age
except ValueError:
try:
if request.get_json().get('new_actor_name'):
body = request.get_json()
new_actor_name = body.get('new_actor_name')
actor_by_id.name = new_actor_name
except ValueError:
try:
if request.get_json().get('new_actor_age'):
body = request.get_json()
new_actor_age = body.get('new_actor_age')
actor_by_id.age = new_actor_age
except ValueError:
abort(404)
actor_by_id.update()
all_actors = get_all_actors()
return jsonify({
'success': True,
'all_actors': all_actors
})
@app.route('/movies/<int:movie_id>', methods=['DELETE'])
@requires_auth('delete:movies')
def delete_movie_by_id(payload, movie_id):
movie_by_id = Movies.query.filter_by(id=movie_id).first()
if movie_by_id is None:
abort(404)
movie_by_id.delete()
return jsonify({
'success': True,
'deleted': movie_id
})
@app.route('/actors/<int:actor_id>', methods=['DELETE'])
@requires_auth('delete:actors')
def delete_actor_by_id(payload, actor_id):
actor_by_id = Actors.query.filter_by(id=actor_id).first()
if actor_by_id is None:
abort(404)
try:
actor_by_id.delete()
except exc.IntegrityError:
abort(404)
return jsonify({
'success': True,
'deleted': actor_id
})
return app
app = create_app()
if __name__ == '__main__':
app.run()
|
the-stack_0_14915 | import os
import pytest
from pyinsights.cli import run
CONFIG_FILEPATH_FOR_TEST = os.getenv('CONFIG_FILEPATH_FOR_TEST')
PROFILE_FOR_TEST = os.getenv('PROFILE_FOR_TEST')
REGION_FOR_TEST = os.getenv('REGION_FOR_TEST')
@pytest.mark.skipif(
CONFIG_FILEPATH_FOR_TEST is None,
reason='Use AWS Resource'
)
class TestPyInsights:
@pytest.fixture()
def kwargs(self):
return {
'profile': PROFILE_FOR_TEST,
'region': REGION_FOR_TEST,
'config': CONFIG_FILEPATH_FOR_TEST
}
def test_valid_kwargs_with_json_format(self, kwargs):
kwargs['format'] = 'json'
result = run(kwargs)
assert result is True
def test_valid_kwargs_with_table_format(self, kwargs):
kwargs['format'] = 'table'
result = run(kwargs)
assert result is True
|
the-stack_0_14916 | """EESG.py
Created by Latha Sethuraman, Katherine Dykes.
Copyright (c) NREL. All rights reserved.
Electromagnetic design based on conventional magnetic circuit laws
Structural design based on McDonald's thesis """
from openmdao.api import Group, Problem, ExplicitComponent,ExecComp,IndepVarComp,ScipyOptimizeDriver
import numpy as np
from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan
import sys, os
class EESG(ExplicitComponent):
""" Estimates overall mass dimensions and Efficiency of Electrically Excited Synchronous generator. """
def setup(self):
# EESG generator design inputs
#self.add_input('r_s', val=0.0, units ='m', desc='airgap radius r_s')
self.add_input('rad_ag', val=0.0, units ='m', desc='airgap radius')
self.add_input('l_s', val=0.0, units ='m', desc='Stator core length l_s')
self.add_input('h_s', val=0.0, units ='m', desc='Yoke height h_s')
self.add_input('tau_p',val=0.0, units ='m', desc='Pole pitch self.tau_p')
self.add_input('machine_rating',val=0.0, units ='W', desc='Machine rating')
self.add_input('n_nom',val=0.0, units ='rpm', desc='rated speed')
self.add_input('Torque',val=0.0, units ='N*m', desc='Rated torque ')
self.add_input('I_f',val=0.0000,units='A',desc='Excitation current')
self.add_input('N_f',val=0.0,units='A',desc='field turns')
self.add_input('h_ys',val=0.0, units ='m', desc='Yoke height')
self.add_input('h_yr',val=0.0, units ='m', desc='rotor yoke height')
# structural design variables
self.add_input('n_s' ,val=0.0, desc='number of stator arms n_s')
self.add_input('b_st' , val=0.0, units ='m', desc='arm width b_st')
self.add_input('d_s',val=0.0,units ='m', desc='arm depth d_s')
self.add_input('t_ws' ,val=0.0,units ='m', desc='arm depth thickness self.t_wr')
self.add_input('n_r' ,val=0.0, desc='number of arms n')
self.add_input('b_r' ,val=0.0,units ='m', desc='arm width b_r')
self.add_input('d_r' ,val=0.0, units ='m', desc='arm depth d_r')
self.add_input('t_wr' ,val=0.0, units ='m', desc='arm depth thickness self.t_wr')
self.add_input('R_o',val=0.0, units ='m',desc='Shaft radius')
# EESG generator design outputs
# Magnetic loading
self.add_output('B_symax' ,val=0.0, desc='Peak Stator Yoke flux density B_ymax')
self.add_output('B_tmax',val=0.0, desc='Peak Teeth flux density')
self.add_output('B_rymax',val=0.0, desc='Peak Rotor yoke flux density')
self.add_output('B_gfm',val=0.0, desc='Average air gap flux density B_g')
self.add_output('B_g' ,val=0.0, desc='Peak air gap flux density B_g')
self.add_output('B_pc',val=0.0, desc='Pole core flux density')
# Stator design
self.add_output('N_s' ,val=0.0, desc='Number of turns in the stator winding')
self.add_output('b_s',val=0.0, desc='slot width')
self.add_output('b_t',val=0.0, desc='tooth width')
self.add_output('A_Cuscalc',val=0.0, desc='Conductor cross-section mm^2')
self.add_output('S',val=0.0, desc='Stator slots')
# # Output parameters : Rotor design
self.add_output('h_p',val=0.0, desc='Pole height')
self.add_output('b_p',val=0.0, desc='Pole width')
self.add_output('p',val=0.0, desc='No of pole pairs')
self.add_output('n_brushes',val=0.0, desc='number of brushes')
self.add_output('A_Curcalc',val=0.0, desc='Rotor Conductor cross-section')
# Output parameters : Electrical performance
self.add_output('E_s',val=0.0, desc='Stator phase voltage')
self.add_output('f',val=0.0, desc='Generator output frequency')
self.add_output('I_s',val=0.0, desc='Generator output phase current')
self.add_output('R_s',val=0.0, desc='Stator resistance')
self.add_output('R_r',val=0.0, desc='Rotor resistance')
self.add_output('L_m',val=0.0, desc='Stator synchronising inductance')
self.add_output('J_s',val=0.0, desc='Stator Current density')
self.add_output('J_f',val=0.0, desc='rotor Current density')
self.add_output('A_1',val=0.0, desc='Specific current loading')
self.add_output('Load_mmf_ratio',val=0.0, desc='mmf_ratio')
# Objective functions and output
self.add_output('Mass',val=0.0, desc='Actual mass')
self.add_output('K_rad',val=0.0, desc='K_rad')
self.add_output('Losses',val=0.0, desc='Total loss')
self.add_output('gen_eff',val=0.0, desc='Generator efficiency')
# Structural performance
self.add_output('u_Ar',val=0.0, desc='Rotor radial deflection')
self.add_output('y_Ar',val=0.0, desc='Rotor axial deflection')
self.add_output('z_A_r',val=0.0, desc='Rotor circumferential deflection')
self.add_output('u_As',val=0.0, desc='Stator radial deflection')
self.add_output('y_As',val=0.0, desc='Stator axial deflection')
self.add_output('z_A_s',val=0.0, desc='Stator circumferential deflection')
self.add_output('u_all_r',val=0.0, desc='Allowable radial rotor')
self.add_output('u_all_s',val=0.0, desc='Allowable radial stator')
self.add_output('y_all',val=0.0, desc='Allowable axial')
self.add_output('z_all_s',val=0.0, desc='Allowable circum stator')
self.add_output('z_all_r',val=0.0, desc='Allowable circum rotor')
self.add_output('b_all_s',val=0.0, desc='Allowable arm')
self.add_output('b_all_r',val=0.0, desc='Allowable arm dimensions')
self.add_output('TC1',val=0.0, desc='Torque constraint')
self.add_output('TC2',val=0.0, desc='Torque constraint-rotor')
self.add_output('TC3',val=0.0, desc='Torque constraint-stator')
# Material properties
self.add_input('rho_Fes',val=0.0,units='kg*m**-3', desc='Structural Steel density ')
self.add_input('rho_Fe',val=0.0,units='kg*m**-3', desc='Magnetic Steel density ')
self.add_input('rho_Copper',val=0.0,units='kg*m**-3', desc='Copper density ')
# Mass Outputs
self.add_output('Copper', val=0.0, units='kg', desc='Copper Mass')
self.add_output('Iron', val=0.0, units='kg', desc='Electrical Steel Mass')
self.add_output('Structural_mass', val=0.0, units='kg', desc='Structural Mass')
# Other parameters
self.add_output('Power_ratio',val=0.0, desc='Power_ratio')
self.add_output('Slot_aspect_ratio',val=0.0,desc='Stator slot aspect ratio')
self.add_output('R_out',val=0.0, desc='Outer radius')
# inputs/outputs for interface with drivese
self.add_input('shaft_cm',val= np.zeros(3), units='m', desc='Main Shaft CM')
self.add_input('shaft_length',val=0.0, units='m', desc='main shaft length')
self.add_output('I',val=np.zeros(3),desc='Moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass')
self.add_output('cm', val=np.zeros(3),desc='COM [x,y,z]')
#self.declare_partials('*', '*', method='fd', form='central', step=1e-6)
def compute(self, inputs, outputs):
# Unpack outputs
rad_ag = inputs['rad_ag']
l_s = inputs['l_s']
h_s = inputs['h_s']
tau_p = inputs['tau_p']
N_f = inputs['N_f']
I_f = inputs['I_f']
h_ys = inputs['h_ys']
h_yr = inputs['h_yr']
machine_rating = inputs['machine_rating']
n_nom = inputs['n_nom']
Torque = inputs['Torque']
b_st = inputs['b_st']
d_s = inputs['d_s']
t_ws = inputs['t_ws']
n_r = inputs['n_r']
n_s = inputs['n_s']
b_r = inputs['b_r']
d_r = inputs['d_r']
t_wr = inputs['t_wr']
R_o = inputs['R_o']
rho_Fe = inputs['rho_Fe']
rho_Copper = inputs['rho_Copper']
rho_Fes = inputs['rho_Fes']
shaft_cm = inputs['shaft_cm']
shaft_length = inputs['shaft_length']
# Assign values to universal constants
g1 = 9.81 # m / s^2 acceleration due to gravity
E = 2e11 # N / m^2 young's modulus
sigma = 48.373e3 # shear stress
mu_0 = pi * 4e-7 # permeability of free space
phi = radians(90)
# Assign values to design constants
h_w = 0.005
b_so = 0.004 # Stator slot opening
m = 3 # number of phases
q1 = 2 # no of stator slots per pole per phase
b_s_tau_s = 0.45 # ratio of slot width to slot pitch
k_sfil = 0.65 # Slot fill factor (not used)
P_Fe0h = 4 # specific hysteresis losses W / kg @ 1.5 T @50 Hz
P_Fe0e = 1 # specific eddy losses W / kg @ 1.5 T @50 Hz
rho_Cu = 1.8e-8 * 1.4 # resisitivity of copper
k_fes = 0.9 # iron fill factor (not used)
y_tau_p = 1 # coil span / pole pitch fullpitch
k_fillr = 0.7 # rotor slot fill factor
k_s = 0.2 # magnetic saturation factor for iron
T = Torque
cos_phi = 0.85 # power factor
# back iron thickness for rotor and stator
t_s = h_ys
t = h_yr
# Aspect ratio
K_rad = l_s / (2 * rad_ag)
###################################################### Electromagnetic design#############################################
alpha_p = pi / 2 * .7 # (not used)
dia = 2 * rad_ag # air gap diameter
# air gap length and minimum values
g = 0.001 * dia
if(g < 0.005):
g = 0.005
r_r = rad_ag - g # rotor radius
d_se = dia + 2 * h_s + 2 * h_ys # stator outer diameter (not used)
p = np.round(pi * dia / (2 * tau_p)) # number of pole pairs
S = 2 * p*q1 * m # number of slots of stator phase winding
N_conductors = S * 2
N_s = N_conductors / 2/3 # Stator turns per phase
alpha = 180 / S/p # electrical angle (not used)
tau_s = pi * dia / S # slot pitch
h_ps = 0.1 * tau_p # height of pole shoe
b_pc = 0.4 * tau_p # width of pole core
h_pc = 0.6 * tau_p # height of pole core
h_p = 0.7 * tau_p # pole height
b_p = h_p
b_s = tau_s * b_s_tau_s # slot width
Slot_aspect_ratio = h_s / b_s
b_t = tau_s - b_s # tooth width
# Calculating carter factor and effective air gap
g_a = g
K_C1 = (tau_s + 10 * g_a) / (tau_s - b_s + 10 * g_a) # salient pole rotor
g_1 = K_C1 * g
# calculating angular frequency
om_m = 2 * pi * n_nom / 60
om_e = 60
f = n_nom * p / 60
# Slot fill factor according to air gap radius
if (2 * rad_ag>2):
K_fills = 0.65
else:
K_fills = 0.4
# Calculating Stator winding factor
k_y1 = sin(y_tau_p * pi / 2) # chording factor
k_q1 = sin(pi / 6) / q1 / sin(pi / 6 / q1) # winding zone factor
k_wd = k_y1 * k_q1
# Calculating stator winding conductor length, cross-section and resistance
shortpitch = 0
l_Cus = 2 * N_s * (2 * (tau_p - shortpitch / m/q1) + l_s) # length of winding
A_s = b_s * (h_s - h_w)
A_scalc = b_s * 1000 * (h_s - h_w) * 1000 # cross section in mm^2
A_Cus = A_s * q1 * p * K_fills / N_s
A_Cuscalc = A_scalc * q1 * p * K_fills / N_s
R_s = l_Cus * rho_Cu / A_Cus
# field winding design, conductor lenght, cross-section and resistance
N_f = np.round(N_f) # rounding the field winding turns to the nearest integer
I_srated = machine_rating / (sqrt(3) * 5000 * cos_phi)
l_pole = l_s - 0.05 + 0.120 # 50mm smaller than stator and 120mm longer to accommodate end stack
K_fe = 0.95
l_pfe = l_pole * K_fe
l_Cur = 4 * p*N_f * (l_pfe + b_pc + pi / 4 * (pi * (r_r - h_pc - h_ps) / p - b_pc))
A_Cur = k_fillr * h_pc * 0.5 / N_f * (pi * (r_r - h_pc - h_ps) / p - b_pc)
A_Curcalc = k_fillr * h_pc * 1000 * 0.5 / N_f * (pi * (r_r - h_pc - h_ps) * 1000 / p - b_pc * 1000)
Slot_Area = A_Cur * 2 * N_f / k_fillr # (not used)
R_r = rho_Cu * l_Cur / A_Cur
# field winding current density
J_f = I_f / A_Curcalc
# calculating air flux density
B_gfm = mu_0 * N_f * I_f / (g_1 * (1 + k_s)) # No load air gap flux density
B_g = B_gfm * 4*sin(0.5 * b_p * pi / tau_p) / pi # fundamental component
B_symax = tau_p * B_g / pi / h_ys # stator yoke flux density
L_fg = 2 * mu_0 * p*l_s * 4*N_f**2 * ((h_ps / (tau_p - b_p)) + (h_pc / (3 * pi * (r_r - h_pc - h_ps) / p - b_pc))) # (not used)
# calculating no-load voltage and stator current
E_s = 2 * N_s * l_s * rad_ag * k_wd * om_m * B_g / sqrt(2) # no-load voltage
I_s = (E_s - (E_s**2 - 4 * R_s * machine_rating / m)**0.5) / (2 * R_s)
# Calculating stator winding current density and specific current loading
A_1 = 6 * N_s * I_s / (pi * dia)
J_s = I_s / A_Cuscalc
# Calculating magnetic loading in other parts of the machine
delta_m = 0 # Initialising load angle
# peak flux density in pole core, rotor yoke and stator teeth
B_pc = (1 / b_pc) * ((2 * tau_p / pi) * B_g * cos(delta_m) + (2 * mu_0 * I_f * N_f * ((2 * h_ps / (tau_p - b_p)) + (h_pc / (tau_p - b_pc)))))
B_rymax = 0.5 * b_pc * B_pc / h_yr
B_tmax = (B_gfm + B_g) * tau_s * 0.5 / b_t
# Calculating leakage inductances in the stator
L_ssigmas = 2 * mu_0 * l_s * N_s**2 / p / q1 * ((h_s - h_w) / (3 * b_s) + h_w / b_so) # slot leakage inductance
L_ssigmaew = mu_0 * 1.2 * N_s**2 / p * 1.2 * (2 / 3 * tau_p + 0.01) # end winding leakage inductance
L_ssigmag = 2 * mu_0 * l_s * N_s**2 / p / q1 * (5 * (g / b_so) / (5 + 4 * (g / b_so))) # tooth tip leakage inductance
L_ssigma = (L_ssigmas + L_ssigmaew + L_ssigmag) # stator leakage inductance
# Calculating effective air gap
'''
What is the source of this function that combines 1st and 13th powers? Very suspicious...
Inputs appear to be in the range of 0.45 to 2.2, so outputs are 180 to 178000
def airGapFn(B, fact):
val = 400 * B + 7 * B**13
ans = val * fact
sys.stderr.write('aGF: B {} val {} ans {}\n'.format(B, val, ans))
return val
At_t = h_s * airGapFn(B_tmax, h_s)
At_sy = tau_p * 0.5 * airGapFn(B_symax, tau_p/2)
At_pc = (h_pc + h_ps) * airGapFn(B_pc, h_pc + h_ps)
At_ry = tau_p * 0.5 * airGapFn(B_rymax, tau_p/2)
'''
At_g = g_1 * B_gfm / mu_0
At_t = h_s * (400 * B_tmax + 7 * B_tmax**13)
At_sy = tau_p * 0.5 * (400 * B_symax + 7 * B_symax**13)
At_pc = (h_pc + h_ps) * (400 * B_pc + 7 * B_pc**13)
At_ry = tau_p * 0.5 * (400 * B_rymax + 7 * B_rymax**13)
g_eff = (At_g + At_t + At_sy + At_pc + At_ry) * g_1 / At_g
L_m = 6 * k_wd**2 * N_s**2 * mu_0 * rad_ag * l_s / pi / g_eff / p**2
B_r1 = (mu_0 * I_f * N_f * 4 * sin(0.5 * (b_p / tau_p) * pi)) / g_eff / pi # (not used)
# Calculating direct axis and quadrature axes inductances
L_dm = (b_p / tau_p +(1 / pi) * sin(pi * b_p / tau_p)) * L_m
L_qm = (b_p / tau_p -(1 / pi) * sin(pi * b_p / tau_p) + 2 / (3 * pi) * cos(b_p * pi / 2 * tau_p)) * L_m
# Calculating actual load angle
delta_m = (atan(om_e * L_qm * I_s / E_s))
L_d = L_dm + L_ssigma # (not used)
L_q = L_qm + L_ssigma # (not used)
I_sd = I_s * sin(delta_m)
I_sq = I_s * cos(delta_m)
# induced voltage
E_p = om_e * L_dm * I_sd + sqrt(E_s**2 - (om_e * L_qm * I_sq)**2) # (not used)
# M_sf = mu_0 * 8*rad_ag * l_s * k_wd * N_s * N_f * sin(0.5 * b_p / tau_p * pi) / (p * g_eff * pi)
# I_f1 = sqrt(2) * (E_p) / (om_e * M_sf)
# I_f2 = (E_p / E_s) * B_g * g_eff * pi / (4 * N_f * mu_0 * sin(pi * b_p / 2/tau_p))
# phi_max_stator = k_wd * N_s * pi * rad_ag * l_s * 2*mu_0 * N_f * I_f * 4*sin(0.5 * b_p / tau_p / pi) / (p * pi * g_eff * pi)
# M_sf = mu_0 * 8*rad_ag * l_s * k_wd * N_s * N_f * sin(0.5 * b_p / tau_p / pi) / (p * g_eff * pi)
L_tot = l_s + 2 * tau_p
# Excitation power
V_fn = 500
Power_excitation = V_fn * 2*I_f # total rated power in excitation winding
Power_ratio = Power_excitation * 100 / machine_rating
# Calculating Electromagnetically Active mass
L_tot = l_s + 2 * tau_p # (not used)
V_Cuss = m * l_Cus * A_Cus # volume of copper in stator
V_Cusr = l_Cur * A_Cur # volume of copper in rotor
V_Fest = (l_s * pi * ((rad_ag + h_s)**2 - rad_ag**2) - 2 * m*q1 * p*b_s * h_s * l_s) # volume of iron in stator tooth
V_Fesy = l_s * pi * ((rad_ag + h_s + h_ys)**2 - (rad_ag + h_s)**2) # volume of iron in stator yoke
V_Fert = 2 * p*l_pfe * (h_pc * b_pc + b_p * h_ps) # volume of iron in rotor pole
V_Fery = l_pfe * pi * ((r_r - h_ps - h_pc)**2 - (r_r - h_ps - h_pc - h_yr)**2) # volume of iron in rotor yoke
Copper = (V_Cuss + V_Cusr) * rho_Copper
M_Fest = V_Fest * rho_Fe
M_Fesy = V_Fesy * rho_Fe
M_Fert = V_Fert * rho_Fe
M_Fery = V_Fery * rho_Fe
Iron = M_Fest + M_Fesy + M_Fert + M_Fery
I_snom = machine_rating / (3 * E_s * cos_phi)
## Optional## Calculating mmf ratio
F_1no_load = 3 * 2**0.5 * N_s * k_wd * I_s / (pi * p) # (not used)
Nf_If_no_load = N_f * I_f
F_1_rated = (3 * 2**0.5 * N_s * k_wd * I_srated) / (pi * p)
Nf_If_rated = 2 * Nf_If_no_load
Load_mmf_ratio = Nf_If_rated / F_1_rated
## Calculating losses
#1. Copper losses
K_R = 1.2
P_Cuss = m * I_snom**2 * R_s * K_R
P_Cusr = I_f**2 * R_r
P_Cusnom_total = P_Cuss + P_Cusr
#2. Iron losses ( Hysteresis and Eddy currents)
P_Hyys = M_Fesy * (B_symax / 1.5)**2 * (P_Fe0h * om_e / (2 * pi * 60)) # Hysteresis losses in stator yoke
P_Ftys = M_Fesy * (B_symax / 1.5)**2 * (P_Fe0e * (om_e / (2 * pi * 60))**2) # Eddy losses in stator yoke
P_Fesynom = P_Hyys + P_Ftys
P_Hyd = M_Fest * (B_tmax / 1.5)**2 * (P_Fe0h * om_e / (2 * pi * 60)) # Hysteresis losses in stator teeth
P_Ftd = M_Fest * (B_tmax / 1.5)**2 * (P_Fe0e * (om_e / (2 * pi * 60))**2) # Eddy losses in stator teeth
P_Festnom = P_Hyd + P_Ftd
# brushes
delta_v = 1
n_brushes = (I_f * 2 / 120)
if (n_brushes<0.5):
n_brushes = 1
else:
n_brushes = np.round(n_brushes)
#3. brush losses
p_b = 2 * delta_v * (I_f)
Losses = P_Cusnom_total + P_Festnom + P_Fesynom + p_b
gen_eff = machine_rating * 100 / (Losses + machine_rating)
################################################## Structural Design ########################################################
## Structural deflection calculations
# rotor structure
q3 = B_g**2 / 2/mu_0 # normal component of Maxwell's stress
#l = l_s # l - stator core length - now using l_s everywhere
l_b = 2 * tau_p # end winding length # (not used)
l_e = l_s + 2 * 0.001 * rad_ag # equivalent core length # (not used)
a_r = (b_r * d_r) - ((b_r - 2 * t_wr) * (d_r - 2 * t_wr)) # cross-sectional area of rotor arms
A_r = l_s * t # cross-sectional area of rotor cylinder
N_r = np.round(n_r)
theta_r = pi / N_r # half angle between spokes
I_r = l_s * t**3 / 12 # second moment of area of rotor cylinder
I_arm_axi_r = ((b_r * d_r**3) - ((b_r - 2 * t_wr) * (d_r - 2 * t_wr)**3)) / 12 # second moment of area of rotor arm
I_arm_tor_r = ((d_r * b_r**3) - ((d_r - 2 * t_wr) * (b_r - 2 * t_wr)**3)) / 12 # second moment of area of rotor arm w.r.t. torsion
R = r_r - h_ps - h_pc - 0.5 * h_yr
R_1 = R - h_yr * 0.5 # inner radius of rotor cylinder
k_1 = sqrt(I_r / A_r) # radius of gyration
m1 = (k_1 / R)**2
c = R / 500 # (not used)
u_all_r = R / 10000 # allowable radial deflection
b_all_r = 2 * pi * R_o / N_r # allowable circumferential arm dimension
# Calculating radial deflection of rotor structure according to McDonald's
Numer = R**3 * ((0.25 * (sin(theta_r) - (theta_r * cos(theta_r))) / (sin(theta_r))**2) - (0.5 / sin(theta_r)) + (0.5 / theta_r))
Pov = ((theta_r / (sin(theta_r))**2) + 1 / tan(theta_r)) * ((0.25 * R / A_r) + (0.25 * R**3 / I_r))
Qov = R**3 / (2 * I_r * theta_r * (m1 + 1))
Lov = (R_1 - R_o) / a_r
Denom = I_r * (Pov - Qov + Lov) # radial deflection % rotor
u_Ar = (q3 * R**2 / E / h_yr) * (1 + Numer / Denom)
# Calculating axial deflection of rotor structure
w_r = rho_Fes * g1 * sin(phi) * a_r * N_r
mass_st_lam = rho_Fe * 2*pi * (R + 0.5 * h_yr) * l_s * h_yr # mass of rotor yoke steel
W = g1 * sin(phi) * (mass_st_lam + (V_Cusr * rho_Copper) + M_Fert) / N_r # weight of rotor cylinder
l_ir = R # length of rotor arm beam at which rotor cylinder acts
l_iir = R_1
y_Ar = (W * l_ir**3 / 12 / E / I_arm_axi_r) + (w_r * l_iir**4 / 24 / E / I_arm_axi_r) # axial deflection
# Calculating torsional deflection of rotor structure
z_all_r = radians(0.05 * R) # allowable torsional deflection
z_A_r = (2 * pi * (R - 0.5 * h_yr) * l_s / N_r) * sigma * (l_ir - 0.5 * h_yr)**3 / 3 / E / I_arm_tor_r # circumferential deflection
# STATOR structure
A_st = l_s * t_s
a_s = (b_st * d_s) - ((b_st - 2 * t_ws) * (d_s - 2 * t_ws))
N_st = np.round(n_s)
theta_s = pi / N_st
I_st = l_s * t_s**3 / 12
I_arm_axi_s = ((b_st * d_s**3) - ((b_st - 2 * t_ws) * (d_s - 2 * t_ws)**3)) / 12 # second moment of area of stator arm
I_arm_tor_s = ((d_s * b_st**3) - ((d_s - 2 * t_ws) * (b_st - 2 * t_ws)**3)) / 12 # second moment of area of stator arm w.r.t. torsion
R_st = rad_ag + h_s + h_ys * 0.5
R_1s = R_st - h_ys * 0.5
k_2 = sqrt(I_st / A_st)
m2 = (k_2 / R_st)**2
# allowable deflections
b_all_s = 2 * pi * R_o / N_st
u_all_s = R_st / 10000
y_all = 2 * l_s / 100 # allowable axial deflection
z_all_s = radians(0.05 * R_st) # allowable torsional deflection
# Calculating radial deflection according to McDonald's
Numers = R_st**3 * ((0.25 * (sin(theta_s) - (theta_s * cos(theta_s))) / (sin(theta_s))**2) - (0.5 / sin(theta_s)) + (0.5 / theta_s))
Povs = ((theta_s / (sin(theta_s))**2) + 1 / tan(theta_s)) * ((0.25 * R_st / A_st) + (0.25 * R_st**3 / I_st))
Qovs = R_st**3 / (2 * I_st * theta_s * (m2 + 1))
Lovs = (R_1s - R_o) * 0.5 / a_s
Denoms = I_st * (Povs - Qovs + Lovs)
R_out = (R / 0.995 + h_s + h_ys)
u_As = (q3 * R_st**2 / E / t_s) * (1 + Numers / Denoms)
# Calculating axial deflection according to McDonald
l_is = R_st - R_o
l_iis = l_is
l_iiis = l_is # length of rotor arm beam at which self-weight acts
mass_st_lam_s = M_Fest + pi * l_s * rho_Fe * ((R_st + 0.5 * h_ys)**2 - (R_st - 0.5 * h_ys)**2)
W_is = g1 * sin(phi) * (rho_Fes * l_s * d_s**2 * 0.5) # weight of rotor cylinder
W_iis = g1 * sin(phi) * (V_Cuss * rho_Copper + mass_st_lam_s) / 2/N_st
w_s = rho_Fes * g1 * sin(phi) * a_s * N_st
X_comp1 = W_is * l_is**3 / 12 / E / I_arm_axi_s
X_comp2 = W_iis * l_iis**4 / 24 / E / I_arm_axi_s
X_comp3 = w_s * l_iiis**4 / 24 / E / I_arm_axi_s
y_As = X_comp1 + X_comp2 + X_comp3 # axial deflection
# Calculating torsional deflection
z_A_s = 2 * pi * (R_st + 0.5 * t_s) * l_s / (2 * N_st) * sigma * (l_is + 0.5 * t_s)**3 / 3 / E / I_arm_tor_s
# tangential stress constraints
TC1 = T / (2 * pi * sigma)
TC2 = R**2 * l_s
TC3 = R_st**2 * l_s
mass_stru_steel = 2 * (N_st * (R_1s - R_o) * a_s * rho_Fes)
# Calculating inactive mass and total mass
Structural_mass = mass_stru_steel + (N_r * (R_1 - R_o) * a_r * rho_Fes)
Mass = Copper + Iron + Structural_mass
I = np.zeros(3)
# Calculating mass moments of inertia and center of mass
I[0] = (0.50 * Mass*R_out**2)
I[1] = (0.25 * Mass*R_out**2 + Mass * l_s**2 / 12)
I[2] = I[1]
cm = np.zeros(3)
cm[0] = shaft_cm[0] + shaft_length / 2. + l_s / 2
cm[1] = shaft_cm[1]
cm[2] = shaft_cm[2]
outputs['B_symax'] = B_symax
outputs['B_tmax'] = B_tmax
outputs['B_rymax'] = B_rymax
outputs['B_gfm'] = B_gfm
outputs['B_g'] = B_g
outputs['B_pc'] = B_pc
outputs['N_s'] = N_s
outputs['b_s'] = b_s
outputs['b_t'] = b_t
outputs['A_Cuscalc'] = A_Cuscalc
outputs['A_Curcalc'] = A_Curcalc
outputs['b_p'] = b_p
outputs['h_p'] = h_p
outputs['p'] = p
outputs['E_s'] = E_s
outputs['f'] = f
outputs['I_s'] = I_s
outputs['R_s'] = R_s
outputs['L_m'] = L_m
outputs['A_1'] = A_1
outputs['J_s'] = J_s
outputs['R_r'] = R_r
outputs['Losses'] = Losses
outputs['Load_mmf_ratio'] = Load_mmf_ratio
outputs['Power_ratio'] = Power_ratio
outputs['n_brushes'] = n_brushes
outputs['J_f'] = J_f
outputs['K_rad'] = K_rad
outputs['gen_eff'] = gen_eff
outputs['S'] = S
outputs['Slot_aspect_ratio'] = Slot_aspect_ratio
outputs['Copper'] = Copper
outputs['Iron'] = Iron
outputs['u_Ar'] = u_Ar
outputs['y_Ar'] = y_Ar
outputs['z_A_r'] = z_A_r
outputs['u_As'] = u_As
outputs['y_As'] = y_As
outputs['z_A_s'] = z_A_s
outputs['u_all_r'] = u_all_r
outputs['u_all_s'] = u_all_s
outputs['y_all'] = y_all
outputs['z_all_s'] = z_all_s
outputs['z_all_r'] = z_all_r
outputs['b_all_s'] = b_all_s
outputs['b_all_r'] = b_all_r
outputs['TC1'] = TC1
outputs['TC2'] = TC2
outputs['TC3'] = TC3
outputs['R_out'] = R_out
outputs['Structural_mass'] = Structural_mass
outputs['Mass'] = Mass
outputs['cm'] = cm
outputs['I'] = I
|
the-stack_0_14917 | from jinja2 import Environment, PackageLoader
templates = {
'drawing': 'drawing.xml',
'hyperlink': 'hyperlink.xml',
'insert': 'insert.xml',
'main': 'base.xml',
'p': 'p.xml',
'pict': 'pict.xml',
'r': 'r.xml',
'sectPr': 'sectPr.xml',
'smartTag': 'smart_tag.xml',
'style': 'style.xml',
'styles': 'styles.xml',
'table': 'table.xml',
'tc': 'tc.xml',
'tr': 'tr.xml',
}
env = Environment(
loader=PackageLoader(
'docx2html.tests',
'templates',
),
)
class DocxBuilder(object):
@classmethod
def xml(self, body):
template = env.get_template(templates['main'])
return template.render(body=body)
@classmethod
def p_tag(self, text, bold=False):
if isinstance(text, str):
            # Create a single r tag based on the text and the bold flag
run_tag = DocxBuilder.r_tag(text, bold)
run_tags = [run_tag]
elif isinstance(text, list):
run_tags = text
else:
run_tags = [self.r_tag(None)]
template = env.get_template(templates['p'])
kwargs = {
'run_tags': run_tags,
}
return template.render(**kwargs)
@classmethod
def r_tag(self, text, is_bold=False, include_linebreak=False):
template = env.get_template(templates['r'])
kwargs = {
'include_linebreak': include_linebreak,
'text': text,
'is_bold': is_bold,
}
return template.render(**kwargs)
@classmethod
def hyperlink_tag(self, r_id, run_tags):
template = env.get_template(templates['hyperlink'])
kwargs = {
'r_id': r_id,
'run_tags': run_tags,
}
return template.render(**kwargs)
@classmethod
def insert_tag(self, run_tags):
template = env.get_template(templates['insert'])
kwargs = {
'run_tags': run_tags,
}
return template.render(**kwargs)
@classmethod
def smart_tag(self, run_tags):
template = env.get_template(templates['smartTag'])
kwargs = {
'run_tags': run_tags,
}
return template.render(**kwargs)
@classmethod
def li(self, text, ilvl, numId, bold=False):
if isinstance(text, str):
            # Create a single r tag based on the text and the bold flag
run_tag = DocxBuilder.r_tag(text, bold)
run_tags = [run_tag]
elif isinstance(text, list):
run_tags = []
for run_text, run_bold in text:
                run_tags.append(DocxBuilder.r_tag(run_text, run_bold))
else:
raise AssertionError('text must be a string or a list')
template = env.get_template(templates['p'])
kwargs = {
'run_tags': run_tags,
'is_list': True,
'ilvl': ilvl,
'numId': numId,
}
return template.render(**kwargs)
@classmethod
def table(self, num_rows, num_columns, text):
def _tc(cell_value):
template = env.get_template(templates['tc'])
return template.render(p_tag=cell_value)
        def _tr(columns, text):
            tcs = [_tc(next(text)) for _ in range(columns)]
template = env.get_template(templates['tr'])
return template.render(table_cells=tcs)
        trs = [_tr(num_columns, text) for _ in range(num_rows)]
template = env.get_template(templates['table'])
return template.render(table_rows=trs)
@classmethod
def drawing(self, r_id):
template = env.get_template(templates['drawing'])
return template.render(r_id=r_id)
@classmethod
def pict(self, r_id=None):
template = env.get_template(templates['pict'])
return template.render(r_id=r_id)
@classmethod
def sectPr_tag(self, p_tag):
template = env.get_template(templates['sectPr'])
kwargs = {
'p_tag': p_tag,
}
return template.render(**kwargs)
@classmethod
def styles_xml(self, style_tags):
template = env.get_template(templates['styles'])
kwargs = {
'style_tags': style_tags,
}
return template.render(**kwargs)
@classmethod
def style(self, style_id, value):
template = env.get_template(templates['style'])
kwargs = {
'style_id': style_id,
'value': value,
}
return template.render(**kwargs)
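# Usage sketch (added for illustration; the template names registered above
# must exist under docx2html/tests/templates for the render calls to work):
#
#   run = DocxBuilder.r_tag('hello world', is_bold=True)
#   para = DocxBuilder.p_tag([run])
#   document = DocxBuilder.xml(body=para)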
|
the-stack_0_14919 | from discord.ext import commands
from cassiopeia import riotapi
config: dict = {}
def init(bot: commands.Bot, cfg: dict):
global config
config = cfg[__name__]
riotapi.set_region(config["api_region"])
riotapi.set_api_key(config["api_key"])
from .trivia import LoLTrivia
bot.add_cog(LoLTrivia(bot))
|
the-stack_0_14920 | from JumpScale9Portal.portal import exceptions
import re
INT = r"""(?:[+-]?(?:[0-9]+))"""
BASE10NUM = r"""(?<![0-9.+-])(?>[+-]?(?:(?:[0-9]+(?:\.[0-9]+)?)|(?:\.[0-9]+)))"""
NUMBER = r"""(?<![0-9.+-])(?>[+-]?(?:(?:[0-9]+(?:\.[0-9]+)?)|(?:\.[0-9]+)))"""
BASE16NUM = r"""(?<![0-9A-Fa-f])(?:[+-]?(?:0x)?(?:[0-9A-Fa-f]+))"""
BASE16FLOAT = r"""\b(?<![0-9A-Fa-f.])(?:[+-]?(?:0x)?(?:(?:[0-9A-Fa-f]+(?:\.[0-9A-Fa-f]*)?)|(?:\.[0-9A-Fa-f]+)))\b"""
POSINT = r"""\b(?:[1-9][0-9]*)\b"""
NONNEGINT = r"""\b(?:[0-9]+)\b"""
WORD = r"""\b\w+\b"""
NOTSPACE = r"""\S+"""
SPACE = r"""\s*"""
DATA = r""".*?"""
GREEDYDATA = r""".*"""
QUOTEDSTRING = r"""(?>(?<!\\)(?>"(?>\\.|[^\\"]+)+"|""|(?>'(?>\\.|[^\\']+)+')|''|(?>`(?>\\.|[^\\`]+)+`)|``))"""
UUID = r"""[A-Fa-f0-9]{8}-(?:[A-Fa-f0-9]{4}-){3}[A-Fa-f0-9]{12}"""
def NAME(val):
for i in r"""<>"'""":
if i in val:
raise exceptions.BadRequest('The name you entered contains invalid characters')
if len(val) < 2:
raise exceptions.BadRequest('The name cannot be shorter than two characters')
return True
def IP(val):
return sum([x.isdigit() and 0 <= int(x) <= 255 for x in val.split('.')]) == 4
def PASSWORD(val):
return 8 <= len(val) <= 60
def USERNAME(val):
m = re.match("[a-zA-Z0-9._-]+(?:@[a-zA-Z0-9._-]+)?", val)
if 2 < len(val.split('@')[0]) < 40 and m and m.end() == len(val):
return True
else:
raise exceptions.BadRequest('Usernames can only contain alphanumeric characters, dots, dashes, underscores and should be between 2 and 40 characters')
def GROUPNAME(val):
m = re.match("[a-zA-Z0-9._-]+", val)
if 2 < len(val) < 40 and m and m.end() == len(val):
return True
else:
raise exceptions.BadRequest('Groupnames can only contain alphanumeric characters, dots, dashes, underscores and should be between 2 and 40 characters')
def EMAIL(val):
atpos = val.find('@')
dotpos = val.find('.')
if atpos == -1 or dotpos == -1:
raise exceptions.BadRequest('Invalid Email Address given')
elif dotpos < atpos:
        raise exceptions.BadRequest('Invalid Email Address given')
    return True
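# Illustrative examples (not part of the original module): each validator
# either returns True or raises exceptions.BadRequest with a readable message.
#
#   IP('192.168.0.1')        # True
#   PASSWORD('s3cret-pass')  # True (length is between 8 and 60)
#   USERNAME('jo')           # raises BadRequest (local part must be > 2 chars)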
|
the-stack_0_14922 | #!/usr/bin/env python
# coding: utf-8
import logging
import os
from timeit import default_timer as timer
import emmental
import torch
from emmental.data import EmmentalDataLoader
from emmental.learner import EmmentalLearner
from emmental.model import EmmentalModel
from fonduer import Meta, init_logging
from fonduer.candidates import CandidateExtractor, MentionExtractor, MentionFigures
from fonduer.candidates.matchers import _Matcher
from fonduer.candidates.models import Mention, candidate_subclass, mention_subclass
from fonduer.parser.models import Document, Figure, Paragraph, Section, Sentence
from PIL import Image
from hack.circular_connectors.augment_policy import Augmentation
from hack.circular_connectors.config import emmental_config
from hack.circular_connectors.scheduler import DauphinScheduler
from hack.circular_connectors.task import create_task
from hack.circular_connectors.thumbnail_dataset import ThumbnailDataset
from hack.utils import parse_dataset
# Configure logging for Fonduer
logger = logging.getLogger(__name__)
TRUE = 1
FALSE = 0
torch.backends.cudnn.deterministic = True # type: ignore
torch.backends.cudnn.benchmark = False # type: ignore
def main(
conn_string,
max_docs=float("inf"),
parse=False,
first_time=False,
gpu=None,
parallel=4,
log_dir=None,
verbose=False,
):
if not log_dir:
log_dir = "logs"
if verbose:
level = logging.INFO
else:
level = logging.WARNING
dirname = os.path.dirname(os.path.abspath(__file__))
init_logging(log_dir=os.path.join(dirname, log_dir), level=level)
session = Meta.init(conn_string).Session()
# Parsing
logger.info(f"Starting parsing...")
start = timer()
docs, train_docs, dev_docs, test_docs = parse_dataset(
session, dirname, first_time=first_time, parallel=parallel, max_docs=max_docs
)
end = timer()
logger.warning(f"Parse Time (min): {((end - start) / 60.0):.1f}")
logger.info(f"# of train Documents: {len(train_docs)}")
logger.info(f"# of dev Documents: {len(dev_docs)}")
logger.info(f"# of test Documents: {len(test_docs)}")
logger.info(f"Documents: {session.query(Document).count()}")
logger.info(f"Sections: {session.query(Section).count()}")
logger.info(f"Paragraphs: {session.query(Paragraph).count()}")
logger.info(f"Sentences: {session.query(Sentence).count()}")
logger.info(f"Figures: {session.query(Figure).count()}")
start = timer()
Thumbnails = mention_subclass("Thumbnails")
thumbnails_img = MentionFigures()
class HasFigures(_Matcher):
def _f(self, m):
file_path = ""
for prefix in [
f"{dirname}/data/train/html/",
f"{dirname}/data/dev/html/",
f"{dirname}/data/test/html/",
]:
if os.path.exists(prefix + m.figure.url):
file_path = prefix + m.figure.url
if file_path == "":
return False
img = Image.open(file_path)
width, height = img.size
min_value = min(width, height)
return min_value > 50
mention_extractor = MentionExtractor(
session, [Thumbnails], [thumbnails_img], [HasFigures()], parallelism=parallel
)
if first_time:
mention_extractor.apply(docs)
logger.info("Total Mentions: {}".format(session.query(Mention).count()))
ThumbnailLabel = candidate_subclass("ThumbnailLabel", [Thumbnails])
candidate_extractor = CandidateExtractor(
session, [ThumbnailLabel], throttlers=[None], parallelism=parallel
)
if first_time:
candidate_extractor.apply(train_docs, split=0)
candidate_extractor.apply(dev_docs, split=1)
candidate_extractor.apply(test_docs, split=2)
train_cands = candidate_extractor.get_candidates(split=0)
# Sort the dev_cands, which are used for training, for deterministic behavior
dev_cands = candidate_extractor.get_candidates(split=1, sort=True)
test_cands = candidate_extractor.get_candidates(split=2)
end = timer()
logger.warning(f"Candidate Extraction Time (min): {((end - start) / 60.0):.1f}")
logger.info("Total train candidate:\t{}".format(len(train_cands[0])))
logger.info("Total dev candidate:\t{}".format(len(dev_cands[0])))
logger.info("Total test candidate:\t{}".format(len(test_cands[0])))
fin = open(f"{dirname}/data/ground_truth.txt", "r")
gt = set()
for line in fin:
gt.add("::".join(line.lower().split()))
fin.close()
# Labeling
start = timer()
def LF_gt_label(c):
doc_file_id = (
f"{c[0].context.figure.document.name.lower()}.pdf::"
f"{os.path.basename(c[0].context.figure.url.lower())}"
)
return TRUE if doc_file_id in gt else FALSE
gt_dev = [LF_gt_label(cand) for cand in dev_cands[0]]
gt_test = [LF_gt_label(cand) for cand in test_cands[0]]
end = timer()
logger.warning(f"Supervision Time (min): {((end - start) / 60.0):.1f}")
batch_size = 64
input_size = 224
K = 2
emmental.init(log_dir=Meta.log_path, config=emmental_config)
emmental.Meta.config["learner_config"]["task_scheduler_config"][
"task_scheduler"
] = DauphinScheduler(augment_k=K, enlarge=1)
train_dataset = ThumbnailDataset(
"Thumbnail",
dev_cands[0],
gt_dev,
"train",
prob_label=True,
prefix=f"{dirname}/data/dev/html/",
input_size=input_size,
transform_cls=Augmentation(2),
k=K,
)
val_dataset = ThumbnailDataset(
"Thumbnail",
dev_cands[0],
gt_dev,
"valid",
prob_label=False,
prefix=f"{dirname}/data/dev/html/",
input_size=input_size,
k=1,
)
test_dataset = ThumbnailDataset(
"Thumbnail",
test_cands[0],
gt_test,
"test",
prob_label=False,
prefix=f"{dirname}/data/test/html/",
input_size=input_size,
k=1,
)
dataloaders = []
dataloaders.append(
EmmentalDataLoader(
task_to_label_dict={"Thumbnail": "labels"},
dataset=train_dataset,
split="train",
shuffle=True,
batch_size=batch_size,
num_workers=1,
)
)
dataloaders.append(
EmmentalDataLoader(
task_to_label_dict={"Thumbnail": "labels"},
dataset=val_dataset,
split="valid",
shuffle=False,
batch_size=batch_size,
num_workers=1,
)
)
dataloaders.append(
EmmentalDataLoader(
task_to_label_dict={"Thumbnail": "labels"},
dataset=test_dataset,
split="test",
shuffle=False,
batch_size=batch_size,
num_workers=1,
)
)
model = EmmentalModel(name=f"Thumbnail")
model.add_task(
create_task("Thumbnail", n_class=2, model="resnet18", pretrained=True)
)
emmental_learner = EmmentalLearner()
emmental_learner.learn(model, dataloaders)
scores = model.score(dataloaders)
logger.warning("Model Score:")
logger.warning(f"precision: {scores['Thumbnail/Thumbnail/test/precision']:.3f}")
logger.warning(f"recall: {scores['Thumbnail/Thumbnail/test/recall']:.3f}")
logger.warning(f"f1: {scores['Thumbnail/Thumbnail/test/f1']:.3f}")
|
the-stack_0_14923 | import os
import unittest
from livestreamer import Livestreamer, PluginError, NoPluginError
from livestreamer.plugins import Plugin
from livestreamer.stream import *
class TestPluginStream(unittest.TestCase):
def setUp(self):
self.session = Livestreamer()
def assertDictHas(self, a, b):
for key, value in a.items():
self.assertEqual(b[key], value)
def _test_akamaihd(self, surl, url):
channel = self.session.resolve_url(surl)
streams = channel.get_streams()
self.assertTrue("live" in streams)
stream = streams["live"]
self.assertTrue(isinstance(stream, AkamaiHDStream))
self.assertEqual(stream.url, url)
def _test_hls(self, surl, url):
channel = self.session.resolve_url(surl)
streams = channel.get_streams()
self.assertTrue("live" in streams)
stream = streams["live"]
self.assertTrue(isinstance(stream, HLSStream))
self.assertEqual(stream.url, url)
def _test_rtmp(self, surl, url, params):
channel = self.session.resolve_url(surl)
streams = channel.get_streams()
self.assertTrue("live" in streams)
stream = streams["live"]
self.assertTrue(isinstance(stream, RTMPStream))
self.assertEqual(stream.params["rtmp"], url)
self.assertDictHas(params, stream.params)
def test_plugin(self):
self._test_rtmp("rtmp://hostname.se/stream",
"rtmp://hostname.se/stream", dict())
self._test_rtmp("rtmp://hostname.se/stream live=1 num=47",
"rtmp://hostname.se/stream", dict(live=True, num=47))
self._test_rtmp("rtmp://hostname.se/stream live=1 qarg='a \'string' noq=test",
"rtmp://hostname.se/stream", dict(live=True, qarg='a \'string', noq="test"))
self._test_hls("hls://http://hostname.se/playlist.m3u8",
"http://hostname.se/playlist.m3u8")
self._test_akamaihd("akamaihd://http://hostname.se/stream",
"http://hostname.se/stream")
if __name__ == "__main__":
unittest.main()
|
the-stack_0_14926 | # -*- coding: iso-8859-15 -*-
# =================================================================
#
# Authors: Tom Kralidis <[email protected]>
# Angelos Tzotsos <[email protected]>
#
# Copyright (c) 2015 Tom Kralidis
# Copyright (c) 2015 Angelos Tzotsos
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
import os
from sqlalchemy import create_engine, asc, desc, func, __version__, select
from sqlalchemy.sql import text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import create_session
from pycsw.core import util
LOGGER = logging.getLogger(__name__)
class Repository(object):
_engines = {}
@classmethod
def create_engine(clazz, url):
'''
SQL Alchemy engines are thread-safe and simple wrappers for connection pools
https://groups.google.com/forum/#!topic/sqlalchemy/t8i3RSKZGb0
To reduce startup time we can cache the engine as a class variable in the
repository object and do database initialization once
Engines are memoized by url
'''
if url not in clazz._engines:
LOGGER.debug('creating new engine: %s', url)
engine = create_engine('%s' % url, echo=False)
# load SQLite query bindings
            # This can be directly bound via events
            # for SQLAlchemy < 0.7 we need to do this on a per-connection basis
if engine.name in ['sqlite', 'sqlite3'] and __version__ >= '0.7':
from sqlalchemy import event
@event.listens_for(engine, "connect")
def connect(dbapi_connection, connection_rec):
dbapi_connection.create_function(
'query_spatial', 4, util.query_spatial)
dbapi_connection.create_function(
'update_xpath', 3, util.update_xpath)
dbapi_connection.create_function('get_anytext', 1,
util.get_anytext)
dbapi_connection.create_function('get_geometry_area', 1,
util.get_geometry_area)
dbapi_connection.create_function('get_spatial_overlay_rank', 2,
util.get_spatial_overlay_rank)
clazz._engines[url] = engine
return clazz._engines[url]
''' Class to interact with underlying repository '''
def __init__(self, database, context, app_root=None, table='records', repo_filter=None):
''' Initialize repository '''
self.context = context
self.filter = repo_filter
self.fts = False
        # Don't use relative paths; this is a hack to get around
        # most WSGI restrictions...
if (app_root and database.startswith('sqlite:///') and
not database.startswith('sqlite:////')):
database = database.replace('sqlite:///',
'sqlite:///%s%s' % (app_root, os.sep))
self.engine = Repository.create_engine('%s' % database)
base = declarative_base(bind=self.engine)
LOGGER.debug('binding ORM to existing database')
self.postgis_geometry_column = None
schema, table = util.sniff_table(table)
self.dataset = type('dataset', (base,),
dict(__tablename__=table,__table_args__={'autoload': True,
'schema': schema}))
self.dbtype = self.engine.name
self.session = create_session(self.engine)
temp_dbtype = None
if self.dbtype == 'postgresql':
# check if PostgreSQL is enabled with PostGIS 1.x
try:
self.session.execute(select([func.postgis_version()]))
temp_dbtype = 'postgresql+postgis+wkt'
LOGGER.debug('PostgreSQL+PostGIS1+WKT detected')
except Exception as err:
LOGGER.debug('PostgreSQL+PostGIS1+WKT detection failed')
# check if PostgreSQL is enabled with PostGIS 2.x
try:
self.session.execute('select(postgis_version())')
temp_dbtype = 'postgresql+postgis+wkt'
LOGGER.debug('PostgreSQL+PostGIS2+WKT detected')
except Exception as err:
LOGGER.debug('PostgreSQL+PostGIS2+WKT detection failed')
# check if a native PostGIS geometry column exists
try:
result = self.session.execute("select f_geometry_column from geometry_columns where f_table_name = '%s' and f_geometry_column != 'wkt_geometry' limit 1;" % table)
row = result.fetchone()
self.postgis_geometry_column = str(row['f_geometry_column'])
temp_dbtype = 'postgresql+postgis+native'
LOGGER.debug('PostgreSQL+PostGIS+Native detected')
except Exception as err:
LOGGER.debug('PostgreSQL+PostGIS+Native not picked up: %s', str(err))
# check if a native PostgreSQL FTS GIN index exists
result = self.session.execute("select relname from pg_class where relname='fts_gin_idx'").scalar()
self.fts = bool(result)
LOGGER.debug('PostgreSQL FTS enabled: %r', self.fts)
if temp_dbtype is not None:
LOGGER.debug('%s support detected' % temp_dbtype)
self.dbtype = temp_dbtype
if self.dbtype in ['sqlite', 'sqlite3']: # load SQLite query bindings
# <= 0.6 behaviour
if not __version__ >= '0.7':
self.connection = self.engine.raw_connection()
self.connection.create_function(
'query_spatial', 4, util.query_spatial)
self.connection.create_function(
'update_xpath', 3, util.update_xpath)
self.connection.create_function('get_anytext', 1,
util.get_anytext)
self.connection.create_function('get_geometry_area', 1,
util.get_geometry_area)
self.connection.create_function('get_spatial_overlay_rank', 2,
util.get_spatial_overlay_rank)
LOGGER.debug('setting repository queryables')
# generate core queryables db and obj bindings
self.queryables = {}
for tname in self.context.model['typenames']:
for qname in self.context.model['typenames'][tname]['queryables']:
self.queryables[qname] = {}
for qkey, qvalue in \
self.context.model['typenames'][tname]['queryables'][qname].items():
self.queryables[qname][qkey] = qvalue
# flatten all queryables
# TODO smarter way of doing this
self.queryables['_all'] = {}
for qbl in self.queryables:
self.queryables['_all'].update(self.queryables[qbl])
self.queryables['_all'].update(self.context.md_core_model['mappings'])
def _create_values(self, values):
value_dict = {}
for num, value in enumerate(values):
value_dict['pvalue%d' % num] = value
return value_dict
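    # Example (sketch): _create_values(['foo', 5]) returns
    # {'pvalue0': 'foo', 'pvalue1': 5}, i.e. one named bind parameter per
    # constraint value, matching the :pvalue0, :pvalue1, ... placeholders the
    # calling code is expected to put in constraint['where'].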
def query_ids(self, ids):
''' Query by list of identifiers '''
column = getattr(self.dataset, \
self.context.md_core_model['mappings']['pycsw:Identifier'])
query = self.session.query(self.dataset).filter(column.in_(ids))
return self._get_repo_filter(query).all()
def query_domain(self, domain, typenames, domainquerytype='list',
count=False):
''' Query by property domain values '''
domain_value = getattr(self.dataset, domain)
if domainquerytype == 'range':
LOGGER.debug('Generating property name range values')
query = self.session.query(func.min(domain_value),
func.max(domain_value))
else:
if count:
LOGGER.debug('Generating property name frequency counts')
query = self.session.query(getattr(self.dataset, domain),
func.count(domain_value)).group_by(domain_value)
else:
query = self.session.query(domain_value).distinct()
return self._get_repo_filter(query).all()
def query_insert(self, direction='max'):
''' Query to get latest (default) or earliest update to repository '''
column = getattr(self.dataset, \
self.context.md_core_model['mappings']['pycsw:InsertDate'])
if direction == 'min':
return self._get_repo_filter(self.session.query(func.min(column))).first()[0]
# else default max
return self._get_repo_filter(self.session.query(func.max(column))).first()[0]
def query_source(self, source):
''' Query by source '''
column = getattr(self.dataset, \
self.context.md_core_model['mappings']['pycsw:Source'])
query = self.session.query(self.dataset).filter(column == source)
return self._get_repo_filter(query).all()
def query(self, constraint, sortby=None, typenames=None,
maxrecords=10, startposition=0):
''' Query records from underlying repository '''
# run the raw query and get total
if 'where' in constraint: # GetRecords with constraint
LOGGER.debug('constraint detected')
query = self.session.query(self.dataset).filter(
text(constraint['where'])).params(self._create_values(constraint['values']))
else: # GetRecords sans constraint
LOGGER.debug('No constraint detected')
query = self.session.query(self.dataset)
total = self._get_repo_filter(query).count()
if util.ranking_pass: #apply spatial ranking
#TODO: Check here for dbtype so to extract wkt from postgis native to wkt
LOGGER.debug('spatial ranking detected')
LOGGER.debug('Target WKT: %s', getattr(self.dataset, self.context.md_core_model['mappings']['pycsw:BoundingBox']))
LOGGER.debug('Query WKT: %s', util.ranking_query_geometry)
query = query.order_by(func.get_spatial_overlay_rank(getattr(self.dataset, self.context.md_core_model['mappings']['pycsw:BoundingBox']), util.ranking_query_geometry).desc())
#trying to make this wsgi safe
util.ranking_pass = False
util.ranking_query_geometry = ''
if sortby is not None: # apply sorting
LOGGER.debug('sorting detected')
#TODO: Check here for dbtype so to extract wkt from postgis native to wkt
sortby_column = getattr(self.dataset, sortby['propertyname'])
if sortby['order'] == 'DESC': # descending sort
if 'spatial' in sortby and sortby['spatial']: # spatial sort
query = query.order_by(func.get_geometry_area(sortby_column).desc())
else: # aspatial sort
query = query.order_by(sortby_column.desc())
else: # ascending sort
if 'spatial' in sortby and sortby['spatial']: # spatial sort
query = query.order_by(func.get_geometry_area(sortby_column))
else: # aspatial sort
query = query.order_by(sortby_column)
# always apply limit and offset
return [str(total), self._get_repo_filter(query).limit(
maxrecords).offset(startposition).all()]
def insert(self, record, source, insert_date):
''' Insert a record into the repository '''
try:
self.session.begin()
self.session.add(record)
self.session.commit()
except Exception as err:
self.session.rollback()
raise RuntimeError('ERROR: %s' % str(err.orig))
def update(self, record=None, recprops=None, constraint=None):
''' Update a record in the repository based on identifier '''
if record is not None:
identifier = getattr(record,
self.context.md_core_model['mappings']['pycsw:Identifier'])
xml = getattr(self.dataset,
self.context.md_core_model['mappings']['pycsw:XML'])
anytext = getattr(self.dataset,
self.context.md_core_model['mappings']['pycsw:AnyText'])
if recprops is None and constraint is None: # full update
LOGGER.debug('full update')
update_dict = dict([(getattr(self.dataset, key),
getattr(record, key)) \
for key in record.__dict__.keys() if key != '_sa_instance_state'])
try:
self.session.begin()
self._get_repo_filter(self.session.query(self.dataset)).filter_by(
identifier=identifier).update(update_dict, synchronize_session='fetch')
self.session.commit()
except Exception as err:
self.session.rollback()
raise RuntimeError('ERROR: %s' % str(err.orig))
else: # update based on record properties
LOGGER.debug('property based update')
try:
rows = rows2 = 0
self.session.begin()
for rpu in recprops:
# update queryable column and XML document via XPath
if 'xpath' not in rpu['rp']:
self.session.rollback()
raise RuntimeError('XPath not found for property %s' % rpu['rp']['name'])
if 'dbcol' not in rpu['rp']:
self.session.rollback()
raise RuntimeError('property not found for XPath %s' % rpu['rp']['name'])
rows += self._get_repo_filter(self.session.query(self.dataset)).filter(
text(constraint['where'])).params(self._create_values(constraint['values'])).update({
getattr(self.dataset,
rpu['rp']['dbcol']): rpu['value'],
'xml': func.update_xpath(str(self.context.namespaces),
getattr(self.dataset,
self.context.md_core_model['mappings']['pycsw:XML']),
str(rpu)),
}, synchronize_session='fetch')
# then update anytext tokens
rows2 += self._get_repo_filter(self.session.query(self.dataset)).filter(
text(constraint['where'])).params(self._create_values(constraint['values'])).update({
'anytext': func.get_anytext(getattr(
self.dataset, self.context.md_core_model['mappings']['pycsw:XML']))
}, synchronize_session='fetch')
self.session.commit()
return rows
except Exception as err:
self.session.rollback()
raise RuntimeError('ERROR: %s' % str(err.orig))
def delete(self, constraint):
''' Delete a record from the repository '''
try:
self.session.begin()
rows = self._get_repo_filter(self.session.query(self.dataset)).filter(
text(constraint['where'])).params(self._create_values(constraint['values']))
parentids = []
for row in rows: # get ids
parentids.append(getattr(row,
self.context.md_core_model['mappings']['pycsw:Identifier']))
rows=rows.delete(synchronize_session='fetch')
if rows > 0:
LOGGER.debug('Deleting all child records')
# delete any child records which had this record as a parent
rows += self._get_repo_filter(self.session.query(self.dataset)).filter(
getattr(self.dataset,
self.context.md_core_model['mappings']['pycsw:ParentIdentifier']).in_(parentids)).delete(
synchronize_session='fetch')
self.session.commit()
except Exception as err:
self.session.rollback()
raise RuntimeError('ERROR: %s' % str(err.orig))
return rows
def _get_repo_filter(self, query):
''' Apply repository wide side filter / mask query '''
if self.filter is not None:
return query.filter(self.filter)
return query
|
the-stack_0_14927 | # model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='CascadeEncoderDecoder',
num_stages=2,
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 1, 1),
strides=(1, 2, 2, 2),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=4),
decode_head=[
dict(
type='FPNHead',
in_channels=[256, 256, 256, 256],
in_index=[0, 1, 2, 3],
feature_strides=[4, 8, 16, 32],
channels=128,
dropout_ratio=-1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
dict(
type='PointHead',
in_channels=[256],
in_index=[0],
channels=256,
num_fcs=3,
coarse_pred_each_layer=True,
dropout_ratio=-1,
num_classes=19,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
])
# model training and testing settings
train_cfg = dict(
num_points=2048, oversample_ratio=3, importance_sample_ratio=0.75)
test_cfg = dict(
mode='whole',
subdivision_steps=2,
subdivision_num_points=8196,
scale_factor=2)
|
the-stack_0_14930 | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from ax.exceptions.model import ModelError
from ax.models.discrete.thompson import ThompsonSampler
from ax.utils.common.testutils import TestCase
class ThompsonSamplerTest(TestCase):
def setUp(self):
self.Xs = [[[1, 1], [2, 2], [3, 3], [4, 4]]] # 4 arms, each of dimensionality 2
self.Ys = [[1, 2, 3, 4]]
self.Yvars = [[1, 1, 1, 1]]
self.parameter_values = [[1, 2, 3, 4], [1, 2, 3, 4]]
self.outcome_names = ["x", "y"] # not used for regular TS
self.multiple_metrics_Xs = [
[[1, 1], [2, 2], [3, 3], [4, 4]],
[[1, 1], [2, 2], [3, 3], [4, 4]],
] # 2 metrics, 4 arms, each of dimensionality 2
self.multiple_metrics_Ys = [[1, 2, 3, 4], [0, 0, 0, 1]]
self.multiple_metrics_Yvars = [[1, 1, 1, 1], [1, 1, 1, 1]]
def testThompsonSampler(self):
generator = ThompsonSampler(min_weight=0.0)
generator.fit(
Xs=self.Xs,
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
arms, weights, gen_metadata = generator.gen(
n=3, parameter_values=self.parameter_values, objective_weights=np.ones(1)
)
self.assertEqual(arms, [[4, 4], [3, 3], [2, 2]])
for weight, expected_weight in zip(
weights, [3 * i for i in [0.725, 0.225, 0.05]]
):
self.assertAlmostEqual(weight, expected_weight, 1)
self.assertEqual(len(gen_metadata["arms_to_weights"]), 4)
def testThompsonSamplerValidation(self):
generator = ThompsonSampler(min_weight=0.01)
# all Xs are not the same
with self.assertRaises(ValueError):
generator.fit(
Xs=[[[1, 1], [2, 2], [3, 3], [4, 4]], [[1, 1], [2, 2], [4, 4]]],
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
# multiple observations per parameterization
with self.assertRaises(ValueError):
generator.fit(
Xs=[[[1, 1], [2, 2], [2, 2]]],
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
# these are not the same observations, so should not error
generator.fit(
Xs=[[[1, 1], [2.0, 2], [2, 2]]],
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
# requires objective weights
with self.assertRaises(ValueError):
generator.gen(5, self.parameter_values, objective_weights=None)
def testThompsonSamplerMinWeight(self):
generator = ThompsonSampler(min_weight=0.01)
generator.fit(
Xs=self.Xs,
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
arms, weights, _ = generator.gen(
n=5, parameter_values=self.parameter_values, objective_weights=np.ones(1)
)
self.assertEqual(arms, [[4, 4], [3, 3], [2, 2]])
for weight, expected_weight in zip(
weights, [3 * i for i in [0.725, 0.225, 0.05]]
):
self.assertAlmostEqual(weight, expected_weight, 1)
def testThompsonSamplerUniformWeights(self):
generator = ThompsonSampler(min_weight=0.0, uniform_weights=True)
generator.fit(
Xs=self.Xs,
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
arms, weights, _ = generator.gen(
n=3, parameter_values=self.parameter_values, objective_weights=np.ones(1)
)
self.assertEqual(arms, [[4, 4], [3, 3], [2, 2]])
for weight, expected_weight in zip(weights, [1.0, 1.0, 1.0]):
self.assertAlmostEqual(weight, expected_weight, 1)
def testThompsonSamplerInfeasible(self):
generator = ThompsonSampler(min_weight=0.9)
generator.fit(
Xs=self.Xs,
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
with self.assertRaises(ModelError):
generator.gen(
n=3,
parameter_values=self.parameter_values,
objective_weights=np.ones(1),
)
def testThompsonSamplerOutcomeConstraints(self):
generator = ThompsonSampler(min_weight=0.0)
generator.fit(
Xs=self.multiple_metrics_Xs,
Ys=self.multiple_metrics_Ys,
Yvars=self.multiple_metrics_Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
arms, weights, _ = generator.gen(
n=4,
parameter_values=self.parameter_values,
objective_weights=np.array([1, 0]),
outcome_constraints=(
# pass in multiples of the same constraint
# to ensure that shapes are correct for multiple constraints
np.array([[0, 1], [0, 1], [0, 1]]),
np.array([[1], [1], [1]]),
),
)
self.assertEqual(arms, [[3, 3], [4, 4], [2, 2], [1, 1]])
for weight, expected_weight in zip(
weights, [4 * i for i in [0.4, 0.4, 0.15, 0.05]]
):
self.assertAlmostEqual(weight, expected_weight, delta=0.15)
def testThompsonSamplerOutcomeConstraintsInfeasible(self):
generator = ThompsonSampler(min_weight=0.0)
generator.fit(
Xs=self.multiple_metrics_Xs,
Ys=self.multiple_metrics_Ys,
Yvars=self.multiple_metrics_Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
with self.assertRaises(ModelError):
generator.gen(
n=3,
parameter_values=self.parameter_values,
objective_weights=np.ones(2),
outcome_constraints=(np.array([[0, 1]]), np.array([[-10]])),
)
def testThompsonSamplerPredict(self):
generator = ThompsonSampler(min_weight=0.0)
generator.fit(
Xs=self.Xs,
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
f, cov = generator.predict([[1, 1], [3, 3]])
self.assertTrue(np.array_equal(f, np.array([[1], [3]])))
self.assertTrue(np.array_equal(cov, np.ones((2, 1, 1))))
with self.assertRaises(ValueError):
generator.predict([[1, 2]])
|
the-stack_0_14931 | """Basic tests of the Misty VM
SCL <[email protected]>
Copyright (c) 2020 rerobots, Inc.
"""
import pytest
import mistygrind
@pytest.fixture
def client(loop, aiohttp_client):
yield loop.run_until_complete(
aiohttp_client(mistygrind.vm.create_vm())
)
async def test_api_battery(client):
resp = await client.get('/api/battery')
assert resp.status == 200
payload = await resp.json()
assert payload['status'] == 'Success'
assert 'chargePercent' in payload['result'] and 'isCharging' in payload['result']
|
the-stack_0_14932 | #! /usr/bin/env python
import sys
from aubio import source, sink, pvoc, tss
if __name__ == '__main__':
if len(sys.argv) < 2:
print('usage: %s <inputfile> <outputfile_transient> <outputfile_steady>' % sys.argv[0])
sys.exit(1)
samplerate = 44100
win_s = 1024 # fft size
hop_s = win_s // 8 # block size
f = source(sys.argv[1], samplerate, hop_s)
g = sink(sys.argv[2], samplerate)
h = sink(sys.argv[3], samplerate)
pva = pvoc(win_s, hop_s) # a phase vocoder
pvb = pvoc(win_s, hop_s) # another phase vocoder
t = tss(win_s, hop_s) # transient steady state separation
t.set_threshold(0.01)
t.set_alpha(3.)
t.set_beta(4.)
read = hop_s
while read:
samples, read = f() # read file
spec = pva(samples) # compute spectrum
trans_spec, stead_spec = t(spec) # transient steady-state separation
transients = pva.rdo(trans_spec) # overlap-add synthesis of transients
steadstate = pvb.rdo(stead_spec) # overlap-add synthesis of steady states
g(transients, read) # write transients to output
h(steadstate, read) # write steady states to output
del f, g, h # finish writing the files now
sys.exit(0)
from demo_spectrogram import get_spectrogram
from pylab import subplot, show
subplot(311)
get_spectrogram(sys.argv[1])
subplot(312)
get_spectrogram(sys.argv[2])
subplot(313)
get_spectrogram(sys.argv[3])
show()
|
the-stack_0_14933 | import re
from urllib.parse import urlencode
import collections
from directory_api_client.client import api_client
from directory_constants import choices
import directory_components.helpers
from ipware import get_client_ip
from django.http import Http404
from django.utils import translation
from django.urls import reverse
from django.utils.html import escape, mark_safe
from core import constants
from core.constants import HeaderConfig
INDUSTRY_CHOICES = dict(choices.INDUSTRIES)
def unslugify(slug):
return slug.replace('-', ' ').capitalize()
def get_language_from_querystring(request):
language_codes = translation.trans_real.get_languages()
language_code = request.GET.get('language') or request.GET.get('lang')
if language_code and language_code in language_codes:
return language_code
NotifySettings = collections.namedtuple(
'NotifySettings',
[
'company_template',
'support_template',
'investor_template',
'support_email_address',
]
)
def get_ga_data_for_page(page_type):
return constants.GA_DATA_MAPPING[page_type]
def get_paginator_url(filters, url_name):
url = reverse(url_name)
querystring = urlencode({
key: value
for key, value in filters.lists()
if value and key != 'page'
}, doseq=True)
return f'{url}?{querystring}'
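# Example (sketch): for a Django QueryDict built from
# "?industries=AEROSPACE&page=3", the "page" key and any empty values are
# dropped before re-encoding, so the result is roughly
# "<resolved url>?industries=AEROSPACE". The query string shown here is an
# assumption for illustration only.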
class SectorFilter:
def __init__(self, sectors):
self.sectors = sectors
def matches(self, opportunity):
return any(
sector['related_sector'].get('heading') in self.sectors
for sector in opportunity.get('related_sectors', [])
if sector['related_sector'] and sector['related_sector']['heading']
)
Scale = collections.namedtuple("Scale", "title min max")
class ScaleFilter:
scales_with_values = [
Scale(title='< £100m', min=1, max=99),
Scale(title='£100m - £499m', min=100, max=499),
Scale(title='£500m - £999m', min=500, max=999),
Scale(title='> £1bn', min=1000, max='None'),
Scale(title='Value unknown', min=0, max=0)
]
def __init__(self, scale_strings):
self.selected_scales = [
scaleFilter for scaleFilter in self.scales_with_values
if scaleFilter.title in scale_strings
]
def matches(self, opportunity):
for scale_chosen in self.selected_scales:
if scale_chosen.min == 0 and scale_chosen.max == 0:
if not opportunity['scale_value']:
return True
elif float(opportunity['scale_value']) == 0.00:
return True
elif scale_chosen.max == 'None':
if scale_chosen.min <= float(opportunity['scale_value']):
return True
elif scale_chosen.max:
if scale_chosen.min <= float(opportunity['scale_value']) <= scale_chosen.max: # NOQA
return True
class MultipleRegionsFilter:
def __init__(self, regions):
self.regions = regions
def matches(self, opportunity):
for related_region in opportunity.get('related_regions', []):
if related_region['title'] and related_region['title'] in self.regions:
return True
class SubSectorFilter:
def __init__(self, sub_sectors):
self.sub_sectors = sub_sectors
def matches(self, opportunity):
if 'sub_sectors' in opportunity and opportunity['sub_sectors']:
for sub_sector in opportunity['sub_sectors']:
if sub_sector in self.sub_sectors:
return True
return False
class InvestmentTypeFilter:
def __init__(self, investment_types):
self.investment_types = investment_types
def matches(self, opportunity):
if 'investment_type' in opportunity and opportunity['investment_type']:
if opportunity['investment_type'] in self.investment_types:
return True
return False
class PlanningStatusFilter:
def __init__(self, planning_statuses):
self.planning_statuses = planning_statuses
def matches(self, opportunity):
if 'planning_status' in opportunity and opportunity['planning_status']:
if opportunity['planning_status'] in self.planning_statuses:
return True
return False
def filter_opportunities(opportunities, filter_chosen):
return [opp for opp in opportunities if filter_chosen.matches(opp)]
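# Sketch of how the filter classes above combine with filter_opportunities
# (the opportunity dict below is made up for illustration):
#
#   opportunities = [{'scale_value': '250', 'title': 'New rail link'}]
#   chosen = ScaleFilter(['£100m - £499m'])
#   shortlist = filter_opportunities(opportunities, chosen)  # item is kept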
Sort_by = collections.namedtuple("Sort_by", "title value reverse")
class SortFilter:
sort_by_with_values = [
Sort_by(title='Opportunity name: A to Z', value='title', reverse=False),
Sort_by(title='Opportunity name: Z to A', value='title', reverse=True),
Sort_by(
title='Scale: Low to High', value='scale_value', reverse=False
),
Sort_by(title='Scale: High to Low', value='scale_value', reverse=True)
]
def __init__(self, sort_by_filter_chosen):
self.sort_by_filter_chosen = next(
(sort_by for sort_by
in self.sort_by_with_values
if sort_by.title == sort_by_filter_chosen),
self.sort_by_with_values[0])
def sort_opportunities(opportunities, sort_by_chosen):
sort_filter = sort_by_chosen.sort_by_filter_chosen
if sort_filter.value == 'title':
opportunities.sort(
key=lambda x: x['title'],
reverse=sort_filter.reverse
)
if sort_filter.value == 'scale_value':
opportunities.sort(
key=lambda x: float(x['scale_value']),
reverse=sort_filter.reverse
)
return opportunities
class CompanyParser(directory_components.helpers.CompanyParser):
def serialize_for_template(self):
if not self.data:
return {}
return {
**self.data,
'date_of_creation': self.date_of_creation,
'address': self.address,
'sectors': self.sectors_label,
'keywords': self.keywords,
'employees': self.employees_label,
'expertise_industries': self.expertise_industries_label,
'expertise_regions': self.expertise_regions_label,
'expertise_countries': self.expertise_countries_label,
'expertise_languages': self.expertise_languages_label,
'has_expertise': self.has_expertise,
'expertise_products_services': (
self.expertise_products_services_label
),
'is_in_companies_house': self.is_in_companies_house,
}
def get_results_from_search_response(response):
parsed = response.json()
formatted_results = []
for result in parsed['hits']['hits']:
parser = CompanyParser(result['_source'])
formatted = parser.serialize_for_template()
if 'highlight' in result:
highlighted = '...'.join(
result['highlight'].get('description', '') or
result['highlight'].get('summary', '')
)
# escape all html tags other than <em> and </em>
highlighted_escaped = (
escape(highlighted).replace('<em>', '<em>').replace('</em>', '</em>')
)
formatted['highlight'] = mark_safe(highlighted_escaped)
formatted_results.append(formatted)
parsed['results'] = formatted_results
return parsed
def get_filters_labels(filters):
sectors = dict(choices.INDUSTRIES)
languages = dict(choices.EXPERTISE_LANGUAGES)
labels = []
skip_fields = [
'q',
'page',
        # Prevents duplicate labels from being displayed in the filter list
'expertise_products_services_labels'
]
for name, values in filters.items():
if name in skip_fields:
pass
elif name == 'industries':
labels += [sectors[item] for item in values if item in sectors]
elif name == 'expertise_languages':
labels += [languages[item] for item in values if item in languages]
elif name.startswith('expertise_products_services_'):
labels += values
else:
for value in values:
labels.append(value.replace('_', ' ').title())
return labels
def get_company_profile(number):
response = api_client.company.published_profile_retrieve(number=number)
if response.status_code == 404:
raise Http404(f'API returned 404 for company number {number}')
response.raise_for_status()
return response.json()
def count_data_with_field(list_of_data, field):
filtered_list = [item for item in list_of_data if item[field]]
return len(filtered_list)
def pair_sector_values_with_label(sectors_values):
if not sectors_values:
return []
return [
pair_sector_value_with_label(value) for value in sectors_values
if value in INDUSTRY_CHOICES
]
def pair_sector_value_with_label(sectors_value):
return {'value': sectors_value, 'label': get_sectors_label(sectors_value)}
def get_sectors_label(sectors_value):
if not sectors_value:
return sectors_value
return INDUSTRY_CHOICES.get(sectors_value)
def get_case_study_details_from_response(response):
parsed = response.json()
# `format_company_details` expects `supplier_case_studies` key.
parsed['company']['supplier_case_studies'] = []
parsed['sector'] = pair_sector_value_with_label(parsed['sector'])
parsed['company'] = CompanyParser(
parsed['company']
).serialize_for_template()
return parsed
def format_case_study(case_study):
case_study_url = reverse(
'find-a-supplier:case-study-details',
kwargs={'id': case_study['pk'], 'slug': case_study['slug']},
)
return {
**case_study,
'sector': pair_sector_value_with_label(case_study['sector']),
'case_study_url': case_study_url,
}
def get_case_study(case_study_id):
response = api_client.company.published_case_study_retrieve(case_study_id)
if response.status_code == 404:
        raise Http404(
            f"API returned 404 for case study with id {case_study_id}"
        )
response.raise_for_status()
return get_case_study_details_from_response(response)
def get_map_labels_with_vertical_positions(list_of_title_words, middle_x, middle_y):
lowest_y = middle_y - ((len(list_of_title_words) - 1) / 2) * 25
labels_with_coordinates = [
{'title': list_of_title_words[i], 'x': str(middle_x), 'y': str((lowest_y + (i * 25)))}
for i in range(len(list_of_title_words))
]
return labels_with_coordinates
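# Worked example (illustrative values): three title words centred on
# (middle_x=100, middle_y=200) are stacked 25px apart, so lowest_y is
# 200 - 25 = 175 and the returned y values are '175.0', '200.0', '225.0'.
#
#   get_map_labels_with_vertical_positions(['North', 'of', 'England'], 100, 200)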
def get_header_config(path):
for (pattern, config) in constants.HEADER_SECTION_MAPPING.items():
compiled_pattern = re.compile(pattern)
if compiled_pattern.match(path):
return config
# If no matching URL is found, just return a default config.
return HeaderConfig(section=None, sub_section=None)
def get_header_section(path):
return get_header_config(path).section
def get_header_sub_section(path):
return get_header_config(path).sub_section
def get_sender_ip_address(request):
ip, is_routable = get_client_ip(request)
return ip or None
|
the-stack_0_14934 | import functools
import logging
def catch_exception(func):
"""
A decorator that wraps the passed in function and logs
exceptions should one occur
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except:
# log the exception
err = "There was an exception in "
err += func.__name__
logging.exception(err)
# re-raise the exception
raise
return wrapper
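# Hypothetical usage (the function below is illustrative, not part of the
# original module): any exception raised inside a decorated function is
# logged with its traceback and then re-raised unchanged.
@catch_exception
def _example_divide(a, b):
    """Divide a by b; a ZeroDivisionError here is logged, then re-raised."""
    return a / b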
|
the-stack_0_14936 | import pygame
import os
import random
CAR = pygame.image.load("Car.png")
BACKGROUND = pygame.image.load("Road.png")
BG_CARS = [
pygame.transform.scale(pygame.image.load("cars/" + vehicle), (100, 100))
for vehicle in os.listdir("cars")
]
MAX_CARS = 5
class Game:
RANDOM_CARS_COUNT = 0
def __init__(self):
pygame.init()
self.score = 0
self.window = pygame.display.set_mode((500, 800))
pygame.display.set_caption("Racing AI")
self.clock = pygame.time.Clock()
self.execute = True
def cleanUpCars(self, bg_cars):
for c in bg_cars:
if c.y >= 800:
bg_cars.remove(c)
self.RANDOM_CARS_COUNT -= 1
return bg_cars
def createNewCars(self, bg_cars):
extra = len([car for car in bg_cars if not car.onScreen()])
while self.RANDOM_CARS_COUNT != MAX_CARS + extra:
new_car = BackgroundCars(BG_CARS[random.randint(0, 5)], self.window)
will_append = True
for cars in bg_cars:
if cars.collide(new_car) or self.RANDOM_CARS_COUNT == MAX_CARS + extra:
will_append = False
break
if will_append:
bg_cars.append(new_car)
self.RANDOM_CARS_COUNT += 1
return bg_cars
def run(self):
car = Car(250, 650, self.window)
track = Track(50, self.window)
bg_cars = []
self.createNewCars(bg_cars)
while self.execute:
keys = pygame.key.get_pressed()
self.window.fill((0, 255, 0))
for event in pygame.event.get():
if event.type == pygame.QUIT or keys[pygame.K_0]:
self.execute = False
bg_cars = self.cleanUpCars(bg_cars)
bg_cars = self.createNewCars(bg_cars)
track.draw()
self.score = track.move(self.score)
car.draw()
for i in random.sample(
list(range(self.RANDOM_CARS_COUNT)), self.RANDOM_CARS_COUNT
):
bg_cars[i].draw()
bg_cars[i].move()
if keys[pygame.K_LEFT]:
car.x -= car.vel
if keys[pygame.K_RIGHT]:
car.x += car.vel
if keys[pygame.K_UP] and car.y + car.vel >= 250:
car.y -= car.vel
if keys[pygame.K_DOWN] and car.y + car.vel + car.height <= 750:
car.y += car.vel
for cars in bg_cars:
if cars.collide(car):
self.execute = False
if car.x < 50 or car.x + car.width > 450:
self.execute = False
self.clock.tick(60)
font = pygame.font.Font('freesansbold.ttf', 32)
text = font.render(" Score: " + str(self.score) + " ", True, (255, 0, 0), (0, 0, 0))
textRect = text.get_rect()
textRect.center = (400, 50)
self.window.blit(text, textRect)
pygame.display.update()
print("Score:", self.score)
pygame.time.wait(100)
pygame.quit()
class BackgroundCars:
def __init__(self, car, window):
self.x = random.randint(50, 350)
self.y = random.randint(-400, -100)
self.vel = 5
self.width = 100
self.height = 100
self.window = window
self.car = car
def move(self):
self.y += self.vel
def draw(self):
self.window.blit(self.car, (self.x, self.y))
def collide(self, gaddi):
playerMask = gaddi.mask()
carMask = self.mask()
collision = playerMask.overlap(carMask, (self.x - gaddi.x, self.y - gaddi.y))
return bool(collision)
def mask(self):
return pygame.mask.from_surface(self.car)
def onScreen(self):
if self.y <= 650:
return True
return False
def __str__(self):
return f"y: {self.y} , onScreen: {self.onScreen()}"
class Track:
def __init__(self, x, window):
self.x = x
self.y1 = 0
self.y2 = 800
self.vel = 10
self.window = window
def move(self, score):
self.y1 += self.vel
self.y2 += self.vel
if self.y1 - 800 > 0:
self.y1 = self.y2 - 800
if self.y2 - 800 > 0:
self.y2 = self.y1 - 800
return score + 1
def draw(self):
self.window.blit(BACKGROUND, (self.x, self.y1))
self.window.blit(BACKGROUND, (self.x, self.y2))
class Car:
def __init__(self, x, y, window):
self.x = x
self.y = y
self.vel = 6
self.width = 44
self.height = 100
self.window = window
self.car = CAR
def move(self):
self.y += self.vel
def draw(self):
self.window.blit(self.car, (self.x, self.y))
def mask(self):
return pygame.mask.from_surface(self.car)
if __name__ == "__main__":
game = Game()
game.run()
|
the-stack_0_14940 | #!/usr/bin/env python3
'''
Author: Tom McLaughlin
Email: [email protected]
Description: DynamoDB tables
'''
import os
import boto3
from boto3.dynamodb.conditions import Key
from .errors import ApiAuthSvcBaseError
from . import logging
_logger = logging.get_logger(__name__)
DDB_TABLE_NAME = os.environ.get('DDB_TABLE_NAME', '')
DDB_HASH_KEY = os.environ.get('DDB_HASH_KEY', '')
DDB_RANGE_KEY = os.environ.get('DDB_RANGE_KEY', '')
class DynamoDBTableBaseError(Exception):
'''Base exception class'''
class DynamoDBTableCheckItemError(DynamoDBTableBaseError):
    '''Error checking item existence in DynamoDB'''
msg = "Error checking item existence in DynamoDB"
def __init__(self, message=msg) -> None:
super().__init__(message)
class DynamoDBTableGetItemError(DynamoDBTableBaseError):
    '''Error getting item from DynamoDB'''
msg = "Unable to get item from DynamoDB"
def __init__(self, message=msg) -> None:
super().__init__(message)
class DynamoDBTablePutItemError(DynamoDBTableBaseError):
    '''Error putting item into DynamoDB'''
msg = "Unable to write item to DynamoDB"
def __init__(self, message=msg) -> None:
super().__init__(message)
class DynamoDBTableQueryItemError(DynamoDBTableBaseError):
    '''Error querying items in DynamoDB'''
msg = "Unable to query item in DynamoDB"
def __init__(self, message=msg) -> None:
super().__init__(message)
class DynamoDBTable:
'''DynamoDB Table'''
def __init__(self, table_name: str = DDB_TABLE_NAME, hash_key: str = DDB_HASH_KEY, range_key: str = DDB_RANGE_KEY) -> None:
self._table_name = table_name
self._hash_key = hash_key
self._range_key = range_key
        self._ddb_resource = boto3.resource('dynamodb')
        self._ddb_table = self._ddb_resource.Table(self._table_name)
@property
def table_name(self) -> str:
'''DDB table name.'''
return self._table_name
@property
def hash_key(self) -> str:
'''DDB table hash key'''
return self._hash_key
@property
def range_key(self) -> str:
'''DDB table range key'''
return self._range_key
def check_item_exists(self, item_id) -> bool:
'''Check if item already exists'''
try:
resp = self._ddb_table.query(
Select='COUNT',
KeyConditionExpression=Key(self._hash_key).eq(item_id)
)
except Exception as e:
_logger.exception(e)
raise DynamoDBTableCheckItemError
return resp.get('Count') > 0
def get_item(self, item_id, range_value, consistent_read=False) -> dict:
'''Return an item'''
_logger.info(item_id)
try:
items = self._ddb_table.get_item(
Key={
self._hash_key: item_id,
self._range_key: range_value
},
ConsistentRead=consistent_read
)
except Exception as e:
_logger.exception(e)
raise DynamoDBTableGetItemError
        return items.get('Item')  # boto3 get_item responses use the 'Item' key
def put_item(self, item: dict) -> None:
'''Put item in DDB'''
try:
self._ddb_table.put_item(
Item=item
)
except Exception as e:
_logger.exception(e)
raise DynamoDBTablePutItemError
def query_by_item_id(self, item_id, start_key: dict = {}) -> list:
'''query for item'''
item_list = []
query_kwargs = {
'KeyConditionExpression': Key(self._hash_key).eq(item_id)
}
if bool(start_key):
query_kwargs['ExclusiveStartKey'] = start_key
try:
resp = self._ddb_table.query(**query_kwargs)
except Exception as e:
_logger.exception(e)
raise DynamoDBTableQueryItemError
item_list += resp.get('Items')
if bool(resp.get('LastEvaluatedKey')):
item_list += self.query_by_item_id(item_id, resp.get('LastEvaluatedKey'))
return item_list
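# Usage sketch (comments only; the table name, key names and item values are
# assumptions for illustration and must match a real table in the target
# AWS account):
#
#   table = DynamoDBTable(table_name='api-auth', hash_key='client_id',
#                         range_key='created_at')
#   if not table.check_item_exists('client-123'):
#       table.put_item({'client_id': 'client-123',
#                       'created_at': '2020-01-01T00:00:00Z'})
#   items = table.query_by_item_id('client-123')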
|
the-stack_0_14941 | #!/usr/bin/env pnpython4
# -*- coding: iso-8859-15 -*-
#
# Read Fairfield SEG-D (Version 1.6) from the Sweetwater experiment.
# Write PH5
#
# Steve Azevedo, May 2014
# Modified to read SEG-D from 3C's, July 2016
#
import os
import sys
import logging
import time
import json
import re
from math import modf
import warnings
from pyproj import Proj, transform
import construct
import bcd_py
from tables import NaturalNameWarning
from ph5.core import experiment, columns, segdreader, segdreader_smartsolo
from ph5 import LOGGING_FORMAT
warnings.filterwarnings('ignore', category=NaturalNameWarning)
PROG_VERSION = "2021.159"
LOGGER = logging.getLogger(__name__)
MAX_PH5_BYTES = 1073741824 * 100. # 100 GB (1024 x 1024 x 1024 x 100)
os.environ['TZ'] = 'GMT'
time.tzset()
APPEND = 1 # Number of SEG-D events to append to make 1 ph5 event.
DAS_INFO = {}
MAP_INFO = {}
# Current raw file processing
F = None
# RE for mini files
miniPH5RE = re.compile(r".*miniPH5_(\d\d\d\d\d)\.ph5")
# -2.5V to 2.5V
mV_full_scale = 5000
# 24-bit
counts_full_scale = 2**24
def bitweight(db):
# where db = 20log(V1,V2)
return (mV_full_scale / (10.**(db/20.))) / counts_full_scale
dbs = (0, 6, 12, 18, 24, 30, 36)
LSB_MAP = {db: bitweight(db) for db in dbs}
LSB = LSB_MAP[36]
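# Worked example: at 36 dB of preamp gain the bit weight is
# 5000 mV / 10**(36 / 20.) / 2**24 ~= 4.72e-06 mV per count, which is the
# value stored in LSB_MAP[36] and used as the default LSB above.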
# Manufacturers codes
FAIRFIELD = 20
OTHER = 0
def read_manufacture_code(filename):
""" read byte 17 for manufacture code"""
f = open(filename, 'rb')
f.seek(16)
byte = f.read(1)
swap = True
if sys.byteorder == 'big':
swap = False
bin = construct.BitStruct("BIN",
construct.BitField(
"field", 8, swapped=swap))
bcd = bin.parse(byte)['field']
if sys.byteorder == 'little':
bcd = construct.ULInt64("xxx").build(bcd)
else:
bcd = construct.UBInt64("xxx").build(bcd)
code = bcd_py.bcd2int(bcd, 0, 2)
f.close()
return code
def get_segdreader(filename, manucode):
"""
get the segdreader from manufacture code infile
or from --manufacturers_code argument
"""
KNOWN_CODE = {20: (segdreader, 'FairField'),
61: (segdreader_smartsolo, 'SmartSolo')}
req_code_list = ["%s for %s format" % (k, KNOWN_CODE[k][1])
for k in KNOWN_CODE.keys()]
req_code_str = ("Please give flag --manufacturers_code either "
' or '.join(req_code_list))
manu = read_manufacture_code(filename)
if manu in KNOWN_CODE.keys():
reader = KNOWN_CODE[manu][0]
else:
try:
if manucode in KNOWN_CODE.keys():
reader = KNOWN_CODE[manucode][0]
else:
LOGGER.error("manufacturers_code flag {0} is not one of "
"the known codes: {1}.\n{2}".
format(manucode, KNOWN_CODE.keys(), req_code_str))
raise Exception
except IndexError:
LOGGER.error("The manufacture code recorded in file {0} is not "
"one of the known codes: {1}.\n{2}".
format(manucode, KNOWN_CODE.keys(), req_code_str))
raise Exception
return reader
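# Usage sketch (the file name below is illustrative): pick the reader module
# from the manufacturer code embedded in the file, falling back to the value
# given with --manufacturers_code when the embedded code is unknown.
#
#   reader_module = get_segdreader('1111.0.0.rg16', manucode=FAIRFIELD)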
#
# To hold table rows and keys
#
class Rows_Keys(object):
__slots__ = ('rows', 'keys')
def __init__(self, rows=None, keys=None):
self.rows = rows
self.keys = keys
def set(self, rows=None, keys=None):
if rows is not None:
self.rows = rows
if keys is not None:
self.keys = keys
class Index_t_Info(object):
__slots__ = ('das', 'ph5file', 'ph5path', 'startepoch', 'stopepoch')
def __init__(self, das, ph5file, ph5path, startepoch, stopepoch):
self.das = das
self.ph5file = ph5file
self.ph5path = ph5path
self.startepoch = startepoch
self.stopepoch = stopepoch
class Resp(object):
__slots__ = ('lines', 'keys', 't')
def __init__(self, t):
self.t = t
self.update()
def update(self):
self.lines, self.keys = self.t.read_responses()
def match(self, bw, gain):
for ln in self.lines:
if ln['bit_weight/value_d'] == bw and ln['gain/value_i'] == gain:
return ln['n_i']
return -1
def next_i(self):
return len(self.lines)
class Trace(object):
__slots__ = ("trace", "headers")
def __init__(self, trace, headers):
self.trace = trace
self.headers = headers
def read_infile(infile):
''' Read list of input SEG-D files from a file '''
global FILES
def fn_sort(a, b):
return cmp(os.path.basename(a), os.path.basename(b))
try:
fh = file(infile)
except Exception:
LOGGER.warning("Failed to open %s\n" % infile)
return
while True:
line = fh.readline()
if not line:
break
line = line.strip()
if not line:
continue
if line[0] == '#':
continue
FILES.append(line)
FILES.sort(fn_sort)
def get_args():
global PH5, FILES, EVERY, NUM_MINI, TSPF, UTM, FIRST_MINI, APPEND,\
MANUFACTURERS_CODE
TSPF = False
from optparse import OptionParser
class MyParser(OptionParser):
"""
Override format_epilog to allow newlines
"""
def format_epilog(self, formatter):
return self.epilog
oparser = MyParser()
oparser.usage = "Version: {0} Usage: segdtoph5 [options]".format(
PROG_VERSION)
oparser.epilog = (
"Notice:\n"
"\tData of a Das can't be stored in more than one mini file.\n\n"
"\tUpdate astropy package for the lastes leap second table used in "
"converting time from GPS to UTC in SmartSolo's:\n"
"\t\tconda update astropy\n")
oparser.add_option("-r", "--raw", dest="rawfile",
help="Fairfield SEG-D v1.6 file.", metavar="raw_file")
oparser.add_option("-f", "--file",
action="store", dest="infile", type="string",
help="File containing list of Fairfield SEG-D\
v1.6 file names.",
metavar="file_list_file")
oparser.add_option("-n", "--nickname", dest="outfile",
help="The ph5 file prefix (experiment nick name).",
metavar="output_file_prefix")
oparser.add_option("-U", "--UTM", dest="utm_zone",
help="Locations in SEG-D file are UTM, --UTM=utmzone."
" Zone number and N or S designation"
" eg 13N",
type='str', default=0,
metavar="utm_zone")
oparser.add_option("-T", "--TSPF", dest="texas_spc",
help="Locations are in texas state plane coordinates.",
action='store_true', default=False)
oparser.add_option("-M", "--num_mini",
help=("Create a given number of miniPH5 files."
" Ex: -M 38"),
metavar="num_mini", type='int', default=None)
oparser.add_option("-S", "--first_mini",
help=("The index of the first miniPH5_xxxxx.ph5 "
"file of all. Ex: -S 5"),
metavar="first_mini", type='int', default=1)
oparser.add_option("-c", "--combine", dest="combine",
help="Combine this number if SEG-D traces to one\
PH5 trace.",
metavar="combine", type='int', default=APPEND)
oparser.add_option("-E", "--allevents", action="store_true",
dest="all_events",
default=False, metavar="all_events")
oparser.add_option("--manufacturers_code", dest="manufacturers_code",
help="Manufacturers code. Defaults to 20 for Fairfield.\
Most likely will not work for SEG-D written by other\
data loggers,",
type='int', default=None)
options, args = oparser.parse_args()
if options.rawfile and options.infile:
oparser.error("argument -f/--file: not allowed with argument -r/--raw")
FILES = []
PH5 = None
EVERY = options.all_events
NUM_MINI = options.num_mini
FIRST_MINI = options.first_mini
UTM = options.utm_zone
TSPF = options.texas_spc
APPEND = options.combine
MANUFACTURERS_CODE = options.manufacturers_code
if options.infile is not None:
read_infile(options.infile)
elif options.rawfile is not None:
FILES.append(options.rawfile)
if len(FILES) == 0:
raise Exception("No input file given.\n")
# Set output file
if options.outfile is not None:
PH5 = options.outfile
else:
raise Exception("No outfile (PH5) given.\n")
setLogger()
def setLogger():
if LOGGER.handlers != []:
LOGGER.removeHandler(LOGGER.handlers[0])
# Write log to file
ch = logging.FileHandler("segd2ph5.log")
ch.setLevel(logging.INFO)
# Add formatter
formatter = logging.Formatter(LOGGING_FORMAT)
ch.setFormatter(formatter)
LOGGER.addHandler(ch)
def initializeExperiment():
global EX
EX = experiment.ExperimentGroup(nickname=PH5)
EDIT = True
EX.ph5open(EDIT)
EX.initgroup()
def openPH5(filename):
''' Open PH5 file, miniPH5_xxxxx.ph5 '''
try:
if EXREC.ph5.isopen:
if EXREC.filename != filename:
EXREC.ph5close()
else:
return EXREC
except BaseException:
pass
exrec = experiment.ExperimentGroup(nickname=filename)
exrec.ph5open(True)
exrec.initgroup()
return exrec
def update_index_t_info(starttime, samples, sps):
''' Update info that gets saved in Index_t '''
global DAS_INFO, MAP_INFO
ph5file = EXREC.filename
ph5path = '/Experiment_g/Receivers_g/' + \
EXREC.ph5_g_receivers.current_g_das._v_name
ph5map = '/Experiment_g/Maps_g/' + EXREC.ph5_g_maps.current_g_das._v_name
das = ph5path[32:]
stoptime = starttime + (float(samples) / float(sps))
di = Index_t_Info(das, ph5file, ph5path, starttime, stoptime)
dm = Index_t_Info(das, ph5file, ph5map, starttime, stoptime)
if das not in DAS_INFO:
DAS_INFO[das] = []
MAP_INFO[das] = []
DAS_INFO[das].append(di)
MAP_INFO[das].append(dm)
LOGGER.info(
"DAS: {0} File: {1} First Sample: {2} Last Sample: {3}".format(
das, ph5file, time.ctime(starttime), time.ctime(stoptime)))
def update_external_references():
''' Update external references in master.ph5 to
miniPH5 files in Receivers_t '''
global F
LOGGER.info("Updating external references...")
n = 0
for i in INDEX_T_DAS.rows:
external_file = i['external_file_name_s'][2:]
external_path = i['hdf5_path_s']
target = external_file + ':' + external_path
external_group = external_path.split('/')[3]
# Nuke old node
try:
group_node = EX.ph5.get_node(external_path)
group_node.remove()
except Exception as e:
pass
# Re-create node
try:
EX.ph5.create_external_link(
'/Experiment_g/Receivers_g', external_group, target)
n += 1
except Exception as e:
# pass
LOGGER.error("{0}\n".format(e.message))
LOGGER.info("done, {0} das nodes recreated.\n".format(n))
n = 0
for i in INDEX_T_MAP.rows:
external_file = i['external_file_name_s'][2:]
external_path = i['hdf5_path_s']
target = external_file + ':' + external_path
external_group = external_path.split('/')[3]
# Nuke old node
try:
group_node = EX.ph5.get_node(external_path)
group_node.remove()
except Exception as e:
pass
# Re-create node
try:
EX.ph5.create_external_link(
'/Experiment_g/Maps_g', external_group, target)
n += 1
except Exception as e:
# pass
LOGGER.error("{0}\n".format(e.message))
LOGGER.info("done, {0} map nodes recreated.\n".format(n))
def get_current_data_only(size_of_data, das=None):
''' Return opened file handle for data only PH5 file that will be
less than MAX_PH5_BYTES after raw data is added to it.
'''
def sstripp(s):
s = s.replace('.ph5', '')
s = s.replace('./', '')
return s
def smallest():
''' Return the name of the smallest miniPH5_xxxxx.ph5 '''
minifiles = filter(miniPH5RE.match, os.listdir('.'))
tiny = minifiles[0]
for f in minifiles:
if os.path.getsize(f) < os.path.getsize(tiny):
tiny = f
return tiny
das = str(das)
newestfile = ''
# Get the most recent data only PH5 file or match DAS serialnumber
n = 0
for index_t in INDEX_T_DAS.rows:
# This DAS already exists in a ph5 file
if index_t['serial_number_s'] == das:
newestfile = sstripp(index_t['external_file_name_s'])
return openPH5(newestfile)
# miniPH5_xxxxx.ph5 with largest xxxxx
mh = miniPH5RE.match(index_t['external_file_name_s'])
if n < int(mh.groups()[0]):
newestfile = sstripp(index_t['external_file_name_s'])
n = int(mh.groups()[0])
if not newestfile:
# This is the first file added
return openPH5('miniPH5_{0:05d}'.format(FIRST_MINI))
size_of_exrec = os.path.getsize(newestfile + '.ph5')
if NUM_MINI is not None:
fm = FIRST_MINI - 1
if (int(newestfile[8:13]) - fm) < NUM_MINI:
newestfile = "miniPH5_{0:05d}".format(int(newestfile[8:13]) + 1)
else:
small = sstripp(smallest())
return openPH5(small)
elif (size_of_data + size_of_exrec) > MAX_PH5_BYTES:
newestfile = "miniPH5_{0:05d}".format(int(newestfile[8:13]) + 1)
return openPH5(newestfile)
def getLOG():
    ''' Create and open a new and unique header file under Maps_g/Das_g_
/Sta_g_
/Evt_g_
/Hdr_a_
'''
current_das = EXREC.ph5_g_receivers.get_das_name()
g = EXREC.ph5_g_maps.newdas('Das_g_', current_das)
EXREC.ph5_g_maps.setcurrent(g)
try:
name = EXREC.ph5_g_maps.nextarray('Hdr_a_')
except TypeError:
return None
log_array = EXREC.ph5_g_maps.newearray(
name, description="SEG-D header entries: {0}".format(Das))
return log_array, name
def process_traces(rh, th, tr):
'''
Inputs:
rh -> reel headers
th -> first trace header
tr -> trace data
'''
def get_true_channel(SD):
if SD.manufacturer == 'FairfieldNodal':
'''
Orientation Code:
chan 1 -> N Changed to '1'
chan 2 -> E Changed to '2'
chan 3 -> Z
or
chan 1 -> Z
'''
# Find channel by mapping to streamer_cable_number
if rh.channel_set_to_streamer_cable_map[
th.trace_header.channel_set] \
== 0:
true_channel = th.trace_header.channel_set
else:
true_channel = rh.channel_set_to_streamer_cable_map[
th.trace_header.channel_set]
if SD.chan_sets_per_scan >= 3:
OM = {1: '1', 2: '2', 3: 'Z'}
elif SD.chan_sets_per_scan == 1:
OM = {1: 'Z'}
else:
OM = None
if OM is None:
orientation_code = true_channel
else:
orientation_code = OM[true_channel]
elif SD.manufacturer == 'SmartSolo':
channel_list = ['N', 'E', 'Z']
filename_parts = SD.name().split('.')
found_channel = False
true_channel = 0
orientation_code = None
for p in filename_parts:
if p in channel_list:
orientation_code = p
true_channel = channel_list.index(p) + 1
found_channel = True
break
if not found_channel:
LOGGER.warning(
"Neither E, N, nor Z can't be found in filename")
return true_channel, orientation_code
def get_raw_file_name(SD):
filename = SD.name()
if SD.manufacturer == 'SmartSolo':
channel_list = ['E', 'N', 'Z']
filename_parts = filename.split('.')
chanidx = -1
for c in channel_list:
try:
chanidx = filename_parts.index(c)
break
except ValueError:
pass
"""
Shorten filename to fit the field:
remove 'segd' at the end
remove second and decimal of second
add . in front of chan to show somethings have been removed
Ex: filename: 453005483.1.2021.03.15.16.00.00.000.E.segd
=> shorten: 453005483.1.2021.03.15.16.00..E
"""
filename_parts.remove('segd')
filename_parts[chanidx] = '.' + filename_parts[chanidx]
filename_parts.pop(chanidx - 1) # remove decimal part
filename_parts.pop(chanidx - 2) # remove second part
filename = '.'.join(filename_parts)
return os.path.basename(filename)
def process_das():
global LSB
p_das_t = {}
''' Das_t
receiver_table_n_i
response_table_n_i
time_table_n_i
time/
type_s
epoch_l
ascii_s
micro_seconds_i
event_number_i
channel_number_i
sample_rate_i
sample_rate_multiplier_i
sample_count_i
stream_number_i
raw_file_name_s
array_name_data_a
array_name_SOH_a
array_name_event_a
array_name_log_a
'''
# Check to see if group exists for this das, if not build it
das_g, das_t, receiver_t, time_t = EXREC.ph5_g_receivers.newdas(
str(Das))
# Build maps group (XXX)
EXREC.ph5_g_maps.newdas('Das_g_', str(Das))
if rh.general_header_blocks[0].chan_sets_per_scan == 1:
# Single channel
p_das_t['receiver_table_n_i'] = 0 # 0 -> Z
elif rh.general_header_blocks[0].chan_sets_per_scan >= 3:
# 1 (N node) -> 1 (N PH5), 2 (E Node)-> 2 (E PH5), 3 (Z Node) -> 0
# (Z PH5)
M = {1: 1, 2: 2, 3: 0}
p_das_t['receiver_table_n_i'] = M[get_true_channel(SD)[0]]
else:
p_das_t['receiver_table_n_i'] = 0 # 0 -> Z
LOGGER.warning(
"Header channel set: {0}. Check Receiver_t entries!".format(
th.trace_header.channel_set))
p_das_t['response_table_n_i'] = None
p_das_t['time_table_n_i'] = 0
p_das_t['time/type_s'] = 'BOTH'
try:
trace_epoch = th.trace_epoch
except Exception as e:
LOGGER.warning("Failed to read shot epoch: {0}.".format(e.message))
trace_epoch = 0.
f, i = modf(trace_epoch / 1000000.)
p_das_t['time/epoch_l'] = int(i)
p_das_t['time/ascii_s'] = time.ctime(p_das_t['time/epoch_l'])
p_das_t['time/micro_seconds_i'] = int(f * 1000000.)
p_das_t['event_number_i'] = th.event_number
p_das_t['channel_number_i'] = get_true_channel(SD)[0]
        p_das_t['sample_rate_i'] = SD.sample_rate
p_das_t['sample_rate_multiplier_i'] = 1
p_das_t['sample_count_i'] = len(tr)
p_das_t['stream_number_i'] = 1
p_das_t['raw_file_name_s'] = get_raw_file_name(SD)
p_das_t['array_name_data_a'] = EXREC.ph5_g_receivers.nextarray(
'Data_a_')
p_response_t = {}
'''
n_i
gain/
units_s
value_i
bit_weight/
units_s
value_d
response_file_a
'''
try:
LSB = LSB_MAP[th.preamp_gain_db]
n_i = RESP.match(LSB, th.preamp_gain_db)
except Exception as e:
n_i = 0
p_response_t['gain/units_s'] = 'dB'
try:
p_response_t['gain/value_i'] = th.preamp_gain_db
except Exception as e:
LOGGER.warning(
"Failed to read trace pre amp gain: {0}.".format(e.message))
p_response_t['gain/value_i'] = 0.
p_response_t['gain/units_s'] = 'Unknown'
p_response_t['bit_weight/units_s'] = 'mV/count'
p_response_t['bit_weight/value_d'] = LSB
if n_i < 0:
n_i = RESP.next_i()
p_response_t['n_i'] = n_i
EX.ph5_g_responses.populateResponse_t(p_response_t)
RESP.update()
p_das_t['response_table_n_i'] = n_i
EXREC.ph5_g_receivers.populateDas_t(p_das_t)
des = "Epoch: " + str(p_das_t['time/epoch_l']) + \
" Channel: " + str(p_das_t['channel_number_i'])
# Write trace data here
try:
if SD.manufacturer == 'FairfieldNodal':
# Convert to counts
tr_counts = tr / LSB
EXREC.ph5_g_receivers.newarray(
p_das_t['array_name_data_a'], tr_counts, dtype='int32',
description=des)
elif SD.manufacturer == 'SmartSolo':
# SmartSolo is recorded by mV
EXREC.ph5_g_receivers.newarray(
p_das_t['array_name_data_a'], tr, dtype='float32',
description=des)
except Exception as e:
# Failed, leave as float
LOGGER.warning(
"Could not convert trace to counts. max: {1},\
min {2}\n{0}".format(
e.message, tr.max(), tr.min()))
p_response_t['bit_weight/value_d'] = 1.
EXREC.ph5_g_receivers.newarray(
p_das_t['array_name_data_a'], tr, dtype='float32',
description=des)
update_index_t_info(p_das_t['time/epoch_l'] + (
float(p_das_t['time/micro_seconds_i']) / 1000000.),
p_das_t['sample_count_i'],
p_das_t['sample_rate_i'] / p_das_t[
'sample_rate_multiplier_i'])
def process_array():
p_array_t = {}
def seen_sta():
if line not in ARRAY_T:
return False
elif Das not in ARRAY_T[line]:
return False
elif dtime not in ARRAY_T[line][Das]:
return False
elif chan_set in ARRAY_T[line][Das][dtime]:
if not ARRAY_T[line][Das][dtime][chan_set]:
return False
else:
return True
'''
deploy_time/
type_s
epoch_l
ascii_s
micro_seconds_i
pickup_time/
type_s
epoch_l
ascii_s
micro_seconds_i
id_s
das/
manufacturer_s
model_s
serial_number_s
notes_s
sensor/
manufacturer_s
model_s
serial_number_s
notes_s
location/
coordinate_system_s
projection_s
ellipsoid_s
X/
units_s
value_d
Y/
units_s
value_d
Z/
units_s
value_d
description_s
channel_number_i
description_s
sample_rate_i
sample_rate_multiplier_i
'''
'''
Band Code:
1000 <= G < 5000
250 <= D < 1000
80 <= E < 250
10 <= S < 80
'''
if SD.sample_rate >= 1000:
band_code = 'G'
elif SD.sample_rate >= 250 and SD.sample_rate < 1000:
band_code = 'D'
elif SD.sample_rate >= 80 and SD.sample_rate < 250:
band_code = 'E'
elif SD.sample_rate >= 10 and SD.sample_rate < 80:
band_code = 'S'
else:
band_code = 'X'
'''
Instrument Code:
Changed from H to P at request from Akram
'''
instrument_code = 'P'
chan_set, orientation_code = get_true_channel(SD)
p_array_t['seed_band_code_s'] = band_code
p_array_t['seed_instrument_code_s'] = instrument_code
p_array_t['seed_orientation_code_s'] = orientation_code
p_array_t['seed_station_name_s'] = Das.split('X')[1]
p_array_t['sample_rate_i'] = SD.sample_rate
p_array_t['sample_rate_multiplier_i'] = 1
p_array_t['deploy_time/type_s'] = 'BOTH'
try:
f, i = modf(SD.deploy_epoch)
except Exception as e:
LOGGER.warning(
"Failed to read deploy epoch: {0}.".format(
e.message))
f = i = 0.
p_array_t['deploy_time/epoch_l'] = int(i)
p_array_t['deploy_time/ascii_s'] = time.ctime(int(i))
p_array_t['deploy_time/micro_seconds_i'] = int(f * 1000000.)
p_array_t['pickup_time/type_s'] = 'BOTH'
try:
f, i = modf(SD.pickup_epoch)
except Exception as e:
LOGGER.warning(
"Failed to read pickup epoch: {0}.".format(
e.message))
f = i = 0.
p_array_t['pickup_time/epoch_l'] = int(i)
p_array_t['pickup_time/ascii_s'] = time.ctime(int(i))
p_array_t['pickup_time/micro_seconds_i'] = int(f * 1000000.)
p_array_t['id_s'] = Das.split('X')[1]
# use manu_code to decide SMARTSOLO dasmodel
p_array_t['das/manufacturer_s'] = SD.manufacturer
try:
if SD.manufacturer == "SmartSolo":
p_array_t['das/model_s'] = 'SmartSolo IGU16'
elif SD.manufacturer == "FairfieldNodal":
if SD.chan_sets_per_scan >= 3:
p_array_t['das/model_s'] = "ZLAND 3C"
else:
p_array_t['das/model_s'] = 'ZLAND 1C'
except Exception as e:
LOGGER.warning(
"Failed to read channel sets per scan: {0}.".format(e.message))
p_array_t['das/model_s'] = 'zland-[13]C'
p_array_t['das/serial_number_s'] = Das
p_array_t[
'das/notes_s'] = "manufacturer and model not read from data file."
p_array_t['sensor/manufacturer_s'] = 'Geo Space'
p_array_t['sensor/model_s'] = 'GS-30CT'
p_array_t[
'sensor/notes_s'] = "manufacturer and model not read from file."
if SD.manufacturer == 'FairfieldNodal':
if TSPF:
p_array_t['location/description_s'] = (
"Converted from Texas State Plane FIPS zone 4202")
elif UTM:
p_array_t['location/description_s'] = (
"Converted from UTM Zone {0}".format(UTM))
else:
p_array_t['location/description_s'] = "Read from SEG-D as is."
else:
p_array_t['location/description_s'] = "Read from SEG-D as is."
p_array_t['location/coordinate_system_s'] = 'geographic'
p_array_t['location/projection_s'] = 'WGS84'
p_array_t['location/X/units_s'] = 'degrees'
p_array_t['location/X/value_d'] = LON
p_array_t['location/Y/units_s'] = 'degrees'
p_array_t['location/Y/value_d'] = LAT
p_array_t['location/Z/units_s'] = 'unknown'
try:
p_array_t['location/Z/value_d'] = th.ele / 10.
except Exception as e:
LOGGER.warning(
"Failed to read elevation: {0}.".format(e.message))
p_array_t['location/Z/value_d'] = 0.
p_array_t['channel_number_i'] = chan_set
try:
p_array_t['description_s'] = "DAS: {0}, Node ID: {1}".format(
Das, SD.id_number)
except Exception as e:
LOGGER.warning(
"Failed to read ID number: {0}.".format(
e.message))
try:
line = th.line_number
if line == -1:
line = 1
except Exception as e:
LOGGER.warning("Failed to read line number: {0}.".format(
e.message))
line = 0
dtime = p_array_t['deploy_time/epoch_l']
if line not in ARRAY_T:
ARRAY_T[line] = {}
if Das not in ARRAY_T[line]:
ARRAY_T[line][Das] = {}
if dtime not in ARRAY_T[line][Das]:
ARRAY_T[line][Das][dtime] = {}
if chan_set not in ARRAY_T[line][Das][dtime]:
ARRAY_T[line][Das][dtime][chan_set] = []
if not seen_sta():
ARRAY_T[line][Das][dtime][chan_set].append(p_array_t)
elif SD.manufacturer == "SmartSolo":
            # Need to update the row after each trace is read
            # because the pickup time is updated based on trace_epoch.
ARRAY_T[line][Das][dtime][chan_set][-1] = p_array_t
def process_reel_headers():
global RH
''' Save receiver record header information in\
Maps_g/Das_g_xxxxxxx/Hdr_a_xxxx file '''
def process(hdr, header_type):
ll = [{'FileType': 'SEG-D', 'HeaderType': header_type}, hdr]
log_array.append(json.dumps(
ll, sort_keys=True, indent=4).split('\n'))
log_array, log_name = getLOG()
for i in range(len(rh.general_header_blocks)):
ht = "General {0}".format(i+1)
process(rh.general_header_blocks[i], ht)
# Channel set descriptors
for i in range(len(rh.channel_set_descriptor)):
ht = "Channel Set {0}".format(i + 1)
process(rh.channel_set_descriptor, ht)
for i in range(len(rh.extended_headers)):
ht = "Extended {0}".format(i)
process(rh.extended_headers[i], ht)
# External header
process(rh.external_header, "External Header")
# External header shot
for i in range(len(rh.external_header_shot)):
ht = "External Shot {0}".format(i + 1)
process(rh.external_header_shot[i], ht)
RH = True
def process_trace_header():
''' Save trace header information in\
Maps_g/Das_g_xxxxxxx/Hdr_a_xxxx file '''
def process(hdr, header_type):
global TRACE_JSON
ll = [{'FileType': 'SEG-D', 'HeaderType': 'trace',
'HeaderSubType': header_type}, hdr]
TRACE_JSON.append(json.dumps(
ll, sort_keys=True, indent=4).split('\n'))
process(th.trace_header, "Trace Header")
for i in range(len(th.trace_header_N)):
ht = "Header N-{0}".format(i + 1)
process(th.trace_header_N[i], ht)
process_das()
process_array()
if not RH:
process_reel_headers()
process_trace_header()
def write_arrays(SD, Array_t):
''' Write /Experiment_g/Sorts_g/Array_t_xxx '''
lines = sorted(Array_t.keys())
# Loop through arrays/lines
for line in lines:
name = "Array_t_{0:03d}".format(int(line))
a = EX.ph5_g_sorts.newArraySort(name)
das_list = sorted(Array_t[line].keys())
# Loop through das_list
for das in das_list:
if SD.manufacturer == 'SmartSolo':
Array_t[line][das] = combine_array_entries(
name, Array_t[line][das])
dtimes = sorted(Array_t[line][das].keys())
# Loop through deploying times
for dtime in dtimes:
chan_sets = sorted(Array_t[line][das][dtime].keys())
# Loop through channel sets
for chan_set in chan_sets:
try:
for array_t in Array_t[line][das][dtime][chan_set]:
columns.populate(a, array_t)
except Exception as e:
print(e.message)
def combine_array_entries(aName, aOfDas):
"""
    :param aName: "Array_t_xxx" to include in warning messages
    :param aOfDas: {dtime: {chan_set: [entry]}} in which each dtime holds one entry
    :return: aOnDeployTimes, which has the same structure as aOfDas but with
        entries merged when a deploy time does not come after the previous
        entry's pickup time
"""
aOnChannels = {} # {c_i: list of entries according to dtimes' order}
dtimes = sorted(aOfDas.keys())
for dtime in dtimes:
chan_sets = sorted(aOfDas[dtime].keys())
for c in chan_sets:
if c not in aOnChannels:
aOnChannels[c] = []
for entry in aOfDas[dtime][c]:
aOnChannels[c].append(entry)
    # Same structure as aOfDas, but entries are combined when the deploy time of
    # the current entry does not come after the pickup time of the previous one
    # (overlaps are combined too, with a warning): {dtime: {c: [combined entry]}}
aOnDeployTimes = {}
for c in aOnChannels:
prevPickupTime = 0
currDeployTime = 0
dEntries = aOnChannels[c]
for d in dEntries:
deployTime = d['deploy_time/epoch_l']
if deployTime > prevPickupTime:
currDeployTime = deployTime
if deployTime not in aOnDeployTimes:
aOnDeployTimes[deployTime] = {}
if c not in aOnDeployTimes[deployTime]:
aOnDeployTimes[deployTime][c] = [d]
else:
uEntry = aOnDeployTimes[currDeployTime][c][0]
msg = "Das %s - %s - station %s - chan %s: " % (
d['das/serial_number_s'], aName,
d['id_s'], d['channel_number_i'])
msg += "Combine %s"
msg += ("entry [%s - %s] into previous entry [%s - %s]" %
(d['deploy_time/ascii_s'],
d['pickup_time/ascii_s'],
uEntry['deploy_time/ascii_s'],
uEntry['pickup_time/ascii_s']))
descr = ""
if deployTime < prevPickupTime:
descr = "overlapping "
msg %= descr
LOGGER.warning(msg)
uEntry['pickup_time/epoch_l'] = d['pickup_time/epoch_l']
uEntry['pickup_time/ascii_s'] = d['pickup_time/ascii_s']
uEntry['pickup_time/micro_seconds_i'] = d['pickup_time/'
'micro_seconds_i']
prevPickupTime = d['pickup_time/epoch_l']
return aOnDeployTimes
def writeINDEX():
''' Write /Experiment_g/Receivers_g/Index_t '''
global DAS_INFO, MAP_INFO, INDEX_T_DAS, INDEX_T_MAP
dass = sorted(DAS_INFO.keys())
for das in dass:
di = {}
mi = {}
start = sys.maxsize
stop = 0.
dm = [(d, m) for d in DAS_INFO[das] for m in MAP_INFO[das]]
for d, m in dm:
di['external_file_name_s'] = d.ph5file
mi['external_file_name_s'] = m.ph5file
di['hdf5_path_s'] = d.ph5path
mi['hdf5_path_s'] = m.ph5path
di['serial_number_s'] = das
mi['serial_number_s'] = das
if d.startepoch < start:
start = d.startepoch
if d.stopepoch > stop:
stop = d.stopepoch
di['time_stamp/epoch_l'] = int(time.time())
mi['time_stamp/epoch_l'] = int(time.time())
di['time_stamp/micro_seconds_i'] = 0
mi['time_stamp/micro_seconds_i'] = 0
di['time_stamp/type_s'] = 'BOTH'
mi['time_stamp/type_s'] = 'BOTH'
di['time_stamp/ascii_s'] = time.ctime(di['time_stamp/epoch_l'])
mi['time_stamp/ascii_s'] = time.ctime(mi['time_stamp/epoch_l'])
di['start_time/epoch_l'] = int(modf(start)[1])
mi['start_time/epoch_l'] = int(modf(start)[1])
di['start_time/micro_seconds_i'] = int(modf(start)[0] * 1000000)
mi['start_time/micro_seconds_i'] = int(modf(start)[0] * 1000000)
di['start_time/type_s'] = 'BOTH'
mi['start_time/type_s'] = 'BOTH'
di['start_time/ascii_s'] = time.ctime(start)
mi['start_time/ascii_s'] = time.ctime(start)
di['end_time/epoch_l'] = modf(stop)[1]
mi['end_time/epoch_l'] = modf(stop)[1]
di['end_time/micro_seconds_i'] = int(modf(stop)[0] * 1000000)
mi['end_time/micro_seconds_i'] = int(modf(stop)[0] * 1000000)
di['end_time/type_s'] = 'BOTH'
mi['end_time/type_s'] = 'BOTH'
di['end_time/ascii_s'] = time.ctime(stop)
mi['end_time/ascii_s'] = time.ctime(stop)
EX.ph5_g_receivers.populateIndex_t(di)
EX.ph5_g_maps.populateIndex_t(mi)
rows, keys = EX.ph5_g_receivers.read_index()
INDEX_T_DAS = Rows_Keys(rows, keys)
rows, keys = EX.ph5_g_maps.read_index()
INDEX_T_MAP = Rows_Keys(rows, keys)
DAS_INFO = {}
MAP_INFO = {}
def txncsptolatlon(northing, easting):
'''
Sweetwater
Convert texas state plane coordinates in feet to
geographic coordinates, WGS84.
'''
# Texas NC state plane feet Zone 4202
sp = Proj(init='epsg:32038')
# WGS84, geographic
wgs = Proj(init='epsg:4326', proj='latlong')
# Texas SP coordinates: survey foot is 1200/3937 meters
lon, lat = transform(sp, wgs, easting * 0.30480060960121924,
northing * 0.30480060960121924)
return lat, lon
def utmcsptolatlon(northing, easting):
'''
Mount Saint Helens
Convert UTM to
geographic coordinates, WGS84.
'''
# UTM
new_UTM = re.split(r'(\d+)', UTM)
utmzone = str(new_UTM[1])
if str(new_UTM[2]).upper() == 'N':
NS = 'north'
elif str(new_UTM[2]).upper() == 'S':
NS = 'south'
else:
NS = 'north'
utmc = Proj("+proj=utm +zone="+utmzone+" +"+NS+" +ellps=WGS84")
# WGS84, geographic
wgs = Proj(init='epsg:4326', proj='latlong')
#
lon, lat = transform(utmc, wgs, easting, northing)
return lat, lon
def get_latlon(manu, th):
try:
if manu == 'FairfieldNodal':
if UTM:
# UTM
LAT, LON = utmcsptolatlon(th.lat/10., th.lon/10.)
elif TSPF:
# Texas State Plane coordinates
LAT, LON = txncsptolatlon(th.lat/10., th.lon/10.)
else:
LAT = th.lat / 10.
LON = th.lon / 10.
elif manu == 'SmartSolo':
LAT = th.lat
LON = th.lon
except Exception as e:
LOGGER.warning(
"Failed to convert location: {0}.\n".format(
e.message))
return LAT, LON
def main():
import time
then = time.time()
from numpy import append as npappend
def prof():
global RESP, INDEX_T_DAS, INDEX_T_MAP, SD, EXREC, MINIPH5, Das, SIZE,\
ARRAY_T, RH, LAT, LON, F, TRACE_JSON, APPEND
MINIPH5 = None
ARRAY_T = {}
def get_das(sd, warn=False):
if sd.manufacturer == 'FairfieldNodal':
# Return line_station or das#[-9:]
try:
das = "{0}X{1}".format(
sd.reel_headers.extended_headers[2].line_number,
sd.reel_headers.extended_headers[2].receiver_point)
except Exception:
try:
das = "{0}X{1}".format(
sd.reel_headers.external_header.receiver_line,
sd.reel_headers.external_header.receiver_point)
except Exception:
das = "sn" + \
str(sd.reel_headers.general_header_blocks[0].
manufactures_sn)
                    if sd.reel_headers.general_header_blocks[0]\
                            .manufactures_sn == 0:
das = "id" + \
str(sd.reel_headers
.extended_headers[0].id_number)[-9:]
elif sd.manufacturer == 'SmartSolo':
line_number = sd.trace_headers.line_number
receiver_point = sd.trace_headers.receiver_point
if line_number == -1:
if warn:
LOGGER.warning(
"Line number is using invalid default value -1. "
"Using 1 instead.")
line_number = 1
if receiver_point == -1:
if warn:
LOGGER.warning(
"Receiver point (stationID) is using invalid "
"default value -1. Using 1 instead.")
receiver_point = 1
das = "{0}X{1}".format(line_number, receiver_point)
# das = sd.id_number
return das
def get_node(sd):
# Return node part number, node id, and number of channels
pn = None # Part Number
id = None # Node ID
nc = None # Number of channel sets
try:
nc = sd.reel_headers.general_header_blocks[0][
'chan_sets_per_scan']
id = sd.id_number
if sd.manufacturer == 'FairfieldNodal':
pn = sd.reel_headers.extended_headers[0]['part_number']
except Exception:
pass
return pn, id, nc
try:
get_args()
except Exception as err_msg:
LOGGER.error(err_msg)
return 1
initializeExperiment()
LOGGER.info("segd2ph5 {0}".format(PROG_VERSION))
LOGGER.info("{0}".format(sys.argv))
if len(FILES) > 0:
RESP = Resp(EX.ph5_g_responses)
rows, keys = EX.ph5_g_receivers.read_index()
INDEX_T_DAS = Rows_Keys(rows, keys)
rows, keys = EX.ph5_g_maps.read_index()
INDEX_T_MAP = Rows_Keys(rows, keys)
for f in FILES:
F = f
traces = []
TRACE_JSON = []
try:
SIZE = os.path.getsize(f)
except Exception as e:
LOGGER.error("Failed to read {0}, {1}.\
Skipping...\n".format(f, str(e.message)))
continue
try:
segd_reader = get_segdreader(f, MANUFACTURERS_CODE)
except Exception:
continue
SD = segd_reader.Reader(infile=f)
LAT = None
LON = None
RH = False
try:
SD.process_general_headers()
SD.process_channel_set_descriptors()
SD.process_extended_headers()
SD.process_external_headers()
if SD.manufacturer == 'SmartSolo':
SD.process_trace_headers()
except segdreader.InputsError as e:
LOGGER.error(
"Possible bad SEG-D file -- {0}".format(
"".join(e.message)))
continue
nleft = APPEND
Das = get_das(SD, warn=True)
if not Das.isalnum():
LOGGER.error(
"DAS %s is not alphanumeric. Can't process." % Das)
return 1
part_number, node_id, number_of_channels = get_node(SD)
EXREC = get_current_data_only(SIZE, Das)
LOGGER.info(":<Processing>: {0}\n".format(SD.name()))
LOGGER.info(
"Processing: {0}... Size: {1}\n".format(SD.name(), SIZE))
if EXREC.filename != MINIPH5:
LOGGER.info("Opened: {0}...\n".format(EXREC.filename))
if node_id is None:
node_id_str = ''
else:
node_id_str = ', Node ID: %s' % node_id
LOGGER.info(
"DAS: {0}{1}, PN: {2}, Channels: {3}".format(
Das, node_id_str, part_number, number_of_channels))
MINIPH5 = EXREC.filename
n = 0
trace_index = 0
trace_headers_list = []
while True:
if SD.isEOF():
if n != 0:
thl = []
chan_set = None
t = None
new_traces = []
for T in traces:
thl.append(T.headers)
if chan_set is None:
chan_set = T.headers.trace_header.channel_set
if chan_set == T.headers.trace_header.channel_set:
if isinstance(t, type(None)):
t = T.trace
else:
t = npappend(t, T.trace)
else:
new_traces.append(T)
traces = new_traces
process_traces(SD.reel_headers, thl[0], t)
if DAS_INFO:
writeINDEX()
break
try:
trace, cs = SD.process_trace(trace_index)
trace_index += 1
except segdreader.InputsError as e:
LOGGER.error("{0}\n".format(F))
LOGGER.error(
"Possible bad SEG-D file -- {0}".format(
"".join(e.message)))
break
if not LAT and not LON:
LAT, LON = get_latlon(SD.manufacturer, SD.trace_headers)
trace_headers_list.append(SD.trace_headers)
if n == 0:
traces.append(Trace(trace, SD.trace_headers))
n = 1
Das = get_das(SD)
else:
traces.append(Trace(trace, SD.trace_headers))
if n >= nleft or EVERY is True:
thl = []
chan_set = None
chan_set_next = None
t = None
new_traces = []
# Need to check for gaps here!
for T in traces:
thl.append(T.headers)
if chan_set is None:
chan_set = T.headers.trace_header.channel_set
if chan_set == T.headers.trace_header.channel_set:
if isinstance(t, type(None)):
t = T.trace
else:
t = npappend(t, T.trace)
else:
new_traces.append(T)
if chan_set_next is None:
chan_set_next =\
T.headers.trace_header.channel_set
traces = new_traces
process_traces(SD.reel_headers, thl[0], t)
if new_traces:
nleft = APPEND - len(new_traces)
else:
nleft = APPEND
chan_set = chan_set_next
chan_set_next = None
if DAS_INFO:
writeINDEX()
n = 0
trace_headers_list = []
continue
n += 1
update_external_references()
if TRACE_JSON:
log_array, name = getLOG()
for line in TRACE_JSON:
log_array.append(line)
LOGGER.info(":<Finished>: {0}\n".format(F))
write_arrays(SD, ARRAY_T)
seconds = time.time() - then
try:
EX.ph5close()
EXREC.ph5close()
except Exception as e:
LOGGER.warning("{0}\n".format("".join(e.message)))
LOGGER.info("Done...{0:b}".format(int(seconds / 6.)))
logging.shutdown()
prof()
if __name__ == '__main__':
main()
|
the-stack_0_14944 |
import os
import sys
def ProcessFile(fileObj):
result = ""
line = fileObj.readline()
    # Skip to the related materials / manual section
    section_markers = ("=Related Material", "= Related Material", "=Manual",
                       "==Manual", "= Manual", "== Manual")
    while not (any(m in line for m in section_markers) or line == ""):
result += line
line = fileObj.readline()
while not(line == ""):
if not(line[0] == "=" or line[0] == " " or line[0] == "\n" or line[0] == "-"):
line = "- " + line
result += line
line = fileObj.readline()
return result
# Set the directory you want to start from
rootDir = '.'
translatedDir = "I:\\github\\translated_files"
for dirName, subdirList, fileList in os.walk(rootDir):
#print('Found directory: %s' % dirName)
for fname in fileList:
filePath = dirName + "\\" + fname
fileCopyPath = dirName + "\\" + fname
if(".remarkup" in filePath):
print("processing: " + filePath)
fileObj = open(filePath, "r")
result = ProcessFile(fileObj)
result += " \n "
fileObj.close()
fileObj = open(fileCopyPath, "w")
fileObj.write(result)
fileObj.close()
|
the-stack_0_14945 | from django.http import Http404
from django.conf import settings
from django.shortcuts import get_list_or_404
from rest_framework.generics import RetrieveAPIView, DestroyAPIView, GenericAPIView
from rest_framework.response import Response
from client.authentication import ClientSenderIdAuthentication
from submission.models import Submission, Sender
from submission.constants import (
ACTION_NAME_EMAIL,
ACTION_NAME_GOV_NOTIFY_EMAIL,
ACTION_NAME_PARDOT,
ACTION_NAME_ZENDESK,
)
class TestAPIView(GenericAPIView):
authentication_classes = [ClientSenderIdAuthentication]
def dispatch(self, *args, **kwargs):
if not settings.FEATURE_TEST_API_ENABLED:
raise Http404
return super().dispatch(*args, **kwargs)
class SubmissionsTestAPIView(TestAPIView, DestroyAPIView, RetrieveAPIView):
queryset = Submission.objects.all()
http_method_names = ('delete', 'get',)
@staticmethod
def data_and_meta(submission: Submission):
return {
'data': dict(submission.data),
'meta': dict(submission.meta),
'is_sent': submission.is_sent,
'form_url': submission.form_url,
}
def get_submissions(self, email_address):
results = []
for submission in self.queryset.all():
if submission.meta['action_name'] == ACTION_NAME_PARDOT:
if submission.data['email'] == email_address:
results.append(self.data_and_meta(submission))
if submission.meta['action_name'] in [
ACTION_NAME_GOV_NOTIFY_EMAIL, ACTION_NAME_ZENDESK
]:
if submission.meta['email_address'] == email_address:
results.append(self.data_and_meta(submission))
if submission.meta['action_name'] == ACTION_NAME_EMAIL:
if email_address in submission.meta['recipients']:
results.append(self.data_and_meta(submission))
return results
def get(self, request, *args, **kwargs):
email_address = kwargs['email_address']
meta = self.get_submissions(email_address)
return Response(meta) if meta else Response(status=404)
def delete(self, request, **kwargs):
test_email_pattern = r'^test\+(.*)@directory\.uktrade\.io'
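        # Matches test addresses of the form 'test+<anything>@directory.uktrade.io'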
try:
test_submissions = get_list_or_404(
Submission,
sender__email_address__regex=test_email_pattern,
)
for submission in test_submissions:
submission.delete()
except Http404:
try:
test_email_actions = get_list_or_404(
Submission,
meta__recipients__0__regex=test_email_pattern,
)
for email_notification in test_email_actions:
email_notification.delete()
except Http404:
try:
test_zendesk_actions = get_list_or_404(
Submission,
meta__email_address__regex=test_email_pattern,
)
for zendesk_action in test_zendesk_actions:
zendesk_action.delete()
except Http404:
test_gov_notify_actions = get_list_or_404(
Submission,
meta__sender__email_address__regex=test_email_pattern,
)
for gov_notify_action in test_gov_notify_actions:
gov_notify_action.delete()
return Response(status=204)
class SendersTestAPIView(TestAPIView, DestroyAPIView):
queryset = Sender.objects.all()
    http_method_names = ('delete',)
def delete(self, request, **kwargs):
test_email_pattern = r'^test\+(.*)@directory\.uktrade\.io'
test_senders = get_list_or_404(
Sender,
email_address__regex=test_email_pattern,
)
for sender in test_senders:
sender.delete()
return Response(status=204)
|
the-stack_0_14948 | # code adpated from https://github.com/m-lundberg/simple-pid
import time
import warnings
def _clamp(value, limits):
lower, upper = limits
if value is None:
return None
elif (upper is not None) and (value > upper):
return upper
elif (lower is not None) and (value < lower):
return lower
return value
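# For example: _clamp(15, (0, 10)) returns 10, _clamp(-3, (0, 10)) returns 0,
# and _clamp(None, (0, 10)) returns None (a None value passes through untouched).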
try:
# Get monotonic time to ensure that time deltas are always positive
_current_time = time.monotonic
except AttributeError:
# time.monotonic() not available (using python < 3.3), fallback to time.time()
_current_time = time.time
warnings.warn('time.monotonic() not available in python < 3.3, using time.time() as fallback')
class PID(object):
"""A simple PID controller."""
def __init__(
self,
kp=1.0,
ki=0.0,
kd=0.0,
setpoint=0,
sample_time=0.01,
output_limits=(None, None),
auto_mode=True,
proportional_on_measurement=False,
error_map=None,
):
"""
Initialize a new PID controller.
:param kp: The value for the proportional gain kp
:param ki: The value for the integral gain ki
:param kd: The value for the derivative gain kd
:param setpoint: The initial setpoint that the PID will try to achieve
:param sample_time: The time in seconds which the controller should wait before generating
a new output value. The PID works best when it is constantly called (eg. during a
loop), but with a sample time set so that the time difference between each update is
(close to) constant. If set to None, the PID will compute a new output value every time
it is called.
:param output_limits: The initial output limits to use, given as an iterable with 2
elements, for example: (lower, upper). The output will never go below the lower limit
or above the upper limit. Either of the limits can also be set to None to have no limit
in that direction. Setting output limits also avoids integral windup, since the
integral term will never be allowed to grow outside of the limits.
:param auto_mode: Whether the controller should be enabled (auto mode) or not (manual mode)
:param proportional_on_measurement: Whether the proportional term should be calculated on
the input directly rather than on the error (which is the traditional way). Using
proportional-on-measurement avoids overshoot for some types of systems.
:param error_map: Function to transform the error value in another constrained value.
"""
self.kp, self.ki, self.kd = kp, ki, kd
self.setpoint = setpoint
self.sample_time = sample_time
self._min_output, self._max_output = None, None
self._auto_mode = auto_mode
self.proportional_on_measurement = proportional_on_measurement
self.error_map = error_map
self._proportional = 0
self._integral = 0
self._derivative = 0
self._last_time = None
self._last_output = None
self._last_input = None
self.output_limits = output_limits
self.reset()
def __call__(self, input_, dt=None):
"""
Update the PID controller.
Call the PID controller with *input_* and calculate and return a control output if
sample_time seconds has passed since the last update. If no new output is calculated,
return the previous output instead (or None if no value has been calculated yet).
:param dt: If set, uses this value for timestep instead of real time. This can be used in
simulations when simulation time is different from real time.
"""
if not self.auto_mode:
return self._last_output
now = _current_time()
if dt is None:
dt = now - self._last_time if (now - self._last_time) else 1e-16
elif dt <= 0:
raise ValueError('dt has negative value {}, must be positive'.format(dt))
if self.sample_time is not None and dt < self.sample_time and self._last_output is not None:
# Only update every sample_time seconds
return self._last_output
# Compute error terms
error = self.setpoint - input_
d_input = input_ - (self._last_input if (self._last_input is not None) else input_)
# Check if must map the error
if self.error_map is not None:
error = self.error_map(error)
# Compute the proportional term
if not self.proportional_on_measurement:
# Regular proportional-on-error, simply set the proportional term
self._proportional = self.kp * error
else:
# Add the proportional error on measurement to error_sum
self._proportional -= self.kp * d_input
# Compute integral and derivative terms
self._integral += self.ki * error * dt
self._integral = _clamp(self._integral, self.output_limits) # Avoid integral windup
self._derivative = -self.kd * d_input / dt
# Compute final output
output = self._proportional + self._integral + self._derivative
output = _clamp(output, self.output_limits)
# Keep track of state
self._last_output = output
self._last_input = input_
self._last_time = now
return output
def __repr__(self):
return (
'{self.__class__.__name__}('
'kp={self.kp!r}, ki={self.ki!r}, kd={self.kd!r}, '
'setpoint={self.setpoint!r}, sample_time={self.sample_time!r}, '
'output_limits={self.output_limits!r}, auto_mode={self.auto_mode!r}, '
            'proportional_on_measurement={self.proportional_on_measurement!r}, '
'error_map={self.error_map!r}'
')'
).format(self=self)
@property
def components(self):
"""
The P-, I- and D-terms from the last computation as separate components as a tuple. Useful
for visualizing what the controller is doing or when tuning hard-to-tune systems.
"""
return self._proportional, self._integral, self._derivative
@property
def tunings(self):
"""The tunings used by the controller as a tuple: (kp, ki, kd)."""
return self.kp, self.ki, self.kd
@tunings.setter
def tunings(self, tunings):
"""Set the PID tunings."""
self.kp, self.ki, self.kd = tunings
@property
def auto_mode(self):
"""Whether the controller is currently enabled (in auto mode) or not."""
return self._auto_mode
@auto_mode.setter
def auto_mode(self, enabled):
"""Enable or disable the PID controller."""
self.set_auto_mode(enabled)
def set_auto_mode(self, enabled, last_output=None):
"""
Enable or disable the PID controller, optionally setting the last output value.
This is useful if some system has been manually controlled and if the PID should take over.
In that case, disable the PID by setting auto mode to False and later when the PID should
be turned back on, pass the last output variable (the control variable) and it will be set
as the starting I-term when the PID is set to auto mode.
:param enabled: Whether auto mode should be enabled, True or False
:param last_output: The last output, or the control variable, that the PID should start
from when going from manual mode to auto mode. Has no effect if the PID is already in
auto mode.
"""
if enabled and not self._auto_mode:
# Switching from manual mode to auto, reset
self.reset()
self._integral = last_output if (last_output is not None) else 0
self._integral = _clamp(self._integral, self.output_limits)
self._auto_mode = enabled
@property
def output_limits(self):
"""
The current output limits as a 2-tuple: (lower, upper).
See also the *output_limits* parameter in :meth:`PID.__init__`.
"""
return self._min_output, self._max_output
@output_limits.setter
def output_limits(self, limits):
"""Set the output limits."""
if limits is None:
self._min_output, self._max_output = None, None
return
min_output, max_output = limits
if (None not in limits) and (max_output < min_output):
raise ValueError('lower limit must be less than upper limit')
self._min_output = min_output
self._max_output = max_output
self._integral = _clamp(self._integral, self.output_limits)
self._last_output = _clamp(self._last_output, self.output_limits)
def reset(self):
"""
Reset the PID controller internals.
This sets each term to 0 as well as clearing the integral, the last output and the last
input (derivative calculation).
"""
self._proportional = 0
self._integral = 0
self._derivative = 0
self._integral = _clamp(self._integral, self.output_limits)
self._last_time = _current_time()
self._last_output = None
self._last_input = None
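# Minimal usage sketch (illustrative only; `read_sensor` and `apply_output` are
# hypothetical stand-ins for whatever system is being controlled):
#
#     pid = PID(kp=1.0, ki=0.1, kd=0.05, setpoint=10, output_limits=(0, 100))
#     while True:
#         measurement = read_sensor()
#         control = pid(measurement)
#         apply_output(control)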
|
the-stack_0_14950 | """
sphinx.builders.texinfo
~~~~~~~~~~~~~~~~~~~~~~~
Texinfo builder.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
from os import path
from typing import Any, Dict, Iterable, List, Tuple, Union
from docutils import nodes
from docutils.frontend import OptionParser
from docutils.io import FileOutput
from sphinx import addnodes, package_dir
from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.config import Config
from sphinx.environment.adapters.asset import ImageAdapter
from sphinx.errors import NoUri
from sphinx.locale import _, __
from sphinx.util import logging, progress_message, status_iterator
from sphinx.util.console import darkgreen # type: ignore
from sphinx.util.docutils import new_document
from sphinx.util.fileutil import copy_asset_file
from sphinx.util.nodes import inline_all_toctrees
from sphinx.util.osutil import SEP, ensuredir, make_filename_from_project
from sphinx.writers.texinfo import TexinfoTranslator, TexinfoWriter
logger = logging.getLogger(__name__)
template_dir = os.path.join(package_dir, 'templates', 'texinfo')
class TexinfoBuilder(Builder):
"""
Builds Texinfo output to create Info documentation.
"""
name = 'texinfo'
format = 'texinfo'
epilog = __('The Texinfo files are in %(outdir)s.')
if os.name == 'posix':
epilog += __("\nRun 'make' in that directory to run these through "
"makeinfo\n"
"(use 'make info' here to do that automatically).")
supported_image_types = ['image/png', 'image/jpeg',
'image/gif']
default_translator_class = TexinfoTranslator
def init(self) -> None:
self.docnames = [] # type: Iterable[str]
self.document_data = [] # type: List[Tuple[str, str, str, str, str, str, str, bool]]
def get_outdated_docs(self) -> Union[str, List[str]]:
return 'all documents' # for now
def get_target_uri(self, docname: str, typ: str = None) -> str:
if docname not in self.docnames:
raise NoUri(docname, typ)
else:
return '%' + docname
def get_relative_uri(self, from_: str, to: str, typ: str = None) -> str:
# ignore source path
return self.get_target_uri(to, typ)
def init_document_data(self) -> None:
preliminary_document_data = [list(x) for x in self.config.texinfo_documents]
if not preliminary_document_data:
logger.warning(__('no "texinfo_documents" config value found; no documents '
'will be written'))
return
# assign subdirs to titles
self.titles = [] # type: List[Tuple[str, str]]
for entry in preliminary_document_data:
docname = entry[0]
if docname not in self.env.all_docs:
logger.warning(__('"texinfo_documents" config value references unknown '
'document %s'), docname)
continue
self.document_data.append(entry) # type: ignore
if docname.endswith(SEP + 'index'):
docname = docname[:-5]
self.titles.append((docname, entry[2]))
def write(self, *ignored: Any) -> None:
self.init_document_data()
for entry in self.document_data:
docname, targetname, title, author = entry[:4]
targetname += '.texi'
direntry = description = category = ''
if len(entry) > 6:
direntry, description, category = entry[4:7]
toctree_only = False
if len(entry) > 7:
toctree_only = entry[7]
destination = FileOutput(
destination_path=path.join(self.outdir, targetname),
encoding='utf-8')
with progress_message(__("processing %s") % targetname):
appendices = self.config.texinfo_appendices or []
doctree = self.assemble_doctree(docname, toctree_only, appendices=appendices)
with progress_message(__("writing")):
self.post_process_images(doctree)
docwriter = TexinfoWriter(self)
settings = OptionParser(
defaults=self.env.settings,
components=(docwriter,),
read_config_files=True).get_default_values() # type: Any
settings.author = author
settings.title = title
settings.texinfo_filename = targetname[:-5] + '.info'
settings.texinfo_elements = self.config.texinfo_elements
settings.texinfo_dir_entry = direntry or ''
settings.texinfo_dir_category = category or ''
settings.texinfo_dir_description = description or ''
settings.docname = docname
doctree.settings = settings
docwriter.write(doctree, destination)
self.copy_image_files(targetname[:-5])
def assemble_doctree(self, indexfile: str, toctree_only: bool, appendices: List[str]) -> nodes.document: # NOQA
self.docnames = set([indexfile] + appendices)
logger.info(darkgreen(indexfile) + " ", nonl=True)
tree = self.env.get_doctree(indexfile)
tree['docname'] = indexfile
if toctree_only:
# extract toctree nodes from the tree and put them in a
# fresh document
new_tree = new_document('<texinfo output>')
new_sect = nodes.section()
new_sect += nodes.title('<Set title in conf.py>',
'<Set title in conf.py>')
new_tree += new_sect
for node in tree.traverse(addnodes.toctree):
new_sect += node
tree = new_tree
largetree = inline_all_toctrees(self, self.docnames, indexfile, tree,
darkgreen, [indexfile])
largetree['docname'] = indexfile
for docname in appendices:
appendix = self.env.get_doctree(docname)
appendix['docname'] = docname
largetree.append(appendix)
logger.info('')
logger.info(__("resolving references..."))
self.env.resolve_references(largetree, indexfile, self)
# TODO: add support for external :ref:s
for pendingnode in largetree.traverse(addnodes.pending_xref):
docname = pendingnode['refdocname']
sectname = pendingnode['refsectname']
newnodes = [nodes.emphasis(sectname, sectname)] # type: List[nodes.Node]
for subdir, title in self.titles:
if docname.startswith(subdir):
newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
newnodes.append(nodes.emphasis(title, title))
newnodes.append(nodes.Text(')', ')'))
break
else:
pass
pendingnode.replace_self(newnodes)
return largetree
def finish(self) -> None:
self.copy_support_files()
def copy_image_files(self, targetname: str) -> None:
if self.images:
stringify_func = ImageAdapter(self.app.env).get_original_image_uri
for src in status_iterator(self.images, __('copying images... '), "brown",
len(self.images), self.app.verbosity,
stringify_func=stringify_func):
dest = self.images[src]
try:
imagedir = path.join(self.outdir, targetname + '-figures')
ensuredir(imagedir)
copy_asset_file(path.join(self.srcdir, src),
path.join(imagedir, dest))
except Exception as err:
logger.warning(__('cannot copy image file %r: %s'),
path.join(self.srcdir, src), err)
def copy_support_files(self) -> None:
try:
with progress_message(__('copying Texinfo support files')):
logger.info('Makefile ', nonl=True)
copy_asset_file(os.path.join(template_dir, 'Makefile'), self.outdir)
except OSError as err:
logger.warning(__("error writing file Makefile: %s"), err)
def default_texinfo_documents(config: Config) -> List[Tuple[str, str, str, str, str, str, str]]: # NOQA
""" Better default texinfo_documents settings. """
filename = make_filename_from_project(config.project)
return [(config.master_doc, filename, config.project, config.author, filename,
'One line description of project', 'Miscellaneous')]
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_builder(TexinfoBuilder)
app.add_config_value('texinfo_documents', default_texinfo_documents, None)
app.add_config_value('texinfo_appendices', [], None)
app.add_config_value('texinfo_elements', {}, None)
app.add_config_value('texinfo_domain_indices', True, None, [list])
app.add_config_value('texinfo_show_urls', 'footnote', None)
app.add_config_value('texinfo_no_detailmenu', False, None)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
|
the-stack_0_14951 | import re, textwrap
import z3
from .utils import logger
from .z3api import z3utils
###############################################################################
# Serialize Declarations
###############################################################################
def smt_sort_str(sort):
assert isinstance(sort, z3.SortRef), \
"Received {} of type {} != SortRef".format(c, type(c).__name__)
if z3utils.is_array_sort(sort):
return '(Array {} {})'.format(smt_sort_str(sort.domain()), smt_sort_str(sort.range()))
else:
return sort.name()
def smt_const_decl(c):
assert isinstance(c, z3.ExprRef), \
"Received {} of type {} != ExprRef".format(c, type(c).__name__)
assert c.decl().arity() == 0, \
"Received {} of arity {} != 0 as const decl".format(c, c.decl().arity())
return '(declare-fun {} () {})'.format(c, smt_sort_str(c.decl().range()))
def smt_list(ls):
return '({})'.format(' '.join(ls))
def smt_fun_decl(f):
assert isinstance(f, z3.FuncDeclRef), \
"Received {} of type {} != FuncDeclRef".format(f, type(f).__name__)
dom = smt_list([smt_sort_str(f.domain(i)) for i in range(0,f.arity())])
rng = smt_sort_str(f.range())
return '(declare-fun {} {} {})'.format(f, dom, rng)
def smt_sort_decl(sort):
return '(declare-sort {} 0)'.format(sort)
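# For example (assuming the standard z3 Python API): smt_const_decl(z3.Int('x'))
# returns '(declare-fun x () Int)' and smt_sort_decl('Loc') returns
# '(declare-sort Loc 0)'.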
###############################################################################
# Serialize Expression
###############################################################################
def translate_head_func_decl(expr):
decl = expr.decl()
assert isinstance(decl,z3.FuncDeclRef)
s = str(decl)
if s == '==': return '='
elif z3.is_K(expr): #s == 'K':
# Const array => Must include type
return '(as const {})'.format(smt_sort_str(decl.range()))
elif z3.is_map(expr):
# FIXME: Not general enough for data maps?
return '(_ map {})'.format(str(z3.get_map_func(expr)).lower())
else: return s.lower()
def expr_to_smt2_string(encoding, multi_line = True, indent = ' '):
assert isinstance(encoding, z3.ExprRef), \
'Received {} of type {} for ExprRef serialization'.format(encoding, type(encoding).__name__)
if not multi_line:
indent = ''
pat = re.compile(r'\s+')
def smtify(expr, children):
if z3.is_var(expr):
# TODO: Allow more than one quantified var?
assert str(expr)=='Var(0)', \
'Currently only support for expressions with a single quantified variable'
return '_x_'
elif z3.is_quantifier(expr):
assert expr.num_vars() == 1, \
'Currently only support for expressions with a single quantified variable'
            return '({} ((_x_ {}))\n{})'.format(
'forall' if expr.is_forall() else 'exists',
expr.var_sort(0),
children[0]
)
else:
#print('{!r} with children {!r}'.format(expr, children))
assert z3.is_app(expr)
assert isinstance(encoding, z3.ExprRef)
# TODO: Improve/simplify the whitespace handling
sjoin = '\n' if multi_line else ' '
child_string = sjoin.join(children)
if indent:
child_string = textwrap.indent(child_string, indent)
            stripped = pat.sub(' ', child_string).lstrip(' ')
if len(stripped) < 20 or not multi_line:
rep = stripped
else:
rep = '\n' + child_string
res = '({} {})'.format(
translate_head_func_decl(expr),
rep)
#print('Will return {}'.format(res))
return res
def leaf_to_smt(leaf):
#print('Leaf: {!r}'.format(leaf))
s = str(leaf)
if (s == 'True' or s == 'False'):
return s.lower()
else:
return s
return z3utils.expr_fold(encoding, leaf_to_smt, smtify)
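# Illustrative example (assuming the z3 Python API): for an encoding such as
# z3.Int('x') + 1 == 2, expr_to_smt2_string is expected to yield the SMT-LIB
# term '(= (+ x 1) 2)'; larger expressions are spread over indented lines.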
###############################################################################
# Serialize Complete Encoding
###############################################################################
def write_encoding_to_file(file, encoding, structs):
with open(file, 'w') as f:
f.write(serialize_encoding(encoding, structs))
def serialize_encoding(encoding, structs):
assert(isinstance(encoding, z3.ExprRef))
# Const decls
consts = z3utils.collect_consts(encoding)
ordered = sorted(consts, key=z3utils.by_complexity)
const_decls = [smt_const_decl(c) for c in ordered]
# Generate sort-based decls based on the sorts for which we have constants
sort_decls = []
fun_refs = []
# FIXME: With the lambda backend we declare functions for data structures that aren't used (because they all use the same sort, Int) => Determine based on parsed input instead?
for struct in structs:
sort = struct.sort
if z3utils.contains_sort(consts, sort):
if sort.to_declare():
logger.debug('Declaring uninterpreted sort {}'.format(sort))
sort_decls.append(sort)
fun_refs += struct.heap_fns()
main_decls = ([smt_sort_decl(s) for s in sort_decls]
+ [smt_fun_decl(f) for f in fun_refs])
# Full list of declarations
decls = main_decls + const_decls
# use our encoding of the assertion directly for readability
smt2_encoding = expr_to_smt2_string(encoding)
assertion = '(assert \n {}\n)'.format(smt2_encoding)
checks = '(check-sat)\n(get-model)'
# TODO: Re-enable set-logic for the quantified backend?
logic = '' # '(set-logic AUFLIA)' + '\n'
full_encoding = logic + '\n'.join(decls) + '\n' + assertion + '\n' + checks + '\n'
return full_encoding
|
the-stack_0_14954 | """
Defines different methods to configure a connection to a Kubernetes cluster.
"""
import asyncio
import base64
import contextlib
import copy
import datetime
import json
import logging
import os
import kubernetes
import kubernetes_asyncio
from kubernetes_asyncio.client import Configuration
from kubernetes_asyncio.config.kube_config import KubeConfigLoader, KubeConfigMerger
from kubernetes_asyncio.config.google_auth import google_auth_credentials
from kubernetes_asyncio.config.dateutil import parse_rfc3339
logger = logging.getLogger(__name__)
tzUTC = datetime.timezone.utc
class AutoRefreshKubeConfigLoader(KubeConfigLoader):
"""
Extends KubeConfigLoader, automatically attempts to refresh authentication
credentials before they expire.
"""
def __init__(self, *args, **kwargs):
super(AutoRefreshKubeConfigLoader, self).__init__(*args, **kwargs)
self._retry_count = 0
self._max_retries = float("Inf")
self.auto_refresh = True
self.refresh_task = None
self.last_refreshed = None
self.token_expire_ts = None
def __del__(self):
self.auto_refresh = False
def extract_oid_expiration_from_provider(self, provider):
"""
Extracts the expiration datestamp for the provider token
Parameters
----------
provider : authentication provider dictionary.
Returns
-------
expires : expiration timestamp
"""
parts = provider["config"]["id-token"].split(".")
if len(parts) != 3:
raise ValueError("oidc: JWT tokens should contain 3 period-delimited parts")
id_token = parts[1]
# Re-pad the unpadded JWT token
id_token += (4 - len(id_token) % 4) * "="
jwt_attributes = json.loads(base64.b64decode(id_token).decode("utf8"))
expires = jwt_attributes.get("exp")
return expires
async def create_refresh_task_from_expiration_timestamp(self, expiration_timestamp):
"""
Takes an expiration timestamp, and creates a refresh task to ensure that the token
does not expire.
Parameters
----------
expiration_timestamp : time at which the current authentication token will expire
Returns
-------
N/A
"""
# Set our token expiry to be actual expiry - 20%
expiry = parse_rfc3339(expiration_timestamp)
expiry_delta = datetime.timedelta(
seconds=(expiry - datetime.datetime.now(tz=tzUTC)).total_seconds()
)
scaled_expiry_delta = datetime.timedelta(
seconds=0.8 * expiry_delta.total_seconds()
)
self.refresh_task = asyncio.create_task(
self.refresh_after(
when=scaled_expiry_delta.total_seconds(), reschedule_on_failure=True
),
name="dask_auth_auto_refresh",
)
self.last_refreshed = datetime.datetime.now(tz=tzUTC)
self.token_expire_ts = self.last_refreshed + scaled_expiry_delta
async def refresh_after(self, when, reschedule_on_failure=False):
"""
Refresh Kubernetes authentication
Parameters
----------
when : Seconds before we should refresh. This should be set to some delta before
the actual token expiration time, or you will likely see authentication race
/ failure conditions.
reschedule_on_failure : If the refresh task fails, re-try in 30 seconds, until
_max_retries is exceeded, then raise an exception.
"""
if not self.auto_refresh:
return
logger.debug(
msg=f"Refresh_at coroutine sleeping for "
f"{int(when // 60)} minutes {(when % 60):0.2f} seconds."
)
try:
await asyncio.sleep(when)
if self.provider == "gcp":
await self.refresh_gcp_token()
elif self.provider == "oidc":
await self.refresh_oid_token()
return
elif "exec" in self._user:
logger.warning(msg="Auto-refresh doesn't support generic ExecProvider")
return
except Exception as e:
logger.warning(
msg=f"Authentication refresh failed for provider '{self.provider}.'",
exc_info=e,
)
if not reschedule_on_failure or self._retry_count > self._max_retries:
raise
logger.warning(msg=f"Retrying '{self.provider}' in 30 seconds.")
self._retry_count += 1
self.refresh_task = asyncio.create_task(self.refresh_after(30))
async def refresh_oid_token(self):
"""
Adapted from kubernetes_asyncio/config/kube_config:_load_oid_token
Refreshes the existing oid token, if necessary, and creates a refresh task
that will keep the token from expiring.
Returns
-------
"""
provider = self._user["auth-provider"]
logger.debug("Refreshing OID token.")
if "config" not in provider:
raise ValueError("oidc: missing configuration")
if (not self.token_expire_ts) or (
self.token_expire_ts <= datetime.datetime.now(tz=tzUTC)
):
await self._refresh_oidc(provider)
expires = self.extract_oid_expiration_from_provider(provider=provider)
await self.create_refresh_task_from_expiration_timestamp(
expiration_timestamp=expires
)
self.token = "Bearer {}".format(provider["config"]["id-token"])
async def refresh_gcp_token(self):
"""
Adapted from kubernetes_asyncio/config/kube_config:load_gcp_token
Refreshes the existing gcp token, if necessary, and creates a refresh task
that will keep the token from expiring.
Returns
-------
"""
if "config" not in self._user["auth-provider"]:
self._user["auth-provider"].value["config"] = {}
config = self._user["auth-provider"]["config"]
if (not self.token_expire_ts) or (
self.token_expire_ts <= datetime.datetime.now(tz=tzUTC)
):
logger.debug("Refreshing GCP token.")
if self._get_google_credentials is not None:
if asyncio.iscoroutinefunction(self._get_google_credentials):
credentials = await self._get_google_credentials()
else:
credentials = self._get_google_credentials()
else:
# config is read-only.
extra_args = " --force-auth-refresh"
_config = {
"cmd-args": config["cmd-args"] + extra_args,
"cmd-path": config["cmd-path"],
}
credentials = await google_auth_credentials(_config)
config.value["access-token"] = credentials.token
config.value["expiry"] = credentials.expiry
# Set our token expiry to be actual expiry - 20%
await self.create_refresh_task_from_expiration_timestamp(
expiration_timestamp=config.value["expiry"]
)
if self._config_persister:
self._config_persister(self._config.value)
self.token = "Bearer %s" % config["access-token"]
async def _load_oid_token(self):
"""
Overrides KubeConfigLoader implementation.
Returns
-------
Auth token
"""
await self.refresh_oid_token()
return self.token
async def load_gcp_token(self):
"""
Override KubeConfigLoader implementation so that we can keep track of the expiration timestamp
and automatically refresh auth tokens.
Returns
-------
GCP access token
"""
await self.refresh_gcp_token()
return self.token
class AutoRefreshConfiguration(Configuration):
"""
Extends kubernetes_async Configuration to support automatic token refresh.
Lets us keep track of the original loader object, which can be used
to regenerate the authentication token.
"""
def __init__(self, loader, refresh_frequency=None, *args, **kwargs):
super(AutoRefreshConfiguration, self).__init__(*args, **kwargs)
# Set refresh api callback
self.refresh_api_key_hook = self.refresh_api_key
self.last_refreshed = datetime.datetime.now(tz=tzUTC)
self.loader = loader
# Adapted from kubernetes_asyncio/client/configuration.py:__deepcopy__
def __deepcopy__(self, memo):
"""
Modified so that we don't try to deep copy the loader off the config
"""
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k not in ("logger", "logger_file_handler", "loader"):
setattr(result, k, copy.deepcopy(v, memo))
# shallow copy loader object
result.loader = self.loader
# shallow copy of loggers
result.logger = copy.copy(self.logger)
# use setters to configure loggers
result.logger_file = self.logger_file
result.debug = self.debug
return result
def refresh_api_key(self, client_configuration):
"""
Checks to see if the loader has updated the authentication token. If it
has, the token is copied from the loader into the current configuration.
This function is assigned to Configuration.refresh_api_key_hook, and will
fire when entering get_api_key_with_prefix, before the api_key is retrieved.
"""
if self.loader.last_refreshed is not None:
if (
self.last_refreshed is None
or self.last_refreshed < self.loader.last_refreshed
):
logger.debug("Entering refresh_api_key_hook")
client_configuration.api_key[
"authorization"
] = client_configuration.loader.token
self.last_refreshed = datetime.datetime.now(tz=tzUTC)
class ClusterAuth(object):
"""
An abstract base class for methods for configuring a connection to a
Kubernetes API server.
Examples
--------
>>> from dask_kubernetes import KubeConfig
>>> auth = KubeConfig(context='minikube')
>>> from dask_kubernetes import KubeAuth
>>> auth = KubeAuth(host='https://localhost', username='superuser', password='pass')
"""
async def load(self):
"""
Load Kubernetes configuration and set as default
Raises
------
kubernetes.client.KubeConfigException
"""
raise NotImplementedError()
@staticmethod
async def load_first(auth=None):
"""
Load the first valid configuration in the list *auth*. A single
configuration method can be passed.
Parameters
----------
auth: List[ClusterAuth] (optional)
Configuration methods to attempt in order. Defaults to
``[InCluster(), KubeConfig()]``.
"""
if auth is None:
auth = ClusterAuth.DEFAULT
elif isinstance(auth, ClusterAuth):
auth = [auth]
elif isinstance(auth, list):
if not auth:
raise kubernetes_asyncio.config.ConfigException(
"No authorization methods were provided"
)
else:
msg = (
"Invalid authorization method provided. See ClusterAuth "
"docstring for ways to specify authentication methods"
)
raise ValueError(msg)
auth_exc = None
for auth_instance in auth:
try:
await auth_instance.load()
except (
kubernetes_asyncio.config.ConfigException,
kubernetes.config.ConfigException,
) as exc:
logger.debug(
"Failed to load configuration with %s method: %s",
auth_instance.__class__,
exc,
)
auth_exc = exc
else:
break
else:
raise auth_exc
class InCluster(ClusterAuth):
"""Configure the Kubernetes connection from a container's environment.
This authentication method is intended for use when the client is running
in a container started by Kubernetes with an authorized service account.
This loads the mounted service account token and discovers the Kubernetes
API via Kubernetes service discovery.
"""
async def load(self):
kubernetes.config.load_incluster_config()
kubernetes_asyncio.config.load_incluster_config()
class KubeConfig(ClusterAuth):
"""Configure the Kubernetes connection from a kubeconfig file.
Parameters
----------
config_file: str (optional)
The path of the kubeconfig file to load. Defaults to the value of the
``KUBECONFIG`` environment variable, or the string ``~/.kube/config``.
context: str (optional)
The kubeconfig context to use. Defaults to the value of ``current-context``
in the configuration file.
persist_config: bool (optional)
Whether changes to the configuration will be saved back to disk (e.g.
GCP token refresh). Defaults to ``True``.
"""
def __init__(self, config_file=None, context=None, persist_config=True):
self.config_file = config_file
self.context = context
self.persist_config = persist_config
async def load(self):
with contextlib.suppress(KeyError):
if self.config_file is None:
self.config_file = os.path.abspath(
os.path.expanduser(os.environ.get("KUBECONFIG", "~/.kube/config"))
)
await self.load_kube_config()
# Adapted from kubernetes_asyncio/config/kube_config.py:get_kube_config_loader_for_yaml_file
def get_kube_config_loader_for_yaml_file(self):
kcfg = KubeConfigMerger(self.config_file)
config_persister = None
if self.persist_config:
config_persister = kcfg.save_changes()
return AutoRefreshKubeConfigLoader(
config_dict=kcfg.config,
config_base_path=None,
config_persister=config_persister,
)
# Adapted from kubernetes_asyncio/config/kube_config.py:load_kube_config
async def load_kube_config(self):
# Create a config loader, this will automatically refresh our credentials before they expire
loader = self.get_kube_config_loader_for_yaml_file()
# Grab our async + callback aware configuration
config = AutoRefreshConfiguration(loader)
await loader.load_and_set(config)
Configuration.set_default(config)
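# Hedged usage sketch (illustrative, not part of the original module): loading a
# named kubeconfig context from within a coroutine; the context name is an
# assumption made for this example.
async def _example_load_minikube_kubeconfig() -> None:
    auth = KubeConfig(context="minikube")
    await auth.load()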
class KubeAuth(ClusterAuth):
"""Configure the Kubernetes connection explicitly.
Parameters
----------
host: str
The base URL of the Kubernetes host to connect
username: str (optional)
Username for HTTP basic authentication
password: str (optional)
Password for HTTP basic authentication
debug: bool (optional)
Debug switch
verify_ssl: bool (optional)
Set this to false to skip verifying SSL certificate when calling API
from https server. Defaults to ``True``.
ssl_ca_cert: str (optional)
Set this to customize the certificate file to verify the peer.
cert_file: str (optional)
Client certificate file
key_file: str (optional)
Client key file
assert_hostname: bool (optional)
Set this to True/False to enable/disable SSL hostname verification.
Defaults to True.
proxy: str (optional)
URL for a proxy to connect through
"""
def __init__(self, host, **kwargs):
# We need to create a new configuration in this way, because if we just
# instantiate a new Configuration object we will get the default
# values.
config = type.__call__(kubernetes.client.Configuration)
config.host = host
for key, value in kwargs.items():
setattr(config, key, value)
self.config = config
async def load(self):
kubernetes.client.Configuration.set_default(self.config)
await kubernetes_asyncio.client.Configuration.set_default(self.config)
ClusterAuth.DEFAULT = [InCluster(), KubeConfig()]
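# Hedged usage sketch (illustrative, not part of the original module): trying
# in-cluster credentials first and falling back to a local kubeconfig, mirroring
# ClusterAuth.DEFAULT.
async def _example_load_first_available_auth() -> None:
    await ClusterAuth.load_first([InCluster(), KubeConfig()])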
|
the-stack_0_14960 | #!/usr/bin/env python
# coding: utf-8
# # Complete Linear Regression Tutorial
# ## Using continuous integration
# ## Collect data using pandas
# In[59]:
# modules required for the notebook
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression
from sklearn import model_selection
from sklearn import metrics
# In[60]:
# read the data file
# use the index_col param: Column to use as the row labels of the DataFrame
df = pd.read_csv('Advertising.csv',
index_col=0)
df.head()
# In[61]:
df.describe()
# # Identifying the features, the target and the observations
# What are the features? There are 3 features in this dataset:
# * TV
# * Radio
# * Newspaper
# What is the target?
# * Sales: sales of a product
# What is the shape of the dataframe?
# In[62]:
df.shape
# We have 200 observations and 4 columns, 3 of which are features
# # Plotting the relationships between the features and the target
# In[63]:
# use a figure with 3 plots aligned on one row
fig, axes = plt.subplots(1,3,sharey=False)
df.plot(kind='scatter', x='TV', y='sales',
ax=axes[0], figsize=(16,8))
df.plot(kind='scatter', x='radio', y='sales',
ax=axes[1], figsize=(16,8))
df.plot(kind='scatter', x='newspaper', y='sales',
ax=axes[2], figsize=(16,8))
# The plots show that there is a certain linear relationship between TV and sales, as well as between radio and sales
# In[64]:
# same thing but with seaborn
sns.pairplot(data=df, x_vars=['TV','radio','newspaper'],
y_vars='sales', height=7, aspect=0.7)
# # Plotting the correlations between the features and the target
# * This part has not been done yet.
# # Building the linear regression model
# In[65]:
cols_predicteurs = ['TV','radio','newspaper']
# predictors
X = df[cols_predicteurs]
y = df.sales
# In[66]:
# Perform the training/test split
X_train, X_test, y_train, y_test = model_selection.train_test_split(X,
y , test_size = 0.2, random_state=42)
# details of each sub-dataset
print (X_train.shape, y_train.shape)
print (X_test.shape, y_test.shape)
# In[67]:
# estimate the coefficients of the linear model
lm = LinearRegression()
lm.fit(X_train,y_train)
# Display the coefficients
print(lm.intercept_)
print(lm.coef_)
# In[68]:
# Display the equation
list(zip(cols_predicteurs, lm.coef_))
# In[69]:
# run predictions on the test set
y_pred = lm.predict(X_test)
# In[70]:
import numpy as np
# compare the test and predicted values
test_pred_df = pd.DataFrame({'Test values': y_test,
'Predicted values': np.round(y_pred, 2),
'residuals': y_test - y_pred})
test_pred_df[0:10]
# In[71]:
# RMSE (root mean squared error)
rmse = np.sqrt(metrics.mean_squared_error(y_test, y_pred))
print(rmse)
# Compute the R-squared
r2 = metrics.r2_score(y_test, y_pred)
print(r2)
# In[72]:
# Write scores to a file
with open("metrics.txt", 'w') as outfile:
outfile.write("MSE: {0:2.1f} \n".format(mse))
outfile.write("R2: {0:2.1f}\n".format(r2))
# In[73]:
# Reference: The Elements of Statistical Learning - Hastie, Tibshirani and Friedman, see https://web.stanford.edu/~hastie/ElemStatLearn/
|
the-stack_0_14962 | # This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
#
# PanDA authors:
# - Aleksandr Alekseev, [email protected], 2022
# - Paul Nilsson, [email protected], 2022
from abc import ABC, abstractmethod
from typing import Iterator, Union
import json
import logging
import socket
import ssl
from requests.auth import HTTPBasicAuth
import requests
try:
import pylogbeat
from logstash_async.utils import ichunked
except ImportError:
pass
logger = logging.getLogger(__name__)
class TimeoutNotSet:
pass
class Transport(ABC):
"""The :class:`Transport <Transport>` is the abstract base class of
all transport protocols.
:param host: The name of the host.
:type host: str
:param port: The TCP/UDP port.
:type port: int
:param timeout: The connection timeout.
:type timeout: None or float
:param ssl_enable: Activates TLS.
:type ssl_enable: bool
:param ssl_verify: Activates the TLS certificate verification.
:type ssl_verify: bool or str
:param use_logging: Use logging for debugging.
:type use_logging: bool
"""
def __init__(
self,
host: str,
port: int,
timeout: Union[None, float],
ssl_enable: bool,
ssl_verify: Union[bool, str],
use_logging: bool,
):
self._host = host
self._port = port
self._timeout = None if timeout is TimeoutNotSet else timeout
self._ssl_enable = ssl_enable
self._ssl_verify = ssl_verify
self._use_logging = use_logging
super().__init__()
@abstractmethod
def send(self, events: list, **kwargs):
pass
@abstractmethod
def close(self):
pass
class UdpTransport:
_keep_connection = False
# ----------------------------------------------------------------------
# pylint: disable=unused-argument
def __init__(self, host, port, timeout=TimeoutNotSet, **kwargs):
self._host = host
self._port = port
self._timeout = timeout
self._sock = None
# ----------------------------------------------------------------------
def send(self, events, use_logging=False): # pylint: disable=unused-argument
# Ideally we would keep the socket open but this is risky because we might not notice
# a broken TCP connection and send events into the dark.
# On UDP we push into the dark by design :)
self._create_socket()
try:
self._send(events)
finally:
self._close()
# ----------------------------------------------------------------------
def _create_socket(self):
if self._sock is not None:
return
# from logging.handlers.DatagramHandler
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if self._timeout is not TimeoutNotSet:
self._sock.settimeout(self._timeout)
# ----------------------------------------------------------------------
def _send(self, events):
for event in events:
self._send_via_socket(event)
# ----------------------------------------------------------------------
def _send_via_socket(self, data):
data_to_send = self._convert_data_to_send(data)
self._sock.sendto(data_to_send, (self._host, self._port))
# ----------------------------------------------------------------------
def _convert_data_to_send(self, data):
if not isinstance(data, bytes):
return bytes(data, 'utf-8')
return data
# ----------------------------------------------------------------------
def _close(self, force=False):
if not self._keep_connection or force:
if self._sock:
self._sock.close()
self._sock = None
# ----------------------------------------------------------------------
def close(self):
self._close(force=True)
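# Hedged usage sketch (illustrative, not part of the original module): pushing a
# single JSON event to a local logstash UDP input; the host and port below are
# assumptions made for this example.
def _example_send_udp_event() -> None:
    transport = UdpTransport(host="127.0.0.1", port=5959)
    try:
        transport.send(['{"message": "hello"}'])
    finally:
        transport.close()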
class TcpTransport(UdpTransport):
# ----------------------------------------------------------------------
def __init__( # pylint: disable=too-many-arguments
self,
host,
port,
ssl_enable,
ssl_verify,
keyfile,
certfile,
ca_certs,
timeout=TimeoutNotSet,
**kwargs):
super().__init__(host, port)
self._ssl_enable = ssl_enable
self._ssl_verify = ssl_verify
self._keyfile = keyfile
self._certfile = certfile
self._ca_certs = ca_certs
self._timeout = timeout
# ----------------------------------------------------------------------
def _create_socket(self):
if self._sock is not None:
return
# from logging.handlers.SocketHandler
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self._timeout is not TimeoutNotSet:
self._sock.settimeout(self._timeout)
try:
self._sock.connect((self._host, self._port))
# non-SSL
if not self._ssl_enable:
return
# SSL
cert_reqs = ssl.CERT_REQUIRED
ssl_context = ssl.create_default_context(cafile=self._ca_certs)
if not self._ssl_verify:
if self._ca_certs:
cert_reqs = ssl.CERT_OPTIONAL
else:
cert_reqs = ssl.CERT_NONE
ssl_context.verify_mode = cert_reqs
ssl_context.check_hostname = False
ssl_context.load_cert_chain(self._certfile, self._keyfile)
self._sock = ssl_context.wrap_socket(self._sock, server_side=False)
except socket.error:
self._close()
raise
# ----------------------------------------------------------------------
def _send_via_socket(self, data):
data_to_send = self._convert_data_to_send(data)
self._sock.sendall(data_to_send)
class BeatsTransport:
_batch_size = 10
# ----------------------------------------------------------------------
def __init__( # pylint: disable=too-many-arguments
self,
host,
port,
ssl_enable,
ssl_verify,
keyfile,
certfile,
ca_certs,
timeout=TimeoutNotSet,
**kwargs):
timeout_ = None if timeout is TimeoutNotSet else timeout
self._client_arguments = dict(
host=host,
port=port,
timeout=timeout_,
ssl_enable=ssl_enable,
ssl_verify=ssl_verify,
keyfile=keyfile,
certfile=certfile,
ca_certs=ca_certs,
**kwargs)
# ----------------------------------------------------------------------
def close(self):
pass # nothing to do
# ----------------------------------------------------------------------
def send(self, events, use_logging=False):
client = pylogbeat.PyLogBeatClient(use_logging=use_logging, **self._client_arguments)
with client:
for events_subset in ichunked(events, self._batch_size):
try:
client.send(events_subset)
except Exception:
# Swallow transport errors so that a logging failure never propagates to
# the caller; the events in this batch are silently dropped.
pass
class HttpTransport(Transport):
"""The :class:`HttpTransport <HttpTransport>` implements a client for the
logstash plugin `inputs_http`.
For more details visit:
https://www.elastic.co/guide/en/logstash/current/plugins-inputs-http.html
:param host: The hostname of the logstash HTTP server.
:type host: str
:param port: The TCP port of the logstash HTTP server.
:type port: int
:param timeout: The connection timeout. (Default: None)
:type timeout: float
:param ssl_enable: Activates TLS. (Default: True)
:type ssl_enable: bool
:param ssl_verify: Activates the TLS certificate verification. If the flag
is True the class tries to verify the TLS certificate with certifi. If you
pass a string with a file location to CA certificate the class tries to
validate it against it. (Default: True)
:type ssl_verify: bool or str
:param use_logging: Use logging for debugging.
:type use_logging: bool
:param username: Username for basic authorization. (Default: "")
:type username: str
:param password: Password for basic authorization. (Default: "")
:type password: str
:param max_content_length: The max content of an HTTP request in bytes.
(Default: 100MB)
:type max_content_length: int
"""
def __init__(
self,
host: str,
port: int,
timeout: Union[None, float] = TimeoutNotSet,
ssl_enable: bool = True,
ssl_verify: Union[bool, str] = True,
use_logging: bool = False,
#keyfile: Union[bool, str] = True,
#certfile: Union[bool, str] = True,
**kwargs
):
super().__init__(host, port, timeout, ssl_enable, ssl_verify, use_logging)
self._username = kwargs.get('username', None)
self._password = kwargs.get('password', None)
self._max_content_length = kwargs.get('max_content_length', 100 * 1024 * 1024)
self.__session = None
self._cert = kwargs.get('cert', None)
@property
def url(self) -> str:
"""The URL of the logstash pipeline based on the hostname, the port and
the TLS usage.
:return: The URL of the logstash HTTP pipeline.
:rtype: str
"""
protocol = 'http'
if self._ssl_enable:
protocol = 'https'
return f'{protocol}://{self._host}:{self._port}'
def __batches(self, events: list) -> Iterator[list]:
"""Generate dynamic sized batches based on the max content length.
:param events: A list of events.
:type events: list
:return: A iterator which generates batches of events.
:rtype: Iterator[list]
"""
current_batch = []
event_iter = iter(events)
while True:
try:
current_event = next(event_iter)
except StopIteration:
current_event = None
if not current_batch:
return
yield current_batch
if current_event is None:
return
if len(current_event) > self._max_content_length:
msg = 'The event size <%s> is greater than the max content length <%s>.'
msg += ' Skipping event.'
if self._use_logging:
logger.warning(msg, len(current_event), self._max_content_length)
continue
obj = json.loads(current_event)
content_length = len(json.dumps(current_batch + [obj]).encode('utf8'))
if content_length > self._max_content_length:
batch = current_batch
current_batch = [obj]
yield batch
else:
current_batch += [obj]
def __auth(self) -> HTTPBasicAuth:
"""The authentication method for the logstash pipeline. If the username
or the password is not set correctly it will return None.
:return: A HTTP basic auth object or None.
:rtype: HTTPBasicAuth
"""
if self._username is None or self._password is None:
return None
return HTTPBasicAuth(self._username, self._password)
def close(self) -> None:
"""Close the HTTP session.
"""
if self.__session is not None:
self.__session.close()
def send(self, events: list, **kwargs):
"""Send events to the logstash pipeline.
Max Events: `logstash_async.Constants.QUEUED_EVENTS_BATCH_SIZE`
Max Content Length: `HttpTransport._max_content_length`
The method receives a list of events from the worker. It tries to send
as much of the events as possible in one request. If the total size of
the received events is greater than the maximal content length the
events will be divide into batches.
:param events: A list of events
:type events: list
"""
self.__session = requests.Session()
#print(self._cert)
for batch in self.__batches(events):
if self._use_logging:
logger.debug('Batch length: %s, Batch size: %s',
len(batch), len(json.dumps(batch).encode('utf8')))
response = self.__session.post(
self.url,
headers={'Content-Type': 'application/json'},
json=batch,
verify=self._ssl_verify,
timeout=self._timeout,
auth=self.__auth(),
cert=self._cert)
#print(response)
if response.status_code != 200:
self.close()
response.raise_for_status()
self.close()
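# Hedged usage sketch (illustrative, not part of the original module): sending
# two JSON-serialised events to a logstash HTTP input; the host, port and
# credentials below are assumptions made for this example.
def _example_send_http_events() -> None:
    transport = HttpTransport(
        host="logstash.example.org",
        port=8080,
        ssl_enable=False,
        username="user",
        password="secret",
    )
    events = [json.dumps({"message": "hello"}), json.dumps({"message": "world"})]
    try:
        transport.send(events)
    finally:
        transport.close()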
|
the-stack_0_14965 | # Copyright 2016-2020 The Matrix.org Foundation C.I.C.
# Copyright 2020 Sorunome
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import logging
import random
from http import HTTPStatus
from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple
from synapse import types
from synapse.api.constants import (
AccountDataTypes,
EventContentFields,
EventTypes,
GuestAccess,
Membership,
)
from synapse.api.errors import (
AuthError,
Codes,
LimitExceededError,
ShadowBanError,
SynapseError,
)
from synapse.api.ratelimiting import Ratelimiter
from synapse.event_auth import get_named_level, get_power_level_event
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN
from synapse.types import (
JsonDict,
Requester,
RoomAlias,
RoomID,
StateMap,
UserID,
create_requester,
get_domain_from_id,
)
from synapse.util.async_helpers import Linearizer
from synapse.util.distributor import user_left_room
from ._base import BaseHandler
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class RoomMemberHandler(metaclass=abc.ABCMeta):
# TODO(paul): This handler currently contains a messy conflation of
# low-level API that works on UserID objects and so on, and REST-level
# API that takes ID strings and returns pagination chunks. These concerns
# ought to be separated out a lot better.
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.store = hs.get_datastore()
self.auth = hs.get_auth()
self.state_handler = hs.get_state_handler()
self.config = hs.config
self._server_name = hs.hostname
self.federation_handler = hs.get_federation_handler()
self.directory_handler = hs.get_directory_handler()
self.identity_handler = hs.get_identity_handler()
self.registration_handler = hs.get_registration_handler()
self.profile_handler = hs.get_profile_handler()
self.event_creation_handler = hs.get_event_creation_handler()
self.account_data_handler = hs.get_account_data_handler()
self.event_auth_handler = hs.get_event_auth_handler()
self.member_linearizer: Linearizer = Linearizer(name="member")
self.clock = hs.get_clock()
self.spam_checker = hs.get_spam_checker()
self.third_party_event_rules = hs.get_third_party_event_rules()
self._server_notices_mxid = self.config.servernotices.server_notices_mxid
self._enable_lookup = hs.config.enable_3pid_lookup
self.allow_per_room_profiles = self.config.allow_per_room_profiles
self._join_rate_limiter_local = Ratelimiter(
store=self.store,
clock=self.clock,
rate_hz=hs.config.ratelimiting.rc_joins_local.per_second,
burst_count=hs.config.ratelimiting.rc_joins_local.burst_count,
)
self._join_rate_limiter_remote = Ratelimiter(
store=self.store,
clock=self.clock,
rate_hz=hs.config.ratelimiting.rc_joins_remote.per_second,
burst_count=hs.config.ratelimiting.rc_joins_remote.burst_count,
)
self._invites_per_room_limiter = Ratelimiter(
store=self.store,
clock=self.clock,
rate_hz=hs.config.ratelimiting.rc_invites_per_room.per_second,
burst_count=hs.config.ratelimiting.rc_invites_per_room.burst_count,
)
self._invites_per_user_limiter = Ratelimiter(
store=self.store,
clock=self.clock,
rate_hz=hs.config.ratelimiting.rc_invites_per_user.per_second,
burst_count=hs.config.ratelimiting.rc_invites_per_user.burst_count,
)
# This is only used to get at the ratelimit function. It's fine there are
# multiple of these as it doesn't store state.
self.base_handler = BaseHandler(hs)
@abc.abstractmethod
async def _remote_join(
self,
requester: Requester,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
content: dict,
) -> Tuple[str, int]:
"""Try and join a room that this server is not in
Args:
requester
remote_room_hosts: List of servers that can be used to join via.
room_id: Room that we are trying to join
user: User who is trying to join
content: A dict that should be used as the content of the join event.
"""
raise NotImplementedError()
@abc.abstractmethod
async def remote_knock(
self,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
content: dict,
) -> Tuple[str, int]:
"""Try and knock on a room that this server is not in
Args:
remote_room_hosts: List of servers that can be used to knock via.
room_id: Room that we are trying to knock on.
user: User who is trying to knock.
content: A dict that should be used as the content of the knock event.
"""
raise NotImplementedError()
@abc.abstractmethod
async def remote_reject_invite(
self,
invite_event_id: str,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""
Rejects an out-of-band invite we have received from a remote server
Args:
invite_event_id: ID of the invite to be rejected
txn_id: optional transaction ID supplied by the client
requester: user making the rejection request, according to the access token
content: additional content to include in the rejection event.
Normally an empty dict.
Returns:
event id, stream_id of the leave event
"""
raise NotImplementedError()
@abc.abstractmethod
async def remote_rescind_knock(
self,
knock_event_id: str,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""Rescind a local knock made on a remote room.
Args:
knock_event_id: The ID of the knock event to rescind.
txn_id: An optional transaction ID supplied by the client.
requester: The user making the request, according to the access token.
content: The content of the generated leave event.
Returns:
A tuple containing (event_id, stream_id of the leave event).
"""
raise NotImplementedError()
@abc.abstractmethod
async def _user_left_room(self, target: UserID, room_id: str) -> None:
"""Notifies distributor on master process that the user has left the
room.
Args:
target
room_id
"""
raise NotImplementedError()
@abc.abstractmethod
async def forget(self, user: UserID, room_id: str) -> None:
raise NotImplementedError()
async def ratelimit_multiple_invites(
self,
requester: Optional[Requester],
room_id: Optional[str],
n_invites: int,
update: bool = True,
) -> None:
"""Ratelimit more than one invite sent by the given requester in the given room.
Args:
requester: The requester sending the invites.
room_id: The room the invites are being sent in.
n_invites: The amount of invites to ratelimit for.
update: Whether to update the ratelimiter's cache.
Raises:
LimitExceededError: The requester can't send that many invites in the room.
"""
await self._invites_per_room_limiter.ratelimit(
requester,
room_id,
update=update,
n_actions=n_invites,
)
async def ratelimit_invite(
self,
requester: Optional[Requester],
room_id: Optional[str],
invitee_user_id: str,
) -> None:
"""Ratelimit invites by room and by target user.
If room ID is missing then we just rate limit by target user.
"""
if room_id:
await self._invites_per_room_limiter.ratelimit(requester, room_id)
await self._invites_per_user_limiter.ratelimit(requester, invitee_user_id)
async def _local_membership_update(
self,
requester: Requester,
target: UserID,
room_id: str,
membership: str,
prev_event_ids: List[str],
auth_event_ids: Optional[List[str]] = None,
txn_id: Optional[str] = None,
ratelimit: bool = True,
content: Optional[dict] = None,
require_consent: bool = True,
outlier: bool = False,
) -> Tuple[str, int]:
"""
Internal membership update function to get an existing event or create
and persist a new event for the new membership change.
Args:
requester:
target:
room_id:
membership:
prev_event_ids: The event IDs to use as the prev events
auth_event_ids:
The event ids to use as the auth_events for the new event.
Should normally be left as None, which will cause them to be calculated
based on the room state at the prev_events.
txn_id:
ratelimit:
content:
require_consent:
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
Returns:
Tuple of event ID and stream ordering position
"""
user_id = target.to_string()
if content is None:
content = {}
content["membership"] = membership
if requester.is_guest:
content["kind"] = "guest"
# Check if we already have an event with a matching transaction ID. (We
# do this check just before we persist an event as well, but may as well
# do it up front for efficiency.)
if txn_id and requester.access_token_id:
existing_event_id = await self.store.get_event_id_from_transaction_id(
room_id,
requester.user.to_string(),
requester.access_token_id,
txn_id,
)
if existing_event_id:
event_pos = await self.store.get_position_for_event(existing_event_id)
return existing_event_id, event_pos.stream
event, context = await self.event_creation_handler.create_event(
requester,
{
"type": EventTypes.Member,
"content": content,
"room_id": room_id,
"sender": requester.user.to_string(),
"state_key": user_id,
# For backwards compatibility:
"membership": membership,
},
txn_id=txn_id,
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
require_consent=require_consent,
outlier=outlier,
)
prev_state_ids = await context.get_prev_state_ids()
prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
if event.membership == Membership.JOIN:
newly_joined = True
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
newly_joined = prev_member_event.membership != Membership.JOIN
# Only rate-limit if the user actually joined the room, otherwise we'll end
# up blocking profile updates.
if newly_joined and ratelimit:
time_now_s = self.clock.time()
(
allowed,
time_allowed,
) = await self._join_rate_limiter_local.can_do_action(requester)
if not allowed:
raise LimitExceededError(
retry_after_ms=int(1000 * (time_allowed - time_now_s))
)
result_event = await self.event_creation_handler.handle_new_client_event(
requester,
event,
context,
extra_users=[target],
ratelimit=ratelimit,
)
if event.membership == Membership.LEAVE:
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
if prev_member_event.membership == Membership.JOIN:
await self._user_left_room(target, room_id)
# we know it was persisted, so should have a stream ordering
assert result_event.internal_metadata.stream_ordering
return result_event.event_id, result_event.internal_metadata.stream_ordering
async def copy_room_tags_and_direct_to_room(
self, old_room_id: str, new_room_id: str, user_id: str
) -> None:
"""Copies the tags and direct room state from one room to another.
Args:
old_room_id: The room ID of the old room.
new_room_id: The room ID of the new room.
user_id: The user's ID.
"""
# Retrieve user account data for predecessor room
user_account_data, _ = await self.store.get_account_data_for_user(user_id)
# Copy direct message state if applicable
direct_rooms = user_account_data.get(AccountDataTypes.DIRECT, {})
# Check which key this room is under
if isinstance(direct_rooms, dict):
for key, room_id_list in direct_rooms.items():
if old_room_id in room_id_list and new_room_id not in room_id_list:
# Add new room_id to this key
direct_rooms[key].append(new_room_id)
# Save back to user's m.direct account data
await self.account_data_handler.add_account_data_for_user(
user_id, AccountDataTypes.DIRECT, direct_rooms
)
break
# Copy room tags if applicable
room_tags = await self.store.get_tags_for_room(user_id, old_room_id)
# Copy each room tag to the new room
for tag, tag_content in room_tags.items():
await self.account_data_handler.add_tag_to_room(
user_id, new_room_id, tag, tag_content
)
async def update_membership(
self,
requester: Requester,
target: UserID,
room_id: str,
action: str,
txn_id: Optional[str] = None,
remote_room_hosts: Optional[List[str]] = None,
third_party_signed: Optional[dict] = None,
ratelimit: bool = True,
content: Optional[dict] = None,
require_consent: bool = True,
outlier: bool = False,
prev_event_ids: Optional[List[str]] = None,
auth_event_ids: Optional[List[str]] = None,
) -> Tuple[str, int]:
"""Update a user's membership in a room.
Params:
requester: The user who is performing the update.
target: The user whose membership is being updated.
room_id: The room ID whose membership is being updated.
action: The membership change, see synapse.api.constants.Membership.
txn_id: The transaction ID, if given.
remote_room_hosts: Remote servers to send the update to.
third_party_signed: Information from a 3PID invite.
ratelimit: Whether to rate limit the request.
content: The content of the created event.
require_consent: Whether consent is required.
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
prev_event_ids: The event IDs to use as the prev events
auth_event_ids:
The event ids to use as the auth_events for the new event.
Should normally be left as None, which will cause them to be calculated
based on the room state at the prev_events.
Returns:
A tuple of the new event ID and stream ID.
Raises:
ShadowBanError if a shadow-banned requester attempts to send an invite.
"""
if action == Membership.INVITE and requester.shadow_banned:
# We randomly sleep a bit just to annoy the requester.
await self.clock.sleep(random.randint(1, 10))
raise ShadowBanError()
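# Serialise concurrent membership changes for the same room through the
# per-room linearizer key, so updates are applied one at a time.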
key = (room_id,)
with (await self.member_linearizer.queue(key)):
result = await self.update_membership_locked(
requester,
target,
room_id,
action,
txn_id=txn_id,
remote_room_hosts=remote_room_hosts,
third_party_signed=third_party_signed,
ratelimit=ratelimit,
content=content,
require_consent=require_consent,
outlier=outlier,
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
)
return result
async def update_membership_locked(
self,
requester: Requester,
target: UserID,
room_id: str,
action: str,
txn_id: Optional[str] = None,
remote_room_hosts: Optional[List[str]] = None,
third_party_signed: Optional[dict] = None,
ratelimit: bool = True,
content: Optional[dict] = None,
require_consent: bool = True,
outlier: bool = False,
prev_event_ids: Optional[List[str]] = None,
auth_event_ids: Optional[List[str]] = None,
) -> Tuple[str, int]:
"""Helper for update_membership.
Assumes that the membership linearizer is already held for the room.
Args:
requester:
target:
room_id:
action:
txn_id:
remote_room_hosts:
third_party_signed:
ratelimit:
content:
require_consent:
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
prev_event_ids: The event IDs to use as the prev events
auth_event_ids:
The event ids to use as the auth_events for the new event.
Should normally be left as None, which will cause them to be calculated
based on the room state at the prev_events.
Returns:
A tuple of the new event ID and stream ID.
"""
content_specified = bool(content)
if content is None:
content = {}
else:
# We do a copy here as we potentially change some keys
# later on.
content = dict(content)
# allow the server notices mxid to set room-level profile
is_requester_server_notices_user = (
self._server_notices_mxid is not None
and requester.user.to_string() == self._server_notices_mxid
)
if (
not self.allow_per_room_profiles and not is_requester_server_notices_user
) or requester.shadow_banned:
# Strip profile data, knowing that new profile data will be added to the
# event's content in event_creation_handler.create_event() using the target's
# global profile.
content.pop("displayname", None)
content.pop("avatar_url", None)
if len(content.get("displayname") or "") > MAX_DISPLAYNAME_LEN:
raise SynapseError(
400,
f"Displayname is too long (max {MAX_DISPLAYNAME_LEN})",
errcode=Codes.BAD_JSON,
)
if len(content.get("avatar_url") or "") > MAX_AVATAR_URL_LEN:
raise SynapseError(
400,
f"Avatar URL is too long (max {MAX_AVATAR_URL_LEN})",
errcode=Codes.BAD_JSON,
)
effective_membership_state = action
if action in ["kick", "unban"]:
effective_membership_state = "leave"
# if this is a join with a 3pid signature, we may need to turn a 3pid
# invite into a normal invite before we can handle the join.
if third_party_signed is not None:
await self.federation_handler.exchange_third_party_invite(
third_party_signed["sender"],
target.to_string(),
room_id,
third_party_signed,
)
if not remote_room_hosts:
remote_room_hosts = []
if effective_membership_state not in ("leave", "ban"):
is_blocked = await self.store.is_room_blocked(room_id)
if is_blocked:
raise SynapseError(403, "This room has been blocked on this server")
if effective_membership_state == Membership.INVITE:
target_id = target.to_string()
if ratelimit:
await self.ratelimit_invite(requester, room_id, target_id)
# block any attempts to invite the server notices mxid
if target_id == self._server_notices_mxid:
raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user")
block_invite = False
if (
self._server_notices_mxid is not None
and requester.user.to_string() == self._server_notices_mxid
):
# allow the server notices mxid to send invites
is_requester_admin = True
else:
is_requester_admin = await self.auth.is_server_admin(requester.user)
if not is_requester_admin:
if self.config.block_non_admin_invites:
logger.info(
"Blocking invite: user is not admin and non-admin "
"invites disabled"
)
block_invite = True
if not await self.spam_checker.user_may_invite(
requester.user.to_string(), target_id, room_id
):
logger.info("Blocking invite due to spam checker")
block_invite = True
if block_invite:
raise SynapseError(403, "Invites have been disabled on this server")
if prev_event_ids:
return await self._local_membership_update(
requester=requester,
target=target,
room_id=room_id,
membership=effective_membership_state,
txn_id=txn_id,
ratelimit=ratelimit,
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
content=content,
require_consent=require_consent,
outlier=outlier,
)
latest_event_ids = await self.store.get_prev_events_for_room(room_id)
current_state_ids = await self.state_handler.get_current_state_ids(
room_id, latest_event_ids=latest_event_ids
)
# TODO: Refactor into dictionary of explicitly allowed transitions
# between old and new state, with specific error messages for some
# transitions and generic otherwise
old_state_id = current_state_ids.get((EventTypes.Member, target.to_string()))
if old_state_id:
old_state = await self.store.get_event(old_state_id, allow_none=True)
old_membership = old_state.content.get("membership") if old_state else None
if action == "unban" and old_membership != "ban":
raise SynapseError(
403,
"Cannot unban user who was not banned"
" (membership=%s)" % old_membership,
errcode=Codes.BAD_STATE,
)
if old_membership == "ban" and action not in ["ban", "unban", "leave"]:
raise SynapseError(
403,
"Cannot %s user who was banned" % (action,),
errcode=Codes.BAD_STATE,
)
if old_state:
same_content = content == old_state.content
same_membership = old_membership == effective_membership_state
same_sender = requester.user.to_string() == old_state.sender
if same_sender and same_membership and same_content:
# duplicate event.
# we know it was persisted, so must have a stream ordering.
assert old_state.internal_metadata.stream_ordering
return (
old_state.event_id,
old_state.internal_metadata.stream_ordering,
)
if old_membership in ["ban", "leave"] and action == "kick":
raise AuthError(403, "The target user is not in the room")
# we don't allow people to reject invites to the server notice
# room, but they can leave it once they are joined.
if (
old_membership == Membership.INVITE
and effective_membership_state == Membership.LEAVE
):
is_blocked = await self._is_server_notice_room(room_id)
if is_blocked:
raise SynapseError(
HTTPStatus.FORBIDDEN,
"You cannot reject this invite",
errcode=Codes.CANNOT_LEAVE_SERVER_NOTICE_ROOM,
)
else:
if action == "kick":
raise AuthError(403, "The target user is not in the room")
is_host_in_room = await self._is_host_in_room(current_state_ids)
if effective_membership_state == Membership.JOIN:
if requester.is_guest:
guest_can_join = await self._can_guest_join(current_state_ids)
if not guest_can_join:
# This should be an auth check, but guests are a local concept,
# so don't really fit into the general auth process.
raise AuthError(403, "Guest access not allowed")
# Check if a remote join should be performed.
remote_join, remote_room_hosts = await self._should_perform_remote_join(
target.to_string(), room_id, remote_room_hosts, content, is_host_in_room
)
if remote_join:
if ratelimit:
time_now_s = self.clock.time()
(
allowed,
time_allowed,
) = await self._join_rate_limiter_remote.can_do_action(
requester,
)
if not allowed:
raise LimitExceededError(
retry_after_ms=int(1000 * (time_allowed - time_now_s))
)
inviter = await self._get_inviter(target.to_string(), room_id)
if inviter and not self.hs.is_mine(inviter):
remote_room_hosts.append(inviter.domain)
content["membership"] = Membership.JOIN
profile = self.profile_handler
if not content_specified:
content["displayname"] = await profile.get_displayname(target)
content["avatar_url"] = await profile.get_avatar_url(target)
if requester.is_guest:
content["kind"] = "guest"
remote_join_response = await self._remote_join(
requester, remote_room_hosts, room_id, target, content
)
return remote_join_response
elif effective_membership_state == Membership.LEAVE:
if not is_host_in_room:
# Figure out the user's current membership state for the room
(
current_membership_type,
current_membership_event_id,
) = await self.store.get_local_current_membership_for_user_in_room(
target.to_string(), room_id
)
if not current_membership_type or not current_membership_event_id:
logger.info(
"%s sent a leave request to %s, but that is not an active room "
"on this server, or there is no pending invite or knock",
target,
room_id,
)
raise SynapseError(404, "Not a known room")
# perhaps we've been invited
if current_membership_type == Membership.INVITE:
invite = await self.store.get_event(current_membership_event_id)
logger.info(
"%s rejects invite to %s from %s",
target,
room_id,
invite.sender,
)
if not self.hs.is_mine_id(invite.sender):
# send the rejection to the inviter's HS (with fallback to
# local event)
return await self.remote_reject_invite(
invite.event_id,
txn_id,
requester,
content,
)
# the inviter was on our server, but has now left. Carry on
# with the normal rejection codepath, which will also send the
# rejection out to any other servers we believe are still in the room.
# thanks to overzealous cleaning up of event_forward_extremities in
# `delete_old_current_state_events`, it's possible to end up with no
# forward extremities here. If that happens, let's just hang the
# rejection off the invite event.
#
# see: https://github.com/matrix-org/synapse/issues/7139
if len(latest_event_ids) == 0:
latest_event_ids = [invite.event_id]
# or perhaps this is a remote room that a local user has knocked on
elif current_membership_type == Membership.KNOCK:
knock = await self.store.get_event(current_membership_event_id)
return await self.remote_rescind_knock(
knock.event_id, txn_id, requester, content
)
elif effective_membership_state == Membership.KNOCK:
if not is_host_in_room:
# The knock needs to be sent over federation instead
remote_room_hosts.append(get_domain_from_id(room_id))
content["membership"] = Membership.KNOCK
profile = self.profile_handler
if "displayname" not in content:
content["displayname"] = await profile.get_displayname(target)
if "avatar_url" not in content:
content["avatar_url"] = await profile.get_avatar_url(target)
return await self.remote_knock(
remote_room_hosts, room_id, target, content
)
return await self._local_membership_update(
requester=requester,
target=target,
room_id=room_id,
membership=effective_membership_state,
txn_id=txn_id,
ratelimit=ratelimit,
prev_event_ids=latest_event_ids,
auth_event_ids=auth_event_ids,
content=content,
require_consent=require_consent,
outlier=outlier,
)
async def _should_perform_remote_join(
self,
user_id: str,
room_id: str,
remote_room_hosts: List[str],
content: JsonDict,
is_host_in_room: bool,
) -> Tuple[bool, List[str]]:
"""
Check whether the server should do a remote join (as opposed to a local
join) for a user.
Generally a remote join is used if:
* The server is not yet in the room.
* The server is in the room, the room has restricted join rules, the user
is not joined or invited to the room, and the server does not have
another user who is capable of issuing invites.
Args:
user_id: The user joining the room.
room_id: The room being joined.
remote_room_hosts: A list of remote room hosts.
content: The content to use as the event body of the join. This may
be modified.
is_host_in_room: True if the host is in the room.
Returns:
A tuple of:
True if a remote join should be performed. False if the join can be
done locally.
A list of remote room hosts to use. This is an empty list if a
local join is to be done.
"""
# If the host isn't in the room, pass through the prospective hosts.
if not is_host_in_room:
return True, remote_room_hosts
# If the host is in the room, but not one of the authorised hosts
# for restricted join rules, a remote join must be used.
room_version = await self.store.get_room_version(room_id)
current_state_ids = await self.store.get_current_state_ids(room_id)
# If restricted join rules are not being used, a local join can always
# be used.
if not await self.event_auth_handler.has_restricted_join_rules(
current_state_ids, room_version
):
return False, []
# If the user is invited to the room or already joined, the join
# event can always be issued locally.
prev_member_event_id = current_state_ids.get((EventTypes.Member, user_id), None)
prev_member_event = None
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
if prev_member_event.membership in (
Membership.JOIN,
Membership.INVITE,
):
return False, []
# If the local host has a user who can issue invites, then a local
# join can be done.
#
# If not, generate a new list of remote hosts based on which
# can issue invites.
event_map = await self.store.get_events(current_state_ids.values())
current_state = {
state_key: event_map[event_id]
for state_key, event_id in current_state_ids.items()
}
allowed_servers = get_servers_from_users(
get_users_which_can_issue_invite(current_state)
)
# If the local server is not one of allowed servers, then a remote
# join must be done. Return the list of prospective servers based on
# which can issue invites.
if self.hs.hostname not in allowed_servers:
return True, list(allowed_servers)
# Ensure the member should be allowed access via membership in a room.
await self.event_auth_handler.check_restricted_join_rules(
current_state_ids, room_version, user_id, prev_member_event
)
# If this is going to be a local join, additional information must
# be included in the event content in order to efficiently validate
# the event.
content[
"join_authorised_via_users_server"
] = await self.event_auth_handler.get_user_which_could_invite(
room_id,
current_state_ids,
)
return False, []
async def transfer_room_state_on_room_upgrade(
self, old_room_id: str, room_id: str
) -> None:
"""Upon our server becoming aware of an upgraded room, either by upgrading a room
ourselves or joining one, we can transfer over information from the previous room.
Copies user state (tags/push rules) for every local user that was in the old room, as
well as migrating the room directory state.
Args:
old_room_id: The ID of the old room
room_id: The ID of the new room
"""
logger.info("Transferring room state from %s to %s", old_room_id, room_id)
# Find all local users that were in the old room and copy over each user's state
users = await self.store.get_users_in_room(old_room_id)
await self.copy_user_state_on_room_upgrade(old_room_id, room_id, users)
# Add new room to the room directory if the old room was there
# Remove old room from the room directory
old_room = await self.store.get_room(old_room_id)
if old_room and old_room["is_public"]:
await self.store.set_room_is_public(old_room_id, False)
await self.store.set_room_is_public(room_id, True)
# Transfer alias mappings in the room directory
await self.store.update_aliases_for_room(old_room_id, room_id)
# Check if any groups we own contain the predecessor room
local_group_ids = await self.store.get_local_groups_for_room(old_room_id)
for group_id in local_group_ids:
# Add new the new room to those groups
await self.store.add_room_to_group(group_id, room_id, old_room["is_public"])
# Remove the old room from those groups
await self.store.remove_room_from_group(group_id, old_room_id)
async def copy_user_state_on_room_upgrade(
self, old_room_id: str, new_room_id: str, user_ids: Iterable[str]
) -> None:
"""Copy user-specific information when they join a new room when that new room is the
result of a room upgrade
Args:
old_room_id: The ID of upgraded room
new_room_id: The ID of the new room
user_ids: User IDs to copy state for
"""
logger.debug(
"Copying over room tags and push rules from %s to %s for users %s",
old_room_id,
new_room_id,
user_ids,
)
for user_id in user_ids:
try:
# It is an upgraded room. Copy over old tags
await self.copy_room_tags_and_direct_to_room(
old_room_id, new_room_id, user_id
)
# Copy over push rules
await self.store.copy_push_rules_from_room_to_room_for_user(
old_room_id, new_room_id, user_id
)
except Exception:
logger.exception(
"Error copying tags and/or push rules from rooms %s to %s for user %s. "
"Skipping...",
old_room_id,
new_room_id,
user_id,
)
continue
async def send_membership_event(
self,
requester: Optional[Requester],
event: EventBase,
context: EventContext,
ratelimit: bool = True,
) -> None:
"""
Change the membership status of a user in a room.
Args:
requester: The local user who requested the membership
event. If None, certain checks, like whether this homeserver can
act as the sender, will be skipped.
event: The membership event.
context: The context of the event.
ratelimit: Whether to rate limit this request.
Raises:
SynapseError if there was a problem changing the membership.
"""
target_user = UserID.from_string(event.state_key)
room_id = event.room_id
if requester is not None:
sender = UserID.from_string(event.sender)
assert (
sender == requester.user
), "Sender (%s) must be same as requester (%s)" % (sender, requester.user)
assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,)
else:
requester = types.create_requester(target_user)
prev_state_ids = await context.get_prev_state_ids()
if event.membership == Membership.JOIN:
if requester.is_guest:
guest_can_join = await self._can_guest_join(prev_state_ids)
if not guest_can_join:
# This should be an auth check, but guests are a local concept,
# so don't really fit into the general auth process.
raise AuthError(403, "Guest access not allowed")
if event.membership not in (Membership.LEAVE, Membership.BAN):
is_blocked = await self.store.is_room_blocked(room_id)
if is_blocked:
raise SynapseError(403, "This room has been blocked on this server")
event = await self.event_creation_handler.handle_new_client_event(
requester, event, context, extra_users=[target_user], ratelimit=ratelimit
)
prev_member_event_id = prev_state_ids.get(
(EventTypes.Member, event.state_key), None
)
if event.membership == Membership.LEAVE:
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
if prev_member_event.membership == Membership.JOIN:
await self._user_left_room(target_user, room_id)
async def _can_guest_join(self, current_state_ids: StateMap[str]) -> bool:
"""
Returns whether a guest can join a room based on its current state.
"""
guest_access_id = current_state_ids.get((EventTypes.GuestAccess, ""), None)
if not guest_access_id:
return False
guest_access = await self.store.get_event(guest_access_id)
return bool(
guest_access
and guest_access.content
and guest_access.content.get(EventContentFields.GUEST_ACCESS)
== GuestAccess.CAN_JOIN
)
async def kick_guest_users(self, current_state: Iterable[EventBase]) -> None:
"""Kick any local guest users from the room.
This is called when the room state changes from guests allowed to not-allowed.
Params:
current_state: the current state of the room. We will iterate this to look
for guest users to kick.
"""
for member_event in current_state:
try:
if member_event.type != EventTypes.Member:
continue
if not self.hs.is_mine_id(member_event.state_key):
continue
if member_event.content["membership"] not in {
Membership.JOIN,
Membership.INVITE,
}:
continue
if (
"kind" not in member_event.content
or member_event.content["kind"] != "guest"
):
continue
# We make the user choose to leave, rather than have the
# event-sender kick them. This is partially because we don't
# need to worry about power levels, and partially because guest
# users are a concept which doesn't hugely work over federation,
# and having homeservers have their own users leave keeps more
# of that decision-making and control local to the guest-having
# homeserver.
target_user = UserID.from_string(member_event.state_key)
requester = create_requester(
target_user, is_guest=True, authenticated_entity=self._server_name
)
handler = self.hs.get_room_member_handler()
await handler.update_membership(
requester,
target_user,
member_event.room_id,
"leave",
ratelimit=False,
require_consent=False,
)
except Exception as e:
logger.exception("Error kicking guest user: %s" % (e,))
async def lookup_room_alias(
self, room_alias: RoomAlias
) -> Tuple[RoomID, List[str]]:
"""
Get the room ID associated with a room alias.
Args:
room_alias: The alias to look up.
Returns:
A tuple of:
The room ID as a RoomID object.
Hosts likely to be participating in the room ([str]).
Raises:
SynapseError if room alias could not be found.
"""
directory_handler = self.directory_handler
mapping = await directory_handler.get_association(room_alias)
if not mapping:
raise SynapseError(404, "No such room alias")
room_id = mapping["room_id"]
servers = mapping["servers"]
# put the server which owns the alias at the front of the server list.
if room_alias.domain in servers:
servers.remove(room_alias.domain)
servers.insert(0, room_alias.domain)
return RoomID.from_string(room_id), servers
async def _get_inviter(self, user_id: str, room_id: str) -> Optional[UserID]:
invite = await self.store.get_invite_for_local_user_in_room(
user_id=user_id, room_id=room_id
)
if invite:
return UserID.from_string(invite.sender)
return None
async def do_3pid_invite(
self,
room_id: str,
inviter: UserID,
medium: str,
address: str,
id_server: str,
requester: Requester,
txn_id: Optional[str],
id_access_token: Optional[str] = None,
) -> int:
"""Invite a 3PID to a room.
Args:
room_id: The room to invite the 3PID to.
inviter: The user sending the invite.
medium: The 3PID's medium.
address: The 3PID's address.
id_server: The identity server to use.
requester: The user making the request.
txn_id: The transaction ID this is part of, or None if this is not
part of a transaction.
id_access_token: The optional identity server access token.
Returns:
The new stream ID.
Raises:
ShadowBanError if the requester has been shadow-banned.
"""
if self.config.block_non_admin_invites:
is_requester_admin = await self.auth.is_server_admin(requester.user)
if not is_requester_admin:
raise SynapseError(
403, "Invites have been disabled on this server", Codes.FORBIDDEN
)
if requester.shadow_banned:
# We randomly sleep a bit just to annoy the requester.
await self.clock.sleep(random.randint(1, 10))
raise ShadowBanError()
# We need to rate limit *before* we send out any 3PID invites, so we
# can't just rely on the standard ratelimiting of events.
await self.base_handler.ratelimit(requester)
can_invite = await self.third_party_event_rules.check_threepid_can_be_invited(
medium, address, room_id
)
if not can_invite:
raise SynapseError(
403,
"This third-party identifier can not be invited in this room",
Codes.FORBIDDEN,
)
if not self._enable_lookup:
raise SynapseError(
403, "Looking up third-party identifiers is denied from this server"
)
invitee = await self.identity_handler.lookup_3pid(
id_server, medium, address, id_access_token
)
if invitee:
# Note that update_membership with an action of "invite" can raise
# a ShadowBanError, but this was done above already.
_, stream_id = await self.update_membership(
requester, UserID.from_string(invitee), room_id, "invite", txn_id=txn_id
)
else:
stream_id = await self._make_and_store_3pid_invite(
requester,
id_server,
medium,
address,
room_id,
inviter,
txn_id=txn_id,
id_access_token=id_access_token,
)
return stream_id
async def _make_and_store_3pid_invite(
self,
requester: Requester,
id_server: str,
medium: str,
address: str,
room_id: str,
user: UserID,
txn_id: Optional[str],
id_access_token: Optional[str] = None,
) -> int:
room_state = await self.state_handler.get_current_state(room_id)
inviter_display_name = ""
inviter_avatar_url = ""
member_event = room_state.get((EventTypes.Member, user.to_string()))
if member_event:
inviter_display_name = member_event.content.get("displayname", "")
inviter_avatar_url = member_event.content.get("avatar_url", "")
# if user has no display name, default to their MXID
if not inviter_display_name:
inviter_display_name = user.to_string()
canonical_room_alias = ""
canonical_alias_event = room_state.get((EventTypes.CanonicalAlias, ""))
if canonical_alias_event:
canonical_room_alias = canonical_alias_event.content.get("alias", "")
room_name = ""
room_name_event = room_state.get((EventTypes.Name, ""))
if room_name_event:
room_name = room_name_event.content.get("name", "")
room_type = None
room_create_event = room_state.get((EventTypes.Create, ""))
if room_create_event:
room_type = room_create_event.content.get(EventContentFields.ROOM_TYPE)
room_join_rules = ""
join_rules_event = room_state.get((EventTypes.JoinRules, ""))
if join_rules_event:
room_join_rules = join_rules_event.content.get("join_rule", "")
room_avatar_url = ""
room_avatar_event = room_state.get((EventTypes.RoomAvatar, ""))
if room_avatar_event:
room_avatar_url = room_avatar_event.content.get("url", "")
(
token,
public_keys,
fallback_public_key,
display_name,
) = await self.identity_handler.ask_id_server_for_third_party_invite(
requester=requester,
id_server=id_server,
medium=medium,
address=address,
room_id=room_id,
inviter_user_id=user.to_string(),
room_alias=canonical_room_alias,
room_avatar_url=room_avatar_url,
room_join_rules=room_join_rules,
room_name=room_name,
room_type=room_type,
inviter_display_name=inviter_display_name,
inviter_avatar_url=inviter_avatar_url,
id_access_token=id_access_token,
)
(
event,
stream_id,
) = await self.event_creation_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.ThirdPartyInvite,
"content": {
"display_name": display_name,
"public_keys": public_keys,
# For backwards compatibility:
"key_validity_url": fallback_public_key["key_validity_url"],
"public_key": fallback_public_key["public_key"],
},
"room_id": room_id,
"sender": user.to_string(),
"state_key": token,
},
ratelimit=False,
txn_id=txn_id,
)
return stream_id
async def _is_host_in_room(self, current_state_ids: StateMap[str]) -> bool:
# Have we just created the room, and is this about to be the very
# first member event?
create_event_id = current_state_ids.get(("m.room.create", ""))
if len(current_state_ids) == 1 and create_event_id:
# We can only get here if we're in the process of creating the room
return True
for etype, state_key in current_state_ids:
if etype != EventTypes.Member or not self.hs.is_mine_id(state_key):
continue
event_id = current_state_ids[(etype, state_key)]
event = await self.store.get_event(event_id, allow_none=True)
if not event:
continue
if event.membership == Membership.JOIN:
return True
return False
async def _is_server_notice_room(self, room_id: str) -> bool:
if self._server_notices_mxid is None:
return False
user_ids = await self.store.get_users_in_room(room_id)
return self._server_notices_mxid in user_ids
class RoomMemberMasterHandler(RoomMemberHandler):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.distributor = hs.get_distributor()
self.distributor.declare("user_left_room")
async def _is_remote_room_too_complex(
self, room_id: str, remote_room_hosts: List[str]
) -> Optional[bool]:
"""
Check if complexity of a remote room is too great.
Args:
room_id
remote_room_hosts
Returns: bool of whether the complexity is too great, or None
if unable to be fetched
"""
max_complexity = self.hs.config.limit_remote_rooms.complexity
complexity = await self.federation_handler.get_room_complexity(
remote_room_hosts, room_id
)
if complexity:
return complexity["v1"] > max_complexity
return None
async def _is_local_room_too_complex(self, room_id: str) -> bool:
"""
Check if the complexity of a local room is too great.
Args:
room_id: The room ID to check for complexity.
"""
max_complexity = self.hs.config.limit_remote_rooms.complexity
complexity = await self.store.get_room_complexity(room_id)
return complexity["v1"] > max_complexity
async def _remote_join(
self,
requester: Requester,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
content: dict,
) -> Tuple[str, int]:
"""Implements RoomMemberHandler._remote_join"""
# filter ourselves out of remote_room_hosts: do_invite_join ignores it
# and if it is the only entry we'd like to return a 404 rather than a
# 500.
remote_room_hosts = [
host for host in remote_room_hosts if host != self.hs.hostname
]
if len(remote_room_hosts) == 0:
raise SynapseError(404, "No known servers")
check_complexity = self.hs.config.limit_remote_rooms.enabled
if check_complexity and self.hs.config.limit_remote_rooms.admins_can_join:
check_complexity = not await self.auth.is_server_admin(user)
if check_complexity:
# Fetch the room complexity
too_complex = await self._is_remote_room_too_complex(
room_id, remote_room_hosts
)
if too_complex is True:
raise SynapseError(
code=400,
msg=self.hs.config.limit_remote_rooms.complexity_error,
errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
)
# We don't do an auth check if we are doing an invite
# join dance for now, since we're kinda implicitly checking
# that we are allowed to join when we decide whether or not we
# need to do the invite/join dance.
event_id, stream_id = await self.federation_handler.do_invite_join(
remote_room_hosts, room_id, user.to_string(), content
)
# Check the room we just joined wasn't too large, if we didn't fetch the
# complexity of it before.
if check_complexity:
if too_complex is False:
# We checked, and we're under the limit.
return event_id, stream_id
# Check again, but with the local state events
too_complex = await self._is_local_room_too_complex(room_id)
if too_complex is False:
# We're under the limit.
return event_id, stream_id
# The room is too large. Leave.
requester = types.create_requester(
user, authenticated_entity=self._server_name
)
await self.update_membership(
requester=requester, target=user, room_id=room_id, action="leave"
)
raise SynapseError(
code=400,
msg=self.hs.config.limit_remote_rooms.complexity_error,
errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
)
return event_id, stream_id
async def remote_reject_invite(
self,
invite_event_id: str,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""
Rejects an out-of-band invite received from a remote user
Implements RoomMemberHandler.remote_reject_invite
"""
invite_event = await self.store.get_event(invite_event_id)
room_id = invite_event.room_id
target_user = invite_event.state_key
# first of all, try doing a rejection via the inviting server
fed_handler = self.federation_handler
try:
inviter_id = UserID.from_string(invite_event.sender)
event, stream_id = await fed_handler.do_remotely_reject_invite(
[inviter_id.domain], room_id, target_user, content=content
)
return event.event_id, stream_id
except Exception as e:
# if we were unable to reject the invite, we will generate our own
# leave event.
#
# The 'except' clause is very broad, but we need to
# capture everything from DNS failures upwards
#
logger.warning("Failed to reject invite: %s", e)
return await self._generate_local_out_of_band_leave(
invite_event, txn_id, requester, content
)
async def remote_rescind_knock(
self,
knock_event_id: str,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""
Rescinds a local knock made on a remote room
Args:
knock_event_id: The ID of the knock event to rescind.
txn_id: The transaction ID to use.
requester: The originator of the request.
content: The content of the leave event.
Implements RoomMemberHandler.remote_rescind_knock
"""
# TODO: We don't yet support rescinding knocks over federation
# as we don't know which homeserver to send it to. An obvious
# candidate is the remote homeserver we originally knocked through,
# however we don't currently store that information.
# Just rescind the knock locally
knock_event = await self.store.get_event(knock_event_id)
return await self._generate_local_out_of_band_leave(
knock_event, txn_id, requester, content
)
async def _generate_local_out_of_band_leave(
self,
previous_membership_event: EventBase,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""Generate a local leave event for a room
        This can be called after we e.g. fail to reject an invite via a remote server.
It generates an out-of-band membership event locally.
Args:
previous_membership_event: the previous membership event for this user
txn_id: optional transaction ID supplied by the client
requester: user making the request, according to the access token
content: additional content to include in the leave event.
Normally an empty dict.
Returns:
A tuple containing (event_id, stream_id of the leave event)
"""
room_id = previous_membership_event.room_id
target_user = previous_membership_event.state_key
content["membership"] = Membership.LEAVE
event_dict = {
"type": EventTypes.Member,
"room_id": room_id,
"sender": target_user,
"content": content,
"state_key": target_user,
}
        # the auth events for the new event are the same as those of the previous
        # event, plus the previous membership event itself.
#
# the prev_events consist solely of the previous membership event.
prev_event_ids = [previous_membership_event.event_id]
auth_event_ids = previous_membership_event.auth_event_ids() + prev_event_ids
event, context = await self.event_creation_handler.create_event(
requester,
event_dict,
txn_id=txn_id,
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
)
event.internal_metadata.outlier = True
event.internal_metadata.out_of_band_membership = True
result_event = await self.event_creation_handler.handle_new_client_event(
requester,
event,
context,
extra_users=[UserID.from_string(target_user)],
)
# we know it was persisted, so must have a stream ordering
assert result_event.internal_metadata.stream_ordering
return result_event.event_id, result_event.internal_metadata.stream_ordering
async def remote_knock(
self,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
content: dict,
) -> Tuple[str, int]:
"""Sends a knock to a room. Attempts to do so via one remote out of a given list.
Args:
remote_room_hosts: A list of homeservers to try knocking through.
room_id: The ID of the room to knock on.
user: The user to knock on behalf of.
content: The content of the knock event.
Returns:
A tuple of (event ID, stream ID).
"""
# filter ourselves out of remote_room_hosts
remote_room_hosts = [
host for host in remote_room_hosts if host != self.hs.hostname
]
if len(remote_room_hosts) == 0:
raise SynapseError(404, "No known servers")
return await self.federation_handler.do_knock(
remote_room_hosts, room_id, user.to_string(), content=content
)
async def _user_left_room(self, target: UserID, room_id: str) -> None:
"""Implements RoomMemberHandler._user_left_room"""
user_left_room(self.distributor, target, room_id)
async def forget(self, user: UserID, room_id: str) -> None:
user_id = user.to_string()
member = await self.state_handler.get_current_state(
room_id=room_id, event_type=EventTypes.Member, state_key=user_id
)
membership = member.membership if member else None
if membership is not None and membership not in [
Membership.LEAVE,
Membership.BAN,
]:
raise SynapseError(400, "User %s in room %s" % (user_id, room_id))
if membership:
await self.store.forget(user_id, room_id)
def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> List[str]:
"""
Return the list of users which can issue invites.
This is done by exploring the joined users and comparing their power levels
    to the necessary power level to issue an invite.
Args:
auth_events: state in force at this point in the room
Returns:
The users which can issue invites.
"""
invite_level = get_named_level(auth_events, "invite", 0)
users_default_level = get_named_level(auth_events, "users_default", 0)
power_level_event = get_power_level_event(auth_events)
# Custom power-levels for users.
if power_level_event:
users = power_level_event.content.get("users", {})
else:
users = {}
result = []
# Check which members are able to invite by ensuring they're joined and have
# the necessary power level.
for (event_type, state_key), event in auth_events.items():
if event_type != EventTypes.Member:
continue
if event.membership != Membership.JOIN:
continue
# Check if the user has a custom power level.
if users.get(state_key, users_default_level) >= invite_level:
result.append(state_key)
return result
def get_servers_from_users(users: List[str]) -> Set[str]:
"""
Resolve a list of users into their servers.
Args:
users: A list of users.
Returns:
A set of servers.
"""
servers = set()
for user in users:
try:
servers.add(get_domain_from_id(user))
except SynapseError:
pass
return servers
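# Illustrative example: get_servers_from_users(["@alice:example.com", "@bob:matrix.org"])
# returns {"example.com", "matrix.org"}; malformed user IDs are skipped silently.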
|
the-stack_0_14966 | import argparse
import asyncio
from pybecker.becker import Becker
async def main():
"""Main function"""
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--channel', required=True, help='channel')
parser.add_argument('-a', '--action', required=True, help='Command to execute (UP, DOWN, HALT, PAIR)')
parser.add_argument('-d', '--device', required=False, help='Device to use for connectivity')
args = parser.parse_args()
client = Becker()
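    # NOTE: the parsed --device argument is not forwarded to Becker() here;
    # wiring it up would depend on the pybecker API, so it is left untouched.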
if args.action == "UP":
await client.move_up(args.channel)
elif args.action == "HALT":
await client.stop(args.channel)
elif args.action == "DOWN":
await client.move_down(args.channel)
elif args.action == "PAIR":
await client.pair(args.channel)
if __name__ == '__main__':
asyncio.run(main())
|
the-stack_0_14967 | """Adapted from:
@longcw faster_rcnn_pytorch: https://github.com/longcw/faster_rcnn_pytorch
@rbgirshick py-faster-rcnn https://github.com/rbgirshick/py-faster-rcnn
Licensed under The MIT License [see LICENSE for details]
"""
from __future__ import print_function
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
# from data import VOC_ROOT, VOCAnnotationTransform, VOCDetection, BaseTransform
# from data import VOC_CLASSES as labelmap
import torch.utils.data as data
from data import BaseTransform
from data.custom import CUSTOM_CLASSES as labelmap
from data.custom import customDetection, customAnnotationTransform, CUSTOM_CLASSES, CUSTOM_ROOT
# from ssd import build_ssd
from ssd_resnet_101_new import build_ssd
import sys
import os
import time
import argparse
import numpy as np
import pickle
import cv2
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(
description='Single Shot MultiBox Detector Evaluation')
parser.add_argument('--trained_model',
default='weights/CUSTOM.pth', type=str,
help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='eval/', type=str,
help='File path to save results')
parser.add_argument('--confidence_threshold', default=0.01, type=float,
help='Detection confidence threshold')
parser.add_argument('--top_k', default=5, type=int,
help='Further restrict the number of predictions to parse')
parser.add_argument('--cuda', default=True, type=str2bool,
help='Use cuda to train model')
parser.add_argument('--custom_root', default=CUSTOM_ROOT,
help='Location of VOC root directory')
parser.add_argument('--cleanup', default=True, type=str2bool,
help='Cleanup and remove results files following eval')
args = parser.parse_args()
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
if torch.cuda.is_available():
if args.cuda:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if not args.cuda:
print("WARNING: It looks like you have a CUDA device, but aren't using \
CUDA. Run with --cuda for optimal eval speed.")
torch.set_default_tensor_type('torch.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
annopath = os.path.join(args.custom_root, 'shenhe', 'Annotations', '%s.xml')
imgpath = os.path.join(args.custom_root, 'shenhe', 'JPEGImages', '%s.jpg')
imgsetpath = os.path.join(args.custom_root, 'shenhe', 'ImageSets', 'Main', '%s.txt')
devkit_path = args.custom_root + 'shenhe'
dataset_mean = (104, 117, 123)
set_type = 'test'
class Timer(object):
"""A simple timer."""
def __init__(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
def tic(self):
        # using time.time instead of time.clock because time.clock
# does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.average_time = self.total_time / self.calls
if average:
return self.average_time
else:
return self.diff
def parse_rec(filename):
""" Parse a PASCAL VOC xml file """
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text) - 1,
int(bbox.find('ymin').text) - 1,
int(bbox.find('xmax').text) - 1,
int(bbox.find('ymax').text) - 1]
objects.append(obj_struct)
return objects
def get_output_dir(name, phase):
"""Return the directory where experimental artifacts are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
filedir = os.path.join(name, phase)
if not os.path.exists(filedir):
os.makedirs(filedir)
return filedir
def get_voc_results_file_template(image_set, cls):
# VOCdevkit/VOC2007/results/det_test_aeroplane.txt
filename = 'det_' + image_set + '_%s.txt' % (cls)
filedir = os.path.join(devkit_path, 'results')
if not os.path.exists(filedir):
os.makedirs(filedir)
path = os.path.join(filedir, filename)
return path
def write_voc_results_file(all_boxes, dataset):
for cls_ind, cls in enumerate(labelmap):
print('Writing {:s} VOC results file'.format(cls))
filename = get_voc_results_file_template(set_type, cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(dataset.ids):
dets = all_boxes[cls_ind+1][im_ind]
                if len(dets) == 0:
continue
# the VOCdevkit expects 1-based indices
for k in range(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index[1], dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
def do_python_eval(output_dir='output', use_07=True):
cachedir = os.path.join(devkit_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = use_07
print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(labelmap):
filename = get_voc_results_file_template(set_type, cls)
rec, prec, ap = voc_eval(
filename, annopath, imgsetpath % (set_type), cls, cachedir,
ovthresh=0.1, use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
print('Mean AP = {:.4f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print('{:.3f}'.format(ap))
print('{:.3f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('--------------------------------------------------------------')
def voc_ap(rec, prec, use_07_metric=True):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:True).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
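# Worked example (hypothetical values): with rec = [0.5, 1.0] and prec = [1.0, 0.5],
# the 11-point method takes the max precision at recall thresholds 0.0, 0.1, ..., 1.0:
# 1.0 at the six thresholds <= 0.5 and 0.5 at the remaining five, so
# ap = (6 * 1.0 + 5 * 0.5) / 11 ~= 0.77.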
def voc_eval(detpath,
annopath,
imagesetfile,
classname,
cachedir,
ovthresh=0.5,
use_07_metric=True):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
    classname: Category name
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default True)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# cachedir caches the annotations in a pickle file
# first load gt
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
cachefile = os.path.join(cachedir, 'annots.pkl')
# read list of images
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
if not os.path.isfile(cachefile):
# load annots
recs = {}
for i, imagename in enumerate(imagenames):
recs[imagename] = parse_rec(annopath % (imagename))
if i % 100 == 0:
print('Reading annotation for {:d}/{:d}'.format(
i + 1, len(imagenames)))
# save
print('Saving cached annotations to {:s}'.format(cachefile))
with open(cachefile, 'wb') as f:
pickle.dump(recs, f)
else:
# load
with open(cachefile, 'rb') as f:
recs = pickle.load(f)
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
# read dets
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
    if any(lines):
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
inters = iw * ih
uni = ((bb[2] - bb[0]) * (bb[3] - bb[1]) +
(BBGT[:, 2] - BBGT[:, 0]) *
(BBGT[:, 3] - BBGT[:, 1]) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
else:
rec = -1.
prec = -1.
ap = -1.
return rec, prec, ap
def test_net(save_folder, net, cuda, dataset, transform, top_k,
im_size=300, thresh=0.05):
num_images = len(dataset)
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in range(num_images)]
for _ in range(len(labelmap)+1)]
# timers
_t = {'im_detect': Timer(), 'misc': Timer()}
output_dir = get_output_dir('ssd300_120000', set_type)
det_file = os.path.join(output_dir, 'detections.pkl')
for i in range(num_images):
im, gt, h, w = dataset.pull_item(i)
x = Variable(im.unsqueeze(0))
if args.cuda:
x = x.cuda()
_t['im_detect'].tic()
detections = net(x).data
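        # detections has shape [batch, num_classes, top_k, 5]: column 0 is the
        # confidence score and columns 1-4 the normalized box, as unpacked below.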
detect_time = _t['im_detect'].toc(average=False)
# skip j = 0, because it's the background class
for j in range(1, detections.size(1)):
dets = detections[0, j, :]
mask = dets[:, 0].gt(0.).expand(5, dets.size(0)).t()
dets = torch.masked_select(dets, mask).view(-1, 5)
if dets.size(0) == 0:
continue
boxes = dets[:, 1:]
boxes[:, 0] *= w
boxes[:, 2] *= w
boxes[:, 1] *= h
boxes[:, 3] *= h
scores = dets[:, 0].cpu().numpy()
cls_dets = np.hstack((boxes.cpu().numpy(),
scores[:, np.newaxis])).astype(np.float32,
copy=False)
all_boxes[j][i] = cls_dets
print('im_detect: {:d}/{:d} {:.3f}s'.format(i + 1,
num_images, detect_time))
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
print('Evaluating detections')
evaluate_detections(all_boxes, output_dir, dataset)
def evaluate_detections(box_list, output_dir, dataset):
write_voc_results_file(box_list, dataset)
do_python_eval(output_dir)
if __name__ == '__main__':
# load net
num_classes = len(labelmap) + 1 # +1 for background
net = build_ssd('test', 300, num_classes) # initialize SSD
net.load_state_dict(torch.load(args.trained_model))
net.eval()
print('Finished loading model!')
# load data
dataset = customDetection(args.custom_root, [('shenhe', set_type)],
BaseTransform(300, dataset_mean),
customAnnotationTransform())
if args.cuda:
net = net.cuda()
cudnn.benchmark = True
# evaluation
test_net(args.save_folder, net, args.cuda, dataset,
BaseTransform(net.size, dataset_mean), args.top_k, 300,
thresh=args.confidence_threshold)
|
the-stack_0_14968 | import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.cnn import constant_init, kaiming_init
from ..builder import BACKBONES
import os
from mmdet.ops.CSPOSAModule import CSPOSAModule
class ConvStride2(nn.Module):
def __init__(self, in_ch, out_ch, kernel_size=3, exp=1, norm_cfg=dict(type='BN', requires_grad=True)):
super(ConvStride2, self).__init__()
# self.conv1x1 = ConvModule(in_ch, out_ch, kernel_size=1, stride=2, padding=0,
# norm_cfg=dict(type='BN', requires_grad=True))
self.conv3x3 = ConvModule(in_ch, out_ch, kernel_size=3, stride=2, padding=1,
norm_cfg=norm_cfg)
def forward(self, x):
# return self.conv1x1(x)+self.conv3x3(x)
return self.conv3x3(x)
class CSPOSAStage(nn.Module):
def __init__(self, in_ch, stage_ch, num_block, kernel_size,
conv_type=dict(type="NormalConv",
info=dict(norm_cfg=dict(type='BN', requires_grad=True))),
conv1x1=True):
        assert isinstance(conv_type, dict), "conv_type must be a dict"
super(CSPOSAStage, self).__init__()
self.Block = nn.Sequential(ConvStride2(in_ch, stage_ch, kernel_size=kernel_size),
CSPOSAModule(stage_ch, num_block, conv_type, kernel_size=kernel_size, conv1x1=conv1x1))
def forward(self, x):
return self.Block(x)
@BACKBONES.register_module()
class CSPOSANet(nn.Module):
def __init__(self,
stem_channels,
stage_channels,
block_per_stage,
conv_type=dict(type="NormalConv",
info=dict(norm_cfg=dict(type='BN', requires_grad=True))),
num_out=5,
kernel_size=3,
conv1x1=True
):
super(CSPOSANet, self).__init__()
if isinstance(kernel_size, int):
kernel_sizes = [kernel_size for _ in range(len(stage_channels))]
        elif isinstance(kernel_size, list):
assert len(kernel_size) == len(stage_channels), \
"if kernel_size is list, len(kernel_size) should == len(stage_channels)"
kernel_sizes = kernel_size
else:
raise TypeError("type of kernel size should be int or list")
assert num_out <= len(stage_channels), 'num output should be less than stage channels!'
conv_info = conv_type["info"]
norm_cfg = conv_info["norm_cfg"]
self.stage_nums = len(stage_channels)
self.stem = ConvModule(3, stem_channels, kernel_size=3, stride=2, padding=1,
norm_cfg=norm_cfg)
        '''default end_stage is the last stage'''
self.start_stage = len(stage_channels)-num_out+1
self.stages = nn.ModuleList()
self.last_stage = len(stage_channels)
in_channel = stem_channels
for num_stages in range(self.stage_nums):
stage = CSPOSAStage(in_channel, stage_channels[num_stages], block_per_stage[num_stages],
kernel_size=kernel_sizes[num_stages], conv_type=conv_type, conv1x1=conv1x1)
in_channel = stage_channels[num_stages]
# stage = OrderedDict()
# for num_layers in range(block_per_stage[num_stages]):
# stage.update({'stage_{}_layer{}'.format(num_stages, num_layers):_OSA_stage(in_channel, stage_channels[num_stages],
# concat_channels[num_stages], layer_per_block[num_stages])})
# in_channel = concat_channels[num_stages]
self.stages.append(stage)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
import torch
assert os.path.isfile(pretrained), "file {} not found.".format(pretrained)
self.load_state_dict(torch.load(pretrained), strict=False)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
tmp = x
x = self.stem(x)
for i in range(self.start_stage):
x = self.stages[i](x)
out = []
for i in range(self.start_stage, len(self.stages)):
out.append(x)
x = self.stages[i](x)
out.append(x)
return out
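# Illustrative usage (all values assumed, not taken from any released config):
#   backbone = CSPOSANet(stem_channels=32,
#                        stage_channels=[32, 64, 128, 256, 512],
#                        block_per_stage=[1, 1, 2, 2, 2],
#                        num_out=5)
#   feats = backbone(images)  # images: (N, 3, H, W) tensor -> list of 5 feature maps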
|
the-stack_0_14969 | import Eva
from collections import defaultdict
from cdlib import AttrNodeClustering
import networkx as nx
from cdlib.utils import convert_graph_formats
from cdlib.algorithms.internal.ILouvain import ML2
__all__ = ["eva", "ilouvain"]
def eva(
g_original: object,
labels: dict,
weight: str = "weight",
resolution: float = 1.0,
alpha: float = 0.5,
) -> AttrNodeClustering:
"""
The Eva algorithm extends the Louvain approach in order to deal with the attributes of the nodes (aka Louvain Extended to Vertex Attributes).
It optimizes - combining them linearly - two quality functions, a structural and a clustering one, namely Newman's modularity and purity, estimated as the product of the frequencies of the most frequent labels carried by the nodes within the communities.
A parameter alpha tunes the importance of the two functions: an high value of alpha favors the clustering criterion instead of the structural one.
**Supported Graph Types**
========== ======== ======== ======== ==============
Undirected Directed Weighted Temporal Node Attribute
========== ======== ======== ======== ==============
Yes No No No Yes
========== ======== ======== ======== ==============
:param g_original: a networkx/igraph object
:param labels: dictionary specifying for each node (key) a dict (value) specifying the name attribute (key) and its value (value)
:param weight: str, optional the key in graph to use as weight. Default to 'weight'
:param resolution: double, optional Will change the size of the communities, default to 1.
:param alpha: float, assumed in [0,1], optional Will tune the importance of modularity and purity criteria, default to 0.5
:return: AttrNodeClustering object
:Example:
>>> from cdlib.algorithms import eva
>>> import networkx as nx
>>> import random
>>> l1 = ['A', 'B', 'C', 'D']
>>> l2 = ["E", "F", "G"]
>>> g_attr = nx.barabasi_albert_graph(100, 5)
>>> labels=dict()
>>> for node in g_attr.nodes():
>>> labels[node]={"l1":random.choice(l1), "l2":random.choice(l2)}
>>> communities = eva(g_attr, labels, alpha=0.8)
:References:
Citraro, S., & Rossetti, G. (2019, December). Eva: Attribute-Aware Network Segmentation. In International Conference on Complex Networks and Their Applications (pp. 141-151). Springer, Cham.
.. note:: Reference implementation: https://github.com/GiulioRossetti/Eva/tree/master/Eva
"""
g = convert_graph_formats(g_original, nx.Graph)
nx.set_node_attributes(g, labels)
coms, coms_labels = Eva.eva_best_partition(
g, weight=weight, resolution=resolution, alpha=alpha
)
# Reshaping the results
coms_to_node = defaultdict(list)
for n, c in coms.items():
coms_to_node[c].append(n)
coms_eva = [list(c) for c in coms_to_node.values()]
return AttrNodeClustering(
coms_eva,
g_original,
"Eva",
coms_labels,
method_parameters={
"weight": weight,
"resolution": resolution,
"alpha": alpha,
},
)
def ilouvain(g_original: object, labels: dict) -> AttrNodeClustering:
"""
The I-Louvain algorithm extends the Louvain approach in order to deal only with the scalar attributes of the nodes.
It optimizes Newman's modularity combined with an entropy measure.
**Supported Graph Types**
========== ======== ======== ======== ==============
Undirected Directed Weighted Temporal Node Attribute
========== ======== ======== ======== ==============
Yes No No No Yes
========== ======== ======== ======== ==============
:param g_original: a networkx/igraph object
:param labels: dictionary specifying for each node (key) a dict (value) specifying the name attribute (key) and its value (value)
:return: AttrNodeClustering object
:Example:
>>> from cdlib.algorithms import ilouvain
>>> import networkx as nx
>>> import random
>>> l1 = [0.1, 0.4, 0.5]
>>> l2 = [34, 3, 112]
>>> g_attr = nx.barabasi_albert_graph(100, 5)
>>> labels=dict()
>>> for node in g_attr.nodes():
>>> labels[node]={"l1":random.choice(l1), "l2":random.choice(l2)}
    >>> communities = ilouvain(g_attr, labels)
:References:
Combe D., Largeron C., Géry M., Egyed-Zsigmond E. "I-Louvain: An Attributed Graph Clustering Method". <https://link.springer.com/chapter/10.1007/978-3-319-24465-5_16> In: Fromont E., De Bie T., van Leeuwen M. (eds) Advances in Intelligent Data Analysis XIV. IDA (2015). Lecture Notes in Computer Science, vol 9385. Springer, Cham
"""
g = convert_graph_formats(g_original, nx.Graph)
nx.set_node_attributes(g, labels)
nid = dict()
for n in g.nodes():
nid[n] = n
algo = ML2(g, labels, nid)
coms = algo.findPartition()
# Reshaping the results
coms_to_node = defaultdict(list)
for n, c in coms.items():
coms_to_node[c].append(n)
coms_ilouv = [list(c) for c in coms_to_node.values()]
return AttrNodeClustering(coms_ilouv, g_original, "ILouvain")
|
the-stack_0_14973 | total = 0
line = input()
while line != "NoMoreMoney":
current = float(line)
if current < 0:
print("Invalid operation!")
break
total += current
print(f"Increase: {current:.2f}")
line = input()
print(f"Total: {total:.2f}")
|
the-stack_0_14974 | import logging
from concurrent.futures import ProcessPoolExecutor
from functools import partial
from typing import Iterable, List
import pandas as pd
from pyarrow import parquet as pq
from feast.constants import DATETIME_COLUMN
from feast.feature_set import FeatureSet
from feast.type_map import (
pa_column_to_proto_column,
pa_column_to_timestamp_proto_column,
)
from feast.types import Field_pb2 as FieldProto
from feast.types.FeatureRow_pb2 import FeatureRow
_logger = logging.getLogger(__name__)
GRPC_CONNECTION_TIMEOUT_DEFAULT = 3 # type: int
GRPC_CONNECTION_TIMEOUT_APPLY = 300 # type: int
FEAST_SERVING_URL_ENV_KEY = "FEAST_SERVING_URL" # type: str
FEAST_CORE_URL_ENV_KEY = "FEAST_CORE_URL" # type: str
BATCH_FEATURE_REQUEST_WAIT_TIME_SECONDS = 300
KAFKA_CHUNK_PRODUCTION_TIMEOUT = 120 # type: int
def _encode_pa_tables(
file: str, feature_set: str, fields: dict, ingestion_id: str, row_group_idx: int
) -> List[bytes]:
"""
    Helper function to encode one row group of a parquet file, read as a
    PyArrow table, into FeatureRows.
    The requested row group is read into a PyArrow table and each of its rows
    is encoded into a serialized FeatureRow protobuf.
    Args:
        file (str):
            Path to the parquet file to encode. The file may contain multiple
            row groups.
feature_set (str):
Feature set reference in the format f"{project}/{name}".
fields (dict[str, enum.Enum.ValueType]):
A mapping of field names to their value types.
ingestion_id (str):
UUID unique to this ingestion job.
row_group_idx(int):
Row group index to read and encode into byte like FeatureRow
protobuf objects.
Returns:
List[bytes]:
List of byte encoded FeatureRows from the parquet file.
"""
pq_file = pq.ParquetFile(file)
# Read parquet file as a PyArrow table
table = pq_file.read_row_group(row_group_idx)
# Add datetime column
datetime_col = pa_column_to_timestamp_proto_column(table.column(DATETIME_COLUMN))
# Preprocess the columns by converting all its values to Proto values
proto_columns = {
field_name: pa_column_to_proto_column(dtype, table.column(field_name))
for field_name, dtype in fields.items()
}
# List to store result
feature_rows: List[bytes] = []
# Loop optimization declaration(s)
field = FieldProto.Field
proto_items = proto_columns.items()
append = feature_rows.append
# Iterate through the rows
for row_idx in range(table.num_rows):
feature_row = FeatureRow(
event_timestamp=datetime_col[row_idx],
feature_set=feature_set,
ingestion_id=ingestion_id,
)
# Loop optimization declaration
ext = feature_row.fields.extend
# Insert field from each column
for k, v in proto_items:
ext([field(name=k, value=v[row_idx])])
# Append FeatureRow in byte string form
append(feature_row.SerializeToString())
return feature_rows
def get_feature_row_chunks(
file: str,
row_groups: List[int],
fs: FeatureSet,
ingestion_id: str,
max_workers: int,
) -> Iterable[List[bytes]]:
"""
Iterator function to encode a PyArrow table read from a parquet file to
FeatureRow(s).
Args:
file (str):
            Path to the parquet file. Only the row groups listed in
            row_groups are read and encoded.
row_groups (List[int]):
Specific row group indexes to be read and transformed in the parquet
file.
fs (feast.feature_set.FeatureSet):
FeatureSet describing parquet files.
ingestion_id (str):
UUID unique to this ingestion job.
max_workers (int):
Maximum number of workers to spawn.
Returns:
Iterable[List[bytes]]:
Iterable list of byte encoded FeatureRow(s).
"""
feature_set = f"{fs.project}/{fs.name}"
field_map = {field.name: field.dtype for field in fs.fields.values()}
func = partial(_encode_pa_tables, file, feature_set, field_map, ingestion_id)
with ProcessPoolExecutor(max_workers) as pool:
for chunk in pool.map(func, row_groups):
yield chunk
return
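# Illustrative usage (names assumed): stream the encoded chunks to a producer, e.g.
#   for chunk in get_feature_row_chunks("data.parquet", [0, 1], feature_set,
#                                       str(uuid.uuid4()), max_workers=4):
#       publish(chunk)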
def validate_dataframe(dataframe: pd.DataFrame, feature_set: FeatureSet):
if "datetime" not in dataframe.columns:
raise ValueError(
            f'Dataframe does not contain column "datetime" in columns {dataframe.columns}'
)
for entity in feature_set.entities:
if entity.name not in dataframe.columns:
raise ValueError(
f"Dataframe does not contain entity {entity.name} in columns {dataframe.columns}"
)
for feature in feature_set.features:
if feature.name not in dataframe.columns:
raise ValueError(
f"Dataframe does not contain feature {feature.name} in columns {dataframe.columns}"
)
|
the-stack_0_14976 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: AMPAS
# Copyright Academy of Motion Picture Arts and Sciences
"""
Defines unit tests for *ACES* configuration.
"""
from __future__ import division
import hashlib
import os
import re
import shutil
import sys
import tempfile
import unittest
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..')))
from aces_ocio.utilities import files_walker
from aces_ocio.generate_config import (
ACES_OCIO_CTL_DIRECTORY_ENVIRON,
generate_config)
__author__ = (
'Haarm-Pieter Duiker, Thomas Mansencal, Stephen Hill, Kevin Wheatley')
__copyright__ = (
'Copyright (C) 2014-2021 Academy of Motion Picture Arts and Sciences')
__license__ = 'Academy of Motion Picture Arts and Sciences License Terms'
__maintainer__ = 'Academy of Motion Picture Arts and Sciences'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = ['REFERENCE_CONFIG_ROOT_DIRECTORY',
'HASH_TEST_PATTERNS',
'UNHASHABLE_TEST_PATTERNS',
'TestACESConfig']
# TODO: Investigate how the current config has been generated to use it for
# tests.
REFERENCE_CONFIG_ROOT_DIRECTORY = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..'))
HASH_TEST_PATTERNS = (r'\.3dl', r'\.lut', r'\.csp')
UNHASHABLE_TEST_PATTERNS = (r'\.icc', r'\.ocio')
class TestACESConfig(unittest.TestCase):
"""
Performs tests on the *ACES* configuration.
"""
def setUp(self):
"""
Initialises common tests attributes.
"""
self.__aces_ocio_ctl_directory = os.environ.get(
ACES_OCIO_CTL_DIRECTORY_ENVIRON, None)
assert self.__aces_ocio_ctl_directory is not None, (
'Undefined "{0}" environment variable!'.format(
ACES_OCIO_CTL_DIRECTORY_ENVIRON))
assert os.path.exists(self.__aces_ocio_ctl_directory) is True, (
'"{0}" directory does not exists!'.format(
self.__aces_ocio_ctl_directory))
self.maxDiff = None
self.__temporary_directory = tempfile.mkdtemp()
def tearDown(self):
"""
Post tests actions.
"""
shutil.rmtree(self.__temporary_directory)
@staticmethod
def directory_hashes(directory,
filters_in=None,
filters_out=None,
flags=0):
"""
Recursively computes the hashes from the file within given directory.
Parameters
----------
directory : str or unicode
Directory to compute the file hashes.
filters_in : array_like
Included patterns.
filters_out : array_like
Excluded patterns.
flags : int
Regex flags.
Returns
-------
dict
Directory file hashes.
"""
hashes = {}
for path in files_walker(directory,
filters_in=filters_in,
filters_out=filters_out,
flags=flags):
with open(path) as file:
digest = hashlib.md5(
                    re.sub(r'\s', '', file.read())).hexdigest()
hashes[path.replace(directory, '')] = digest
return hashes
def test_ACES_config(self):
"""
Performs tests on the *ACES* configuration by computing hashes on the
generated configuration and comparing them to the existing one.
"""
self.assertTrue(generate_config(self.__aces_ocio_ctl_directory,
self.__temporary_directory))
reference_hashes = self.directory_hashes(
REFERENCE_CONFIG_ROOT_DIRECTORY,
HASH_TEST_PATTERNS)
test_hashes = self.directory_hashes(
self.__temporary_directory,
HASH_TEST_PATTERNS)
self.assertDictEqual(reference_hashes, test_hashes)
        # Checking that unhashable files ('.icc', '.ocio') are generated.
        unhashable = lambda x: (
            sorted([file.replace(x, '') for file in
                    files_walker(x, UNHASHABLE_TEST_PATTERNS)]))
        self.assertListEqual(unhashable(REFERENCE_CONFIG_ROOT_DIRECTORY),
                             unhashable(self.__temporary_directory))
if __name__ == '__main__':
unittest.main()
|
the-stack_0_14977 | from plotly.basedatatypes import BaseTraceHierarchyType
import copy
class Titlefont(BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
        *Arial*, *Balto*, *Courier New*, *Droid Sans*, *Droid Serif*,
*Droid Sans Mono*, *Gravitas One*, *Old Standard TT*, *Open
Sans*, *Overpass*, *PT Sans Narrow*, *Raleway*, *Times New
Roman*.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self['family']
@family.setter
def family(self, val):
self['family'] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self['size']
@size.setter
def size(self, val):
self['size'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'splom.marker.colorbar'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
            include *Arial*, *Balto*, *Courier New*, *Droid Sans*,
*Droid Serif*, *Droid Sans Mono*, *Gravitas One*, *Old
Standard TT*, *Open Sans*, *Overpass*, *PT Sans
Narrow*, *Raleway*, *Times New Roman*.
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Titlefont object
Sets this color bar's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly.graph_objs.splom.marker.colorbar.Titlefont
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
            include *Arial*, *Balto*, *Courier New*, *Droid Sans*,
*Droid Serif*, *Droid Sans Mono*, *Gravitas One*, *Old
Standard TT*, *Open Sans*, *Overpass*, *PT Sans
Narrow*, *Raleway*, *Times New Roman*.
size
Returns
-------
Titlefont
"""
super(Titlefont, self).__init__('titlefont')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.splom.marker.colorbar.Titlefont
constructor must be a dict or
an instance of plotly.graph_objs.splom.marker.colorbar.Titlefont"""
)
# Import validators
# -----------------
from plotly.validators.splom.marker.colorbar import (
titlefont as v_titlefont
)
# Initialize validators
# ---------------------
self._validators['color'] = v_titlefont.ColorValidator()
self._validators['family'] = v_titlefont.FamilyValidator()
self._validators['size'] = v_titlefont.SizeValidator()
# Populate data dict with properties
# ----------------------------------
v = arg.pop('color', None)
self.color = color if color is not None else v
v = arg.pop('family', None)
self.family = family if family is not None else v
v = arg.pop('size', None)
self.size = size if size is not None else v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
|
the-stack_0_14978 | import config
import telebot
bot = telebot.TeleBot(config.token)
@bot.message_handler(commands=['start'])
def start_message(message):
keyboard = telebot.types.ReplyKeyboardMarkup(True)
    keyboard.row('Add item', 'Find item')
    bot.send_message(message.chat.id, '''Hi!
I will help you swap something you no longer need for something you really want.
To list an item for exchange, press "Add item". After that, other users' items become available to you.
Press "Find item" and I will send you photos of items available for exchange. If you like an item, press "Exchange"; if not, press "Find item" again.
Pressed "Exchange"? If the item's owner likes something of yours, I will send the contact details to both of you.''', reply_markup=keyboard)
@bot.message_handler(content_types=['text'])
def reply_all_message(message):
    if message.text == 'Add item':
        bot.send_message(message.chat.id, 'Enter the item name')
    elif message.text == 'Find item':
        keyboard = telebot.types.InlineKeyboardMarkup()
        exchange_button = telebot.types.InlineKeyboardButton(text='Exchange', url='https://www.google.com/')
        keyboard.add(exchange_button)
        img = open('sn.jpg', 'rb')
        bot.send_photo(message.chat.id, img, 'Sneakers', reply_markup=keyboard)
    else:
        print(f'Packing item {message.text} into the list of user {message.chat.username}')
if __name__ == '__main__':
    bot.infinity_polling()
|
the-stack_0_14979 | import Backends
import random
import numpy as np
from .theano_helpers import floatX
def create_dropout_masks(route, fname, dimensionality, ks=1000):
"""
    route = directory in which to create the HDF5 file
    fname = filename
    dimensionality = dimensionality of each mask
    ks = number of thousands of masks to create (each iteration writes a batch
         of 1000 masks, so the default ks=1000 yields 1e6 masks)
"""
hdf5_backend = Backends.HDF5(route, fname)
    for i in range(ks):
mask = random.random_binary_mask(
(dimensionality, 1000), np.random.randint(dimensionality, size=1000))
mask = mask.astype(floatX)
hdf5_backend.write([], "masks/%d/masks" % i, mask.T)
del hdf5_backend
def test_dropout_mask_creation():
create_dropout_masks("/tmp", "domask", 5, 2)
if __name__ == "__main__":
test_dropout_mask_creation()
|
the-stack_0_14981 | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from pandas.api.types import is_string_dtype
from pandas.api.types import is_numeric_dtype
import re
import warnings
import multiprocessing as mp
import matplotlib.pyplot as plt
import time
import os
import platform
from .condition_fun import *
from .info_value import *
# converting vector (breaks & special_values) to dataframe
def split_vec_todf(vec):
'''
Create a dataframe based on provided vector.
    Split rows that include '%,%' into multiple rows.
Replace 'missing' by np.nan.
Params
------
vec: list
Returns
------
pandas.DataFrame
returns a dataframe with three columns
    {'bin_chr': original vec, 'rowid': index of vec, 'value': split vec}
'''
if vec is not None:
vec = [str(i) for i in vec]
a = pd.DataFrame({'bin_chr':vec}).assign(rowid=lambda x:x.index)
b = pd.DataFrame([i.split('%,%') for i in vec], index=vec)\
.stack().replace('missing', np.nan) \
.reset_index(name='value')\
.rename(columns={'level_0':'bin_chr'})[['bin_chr','value']]
# return
return pd.merge(a,b,on='bin_chr')
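# example (sketch, with made-up values): split_vec_todf(['26', '35%,%missing'])
# yields one row per '%,%' piece, keeping the original string in 'bin_chr',
# its position in 'rowid', and the exploded piece in 'value'
# (with 'missing' replaced by np.nan).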
def add_missing_spl_val(dtm, breaks, spl_val):
'''
add missing to spl_val if there is nan in dtm.value and
missing is not specified in breaks and spl_val
Params
------
dtm: melt dataframe
breaks: breaks list
    spl_val: special values list
Returns
------
list
returns spl_val list
'''
if dtm.value.isnull().any():
if breaks is None:
if spl_val is None:
spl_val=['missing']
elif any([('missing' in str(i)) for i in spl_val]):
spl_val=spl_val
else:
spl_val=['missing']+spl_val
elif any([('missing' in str(i)) for i in breaks]):
spl_val=spl_val
else:
if spl_val is None:
spl_val=['missing']
elif any([('missing' in str(i)) for i in spl_val]):
spl_val=spl_val
else:
spl_val=['missing']+spl_val
# return
return spl_val
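# example (sketch): dtm_with_nan below is a placeholder for any melt dataframe whose
# 'value' column contains NaN; it is not a name used elsewhere in this module.
# add_missing_spl_val(dtm_with_nan, None, None)    # -> ['missing']
# add_missing_spl_val(dtm_with_nan, None, [2600])  # -> ['missing', 2600]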
# count number of good or bad in y
def n0(x): return sum(x==0)
def n1(x): return sum(x==1)
# split dtm into bin_sv and dtm (without special_values)
def dtm_binning_sv(dtm, breaks, spl_val):
'''
    Split the original dtm (melt dataframe) into
binning_sv (binning of special_values) and
a new dtm (without special_values).
Params
------
dtm: melt dataframe
    spl_val: special values list
Returns
------
list
returns a list with binning_sv and dtm
'''
spl_val = add_missing_spl_val(dtm, breaks, spl_val)
if spl_val is not None:
# special_values from vector to dataframe
sv_df = split_vec_todf(spl_val)
# value
if is_numeric_dtype(dtm['value']):
sv_df['value'] = sv_df['value'].astype(dtm['value'].dtypes)
# sv_df['bin_chr'] = sv_df['bin_chr'].astype(dtm['value'].dtypes).astype(str)
sv_df['bin_chr'] = np.where(
np.isnan(sv_df['value']), sv_df['bin_chr'],
sv_df['value'].astype(dtm['value'].dtypes).astype(str))
# sv_df = sv_df.assign(value = lambda x: x.value.astype(dtm['value'].dtypes))
# dtm_sv & dtm
dtm_sv = pd.merge(dtm.fillna("missing"), sv_df[['value']].fillna("missing"), how='inner', on='value', right_index=True)
dtm = dtm[~dtm.index.isin(dtm_sv.index)].reset_index() if len(dtm_sv.index) < len(dtm.index) else None
# dtm_sv = dtm.query('value in {}'.format(sv_df['value'].tolist()))
# dtm = dtm.query('value not in {}'.format(sv_df['value'].tolist()))
if dtm_sv.shape[0] == 0:
return {'binning_sv':None, 'dtm':dtm}
# binning_sv
binning_sv = pd.merge(
dtm_sv.fillna('missing').groupby(['variable','value'])['y'].agg([n0, n1])\
.reset_index().rename(columns={'n0':'good','n1':'bad'}),
sv_df.fillna('missing'),
on='value'
).groupby(['variable', 'rowid', 'bin_chr']).agg({'bad':sum,'good':sum})\
.reset_index().rename(columns={'bin_chr':'bin'})\
.drop('rowid', axis=1)
else:
binning_sv = None
# return
return {'binning_sv':binning_sv, 'dtm':dtm}
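# example (sketch): dtm follows the melt layout built in woebin() below, i.e.
# pd.DataFrame({'y': dt[y], 'variable': 'x1', 'value': dt['x1']}).
# dtm_binning_sv(dtm, breaks=None, spl_val=[2600]) returns a dict whose 'binning_sv'
# aggregates the rows with value 2600 (plus 'missing' when NaNs are present) and
# whose 'dtm' keeps the remaining rows (None if nothing is left).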
# check empty bins for numeric variable
def check_empty_bins(dtm, binning):
# check empty bins
bin_list = np.unique(dtm.bin.astype(str)).tolist()
if 'nan' in bin_list:
bin_list.remove('nan')
binleft = set([re.match(r'\[(.+),(.+)\)', i).group(1) for i in bin_list]).difference(set(['-inf', 'inf']))
binright = set([re.match(r'\[(.+),(.+)\)', i).group(2) for i in bin_list]).difference(set(['-inf', 'inf']))
if binleft != binright:
bstbrks = sorted(list(map(float, ['-inf'] + list(binright) + ['inf'])))
labels = ['[{},{})'.format(bstbrks[i], bstbrks[i+1]) for i in range(len(bstbrks)-1)]
dtm.loc[:,'bin'] = pd.cut(dtm['value'], bstbrks, right=False, labels=labels)
binning = dtm.groupby(['variable','bin'])['y'].agg([n0, n1])\
.reset_index().rename(columns={'n0':'good','n1':'bad'})
# warnings.warn("The break points are modified into '[{}]'. There are empty bins based on the provided break points.".format(','.join(binright)))
# binning
# dtm['bin'] = dtm['bin'].astype(str)
# return
return binning
# required in woebin2 # return binning if breaks provided
#' @import data.table
def woebin2_breaks(dtm, breaks, spl_val):
'''
get binning if breaks is provided
Params
------
dtm: melt dataframe
breaks: breaks list
    spl_val: special values list
Returns
------
DataFrame
    returns a binning dataframe
'''
# breaks from vector to dataframe
bk_df = split_vec_todf(breaks)
# dtm $ binning_sv
dtm_binsv_list = dtm_binning_sv(dtm, breaks, spl_val)
dtm = dtm_binsv_list['dtm']
binning_sv = dtm_binsv_list['binning_sv']
if dtm is None: return {'binning_sv':binning_sv, 'binning':None}
# binning
if is_numeric_dtype(dtm['value']):
# best breaks
bstbrks = ['-inf'] + list(set(bk_df.value.tolist()).difference(set([np.nan, '-inf', 'inf', 'Inf', '-Inf']))) + ['inf']
bstbrks = sorted(list(map(float, bstbrks)))
# cut
labels = ['[{},{})'.format(bstbrks[i], bstbrks[i+1]) for i in range(len(bstbrks)-1)]
dtm.loc[:,'bin'] = pd.cut(dtm['value'], bstbrks, right=False, labels=labels)
dtm['bin'] = dtm['bin'].astype(str)
binning = dtm.groupby(['variable','bin'])['y'].agg([n0, n1])\
.reset_index().rename(columns={'n0':'good','n1':'bad'})
        # check empty bins for numeric variable
binning = check_empty_bins(dtm, binning)
# sort bin
binning = pd.merge(
binning.assign(value=lambda x: [float(re.search(r"^\[(.*),(.*)\)", i).group(2)) if i != 'nan' else np.nan for i in binning['bin']] ),
bk_df.assign(value=lambda x: x.value.astype(float)),
how='left',on='value'
).sort_values(by="rowid").reset_index(drop=True)
# merge binning and bk_df if nan isin value
if bk_df['value'].isnull().any():
binning = binning.assign(bin=lambda x: [i if i != 'nan' else 'missing' for i in x['bin']])\
.fillna('missing').groupby(['variable','rowid'])\
.agg({'bin':lambda x: '%,%'.join(x), 'good':sum, 'bad':sum})\
.reset_index()
else:
# merge binning with bk_df
binning = pd.merge(
dtm,
bk_df.assign(bin=lambda x: x.bin_chr),
how='left', on='value'
).fillna('missing').groupby(['variable', 'rowid', 'bin'])['y'].agg([n0,n1])\
.rename(columns={'n0':'good','n1':'bad'})\
.reset_index().drop('rowid', axis=1)
# return
return {'binning_sv':binning_sv, 'binning':binning}
# required in woebin2_init_bin # return pretty breakpoints
def pretty(low, high, n):
'''
pretty breakpoints, the same as pretty function in R
Params
------
low: minimal value
    high: maximal value
n: number of intervals
Returns
------
numpy.ndarray
returns a breakpoints array
'''
# nicenumber
def nicenumber(x):
exp = np.trunc(np.log10(abs(x)))
f = abs(x) / 10**exp
if f < 1.5:
nf = 1.
elif f < 3.:
nf = 2.
elif f < 7.:
nf = 5.
else:
nf = 10.
return np.sign(x) * nf * 10.**exp
# pretty breakpoints
d = abs(nicenumber((high-low)/(n-1)))
miny = np.floor(low / d) * d
maxy = np.ceil (high / d) * d
return np.arange(miny, maxy+0.5*d, d)
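# example (sketch): pretty(0, 97, 10) picks a 'nice' step of 10 and should return
# roughly array([0., 10., 20., ..., 100.]), mirroring R's pretty().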
# required in woebin2 # return initial binning
def woebin2_init_bin(dtm, init_count_distr, breaks, spl_val):
'''
initial binning
Params
------
dtm: melt dataframe
    init_count_distr: the minimal percentage in the fine binning process
    breaks: breaks list
    spl_val: special values list
Returns
------
dict
returns a dict with initial binning and special_value binning
'''
# dtm $ binning_sv
dtm_binsv_list = dtm_binning_sv(dtm, breaks, spl_val)
dtm = dtm_binsv_list['dtm']
binning_sv = dtm_binsv_list['binning_sv']
if dtm is None: return {'binning_sv':binning_sv, 'initial_binning':None}
# binning
if is_numeric_dtype(dtm['value']): # numeric variable
xvalue = dtm['value'].astype(float)
# breaks vector & outlier
iq = xvalue.quantile([0.01, 0.25, 0.75, 0.99])
iqr = iq[0.75] - iq[0.25]
if iqr == 0:
prob_down = 0.01
prob_up = 0.99
else:
prob_down = 0.25
prob_up = 0.75
xvalue_rm_outlier = xvalue[(xvalue >= iq[prob_down]-3*iqr) & (xvalue <= iq[prob_up]+3*iqr)]
# number of initial binning
n = np.trunc(1/init_count_distr)
len_uniq_x = len(np.unique(xvalue_rm_outlier))
if len_uniq_x < n: n = len_uniq_x
# initial breaks
brk = np.unique(xvalue_rm_outlier) if len_uniq_x < 10 else pretty(min(xvalue_rm_outlier), max(xvalue_rm_outlier), n)
brk = list(filter(lambda x: x>np.nanmin(xvalue) and x<=np.nanmax(xvalue), brk))
brk = [float('-inf')] + sorted(brk) + [float('inf')]
# initial binning datatable
# cut
labels = ['[{},{})'.format(brk[i], brk[i+1]) for i in range(len(brk)-1)]
dtm.loc[:,'bin'] = pd.cut(dtm['value'], brk, right=False, labels=labels)#.astype(str)
# init_bin
init_bin = dtm.groupby('bin')['y'].agg([n0, n1])\
.reset_index().rename(columns={'n0':'good','n1':'bad'})
        # check empty bins for numeric variable
init_bin = check_empty_bins(dtm, init_bin)
init_bin = init_bin.assign(
variable = dtm['variable'].values[0],
brkp = lambda x: [float(re.match('^\[(.*),.+', i).group(1)) for i in x['bin']],
badprob = lambda x: x['bad']/(x['bad']+x['good'])
)[['variable', 'bin', 'brkp', 'good', 'bad', 'badprob']]
else: # other type variable
# initial binning datatable
init_bin = dtm.groupby('value')['y'].agg([n0,n1])\
.rename(columns={'n0':'good','n1':'bad'})\
.assign(
variable = dtm['variable'].values[0],
badprob = lambda x: x['bad']/(x['bad']+x['good'])
).reset_index()
# order by badprob if is.character
if dtm.value.dtype.name not in ['category', 'bool']:
init_bin = init_bin.sort_values(by='badprob').reset_index()
# add index as brkp column
init_bin = init_bin.assign(brkp = lambda x: x.index)\
[['variable', 'value', 'brkp', 'good', 'bad', 'badprob']]\
.rename(columns={'value':'bin'})
# remove brkp that good == 0 or bad == 0 ------
while len(init_bin.query('(good==0) or (bad==0)')) > 0:
# brkp needs to be removed if good==0 or bad==0
rm_brkp = init_bin.assign(count = lambda x: x['good']+x['bad'])\
.assign(
count_lag = lambda x: x['count'].shift(1).fillna(len(dtm)+1),
count_lead = lambda x: x['count'].shift(-1).fillna(len(dtm)+1)
).assign(merge_tolead = lambda x: x['count_lag'] > x['count_lead'])\
.query('(good==0) or (bad==0)')\
.query('count == count.min()').iloc[0,]
# set brkp to lead's or lag's
shift_period = -1 if rm_brkp['merge_tolead'] else 1
init_bin = init_bin.assign(brkp2 = lambda x: x['brkp'].shift(shift_period))\
.assign(brkp = lambda x:np.where(x['brkp'] == rm_brkp['brkp'], x['brkp2'], x['brkp']))
# groupby brkp
init_bin = init_bin.groupby('brkp').agg({
'variable':lambda x: np.unique(x),
'bin': lambda x: '%,%'.join(x),
'good': sum,
'bad': sum
}).assign(badprob = lambda x: x['bad']/(x['good']+x['bad']))\
.reset_index()
# format init_bin
if is_numeric_dtype(dtm['value']):
init_bin = init_bin\
.assign(bin = lambda x: [re.sub(r'(?<=,).+%,%.+,', '', i) if ('%,%' in i) else i for i in x['bin']])\
.assign(brkp = lambda x: [float(re.match('^\[(.*),.+', i).group(1)) for i in x['bin']])
# return
return {'binning_sv':binning_sv, 'initial_binning':init_bin}
# required in woebin2_tree # add 1 best break for tree-like binning
def woebin2_tree_add_1brkp(dtm, initial_binning, count_distr_limit, bestbreaks=None):
'''
add a breakpoint into provided bestbreaks
Params
------
dtm
initial_binning
count_distr_limit
bestbreaks
Returns
------
DataFrame
a binning dataframe with updated breaks
'''
# dtm removed values in spl_val
# total_iv for all best breaks
def total_iv_all_breaks(initial_binning, bestbreaks, dtm_rows):
# best breaks set
breaks_set = set(initial_binning.brkp).difference(set(list(map(float, ['-inf', 'inf']))))
if bestbreaks is not None: breaks_set = breaks_set.difference(set(bestbreaks))
breaks_set = sorted(breaks_set)
# loop on breaks_set
init_bin_all_breaks = initial_binning.copy(deep=True)
for i in breaks_set:
# best break + i
bestbreaks_i = [float('-inf')]+sorted(bestbreaks+[i] if bestbreaks is not None else [i])+[float('inf')]
# best break datatable
labels = ['[{},{})'.format(bestbreaks_i[i], bestbreaks_i[i+1]) for i in range(len(bestbreaks_i)-1)]
init_bin_all_breaks.loc[:,'bstbin'+str(i)] = pd.cut(init_bin_all_breaks['brkp'], bestbreaks_i, right=False, labels=labels)#.astype(str)
# best break dt
total_iv_all_brks = pd.melt(
init_bin_all_breaks, id_vars=["variable", "good", "bad"], var_name='bstbin',
value_vars=['bstbin'+str(i) for i in breaks_set])\
.groupby(['variable', 'bstbin', 'value'])\
.agg({'good':sum, 'bad':sum}).reset_index()\
.assign(count=lambda x: x['good']+x['bad'])
total_iv_all_brks['count_distr'] = total_iv_all_brks.groupby(['variable', 'bstbin'])\
['count'].apply(lambda x: x/dtm_rows).reset_index(drop=True)
total_iv_all_brks['min_count_distr'] = total_iv_all_brks.groupby(['variable', 'bstbin'])\
['count_distr'].transform(lambda x: min(x))
total_iv_all_brks = total_iv_all_brks\
.assign(bstbin = lambda x: [float(re.sub('^bstbin', '', i)) for i in x['bstbin']] )\
.groupby(['variable','bstbin','min_count_distr'])\
.apply(lambda x: iv_01(x['good'], x['bad'])).reset_index(name='total_iv')
# return
return total_iv_all_brks
# binning add 1best break
def binning_add_1bst(initial_binning, bestbreaks):
if bestbreaks is None:
bestbreaks_inf = [float('-inf'),float('inf')]
else:
if not is_numeric_dtype(dtm['value']):
bestbreaks = [i for i in bestbreaks if int(i) != min(initial_binning.brkp)]
bestbreaks_inf = [float('-inf')]+sorted(bestbreaks)+[float('inf')]
labels = ['[{},{})'.format(bestbreaks_inf[i], bestbreaks_inf[i+1]) for i in range(len(bestbreaks_inf)-1)]
binning_1bst_brk = initial_binning.assign(
bstbin = lambda x: pd.cut(x['brkp'], bestbreaks_inf, right=False, labels=labels)
)
if is_numeric_dtype(dtm['value']):
binning_1bst_brk = binning_1bst_brk.groupby(['variable', 'bstbin'])\
.agg({'good':sum, 'bad':sum}).reset_index().assign(bin=lambda x: x['bstbin'])\
[['bstbin', 'variable', 'bin', 'good', 'bad']]
else:
binning_1bst_brk = binning_1bst_brk.groupby(['variable', 'bstbin'])\
.agg({'good':sum, 'bad':sum, 'bin':lambda x:'%,%'.join(x)}).reset_index()\
[['bstbin', 'variable', 'bin', 'good', 'bad']]
# format
binning_1bst_brk['total_iv'] = iv_01(binning_1bst_brk.good, binning_1bst_brk.bad)
binning_1bst_brk['bstbrkp'] = [float(re.match("^\[(.*),.+", i).group(1)) for i in binning_1bst_brk['bstbin']]
# return
return binning_1bst_brk
# dtm_rows
dtm_rows = len(dtm.index)
# total_iv for all best breaks
total_iv_all_brks = total_iv_all_breaks(initial_binning, bestbreaks, dtm_rows)
# bestbreaks: total_iv == max(total_iv) & min(count_distr) >= count_distr_limit
bstbrk_maxiv = total_iv_all_brks.loc[lambda x: x['min_count_distr'] >= count_distr_limit]
if len(bstbrk_maxiv.index) > 0:
bstbrk_maxiv = bstbrk_maxiv.loc[lambda x: x['total_iv']==max(x['total_iv'])]
bstbrk_maxiv = bstbrk_maxiv['bstbin'].tolist()[0]
else:
bstbrk_maxiv = None
# bestbreaks
if bstbrk_maxiv is not None:
# add 1best break to bestbreaks
bestbreaks = bestbreaks+[bstbrk_maxiv] if bestbreaks is not None else [bstbrk_maxiv]
# binning add 1best break
bin_add_1bst = binning_add_1bst(initial_binning, bestbreaks)
# return
return bin_add_1bst
# required in woebin2 # return tree-like binning
def woebin2_tree(dtm, init_count_distr=0.02, count_distr_limit=0.05,
stop_limit=0.1, bin_num_limit=8, breaks=None, spl_val=None):
'''
binning using tree-like method
Params
------
dtm:
init_count_distr:
count_distr_limit:
stop_limit:
bin_num_limit:
breaks:
spl_val:
Returns
------
dict
returns a dict with initial binning and special_value binning
'''
# initial binning
bin_list = woebin2_init_bin(dtm, init_count_distr=init_count_distr, breaks=breaks, spl_val=spl_val)
initial_binning = bin_list['initial_binning']
binning_sv = bin_list['binning_sv']
if len(initial_binning.index)==1:
return {'binning_sv':binning_sv, 'binning':initial_binning}
# initialize parameters
len_brks = len(initial_binning.index)
bestbreaks = None
IVt1 = IVt2 = 1e-10
IVchg = 1 ## IV gain ratio
step_num = 1
# best breaks from three to n+1 bins
binning_tree = None
while (IVchg >= stop_limit) and (step_num+1 <= min([bin_num_limit, len_brks])):
binning_tree = woebin2_tree_add_1brkp(dtm, initial_binning, count_distr_limit, bestbreaks)
# best breaks
bestbreaks = binning_tree.loc[lambda x: x['bstbrkp'] != float('-inf'), 'bstbrkp'].tolist()
# information value
IVt2 = binning_tree['total_iv'].tolist()[0]
IVchg = IVt2/IVt1-1 ## ratio gain
IVt1 = IVt2
# step_num
step_num = step_num + 1
if binning_tree is None: binning_tree = initial_binning
# return
return {'binning_sv':binning_sv, 'binning':binning_tree}
# examples
# import time
# start = time.time()
# # binning_dict = woebin2_init_bin(dtm, init_count_distr=0.02, breaks=None, spl_val=None)
# # woebin2_tree_add_1brkp(dtm, binning_dict['initial_binning'], count_distr_limit=0.05)
# # woebin2_tree(dtm, binning_dict['initial_binning'], count_distr_limit=0.05)
# end = time.time()
# print(end - start)
# required in woebin2 # return chimerge binning
#' @importFrom stats qchisq
def woebin2_chimerge(dtm, init_count_distr=0.02, count_distr_limit=0.05,
stop_limit=0.1, bin_num_limit=8, breaks=None, spl_val=None):
'''
binning using chimerge method
Params
------
dtm:
init_count_distr:
count_distr_limit:
stop_limit:
bin_num_limit:
breaks:
spl_val:
Returns
------
dict
returns a dict with initial binning and special_value binning
'''
# [chimerge](http://blog.csdn.net/qunxingvip/article/details/50449376)
# [ChiMerge:Discretization of numeric attributs](http://www.aaai.org/Papers/AAAI/1992/AAAI92-019.pdf)
# chisq = function(a11, a12, a21, a22) {
# A = list(a1 = c(a11, a12), a2 = c(a21, a22))
# Adf = do.call(rbind, A)
#
# Edf =
# matrix(rowSums(Adf), ncol = 1) %*%
# matrix(colSums(Adf), nrow = 1) /
# sum(Adf)
#
# sum((Adf-Edf)^2/Edf)
# }
# function to create a chisq column in initial_binning
def add_chisq(initial_binning):
chisq_df = pd.melt(initial_binning,
id_vars=["brkp", "variable", "bin"], value_vars=["good", "bad"],
var_name='goodbad', value_name='a')\
.sort_values(by=['goodbad', 'brkp']).reset_index(drop=True)
###
chisq_df['a_lag'] = chisq_df.groupby('goodbad')['a'].apply(lambda x: x.shift(1))#.reset_index(drop=True)
chisq_df['a_rowsum'] = chisq_df.groupby('brkp')['a'].transform(lambda x: sum(x))#.reset_index(drop=True)
chisq_df['a_lag_rowsum'] = chisq_df.groupby('brkp')['a_lag'].transform(lambda x: sum(x))#.reset_index(drop=True)
###
chisq_df = pd.merge(
chisq_df.assign(a_colsum = lambda df: df.a+df.a_lag),
chisq_df.groupby('brkp').apply(lambda df: sum(df.a+df.a_lag)).reset_index(name='a_sum'))\
.assign(
e = lambda df: df.a_rowsum*df.a_colsum/df.a_sum,
e_lag = lambda df: df.a_lag_rowsum*df.a_colsum/df.a_sum
).assign(
ae = lambda df: (df.a-df.e)**2/df.e + (df.a_lag-df.e_lag)**2/df.e_lag
).groupby('brkp').apply(lambda x: sum(x.ae)).reset_index(name='chisq')
# return
return pd.merge(initial_binning.assign(count = lambda x: x['good']+x['bad']), chisq_df, how='left')
# initial binning
bin_list = woebin2_init_bin(dtm, init_count_distr=init_count_distr, breaks=breaks, spl_val=spl_val)
initial_binning = bin_list['initial_binning']
binning_sv = bin_list['binning_sv']
# return initial binning if its row number equals 1
if len(initial_binning.index)==1:
return {'binning_sv':binning_sv, 'binning':initial_binning}
# dtm_rows
dtm_rows = len(dtm.index)
# chisq limit
from scipy.special import chdtri
chisq_limit = chdtri(1, stop_limit)
# binning with chisq column
binning_chisq = add_chisq(initial_binning)
# param
bin_chisq_min = binning_chisq.chisq.min()
bin_count_distr_min = min(binning_chisq['count']/dtm_rows)
bin_nrow = len(binning_chisq.index)
# remove brkp if chisq < chisq_limit
while bin_chisq_min < chisq_limit or bin_count_distr_min < count_distr_limit or bin_nrow > bin_num_limit:
# brkp needs to be removed
if bin_chisq_min < chisq_limit:
rm_brkp = binning_chisq.assign(merge_tolead = False).sort_values(by=['chisq', 'count']).iloc[0,]
elif bin_count_distr_min < count_distr_limit:
rm_brkp = binning_chisq.assign(
count_distr = lambda x: x['count']/sum(x['count']),
chisq_lead = lambda x: x['chisq'].shift(-1).fillna(float('inf'))
).assign(merge_tolead = lambda x: x['chisq'] > x['chisq_lead'])
# replace merge_tolead as True
rm_brkp.loc[np.isnan(rm_brkp['chisq']), 'merge_tolead']=True
# order select 1st
rm_brkp = rm_brkp.sort_values(by=['count_distr']).iloc[0,]
elif bin_nrow > bin_num_limit:
rm_brkp = binning_chisq.assign(merge_tolead = False).sort_values(by=['chisq', 'count']).iloc[0,]
else:
break
# set brkp to lead's or lag's
shift_period = -1 if rm_brkp['merge_tolead'] else 1
binning_chisq = binning_chisq.assign(brkp2 = lambda x: x['brkp'].shift(shift_period))\
.assign(brkp = lambda x:np.where(x['brkp'] == rm_brkp['brkp'], x['brkp2'], x['brkp']))
# groupby brkp
binning_chisq = binning_chisq.groupby('brkp').agg({
'variable':lambda x:np.unique(x),
'bin': lambda x: '%,%'.join(x),
'good': sum,
'bad': sum
}).assign(badprob = lambda x: x['bad']/(x['good']+x['bad']))\
.reset_index()
# update
## add chisq to new binning dataframe
binning_chisq = add_chisq(binning_chisq)
## param
bin_nrow = len(binning_chisq.index)
if bin_nrow == 1:
break
bin_chisq_min = binning_chisq.chisq.min()
bin_count_distr_min = min(binning_chisq['count']/dtm_rows)
# format init_bin # remove (.+\\)%,%\\[.+,)
if is_numeric_dtype(dtm['value']):
binning_chisq = binning_chisq\
.assign(bin = lambda x: [re.sub(r'(?<=,).+%,%.+,', '', i) if ('%,%' in i) else i for i in x['bin']])\
.assign(brkp = lambda x: [float(re.match('^\[(.*),.+', i).group(1)) for i in x['bin']])
# return
return {'binning_sv':binning_sv, 'binning':binning_chisq}
# required in woebin2 # format binning output
def binning_format(binning):
'''
format binning dataframe
Params
------
binning: with columns of variable, bin, good, bad
Returns
------
DataFrame
binning dataframe with columns of 'variable', 'bin',
'count', 'count_distr', 'good', 'bad', 'badprob', 'woe',
'bin_iv', 'total_iv', 'breaks', 'is_special_values'
'''
binning['count'] = binning['good'] + binning['bad']
binning['count_distr'] = binning['count']/sum(binning['count'])
binning['badprob'] = binning['bad']/binning['count']
# binning = binning.assign(
# count = lambda x: (x['good']+x['bad']),
# count_distr = lambda x: (x['good']+x['bad'])/sum(x['good']+x['bad']),
# badprob = lambda x: x['bad']/(x['good']+x['bad']))
# new columns: woe, iv, breaks, is_sv
binning['woe'] = woe_01(binning['good'],binning['bad'])
binning['bin_iv'] = miv_01(binning['good'],binning['bad'])
binning['total_iv'] = binning['bin_iv'].sum()
# breaks
binning['breaks'] = binning['bin']
if any([r'[' in str(i) for i in binning['bin']]):
def re_extract_all(x):
gp23 = re.match(r"^\[(.*), *(.*)\)((%,%missing)*)", x)
breaks_string = x if gp23 is None else gp23.group(2)+gp23.group(3)
return breaks_string
binning['breaks'] = [re_extract_all(i) for i in binning['bin']]
# is_sv
binning['is_special_values'] = binning['is_sv']
# return
return binning[['variable', 'bin', 'count', 'count_distr', 'good', 'bad', 'badprob', 'woe', 'bin_iv', 'total_iv', 'breaks', 'is_special_values']]
# woebin2
# This function provides woe binning for only two columns (one x and one y) dataframe.
def woebin2(dtm, breaks=None, spl_val=None,
init_count_distr=0.02, count_distr_limit=0.05,
stop_limit=0.1, bin_num_limit=8, method="tree"):
'''
provides woe binning for only two series
Params
------
Returns
------
DataFrame
'''
# binning
if breaks is not None:
# 1.return binning if breaks provided
bin_list = woebin2_breaks(dtm=dtm, breaks=breaks, spl_val=spl_val)
else:
if stop_limit == 'N':
# binning of initial & specialvalues
bin_list = woebin2_init_bin(dtm, init_count_distr=init_count_distr, breaks=breaks, spl_val=spl_val)
else:
if method == 'tree':
# 2.tree-like optimal binning
bin_list = woebin2_tree(
dtm, init_count_distr=init_count_distr, count_distr_limit=count_distr_limit,
stop_limit=stop_limit, bin_num_limit=bin_num_limit, breaks=breaks, spl_val=spl_val)
elif method == "chimerge":
# 2.chimerge optimal binning
bin_list = woebin2_chimerge(
dtm, init_count_distr=init_count_distr, count_distr_limit=count_distr_limit,
stop_limit=stop_limit, bin_num_limit=bin_num_limit, breaks=breaks, spl_val=spl_val)
# rbind binning_sv and binning
binning = pd.concat(bin_list, keys=bin_list.keys()).reset_index()\
.assign(is_sv = lambda x: x.level_0 =='binning_sv')
# return
return binning_format(binning)
def bins_to_breaks(bins, dt, to_string=False, save_string=None):
if isinstance(bins, dict):
bins = pd.concat(bins, ignore_index=True)
# x variables
xs_all = bins['variable'].unique()
# dtypes of variables
vars_class = pd.DataFrame({
'variable': xs_all,
'not_numeric': [not is_numeric_dtype(dt[i]) for i in xs_all]
})
# breakslist of bins
bins_breakslist = bins[~bins['breaks'].isin(["-inf","inf","missing"]) & ~bins['is_special_values']]
bins_breakslist = pd.merge(bins_breakslist[['variable', 'breaks']], vars_class, how='left', on='variable')
bins_breakslist.loc[bins_breakslist['not_numeric'], 'breaks'] = '\''+bins_breakslist.loc[bins_breakslist['not_numeric'], 'breaks']+'\''
bins_breakslist = bins_breakslist.groupby('variable')['breaks'].agg(lambda x: ','.join(x))
if to_string:
bins_breakslist = "breaks_list={\n"+', \n'.join('\''+bins_breakslist.index[i]+'\': ['+bins_breakslist[i]+']' for i in np.arange(len(bins_breakslist)))+"}"
if save_string is not None:
brk_lst_name = '{}_{}.py'.format(save_string, time.strftime('%Y%m%d_%H%M%S', time.localtime(time.time())))
with open(brk_lst_name, 'w') as f:
f.write(bins_breakslist)
print('[INFO] The breaks_list is saved as {}'.format(brk_lst_name))
return
return bins_breakslist
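# example (sketch): given bins returned by woebin() and the original dt,
# bins_to_breaks(bins, dt)                   # -> pandas Series of comma-joined breaks per variable
# bins_to_breaks(bins, dt, to_string=True)   # -> "breaks_list={...}" python snippet
# (passing save_string additionally writes that snippet to a timestamped .py file).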
def woebin(dt, y, x=None,
var_skip=None, breaks_list=None, special_values=None,
stop_limit=0.1, count_distr_limit=0.05, bin_num_limit=8,
# min_perc_fine_bin=0.02, min_perc_coarse_bin=0.05, max_num_bin=8,
positive="bad|1", no_cores=None, print_step=0, method="tree",
ignore_const_cols=True, ignore_datetime_cols=True,
check_cate_num=True, replace_blank=True,
save_breaks_list=None, **kwargs):
'''
WOE Binning
------
`woebin` generates optimal binning for numerical, factor and categorical
variables using methods including tree-like segmentation or chi-square
    merge. woebin can also customize breakpoints if breaks_list or
    special_values is provided.
The default woe is defined as ln(Distr_Bad_i/Distr_Good_i). If you
prefer ln(Distr_Good_i/Distr_Bad_i), please set the argument `positive`
as negative value, such as '0' or 'good'. If there is a zero frequency
    class when calculating woe, the zero will be replaced by 0.99 to make the
woe calculable.
Params
------
dt: A data frame with both x (predictor/feature) and y (response/label) variables.
y: Name of y variable.
x: Name of x variables. Default is None. If x is None,
then all variables except y are counted as x variables.
var_skip: Name of variables that will skip for binning. Defaults to None.
breaks_list: List of break points, default is None.
If it is not None, variable binning will based on the
provided breaks.
special_values: the values specified in special_values
will be in separate bins. Default is None.
count_distr_limit: The minimum percentage of final binning
class number over total. Accepted range: 0.01-0.2; default
is 0.05.
    stop_limit: Stop binning segmentation when the information value
    gain ratio is less than stop_limit, or stop binning merge
    when the minimum chi-square is less than 'qchisq(1-stoplimit, 1)'.
Accepted range: 0-0.5; default is 0.1.
bin_num_limit: Integer. The maximum number of binning.
positive: Value of positive class, default "bad|1".
no_cores: Number of CPU cores for parallel computation.
Defaults None. If no_cores is None, the no_cores will
set as 1 if length of x variables less than 10, and will
set as the number of all CPU cores if the length of x variables
greater than or equal to 10.
    print_step: A non-negative integer. Default is 0. If print_step>0,
    print variable names by each print_step-th iteration.
    If print_step=0 or no_cores>1, no message is printed.
method: Optimal binning method, it should be "tree" or "chimerge".
Default is "tree".
ignore_const_cols: Logical. Ignore constant columns. Defaults to True.
ignore_datetime_cols: Logical. Ignore datetime columns. Defaults to True.
check_cate_num: Logical. Check whether the number of unique values in
categorical columns larger than 50. It might make the binning process slow
if there are too many unique categories. Defaults to True.
replace_blank: Logical. Replace blank values with None. Defaults to True.
save_breaks_list: The file name to save breaks_list. Default is None.
Returns
------
dictionary
Optimal or customized binning dataframe.
Examples
------
import scorecardpy as sc
import pandas as pd
# load data
dat = sc.germancredit()
# Example I
# binning of two variables in germancredit dataset
bins_2var = sc.woebin(dat, y = "creditability",
x = ["credit.amount", "purpose"])
# Example II
# binning of the germancredit dataset
bins_germ = sc.woebin(dat, y = "creditability")
# Example III
# customizing the breakpoints of binning
dat2 = pd.DataFrame({'creditability':['good','bad']}).sample(50, replace=True)
dat_nan = pd.concat([dat, dat2], ignore_index=True)
breaks_list = {
'age.in.years': [26, 35, 37, "Inf%,%missing"],
'housing': ["own", "for free%,%rent"]
}
special_values = {
'credit.amount': [2600, 9960, "6850%,%missing"],
'purpose': ["education", "others%,%missing"]
}
bins_cus_brk = sc.woebin(dat_nan, y="creditability",
x=["age.in.years","credit.amount","housing","purpose"],
breaks_list=breaks_list, special_values=special_values)
'''
# start time
start_time = time.time()
# arguments
## print_info
print_info = kwargs.get('print_info', True)
## init_count_distr
min_perc_fine_bin = kwargs.get('min_perc_fine_bin', None)
init_count_distr = kwargs.get('init_count_distr', min_perc_fine_bin)
if init_count_distr is None: init_count_distr = 0.02
## count_distr_limit
min_perc_coarse_bin = kwargs.get('min_perc_coarse_bin', None)
if min_perc_coarse_bin is not None: count_distr_limit = min_perc_coarse_bin
## bin_num_limit
max_num_bin = kwargs.get('max_num_bin', None)
if max_num_bin is not None: bin_num_limit = max_num_bin
    # print information
if print_info: print('[INFO] creating woe binning ...')
dt = dt.copy(deep=True)
if isinstance(y, str):
y = [y]
if isinstance(x, str) and x is not None:
x = [x]
if x is not None:
dt = dt[y+x]
# check y
dt = check_y(dt, y, positive)
# remove constant columns
if ignore_const_cols: dt = check_const_cols(dt)
# remove date/time col
if ignore_datetime_cols: dt = check_datetime_cols(dt)
# check categorical columns' unique values
if check_cate_num: check_cateCols_uniqueValues(dt, var_skip)
    # replace blank values with na
if replace_blank: dt = rep_blank_na(dt)
# x variable names
xs = x_variable(dt, y, x, var_skip)
xs_len = len(xs)
# print_step
print_step = check_print_step(print_step)
# breaks_list
breaks_list = check_breaks_list(breaks_list, xs)
# special_values
special_values = check_special_values(special_values, xs)
### ###
# stop_limit range
if stop_limit<0 or stop_limit>0.5 or not isinstance(stop_limit, (float, int)):
warnings.warn("Incorrect parameter specification; accepted stop_limit parameter range is 0-0.5. Parameter was set to default (0.1).")
stop_limit = 0.1
# init_count_distr range
if init_count_distr<0.01 or init_count_distr>0.2 or not isinstance(init_count_distr, (float, int)):
warnings.warn("Incorrect parameter specification; accepted init_count_distr parameter range is 0.01-0.2. Parameter was set to default (0.02).")
init_count_distr = 0.02
# count_distr_limit
if count_distr_limit<0.01 or count_distr_limit>0.2 or not isinstance(count_distr_limit, (float, int)):
warnings.warn("Incorrect parameter specification; accepted count_distr_limit parameter range is 0.01-0.2. Parameter was set to default (0.05).")
count_distr_limit = 0.05
# bin_num_limit
if not isinstance(bin_num_limit, (float, int)):
warnings.warn("Incorrect inputs; bin_num_limit should be numeric variable. Parameter was set to default (8).")
bin_num_limit = 8
# method
if method not in ["tree", "chimerge"]:
warnings.warn("Incorrect inputs; method should be tree or chimerge. Parameter was set to default (tree).")
method = "tree"
### ###
# binning for each x variable
# loop on xs
if (no_cores is None) or (no_cores < 1):
all_cores = mp.cpu_count() - 1
no_cores = int(np.ceil(xs_len/5 if xs_len/5 < all_cores else all_cores*0.9))
if platform.system() == 'Windows':
no_cores = 1
# ylist to str
y = y[0]
# binning for variables
if no_cores == 1:
# create empty bins dict
bins = {}
for i in np.arange(xs_len):
x_i = xs[i]
# print(x_i)
# print xs
            if print_step>0 and (i+1) % print_step == 0:
                print(('{:'+str(len(str(xs_len)))+'.0f}/{} {}').format(i+1, xs_len, x_i), flush=True)
# woebining on one variable
bins[x_i] = woebin2(
dtm = pd.DataFrame({'y':dt[y], 'variable':x_i, 'value':dt[x_i]}),
breaks=breaks_list[x_i] if (breaks_list is not None) and (x_i in breaks_list.keys()) else None,
spl_val=special_values[x_i] if (special_values is not None) and (x_i in special_values.keys()) else None,
init_count_distr=init_count_distr,
count_distr_limit=count_distr_limit,
stop_limit=stop_limit,
bin_num_limit=bin_num_limit,
method=method
)
# try catch:
# "The variable '{}' caused the error: '{}'".format(x_i, error-info)
else:
pool = mp.Pool(processes=no_cores)
# arguments
args = zip(
[pd.DataFrame({'y':dt[y], 'variable':x_i, 'value':dt[x_i]}) for x_i in xs],
[breaks_list[i] if (breaks_list is not None) and (i in list(breaks_list.keys())) else None for i in xs],
[special_values[i] if (special_values is not None) and (i in list(special_values.keys())) else None for i in xs],
[init_count_distr]*xs_len, [count_distr_limit]*xs_len,
[stop_limit]*xs_len, [bin_num_limit]*xs_len, [method]*xs_len
)
# bins in dictionary
bins = dict(zip(xs, pool.starmap(woebin2, args)))
pool.close()
# runingtime
runingtime = time.time() - start_time
if runingtime >= 10 and print_info:
# print(time.strftime("%H:%M:%S", time.gmtime(runingtime)))
print('Binning on {} rows and {} columns in {}'.format(dt.shape[0], dt.shape[1], time.strftime("%H:%M:%S", time.gmtime(runingtime))))
if save_breaks_list is not None:
bins_to_breaks(bins, dt, to_string=True, save_string=save_breaks_list)
# return
    return bins
#' @import data.table
def woepoints_ply1(dtx, binx, x_i, woe_points):
'''
Transform original values into woe or porints for one variable.
Params
------
Returns
------
'''
# woe_points: "woe" "points"
# binx = bins.loc[lambda x: x.variable == x_i]
# https://stackoverflow.com/questions/12680754/split-explode-pandas-dataframe-string-entry-to-separate-rows
binx = pd.merge(
binx[['bin']].assign(v1=binx['bin'].str.split('%,%')).explode('v1'),
binx[['bin', woe_points]],
how='left', on='bin'
).rename(columns={'v1':'V1',woe_points:'V2'})
# dtx
## cut numeric variable
if is_numeric_dtype(dtx[x_i]):
is_sv = pd.Series(not bool(re.search(r'\[', str(i))) for i in binx.V1)
binx_sv = binx.loc[is_sv]
binx_other = binx.loc[~is_sv]
# create bin column
breaks_binx_other = np.unique(list(map(float, ['-inf']+[re.match(r'.*\[(.*),.+\).*', str(i)).group(1) for i in binx_other['bin']]+['inf'])))
labels = ['[{},{})'.format(breaks_binx_other[i], breaks_binx_other[i+1]) for i in range(len(breaks_binx_other)-1)]
dtx = dtx.assign(xi_bin = lambda x: pd.cut(x[x_i], breaks_binx_other, right=False, labels=labels))\
.assign(xi_bin = lambda x: [i if (i != i) else str(i) for i in x['xi_bin']])
# dtx.loc[:,'xi_bin'] = pd.cut(dtx[x_i], breaks_binx_other, right=False, labels=labels)
# dtx.loc[:,'xi_bin'] = np.where(pd.isnull(dtx['xi_bin']), dtx['xi_bin'], dtx['xi_bin'].astype(str))
#
mask = dtx[x_i].isin(binx_sv['V1'])
dtx.loc[mask,'xi_bin'] = dtx.loc[mask, x_i].astype(str)
dtx = dtx[['xi_bin']].rename(columns={'xi_bin':x_i})
    ## to character, na to missing
if not is_string_dtype(dtx[x_i]):
dtx.loc[:,x_i] = dtx.loc[:,x_i].astype(str).replace('nan', 'missing')
# dtx.loc[:,x_i] = np.where(pd.isnull(dtx[x_i]), dtx[x_i], dtx[x_i].astype(str))
dtx = dtx.replace(np.nan, 'missing').assign(rowid = dtx.index).sort_values('rowid')
# rename binx
binx.columns = ['bin', x_i, '_'.join([x_i,woe_points])]
# merge
dtx_suffix = pd.merge(dtx, binx, how='left', on=x_i).sort_values('rowid')\
.set_index(dtx.index)[['_'.join([x_i,woe_points])]]
return dtx_suffix
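# example (sketch): woepoints_ply1 is normally driven by woebin_ply() below, e.g.
# woepoints_ply1(dt[['credit.amount']], bins[bins['variable'] == 'credit.amount'],
#                'credit.amount', woe_points='woe')
# returns a one-column dataframe named 'credit.amount_woe', aligned with dt's index.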
def woebin_ply(dt, bins, no_cores=None, print_step=0, replace_blank=True, **kwargs):
'''
WOE Transformation
------
`woebin_ply` converts original input data into woe values
based on the binning information generated from `woebin`.
Params
------
dt: A data frame.
bins: Binning information generated from `woebin`.
no_cores: Number of CPU cores for parallel computation.
Defaults None. If no_cores is None, the no_cores will
set as 1 if length of x variables less than 10, and will
set as the number of all CPU cores if the length of x
variables greater than or equal to 10.
    print_step: A non-negative integer. Default is 0. If
    print_step>0, print variable names by each print_step-th
    iteration. If print_step=0 or no_cores>1, no message is printed.
replace_blank: Logical. Replace blank values with None. Defaults to True.
Returns
-------
DataFrame
a dataframe of woe values for each variables
Examples
-------
import scorecardpy as sc
import pandas as pd
# load data
dat = sc.germancredit()
# Example I
dt = dat[["creditability", "credit.amount", "purpose"]]
# binning for dt
bins = sc.woebin(dt, y = "creditability")
# converting original value to woe
dt_woe = sc.woebin_ply(dt, bins=bins)
# Example II
# binning for germancredit dataset
bins_germancredit = sc.woebin(dat, y="creditability")
# converting the values in germancredit to woe
## bins is a dict
germancredit_woe = sc.woebin_ply(dat, bins=bins_germancredit)
## bins is a dataframe
germancredit_woe = sc.woebin_ply(dat, bins=pd.concat(bins_germancredit))
'''
# start time
start_time = time.time()
## print_info
print_info = kwargs.get('print_info', True)
if print_info: print('[INFO] converting into woe values ...')
# remove date/time col
# dt = rmcol_datetime_unique1(dt)
# replace "" by NA
if replace_blank: dt = rep_blank_na(dt)
# ncol of dt
# if len(dt.index) <= 1: raise Exception("Incorrect inputs; dt should have at least two columns.")
# print_step
print_step = check_print_step(print_step)
# bins # if (is.list(bins)) rbindlist(bins)
if isinstance(bins, dict):
bins = pd.concat(bins, ignore_index=True)
# x variables
xs_bin = bins['variable'].unique()
xs_dt = list(dt.columns)
xs = list(set(xs_bin).intersection(xs_dt))
# length of x variables
xs_len = len(xs)
# initial data set
dat = dt.loc[:,list(set(xs_dt) - set(xs))]
# loop on xs
if (no_cores is None) or (no_cores < 1):
all_cores = mp.cpu_count() - 1
no_cores = int(np.ceil(xs_len/5 if xs_len/5 < all_cores else all_cores*0.9))
if platform.system() == 'Windows':
no_cores = 1
#
if no_cores == 1:
for i in np.arange(xs_len):
x_i = xs[i]
# print xs
# print(x_i)
            if print_step>0 and (i+1) % print_step == 0:
                print(('{:'+str(len(str(xs_len)))+'.0f}/{} {}').format(i+1, xs_len, x_i), flush=True)
#
binx = bins[bins['variable'] == x_i].reset_index()
# bins.loc[lambda x: x.variable == x_i]
# bins.loc[bins['variable'] == x_i] #
# bins.query('variable == \'{}\''.format(x_i))
dtx = dt[[x_i]]
dat = pd.concat([dat, woepoints_ply1(dtx, binx, x_i, woe_points="woe")], axis=1)
else:
pool = mp.Pool(processes=no_cores)
# arguments
args = zip(
[dt[[i]] for i in xs],
[bins[bins['variable'] == i] for i in xs],
[i for i in xs],
["woe"]*xs_len
)
# bins in dictionary
dat_suffix = pool.starmap(woepoints_ply1, args)
dat = pd.concat([dat]+dat_suffix, axis=1)
pool.close()
# runingtime
runingtime = time.time() - start_time
if runingtime >= 10 and print_info:
# print(time.strftime("%H:%M:%S", time.gmtime(runingtime)))
        print('Woe transformation on {} rows and {} columns in {}'.format(dt.shape[0], xs_len, time.strftime("%H:%M:%S", time.gmtime(runingtime))))
return dat
# required in woebin_plot
#' @import data.table ggplot2
def plot_bin(binx, title, show_iv, rot = 0):
'''
plot binning of one variable
Params
------
binx:
title:
show_iv:
Returns
------
matplotlib fig object
'''
# y_right_max
y_right_max = np.ceil(binx['badprob'].max()*10)
if y_right_max % 2 == 1: y_right_max=y_right_max+1
if y_right_max - binx['badprob'].max()*10 <= 0.3: y_right_max = y_right_max+2
y_right_max = y_right_max/10
if y_right_max>1 or y_right_max<=0 or y_right_max is np.nan or y_right_max is None: y_right_max=1
## y_left_max
y_left_max = np.ceil(binx['count_distr'].max()*10)/10
if y_left_max>1 or y_left_max<=0 or y_left_max is np.nan or y_left_max is None: y_left_max=1
# title
title_string = binx.loc[0,'variable']+" (iv:"+str(round(binx.loc[0,'total_iv'],4))+")" if show_iv else binx.loc[0,'variable']
title_string = title+'-'+title_string if title is not None else title_string
# param
ind = np.arange(len(binx.index)) # the x locations for the groups
width = 0.35 # the width of the bars: can also be len(x) sequence
###### plot ######
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
# ax1
p1 = ax1.bar(ind, binx['good_distr'], width, color=(24/254, 192/254, 196/254))
p2 = ax1.bar(ind, binx['bad_distr'], width, bottom=binx['good_distr'], color=(246/254, 115/254, 109/254))
for i in ind:
ax1.text(i, binx.loc[i,'count_distr']*1.02, str(round(binx.loc[i,'count_distr']*100,1))+'%, '+str(binx.loc[i,'count']), ha='center')
# ax2
ax2.plot(ind, binx['badprob'], marker='o', color='blue')
for i in ind:
ax2.text(i, binx.loc[i,'badprob']*1.02, str(round(binx.loc[i,'badprob']*100,1))+'%', color='blue', ha='center')
# settings
# Plot the reference bad rate
bad_rate = [np.sum(binx['bad_distr'].values)/(np.sum(binx['good_distr'])+np.sum(binx['bad_distr'].values))]*len(binx)
ax2.plot(bad_rate, marker = '_', color = 'black', linestyle = "--", linewidth=2.0)
ax2.text(ax2.get_xlim()[1]/2, bad_rate[0] + 0.001 , 'Ref: '+ str(np.round(100*bad_rate[0],2))+ ' %')
ax1.set_ylabel('Bin count distribution')
ax2.set_ylabel('Bad probability', color='blue')
ax1.set_yticks(np.arange(0, y_left_max+0.2, 0.2))
ax1.set_xticklabels(ax1.get_xticklabels(), rotation = rot)
ax2.set_yticks(np.arange(0, y_right_max+0.2, 0.2))
ax2.tick_params(axis='y', colors='blue')
ax2.grid(False)
plt.xticks(ind, binx['bin'])
plt.title(title_string, loc='left')
plt.legend((p2[0], p1[0]), ('bad', 'good'), loc='upper right')
# show plot
plt.show()
return fig
def woebin_plot(bins, x=None, title=None, show_iv=True,orient = 0):
'''
WOE Binning Visualization
------
    `woebin_plot` creates plots of count distribution and bad probability
    for each bin. The binning information is generated by `woebin`.
Params
------
bins: A list or data frame. Binning information generated by `woebin`.
x: Name of x variables. Default is None. If x is None, then all
variables except y are counted as x variables.
title: String added to the plot title. Default is None.
show_iv: Logical. Default is True, which means show information value
in the plot title.
Returns
------
dict
    a dict of matplotlib figure objects
Examples
------
import scorecardpy as sc
import matplotlib.pyplot as plt
# load data
dat = sc.germancredit()
# Example I
dt1 = dat[["creditability", "credit.amount"]]
bins1 = sc.woebin(dt1, y="creditability")
p1 = sc.woebin_plot(bins1)
plt.show(p1)
# Example II
bins = sc.woebin(dat, y="creditability")
plotlist = sc.woebin_plot(bins)
# # save binning plot
# for key,i in plotlist.items():
# plt.show(i)
# plt.savefig(str(key)+'.png')
'''
xs = x
# bins concat
if isinstance(bins, dict):
bins = pd.concat(bins, ignore_index=True)
# good bad distr
def gb_distr(binx):
binx['good_distr'] = binx['good']/sum(binx['count'])
binx['bad_distr'] = binx['bad']/sum(binx['count'])
return binx
bins = bins.groupby('variable').apply(gb_distr)
# x variable names
if xs is None: xs = bins['variable'].unique()
# plot export
plotlist = {}
for i in xs:
binx = bins[bins['variable'] == i].reset_index()
plotlist[i] = plot_bin(binx, title, show_iv ,orient)
return plotlist
# print basic information in woebin_adj
def woebin_adj_print_basic_info(i, xs, bins, dt, bins_breakslist):
'''
    print basic information of woe binning in the adjusting process
Params
------
Returns
------
'''
x_i = xs[i-1]
xs_len = len(xs)
binx = bins.loc[bins['variable']==x_i]
print("--------", str(i)+"/"+str(xs_len), x_i, "--------")
# print(">>> dt["+x_i+"].dtypes: ")
# print(str(dt[x_i].dtypes), '\n')
#
print(">>> dt["+x_i+"].describe(): ")
print(dt[x_i].describe(), '\n')
if len(dt[x_i].unique()) < 10 or not is_numeric_dtype(dt[x_i]):
print(">>> dt["+x_i+"].value_counts(): ")
print(dt[x_i].value_counts(), '\n')
else:
dt[x_i].hist()
plt.title(x_i)
plt.show()
## current breaks
print(">>> Current breaks:")
print(bins_breakslist[x_i], '\n')
## woebin plotting
plt.show(woebin_plot(binx)[x_i])
# plot adjusted binning in woebin_adj
def woebin_adj_break_plot(dt, y, x_i, breaks, stop_limit, sv_i, method):
'''
    update breaks and provide a binning plot
Params
------
Returns
------
'''
if breaks == '':
breaks = None
breaks_list = None if breaks is None else {x_i: eval('['+breaks+']')}
special_values = None if sv_i is None else {x_i: sv_i}
# binx update
bins_adj = woebin(dt[[x_i,y]], y, breaks_list=breaks_list, special_values=special_values, stop_limit = stop_limit, method=method)
## print adjust breaks
breaks_bin = set(bins_adj[x_i]['breaks']) - set(["-inf","inf","missing"])
breaks_bin = ', '.join(breaks_bin) if is_numeric_dtype(dt[x_i]) else ', '.join(['\''+ i+'\'' for i in breaks_bin])
print(">>> Current breaks:")
print(breaks_bin, '\n')
# print bin_adj
plt.show(woebin_plot(bins_adj))
# return breaks
if breaks == '' or breaks is None: breaks = breaks_bin
return breaks
def woebin_adj(dt, y, bins, adj_all_var=False, special_values=None, method="tree", save_breaks_list=None, count_distr_limit=0.05):
'''
WOE Binning Adjustment
------
    `woebin_adj` interactively adjusts the binning breaks.
Params
------
dt: A data frame.
y: Name of y variable.
bins: A list or data frame. Binning information generated from woebin.
    adj_all_var: Logical, whether to adjust all variables (otherwise only
    variables with non-monotonic woe trends are adjusted). Default is False.
special_values: the values specified in special_values will in separate
bins. Default is None.
method: optimal binning method, it should be "tree" or "chimerge".
Default is "tree".
save_breaks_list: The file name to save breaks_list. Default is None.
count_distr_limit: The minimum percentage of final binning
class number over total. Accepted range: 0.01-0.2; default
is 0.05.
Returns
------
dict
dictionary of breaks
Examples
------
import scorecardpy as sc
# load data
dat = sc.germancredit()
# Example I
dt = dat[["creditability", "age.in.years", "credit.amount"]]
bins = sc.woebin(dt, y="creditability")
breaks_adj = sc.woebin_adj(dt, y="creditability", bins=bins)
bins_final = sc.woebin(dt, y="creditability", breaks_list=breaks_adj)
# Example II
binsII = sc.woebin(dat, y="creditability")
breaks_adjII = sc.woebin_adj(dat, "creditability", binsII)
bins_finalII = sc.woebin(dat, y="creditability", breaks_list=breaks_adjII)
'''
# bins concat
if isinstance(bins, dict):
bins = pd.concat(bins, ignore_index=True)
# x variables
xs_all = bins['variable'].unique()
# adjust all variables
if not adj_all_var:
bins2 = bins.loc[~((bins['bin'] == 'missing') & (bins['count_distr'] >= count_distr_limit))].reset_index(drop=True)
bins2['badprob2'] = bins2.groupby('variable').apply(lambda x: x['badprob'].shift(1)).reset_index(drop=True)
bins2 = bins2.dropna(subset=['badprob2']).reset_index(drop=True)
bins2 = bins2.assign(badprob_trend = lambda x: x.badprob >= x.badprob2)
xs_adj = bins2.groupby('variable')['badprob_trend'].nunique()
xs_adj = xs_adj[xs_adj>1].index
else:
xs_adj = xs_all
# length of adjusting variables
xs_len = len(xs_adj)
# special_values
special_values = check_special_values(special_values, xs_adj)
# breakslist of bins
bins_breakslist = bins_to_breaks(bins,dt)
# loop on adjusting variables
if xs_len == 0:
warnings.warn('The binning breaks of all variables are perfect according to default settings.')
breaks_list = "{"+', '.join('\''+bins_breakslist.index[i]+'\': ['+bins_breakslist[i]+']' for i in np.arange(len(bins_breakslist)))+"}"
return breaks_list
# else
def menu(i, xs_len, x_i):
print('>>> Adjust breaks for ({}/{}) {}?'.format(i, xs_len, x_i))
print('1: next \n2: yes \n3: back')
adj_brk = input("Selection: ")
adj_brk = int(adj_brk)
if adj_brk not in [0,1,2,3]:
warnings.warn('Enter an item from the menu, or 0 to exit.')
adj_brk = input("Selection: ")
adj_brk = int(adj_brk)
return adj_brk
# init param
i = 1
breaks_list = None
while i <= xs_len:
breaks = stop_limit = None
# x_i
x_i = xs_adj[i-1]
sv_i = special_values[x_i] if (special_values is not None) and (x_i in special_values.keys()) else None
# if sv_i is not None:
# sv_i = ','.join('\'')
# basic information of x_i variable ------
woebin_adj_print_basic_info(i, xs_adj, bins, dt, bins_breakslist)
# adjusting breaks ------
adj_brk = menu(i, xs_len, x_i)
if adj_brk == 0:
return
while adj_brk == 2:
# modify breaks adj_brk == 2
breaks = input(">>> Enter modified breaks: ")
breaks = re.sub("^[,\.]+|[,\.]+$", "", breaks)
if breaks == 'N':
stop_limit = 'N'
breaks = None
else:
stop_limit = 0.1
try:
breaks = woebin_adj_break_plot(dt, y, x_i, breaks, stop_limit, sv_i, method=method)
except:
pass
# adj breaks again
adj_brk = menu(i, xs_len, x_i)
if adj_brk == 3:
# go back adj_brk == 3
i = i-1 if i>1 else i
else:
# go next adj_brk == 1
if breaks is not None and breaks != '':
bins_breakslist[x_i] = breaks
i += 1
# return
breaks_list = "{"+', '.join('\''+bins_breakslist.index[i]+'\': ['+bins_breakslist[i]+']' for i in np.arange(len(bins_breakslist)))+"}"
if save_breaks_list is not None:
bins_adj = woebin(dt, y, x=bins_breakslist.index, breaks_list=breaks_list)
bins_to_breaks(bins_adj, dt, to_string=True, save_string=save_breaks_list)
return breaks_list
|
the-stack_0_14984 | # -*- coding: utf-8 -*-
from jqdatasdk import auth, query, indicator, get_fundamentals, logout
from zvdata.api import get_data
from zvdata.utils.pd_utils import df_is_not_null
from zvt.api.api import get_finance_factors
from zvt.api.common import to_jq_entity_id, to_jq_report_period
from zvt.domain import FinanceFactor
from zvt.recorders.eastmoney.common import company_type_flag, get_fc, EastmoneyTimestampsDataRecorder, \
call_eastmoney_api, get_from_path_fields
from zvt.settings import JQ_ACCOUNT, JQ_PASSWD
from zvt.utils.pd_utils import index_df
from zvt.utils.time_utils import to_time_str, to_pd_timestamp
class BaseChinaStockFinanceRecorder(EastmoneyTimestampsDataRecorder):
finance_report_type = None
data_type = 1
timestamps_fetching_url = 'https://emh5.eastmoney.com/api/CaiWuFenXi/GetCompanyReportDateList'
timestamp_list_path_fields = ['CompanyReportDateList']
timestamp_path_fields = ['ReportDate']
def __init__(self, entity_type='stock', exchanges=['sh', 'sz'], entity_ids=None, codes=None, batch_size=10,
force_update=False, sleeping_time=5, default_size=2000, one_shot=False,
fix_duplicate_way='add') -> None:
super().__init__(entity_type, exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,
default_size, one_shot, fix_duplicate_way)
auth(JQ_ACCOUNT, JQ_PASSWD)
def init_timestamps(self, entity):
param = {
"color": "w",
"fc": get_fc(entity),
"DataType": self.data_type
}
if self.finance_report_type == 'LiRunBiaoList' or self.finance_report_type == 'XianJinLiuLiangBiaoList':
param['ReportType'] = 1
timestamp_json_list = call_eastmoney_api(url=self.timestamps_fetching_url,
path_fields=self.timestamp_list_path_fields,
param=param)
if self.timestamp_path_fields:
timestamps = [get_from_path_fields(data, self.timestamp_path_fields) for data in timestamp_json_list]
return [to_pd_timestamp(t) for t in timestamps]
def generate_request_param(self, security_item, start, end, size, timestamps):
if len(timestamps) <= 10:
param = {
"color": "w",
"fc": get_fc(security_item),
"corpType": company_type_flag(security_item),
# 0 means get all types
"reportDateType": 0,
"endDate": '',
"latestCount": size
}
else:
param = {
"color": "w",
"fc": get_fc(security_item),
"corpType": company_type_flag(security_item),
# 0 means get all types
"reportDateType": 0,
"endDate": to_time_str(timestamps[10]),
"latestCount": 10
}
if self.finance_report_type == 'LiRunBiaoList' or self.finance_report_type == 'XianJinLiuLiangBiaoList':
param['reportType'] = 1
return param
def generate_path_fields(self, security_item):
comp_type = company_type_flag(security_item)
if comp_type == "3":
return ['{}_YinHang'.format(self.finance_report_type)]
elif comp_type == "2":
return ['{}_BaoXian'.format(self.finance_report_type)]
elif comp_type == "1":
return ['{}_QuanShang'.format(self.finance_report_type)]
elif comp_type == "4":
return ['{}_QiYe'.format(self.finance_report_type)]
def record(self, entity, start, end, size, timestamps):
        # different from the default timestamps handling
param = self.generate_request_param(entity, start, end, size, timestamps)
self.logger.info('request param:{}'.format(param))
return self.api_wrapper.request(url=self.url, param=param, method=self.request_method,
path_fields=self.generate_path_fields(entity))
def get_original_time_field(self):
return 'ReportDate'
def fill_timestamp_with_jq(self, security_item, the_data):
# get report published date from jq
q = query(
indicator.pubDate
).filter(
indicator.code == to_jq_entity_id(security_item),
)
df = get_fundamentals(q, statDate=to_jq_report_period(the_data.report_date))
if not df.empty:
the_data.timestamp = to_pd_timestamp(df['pubDate'][0])
self.logger.info(
'jq fill {} {} timestamp:{} for report_date:{}'.format(self.data_schema, security_item.id,
the_data.timestamp,
the_data.report_date))
self.session.commit()
def on_finish_entity(self, entity):
# fill the timestamp for report published date
the_data_list = get_data(data_schema=self.data_schema,
provider=self.provider,
entity_id=entity.id,
order=self.data_schema.timestamp.asc(),
return_type='domain',
session=self.session,
filters=[self.data_schema.timestamp == self.data_schema.report_date,
self.data_schema.timestamp >= to_pd_timestamp('2005-01-01')])
if the_data_list:
if self.data_schema == FinanceFactor:
for the_data in the_data_list:
self.fill_timestamp_with_jq(entity, the_data)
else:
df = get_finance_factors(entity_id=entity.id,
columns=[FinanceFactor.timestamp, FinanceFactor.report_date, FinanceFactor.id],
filters=[FinanceFactor.timestamp != FinanceFactor.report_date,
FinanceFactor.timestamp >= to_pd_timestamp('2005-01-01'),
FinanceFactor.report_date >= the_data_list[0].report_date,
FinanceFactor.report_date <= the_data_list[-1].report_date, ])
if df_is_not_null(df):
index_df(df, index='report_date')
for the_data in the_data_list:
if (df is not None) and (not df.empty) and the_data.report_date in df.index:
the_data.timestamp = df.at[the_data.report_date, 'timestamp']
self.logger.info(
'db fill {} {} timestamp:{} for report_date:{}'.format(self.data_schema, entity.id,
the_data.timestamp,
the_data.report_date))
self.session.commit()
else:
# self.logger.info(
# 'waiting jq fill {} {} timestamp:{} for report_date:{}'.format(self.data_schema,
# security_item.id,
# the_data.timestamp,
# the_data.report_date))
self.fill_timestamp_with_jq(entity, the_data)
def on_finish(self):
super().on_finish()
logout()
|
the-stack_0_14985 | import numpy as np
import pytest
from devito.logger import info
from devito import norm, configuration
from examples.seismic.viscoacoustic import ViscoacousticWaveSolver
from examples.seismic import demo_model, setup_geometry, seismic_args
def viscoacoustic_setup(shape=(50, 50), spacing=(15.0, 15.0), tn=500., space_order=4,
nbl=40, preset='layers-viscoacoustic', kernel='sls',
time_order=2, **kwargs):
model = demo_model(preset, space_order=space_order, shape=shape, nbl=nbl,
dtype=kwargs.pop('dtype', np.float32), spacing=spacing)
# Source and receiver geometries
geometry = setup_geometry(model, tn)
# Create solver object to provide relevant operators
solver = ViscoacousticWaveSolver(model, geometry, space_order=space_order,
kernel=kernel, time_order=time_order, **kwargs)
return solver
def run(shape=(50, 50), spacing=(20.0, 20.0), tn=1000.0,
space_order=4, nbl=40, autotune=False, preset='layers-viscoacoustic',
kernel='sls', time_order=2, **kwargs):
solver = viscoacoustic_setup(shape=shape, spacing=spacing, nbl=nbl, tn=tn,
space_order=space_order, preset=preset,
kernel=kernel, time_order=time_order, **kwargs)
info("Applying Forward")
rec, p, v, summary = solver.forward(autotune=autotune)
return (summary.gflopss, summary.oi, summary.timings, [rec, p, v])
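# example (sketch): a small 2D run using the defaults above, e.g.
# run(shape=(50, 50), spacing=(20.0, 20.0), tn=1000.0, kernel='sls', time_order=2)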
@pytest.mark.skipif(configuration['language'] == 'openacc', reason="see issue #1560")
@pytest.mark.parametrize('kernel, time_order, normrec, atol', [
('sls', 2, 684.385, 1e-2),
('sls', 1, 18.774, 1e-2),
('ren', 2, 677.673, 1e-2),
('ren', 1, 17.995, 1e-2),
('deng_mcmechan', 2, 673.041, 1e-2),
('deng_mcmechan', 1, 18.488, 1e-2),
])
def test_viscoacoustic(kernel, time_order, normrec, atol):
_, _, _, [rec, _, _] = run(kernel=kernel, time_order=time_order)
assert np.isclose(norm(rec), normrec, atol=atol, rtol=0)
@pytest.mark.skipif(configuration['language'] == 'openacc', reason="see issue #1560")
@pytest.mark.parametrize('ndim', [2, 3])
@pytest.mark.parametrize('kernel', ['sls', 'ren', 'deng_mcmechan'])
@pytest.mark.parametrize('time_order', [1, 2])
def test_viscoacoustic_stability(ndim, kernel, time_order):
shape = tuple([11]*ndim)
spacing = tuple([20]*ndim)
_, _, _, [rec, _, _] = run(shape=shape, spacing=spacing, tn=20000.0, nbl=0,
kernel=kernel, time_order=time_order)
assert np.isfinite(norm(rec))
if __name__ == "__main__":
description = ("Example script for a set of viscoacoustic operators.")
parser = seismic_args(description)
parser.add_argument("-k", dest="kernel", default='sls',
choices=['sls', 'ren', 'deng_mcmechan'],
help="Choice of finite-difference kernel")
parser.add_argument("-to", "--time_order", default=2,
type=int, help="Time order of the equation")
args = parser.parse_args()
# Preset parameters
ndim = args.ndim
shape = args.shape[:args.ndim]
spacing = tuple(ndim * [10.0])
tn = args.tn if args.tn > 0 else (750. if ndim < 3 else 1250.)
preset = 'constant-viscoacoustic' if args.constant else 'layers-viscoacoustic'
run(shape=shape, spacing=spacing, nbl=args.nbl, tn=tn, opt=args.opt,
space_order=args.space_order, autotune=args.autotune, preset=preset,
kernel=args.kernel, time_order=args.time_order)
|
the-stack_0_14987 | import traceback
from urllib.parse import urlparse
import click
import timeago
from metaflowbot.cli import action
from metaflowbot.message_templates.templates import error_message
from metaflowbot.state import MFBState
MAX_ARTIFACT_SIZE = 1000
import json
import requests
def random_joke():
ENDPOINT = r"https://official-joke-api.appspot.com/jokes/programming/random"
data = requests.get(ENDPOINT)
tt = json.loads(data.text)
return tt
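# Illustrative sketch of the response shape: the official-joke-api
# /jokes/programming/random endpoint returns a JSON list containing a single
# joke object, which is why the command below uses random_joke()[0]. The
# payload here is a made-up placeholder that mirrors that shape.
_EXAMPLE_JOKE_RESPONSE = [
    {"id": 1, "type": "programming", "setup": "...", "punchline": "..."}
]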
@action.command(help="Tell me a joke")
@click.option("--create-thread/--no-create-thread", help="Will create a new thread")
@click.pass_context
def joke(ctx, create_thread=False):
obj = ctx.obj
if create_thread:
obj.publish_state(MFBState.message_new_thread(obj.thread))
try:
joke = random_joke()[0]
setup = joke["setup"]
punchline = joke["punchline"]
obj.reply(
f"""
{setup} \n{punchline}
"""
)
except:
traceback.print_exc()
my_traceback = traceback.format_exc()
err_msg = "Sorry, I couldn't find a joke at the moment :meow_dead:"
obj.reply(err_msg, **error_message(my_traceback, message=err_msg))
|
the-stack_0_14988 | """
Network tools to run from the Master
"""
import logging
import socket
import salt.utils.files
import salt.utils.network
import salt.utils.stringutils
log = logging.getLogger(__name__)
def wollist(maclist, bcast="255.255.255.255", destport=9):
"""
Send a "Magic Packet" to wake up a list of Minions.
This list must contain one MAC hardware address per line
CLI Example:
.. code-block:: bash
salt-run network.wollist '/path/to/maclist'
salt-run network.wollist '/path/to/maclist' 255.255.255.255 7
"""
ret = []
try:
with salt.utils.files.fopen(maclist, "r") as ifile:
for mac in ifile:
mac = salt.utils.stringutils.to_unicode(mac).strip()
wol(mac, bcast, destport)
print("Waking up {}".format(mac))
ret.append(mac)
except Exception as err: # pylint: disable=broad-except
__jid_event__.fire_event(
{"error": "Failed to open the MAC file. Error: {}".format(err)}, "progress"
)
return []
return ret
def wol(mac, bcast="255.255.255.255", destport=9):
"""
Send a "Magic Packet" to wake up a Minion
CLI Example:
.. code-block:: bash
salt-run network.wol 08-00-27-13-69-77
salt-run network.wol 080027136977 255.255.255.255 7
salt-run network.wol 08:00:27:13:69:77 255.255.255.255 7
"""
dest = salt.utils.network.mac_str_to_bytes(mac)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.sendto(b"\xff" * 6 + dest * 16, (bcast, int(destport)))
return True
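# Illustrative sketch: a Wake-on-LAN "Magic Packet" is 6 bytes of 0xFF followed
# by the target MAC address repeated 16 times (102 bytes in total), which is
# what wol() sends above. The helper name and sample MAC below are placeholders
# and the function is not used by the runner itself.
def _example_magic_packet(mac="08:00:27:13:69:77"):
    # Convert the MAC string to its 6 raw bytes, accepting ":" or "-" separators
    dest = bytes(int(part, 16) for part in mac.replace("-", ":").split(":"))
    packet = b"\xff" * 6 + dest * 16
    assert len(packet) == 102
    return packet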
def wolmatch(tgt, tgt_type="glob", bcast="255.255.255.255", destport=9):
"""
Send a "Magic Packet" to wake up Minions that are matched in the grains cache
CLI Example:
.. code-block:: bash
salt-run network.wolmatch minion_id
salt-run network.wolmatch 192.168.0.0/16 tgt_type='ipcidr' bcast=255.255.255.255 destport=7
"""
ret = []
minions = __salt__["cache.grains"](tgt, tgt_type)
for minion in minions:
for iface, mac in minions[minion]["hwaddr_interfaces"].items():
if iface == "lo":
continue
mac = mac.strip()
wol(mac, bcast, destport)
log.info("Waking up %s", mac)
ret.append(mac)
return ret
|
the-stack_0_14989 | #!/usr/bin/python
# Copyright 2014 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Unit tests for the optimizer. """
import os
import re
import unittest
import encoder
import encoder_configuration
import optimizer
import test_tools
class DummyCodec(encoder.Codec):
def __init__(self):
super(DummyCodec, self).__init__('dummy')
self.extension = 'fake'
self.option_set = encoder.OptionSet(
encoder.IntegerOption('score', 0, 10),
encoder.Option('another_parameter', ['yes']),
)
def StartEncoder(self, context):
return encoder.Encoder(context,
encoder.OptionValueSet(self.option_set,
"--score=5"))
def Execute(self, parameters, rate, videofile, workdir):
# pylint: disable=W0613
match = re.search(r'--score=(\d+)', parameters.ToString())
if match:
return {'psnr': int(match.group(1)), 'bitrate': 100}
else:
return {'psnr': -100, 'bitrate': 100}
class DummyVideofile(encoder.Videofile):
def __init__(self, filename, clip_time):
super(DummyVideofile, self).__init__(filename)
self.clip_time = clip_time
def ClipTime(self):
return self.clip_time
def Returns1(target_bitrate, result):
"""Score function that returns a constant value."""
# pylint: disable=W0613
return 1.0
def ReturnsClipTime(target_bitrate, result):
# pylint: disable=W0613
return float(result['cliptime'])
class TestOptimizer(unittest.TestCase):
def setUp(self):
self.codec = DummyCodec()
self.file_set = None
self.cache_class = encoder.EncodingMemoryCache
self.score_function = None
self.videofile = DummyVideofile('foofile_640_480_30.yuv', clip_time=1)
self.optimizer = None
def StdOptimizer(self):
# This function is not in setup because some tests
# do not need it.
if not self.optimizer:
self.optimizer = optimizer.Optimizer(self.codec, self.file_set,
cache_class=self.cache_class)
return self.optimizer
def EncoderFromParameterString(self, parameter_string):
return encoder.Encoder(self.optimizer.context,
encoder.OptionValueSet(self.optimizer.context.codec.option_set,
parameter_string))
def testInit(self):
optimizer.Optimizer(self.codec, self.file_set,
cache_class=self.cache_class)
def test_AlternateScorer(self):
my_optimizer = optimizer.Optimizer(self.codec, self.file_set,
cache_class=self.cache_class,
score_function=Returns1)
my_optimizer.BestEncoding(100, self.videofile).Execute().Store()
self.assertAlmostEqual(1,
my_optimizer.Score(my_optimizer.BestEncoding(100, self.videofile)),
places=4)
def test_FirstBestEncodingNoScore(self):
my_optimizer = self.StdOptimizer()
encoding = my_optimizer.BestEncoding(100, self.videofile)
self.assertIsNone(encoding.Result())
def test_BestEncodingOneAlternative(self):
my_optimizer = self.StdOptimizer()
my_optimizer.BestEncoding(100, self.videofile).Store()
encoding = my_optimizer.BestEncoding(100, self.videofile)
self.assertEqual(encoding.videofile, self.videofile)
def test_BestEncodingExecuteGivesScore(self):
my_optimizer = self.StdOptimizer()
my_optimizer.BestEncoding(100, self.videofile).Execute().Store()
self.assertAlmostEqual(5, my_optimizer.Score(
my_optimizer.BestEncoding(100, self.videofile)),
places=4)
def test_BestEncodingOtherSpeedNoScore(self):
my_optimizer = self.StdOptimizer()
my_optimizer.BestEncoding(100, self.videofile).Execute().Store()
self.assertIsNone(my_optimizer.BestEncoding(200, self.videofile).Result())
def test_BestUntriedEncodingReturnsSomething(self):
my_optimizer = self.StdOptimizer()
first_encoding = my_optimizer.BestEncoding(100, self.videofile)
first_encoding.Execute().Store()
other_encoding = my_optimizer.BestUntriedEncoding(100, self.videofile)
self.assertTrue(other_encoding)
self.assertNotEqual(first_encoding.encoder.parameters.ToString(),
other_encoding.encoder.parameters.ToString())
def test_WorksBetterOnSomeOtherClip(self):
my_optimizer = self.StdOptimizer()
videofile2 = DummyVideofile('barfile_640_480_30.yuv', clip_time=1)
# Note - may have to do deterministic generation of these.
encoder1 = self.EncoderFromParameterString('--score=5') # Low score
encoder2 = self.EncoderFromParameterString('--score=10') # High score
# Store 2 scores for the second videofile.
encoding = encoder1.Encoding(100, videofile2)
encoding.Execute().Store()
encoding = encoder2.Encoding(100, videofile2)
encoding.Execute().Store()
# Store 1 score for the first videofile
first_encoding = encoder1.Encoding(100, self.videofile)
first_encoding.Execute().Store()
# pylint: disable=W0212
second_encoding = my_optimizer._WorksBetterOnSomeOtherClip(first_encoding,
100,
self.videofile)
self.assertTrue(second_encoding)
second_encoding.Execute()
self.assertEquals(first_encoding.videofile, second_encoding.videofile)
self.assertAlmostEqual(10, my_optimizer.Score(second_encoding),
places=4)
def test_ShorterParameterListsScoreHigher(self):
my_optimizer = self.StdOptimizer()
encoder1 = self.EncoderFromParameterString('--score=5')
encoder2 = self.EncoderFromParameterString(
'--score=5 --another_parameter=yes')
encoding1 = encoder1.Encoding(100, self.videofile)
encoding1.Execute()
encoding2 = encoder2.Encoding(100, self.videofile)
encoding2.Execute()
self.assertGreater(my_optimizer.Score(encoding1),
my_optimizer.Score(encoding2))
def test_EncodingWithOneLessParameter(self):
my_optimizer = self.StdOptimizer()
my_encoder = self.EncoderFromParameterString('--score=5')
first_encoding = my_encoder.Encoding(100, self.videofile)
# pylint: disable=W0212
next_encoding = my_optimizer._EncodingWithOneLessParameter(first_encoding,
100,
self.videofile,
None)
self.assertTrue(next_encoding)
self.assertEqual(next_encoding.encoder.parameters.ToString(), '')
def test_EncodingGoodOnOtherRate(self):
self.file_set = optimizer.FileAndRateSet(verify_files_present=False)
self.file_set.AddFilesAndRates([self.videofile.filename], [100, 200])
my_optimizer = self.StdOptimizer()
my_encoder = self.EncoderFromParameterString('--score=7')
my_encoder.Encoding(100, self.videofile).Execute().Store()
first_encoder = self.EncoderFromParameterString('--score=8')
first_encoding = first_encoder.Encoding(200, self.videofile)
first_encoding.Execute().Store()
# pylint: disable=W0212
next_encoding = my_optimizer._EncodingGoodOnOtherRate(first_encoding,
200,
self.videofile,
None)
self.assertTrue(next_encoding)
self.assertEqual('--score=7', next_encoding.encoder.parameters.ToString())
def test_BestOverallConfiguration(self):
self.file_set = optimizer.FileAndRateSet(verify_files_present=False)
self.file_set.AddFilesAndRates([self.videofile.filename], [100, 200])
my_optimizer = self.StdOptimizer()
# When there is nothing in the database, None should be returned.
best_encoder = my_optimizer.BestOverallEncoder()
self.assertIsNone(best_encoder)
# Fill in the database with all the files and rates.
my_encoder = self.EncoderFromParameterString('--score=7')
for rate, filename in self.file_set.AllFilesAndRates():
my_encoder.Encoding(rate, encoder.Videofile(filename)).Execute().Store()
best_encoder = my_optimizer.BestOverallEncoder()
self.assertTrue(best_encoder)
self.assertEquals(my_encoder.parameters.ToString(),
best_encoder.parameters.ToString())
# Add an incomplete encode. This should be ignored.
(self.EncoderFromParameterString('--score=9')
.Encoding(100, self.videofile).Execute().Store())
best_encoder = my_optimizer.BestOverallEncoder()
self.assertTrue(best_encoder)
self.assertEquals(my_encoder.parameters.ToString(),
best_encoder.parameters.ToString())
# Complete the set for 'score=9'. This should cause a change.
(self.EncoderFromParameterString('--score=9')
.Encoding(200, self.videofile).Execute().Store())
best_encoder = my_optimizer.BestOverallEncoder()
self.assertTrue(best_encoder)
self.assertEquals('--score=9',
best_encoder.parameters.ToString())
class TestOptimizerWithRealFiles(test_tools.FileUsingCodecTest):
def setUp(self):
self.codec = DummyCodec()
self.file_set = None
self.score_function = None
self.videofile = DummyVideofile('foofile_640_480_30.yuv', clip_time=1)
self.optimizer = None
def EncoderFromParameterString(self, parameter_string):
return encoder.Encoder(self.optimizer.context,
encoder.OptionValueSet(self.optimizer.context.codec.option_set,
parameter_string))
def test_BestOverallConfigurationNotInWorkDirectory(self):
other_dir = os.path.join(encoder_configuration.conf.sysdir(),
'multirepo_test')
os.mkdir(other_dir)
encoder_configuration.conf.override_scorepath_for_test([other_dir])
self.file_set = optimizer.FileAndRateSet(verify_files_present=False)
self.file_set.AddFilesAndRates([self.videofile.filename], [100, 200])
self.optimizer = optimizer.Optimizer(self.codec, self.file_set)
# When there is nothing in the database, None should be returned.
best_encoder = self.optimizer.BestOverallEncoder()
self.assertIsNone(best_encoder)
# Fill in the database with all the files and rates.
other_context = encoder.Context(self.codec, encoder.EncodingDiskCache,
scoredir='multirepo_test')
my_encoder = self.EncoderFromParameterString('--score=7')
other_context.cache.StoreEncoder(my_encoder)
my_encoder.context.cache.StoreEncoder(my_encoder)
for rate, filename in self.file_set.AllFilesAndRates():
my_encoding = my_encoder.Encoding(rate, encoder.Videofile(filename))
my_encoding.Execute()
other_context.cache.StoreEncoding(my_encoding)
# The best encoder should now be from the workdir, but the results are
# all fetched from the searchpath.
best_encoder = self.optimizer.BestOverallEncoder()
self.assertTrue(best_encoder)
self.assertEquals(my_encoder.parameters.ToString(),
best_encoder.parameters.ToString())
one_encoding = best_encoder.Encoding(100, self.videofile)
one_encoding.Recover()
self.assertTrue(one_encoding.Result())
def test_MultipleOptimizers(self):
# Make sure other score directories don't interfere with this test.
encoder_configuration.conf.override_scorepath_for_test([])
os.mkdir(os.path.join(encoder_configuration.conf.sysdir(), 'first_dir'))
os.mkdir(os.path.join(encoder_configuration.conf.sysdir(), 'second_dir'))
one_optimizer = optimizer.Optimizer(self.codec, scoredir='first_dir')
another_optimizer = optimizer.Optimizer(self.codec, scoredir='second_dir')
self.assertNotEqual(one_optimizer.context.cache.workdir,
another_optimizer.context.cache.workdir)
# Storing one encoding's score should not affect the other's.
one_encoding = one_optimizer.BestEncoding(100,
self.videofile)
one_encoding.Execute().Store()
another_encoding = another_optimizer.BestEncoding(100, self.videofile)
self.assertFalse(another_encoding.Result())
another_encoding.Recover()
self.assertFalse(another_encoding.Result())
class TestFileAndRateSet(unittest.TestCase):
def test_OneFileAddedAndReturned(self):
the_set = optimizer.FileAndRateSet(verify_files_present=False)
the_set.AddFilesAndRates(['filename'], [100], 'dirname')
self.assertEqual([(100, 'dirname/filename')], the_set.AllFilesAndRates())
def test_NoDirName(self):
the_set = optimizer.FileAndRateSet(verify_files_present=False)
the_set.AddFilesAndRates(['filename'], [100])
self.assertEqual([(100, 'filename')], the_set.AllFilesAndRates())
def test_OneFileMultipleRates(self):
the_set = optimizer.FileAndRateSet(verify_files_present=False)
the_set.AddFilesAndRates(['filename'], [100, 200], 'dirname')
self.assertEqual(set([(100, 'dirname/filename'),
(200, 'dirname/filename')]),
set(the_set.AllFilesAndRates()))
def test_TwoAddCalls(self):
the_set = optimizer.FileAndRateSet(verify_files_present=False)
the_set.AddFilesAndRates(['filename'], [100, 200], 'dirname')
the_set.AddFilesAndRates(['otherfilename'], [200, 300], 'dirname')
self.assertEqual(set([(100, 'dirname/filename'),
(200, 'dirname/filename'),
(200, 'dirname/otherfilename'),
(300, 'dirname/otherfilename')]),
set(the_set.AllFilesAndRates()))
def test_RatesForFile(self):
the_set = optimizer.FileAndRateSet(verify_files_present=False)
the_set.AddFilesAndRates(['filename'], [100, 200])
the_set.AddFilesAndRates(['otherfilename'], [200, 300])
self.assertEqual(set([100, 200]),
set(the_set.AllRatesForFile('filename')))
class TestFileAndRateSetWithRealFiles(test_tools.FileUsingCodecTest):
def test_AddMissingFile(self):
the_set = optimizer.FileAndRateSet()
the_set.AddFilesAndRates(['nosuchfile'], [100])
self.assertFalse(the_set.AllFilesAndRates())
self.assertFalse(the_set.set_is_complete)
def test_AddPresentFile(self):
the_set = optimizer.FileAndRateSet()
file_name = 'file_1024_768_30.yuv'
test_tools.MakeYuvFileWithOneBlankFrame(file_name)
the_set.AddFilesAndRates([file_name], [100],
basedir=encoder_configuration.conf.workdir())
self.assertTrue(the_set.AllFilesAndRates())
self.assertTrue(the_set.set_is_complete)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_14990 | """StudentHomepage URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from app01 import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.index),
path('login/', views.LoginView.as_view(), name='login'),
path('reg/', views.reg, name='reg'),
path('logout/', views.logout, name='logout'),
path('photo_album/', views.photo_album, name='photo_album'),
path('home/', views.home, name='home'),
path('school/', views.school, name='school'),
path('hobby/', views.Hobby.as_view()),
path('admirer/', views.Admirer.as_view()),
path('reading_matter/', views.ReadingMatter.as_view()),
]
|
the-stack_0_14991 | # This code gives back a JSON file for:
# --> only in the documents where a word appears: how many times the word appears on average
import os
import json
count_file = 0
word_count = 0
word_dic = {}
result_dic = {}
# This is the path of the data folder
path = '/media/neel/Extra/gigaword_eng_5/data/afp_eng/'
# This is the path of the output files (one JSON per input file)
for filename in os.listdir(path):
    # Reset per-document counts so each output JSON covers only this file
    word_dic = {}
    # File count
file_add = ("/media/neel/Extra/gigaword_eng_5/word_per_doc/{}.json".format(str(filename)))
outfile = open(file_add, "w")
count_file += 1
file = open(path+filename,"r")
text = file.read().lower()
# Fetching only <p> </p> tage data
for item in text.split("</p>"):
if "<p>" in item:
temp_lis = []
data = str(item [ item.find("<p>")+len("<p>") : ])
data = data.replace(',','').replace('.','').replace('"','').replace("'","").replace("(","").replace(")","").replace('\n',' ').replace("-","")
temp_lis = data.split(" ")
# counting words
for word in temp_lis:
word_count += 1
try:
word_dic[str(word)] = int(word_dic[str(word)]) + 1
except:
word_dic[str(word)] = 1
file.close()
json.dump(word_dic, outfile)
outfile.close()
print('done')
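# Illustrative sketch: the header comment asks for the average count per
# document containing a word, which can be computed by post-processing the
# per-file JSON dumps written above. The helper name is a placeholder and the
# directory simply reuses the output path from this script; it is not called here.
def _example_average_per_containing_doc(json_dir="/media/neel/Extra/gigaword_eng_5/word_per_doc/"):
    totals, doc_counts = {}, {}
    for name in os.listdir(json_dir):
        with open(os.path.join(json_dir, name)) as f:
            counts = json.load(f)
        for word, n in counts.items():
            totals[word] = totals.get(word, 0) + n
            doc_counts[word] = doc_counts.get(word, 0) + 1
    # Average occurrences per document, over the documents that contain the word
    return {w: totals[w] / doc_counts[w] for w in totals}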
|
the-stack_0_14993 | import discord
from discord.ext import commands
from asyncdagpi import ImageFeatures
class Image(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def colors(self, ctx, member:discord.Member=None):
if member is None:
member = ctx.author
url = str(member.avatar_url_as(format="png", size=1024))
img = await self.bot.dag.image_process(ImageFeatures.colors(), url)
await ctx.send(file = discord.File(fp=img.image,filename=f"pixel.{img.format}"))
@commands.command()
async def wanted(self, ctx, member:discord.Member=None):
if member is None:
member = ctx.author
warn_msg = 'the dagpi wanted endpoint has a flaw which makes it very slow to compute' if ctx.author.id in self.bot.owner_ids else 'This may take some time'
warn = await ctx.send(warn_msg)
url = str(member.avatar_url_as(format="png", size=1024))
img = await self.bot.dag.image_process(ImageFeatures.wanted(), url)
try:
await warn.delete()
except:
pass
finally:
            await ctx.send(file=discord.File(fp=img.image, filename=f"pixel.{img.format}"))
def setup(bot):
bot.add_cog(Image(bot))
|
the-stack_0_14994 | import json
from argparse import ArgumentParser
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
import dateutil.parser
import datetime
import time
DEFAULT_WCS_VERSION='2018-09-20'
DEFAULT_PAGE_SIZE=500
DEFAULT_NUMBER_OF_PAGES=20
def getAssistant(iam_apikey, url, version=DEFAULT_WCS_VERSION):
'''Retrieve Watson Assistant SDK object'''
authenticator = IAMAuthenticator(iam_apikey)
c = AssistantV1(
version=version,
authenticator=authenticator
)
c.set_service_url(url)
return c
def getLogs(iam_apikey, url, workspace_id, filter, page_size_limit=DEFAULT_PAGE_SIZE, page_num_limit=DEFAULT_NUMBER_OF_PAGES, version=DEFAULT_WCS_VERSION):
'''Public API for script, connects to Watson Assistant and downloads all logs'''
service = getAssistant(iam_apikey, url, version)
return getLogsInternal(service, workspace_id, filter, page_size_limit, page_num_limit)
def getLogsInternal(assistant, workspace_id, filter, page_size_limit=DEFAULT_PAGE_SIZE, page_num_limit=DEFAULT_NUMBER_OF_PAGES):
'''Fetches `page_size_limit` logs at a time through Watson Assistant log API, a maximum of `page_num_limit` times, and returns array of log events'''
cursor = None
pages_retrieved = 0
allLogs = []
noMore = False
while pages_retrieved < page_num_limit and noMore != True:
if workspace_id is None:
#all - requires a workspace_id, assistant id, or deployment id in the filter
output = assistant.list_all_logs(sort='-request_timestamp', filter=filter, page_limit=page_size_limit, cursor=cursor)
else:
output = assistant.list_logs(workspace_id=workspace_id, sort='-request_timestamp', filter=filter, page_limit=page_size_limit, cursor=cursor)
#Hack for API compatibility between v1 and v2 of the API - v2 adds a 'result' property on the response. v2 simplest form is list_logs().get_result()
output = json.loads(str(output))
if 'result' in output:
logs = output['result']
else:
logs = output
if 'pagination' in logs and len(logs['pagination']) != 0:
cursor = logs['pagination'].get('next_cursor', None)
#Do not DOS the list_logs function!
time.sleep(3.0)
else:
noMore = True
if 'logs' in logs:
allLogs.extend(logs['logs'])
pages_retrieved = pages_retrieved + 1
print("Fetched {} log pages".format(pages_retrieved))
else:
return None
#Analysis is easier when logs are in increasing timestamp order
allLogs.reverse()
return allLogs
def writeLogs(logs, output_file, output_columns="raw"):
'''
Writes log output to file system or screen. Includes three modes:
`raw`: logs are written in JSON format
`all`: all log columns useful for intent training are written in CSV format
`utterance`: only the `input.text` column is written (one per line)
'''
file = None
if output_file != None:
file = open(output_file,'w')
print("Writing logs to", output_file)
if 'raw' == output_columns:
writeOut(file, json.dumps(logs,indent=2))
if file is not None:
file.close()
return
if 'all' == output_columns:
writeOut(file, 'Utterance\tIntent\tConfidence\tDate\tLast Visited')
for log in logs:
utterance = log['request' ]['input']['text']
intent = 'unknown_intent'
confidence = 0.0
date = 'unknown_date'
last_visited = 'unknown_last_visited'
if 'response' in log and 'intents' in log['response'] and len(log['response']['intents'])>0:
intent = log['response']['intents'][0]['intent']
confidence = log['response']['intents'][0]['confidence']
dateStr = log['request_timestamp']
date = dateutil.parser.parse(dateStr).strftime("%Y-%m-%d")
if 'nodes_visited' in log['response']['output'] and len (log['response']['output']['nodes_visited']) > 0:
last_visited = log['response']['output']['nodes_visited'][-1]
if 'all' == output_columns:
output_line = '{}\t{}\t{}\t{}\t{}'.format(utterance, intent, confidence, date, last_visited)
else:
#assumed just 'utterance'
output_line = utterance
writeOut(file, output_line)
if output_file != None:
file.close()
def writeOut(file, message):
if file != None:
file.write(message + '\n')
else:
print(message)
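# Illustrative usage sketch: getLogs() needs service credentials plus a Watson
# Assistant log filter; the api key, URL, workspace id and filter below are
# placeholders, and this helper is not called by the script itself.
def _example_fetch_and_print():
    logs = getLogs(
        iam_apikey="YOUR_IAM_APIKEY",
        url="https://gateway.watsonplatform.net/assistant/api",
        workspace_id="YOUR_WORKSPACE_ID",
        filter="response_timestamp>=2020-01-01",
        page_size_limit=100,
        page_num_limit=2,
    )
    if logs:
        writeLogs(logs, output_file=None, output_columns="utterance")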
def create_parser():
parser = ArgumentParser(description='Extracts Watson Assistant logs from a given workspace')
parser.add_argument('-c', '--output_columns', type=str, help='Which columns you want in output, either "utterance", "raw", or "all" (default is "raw")', default='raw')
parser.add_argument('-o', '--output_file', type=str, help='Filename to write results to')
parser.add_argument('-w', '--workspace_id', type=str, help='Workspace identifier')
parser.add_argument('-a', '--iam_apikey', type=str, required=True, help='Assistant service iam api key')
parser.add_argument('-f', '--filter', type=str, required=True, help='Watson Assistant log query filter')
parser.add_argument('-v', '--version', type=str, default=DEFAULT_WCS_VERSION, help="Watson Assistant version in YYYY-MM-DD form.")
parser.add_argument('-n', '--number_of_pages', type=int, default=DEFAULT_NUMBER_OF_PAGES, help='Number of result pages to download (default is {})'.format(DEFAULT_NUMBER_OF_PAGES))
parser.add_argument('-p', '--page_limit', type=int, default=DEFAULT_PAGE_SIZE, help='Number of results per page (default is {})'.format(DEFAULT_PAGE_SIZE))
parser.add_argument('-l', '--url', type=str, default='https://gateway.watsonplatform.net/assistant/api',
help='URL to Watson Assistant. Ex: https://gateway-wdc.watsonplatform.net/assistant/api')
return parser
if __name__ == '__main__':
ARGS = create_parser().parse_args()
service = getAssistant(ARGS.iam_apikey,ARGS.url,ARGS.version)
logs = getLogsInternal(service, ARGS.workspace_id, ARGS.filter, ARGS.page_limit, ARGS.number_of_pages)
writeLogs(logs, ARGS.output_file, ARGS.output_columns)
|
the-stack_0_14996 | from __future__ import division
import numpy as np
import seaborn as sns
import sys
from sys import platform as sys_pf
if sys_pf == 'Darwin':
import matplotlib
matplotlib.use("TkAgg")
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
def normalize(v):
norm=np.linalg.norm(v)
if norm==0:
return v
return v/norm
def visualize_cluster(No_of_traj, l_no, nrows, ncols, x, y, c, col):
Number_of_traj = No_of_traj
label_no =l_no
counter = 0
alpha = c[label_no] - 1
fig, axes = plt.subplots(nrows, ncols, sharex=True, sharey=True)
for row in axes:
for cols in range(ncols):
if (counter > alpha):
break
X = x[counter]
Y = y[counter]
row[cols].plot(X, Y, color=col[int(label_no)])
counter = counter + 1
fig.suptitle('Trajectories of Cluster '+str(label_no), fontsize='large')
plt.show()
def main():
#loading files ...
labels = np.loadtxt('labels.txt', delimiter=',')
labelsbefore = np.loadtxt('labelsbefore.txt', delimiter=',')
XA = np.loadtxt('XA.txt', delimiter=',')
XB = np.loadtxt('XB.txt', delimiter=',')
YA = np.loadtxt('YA.txt', delimiter=',')
YB = np.loadtxt('YB.txt', delimiter=',')
Number_of_traj = np.shape(XA)[0]
Number_of_frames = np.shape(XA)[1]
col = ['red', 'black', 'blue', 'green', 'cyan']
c = np.zeros( shape=(5), dtype=int)
for i in range(139):
if (int(labels[i]) == 0) : c[0] += 1
elif (int(labels[i]) == 1) : c[1] += 1
elif (int(labels[i]) == 2) : c[2] += 1
elif (int(labels[i]) == 3) : c[3] += 1
elif (int(labels[i]) == 4) : c[4] += 1
C0x = np.zeros(shape=(c[0],Number_of_frames))
C1x = np.zeros(shape=(c[1],Number_of_frames))
C2x = np.zeros(shape=(c[2],Number_of_frames))
C3x = np.zeros(shape=(c[3],Number_of_frames))
C4x = np.zeros(shape=(c[4],Number_of_frames))
C0y = np.zeros(shape=(c[0],Number_of_frames))
C1y = np.zeros(shape=(c[1],Number_of_frames))
C2y = np.zeros(shape=(c[2],Number_of_frames))
C3y = np.zeros(shape=(c[3],Number_of_frames))
C4y = np.zeros(shape=(c[4],Number_of_frames))
C0xb = np.zeros(shape=(c[0],Number_of_frames))
C1xb = np.zeros(shape=(c[1],Number_of_frames))
C2xb = np.zeros(shape=(c[2],Number_of_frames))
C3xb = np.zeros(shape=(c[3],Number_of_frames))
C4xb = np.zeros(shape=(c[4],Number_of_frames))
C0yb = np.zeros(shape=(c[0],Number_of_frames))
C1yb = np.zeros(shape=(c[1],Number_of_frames))
C2yb = np.zeros(shape=(c[2],Number_of_frames))
C3yb = np.zeros(shape=(c[3],Number_of_frames))
C4yb = np.zeros(shape=(c[4],Number_of_frames))
index = np.zeros( shape=(5), dtype= int)
for trajectory in range(139):
if (col[int(labels[trajectory])]) == 'red' :
C0x[index[0]] = XA[trajectory]
C0y[index[0]] = YA[trajectory]
C0xb[index[0]] = XB[trajectory]
C0yb[index[0]] = YB[trajectory]
index[0] +=1
elif (col[int(labels[trajectory])]) == 'black' :
C1x[index[1]] = XA[trajectory]
C1y[index[1]] = YA[trajectory]
C1xb[index[1]] = XB[trajectory]
C1yb[index[1]] = YB[trajectory]
index[1] +=1
elif (col[int(labels[trajectory])]) == 'blue' :
C2x[index[2]] = XA[trajectory]
C2y[index[2]] = YA[trajectory]
C2xb[index[2]] = XB[trajectory]
C2yb[index[2]] = YB[trajectory]
index[2] +=1
elif (col[int(labels[trajectory])]) == 'green' :
C3x[index[3]] = XA[trajectory]
C3y[index[3]] = YA[trajectory]
C3xb[index[3]] = XB[trajectory]
C3yb[index[3]] = YB[trajectory]
index[3] +=1
else :
C4x[index[4]] = XA[trajectory]
C4y[index[4]] = YA[trajectory]
C4xb[index[4]] = XB[trajectory]
C4yb[index[4]] = YB[trajectory]
index[4] +=1
print (index)
visualize_cluster(Number_of_traj, 0, 5, 6, C0xb, C0yb, c, col)
visualize_cluster(Number_of_traj, 1, 6, 8, C1xb, C1yb, c, col)
visualize_cluster(Number_of_traj, 2, 3, 6, C2xb, C2yb, c, col)
visualize_cluster(Number_of_traj, 3, 3, 2, C3xb, C3yb, c, col)
visualize_cluster(Number_of_traj, 4, 5, 8, C4xb, C4yb, c, col)
for Trajectories in range (Number_of_traj):
print (Trajectories, labelsbefore[Trajectories], labels[Trajectories])
if __name__ == "__main__":
main()
|
the-stack_0_14998 | import random
import numpy as np
import sys
from domain.make_env import make_env
from domain.task_gym import GymTask
from neat_src import *
class WannGymTask(GymTask):
"""Problem domain to be solved by neural network. Uses OpenAI Gym patterns.
"""
def __init__(self, game, paramOnly=False, nReps=1):
"""Initializes task environment
Args:
game - (string) - dict key of task to be solved (see domain/config.py)
Optional:
paramOnly - (bool) - only load parameters instead of launching task?
nReps - (nReps) - number of trials to get average fitness
"""
GymTask.__init__(self, game, paramOnly, nReps)
# -- 'Weight Agnostic Network' evaluation -------------------------------- -- #
def setWeights(self, wVec, wVal):
"""Set single shared weight of network
Args:
wVec - (np_array) - weight matrix as a flattened vector
[N**2 X 1]
wVal - (float) - value to assign to all weights
Returns:
wMat - (np_array) - weight matrix with single shared weight
[N X N]
"""
# Create connection matrix
wVec[np.isnan(wVec)] = 0
dim = int(np.sqrt(np.shape(wVec)[0]))
cMat = np.reshape(wVec,(dim,dim))
cMat[cMat!=0] = 1.0
# Assign value to all weights
wMat = np.copy(cMat) * wVal
return wMat
def getFitness(self, wVec, aVec, hyp, \
seed=-1,nRep=False,nVals=6,view=False,returnVals=False):
"""Get fitness of a single individual with distribution of weights
Args:
wVec - (np_array) - weight matrix as a flattened vector
[N**2 X 1]
aVec - (np_array) - activation function of each node
[N X 1] - stored as ints (see applyAct in ann.py)
hyp - (dict) - hyperparameters
['alg_wDist'] - weight distribution [standard;fixed;linspace]
['alg_absWCap'] - absolute value of highest weight for linspace
Optional:
seed - (int) - starting random seed for trials
nReps - (int) - number of trials to get average fitness
nVals - (int) - number of weight values to test
Returns:
fitness - (float) - mean reward over all trials
"""
if nRep is False:
nRep = hyp['alg_nReps']
# Set weight values to test WANN with
if (hyp['alg_wDist'] == "standard") and nVals==6: # Double, constant, and half signal
wVals = np.array((-2,-1.0,-0.5,0.5,1.0,2))
else:
wVals = np.linspace(-self.absWCap, self.absWCap ,nVals)
# Get reward from 'reps' rollouts -- test population on same seeds
reward = np.empty((nRep,nVals))
for iRep in range(nRep):
for iVal in range(nVals):
wMat = self.setWeights(wVec,wVals[iVal])
if seed == -1:
reward[iRep,iVal] = self.testInd(wMat, aVec, seed=seed,view=view)
else:
reward[iRep,iVal] = self.testInd(wMat, aVec, seed=seed+iRep,view=view)
if returnVals is True:
return np.mean(reward,axis=0), wVals
return np.mean(reward,axis=0)
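# Illustrative sketch: setWeights() above keeps only the connectivity pattern
# of the flattened weight vector and assigns one shared value to every existing
# connection. A tiny standalone equivalent (helper name made up, not used here):
def _example_shared_weight(wVec, wVal):
    wMat = np.nan_to_num(np.asarray(wVec, dtype=float))
    dim = int(np.sqrt(wMat.size))
    cMat = (wMat.reshape(dim, dim) != 0).astype(float)
    return cMat * wVal  # e.g. [0, 1, -3, 0] with wVal=2.0 -> [[0., 2.], [2., 0.]]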
|
the-stack_0_15000 | from socket import *
import threading
class RecvThread(threading.Thread):
def __init__(self, s, bufsize):
if not isinstance(s, socket):
raise TypeError
super(RecvThread, self).__init__()
self.s = s
self.bufsize = bufsize
def run(self):
while True:
data = self.s.recv(self.bufsize).decode()
if not data:
break
print(f'\r{data} \n> ', end='')
class SendThread(threading.Thread):
def __init__(self, s):
if not isinstance(s, socket):
raise TypeError
super(SendThread, self).__init__()
self.s = s
def run(self):
while True:
data = input('> ').encode()
            self.s.send(data)
|
the-stack_0_15001 | from typing import List, Any, Sequence
from .util import MLP, ThreadedIterator, SMALL_NUMBER
import tensorflow as tf
import numpy as np
import time
import pickle
import os
import shutil
class GGNN(object):
@classmethod
def default_params(cls):
return {
'num_epochs': 1,
'patience': 250,
'learning_rate': 0.001,
'clamp_gradient_norm': 1.0,
'out_layer_dropout_keep_prob': 1.0,
'hidden_size': 200,
'num_timesteps': 4,
'use_graph': True,
'task_ids': [0],
'random_seed': 0,
}
def __init__(self, data_training, data_testing, params=None, restore_file=None, freeze_graph_model=False,
log_dir="./logged_models", cleanup=False):
"""
Basic GGNN class that needs to be extended for use.
:param data_training: data set of PIGs for training [list].
:param data_testing: data set of PIGs for validation [list].
:param params: hyperparameters of the model [dict].
:param restore_file: path to a model that should be restored [str].
:param freeze_graph_model: do not train parameters of graph model (i.e. model is not trained) [bool].
:param log_dir: directory where the model is stored [str].
:param cleanup: clean directory, where the model is stored, before storing it [bool].
"""
# Collect parameters
store_params = params
self.params = self.default_params()
if store_params is not None:
self.params.update(store_params)
# Load data
self.max_num_vertices = 0
self.num_edge_types = 0
self.annotation_size = 0
self.train_data = self.load_data(data_training, is_training_data=True)
self.valid_data = self.load_data(data_testing, is_training_data=False)
self.freeze_graph_model = freeze_graph_model
self.restore = restore_file
# Safe best models/cleanup previous models
if cleanup:
shutil.rmtree(log_dir, ignore_errors=True)
self.log_dir = log_dir
os.makedirs(log_dir, exist_ok=True)
# Path to best model
self.best_model_file = os.path.join(log_dir,
"%s_best_model.pickle" % "_".join([time.strftime("%Y-%m-%d-%H-%M")]))
# Build the actual GGNN model
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.graph = tf.Graph()
self.sess = tf.Session(graph=self.graph, config=config)
with self.graph.as_default():
tf.set_random_seed(self.params['random_seed'])
self.placeholders = {}
self.weights = {}
self.ops = {}
self.make_model()
self.make_train_step()
# Restore/initialize variables
if restore_file is not None:
self.restore_model(restore_file)
else:
self.initialize_model()
@staticmethod
def graph_string_to_array(graph_string: str) -> List[List[int]]:
"""
Returns graph string from string.
:param graph_string: graph as string [str].
:return: graph string as array [list].
"""
return [[int(v) for v in s.split(' ')]
for s in graph_string.split('\n')]
def load_data(self, data, is_training_data: bool):
"""
Loads data
:param data: list of graphs [list]
A graph = {targets, graph, node_features}
:param is_training_data: boolean flag if data is for training or not [bool]
:return: raw process graphs [list]
A raw process graph = {adjacency_lists [dict], num_incoming_edge per_type [dict],
init [list], labels [list]}
"""
num_fwd_edge_types = 0
for g in data:
self.max_num_vertices = max(self.max_num_vertices, max([v for e in g['graph'] for v in [e[0], e[2]]]))
num_fwd_edge_types = max(num_fwd_edge_types, max([e[1] for e in g['graph']]))
self.num_edge_types = max(self.num_edge_types, num_fwd_edge_types)
self.annotation_size = max(self.annotation_size, len(data[0]["node_features"][0]))
return self.process_raw_graphs(data, is_training_data)
def process_raw_graphs(self, raw_data: Sequence[Any], is_training_data: bool) -> Any:
raise Exception("Models have to implement process_raw_graphs!")
def make_model(self):
"""
Makes the GGNN model.
:return: none.
"""
# Create placeholders for the GGNN model
self.placeholders['target_values'] = tf.placeholder(tf.float32, [len(self.params['task_ids']), None],
name='target_values')
self.placeholders['target_mask'] = tf.placeholder(tf.float32, [len(self.params['task_ids']), None],
name='target_mask')
self.placeholders['num_graphs'] = tf.placeholder(tf.int32, [], name='num_graphs')
self.placeholders['out_layer_dropout_keep_prob'] = tf.placeholder(tf.float32, [],
name='out_layer_dropout_keep_prob')
# Start message passing phase (i.e. update of node representations)
with tf.variable_scope("graph_mode"):
self.prepare_specific_graph_model()
if self.params['use_graph']:
self.ops['final_node_representations'] = self.compute_final_node_representations()
else:
self.ops['final_node_representations'] = tf.zeros_like(self.placeholders['initial_node_representation'])
# Start readout phase (i.e. mapping of node representations to output
self.ops['losses'] = []
for (internal_id, task_id) in enumerate(self.params['task_ids']):
with tf.variable_scope("out_layer_task%i" % task_id):
with tf.variable_scope("regression_gate"):
self.weights['regression_gate_task%i' % task_id] = MLP(2 * self.params['hidden_size'], 1, [],
self.placeholders[
'out_layer_dropout_keep_prob'])
with tf.variable_scope("regression"):
self.weights['regression_transform_task%i' % task_id] = MLP(self.params['hidden_size'], 1, [],
self.placeholders[
'out_layer_dropout_keep_prob'])
# Computes the output of the GGNN model
computed_values = self.gated_regression(self.ops['final_node_representations'],
self.weights['regression_gate_task%i' % task_id],
self.weights['regression_transform_task%i' % task_id])
# Computes the difference
diff = self.placeholders['target_values'][internal_id, :] - computed_values
# Ignore none comparisons
task_target_mask = self.placeholders['target_mask'][internal_id, :]
task_target_num = tf.reduce_sum(task_target_mask) + SMALL_NUMBER
diff = diff * task_target_mask # Mask out unused values
self.ops['accuracy_task%i' % task_id] = tf.reduce_sum(tf.cast(tf.equal(tf.round(computed_values),
self.placeholders[
'target_values'][internal_id,
:]), tf.float32))
# Calculate loss (here, normalised mean squared error)
task_loss = tf.reduce_sum(tf.square(diff)) / task_target_num
# Normalise loss
task_loss = task_loss * (1.0 / (self.params['task_sample_ratios'].get(task_id) or 1.0))
self.ops['losses'].append(task_loss)
self.ops['loss'] = tf.reduce_sum(self.ops['losses'])
def make_train_step(self):
"""
Performs a training step.
:return: none.
"""
trainable_vars = self.sess.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
if self.freeze_graph_model:
graph_vars = set(self.sess.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="graph_model"))
filtered_vars = []
for var in trainable_vars:
if var not in graph_vars:
filtered_vars.append(var)
else:
print("Freezing weights of variable %s." % var.name)
trainable_vars = filtered_vars
optimizer = tf.train.AdadeltaOptimizer(1.0)
grads_and_vars = optimizer.compute_gradients(self.ops['loss'], var_list=trainable_vars)
clipped_grads = []
for grad, var in grads_and_vars:
if grad is not None:
clipped_grads.append((tf.clip_by_norm(grad, self.params['clamp_gradient_norm']), var))
else:
clipped_grads.append((grad, var))
self.ops['train_step'] = optimizer.apply_gradients(clipped_grads)
self.sess.run(tf.local_variables_initializer())
def gated_regression(self, last_h, regression_gate, regression_transform):
raise Exception("Models have to implement gated_regression!")
def prepare_specific_graph_model(self) -> None:
raise Exception("Models have to implement prepare_specific_graph_model!")
def compute_final_node_representations(self) -> tf.Tensor:
raise Exception("Models have to implement compute_final_node_representations!")
def make_minibatch_iterator(self, data: Any, is_training: bool):
raise Exception("Models have to implement make_minibatch_iterator!")
def run_epoch(self, data, is_training: bool):
"""
Performs an epoch (i.e. learning iteration).
:param data: set of graphs [list].
:param is_training: boolean flag if data is for training or not [bool].
:return: loss [list], accuracies [list], error_ratios [list], instance_per_sec [list].
"""
loss = 0
accuracies = []
accuracy_ops = [self.ops['accuracy_task%i' % task_id] for task_id in self.params['task_ids']]
start_time = time.time()
processed_graphs = 0
batch_iterator = ThreadedIterator(self.make_minibatch_iterator(data, is_training), max_queue_size=5)
for step, batch_data in enumerate(batch_iterator):
num_graphs = batch_data[self.placeholders['num_graphs']]
processed_graphs += num_graphs
if is_training:
batch_data[self.placeholders['out_layer_dropout_keep_prob']] = self.params[
'out_layer_dropout_keep_prob']
fetch_list = [self.ops['loss'], accuracy_ops, self.ops['train_step']]
else:
batch_data[self.placeholders['out_layer_dropout_keep_prob']] = 1.0
fetch_list = [self.ops['loss'], accuracy_ops]
result = self.sess.run(fetch_list, feed_dict=batch_data)
(batch_loss, batch_accuracies) = (result[0], result[1])
loss += batch_loss * num_graphs
accuracies.append(np.array(batch_accuracies))
accuracies = np.sum(accuracies, axis=0) / processed_graphs
loss = loss / processed_graphs
error_ratios = 1 - accuracies
instance_per_sec = processed_graphs / (time.time() - start_time)
return loss, accuracies, error_ratios, instance_per_sec
def train(self):
"""
Train the GGNN model.
:return: none.
"""
with self.graph.as_default():
if self.restore is not None:
# Epoch resume training
_, valid_accs, _, _ = self.run_epoch(self.valid_data, False)
best_val_acc = np.sum(valid_accs)
best_val_acc_epoch = 0
print("\r\x1b[KResumed operation, initial cum. val. acc: %.5f" % best_val_acc)
else:
(best_val_acc, best_val_acc_epoch) = (0, 0)
for epoch in range(1, self.params['num_epochs'] + 1):
print("== Epoch %i" % epoch)
# Epoch train
train_loss, train_acc, train_errs, train_speed = self.run_epoch(self.train_data, True)
accs_str = " ".join(["%i:%.5f" % (id, acc) for (id, acc) in zip(self.params['task_ids'], train_acc)])
errs_str = " ".join(["%i:%.5f" % (id, err) for (id, err) in zip(self.params['task_ids'], train_errs)])
print("\r\x1b[K Train: loss: %.5f | acc: %s | error_ratio: %s | instances/sec: %.2f" % (train_loss,
accs_str,
errs_str,
train_speed))
# Epoch validation
valid_loss, valid_accs, valid_errs, valid_speed = self.run_epoch(self.valid_data, False)
accs_str = " ".join(["%i:%.5f" % (id, acc) for (id, acc) in zip(self.params['task_ids'], valid_accs)])
errs_str = " ".join(["%i:%.5f" % (id, err) for (id, err) in zip(self.params['task_ids'], valid_errs)])
print("\r\x1b[K Valid: loss: %.5f | acc: %s | error_ratio: %s | instances/sec: %.2f" % (valid_loss,
accs_str,
errs_str,
valid_speed))
val_acc = np.sum(valid_accs) # type: float
if val_acc > best_val_acc:
# Save best model to self.best_model_file
self.save_model(self.best_model_file)
print("LOG: (Best epoch so far, cum. val. acc decreased to %.5f from %.5f. Saving to '%s')" %
(val_acc, best_val_acc, self.best_model_file))
best_val_acc = val_acc
best_val_acc_epoch = epoch
elif epoch - best_val_acc_epoch >= self.params['patience']:
print("LOG: Stopping training after %i epochs without improvement on validation accuracy." %
self.params['patience'])
break
def save_model(self, model_path: str) -> None:
"""
Saves the GGNN model.
:param model_path: path of GGNN model [str].
:return: none.
"""
weights_save = {}
for variable in self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
assert variable.name not in weights_save
weights_save[variable.name] = self.sess.run(variable)
model_save = {"params": self.params, "weights": weights_save}
with open(model_path, 'wb') as out_file:
pickle.dump(model_save, out_file, pickle.HIGHEST_PROTOCOL)
def initialize_model(self) -> None:
"""
Initialises the GGNN model.
:return: none.
"""
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
self.sess.run(init_op)
def restore_model(self, path: str) -> None:
"""
Restores a GGNN model.
:param path: path of model [str]
:return: none.
"""
print("Restoring weights from file %s." % path)
with open(path, 'rb') as rest_file:
data_to_load = pickle.load(rest_file)
# Assert that we got the same model configuration
assert len(self.params) == len(data_to_load['params'])
for (par, par_value) in self.params.items():
# Different task_ids possible
if par not in ['task_ids', 'num_epochs']:
assert par_value == data_to_load['params'][par]
variables_to_initialize = []
with tf.name_scope("restore"):
restore_ops = []
used_vars = set()
for variable in self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
used_vars.add(variable.name)
if variable.name in data_to_load['weights']:
restore_ops.append(variable.assign(data_to_load['weights'][variable.name]))
else:
print('Freshly initializing %s since no saved value was found.' % variable.name)
variables_to_initialize.append(variable)
for var_name in data_to_load['weights']:
if var_name not in used_vars:
print('Saved weights for %s not used by model.' % var_name)
restore_ops.append(tf.variables_initializer(variables_to_initialize))
self.sess.run(restore_ops)
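# Illustrative sketch of the input format load_data() expects: each graph is a
# dict with a "targets" list, a "graph" list of [source, edge_type, target]
# triples, and one "node_features" vector per vertex. The concrete numbers
# below are made up for illustration only and are not used by the model.
_EXAMPLE_GRAPH = {
    "targets": [[1.0]],
    "graph": [[1, 1, 2], [2, 1, 3]],
    "node_features": [[1, 0], [0, 1], [0, 1]],
}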
|
the-stack_0_15003 | import os
import logging
import boto3
from botocore.exceptions import ClientError
from datetime import datetime as dt
import json
logger = logging.getLogger()
logger.setLevel(logging.INFO)
TABLE_NAME = "aqa_scores"
VIDEO_NAME_KEY = 'videoName'
VIDEO_SCORE_KEY = 'videoScore'
def handler(event, context):
try:
logger.info(event)
raw_query_str = event["rawQueryString"]
if "=" in raw_query_str:
video_key = raw_query_str.split("=")[-1]
else:
logger.error("Something wrong with the HTTP request, no valid query string available.")
return None
# Get region name associated with this lambda function
region = os.environ["AWS_REGION"]
video_score = fetch_db_result(region, video_key)
return video_score
except Exception as e:
        logger.exception(e)
raise e
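# Illustrative sketch of the event shape handler() expects: only
# event["rawQueryString"] is read, and everything after the last "=" becomes
# the DynamoDB key. The query value below is a made-up placeholder; a real
# invocation also needs AWS credentials and the "aqa_scores" table.
_EXAMPLE_EVENT = {"rawQueryString": "videoName=sample_dive.mp4"}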
def fetch_db_result(region_name, video_key):
"""Fetch item from DynamoDB of a certain video key
:param region_name: string
:param video_key: string
:return: video_score of DB item with the video_key. If not present, returns None.
"""
db_client = boto3.client("dynamodb", region_name=region_name)
logger.info(f"Getting score for [{video_key}] from table:{TABLE_NAME}...")
try:
response = db_client.get_item(
TableName = TABLE_NAME,
Key = {VIDEO_NAME_KEY: { "S": video_key }},
ProjectionExpression= VIDEO_SCORE_KEY,
)
logger.info(response)
if "Item" in response:
score = response["Item"]["videoScore"]["N"]
logger.info(f"Score for video [{video_key}]: {score}")
else:
score = None
logger.info(f"Score for video [{video_key}] is not available")
return score
except ClientError as e:
        logger.exception(e)
        return None
|
the-stack_0_15004 | import ast
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass
from enum import Enum
from pathlib import PurePath
from typing import Callable, Dict, List, Optional, Sequence, TypeVar, Union
from .logging import log
from .utils import fmt_path, str_path
class ArrowHead(Enum):
NORMAL = 0
SEQUENCE = 1
class Ignore:
pass
class Empty:
pass
RightSide = Union[str, Ignore, Empty]
@dataclass
class Transformed:
path: PurePath
class Ignored:
pass
TransformResult = Optional[Union[Transformed, Ignored]]
@dataclass
class Rule:
left: str
left_index: int
name: str
head: ArrowHead
right: RightSide
right_index: int
def right_result(self, path: PurePath) -> Union[str, Transformed, Ignored]:
if isinstance(self.right, str):
return self.right
elif isinstance(self.right, Ignore):
return Ignored()
elif isinstance(self.right, Empty):
return Transformed(path)
else:
raise RuntimeError(f"Right side has invalid type {type(self.right)}")
class Transformation(ABC):
def __init__(self, rule: Rule):
self.rule = rule
@abstractmethod
def transform(self, path: PurePath) -> TransformResult:
pass
class ExactTf(Transformation):
def transform(self, path: PurePath) -> TransformResult:
if path != PurePath(self.rule.left):
return None
right = self.rule.right_result(path)
if not isinstance(right, str):
return right
return Transformed(PurePath(right))
class ExactReTf(Transformation):
def transform(self, path: PurePath) -> TransformResult:
match = re.fullmatch(self.rule.left, str_path(path))
if not match:
return None
right = self.rule.right_result(path)
if not isinstance(right, str):
return right
# For some reason, mypy thinks that "groups" has type List[str]. But
# since elements of "match.groups()" can be None, mypy is wrong.
groups: Sequence[Optional[str]] = [match[0]] + list(match.groups())
locals_dir: Dict[str, Union[str, int, float]] = {}
for i, group in enumerate(groups):
if group is None:
continue
locals_dir[f"g{i}"] = group
try:
locals_dir[f"i{i}"] = int(group)
except ValueError:
pass
try:
locals_dir[f"f{i}"] = float(group)
except ValueError:
pass
result = eval(f"f{right!r}", {}, locals_dir)
return Transformed(PurePath(result))
class RenamingParentsTf(Transformation):
def __init__(self, sub_tf: Transformation):
super().__init__(sub_tf.rule)
self.sub_tf = sub_tf
def transform(self, path: PurePath) -> TransformResult:
for i in range(len(path.parts), -1, -1):
parent = PurePath(*path.parts[:i])
child = PurePath(*path.parts[i:])
transformed = self.sub_tf.transform(parent)
if not transformed:
continue
elif isinstance(transformed, Transformed):
return Transformed(transformed.path / child)
elif isinstance(transformed, Ignored):
return transformed
else:
raise RuntimeError(f"Invalid transform result of type {type(transformed)}: {transformed}")
return None
class RenamingPartsTf(Transformation):
def __init__(self, sub_tf: Transformation):
super().__init__(sub_tf.rule)
self.sub_tf = sub_tf
def transform(self, path: PurePath) -> TransformResult:
result = PurePath()
any_part_matched = False
for part in path.parts:
transformed = self.sub_tf.transform(PurePath(part))
if not transformed:
result /= part
elif isinstance(transformed, Transformed):
result /= transformed.path
any_part_matched = True
elif isinstance(transformed, Ignored):
return transformed
else:
raise RuntimeError(f"Invalid transform result of type {type(transformed)}: {transformed}")
if any_part_matched:
return Transformed(result)
else:
return None
class RuleParseError(Exception):
def __init__(self, line: "Line", reason: str):
super().__init__(f"Error in rule on line {line.line_nr}, column {line.index}: {reason}")
self.line = line
self.reason = reason
def pretty_print(self) -> None:
log.error(f"Error parsing rule on line {self.line.line_nr}:")
log.error_contd(self.line.line)
spaces = " " * self.line.index
log.error_contd(f"{spaces}^--- {self.reason}")
T = TypeVar("T")
class Line:
def __init__(self, line: str, line_nr: int):
self._line = line
self._line_nr = line_nr
self._index = 0
@property
def line(self) -> str:
return self._line
@property
def line_nr(self) -> int:
return self._line_nr
@property
def index(self) -> int:
return self._index
@index.setter
def index(self, index: int) -> None:
self._index = index
@property
def rest(self) -> str:
return self.line[self.index:]
def peek(self, amount: int = 1) -> str:
return self.rest[:amount]
def take(self, amount: int = 1) -> str:
string = self.peek(amount)
self.index += len(string)
return string
def expect(self, string: str) -> str:
if self.peek(len(string)) == string:
return self.take(len(string))
else:
raise RuleParseError(self, f"Expected {string!r}")
def expect_with(self, string: str, value: T) -> T:
self.expect(string)
return value
def one_of(self, parsers: List[Callable[[], T]], description: str) -> T:
for parser in parsers:
index = self.index
try:
return parser()
except RuleParseError:
self.index = index
raise RuleParseError(self, description)
# RULE = LEFT SPACE '-' NAME '-' HEAD (SPACE RIGHT)?
# SPACE = ' '+
# NAME = '' | 'exact' | 'name' | 're' | 'exact-re' | 'name-re'
# HEAD = '>' | '>>'
# LEFT = STR | QUOTED_STR
# RIGHT = STR | QUOTED_STR | '!'
def parse_zero_or_more_spaces(line: Line) -> None:
while line.peek() == " ":
line.take()
def parse_one_or_more_spaces(line: Line) -> None:
line.expect(" ")
parse_zero_or_more_spaces(line)
def parse_str(line: Line) -> str:
result = []
while c := line.peek():
if c == " ":
break
else:
line.take()
result.append(c)
if result:
return "".join(result)
else:
raise RuleParseError(line, "Expected non-space character")
QUOTATION_MARKS = {'"', "'"}
def parse_quoted_str(line: Line) -> str:
escaped = False
# Points to first character of string literal
start_index = line.index
quotation_mark = line.peek()
if quotation_mark not in QUOTATION_MARKS:
raise RuleParseError(line, "Expected quotation mark")
line.take()
while c := line.peek():
if escaped:
escaped = False
line.take()
elif c == quotation_mark:
line.take()
stop_index = line.index
literal = line.line[start_index:stop_index]
try:
return ast.literal_eval(literal)
except SyntaxError as e:
line.index = start_index
raise RuleParseError(line, str(e)) from e
elif c == "\\":
escaped = True
line.take()
else:
line.take()
raise RuleParseError(line, "Expected end of string literal")
def parse_left(line: Line) -> str:
if line.peek() in QUOTATION_MARKS:
return parse_quoted_str(line)
else:
return parse_str(line)
def parse_right(line: Line) -> Union[str, Ignore]:
c = line.peek()
if c in QUOTATION_MARKS:
return parse_quoted_str(line)
else:
string = parse_str(line)
if string == "!":
return Ignore()
return string
def parse_arrow_name(line: Line) -> str:
return line.one_of([
lambda: line.expect("exact-re"),
lambda: line.expect("exact"),
lambda: line.expect("name-re"),
lambda: line.expect("name"),
lambda: line.expect("re"),
lambda: line.expect(""),
], "Expected arrow name")
def parse_arrow_head(line: Line) -> ArrowHead:
return line.one_of([
lambda: line.expect_with(">>", ArrowHead.SEQUENCE),
lambda: line.expect_with(">", ArrowHead.NORMAL),
], "Expected arrow head")
def parse_eol(line: Line) -> None:
if line.peek():
raise RuleParseError(line, "Expected end of line")
def parse_rule(line: Line) -> Rule:
parse_zero_or_more_spaces(line)
left_index = line.index
left = parse_left(line)
parse_one_or_more_spaces(line)
line.expect("-")
name = parse_arrow_name(line)
line.expect("-")
head = parse_arrow_head(line)
right_index = line.index
right: RightSide
try:
parse_zero_or_more_spaces(line)
parse_eol(line)
right = Empty()
except RuleParseError:
line.index = right_index
parse_one_or_more_spaces(line)
right = parse_right(line)
parse_eol(line)
return Rule(left, left_index, name, head, right, right_index)
def parse_transformation(line: Line) -> Transformation:
rule = parse_rule(line)
if rule.name == "":
return RenamingParentsTf(ExactTf(rule))
elif rule.name == "exact":
return ExactTf(rule)
elif rule.name == "name":
if len(PurePath(rule.left).parts) > 1:
line.index = rule.left_index
raise RuleParseError(line, "Expected name, not multiple segments")
return RenamingPartsTf(ExactTf(rule))
elif rule.name == "re":
return RenamingParentsTf(ExactReTf(rule))
elif rule.name == "exact-re":
return ExactReTf(rule)
elif rule.name == "name-re":
return RenamingPartsTf(ExactReTf(rule))
else:
raise RuntimeError(f"Invalid arrow name {rule.name!r}")
class Transformer:
def __init__(self, rules: str):
"""
May throw a RuleParseException.
"""
self._tfs = []
for i, line in enumerate(rules.split("\n")):
line = line.strip()
if line:
tf = parse_transformation(Line(line, i))
self._tfs.append((line, tf))
def transform(self, path: PurePath) -> Optional[PurePath]:
for i, (line, tf) in enumerate(self._tfs):
log.explain(f"Testing rule {i+1}: {line}")
try:
result = tf.transform(path)
except Exception as e:
log.warn(f"Error while testing rule {i+1}: {line}")
log.warn_contd(str(e))
continue
if not result:
continue
if isinstance(result, Ignored):
log.explain("Match found, path ignored")
return None
if tf.rule.head == ArrowHead.NORMAL:
log.explain(f"Match found, transformed path to {fmt_path(result.path)}")
path = result.path
break
elif tf.rule.head == ArrowHead.SEQUENCE:
log.explain(f"Match found, updated path to {fmt_path(result.path)}")
path = result.path
else:
raise RuntimeError(f"Invalid transform result of type {type(result)}: {result}")
log.explain(f"Final result: {fmt_path(path)}")
return path
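# Illustrative sketch of the rule syntax parsed above: a bare "-->" renames a
# matching path or parent, "-re->" substitutes regex groups into the right-hand
# side, and "!" on the right ignores a path. The rule strings, paths and the
# helper name below are made-up examples, not rules shipped with the tool.
def _example_transformer():
    rules = "\n".join([
        r"Lectures/Week(\d+) -re-> lectures/week_{g1}",
        "Materials --> downloads",
        "Thumbs.db -name-> !",
    ])
    tf = Transformer(rules)
    # The first rule matches and yields PurePath("lectures/week_3/slides.pdf")
    return tf.transform(PurePath("Lectures/Week3/slides.pdf"))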
|