max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---|
quarkchain/evm/messages.py | QuarkChain/pyquarkchain | 237 | 12627821 |
# Modified from pyethereum under MIT license
from collections import Counter
from fractions import Fraction
from quarkchain.utils import token_id_encode
import rlp
# to bypass circular imports
import quarkchain.core
from quarkchain.evm.utils import (
int256,
safe_ord,
bytearray_to_bytestr,
add_dict,
UINT128_MAX,
)
from rlp.sedes import big_endian_int, binary, CountableList, BigEndianInt
from rlp.sedes.binary import Binary
from quarkchain.rlp.utils import decode_hex, encode_hex
from quarkchain.evm import utils # FIXME: use eth_utils
from quarkchain.evm import bloom # FIXME: use eth_bloom
from quarkchain.evm import transactions
from quarkchain.evm import opcodes
from quarkchain.evm import vm
from quarkchain.evm.specials import specials as default_specials
from quarkchain.evm.exceptions import (
InvalidNonce,
InsufficientStartGas,
UnsignedTransaction,
BlockGasLimitReached,
InsufficientBalance,
InvalidNativeToken,
InvalidTransaction,
)
from quarkchain.evm.slogging import get_logger
from quarkchain.utils import token_id_decode, check, TOKEN_ID_MAX
from quarkchain.evm.specials import SystemContract
log = get_logger("eth.block")
log_tx = get_logger("eth.pb.tx")
log_msg = get_logger("eth.pb.msg")
log_state = get_logger("eth.pb.msg.state")
# contract creating transactions send to an empty address
CREATE_CONTRACT_ADDRESS = b""
# DEV OPTIONS
SKIP_MEDSTATES = False
check(TOKEN_ID_MAX <= UINT128_MAX)
def rp(tx, what, actual, target):
return "%r: %r actual:%r target:%r" % (tx, what, actual, target)
class Log(rlp.Serializable):
# TODO: original version used zpad (here replaced by int32.serialize); had
# comment "why zpad"?
fields = [
("address", Binary.fixed_length(20, allow_empty=True)),
("topics", CountableList(BigEndianInt(32))),
("data", binary),
]
def __init__(self, address, topics, data):
if len(address) == 40:
address = decode_hex(address)
assert len(address) == 20
super(Log, self).__init__(address, topics, data)
def bloomables(self):
return [self.address] + [BigEndianInt(32).serialize(x) for x in self.topics]
def to_dict(self):
return {
"bloom": encode_hex(bloom.b64(bloom.bloom_from_list(self.bloomables()))),
"address": encode_hex(self.address),
"data": b"0x" + encode_hex(self.data),
"topics": [encode_hex(utils.int32.serialize(t)) for t in self.topics],
}
def __repr__(self):
return "<Log(address=%r, topics=%r, data=%r)>" % (
encode_hex(self.address),
self.topics,
self.data,
)
class Receipt(rlp.Serializable):
fields = [
("state_root", binary),
(
"gas_used",
big_endian_int,
), # TODO: this is actually the cumulative gas used. fix it.
("bloom", int256),
("logs", CountableList(Log)),
("contract_address", Binary.fixed_length(20, allow_empty=True)),
("contract_full_shard_key", BigEndianInt(4)),
]
@property
def bloom(self):
bloomables = [x.bloomables() for x in self.logs]
return bloom.bloom_from_list(utils.flatten(bloomables))
def mk_receipt(state, success, logs, contract_address, contract_full_shard_key):
bloomables = [x.bloomables() for x in logs]
ret_bloom = bloom.bloom_from_list(utils.flatten(bloomables))
o = Receipt(
state_root=b"\x01" if success else b"",
gas_used=state.gas_used,
bloom=ret_bloom,
logs=logs,
contract_address=contract_address,
contract_full_shard_key=contract_full_shard_key,
)
return o
def convert_to_default_chain_token_gasprice(state, token_id, gas_price):
if token_id == state.shard_config.default_chain_token:
return gas_price
snapshot = state.snapshot()
_, genesis_token_gas_price = get_gas_utility_info(state, token_id, gas_price)
state.revert(snapshot)
return genesis_token_gas_price
def validate_transaction(state, tx):
# (1) The transaction signature is valid;
if not tx.sender: # sender is set and validated on Transaction initialization
raise UnsignedTransaction(tx)
if tx.version == 2:
# When tx.version == 2 (EIP155 tx), check
# 0. EIP155_SIGNER enable
# 1. tx.v == tx.network_id * 2 + 35 (+ 1)
# 2. gas_token_id & transfer_token_id should equal default_token_id (e.g. QKC)
# 3. tx.from_chain_id == tx.to_chain_id and tx.from_shard_key = 0 & tx.to_shard_key = 0
# 4. tx.network_id == chain_config.ETH_CHAIN_ID, where chain_config is derived from tx.from_chain_id
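# Worked example (illustrative): with network_id == 1, the only valid
# signature v values under rule 1 are 37 (1 * 2 + 35) and 38 (1 * 2 + 36),
# which is the EIP-155 replay-protection encoding checked below.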
chain_config = state.qkc_config.CHAINS[tx.from_chain_id]
default_token_id = token_id_encode(chain_config.DEFAULT_CHAIN_TOKEN)
if (
state.qkc_config.ENABLE_EIP155_SIGNER_TIMESTAMP is not None
and state.timestamp < state.qkc_config.ENABLE_EIP155_SIGNER_TIMESTAMP
):
raise InvalidTransaction("EIP155 Signer is not enable yet.")
if tx.v != 35 + tx.network_id * 2 and tx.v != 36 + tx.network_id * 2:
raise InvalidTransaction(
"network_id {} does not match the signature v {}.".format(
tx.network_id, tx.v
)
)
if tx.from_chain_id != tx.to_chain_id:
raise InvalidTransaction(
"EIP155 Signer does not support cross-shard transactions."
)
if tx.from_shard_key != 0 or tx.to_shard_key != 0:
raise InvalidTransaction(
"EIP155 Signer does not support cross-shard transactions."
)
if tx.gas_token_id != default_token_id:
raise InvalidTransaction(
"EIP155 Signer only supports {} as gas token.".format(
chain_config.DEFAULT_CHAIN_TOKEN
)
)
if tx.transfer_token_id != default_token_id:
raise InvalidTransaction(
"EIP155 Signer only supports {} as transfer token.".format(
chain_config.DEFAULT_CHAIN_TOKEN
)
)
assert tx.network_id == chain_config.ETH_CHAIN_ID, "Invalid network_id."
assert (
tx.eth_chain_id - state.qkc_config.BASE_ETH_CHAIN_ID - 1
== tx.from_chain_id
), "Invalid Eth_Chain_Id."
# (1a) startgas and gasprice must be <= UINT128_MAX; gas and transfer token ids must be <= TOKEN_ID_MAX
if (
tx.startgas > UINT128_MAX
or tx.gasprice > UINT128_MAX
or tx.gas_token_id > TOKEN_ID_MAX
or tx.transfer_token_id > TOKEN_ID_MAX
):
raise InvalidTransaction("startgas, gasprice, and token_id must <= UINT128_MAX")
# (2) the transaction nonce is valid (equivalent to the
# sender account's current nonce);
req_nonce = state.get_nonce(tx.sender)
if req_nonce != tx.nonce:
raise InvalidNonce(rp(tx, "nonce", tx.nonce, req_nonce))
# (3) the gas limit is no smaller than the intrinsic gas,
# g0, used by the transaction;
total_gas = tx.intrinsic_gas_used
if tx.startgas < total_gas:
raise InsufficientStartGas(rp(tx, "startgas", tx.startgas, total_gas))
default_chain_token = state.shard_config.default_chain_token
bal = {tx.transfer_token_id: state.get_balance(tx.sender, tx.transfer_token_id)}
if tx.transfer_token_id != tx.gas_token_id:
bal[tx.gas_token_id] = state.get_balance(tx.sender, tx.gas_token_id)
# (4) requires non-zero balance for transfer_token_id and gas_token_id if non-default
for token_id in [tx.transfer_token_id, tx.gas_token_id]:
if token_id != default_chain_token and bal[token_id] == 0:
raise InvalidNativeToken(
"{}: non-default token {} has zero balance".format(
tx.__repr__(), token_id_decode(token_id)
)
)
# (5) the sender account balance contains at least the cost required in up-front payment
cost = Counter({tx.transfer_token_id: tx.value}) + Counter(
{tx.gas_token_id: tx.gasprice * tx.startgas}
)
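# Illustrative arithmetic (hypothetical numbers): if transfer_token_id ==
# gas_token_id, value == 100 and gasprice * startgas == 21000, the two
# Counters above merge into a single entry requiring at least 21100 of that
# token; with distinct tokens, each token is checked against its own cost.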
for token_id, b in bal.items():
if b < cost[token_id]:
raise InsufficientBalance(
rp(
tx,
"token %s balance" % token_id_decode(token_id),
b,
cost[token_id],
)
)
# (6) if gas token non-default, need to check system contract for gas conversion
if tx.gasprice != 0 and tx.gas_token_id != default_chain_token:
snapshot = state.snapshot()
_, genesis_token_gas_price = pay_native_token_as_gas(
state, tx.gas_token_id, tx.startgas, tx.gasprice
)
state.revert(snapshot)
if genesis_token_gas_price == 0:
raise InvalidNativeToken(
"{}: non-default gas token {} not ready for being used to pay gas".format(
tx.__repr__(), token_id_decode(tx.gas_token_id)
)
)
# should be guaranteed by previous check. check added to make sure
bal_gas_reserve = state.get_balance(
SystemContract.GENERAL_NATIVE_TOKEN.addr(), state.genesis_token
)
if bal_gas_reserve < genesis_token_gas_price * tx.startgas:
raise InvalidNativeToken(
"{}: non-default gas token {} not enough reserve balance for conversion".format(
tx.__repr__(), token_id_decode(tx.gas_token_id)
)
)
# (7) check block gas limit
if state.gas_used + tx.startgas > state.gas_limit:
raise BlockGasLimitReached(
rp(tx, "gaslimit", state.gas_used + tx.startgas, state.gas_limit)
)
return True
def _refund(state, message, total, refund_percent):
to_refund = total * refund_percent // 100
to_burn = total - to_refund
state.delta_token_balance(message.sender, message.gas_token_id, to_refund)
# burn
if to_burn:
state.delta_token_balance(bytes(20), message.gas_token_id, to_burn)
def apply_transaction_message(
state,
message,
ext,
should_create_contract,
gas_used_start,
is_cross_shard=False,
contract_address=b"",
refund_rate=100,
):
# gas token should always be converted to genesis token
assert message.gas_token_id == state.genesis_token
local_fee_rate = (
1 - state.qkc_config.reward_tax_rate if state.qkc_config else Fraction(1)
)
evm_gas_start = message.gas
if not should_create_contract:
result, gas_remained, data = apply_msg(ext, message)
contract_address = b""
else: # CREATE
result, gas_remained, data = create_contract(ext, message, contract_address)
contract_address = (
data if (data and result) else b""
) # data could be [] when vm failed execution
assert gas_remained >= 0
log_tx.debug("TX APPLIED", result=result, gas_remained=gas_remained, data=data)
gas_used = evm_gas_start - gas_remained + gas_used_start
if not result:
log_tx.debug(
"TX FAILED",
reason="out of gas or transfer value is 0 and transfer token is non-default and un-queried",
gas_remained=gas_remained,
)
output = b""
success = 0
# Transaction success
else:
log_tx.debug("TX SUCCESS", data=data)
state.refunds += len(set(state.suicides)) * opcodes.GSUICIDEREFUND
if state.refunds > 0:
log_tx.debug("Refunding", gas_refunded=min(state.refunds, gas_used // 2))
gas_remained += min(state.refunds, gas_used // 2)
gas_used -= min(state.refunds, gas_used // 2)
state.refunds = 0
if not should_create_contract:
output = bytearray_to_bytestr(data)
else:
output = data
success = 1
_refund(state, message, ext.tx_gasprice * gas_remained, refund_rate)
fee = (
ext.tx_gasprice
* gas_used
* local_fee_rate.numerator
// local_fee_rate.denominator
)
state.delta_token_balance(state.block_coinbase, message.gas_token_id, fee)
add_dict(state.block_fee_tokens, {message.gas_token_id: fee})
state.gas_used += gas_used
# Clear suicides
suicides = state.suicides
state.suicides = []
for s in suicides:
state.del_account(s)
# Construct a receipt
r = mk_receipt(state, success, state.logs, contract_address, state.full_shard_key)
state.logs = []
if is_cross_shard:
if (
state.qkc_config.ENABLE_EVM_TIMESTAMP is None
or state.timestamp >= state.qkc_config.ENABLE_EVM_TIMESTAMP
):
state.add_xshard_deposit_receipt(r)
else:
state.add_receipt(r)
return success, output
def apply_xshard_deposit(state, deposit, gas_used_start):
state.logs = []
state.suicides = []
state.refunds = 0
state.full_shard_key = deposit.to_address.full_shard_key
# gas should be accounted by gas_used_start
state.delta_token_balance(
deposit.from_address.recipient, deposit.transfer_token_id, deposit.value
)
message_data = vm.CallData(
[safe_ord(x) for x in deposit.message_data], 0, len(deposit.message_data)
)
message = vm.Message(
deposit.from_address.recipient,
deposit.to_address.recipient,
deposit.value,
deposit.gas_remained,
message_data,
code_address=deposit.to_address.recipient,
from_full_shard_key=deposit.from_address.full_shard_key,
to_full_shard_key=deposit.to_address.full_shard_key,
tx_hash=deposit.tx_hash,
transfer_token_id=deposit.transfer_token_id,
gas_token_id=deposit.gas_token_id,
)
# MESSAGE
ext = VMExt(
state, sender=deposit.from_address.recipient, gas_price=deposit.gas_price
)
contract = deposit.to_address.recipient if deposit.create_contract else b""
return apply_transaction_message(
state,
message,
ext,
should_create_contract=deposit.create_contract,
gas_used_start=gas_used_start,
is_cross_shard=True,
contract_address=contract,
refund_rate=deposit.refund_rate,
)
def apply_transaction(state, tx: transactions.Transaction, tx_wrapper_hash):
"""tx_wrapper_hash is the hash for quarkchain.core.Transaction
TODO: remove quarkchain.core.Transaction wrapper and use evm.Transaction directly
"""
state.logs = []
state.suicides = []
state.refunds = 0
validate_transaction(state, tx)
state.full_shard_key = tx.to_full_shard_key
intrinsic_gas = tx.intrinsic_gas_used
log_tx.debug("TX NEW", txdict=tx.to_dict())
# start transacting #################
state.increment_nonce(tx.sender)
# part of fees should go to root chain miners
local_fee_rate = (
1 - state.qkc_config.reward_tax_rate if state.qkc_config else Fraction(1)
)
# buy startgas
gasprice, refund_rate = tx.gasprice, 100
# convert gas if using non-genesis native token
if gasprice != 0 and tx.gas_token_id != state.genesis_token:
refund_rate, converted_genesis_token_gas_price = pay_native_token_as_gas(
state, tx.gas_token_id, tx.startgas, tx.gasprice
)
# guaranteed by validation
check(converted_genesis_token_gas_price > 0)
gasprice = converted_genesis_token_gas_price
contract_addr = SystemContract.GENERAL_NATIVE_TOKEN.addr()
# guaranteed by validation
check(
state.deduct_value(
contract_addr,
state.genesis_token,
tx.startgas * converted_genesis_token_gas_price,
)
)
state.delta_token_balance(
contract_addr, tx.gas_token_id, tx.startgas * tx.gasprice
)
check(state.deduct_value(tx.sender, tx.gas_token_id, tx.startgas * tx.gasprice))
message_data = vm.CallData([safe_ord(x) for x in tx.data], 0, len(tx.data))
message = vm.Message(
tx.sender,
tx.to,
tx.value,
tx.startgas - intrinsic_gas,
message_data,
code_address=tx.to,
from_full_shard_key=tx.from_full_shard_key if not tx.is_testing else None,
to_full_shard_key=tx.to_full_shard_key if not tx.is_testing else None,
tx_hash=tx_wrapper_hash,
transfer_token_id=tx.transfer_token_id,
# always genesis token for gas token
gas_token_id=state.genesis_token,
)
# MESSAGE
ext = VMExt(state, tx.sender, gasprice)
contract_address = b""
if not tx.is_cross_shard:
return apply_transaction_message(
state, message, ext, tx.to == b"", intrinsic_gas, refund_rate=refund_rate
)
# handle xshard
local_gas_used = intrinsic_gas
remote_gas_reserved = 0
if transfer_failure_by_posw_balance_check(ext, message):
success = 0
# Currently, burn all gas
local_gas_used = tx.startgas
elif tx.to == b"":
check(state.deduct_value(tx.sender, tx.transfer_token_id, tx.value))
remote_gas_reserved = tx.startgas - intrinsic_gas
ext.add_cross_shard_transaction_deposit(
quarkchain.core.CrossShardTransactionDeposit(
tx_hash=tx_wrapper_hash,
from_address=quarkchain.core.Address(tx.sender, tx.from_full_shard_key),
to_address=quarkchain.core.Address(
mk_contract_address(
tx.sender, state.get_nonce(tx.sender), tx.from_full_shard_key
),
tx.to_full_shard_key,
),
value=tx.value,
# convert to genesis token and use converted gas price
gas_token_id=state.genesis_token,
gas_price=gasprice,
transfer_token_id=tx.transfer_token_id,
message_data=tx.data,
create_contract=True,
gas_remained=remote_gas_reserved,
refund_rate=refund_rate,
)
)
success = 1
else:
check(state.deduct_value(tx.sender, tx.transfer_token_id, tx.value))
if (
state.qkc_config.ENABLE_EVM_TIMESTAMP is None
or state.timestamp >= state.qkc_config.ENABLE_EVM_TIMESTAMP
):
remote_gas_reserved = tx.startgas - intrinsic_gas
ext.add_cross_shard_transaction_deposit(
quarkchain.core.CrossShardTransactionDeposit(
tx_hash=tx_wrapper_hash,
from_address=quarkchain.core.Address(tx.sender, tx.from_full_shard_key),
to_address=quarkchain.core.Address(tx.to, tx.to_full_shard_key),
value=tx.value,
# convert to genesis token and use converted gas price
gas_token_id=state.genesis_token,
gas_price=gasprice,
transfer_token_id=tx.transfer_token_id,
message_data=tx.data,
create_contract=False,
gas_remained=remote_gas_reserved,
refund_rate=refund_rate,
)
)
success = 1
gas_remained = tx.startgas - local_gas_used - remote_gas_reserved
_refund(state, message, ext.tx_gasprice * gas_remained, refund_rate)
# if x-shard, reserve part of the gas for the target shard miner for fee
fee = (
ext.tx_gasprice
* (local_gas_used - (opcodes.GTXXSHARDCOST if success else 0))
* local_fee_rate.numerator
// local_fee_rate.denominator
)
state.delta_token_balance(state.block_coinbase, state.genesis_token, fee)
add_dict(state.block_fee_tokens, {state.genesis_token: fee})
output = []
state.gas_used += local_gas_used
if (
state.qkc_config.ENABLE_EVM_TIMESTAMP is None
or state.timestamp >= state.qkc_config.ENABLE_EVM_TIMESTAMP
):
state.gas_used -= opcodes.GTXXSHARDCOST if success else 0
# Construct a receipt
r = mk_receipt(state, success, state.logs, contract_address, state.full_shard_key)
state.logs = []
state.add_receipt(r)
return success, output
# VM interface
class VMExt:
def __init__(self, state, sender, gas_price):
self.specials = {k: v for k, v in default_specials.items()}
for k, v in state.config["CUSTOM_SPECIALS"]:
self.specials[k] = v
self._state = state
self.get_code = state.get_code
self.set_code = state.set_code
self.get_balance = (
state.get_balance
) # gets default_chain_token balance if no token_id is passed in
self.set_token_balance = state.set_token_balance
self.set_balance = state.set_balance # gets default_chain_token balance
self.get_nonce = state.get_nonce
self.set_nonce = state.set_nonce
self.increment_nonce = state.increment_nonce
self.set_storage_data = state.set_storage_data
self.get_storage_data = state.get_storage_data
self.log_storage = lambda x: state.account_to_dict(x)
self.add_suicide = lambda x: state.add_suicide(x)
self.add_refund = lambda x: state.set_param("refunds", state.refunds + x)
self.block_hash = lambda x: state.get_block_hash(state.block_number - x - 1)
self.block_coinbase = state.block_coinbase
self.block_timestamp = state.timestamp
self.block_number = state.block_number
self.block_difficulty = state.block_difficulty
self.block_gas_limit = state.gas_limit
self.log = lambda addr, topics, data: state.add_log(Log(addr, topics, data))
self.create = lambda msg, salt: create_contract(
self, msg, contract_recipient=b"", salt=salt
)
self.msg = lambda msg: _apply_msg(self, msg, self.get_code(msg.code_address))
self.account_exists = state.account_exists
self.blockhash_store = 0x20
self.snapshot = state.snapshot
self.revert = state.revert
self.transfer_value = state.transfer_value
self.deduct_value = state.deduct_value
self.add_cross_shard_transaction_deposit = (
lambda deposit: state.xshard_list.append(deposit)
)
self.reset_storage = state.reset_storage
self.tx_origin = sender
self.tx_gasprice = gas_price
self.sender_disallow_map = state.sender_disallow_map
self.default_chain_token = state.shard_config.default_chain_token
self.chain_id = state.full_shard_key >> 16
def apply_msg(ext, msg):
return _apply_msg(ext, msg, ext.get_code(msg.code_address))
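# Note on the PoSW (Proof of Staked Work) check below: judging from the code,
# sender_disallow_map appears to map a sender to a balance amount that must
# stay locked, so a transfer is rejected when the value plus the locked amount
# exceeds the sender's current balance.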
def transfer_failure_by_posw_balance_check(ext, msg):
return (
msg.sender in ext.sender_disallow_map
and msg.value + ext.sender_disallow_map[msg.sender]
> ext.get_balance(msg.sender)
)
def _apply_msg(ext, msg, code):
trace_msg = log_msg.is_active("trace")
if trace_msg:
log_msg.debug(
"MSG APPLY",
sender=encode_hex(msg.sender),
to=encode_hex(msg.to),
gas=msg.gas,
value=msg.value,
codelen=len(code),
data=encode_hex(msg.data.extract_all())
if msg.data.size < 2500
else ("data<%d>" % msg.data.size),
pre_storage=ext.log_storage(msg.to),
static=msg.static,
depth=msg.depth,
gas_token_id=msg.gas_token_id,
transfer_token_id=msg.transfer_token_id,
)
# early exit if msg.sender is disallowed
if transfer_failure_by_posw_balance_check(ext, msg):
log_msg.warn("SENDER NOT ALLOWED", sender=encode_hex(msg.sender))
return 0, 0, []
# transfer value, quit if not enough
snapshot = ext.snapshot()
if msg.transfers_value:
if not ext.transfer_value(msg.sender, msg.to, msg.transfer_token_id, msg.value):
log_msg.debug(
"MSG TRANSFER FAILED",
have=ext.get_balance(msg.sender, token_id=msg.transfer_token_id),
want=msg.value,
)
return 0, 0, []
# Main loop
special_proc, enable_ts = ext.specials.get(msg.code_address, (None, 0))
if special_proc and ext.block_timestamp > enable_ts:
res, gas, dat = special_proc(ext, msg)
else:
res, gas, dat = vm.vm_execute(ext, msg, code)
if trace_msg:
log_msg.debug(
"MSG APPLIED",
gas_remained=gas,
sender=encode_hex(msg.sender),
to=encode_hex(msg.to),
data=dat if len(dat) < 2500 else ("data<%d>" % len(dat)),
post_storage=ext.log_storage(msg.to),
)
if (
res == 1
and code != b""
and msg.transfer_token_id != ext.default_chain_token
and not msg.token_id_queried
and msg.value != 0
):
res = 0
if res == 0:
log_msg.debug("REVERTING")
ext.revert(snapshot)
return res, gas, dat
def mk_contract_address(sender, nonce, full_shard_key):
if full_shard_key is not None:
to_encode = [utils.normalize_address(sender), full_shard_key, nonce]
else:
# only happens for backward-compatible EVM tests
to_encode = [utils.normalize_address(sender), nonce]
return utils.sha3(rlp.encode(to_encode))[12:]
def mk_contract_address2(sender, salt: bytes, init_code_hash: bytes):
return utils.sha3(
b"\xff" + utils.normalize_address(sender) + salt + init_code_hash
)[12:]
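# mk_contract_address2 above follows the CREATE2-style derivation (EIP-1014):
# keccak256(0xff ++ sender ++ salt ++ keccak256(init_code)), keeping the last
# 20 bytes, assuming utils.sha3 is keccak-256 as in pyethereum.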
def create_contract(ext, msg, contract_recipient=b"", salt=None):
log_msg.debug("CONTRACT CREATION")
if msg.transfer_token_id != ext.default_chain_token:
# TODO: calling a smart contract with a non-QKC transfer_token_id is not supported
return 0, msg.gas, b""
code = msg.data.extract_all()
if ext.tx_origin != msg.sender:
ext.increment_nonce(msg.sender)
if contract_recipient != b"":
# apply xshard deposit, where contract address has already been specified
msg.to = contract_recipient
elif salt is not None:
# create2
msg.to = mk_contract_address2(msg.sender, salt, utils.sha3(code))
else:
nonce = utils.encode_int(ext.get_nonce(msg.sender) - 1)
msg.to = mk_contract_address(msg.sender, nonce, msg.to_full_shard_key)
if ext.get_nonce(msg.to) or len(ext.get_code(msg.to)):
log_msg.debug("CREATING CONTRACT ON TOP OF EXISTING CONTRACT")
return 0, 0, b""
if ext.account_exists(msg.to):
ext.set_nonce(msg.to, 0)
ext.set_code(msg.to, b"")
ext.reset_storage(msg.to)
msg.is_create = True
# assert not ext.get_code(msg.to)
msg.data = vm.CallData([], 0, 0)
snapshot = ext.snapshot()
ext.set_nonce(msg.to, 1)
ext.reset_storage(msg.to)
res, gas, dat = _apply_msg(ext, msg, code)
log_msg.debug(
"CONTRACT CREATION FINISHED",
res=res,
gas=gas,
dat=dat if len(dat) < 2500 else ("data<%d>" % len(dat)),
)
if res:
if not len(dat):
# ext.set_code(msg.to, b'')
return 1, gas, msg.to
gcost = len(dat) * opcodes.GCONTRACTBYTE
if gas >= gcost and (len(dat) <= 24576):
gas -= gcost
else:
log_msg.debug(
"CONTRACT CREATION FAILED",
have=gas,
want=gcost,
block_number=ext.block_number,
)
ext.revert(snapshot)
return 0, 0, b""
ext.set_code(msg.to, bytearray_to_bytestr(dat))
log_msg.debug("SETTING CODE", addr=encode_hex(msg.to), lendat=len(dat))
return 1, gas, msg.to
else:
ext.revert(snapshot)
return 0, gas, dat
def _call_general_native_token_manager(state, data: bytes) -> (int, int):
contract_addr = SystemContract.GENERAL_NATIVE_TOKEN.addr()
code = state.get_code(contract_addr)
if not code:
return 0, 0
# Only contract itself can invoke payment
sender = contract_addr
# Call the `calculateGasPrice` function
message = vm.Message(
sender,
contract_addr,
0,
1000000, # Mock gas to guarantee msg will be applied
data,
code_address=contract_addr,
)
ext = VMExt(state, sender, gas_price=0)
result, _, output = apply_msg(ext, message)
if not result:
return 0, 0
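# The output is presumably two ABI-encoded uint256 return values packed back
# to back: the refund rate in the first 32-byte word and the converted gas
# price in the second.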
refund_rate = int.from_bytes(output[:32], byteorder="big")
converted_gas_price = int.from_bytes(output[32:64], byteorder="big")
return refund_rate, converted_gas_price
def get_gas_utility_info(
state, token_id: int, gas_price_in_native_token: int
) -> (int, int):
# Call the `calculateGasPrice` function
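# The call data below follows the standard Solidity ABI layout: a 4-byte
# function selector (here hard-coded as "ce9e8c47", presumably the selector of
# calculateGasPrice) followed by two 32-byte big-endian arguments.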
data = (
bytes.fromhex("ce9e8c47")
+ token_id.to_bytes(32, byteorder="big")
+ gas_price_in_native_token.to_bytes(32, byteorder="big")
)
return _call_general_native_token_manager(state, data)
def pay_native_token_as_gas(
state, token_id: int, gas: int, gas_price_in_native_token: int
) -> (int, int):
# Call the `payAsGas` function
check(token_id <= TOKEN_ID_MAX)
check(gas <= UINT128_MAX)
check(gas_price_in_native_token <= UINT128_MAX)
data = (
bytes.fromhex("5ae8f7f1")
+ token_id.to_bytes(32, byteorder="big")
+ gas.to_bytes(32, byteorder="big")
+ gas_price_in_native_token.to_bytes(32, byteorder="big")
)
return _call_general_native_token_manager(state, data)
Python/samples/viewABC.py | PaulDoessel/ExocortexCrate | 105 | 12627833 |
import _ExocortexAlembicPython as alembic
import sys
import argparse
# ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
# global variables
show_time = False
show_meta = False
show_size = False
show_vals = False
show_just_obj = False
show_ts = False
obj_filter = None
typ_filter = None
noo_filter = None
not_filter = None
# ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
# visit the hierarchy of properties and compounds
def visit_prop(prop, depth):
if prop.isCompound():
print(depth + "compound: " + prop.getName())
for sub_prop in prop.getPropertyNames():
visit_prop(prop.getProperty(sub_prop), depth+" ")
else:
print(depth + "property: \"" + prop.getName() + "\", " + prop.getType())
if show_size or show_vals:
for i in xrange(0, prop.getNbStoredSamples()):
if show_vals:
print(depth + "-> values: " + str(prop.getValues(i)) )
elif show_size:
print(depth + "-> size: " + str(len(prop.getValues(i))) )
# ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
# start the visit of the properties
def visit_object(obj):
for prop in obj.getPropertyNames():
visit_prop(obj.getProperty(prop), " ")
# ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
# inspect the base of the archive, use the filter to discard objects if necessary
def visit_alembic(abc_archive):
global show_just_obj
global obj_filter
global typ_filter
global noo_filter
global not_filter
global show_ts
if show_time:
print("Time sampling: " + str(abc_archive.getSampleTimes()))
for identifier in abc_archive.getIdentifiers():
if (obj_filter != None and identifier.find(obj_filter) < 0) or (noo_filter != None and identifier.find(noo_filter) >= 0):
continue # pass over this object!
obj = abc_archive.getObject(identifier)
obj_typ = obj.getType()
if (typ_filter != None and obj_typ.find(typ_filter) < 0) or (not_filter != None and obj_typ.find(not_filter) >= 0):
continue # pass over this object because of its type!
print("OBJ: " + identifier + ", " + obj_typ)
if show_meta:
print("-- meta data: " + str(obj.getMetaData()))
if show_ts:
print("-- TS index: " + str(obj.getTsIndex()))
if not show_just_obj:
visit_object(obj)
# ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
def main(args):
global show_time
global show_meta
global show_size
global show_vals
global show_just_obj
global obj_filter
global typ_filter
global noo_filter
global not_filter
global show_ts
# parse args
parser = argparse.ArgumentParser(description="Explore the structure of an Alembic file.")
parser.add_argument("abc_in", type=str, metavar="{Alembic file}", help="input Alembic file to explore")
parser.add_argument("-v", "--vals", action='store_true', help='show the values of the properties')
parser.add_argument("-s", "--size", action='store_true', help='show only the number of values stored in the properties')
parser.add_argument("-m", "--meta", action='store_true', help='show objects\' meta data')
parser.add_argument("-t", "--time", action='store_true', help='show time sampling')
parser.add_argument("-O", "--object", action='store_true', help='show only objects, not properties')
parser.add_argument("-f", "--filter", type=str, metavar="{id filter}", help="only show objects containing substring {id filter} in their identifier")
parser.add_argument("-T", "--typefilter", type=str, metavar="{type filter}", help="only show objects containing substring {type filter} in their type")
parser.add_argument("-nf", "--NOTfilter", type=str, metavar="{id filter}", help="only copy objects NOT containing substring {id filter} in their identifier")
parser.add_argument("-nT", "--NOTtypefilter", type=str, metavar="{type filter}", help="only copy objects NOT containing substring {type filter} in their type")
parser.add_argument("-S", "--samp", action='store_true', help="show object's time sampling index")
ns = vars(parser.parse_args(args[1:]))
show_time = ns["time"]
show_meta = ns["meta"]
show_size = ns["size"]
show_vals = ns["vals"]
show_ts = ns["samp"]
obj_filter = ns["filter"]
typ_filter = ns["typefilter"]
noo_filter = ns["NOTfilter"]
not_filter = ns["NOTtypefilter"]
show_just_obj = ns["object"]
abc_archive = alembic.getIArchive(ns["abc_in"])
print("\n\nExploring " + ns["abc_in"])
visit_alembic(abc_archive)
# ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
if __name__ == "__main__":
main(sys.argv)
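# Example invocation (hypothetical file name and filter values):
# python viewABC.py scene.abc -m -t -f /geo -T PolyMesh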
testing/MLDB-1198-sum-inconsistency-test.py | kstepanmpmg/mldb | 665 | 12627853 |
#
# MLDB-1198-sum-inconsistency-test.py
# Mich, 2015-12-15
# This file is part of MLDB. Copyright 2015 mldb.ai inc. All rights reserved.
#
from mldb import mldb
mldb.put('/v1/datasets/ds', {
'type' : 'sparse.mutable'
})
def insert_with_ts(ts):
mldb.post('/v1/datasets/ds/rows', {
'rowName' : 'row1',
'columns' : [
['colA', 1, ts],
]
})
insert_with_ts(1)
insert_with_ts(10)
insert_with_ts(100)
insert_with_ts(1000)
mldb.post('/v1/datasets/ds/commit')
query = 'SELECT sum("colA") as "colA" FROM ds'
res = mldb.query(query)
count = res[1][1]
mldb.log("First query count: {}".format(count))
query = "SELECT sum({*}) AS * FROM ds"
data = mldb.query(query)
mldb.log(data)
cols = data[0]
vals = data[1]
for col, val in zip(cols, vals):
if col == 'colA':
mldb.log(val)
assert count == val, ('First sum ({}) != second sum ({})'
.format(count, val))
query = 'SELECT count("colA") as "colA" FROM ds'
res = mldb.query(query)
count = res[1][1]
mldb.log("First query count: {}".format(count))
query = "SELECT count({*}) AS * FROM ds"
data = mldb.query(query)
cols = data[0]
vals = data[1]
for col, val in zip(cols, vals):
if col == 'colA':
mldb.log(val)
assert count == val, ('First count ({}) != second count ({})'
.format(count, val))
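# "request" below is not imported in this script; it is presumably an object
# injected into the script environment by the MLDB runtime for reporting the
# script result.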
request.set_return("success")
tests/ptvsd/server/test_attach.py | int19h/ptvsd | 349 | 12627860 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
from tests import debug
from tests.debug import runners, targets
from tests.patterns import some
@pytest.mark.parametrize("stop_method", ["break_into_debugger", "pause"])
@pytest.mark.parametrize("is_attached", ["is_attached", ""])
@pytest.mark.parametrize("wait_for_attach", ["wait_for_attach", ""])
@pytest.mark.parametrize("target", targets.all)
def test_attach_api(pyfile, target, wait_for_attach, is_attached, stop_method):
@pyfile
def code_to_debug():
from debug_me import backchannel, ptvsd, scratchpad
import sys
import time
_, host, port, wait_for_attach, is_attached, stop_method = sys.argv
port = int(port)
ptvsd.enable_attach((host, port))
if wait_for_attach:
backchannel.send("wait_for_attach")
ptvsd.wait_for_attach()
if is_attached:
backchannel.send("is_attached")
while not ptvsd.is_attached():
print("looping until is_attached")
time.sleep(0.1)
if stop_method == "break_into_debugger":
backchannel.send("break_into_debugger?")
assert backchannel.receive() == "proceed"
ptvsd.break_into_debugger()
print("break") # @break_into_debugger
else:
scratchpad["paused"] = False
backchannel.send("loop?")
assert backchannel.receive() == "proceed"
while not scratchpad["paused"]:
print("looping until paused")
time.sleep(0.1)
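# The debuggee above exercises the ptvsd attach API in order:
# ptvsd.enable_attach((host, port)) opens the debug listener, then optionally
# ptvsd.wait_for_attach() blocks / ptvsd.is_attached() polls until a debugger
# connects, and ptvsd.break_into_debugger() stops at a programmatic breakpoint.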
with debug.Session() as session:
host, port = runners.attach_by_socket.host, runners.attach_by_socket.port
session.config.update({"host": host, "port": port})
backchannel = session.open_backchannel()
session.spawn_debuggee(
[code_to_debug, host, port, wait_for_attach, is_attached, stop_method]
)
session.wait_for_enable_attach()
session.connect_to_adapter((host, port))
with session.request_attach():
pass
if wait_for_attach:
assert backchannel.receive() == "wait_for_attach"
if is_attached:
assert backchannel.receive() == "is_attached"
if stop_method == "break_into_debugger":
assert backchannel.receive() == "break_into_debugger?"
backchannel.send("proceed")
session.wait_for_stop(
expected_frames=[some.dap.frame(code_to_debug, "break_into_debugger")]
)
elif stop_method == "pause":
assert backchannel.receive() == "loop?"
backchannel.send("proceed")
session.request("pause", freeze=False)
session.wait_for_stop("pause")
session.scratchpad["paused"] = True
else:
pytest.fail(stop_method)
session.request_continue()
@pytest.mark.parametrize("run", runners.all_attach_by_socket)
def test_reattach(pyfile, target, run):
@pyfile
def code_to_debug():
from debug_me import ptvsd, scratchpad
import time
ptvsd.break_into_debugger()
object() # @first
scratchpad["exit"] = False
while not scratchpad["exit"]:
time.sleep(0.1)
ptvsd.break_into_debugger()
object() # @second
with debug.Session() as session1:
session1.captured_output = set()
session1.expected_exit_code = None # not expected to exit on disconnect
with run(session1, target(code_to_debug)):
pass
session1.wait_for_stop(expected_frames=[some.dap.frame(code_to_debug, "first")])
session1.disconnect()
with debug.Session() as session2:
session2.config.update(session1.config)
if "host" in session2.config:
session2.connect_to_adapter(
(session2.config["host"], session2.config["port"])
)
with session2.request_attach():
pass
session2.wait_for_stop(
expected_frames=[some.dap.frame(code_to_debug, "second")]
)
session2.scratchpad["exit"] = True
session2.request_continue()
@pytest.mark.parametrize("pid_type", ["int", "str"])
def test_attach_by_pid(pyfile, target, pid_type):
@pyfile
def code_to_debug():
import debug_me # noqa
import time
def do_something(i):
time.sleep(0.1)
proceed = True
print(i) # @bp
return proceed
for i in range(100):
if not do_something(i):
break
with debug.Session() as session:
def before_request(command, arguments):
if command == "attach":
assert isinstance(arguments["processId"], int)
if pid_type == "str":
arguments["processId"] = str(arguments["processId"])
session.before_request = before_request
session.config["redirectOutput"] = True
with session.attach_by_pid(target(code_to_debug), wait=False):
session.set_breakpoints(code_to_debug, all)
stop = session.wait_for_stop(
expected_frames=[some.dap.frame(code_to_debug, "bp")]
)
# Remove breakpoint and continue.
session.request(
"setExpression",
{"frameId": stop.frame_id, "expression": "proceed", "value": "False"},
)
session.set_breakpoints(code_to_debug, [])
session.request_continue()
session.wait_for_next_event(
"output", some.dict.containing({"category": "stdout"})
)
tools/auto_bisect/bisect_perf_regression.py | google-ar/chromium | 777 | 12627903 |
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium auto-bisect tool
This script bisects a range of commits using binary search. It starts by getting
reference values for the specified "good" and "bad" commits. Then, for revisions
in between, it will get builds, run tests and classify intermediate revisions as
"good" or "bad" until an adjacent "good" and "bad" revision is found; this is
the culprit.
If the culprit is a roll of a dependency repository (e.g. v8), it will then
expand the revision range and continue the bisect until a culprit revision in
the dependency repository is found.
Example usage using git commit hashes, bisecting a performance test based on
the mean value of a particular metric:
./tools/auto_bisect/bisect_perf_regression.py
--command "out/Release/performance_ui_tests \
--gtest_filter=ShutdownTest.SimpleUserQuit"\
--metric shutdown/simple-user-quit
--good_revision 1f6e67861535121c5c819c16a666f2436c207e7b\
--bad-revision b732f23b4f81c382db0b23b9035f3dadc7d925bb\
Example usage using git commit positions, bisecting a functional test based on
whether it passes or fails.
./tools/auto_bisect/bisect_perf_regression.py\
--command "out/Release/content_unittests -single-process-tests \
--gtest_filter=GpuMemoryBufferImplTests"\
--good_revision 408222\
--bad_revision 408232\
--bisect_mode return_code\
--builder_type full
In practice, the auto-bisect tool is usually run on tryserver.chromium.perf
try bots, and is started by tools/run-bisect-perf-regression.py using
config parameters from tools/auto_bisect/bisect.cfg.
"""
import argparse
import copy
import errno
import hashlib
import json
import logging
import os
import re
import shlex
import shutil
import StringIO
import sys
import time
import urllib
import urllib2
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..',
'third_party', 'catapult', 'telemetry'))
from bisect_printer import BisectPrinter
from bisect_results import BisectResults
import bisect_results_json
from bisect_state import BisectState
import bisect_utils
import builder
import fetch_build
import math_utils
import query_crbug
import request_build
import source_control
# The script is in chromium/src/tools/auto_bisect. Throughout this script,
# we use paths to other things in the chromium/src repository.
# Possible return values from BisectPerformanceMetrics.RunTest.
BUILD_RESULT_SUCCEED = 0
BUILD_RESULT_FAIL = 1
BUILD_RESULT_SKIPPED = 2
# How many times to repeat the test on the last known good and first known bad
# revisions in order to assess a more accurate confidence score in the
# regression culprit.
BORDER_REVISIONS_EXTRA_RUNS = 2
# Patch template to add a new file, DEPS.sha under src folder.
# This file contains SHA1 value of the DEPS changes made while bisecting
# dependency repositories. This patch send along with DEPS patch to try server.
# When a build requested is posted with a patch, bisect builders on try server,
# once build is produced, it reads SHA value from this file and appends it
# to build archive filename.
DEPS_SHA_PATCH = """diff --git DEPS.sha DEPS.sha
new file mode 100644
--- /dev/null
+++ DEPS.sha
@@ -0,0 +1 @@
+%(deps_sha)s
"""
REGRESSION_NOT_REPRODUCED_MESSAGE_TEMPLATE = """
Bisect did not clearly reproduce a regression between the given "good"
and "bad" revisions.
Results:
"Good" revision: {good_rev}
\tMean: {good_mean}
\tStandard error: {good_std_err}
\tSample size: {good_sample_size}
"Bad" revision: {bad_rev}
\tMean: {bad_mean}
\tStandard error: {bad_std_err}
\tSample size: {bad_sample_size}
You may want to try bisecting on a different platform or metric.
"""
# Git branch name used to run bisect try jobs.
BISECT_TRYJOB_BRANCH = 'bisect-tryjob'
# Git master branch name.
BISECT_MASTER_BRANCH = 'master'
# File to store 'git diff' content.
BISECT_PATCH_FILE = 'deps_patch.txt'
# SVN repo where the bisect try jobs are submitted.
PERF_SVN_REPO_URL = 'svn://svn.chromium.org/chrome-try/try-perf'
FULL_SVN_REPO_URL = 'svn://svn.chromium.org/chrome-try/try'
ANDROID_CHROME_SVN_REPO_URL = ('svn://svn.chromium.org/chrome-try-internal/'
'try-perf')
PERF_DASH_RESULTS_URL = 'https://chromeperf.appspot.com/post_bisect_results'
class RunGitError(Exception):
def __str__(self):
return '%s\nError executing git command.' % self.args[0]
def GetSHA1HexDigest(contents):
"""Returns SHA1 hex digest of the given string."""
return hashlib.sha1(contents).hexdigest()
def WriteStringToFile(text, file_name):
"""Writes text to a file, raising an RuntimeError on failure."""
try:
with open(file_name, 'wb') as f:
f.write(text)
except IOError:
raise RuntimeError('Error writing to file [%s]' % file_name)
def ReadStringFromFile(file_name):
"""Writes text to a file, raising an RuntimeError on failure."""
try:
with open(file_name) as f:
return f.read()
except IOError:
raise RuntimeError('Error reading file [%s]' % file_name)
def ChangeBackslashToSlashInPatch(diff_text):
"""Formats file paths in the given patch text to Unix-style paths."""
if not diff_text:
return None
diff_lines = diff_text.split('\n')
for i in range(len(diff_lines)):
line = diff_lines[i]
if line.startswith('--- ') or line.startswith('+++ '):
diff_lines[i] = line.replace('\\', '/')
return '\n'.join(diff_lines)
def _ParseRevisionsFromDEPSFileManually(deps_file_contents):
"""Parses the vars section of the DEPS file using regular expressions.
Args:
deps_file_contents: The DEPS file contents as a string.
Returns:
A dictionary in the format {depot: revision} if successful, otherwise None.
"""
# We'll parse the "vars" section of the DEPS file.
rxp = re.compile('vars = {(?P<vars_body>[^}]+)', re.MULTILINE)
re_results = rxp.search(deps_file_contents)
if not re_results:
return None
# We should be left with a series of entries in the vars component of
# the DEPS file with the following format:
# 'depot_name': 'revision',
vars_body = re_results.group('vars_body')
rxp = re.compile(r"'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'",
re.MULTILINE)
re_results = rxp.findall(vars_body)
return dict(re_results)
def _WaitUntilBuildIsReady(fetch_build_func, builder_name, build_request_id,
max_timeout, buildbot_server_url):
"""Waits until build is produced by bisect builder on try server.
Args:
fetch_build_func: Function to check and download build from cloud storage.
builder_name: Builder bot name on try server.
build_request_id: A unique ID of the build request posted to try server.
max_timeout: Maximum time to wait for the build.
buildbot_server_url: Buildbot url to check build status.
Returns:
Downloaded archive file path if exists, otherwise None.
"""
# Build number on the try server.
build_num = None
# Interval to check build on cloud storage.
poll_interval = 60
# Interval to check build status on try server in seconds.
status_check_interval = 600
last_status_check = time.time()
start_time = time.time()
while True:
# Checks for build on gs://chrome-perf and download if exists.
res = fetch_build_func()
if res:
return (res, 'Build successfully found')
elapsed_status_check = time.time() - last_status_check
# To avoid overloading try server with status check requests, we check
# build status for every 10 minutes.
if elapsed_status_check > status_check_interval:
last_status_check = time.time()
if not build_num:
# Get the build number on try server for the current build.
build_num = request_build.GetBuildNumFromBuilder(
build_request_id, builder_name, buildbot_server_url)
# Check the status of build using the build number.
# Note: Build is treated as PENDING if build number is not found
# on the the try server.
build_status, status_link = request_build.GetBuildStatus(
build_num, builder_name, buildbot_server_url)
if build_status == request_build.FAILED:
return (None, 'Failed to produce build, log: %s' % status_link)
elapsed_time = time.time() - start_time
if elapsed_time > max_timeout:
return (None, 'Timed out: %ss without build' % max_timeout)
logging.info('Time elapsed: %ss without build.', elapsed_time)
time.sleep(poll_interval)
# For some reason, mac bisect bots were not flushing stdout periodically.
# As a result buildbot command is timed-out. Flush stdout on all platforms
# while waiting for build.
sys.stdout.flush()
def _UpdateV8Branch(deps_content):
"""Updates V8 branch in DEPS file to process v8_bleeding_edge.
Check for "v8_branch" in DEPS file if exists update its value
with v8_bleeding_edge branch. Note: "v8_branch" is added to DEPS
variable from DEPS revision 254916, therefore check for "src/v8":
<v8 source path> in DEPS in order to support prior DEPS revisions
and update it.
Args:
deps_content: DEPS file contents to be modified.
Returns:
Modified DEPS file contents as a string.
"""
new_branch = r'branches/bleeding_edge'
v8_branch_pattern = re.compile(r'(?<="v8_branch": ")(.*)(?=")')
if re.search(v8_branch_pattern, deps_content):
deps_content = re.sub(v8_branch_pattern, new_branch, deps_content)
else:
# Replaces the branch assigned to "src/v8" key in DEPS file.
# Format of "src/v8" in DEPS:
# "src/v8":
# (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"),
# So, "/trunk@" is replace with "/branches/bleeding_edge@"
v8_src_pattern = re.compile(
r'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re.MULTILINE)
if re.search(v8_src_pattern, deps_content):
deps_content = re.sub(v8_src_pattern, new_branch, deps_content)
return deps_content
def _UpdateDEPSForAngle(revision, depot, deps_file):
"""Updates DEPS file with new revision for Angle repository.
This is a hack for Angle depot case because, in DEPS file "vars" dictionary
variable contains "angle_revision" key that holds git hash instead of
SVN revision.
And sometimes "angle_revision" key is not specified in "vars" variable,
in such cases check "deps" dictionary variable that matches
angle.git@[a-fA-F0-9]{40}$ and replace git hash.
"""
deps_var = bisect_utils.DEPOT_DEPS_NAME[depot]['deps_var']
try:
deps_contents = ReadStringFromFile(deps_file)
# Check whether the depot and revision pattern in DEPS file vars variable
# e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa".
angle_rev_pattern = re.compile(r'(?<="%s": ")([a-fA-F0-9]{40})(?=")' %
deps_var, re.MULTILINE)
match = re.search(angle_rev_pattern, deps_contents)
if match:
# Update the revision information for the given depot
new_data = re.sub(angle_rev_pattern, revision, deps_contents)
else:
# Check whether the depot and revision pattern in DEPS file deps
# variable. e.g.,
# "src/third_party/angle": Var("chromium_git") +
# "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",.
angle_rev_pattern = re.compile(
r'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re.MULTILINE)
match = re.search(angle_rev_pattern, deps_contents)
if not match:
logging.info('Could not find angle revision information in DEPS file.')
return False
new_data = re.sub(angle_rev_pattern, revision, deps_contents)
# Write changes to DEPS file
WriteStringToFile(new_data, deps_file)
return True
except IOError, e:
logging.warn('Something went wrong while updating DEPS file, %s', e)
return False
def _TryParseHistogramValuesFromOutput(metric, text):
"""Attempts to parse a metric in the format HISTOGRAM <graph: <trace>.
Args:
metric: The metric as a list of [<trace>, <value>] strings.
text: The text to parse the metric values from.
Returns:
A list of floating point numbers found, [] if none were found.
"""
metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1])
text_lines = text.split('\n')
values_list = []
for current_line in text_lines:
if metric_formatted in current_line:
current_line = current_line[len(metric_formatted):]
try:
histogram_values = eval(current_line)
for b in histogram_values['buckets']:
average_for_bucket = float(b['high'] + b['low']) * 0.5
# Extends the list with N-elements with the average for that bucket.
values_list.extend([average_for_bucket] * b['count'])
except Exception:
pass
return values_list
def _TryParseResultValuesFromOutput(metric, text):
"""Attempts to parse a metric in the format RESULT <graph>: <trace>= ...
Args:
metric: The metric as a list of [<trace>, <value>] string pairs.
text: The text to parse the metric values from.
Returns:
A list of floating point numbers found.
"""
# Format is: RESULT <graph>: <trace>= <value> <units>
metric_re = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))
# The log will be parsed looking for format:
# <*>RESULT <graph_name>: <trace_name>= <value>
single_result_re = re.compile(
metric_re + r'\s*(?P<VALUE>[-]?\d*(\.\d*)?)')
# The log will be parsed looking for format:
# <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...]
multi_results_re = re.compile(
metric_re + r'\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]')
# The log will be parsed looking for format:
# <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>}
mean_stddev_re = re.compile(
metric_re +
r'\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}')
text_lines = text.split('\n')
values_list = []
for current_line in text_lines:
# Parse the output from the performance test for the metric we're
# interested in.
single_result_match = single_result_re.search(current_line)
multi_results_match = multi_results_re.search(current_line)
mean_stddev_match = mean_stddev_re.search(current_line)
if (not single_result_match is None and
single_result_match.group('VALUE')):
values_list += [single_result_match.group('VALUE')]
elif (not multi_results_match is None and
multi_results_match.group('VALUES')):
metric_values = multi_results_match.group('VALUES')
values_list += metric_values.split(',')
elif (not mean_stddev_match is None and
mean_stddev_match.group('MEAN')):
values_list += [mean_stddev_match.group('MEAN')]
values_list = [float(v) for v in values_list
if bisect_utils.IsStringFloat(v)]
return values_list
def _ParseMetricValuesFromOutput(metric, text):
"""Parses output from performance_ui_tests and retrieves the results for
a given metric.
Args:
metric: The metric as a list of [<trace>, <value>] strings.
text: The text to parse the metric values from.
Returns:
A list of floating point numbers found.
"""
metric_values = _TryParseResultValuesFromOutput(metric, text)
if not metric_values:
metric_values = _TryParseHistogramValuesFromOutput(metric, text)
return metric_values
def _GenerateProfileIfNecessary(command_args):
"""Checks the command line of the performance test for dependencies on
profile generation, and runs tools/perf/generate_profile as necessary.
Args:
command_args: Command line being passed to performance test, as a list.
Returns:
False if profile generation was necessary and failed, otherwise True.
"""
if '--profile-dir' in ' '.join(command_args):
# If we were using python 2.7+, we could just use the argparse
# module's parse_known_args to grab --profile-dir. Since some of the
# bots still run 2.6, have to grab the arguments manually.
arg_dict = {}
args_to_parse = ['--profile-dir', '--browser']
for arg_to_parse in args_to_parse:
for i, current_arg in enumerate(command_args):
if arg_to_parse in current_arg:
current_arg_split = current_arg.split('=')
# Check 2 cases, --arg=<val> and --arg <val>
if len(current_arg_split) == 2:
arg_dict[arg_to_parse] = current_arg_split[1]
elif i + 1 < len(command_args):
arg_dict[arg_to_parse] = command_args[i+1]
path_to_generate = os.path.join('tools', 'perf', 'generate_profile')
if '--profile-dir' in arg_dict and '--browser' in arg_dict:
profile_path, profile_type = os.path.split(arg_dict['--profile-dir'])
return not bisect_utils.RunProcess(
[
'python', path_to_generate,
'--profile-type-to-generate', profile_type,
'--browser', arg_dict['--browser'],
'--output-dir', profile_path
])
return False
return True
def _IsRegressionReproduced(known_good_result, known_bad_result,
required_initial_confidence):
"""Checks whether the regression was reproduced based on the initial values.
Args:
known_good_result: A dict with the keys "values", "mean" and "std_err".
known_bad_result: Same as above.
required_initial_confidence: Minimum confidence score for the given
good and bad revisions to avoid early aborting.
Returns:
True if there is a clear change between the result values for the given
good and bad revisions, False otherwise.
"""
def PossiblyFlatten(values):
"""Flattens if needed, by averaging the values in each nested list."""
if isinstance(values, list) and all(isinstance(x, list) for x in values):
return map(math_utils.Mean, values)
return values
initial_confidence = BisectResults.ConfidenceScore(
PossiblyFlatten(known_bad_result['values']),
PossiblyFlatten(known_good_result['values']),
accept_single_bad_or_good=True)
return initial_confidence >= required_initial_confidence
def _RegressionNotReproducedWarningMessage(
good_revision, bad_revision, known_good_value, known_bad_value):
return REGRESSION_NOT_REPRODUCED_MESSAGE_TEMPLATE.format(
good_rev=good_revision,
good_mean=known_good_value['mean'],
good_std_err=known_good_value['std_err'],
good_sample_size=len(known_good_value['values']),
bad_rev=bad_revision,
bad_mean=known_bad_value['mean'],
bad_std_err=known_bad_value['std_err'],
bad_sample_size=len(known_bad_value['values']))
class DepotDirectoryRegistry(object):
def __init__(self, src_cwd):
self.depot_cwd = {}
for depot in bisect_utils.DEPOT_NAMES:
# The working directory of each depot is just the path to the depot, but
# since we're already in 'src', we can skip that part.
path_in_src = bisect_utils.DEPOT_DEPS_NAME[depot]['src'][4:]
self.SetDepotDir(depot, os.path.join(src_cwd, path_in_src))
self.SetDepotDir('chromium', src_cwd)
def SetDepotDir(self, depot_name, depot_dir):
self.depot_cwd[depot_name] = depot_dir
def GetDepotDir(self, depot_name):
if depot_name in self.depot_cwd:
return self.depot_cwd[depot_name]
else:
assert False, ('Unknown depot [ %s ] encountered. Possibly a new one '
'was added without proper support?' % depot_name)
def ChangeToDepotDir(self, depot_name):
"""Given a depot, changes to the appropriate working directory.
Args:
depot_name: The name of the depot (see DEPOT_NAMES).
"""
os.chdir(self.GetDepotDir(depot_name))
def _PrepareBisectBranch(parent_branch, new_branch):
"""Creates a new branch to submit bisect try job.
Args:
parent_branch: Parent branch to be used to create new branch.
new_branch: New branch name.
"""
current_branch, returncode = bisect_utils.RunGit(
['rev-parse', '--abbrev-ref', 'HEAD'])
if returncode:
raise RunGitError('Must be in a git repository to send changes to trybots.')
current_branch = current_branch.strip()
# Make sure current branch is master.
if current_branch != parent_branch:
output, returncode = bisect_utils.RunGit(['checkout', '-f', parent_branch])
if returncode:
raise RunGitError('Failed to checkout branch: %s.' % output)
# Delete new branch if exists.
output, returncode = bisect_utils.RunGit(['branch', '--list'])
if new_branch in output:
output, returncode = bisect_utils.RunGit(['branch', '-D', new_branch])
if returncode:
raise RunGitError('Deleting branch failed, %s', output)
# Check if the tree is dirty: make sure the index is up to date and then
# run diff-index.
bisect_utils.RunGit(['update-index', '--refresh', '-q'])
output, returncode = bisect_utils.RunGit(['diff-index', 'HEAD'])
if output:
raise RunGitError('Cannot send a try job with a dirty tree.')
# Create and check out the telemetry-tryjob branch, and edit the configs
# for the try job there.
output, returncode = bisect_utils.RunGit(['checkout', '-b', new_branch])
if returncode:
raise RunGitError('Failed to checkout branch: %s.' % output)
output, returncode = bisect_utils.RunGit(
['branch', '--set-upstream-to', parent_branch])
if returncode:
raise RunGitError('Error in git branch --set-upstream-to')
def _StartBuilderTryJob(
builder_type, git_revision, builder_name, job_name, patch=None):
"""Attempts to run a try job from the current directory.
Args:
builder_type: One of the builder types in fetch_build, e.g. "perf".
git_revision: A git commit hash.
builder_name: Name of the bisect bot to be used for try job.
job_name: Try job name, used to identify which bisect
job was responsible for requesting a build.
patch: A DEPS patch (used while bisecting dependency repositories),
or None if we're bisecting the top-level repository.
"""
# TODO(prasadv, qyearsley): Make this a method of BuildArchive
# (which may be renamed to BuilderTryBot or Builder).
try:
# Temporary branch for running a try job.
_PrepareBisectBranch(BISECT_MASTER_BRANCH, BISECT_TRYJOB_BRANCH)
patch_content = '/dev/null'
# Create a temporary patch file.
if patch:
WriteStringToFile(patch, BISECT_PATCH_FILE)
patch_content = BISECT_PATCH_FILE
try_command = [
'try',
'--bot=%s' % builder_name,
'--revision=%s' % git_revision,
'--name=%s' % job_name,
'--svn_repo=%s' % _TryJobSvnRepo(builder_type),
'--diff=%s' % patch_content,
]
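# The assembled try command is roughly (illustrative placeholders):
# git try --bot=<builder> --revision=<git hash> --name=<job name> \
#     --svn_repo=<try repo url> --diff=<patch file or /dev/null>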
# Execute try job to build revision.
print try_command
output, return_code = bisect_utils.RunGit(try_command)
command_string = ' '.join(['git'] + try_command)
if return_code:
raise RunGitError('Could not execute try job: %s.\n'
'Error: %s' % (command_string, output))
logging.info('Try job successfully submitted.\n TryJob Details: %s\n%s',
command_string, output)
finally:
# Delete patch file if exists.
try:
os.remove(BISECT_PATCH_FILE)
except OSError as e:
if e.errno != errno.ENOENT:
raise
# Checkout master branch and delete bisect-tryjob branch.
bisect_utils.RunGit(['checkout', '-f', BISECT_MASTER_BRANCH])
bisect_utils.RunGit(['branch', '-D', BISECT_TRYJOB_BRANCH])
def _TryJobSvnRepo(builder_type):
"""Returns an SVN repo to use for try jobs based on the builder type."""
if builder_type == fetch_build.PERF_BUILDER:
return PERF_SVN_REPO_URL
if builder_type == fetch_build.FULL_BUILDER:
return FULL_SVN_REPO_URL
if builder_type == fetch_build.ANDROID_CHROME_PERF_BUILDER:
return ANDROID_CHROME_SVN_REPO_URL
raise NotImplementedError('Unknown builder type "%s".' % builder_type)
class BisectPerformanceMetrics(object):
"""This class contains functionality to perform a bisection of a range of
revisions to narrow down where performance regressions may have occurred.
The main entry-point is the Run method.
"""
def __init__(self, opts, src_cwd):
"""Constructs a BisectPerformancesMetrics object.
Args:
opts: BisectOptions object containing parsed options.
src_cwd: Root src/ directory of the test repository (inside bisect/ dir).
"""
super(BisectPerformanceMetrics, self).__init__()
self.opts = opts
self.src_cwd = src_cwd
self.depot_registry = DepotDirectoryRegistry(self.src_cwd)
self.printer = BisectPrinter(self.opts, self.depot_registry)
self.cleanup_commands = []
self.warnings = []
self.builder = builder.Builder.FromOpts(opts)
def PerformCleanup(self):
"""Performs cleanup when script is finished."""
os.chdir(self.src_cwd)
for c in self.cleanup_commands:
if c[0] == 'mv':
shutil.move(c[1], c[2])
else:
assert False, 'Invalid cleanup command.'
def GetRevisionList(self, depot, bad_revision, good_revision):
"""Retrieves a list of all the commits between the bad revision and
last known good revision."""
cwd = self.depot_registry.GetDepotDir(depot)
return source_control.GetRevisionList(bad_revision, good_revision, cwd=cwd)
def _ParseRevisionsFromDEPSFile(self, depot):
"""Parses the local DEPS file to determine blink/skia/v8 revisions which may
be needed if the bisect recurses into those depots later.
Args:
depot: Name of depot being bisected.
Returns:
A dict in the format {depot:revision} if successful, otherwise None.
"""
try:
deps_data = {
'Var': lambda _: deps_data["vars"][_],
'From': lambda *args: None,
}
deps_file = bisect_utils.FILE_DEPS_GIT
if not os.path.exists(deps_file):
deps_file = bisect_utils.FILE_DEPS
execfile(deps_file, {}, deps_data)
deps_data = deps_data['deps']
rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")
results = {}
for depot_name, depot_data in bisect_utils.DEPOT_DEPS_NAME.iteritems():
if (depot_data.get('platform') and
depot_data.get('platform') != os.name):
continue
if depot_data.get('recurse') and depot in depot_data.get('from'):
depot_data_src = depot_data.get('src') or depot_data.get('src_old')
src_dir = deps_data.get(depot_data_src)
if src_dir:
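# depot_data_src is expected to be a path like 'src/v8' (per
# DEPOT_DEPS_NAME); slicing off the first four characters drops the
# assumed leading 'src/' so the remainder can be joined onto self.src_cwd.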
self.depot_registry.SetDepotDir(depot_name, os.path.join(
self.src_cwd, depot_data_src[4:]))
re_results = rxp.search(src_dir)
if re_results:
results[depot_name] = re_results.group('revision')
else:
warning_text = ('Could not parse revision for %s while bisecting '
'%s' % (depot_name, depot))
if warning_text not in self.warnings:
self.warnings.append(warning_text)
else:
results[depot_name] = None
return results
except ImportError:
deps_file_contents = ReadStringFromFile(deps_file)
parse_results = _ParseRevisionsFromDEPSFileManually(deps_file_contents)
results = {}
for depot_name, depot_revision in parse_results.iteritems():
depot_revision = depot_revision.strip('@')
logging.warn('%s: %s', depot_name, depot_revision)
for cur_name, cur_data in bisect_utils.DEPOT_DEPS_NAME.iteritems():
if cur_data.get('deps_var') == depot_name:
src_name = cur_name
results[src_name] = depot_revision
break
return results
def _Get3rdPartyRevisions(self, depot):
"""Parses the DEPS file to determine WebKit/v8/etc... versions.
Args:
depot: A depot name. Should be in the DEPOT_NAMES list.
Returns:
A dict in the format {depot: revision} if successful, otherwise None.
"""
cwd = os.getcwd()
self.depot_registry.ChangeToDepotDir(depot)
results = {}
if depot == 'chromium' or depot == 'android-chrome':
results = self._ParseRevisionsFromDEPSFile(depot)
os.chdir(cwd)
if depot == 'v8':
# We can't try to map the trunk revision to bleeding edge yet, because
# we don't know which direction to try to search in. Have to wait until
# the bisect has narrowed the results down to 2 v8 rolls.
results['v8_bleeding_edge'] = None
return results
def BackupOrRestoreOutputDirectory(self, restore=False, build_type='Release'):
"""Backs up or restores build output directory based on restore argument.
Args:
restore: Indicates whether to restore or back up. Default is False (backup).
build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
Returns:
Path to the backup or restored location as a string, or None on failure.
"""
build_dir = os.path.abspath(
builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
source_dir = os.path.join(build_dir, build_type)
destination_dir = os.path.join(build_dir, '%s.bak' % build_type)
if restore:
source_dir, destination_dir = destination_dir, source_dir
if os.path.exists(source_dir):
RemoveDirectoryTree(destination_dir)
shutil.move(source_dir, destination_dir)
return destination_dir
return None
def _DownloadAndUnzipBuild(self, revision, depot, build_type='Release',
create_patch=False):
"""Downloads the build archive for the given revision.
Args:
revision: The git revision to download.
depot: The name of a dependency repository. Should be in DEPOT_NAMES.
build_type: Target build type, e.g. 'Release', 'Debug', 'Release_x64', etc.
create_patch: Create a patch with any locally modified files.
Returns:
True if download succeeds, otherwise False.
"""
patch = None
patch_sha = None
if depot not in ('chromium', 'android-chrome'):
# Create a DEPS patch with new revision for dependency repository.
self._CreateDEPSPatch(depot, revision)
create_patch = True
if create_patch:
revision, patch = self._CreatePatch(revision)
if patch:
# Get the SHA of the DEPS changes patch.
patch_sha = GetSHA1HexDigest(patch)
# Update the DEPS changes patch with a patch to create a new file named
# 'DEPS.sha' and add patch_sha evaluated above to it.
patch = '%s\n%s' % (patch, DEPS_SHA_PATCH % {'deps_sha': patch_sha})
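# Rough sketch of the combined patch (DEPS_SHA_PATCH is defined elsewhere
# in this script, so its exact contents are assumed here): the DEPS diff
# produced above, followed by a hunk that creates a 'DEPS.sha' file
# containing patch_sha, which presumably lets the archive be tied back to
# this exact DEPS patch (see deps_patch_sha passed below).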
build_dir = builder.GetBuildOutputDirectory(self.opts, self.src_cwd)
downloaded_file = self._WaitForBuildDownload(
revision, build_dir, deps_patch=patch, deps_patch_sha=patch_sha)
if not downloaded_file:
return False
return self._UnzipAndMoveBuildProducts(downloaded_file, build_dir,
build_type=build_type)
def _WaitForBuildDownload(self, revision, build_dir, deps_patch=None,
deps_patch_sha=None):
"""Tries to download a zip archive for a build.
This involves seeing whether the archive is already available, and if not,
then requesting a build and waiting before downloading.
Args:
revision: A git commit hash.
build_dir: The directory to download the build into.
deps_patch: A patch which changes a dependency repository revision in
the DEPS, if applicable.
deps_patch_sha: The SHA1 hex digest of the above patch.
Returns:
File path of the downloaded file if successful, otherwise None.
"""
bucket_name, remote_path = fetch_build.GetBucketAndRemotePath(
revision, builder_type=self.opts.builder_type,
target_arch=self.opts.target_arch,
target_platform=self.opts.target_platform,
deps_patch_sha=deps_patch_sha,
extra_src=self.opts.extra_src)
output_dir = os.path.abspath(build_dir)
fetch_build_func = lambda: fetch_build.FetchFromCloudStorage(
bucket_name, remote_path, output_dir)
is_available = fetch_build.BuildIsAvailable(bucket_name, remote_path)
if is_available:
return fetch_build_func()
# When build archive doesn't exist, make a request and wait.
return self._RequestBuildAndWait(
revision, fetch_build_func, deps_patch=deps_patch)
def _RequestBuildAndWait(self, git_revision, fetch_build_func,
deps_patch=None):
"""Triggers a try job for a build job.
This function prepares and starts a try job for a builder, and waits for
the archive to be produced and archived. Once the build is ready it is
downloaded.
For performance tests, builders on the tryserver.chromium.perf are used.
TODO(qyearsley): Make this function take "builder_type" as a parameter
and make requests to different bot names based on that parameter.
Args:
git_revision: A git commit hash.
fetch_build_func: Function to check and download build from cloud storage.
deps_patch: DEPS patch string, used when bisecting dependency repos.
Returns:
Downloaded archive file path when requested build exists and download is
successful, otherwise None.
"""
if not fetch_build_func:
return None
# Create a unique ID for each build request posted to try server builders.
# This ID is added to "Reason" property of the build.
build_request_id = GetSHA1HexDigest(
'%s-%s-%s' % (git_revision, deps_patch, time.time()))
# Revert any changes to DEPS file.
bisect_utils.CheckRunGit(['reset', '--hard', 'HEAD'], cwd=self.src_cwd)
builder_name, build_timeout = fetch_build.GetBuilderNameAndBuildTime(
builder_type=self.opts.builder_type,
target_arch=self.opts.target_arch,
target_platform=self.opts.target_platform,
extra_src=self.opts.extra_src)
try:
_StartBuilderTryJob(self.opts.builder_type, git_revision, builder_name,
job_name=build_request_id, patch=deps_patch)
except RunGitError as e:
logging.warn('Failed to post builder try job for revision: [%s].\n'
'Error: %s', git_revision, e)
return None
# Get the buildbot master URL to monitor build status.
buildbot_server_url = fetch_build.GetBuildBotUrl(
builder_type=self.opts.builder_type,
target_arch=self.opts.target_arch,
target_platform=self.opts.target_platform,
extra_src=self.opts.extra_src)
archive_filename, error_msg = _WaitUntilBuildIsReady(
fetch_build_func, builder_name, build_request_id, build_timeout,
buildbot_server_url)
if not archive_filename:
logging.warn('%s [revision: %s]', error_msg, git_revision)
return archive_filename
def _UnzipAndMoveBuildProducts(self, downloaded_file, build_dir,
build_type='Release'):
"""Unzips the build archive and moves it to the build output directory.
The build output directory is wherever the binaries are expected to
be in order to start Chrome and run tests.
TODO: Simplify and clarify this method if possible.
Args:
downloaded_file: File path of the downloaded zip file.
build_dir: Directory where the zip file was downloaded to.
build_type: "Release" or "Debug".
Returns:
True if successful, False otherwise.
"""
abs_build_dir = os.path.abspath(build_dir)
output_dir = os.path.join(abs_build_dir, self.GetZipFileBuildDirName())
logging.info('EXPERIMENTAL RUN, _UnzipAndMoveBuildProducts locals %s',
str(locals()))
try:
RemoveDirectoryTree(output_dir)
self.BackupOrRestoreOutputDirectory(restore=False)
# Build output directory based on target (e.g. out/Release, out/Debug).
target_build_output_dir = os.path.join(abs_build_dir, build_type)
logging.info('Extracting "%s" to "%s"', downloaded_file, abs_build_dir)
fetch_build.Unzip(downloaded_file, abs_build_dir)
if not os.path.exists(output_dir):
# Due to recipe changes, the build's extracted folder contains
# out/Release instead of full-build-<platform>/Release.
if os.path.exists(os.path.join(abs_build_dir, 'out', build_type)):
output_dir = os.path.join(abs_build_dir, 'out', build_type)
else:
raise IOError('Missing extracted folder %s ' % output_dir)
logging.info('Moving build from %s to %s',
output_dir, target_build_output_dir)
shutil.move(output_dir, target_build_output_dir)
return True
except Exception as e:
logging.info('Something went wrong while extracting archive file: %s', e)
self.BackupOrRestoreOutputDirectory(restore=True)
# Cleanup any leftovers from unzipping.
if os.path.exists(output_dir):
RemoveDirectoryTree(output_dir)
finally:
# Delete downloaded archive
if os.path.exists(downloaded_file):
os.remove(downloaded_file)
return False
@staticmethod
def GetZipFileBuildDirName():
"""Gets the base file name of the zip file.
After extracting the zip file, this is the name of the directory where
the build files are expected to be, though this may vary (see the TODO below).
TODO: Make sure that this returns the actual directory name where the
Release or Debug directory is inside of the zip files. This probably
depends on the builder recipe, and may depend on whether the builder is
a perf builder or full builder.
Returns:
The name of the directory inside a build archive which is expected to
contain a Release or Debug directory.
"""
if bisect_utils.IsWindowsHost():
return 'full-build-win32'
if bisect_utils.IsLinuxHost():
return 'full-build-linux'
if bisect_utils.IsMacHost():
return 'full-build-mac'
raise NotImplementedError('Unknown platform "%s".' % sys.platform)
def IsDownloadable(self, depot):
"""Checks if build can be downloaded based on target platform and depot."""
if (self.opts.target_platform in ['chromium', 'android', 'android-chrome']
and self.opts.builder_type):
# In case of android-chrome platform, download archives only for
# android-chrome depot; for other depots such as chromium, v8, skia
# etc., build the binary locally.
if self.opts.target_platform == 'android-chrome':
return depot == 'android-chrome'
else:
return (depot == 'chromium' or
'chromium' in bisect_utils.DEPOT_DEPS_NAME[depot]['from'] or
'v8' in bisect_utils.DEPOT_DEPS_NAME[depot]['from'])
return False
def UpdateDepsContents(self, deps_contents, depot, git_revision, deps_key):
"""Returns modified version of DEPS file contents.
Args:
deps_contents: DEPS file content.
depot: Current depot being bisected.
git_revision: A git hash to be updated in DEPS.
deps_key: Key in vars section of DEPS file to be searched.
Returns:
Updated DEPS content as string if deps key is found, otherwise None.
"""
# Check whether the depot revision pattern appears in the DEPS file vars,
# e.g. for webkit the format is "webkit_revision": "12345".
deps_revision = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_key,
re.MULTILINE)
new_data = None
if re.search(deps_revision, deps_contents):
commit_position = source_control.GetCommitPosition(
git_revision, self.depot_registry.GetDepotDir(depot))
if not commit_position:
logging.warn('Could not determine commit position for %s', git_revision)
return None
# Update the revision information for the given depot
new_data = re.sub(deps_revision, str(commit_position), deps_contents)
else:
# Check whether the depot revision pattern appears in the DEPS file vars,
# e.g. for webkit the format is "webkit_revision": "559a6d4ab7a84c539..".
deps_revision = re.compile(
r'(?<=["\']%s["\']: ["\'])([a-fA-F0-9]{40})(?=["\'])' % deps_key,
re.MULTILINE)
if re.search(deps_revision, deps_contents):
new_data = re.sub(deps_revision, git_revision, deps_contents)
if new_data:
# For v8_bleeding_edge revisions change V8 branch in order
# to fetch bleeding edge revision.
if depot == 'v8_bleeding_edge':
new_data = _UpdateV8Branch(new_data)
if not new_data:
return None
return new_data
def UpdateDeps(self, revision, depot, deps_file):
"""Updates DEPS file with new revision of dependency repository.
This method searches DEPS for the pattern in which the depot revision is
specified (e.g. "webkit_revision": "123456"). If a match is found, it
resolves the given git hash to an SVN revision and replaces it in the
DEPS file.
Args:
revision: A git hash revision of the dependency repository.
depot: Current depot being bisected.
deps_file: Path to DEPS file.
Returns:
True if DEPS file is modified successfully, otherwise False.
"""
if not os.path.exists(deps_file):
return False
deps_var = bisect_utils.DEPOT_DEPS_NAME[depot]['deps_var']
# Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME.
if not deps_var:
logging.warn('DEPS update not supported for Depot: %s', depot)
return False
# Hack for Angle repository. In the DEPS file, "vars" dictionary variable
# contains "angle_revision" key that holds git hash instead of SVN revision.
# And sometime "angle_revision" key is not specified in "vars" variable.
# In such cases check, "deps" dictionary variable that matches
# angle.git@[a-fA-F0-9]{40}$ and replace git hash.
if depot == 'angle':
return _UpdateDEPSForAngle(revision, depot, deps_file)
try:
deps_contents = ReadStringFromFile(deps_file)
updated_deps_content = self.UpdateDepsContents(
deps_contents, depot, revision, deps_var)
# Write changes to DEPS file
if updated_deps_content:
WriteStringToFile(updated_deps_content, deps_file)
return True
except IOError as e:
logging.warn('Something went wrong while updating DEPS file. [%s]', e)
return False
def _CreateDEPSPatch(self, depot, revision):
"""Checks out the DEPS file at the specified revision and modifies it.
Args:
depot: Current depot being bisected.
revision: A git hash revision of the dependency repository.
"""
deps_file_path = os.path.join(self.src_cwd, bisect_utils.FILE_DEPS)
if not os.path.exists(deps_file_path):
raise RuntimeError('DEPS file does not exist. [%s]' % deps_file_path)
# Get current chromium revision (git hash).
cmd = ['rev-parse', 'HEAD']
chromium_sha = bisect_utils.CheckRunGit(cmd).strip()
if not chromium_sha:
raise RuntimeError('Failed to determine Chromium revision for %s' %
revision)
if ('chromium' in bisect_utils.DEPOT_DEPS_NAME[depot]['from'] or
'v8' in bisect_utils.DEPOT_DEPS_NAME[depot]['from']):
# Checkout DEPS file for the current chromium revision.
if not source_control.CheckoutFileAtRevision(
bisect_utils.FILE_DEPS, chromium_sha, cwd=self.src_cwd):
raise RuntimeError(
'DEPS checkout failed for chromium revision: [%s]' % chromium_sha)
if not self.UpdateDeps(revision, depot, deps_file_path):
raise RuntimeError(
'Failed to update DEPS file for chromium: [%s]' % chromium_sha)
def _CreatePatch(self, revision):
"""Creates a patch from currently modified files.
Args:
revision: A git hash revision of the dependency repository.
Returns:
A tuple with git hash of chromium revision and DEPS patch text.
"""
# Get current chromium revision (git hash).
chromium_sha = bisect_utils.CheckRunGit(['rev-parse', 'HEAD']).strip()
if not chromium_sha:
raise RuntimeError('Failed to determine Chromium revision for %s' %
revision)
# Create a patch of any locally modified files (e.g. the DEPS edit) against HEAD.
diff_command = [
'diff',
'--src-prefix=',
'--dst-prefix=',
'--no-ext-diff',
'HEAD',
]
diff_text = bisect_utils.CheckRunGit(diff_command)
return (chromium_sha, ChangeBackslashToSlashInPatch(diff_text))
def ObtainBuild(
self, depot, revision=None, create_patch=False):
"""Obtains a build by either downloading or building directly.
Args:
depot: Dependency repository name.
revision: A git commit hash. If None is given, the currently checked-out
revision is built.
create_patch: Create a patch with any locally modified files.
Returns:
True for success.
"""
if self.opts.debug_ignore_build:
return True
build_success = False
cwd = os.getcwd()
os.chdir(self.src_cwd)
# Fetch build archive for the given revision from the cloud storage when
# the storage bucket is passed.
if self.IsDownloadable(depot) and revision:
build_success = self._DownloadAndUnzipBuild(
revision, depot, build_type='Release', create_patch=create_patch)
else:
# Print the current environment set on the machine.
print 'Full Environment:'
for key, value in sorted(os.environ.items()):
print '%s: %s' % (key, value)
# Flush so the environment is printed before the compile output.
sys.stdout.flush()
build_success = self.builder.Build(depot, self.opts)
os.chdir(cwd)
return build_success
def RunGClientHooks(self):
"""Runs gclient with runhooks command.
Returns:
True if gclient reports no errors.
"""
if self.opts.debug_ignore_build:
return True
# Some "runhooks" calls create symlinks that other (older?) versions
# do not handle correctly causing the build to fail. We want to avoid
# clearing the entire out/ directory so that changes close together will
# build faster so we just clear out all symlinks on the expectation that
# the next "runhooks" call will recreate everything properly. Ignore
# failures (like Windows that doesn't have "find").
try:
bisect_utils.RunProcess(
['find', 'out/', '-type', 'l', '-exec', 'rm', '-f', '{}', ';'],
cwd=self.src_cwd, shell=False)
except OSError:
pass
return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)
def _IsBisectModeUsingMetric(self):
return self.opts.bisect_mode in [bisect_utils.BISECT_MODE_MEAN,
bisect_utils.BISECT_MODE_STD_DEV]
def _IsBisectModeReturnCode(self):
return self.opts.bisect_mode in [bisect_utils.BISECT_MODE_RETURN_CODE]
def _IsBisectModeStandardDeviation(self):
return self.opts.bisect_mode in [bisect_utils.BISECT_MODE_STD_DEV]
def RunPerformanceTestAndParseResults(
self, command_to_run, metric, reset_on_first_run=False,
upload_on_last_run=False, results_label=None, test_run_multiplier=1,
allow_flakes=True):
"""Runs a performance test on the current revision and parses the results.
Args:
command_to_run: The command to be run to execute the performance test.
metric: The metric to parse out from the results of the performance test.
This is the result chart name and trace name, separated by a slash.
May be None for perf try jobs.
reset_on_first_run: If True, pass the flag --reset-results on first run.
upload_on_last_run: If True, pass the flag --upload-results on last run.
results_label: A value for the option flag --results-label.
The arguments reset_on_first_run, upload_on_last_run and results_label
are all ignored if the test is not a Telemetry test.
test_run_multiplier: Factor by which to multiply the number of test runs
and the timeout period specified in self.opts.
allow_flakes: Report success even if some tests fail to run.
Returns:
(values dict, 0) if --debug_ignore_perf_test was passed.
(values dict, 0, test output) if the test was run successfully.
(error message, -1) if the test couldn't be run.
(error message, -1, test output) if the test ran but there was an error.
"""
success_code, failure_code = 0, -1
if self.opts.debug_ignore_perf_test:
fake_results = {
'mean': 0.0,
'std_err': 0.0,
'std_dev': 0.0,
'values': [0.0]
}
# When debug_fake_first_test_mean is set, its value is returned as the mean
# and the flag is cleared so that further calls behave as if it wasn't
# set (returning the fake_results dict as defined above).
if self.opts.debug_fake_first_test_mean:
fake_results['mean'] = float(self.opts.debug_fake_first_test_mean)
self.opts.debug_fake_first_test_mean = 0
return (fake_results, success_code)
# For the Windows platform, set posix=False to parse Windows paths correctly.
# On Windows, path separators '\' or '\\' are replaced by '' when posix=True;
# refer to http://bugs.python.org/issue1724822. By default posix=True.
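# Illustrative example of the difference (the path is made up):
#   shlex.split(r'run_test --out=C:\build\Release', posix=True)
#     -> ['run_test', '--out=C:buildRelease']    (backslashes consumed)
#   shlex.split(r'run_test --out=C:\build\Release', posix=False)
#     -> ['run_test', '--out=C:\build\Release']  (backslashes preserved)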
args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost())
if not _GenerateProfileIfNecessary(args):
err_text = 'Failed to generate profile for performance test.'
return (err_text, failure_code)
is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run)
start_time = time.time()
metric_values = []
output_of_all_runs = ''
repeat_count = self.opts.repeat_test_count * test_run_multiplier
return_codes = []
for i in xrange(repeat_count):
# A failing test yields a non-zero return code; it is recorded in
# return_codes below and handled there.
current_args = copy.copy(args)
if is_telemetry:
if i == 0 and reset_on_first_run:
current_args.append('--reset-results')
if i == self.opts.repeat_test_count - 1 and upload_on_last_run:
current_args.append('--upload-results')
if results_label:
current_args.append('--results-label=%s' % results_label)
try:
output, return_code = bisect_utils.RunProcessAndRetrieveOutput(
current_args, cwd=self.src_cwd)
return_codes.append(return_code)
except OSError as e:
if e.errno == errno.ENOENT:
err_text = ('Something went wrong running the performance test. '
'Please review the command line:\n\n')
if 'src/' in ' '.join(args):
err_text += ('Check that you haven\'t accidentally specified a '
'path with src/ in the command.\n\n')
err_text += ' '.join(args)
err_text += '\n'
return (err_text, failure_code)
raise
output_of_all_runs += output
if self.opts.output_buildbot_annotations:
print output
if metric and self._IsBisectModeUsingMetric():
parsed_metric = _ParseMetricValuesFromOutput(metric, output)
if parsed_metric:
metric_values += parsed_metric
# If we're bisecting on a metric (i.e., changes in the mean or
# standard deviation) and no metric values are produced, bail out.
if not metric_values:
break
elif self._IsBisectModeReturnCode():
metric_values.append(return_code)
# If there's a failed test, we can bail out early.
if return_code:
break
elapsed_minutes = (time.time() - start_time) / 60.0
time_limit = self.opts.max_time_minutes * test_run_multiplier
if elapsed_minutes >= time_limit:
break
if metric and not metric_values:
err_text = 'Metric %s was not found in the test output.' % metric
# TODO(qyearsley): Consider also getting and displaying a list of metrics
# that were found in the output here.
return (err_text, failure_code, output_of_all_runs)
# If we're bisecting on return codes, we're really just looking for zero vs
# non-zero.
values = {}
if self._IsBisectModeReturnCode():
# If any of the return codes is non-zero, output 1.
overall_return_code = 0 if (
all(current_value == 0 for current_value in metric_values)) else 1
values = {
'mean': overall_return_code,
'std_err': 0.0,
'std_dev': 0.0,
'values': metric_values,
}
print 'Results of performance test: Command returned with %d' % (
overall_return_code)
print
elif metric:
# Need to get the average value if there were multiple values.
truncated_mean = math_utils.TruncatedMean(
metric_values, self.opts.truncate_percent)
standard_err = math_utils.StandardError(metric_values)
standard_dev = math_utils.StandardDeviation(metric_values)
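# Quick illustration (values are made up): with metric_values of
# [10.0, 11.0, 12.0, 50.0] and a nonzero truncate_percent, TruncatedMean
# discards the extreme values before averaging, so the single 50.0 outlier
# does not dominate the reported mean; std_err and std_dev are still
# computed over the full set.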
if self._IsBisectModeStandardDeviation():
metric_values = [standard_dev]
values = {
'mean': truncated_mean,
'std_err': standard_err,
'std_dev': standard_dev,
'values': metric_values,
}
print 'Results of performance test: %12f %12f' % (
truncated_mean, standard_err)
print
overall_success = success_code
if not allow_flakes and not self._IsBisectModeReturnCode():
overall_success = (
success_code
if (all(current_value == 0 for current_value in return_codes))
else failure_code)
return (values, overall_success, output_of_all_runs)
def PerformPreBuildCleanup(self):
"""Performs cleanup between runs."""
print 'Cleaning up between runs.'
print
# Leaving these .pyc files around between runs may disrupt some perf tests.
for (path, _, files) in os.walk(self.src_cwd):
for cur_file in files:
if cur_file.endswith('.pyc'):
path_to_file = os.path.join(path, cur_file)
os.remove(path_to_file)
def _RunPostSync(self, _depot):
"""Performs any work after syncing.
Args:
depot: Depot name.
Returns:
True if successful.
"""
if 'android' in self.opts.target_platform:
if not builder.SetupAndroidBuildEnvironment(
self.opts, path_to_src=self.src_cwd):
return False
return self.RunGClientHooks()
@staticmethod
def ShouldSkipRevision(depot, revision):
"""Checks whether a particular revision can be safely skipped.
Some commits can be safely skipped (such as a DEPS roll for repos still
using .DEPS.git); since the tool is git-based, those changes would have
no effect.
Args:
depot: The depot being bisected.
revision: Current revision we're synced to.
Returns:
True if we should skip building/testing this revision.
"""
# Skip revisions on android-chrome whose only change is to DEPS.
if depot == 'android-chrome':
cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
output = bisect_utils.CheckRunGit(cmd)
files = output.splitlines()
if len(files) == 1 and files[0] == 'DEPS':
return True
return False
def RunTest(self, revision, depot, command, metric, skippable=False,
skip_sync=False, create_patch=False, force_build=False,
test_run_multiplier=1):
"""Performs a full sync/build/run of the specified revision.
Args:
revision: The revision to sync to.
depot: The depot that's being used at the moment (src, webkit, etc.)
command: The command to execute the performance test.
metric: The performance metric being tested.
skippable: If True, the revision may be skipped (see ShouldSkipRevision).
skip_sync: Skip the sync step.
create_patch: Create a patch with any locally modified files.
force_build: Force a local build.
test_run_multiplier: Factor by which to multiply the given number of runs
and the set timeout period.
Returns:
On success, a tuple containing the results of the performance test.
Otherwise, a tuple with the error message.
"""
logging.info('Running RunTest with rev "%s", command "%s"',
revision, command)
# Decide which sync program to use.
sync_client = None
if depot == 'chromium' or depot == 'android-chrome':
sync_client = 'gclient'
# Do the syncing for all depots.
if not (self.opts.debug_ignore_sync or skip_sync):
if not self._SyncRevision(depot, revision, sync_client):
return ('Failed to sync: [%s]' % str(revision), BUILD_RESULT_FAIL)
# Try to do any post-sync steps. This may include "gclient runhooks".
if not self._RunPostSync(depot):
return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
# Skip this revision if it can be skipped.
if skippable and self.ShouldSkipRevision(depot, revision):
return ('Skipped revision: [%s]' % str(revision),
BUILD_RESULT_SKIPPED)
# Obtain a build for this revision. This may be done by requesting a build
# from another builder, waiting for it and downloading it.
start_build_time = time.time()
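# When force_build is set, pass revision=None so ObtainBuild builds whatever
# is currently checked out (a local build) instead of downloading an archive.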
revision_to_build = revision if not force_build else None
build_success = self.ObtainBuild(
depot, revision=revision_to_build, create_patch=create_patch)
if not build_success:
return ('Failed to build revision: [%s]' % str(revision),
BUILD_RESULT_FAIL)
after_build_time = time.time()
# Run the command and get the results.
results = self.RunPerformanceTestAndParseResults(
command, metric, test_run_multiplier=test_run_multiplier)
# Restore build output directory once the tests are done, to avoid
# any discrepancies.
if self.IsDownloadable(depot) and revision:
self.BackupOrRestoreOutputDirectory(restore=True)
# A value other than 0 indicates that the test couldn't be run, and results
# should also include an error message.
if results[1] != 0:
return results
external_revisions = self._Get3rdPartyRevisions(depot)
if external_revisions is not None:
return (results[0], results[1], external_revisions,
time.time() - after_build_time, after_build_time -
start_build_time)
else:
return ('Failed to parse DEPS file for external revisions.',
BUILD_RESULT_FAIL)
def _SyncRevision(self, depot, revision, sync_client):
"""Syncs depot to particular revision.
Args:
depot: The depot that's being used at the moment (src, webkit, etc.)
revision: The revision to sync to.
sync_client: Program used to sync, e.g. "gclient". Can be None.
Returns:
True if successful, False otherwise.
"""
self.depot_registry.ChangeToDepotDir(depot)
if sync_client:
self.PerformPreBuildCleanup()
# When using gclient to sync, you need to specify the depot you
# want so that all the dependencies sync properly as well.
# i.e. gclient sync src@<SHA1>
if sync_client == 'gclient' and revision:
revision = '%s@%s' % (bisect_utils.DEPOT_DEPS_NAME[depot]['src'],
revision)
if depot == 'chromium' and self.opts.target_platform == 'android-chrome':
return self._SyncRevisionsForAndroidChrome(revision)
return source_control.SyncToRevision(revision, sync_client)
def _SyncRevisionsForAndroidChrome(self, revision):
"""Syncs android-chrome and chromium repos to particular revision.
This is a special case for android-chrome as the gclient sync for chromium
overwrites the android-chrome revision to TOT. Therefore both the repos
are synced to known revisions.
Args:
revision: Git hash of the Chromium to sync.
Returns:
True if successful, False otherwise.
"""
revisions_list = [revision]
current_android_rev = source_control.GetCurrentRevision(
self.depot_registry.GetDepotDir('android-chrome'))
revisions_list.append(
'%s@%s' % (bisect_utils.DEPOT_DEPS_NAME['android-chrome']['src'],
current_android_rev))
return not bisect_utils.RunGClientAndSync(revisions_list)
def _CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
"""Given known good and bad values, decide if the current_value passed
or failed.
Args:
current_value: The value of the metric being checked.
known_bad_value: The reference value for a "failed" run.
known_good_value: The reference value for a "passed" run.
Returns:
True if the current_value is closer to the known_good_value than the
known_bad_value.
"""
if self.opts.bisect_mode == bisect_utils.BISECT_MODE_STD_DEV:
dist_to_good_value = abs(current_value['std_dev'] -
known_good_value['std_dev'])
dist_to_bad_value = abs(current_value['std_dev'] -
known_bad_value['std_dev'])
else:
dist_to_good_value = abs(current_value['mean'] - known_good_value['mean'])
dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean'])
return dist_to_good_value < dist_to_bad_value
def _GetV8BleedingEdgeFromV8TrunkIfMappable(
self, revision, bleeding_edge_branch):
"""Gets v8 bleeding edge revision mapped to v8 revision in trunk.
Args:
revision: A V8 trunk revision to map to a bleeding edge revision.
bleeding_edge_branch: Branch used to perform lookup of bleeding edge
revision.
Returns:
A mapped bleeding edge revision if found, otherwise None.
"""
commit_position = source_control.GetCommitPosition(revision)
if bisect_utils.IsStringInt(commit_position):
# V8 is tricky to bisect, in that there are only a few instances when
# we can dive into bleeding_edge and get back a meaningful result.
# Try to detect a V8 "business as usual" case, which is when:
# 1. trunk revision N has description "Version X.Y.Z"
# 2. bleeding_edge revision (N-1) has description "Prepare push to
# trunk. Now working on X.Y.(Z+1)."
#
# As of 01/24/2014, V8 trunk descriptions are formatted:
# "Version 3.X.Y (based on bleeding_edge revision rZ)"
# So we can just try parsing that out first and fall back to the old way.
v8_dir = self.depot_registry.GetDepotDir('v8')
v8_bleeding_edge_dir = self.depot_registry.GetDepotDir('v8_bleeding_edge')
revision_info = source_control.QueryRevisionInfo(revision, cwd=v8_dir)
version_re = re.compile("Version (?P<values>[0-9,.]+)")
regex_results = version_re.search(revision_info['subject'])
if regex_results:
git_revision = None
if 'based on bleeding_edge' in revision_info['subject']:
try:
bleeding_edge_revision = revision_info['subject'].split(
'bleeding_edge revision r')[1]
bleeding_edge_revision = int(bleeding_edge_revision.split(')')[0])
bleeding_edge_url = ('https://v8.googlecode.com/svn/branches/'
'bleeding_edge@%s' % bleeding_edge_revision)
cmd = ['log',
'--format=%H',
'--grep',
bleeding_edge_url,
'-1',
bleeding_edge_branch]
output = bisect_utils.CheckRunGit(cmd, cwd=v8_dir)
if output:
git_revision = output.strip()
return git_revision
except (IndexError, ValueError):
pass
else:
# V8 rolls description changed after V8 git migration, new description
# includes "Version 3.X.Y (based on <git hash>)"
try:
rxp = re.compile('based on (?P<git_revision>[a-fA-F0-9]+)')
re_results = rxp.search(revision_info['subject'])
if re_results:
return re_results.group('git_revision')
except (IndexError, ValueError):
pass
if not git_revision:
# Wasn't successful, try the old way of looking for "Prepare push to"
git_revision = source_control.ResolveToRevision(
int(commit_position) - 1, 'v8_bleeding_edge',
bisect_utils.DEPOT_DEPS_NAME, -1, cwd=v8_bleeding_edge_dir)
if git_revision:
revision_info = source_control.QueryRevisionInfo(
git_revision, cwd=v8_bleeding_edge_dir)
if 'Prepare push to trunk' in revision_info['subject']:
return git_revision
return None
def _GetNearestV8BleedingEdgeFromTrunk(
self, revision, v8_branch, bleeding_edge_branch, search_forward=True):
"""Gets the nearest V8 roll and maps to bleeding edge revision.
V8 is a bit tricky to bisect since it isn't just rolled out like blink.
Each revision on trunk might just be whatever was in bleeding edge, rolled
directly out. Or it could be some mixture of previous v8 trunk versions,
with bits and pieces cherry picked out from bleeding edge. In order to
bisect, we need both the before/after versions on trunk v8 to be just pushes
from bleeding edge. With the V8 git migration, the branches got switched.
a) master (external/v8) == candidates (v8/v8)
b) bleeding_edge (external/v8) == master (v8/v8)
Args:
revision: The V8 revision for which to find the nearest bleeding edge
revision.
v8_branch: The V8 trunk/candidates branch to search.
bleeding_edge_branch: Branch used to look up the bleeding edge revision.
search_forward: Searches forward if True, otherwise searches backward.
Returns:
A mapped bleeding edge revision if found, otherwise None.
"""
cwd = self.depot_registry.GetDepotDir('v8')
cmd = ['log', '--format=%ct', '-1', revision]
output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
commit_time = int(output)
commits = []
if search_forward:
cmd = ['log',
'--format=%H',
'--after=%d' % commit_time,
v8_branch,
'--reverse']
output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
output = output.split()
commits = output
# Get 10 git hashes immediately after the given commit.
commits = commits[:10]
else:
cmd = ['log',
'--format=%H',
'-10',
'--before=%d' % commit_time,
v8_branch]
output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
output = output.split()
commits = output
bleeding_edge_revision = None
for c in commits:
bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(
c, bleeding_edge_branch)
if bleeding_edge_revision:
break
return bleeding_edge_revision
def _FillInV8BleedingEdgeInfo(self, min_revision_state, max_revision_state):
cwd = self.depot_registry.GetDepotDir('v8')
# when "remote.origin.url" is https://chromium.googlesource.com/v8/v8.git
v8_branch = 'origin/candidates'
bleeding_edge_branch = 'origin/master'
# Support for the chromium revisions with external V8 repo.
# i.e. https://chromium.googlesource.com/external/v8.git
cmd = ['config', '--get', 'remote.origin.url']
v8_repo_url = bisect_utils.CheckRunGit(cmd, cwd=cwd)
if 'external/v8.git' in v8_repo_url:
v8_branch = 'origin/master'
bleeding_edge_branch = 'origin/bleeding_edge'
r1 = self._GetNearestV8BleedingEdgeFromTrunk(
min_revision_state.revision,
v8_branch,
bleeding_edge_branch,
search_forward=True)
r2 = self._GetNearestV8BleedingEdgeFromTrunk(
max_revision_state.revision,
v8_branch,
bleeding_edge_branch,
search_forward=False)
min_revision_state.external['v8_bleeding_edge'] = r1
max_revision_state.external['v8_bleeding_edge'] = r2
if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
min_revision_state.revision, bleeding_edge_branch)
or not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
max_revision_state.revision, bleeding_edge_branch)):
self.warnings.append(
'Trunk revisions in V8 did not map directly to bleeding_edge. '
'Attempted to expand the range to find V8 rolls which did map '
'directly to bleeding_edge revisions, but results might not be '
'valid.')
def _FindNextDepotToBisect(
self, current_depot, min_revision_state, max_revision_state):
"""Decides which depot the script should dive into next (if any).
Args:
current_depot: Current depot being bisected.
min_revision_state: State of the earliest revision in the bisect range.
max_revision_state: State of the latest revision in the bisect range.
Returns:
Name of the depot to bisect next, or None.
"""
external_depot = None
for next_depot in bisect_utils.DEPOT_NAMES:
if ('platform' in bisect_utils.DEPOT_DEPS_NAME[next_depot] and
bisect_utils.DEPOT_DEPS_NAME[next_depot]['platform'] != os.name):
continue
if not (bisect_utils.DEPOT_DEPS_NAME[next_depot]['recurse']
and min_revision_state.depot
in bisect_utils.DEPOT_DEPS_NAME[next_depot]['from']):
continue
if current_depot == 'v8':
# We grab the bleeding_edge info here rather than earlier because we
# finally have the revision range. From that we can search forwards and
# backwards to try to match trunk revisions to bleeding_edge.
self._FillInV8BleedingEdgeInfo(min_revision_state, max_revision_state)
if (min_revision_state.external.get(next_depot) ==
max_revision_state.external.get(next_depot)):
continue
if (min_revision_state.external.get(next_depot) and
max_revision_state.external.get(next_depot)):
external_depot = next_depot
break
return external_depot
def PrepareToBisectOnDepot(
self, current_depot, start_revision, end_revision, previous_revision):
"""Changes to the appropriate directory and gathers a list of revisions
to bisect between |start_revision| and |end_revision|.
Args:
current_depot: The depot we want to bisect.
start_revision: Start of the revision range.
end_revision: End of the revision range.
previous_revision: The last revision we synced to on |previous_depot|.
Returns:
A list containing the revisions between |start_revision| and
|end_revision| inclusive.
"""
# Change into working directory of external library to run
# subsequent commands.
self.depot_registry.ChangeToDepotDir(current_depot)
# V8 (and possibly others) is merged in periodically. Bisecting
# this directory directly won't give much good info.
if 'custom_deps' in bisect_utils.DEPOT_DEPS_NAME[current_depot]:
config_path = os.path.join(self.src_cwd, '..')
if bisect_utils.RunGClientAndCreateConfig(
self.opts, bisect_utils.DEPOT_DEPS_NAME[current_depot]['custom_deps'],
cwd=config_path):
return []
if bisect_utils.RunGClient(
['sync', '--revision', previous_revision], cwd=self.src_cwd):
return []
if current_depot == 'v8_bleeding_edge':
self.depot_registry.ChangeToDepotDir('chromium')
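# Swap the checkouts so that builds pick up bleeding_edge sources from the
# v8/ directory: v8 -> v8.bak, v8_bleeding_edge -> v8. The 'mv' entries
# appended to cleanup_commands are undone later by PerformCleanup().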
shutil.move('v8', 'v8.bak')
shutil.move('v8_bleeding_edge', 'v8')
self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])
self.depot_registry.SetDepotDir(
'v8_bleeding_edge', os.path.join(self.src_cwd, 'v8'))
self.depot_registry.SetDepotDir(
'v8', os.path.join(self.src_cwd, 'v8.bak'))
self.depot_registry.ChangeToDepotDir(current_depot)
depot_revision_list = self.GetRevisionList(current_depot,
end_revision,
start_revision)
self.depot_registry.ChangeToDepotDir('chromium')
return depot_revision_list
def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
"""Gathers reference values by running the performance tests on the
known good and bad revisions.
Args:
good_rev: The last known good revision where the performance regression
has not occurred yet.
bad_rev: A revision where the performance regression has already occurred.
cmd: The command to execute the performance test.
metric: The metric being tested for regression.
target_depot: The depot being bisected (e.g. 'chromium' or 'android-chrome').
Returns:
A tuple with the results of building and running each revision.
"""
bad_run_results = self.RunTest(bad_rev, target_depot, cmd, metric)
good_run_results = None
if not bad_run_results[1]:
good_run_results = self.RunTest(good_rev, target_depot, cmd, metric)
return (bad_run_results, good_run_results)
def PrintRevisionsToBisectMessage(self, revision_list, depot):
if self.opts.output_buildbot_annotations:
step_name = 'Bisection Range: [%s:%s - %s]' % (depot, revision_list[-1],
revision_list[0])
bisect_utils.OutputAnnotationStepStart(step_name)
print
print 'Revisions to bisect on [%s]:' % depot
for revision_id in revision_list:
print ' -> %s' % (revision_id, )
print
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepClosed()
def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision,
good_svn_revision=None):
"""Checks to see if changes to DEPS file occurred, and that the revision
range also includes the change to .DEPS.git. If it doesn't, attempts to
expand the revision range to include it.
Args:
bad_revision: First known bad git revision.
good_revision: Last known good git revision.
good_svn_revision: Last known good svn revision.
Returns:
A tuple with the new bad and good revisions.
"""
# Do NOT perform the nudge, because at revision 291563 .DEPS.git was removed
# and the source contains only the DEPS file for dependency changes.
if good_svn_revision >= 291563:
return (bad_revision, good_revision)
if self.opts.target_platform == 'chromium':
changes_to_deps = source_control.QueryFileRevisionHistory(
bisect_utils.FILE_DEPS, good_revision, bad_revision)
if changes_to_deps:
# DEPS file was changed, search from the oldest change to DEPS file to
# bad_revision to see if there are matching .DEPS.git changes.
oldest_deps_change = changes_to_deps[-1]
changes_to_gitdeps = source_control.QueryFileRevisionHistory(
bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)
if len(changes_to_deps) != len(changes_to_gitdeps):
# Grab the timestamp of the last DEPS change
cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
output = bisect_utils.CheckRunGit(cmd)
commit_time = int(output)
# Try looking for a commit that touches the .DEPS.git file in the
# next 15 minutes after the DEPS file change.
cmd = [
'log', '--format=%H', '-1',
'--before=%d' % (commit_time + 900),
'--after=%d' % commit_time,
'origin/master', '--', bisect_utils.FILE_DEPS_GIT
]
output = bisect_utils.CheckRunGit(cmd)
output = output.strip()
if output:
self.warnings.append(
'Detected change to DEPS and modified '
'revision range to include change to .DEPS.git')
return (output, good_revision)
else:
self.warnings.append(
'Detected change to DEPS but couldn\'t find '
'matching change to .DEPS.git')
return (bad_revision, good_revision)
def CheckIfRevisionsInProperOrder(
self, target_depot, good_revision, bad_revision):
"""Checks that |good_revision| is an earlier revision than |bad_revision|.
Args:
target_depot: The depot containing the revisions.
good_revision: Number/tag of the known good revision.
bad_revision: Number/tag of the known bad revision.
Returns:
True if the revisions are in the proper order (good earlier than bad).
"""
cwd = self.depot_registry.GetDepotDir(target_depot)
good_position = source_control.GetCommitPosition(good_revision, cwd)
bad_position = source_control.GetCommitPosition(bad_revision, cwd)
# Compare commit timestamp for repos that don't support commit position.
if not (bad_position and good_position):
logging.info('Could not get commit positions for revisions %s and %s in '
'depot %s', good_revision, bad_revision, target_depot)
good_position = source_control.GetCommitTime(good_revision, cwd=cwd)
bad_position = source_control.GetCommitTime(bad_revision, cwd=cwd)
return good_position <= bad_position
def CanPerformBisect(self, good_revision, bad_revision):
"""Checks whether a given revision is bisectable.
Checks for the following:
1. Non-bisectable revisions for android bots (refer to crbug.com/385324).
2. Non-bisectable revisions for Windows bots (refer to crbug.com/405274).
Args:
good_revision: Known good revision.
bad_revision: Known bad revision.
Returns:
A dictionary indicating the result. If revision is not bisectable,
this will contain the field "error", otherwise None.
"""
if self.opts.target_platform == 'android':
good_revision = source_control.GetCommitPosition(good_revision)
if (bisect_utils.IsStringInt(good_revision)
and good_revision < 265549):
return {'error': (
'Bisect cannot continue for the given revision range.\n'
'It is impossible to bisect Android regressions '
'prior to r265549, which allows the bisect bot to '
'rely on Telemetry to do apk installation of the most recently '
'built local ChromePublic (refer to crbug.com/385324).\n'
'Please try bisecting revisions greater than or equal to r265549.')}
if bisect_utils.IsWindowsHost():
good_revision = source_control.GetCommitPosition(good_revision)
bad_revision = source_control.GetCommitPosition(bad_revision)
if (bisect_utils.IsStringInt(good_revision) and
bisect_utils.IsStringInt(bad_revision)):
if (289987 <= good_revision < 290716 or
289987 <= bad_revision < 290716):
return {'error': ('Oops! Revisions between r289987 and r290716 are '
'marked as a dead zone for Windows due to '
'crbug.com/405274. Please try another range.')}
return None
def _GatherResultsFromRevertedCulpritCL(
self, results, target_depot, command_to_run, metric):
"""Gathers performance results with/without culprit CL.
Attempts to revert the culprit CL against ToT and runs the
performance tests again with and without the CL, adding the results to
the overall bisect results.
Args:
results: BisectResults from the bisect.
target_depot: The target depot we're bisecting.
command_to_run: Specify the command to execute the performance test.
metric: The performance metric to monitor.
"""
run_results_tot, run_results_reverted = self._RevertCulpritCLAndRetest(
results, target_depot, command_to_run, metric)
results.AddRetestResults(run_results_tot, run_results_reverted)
if len(results.culprit_revisions) != 1:
return
# Cleanup reverted files if anything is left.
_, _, culprit_depot = results.culprit_revisions[0]
bisect_utils.CheckRunGit(
['reset', '--hard', 'HEAD'],
cwd=self.depot_registry.GetDepotDir(culprit_depot))
def _RevertCL(self, culprit_revision, culprit_depot):
"""Reverts the specified revision in the specified depot."""
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepStart(
'Reverting culprit CL: %s' % culprit_revision)
_, return_code = bisect_utils.RunGit(
['revert', '--no-commit', culprit_revision],
cwd=self.depot_registry.GetDepotDir(culprit_depot))
if return_code:
bisect_utils.OutputAnnotationStepWarning()
bisect_utils.OutputAnnotationStepText('Failed to revert CL cleanly.')
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepClosed()
return not return_code
def _RevertCulpritCLAndRetest(
self, results, target_depot, command_to_run, metric):
"""Reverts the culprit CL against ToT and runs the performance test.
Attempts to revert the culprit CL against ToT and runs the
performance tests again with and without the CL.
Args:
results: BisectResults from the bisect.
target_depot: The target depot we're bisecting.
command_to_run: Specify the command to execute the performance test.
metric: The performance metric to monitor.
Returns:
A tuple with the results of running the CL at ToT/reverted.
"""
# Might want to retest ToT with a revert of the CL to confirm that
# performance returns.
if results.confidence < bisect_utils.HIGH_CONFIDENCE:
return (None, None)
# If there were multiple culprit CLs, we won't try to revert.
if len(results.culprit_revisions) != 1:
return (None, None)
culprit_revision, _, culprit_depot = results.culprit_revisions[0]
if not self._SyncRevision(target_depot, None, 'gclient'):
return (None, None)
head_revision = bisect_utils.CheckRunGit(['log', '--format=%H', '-1'])
head_revision = head_revision.strip()
if not self._RevertCL(culprit_revision, culprit_depot):
return (None, None)
# If the culprit CL happened to be in a depot that gets pulled in, we
# can't revert the change and issue a try job to build, since that would
# require modifying both the DEPS file and files in another depot.
# Instead, we build locally.
force_build = (culprit_depot != target_depot)
if force_build:
results.warnings.append(
'Culprit CL is in another depot, attempting to revert and build'
' locally to retest. This may not match the performance of official'
' builds.')
run_results_reverted = self._RunTestWithAnnotations(
'Re-Testing ToT with reverted culprit',
'Failed to run reverted CL.',
head_revision, target_depot, command_to_run, metric, force_build)
# Clear the reverted file(s).
bisect_utils.RunGit(
['reset', '--hard', 'HEAD'],
cwd=self.depot_registry.GetDepotDir(culprit_depot))
# Retesting with the reverted CL failed, so bail out of retesting against
# ToT.
if run_results_reverted[1]:
return (None, None)
run_results_tot = self._RunTestWithAnnotations(
'Re-Testing ToT',
'Failed to run ToT.',
head_revision, target_depot, command_to_run, metric, force_build)
return (run_results_tot, run_results_reverted)
def PostBisectResults(self, bisect_results):
"""Posts bisect results to Perf Dashboard."""
bisect_utils.OutputAnnotationStepStart('Post Results')
results = bisect_results_json.Get(
bisect_results, self.opts, self.depot_registry)
results_json = json.dumps(results)
data = urllib.urlencode({'data': results_json})
request = urllib2.Request(PERF_DASH_RESULTS_URL)
try:
urllib2.urlopen(request, data)
except urllib2.URLError as e:
print 'Failed to post bisect results. Error: %s.' % e
bisect_utils.OutputAnnotationStepWarning()
bisect_utils.OutputAnnotationStepClosed()
def _RunTestWithAnnotations(
self, step_text, error_text, head_revision,
target_depot, command_to_run, metric, force_build):
"""Runs the performance test and outputs start/stop annotations.
Args:
step_text: Text for the buildbot annotation step that is started.
error_text: Text to output if the test run fails.
head_revision: The revision to test (the current HEAD).
target_depot: The target depot we're bisecting.
command_to_run: Specify the command to execute the performance test.
metric: The performance metric to monitor.
force_build: Whether to force a build locally.
Returns:
Results of the test.
"""
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepStart(step_text)
# Build and run the test again with the reverted culprit CL against ToT.
run_test_results = self.RunTest(
head_revision, target_depot, command_to_run,
metric, skippable=False, skip_sync=True, create_patch=True,
force_build=force_build)
if self.opts.output_buildbot_annotations:
if run_test_results[1]:
bisect_utils.OutputAnnotationStepWarning()
bisect_utils.OutputAnnotationStepText(error_text)
bisect_utils.OutputAnnotationStepClosed()
return run_test_results
def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
"""Given known good and bad revisions, run a binary search on all
intermediate revisions to determine the CL where the performance regression
occurred.
Args:
command_to_run: Specify the command to execute the performance test.
bad_revision_in: Number/tag of the known bad revision.
good_revision_in: Number/tag of the known good revision.
metric: The performance metric to monitor.
Returns:
A BisectResults object.
"""
# Choose depot to bisect first
target_depot = 'chromium'
if self.opts.target_platform == 'android-chrome':
target_depot = 'android-chrome'
cwd = os.getcwd()
self.depot_registry.ChangeToDepotDir(target_depot)
# If they passed SVN revisions, we can try to match them to git SHA1 hashes.
bad_revision = source_control.ResolveToRevision(
bad_revision_in, target_depot, bisect_utils.DEPOT_DEPS_NAME, 100)
good_revision = source_control.ResolveToRevision(
good_revision_in, target_depot, bisect_utils.DEPOT_DEPS_NAME, -100)
os.chdir(cwd)
if bad_revision is None:
return BisectResults(
error='Couldn\'t resolve [%s] to SHA1.' % bad_revision_in)
if good_revision is None:
return BisectResults(
error='Couldn\'t resolve [%s] to SHA1.' % good_revision_in)
# Check that they didn't accidentally swap good and bad revisions.
if not self.CheckIfRevisionsInProperOrder(
target_depot, good_revision, bad_revision):
return BisectResults(error='Bad rev (%s) appears to be earlier than good '
'rev (%s).' % (good_revision, bad_revision))
bad_revision, good_revision = self.NudgeRevisionsIfDEPSChange(
bad_revision, good_revision, good_revision_in)
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepStart('Gathering Revisions')
cannot_bisect = self.CanPerformBisect(good_revision, bad_revision)
if cannot_bisect:
return BisectResults(error=cannot_bisect.get('error'))
print 'Gathering revision range for bisection.'
# Retrieve a list of revisions to do bisection on.
revision_list = self.GetRevisionList(target_depot, bad_revision,
good_revision)
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepClosed()
if revision_list:
self.PrintRevisionsToBisectMessage(revision_list, target_depot)
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')
print 'Gathering reference values for bisection.'
# Perform the performance tests on the good and bad revisions, to get
# reference values.
bad_results, good_results = self.GatherReferenceValues(good_revision,
bad_revision,
command_to_run,
metric,
target_depot)
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepClosed()
if bad_results[1]:
error = ('An error occurred while building and running the \'bad\' '
'reference value. The bisect cannot continue without '
'a working \'bad\' revision to start from.\n\nError: %s' %
bad_results[0])
return BisectResults(error=error)
if good_results[1]:
error = ('An error occurred while building and running the \'good\' '
'reference value. The bisect cannot continue without '
'a working \'good\' revision to start from.\n\nError: %s' %
good_results[0])
return BisectResults(error=error)
# We need these reference values to determine if later runs should be
# classified as pass or fail.
known_bad_value = bad_results[0]
known_good_value = good_results[0]
# Abort bisect early when the return codes for known good
# and known bad revisions are the same.
if (self._IsBisectModeReturnCode() and
known_bad_value['mean'] == known_good_value['mean']):
return BisectResults(abort_reason=('known good and known bad revisions '
'returned same return code (return code=%s). '
'Continuing bisect might not yield any results.' %
known_bad_value['mean']))
# Check the direction of improvement only if the improvement_direction
# option is set to a specific direction (1 for higher is better or -1 for
# lower is better).
improvement_dir = self.opts.improvement_direction
if improvement_dir:
higher_is_better = improvement_dir > 0
if higher_is_better:
message = "Expecting higher values to be better for this metric, "
else:
message = "Expecting lower values to be better for this metric, "
metric_increased = known_bad_value['mean'] > known_good_value['mean']
if metric_increased:
message += "and the metric appears to have increased. "
else:
message += "and the metric appears to have decreased. "
if ((higher_is_better and metric_increased) or
(not higher_is_better and not metric_increased)):
error = (message + 'Then, the test results for the ends of the given '
'\'good\' - \'bad\' range of revisions represent an '
'improvement (and not a regression).')
return BisectResults(error=error)
logging.info(message + "Therefore we continue to bisect.")
bisect_state = BisectState(target_depot, revision_list)
revision_states = bisect_state.GetRevisionStates()
min_revision = 0
max_revision = len(revision_states) - 1
# Can just mark the good and bad revisions explicitly here since we
# already know the results.
bad_revision_state = revision_states[min_revision]
bad_revision_state.external = bad_results[2]
bad_revision_state.perf_time = bad_results[3]
bad_revision_state.build_time = bad_results[4]
bad_revision_state.passed = False
bad_revision_state.value = known_bad_value
good_revision_state = revision_states[max_revision]
good_revision_state.external = good_results[2]
good_revision_state.perf_time = good_results[3]
good_revision_state.build_time = good_results[4]
good_revision_state.passed = True
good_revision_state.value = known_good_value
# Check how likely it is that the good and bad results are different
# beyond chance-induced variation.
if not (self.opts.debug_ignore_regression_confidence or
self._IsBisectModeReturnCode()):
if not _IsRegressionReproduced(known_good_value, known_bad_value,
self.opts.required_initial_confidence):
# If there is no significant difference between "good" and "bad"
# revision results, then the "bad revision" is considered "good".
# TODO(qyearsley): Remove this if it is not necessary.
bad_revision_state.passed = True
self.warnings.append(_RegressionNotReproducedWarningMessage(
good_revision, bad_revision, known_good_value, known_bad_value))
return BisectResults(bisect_state, self.depot_registry, self.opts,
self.warnings)
while True:
if not revision_states:
break
if max_revision - min_revision <= 1:
min_revision_state = revision_states[min_revision]
max_revision_state = revision_states[max_revision]
current_depot = min_revision_state.depot
# TODO(sergiyb): Under which conditions can first two branches be hit?
if min_revision_state.passed == '?':
next_revision_index = min_revision
elif max_revision_state.passed == '?':
next_revision_index = max_revision
elif current_depot in ['android-chrome', 'chromium', 'v8']:
previous_revision = revision_states[min_revision].revision
# If there were changes to any of the external libraries we track,
# should bisect the changes there as well.
external_depot = self._FindNextDepotToBisect(
current_depot, min_revision_state, max_revision_state)
# If there was no change in any of the external depots, the search
# is over.
if not external_depot:
if current_depot == 'v8':
self.warnings.append(
'Unfortunately, V8 bisection couldn\'t '
'continue any further. The script can only bisect into '
'V8\'s bleeding_edge repository if both the current and '
'previous revisions in trunk map directly to revisions in '
'bleeding_edge.')
break
earliest_revision = max_revision_state.external[external_depot]
latest_revision = min_revision_state.external[external_depot]
new_revision_list = self.PrepareToBisectOnDepot(
external_depot, earliest_revision, latest_revision,
previous_revision)
if not new_revision_list:
error = ('An error occurred attempting to retrieve revision '
'range: [%s..%s]' % (earliest_revision, latest_revision))
return BisectResults(error=error)
revision_states = bisect_state.CreateRevisionStatesAfter(
external_depot, new_revision_list, current_depot,
previous_revision)
# Reset the bisection and perform it on the newly inserted states.
min_revision = 0
max_revision = len(revision_states) - 1
print ('Regression in metric %s appears to be the result of '
'changes in [%s].' % (metric, external_depot))
revision_list = [state.revision for state in revision_states]
self.PrintRevisionsToBisectMessage(revision_list, external_depot)
continue
else:
break
else:
next_revision_index = (int((max_revision - min_revision) / 2) +
min_revision)
next_revision_state = revision_states[next_revision_index]
next_revision = next_revision_state.revision
next_depot = next_revision_state.depot
self.depot_registry.ChangeToDepotDir(next_depot)
message = 'Working on [%s:%s]' % (next_depot, next_revision)
print message
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepStart(message)
run_results = self.RunTest(next_revision, next_depot, command_to_run,
metric, skippable=True)
# If the build is successful, check whether or not the metric
# had regressed.
if not run_results[1]:
if len(run_results) > 2:
next_revision_state.external = run_results[2]
next_revision_state.perf_time = run_results[3]
next_revision_state.build_time = run_results[4]
passed_regression = self._CheckIfRunPassed(run_results[0],
known_good_value,
known_bad_value)
next_revision_state.passed = passed_regression
next_revision_state.value = run_results[0]
if passed_regression:
max_revision = next_revision_index
else:
min_revision = next_revision_index
else:
if run_results[1] == BUILD_RESULT_SKIPPED:
next_revision_state.passed = 'Skipped'
elif run_results[1] == BUILD_RESULT_FAIL:
next_revision_state.passed = 'Build Failed'
print run_results[0]
# If the build is broken, remove it and redo search.
revision_states.pop(next_revision_index)
max_revision -= 1
if self.opts.output_buildbot_annotations:
self.printer.PrintPartialResults(bisect_state)
bisect_utils.OutputAnnotationStepClosed()
self._ConfidenceExtraTestRuns(min_revision_state, max_revision_state,
command_to_run, metric)
results = BisectResults(bisect_state, self.depot_registry, self.opts,
self.warnings)
self._GatherResultsFromRevertedCulpritCL(
results, target_depot, command_to_run, metric)
return results
else:
# Weren't able to sync and retrieve the revision range.
error = ('An error occurred attempting to retrieve revision range: '
'[%s..%s]' % (good_revision, bad_revision))
return BisectResults(error=error)
def _ConfidenceExtraTestRuns(self, good_state, bad_state, command_to_run,
metric):
if (bool(good_state.passed) != bool(bad_state.passed)
and good_state.passed not in ('Skipped', 'Build Failed')
and bad_state.passed not in ('Skipped', 'Build Failed')):
for state in (good_state, bad_state):
run_results = self.RunTest(
state.revision,
state.depot,
command_to_run,
metric,
test_run_multiplier=BORDER_REVISIONS_EXTRA_RUNS)
# Is extend the right thing to do here?
if run_results[1] != BUILD_RESULT_FAIL:
state.value['values'].extend(run_results[0]['values'])
else:
warning_text = 'Re-test of revision %s failed with error message: %s'
warning_text %= (state.revision, run_results[0])
if warning_text not in self.warnings:
self.warnings.append(warning_text)
def _IsPlatformSupported():
"""Checks that this platform and build system are supported.
Args:
opts: The options parsed from the command line.
Returns:
True if the platform and build system are supported.
"""
# Haven't tested the script out on any other platforms yet.
supported = ['posix', 'nt']
return os.name in supported
def RemoveBuildFiles(build_type):
"""Removes build files from previous runs."""
out_dir = os.path.join('out', build_type)
build_dir = os.path.join('build', build_type)
logging.info('Removing build files in "%s" and "%s".',
os.path.abspath(out_dir), os.path.abspath(build_dir))
try:
RemakeDirectoryTree(out_dir)
RemakeDirectoryTree(build_dir)
except Exception as e:
raise RuntimeError('Got error in RemoveBuildFiles: %s' % e)
def RemakeDirectoryTree(path_to_dir):
"""Removes a directory tree and replaces it with an empty one.
Returns True if successful, False otherwise.
"""
RemoveDirectoryTree(path_to_dir)
MaybeMakeDirectory(path_to_dir)
def RemoveDirectoryTree(path_to_dir):
"""Removes a directory tree. Returns True if successful or False otherwise."""
if os.path.isfile(path_to_dir):
logging.info('REMOVING FILE %s' % path_to_dir)
os.remove(path_to_dir)
try:
if os.path.exists(path_to_dir):
shutil.rmtree(path_to_dir)
  except OSError as e:
if e.errno != errno.ENOENT:
raise
# This is copied from build/scripts/common/chromium_utils.py.
def MaybeMakeDirectory(*path):
"""Creates an entire path, if it doesn't already exist."""
file_path = os.path.join(*path)
try:
os.makedirs(file_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
class BisectOptions(object):
"""Options to be used when running bisection."""
def __init__(self):
super(BisectOptions, self).__init__()
self.target_platform = 'chromium'
self.build_preference = None
self.good_revision = None
self.bad_revision = None
self.use_goma = None
self.goma_dir = None
self.goma_threads = 64
self.repeat_test_count = 20
self.truncate_percent = 25
self.max_time_minutes = 20
self.metric = None
self.command = None
self.output_buildbot_annotations = None
self.no_custom_deps = False
self.working_directory = None
self.extra_src = None
self.debug_ignore_build = None
self.debug_ignore_sync = None
self.debug_ignore_perf_test = None
self.debug_ignore_regression_confidence = None
self.debug_fake_first_test_mean = 0
self.target_arch = 'ia32'
self.target_build_type = 'Release'
self.builder_type = 'perf'
self.bisect_mode = bisect_utils.BISECT_MODE_MEAN
self.improvement_direction = 0
self.bug_id = ''
self.required_initial_confidence = 80.0
self.try_job_id = None
@staticmethod
def _AddBisectOptionsGroup(parser):
group = parser.add_argument_group('Bisect options')
group.add_argument('-c', '--command', required=True,
help='A command to execute your performance test at '
'each point in the bisection.')
group.add_argument('-b', '--bad_revision', required=True,
help='A bad revision to start bisection. Must be later '
'than good revision. May be either a git or svn '
'revision.')
group.add_argument('-g', '--good_revision', required=True,
help='A revision to start bisection where performance '
'test is known to pass. Must be earlier than the '
'bad revision. May be either a git or a svn '
'revision.')
group.add_argument('-m', '--metric',
help='The desired metric to bisect on. For example '
'"vm_rss_final_b/vm_rss_f_b"')
group.add_argument('-d', '--improvement_direction', type=int, default=0,
help='An integer number representing the direction of '
'improvement. 1 for higher is better, -1 for lower '
'is better, 0 for ignore (default).')
group.add_argument('-r', '--repeat_test_count', type=int, default=20,
choices=range(1, 101),
help='The number of times to repeat the performance '
'test. Values will be clamped to range [1, 100]. '
'Default value is 20.')
group.add_argument('--max_time_minutes', type=int, default=20,
choices=range(1, 61),
help='The maximum time (in minutes) to take running the '
'performance tests. The script will run the '
'performance tests according to '
'--repeat_test_count, so long as it doesn\'t exceed'
' --max_time_minutes. Values will be clamped to '
'range [1, 60]. Default value is 20.')
group.add_argument('-t', '--truncate_percent', type=int, default=25,
help='The highest/lowest percent are discarded to form '
'a truncated mean. Values will be clamped to range '
'[0, 25]. Default value is 25 percent.')
group.add_argument('--bisect_mode', default=bisect_utils.BISECT_MODE_MEAN,
choices=[bisect_utils.BISECT_MODE_MEAN,
bisect_utils.BISECT_MODE_STD_DEV,
bisect_utils.BISECT_MODE_RETURN_CODE],
help='The bisect mode. Choices are to bisect on the '
'difference in mean, std_dev, or return_code.')
group.add_argument('--bug_id', default='',
help='The id for the bug associated with this bisect. ' +
'If this number is given, bisect will attempt to ' +
'verify that the bug is not closed before '
'starting.')
group.add_argument('--try_job_id', default=None,
help='The id assigned by Perf Dashboard when sending ' +
'try jobs.')
group.add_argument('--required_initial_confidence', type=float,
default=80.0,
help='The required confidence score for the initial '
'check to see whether there is a significant '
'difference between given good and bad revisions.')
@staticmethod
def _AddBuildOptionsGroup(parser):
group = parser.add_argument_group('Build options')
group.add_argument('-w', '--working_directory',
help='Path to the working directory where the script '
'will do an initial checkout of the chromium depot. The '
'files will be placed in a subdirectory "bisect" under '
'working_directory and that will be used to perform the '
'bisection. This parameter is optional, if it is not '
'supplied, the script will work from the current depot.')
group.add_argument('--build_preference',
choices=['msvs', 'ninja', 'make'],
help='The preferred build system to use. On linux/mac '
'the options are make/ninja. On Windows, the '
'options are msvs/ninja.')
group.add_argument('--target_platform', default='chromium',
choices=['chromium', 'android', 'android-chrome'],
help='The target platform. Choices are "chromium" '
'(current platform), or "android". If you specify '
'something other than "chromium", you must be '
'properly set up to build that platform.')
group.add_argument('--no_custom_deps', dest='no_custom_deps',
action='store_true', default=False,
help='Run the script with custom_deps or not.')
group.add_argument('--extra_src',
help='Path to a script which can be used to modify the '
'bisect script\'s behavior.')
group.add_argument('--use_goma', action='store_true',
help='Add a bunch of extra threads for goma, and enable '
'goma')
group.add_argument('--goma_dir',
help='Path to goma tools (or system default if not '
'specified).')
group.add_argument('--goma_threads', type=int, default='64',
help='Number of threads for goma, only if using goma.')
group.add_argument('--output_buildbot_annotations', action='store_true',
help='Add extra annotation output for buildbot.')
group.add_argument('--target_arch', default='ia32',
dest='target_arch',
choices=['ia32', 'x64', 'arm', 'arm64'],
help='The target build architecture. Choices are "ia32" '
'(default), "x64", "arm" or "arm64".')
group.add_argument('--target_build_type', default='Release',
choices=['Release', 'Debug', 'Release_x64'],
help='The target build type. Choices are "Release" '
'(default), Release_x64 or "Debug".')
group.add_argument('--builder_type', default=fetch_build.PERF_BUILDER,
choices=[fetch_build.PERF_BUILDER,
fetch_build.FULL_BUILDER,
fetch_build.ANDROID_CHROME_PERF_BUILDER, ''],
help='Type of builder to get build from. This '
'determines both the bot that builds and the '
'place where archived builds are downloaded from. '
'For local builds, an empty string can be passed.')
@staticmethod
def _AddDebugOptionsGroup(parser):
group = parser.add_argument_group('Debug options')
group.add_argument('--debug_ignore_build', action='store_true',
help='DEBUG: Don\'t perform builds.')
group.add_argument('--debug_ignore_sync', action='store_true',
help='DEBUG: Don\'t perform syncs.')
group.add_argument('--debug_ignore_perf_test', action='store_true',
help='DEBUG: Don\'t perform performance tests.')
group.add_argument('--debug_ignore_regression_confidence',
action='store_true',
help='DEBUG: Don\'t score the confidence of the initial '
'good and bad revisions\' test results.')
group.add_argument('--debug_fake_first_test_mean', type=int, default='0',
help='DEBUG: When faking performance tests, return this '
'value as the mean of the first performance test, '
'and return a mean of 0.0 for further tests.')
return group
@classmethod
def _CreateCommandLineParser(cls):
"""Creates a parser with bisect options.
Returns:
An instance of argparse.ArgumentParser.
"""
usage = ('%(prog)s [options] [-- chromium-options]\n'
'Perform binary search on revision history to find a minimal '
'range of revisions where a performance metric regressed.\n')
parser = argparse.ArgumentParser(usage=usage)
cls._AddBisectOptionsGroup(parser)
cls._AddBuildOptionsGroup(parser)
cls._AddDebugOptionsGroup(parser)
return parser
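  # Illustrative invocation (the script name and every option value below are
  # placeholders, not real tests or revisions):
  #
  #   python bisect-perf-regression.py \
  #       -c "out/Release/performance_ui_tests --gtest_filter=StartupTest.*" \
  #       -m startup/warm -g <good revision> -b <bad revision>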
def ParseCommandLine(self):
"""Parses the command line for bisect options."""
parser = self._CreateCommandLineParser()
opts = parser.parse_args()
try:
if (not opts.metric and
opts.bisect_mode != bisect_utils.BISECT_MODE_RETURN_CODE):
raise RuntimeError('missing required parameter: --metric')
if opts.bisect_mode != bisect_utils.BISECT_MODE_RETURN_CODE:
metric_values = opts.metric.split('/')
if len(metric_values) != 2:
raise RuntimeError('Invalid metric specified: [%s]' % opts.metric)
opts.metric = metric_values
opts.truncate_percent = min(max(opts.truncate_percent, 0), 25) / 100.0
for k, v in opts.__dict__.iteritems():
assert hasattr(self, k), 'Invalid %s attribute in BisectOptions.' % k
setattr(self, k, v)
    except RuntimeError as e:
output_string = StringIO.StringIO()
parser.print_help(file=output_string)
error_message = '%s\n\n%s' % (e.message, output_string.getvalue())
output_string.close()
raise RuntimeError(error_message)
@staticmethod
def FromDict(values):
"""Creates an instance of BisectOptions from a dictionary.
Args:
values: a dict containing options to set.
Returns:
An instance of BisectOptions.
"""
opts = BisectOptions()
for k, v in values.iteritems():
assert hasattr(opts, k), 'Invalid %s attribute in BisectOptions.' % k
setattr(opts, k, v)
if opts.metric and opts.bisect_mode != bisect_utils.BISECT_MODE_RETURN_CODE:
metric_values = opts.metric.split('/')
if len(metric_values) != 2:
raise RuntimeError('Invalid metric specified: [%s]' % opts.metric)
opts.metric = metric_values
if opts.target_arch == 'x64' and opts.target_build_type == 'Release':
opts.target_build_type = 'Release_x64'
opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
opts.truncate_percent = opts.truncate_percent / 100.0
return opts
def _ConfigureLogging():
"""Trivial logging config.
Configures logging to output any messages at or above INFO to standard out,
without any additional formatting.
"""
logging_format = '%(message)s'
logging.basicConfig(
stream=logging.sys.stdout, level=logging.INFO, format=logging_format)
def main():
_ConfigureLogging()
try:
opts = BisectOptions()
opts.ParseCommandLine()
if opts.bug_id:
if opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepStart('Checking Issue Tracker')
issue_closed = query_crbug.CheckIssueClosed(opts.bug_id)
if issue_closed:
print 'Aborting bisect because bug is closed'
else:
print 'Could not confirm bug is closed, proceeding.'
if opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepClosed()
if issue_closed:
results = BisectResults(abort_reason='the bug is closed.')
bisect_printer = BisectPrinter(opts)
bisect_printer.FormatAndPrintResults(results)
return 0
if opts.extra_src:
extra_src = bisect_utils.LoadExtraSrc(opts.extra_src)
if not extra_src:
raise RuntimeError('Invalid or missing --extra_src.')
bisect_utils.AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo())
if opts.working_directory:
custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS
if opts.no_custom_deps:
custom_deps = None
bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps)
os.chdir(os.path.join(os.getcwd(), 'src'))
RemoveBuildFiles(opts.target_build_type)
if not _IsPlatformSupported():
raise RuntimeError('Sorry, this platform isn\'t supported yet.')
if not source_control.IsInGitRepository():
raise RuntimeError(
'Sorry, only the git workflow is supported at the moment.')
# gClient sync seems to fail if you're not in master branch.
if (not source_control.IsInProperBranch() and
not opts.debug_ignore_sync and
not opts.working_directory):
raise RuntimeError('You must switch to master branch to run bisection.')
bisect_test = BisectPerformanceMetrics(opts, os.getcwd())
try:
results = bisect_test.Run(opts.command, opts.bad_revision,
opts.good_revision, opts.metric)
if results.error:
raise RuntimeError(results.error)
bisect_test.printer.FormatAndPrintResults(results)
bisect_test.PostBisectResults(results)
return 0
finally:
bisect_test.PerformCleanup()
except RuntimeError as e:
if opts.output_buildbot_annotations:
# The perf dashboard scrapes the "results" step in order to comment on
# bugs. If you change this, please update the perf dashboard as well.
bisect_utils.OutputAnnotationStepStart('Results')
print 'Runtime Error: %s' % e
if opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepClosed()
return 1
if __name__ == '__main__':
sys.exit(main())
|
btk20_src/unit_test/log_power_extractor.py | musiclvme/distant_speech_recognition | 136 | 12627904 | #!/usr/bin/python
"""
Compute log power feature from an audio file
"""
import pickle, numpy
from btk20.common import *
from btk20.stream import *
from btk20.feature import *
D = 160 # 10 msec for 16 kHz audio
fft_len = 256
pow_num = fft_len//2 + 1
input_filename = "../tools/filterbank/Headset1.wav"
output_filename = "log_power.pickle"
# Audio file reader
samplefe = SampleFeaturePtr(block_len=D, shift_len=D, pad_zeros=False)
# Hamming window calculator
hammingfe = HammingFeaturePtr(samplefe)
# FFT feature extractor
fftfe = FFTFeaturePtr(hammingfe, fft_len=fft_len)
# Power (complex square) feature extractor
powerfe = SpectralPowerFeaturePtr(fftfe, pow_num=pow_num)
# Log feature extractor
logfe = LogFeaturePtr(powerfe)
# Reading the audio file
samplefe.read(input_filename)
with open(output_filename, 'wb') as ofp:  # binary mode: pickle writes bytes
frame_no = 0
# compute the log power feature at each frame
for log_vector in logfe:
# print the first 10-dimension vector
print('fr. {}: {}..'.format(frame_no, numpy.array2string(log_vector[0:10], formatter={'float_kind':lambda x: "%.2f" % x})))
pickle.dump(log_vector, ofp, True)
frame_no += 1
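# To read the dumped features back later (illustrative sketch):
#
#   feats = []
#   with open(output_filename, 'rb') as ifp:
#       while True:
#           try:
#               feats.append(pickle.load(ifp))
#           except EOFError:
#               break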
|
gherkin/python/gherkin/inout.py | codemrkay/cucumber | 3,974 | 12627922 | from __future__ import print_function
import json
from .parser import Parser
from .token_scanner import TokenScanner
from .pickles.compiler import compile
from .errors import ParserException, CompositeParserException
class Inout(object):
def __init__(self, print_source, print_ast, print_pickles):
self.print_source = print_source
self.print_ast = print_ast
self.print_pickles = print_pickles
self.parser = Parser()
self.parser.stop_at_first_error = False
def process(self, input, output):
line = input.readline().rstrip()
event = json.loads(line)
if (event['type'] == 'source'):
uri = event['uri']
source = event['data']
token_scanner = TokenScanner(source)
try:
gherkin_document = self.parser.parse(token_scanner)
if (self.print_source):
print(line, file=output)
if (self.print_ast):
print(json.dumps(gherkin_document), file=output)
if (self.print_pickles):
pickles = compile(gherkin_document, uri)
for pickle in pickles:
print(json.dumps(pickle), file=output)
except CompositeParserException as e:
self.print_errors(output, e.errors, uri)
except ParserException as e:
self.print_errors(output, [e], uri)
def print_errors(self, output, errors, uri):
for error in errors:
attachment = {
'type': "attachment",
'source': {
'uri': uri,
'start': {
'line': error.location['line'],
'column': error.location['column']
}
},
'data': error.message,
'media': {
'encoding': "utf-8",
'type': "text/x.cucumber.stacktrace+plain"
}
}
print(json.dumps(attachment), file=output)
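# Usage sketch (illustrative; the feature text and uri below are made up):
# feed a single "source" event through Inout using in-memory streams.
#
#   import io, json
#   event = {'type': 'source', 'uri': 'features/minimal.feature',
#            'data': 'Feature: Minimal\n\n  Scenario: run\n    Given a step\n'}
#   out = io.StringIO()
#   Inout(print_source=False, print_ast=True, print_pickles=True).process(
#       io.StringIO(json.dumps(event) + '\n'), out)
#   print(out.getvalue())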
|
env/Lib/site-packages/jedi/inference/docstring_utils.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 4,213 | 12627930 | from jedi.inference.value import ModuleValue
from jedi.inference.context import ModuleContext
class DocstringModule(ModuleValue):
def __init__(self, in_module_context, **kwargs):
super().__init__(**kwargs)
self._in_module_context = in_module_context
def _as_context(self):
return DocstringModuleContext(self, self._in_module_context)
class DocstringModuleContext(ModuleContext):
def __init__(self, module_value, in_module_context):
super().__init__(module_value)
self._in_module_context = in_module_context
def get_filters(self, origin_scope=None, until_position=None):
yield from super().get_filters(until_position=until_position)
yield from self._in_module_context.get_filters()
|
functions/FuzzyMembership.py | mmfink/raster-functions | 173 | 12627939 | import numpy as np
import math
class FuzzyMembership():
def __init__(self):
self.name = "Fuzzy Membership Function"
self.description = ("Reclassifies or transforms the input data to a 0 to 1 "
"scale based on the possibility of being a member of a "
"specified set")
self.parA = {'minimum': 1., 'mid': None, 'meanMultipler': 1.}
self.parB = {'maximum': 1., 'stdMultipler': 1., 'spreadA': 0.1, 'spreadB': 5.}
def getParameterInfo(self):
return [
{
'name': 'raster',
'dataType': 'raster',
'value': None,
'required': True,
'displayName': "Input Raster",
'description': ("Fuzzy Membership tool - 0 is assigned to those locations that "
"are definitely not a member of the specified set. "
"1 is assigned to those values that are definitely a member "
"of the specified set, and the entire range of possibilities "
"between 0 and 1 are assigned to some level of possible membership.")
},
{
'name': 'mode',
'dataType': 'string',
'value': 'Linear',
'required': True,
'domain': ('Linear', 'Gaussian', 'Small', 'Large', 'Near', 'MSSmall', 'MSLarge'),
'displayName': "Fuzzy Membership Type",
'description': "Fuzzy Membership type."
},
{
'name': 'par1',
'dataType': 'numeric',
'value': None,
'required': False,
'displayName': "Input Parameter A",
'description': ("Linear : {minimum value}, Gaussian/Near/Small/Large : {mid point}, "
"MSSmall/MSLarge : {mean multiplier}.")
},
{
'name': 'par2',
'dataType': 'numeric',
'value': False,
'required': True,
'displayName': "Input Parameter B",
'description': ("Linear : {maximum value}, Gaussian/Near/Small/Large : {spread}, "
"MSSmall/MSLarge : {std deviation multiplier}. ")
},
{
'name': 'hedge',
'dataType': 'string',
'value': 'None',
'required': False,
'domain': ('None', 'Somewhat', 'Very'),
'displayName': "Hedge",
'description': ("A hedge increases or decreases the fuzzy membership values which modify the meaning of a fuzzy set. "
"None - No hedge applied. "
"Somewhat - The square root of the fuzzy membership function. Increases fuzzy membership functions. "
"Very- The square of the fuzzy membership function. Decreases fuzzy membership functions.")
},
]
def getConfiguration(self, **scalars):
return {
'inheritProperties': 2 | 4 | 8, # inherit everything but the pixel type (1)
'invalidateProperties': 2 | 4 | 8, # invalidate these aspects because we are modifying pixel values and updating key properties.
'inputMask': False # we don't need the input mask in .updatePixels()
}
def updateRasterInfo(self, **kwargs):
# output raster information
kwargs['output_info']['bandCount'] = 1
kwargs['output_info']['pixelType'] = 'f4'
kwargs['output_info']['statistics'] = ({'minimum': 0.0, 'maximum': 1.0},)
self.mode = kwargs['mode'].lower() # input fuzzy membership mode
self.hedge = kwargs['hedge'] # to modify fuzzy membership values
# statistics of input raster
stats = kwargs['raster_info']['statistics'][0]
self.mean, self.std = stats['mean'], stats['standardDeviation']
# assignment of fuzzy membership parameters
if kwargs['par1'] != 0.0:
self.parA = self.parA.fromkeys(self.parA, kwargs['par1'])
else:
self.parA['minimum'] = stats['minimum']
self.parA['mid'] = (stats['minimum']+stats['maximum'])/2
if kwargs['par2'] != 0.0:
self.parB = self.parB.fromkeys(self.parB, kwargs['par2'])
else:
self.parB['maximum'] = stats['maximum']
# check range of input range
# linear fuzzy membership min - max
if ((self.parA['minimum'] == self.parB['maximum']) and (self.mode == "linear")):
raise Exception("Linear minimum and maximum must be different.")
# spread values for fuzzy membership function
        # self.mode was lower-cased above, so the Gaussian mode arrives here as 'gaussian'
        if ((self.parB['spreadA'] < 0.01 or self.parB['spreadA'] > 1) and (self.mode == 'gaussian' or self.mode == 'near')) or \
           ((self.parB['spreadB'] < 1 or self.parB['spreadB'] > 10) and (self.mode == 'large' or self.mode == 'small')):
raise Exception("Spread value out of range.")
return kwargs
def updatePixels(self, tlc, shape, props, **pixelBlocks):
# get the input raster pixel block
r = np.array(pixelBlocks['raster_pixels'], dtype='f8', copy=False)
# fuzzy linear membership
if self.mode == "linear":
r = (r - self.parA['minimum']) / (self.parB['maximum'] - self.parA['minimum'])
# fuzzy gaussian membership.
elif self.mode == 'gaussian':
r = (np.e)**((-self.parB['spreadA']) * ((r - self.parA['mid'])**2))
# fuzzy large membership.
elif self.mode == 'large':
r = (1 / (1 + ((r / self.parA['mid'])**(-self.parB['spreadB']))))
# fuzzy small membership.
elif self.mode == 'small':
r = (1 / (1 + ((r / self.parA['mid'])**(self.parB['spreadB']))))
# fuzzy near membership.
elif self.mode == 'near':
r = (1 / (1 + (self.parB['spreadA'] * (r - self.parA['mid'])**2)))
# fuzzy mssmall membership.
elif self.mode == 'mssmall':
rTemp = (self.parB['stdMultipler'] * self.std) / (r - (self.parA['meanMultipler'] * self.mean) + (self.parB['stdMultipler'] * self.std))
np.putmask(r, r <= (self.mean * self.parA['meanMultipler']), 1.0)
np.putmask(r, r > (self.mean * self.parA['meanMultipler']), rTemp)
# fuzzy mslarge membership.
else:
rTemp = 1 - (self.parB['stdMultipler'] * self.std) / (r - (self.parA['meanMultipler'] * self.mean) + (self.parB['stdMultipler'] * self.std))
np.putmask(r, r <= (self.mean * self.parA['meanMultipler']), 0.0)
np.putmask(r, r > (self.mean * self.parA['meanMultipler']), rTemp)
# clip output values between [0.0, 1.0]
r = np.clip(r, 0.0, 1.0)
# hedge calculations
if (self.hedge == "SOMEWHAT"): r = r ** 0.5
elif (self.hedge == "VERY"): r = r ** 2
if len(r.shape) > 2:
pixelBlocks['output_pixels'] = r[0].astype(props['pixelType'], copy=False) # multi band raster
else:
pixelBlocks['output_pixels'] = r.astype(props['pixelType'], copy=False) # single band raster
return pixelBlocks
def updateKeyMetadata(self, names, bandIndex, **keyMetadata):
if bandIndex == -1:
keyMetadata['datatype'] = 'Scientific'
keyMetadata['variable'] = 'FuzzyMembership'
elif bandIndex == 0:
keyMetadata['wavelengthmin'] = None # reset inapplicable band-specific key metadata
keyMetadata['wavelengthmax'] = None
keyMetadata['bandname'] = 'FuzzyMembership'
# ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ##
"""
References:
[1]. Esri (2013): ArcGIS Resources. How Fuzzy Membership Works.
http://resources.arcgis.com/en/help/main/10.1/index.html#//009z000000rz000000
[2]. Esri (2013): ArcGIS Resources. An overview of fuzzy classes.
http://resources.arcgis.com/en/help/main/10.1/index.html#/An_overview_of_fuzzy_classes/005m00000019000000/
"""
|
GPU-Re-Ranking/utils.py | TxuanYu/Person_reID_baseline_pytorch | 3,358 | 12627950 | """
Understanding Image Retrieval Re-Ranking: A Graph Neural Network Perspective
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
Project Page : https://github.com/Xuanmeng-Zhang/gnn-re-ranking
Paper: https://arxiv.org/abs/2012.07620v2
======================================================================
On the Market-1501 dataset, we accelerate the re-ranking processing from 89.2s to 9.4ms
with one K40m GPU, facilitating the real-time post-processing. Similarly, we observe
that our method achieves comparable or even better retrieval results on the other four
image retrieval benchmarks, i.e., VeRi-776, Oxford-5k, Paris-6k and University-1652,
with limited time cost.
"""
import pickle
import numpy as np
import torch
def load_pickle(pickle_path):
with open(pickle_path, 'rb') as f:
data = pickle.load(f)
return data
def save_pickle(pickle_path, data):
with open(pickle_path, 'wb') as f:
pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
def pairwise_squared_distance(x):
'''
x : (n_samples, n_points, dims)
return : (n_samples, n_points, n_points)
'''
x2s = (x * x).sum(-1, keepdim=True)
return x2s + x2s.transpose(-1, -2) - 2 * x @ x.transpose(-1, -2)
def pairwise_distance(x, y):
m, n = x.size(0), y.size(0)
x = x.view(m, -1)
y = y.view(n, -1)
dist = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(m, n) + torch.pow(y, 2).sum(dim=1, keepdim=True).expand(n,m).t()
    dist.addmm_(x, y.t(), beta=1, alpha=-2)  # dist = beta*dist + alpha*(x @ y.t())
return dist
def cosine_similarity(x, y):
m, n = x.size(0), y.size(0)
x = x.view(m, -1)
y = y.view(n, -1)
y = y.t()
score = torch.mm(x, y)
return score
def evaluate_ranking_list(indices, query_label, query_cam, gallery_label, gallery_cam):
    CMC = np.zeros((len(gallery_label)), dtype=int)
ap = 0.0
for i in range(len(query_label)):
ap_tmp, CMC_tmp = evaluate(indices[i],query_label[i], query_cam[i], gallery_label, gallery_cam)
if CMC_tmp[0]==-1:
continue
CMC = CMC + CMC_tmp
ap += ap_tmp
CMC = CMC.astype(np.float32)
CMC = CMC/len(query_label) #average CMC
print('Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f'%(CMC[0],CMC[4],CMC[9],ap/len(query_label)))
def evaluate(index, ql,qc,gl,gc):
query_index = np.argwhere(gl==ql)
camera_index = np.argwhere(gc==qc)
good_index = np.setdiff1d(query_index, camera_index, assume_unique=True)
junk_index1 = np.argwhere(gl==-1)
junk_index2 = np.intersect1d(query_index, camera_index)
junk_index = np.append(junk_index2, junk_index1) #.flatten())
CMC_tmp = compute_mAP(index, good_index, junk_index)
return CMC_tmp
def compute_mAP(index, good_index, junk_index):
ap = 0
    cmc = np.zeros((len(index)), dtype=int)
if good_index.size==0: # if empty
cmc[0] = -1
return ap,cmc
# remove junk_index
mask = np.in1d(index, junk_index, invert=True)
index = index[mask]
# find good_index index
ngood = len(good_index)
mask = np.in1d(index, good_index)
rows_good = np.argwhere(mask==True)
rows_good = rows_good.flatten()
cmc[rows_good[0]:] = 1
for i in range(ngood):
d_recall = 1.0/ngood
precision = (i+1)*1.0/(rows_good[i]+1)
if rows_good[i]!=0:
old_precision = i*1.0/rows_good[i]
else:
old_precision=1.0
ap = ap + d_recall*(old_precision + precision)/2
return ap, cmc
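# Usage sketch (synthetic data, for illustration only): rank a small random
# gallery for two queries and score it with the helpers above.
#
#   q_feat, g_feat = torch.randn(2, 128), torch.randn(10, 128)
#   dist = pairwise_distance(q_feat, g_feat)          # (2, 10) squared distances
#   indices = torch.argsort(dist, dim=1).numpy()      # closest gallery items first
#   q_label, q_cam = np.array([0, 1]), np.array([0, 0])
#   g_label, g_cam = np.random.randint(0, 5, 10), np.ones(10, dtype=int)
#   evaluate_ranking_list(indices, q_label, q_cam, g_label, g_cam)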
|
session-5/libs/celeb_vaegan.py | itamaro/CADL | 1,628 | 12627964 | """
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
Copyright <NAME>, June 2016.
"""
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
from .utils import download
from skimage.transform import resize as imresize
def celeb_vaegan_download():
"""Download a pretrained celeb vae/gan network."""
# Load the model and labels
model = download('https://s3.amazonaws.com/cadl/models/celeb.vaegan.tfmodel')
labels = download('https://s3.amazonaws.com/cadl/celeb-align/list_attr_celeba.txt')
return model, labels
def get_celeb_vaegan_model():
"""Get a pretrained model.
Returns
-------
net : dict
{
'graph_def': tf.GraphDef
The graph definition
'labels': list
List of different possible attributes from celeb
'attributes': np.ndarray
One hot encoding of the attributes per image
[n_els x n_labels]
'preprocess': function
Preprocess function
}
"""
# Download the trained net
model, labels = celeb_vaegan_download()
# Parse the ids and synsets
txt = open(labels).readlines()
n_els = int(txt[0].strip())
labels = txt[1].strip().split()
n_labels = len(labels)
attributes = np.zeros((n_els, n_labels), dtype=bool)
for i, txt_i in enumerate(txt[2:]):
attributes[i] = (np.array(txt_i.strip().split()[1:]).astype(int) > 0)
# Load the saved graph
with gfile.GFile(model, 'rb') as f:
graph_def = tf.GraphDef()
try:
graph_def.ParseFromString(f.read())
except:
print('try adding PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python' +
'to environment. e.g.:\n' +
'PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python ipython\n' +
'See here for info: ' +
'https://github.com/tensorflow/tensorflow/issues/582')
net = {
'graph_def': graph_def,
'labels': labels,
'attributes': attributes,
'preprocess': preprocess,
}
return net
def preprocess(img, crop_factor=0.8):
"""Replicate the preprocessing we did on the VAE/GAN.
This model used a crop_factor of 0.8 and crop size of [100, 100, 3].
"""
crop = np.min(img.shape[:2])
r = (img.shape[0] - crop) // 2
c = (img.shape[1] - crop) // 2
cropped = img[r: r + crop, c: c + crop]
r, c, *d = cropped.shape
if crop_factor < 1.0:
amt = (1 - crop_factor) / 2
h, w = int(c * amt), int(r * amt)
cropped = cropped[h:-h, w:-w]
rsz = imresize(cropped, (100, 100), preserve_range=False)
return rsz
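# Example usage sketch (assumes `img` is an RGB image array you have loaded
# yourself; the pretrained model is downloaded on first use):
#
#   net = get_celeb_vaegan_model()
#   print(len(net['labels']), net['attributes'].shape)
#   x = net['preprocess'](img)   # -> (100, 100, 3) array ready for the graph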
|
plot_line.py | cogitate3/stock | 3,401 | 12627968 | # -*-coding=utf-8-*-
import datetime
import os
import random
import time
from optparse import OptionParser
__author__ = 'Rocky'
'''
http://30daydo.com
Contact: <EMAIL>
'''
import pandas as pd
import talib
import tushare as ts
import matplotlib as mpl
from mpl_finance import candlestick2_ochl, volume_overlay
import matplotlib.pyplot as plt
from configure.settings import DBSelector
import sys
if sys.platform=='linux':
# centos的配置, 根据自定义拷贝的字体
mpl.rcParams['font.sans-serif'] = ['Microsoft YaHei']
else:
mpl.rcParams['font.sans-serif'] = ['simhei']
mpl.rcParams['axes.unicode_minus'] = False
def get_basic_info():
DB = DBSelector()
engine = DB.get_engine('db_stock', 'qq')
base_info = pd.read_sql('tb_basic_info', engine, index_col='index')
return base_info
def check_path(root_path,current,filename):
folder_path = os.path.join(root_path, current)
if not os.path.exists(folder_path):
os.mkdir(folder_path)
full_path = os.path.join(folder_path, filename)
if os.path.exists(full_path):
return None
else:
return full_path
def plot_stock_line(api,code, name, table_type, current, root_path,start='2019-10-01', save=False):
title = '{}_{}_{}_{}'.format(current, code, name, table_type).replace('*', '_')
filename = title + '.png'
full_path = check_path(root_path,current,filename)
if full_path is None:
return
base_info = get_basic_info()
if code is None and name is not None:
code = base_info[base_info['name'] == name]['code'].values[0]
df = None
for _ in range(4):
try:
df = ts.bar(code, conn=api, start_date=start)
except Exception as e:
ts.close_apis(api)
time.sleep(random.random() * 30)
api = ts.get_apis()
else:
break
if df is None:
return
df = df.sort_index()
if name is None:
name = base_info[base_info['code'] == code]['name'].values[0]
df = df.reset_index()
df['datetime'] = df['datetime'].dt.strftime('%Y-%m-%d')
sma5 = talib.SMA(df['close'].values, 5)
sma20 = talib.SMA(df['close'].values, 10)
# ax.set_xticks(range(0,len(df),20))
# # ax.set_xticklabels(df['date'][::5])
# ax.set_xticklabels(df['datetime'][::20])
fig = plt.figure(figsize=(10, 8))
# fig,(ax,ax2)=plt.subplots(2,1,sharex=True,figsize=(16,10))
ax = fig.add_axes([0, 0.3, 1, 0.50])
ax2 = fig.add_axes([0, 0.1, 1, 0.20])
candlestick2_ochl(ax, df['open'], df['close'], df['high'], df['low'], width=1, colorup='r', colordown='g',
alpha=0.6)
ax.grid(True)
ax.set_title(title)
ax.plot(sma5, label='MA5')
ax.legend()
ax.plot(sma20, label='MA20')
ax.legend(loc=2)
ax.grid(True)
# df['vol'].plot(kind='bar')
volume_overlay(ax2, df['open'], df['close'], df['vol'], width=0.75, alpha=0.8, colordown='g', colorup='r')
ax2.set_xticks(range(0, len(df), 20))
# ax.set_xticklabels(df['date'][::5])
ax2.set_xticklabels(df['datetime'][::20])
plt.setp(ax2.get_xticklabels(), rotation=30, horizontalalignment='right')
ax2.grid(True)
plt.subplots_adjust(hspace=0.3)
if save:
# path = os.path.join(os.path.dirname(__file__),'data',TODAY)
fig.savefig(full_path)
else:
plt.show()
plt.close()
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-c", "--code",
dest="code",
help="-c 300141 #using code to find security")
parser.add_option("-n", "--name",
dest="name",
help="-n 和顺电气 #using code to find security")
(options, args) = parser.parse_args()
    if len(sys.argv) >= 2:
        code = options.code
        # optparse already returns text strings on Python 3, so no decode is needed
        name = options.name
else:
code = None
name = '泰永长征'
    # plot_stock_line also needs a tushare api handle, the table type and a root
    # path for saved figures (the script directory is used here)
    api = ts.get_apis()
    plot_stock_line(api, code=code, name=name, table_type='zdt', current='20180912',
                    root_path=os.path.dirname(os.path.abspath(__file__)),
                    start='2018-02-01', save=False)
|
google_or_tools/post_office_problem2_sat.py | tias/hakank | 279 | 12627972 | # Copyright 2021 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Post office problem in OR-tools CP-SAT Solver.
Problem statement:
http://www-128.ibm.com/developerworks/linux/library/l-glpk2/
From Winston 'Operations Research: Applications and Algorithms':
'''
A post office requires a different number of full-time employees working
on different days of the week [summarized below]. Union rules state that
each full-time employee must work for 5 consecutive days and then receive
two days off. For example, an employee who works on Monday to Friday
must be off on Saturday and Sunday. The post office wants to meet its
daily requirements using only full-time employees. Minimize the number
of employees that must be hired.
To summarize the important information about the problem:
* Every full-time worker works for 5 consecutive days and takes 2 days off
* Day 1 (Monday): 17 workers needed
* Day 2 : 13 workers needed
* Day 3 : 15 workers needed
* Day 4 : 19 workers needed
* Day 5 : 14 workers needed
* Day 6 : 16 workers needed
* Day 7 (Sunday) : 11 workers needed
The post office needs to minimize the number of employees it needs
to hire to meet its demand.
'''
This is a port of my old CP model post_office_problem2.py
This model was created by <NAME> (<EMAIL>)
Also see my other OR-tools models: http://www.hakank.org/or_tools/
"""
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import math, sys
from cp_sat_utils import scalar_product
def main():
model = cp.CpModel()
#
# data
#
# days 0..6, monday 0
n = 7
days = list(range(n))
need = [17, 13, 15, 19, 14, 16, 11]
# Total cost for the 5 day schedule.
# Base cost per day is 100.
# Working saturday is 100 extra
# Working sunday is 200 extra.
cost = [500, 600, 800, 800, 800, 800, 700]
#
# variables
#
# No. of workers starting at day i
x = [model.NewIntVar(0, 100, 'x[%i]' % i) for i in days]
total_cost = model.NewIntVar(0, 20000, 'total_cost')
num_workers = model.NewIntVar(0, 100, 'num_workers')
#
# constraints
#
scalar_product(model, x, cost, total_cost)
model.Add(num_workers == sum(x))
for i in days:
model.Add(sum(
[x[j] for j in days if j != (i + 5) % n and j != (i + 6) % n]) >= need[i])
# objective
model.Minimize(total_cost)
#
# search and result
#
solver = cp.CpSolver()
status = solver.Solve(model)
if status == cp.OPTIMAL:
print('num_workers:', solver.Value(num_workers))
print('total_cost:', solver.Value(total_cost))
print('x:', [solver.Value(x[i]) for i in days])
print()
print('NumConflicts:', solver.NumConflicts())
print('NumBranches:', solver.NumBranches())
print('WallTime:', solver.WallTime())
if __name__ == '__main__':
main()
|
models/__init__.py | psui3905/CCT | 308 | 12627990 | from .model import CCT |
tests/test_year_2005.py | l0pht511/jpholiday | 179 | 12628000 | # coding: utf-8
import datetime
import unittest
import jpholiday
class TestYear2005(unittest.TestCase):
def test_holiday(self):
"""
2005年祝日
"""
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2005, 1, 1)), '元日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2005, 1, 10)), '成人の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2005, 2, 11)), '建国記念の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2005, 3, 20)), '春分の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2005, 3, 21)), '春分の日 振替休日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2005, 4, 29)), 'みどりの日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2005, 5, 3)), '憲法記念日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2005, 5, 4)), '国民の休日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2005, 5, 5)), 'こどもの日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2005, 7, 18)), '海の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2005, 9, 19)), '敬老の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2005, 9, 23)), '秋分の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2005, 10, 10)), '体育の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2005, 11, 3)), '文化の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2005, 11, 23)), '勤労感謝の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2005, 12, 23)), '天皇誕生日')
def test_count_month(self):
"""
2005年月祝日数
"""
self.assertEqual(len(jpholiday.month_holidays(2005, 1)), 2)
self.assertEqual(len(jpholiday.month_holidays(2005, 2)), 1)
self.assertEqual(len(jpholiday.month_holidays(2005, 3)), 2)
self.assertEqual(len(jpholiday.month_holidays(2005, 4)), 1)
self.assertEqual(len(jpholiday.month_holidays(2005, 5)), 3)
self.assertEqual(len(jpholiday.month_holidays(2005, 6)), 0)
self.assertEqual(len(jpholiday.month_holidays(2005, 7)), 1)
self.assertEqual(len(jpholiday.month_holidays(2005, 8)), 0)
self.assertEqual(len(jpholiday.month_holidays(2005, 9)), 2)
self.assertEqual(len(jpholiday.month_holidays(2005, 10)), 1)
self.assertEqual(len(jpholiday.month_holidays(2005, 11)), 2)
self.assertEqual(len(jpholiday.month_holidays(2005, 12)), 1)
def test_count_year(self):
"""
2005年祝日数
"""
self.assertEqual(len(jpholiday.year_holidays(2005)), 16)
|
build.py | ReneNyffenegger/mssql-scripter | 302 | 12628026 | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
from azure.storage.blob import BlockBlobService, ContentSettings
import os
import sys
import utility
import mssqlscripter.mssqltoolsservice.external as mssqltoolsservice
AZURE_STORAGE_CONNECTION_STRING = os.environ.get('AZURE_STORAGE_CONNECTION_STRING')
BLOB_CONTAINER_NAME = 'simple'
UPLOADED_PACKAGE_LINKS = []
def print_heading(heading, f=None):
print('{0}\n{1}\n{0}'.format('=' * len(heading), heading), file=f)
def build(platform_names):
"""
Builds mssql-scripter package.
"""
print_heading('Cleanup')
# clean
utility.clean_up(utility.MSSQLSCRIPTER_DIST_DIRECTORY)
print_heading('Running setup')
# install general requirements.
utility.exec_command('pip install -r dev_requirements.txt', utility.ROOT_DIR)
    # convert windows line endings to unix for the mssql-scripter bash script
utility.exec_command('python dos2unix.py mssql-scripter mssql-scripter', utility.ROOT_DIR)
for platform in platform_names:
utility.clean_up(utility.MSSQLSCRIPTER_BUILD_DIRECTORY)
utility.cleaun_up_egg_info_sub_directories(utility.ROOT_DIR)
mssqltoolsservice.copy_sqltoolsservice(platform)
print_heading('Building mssql-scripter {} wheel package package'.format(platform))
utility.exec_command('python --version', utility.ROOT_DIR)
utility.exec_command(
'python setup.py check -r -s bdist_wheel --plat-name {}'.format(platform),
utility.ROOT_DIR,
continue_on_error=False)
mssqltoolsservice.clean_up_sqltoolsservice()
def _upload_index_file(service, blob_name, title, links):
print('Uploading index file {}'.format(blob_name))
service.create_blob_from_text(
container_name=BLOB_CONTAINER_NAME,
blob_name=blob_name,
text="<html><head><title>{0}</title></head><body><h1>{0}</h1>{1}</body></html>"
.format(title, '\n'.join(
['<a href="{0}">{0}</a><br/>'.format(link) for link in links])),
content_settings=ContentSettings(
content_type='text/html',
content_disposition=None,
content_encoding=None,
content_language=None,
content_md5=None,
cache_control=None
)
)
def _gen_pkg_index_html(service, pkg_name):
links = []
index_file_name = pkg_name+'/'
for blob in list(service.list_blobs(BLOB_CONTAINER_NAME, prefix=index_file_name)):
if blob.name == index_file_name:
# Exclude the index file from being added to the list
continue
links.append(blob.name.replace(index_file_name, ''))
_upload_index_file(service, index_file_name, 'Links for {}'.format(pkg_name), links)
UPLOADED_PACKAGE_LINKS.append(index_file_name)
def _upload_package(service, file_path, pkg_name):
print('Uploading {}'.format(file_path))
file_name = os.path.basename(file_path)
blob_name = '{}/{}'.format(pkg_name, file_name)
service.create_blob_from_path(
container_name=BLOB_CONTAINER_NAME,
blob_name=blob_name,
file_path=file_path
)
def validate_package(platform_names):
"""
Install mssql-scripter wheel package locally.
"""
root_dir = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
# Local install of mssql-scripter.
mssqlscripter_wheel_dir = os.listdir(utility.MSSQLSCRIPTER_DIST_DIRECTORY)
current_platform = utility.get_current_platform()
    # pick the wheel built for the current platform (exactly one is expected)
    mssqlscripter_wheel_name = [pkge for pkge in mssqlscripter_wheel_dir if current_platform in pkge][0]
# To ensure we have a clean install, we disable the cache as to prevent cache overshadowing actual changes made.
utility.exec_command(
'pip install --no-cache-dir --no-index ./dist/{}'.format(mssqlscripter_wheel_name),
root_dir, continue_on_error=False)
def publish_daily(platforms_names):
"""
Publish mssql-scripter wheel package to daily storage account.
"""
print('Publishing to simple container within storage account.')
assert AZURE_STORAGE_CONNECTION_STRING, 'Set AZURE_STORAGE_CONNECTION_STRING environment variable'
blob_service = BlockBlobService(connection_string=AZURE_STORAGE_CONNECTION_STRING)
print_heading('Uploading packages to blob storage ')
for pkg in os.listdir(utility.MSSQLSCRIPTER_DIST_DIRECTORY):
pkg_path = os.path.join(utility.MSSQLSCRIPTER_DIST_DIRECTORY, pkg)
print('Uploading package {}'.format(pkg_path))
_upload_package(blob_service, pkg_path, 'mssql-scripter')
# Upload index files
_gen_pkg_index_html(blob_service, 'mssql-scripter')
_upload_index_file(blob_service, 'index.html', 'Simple Index', UPLOADED_PACKAGE_LINKS)
def publish_official(platforms_names):
"""
Publish mssql-scripter wheel package to PyPi.
"""
mssqlscripter_wheel_dir = os.listdir(utility.MSSQLSCRIPTER_DIST_DIRECTORY)
# Run twine action for mssqlscripter.
# Only authorized users with credentials will be able to upload this package.
# Credentials will be stored in a .pypirc file.
for mssqlscripter_wheel_name in mssqlscripter_wheel_dir:
utility.exec_command(
'twine upload {}'.format(mssqlscripter_wheel_name),
utility.MSSQLSCRIPTER_DIST_DIRECTORY)
if __name__ == '__main__':
action = 'build'
supported_platforms = [
'win32',
'win_amd64',
'macosx_10_11_intel',
'manylinux1_x86_64']
targets = {
'build': build,
'validate_package': validate_package,
'publish_daily': publish_daily,
'publish_official': publish_official
}
if len(sys.argv) > 1:
action = sys.argv[1]
if len(sys.argv) > 2:
supported_platforms = [sys.argv[2]]
if action in targets:
targets[action](supported_platforms)
else:
print('{} is not a supported action'.format(action))
print('Supported actions are {}'.format(list(targets.keys())))
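# Example invocations (illustrative):
#
#   python build.py build
#   python build.py build win_amd64
#   python build.py validate_package win_amd64
#   python build.py publish_daily      # requires AZURE_STORAGE_CONNECTION_STRING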
|
e2e_testing/torchscript/cast.py | pashu123/torch-mlir | 213 | 12628034 | # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Also available under a BSD-style license. See LICENSE.
import torch
from torch_mlir_e2e_test.torchscript.framework import TestUtils
from torch_mlir_e2e_test.torchscript.registry import register_test_case
from torch_mlir_e2e_test.torchscript.annotations import annotate_args, export
# ==============================================================================
class TensorToIntZeroRank(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([], torch.int64, True),
])
def forward(self, x):
return int(x)
@register_test_case(module_factory=lambda: TensorToIntZeroRank())
def TensorToIntZeroRank_basic(module, tu: TestUtils):
module.forward(torch.randint(10, ()))
# ==============================================================================
class TensorToInt(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.int64, True),
])
def forward(self, x):
return int(x)
@register_test_case(module_factory=lambda: TensorToInt())
def TensorToInt_basic(module, tu: TestUtils):
module.forward(torch.randint(10, (1, 1)))
# ==============================================================================
class TensorToFloatZeroRank(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([], torch.float64, True),
])
def forward(self, x):
return float(x)
@register_test_case(module_factory=lambda: TensorToFloatZeroRank())
def TensorToFloatZeroRank_basic(module, tu: TestUtils):
module.forward(torch.rand((), dtype=torch.float64))
# ==============================================================================
class TensorToFloat(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.float64, True),
])
def forward(self, x):
return float(x)
@register_test_case(module_factory=lambda: TensorToFloat())
def TensorToFloat_basic(module, tu: TestUtils):
module.forward(torch.rand((1, 1), dtype=torch.float64))
# ==============================================================================
class TensorToBoolZeroRank(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([], torch.bool, True),
])
def forward(self, x):
return bool(x)
@register_test_case(module_factory=lambda: TensorToBoolZeroRank())
def TensorToBoolZeroRank_basic(module, tu: TestUtils):
module.forward(torch.tensor(1, dtype=torch.bool))
# ==============================================================================
class TensorToBool(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.bool, True),
])
def forward(self, x):
return bool(x)
@register_test_case(module_factory=lambda: TensorToBool())
def TensorToBool_basic(module, tu: TestUtils):
module.forward(torch.tensor([[1]], dtype=torch.bool))
|
proxybroker/errors.py | askvrtsv/ProxyBroker | 3,157 | 12628038 | """Errors."""
class ProxyError(Exception):
pass
class NoProxyError(Exception):
pass
class ResolveError(Exception):
pass
class ProxyConnError(ProxyError):
errmsg = 'connection_failed'
class ProxyRecvError(ProxyError):
errmsg = 'connection_is_reset'
class ProxySendError(ProxyError):
errmsg = 'connection_is_reset'
class ProxyTimeoutError(ProxyError):
errmsg = 'connection_timeout'
class ProxyEmptyRecvError(ProxyError):
errmsg = 'empty_response'
class BadStatusError(Exception): # BadStatusLine
errmsg = 'bad_status'
class BadResponseError(Exception):
errmsg = 'bad_response'
class BadStatusLine(Exception):
errmsg = 'bad_status_line'
class ErrorOnStream(Exception):
errmsg = 'error_on_stream'
|
scripts/pipeline_example.py | Carromattsson/netrd | 116 | 12628063 | """
pipeline_example.py
------------
Example pipeline for netrd
author: <NAME>
email: <EMAIL>
Submitted as part of the 2019 NetSI Collabathon
"""
# NOTE: !IMPORTANT! If you want to play and make changes,
# please make your own copy of this file (with a different name!)
# first and edit that!!! Leave this file alone except to fix a bug!
import netrd
from collections import defaultdict
import numpy as np
import matplotlib.pyplot as plt
## Load datasets
datasets = {'4-clique':netrd.utilities.read_time_series('../data/synth_4clique_N64_simple.csv'),
'BA':netrd.utilities.read_time_series('../data/synth_BAnetwork_N64_simple.csv'),
'ER':netrd.utilities.read_time_series('../data/synth_ERnetwork_N64_simple.csv')}
## Load reconstruction methods
reconstructors = {
'correlation_matrix':netrd.reconstruction.CorrelationMatrixReconstructor(),
'convergent_crossmappings':netrd.reconstruction.ConvergentCrossMappingReconstructor(),
'exact_mean_field':netrd.reconstruction.ExactMeanFieldReconstructor(),
'free_energy_minimization':netrd.reconstruction.FreeEnergyMinimizationReconstructor(),
'graphical_lasso':netrd.reconstruction.GraphicalLassoReconstructor(),
'maximum_likelihood':netrd.reconstruction.MaximumLikelihoodEstimationReconstructor(),
'mutual_information':netrd.reconstruction.MutualInformationMatrixReconstructor(),
'ou_inference':netrd.reconstruction.OUInferenceReconstructor(),
'partial_correlation':netrd.reconstruction.PartialCorrelationMatrixReconstructor(),
'regularized_correlation':netrd.reconstruction.RegularizedCorrelationMatrixReconstructor(),
'thouless_anderson_palmer':netrd.reconstruction.ThoulessAndersonPalmerReconstructor(),
'time_granger_causality':netrd.reconstruction.TimeGrangerCausalityReconstructor(),
'marchenko_pastur':netrd.reconstruction.MarchenkoPastur(),
#'naive_transfer_entropy':netrd.reconstruction.NaiveTransferEntropyReconstructor()
}
## Load distance methods
distance_methods = {'jaccard':netrd.distance.JaccardDistance(),
'hamming':netrd.distance.Hamming(),
'hamming_ipsen_mikhailov':netrd.distance.HammingIpsenMikhailov(),
#'portrait_divergence':netrd.distance.PortraitDivergence(),
#'resistance_perturbation':netrd.distance.ResistancePerturbation(),
'frobenius':netrd.distance.Frobenius(),
#'netsimilie':netrd.distance.NetSimile()
}
## get the names of the methods
reconstruction_methods = [method for method in reconstructors.keys()]
distance_methods_list = [method for method in distance_methods.keys()]
## Dictionary of dictionaries containing the reconstructed networks
## <dataset_name, <recon_method_name, reconstructed_graph>
networks = defaultdict(dict)
print('Computing network reconstructions')
## First get all of the reconstructions for every dataset
for data_name, time_series in datasets.items():
print('dataset: ' + str(data_name))
for reconstruction_method, reconstructor in reconstructors.items():
print(reconstruction_method + '...', end='')
networks[data_name][reconstruction_method] = reconstructor.fit(time_series)
print('done')
## 4-deep dict structure: <dataset_name, <rmethod1, <rmethod2, <dmethod, distance> > > >
distances = dict()
## In order to standardize, I am going to collect all of the
## outputs for each distance
per_distance_values = dict()
print('Computing network distances')
## Then, compute the distance between every reconstruction of every network
for data_name, networks_dict in networks.items():
per_distance_values[data_name] = defaultdict(list)
print('dataset: ' + str(data_name))
distances[data_name] = dict()
for distance_method, distance_function in distance_methods.items():
print(distance_method + '...', end='')
for reconstruction_method1, network1 in networks_dict.items():
distances[data_name].setdefault(reconstruction_method1, dict())
for reconstruction_method2, network2 in networks_dict.items():
distances[data_name][reconstruction_method1].setdefault(reconstruction_method2, dict())
distance = distance_function.dist(network1, network2)
distances[data_name][reconstruction_method1][reconstruction_method2].setdefault(distance_method, dict)
distances[data_name][reconstruction_method1][reconstruction_method2][distance_method] = distance
per_distance_values[data_name][distance_method].append(distance)
print('done')
## For each dataset and distance, store (max,min) tuple to use in standardization
max_min_distance_values = defaultdict(dict)
for data_name in networks.keys():
for distance_method in distance_methods_list:
max_min_distance_values[data_name][distance_method]=(np.max(per_distance_values[data_name][distance_method]), np.min(per_distance_values[data_name][distance_method]))
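## (added note) The per-dataset (max, min) pairs collected above are used below to
## min-max scale each distance, i.e. d_scaled = (d - d_min) / (d_max - d_min), so that
## distance measures with different ranges contribute comparably to the average.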
## Compute the similarity matrix by taking the average of the
## distance between every reconstruction matrix
number_of_reconstructors = len(reconstruction_methods)
name_map = {reconstruction_methods[i]:i for i in range(number_of_reconstructors)}
similarity_matrix = np.zeros((number_of_reconstructors,number_of_reconstructors))
for dataset, dataset_dict in distances.items():
for method1, method1_dict in dataset_dict.items():
for method2, method2_dict in dataset_dict.items():
for distance_method in method1_dict[method2].keys():
                max_dist_val, min_dist_val = max_min_distance_values[dataset][distance_method]
similarity_matrix[name_map[method1], name_map[method2]] += (method1_dict[method2][distance_method] - min_dist_val) / (max_dist_val - min_dist_val)
avg_similarity = similarity_matrix / (number_of_reconstructors*len(datasets))
print('Generating collabathon_output.png')
reconstruction_names = list(name_map.keys())
N_methods = len(reconstruction_names)
mat = avg_similarity
#### plotting parameters ####
netrd_cmap = 'bone_r'
method_id = 'test'
width = 1.2
height = 1.2
mult = 8.0
###### plot the mat ###########
fig, ax0 = plt.subplots(1, 1, figsize=(width*mult, height*mult))
ax0.imshow(mat, aspect='auto', cmap=netrd_cmap)
###### be obsessive about it ###########
ax0.set_xticks(np.arange(0, N_methods, 1))
ax0.set_yticks(np.arange(0, N_methods, 1))
# ax0.set_xticklabels(np.arange(0, N_methods, 1), fontsize=2.0*mult)
# ax0.set_yticklabels(np.arange(0, N_methods, 1), fontsize=2.0*mult)
ax0.set_xticklabels(reconstruction_names, fontsize=1.5*mult, rotation=270)
ax0.set_yticklabels(reconstruction_names, fontsize=1.5*mult)
ax0.set_xticks(np.arange(-.5, N_methods-0.5, 1), minor=True)
ax0.set_yticks(np.arange(-.5, N_methods-0.5, 1), minor=True)
ax0.grid(which='minor', color='#333333', linestyle='-', linewidth=1.5)
ax0.set_title("Collabathon Fun Times Test Plot: \n Averaged Distance Between Reconstructed Networks",
fontsize=2.5*mult)
plt.savefig('collabathon_output.png', bbox_inches='tight', dpi=200)
|
external/lemonade/dist/lemonade/exceptions.py | almartin82/bayeslite | 964 | 12628073 |
class BadGrammarError(Exception):
pass
class EmptyGrammarError(BadGrammarError):
pass
|
src/python/nimbusml/examples/LightLda.py | michaelgsharp/NimbusML | 134 | 12628114 | ###############################################################################
# LightLda
from nimbusml import FileDataStream, Pipeline
from nimbusml.datasets import get_dataset
from nimbusml.feature_extraction.text import NGramFeaturizer, LightLda
from nimbusml.feature_extraction.text.extractor import Ngram
# data input as a FileDataStream
path = get_dataset('topics').as_filepath()
data = FileDataStream.read_csv(path, sep=",")
print(data.head())
# review review_reverse label
# 0 animals birds cats dogs fish horse radiation galaxy universe duck 1
# 1 horse birds house fish duck cats space galaxy universe radiation 0
# 2 car truck driver bus pickup bus pickup 1
# transform usage
pipeline = Pipeline(
[
NGramFeaturizer(
word_feature_extractor=Ngram(),
vector_normalizer='None',
columns=['review']),
LightLda(
num_topic=3,
columns=['review'])])
# fit and transform
features = pipeline.fit_transform(data)
print(features.head())
# label review.0 review.1 review.2 review_reverse
# 0 1 0.500000 0.333333 0.166667 radiation galaxy universe duck
# 1 0 0.000000 0.166667 0.833333 space galaxy universe radiation
# 2 1 0.400000 0.200000 0.400000 bus pickup
# 3 0 0.333333 0.333333 0.333333 car truck
# 4 1 1.000000 0.000000 0.000000 car truck driver bus pickup horse
|
openff/toolkit/utils/toolkit_registry.py | andrew-abimansour/openff-toolkit | 120 | 12628141 | "Registry for ToolkitWrapper objects"
__all__ = ("ToolkitRegistry",)
import inspect
import logging
from openff.toolkit.utils.ambertools_wrapper import AmberToolsToolkitWrapper
from openff.toolkit.utils.base_wrapper import ToolkitWrapper
from openff.toolkit.utils.builtin_wrapper import BuiltInToolkitWrapper
from openff.toolkit.utils.exceptions import (
InvalidToolkitError,
ToolkitUnavailableException,
)
from openff.toolkit.utils.openeye_wrapper import OpenEyeToolkitWrapper
from openff.toolkit.utils.rdkit_wrapper import RDKitToolkitWrapper
from openff.toolkit.utils.utils import all_subclasses
# =============================================================================================
# CONFIGURE LOGGER
# =============================================================================================
logger = logging.getLogger(__name__)
# =============================================================================================
# Implementation
# =============================================================================================
class ToolkitRegistry:
"""
Registry for ToolkitWrapper objects
Examples
--------
Register toolkits in a specified order, skipping if unavailable
>>> from openff.toolkit.utils.toolkits import ToolkitRegistry
>>> toolkit_precedence = [OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper]
>>> toolkit_registry = ToolkitRegistry(toolkit_precedence)
>>> toolkit_registry
ToolkitRegistry containing OpenEye Toolkit, The RDKit, AmberTools
Register all available toolkits (in the order OpenEye, RDKit, AmberTools, built-in)
>>> toolkits = [OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, BuiltInToolkitWrapper]
>>> toolkit_registry = ToolkitRegistry(toolkit_precedence=toolkits)
>>> toolkit_registry
ToolkitRegistry containing OpenEye Toolkit, The RDKit, AmberTools, Built-in Toolkit
Retrieve the global singleton toolkit registry, which is created when this module is imported from all available
toolkits:
>>> from openff.toolkit.utils.toolkits import GLOBAL_TOOLKIT_REGISTRY as toolkit_registry
>>> toolkit_registry
ToolkitRegistry containing OpenEye Toolkit, The RDKit, AmberTools, Built-in Toolkit
Note that this will contain different ToolkitWrapper objects based on what toolkits
are currently installed.
.. warning :: This API is experimental and subject to change.
"""
def __init__(
self,
toolkit_precedence=[],
exception_if_unavailable=True,
_register_imported_toolkit_wrappers=False,
):
"""
Create an empty toolkit registry.
Parameters
----------
toolkit_precedence : list, default=[]
List of toolkit wrapper classes, in order of desired precedence when performing molecule operations. If
None, no toolkits will be registered.
exception_if_unavailable : bool, optional, default=True
If True, an exception will be raised if the toolkit is unavailable
_register_imported_toolkit_wrappers : bool, optional, default=False
If True, will attempt to register all imported ToolkitWrapper subclasses that can be
found in the order of toolkit_precedence, if specified. If toolkit_precedence is not
specified, the default order is [OpenEyeToolkitWrapper, RDKitToolkitWrapper,
AmberToolsToolkitWrapper, BuiltInToolkitWrapper].
"""
self._toolkits = list()
toolkits_to_register = list()
if _register_imported_toolkit_wrappers:
if toolkit_precedence is None:
toolkit_precedence = [
OpenEyeToolkitWrapper,
RDKitToolkitWrapper,
AmberToolsToolkitWrapper,
BuiltInToolkitWrapper,
]
all_importable_toolkit_wrappers = all_subclasses(ToolkitWrapper)
for toolkit in toolkit_precedence:
if toolkit in all_importable_toolkit_wrappers:
toolkits_to_register.append(toolkit)
else:
if toolkit_precedence:
toolkits_to_register = toolkit_precedence
if toolkits_to_register:
for toolkit in toolkits_to_register:
self.register_toolkit(
toolkit, exception_if_unavailable=exception_if_unavailable
)
@property
def registered_toolkits(self):
"""
List registered toolkits.
.. warning :: This API is experimental and subject to change.
.. todo :: Should this return a generator? Deep copies? Classes? Toolkit names?
Returns
-------
toolkits : iterable of toolkit objects
"""
return list(self._toolkits)
@property
def registered_toolkit_versions(self):
"""
Return a dict containing the version of each registered toolkit.
.. warning :: This API is experimental and subject to change.
Returns
-------
toolkit_versions : dict[str, str]
A dictionary mapping names and versions of wrapped toolkits
"""
return dict(
(tk.toolkit_name, tk.toolkit_version) for tk in self.registered_toolkits
)
def register_toolkit(self, toolkit_wrapper, exception_if_unavailable=True):
"""
Register the provided toolkit wrapper class, instantiating an object of it.
.. warning :: This API is experimental and subject to change.
.. todo ::
This method should raise an exception if the toolkit is unavailable, unless an optional argument
is specified that silently avoids registration of toolkits that are unavailable.
Parameters
----------
toolkit_wrapper : instance or subclass of ToolkitWrapper
The toolkit wrapper to register or its class.
exception_if_unavailable : bool, optional, default=True
If True, an exception will be raised if the toolkit is unavailable
"""
# Instantiate class if class, or just add if already instantiated.
if isinstance(toolkit_wrapper, type):
try:
toolkit_wrapper = toolkit_wrapper()
except ToolkitUnavailableException:
msg = "Unable to load toolkit '{}'. ".format(
toolkit_wrapper._toolkit_name
)
if exception_if_unavailable:
raise ToolkitUnavailableException(msg)
else:
if "OpenEye" in msg:
msg += (
"The Open Force Field Toolkit does not require the OpenEye Toolkits, and can "
"use RDKit/AmberTools instead. However, if you have a valid license for the "
"OpenEye Toolkits, consider installing them for faster performance and additional "
"file format support: "
"https://docs.eyesopen.com/toolkits/python/quickstart-python/linuxosx.html "
"OpenEye offers free Toolkit licenses for academics: "
"https://www.eyesopen.com/academic-licensing"
)
logger.warning(f"Warning: {msg}")
return
# Add toolkit to the registry.
self._toolkits.append(toolkit_wrapper)
def deregister_toolkit(self, toolkit_wrapper):
"""
Remove a ToolkitWrapper from the list of toolkits in this ToolkitRegistry
.. warning :: This API is experimental and subject to change.
Parameters
----------
toolkit_wrapper : instance or subclass of ToolkitWrapper
The toolkit wrapper to remove from the registry
Raises
------
InvalidToolkitError
If toolkit_wrapper is not a ToolkitWrapper or subclass
ToolkitUnavailableException
If toolkit_wrapper is not found in the registry
"""
# If passed a class, instantiate it
if inspect.isclass(toolkit_wrapper):
toolkit_wrapper = toolkit_wrapper()
if not isinstance(toolkit_wrapper, ToolkitWrapper):
msg = (
f"Argument {toolkit_wrapper} must an ToolkitWrapper "
f"or subclass of it. Found type {type(toolkit_wrapper)}."
)
raise InvalidToolkitError(msg)
toolkits_to_remove = []
for toolkit in self._toolkits:
if type(toolkit) == type(toolkit_wrapper):
toolkits_to_remove.append(toolkit)
if not toolkits_to_remove:
msg = (
f"Did not find {toolkit_wrapper} in registry. "
f"Currently registered toolkits are {self._toolkits}"
)
raise ToolkitUnavailableException(msg)
for toolkit_to_remove in toolkits_to_remove:
self._toolkits.remove(toolkit_to_remove)
def add_toolkit(self, toolkit_wrapper):
"""
Append a ToolkitWrapper onto the list of toolkits in this ToolkitRegistry
.. warning :: This API is experimental and subject to change.
Parameters
----------
toolkit_wrapper : openff.toolkit.utils.ToolkitWrapper
The ToolkitWrapper object to add to the list of registered toolkits
Raises
------
InvalidToolkitError
If toolkit_wrapper is not a ToolkitWrapper or subclass
"""
if not isinstance(toolkit_wrapper, ToolkitWrapper):
msg = "Something other than a ToolkitWrapper object was passed to ToolkitRegistry.add_toolkit()\n"
msg += "Given object {} of type {}".format(
toolkit_wrapper, type(toolkit_wrapper)
)
raise InvalidToolkitError(msg)
self._toolkits.append(toolkit_wrapper)
# TODO: Can we automatically resolve calls to methods that are not explicitly defined using some Python magic?
def resolve(self, method_name):
"""
Resolve the requested method name by checking all registered toolkits in
order of precedence for one that provides the requested method.
Parameters
----------
method_name : str
The name of the method to resolve
Returns
-------
method
The method of the first registered toolkit that provides the requested method name
Raises
------
NotImplementedError if the requested method cannot be found among the registered toolkits
Examples
--------
Create a molecule, and call the toolkit ``to_smiles()`` method directly
>>> from openff.toolkit.topology import Molecule
>>> molecule = Molecule.from_smiles('Cc1ccccc1')
>>> toolkit_registry = ToolkitRegistry([OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper])
>>> method = toolkit_registry.resolve('to_smiles')
>>> smiles = method(molecule)
.. todo :: Is there a better way to figure out which toolkits implement given methods by introspection?
"""
for toolkit in self._toolkits:
if hasattr(toolkit, method_name):
method = getattr(toolkit, method_name)
return method
# No toolkit was found to provide the requested capability
# TODO: Can we help developers by providing a check for typos in expected method names?
msg = 'No registered toolkits can provide the capability "{}".\n'.format(
method_name
)
msg += "Available toolkits are: {}\n".format(self.registered_toolkits)
raise NotImplementedError(msg)
# TODO: Can we instead register available methods directly with `ToolkitRegistry`,
# so we can just use `ToolkitRegistry.method()`?
def call(self, method_name, *args, raise_exception_types=None, **kwargs):
"""
Execute the requested method by attempting to use all registered toolkits in order of precedence.
``*args`` and ``**kwargs`` are passed to the desired method, and return values of the method are returned
This is a convenient shorthand for ``toolkit_registry.resolve_method(method_name)(*args, **kwargs)``
Parameters
----------
method_name : str
The name of the method to execute
raise_exception_types : list of Exception subclasses, default=None
A list of exception-derived types to catch and raise immediately. If None, this will be set to [Exception],
which will raise an error immediately if the first ToolkitWrapper in the registry fails. To try each
ToolkitWrapper that provides a suitably-named method, set this to the empty list ([]). If all
ToolkitWrappers run without raising any exceptions in this list, a single ValueError will be raised
            containing each ToolkitWrapper that was tried and the exception it raised.
Raises
------
NotImplementedError if the requested method cannot be found among the registered toolkits
ValueError if no exceptions in the raise_exception_types list were raised by ToolkitWrappers, and
all ToolkitWrappers in the ToolkitRegistry were tried.
Other forms of exceptions are possible if raise_exception_types is specified.
These are defined by the ToolkitWrapper method being called.
Examples
--------
Create a molecule, and call the toolkit ``to_smiles()`` method directly
>>> from openff.toolkit.topology import Molecule
>>> molecule = Molecule.from_smiles('Cc1ccccc1')
>>> toolkit_registry = ToolkitRegistry([OpenEyeToolkitWrapper, RDKitToolkitWrapper])
>>> smiles = toolkit_registry.call('to_smiles', molecule)
"""
if raise_exception_types is None:
raise_exception_types = [Exception]
errors = list()
for toolkit in self._toolkits:
if hasattr(toolkit, method_name):
method = getattr(toolkit, method_name)
try:
return method(*args, **kwargs)
except Exception as e:
for exception_type in raise_exception_types:
if isinstance(e, exception_type):
raise e
errors.append((toolkit, e))
# No toolkit was found to provide the requested capability
# TODO: Can we help developers by providing a check for typos in expected method names?
msg = (
f'No registered toolkits can provide the capability "{method_name}" '
f'for args "{args}" and kwargs "{kwargs}"\n'
)
msg += "Available toolkits are: {}\n".format(self.registered_toolkits)
# Append information about toolkits that implemented the method, but could not handle the provided parameters
for toolkit, error in errors:
msg += " {} {} : {}\n".format(toolkit, type(error), error)
raise ValueError(msg)
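    # (added editorial note) Passing raise_exception_types=[] makes call() try every
    # registered wrapper and only raise the aggregated ValueError at the end, e.g.
    #     toolkit_registry.call('from_smiles', 'CCO', raise_exception_types=[])
    # whereas the default ([Exception]) re-raises the first wrapper's error immediately.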
def __repr__(self):
return "ToolkitRegistry containing " + ", ".join(
[tk.toolkit_name for tk in self._toolkits]
)
|
InvenTree/stock/migrations/0064_auto_20210621_1724.py | carlos-riquelme/InvenTree | 656 | 12628171 | # Generated by Django 3.2.4 on 2021-06-21 07:24
from django.db import migrations
def extract_purchase_price(apps, schema_editor):
"""
Find instances of StockItem which do *not* have a purchase price set,
but which point to a PurchaseOrder where there *is* a purchase price set.
    Then, assign *that* purchase price to the original StockItem.
    This is to address an issue where older versions of InvenTree
    did not correctly copy purchase price information across to the StockItem objects.
    The current InvenTree version (as of 2021-06-21) copies this information across correctly,
so this one-time data migration should suffice.
"""
# Required database models
StockItem = apps.get_model('stock', 'stockitem')
PurchaseOrder = apps.get_model('order', 'purchaseorder')
PurchaseOrderLineItem = apps.get_model('order', 'purchaseorderlineitem')
Part = apps.get_model('part', 'part')
# Find all the StockItem objects without a purchase_price which point to a PurchaseOrder
items = StockItem.objects.filter(purchase_price=None).exclude(purchase_order=None)
if items.count() > 0:
print(f"Found {items.count()} stock items with missing purchase price information")
update_count = 0
for item in items:
part_id = item.part
po = item.purchase_order
# Look for a matching PurchaseOrderLineItem (with a price)
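        # (added note) `part__part` follows the SupplierPart -> Part relation, so this
        # matches line items on the same purchase order that refer to the same base part.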
lines = PurchaseOrderLineItem.objects.filter(part__part=part_id, order=po)
if lines.exists():
for line in lines:
if line.purchase_price is not None:
# Copy pricing information across
item.purchase_price = line.purchase_price
                    item.purchase_price_currency = line.purchase_price_currency
print(f"- Updating supplier price for {item.part.name} - {item.purchase_price} {item.purchase_price_currency}")
update_count += 1
item.save()
break
if update_count > 0:
print(f"Updated pricing for {update_count} stock items")
def reverse_operation(apps, schema_editor):
"""
DO NOTHING!
"""
pass
class Migration(migrations.Migration):
dependencies = [
('stock', '0063_auto_20210511_2343'),
]
operations = [
migrations.RunPython(extract_purchase_price, reverse_code=reverse_operation)
]
|
examples/parallel/davinci/wordfreq.py | chebee7i/ipython | 748 | 12628177 | """Count the frequencies of words in a string"""
from __future__ import division
from __future__ import print_function
import cmath as math
def wordfreq(text, is_filename=False):
"""Return a dictionary of words and word counts in a string."""
if is_filename:
with open(text) as f:
text = f.read()
freqs = {}
for word in text.split():
lword = word.lower()
freqs[lword] = freqs.get(lword, 0) + 1
return freqs
def print_wordfreq(freqs, n=10):
"""Print the n most common words and counts in the freqs dict."""
    words, counts = freqs.keys(), freqs.values()
    # sort (count, word) pairs descending by count; sorted() keeps this working on Python 3
    items = sorted(zip(counts, words), reverse=True)
for (count, word) in items[:n]:
print(word, count)
def wordfreq_to_weightsize(worddict, minsize=25, maxsize=50, minalpha=0.5, maxalpha=1.0):
    mincount = min(worddict.values())
    maxcount = max(worddict.values())
    weights = {}
    for k, v in worddict.items():
w = (v-mincount)/(maxcount-mincount)
alpha = minalpha + (maxalpha-minalpha)*w
size = minsize + (maxsize-minsize)*w
weights[k] = (alpha, size)
return weights
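# (added note) wordfreq_to_weightsize() linearly rescales each count into an
# (alpha, size) pair, so the rarest word maps to (minalpha, minsize) and the most
# frequent word maps to (maxalpha, maxsize).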
def tagcloud(worddict, n=10, minsize=25, maxsize=50, minalpha=0.5, maxalpha=1.0):
from matplotlib import pyplot as plt
import random
worddict = wordfreq_to_weightsize(worddict, minsize, maxsize, minalpha, maxalpha)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_position([0.0,0.0,1.0,1.0])
plt.xticks([])
plt.yticks([])
words = worddict.keys()
alphas = [v[0] for v in worddict.values()]
sizes = [v[1] for v in worddict.values()]
    items = sorted(zip(alphas, sizes, words), reverse=True)
for alpha, size, word in items[:n]:
# xpos = random.normalvariate(0.5, 0.3)
# ypos = random.normalvariate(0.5, 0.3)
xpos = random.uniform(0.0,1.0)
ypos = random.uniform(0.0,1.0)
ax.text(xpos, ypos, word.lower(), alpha=alpha, fontsize=size)
ax.autoscale_view()
return ax
|
scripts/astf/res.py | ajitkhaparde/trex-core | 956 | 12628189 |
# many templates,clients/server
# start -f astf/res.py -m 1 -t size=32,clients=65000,servers=65000,templates=100
from trex.astf.api import *
from trex.stl.trex_stl_packet_builder_scapy import ip2int, int2ip
import argparse
import os  # os.path.basename is used in get_profile below
# we can send either Python bytes type as below:
http_req = b'GET /3384 HTTP/1.1\r\nHost: 172.16.58.3\r\nConnection: Keep-Alive\r\nUser-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)\r\nAccept: */*\r\nAccept-Language: en-us\r\nAccept-Encoding: gzip, deflate, compress\r\n\r\n'
# or we can send Python string containing ascii chars, as below:
http_response_template = 'HTTP/1.1 200 OK\r\nServer: Microsoft-IIS/6.0\r\nContent-Type: text/html\r\nContent-Length: 32000\r\n\r\n<html><pre>{0}</pre></html>'
class Prof1():
def __init__(self):
pass # tunables
def ip_gen(self, client_base, server_base, client_ips, server_ips):
assert client_ips>0
assert server_ips>0
ip_gen_c = ASTFIPGenDist(ip_range = [client_base, int2ip(ip2int(client_base) + client_ips - 1)])
ip_gen_s = ASTFIPGenDist(ip_range = [server_base, int2ip(ip2int(server_base) + server_ips - 1)])
return ASTFIPGen(dist_client = ip_gen_c,
dist_server = ip_gen_s)
def create_profile(self,kwargs):
res_size = kwargs.get('size',16)
clients = kwargs.get('clients',255)
servers = kwargs.get('servers',255)
templates = kwargs.get('templates',255)
ip_gen = self.ip_gen('192.168.127.12', '192.168.127.12', clients, servers)
# client commands
http_response = http_response_template.format('*'*res_size)
http_response_template
prog_c = ASTFProgram()
prog_c.send(http_req)
prog_c.recv(len(http_response))
prog_s = ASTFProgram()
prog_s.recv(len(http_req))
prog_s.send(http_response)
templates_arr = []
for i in range(templates):
temp_c = ASTFTCPClientTemplate(program = prog_c, ip_gen = ip_gen, cps = i + 1)
temp_s = ASTFTCPServerTemplate(program = prog_s, assoc = ASTFAssociationRule(port = 80 + i))
template = ASTFTemplate(client_template = temp_c, server_template = temp_s)
templates_arr.append(template)
return ASTFProfile(default_ip_gen = ip_gen, templates = templates_arr)
def get_profile(self, tunables, **kwargs):
parser = argparse.ArgumentParser(description='Argparser for {}'.format(os.path.basename(__file__)),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
args = parser.parse_args(tunables)
return self.create_profile(kwargs)
def register():
return Prof1()
|
ib/ext/Util.py | LewisW/IbPy | 1,260 | 12628249 |
#!/usr/bin/env python
""" generated source for module Util """
#
# Original file copyright original author(s).
# This file copyright <NAME>, <EMAIL>.
#
# WARNING: all changes to this file will be lost.
from ib.lib import Double, Integer
#
# * Util.java
#
# package: com.ib.client
# Making sure this works in python3 as well
def cmp(a, b):
return (a > b) - (a < b)
class Util(object):
""" generated source for class Util """
@classmethod
def StringIsEmpty(cls, strval):
""" generated source for method StringIsEmpty """
return strval is None or 0 == len(strval)
@classmethod
def NormalizeString(cls, strval):
""" generated source for method NormalizeString """
return strval if strval is not None else ""
@classmethod
def StringCompare(cls, lhs, rhs):
""" generated source for method StringCompare """
return cmp(cls.NormalizeString(str(lhs)), cls.NormalizeString(str(rhs)))
@classmethod
def StringCompareIgnCase(cls, lhs, rhs):
""" generated source for method StringCompareIgnCase """
return cmp(cls.NormalizeString(str(lhs)).lower(), cls.NormalizeString(str(rhs)).lower())
@classmethod
def VectorEqualsUnordered(cls, lhs, rhs):
""" generated source for method VectorEqualsUnordered """
if lhs == rhs:
return True
lhsCount = 0 if lhs is None else len(lhs)
rhsCount = 0 if rhs is None else len(rhs)
if lhsCount != rhsCount:
return False
if lhsCount == 0:
return True
matchedRhsElems = [bool() for __idx0 in range(rhsCount)]
lhsIdx = 0
while lhsIdx < lhsCount:
lhsElem = lhs[lhsIdx]
rhsIdx = 0
while rhsIdx < rhsCount:
                if matchedRhsElems[rhsIdx]:
                    # already matched to an earlier lhs element; advance the index
                    # before skipping, otherwise the loop never terminates
                    rhsIdx += 1
                    continue
if lhsElem == rhs[rhsIdx]:
matchedRhsElems[rhsIdx] = True
break
rhsIdx += 1
if rhsIdx >= rhsCount:
# no matching elem found
return False
lhsIdx += 1
return True
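    # (added note) e.g. VectorEqualsUnordered([1, 2, 2], [2, 1, 2]) -> True,
    # while VectorEqualsUnordered([1, 2], [2, 2]) -> False.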
@classmethod
def IntMaxString(cls, value):
""" generated source for method IntMaxString """
return "" if (value == Integer.MAX_VALUE) else str(value)
@classmethod
def DoubleMaxString(cls, value):
""" generated source for method DoubleMaxString """
return "" if (value == Double.MAX_VALUE) else str(value)
|
examples/MIDI_to_CAN_example/CANToolz_config/BMW_F10_MUSIC.py | eik00d/CANSploit | 336 | 12628252 | load_modules = {
'hw_USBtin': {'port':'auto', 'debug':2, 'speed':500}, # IO hardware module
'ecu_controls':
{'bus':'BMW_F10',
'commands':[
# Music actions
{'High light - blink': '0x1ee:2:20ff', 'cmd':'127'},
{'TURN LEFT and RIGHT': '0x2fc:7:31000000000000', 'cmd':'126'},
{'TURN right ON PERMANENT': '0x1ee:2:01ff', 'cmd':'124'},
{'TURN right x3 or OFF PERMANENT':'0x1ee:2:02ff','cmd':'123'},
{'TURN left ON PERMANENT': '0x1ee:2:04ff', 'cmd':'122'},
{'TURN left x3 or OFF PERMANENT':'0x1ee:2:08ff','cmd':'121'},
{'STOP lights': '0x173:8:3afa00022000f2c4', 'cmd':'120'},
{'CLEANER - once':'0x2a6:2:02a1','cmd':'119'},
{'CLEANER x3 + Wasser':'0x2a6:2:10f1','cmd':'118'},
{'Mirrors+windows FOLD':'0x26e:8:5b5b4002ffffffff','cmd':"117"},
{'Mirrors UNfold':'0x2fc:7:12000000000000','cmd':"116"},
{'Windows open':'0x26e:8:49494001ffffffff','cmd':"115"},
{'Windows rear close + Mirrors FOLD':'0x26e:8:405b4002ffffffff','cmd':"114"},
{'Trunk open':'0x2a0:8:88888001ffffffff','cmd':"113"},
{'Trunk close':'0x23a:8:00f310f0fcf0ffff','cmd':"112"}
],
'statuses':[
]
}
}
# Now let's describe the logic of this test
actions = [
{'ecu_controls' : {}},
{'hw_USBtin': {'action':'write','pipe': 1}}
] |
WebMirror/management/rss_parser_funcs/feed_parse_extractCNovelProj.py | fake-name/ReadableWebProxy | 193 | 12628276 | def extractCNovelProj(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Please Be More Serious', 'Please Be More Serious', 'translated'),
('Still Not Wanting to Forget', 'Still Not Wanting to Forget', 'translated'),
('suddenly this summer', 'Suddenly, This Summer', 'translated'),
('mr earnest is my boyfriend', '<NAME> Is My Boyfriend', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
InvenTree/company/migrations/0030_auto_20201112_1112.py | ArakniD/InvenTree | 656 | 12628280 | # Generated by Django 3.0.7 on 2020-11-12 00:12
import InvenTree.fields
import django.core.validators
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('company', '0029_company_currency'),
]
operations = [
migrations.AlterField(
model_name='supplierpricebreak',
name='quantity',
field=InvenTree.fields.RoundingDecimalField(decimal_places=5, default=1, help_text='Price break quantity', max_digits=15, validators=[django.core.validators.MinValueValidator(1)], verbose_name='Quantity'),
),
]
|
disnake_components/__init__.py | LOCUS-TEAM/py-cord-components | 351 | 12628295 |
from .client import *
from .interaction import *
from .component import *
from .dpy_overrides import *
from .http import *
|
mead/pytorch/exporters.py | shar999/mead-baseline | 241 | 12628297 | import os
import logging
import torch
import torch.nn as nn
from typing import Dict, List
import baseline as bl
from eight_mile.pytorch.layers import (
CRF,
ViterbiBatchSize1,
TaggerGreedyDecoder,
ViterbiLogSoftmaxNormBatchSize1
)
from baseline.utils import (
exporter,
Offsets,
write_json,
load_vectorizers,
load_vocabs,
find_model_basename,
)
from baseline.model import load_model_for
from mead.utils import (
get_output_paths,
create_metadata,
save_to_bundle,
)
from mead.exporters import Exporter, register_exporter
__all__ = []
export = exporter(__all__)
logger = logging.getLogger('mead')
REMOTE_MODEL_NAME = 'model'
S1D = """
Common starlings may be kept as pets or as laboratory animals . Austrian <unk> <NAME> wrote of them in his book King Solomon 's Ring as " the poor man 's dog " and " something to love " , because nestlings are easily obtained from the wild and after careful hand rearing they are straightforward to look after . They adapt well to captivity , and thrive on a diet of standard bird feed and <unk> . Several birds may be kept in the same cage , and their <unk> makes them easy to train or study . The only disadvantages are their <unk> and indiscriminate defecation habits and the need to take precautions against diseases that may be transmitted to humans . As a laboratory bird , the common starling is second in numbers only to the domestic <unk> .
"""
S2D = [
"Common starlings may be kept as pets or as laboratory animals .",
"Austrian <unk> <NAME>nz wrote of them in his book King Solomon 's Ring as \" the poor man 's dog \" and \" something to love \" , because nestlings are easily obtained from the wild and after careful hand rearing they are straightforward to look after . ",
"They adapt well to captivity , and thrive on a diet of standard bird feed and <unk> . ",
"The only disadvantages are their <unk> and indiscriminate defecation habits and the need to take precautions against diseases that may be transmitted to humans . ",
"As a laboratory bird , the common starling is second in numbers only to the domestic <unk> ."
]
def create_data_dict(vocabs, vectorizers, transpose=False):
data = {}
lengths = None
for k, v in vectorizers.items():
data[k], feature_length = vectorizers[k].run(S1D.split(), vocabs[k])
data[k] = torch.LongTensor(data[k]).unsqueeze(0)
if not lengths:
lengths = [feature_length]
lengths = torch.LongTensor(lengths)
if transpose:
for k in vectorizers.keys():
if len(data[k].shape) > 1:
data[k] = data[k].transpose(0, 1)
data['lengths'] = lengths
return data
def create_data_dict_nbest(vocabs, vectorizers):
data = {}
length_tensor = None
for k, v in vectorizers.items():
vec, lengths = v.run(S2D, vocabs[k])
data[k] = torch.LongTensor(vec).unsqueeze(0)
if length_tensor is None:
length_tensor = torch.LongTensor(lengths).unsqueeze(0)
data['lengths'] = length_tensor
return data
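# (added note) Both helpers above vectorize the fixed sample text (S1D / S2D) into a
# batch-of-one dict of tensors; torch.onnx.export below needs these concrete example
# inputs in order to trace the model graph.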
@export
class PytorchONNXExporter(Exporter):
def __init__(self, task, **kwargs):
super().__init__(task, **kwargs)
self.transpose = kwargs.get('transpose', False)
self.tracing = kwargs.get('tracing', True)
self.default_size = int(kwargs.get('default_size', 100))
self.onnx_opset = int(kwargs.get('onnx_opset', 12))
self.nbest_inputs = bool(kwargs.get('nbest_input', False))
def apply_model_patches(self, model):
return model
def create_example_input(self, vocabs, vectorizers):
if self.nbest_inputs:
return create_data_dict_nbest(vocabs, vectorizers)
return create_data_dict(vocabs, vectorizers, self.transpose)
def create_example_output(self, model):
if hasattr(model, 'output'):
if isinstance(model.output, nn.ModuleList):
return [torch.ones((1, len(model.labels[i][1]))) for i in range(len(model.output))]
return torch.ones((1, len(model.labels)))
def create_model_inputs(self, model):
inputs = [k for k in model.embeddings.keys()] + ['lengths']
return inputs
def create_model_outputs(self, model):
if hasattr(model, 'output'):
if isinstance(model.output, nn.ModuleList):
logger.info("Multiheaded model")
return [f"output_{i}" for i in range(len(model.output))]
return ['output']
def create_dynamic_axes(self, model, vectorizers, inputs, outputs):
dynamics = {}
for name in outputs:
dynamics[name] = {1: 'sequence'}
for k in model.embeddings.keys():
if k == 'char':
dynamics[k] = {1: 'sequence', 2: 'chars'}
else:
dynamics[k] = {1: 'sequence'}
if self.nbest_inputs:
for name in inputs:
if 'lengths' in name:
dynamics[name] = {1: 'sequence'}
elif len(vectorizers[name].get_dims()) > 1:
dynamics[name] = {1: 'nbest', 2: 'sequence'}
else:
dynamics[name] = {1: 'sequence'}
logger.info(dynamics)
return dynamics
def _run(self, basename, output_dir, project=None, name=None, model_version=None, use_version=False, zip_results=True,
remote=False, use_all_features=False, **kwargs):
client_output, server_output = get_output_paths(
output_dir,
project, name,
model_version,
remote,
use_version=use_version
)
logger.info("Saving vectorizers and vocabs to %s", client_output)
logger.info("Saving serialized model to %s", server_output)
model, vectorizers, vocabs, model_name = self.load_model(basename)
# hacky fix for checkpoints trained before update of extra_tokens
for vec in vectorizers.values():
if not hasattr(vec, '_extra_tokens'):
vec._extra_tokens = ['[CLS]', '[MASK]']
# Triton server wants to see a specific name
model = self.apply_model_patches(model)
data = self.create_example_input(vocabs, vectorizers)
example_output = self.create_example_output(model)
if use_all_features:
inputs = list(data.keys())
else:
inputs = self.create_model_inputs(model)
outputs = self.create_model_outputs(model)
dynamics = self.create_dynamic_axes(model, vectorizers, inputs, outputs)
meta = create_metadata(
inputs, outputs,
self.sig_name,
model_name, model.lengths_key
)
if not self.tracing:
model = torch.jit.script(model)
logger.info("Exporting Model.")
logger.info("Model inputs: %s", inputs)
logger.info("Model outputs: %s", outputs)
onnx_model_name = REMOTE_MODEL_NAME if remote else model_name
torch.onnx.export(model, data,
verbose=True,
dynamic_axes=dynamics,
f=f'{server_output}/{onnx_model_name}.onnx',
input_names=inputs,
output_names=outputs,
opset_version=self.onnx_opset,
#propagate=True,
example_outputs=example_output)
logger.info("Saving metadata.")
save_to_bundle(client_output, basename, assets=meta, zip_results=zip_results)
logger.info('Successfully exported model to %s', output_dir)
return client_output, server_output
def load_model(self, model_dir):
model_name = find_model_basename(model_dir)
vectorizers = load_vectorizers(model_dir)
vocabs = load_vocabs(model_dir)
model = load_model_for(self.task.task_name(), model_name, device='cpu')
model = model.cpu()
model.eval()
model_name = os.path.basename(model_name)
return model, vectorizers, vocabs, model_name
class Embedder(nn.ModuleList):
def __init__(self, target):
super().__init__()
self.target = target
def forward(self, inputs: Dict[str, torch.Tensor]) -> torch.FloatTensor:
return self.target.embed(inputs)
@property
def embeddings(self):
return self.target.embeddings
@property
def lengths_key(self):
return self.target.lengths_key
@property
def embed_output_dim(self):
return self.target.embed_output_dim
@export
@register_exporter(task='classify', name='embed')
class EmbedPytorchONNXExporter(PytorchONNXExporter):
def __init__(self, task, **kwargs):
super().__init__(task, **kwargs)
self.sig_name = 'embed_text'
def load_model(self, model_dir):
model_name = find_model_basename(model_dir)
vectorizers = load_vectorizers(model_dir)
vocabs = load_vocabs(model_dir)
model = load_model_for(self.task.task_name(), model_name, device='cpu')
model = Embedder(model)
model = model.cpu()
model.eval()
model_name = os.path.basename(model_name)
return model, vectorizers, vocabs, model_name
def create_example_output(self, model):
return torch.ones((1, model.embed_output_dim), dtype=torch.float32)
@export
@register_exporter(task='classify', name='default')
class ClassifyPytorchONNXExporter(PytorchONNXExporter):
def __init__(self, task, **kwargs):
super().__init__(task, **kwargs)
self.sig_name = 'predict_text'
@export
@register_exporter(task='tagger', name='default')
class TaggerPytorchONNXExporter(PytorchONNXExporter):
def __init__(self, task, **kwargs):
super().__init__(task, **kwargs)
self.sig_name = 'tag_text'
def apply_model_patches(self, model):
if hasattr(model, 'decoder'):
if isinstance(model.decoder, CRF):
model.decoder.viterbi = ViterbiBatchSize1(model.decoder.viterbi.start_idx,
model.decoder.viterbi.end_idx)
elif isinstance(model.decoder, TaggerGreedyDecoder):
model.decoder.viterbi = ViterbiLogSoftmaxNormBatchSize1(
model.decoder.viterbi.start_idx,
model.decoder.viterbi.end_idx
)
return model
@export
@register_exporter(task='deps', name='default')
class DependencyParserPytorchONNXExporter(PytorchONNXExporter):
def __init__(self, task, **kwargs):
super().__init__(task, **kwargs)
self.sig_name = 'deps_text'
def create_example_output(self, model):
return torch.ones(1, self.default_size, self.default_size), torch.ones(1, self.default_size, len(model.labels))
def create_model_outputs(self, model):
return ['arcs', 'labels']
def apply_model_patches(self, model):
for _, e in model.embeddings.items():
# Turn off dropin flag, unsupported
# https://github.com/pytorch/pytorch/issues/49001
if hasattr(e, 'dropin'):
e.dropin = 0
return model
|
lightreid/data/datasets/partial_ilids.py | wangguanan/light-reid | 296 | 12628311 |
"""
@author: wangguanan
@contact: <EMAIL>
"""
import os, copy
from .reid_samples import ReIDSamples
import torchvision
class PartialILIDS(ReIDSamples):
"""Partial Ilids
Only include query and gallery dataset
Suppose all query images belong to camera0, and gallery images camera1
"""
def __init__(self, data_path, combineall=False, download=False, **kwargs):
assert combineall is False, \
            'combineall is not supported for the {} dataset'.format(self.__class__.__name__)
        assert download is False, \
            'automatic download is not supported, please download the {} dataset manually'.format(self.__class__.__name__)
self.data_path = data_path
query = self._get_probe_samples()
gallery = self._get_gallery_samples()
train = None
super(PartialILIDS, self).__init__(train, query, gallery)
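    # (added note) Probe.txt / Gallery.txt are expected to list one relative image path
    # per line (e.g. "Probe/0001.jpg", an illustrative path); the person id is parsed
    # from the file name, probe images get camera id 0 and gallery images camera id 1.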
def _get_probe_samples(self):
samples = []
f = open(os.path.join(self.data_path, 'Probe.txt'))
for line in f.readlines():
line = line.replace('\n', '')
image_path = line
pid = int(line.split('/')[1].replace('.jpg', ''))
samples.append([os.path.join(self.data_path, image_path), pid, 0])
return samples
def _get_gallery_samples(self):
samples = []
f = open(os.path.join(self.data_path, 'Gallery.txt'))
for line in f.readlines():
line = line.replace('\n', '')
image_path = line
pid = int(line.split('/')[1].replace('.jpg', ''))
samples.append([os.path.join(self.data_path, image_path), pid, 1])
return samples
|
models/total3d/modules/layout_estimation.py | Jerrypiglet/Total3DUnderstanding | 288 | 12628312 |
# Definition of PoseNet
# author: ynie
# date: March, 2020
import torch
import torch.nn as nn
from models.registers import MODULES
from models.modules import resnet
from models.modules.resnet import model_urls
import torch.utils.model_zoo as model_zoo
@MODULES.register_module
class PoseNet(nn.Module):
def __init__(self, cfg, optim_spec=None):
super(PoseNet, self).__init__()
'''Optimizer parameters used in training'''
self.optim_spec = optim_spec
'''Module parameters'''
bin = cfg.dataset_config.bins
self.PITCH_BIN = len(bin['pitch_bin'])
self.ROLL_BIN = len(bin['roll_bin'])
self.LO_ORI_BIN = len(bin['layout_ori_bin'])
'''Modules'''
self.resnet = resnet.resnet34(pretrained=False)
self.fc_1 = nn.Linear(2048, 1024)
self.fc_2 = nn.Linear(1024, (self.PITCH_BIN + self.ROLL_BIN) * 2)
# fc for layout
self.fc_layout = nn.Linear(2048, 2048)
# for layout orientation
self.fc_3 = nn.Linear(2048, 1024)
self.fc_4 = nn.Linear(1024, self.LO_ORI_BIN * 2)
# for layout centroid and coefficients
self.fc_5 = nn.Linear(2048, 1024)
self.fc_6 = nn.Linear(1024, 6)
self.relu_1 = nn.LeakyReLU(0.2, inplace=True)
self.dropout_1 = nn.Dropout(p=0.5)
# initiate weights
for m in self.modules():
if isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
if hasattr(m.bias, 'data'):
m.bias.data.zero_()
# load pretrained resnet
pretrained_dict = model_zoo.load_url(model_urls['resnet34'])
model_dict = self.resnet.state_dict()
pretrained_dict = {k:v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
self.resnet.load_state_dict(model_dict)
def forward(self, x):
x = self.resnet(x)
# branch for camera parameters
cam = self.fc_1(x)
cam = self.relu_1(cam)
cam = self.dropout_1(cam)
cam = self.fc_2(cam)
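        # (added note) fc_2 packs [pitch_reg | pitch_cls | roll_reg | roll_cls] into a
        # single vector; the slices below unpack it using the configured bin counts.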
pitch_reg = cam[:, 0: self.PITCH_BIN]
pitch_cls = cam[:, self.PITCH_BIN: self.PITCH_BIN * 2]
roll_reg = cam[:, self.PITCH_BIN * 2: self.PITCH_BIN * 2 + self.ROLL_BIN]
roll_cls = cam[:, self.PITCH_BIN * 2 + self.ROLL_BIN: self.PITCH_BIN * 2 + self.ROLL_BIN * 2]
# branch for layout orientation, centroid and coefficients
lo = self.fc_layout(x)
lo = self.relu_1(lo)
lo = self.dropout_1(lo)
# branch for layout orientation
lo_ori = self.fc_3(lo)
lo_ori = self.relu_1(lo_ori)
lo_ori = self.dropout_1(lo_ori)
lo_ori = self.fc_4(lo_ori)
lo_ori_reg = lo_ori[:, :self.LO_ORI_BIN]
lo_ori_cls = lo_ori[:, self.LO_ORI_BIN:]
# branch for layout centroid and coefficients
lo_ct = self.fc_5(lo)
lo_ct = self.relu_1(lo_ct)
lo_ct = self.dropout_1(lo_ct)
lo_ct = self.fc_6(lo_ct)
lo_centroid = lo_ct[:, :3]
lo_coeffs = lo_ct[:, 3:]
return pitch_reg, roll_reg, pitch_cls, roll_cls, lo_ori_reg, lo_ori_cls, lo_centroid, lo_coeffs |
src/micropython/audio.py | julianrendell/vscode-python-devicesimulator | 151 | 12628316 | from common import utils
from common.telemetry import telemetry_py
from common.telemetry_events import TelemetryEvent
# The implementation is based off of https://microbit-micropython.readthedocs.io/en/v1.0.1/audio.html.
def play(source, wait=True, pin="pin0", return_pin=None):
"""
This function is not implemented in the simulator.
Play the source to completion.
``source`` is an iterable, each element of which must be an ``AudioFrame``.
If ``wait`` is ``True``, this function will block until the source is exhausted.
``pin`` specifies which pin the speaker is connected to.
``return_pin`` specifies a differential pin to connect to the speaker
instead of ground.
"""
utils.print_for_unimplemented_functions(play.__name__)
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_AUDIO)
class AudioFrame:
"""
This class is not implemented in the simulator.
An ``AudioFrame`` object is a list of 32 samples each of which is a signed byte
(whole number between -128 and 127).
It takes just over 4 ms to play a single frame.
"""
def __init__(self):
utils.print_for_unimplemented_functions(AudioFrame.__init__.__qualname__)
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_AUDIO)
|
vega/networks/pytorch/customs/modnas/arch_space/construct/torch/torch.py | This-50m/vega | 724 | 12628318 |
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Torch constructors."""
import torch
from modnas.registry.construct import register
from modnas.arch_space.slot import Slot
from modnas.arch_space import ops
from modnas.core.param_space import ParamSpace
from modnas.utils.logging import get_logger
from modnas import backend
logger = get_logger('construct')
def parse_device(device):
"""Return device ids from config."""
if isinstance(device, int):
device = str(device)
if not isinstance(device, str):
return []
device = device.lower()
if device in ['cpu', 'nil', 'none']:
return []
if device == 'all':
return list(range(torch.cuda.device_count()))
else:
return [int(s) for s in device.split(',')]
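# (added note) e.g. parse_device('0,1') -> [0, 1]; parse_device('cpu') -> [];
# parse_device('all') -> one id per visible CUDA device.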
def configure_ops(new_config):
"""Set global operator config."""
config = ops.config
config.update(new_config)
if isinstance(config.ops_order, str):
config.ops_order = config.ops_order.split('_')
if config.ops_order[-1] == 'bn':
config.conv.bias = False
if config.ops_order[0] == 'act':
config.act.inplace = False
logger.info('ops config: {}'.format(config.to_dict()))
@register
class TorchInitConstructor():
"""Constructor that initializes the architecture space."""
def __init__(self, seed=None, device=None, ops_conf=None):
self.seed = seed
self.device = device
self.ops_conf = ops_conf
def __call__(self, model):
"""Run constructor."""
Slot.reset()
ParamSpace().reset()
seed = self.seed
if seed:
backend.init_device(self.device, seed)
configure_ops(self.ops_conf or {})
return model
@register
class TorchToDevice():
"""Constructor that moves model to some device."""
def __init__(self, device='all', data_parallel=True):
device_ids = parse_device(device) or [None]
self.device_ids = device_ids
self.data_parallel = data_parallel
def __call__(self, model):
"""Run constructor."""
if model is None:
return
device_ids = self.device_ids
backend.set_device(device_ids[0])
if device_ids[0] is not None:
torch.cuda.set_device(device_ids[0])
model.to(device=device_ids[0])
if self.data_parallel and len(device_ids) > 1:
model = torch.nn.DataParallel(model, device_ids=device_ids)
return model
@register
class TorchCheckpointLoader():
"""Constructor that loads model checkpoints."""
def __init__(self, path):
logger.info('Loading torch checkpoint from {}'.format(path))
self.chkpt = torch.load(path)
def __call__(self, model):
"""Run constructor."""
model.load_state_dict(self.chkpt)
return model
|
la/oblas/data/zgemv01.py | wtsia/gosl | 1,811 | 12628348 | import numpy as np
a = np.array([[0.1 + 3j, 0.2, 0.3 - 0.3j],
[1.0 + 2j, 0.2, 0.3 - 0.4j],
[2.0 + 1j, 0.2, 0.3 - 0.5j],
[3.0 + 0.1j, 0.2, 0.3 - 0.6j]], dtype=complex)
alp = 0.5+1j
bet = 2.0+1j
x = np.array([20, 10, 30])
y = np.array([3, 1, 2, 4])
res = alp*np.dot(a,x) + bet*y
print(res)
y = res
res = alp*np.dot(a.T,y) + bet*x
print(res)
|
salt/log/mixins.py | tomdoherty/salt | 9,425 | 12628364 | """
:codeauthor: <NAME> (<EMAIL>)
salt.log.mixins
~~~~~~~~~~~~~~~
.. versionadded:: 0.17.0
Some mix-in classes to be used in salt's logging
"""
# pylint: disable=unused-import
from salt._logging.mixins import (
ExcInfoOnLogLevelFormatMixin as ExcInfoOnLogLevelFormatMixIn,
)
from salt._logging.mixins import LoggingGarbageMixin as LoggingGarbageMixIn
from salt._logging.mixins import LoggingMixinMeta as LoggingMixInMeta
from salt._logging.mixins import LoggingProfileMixin as LoggingProfileMixIn
from salt._logging.mixins import LoggingTraceMixin as LoggingTraceMixIn
# pylint: enable=unused-import
# from salt.utils.versions import warn_until_date
# warn_until_date(
# '20220101',
# 'Please stop using \'{name}\' and instead use \'salt._logging.mixins\'. '
# '\'{name}\' will go away after {{date}}.'.format(
# name=__name__
# )
# )
|
venv/Lib/site-packages/imageio/plugins/pillow_info.py | amelliaaas/tugastkc4 | 1,026 | 12628376 | # -*- coding: utf-8 -*-
# styletest: ignore E122 E123 E501
"""
Module that contain info about the Pillow formats. The first part of
this module generates this info and writes it to its own bottom half
if run as a script.
"""
def generate_info(): # pragma: no cover
from urllib.request import urlopen
import PIL
from PIL import Image
Image.init()
ids = []
formats = []
docs = {}
# Collect formats and their summary from plugin modules
for mod_name in dir(PIL):
if "ImagePlugin" in mod_name:
mod = getattr(PIL, mod_name)
for ob_name in dir(mod):
ob = getattr(mod, ob_name)
if isinstance(ob, type) and issubclass(ob, Image.Image):
if ob.format in ids:
print("Found duplicate for", ob.format)
else:
ids.append(ob.format)
formats.append((ob.format, ob.format_description))
# Add extension info
for i in range(len(formats)):
id, summary = formats[i]
ext = " ".join([e for e in Image.EXTENSION if Image.EXTENSION[e] == id])
formats[i] = id, summary, ext
# Get documentation of formats
url = "https://raw.githubusercontent.com/python-pillow/Pillow/master/docs/handbook/image-file-formats.rst" # noqa
lines = urlopen(url).read().decode().splitlines()
lines.append("End")
lines.append("---") # for the end
# Parse documentation
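    # (added note) RST section titles are detected by their underline ("^^^", "---",
    # "==="); the line just above the underline is taken as the format name, and the
    # lines collected since the previous title become that format's doc text.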
cur_name = ""
cur_part = []
for i in range(len(lines)):
line = lines[i]
if line.startswith(("^^^", "---", "===")):
if cur_name and cur_name in ids:
text = "\n".join(cur_part[:-1])
text = text.replace("versionadded::", "versionadded:: Pillow ")
text = text.replace("Image.open`", "Image.write`")
docs[cur_name] = text
cur_part = []
cur_name = lines[i - 1].strip().replace(" ", "").upper()
else:
cur_part.append(" " + line)
# Fill in the blancs
for id in ids:
if id in docs:
docs[id] = "*From the Pillow docs:*\n\n" + docs[id]
else:
docs[id] = "No docs for %s." % id
print("no docs for", id)
# Sort before writing
formats.sort(key=lambda x: x[0])
ids.sort()
# Read file ...
code = open(__file__, "rb").read().decode()
code, divider, _ = code.partition("## BELOW IS " + "AUTOGENERATED")
code += divider + "\n\n"
# Write formats
code += "pillow_formats = [\n"
for i in range(len(formats)):
print(formats[i])
code += " (%r, %r, %r),\n" % formats[i]
code += " ]\n\n\n"
# Write docs
code += "pillow_docs = {\n"
for id in ids:
code += '%r:\nu"""%s""",\n' % (id, docs[id])
code += "}\n"
# Write back
with open(__file__, "wb") as f:
f.write(code.encode())
if __name__ == "__main__":
generate_info()
## BELOW IS AUTOGENERATED
pillow_formats = [
("BMP", "Windows Bitmap", ".bmp"),
("BUFR", "BUFR", ".bufr"),
("CUR", "Windows Cursor", ".cur"),
("DCX", "Intel DCX", ".dcx"),
("DDS", "DirectDraw Surface", ".dds"),
("DIB", "Windows Bitmap", ""),
("EPS", "Encapsulated Postscript", ".ps .eps"),
("FITS", "FITS", ".fit .fits"),
("FLI", "Autodesk FLI/FLC Animation", ".fli .flc"),
("FPX", "FlashPix", ".fpx"),
("FTEX", "Texture File Format (IW2:EOC)", ".ftc .ftu"),
("GBR", "GIMP brush file", ".gbr"),
("GIF", "Compuserve GIF", ".gif"),
("GRIB", "GRIB", ".grib"),
("HDF5", "HDF5", ".h5 .hdf"),
("ICNS", "Mac OS icns resource", ".icns"),
("ICO", "Windows Icon", ".ico"),
("IM", "IFUNC Image Memory", ".im"),
("IMT", "IM Tools", ""),
("IPTC", "IPTC/NAA", ".iim"),
("JPEG", "JPEG (ISO 10918)", ".jfif .jpe .jpg .jpeg"),
("JPEG2000", "JPEG 2000 (ISO 15444)", ".jp2 .j2k .jpc .jpf .jpx .j2c"),
("MCIDAS", "McIdas area file", ""),
("MIC", "Microsoft Image Composer", ".mic"),
("MPEG", "MPEG", ".mpg .mpeg"),
("MPO", "MPO (CIPA DC-007)", ".mpo"),
("MSP", "Windows Paint", ".msp"),
("PCD", "Kodak PhotoCD", ".pcd"),
("PCX", "Paintbrush", ".pcx"),
("PIXAR", "PIXAR raster image", ".pxr"),
("PNG", "Portable network graphics", ".png"),
("PPM", "Pbmplus image", ".pbm .pgm .ppm"),
("PSD", "Adobe Photoshop", ".psd"),
("SGI", "SGI Image File Format", ".bw .rgb .rgba .sgi"),
("SPIDER", "Spider 2D image", ""),
("SUN", "Sun Raster File", ".ras"),
("TGA", "Targa", ".tga"),
("TIFF", "Adobe TIFF", ".tif .tiff"),
("WMF", "Windows Metafile", ".wmf .emf"),
("XBM", "X11 Bitmap", ".xbm"),
("XPM", "X11 Pixel Map", ".xpm"),
("XVThumb", "XV thumbnail image", ""),
]
pillow_docs = {
"BMP": u"""*From the Pillow docs:*
PIL reads and writes Windows and OS/2 BMP files containing ``1``, ``L``, ``P``,
or ``RGB`` data. 16-colour images are read as ``P`` images. Run-length encoding
is not supported.
The :py:meth:`~PIL.Image.Image.write` method sets the following
:py:attr:`~PIL.Image.Image.info` properties:
**compression**
Set to ``bmp_rle`` if the file is run-length encoded.
""",
"BUFR": u"""*From the Pillow docs:*
.. versionadded:: Pillow 1.1.3
PIL provides a stub driver for BUFR files.
To add read or write support to your application, use
:py:func:`PIL.BufrStubImagePlugin.register_handler`.
""",
"CUR": u"""*From the Pillow docs:*
CUR is used to store cursors on Windows. The CUR decoder reads the largest
available cursor. Animated cursors are not supported.
""",
"DCX": u"""*From the Pillow docs:*
DCX is a container file format for PCX files, defined by Intel. The DCX format
is commonly used in fax applications. The DCX decoder can read files containing
``1``, ``L``, ``P``, or ``RGB`` data.
When the file is opened, only the first image is read. You can use
:py:meth:`~file.seek` or :py:mod:`~PIL.ImageSequence` to read other images.
""",
"DDS": u"""*From the Pillow docs:*
DDS is a popular container texture format used in video games and natively
supported by DirectX.
Currently, DXT1, DXT3, and DXT5 pixel formats are supported and only in ``RGBA``
mode.
.. versionadded:: Pillow 3.4.0 DXT3
""",
"DIB": u"""No docs for DIB.""",
"EPS": u"""*From the Pillow docs:*
PIL identifies EPS files containing image data, and can read files that contain
embedded raster images (ImageData descriptors). If Ghostscript is available,
other EPS files can be read as well. The EPS driver can also write EPS
images. The EPS driver can read EPS images in ``L``, ``LAB``, ``RGB`` and
``CMYK`` mode, but Ghostscript may convert the images to ``RGB`` mode rather
than leaving them in the original color space. The EPS driver can write images
in ``L``, ``RGB`` and ``CMYK`` modes.
If Ghostscript is available, you can call the :py:meth:`~PIL.Image.Image.load`
method with the following parameter to affect how Ghostscript renders the EPS
**scale**
Affects the scale of the resultant rasterized image. If the EPS suggests
that the image be rendered at 100px x 100px, setting this parameter to
2 will make the Ghostscript render a 200px x 200px image instead. The
relative position of the bounding box is maintained::
im = Image.open(...)
im.size #(100,100)
im.load(scale=2)
im.size #(200,200)
""",
"FITS": u"""*From the Pillow docs:*
.. versionadded:: Pillow 1.1.5
PIL provides a stub driver for FITS files.
To add read or write support to your application, use
:py:func:`PIL.FitsStubImagePlugin.register_handler`.
""",
"FLI": u"""No docs for FLI.""",
"FPX": u"""*From the Pillow docs:*
PIL reads Kodak FlashPix files. In the current version, only the highest
resolution image is read from the file, and the viewing transform is not taken
into account.
.. note::
To enable full FlashPix support, you need to build and install the IJG JPEG
library before building the Python Imaging Library. See the distribution
README for details.
""",
"FTEX": u"""*From the Pillow docs:*
.. versionadded:: Pillow 3.2.0
The FTEX decoder reads textures used for 3D objects in
Independence War 2: Edge Of Chaos. The plugin reads a single texture
per file, in the compressed and uncompressed formats.
""",
"GBR": u"""*From the Pillow docs:*
The GBR decoder reads GIMP brush files, version 1 and 2.
The :py:meth:`~PIL.Image.Image.write` method sets the following
:py:attr:`~PIL.Image.Image.info` properties:
**comment**
The brush name.
**spacing**
The spacing between the brushes, in pixels. Version 2 only.
GD
^^
PIL reads uncompressed GD files. Note that this file format cannot be
automatically identified, so you must use :py:func:`PIL.GdImageFile.open` to
read such a file.
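A read sketch (editor's addition, not from the Pillow docs; the filename is a
placeholder)::
    from PIL import GdImageFile
    im = GdImageFile.open("image.gd")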
The :py:meth:`~PIL.Image.Image.write` method sets the following
:py:attr:`~PIL.Image.Image.info` properties:
**transparency**
Transparency color index. This key is omitted if the image is not
transparent.
""",
"GIF": u"""*From the Pillow docs:*
PIL reads GIF87a and GIF89a versions of the GIF file format. The library writes
run-length encoded files in GIF87a by default, unless GIF89a features
are used or GIF89a is already in use.
Note that GIF files are always read as grayscale (``L``)
or palette mode (``P``) images.
The :py:meth:`~PIL.Image.Image.write` method sets the following
:py:attr:`~PIL.Image.Image.info` properties:
**background**
Default background color (a palette color index).
**transparency**
Transparency color index. This key is omitted if the image is not
transparent.
**version**
Version (either ``GIF87a`` or ``GIF89a``).
**duration**
May not be present. The time to display the current frame
of the GIF, in milliseconds.
**loop**
May not be present. The number of times the GIF should loop.
Reading sequences
~~~~~~~~~~~~~~~~~
The GIF loader supports the :py:meth:`~file.seek` and :py:meth:`~file.tell`
methods. You can seek to the next frame (``im.seek(im.tell() + 1)``), or rewind
the file by seeking to the first frame. Random access is not supported.
``im.seek()`` raises an ``EOFError`` if you try to seek after the last frame.
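A short frame-iteration sketch (editor's addition, not from the Pillow docs; the
filename is a placeholder)::
    from PIL import Image, ImageSequence
    im = Image.open("animation.gif")
    for frame in ImageSequence.Iterator(im):
        print(im.tell(), frame.size)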
Saving
~~~~~~
When calling :py:meth:`~PIL.Image.Image.save`, the following options
are available::
im.save(out, save_all=True, append_images=[im1, im2, ...])
**save_all**
If present and true, all frames of the image will be saved. If
not, then only the first frame of a multiframe image will be saved.
**append_images**
A list of images to append as additional frames. Each of the
images in the list can be single or multiframe images.
This is currently only supported for GIF, PDF, TIFF, and WebP.
**duration**
The display duration of each frame of the multiframe gif, in
milliseconds. Pass a single integer for a constant duration, or a
list or tuple to set the duration for each frame separately.
**loop**
Integer number of times the GIF should loop.
**optimize**
If present and true, attempt to compress the palette by
eliminating unused colors. This is only useful if the palette can
be compressed to the next smaller power of 2 elements.
**palette**
Use the specified palette for the saved image. The palette should
be a bytes or bytearray object containing the palette entries in
RGBRGB... form. It should be no more than 768 bytes. Alternately,
the palette can be passed in as an
:py:class:`PIL.ImagePalette.ImagePalette` object.
**disposal**
Indicates the way in which the graphic is to be treated after being displayed.
* 0 - No disposal specified.
* 1 - Do not dispose.
* 2 - Restore to background color.
* 3 - Restore to previous content.
Pass a single integer for a constant disposal, or a list or tuple
to set the disposal for each frame separately.
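A minimal animated-GIF save sketch combining the options above (editor's addition,
not from the Pillow docs; ``frames`` is a placeholder list of PIL images)::
    frames[0].save("out.gif", save_all=True, append_images=frames[1:],
                   duration=100, loop=0, optimize=True, disposal=2)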
Reading local images
~~~~~~~~~~~~~~~~~~~~
The GIF loader creates an image memory the same size as the GIF file’s *logical
screen size*, and pastes the actual pixel data (the *local image*) into this
image. If you only want the actual pixel rectangle, you can manipulate the
:py:attr:`~PIL.Image.Image.size` and :py:attr:`~PIL.Image.Image.tile`
attributes before loading the file::
im = Image.open(...)
if im.tile[0][0] == "gif":
# only read the first "local image" from this GIF file
tag, (x0, y0, x1, y1), offset, extra = im.tile[0]
im.size = (x1 - x0, y1 - y0)
im.tile = [(tag, (0, 0) + im.size, offset, extra)]
""",
"GRIB": u"""*From the Pillow docs:*
.. versionadded:: Pillow 1.1.5
PIL provides a stub driver for GRIB files.
The driver requires the file to start with a GRIB header. If you have files
with embedded GRIB data, or files with multiple GRIB fields, your application
has to seek to the header before passing the file handle to PIL.
To add read or write support to your application, use
:py:func:`PIL.GribStubImagePlugin.register_handler`.
""",
"HDF5": u"""*From the Pillow docs:*
.. versionadded:: Pillow 1.1.5
PIL provides a stub driver for HDF5 files.
To add read or write support to your application, use
:py:func:`PIL.Hdf5StubImagePlugin.register_handler`.
""",
"ICNS": u"""*From the Pillow docs:*
PIL reads and (macOS only) writes macOS ``.icns`` files. By default, the
largest available icon is read, though you can override this by setting the
:py:attr:`~PIL.Image.Image.size` property before calling
:py:meth:`~PIL.Image.Image.load`. The :py:meth:`~PIL.Image.Image.write` method
sets the following :py:attr:`~PIL.Image.Image.info` property:
**sizes**
A list of supported sizes found in this icon file; these are a
3-tuple, ``(width, height, scale)``, where ``scale`` is 2 for a retina
icon and 1 for a standard icon. You *are* permitted to use this 3-tuple
format for the :py:attr:`~PIL.Image.Image.size` property if you set it
before calling :py:meth:`~PIL.Image.Image.load`; after loading, the size
will be reset to a 2-tuple containing pixel dimensions (so, e.g. if you
ask for ``(512, 512, 2)``, the final value of
:py:attr:`~PIL.Image.Image.size` will be ``(1024, 1024)``).
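A load sketch for the 3-tuple size described above (editor's addition, not from the
Pillow docs; the filename is a placeholder)::
    from PIL import Image
    im = Image.open("app.icns")
    im.size = (512, 512, 2)  # request the retina representation
    im.load()                # im.size becomes (1024, 1024)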
""",
"ICO": u"""*From the Pillow docs:*
ICO is used to store icons on Windows. The largest available icon is read.
The :py:meth:`~PIL.Image.Image.save` method supports the following options:
**sizes**
A list of sizes included in this ico file; these are a 2-tuple,
``(width, height)``; defaults to ``[(16, 16), (24, 24), (32, 32), (48, 48),
(64, 64), (128, 128), (256, 256)]``. Any sizes bigger than the original
size or 256 will be ignored.
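A save sketch (editor's addition, not from the Pillow docs; ``im`` and the filename
are placeholders)::
    im.save("favicon.ico", sizes=[(16, 16), (32, 32), (48, 48)])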
IM
^^
IM is a format used by LabEye and other applications based on the IFUNC image
processing library. The library reads and writes most uncompressed interchange
versions of this format.
IM is the only format that can store all internal PIL formats.
""",
"IM": u"""No docs for IM.""",
"IMT": u"""*From the Pillow docs:*
PIL reads Image Tools images containing ``L`` data.
""",
"IPTC": u"""No docs for IPTC.""",
"JPEG": u"""*From the Pillow docs:*
PIL reads JPEG, JFIF, and Adobe JPEG files containing ``L``, ``RGB``, or
``CMYK`` data. It writes standard and progressive JFIF files.
Using the :py:meth:`~PIL.Image.Image.draft` method, you can speed things up by
converting ``RGB`` images to ``L``, and resize images to 1/2, 1/4 or 1/8 of
their original size while loading them.
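A draft-mode sketch (editor's addition, not from the Pillow docs; the filename is a
placeholder)::
    from PIL import Image
    im = Image.open("photo.jpg")
    im.draft("L", (im.width // 2, im.height // 2))  # grayscale, roughly half size
    im.load()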
The :py:meth:`~PIL.Image.Image.write` method may set the following
:py:attr:`~PIL.Image.Image.info` properties if available:
**jfif**
JFIF application marker found. If the file is not a JFIF file, this key is
not present.
**jfif_version**
A tuple representing the jfif version, (major version, minor version).
**jfif_density**
A tuple representing the pixel density of the image, in units specified
by jfif_unit.
**jfif_unit**
Units for the jfif_density:
* 0 - No Units
* 1 - Pixels per Inch
* 2 - Pixels per Centimeter
**dpi**
A tuple representing the reported pixel density in pixels per inch, if
the file is a jfif file and the units are in inches.
**adobe**
Adobe application marker found. If the file is not an Adobe JPEG file, this
key is not present.
**adobe_transform**
Vendor Specific Tag.
**progression**
Indicates that this is a progressive JPEG file.
**icc_profile**
The ICC color profile for the image.
**exif**
Raw EXIF data from the image.
The :py:meth:`~PIL.Image.Image.save` method supports the following options:
**quality**
The image quality, on a scale from 1 (worst) to 95 (best). The default is
75. Values above 95 should be avoided; 100 disables portions of the JPEG
compression algorithm, and results in large files with hardly any gain in
image quality.
**optimize**
If present and true, indicates that the encoder should make an extra pass
over the image in order to select optimal encoder settings.
**progressive**
If present and true, indicates that this image should be stored as a
progressive JPEG file.
**dpi**
A tuple of integers representing the pixel density, ``(x,y)``.
**icc_profile**
If present and true, the image is stored with the provided ICC profile.
If this parameter is not provided, the image will be saved with no profile
attached. To preserve the existing profile::
im.save(filename, 'jpeg', icc_profile=im.info.get('icc_profile'))
**exif**
If present, the image will be stored with the provided raw EXIF data.
**subsampling**
If present, sets the subsampling for the encoder.
* ``keep``: Only valid for JPEG files, will retain the original image setting.
* ``4:4:4``, ``4:2:2``, ``4:2:0``: Specific sampling values
* ``-1``: equivalent to ``keep``
* ``0``: equivalent to ``4:4:4``
* ``1``: equivalent to ``4:2:2``
* ``2``: equivalent to ``4:2:0``
**qtables**
If present, sets the qtables for the encoder. This is listed as an
advanced option for wizards in the JPEG documentation. Use with
caution. ``qtables`` can be one of several types of values:
* a string, naming a preset, e.g. ``keep``, ``web_low``, or ``web_high``
* a list, tuple, or dictionary (with integer keys =
range(len(keys))) of lists of 64 integers. There must be
between 2 and 4 tables.
.. versionadded:: Pillow 2.5.0
.. note::
To enable JPEG support, you need to build and install the IJG JPEG library
before building the Python Imaging Library. See the distribution README for
details.
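A save sketch combining a few of the options above (editor's addition, not from the
Pillow docs; ``im`` and the filename are placeholders)::
    im.save("out.jpg", quality=85, optimize=True, progressive=True,
            icc_profile=im.info.get("icc_profile"))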
""",
"JPEG2000": u"""*From the Pillow docs:*
.. versionadded:: Pillow 2.4.0
PIL reads and writes JPEG 2000 files containing ``L``, ``LA``, ``RGB`` or
``RGBA`` data. It can also read files containing ``YCbCr`` data, which it
converts on read into ``RGB`` or ``RGBA`` depending on whether or not there is
an alpha channel. PIL supports JPEG 2000 raw codestreams (``.j2k`` files), as
well as boxed JPEG 2000 files (``.jp2`` or ``.jpx`` files). PIL does *not*
support files whose components have different sampling frequencies.
When loading, if you set the ``mode`` on the image prior to the
:py:meth:`~PIL.Image.Image.load` method being invoked, you can ask PIL to
convert the image to either ``RGB`` or ``RGBA`` rather than choosing for
itself. It is also possible to set ``reduce`` to the number of resolutions to
discard (each one reduces the size of the resulting image by a factor of 2),
and ``layers`` to specify the number of quality layers to load.
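A load sketch for the attributes described above (editor's addition, not from the
Pillow docs; the filename is a placeholder)::
    from PIL import Image
    im = Image.open("image.jp2")
    im.mode = "RGBA"  # ask for RGBA instead of letting PIL choose
    im.reduce = 1     # discard one resolution level
    im.load()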
The :py:meth:`~PIL.Image.Image.save` method supports the following options:
**offset**
The image offset, as a tuple of integers, e.g. (16, 16)
**tile_offset**
The tile offset, again as a 2-tuple of integers.
**tile_size**
The tile size as a 2-tuple. If not specified, or if set to None, the
image will be saved without tiling.
**quality_mode**
Either `"rates"` or `"dB"` depending on the units you want to use to
specify image quality.
**quality_layers**
A sequence of numbers, each of which represents either an approximate size
reduction (if quality mode is `"rates"`) or a signal to noise ratio value
in decibels. If not specified, defaults to a single layer of full quality.
**num_resolutions**
The number of different image resolutions to be stored (which corresponds
to the number of Discrete Wavelet Transform decompositions plus one).
**codeblock_size**
The code-block size as a 2-tuple. Minimum size is 4 x 4, maximum is 1024 x
1024, with the additional restriction that no code-block may have more
than 4096 coefficients (i.e. the product of the two numbers must be no
greater than 4096).
**precinct_size**
The precinct size as a 2-tuple. Must be a power of two along both axes,
and must be greater than the code-block size.
**irreversible**
If ``True``, use the lossy Irreversible Color Transformation
followed by DWT 9-7. Defaults to ``False``, which means to use the
Reversible Color Transformation with DWT 5-3.
**progression**
Controls the progression order; must be one of ``"LRCP"``, ``"RLCP"``,
``"RPCL"``, ``"PCRL"``, ``"CPRL"``. The letters stand for Component,
Position, Resolution and Layer respectively and control the order of
encoding, the idea being that e.g. an image encoded using LRCP mode can
have its quality layers decoded as they arrive at the decoder, while one
encoded using RLCP mode will have increasing resolutions decoded as they
arrive, and so on.
**cinema_mode**
Set the encoder to produce output compliant with the digital cinema
specifications. The options here are ``"no"`` (the default),
``"cinema2k-24"`` for 24fps 2K, ``"cinema2k-48"`` for 48fps 2K, and
``"cinema4k-24"`` for 24fps 4K. Note that for compliant 2K files,
*at least one* of your image dimensions must match 2048 x 1080, while
for compliant 4K files, *at least one* of the dimensions must match
4096 x 2160.
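A save sketch (editor's addition, not from the Pillow docs; ``im`` and the filename
are placeholders)::
    im.save("out.jp2", quality_mode="dB", quality_layers=[40.0],
            num_resolutions=6, irreversible=False)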
.. note::
To enable JPEG 2000 support, you need to build and install the OpenJPEG
library, version 2.0.0 or higher, before building the Python Imaging
Library.
Windows users can install the OpenJPEG binaries available on the
OpenJPEG website, but must add them to their PATH in order to use PIL (if
you fail to do this, you will get errors about not being able to load the
``_imaging`` DLL).
""",
"MCIDAS": u"""*From the Pillow docs:*
PIL identifies and reads 8-bit McIdas area files.
""",
"MIC": u"""*From the Pillow docs:*
PIL identifies and reads Microsoft Image Composer (MIC) files. When opened, the
first sprite in the file is loaded. You can use :py:meth:`~file.seek` and
:py:meth:`~file.tell` to read other sprites from the file.
Note that there may be an embedded gamma of 2.2 in MIC files.
""",
"MPEG": u"""*From the Pillow docs:*
PIL identifies MPEG files.
""",
"MPO": u"""*From the Pillow docs:*
Pillow identifies and reads Multi Picture Object (MPO) files, loading the primary
image when first opened. The :py:meth:`~file.seek` and :py:meth:`~file.tell`
methods may be used to read other pictures from the file. The pictures are
zero-indexed and random access is supported.
""",
"MSP": u"""*From the Pillow docs:*
PIL identifies and reads MSP files from Windows 1 and 2. The library writes
uncompressed (Windows 1) versions of this format.
""",
"PCD": u"""*From the Pillow docs:*
PIL reads PhotoCD files containing ``RGB`` data. This only reads the 768x512
resolution image from the file. Higher resolutions are encoded in a proprietary
encoding.
""",
"PCX": u"""*From the Pillow docs:*
PIL reads and writes PCX files containing ``1``, ``L``, ``P``, or ``RGB`` data.
""",
"PIXAR": u"""*From the Pillow docs:*
PIL provides limited support for PIXAR raster files. The library can identify
and read “dumped” RGB files.
The format code is ``PIXAR``.
""",
"PNG": u"""*From the Pillow docs:*
PIL identifies, reads, and writes PNG files containing ``1``, ``L``, ``P``,
``RGB``, or ``RGBA`` data. Interlaced files are supported as of v1.1.7.
The :py:meth:`~PIL.Image.Image.write` method sets the following
:py:attr:`~PIL.Image.Image.info` properties, when appropriate:
**chromaticity**
The chromaticity points, as an 8 tuple of floats. (``White Point
X``, ``White Point Y``, ``Red X``, ``Red Y``, ``Green X``, ``Green
Y``, ``Blue X``, ``Blue Y``)
**gamma**
Gamma, given as a floating point number.
**srgb**
The sRGB rendering intent as an integer.
* 0 Perceptual
* 1 Relative Colorimetric
* 2 Saturation
* 3 Absolute Colorimetric
**transparency**
For ``P`` images: Either the palette index for full transparent pixels,
or a byte string with alpha values for each palette entry.
For ``L`` and ``RGB`` images, the color that represents full transparent
pixels in this image.
This key is omitted if the image is not a transparent palette image.
``Open`` also sets ``Image.text`` to a list of the values of the
``tEXt``, ``zTXt``, and ``iTXt`` chunks of the PNG image. Individual
compressed chunks are limited to a decompressed size of
``PngImagePlugin.MAX_TEXT_CHUNK``, by default 1MB, to prevent
decompression bombs. Additionally, the total size of all of the text
chunks is limited to ``PngImagePlugin.MAX_TEXT_MEMORY``, defaulting to
64MB.
The :py:meth:`~PIL.Image.Image.save` method supports the following options:
**optimize**
If present and true, instructs the PNG writer to make the output file as
small as possible. This includes extra processing in order to find optimal
encoder settings.
**transparency**
For ``P``, ``L``, and ``RGB`` images, this option controls what
color image to mark as transparent.
For ``P`` images, this can be either the palette index,
or a byte string with alpha values for each palette entry.
**dpi**
A tuple of two numbers corresponding to the desired dpi in each direction.
**pnginfo**
A :py:class:`PIL.PngImagePlugin.PngInfo` instance containing text tags.
**compress_level**
ZLIB compression level, a number between 0 and 9: 1 gives best speed,
9 gives best compression, 0 gives no compression at all. Default is 6.
When ``optimize`` option is True ``compress_level`` has no effect
(it is set to 9 regardless of a value passed).
**icc_profile**
The ICC Profile to include in the saved file.
**bits (experimental)**
For ``P`` images, this option controls how many bits to store. If omitted,
the PNG writer uses 8 bits (256 colors).
**dictionary (experimental)**
Set the ZLIB encoder dictionary.
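A save sketch using a few of the options above (editor's addition, not from the
Pillow docs; ``im`` and the filename are placeholders)::
    from PIL import PngImagePlugin
    meta = PngImagePlugin.PngInfo()
    meta.add_text("Software", "example")
    im.save("out.png", optimize=True, pnginfo=meta, dpi=(300, 300))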
.. note::
To enable PNG support, you need to build and install the ZLIB compression
library before building the Python Imaging Library. See the installation
documentation for details.
""",
"PPM": u"""*From the Pillow docs:*
PIL reads and writes PBM, PGM and PPM files containing ``1``, ``L`` or ``RGB``
data.
""",
"PSD": u"""*From the Pillow docs:*
PIL identifies and reads PSD files written by Adobe Photoshop 2.5 and 3.0.
""",
"SGI": u"""*From the Pillow docs:*
Pillow reads and writes uncompressed ``L``, ``RGB``, and ``RGBA`` files.
""",
"SPIDER": u"""*From the Pillow docs:*
PIL reads and writes SPIDER image files of 32-bit floating point data
("F;32F").
PIL also reads SPIDER stack files containing sequences of SPIDER images. The
:py:meth:`~file.seek` and :py:meth:`~file.tell` methods are supported, and
random access is allowed.
The :py:meth:`~PIL.Image.Image.write` method sets the following attributes:
**format**
Set to ``SPIDER``
**istack**
Set to 1 if the file is an image stack, else 0.
**nimages**
Set to the number of images in the stack.
A convenience method, :py:meth:`~PIL.Image.Image.convert2byte`, is provided for
converting floating point data to byte data (mode ``L``)::
im = Image.open('image001.spi').convert2byte()
Writing files in SPIDER format
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The extension of SPIDER files may be any 3 alphanumeric characters. Therefore
the output format must be specified explicitly::
im.save('newimage.spi', format='SPIDER')
For more information about the SPIDER image processing package, see the
`SPIDER homepage`_ at `Wadsworth Center`_.
.. _SPIDER homepage: https://spider.wadsworth.org/spider_doc/spider/docs/spider.html
.. _Wadsworth Center: https://www.wadsworth.org/
""",
"SUN": u"""No docs for SUN.""",
"TGA": u"""*From the Pillow docs:*
PIL reads 24- and 32-bit uncompressed and run-length encoded TGA files.
""",
"TIFF": u"""*From the Pillow docs:*
Pillow reads and writes TIFF files. It can read both striped and tiled
images, pixel and plane interleaved multi-band images. If you have
libtiff and its headers installed, PIL can read and write many kinds
of compressed TIFF files. If not, PIL will only read and write
uncompressed files.
.. note::
Beginning in version 5.0.0, Pillow requires libtiff to read or
write compressed files. Prior to that release, Pillow had buggy
support for reading Packbits, LZW and JPEG compressed TIFFs
without using libtiff.
The :py:meth:`~PIL.Image.Image.write` method sets the following
:py:attr:`~PIL.Image.Image.info` properties:
**compression**
Compression mode.
.. versionadded:: Pillow 2.0.0
**dpi**
Image resolution as an ``(xdpi, ydpi)`` tuple, where applicable. You can use
the :py:attr:`~PIL.Image.Image.tag` attribute to get more detailed
information about the image resolution.
.. versionadded:: Pillow 1.1.5
**resolution**
Image resolution as an ``(xres, yres)`` tuple, where applicable. This is a
measurement in whichever unit is specified by the file.
.. versionadded:: Pillow 1.1.5
The :py:attr:`~PIL.Image.Image.tag_v2` attribute contains a dictionary
of TIFF metadata. The keys are numerical indexes from
:py:attr:`~PIL.TiffTags.TAGS_V2`. Values are strings or numbers for single
items, multiple values are returned in a tuple of values. Rational
numbers are returned as a :py:class:`~PIL.TiffImagePlugin.IFDRational`
object.
.. versionadded:: Pillow 3.0.0
For compatibility with legacy code, the
:py:attr:`~PIL.Image.Image.tag` attribute contains a dictionary of
decoded TIFF fields as returned prior to version 3.0.0. Values are
returned as either strings or tuples of numeric values. Rational
numbers are returned as a tuple of ``(numerator, denominator)``.
.. deprecated:: 3.0.0
Saving Tiff Images
~~~~~~~~~~~~~~~~~~
The :py:meth:`~PIL.Image.Image.save` method can take the following keyword arguments:
**save_all**
If true, Pillow will save all frames of the image to a multiframe tiff document.
.. versionadded:: Pillow 3.4.0
**tiffinfo**
A :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` object or dict
object containing tiff tags and values. The TIFF field type is
autodetected for Numeric and string values, any other types
require using an :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2`
object and setting the type in
:py:attr:`~PIL.TiffImagePlugin.ImageFileDirectory_v2.tagtype` with
the appropriate numerical value from
``TiffTags.TYPES``.
.. versionadded:: Pillow 2.3.0
Metadata values that are of the rational type should be passed in
using a :py:class:`~PIL.TiffImagePlugin.IFDRational` object.
.. versionadded:: Pillow 3.1.0
For compatibility with legacy code, a
:py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1` object may
be passed in this field. However, this is deprecated.
.. versionadded:: Pillow 3.0.0
.. note::
Only some tags are currently supported when writing using
libtiff. The supported list is found in
:py:attr:`~PIL:TiffTags.LIBTIFF_CORE`.
**compression**
A string containing the desired compression method for the
file. (valid only with libtiff installed) Valid compression
methods are: ``None``, ``"tiff_ccitt"``, ``"group3"``,
``"group4"``, ``"tiff_jpeg"``, ``"tiff_adobe_deflate"``,
``"tiff_thunderscan"``, ``"tiff_deflate"``, ``"tiff_sgilog"``,
``"tiff_sgilog24"``, ``"tiff_raw_16"``
These arguments to set the tiff header fields are an alternative to
using the general tags available through tiffinfo.
**description**
**software**
**date_time**
**artist**
**copyright**
Strings
**resolution_unit**
A string of "inch", "centimeter" or "cm"
**resolution**
**x_resolution**
**y_resolution**
**dpi**
Either a Float, 2 tuple of (numerator, denominator) or a
:py:class:`~PIL.TiffImagePlugin.IFDRational`. Resolution implies
an equal x and y resolution, dpi also implies a unit of inches.
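A save sketch using a few of the keyword arguments above (editor's addition, not
from the Pillow docs; ``im`` and the filename are placeholders, and ``compression``
requires libtiff)::
    im.save("out.tif", compression="tiff_deflate", dpi=(300, 300),
            software="example", description="example image")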
""",
"WMF": u"""*From the Pillow docs:*
PIL can identify playable WMF files.
In PIL 1.1.4 and earlier, the WMF driver provides some limited rendering
support, but not enough to be useful for any real application.
In PIL 1.1.5 and later, the WMF driver is a stub driver. To add WMF read or
write support to your application, use
:py:func:`PIL.WmfImagePlugin.register_handler` to register a WMF handler.
::
from PIL import Image
from PIL import WmfImagePlugin
class WmfHandler:
def open(self, im):
...
def load(self, im):
...
return image
def save(self, im, fp, filename):
...
wmf_handler = WmfHandler()
WmfImagePlugin.register_handler(wmf_handler)
im = Image.open("sample.wmf")""",
"XBM": u"""*From the Pillow docs:*
PIL reads and writes X bitmap files (mode ``1``).
""",
"XPM": u"""*From the Pillow docs:*
PIL reads X pixmap files (mode ``P``) with 256 colors or less.
The :py:meth:`~PIL.Image.Image.write` method sets the following
:py:attr:`~PIL.Image.Image.info` properties:
**transparency**
Transparency color index. This key is omitted if the image is not
transparent.
""",
"XVThumb": u"""No docs for XVThumb.""",
}
|
bootstrap_test/conftest.py | lowang-bh/lain-1 | 524 | 12628379 | #!/usr/bin/env python2
# -*- coding: UTF-8 -*-
import pytest
import time
import subprocess32 as subproc
from config import CONFIG
import app_ctl
@pytest.fixture(scope="session")
def up_node1():
subproc.check_call(['vagrant', 'destroy', '-f', 'node1'])
subproc.check_call(['vagrant', 'up', 'node1', '--no-provision'])
yield "node1 is ready"
print("Destroying node1...")
subproc.call(['vagrant', 'destroy', '-f', 'node1'])
print("Node1 is destroyed.")
@pytest.fixture(scope="session")
def up_node2():
subproc.check_call(['vagrant', 'destroy', '-f', 'node2'])
subproc.check_call(['vagrant', 'up', 'node2'])
yield "node2 is ready"
print("Destroying node2...")
subproc.call(['vagrant', 'destroy', '-f', 'node2'])
print("Node2 is destroyed.")
@pytest.fixture(scope="session")
def up_node3():
subproc.check_call(['vagrant', 'destroy', '-f', 'node3'])
subproc.check_call(['vagrant', 'up', 'node3'])
yield "node3 is ready"
print("Destroying node3...")
subproc.call(['vagrant', 'destroy', '-f', 'node3'])
print("Node3 is destroyed.")
@pytest.fixture(scope="session")
def bootstrap(up_node1):
subproc.check_call([
'vagrant', 'ssh', 'node1', '-c',
'sudo /vagrant/bootstrap --pypi-mirror -m https://l2ohopf9.mirror.aliyuncs.com -r docker.io/laincloud --vip={}'.
format(CONFIG.vip)
])
@pytest.fixture(scope="session")
def prepare_demo_images(bootstrap):
subproc.check_call([
'vagrant', 'ssh', 'node1', '-c',
'sudo sh /vagrant/bootstrap_test/prepare_demo_images.sh'
])
@pytest.fixture(scope="session")
def reposit_ipaddr(prepare_demo_images):
app_ctl.reposit(CONFIG.ipaddr_resource_appname)
app_ctl.reposit(CONFIG.ipaddr_service_appname)
app_ctl.reposit(CONFIG.ipaddr_client_appname)
time.sleep(1)
@pytest.fixture(scope="session")
def deploy_ipaddr(reposit_ipaddr):
app_ctl.deploy(CONFIG.ipaddr_resource_appname)
app_ctl.deploy(CONFIG.ipaddr_service_appname)
time.sleep(60)
app_ctl.deploy(CONFIG.ipaddr_client_appname)
time.sleep(30)
@pytest.fixture(scope="session")
def add_node(bootstrap, up_node2, up_node3):
subproc.check_call([
'vagrant', 'ssh', 'node1', '-c',
'cd /vagrant/bootstrap_test && sudo ansible-playbook \
-i host_vars/test-nodes distribute_ssh_key.yaml'
])
subproc.check_call([
'vagrant', 'ssh', 'node1', '-c',
'sudo lainctl node add -p /vagrant/playbooks node2:192.168.77.22 ' +
'node3:192.168.77.23'
])
@pytest.fixture(scope="session")
def scale_ipaddr_client(deploy_ipaddr, add_node):
app_ctl.scale(CONFIG.ipaddr_client_appname, CONFIG.ipaddr_client_procname,
CONFIG.ipaddr_client_num_instances)
time.sleep(120)
|
tests/stg_with_known_weights.py | qiaone/GIF | 322 | 12628391 | import sys
sys.path.append('../')
from model import StyledGenerator, Discriminator
import torch
import numpy as np
generator = StyledGenerator(flame_dim=159,
all_stage_discrim=False,
embedding_vocab_size=70_000,
rendered_flame_ascondition=False,
inst_norm=True,
normal_maps_as_cond=True,
core_tensor_res=4,
use_styled_conv_stylegan2=True,
n_mlp=8)
# set all weights to 1s
mdl_state = generator.state_dict()
torch.manual_seed(2)
# tot_params = 0
# for name in mdl_state:
# if name.find('z_to_w') >= 0 or name.find('generator') >= 0 and name.find('embd') < 0 and \
# name.find('to_rgb.8') < 0 and name.find('to_rgb.7') < 0 and name.find('progression.8') < 0 \
# and name.find('progression.7') < 0:
# print(name)
# mdl_state[name] = mdl_state[name] * 0 + torch.randn(mdl_state[name].shape)
# tot_params += np.prod(mdl_state[name].shape)
# else:
# mdl_state[name] = mdl_state[name] * 0 + 6e-3
#
# print(f'Total set params are: {tot_params}')
tot_params = 0
for name in mdl_state:
if name.find('z_to_w') >= 0:
print(name)
mdl_state[name] = mdl_state[name] * 0 + torch.randn(mdl_state[name].shape)
tot_params += np.prod(mdl_state[name].shape)
else:
mdl_state[name] = mdl_state[name] * 0 + 6e-3
print(f'Total set params are: {tot_params} \n\n\n\n\n')
tot_params = 0
for i in range(7):
for name in mdl_state:
if name.find(f'progression.{i}.') >= 0:
mdl_state[name] = mdl_state[name] * 0 + torch.randn(mdl_state[name].shape)
tot_params += np.prod(mdl_state[name].shape)
print(f'{name} : {mdl_state[name].shape}; params this layer: {np.prod(mdl_state[name].shape)}')
# else:
# mdl_state[name] = mdl_state[name] * 0 + 6e-3
print(f'Total set params are: {tot_params} \n\n\n\n\n')
tot_params = 0
for i in range(7):
for name in mdl_state:
if name.find(f'to_rgb.{i}') >= 0:
mdl_state[name] = mdl_state[name] * 0 + torch.randn(mdl_state[name].shape)
tot_params += np.prod(mdl_state[name].shape)
print(f'{name} : {mdl_state[name].shape}; params this layer: {np.prod(mdl_state[name].shape)}')
# else:
# mdl_state[name] = mdl_state[name] * 0 + 6e-3
print(f'Total set params are: {tot_params} \n\n\n\n\n')
generator.load_state_dict(mdl_state)
input_indices = torch.zeros((1, ), dtype=torch.long)
flm_rndr = torch.zeros((1, 3, 4, 4))
torch.manual_seed(2)
forward_pass_gen = generator(flm_rndr, pose=None, step=6, alpha=1, input_indices=input_indices)
print(forward_pass_gen)
print(forward_pass_gen[0].shape)
# for param in generator.parameters():
#     print(param)
|
extra/python/example.py | ikrima/immer | 12,278 | 12628401 | #!/usr/bin/env python
##
# immer: immutable data structures for C++
# Copyright (C) 2016, 2017, 2018 <NAME>
#
# This software is distributed under the Boost Software License, Version 1.0.
# See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
# include:intro/start
import immer
v0 = immer.Vector().append(13).append(42)
assert v0[0] == 13
assert v0[1] == 42
assert len(v0) == 2
v1 = v0.set(0, 12)
assert v0.tolist() == [13, 42]
assert v1.tolist() == [12, 42]
# include:intro/end
|
exchangelib/services/upload_items.py | RossK1/exchangelib | 1,006 | 12628404 | from .common import EWSAccountService, to_item_id
from ..properties import ItemId, ParentFolderId
from ..util import create_element, set_xml_value, add_xml_child, MNS
class UploadItems(EWSAccountService):
"""MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/uploaditems-operation
"""
SERVICE_NAME = 'UploadItems'
element_container_name = '{%s}ItemId' % MNS
def call(self, items):
# _pool_requests expects 'items', not 'data'
return self._elems_to_objs(self._chunked_get_elements(self.get_payload, items=items))
def get_payload(self, items):
"""Upload given items to given account.
'items' is an iterable of tuples where the first element is a Folder instance representing the ParentFolder
that the item will be placed in and the second element is a tuple containing an optional ItemId, an optional
Item.is_associated boolean, and a Data string returned from an ExportItems call.
:param items:
"""
uploaditems = create_element('m:%s' % self.SERVICE_NAME)
itemselement = create_element('m:Items')
uploaditems.append(itemselement)
for parent_folder, (item_id, is_associated, data_str) in items:
# TODO: The full spec also allows the "UpdateOrCreate" create action.
item = create_element('t:Item', attrs=dict(CreateAction='Update' if item_id else 'CreateNew'))
if is_associated is not None:
item.set('IsAssociated', 'true' if is_associated else 'false')
parentfolderid = ParentFolderId(parent_folder.id, parent_folder.changekey)
set_xml_value(item, parentfolderid, version=self.account.version)
if item_id:
itemid = to_item_id(item_id, ItemId, version=self.account.version)
set_xml_value(item, itemid, version=self.account.version)
add_xml_child(item, 't:Data', data_str)
itemselement.append(item)
return uploaditems
def _elems_to_objs(self, elems):
for elem in elems:
if isinstance(elem, Exception):
yield elem
continue
yield elem.get(ItemId.ID_ATTR), elem.get(ItemId.CHANGEKEY_ATTR)
@classmethod
def _get_elements_in_container(cls, container):
return [container]
|
classification/datasets/mnist.py | LittleWat/MCD_DA | 464 | 12628419 | import numpy as np
from scipy.io import loadmat
def load_mnist(scale=True, usps=False, all_use=False):
mnist_data = loadmat('../data/mnist_data.mat')
if scale:
mnist_train = np.reshape(mnist_data['train_32'], (55000, 32, 32, 1))
mnist_test = np.reshape(mnist_data['test_32'], (10000, 32, 32, 1))
mnist_train = np.concatenate([mnist_train, mnist_train, mnist_train], 3)
mnist_test = np.concatenate([mnist_test, mnist_test, mnist_test], 3)
mnist_train = mnist_train.transpose(0, 3, 1, 2).astype(np.float32)
mnist_test = mnist_test.transpose(0, 3, 1, 2).astype(np.float32)
mnist_labels_train = mnist_data['label_train']
mnist_labels_test = mnist_data['label_test']
else:
mnist_train = mnist_data['train_28']
mnist_test = mnist_data['test_28']
mnist_labels_train = mnist_data['label_train']
mnist_labels_test = mnist_data['label_test']
mnist_train = mnist_train.astype(np.float32)
mnist_test = mnist_test.astype(np.float32)
mnist_train = mnist_train.transpose((0, 3, 1, 2))
mnist_test = mnist_test.transpose((0, 3, 1, 2))
train_label = np.argmax(mnist_labels_train, axis=1)
inds = np.random.permutation(mnist_train.shape[0])
mnist_train = mnist_train[inds]
train_label = train_label[inds]
test_label = np.argmax(mnist_labels_test, axis=1)
if usps and all_use != 'yes':
mnist_train = mnist_train[:2000]
train_label = train_label[:2000]
return mnist_train, train_label, mnist_test, test_label
|
src/schnetpack/nn/initializers.py | giadefa/schnetpack | 713 | 12628422 | from functools import partial
from torch.nn.init import constant_
zeros_initializer = partial(constant_, val=0.0)
|
intro/matplotlib/examples/plot_bar.py | junghun73/Learning | 419 | 12628424 | """
Bar plots
==========
An example of bar plots with matplotlib.
"""
import numpy as np
import matplotlib.pyplot as plt
n = 12
X = np.arange(n)
Y1 = (1 - X / float(n)) * np.random.uniform(0.5, 1.0, n)
Y2 = (1 - X / float(n)) * np.random.uniform(0.5, 1.0, n)
plt.axes([0.025, 0.025, 0.95, 0.95])
plt.bar(X, +Y1, facecolor='#9999ff', edgecolor='white')
plt.bar(X, -Y2, facecolor='#ff9999', edgecolor='white')
for x, y in zip(X, Y1):
plt.text(x + 0.4, y + 0.05, '%.2f' % y, ha='center', va= 'bottom')
for x, y in zip(X, Y2):
plt.text(x + 0.4, -y - 0.05, '%.2f' % y, ha='center', va= 'top')
plt.xlim(-.5, n)
plt.xticks(())
plt.ylim(-1.25, 1.25)
plt.yticks(())
plt.show()
|
tests/regressiontests/mk_complex.py | GCBallesteros/imreg_dft | 167 | 12628426 | import numpy as np
import scipy as sp
import scipy.io
import scipy.signal
np.random.seed(4)
abs_val, phase_val = [np.random.rand(13, 20) for _ in range(2)]  # np.random.rand: sp.rand is gone from modern SciPy
phase_val *= 2 * np.pi
shift = (2, 3)
# resample() returns a new, larger array, so collect the results instead of
# assigning into the original-shaped buffers.
resampled = []
for img in (abs_val, phase_val):
    for ax in range(2):
        img = sp.signal.resample(img, int(img.shape[ax] * 1.5), axis=ax)
    resampled.append(img)
abs_val, phase_val = resampled
# Build the complex image from the magnitude and the already-scaled phase.
cplx = abs_val * np.exp(1j * phase_val)
first = cplx[shift[0]:, shift[1]:]
second = cplx[:-shift[0], :-shift[1]]
sp.io.savemat("first.mat", dict(rca=first))
sp.io.savemat("first2.mat", dict(rca=second))
|
airmozilla/manage/views/decorators.py | mozilla/airmozilla | 115 | 12628445 | import functools
import warnings
from django.conf import settings
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth.models import Permission
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
staff_required = user_passes_test(lambda u: u.is_staff)
superuser_required = user_passes_test(lambda u: u.is_superuser)
def permission_required(perm):
if settings.DEBUG: # pragma: no cover
ct, codename = perm.split('.', 1)
if not Permission.objects.filter(
content_type__app_label=ct,
codename=codename
):
warnings.warn(
"No known permission called %r" % perm,
UserWarning,
2
)
def inner_render(fn):
@functools.wraps(fn)
def wrapped(request, *args, **kwargs):
# if you're not even authenticated, redirect to /login
if not request.user.has_perm(perm):
request.session['failed_permission'] = perm
return redirect(reverse('manage:insufficient_permissions'))
return fn(request, *args, **kwargs)
return wrapped
return inner_render
def cancel_redirect(redirect_view):
"""Redirect wrapper for POST requests which contain a cancel field."""
def inner_render(fn):
@functools.wraps(fn)
def wrapped(request, *args, **kwargs):
if request.method == 'POST' and 'cancel' in request.POST:
if callable(redirect_view):
url = redirect_view(request, *args, **kwargs)
else:
url = reverse(redirect_view)
return redirect(url)
return fn(request, *args, **kwargs)
return wrapped
return inner_render
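# Example usage of the decorators above (editor's sketch; the permission string,
# URL name and view are hypothetical):
#
# @staff_required
# @permission_required('main.change_event')
# @cancel_redirect('manage:events')
# def event_edit(request, id):
#     ...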
|
vimiv/main_window.py | karlch/vimiv | 268 | 12628456 | # vim: ft=python fileencoding=utf-8 sw=4 et sts=4
"""Gtk.ScrolledWindow class which is usually the main window of vimiv.
The ScrolledWindow can either include a Gtk.Image in IMAGE mode or a
Gtk.IconView in THUMBNAIL mode.
"""
import os
from gi.repository import Gtk
from vimiv.helpers import listdir_wrapper
from vimiv.image import Image
from vimiv.thumbnail import Thumbnail
class MainWindow(Gtk.ScrolledWindow):
"""Main window of vimiv containing either an Image or an IconView.
Attributes:
image: Vimiv Image class which may be displayed.
thumbnail: Vimiv Thumbnail class which may be displayed.
_app: The main vimiv class to interact with.
"""
def __init__(self, app):
"""Initialize image and thumbnail attributes and configure self."""
super(MainWindow, self).__init__()
self._app = app
self.image = Image(app)
self.thumbnail = Thumbnail(app)
self.set_hexpand(True)
self.set_vexpand(True)
# Image is default
self.add(self.image)
self.connect("key_press_event", self._app["eventhandler"].on_key_press,
"IMAGE")
# Connect signals
self._app.connect("widget-layout-changed", self._on_widgets_changed)
self._app.connect("paths-changed", self._on_paths_changed)
def switch_to_child(self, new_child):
"""Switch the widget displayed in the main window.
Args:
new_child: The child to switch to.
"""
self.remove(self.get_child())
self.add(new_child)
self.show_all()
def center_window(self):
"""Center the widget in the current window."""
h_adj = self.get_hadjustment()
size = self.get_allocation()
h_middle = (h_adj.get_upper() - h_adj.get_lower() - size.width) / 2
h_adj.set_value(h_middle)
v_adj = self.get_vadjustment()
v_middle = (v_adj.get_upper() - v_adj.get_lower() - size.height) / 2
v_adj.set_value(v_middle)
self.set_hadjustment(h_adj)
self.set_vadjustment(v_adj)
def scroll(self, direction):
"""Scroll the correct object.
Args:
direction: Scroll direction to emit.
"""
if direction not in "hjklHJKL":
self._app["statusbar"].message(
"Invalid scroll direction " + direction, "error")
elif self.thumbnail.toggled:
self.thumbnail.move_direction(direction)
else:
self._scroll(direction)
return True # Deactivates default bindings (here for Arrows)
def _scroll(self, direction):
"""Scroll the widget.
Args:
direction: Direction to scroll in.
"""
steps = self._app["eventhandler"].num_receive()
scale = self.image.get_scroll_scale()
h_adj = self.get_hadjustment()
size = self.get_allocation()
h_size = h_adj.get_upper() - h_adj.get_lower() - size.width
h_step = h_size / scale * steps
v_adj = self.get_vadjustment()
v_size = v_adj.get_upper() - v_adj.get_lower() - size.height
v_step = v_size / scale * steps
# To the ends
if direction == "H":
h_adj.set_value(0)
elif direction == "J":
v_adj.set_value(v_size)
elif direction == "K":
v_adj.set_value(0)
elif direction == "L":
h_adj.set_value(h_size)
# By step
elif direction == "h":
h_adj.set_value(h_adj.get_value() - h_step)
elif direction == "j":
v_adj.set_value(v_adj.get_value() + v_step)
elif direction == "k":
v_adj.set_value(v_adj.get_value() - v_step)
elif direction == "l":
h_adj.set_value(h_adj.get_value() + h_step)
self.set_hadjustment(h_adj)
self.set_vadjustment(v_adj)
def _on_widgets_changed(self, app, widget):
"""Recalculate thumbnails or rezoom image when the layout changed."""
if self.thumbnail.toggled:
self.thumbnail.calculate_columns()
elif self._app.get_paths() and self.image.fit_image != "user":
self.image.zoom_to(0, self.image.fit_image)
def _on_paths_changed(self, app, transform):
"""Reload paths image and/or thumbnail when paths have changed."""
if self._app.get_paths():
# Get all files in directory again
focused_path = self._app.get_pos(True)
decremented_index = max(0, self._app.get_pos() - 1)
directory = os.path.dirname(focused_path)
files = [os.path.join(directory, fil)
for fil in listdir_wrapper(directory)]
self._app.populate(files)
# Reload thumbnail
if self.thumbnail.toggled:
self.thumbnail.on_paths_changed()
# Refocus the path
if focused_path in self._app.get_paths():
index = self._app.get_paths().index(focused_path)
# Stay as close as possible
else:
index = min(decremented_index, len(self._app.get_paths()) - 1)
if self.thumbnail.toggled:
self.thumbnail.move_to_pos(index)
else:
self._app["eventhandler"].set_num_str(index + 1)
self.image.move_pos()
self._app["statusbar"].update_info()
# We need to check again as populate was called
if not self._app.get_paths():
self.hide()
|
tdc/utils/__init__.py | ypapanik/TDC | 577 | 12628458 | from .load import distribution_dataset_load, \
generation_paired_dataset_load, \
three_dim_dataset_load,\
interaction_dataset_load,\
multi_dataset_load,\
property_dataset_load, \
oracle_load,\
receptor_load,\
bm_group_load
from .split import create_fold,\
create_fold_setting_cold,\
create_combination_split,\
create_fold_time,\
create_scaffold_split,\
create_group_split
from .misc import print_sys, install, fuzzy_search, \
save_dict, load_dict, \
to_submission_format
from .label_name_list import dataset2target_lists
from .label import NegSample, label_transform, convert_y_unit, \
convert_to_log, convert_back_log, binarize, \
label_dist
from .retrieve import get_label_map, get_reaction_type,\
retrieve_label_name_list, retrieve_dataset_names,\
retrieve_all_benchmarks, retrieve_benchmark_names
from .query import uniprot2seq, cid2smiles
|
google_or_tools/set_covering_skiena_sat.py | tias/hakank | 279 | 12628502 | # Copyright 2021 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Set covering in OR-tools CP-SAT Solver.
Example from Steven Skiena, The Stony Brook Algorithm Repository
http://www.cs.sunysb.edu/~algorith/files/set-cover.shtml
'''
Input Description: A set of subsets S_1, ..., S_m of the
universal set U = {1,...,n}.
Problem: What is the smallest subset of subsets T subset S such
that \cup_{t_i in T} t_i = U?
'''
Data is from the pictures INPUT/OUTPUT.
This is a port of my old CP model set_covering_skiena.py
This model was created by <NAME> (<EMAIL>)
Also see my other OR-tools models: http://www.hakank.org/or_tools/
"""
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import math, sys
# from cp_sat_utils import *
def main():
model = cp.CpModel()
#
# data
#
num_sets = 7
num_elements = 12
belongs = [
# 1 2 3 4 5 6 7 8 9 0 1 2 elements
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # Set 1
[0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], # 2
[0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0], # 3
[0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0], # 4
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0], # 5
[1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0], # 6
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1] # 7
]
#
# variables
#
x = [model.NewIntVar(0, 1, 'x[%i]' % i) for i in range(num_sets)]
# number of choosen sets
z = model.NewIntVar(0, num_sets * 2, 'z')
# total number of elements in the choosen sets
tot_elements = model.NewIntVar(0, num_sets * num_elements,"tot_elements")
#
# constraints
#
model.Add(z == sum(x))
# all sets must be used
for j in range(num_elements):
model.Add(sum([belongs[i][j] * x[i] for i in range(num_sets)]) >= 1)
# number of used elements
model.Add(tot_elements == sum([
x[i] * belongs[i][j] for i in range(num_sets) for j in range(num_elements)
]))
# objective
model.Minimize(z)
#
# search and result
#
solver = cp.CpSolver()
status = solver.Solve(model)
if status == cp.OPTIMAL:
print('z:', solver.Value(z))
print('tot_elements:', solver.Value(tot_elements))
print('x:', [solver.Value(x[i]) for i in range(num_sets)])
print()
print('NumConflicts:', solver.NumConflicts())
print('NumBranches:', solver.NumBranches())
print('WallTime:', solver.WallTime())
if __name__ == '__main__':
main()
|
parcels/collection/collectionsoa.py | noemieplanat/Copy-parcels-master | 202 | 12628521 | from datetime import timedelta as delta
from operator import attrgetter
from ctypes import Structure, POINTER
from bisect import bisect_left
from math import floor
import numpy as np
from parcels.collection.collections import ParticleCollection
from parcels.collection.iterators import BaseParticleAccessor
from parcels.collection.iterators import BaseParticleCollectionIterator, BaseParticleCollectionIterable
from parcels.particle import ScipyParticle, JITParticle # noqa
from parcels.field import Field
from parcels.tools.loggers import logger
from parcels.tools.statuscodes import OperationCode
try:
from mpi4py import MPI
except:
MPI = None
if MPI:
try:
from sklearn.cluster import KMeans
except:
raise EnvironmentError('sklearn needs to be available if MPI is installed. '
'See http://oceanparcels.org/#parallel_install for more information')
def _to_write_particles(pd, time):
"""We don't want to write a particle that is not started yet.
Particle will be written if particle.time is between time-dt/2 and time+dt (/2)
"""
return ((np.less_equal(time - np.abs(pd['dt']/2), pd['time'], where=np.isfinite(pd['time']))
& np.greater_equal(time + np.abs(pd['dt'] / 2), pd['time'], where=np.isfinite(pd['time']))
| ((np.isnan(pd['dt'])) & np.equal(time, pd['time'], where=np.isfinite(pd['time']))))
& (np.isfinite(pd['id']))
& (np.isfinite(pd['time'])))
def _is_particle_started_yet(pd, time):
"""We don't want to write a particle that is not started yet.
Particle will be written if:
* particle.time is equal to time argument of pfile.write()
* particle.time is before time (in case particle was deleted between previous export and current one)
"""
return np.less_equal(pd['dt']*pd['time'], pd['dt']*time) | np.isclose(pd['time'], time)
def _convert_to_flat_array(var):
"""Convert lists and single integers/floats to one-dimensional numpy arrays
:param var: list or numeric to convert to a one-dimensional numpy array
"""
if isinstance(var, np.ndarray):
return var.flatten()
elif isinstance(var, (int, float, np.float32, np.int32)):
return np.array([var])
else:
return np.array(var)
class ParticleCollectionSOA(ParticleCollection):
def __init__(self, pclass, lon, lat, depth, time, lonlatdepth_dtype, pid_orig, partitions=None, ngrid=1, **kwargs):
"""
:param ngrid: number of grids in the fieldset of the overarching ParticleSet - required for initialising the
field references of the ctypes-link of particles that are allocated
"""
super(ParticleCollection, self).__init__()
assert pid_orig is not None, "particle IDs are None - incompatible with the collection. Invalid state."
pid = pid_orig + pclass.lastID
self._sorted = np.all(np.diff(pid) >= 0)
assert depth is not None, "particle's initial depth is None - incompatible with the collection. Invalid state."
assert lon.size == lat.size and lon.size == depth.size, (
'lon, lat, depth don''t all have the same lenghts')
assert lon.size == time.size, (
'time and positions (lon, lat, depth) don''t have the same lengths.')
# If partitions is false, the partitions are already initialised
if partitions is not None and partitions is not False:
self._pu_indicators = _convert_to_flat_array(partitions)
for kwvar in kwargs:
assert lon.size == kwargs[kwvar].size, (
'%s and positions (lon, lat, depth) don''t have the same lengths.' % kwvar)
offset = np.max(pid) if (pid is not None) and len(pid) > 0 else -1
if MPI:
mpi_comm = MPI.COMM_WORLD
mpi_rank = mpi_comm.Get_rank()
mpi_size = mpi_comm.Get_size()
if lon.size < mpi_size and mpi_size > 1:
raise RuntimeError('Cannot initialise with fewer particles than MPI processors')
if mpi_size > 1:
if partitions is not False:
if self._pu_indicators is None:
if mpi_rank == 0:
coords = np.vstack((lon, lat)).transpose()
kmeans = KMeans(n_clusters=mpi_size, random_state=0).fit(coords)
self._pu_indicators = kmeans.labels_
else:
self._pu_indicators = None
self._pu_indicators = mpi_comm.bcast(self._pu_indicators, root=0)
elif np.max(self._pu_indicators) >= mpi_size:
raise RuntimeError('Particle partitions must vary between 0 and the number of mpi procs')
lon = lon[self._pu_indicators == mpi_rank]
lat = lat[self._pu_indicators == mpi_rank]
time = time[self._pu_indicators == mpi_rank]
depth = depth[self._pu_indicators == mpi_rank]
pid = pid[self._pu_indicators == mpi_rank]
for kwvar in kwargs:
kwargs[kwvar] = kwargs[kwvar][self._pu_indicators == mpi_rank]
offset = MPI.COMM_WORLD.allreduce(offset, op=MPI.MAX)
pclass.setLastID(offset+1)
if lonlatdepth_dtype is None:
self._lonlatdepth_dtype = np.float32
else:
self._lonlatdepth_dtype = lonlatdepth_dtype
assert self._lonlatdepth_dtype in [np.float32, np.float64], \
'lon lat depth precision should be set to either np.float32 or np.float64'
pclass.set_lonlatdepth_dtype(self._lonlatdepth_dtype)
self._pclass = pclass
self._ptype = pclass.getPType()
self._data = {}
initialised = set()
self._ncount = len(lon)
for v in self.ptype.variables:
if v.name in ['xi', 'yi', 'zi', 'ti']:
self._data[v.name] = np.empty((len(lon), ngrid), dtype=v.dtype)
else:
self._data[v.name] = np.empty(self._ncount, dtype=v.dtype)
if lon is not None and lat is not None:
# Initialise from lists of lon/lat coordinates
assert self.ncount == len(lon) and self.ncount == len(lat), (
'Size of ParticleSet does not match length of lon and lat.')
# mimic the variables that get initialised in the constructor
self._data['lat'][:] = lat
self._data['lon'][:] = lon
self._data['depth'][:] = depth
self._data['time'][:] = time
self._data['id'][:] = pid
self._data['fileid'][:] = -1
# special case for exceptions which can only be handled from scipy
self._data['exception'] = np.empty(self.ncount, dtype=object)
initialised |= {'lat', 'lon', 'depth', 'time', 'id'}
# any fields that were provided on the command line
for kwvar, kwval in kwargs.items():
if not hasattr(pclass, kwvar):
raise RuntimeError('Particle class does not have Variable %s' % kwvar)
self._data[kwvar][:] = kwval
initialised.add(kwvar)
# initialise the rest to their default values
for v in self.ptype.variables:
if v.name in initialised:
continue
if isinstance(v.initial, Field):
for i in range(self.ncount):
if (time[i] is None) or (np.isnan(time[i])):
raise RuntimeError('Cannot initialise a Variable with a Field if no time provided (time-type: {} values: {}). Add a "time=" to ParticleSet construction'.format(type(time), time))
v.initial.fieldset.computeTimeChunk(time[i], 0)
self._data[v.name][i] = v.initial[
time[i], depth[i], lat[i], lon[i]
]
logger.warning_once("Particle initialisation from field can be very slow as it is computed in scipy mode.")
elif isinstance(v.initial, attrgetter):
self._data[v.name][:] = v.initial(self)
else:
self._data[v.name][:] = v.initial
initialised.add(v.name)
else:
raise ValueError("Latitude and longitude required for generating ParticleSet")
self._iterator = None
self._riterator = None
def __del__(self):
"""
Collection - Destructor
"""
super().__del__()
def iterator(self):
self._iterator = ParticleCollectionIteratorSOA(self)
return self._iterator
def __iter__(self):
"""Returns an Iterator that allows for forward iteration over the
elements in the ParticleCollection (e.g. `for p in pset:`).
"""
return self.iterator()
def reverse_iterator(self):
self._riterator = ParticleCollectionIteratorSOA(self, True)
return self._riterator
def __reversed__(self):
"""Returns an Iterator that allows for backwards iteration over
the elements in the ParticleCollection (e.g.
`for p in reversed(pset):`).
"""
return self.reverse_iterator()
def __getitem__(self, index):
"""
Access a particle in this collection using the fastest access
method for this collection - by its index.
:param index: int or np.int32 index of a particle in this collection
"""
return self.get_single_by_index(index)
def __getattr__(self, name):
"""
Access a single property of all particles.
:param name: name of the property
"""
for v in self.ptype.variables:
if v.name == name and name in self._data:
return self._data[name]
return False
def get_single_by_index(self, index):
"""
This function gets a (particle) object from the collection based on its index within the collection. For
collections that are not based on random access (e.g. ordered lists, sets, trees), this function involves a
translation of the index into the specific object reference in the collection - or (if unavoidable) the
translation of the collection from a none-indexable, none-random-access structure into an indexable structure.
In cases where a get-by-index would result in a performance malus, it is highly-advisable to use a different
get function, e.g. get-by-ID.
"""
super().get_single_by_index(index)
return ParticleAccessorSOA(self, index)
def get_single_by_object(self, particle_obj):
"""
This function gets a (particle) object from the collection based on its actual object. For collections that
are random-access and based on indices (e.g. unordered list, vectors, arrays and dense matrices), this function
would involve a parsing of the whole list and translation of the object into an index in the collection - which
results in a significant performance malus.
In cases where a get-by-object would result in a performance malus, it is highly-advisable to use a different
get function, e.g. get-by-index or get-by-ID.
In this specific implementation, we cannot look for the object
directly, so we will look for one of its properties (the ID) that
has the nice property of being stored in an ordered list (if the
collection is sorted)
"""
super().get_single_by_object(particle_obj)
return self.get_single_by_ID(particle_obj.id)
def get_single_by_ID(self, id):
"""
This function gets a (particle) object from the collection based on the object's ID. For some collections,
this operation may involve a parsing of the whole list and translation of the object's ID into an index or an
object reference in the collection - which results in a significant performance malus.
In cases where a get-by-ID would result in a performance malus, it is highly-advisable to use a different
get function, e.g. get-by-index.
This function uses binary search if we know the ID list to be sorted, and linear search otherwise. We assume
IDs are unique.
"""
super().get_single_by_ID(id)
# Use binary search if the collection is sorted, linear search otherwise
index = -1
if self._sorted:
index = bisect_left(self._data['id'], id)
if index == len(self._data['id']) or self._data['id'][index] != id:
raise ValueError("Trying to access a particle with a non-existing ID: %s." % id)
else:
index = np.where(self._data['id'] == id)[0][0]
return self.get_single_by_index(index)
def get_same(self, same_class):
"""
This function gets particles from this collection that are themselves stored in another object of an equi-
structured ParticleCollection.
"""
super().get_same(same_class)
raise NotImplementedError
def get_collection(self, pcollection):
"""
This function gets particles from this collection that are themselves stored in a ParticleCollection, which
is differently structured than this one. That means the other-collection has to be re-formatted first in an
intermediary format.
"""
super().get_collection(pcollection)
raise NotImplementedError
def get_multi_by_PyCollection_Particles(self, pycollectionp):
"""
This function gets particles from this collection, which are themselves in common Python collections, such as
lists, dicts and numpy structures. We can either directly get the referred Particle instances (for internally-
ordered collections, e.g. ordered lists, sets, trees) or we may need to parse each instance for its index (for
random-access structures), which results in a considerable performance malus.
For collections where get-by-object incurs a performance malus, it is advisable to multi-get particles
by indices or IDs.
"""
super().get_multi_by_PyCollection_Particles(pycollectionp)
raise NotImplementedError
def get_multi_by_indices(self, indices):
"""
This function gets particles from this collection based on their indices. This works best for random-access
collections (e.g. numpy's ndarrays, dense matrices and dense arrays), whereas internally ordered collections
shall rather use a get-via-object-reference strategy.
"""
super().get_multi_by_indices(indices)
if type(indices) is dict:
indices = list(indices.values())
return ParticleCollectionIteratorSOA(self, subset=indices)
def get_multi_by_IDs(self, ids):
"""
        This function gets particles from this collection based on their IDs. For collections where this retrieval
        strategy would require a collection transformation or by-ID parsing, it is advisable to rather apply a get-
by-objects or get-by-indices scheme.
Note that this implementation assumes that IDs of particles are strictly increasing with increasing index. So
a particle with a larger index will always have a larger ID as well. The assumption often holds for this
datastructure as new particles always get a larger ID than any existing particle (IDs are not recycled)
and their data are appended at the end of the list (largest index). This allows for the use of binary search
in the look-up. The collection maintains a `sorted` flag to indicate whether this assumption holds.
"""
super().get_multi_by_IDs(ids)
if type(ids) is dict:
ids = list(ids.values())
if len(ids) == 0:
return None
# Use binary search if the collection is sorted, linear search otherwise
indices = np.empty(len(ids), dtype=np.int32)
if self._sorted:
# This is efficient if len(ids) << self.len
sorted_ids = np.sort(np.array(ids))
indices = self._recursive_ID_lookup(0, len(self._data['id']), sorted_ids)
else:
indices = np.where(np.in1d(self._data['id'], ids))[0]
return self.get_multi_by_indices(indices)
def _recursive_ID_lookup(self, low, high, sublist):
"""Identify the middle element of the sublist and perform binary
search on it.
        :param low: Lower bound on the indices to search for IDs.
        :param high: Upper bound on the indices to search for IDs.
:param sublist: (Sub)list of IDs to look for.
"""
        if len(sublist) == 0:
            return np.array([], dtype=np.int32)
        median = floor(len(sublist) / 2)
        # bisect_left searches the [low:high] slice, so shift the result back
        # to an absolute index into the full id array
        index = low + bisect_left(self._data['id'][low:high], sublist[median])
        if len(sublist) == 1:
            # edge case
            if index == high or \
               self._data['id'][index] != sublist[median]:
                return np.array([], dtype=np.int32)
            return np.array([index])
        # The edge-cases have to be handled slightly differently
        if index == high:
            # Continue with the same bounds, but drop the median.
            return self._recursive_ID_lookup(low, high, np.delete(sublist, median))
        elif self._data['id'][index] != sublist[median]:
            # We can split, because we received the index that the median
            # ID would have been inserted in, but we do not return the
            # index and keep it in our search space.
            left = self._recursive_ID_lookup(low, index, sublist[:median])
            right = self._recursive_ID_lookup(index, high, sublist[median + 1:])
            return np.concatenate((left, right))
        # Otherwise, we located the median, so we include it in our
        # result, and split the search space on it, without including it.
        left = self._recursive_ID_lookup(low, index, sublist[:median])
        right = self._recursive_ID_lookup(index + 1, high, sublist[median + 1:])
        return np.concatenate((left, np.array([index]), right))
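    # Illustrative note: the recursive lookup above assumes that both the
    # stored 'id' column and the requested sublist are sorted. It locates
    # the sublist's median ID with bisect_left restricted to the
    # [low, high) window and then recurses on the left and right halves of
    # the sublist, so each call only searches a shrinking slice of the ids.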
def add_collection(self, pcollection):
"""
Adds another, differently structured ParticleCollection to this collection. This is done by, for example,
appending/adding the items of the other collection to this collection.
"""
super().add_collection(pcollection)
raise NotImplementedError
def add_single(self, particle_obj):
"""
        Adding a single Particle to the collection - either as a 'Particle' object in parcels itself, or
via its ParticleAccessor.
"""
super().add_single(particle_obj)
raise NotImplementedError
def add_same(self, same_class):
"""
Adds another, equi-structured ParticleCollection to this collection. This is done by concatenating
both collections. The fact that they are of the same ParticleCollection's derivative simplifies
parsing and concatenation.
"""
super().add_same(same_class)
if same_class.ncount == 0:
return
if self._ncount == 0:
self._data = same_class._data
self._ncount = same_class.ncount
return
# Determine order of concatenation and update the sorted flag
if self._sorted and same_class._sorted \
and self._data['id'][0] > same_class._data['id'][-1]:
for d in self._data:
self._data[d] = np.concatenate((same_class._data[d], self._data[d]))
self._ncount += same_class.ncount
else:
if not (same_class._sorted
and self._data['id'][-1] < same_class._data['id'][0]):
self._sorted = False
for d in self._data:
self._data[d] = np.concatenate((self._data[d], same_class._data[d]))
self._ncount += same_class.ncount
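    # Illustration of the ordering logic above (hypothetical IDs): if this
    # collection holds ids [5, 6] and `same_class` holds ids [1, 2], the
    # first branch prepends the other collection and keeps `_sorted` True;
    # if the two id ranges interleave, the second branch appends and clears
    # the `_sorted` flag instead.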
def __iadd__(self, same_class):
"""
        Performs an incremental addition of the equi-structured ParticleCollections, so as to allow
        a += b,
        with 'a' and 'b' being the two equi-structured objects (or: 'b' being an individual object).
This operation is equal to an in-place addition of (an) element(s).
"""
self.add_same(same_class)
return self
def insert(self, obj, index=None):
"""
        This function allows one to 'insert' a Particle (as object or via its accessor) into this collection. This method
        needs to be specified for each collection individually. Some collections (e.g. unordered lists) allow defining
        the index where the object is to be inserted. Some collections can optionally insert an object at a specific
        position - at a significant speed- and memory-malus cost (e.g. vectors, arrays, dense matrices). Some
        collections manage a specified indexing order internally (e.g. ordered lists, sets, trees) and thus
        have no use for an 'index' parameter. For those collections with an internally-enforced order, the function
mapping equates to:
insert(obj) -> add_single(obj)
"""
raise NotImplementedError
def push(self, particle_obj):
"""
This function pushes a Particle (as object or via its accessor) to the end of a collection ('end' definition
depends on the specific collection itself). For collections with an inherent indexing order (e.g. ordered lists,
sets, trees), the function just includes the object at its pre-defined position (i.e. not necessarily at the
        end). For those collections, the function mapping equates to:
int32 push(particle_obj) -> add_single(particle_obj); return -1;
This function further returns the index, at which position the Particle has been inserted. By definition,
the index is positive, thus: a return of '-1' indicates push failure, NOT the last position in the collection.
        Furthermore, collections that do not work in an index-preserving manner also return '-1'.
"""
raise NotImplementedError
def append(self, particle_obj):
"""
This function appends a Particle (as object or via its accessor) to the end of a collection ('end' definition
depends on the specific collection itself). For collections with an inherent indexing order (e.g. ordered lists,
sets, trees), the function just includes the object at its pre-defined position (i.e. not necessarily at the
        end). For those collections, the function mapping equates to:
append(particle_obj) -> add_single(particle_obj)
The function - in contrast to 'push' - does not return the index of the inserted object.
"""
raise NotImplementedError
def __delitem__(self, key):
"""
This is the high-performance method to delete a specific object from this collection.
As the most-performant way depends on the specific collection in question, the function is abstract.
Highlight for the specific implementation:
The 'key' parameter should still be evaluated for being a single or a multi-entry delete, and needs to check
that it received the correct type of 'indexing' argument (i.e. index, id or iterator).
"""
self.delete_by_index(key)
def delete_by_index(self, index):
"""
        This method deletes a particle from the collection based on its index. It does not return the deleted item.
        Semantically, the function appears similar to the 'remove' operation. That said, the function in OceanParcels -
        instead of directly deleting the particle - just raises the 'deleted' status flag for the indexed particle.
        As a result, the particle still remains in the collection. The functional interpretation of the 'deleted' status
        is handled by the 'recovery' dictionary during simulation execution.
"""
super().delete_by_index(index)
self._data['state'][index] = OperationCode.Delete
def delete_by_ID(self, id):
"""
        This method deletes a particle from the collection based on its ID. It does not return the deleted item.
        Semantically, the function appears similar to the 'remove' operation. That said, the function in OceanParcels -
        instead of directly deleting the particle - just raises the 'deleted' status flag for the indexed particle.
        As a result, the particle still remains in the collection. The functional interpretation of the 'deleted' status
        is handled by the 'recovery' dictionary during simulation execution.
"""
super().delete_by_ID(id)
# Use binary search if the collection is sorted, linear search otherwise
index = -1
if self._sorted:
index = bisect_left(self._data['id'], id)
if index == len(self._data['id']) or \
self._data['id'][index] != id:
raise ValueError("Trying to delete a particle with a non-existing ID: %s." % id)
else:
index = np.where(self._data['id'] == id)[0][0]
self.delete_by_index(index)
def remove_single_by_index(self, index):
"""
This function removes a (particle) object from the collection based on its index within the collection. For
collections that are not based on random access (e.g. ordered lists, sets, trees), this function involves a
translation of the index into the specific object reference in the collection - or (if unavoidable) the
        translation of the collection from a non-indexable, non-random-access structure into an indexable structure,
        and then performing the removal.
        In cases where a removal-by-index would result in a performance malus, it is highly advisable to use a different
        removal function, e.g. remove-by-object or remove-by-ID.
"""
super().remove_single_by_index(index)
for d in self._data:
self._data[d] = np.delete(self._data[d], index, axis=0)
self._ncount -= 1
def remove_single_by_object(self, particle_obj):
"""
This function removes a (particle) object from the collection based on its actual object. For collections that
are random-access and based on indices (e.g. unordered list, vectors, arrays and dense matrices), this function
        would involve a parsing of the whole list and translation of the object into an index in the collection to
        perform the removal - which results in a significant performance malus.
        In cases where a removal-by-object would result in a performance malus, it is highly advisable to use a different
        removal function, e.g. remove-by-index or remove-by-ID.
"""
super().remove_single_by_object(particle_obj)
# We cannot look for the object directly, so we will look for one of
# its properties that has the nice property of being stored in an
# ordered list
self.remove_single_by_ID(particle_obj.id)
def remove_single_by_ID(self, id):
"""
This function removes a (particle) object from the collection based on the object's ID. For some collections,
this operation may involve a parsing of the whole list and translation of the object's ID into an index or an
object reference in the collection in order to perform the removal - which results in a significant performance
malus.
        In cases where a removal-by-ID would result in a performance malus, it is highly advisable to use a different
        removal function, e.g. remove-by-object or remove-by-index.
"""
super().remove_single_by_ID(id)
# Use binary search if the collection is sorted, linear search otherwise
index = -1
if self._sorted:
index = bisect_left(self._data['id'], id)
if index == len(self._data['id']) or \
self._data['id'][index] != id:
raise ValueError("Trying to remove a particle with a non-existing ID: %s." % id)
else:
index = np.where(self._data['id'] == id)[0][0]
self.remove_single_by_index(index)
def remove_same(self, same_class):
"""
This function removes particles from this collection that are themselves stored in another object of an equi-
structured ParticleCollection. As the structures of both collections are the same, a more efficient M-in-N
removal can be applied without an in-between reformatting.
"""
super().remove_same(same_class)
raise NotImplementedError
def remove_collection(self, pcollection):
"""
This function removes particles from this collection that are themselves stored in a ParticleCollection, which
        is differently structured than this one. That means the removal first requires the removal-collection to be re-
formatted in an intermediary format, before executing the removal.
That said, this method should still be at least as efficient as a removal via common Python collections (i.e.
lists, dicts, numpy's nD arrays & dense arrays). Despite this, due to the reformatting, in some cases it may
        be more efficient to remove items by IDs or indices instead.
"""
super().remove_collection(pcollection)
raise NotImplementedError
def remove_multi_by_PyCollection_Particles(self, pycollectionp):
"""
This function removes particles from this collection, which are themselves in common Python collections, such as
lists, dicts and numpy structures. In order to perform the removal, we can either directly remove the referred
Particle instances (for internally-ordered collections, e.g. ordered lists, sets, trees) or we may need to parse
each instance for its index (for random-access structures), which results in a considerable performance malus.
For collections where removal-by-object incurs a performance malus, it is advisable to multi-remove particles
by indices or IDs.
"""
super().remove_multi_by_PyCollection_Particles(pycollectionp)
raise NotImplementedError
def remove_multi_by_indices(self, indices):
"""
This function removes particles from this collection based on their indices. This works best for random-access
collections (e.g. numpy's ndarrays, dense matrices and dense arrays), whereas internally ordered collections
shall rather use a removal-via-object-reference strategy.
"""
super().remove_multi_by_indices(indices)
if type(indices) is dict:
indices = list(indices.values())
for d in self._data:
self._data[d] = np.delete(self._data[d], indices, axis=0)
self._ncount -= len(indices)
def remove_multi_by_IDs(self, ids):
"""
This function removes particles from this collection based on their IDs. For collections where this removal
strategy would require a collection transformation or by-ID parsing, it is advisable to rather apply a removal-
by-objects or removal-by-indices scheme.
"""
super().remove_multi_by_IDs(ids)
if type(ids) is dict:
ids = list(ids.values())
if len(ids) == 0:
return
# Use binary search if the collection is sorted, linear search otherwise
indices = np.empty(len(ids), dtype=np.int32)
if self._sorted:
# This is efficient if len(ids) << self.len
sorted_ids = np.sort(np.array(ids))
indices = self._recursive_ID_lookup(0, len(self._data['id']), sorted_ids)
else:
indices = np.where(np.in1d(self._data['id'], ids))[0]
self.remove_multi_by_indices(indices)
def __isub__(self, other):
"""
        This method performs an incremental removal of the equi-structured ParticleCollections, so as to allow
        a -= b,
        with 'a' and 'b' being the two equi-structured objects (or: 'b' being an individual object).
This operation is equal to an in-place removal of (an) element(s).
"""
if other is None:
            return self
if type(other) is type(self):
self.remove_same(other)
elif (isinstance(other, BaseParticleAccessor)
or isinstance(other, ScipyParticle)):
self.remove_single_by_object(other)
else:
raise TypeError("Trying to do an incremental removal of an element of type %s, which is not supported." % type(other))
return self
def pop_single_by_index(self, index):
"""
Searches for Particle at index 'index', removes that Particle from the Collection and returns that Particle (or: ParticleAccessor).
If index is None, return last item (-1);
If index < 0: return from 'end' of collection.
        If index is out of bounds, throws an OutOfRangeException.
If Particle cannot be retrieved, returns None.
"""
super().pop_single_by_index(index)
raise NotImplementedError
def pop_single_by_ID(self, id):
"""
Searches for Particle with ID 'id', removes that Particle from the Collection and returns that Particle (or: ParticleAccessor).
If Particle cannot be retrieved (e.g. because the ID is not available), returns None.
"""
super().pop_single_by_ID(id)
raise NotImplementedError
def pop_multi_by_indices(self, indices):
"""
Searches for Particles with the indices registered in 'indices', removes the Particles from the Collection and returns the Particles (or: their ParticleAccessors).
If indices is None -> Particle cannot be retrieved -> Assert-Error and return None
If index is None, return last item (-1);
If index < 0: return from 'end' of collection.
        If index in 'indices' is out of bounds, throws an OutOfRangeException.
If Particles cannot be retrieved, returns None.
"""
super().pop_multi_by_indices(indices)
raise NotImplementedError
def pop_multi_by_IDs(self, ids):
"""
Searches for Particles with the IDs registered in 'ids', removes the Particles from the Collection and returns the Particles (or: their ParticleAccessors).
If Particles cannot be retrieved (e.g. because the IDs are not available), returns None.
"""
super().pop_multi_by_IDs(ids)
raise NotImplementedError
def _clear_deleted_(self):
"""
This (protected) function physically removes particles from the collection whose status is set to 'DELETE'.
It is the logical finalisation method of physically deleting particles that have been marked for deletion and
that have not otherwise been recovered.
        This method is heavily dependent on the actual collection type and should be implemented very specifically
        for the actual data structure, to remove objects 'the fastest way possible'.
"""
raise NotImplementedError
def merge(self, same_class=None):
"""
        This function merges two strictly equally-structured ParticleCollections into one. This can be, for example,
quite handy to merge two particle subsets that - due to continuous removal - become too small to be effective.
On the other hand, this function can also internally merge individual particles that are tagged by status as
being 'merged' (see the particle status for information on that).
In order to distinguish both use cases, we can evaluate the 'same_class' parameter. In cases where this is
'None', the merge operation semantically refers to an internal merge of individual particles - otherwise,
it performs a 2-collection merge.
        Comment: the function can be simplified later by pre-evaluating the function parameter and then referencing
        the individual, specific functions for internal or external merge.
The function shall return the merged ParticleCollection.
"""
raise NotImplementedError
def split(self, indices=None):
"""
        This function splits this collection into two disjoint, equi-structured collections. The reason for it can, for
example, be that the set exceeds a pre-defined maximum number of elements, which for performance reasons
mandates a split.
        On the other hand, this function can also internally split individual particles that are tagged by status as
to be 'split' (see the particle status for information on that).
In order to distinguish both use cases, we can evaluate the 'indices' parameter. In cases where this is
'None', the split operation semantically refers to an internal split of individual particles - otherwise,
it performs a collection-split.
        Comment: the function can be simplified later by pre-evaluating the function parameter and then referencing
        the individual, specific functions for element- or collection-split.
The function shall return the newly created or extended Particle collection, i.e. either the collection that
results from a collection split or this very collection, containing the newly-split particles.
"""
raise NotImplementedError
def __sizeof__(self):
"""
This function returns the size in actual bytes required in memory to hold the collection. Ideally and simply,
the size is computed as follows:
sizeof(self) = len(self) * sizeof(pclass)
"""
raise NotImplementedError
def clear(self):
"""
This function physically removes all elements of the collection, yielding an empty collection as result of the
operation.
"""
raise NotImplementedError
def cstruct(self):
"""
'cstruct' returns the ctypes mapping of the particle data. This depends on the specific structure in question.
"""
class CParticles(Structure):
_fields_ = [(v.name, POINTER(np.ctypeslib.as_ctypes_type(v.dtype))) for v in self._ptype.variables]
def flatten_dense_data_array(vname):
data_flat = self._data[vname].view()
data_flat.shape = -1
return np.ctypeslib.as_ctypes(data_flat)
cdata = [flatten_dense_data_array(v.name) for v in self._ptype.variables]
cstruct = CParticles(*cdata)
return cstruct
def toDictionary(self, pfile, time, deleted_only=False):
"""
Convert all Particle data from one time step to a python dictionary.
:param pfile: ParticleFile object requesting the conversion
:param time: Time at which to write ParticleSet
:param deleted_only: Flag to write only the deleted Particles
returns two dictionaries: one for all variables to be written each outputdt,
and one for all variables to be written once
This function depends on the specific collection in question and thus needs to be specified in specific
derivative classes.
"""
data_dict = {}
data_dict_once = {}
time = time.total_seconds() if isinstance(time, delta) else time
indices_to_write = []
if pfile.lasttime_written != time and \
(pfile.write_ondelete is False or deleted_only is not False):
if self._data['id'].size == 0:
logger.warning("ParticleSet is empty on writing as array at time %g" % time)
else:
if deleted_only is not False:
if type(deleted_only) not in [list, np.ndarray] and deleted_only in [True, 1]:
indices_to_write = np.where(np.isin(self._data['state'],
[OperationCode.Delete]))[0]
elif type(deleted_only) in [list, np.ndarray]:
indices_to_write = deleted_only
else:
indices_to_write = _to_write_particles(self._data, time)
if np.any(indices_to_write):
for var in pfile.var_names:
data_dict[var] = self._data[var][indices_to_write]
pset_errs = ((self._data['state'][indices_to_write] != OperationCode.Delete) & np.greater(np.abs(time - self._data['time'][indices_to_write]), 1e-3, where=np.isfinite(self._data['time'][indices_to_write])))
if np.count_nonzero(pset_errs) > 0:
logger.warning_once('time argument in pfile.write() is {}, but particles have time {}'.format(time, self._data['time'][pset_errs]))
if len(pfile.var_names_once) > 0:
first_write = (_to_write_particles(self._data, time) & _is_particle_started_yet(self._data, time) & np.isin(self._data['id'], pfile.written_once, invert=True))
if np.any(first_write):
data_dict_once['id'] = np.array(self._data['id'][first_write]).astype(dtype=np.int64)
for var in pfile.var_names_once:
data_dict_once[var] = self._data[var][first_write]
pfile.written_once.extend(np.array(self._data['id'][first_write]).astype(dtype=np.int64).tolist())
if deleted_only is False:
pfile.lasttime_written = time
return data_dict, data_dict_once
def toArray(self):
"""
This function converts (or: transforms; reformats; translates) this collection into an array-like structure
(e.g. Python list or numpy nD array) that can be addressed by index. In the common case of 'no ID recovery',
the global ID and the index match exactly.
        While this function may be very convenient for many users, it is STRONGLY DISADVISED to use the function too
        often, as the performance- and memory-overhead malus may exceed any speed-up one could get from optimised
        data structures - in fact, for large collections with an implicit-order structure (i.e. ordered lists, sets,
        trees, etc.), this may be 'the most costly' function in any kind of simulation.
It can be - though - useful at the final stage of a simulation to dump the results to disk.
"""
raise NotImplementedError
def set_variable_write_status(self, var, write_status):
"""
Method to set the write status of a Variable
:param var: Name of the variable (string)
        :param write_status: Write status of the variable (True, False or 'once')
"""
var_changed = False
for v in self._ptype.variables:
if v.name == var:
v.to_write = write_status
var_changed = True
if not var_changed:
raise SyntaxError('Could not change the write status of %s, because it is not a Variable name' % var)
class ParticleAccessorSOA(BaseParticleAccessor):
"""Wrapper that provides access to particle data in the collection,
as if interacting with the particle itself.
:param pcoll: ParticleCollection that the represented particle
belongs to.
:param index: The index at which the data for the represented
particle is stored in the corresponding data arrays
                  of the ParticleCollection.
"""
_index = 0
_next_dt = None
def __init__(self, pcoll, index):
"""Initializes the ParticleAccessor to provide access to one
specific particle.
"""
super(ParticleAccessorSOA, self).__init__(pcoll)
self._index = index
self._next_dt = None
def __getattr__(self, name):
"""Get the value of an attribute of the particle.
:param name: Name of the requested particle attribute.
:return: The value of the particle attribute in the underlying
collection data array.
"""
if name in BaseParticleAccessor.__dict__.keys():
result = super(ParticleAccessorSOA, self).__getattr__(name)
elif name in type(self).__dict__.keys():
result = object.__getattribute__(self, name)
else:
result = self._pcoll.data[name][self._index]
return result
def __setattr__(self, name, value):
"""Set the value of an attribute of the particle.
:param name: Name of the particle attribute.
:param value: Value that will be assigned to the particle
attribute in the underlying collection data array.
"""
if name in BaseParticleAccessor.__dict__.keys():
super(ParticleAccessorSOA, self).__setattr__(name, value)
elif name in type(self).__dict__.keys():
object.__setattr__(self, name, value)
else:
self._pcoll.data[name][self._index] = value
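    # Usage sketch (hypothetical names): for a collection `pcoll`,
    # `p = ParticleAccessorSOA(pcoll, 3)` exposes the SOA columns as plain
    # attributes, so `p.lon += 0.1` reads via __getattr__ and writes back
    # into `pcoll.data['lon'][3]` via __setattr__ above.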
def getPType(self):
return self._pcoll.ptype
def update_next_dt(self, next_dt=None):
if next_dt is None:
if self._next_dt is not None:
self._pcoll._data['dt'][self._index] = self._next_dt
self._next_dt = None
else:
self._next_dt = next_dt
def __repr__(self):
time_string = 'not_yet_set' if self.time is None or np.isnan(self.time) else "{:f}".format(self.time)
str = "P[%d](lon=%f, lat=%f, depth=%f, " % (self.id, self.lon, self.lat, self.depth)
for var in self._pcoll.ptype.variables:
if var.to_write is not False and var.name not in ['id', 'lon', 'lat', 'depth', 'time']:
str += "%s=%f, " % (var.name, getattr(self, var.name))
return str + "time=%s)" % time_string
class ParticleCollectionIterableSOA(BaseParticleCollectionIterable):
def __init__(self, pcoll, reverse=False, subset=None):
super(ParticleCollectionIterableSOA, self).__init__(pcoll, reverse, subset)
def __iter__(self):
return ParticleCollectionIteratorSOA(pcoll=self._pcoll_immutable, reverse=self._reverse, subset=self._subset)
def __len__(self):
"""Implementation needed for particle-particle interaction"""
return len(self._subset)
def __getitem__(self, items):
"""Implementation needed for particle-particle interaction"""
return ParticleAccessorSOA(self._pcoll_immutable, self._subset[items])
class ParticleCollectionIteratorSOA(BaseParticleCollectionIterator):
"""Iterator for looping over the particles in the ParticleCollection.
:param pcoll: ParticleCollection that stores the particles.
:param reverse: Flag to indicate reverse iteration (i.e. starting at
the largest index, instead of the smallest).
:param subset: Subset of indices to iterate over, this allows the
creation of an iterator that represents part of the
collection.
"""
def __init__(self, pcoll, reverse=False, subset=None):
if subset is not None:
if len(subset) > 0 and type(subset[0]) not in [int, np.int32, np.intp]:
raise TypeError("Iteration over a subset of particles in the"
" particleset requires a list or numpy array"
" of indices (of type int or np.int32).")
if reverse:
                # Build a reversed copy of the subset (list.reverse() reverses in place and returns None)
                self._indices = list(reversed(subset))
else:
self._indices = subset
self.max_len = len(subset)
else:
self.max_len = len(pcoll)
if reverse:
self._indices = range(self.max_len - 1, -1, -1)
else:
self._indices = range(self.max_len)
self._reverse = reverse
self._pcoll = pcoll
self._index = 0
self._head = None
self._tail = None
if len(self._indices) > 0:
self._head = ParticleAccessorSOA(pcoll, self._indices[0])
self._tail = ParticleAccessorSOA(pcoll,
self._indices[self.max_len - 1])
self.p = self._head
def __next__(self):
"""Returns a ParticleAccessor for the next particle in the
ParticleSet.
"""
if self._index < self.max_len:
self.p = ParticleAccessorSOA(self._pcoll,
self._indices[self._index])
self._index += 1
return self.p
raise StopIteration
@property
def current(self):
return self.p
def __repr__(self):
dir_str = 'Backward' if self._reverse else 'Forward'
return "%s iteration at index %s of %s." % (dir_str, self._index, self.max_len)
|
demo/MixedPoisson.py | jaisw7/shenfun | 138 | 12628542 | <filename>demo/MixedPoisson.py
r"""Solve Poisson's equation using a mixed formulation
The Poisson equation is
.. math::
\nabla^2 u &= f \\
u(x, y=\pm 1) &= 0 \\
u(x=2\pi, y) &= u(x=0, y)
We solve using the mixed formulation
.. math::
g - \nabla(u) &= 0 \\
\nabla \cdot g &= f \\
u(x, y=\pm 1) &= 0 \\
u(x=2\pi, y) &= u(x=0, y) \\
g(x=2\pi, y) &= g(x=0, y)
We use a Tensorproductspace with Fourier expansions in the x-direction and
a composite Chebyshev basis in the y-direction. The equations are solved
coupled and implicit.
"""
import os
import sys
import numpy as np
from sympy import symbols, sin, cos
from shenfun import *
x, y = symbols("x,y", real=True)
family = sys.argv[-1].lower()
assert len(sys.argv) == 4, "Call with three command-line arguments: N[0], N[1] and family (Chebyshev/Legendre)"
assert family in ('legendre', 'chebyshev')
assert isinstance(int(sys.argv[-2]), int)
assert isinstance(int(sys.argv[-3]), int)
# Create a manufactured solution for verification
#ue = (sin(2*x)*cos(3*y))*(1-x**2)
ue = (sin(4*x)*cos(5*y))*(1-y**2)
dux = ue.diff(x, 1)
duy = ue.diff(y, 1)
fe = ue.diff(x, 2) + ue.diff(y, 2)
N = (int(sys.argv[-3]), int(sys.argv[-2]))
K0 = FunctionSpace(N[0], 'Fourier', dtype='d')
SD = FunctionSpace(N[1], family, bc=(0, 0))
ST = FunctionSpace(N[1], family)
TD = TensorProductSpace(comm, (K0, SD), axes=(1, 0))
TT = TensorProductSpace(comm, (K0, ST), axes=(1, 0))
VT = VectorSpace(TT)
Q = CompositeSpace([VT, TD])
gu = TrialFunction(Q)
pq = TestFunction(Q)
g, u = gu
p, q = pq
A00 = inner(p, g)
if family == 'legendre':
A01 = inner(div(p), u)
else:
A01 = inner(p, -grad(u))
A10 = inner(q, div(g))
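# Taken together, the three bilinear forms above assemble the saddle-point
# block system (roughly) [[A00, A01], [A10, 0]] @ [g, u] = [0, f], which the
# BlockMatrix below solves in one shot for the flux g and the potential u.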
# Get f and g on quad points
vfj = Array(Q, buffer=(0, 0, fe))
vj, fj = vfj
vf_hat = Function(Q)
v_hat, f_hat = vf_hat
f_hat = inner(q, fj, output_array=f_hat)
M = BlockMatrix(A00+A01+A10)
gu_hat = M.solve(vf_hat)
gu = gu_hat.backward()
g_, u_ = gu
uj = Array(TD, buffer=ue)
duxj = Array(TT, buffer=dux)
duyj = Array(TT, buffer=duy)
error = [comm.reduce(np.linalg.norm(uj-u_)),
comm.reduce(np.linalg.norm(duxj-g_[0])),
comm.reduce(np.linalg.norm(duyj-g_[1]))]
if comm.Get_rank() == 0:
print('Error u dudx dudy')
print(' %2.4e %2.4e %2.4e' %(error[0], error[1], error[2]))
assert np.all(abs(np.array(error)) < 1e-8), error
if 'pytest' not in os.environ:
import matplotlib.pyplot as plt
plt.figure()
X = TD.local_mesh(True)
plt.contourf(X[0], X[1], u_)
plt.figure()
plt.quiver(X[1], X[0], g_[1], g_[0])
plt.figure()
plt.spy(M.diags((0, 0)).toarray()) # The matrix for given Fourier wavenumber
plt.show()
|
docker/test/integration/minifi/processors/GenerateFlowFile.py | dtrodrigues/nifi-minifi-cpp | 113 | 12628601 | from ..core.Processor import Processor
class GenerateFlowFile(Processor):
def __init__(self, schedule={'scheduling period': '2 sec'}):
super(GenerateFlowFile, self).__init__(
'GenerateFlowFile',
schedule=schedule,
auto_terminate=['success'])
|
supervisor/superlance/timeoutconn.py | jzmq/minos | 365 | 12628614 | <filename>supervisor/superlance/timeoutconn.py
import httplib
import socket
class TimeoutHTTPConnection(httplib.HTTPConnection):
"""A customised HTTPConnection allowing a per-connection
timeout, specified at construction."""
timeout = None
def connect(self):
"""Override HTTPConnection.connect to connect to
host/port specified in __init__."""
msg = "getaddrinfo returns an empty list"
for res in socket.getaddrinfo(self.host, self.port,
0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
if self.timeout: # this is the new bit
self.sock.settimeout(self.timeout)
self.sock.connect(sa)
except socket.error, msg:
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
class TimeoutHTTPSConnection(httplib.HTTPSConnection):
timeout = None
def connect(self):
"Connect to a host on a given (SSL) port."
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.timeout:
            sock.settimeout(self.timeout)
sock.connect((self.host, self.port))
ssl = socket.ssl(sock, self.key_file, self.cert_file)
self.sock = httplib.FakeSocket(sock, ssl)
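# Usage sketch (hypothetical host and timeout, for illustration only):
#
#     conn = TimeoutHTTPConnection('example.com', 80)
#     conn.timeout = 5  # seconds, applied to the socket in connect()
#     conn.request('GET', '/')
#     response = conn.getresponse()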
|
platypush/plugins/clipboard.py | RichardChiang/platypush | 228 | 12628617 | from platypush.plugins import Plugin, action
class ClipboardPlugin(Plugin):
"""
Plugin to programmatically copy strings to your system clipboard
and get the current clipboard content.
Requires:
* **pyperclip** (``pip install pyperclip``)
"""
@action
def copy(self, text):
"""
Copies a text to the OS clipboard
:param text: Text to copy
:type text: str
"""
import pyperclip
pyperclip.copy(text)
@action
def paste(self):
"""
Get the current content of the clipboard
"""
import pyperclip
return pyperclip.paste()
# vim:sw=4:ts=4:et:
|
mindsdb/api/mongo/responders/list_indexes.py | yarenty/mindsdb | 261 | 12628626 | from mindsdb.api.mongo.classes import Responder
import mindsdb.api.mongo.functions as helpers
class Response(Responder):
when = {'listIndexes': helpers.is_true}
def result(self, query, request_env, mindsdb_env, session):
return {
'cursor': [{
'v': 2,
'key': {
'_id': 1
},
'name': '_id_',
'ns': f"{query['$db']}.{query['listIndexes']}"
}],
'ok': 1,
}
responder = Response()
|
util/update_lints.py | fkohlgrueber/rust-clippy-pattern | 1,686 | 12628629 | <filename>util/update_lints.py
#!/usr/bin/env python
import sys
def main():
print('Error: Please use `util/dev` to update lints')
return 1
if __name__ == '__main__':
sys.exit(main())
|
evaluate/gan.py | HappyBelief/ContraD | 168 | 12628673 | <gh_stars>100-1000
import os
import numpy as np
import torch
from torchvision.utils import make_grid
from evaluate import BaseEvaluator
from third_party.fid.inception import InceptionV3
from third_party.fid.fid_score import fid_score
from third_party.fid.fid_score import precompute_stats
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class ImageGrid(BaseEvaluator):
def __init__(self, volatile=False):
self._images = []
self._steps = []
self.volatile = volatile
def update(self, step, images):
img_grid = make_grid(images[:64].cpu().data)
img_grid = np.transpose(img_grid.numpy(), (1, 2, 0))
self._images.append(img_grid)
self._steps.append(step)
if self.volatile:
self._images = self._images[-1:]
self._steps = self._steps[-1:]
return img_grid
@property
def value(self):
if len(self._images) > 0:
return self._images[-1]
else:
raise ValueError()
def summary(self):
return self._images
def reset(self):
self._images = []
self._steps = []
class FixedSampleGeneration(BaseEvaluator):
def __init__(self, G, volatile=False):
self._G = G
self._latent = G.sample_latent(16)
self._images = []
self._steps = []
self.volatile = volatile
def update(self, step):
with torch.no_grad():
img_grid = make_grid(self._G(self._latent).cpu().data, nrow=4)
img_grid = np.transpose(img_grid.numpy(), (1, 2, 0))
self._images.append(img_grid)
self._steps.append(step)
if self.volatile:
self._images = self._images[-1:]
self._steps = self._steps[-1:]
return img_grid
@property
def value(self):
if len(self._images) > 0:
return self._images[-1]
else:
raise ValueError()
def summary(self):
return self._images
def reset(self):
self._latent = self._G.sample_latent(64)
self._images = []
self._steps = []
class FIDScore(BaseEvaluator):
def __init__(self, dataset='cifar10', size=10000, n_avg=3):
assert n_avg > 0
self.dataset = dataset
self.size = size
self.n_avg = n_avg
self._precomputed_path = f'third_party/fid/{dataset}_stats.npz'
self._fid_model = InceptionV3().to(device)
self._history = []
self._best = []
self._steps = []
self.is_best = False
if not os.path.exists(self._precomputed_path):
print("FIDScore: No pre-computed stats found, computing a new one...")
precompute_stats(dataset, self._precomputed_path, model=self._fid_model)
def update(self, step, G):
scores = []
for _ in range(self.n_avg):
score = fid_score(self._precomputed_path, G, size=self.size,
model=self._fid_model, batch_size=50)
scores.append(score)
score_avg = np.mean(scores)
if len(self._best) == 0:
score_best = score_avg
self.is_best = True
else:
self.is_best = (score_avg < self._best[-1])
score_best = min(self._best[-1], score_avg)
self._history.append(scores)
self._steps.append(step)
self._best.append(score_best)
return score_avg
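    # Typical use (hypothetical names, sketch only): construct once, e.g.
    # `fid = FIDScore('cifar10', size=10000)`, then call
    # `score = fid.update(step, generator)` during training; the `best`
    # property below tracks the lowest (best) FID observed so far.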
@property
def value(self):
if len(self._history) > 0:
return np.mean(self._history[-1])
else:
raise ValueError()
@property
def best(self):
if len(self._best) > 0:
return self._best[-1]
else:
raise ValueError()
def summary(self):
return self._history
def reset(self):
self._history = []
self._steps = []
self._best = []
def save(self, filename):
if len(self._history) == 0:
return
steps = np.array(self._steps)
history = np.array(self._history)
best = np.array(self._best)
history = np.c_[steps, history, history.mean(1), history.std(1), best]
header = 'step,'
header += ','.join([f'trial_{i}' for i in range(self.n_avg)])
header += ',mean,std,best'
np.savetxt(filename, history, fmt='%.3f', delimiter=",",
header=header, comments='') |
example/documents.py | closeio/flask-mongorest | 268 | 12628687 | <reponame>closeio/flask-mongorest
from mongoengine import *
class DateTime(Document):
datetime = DateTimeField()
class Language(Document):
name = StringField()
class Person(Document):
name = StringField()
languages = ListField(ReferenceField(Language))
class User(Document):
email = EmailField(unique=True, required=True)
first_name = StringField(max_length=50)
last_name = StringField(max_length=50)
emails = ListField(EmailField())
datetime = DateTimeField()
datetime_local = DateTimeField()
balance = IntField() # in cents
class Content(EmbeddedDocument):
text = StringField()
lang = StringField(max_length=3)
class Post(Document):
title = StringField(max_length=120, required=True)
description = StringField(max_length=120, required=False)
author = ReferenceField(User)
editor = ReferenceField(User)
tags = ListField(StringField(max_length=30))
try:
user_lists = ListField(SafeReferenceField(User))
except NameError:
user_lists = ListField(ReferenceField(User))
sections = ListField(EmbeddedDocumentField(Content))
content = EmbeddedDocumentField(Content)
is_published = BooleanField()
def primary_user(self):
return self.user_lists[0] if self.user_lists else None
|
public/pypyjs/modules/pandas.py | inkandswitch/livebook | 195 | 12628710 | <reponame>inkandswitch/livebook
import json
from copy import copy
from math import pow,sqrt
def do_math(func,data):
if len(data) > 0 and (type(data[0]) == int or type(data[0]) == float):
return func(data)
else:
return None
def mean(nums):
return sum(nums)/len(nums)
class IlocIndexer(object):
def __init__(self,df):
self._df = df
def __getitem__(self,i):
d = self._df
if type(i) == slice:
return DataFrame(d,idx=d._idx[i])
if type(i) == tuple:
return DataFrame(d,idx=d._idx[i[0]])
raise IndexError("Iloc Indexer Unsupported Input")
class Record(object):
def __init__(self, df, i):
self._df = df
self._i = i
def __getattr__(self,attr):
return self._df[attr][self._i]
class Series(object):
def __deepcopy__(self,memo):
return Series(self.data, sort=self.sort, column=self.column, idx=copy(self.idx))
def __init__(self, data, column=None, sort=None, idx=None, name=None):
if type(data) == Series:
self.data = data.data
self.column = column or data.column
self.sort = sort or data.sort
self.idx = idx or data.idx
elif type(data) == DataFrame:
if (data._sort == None): raise IndexError("Cannot coerce DataFrame to a Series without an index")
self.data = data._data
self.column = data._sort
self.sort = None
self.idx = idx or data._idx
elif type(data) == dict:
self.data = data
self.sort = sort
self.column = column
self.idx = idx or range(0,len(data[column]))
elif idx == None:
self.column = column or name or "series"
self.data = { self.column: list(data) }
self.sort = None
self.idx = range(0,len(data))
else:
self.data = data
self.column = column
self.sort = sort
self.idx = idx
def __str__(self):
return "Series:\n" + str(self.data) + "\nCol:" + str(self.column) + "\nSort:" + str(self.sort);
def __getitem__(self,i):
if type(i) == slice:
return Series(self, idx=self.idx[i])
else:
return self.data[self.column][self.idx[i]]
def __and__(self,other):
return Series([ self[i] & other[i] for i in range(0,len(self))])
def __iter__(self):
for i in range(0, len(self)):
yield self[i]
def __len__(self):
return len(self.idx)
def __eq__(self,arg): return self.apply(lambda x: x == arg)
    def __ne__(self,arg): return self.apply(lambda x: x != arg)
def __le__(self,arg): return self.apply(lambda x: x <= arg)
def __lt__(self,arg): return self.apply(lambda x: x < arg)
def __ge__(self,arg): return self.apply(lambda x: x >= arg)
def __gt__(self,arg): return self.apply(lambda x: x > arg)
def hist(self,bins=10):
from matplotlib import pyplot
l = sorted(self.tolist())
_min = l[0]
_max = l[-1]
step = (_max - _min) / float(bins)
buckets = [ _min + step * i for i in range(0,bins+1) ]
hist = [0] * bins
last_b = 0
for val in l:
for b in range(last_b,bins):
if val <= buckets[b+1]:
hist[b] += 1
last_b = b
break
data = { "hist": hist, "buckets": buckets[0:-1] }
pyplot.bar(buckets, hist)
def isnumeric(self):
return type(self[0]) in [int,float,long]
def isnull(self):
return self.apply(lambda x: x == None)
def dropna(self):
new_idx=[ i for i in self.idx if self.data[self.column][i] != None ]
return Series(self,idx=new_idx)
def unique(self):
memo = set()
new_idx = []
c = self.data[self.column]
for i in self.idx:
if c[i] not in memo:
new_idx.append(i)
memo.add(c[i])
return Series(self, idx=new_idx)
def sum(self):
return sum(self.tolist())
def apply(self,func):
return Series({ self.column: [ func(d) for d in self ] }, self.column, None, range(0,len(self)))
def tolist(self):
c = self.data[self.column]
return [ c[i] for i in self.idx]
def to_plot_data(self):
return { "x": self.sort, "columns": [
[self.sort] + [ self.data[self.sort][i] for i in self.idx ],
[self.column] + [ self.data[self.column][i] for i in self.idx ]
], "original_type": "series", }
def to_plot_data_v2(self):
return {"x": self.sort, "column": self.column, "data": self.data, "list": self.tolist()}
def describe(self):
return self.to_frame().describe()
def head(self,n=5):
return Series(self, idx=self.idx[0:n])
def tail(self,n=5):
return Series(self, idx=self.idx[-n:])
def get_index(self):
return Series(self,column=self.sort,idx=self.idx)
def value_counts(self):
values = [self.data[self.column][i] for i in self.idx]
uniques = list(set(values))
counts = [ values.count(val) for val in uniques ]
new_body = { self.column: uniques, "count": counts }
new_idx = sorted(range(0,len(uniques)),key=lambda i: counts[i],reverse=True)
return Series(new_body, "count", self.column, new_idx)
def to_frame(self):
if self.sort == None:
return DataFrame(self,columns=[self.column])
else:
return DataFrame(self,columns=[self.sort, self.column])
def to_js(self):
d1 = [self.data[self.column][i] for i in self.idx]
if self.sort == None:
return { "head":[self.column], "body":{self.column:d1}, "length":len(self) }
else:
d2 = [self.data[self.sort][i] for i in self.idx]
return { "sort": self.sort, "head":[self.sort, self.column], "body":{self.column:d1,self.sort:d2}, "length":len(self) }
def resample(self,rule,**kwargs):
keys = []
bins = {}
how = "mean"
_how = mean
if "how" in kwargs:
how = kwargs["how"]
_how = len ## todo
for key,val in self.iteritems():
#print "Resample key=%s,val=%s,rule=%s"%(key,val,rule)
if rule == "A": key = key[:4] + "-01-01"
if rule == "AS": key = key[:4] + "-12-31"
if rule == "M": key = key[:7] + "-01"
if key in bins:
bins[key].append(val)
else:
keys.append(key)
bins[key] = [val]
new_body = { self.column: [], how: [] }
new_head = [ self.column, how ]
for k in keys:
new_body[self.column].append(k)
new_body[how].append(_how(bins[k]))
return Series(new_body, how, self.column, range(0,len(new_body[how])))
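    # Resampling sketch (hypothetical dates): for a date-indexed series,
    # rule 'M' truncates keys such as '2016-03-17' to '2016-03-01' and then
    # averages (or, with a different `how`, counts) the values falling into
    # each monthly bucket - a small subset of pandas' resample() behaviour.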
def iteritems(self):
return [ ( self.data[self.sort][i], self.data[self.column][i] ) for i in self.idx].__iter__()
class DataFrame(object):
def __deepcopy__(self,memo):
return DataFrame(data=self._data, columns=copy(self._columns), sort=copy(self._sort), idx=copy(self._idx))
def __init__(self, base=None, data=None, columns=None, sort=None, idx=None):
self.iloc = IlocIndexer(self)
if type(base) == Series:
self._data = base.data
self._columns = [base.sort, base.column] if base.sort else [base.column]
self._sort = base.sort
self._idx = base.idx
elif type(base) == dict:
self._data = base
self._columns = columns or base.keys()
self._sort = sort or None
self._idx = idx or range(0,len(self._data[self._columns[0]]))
elif type(base) == DataFrame:
self._data = data or base._data
self._columns = columns or base._columns
self._sort = sort or base._sort
self._idx = idx or base._idx
else:
self._data = data
self._columns = columns
self._sort = sort
self._idx = idx
pass
self.__postinit__()
@staticmethod
def from_data(data):
return DataFrame(data["body"],columns=data["head"])
@staticmethod
def from_dict(data):
return DataFrame(data)
@staticmethod
def from_csv(path,**kargs):
return read_csv(path)
def __str__(self):
return "DataFrame:\n" + str(self._data)
def __postinit__(self):
self.shape = (len(self),len(self._columns))
self.columns = self._columns
if (self._sort):
self.index = self[self._sort]
else:
self.index = Series(range(0,len(self)))
def __setitem__(self,key,val):
## FIXME - this mutates the structure
if len(val) != len(self):
raise TypeError("__setitem__ called with an assignment of the wrong length")
try:
val2 = list(val) ## TODO - make a reverse index?
remapped = len(self)*[None]
for i in range(0,len(self)):
remapped[self._idx[i]] = val2[i]
self._data[key] = remapped
self._columns.index(key)
except ValueError:
self._columns.append(key)
def __getitem__(self,i):
# if type(i) == str and i == "_data":
# raise ValueError("NOPE")
if (type(i) is str or type(i) is unicode):
return Series(self._data,i,self._sort,self._idx)
elif (type(i) is Series):
return DataFrame(self, idx=[ self._idx[n] for n in range(0,len(self)) if i[n] ])
elif (i < 0 or i >= len(self)):
raise IndexError("DataFrame index out of range")
else:
return tuple(map(lambda x: self._data[x][self._idx[i]], self._columns))
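    # Indexing sketch (hypothetical column name): df['price'] returns a
    # Series view over the same underlying data, while df[df['price'] > 10]
    # takes the boolean-Series branch above and builds a new DataFrame that
    # shares the data but keeps only the matching rows in its index.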
def __getattr__(self,attr):
return self[attr]
def __iter__(self):
for i in range(0, len(self)):
yield self[i]
def __len__(self):
return len(self._idx)
def __blank_body__(self): ## TODO - need a simpler one here
body = {}
for h in self.columns: body[h] = []
return body
def insert(self,loc,column,val): ## FIXME - this is the only function we have that mutates - could effect older objects
self._columns.insert(loc,column)
self._data[column] = [ val for i in range(0,len(self)) ]
self.shape = (len(self),len(self._columns))
def apply(self,func):
new_data = {}
for c in self._columns:
new_data[c] = [ func(d) for d in self._data[c] ]
return DataFrame(self, data=new_data)
def set_index(self,index):
return DataFrame(self, sort=index)
def dropna(self,**kwargs):
new_idx = [i for i in self._idx if all([self._data[c][i] != None for c in self.columns])]
if kwargs is not None:
if "inplace" in kwargs and kwargs["inplace"] == True:
self._idx = new_idx
return self
return DataFrame(self, idx=new_idx)
def sort_values(self,by,ascending=True):
new_idx = sorted(self._idx,key=lambda i: self._data[by][i],reverse=(not ascending))
return DataFrame(self, idx=new_idx)
def groupby(self,by):
return GroupBy(self,by)
def to_js(self):
body = {}
for c in self._columns:
body[c] = [self._data[c][i] for i in self._idx]
return { "head":self._columns, "body":body, "length":len(self._idx), "sort": self._sort }
def describe(self):
funcs = ["count","mean","std","min","25","50","75","max"]
data = { "_id": funcs }
columns = [ c for c in self.columns if self[c].isnumeric() ]
sort = "_id"
idx = range(0,len(funcs))
for c in columns:
d = sorted(self[c].dropna().tolist())
l = len(d)
mean = sum(d)/l
std = sqrt(sum([ pow(mean - val, 2) for val in d ])/(l-1))
_min = d[0]
_25 = d[l/4]
_50 = d[l/2]
_75 = d[l*3/4]
_max = d[l-1]
data[c] = [ l, mean, std, _min, _25, _50, _75, _max ]
return DataFrame(data, columns=["_id"] + columns, sort=sort, idx=idx)
def head(self, n=5):
return DataFrame(self, idx=self._idx[0:n])
def tail(self, n=5):
return DataFrame(self, idx=self._idx[-n:])
def record(self, i):
return Record(self,i)
def iterrows(self):
return [ (i, Record(self,i)) for i in range(0,len(self))].__iter__()
class GroupBy:
def __init__(self, data, by):
self.groups = {}
for i in range(0, len(data)):
v = data.body[by][i]
if not v in self.groups:
self.groups[v] = data.select(by,v)
def __iter__(self):
for k in self.groups:
yield (k,self.groups[k])
class Cache:
csv = {}
def read_csv(filename, header=None, names=None):
import js
# pandas defaults `header` to 0 (row to be treated as a header)
# if `names` is specified, however, we use that
if header is None and names is None:
header = 0
if header is None and names is not None:
header = names
key = str([filename,header,names])
if key in Cache.csv:
return DataFrame.from_data(Cache.csv[key])
Cache.csv[key] = json.loads(str(js.globals.parse_raw_data(filename,header,names)))
return DataFrame.from_data(Cache.csv[key])
|
solutions/problem_006.py | ksvr444/daily-coding-problem | 1,921 | 12628712 | <reponame>ksvr444/daily-coding-problem
class Node:
def __init__(self, data):
self.data = data
self.both = id(data)
def __repr__(self):
return str(self.data)
a = Node("a")
b = Node("b")
c = Node("c")
d = Node("d")
e = Node("e")
# id_map simulates object pointer values
id_map = dict()
id_map[id("a")] = a
id_map[id("b")] = b
id_map[id("c")] = c
id_map[id("d")] = d
id_map[id("e")] = e
class LinkedList:
def __init__(self, node):
self.head = node
self.tail = node
self.head.both = 0
self.tail.both = 0
def add(self, element):
self.tail.both ^= id(element.data)
element.both = id(self.tail.data)
self.tail = element
def get(self, index):
prev_node_address = 0
result_node = self.head
for i in range(index):
next_node_address = prev_node_address ^ result_node.both
prev_node_address = id(result_node.data)
result_node = id_map[next_node_address]
return result_node.data
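# How the traversal works: each node's `both` field holds the XOR of the ids
# of its two neighbours, so XOR-ing it with the previous node's id cancels
# that term and leaves the id of the next node, which id_map then resolves
# back to a Node object (standing in for pointer dereferencing).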
llist = LinkedList(c)
llist.add(d)
llist.add(e)
llist.add(a)
assert llist.get(0) == "c"
assert llist.get(1) == "d"
assert llist.get(2) == "e"
assert llist.get(3) == "a"
|
server/backend/app/utils.py | jakuta-tech/TinyCheck | 2,054 | 12628746 | <filename>server/backend/app/utils.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import yaml
import sys
import os
from functools import reduce
def read_config(path):
"""
Read a value from the configuration
:return: value (it can be any type)
"""
dir = "/".join(sys.path[0].split("/")[:-2])
config = yaml.load(open(os.path.join(dir, "config.yaml"), "r"),
Loader=yaml.SafeLoader)
return reduce(dict.get, path, config)
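# Example (hypothetical keys): with a config.yaml containing
# `network: {iface: wlan0}`, read_config(("network", "iface")) walks the
# nested dictionaries via reduce(dict.get, ...) and returns "wlan0".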
def write_config(cat, key, value):
"""
Write a new value in the configuration
:return: bool, operation status
"""
try:
dir = "/".join(sys.path[0].split("/")[:-2])
config = yaml.load(open(os.path.join(dir, "config.yaml"),
"r"), Loader=yaml.SafeLoader)
config[cat][key] = value
with open(os.path.join(dir, "config.yaml"), "w") as yaml_file:
yaml_file.write(yaml.dump(config, default_flow_style=False))
return True
except:
return False
|
indra/sources/gnbr/processor.py | johnbachman/belpy | 136 | 12628747 | <filename>indra/sources/gnbr/processor.py
"""This module contains the processor for GNBR. There are several, each
corresponding to different kinds of interactions."""
import re
import itertools as it
from typing import List
from copy import deepcopy
import pandas as pd
from indra.statements import *
from indra.databases import mesh_client
from indra.ontology.bio import bio_ontology
from indra.ontology.standardize import get_standard_agent
gene_gene_stmt_mappings = {
'V+': Activation,
'E+': IncreaseAmount,
'Q': IncreaseAmount,
'H': Complex
}
chem_gene_stmt_mappings = {
'A+': Activation,
'A-': Inhibition,
'N': Inhibition,
'B': Complex,
'E-': DecreaseAmount
}
gene_disease_stmt_mappings = {
'Te': Inhibition,
'G': Activation
}
chem_disease_stmt_mappings = {
'T': Inhibition,
'C': Inhibition,
'Pr': Inhibition,
'Pa': Inhibition
}
cheby_pattern = re.compile(r'^CHEBI:(\d+)$')
mesh_pattern = re.compile(r'^MESH:([CD]\d+)$')
mesh_no_prefix_pattern = re.compile(r'^[CD]\d+$')
entrez_pattern = re.compile(r'^(\d+)$')
entrez_with_tax_pattern = re.compile(r'^(\d+)\(Tax:(\d+)\)$')
omim_pattern = re.compile(r'^OMIM:(\d+)$')
omim_no_prefix_pattern = re.compile(r'^(\d+)$')
class GnbrProcessor:
"""A processor for interactions in the GNBR dataset.
Parameters
----------
df1 :
Dataframe of dependency paths and themes.
df2 :
Dataframe of dependency paths and agents.
first_type :
The type of the first entity in the data frame.
second_type :
The type of the second entity in the data frame.
"""
def __init__(self, df1: pd.DataFrame, df2: pd.DataFrame,
first_type: str, second_type: str,
indicator_only: bool = True) -> None:
self.df1 = df1
self.df2 = df2
self.df2.columns = ['id', 'sentence_num', 'nm_1_form', 'nm_1_loc',
'nm_2_form', 'nm_2_loc', 'nm_1_raw', 'nm_2_raw',
'nm_1_dbid', 'nm_2_dbid', '1_type', '2_type',
'path', 'sentence']
self.df2['path'] = df2['path'].str.lower()
self.first_type = first_type
self.second_type = second_type
self.indicator_only = indicator_only
self.statements = []
def extract_stmts(self):
"""Extend the statements list with mappings."""
if self.first_type == 'gene' and self.second_type == 'gene':
statement_mappings = gene_gene_stmt_mappings
elif self.first_type == 'chemical' and self.second_type == 'gene':
statement_mappings = chem_gene_stmt_mappings
elif self.first_type == 'gene' and self.second_type == 'disease':
statement_mappings = gene_disease_stmt_mappings
else:
statement_mappings = chem_disease_stmt_mappings
for rel_type, stmt_type in statement_mappings.items():
constraint = (self.df1[rel_type] > 0)
if self.indicator_only:
constraint &= (self.df1['%s.ind' % rel_type] == 1)
df_part = self.df1[constraint]
self.statements.extend(self._extract_stmts_by_class(df_part,
stmt_type))
def _extract_stmts_by_class(self, df, stmt_class):
"""Make a given class of Statements from a subset of the dataframe.
Parameters
----------
df :
Filtered dataframe to one particular relationship theme.
stmt_class :
Statement type matched to the type of the filtered dataframe.
Yields
------
stmt :
Statements produced from the dataframes.
"""
df_joint = df.join(self.df2.set_index('path'), on='path')
for index, row in df_joint.iterrows():
if self.first_type == 'gene':
first_agents = get_std_gene(row['nm_1_raw'],
row['nm_1_dbid'])
else:
first_agents = get_std_chemical(row['nm_1_raw'],
row['nm_1_dbid'])
if self.second_type == 'gene':
second_agents = get_std_gene(row['nm_2_raw'],
row['nm_2_dbid'])
else:
second_agents = get_std_disease(row['nm_2_raw'],
row['nm_2_dbid'])
evidence = get_evidence(row)
for first_agent, second_agent in it.product(first_agents,
second_agents):
if stmt_class == Complex:
stmt = stmt_class([first_agent, second_agent],
evidence=deepcopy(evidence))
else:
stmt = stmt_class(first_agent, second_agent,
evidence=deepcopy(evidence))
yield stmt
def get_std_gene(raw_string: str, db_id: str) -> List[Agent]:
"""Standardize gene names.
Parameters
----------
raw_string :
Name of the agent in the GNBR dataset.
db_id :
Entrez identifier of the agent.
Returns
-------
:
A standardized Agent object.
"""
# If neither a name nor a DB ID is given, we return empty
if pd.isna(db_id) and pd.isna(raw_string):
return []
# We add TEXT to db_refs if there is a raw_string
db_refs = {'TEXT': raw_string} if not pd.isna(raw_string) else {}
# In this case we know that there is no db_id but we have raw_string that
# we can use as a name and we return with that agent
if pd.isna(db_id):
return [Agent(raw_string, db_refs=db_refs)]
# Otherwise we have a db_id that we can process
else:
agents = []
for single_db_id in db_id.split(';'):
single_db_refs = deepcopy(db_refs)
name = raw_string if not pd.isna(raw_string) else single_db_id
if entrez_pattern.match(single_db_id):
single_db_refs['EGID'] = single_db_id
else:
match = entrez_with_tax_pattern.match(single_db_id)
if not match:
raise ValueError('Unexpected gene identifier: %s'
% single_db_id)
single_db_refs['EGID'] = match.groups()[0]
agents.append(get_standard_agent(name, single_db_refs))
return agents
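# Illustrative note (hedged): for raw text "TP53" with the single Entrez ID
# "7157", get_std_gene is expected to return one Agent whose db_refs carry
# {'TEXT': 'TP53', 'EGID': '7157'}; semicolon-separated IDs yield one Agent each.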
def get_std_chemical(raw_string: str, db_id: str) -> List[Agent]:
"""Standardize chemical names.
Parameters
----------
raw_string :
Name of the agent in the GNBR dataset.
db_id :
        Chemical identifier of the agent (a CHEBI or MESH ID).
Returns
-------
:
        A list of standardized Agent objects.
"""
# If neither a name nor a DB ID is given, we return empty
if pd.isna(db_id) and pd.isna(raw_string):
return []
# We add TEXT to db_refs if there is a raw_string
db_refs = {'TEXT': raw_string} if not pd.isna(raw_string) else {}
# In this case we know that there is no db_id but we have raw_string that
# we can use as a name and we return with that agent
if pd.isna(db_id):
return [Agent(raw_string, db_refs=db_refs)]
# Otherwise we have a db_id that we can process
else:
agents = []
for single_db_id in db_id.split('|'):
single_db_refs = deepcopy(db_refs)
name = raw_string if not pd.isna(raw_string) else single_db_id
if cheby_pattern.match(single_db_id):
single_db_refs['CHEBI'] = single_db_id
elif mesh_pattern.match(single_db_id):
mesh_id = single_db_id[5:]
# There are often non-existent MESH IDs here for some reason
# that can be filtered out with this technique
if not mesh_client.get_mesh_name(mesh_id, offline=True):
continue
single_db_refs['MESH'] = mesh_id
elif mesh_no_prefix_pattern.match(single_db_id):
mesh_id = single_db_id
# There are often non-existent MESH IDs here for some reason
# that can be filtered out with this technique
if not mesh_client.get_mesh_name(mesh_id, offline=True):
continue
single_db_refs['MESH'] = single_db_id
else:
raise ValueError('Unexpected chemical identifier: %s'
% single_db_id)
agents.append(get_standard_agent(name, single_db_refs))
return agents
def get_std_disease(raw_string: str, db_id: str) -> List[Agent]:
"""Standardize disease names.
Parameters
----------
raw_string :
Name of the agent in the GNBR dataset.
db_id :
        Disease identifier of the agent (an OMIM or MESH ID).
Returns
-------
:
        A list of standardized Agent objects.
"""
agents = []
db_refs = {'TEXT': raw_string} if not pd.isna(raw_string) else {}
name = raw_string if not pd.isna(raw_string) else db_id
if pd.isna(db_id):
pass
elif omim_no_prefix_pattern.match(db_id):
db_refs['OMIM'] = db_id
elif omim_pattern.match(db_id):
db_refs['OMIM'] = db_id[5:]
elif mesh_no_prefix_pattern.match(db_id):
db_refs['MESH'] = db_id
elif mesh_pattern.match(db_id):
db_refs['MESH'] = db_id[5:]
else:
raise ValueError('Unexpected disease identifier: %s' % db_id)
agents.append(get_standard_agent(name, db_refs))
return agents
def get_evidence(row: pd.Series) -> Evidence:
"""Return evidence for a Statement.
Parameters
----------
row :
Currently investigated row of the dataframe.
Returns
-------
:
Evidence object with the source_api, the PMID and the original
sentence.
"""
pmid = str(row['id']) if row['id'] else None
evidence = Evidence(source_api='gnbr',
pmid=pmid,
text=row['sentence'],
text_refs={'PMID': pmid})
return evidence
|
examples/fall_back_on_default.py | snuderl/apischema | 118 | 12628750 | from dataclasses import dataclass, field
from pytest import raises
from apischema import ValidationError, deserialize
from apischema.metadata import fall_back_on_default
@dataclass
class Foo:
bar: str = "bar"
baz: str = field(default="baz", metadata=fall_back_on_default)
with raises(ValidationError):
deserialize(Foo, {"bar": 0})
assert deserialize(Foo, {"bar": 0}, fall_back_on_default=True) == Foo()
assert deserialize(Foo, {"baz": 0}) == Foo()
|
custom_components/unifiprotect/button.py | mjdyson/unifiprotect | 546 | 12628763 | <filename>custom_components/unifiprotect/button.py<gh_stars>100-1000
"""Support for Ubiquiti's UniFi Protect NVR."""
from __future__ import annotations
import logging
from typing import Callable, Sequence
from homeassistant.components.button import ButtonDeviceClass, ButtonEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import Entity
from pyunifiprotect.data.base import ProtectAdoptableDeviceModel
from .const import DEVICES_THAT_ADOPT, DOMAIN
from .data import ProtectData
from .entity import ProtectDeviceEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: Callable[[Sequence[Entity]], None],
) -> None:
"""Discover devices on a UniFi Protect NVR."""
data: ProtectData = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
[
ProtectButton(
data,
device,
)
for device in data.get_by_types(DEVICES_THAT_ADOPT)
]
)
class ProtectButton(ProtectDeviceEntity, ButtonEntity):
"""A Ubiquiti UniFi Protect Reboot button."""
def __init__(
self,
data: ProtectData,
device: ProtectAdoptableDeviceModel,
):
"""Initialize an UniFi camera."""
super().__init__(data, device)
self._attr_name = f"{self.device.name} Reboot Device"
self._attr_entity_registry_enabled_default = False
self._attr_device_class = ButtonDeviceClass.RESTART
@callback
async def async_press(self) -> None:
"""Press the button."""
_LOGGER.debug("Rebooting %s with id %s", self.device.model, self.device.id)
await self.device.reboot()
|
app/quicktime.py | IDAPluginProject/ida-minsc | 211 | 12628768 | <reponame>IDAPluginProject/ida-minsc
'''QuickTime stuff'''
EXPORT = [ 'nameDispatch', 'nameAllDispatches' ]
import idc,idautils
import function,database
import app
def nextMnemonic(ea, mnem, maxaddr=0xc0*0x1000000):
res = idc.print_insn_mnem(ea)
if res == "": return idc.BADADDR
if res == mnem: return ea
return nextMnemonic( idc.next_head(ea, maxaddr), mnem, maxaddr )
def prevMnemonic(ea, mnem, minaddr=0):
res = idc.print_insn_mnem(ea)
#print("%x -> %s"% (ea, res))
if res == "": return idc.BADADDR
if res == mnem: return ea
return prevMnemonic( idc.prev_head(ea, minaddr), mnem, minaddr )
def getMinorDispatchTableAddress(ea):
"""find address of last lea in function"""
start = idc.get_func_attr(ea, idc.FUNCATTR_START)
end = idc.prev_head( idc.get_func_attr(ea, idc.FUNCATTR_END), start)
res = prevMnemonic(end, 'lea', start)
assert res != idc.BADADDR
return idc.get_operand_value(res, 1)
def getMajorDispatchTableAddress():
"""find quicktime major dispatch table"""
res = idc.get_name_ea_simple('theQuickTimeDispatcher')
res = nextMnemonic(res, 'lea', idc.get_func_attr(res, idc.FUNCATTR_END))
assert res != idc.BADADDR
return idc.get_operand_value(res, 1)
def resolveDispatcher(code):
major = (code & 0x00ff0000) >> 0x10
minor = code & 0xff00ffff
res = getMajorDispatchTableAddress() + major*8
majorFlag = idc.get_wide_dword(res)
majorAddress = idc.get_wide_dword(res+4)
if majorFlag != 0:
return majorAddress + (minor*0x10)
#print("%x"% getMinorDispatchTableAddress(majorAddress))
#print("resolved by 0x%x(%x)"% (majorAddress, minor))
return majorAddress
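# Layout note for resolveDispatcher above: the selector packs the major index in
# byte 2 ((code & 0x00ff0000) >> 16) and the minor in the remaining bits; each
# major table entry is 8 bytes (a flag dword followed by an address dword), and
# when the flag is set the minor selector indexes that table in 0x10-byte strides.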
def getDispatchCode(ea):
# get dispatch code out of an instruction
first, second = (idc.print_operand(ea, 0), idc.get_operand_value(ea, 1))
if first == 'eax':
return second
raise ValueError("Search resulted in address %08x, but instruction '%s' does fulfill requested constraints"% (ea, idc.print_insn_mnem(ea)))
def FindLastAssignment(ea, register):
start,end = database.guessrange(ea)
while ea > start:
ea = database.prev(ea)
m = idc.print_insn_mnem(ea)
r = idc.print_operand(ea, 0)
if m == 'mov' and r == register:
return ea
continue
raise ValueError('FindLastAssignment(0x%x, %s) Found no matches'% (ea, register))
def nameDispatch(address):
'''Name the dispatch function at the specified address in quicktime.qts'''
try:
start, end = function.range(address)
except ValueError:
print('%x making a function'% address)
function.make(address)
start, end = function.range(address)
try:
ea = FindLastAssignment(address, 'eax')
code = getDispatchCode(ea)
except ValueError:
print('%08x - Unable to find dispatch code'% address)
return
ofs = database.getoffset(start)
function.setName(start, 'dispatch_%08x_%x'% (code, ofs))
function.tag(start, 'code', hex(code))
function.tag(start, 'group', 'dispatch')
try:
function.tag(start, 'realname', app.__quicktime.qt_fv_list[code])
except KeyError:
pass
try:
function.tag(start, 'address', hex(resolveDispatcher(code)), repeatable=True)
except:
pass
def nameAllDispatches(ea):
'''Using the address of {theQuickTimeDispatcher}, name and tag all discovered dispatch calls in quicktime.qts'''
for address in idautils.DataRefsTo(ea):
nameDispatch(address)
return
|
autovideo/utils/axolotl_utils.py | datamllab/autovideo | 233 | 12628784 | <reponame>datamllab/autovideo
import os
import uuid
import shutil
import pathlib
import pandas as pd
from d3m.metadata.problem import TaskKeyword, PerformanceMetric
from d3m.metadata import base as metadata_base
from axolotl.utils import pipeline as pipeline_utils
from axolotl.utils import data_problem
from axolotl.backend.simple import SimpleRunner
def generate_classification_dataset_problem(df, target_index, media_dir):
    if not os.path.isabs(media_dir):
media_dir = os.path.abspath(media_dir)
dataset, problem = data_problem.generate_dataset_problem(df,
target_index=target_index,
media_dir=media_dir,
performance_metrics=[{'metric': PerformanceMetric.ACCURACY}],
task_keywords=[TaskKeyword.CLASSIFICATION,])
return dataset, problem
def generate_dataset(df, target_index, media_dir):
if not os.path.isabs(media_dir):
media_dir = os.path.abspath(media_dir)
dataset = data_problem.import_input_data(df,
y=None,
target_index=target_index,
media_dir=media_dir)
return dataset
def generate_classification_problem(dataset):
problem = data_problem.generate_problem_description(dataset,
performance_metrics=[{'metric': PerformanceMetric.ACCURACY}],
task_keywords=[TaskKeyword.CLASSIFICATION,])
return problem
def fit(train_dataset, train_media_dir, target_index, pipeline):
train_dataset = generate_dataset(train_dataset, target_index, train_media_dir)
problem = generate_classification_problem(train_dataset)
# Start backend
backend = SimpleRunner(random_seed=0)
# Fit
pipeline_result = backend.fit_pipeline(problem, pipeline, [train_dataset])
if pipeline_result.status == "ERRORED":
raise pipeline_result.error
# Fetch the runtime and dataaset metadata
fitted_pipeline = {
'runtime': backend.fitted_pipelines[pipeline_result.fitted_pipeline_id],
'dataset_metadata': train_dataset.metadata
}
return pipeline_result.output, fitted_pipeline
def produce(test_dataset, test_media_dir, target_index, fitted_pipeline):
test_dataset['label'] = -1
test_dataset = generate_dataset(test_dataset, target_index, test_media_dir)
test_dataset.metadata = fitted_pipeline['dataset_metadata']
metadata_dict = test_dataset.metadata.query(('learningData', metadata_base.ALL_ELEMENTS, 1))
metadata_dict = {key: metadata_dict[key] for key in metadata_dict}
metadata_dict['location_base_uris'] = [pathlib.Path(os.path.abspath(test_media_dir)).as_uri()+'/']
test_dataset.metadata = test_dataset.metadata.update(('learningData', metadata_base.ALL_ELEMENTS, 1), metadata_dict)
# Start backend
backend = SimpleRunner(random_seed=0)
_id = str(uuid.uuid4())
backend.fitted_pipelines[_id] = fitted_pipeline['runtime']
# Produce
pipeline_result = backend.produce_pipeline(_id, [test_dataset])
if pipeline_result.status == "ERRORED":
raise pipeline_result.error
return pipeline_result.output
def fit_produce(train_dataset, train_media_dir, test_dataset, test_media_dir, target_index, pipeline):
_, fitted_pipeline = fit(train_dataset, train_media_dir, target_index, pipeline)
output = produce(test_dataset, test_media_dir, target_index, fitted_pipeline)
return output
def produce_by_path(fitted_pipeline, video_path):
tmp_dir = os.path.join("tmp", str(uuid.uuid4()))
os.makedirs(tmp_dir, exist_ok=True)
video_name = video_path.split('/')[-1]
shutil.copy(video_path, tmp_dir)
dataset = {
'd3mIndex': [0],
'video': [video_name]
}
dataset = pd.DataFrame(data=dataset)
# Produce
predictions = produce(test_dataset=dataset,
test_media_dir=tmp_dir,
target_index=2,
fitted_pipeline=fitted_pipeline)
shutil.rmtree(tmp_dir)
return predictions
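# Hedged usage sketch (variable names and the video path are illustrative only;
# a pipeline object must be built elsewhere with the autovideo/axolotl utilities):
#     _, fitted = fit(train_df, train_media_dir, target_index=2, pipeline=pipeline)
#     preds = produce_by_path(fitted, "some_video.avi")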
|
gff/BCBio/__init__.py | bgruening/bcbb | 339 | 12628787 | """BCBio module
"""
|
kitti360scripts/devkits/convertOxtsPose/python/data.py | carloradice/kitti360Scripts | 214 | 12628800 | <reponame>carloradice/kitti360Scripts
import os
import numpy as np
def loadOxtsData(oxts_dir, frames=None):
''' reads GPS/IMU data from files to memory. requires base directory
(=sequence directory as parameter). if frames is not specified, loads all frames. '''
ts = []
if frames==None:
ts = loadTimestamps(oxts_dir)
oxts = []
for i in range(len(ts)):
if len(ts[i]):
try:
oxts.append(np.loadtxt(os.path.join(oxts_dir, 'data', '%010d.txt'%i)))
except:
oxts.append([])
else:
oxts.append([])
else:
if len(frames)>1:
k = 1
oxts = []
for i in range(len(frames)):
try:
oxts.append(np.loadtxt(os.path.join(oxts_dir, 'data', '%010d.txt'%k)))
except:
oxts.append([])
k=k+1
# no list for single value
else:
file_name = os.path.join(oxts_dir, 'data', '%010d.txt'%k)
try:
oxts = np.loadtxt(file_name)
except:
oxts = []
return oxts,ts
def loadTimestamps(ts_dir):
''' load timestamps '''
with open(os.path.join(ts_dir, 'timestamps.txt')) as f:
data=f.read().splitlines()
ts = [l.split(' ')[0] for l in data]
return ts
def loadPoses (pos_file):
''' load system poses '''
data = np.loadtxt(pos_file)
ts = data[:, 0].astype(np.int)
poses = np.reshape(data[:, 1:], (-1, 3, 4))
poses = np.concatenate((poses, np.tile(np.array([0, 0, 0, 1]).reshape(1,1,4),(poses.shape[0],1,1))), 1)
return ts, poses
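# Format note: loadPoses expects one pose per line, a frame index followed by a
# flattened 3x4 rigid-body matrix; the rows are reshaped and padded with
# [0, 0, 0, 1] to form 4x4 homogeneous transforms.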
|
DeepBind/custom_keras_objects.py | Luma-1994/lama | 137 | 12628805 | import tensorflow as tf
OBJECTS = {"tf": tf}
|
Training Utility/somatictrainer/util.py | ZackFreedman/Somatic | 328 | 12628828 | import logging
import time
import numpy as np
import quaternion
logging.basicConfig(level=logging.INFO)
def lookRotation(forward, up):
"""
Quaternion that rotates world to face Forward, while keeping orientation dictated by Up
See https://answers.unity.com/questions/467614/what-is-the-source-code-of-quaternionlookrotation.html
:type forward: np.array
:type up: np.array
"""
up /= np.linalg.norm(up)
vector = forward / np.linalg.norm(forward)
vector2 = np.cross(up, vector)
vector2 /= np.linalg.norm(vector2)
vector3 = np.cross(vector, vector2)
m00 = vector2[0]
m01 = vector2[1]
m02 = vector2[2]
m10 = vector3[0]
m11 = vector3[1]
m12 = vector3[2]
m20 = vector[0]
m21 = vector[1]
m22 = vector[2]
num8 = (m00 + m11) + m22
output = quaternion.quaternion()
if num8 > 0:
num = np.sqrt(num8 + 1)
output.w = num / 2
num = 0.5 / num
output.x = (m12 - m21) * num
output.y = (m20 - m02) * num
output.z = (m01 - m10) * num
elif m00 >= m11 and m00 >= m22:
num7 = np.sqrt((m00 + 1) - m11 - m22)
num4 = 0.5 / num7
output.x = num7 / 2
output.y = (m01 + m10) * num4
output.z = (m02 + m20) * num4
output.w = (m12 - m21) * num4
elif m11 > m22:
num6 = np.sqrt(m11 + 1 - m00 - m22)
num3 = 0.5 / num6
output.x = (m10 + m01) * num3
output.y = num6 / 2
output.z = (m21 + m12) * num3
output.w = (m20 - m02) * num3
else:
num5 = np.sqrt(m22 + 1 - m00 - m11)
num2 = 0.5 / num5
output.x = (m20 + m02) * num2
output.y = (m21 + m12) * num2
output.z = num5 / 2
output.w = (m01 - m10) * num2
return output
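# Note: lookRotation mirrors Unity's Quaternion.LookRotation (see the link in the
# docstring); it builds an orthonormal basis (right, recomputed up, forward) from
# the two input vectors and converts that rotation matrix to a quaternion,
# branching on the largest diagonal term for numerical stability.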
def custom_interpolate(value, in_min, in_max, out_min, out_max, clamp=False):
interpolated = (value - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
if clamp:
return np.clip(interpolated, out_min, out_max)
else:
return interpolated
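# Example: custom_interpolate(5, 0, 10, 0, 100) == 50.0; with clamp=True the
# result is limited to the [out_min, out_max] range.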
def custom_euler(q):
# h = np.arctan2(np.square(q.x) - np.square(q.y) - np.square(q.z) + np.square(q.w), 2 * (q.x * q.y + q.z * q.w))
# h = np.arctan2(2 * (q.x * q.y + q.z * q.w), np.square(q.x) - np.square(q.y) - np.square(q.z) + np.square(q.w))
# p = np.arcsin(np.clip(-2 * (q.x * q.z - q.y * q.w), -1, 1))
# r = np.arctan2(np.square(q.z) + np.square(q.w) - np.square(q.x) - np.square(q.y), 2 * (q.x * q.w + q.y * q.z))
# r = np.arctan2(2 * (q.x * q.w + q.y * q.z), np.square(q.z) + np.square(q.w) - np.square(q.x) - np.square(q.y))
h = np.arctan2(2 * (q.w * q.z + q.x * q.y), 1 - 2 * (np.square(q.y) + np.square(q.z)))
p = np.arcsin(2 * (q.w * q.y - q.z * q.x))
r = np.arctan2(2 * (q.w * q.x + q.y * q.z), 1 - 2 * (np.square(q.x) + np.square(q.z)))
if h < 0:
h += 2 * np.pi
return h, p, r
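# Convention note: custom_euler returns (heading, pitch, roll) in radians with
# heading wrapped into [0, 2*pi); custom_euler_to_quat below is the inverse
# mapping, composing the quaternion from half-angle sines and cosines.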
def custom_euler_to_quat(yaw, pitch, roll):
cy = np.cos(yaw * 0.5)
sy = np.sin(yaw * 0.5)
cp = np.cos(pitch * 0.5)
sp = np.sin(pitch * 0.5)
cr = np.cos(roll * 0.5)
sr = np.sin(roll * 0.5)
q = quaternion.quaternion()
q.w = cr * cp * cy + sr * sp * sy
q.x = sr * cp * cy - cr * sp * sy
q.y = cr * sp * cy + sr * cp * sy
q.z = cr * cp * sy - sr * sp * cy
return q
def process_samples(samples: np.array, desired_length):
logger = logging.getLogger('process_samples()')
benchmark = time.perf_counter()
def clock_that_step(description, benchmark):
logger.debug('{} took {:.0f} ms'.format(description.capitalize(), (time.perf_counter() - benchmark) * 1000))
return time.perf_counter()
if not len(samples) > 1:
raise AttributeError('Sample list is empty')
logger.debug('Normalizing:\n{0!r}'.format(samples))
# Strip redundant bearings
unique_bearings = [samples[0]]
for index, bearing in enumerate(samples):
if not index:
continue
if np.isclose(bearing[0], samples[index - 1, 0]) and np.isclose(bearing[1], samples[index - 1, 1]):
logger.debug('Discarding redundant point ({:.3f}, {:.3f})'.format(bearing[0], bearing[1]))
else:
unique_bearings.append(bearing)
samples = np.array(unique_bearings)
benchmark = clock_that_step('Stripping dupes', benchmark)
# Remap standardized bearings so gestures are the same size
yaw_min = min(samples[:, 0])
yaw_max = max(samples[:, 0])
pitch_min = min(samples[:, 1])
pitch_max = max(samples[:, 1])
magnitude = np.linalg.norm([yaw_max - yaw_min, pitch_max - pitch_min])
fudge_factor = 1 / 10
logger.debug('Yaw min: {:.3f} Pitch min: {:.3f} Yaw max: {:.3f} Pitch max: {:.3f} Trim length: {:.3f}'.format(
yaw_min, pitch_min, yaw_max, pitch_max, magnitude * fudge_factor))
early_crap_count = 0
for i in range(1, len(samples)):
if np.linalg.norm([samples[i, 0] - samples[0, 0],
samples[i, 1] - samples[0, 1]]) > magnitude * fudge_factor:
logger.debug('Done stripping leading points - ({:.3f}, {:.3f}) is far enough from start point '
'({:.3f}, {:.3f}). Had to be {:.3f} units away, and is {:.3f}.'.format(
samples[i, 0], samples[i, 1], samples[0, 0], samples[0, 1],
magnitude * fudge_factor,
np.linalg.norm([samples[i, 0] - samples[0, 0],
samples[i, 1] - samples[0, 1]])))
break
else:
logger.debug('Stripping leading point ({:.3f}, {:.3f}) - too close to start point ({:.3f}, {:.3f}). '
'Must be {:.3f} units away, but is {:.3f}.'.format(
samples[i, 0], samples[i, 1], samples[0, 0], samples[0, 1],
magnitude * fudge_factor,
np.linalg.norm([samples[i, 0] - samples[0, 0],
samples[i, 1] - samples[0, 1]])))
early_crap_count += 1
start_point = samples[0]
trimmed = samples[early_crap_count + 1:].tolist()
samples = np.array([start_point] + trimmed)
benchmark = clock_that_step('Trimming early slop', benchmark)
# logger.debug('Early crap stripped: {}'.format(samples))
late_crap_count = 0
for i in range(2, len(samples)):
if np.linalg.norm([samples[-i, 0] - samples[- 1, 0],
samples[-i, 1] - samples[- 1, 1]]) > magnitude * fudge_factor:
logger.debug('Done stripping trailing points - ({:.3f}, {:.3f}) is far enough from endpoint '
'({:.3f}, {:.3f}). Had to be {:.3f} units away, and is {:.3f}.'.format(
samples[-i, 0], samples[-i, 1], samples[-1, 0], samples[-1, 1],
magnitude * fudge_factor,
np.linalg.norm([samples[-i, 0] - samples[- 1, 0],
samples[-i, 1] - samples[- 1, 1]])))
break
else:
logger.debug('Stripping trailing point ({:.3f}, {:.3f}) - too close to endpoint ({:.3f}, {:.3f}). '
'Must be {:.3f} units away, but is {:.3f}.'.format(
samples[-i, 0], samples[-i, 1], samples[-1, 0], samples[-1, 1],
magnitude * fudge_factor,
np.linalg.norm([samples[-i, 0] - samples[- 1, 0],
samples[-i, 1] - samples[- 1, 1]])))
late_crap_count += 1
if late_crap_count:
endpoint = samples[-1]
trimmed = samples[:(late_crap_count + 1) * -1].tolist()
samples = np.array(trimmed + [endpoint])
logger.debug('Late crap stripped: {}'.format(samples))
benchmark = clock_that_step('Trimming late slop', benchmark)
# Standardize bearings 'curve' to evenly-spaced points
cumulative_segment_lengths = [0]
for index, sample in enumerate(samples):
if index == 0:
continue
segment_length = np.linalg.norm([sample[0] - samples[index - 1][0], sample[1] - samples[index - 1][1]])
cumulative_segment_lengths.append(segment_length + cumulative_segment_lengths[index - 1])
logger.debug('Segment ending in point {} length {:.3f} Cumul: {:.3f}'.format(
index, segment_length, cumulative_segment_lengths[index]))
curve_length = cumulative_segment_lengths[-1]
target_segment_length = curve_length / (desired_length - 1)
benchmark = clock_that_step('Calculating segment lengths', benchmark)
# logger.debug(
# 'Segment lengths: {} - {} segments, {} points'.format(segment_lengths, len(segment_lengths), len(samples)))
logger.debug('Total length: {:.2f} Target segment length: {:.4f}'.format(curve_length, target_segment_length))
standardized_bearings = [samples[0]]
first_longer_sample = 0
for i in range(1, desired_length):
target_length = i * target_segment_length
logger.debug('Looking to place a point at {:.3f} units along curve'.format(target_length))
if cumulative_segment_lengths[first_longer_sample] > target_length:
logger.debug('Previous point at {:.3f} units along curve still works'.format(
cumulative_segment_lengths[first_longer_sample]))
else:
while cumulative_segment_lengths[first_longer_sample] < target_length \
and not np.isclose(cumulative_segment_lengths[first_longer_sample], target_length):
logger.debug(
'Cumulative length of {:.3f} is too short - advancing to segment ending at point {}'.format(
cumulative_segment_lengths[first_longer_sample], first_longer_sample))
first_longer_sample += 1
if first_longer_sample >= len(cumulative_segment_lengths):
raise AttributeError("Entire line isn't long enough?!")
low_point = samples[first_longer_sample - 1]
high_point = samples[first_longer_sample]
position_along_segment = ((target_length - cumulative_segment_lengths[first_longer_sample - 1]) /
(cumulative_segment_lengths[first_longer_sample]
- cumulative_segment_lengths[first_longer_sample - 1]))
standardized_point_x = low_point[0] + position_along_segment * (high_point[0] - low_point[0])
standardized_point_y = low_point[1] + position_along_segment * (high_point[1] - low_point[1])
standardized_point = [standardized_point_x, standardized_point_y]
logger.debug('Placed point {:.3f} units ({:.0f}%) along the {:.3f} line between {} and {} ==> {}'
.format(target_length - cumulative_segment_lengths[first_longer_sample - 1],
position_along_segment * 100,
cumulative_segment_lengths[first_longer_sample]
- cumulative_segment_lengths[first_longer_sample - 1],
low_point, high_point, standardized_point))
standardized_bearings.append(standardized_point)
logger.debug('Done interpolating. Scaling into 0-1 fractional dims')
benchmark = clock_that_step('Interpolation', benchmark)
# Move lowest and leftest points to the edge
standardized_bearings = [[y - yaw_min, p - pitch_min] for y, p in standardized_bearings]
# Rescale, preserving proportions
total_width = yaw_max - yaw_min
total_height = pitch_max - pitch_min
standardized_bearings = np.array([[custom_interpolate(y, 0, max(total_width, total_height), 0, 1),
custom_interpolate(p, 0, max(total_width, total_height), 0, 1)]
for y, p in standardized_bearings])
clock_that_step('Resizing', benchmark)
return standardized_bearings
def wrapped_delta(old, new):
delta = old - new
if delta > np.pi:
delta -= 2 * np.pi
elif delta < -np.pi:
delta += 2 * np.pi
return delta
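# wrapped_delta wraps angular differences into the [-pi, pi] range, so bearings
# that cross the 0/2*pi seam do not produce spurious full-circle jumps.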
def bearing_delta(old, new):
return np.array([wrapped_delta(old[0], new[0]),
wrapped_delta(old[1], new[1])])
# This is taken from https://github.com/pyserial/pyserial/issues/216#issuecomment-369414522
class ReadLine:
def __init__(self, s):
self.buf = bytearray()
self.s = s # Serial object
def readline(self):
timeout = self.s.timeout
self.s.timeout = 0.1
i = self.buf.find(b"\n")
if i >= 0:
r = self.buf[:i + 1]
self.buf = self.buf[i + 1:]
self.s.timeout = timeout
return r
while True:
i = max(1, min(2048, self.s.in_waiting))
data = self.s.read(i)
i = data.find(b"\n")
if i >= 0:
r = self.buf + data[:i + 1]
self.buf[0:] = data[i + 1:]
self.s.timeout = timeout
return r
else:
self.buf.extend(data)
|
transferLearning_MoleculeNet_PPI/bio/pretrain_graphcl.py | Shen-Lab/GraphCL | 275 | 12628832 | <reponame>Shen-Lab/GraphCL<gh_stars>100-1000
import argparse
from loader import BioDataset_aug
from torch_geometric.data import DataLoader
from torch_geometric.nn.inits import uniform
from torch_geometric.nn import global_mean_pool
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
import numpy as np
from model import GNN
from sklearn.metrics import roc_auc_score
import pandas as pd
from copy import deepcopy
def cycle_index(num, shift):
arr = torch.arange(num) + shift
arr[-shift:] = torch.arange(shift)
return arr
class Discriminator(nn.Module):
def __init__(self, hidden_dim):
super(Discriminator, self).__init__()
self.weight = nn.Parameter(torch.Tensor(hidden_dim, hidden_dim))
self.reset_parameters()
def reset_parameters(self):
size = self.weight.size(0)
uniform(size, self.weight)
def forward(self, x, summary):
h = torch.matmul(summary, self.weight)
return torch.sum(x*h, dim = 1)
class graphcl(nn.Module):
def __init__(self, gnn):
super(graphcl, self).__init__()
self.gnn = gnn
self.pool = global_mean_pool
self.projection_head = nn.Sequential(nn.Linear(300, 300), nn.ReLU(inplace=True), nn.Linear(300, 300))
def forward_cl(self, x, edge_index, edge_attr, batch):
x = self.gnn(x, edge_index, edge_attr)
x = self.pool(x, batch)
x = self.projection_head(x)
return x
def loss_cl(self, x1, x2):
T = 0.1
batch_size, _ = x1.size()
x1_abs = x1.norm(dim=1)
x2_abs = x2.norm(dim=1)
sim_matrix = torch.einsum('ik,jk->ij', x1, x2) / torch.einsum('i,j->ij', x1_abs, x2_abs)
sim_matrix = torch.exp(sim_matrix / T)
pos_sim = sim_matrix[range(batch_size), range(batch_size)]
loss = pos_sim / (sim_matrix.sum(dim=1) - pos_sim)
loss = - torch.log(loss).mean()
return loss
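# Note: loss_cl above is the NT-Xent / InfoNCE contrastive objective: cosine
# similarities between the two augmented views are scaled by temperature T, the
# diagonal entries act as positive pairs, and every other graph in the batch
# serves as a negative.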
def train(args, model, device, dataset, optimizer):
dataset.aug = "none"
dataset1 = dataset.shuffle()
dataset2 = deepcopy(dataset1)
dataset1.aug, dataset1.aug_ratio = args.aug1, args.aug_ratio1
dataset2.aug, dataset2.aug_ratio = args.aug2, args.aug_ratio2
loader1 = DataLoader(dataset1, batch_size=args.batch_size, num_workers = args.num_workers, shuffle=False)
loader2 = DataLoader(dataset2, batch_size=args.batch_size, num_workers = args.num_workers, shuffle=False)
model.train()
train_acc_accum = 0
train_loss_accum = 0
for step, batch in enumerate(tqdm(zip(loader1, loader2), desc="Iteration")):
batch1, batch2 = batch
batch1 = batch1.to(device)
batch2 = batch2.to(device)
optimizer.zero_grad()
x1 = model.forward_cl(batch1.x, batch1.edge_index, batch1.edge_attr, batch1.batch)
x2 = model.forward_cl(batch2.x, batch2.edge_index, batch2.edge_attr, batch2.batch)
loss = model.loss_cl(x1, x2)
loss.backward()
optimizer.step()
train_loss_accum += float(loss.detach().cpu().item())
# acc = (torch.sum(positive_score > 0) + torch.sum(negative_score < 0)).to(torch.float32)/float(2*len(positive_score))
acc = torch.tensor(0)
train_acc_accum += float(acc.detach().cpu().item())
return train_acc_accum/(step+1), train_loss_accum/(step+1)
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch implementation of pre-training of graph neural networks')
parser.add_argument('--device', type=int, default=0,
help='which gpu to use if any (default: 0)')
parser.add_argument('--batch_size', type=int, default=256,
help='input batch size for training (default: 256)')
parser.add_argument('--epochs', type=int, default=100,
help='number of epochs to train (default: 100)')
parser.add_argument('--lr', type=float, default=0.001,
help='learning rate (default: 0.001)')
parser.add_argument('--decay', type=float, default=0,
help='weight decay (default: 0)')
parser.add_argument('--num_layer', type=int, default=5,
help='number of GNN message passing layers (default: 5).')
parser.add_argument('--emb_dim', type=int, default=300,
help='embedding dimensions (default: 300)')
parser.add_argument('--dropout_ratio', type=float, default=0,
help='dropout ratio (default: 0)')
parser.add_argument('--JK', type=str, default="last",
help='how the node features across layers are combined. last, sum, max or concat')
parser.add_argument('--gnn_type', type=str, default="gin")
parser.add_argument('--model_file', type = str, default = '', help='filename to output the pre-trained model')
parser.add_argument('--seed', type=int, default=0, help = "Seed for splitting dataset.")
parser.add_argument('--num_workers', type=int, default = 4, help='number of workers for dataset loading')
parser.add_argument('--aug1', type=str, default = 'none')
parser.add_argument('--aug_ratio1', type=float, default = 0.2)
parser.add_argument('--aug2', type=str, default = 'none')
parser.add_argument('--aug_ratio2', type=float, default = 0.2)
args = parser.parse_args()
torch.manual_seed(0)
np.random.seed(0)
device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
#set up dataset
root_unsupervised = 'dataset/unsupervised'
dataset = BioDataset_aug(root_unsupervised, data_type='unsupervised')
print(dataset)
#set up model
gnn = GNN(args.num_layer, args.emb_dim, JK = args.JK, drop_ratio = args.dropout_ratio, gnn_type = args.gnn_type)
model = graphcl(gnn)
model.to(device)
#set up optimizer
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.decay)
print(optimizer)
for epoch in range(1, args.epochs):
print("====epoch " + str(epoch))
train_acc, train_loss = train(args, model, device, dataset, optimizer)
print(train_acc)
print(train_loss)
if epoch % 20 == 0:
torch.save(model.gnn.state_dict(), "./models_graphcl/graphcl_" + str(epoch) + ".pth")
if __name__ == "__main__":
main()
|
glasses/utils/Tracker.py | rentainhe/glasses | 271 | 12628864 | <gh_stars>100-1000
import torch.nn as nn
from torch import Tensor
from dataclasses import dataclass, field
from typing import List
@dataclass
class Tracker:
"""This class tracks all the operations of a given module by performing a forward pass.
Example:
>>> import torch
>>> import torch.nn as nn
>>> from glasses.utils import Tracker
>>> model = nn.Sequential(nn.Linear(1, 64), nn.ReLU(), nn.Linear(64,10), nn.ReLU())
>>> tr = Tracker(model)
>>> tr(x)
>>> print(tr.traced) # all operations
>>> print('-----')
>>> print(tr.parametrized) # all operations with learnable params
outputs
``[Linear(in_features=1, out_features=64, bias=True),
ReLU(),
Linear(in_features=64, out_features=10, bias=True),
ReLU()]
-----
[Linear(in_features=1, out_features=64, bias=True),
Linear(in_features=64, out_features=10, bias=True)]``
"""
module: nn.Module
traced: List[nn.Module] = field(default_factory=list)
handles: list = field(default_factory=list)
def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
has_not_submodules = (
len(list(m.modules())) == 1
or isinstance(m, nn.Conv2d)
or isinstance(m, nn.BatchNorm2d)
)
if has_not_submodules:
self.traced.append(m)
def __call__(self, x: Tensor) -> Tensor:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook))
self.module(x)
list(map(lambda x: x.remove(), self.handles))
return self
@property
def parametrized(self):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
|
setup.py | szaydel/pickledb | 668 | 12628882 | <gh_stars>100-1000
"""
pickleDB
--------
pickleDB is a lightweight, fast, and simple database based on Python's own
json module. And it's BSD licensed!
pickleDB is Fun
```````````````
::
>>> import pickledb
>>> db = pickledb.load('test.db', False)
>>> db.set('key', 'value')
>>> db.get('key')
'value'
>>> db.dump()
True
And Easy to Install
```````````````````
::
$ pip install pickledb
Links
`````
* `website <https://patx.github.io/pickledb>`_
* `documentation <http://patx.github.io/pickledb/commands.html>`_
* `pypi <http://pypi.python.org/pypi/pickleDB>`_
* `github repo <https://github.com/patx/pickledb>`_
Latest Release Notes (version: 0.9)
```````````````````````````````````
* Now load() uses *'rt'* mode instead of 'rb' (0.9.2)
* Change lrem(name) to *lremlist(name)* (0.9)
* Add *lremvalue(name, value)* (0.9)
* Add load() option to use sigterm handler or not (0.9)
* All *keys* must now be strings (0.8)
* All *names* for lists must now be strings (0.8)
* All *names* for dicts must now be strings (0.8)
* The get(key) function now returns *False* instead of None if there is no key (0.8)
* Switched to Python's built in json module from simplejson (0.8.1)
"""
from distutils.core import setup
setup(name="pickleDB",
version="0.9.3",
description="A lightweight and simple database using json.",
long_description=__doc__,
author="<NAME>",
author_email="<EMAIL>",
license="three-clause BSD",
url="http://github.com/patx/pickledb",
classifiers = [
"Programming Language :: Python",
"License :: OSI Approved :: BSD License",
"Intended Audience :: Developers",
"Topic :: Database" ],
py_modules=['pickledb'],)
|
python/tests/sql/test_aggregate_functions.py | andreicovaliov/incubator-sedona | 747 | 12628887 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from shapely.geometry import Polygon
from tests import csv_point_input_location, union_polygon_input_location
from tests.test_base import TestBase
class TestConstructors(TestBase):
def test_st_envelope_aggr(self):
point_csv_df = self.spark.read.format("csv").\
option("delimiter", ",").\
option("header", "false").\
load(csv_point_input_location)
point_csv_df.createOrReplaceTempView("pointtable")
point_df = self.spark.sql("select ST_Point(cast(pointtable._c0 as Decimal(24,20)), cast(pointtable._c1 as Decimal(24,20))) as arealandmark from pointtable")
point_df.createOrReplaceTempView("pointdf")
boundary = self.spark.sql("select ST_Envelope_Aggr(pointdf.arealandmark) from pointdf")
coordinates = [
(1.1, 101.1),
(1.1, 1100.1),
(1000.1, 1100.1),
(1000.1, 101.1),
(1.1, 101.1)
]
polygon = Polygon(coordinates)
assert boundary.take(1)[0][0] == polygon
def test_st_union_aggr(self):
polygon_csv_df = self.spark.read.format("csv").\
option("delimiter", ",").\
option("header", "false").\
load(union_polygon_input_location)
polygon_csv_df.createOrReplaceTempView("polygontable")
polygon_csv_df.show()
polygon_df = self.spark.sql("select ST_PolygonFromEnvelope(cast(polygontable._c0 as Decimal(24,20)),cast(polygontable._c1 as Decimal(24,20)), cast(polygontable._c2 as Decimal(24,20)), cast(polygontable._c3 as Decimal(24,20))) as polygonshape from polygontable")
polygon_df.createOrReplaceTempView("polygondf")
polygon_df.show()
union = self.spark.sql("select ST_Union_Aggr(polygondf.polygonshape) from polygondf")
assert union.take(1)[0][0].area == 10100
|
icu_sources.py | plasticity-admin/supersqlite | 687 | 12628900 | icu_sources = [
'utypes.cpp',
'uloc.cpp',
'ustring.cpp',
'ucase.cpp',
'ubrk.cpp',
'brkiter.cpp',
'filteredbrk.cpp',
'ucharstriebuilder.cpp',
'uobject.cpp',
'resbund.cpp',
'servrbf.cpp',
'servlkf.cpp',
'serv.cpp',
'servnotf.cpp',
'servls.cpp',
'servlk.cpp',
'servslkf.cpp',
'stringtriebuilder.cpp',
'uvector.cpp',
'ustrenum.cpp',
'uenum.cpp',
'unistr.cpp',
'appendable.cpp',
'rbbi.cpp',
'rbbi_cache.cpp',
'cstring.cpp',
'umath.cpp',
'charstr.cpp',
'rbbidata.cpp',
'ustrfmt.cpp',
'ucharstrie.cpp',
'uloc_keytype.cpp',
'uhash.cpp',
'locdispnames.cpp',
'brkeng.cpp',
'dictionarydata.cpp',
'udataswp.cpp',
'uinvchar.cpp',
'uresbund.cpp',
'uresdata.cpp', # modified due to duplicate symbol `gEmptyString2`
'resource.cpp',
'locavailable.cpp',
'utrie2.cpp',
'ucol_swp.cpp',
'utrie_swap.cpp',
'schriter.cpp',
'uchriter.cpp',
'locid.cpp', # modified due to duplicate include `bytesinkutil.h`
'locbased.cpp',
'chariter.cpp',
'uvectr32.cpp',
'bytestrie.cpp',
'ustack.cpp',
'umutex.cpp',
'uniset.cpp', # modified due to duplicate symbol `compareUnicodeString2`
'stringpiece.cpp',
'locutil.cpp',
'unifilt.cpp',
'util.cpp', # modified due to duplicate symbol `BACKSLASH2`, `UPPER_U2`, and `LOWER_U2`
'bmpset.cpp',
'unifunct.cpp',
'unisetspan.cpp',
'uniset_props.cpp', # modified due to duplicate include `_dbgct2`
'patternprops.cpp',
'bytesinkutil.cpp', # modified due to duplicate include `bytesinkutil.h`
'dictbe.cpp',
'rbbirb.cpp',
'utext.cpp', # modified due to duplicate symbol `gEmptyString3`
'utf_impl.cpp',
'propsvec.cpp',
'locmap.cpp',
'loclikely.cpp',
'uloc_tag.cpp',
'ustrtrns.cpp',
'udatamem.cpp',
'putil.cpp',
'uhash_us.cpp',
'uprops.cpp',
'uchar.cpp', # modified due to duplicate symbol `_enumPropertyStartsRange2`
'parsepos.cpp',
'ruleiter.cpp',
'rbbitblb.cpp',
'edits.cpp',
'rbbinode.cpp',
'bytestream.cpp',
'rbbiscan.cpp',
'loadednormalizer2impl.cpp',
'characterproperties.cpp',
'locresdata.cpp',
'normalizer2impl.cpp', # modified due to duplicate include `bytesinkutil.h`
'normalizer2.cpp',
'rbbisetb.cpp',
'rbbistbl.cpp',
'unistr_case.cpp',
'unames.cpp', # modified due to duplicate symbol `DATA_TYPE2`
'propname.cpp',
'ustrcase.cpp',
'ustrcase_locale.cpp',
'ubidi.cpp',
'ucptrie.cpp',
'umutablecptrie.cpp', # modified due to duplicate symbol `getRange2` and `OVERFLOW2`
'cmemory.cpp',
'utrie2_builder.cpp', # modified due to duplicate symbol `writeBlock2`
'uscript.cpp',
'uscript_props.cpp',
'utrie.cpp', # modified due to duplicate symbol `equal_uint322` and `enumSameValue2`
'ucmndata.cpp',
'uarrsort.cpp',
'umapfile.cpp',
'ucln_cmn.cpp', # modified due to duplicate include `ucln_imp.h`
'uregex.cpp', # modified due to duplicate symbol `BACKSLASH3`
'ucol.cpp',
'coll.cpp', # modified due to duplicate symbol `gService2`, `getService2`, `initService2`, `hasService2`, `availableLocaleList2`
'collation.cpp',
'ucoleitr.cpp',
'rematch.cpp', # modified due to duplicate symbol `BACKSLASH4`
'regexcmp.cpp',
'repattrn.cpp',
'collationroot.cpp',
'ucol_res.cpp',
'collationbuilder.cpp',
'coleitr.cpp',
'sharedobject.cpp',
'collationdata.cpp',
'uiter.cpp',
'ucln_in.cpp', # modified due to duplicate symbol `copyright2` and duplicate include `ucln_imp.h`
'uniset_closure.cpp',
'unifiedcache.cpp', # modified due to duplicate symbol `gCacheInitOnce2`
'regexst.cpp',
'collationweights.cpp',
'caniter.cpp',
'collationiterator.cpp',
'collationfastlatin.cpp',
'collationtailoring.cpp',
'usetiter.cpp',
'collationdatareader.cpp',
'collationruleparser.cpp',
'collationdatabuilder.cpp',
'regeximp.cpp',
'collationsets.cpp',
'utf16collationiterator.cpp',
'uvectr64.cpp',
'rulebasedcollator.cpp',
'collationrootelements.cpp',
'ucol_sit.cpp', # modified due to duplicate symbol `internalBufferSize2`
'ulist.cpp',
'uset.cpp',
'regextxt.cpp',
'ucharstrieiterator.cpp',
'collationfcd.cpp',
'collationkeys.cpp',
'unistr_case_locale.cpp',
'collationsettings.cpp',
'collationcompare.cpp',
'utf8collationiterator.cpp',
'uitercollationiterator.cpp',
'collationfastlatinbuilder.cpp',
'collationdatawriter.cpp',
'uset_props.cpp',
'utrace.cpp',
'sortkey.cpp',
'unistr_titlecase_brkiter.cpp',
'ubidi_props.cpp', # modified due to duplicate symbol `_enumPropertyStartsRange3`
'bocsu.cpp',
'ubidiln.cpp',
'ubidiwrt.cpp',
'ustr_titlecase_brkiter.cpp',
'wintz.cpp',
'stubdata.cpp',
'udata.cpp',
# modified due to to comment out `extern "C" const DataHeader U_DATA_API
# U_ICUDATA_ENTRY_POINT;` and cast `(const DataHeader*)` due to
# stubdata.cpp being added
]
# Other modifications:
# Modify: regexcst.h
# Replace the header guard with:
# #ifndef REGEXCST_H
# #define REGEXCST_H
# Modify: regexcmp.h
# Replace the header guard with:
# #ifndef REGEXCMP_H
# #define REGEXCMP_H
# Modify: regexcst.h
# Append '2' to every enum in Regex_PatternParseAction
# Replace all of the references to those enums in regexcst.h and regexcmp.cpp
# Modify: regexcst.h
# Replace: `gRuleParseStateTable` symbol with `gRuleParseStateTable2`
# Replace references with `gRuleParseStateTable2` in regexcmp.cpp
|
mopidy_soundcloud/actor.py | Laurentww/mopidy-soundcloud | 161 | 12628913 | <gh_stars>100-1000
import logging
import pykka
from mopidy import backend
from mopidy_soundcloud.library import SoundCloudLibraryProvider
from mopidy_soundcloud.soundcloud import SoundCloudClient
logger = logging.getLogger(__name__)
class SoundCloudBackend(pykka.ThreadingActor, backend.Backend):
def __init__(self, config, audio):
super().__init__()
self.config = config
self.remote = SoundCloudClient(config)
self.library = SoundCloudLibraryProvider(backend=self)
self.playback = SoundCloudPlaybackProvider(audio=audio, backend=self)
self.uri_schemes = ["soundcloud", "sc"]
def on_start(self):
username = self.remote.user.get("username")
if username is not None:
logger.info(f"Logged in to SoundCloud as {username!r}")
class SoundCloudPlaybackProvider(backend.PlaybackProvider):
def translate_uri(self, uri):
track_id = self.backend.remote.parse_track_uri(uri)
track = self.backend.remote.get_track(track_id, True)
if track is None:
return None
return track.uri
|
migrations/versions/bb816156989f_moss_results.py | chrononyan/ok | 148 | 12628918 | """moss_results
Revision ID: bb816156989f
Revises: c71e52b908e6
Create Date: 2017-09-08 14:01:37.766562
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = 'c71e52b908e6'
from alembic import op
import sqlalchemy as sa
import server
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('moss_result',
sa.Column('created', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('run_time', sa.DateTime(timezone=True), nullable=False),
sa.Column('primary_id', sa.Integer(), nullable=False),
sa.Column('primary_matches', server.models.JsonBlob(), nullable=False),
sa.Column('secondary_id', sa.Integer(), nullable=False),
sa.Column('secondary_matches', server.models.JsonBlob(), nullable=False),
sa.Column('similarity', sa.Integer(), nullable=False),
sa.Column('tags', server.models.StringList(), nullable=False),
sa.ForeignKeyConstraint(['primary_id'], ['backup.id'], name=op.f('fk_moss_result_primary_id_backup')),
sa.ForeignKeyConstraint(['secondary_id'], ['backup.id'], name=op.f('fk_moss_result_secondary_id_backup')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_moss_result'))
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('moss_result')
# ### end Alembic commands ###
|
vendor/xlsxwriter/chart_stock.py | Lwz1966/QQ-Groups-Spider | 882 | 12628924 | <filename>vendor/xlsxwriter/chart_stock.py
###############################################################################
#
# ChartStock - A class for writing the Excel XLSX Stock charts.
#
# Copyright 2013-2017, <NAME>, <EMAIL>
#
from . import chart
class ChartStock(chart.Chart):
"""
A class for writing the Excel XLSX Stock charts.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self, options=None):
"""
Constructor.
"""
super(ChartStock, self).__init__()
if options is None:
options = {}
self.show_crosses = 0
self.hi_low_lines = {}
self.date_category = True
# Override and reset the default axis values.
self.x_axis['defaults']['num_format'] = 'dd/mm/yyyy'
self.x2_axis['defaults']['num_format'] = 'dd/mm/yyyy'
# Set the available data label positions for this chart type.
self.label_position_default = 'right'
self.label_positions = {
'center': 'ctr',
'right': 'r',
'left': 'l',
'above': 't',
'below': 'b',
# For backward compatibility.
'top': 't',
'bottom': 'b'}
self.set_x_axis({})
self.set_x2_axis({})
###########################################################################
#
# Private API.
#
###########################################################################
def _write_chart_type(self, args):
# Override the virtual superclass method with a chart specific method.
# Write the c:stockChart element.
self._write_stock_chart(args)
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_stock_chart(self, args):
# Write the <c:stockChart> element.
# Overridden to add hi_low_lines().
if args['primary_axes']:
series = self._get_primary_axes_series()
else:
series = self._get_secondary_axes_series()
if not len(series):
return
# Add default formatting to the series data.
self._modify_series_formatting()
self._xml_start_tag('c:stockChart')
# Write the series elements.
for data in series:
self._write_ser(data)
# Write the c:dropLines element.
self._write_drop_lines()
# Write the c:hiLowLines element.
if args.get('primary_axes'):
self._write_hi_low_lines()
# Write the c:upDownBars element.
self._write_up_down_bars()
# Write the c:axId elements
self._write_axis_ids(args)
self._xml_end_tag('c:stockChart')
def _modify_series_formatting(self):
# Add default formatting to the series data.
index = 0
for series in self.series:
if index % 4 != 3:
if not series['line']['defined']:
series['line'] = {'width': 2.25,
'none': 1,
'defined': 1}
if series['marker'] is None:
if index % 4 == 2:
series['marker'] = {'type': 'dot', 'size': 3}
else:
series['marker'] = {'type': 'none'}
index += 1
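        # Net effect: only every fourth series keeps its default line; within
        # each group of four the third series is drawn with a small dot marker
        # and the others with no marker, approximating Excel's default
        # high-low-close stock styling.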
|
egg/zoo/mnist_vae/train.py | vengalraoguttha/EGG | 254 | 12628934 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pathlib
import torch
import torch.utils.data
from torch import nn
from torch.nn import functional as F
from torchvision import datasets, transforms, utils
import egg.core as core
class Sender(nn.Module):
def __init__(self, message_dim):
super(Sender, self).__init__()
self.fc1 = nn.Linear(784, 400)
self.fc21 = nn.Linear(400, message_dim)
self.fc22 = nn.Linear(400, message_dim)
def forward(self, x):
x = F.relu(self.fc1(x))
mu, logvar = self.fc21(x), self.fc22(x)
return mu, logvar
class Receiver(nn.Module):
def __init__(self, message_dim):
super(Receiver, self).__init__()
self.fc3 = nn.Linear(message_dim, 400)
self.fc4 = nn.Linear(400, 784)
def forward(self, x):
x = F.relu(self.fc3(x))
return torch.sigmoid(self.fc4(x))
class VAE_Game(nn.Module):
def __init__(self, sender, receiver):
super().__init__()
self.sender = sender
self.receiver = receiver
@staticmethod
def reparameterize(mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mu + eps * std
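    # reparameterize draws z = mu + sigma * eps with eps ~ N(0, I) and
    # sigma = exp(0.5 * logvar): the standard VAE reparameterization trick that
    # keeps the sampling step differentiable w.r.t. the encoder outputs.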
def forward(self, *batch):
sender_input = batch[0]
sender_input = sender_input.view(-1, 784)
mu, logvar = self.sender(sender_input)
        if self.training:  # nn.Module's mode flag is `training`; `train` is a method and would always be truthy
message = self.reparameterize(mu, logvar)
else:
message = mu
receiver_output = self.receiver(message)
BCE = F.binary_cross_entropy(receiver_output, sender_input, reduction="sum")
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
loss = BCE + KLD
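        # Negative ELBO: summed per-pixel binary cross-entropy (reconstruction)
        # plus the analytic KL divergence of the diagonal Gaussian q(z|x) from
        # the unit Gaussian prior.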
log = core.Interaction(
sender_input=sender_input,
receiver_input=None,
labels=None,
aux_input=None,
receiver_output=receiver_output.detach(),
message=message.detach(),
message_length=torch.ones(message.size(0)),
aux={},
)
return loss.mean(), log
class ImageDumpCallback(core.Callback):
def __init__(self, eval_dataset, device):
super().__init__()
self.eval_dataset = eval_dataset
self.device = device
def on_epoch_end(self, loss, logs, epoch):
dump_dir = pathlib.Path.cwd() / "dump" / str(epoch)
dump_dir.mkdir(exist_ok=True, parents=True)
self.trainer.game.eval()
for i in range(5):
example = self.eval_dataset[i]
example = core.move_to(example, self.device)
_, interaction = self.trainer.game(*example)
image = example[0][0]
output = interaction.receiver_output.view(28, 28)
image = image.view(28, 28)
utils.save_image(
torch.cat([image, output], dim=1), dump_dir / (str(i) + ".png")
)
self.trainer.game.train()
def main(params):
opts = core.init(params=params)
kwargs = {"num_workers": 1, "pin_memory": True} if opts.cuda else {}
transform = transforms.ToTensor()
train_loader = torch.utils.data.DataLoader(
datasets.MNIST("./data", train=True, download=True, transform=transform),
batch_size=opts.batch_size,
shuffle=True,
**kwargs
)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST("./data", train=False, transform=transform),
batch_size=opts.batch_size,
shuffle=True,
**kwargs
)
sender = Sender(opts.vocab_size)
receiver = Receiver(opts.vocab_size)
game = VAE_Game(sender, receiver)
optimizer = core.build_optimizer(game.parameters())
# initialize and launch the trainer
trainer = core.Trainer(
game=game,
optimizer=optimizer,
train_data=train_loader,
validation_data=test_loader,
callbacks=[
core.ConsoleLogger(as_json=True, print_train_loss=True),
ImageDumpCallback(test_loader.dataset, opts.device),
],
)
trainer.train(n_epochs=opts.n_epochs)
core.close()
if __name__ == "__main__":
import sys
main(sys.argv[1:])
|
DataServer/const.py | ajransaputra/geoip-attack-map | 299 | 12628938 | <reponame>ajransaputra/geoip-attack-map
META = [{
'lookup': 'city',
'tag': 'city',
'path': ['names','en'],
},{
'lookup': 'continent',
'tag': 'continent',
'path': ['names','en'],
},{
'lookup': 'continent_code',
'tag': 'continent',
'path': ['code'],
},{
'lookup': 'country',
'tag': 'country',
'path': ['names','en'],
},{
'lookup': 'iso_code',
'tag': 'country',
'path': ['iso_code'],
},{
'lookup': 'latitude',
'tag': 'location',
'path': ['latitude'],
},{
'lookup': 'longitude',
'tag': 'location',
'path': ['longitude'],
},{
'lookup': 'metro_code',
'tag': 'location',
'path': ['metro_code'],
},{
'lookup': 'postal_code',
'tag': 'postal',
'path': ['code'],
}]
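# Each META entry maps a flattened output field ('lookup') to a section of the
# GeoIP city response ('tag') and the nested keys to walk within that section
# ('path'), e.g. city -> names -> en.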
PORTMAP = {
0:"DoS", # Denial of Service
1:"ICMP", # ICMP
20:"FTP", # FTP Data
21:"FTP", # FTP Control
22:"SSH", # SSH
23:"TELNET", # Telnet
25:"EMAIL", # SMTP
43:"WHOIS", # Whois
53:"DNS", # DNS
80:"HTTP", # HTTP
88:"AUTH", # Kerberos
109:"EMAIL", # POP v2
110:"EMAIL", # POP v3
115:"FTP", # SFTP
118:"SQL", # SQL
143:"EMAIL", # IMAP
156:"SQL", # SQL
161:"SNMP", # SNMP
220:"EMAIL", # IMAP v3
389:"AUTH", # LDAP
443:"HTTPS", # HTTPS
445:"SMB", # SMB
636:"AUTH", # LDAP of SSL/TLS
1433:"SQL", # MySQL Server
1434:"SQL", # MySQL Monitor
3306:"SQL", # MySQL
3389:"RDP", # RDP
5900:"RDP", # VNC:0
5901:"RDP", # VNC:1
5902:"RDP", # VNC:2
5903:"RDP", # VNC:3
8080:"HTTP", # HTTP Alternative
}
|
src/oci/operator_access_control/models/operator_action.py | Manny27nyc/oci-python-sdk | 249 | 12628950 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class OperatorAction(object):
"""
Details of the operator action. Operator actions are a pre-defined set of commands available to the operator on different layers of the infrastructure. Although the groupings may differ depending on the infrastructure layers,
the groups are designed to enable the operator access to commands to resolve a specific set of issues. The infrastructure layers controlled by the Operator Control include Dom0, CellServer, and Control Plane Server (CPS).
There are five groups available to the operator. x-obmcs-top-level-enum: '#/definitions/OperatorActionCategories' enum: *OPERATORACTIONCATEGORIES
The following infrastructure layers are controlled by the operator actions x-obmcs-top-level-enum: '#/definitions/InfrastructureLayers' enum: *INFRASTRUCTURELAYERS
"""
def __init__(self, **kwargs):
"""
Initializes a new OperatorAction object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this OperatorAction.
:type id: str
:param name:
The value to assign to the name property of this OperatorAction.
:type name: str
:param component:
The value to assign to the component property of this OperatorAction.
:type component: str
:param description:
The value to assign to the description property of this OperatorAction.
:type description: str
:param properties:
The value to assign to the properties property of this OperatorAction.
:type properties: list[oci.operator_access_control.models.OperatorActionProperties]
"""
self.swagger_types = {
'id': 'str',
'name': 'str',
'component': 'str',
'description': 'str',
'properties': 'list[OperatorActionProperties]'
}
self.attribute_map = {
'id': 'id',
'name': 'name',
'component': 'component',
'description': 'description',
'properties': 'properties'
}
self._id = None
self._name = None
self._component = None
self._description = None
self._properties = None
@property
def id(self):
"""
**[Required]** Gets the id of this OperatorAction.
Unique Oracle assigned identifier for the operator action.
:return: The id of this OperatorAction.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this OperatorAction.
Unique Oracle assigned identifier for the operator action.
:param id: The id of this OperatorAction.
:type: str
"""
self._id = id
@property
def name(self):
"""
**[Required]** Gets the name of this OperatorAction.
Name of the operator action.
:return: The name of this OperatorAction.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this OperatorAction.
Name of the operator action.
:param name: The name of this OperatorAction.
:type: str
"""
self._name = name
@property
def component(self):
"""
Gets the component of this OperatorAction.
Name of the infrastructure layer associated with the operator action.
:return: The component of this OperatorAction.
:rtype: str
"""
return self._component
@component.setter
def component(self, component):
"""
Sets the component of this OperatorAction.
Name of the infrastructure layer associated with the operator action.
:param component: The component of this OperatorAction.
:type: str
"""
self._component = component
@property
def description(self):
"""
Gets the description of this OperatorAction.
Description of the operator action in terms of associated risk profile, and characteristics of the operating system commands made
available to the operator under this operator action.
:return: The description of this OperatorAction.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this OperatorAction.
Description of the operator action in terms of associated risk profile, and characteristics of the operating system commands made
available to the operator under this operator action.
:param description: The description of this OperatorAction.
:type: str
"""
self._description = description
@property
def properties(self):
"""
Gets the properties of this OperatorAction.
Fine grained properties associated with the operator control.
:return: The properties of this OperatorAction.
:rtype: list[oci.operator_access_control.models.OperatorActionProperties]
"""
return self._properties
@properties.setter
def properties(self, properties):
"""
Sets the properties of this OperatorAction.
Fine grained properties associated with the operator control.
:param properties: The properties of this OperatorAction.
:type: list[oci.operator_access_control.models.OperatorActionProperties]
"""
self._properties = properties
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
finrl_meta/env_execution_optimizing/order_execution_qlib/trade/vecenv.py | eitin-infant/FinRL-Meta | 214 | 12628954 | import gym
import time
import ctypes
import numpy as np
from collections import OrderedDict
from multiprocessing.context import Process
from multiprocessing import Array, Pipe, connection, Queue
from typing import Any, List, Tuple, Union, Callable, Optional
from tianshou.env.worker import EnvWorker
from tianshou.env.utils import CloudpickleWrapper
_NP_TO_CT = {
    np.bool_: ctypes.c_bool,
np.uint8: ctypes.c_uint8,
np.uint16: ctypes.c_uint16,
np.uint32: ctypes.c_uint32,
np.uint64: ctypes.c_uint64,
np.int8: ctypes.c_int8,
np.int16: ctypes.c_int16,
np.int32: ctypes.c_int32,
np.int64: ctypes.c_int64,
np.float32: ctypes.c_float,
np.float64: ctypes.c_double,
}
class ShArray:
"""Wrapper of multiprocessing Array."""
def __init__(self, dtype: np.generic, shape: Tuple[int]) -> None:
self.arr = Array(
_NP_TO_CT[dtype.type], # type: ignore
int(np.prod(shape)),
)
self.dtype = dtype
self.shape = shape
def save(self, ndarray: np.ndarray) -> None:
"""
:param ndarray: np.ndarray:
:param ndarray: np.ndarray:
:param ndarray: np.ndarray:
"""
assert isinstance(ndarray, np.ndarray)
dst = self.arr.get_obj()
dst_np = np.frombuffer(dst, dtype=self.dtype).reshape(self.shape)
np.copyto(dst_np, ndarray)
def get(self) -> np.ndarray:
""" """
obj = self.arr.get_obj()
return np.frombuffer(obj, dtype=self.dtype).reshape(self.shape)
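# Illustrative round-trip (an editor-added sketch, kept as comments so it is not
# executed at import time); the shape and values are arbitrary examples:
#   buf = ShArray(np.dtype(np.float32), (2, 3))
#   buf.save(np.arange(6, dtype=np.float32).reshape(2, 3))
#   assert (buf.get() == np.arange(6, dtype=np.float32).reshape(2, 3)).all()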
def _setup_buf(space: gym.Space) -> Union[dict, tuple, ShArray]:
"""
:param space: gym.Space:
:param space: gym.Space:
:param space: gym.Space:
"""
if isinstance(space, gym.spaces.Dict):
assert isinstance(space.spaces, OrderedDict)
return {k: _setup_buf(v) for k, v in space.spaces.items()}
elif isinstance(space, gym.spaces.Tuple):
assert isinstance(space.spaces, tuple)
return tuple([_setup_buf(t) for t in space.spaces])
else:
return ShArray(space.dtype, space.shape)
def _worker(
parent: connection.Connection,
p: connection.Connection,
env_fn_wrapper: CloudpickleWrapper,
obs_bufs: Optional[Union[dict, tuple, ShArray]] = None,
) -> None:
"""
:param parent: connection.Connection:
:param p: connection.Connection:
:param env_fn_wrapper: CloudpickleWrapper:
:param obs_bufs: Optional[Union[dict:
:param tuple: param ShArray]]: (Default value = None)
:param parent: connection.Connection:
:param p: connection.Connection:
:param env_fn_wrapper: CloudpickleWrapper:
:param obs_bufs: Optional[Union[dict:
:param ShArray]]: (Default value = None)
:param parent: connection.Connection:
:param p: connection.Connection:
:param env_fn_wrapper: CloudpickleWrapper:
:param obs_bufs: Optional[Union[dict:
"""
def _encode_obs(obs: Union[dict, tuple, np.ndarray], buffer: Union[dict, tuple, ShArray],) -> None:
"""
:param obs: Union[dict:
:param tuple: param np.ndarray]:
:param buffer: Union[dict:
:param ShArray:
:param obs: Union[dict:
:param np.ndarray]:
:param buffer: Union[dict:
:param ShArray]:
:param obs: Union[dict:
:param buffer: Union[dict:
"""
if isinstance(obs, np.ndarray) and isinstance(buffer, ShArray):
buffer.save(obs)
elif isinstance(obs, tuple) and isinstance(buffer, tuple):
for o, b in zip(obs, buffer):
_encode_obs(o, b)
elif isinstance(obs, dict) and isinstance(buffer, dict):
for k in obs.keys():
_encode_obs(obs[k], buffer[k])
return None
parent.close()
env = env_fn_wrapper.data()
try:
while True:
try:
cmd, data = p.recv()
except EOFError: # the pipe has been closed
p.close()
break
if cmd == "step":
obs, reward, done, info = env.step(data)
if obs_bufs is not None:
_encode_obs(obs, obs_bufs)
obs = None
p.send((obs, reward, done, info))
elif cmd == "reset":
obs = env.reset(data)
if obs_bufs is not None:
_encode_obs(obs, obs_bufs)
obs = None
p.send(obs)
elif cmd == "close":
p.send(env.close())
p.close()
break
elif cmd == "render":
p.send(env.render(**data) if hasattr(env, "render") else None)
elif cmd == "seed":
p.send(env.seed(data) if hasattr(env, "seed") else None)
elif cmd == "getattr":
p.send(getattr(env, data) if hasattr(env, data) else None)
elif cmd == "toggle_log":
env.toggle_log(data)
else:
p.close()
raise NotImplementedError
except KeyboardInterrupt:
p.close()
class SubprocEnvWorker(EnvWorker):
"""Subprocess worker used in SubprocVectorEnv and ShmemVectorEnv."""
def __init__(self, env_fn: Callable[[], gym.Env], share_memory: bool = False) -> None:
super().__init__(env_fn)
self.parent_remote, self.child_remote = Pipe()
self.share_memory = share_memory
self.buffer: Optional[Union[dict, tuple, ShArray]] = None
if self.share_memory:
dummy = env_fn()
obs_space = dummy.observation_space
dummy.close()
del dummy
self.buffer = _setup_buf(obs_space)
args = (
self.parent_remote,
self.child_remote,
CloudpickleWrapper(env_fn),
self.buffer,
)
self.process = Process(target=_worker, args=args, daemon=True)
self.process.start()
self.child_remote.close()
def __getattr__(self, key: str) -> Any:
self.parent_remote.send(["getattr", key])
return self.parent_remote.recv()
def _decode_obs(self) -> Union[dict, tuple, np.ndarray]:
""" """
def decode_obs(buffer: Optional[Union[dict, tuple, ShArray]]) -> Union[dict, tuple, np.ndarray]:
"""
:param buffer: Optional[Union[dict:
:param tuple: param ShArray]]:
:param buffer: Optional[Union[dict:
:param ShArray]]:
:param buffer: Optional[Union[dict:
"""
if isinstance(buffer, ShArray):
return buffer.get()
elif isinstance(buffer, tuple):
return tuple([decode_obs(b) for b in buffer])
elif isinstance(buffer, dict):
return {k: decode_obs(v) for k, v in buffer.items()}
else:
raise NotImplementedError
return decode_obs(self.buffer)
def reset(self, sample) -> Any:
"""
:param sample:
"""
self.parent_remote.send(["reset", sample])
# obs = self.parent_remote.recv()
# if self.share_memory:
# obs = self._decode_obs()
# return obs
def get_reset_result(self):
""" """
obs = self.parent_remote.recv()
if self.share_memory:
obs = self._decode_obs()
return obs
@staticmethod
def wait( # type: ignore
workers: List["SubprocEnvWorker"], wait_num: int, timeout: Optional[float] = None,
) -> List["SubprocEnvWorker"]:
"""
:param # type: ignoreworkers: List["SubprocEnvWorker"]:
:param wait_num: int:
:param timeout: Optional[float]: (Default value = None)
:param # type: ignoreworkers: List["SubprocEnvWorker"]:
:param wait_num: int:
:param timeout: Optional[float]: (Default value = None)
"""
remain_conns = conns = [x.parent_remote for x in workers]
ready_conns: List[connection.Connection] = []
remain_time, t1 = timeout, time.time()
while len(remain_conns) > 0 and len(ready_conns) < wait_num:
if timeout:
remain_time = timeout - (time.time() - t1)
if remain_time <= 0:
break
# connection.wait hangs if the list is empty
new_ready_conns = connection.wait(remain_conns, timeout=remain_time)
ready_conns.extend(new_ready_conns) # type: ignore
remain_conns = [conn for conn in remain_conns if conn not in ready_conns]
return [workers[conns.index(con)] for con in ready_conns]
def send_action(self, action: np.ndarray) -> None:
"""
:param action: np.ndarray:
:param action: np.ndarray:
:param action: np.ndarray:
"""
self.parent_remote.send(["step", action])
def toggle_log(self, log):
self.parent_remote.send(["toggle_log", log])
def get_result(self,) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
""" """
obs, rew, done, info = self.parent_remote.recv()
if self.share_memory:
obs = self._decode_obs()
return obs, rew, done, info
def seed(self, seed: Optional[int] = None) -> Optional[List[int]]:
"""
:param seed: Optional[int]: (Default value = None)
:param seed: Optional[int]: (Default value = None)
:param seed: Optional[int]: (Default value = None)
"""
self.parent_remote.send(["seed", seed])
return self.parent_remote.recv()
def render(self, **kwargs: Any) -> Any:
"""
:param **kwargs: Any:
:param **kwargs: Any:
"""
self.parent_remote.send(["render", kwargs])
return self.parent_remote.recv()
def close_env(self) -> None:
""" """
try:
self.parent_remote.send(["close", None])
# mp may be deleted so it may raise AttributeError
self.parent_remote.recv()
self.process.join()
except (BrokenPipeError, EOFError, AttributeError):
pass
# ensure the subproc is terminated
self.process.terminate()
class BaseVectorEnv(gym.Env):
"""Base class for vectorized environments wrapper.
Usage:
::
env_num = 8
envs = DummyVectorEnv([lambda: gym.make(task) for _ in range(env_num)])
assert len(envs) == env_num
It accepts a list of environment generators. In other words, an environment
generator ``efn`` of a specific task means that ``efn()`` returns the
environment of the given task, for example, ``gym.make(task)``.
All of the VectorEnv must inherit :class:`~tianshou.env.BaseVectorEnv`.
Here are some other usages:
::
envs.seed(2) # which is equal to the next line
envs.seed([2, 3, 4, 5, 6, 7, 8, 9]) # set specific seed for each env
obs = envs.reset() # reset all environments
obs = envs.reset([0, 5, 7]) # reset 3 specific environments
obs, rew, done, info = envs.step([1] * 8) # step synchronously
envs.render() # render all environments
envs.close() # close all environments
.. warning::
If you use your own environment, please make sure the ``seed`` method
is set up properly, e.g.,
::
def seed(self, seed):
np.random.seed(seed)
Otherwise, the outputs of these envs may be the same with each other.
    :param env_fns: a list of callable envs, where ``env_fns[i]()`` generates the i-th env.
    :param worker_fn: a callable worker, where ``worker_fn(env_fns[i])`` generates a
        worker which contains the i-th env.
    :param int wait_num: used in asynchronous simulation when the time cost of
        ``env.step`` varies and synchronously waiting for every environment to
        finish a step is wasteful. In that case we return as soon as ``wait_num``
        environments have finished a step and keep simulating in those
        environments. If ``None``, asynchronous simulation is disabled; else
        ``1 <= wait_num <= env_num``.
    :param float timeout: used in asynchronous simulation in the same way; each
        vectorized step only deals with the environments that finish within
        ``timeout`` seconds.
    """
def __init__(
self,
env_fns: List[Callable[[], gym.Env]],
worker_fn: Callable[[Callable[[], gym.Env]], EnvWorker],
sampler=None,
testing: Optional[bool] = False,
wait_num: Optional[int] = None,
timeout: Optional[float] = None,
) -> None:
self._env_fns = env_fns
        # A VectorEnv contains a pool of EnvWorkers, each of which interacts
        # with one of the given envs (one worker <-> one env).
self.workers = [worker_fn(fn) for fn in env_fns]
self.worker_class = type(self.workers[0])
assert issubclass(self.worker_class, EnvWorker)
assert all([isinstance(w, self.worker_class) for w in self.workers])
self.env_num = len(env_fns)
self.wait_num = wait_num or len(env_fns)
assert 1 <= self.wait_num <= len(env_fns), f"wait_num should be in [1, {len(env_fns)}], but got {wait_num}"
self.timeout = timeout
assert self.timeout is None or self.timeout > 0, f"timeout is {timeout}, it should be positive if provided!"
self.is_async = self.wait_num != len(env_fns) or timeout is not None or testing
self.waiting_conn: List[EnvWorker] = []
        # Environments in self.ready_id are actually ready, while environments
        # in self.waiting_id were only waiting when last checked; they may be
        # ready now, but we will not know until we check in the step() function.
self.waiting_id: List[int] = []
# all environments are ready in the beginning
self.ready_id = list(range(self.env_num))
self.is_closed = False
self.sampler = sampler
self.sample_obs = None
def _assert_is_not_closed(self) -> None:
""" """
assert not self.is_closed, f"Methods of {self.__class__.__name__} cannot be called after " "close."
def __len__(self) -> int:
"""Return len(self), which is the number of environments."""
return self.env_num
def __getattribute__(self, key: str) -> Any:
"""Switch the attribute getter depending on the key.
        Any class that inherits ``gym.Env`` will inherit some attributes, like
``action_space``. However, we would like the attribute lookup to go
straight into the worker (in fact, this vector env's action_space is
always None).
"""
if key in [
"metadata",
"reward_range",
"spec",
"action_space",
"observation_space",
]: # reserved keys in gym.Env
return self.__getattr__(key)
else:
return super().__getattribute__(key)
def __getattr__(self, key: str) -> List[Any]:
"""Fetch a list of env attributes.
This function tries to retrieve an attribute from each individual
wrapped environment, if it does not belong to the wrapping vector
environment class.
"""
return [getattr(worker, key) for worker in self.workers]
def _wrap_id(self, id: Optional[Union[int, List[int], np.ndarray]] = None) -> Union[List[int], np.ndarray]:
"""
:param id: Optional[Union[int:
:param List: int]:
:param np: ndarray]]: (Default value = None)
:param id: Optional[Union[int:
:param List[int]:
:param np.ndarray]]: (Default value = None)
:param id: Optional[Union[int:
"""
if id is None:
id = list(range(self.env_num))
elif np.isscalar(id):
id = [id]
return id
def _assert_id(self, id: List[int]) -> None:
"""
:param id: List[int]:
:param id: List[int]:
:param id: List[int]:
"""
for i in id:
assert i not in self.waiting_id, f"Cannot interact with environment {i} which is stepping now."
assert i in self.ready_id, f"Can only interact with ready environments {self.ready_id}."
def reset(self, id: Optional[Union[int, List[int], np.ndarray]] = None) -> np.ndarray:
"""Reset the state of some envs and return initial observations.
If id is None, reset the state of all the environments and return
initial observations, otherwise reset the specific environments with
the given id, either an int or a list.
        :param id: an int, a list of ints, or an ndarray of environment indices
            (Default value = None, meaning all environments)
        """
start_time = time.time()
self._assert_is_not_closed()
id = self._wrap_id(id)
if self.is_async:
self._assert_id(id)
obs = []
stop_id = []
for i in id:
sample = self.sampler.sample()
if sample is None:
stop_id.append(i)
else:
self.workers[i].reset(sample)
for i in id:
if i in stop_id:
obs.append(self.sample_obs)
else:
this_obs = self.workers[i].get_reset_result()
if self.sample_obs is None:
self.sample_obs = this_obs
for j in range(len(obs)):
if obs[j] is None:
obs[j] = self.sample_obs
obs.append(this_obs)
if len(obs) > 0:
obs = np.stack(obs)
# if len(stop_id)> 0:
# obs_zero =
# print(time.time() - start_timed)
return obs, stop_id
def toggle_log(self, log):
for worker in self.workers:
worker.toggle_log(log)
def reset_sampler(self):
""" """
self.sampler.reset()
def step(self, action: np.ndarray, id: Optional[Union[int, List[int], np.ndarray]] = None) -> List[np.ndarray]:
"""Run one timestep of some environments' dynamics.
If id is None, run one timestep of all the environments’ dynamics;
otherwise run one timestep for some environments with given id, either
an int or a list. When the end of episode is reached, you are
responsible for calling reset(id) to reset this environment’s state.
        Accept a batch of actions and return a tuple (batch_obs, batch_rew,
        batch_done, batch_info) in numpy format.
        :param action: a batch of actions provided by the agent, one per environment id.
        :param id: an int, a list of ints, or an ndarray of environment indices
            (Default value = None, meaning all environments)
        :return: a tuple of four stacked arrays (batch_obs, batch_rew, batch_done, batch_info).
"""
self._assert_is_not_closed()
id = self._wrap_id(id)
if not self.is_async:
assert len(action) == len(id)
for i, j in enumerate(id):
self.workers[j].send_action(action[i])
result = []
for j in id:
obs, rew, done, info = self.workers[j].get_result()
info["env_id"] = j
result.append((obs, rew, done, info))
else:
if action is not None:
self._assert_id(id)
assert len(action) == len(id)
for i, (act, env_id) in enumerate(zip(action, id)):
self.workers[env_id].send_action(act)
self.waiting_conn.append(self.workers[env_id])
self.waiting_id.append(env_id)
self.ready_id = [x for x in self.ready_id if x not in id]
ready_conns: List[EnvWorker] = []
while not ready_conns:
ready_conns = self.worker_class.wait(self.waiting_conn, self.wait_num, self.timeout)
result = []
for conn in ready_conns:
waiting_index = self.waiting_conn.index(conn)
self.waiting_conn.pop(waiting_index)
env_id = self.waiting_id.pop(waiting_index)
obs, rew, done, info = conn.get_result()
info["env_id"] = env_id
result.append((obs, rew, done, info))
self.ready_id.append(env_id)
return list(map(np.stack, zip(*result)))
def seed(self, seed: Optional[Union[int, List[int]]] = None) -> List[Optional[List[int]]]:
"""Set the seed for all environments.
Accept ``None``, an int (which will extend ``i`` to
``[i, i + 1, i + 2, ...]``) or a list.
        :param seed: None, an int, or a list of ints (Default value = None)
        :returns: The list of seeds used in this env's random number generators.
            The first value in the list should be the "main" seed, or the value
            which a reproducer would pass to "seed".
"""
self._assert_is_not_closed()
seed_list: Union[List[None], List[int]]
if seed is None:
seed_list = [seed] * self.env_num
elif isinstance(seed, int):
seed_list = [seed + i for i in range(self.env_num)]
else:
seed_list = seed
return [w.seed(s) for w, s in zip(self.workers, seed_list)]
def render(self, **kwargs: Any) -> List[Any]:
"""Render all of the environments.
        :param kwargs: keyword arguments forwarded to each worker's ``render``.
"""
self._assert_is_not_closed()
if self.is_async and len(self.waiting_id) > 0:
raise RuntimeError(f"Environments {self.waiting_id} are still stepping, cannot " "render them now.")
return [w.render(**kwargs) for w in self.workers]
def close(self) -> None:
"""Close all of the environments.
        This function will be called only once (if not, it will be called
        during garbage collection). This way, ``close`` of all workers can be
        assured.
"""
self._assert_is_not_closed()
for w in self.workers:
w.close()
self.is_closed = True
def __del__(self) -> None:
"""Redirect to self.close()."""
if not self.is_closed:
self.close()
class SubprocVectorEnv(BaseVectorEnv):
"""Vectorized environment wrapper based on subprocess.
.. seealso::
Please refer to :class:`~tianshou.env.BaseVectorEnv` for more detailed
explanation.
"""
def __init__(
self,
env_fns: List[Callable[[], gym.Env]],
sampler=None,
testing=False,
wait_num: Optional[int] = None,
timeout: Optional[float] = None,
) -> None:
def worker_fn(fn: Callable[[], gym.Env]) -> SubprocEnvWorker:
"""
:param fn: Callable[[]:
:param gym: Env]:
:param fn: Callable[[]:
:param gym.Env]:
:param fn: Callable[[]:
"""
return SubprocEnvWorker(fn, share_memory=False)
super().__init__(env_fns, worker_fn, sampler, testing, wait_num=wait_num, timeout=timeout)
class ShmemVectorEnv(BaseVectorEnv):
"""Optimized SubprocVectorEnv with shared buffers to exchange observations.
ShmemVectorEnv has exactly the same API as SubprocVectorEnv.
.. seealso::
Please refer to :class:`~tianshou.env.SubprocVectorEnv` for more
detailed explanation.
"""
def __init__(
self,
env_fns: List[Callable[[], gym.Env]],
sampler=None,
testing=False,
wait_num: Optional[int] = None,
timeout: Optional[float] = None,
) -> None:
def worker_fn(fn: Callable[[], gym.Env]) -> SubprocEnvWorker:
"""
:param fn: Callable[[]:
:param gym: Env]:
:param fn: Callable[[]:
:param gym.Env]:
:param fn: Callable[[]:
"""
return SubprocEnvWorker(fn, share_memory=True)
super().__init__(env_fns, worker_fn, sampler, testing, wait_num=wait_num, timeout=timeout)
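# --- Usage sketch (editor-added, not part of the original file) ----------------
# A minimal, hedged example of driving SubprocVectorEnv. ToyEnv and ToySampler
# are hypothetical stand-ins: this wrapper expects env.reset(sample) to accept a
# sample drawn from a sampler object exposing sample() and reset(), which plain
# gym environments do not provide, so a tiny custom env is sketched here.
if __name__ == "__main__":
    class ToyEnv(gym.Env):
        observation_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(3,), dtype=np.float32)
        action_space = gym.spaces.Discrete(2)
        def reset(self, sample=None):
            # A real order-execution env would initialize its state from `sample`.
            return np.zeros(3, dtype=np.float32)
        def step(self, action):
            return np.zeros(3, dtype=np.float32), 0.0, True, {}
    class ToySampler:
        def __init__(self, num_samples):
            self.num_samples = num_samples
        def sample(self):
            # Returning None tells BaseVectorEnv.reset() there is nothing left to sample.
            if self.num_samples <= 0:
                return None
            self.num_samples -= 1
            return {"index": self.num_samples}
        def reset(self):
            pass
    envs = SubprocVectorEnv([lambda: ToyEnv() for _ in range(2)], sampler=ToySampler(4))
    obs, stop_id = envs.reset()
    obs, rew, done, info = envs.step(np.zeros(2, dtype=np.int64), id=[0, 1])
    print(obs.shape, rew, done)
    envs.close()
    # ShmemVectorEnv is a drop-in replacement that moves observations through shared memory.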
|
office365/sharepoint/fields/field_thumbnail.py | rikeshtailor/Office365-REST-Python-Client | 544 | 12628970 | from office365.sharepoint.fields.field import Field
class FieldThumbnail(Field):
pass
|
Graphs/ShortesrReachInAGraph.py | WinterSoldier13/interview-preparation-kit | 175 | 12628982 | import queue
import collections
class Graph:
def __init__(self, n):
self.n = n
self.edges = collections.defaultdict(lambda: [])
def connect(self,x,y):
self.edges[x].append(y)
self.edges[y].append(x)
def find_all_distances(self, root):
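        # Standard BFS from `root`; each edge contributes a fixed distance of 6,
        # the edge weight used by the HackerRank "Shortest Reach" problem.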
distances = [-1 for i in range(self.n)]
unvisited = set([i for i in range(self.n)])
q = queue.Queue()
distances[root] = 0
unvisited.remove(root)
q.put(root)
while not q.empty():
node = q.get()
children = self.edges[node]
height = distances[node]
for child in children:
if child in unvisited:
distances[child] = height + 6
unvisited.remove(child)
q.put(child)
distances.pop(root)
print(" ".join(map(str,distances)))
t = int(input())
for i in range(t):
n,m = [int(value) for value in input().split()]
graph = Graph(n)
for i in range(m):
x,y = [int(x) for x in input().split()]
graph.connect(x-1,y-1)
s = int(input())
graph.find_all_distances(s-1)
|
homeassistant/components/volkszaehler/__init__.py | domwillcode/home-assistant | 30,023 | 12629011 | """The volkszaehler component."""
|
picoCTF-web/api/common.py | minhnq1618/picoCTF | 280 | 12629022 | """Classes and functions used by multiple modules in the system."""
import uuid
from hashlib import md5
import bcrypt
from voluptuous import Invalid, MultipleInvalid
def token():
"""
Generate a random but insecure token.
Returns:
The randomly generated token
"""
return str(uuid.uuid4().hex)
def hash(string):
"""
Hash a string.
Args:
string: string to be hashed.
Returns:
The hex digest of the string.
"""
return md5(string.encode("utf-8")).hexdigest()
class PicoException(Exception):
"""
General class for exceptions in the picoCTF API.
Allows specification of a message and response code to display to the
client, as well as an optional field for arbitrary data.
The 'data' field will not be displayed to clients but will be stored
in the database, making it ideal for storing stack traces, etc.
"""
def __init__(self, message, status_code=500, data=None):
"""Initialize a new PicoException."""
Exception.__init__(self)
self.message = message
self.status_code = status_code
self.data = data
def to_dict(self):
"""Convert a PicoException to a dict for serialization."""
rv = dict()
rv["message"] = self.message
return rv
def check(*callback_tuples):
"""
Voluptuous wrapper function to raise our PicoException.
Args:
callback_tuples: a callback_tuple should contain
            (msg, callbacks)
Returns:
Returns a function callback for the Schema
"""
def v(value):
"""
Try to validate the value with the given callbacks.
Args:
value: the item to validate
Raises:
PicoException with 400 status code and error msg.
Returns:
The value if the validation callbacks are satisfied.
"""
for msg, callbacks in callback_tuples:
for callback in callbacks:
try:
result = callback(value)
if not result and type(result) == bool:
raise Invalid()
except Exception:
raise PicoException(msg, 400)
return value
return v
def validate(schema, data):
"""
Wrap the call to voluptuous schema to raise the proper exception.
Args:
schema: The voluptuous Schema object
data: The validation data for the schema object
Raises:
PicoException with 400 status code and the voluptuous error message
"""
try:
schema(data)
except MultipleInvalid as error:
raise PicoException(error.msg, 400)
def hash_password(password):
"""
Hash plaintext password.
Args:
        password: the plaintext password to hash.
Returns:
Secure hash of password.
"""
return bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt(8))
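# --- Usage sketch (editor-added, not part of the original module) ---------------
# Illustrates how check() and validate() are meant to be combined with a
# voluptuous Schema. The field name and length bounds are made up for the example.
if __name__ == "__main__":
    from voluptuous import Required, Schema
    user_schema = Schema(
        {
            Required("username"): check(
                (
                    "Usernames must be 3-20 characters.",
                    [lambda name: 3 <= len(name) <= 20],
                )
            )
        }
    )
    validate(user_schema, {"username": "alice"})  # passes silently
    try:
        validate(user_schema, {"username": "ab"})  # too short
    except PicoException as e:
        print(e.status_code, e.message)  # 400 Usernames must be 3-20 characters.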
|
fedml_experiments/distributed/classical_vertical_fl/main_vfl.py | xuwanwei/FedML | 1,120 | 12629058 | import argparse
import logging
import os
import random
import socket
import sys
from sklearn.utils import shuffle
import numpy as np
import psutil
import setproctitle
import torch
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "../../../")))
from fedml_api.model.finance.vfl_classifier import VFLClassifier
from fedml_api.model.finance.vfl_feature_extractor import VFLFeatureExtractor
from fedml_api.data_preprocessing.lending_club_loan.lending_club_dataset import loan_load_three_party_data
from fedml_api.data_preprocessing.NUS_WIDE.nus_wide_dataset import NUS_WIDE_load_three_party_data
from fedml_api.distributed.classical_vertical_fl.vfl_api import FedML_VFL_distributed
from fedml_api.distributed.fedavg.FedAvgAPI import FedML_init
def add_args(parser):
"""
parser : argparse.ArgumentParser
return a parser added with args required by fit
"""
parser.add_argument('--dataset', type=str, default='lending_club_loan', metavar='N',
help='dataset used for training')
parser.add_argument('--client_number', type=int, default=2, metavar='NN',
help='number of workers in a distributed cluster')
parser.add_argument('--comm_round', type=int, default=100,
                        help='how many rounds of communication we should use')
parser.add_argument('--batch_size', type=int, default=256, metavar='N',
                        help='input batch size for training (default: 256)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
parser.add_argument('--frequency_of_the_test', type=int, default=30,
                        help='how often (in communication rounds) to run evaluation')
args = parser.parse_args()
return args
def init_training_device(process_ID, fl_worker_num, gpu_num_per_machine):
# initialize the mapping from process ID to GPU ID: <process ID, GPU ID>
if process_ID == 0:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
return device
process_gpu_dict = dict()
for client_index in range(fl_worker_num):
gpu_index = client_index % gpu_num_per_machine
process_gpu_dict[client_index] = gpu_index
logging.info(process_gpu_dict)
device = torch.device("cuda:" + str(process_gpu_dict[process_ID - 1]) if torch.cuda.is_available() else "cpu")
logging.info(device)
return device
if __name__ == "__main__":
# initialize distributed computing (MPI)
comm, process_id, worker_number = FedML_init()
# parse python script input parameters
parser = argparse.ArgumentParser()
args = add_args(parser)
# customize the process name
str_process_name = "Federated Learning:" + str(process_id)
setproctitle.setproctitle(str_process_name)
# customize the log format
logging.basicConfig(level=logging.INFO,
format=str(
process_id) + ' - %(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S')
hostname = socket.gethostname()
logging.info("#############process ID = " + str(process_id) +
", host name = " + hostname + "########" +
", process ID = " + str(os.getpid()) +
", process Name = " + str(psutil.Process(os.getpid())))
# Set the random seed. The np.random seed determines the dataset partition.
# The torch_manual_seed determines the initial weight.
# We fix these two, so that we can reproduce the result.
seed = 0
np.random.seed(seed)
torch.manual_seed(worker_number)
random.seed(0)
# GPU management
logging.info("process_id = %d, size = %d" % (process_id, worker_number))
device = init_training_device(process_id, worker_number-1, 4)
# load data
print("################################ Prepare Data ############################")
if args.dataset == "lending_club_loan":
data_dir = "../../../data/lending_club_loan/"
train, test = loan_load_three_party_data(data_dir)
elif args.dataset == "NUS_WIDE":
data_dir = "../../../data/NUS_WIDE"
class_lbls = ['person', 'animal']
train, test = NUS_WIDE_load_three_party_data(data_dir, class_lbls, neg_label=0)
else:
data_dir = "../../../data/lending_club_loan/"
train, test = loan_load_three_party_data(data_dir)
Xa_train, Xb_train, Xc_train, y_train = train
Xa_test, Xb_test, Xc_test, y_test = test
Xa_train, Xb_train, Xc_train, y_train = shuffle(Xa_train, Xb_train, Xc_train, y_train)
Xa_test, Xb_test, Xc_test, y_test = shuffle(Xa_test, Xb_test, Xc_test, y_test)
train = [Xa_train, Xb_train, Xc_train, y_train]
test = [Xa_test, Xb_test, Xc_test, y_test]
guest_data = [Xa_train, y_train, Xa_test, y_test]
host_data = None
if process_id == 1:
host_data = [Xb_train, Xb_test]
elif process_id == 2:
host_data = [Xc_train, Xc_test]
# create models for each worker
if process_id == 0:
guest_feature_extractor = VFLFeatureExtractor(input_dim=Xa_train.shape[1], output_dim=10).to(device)
guest_classifier = VFLClassifier(guest_feature_extractor.get_output_dim(), 1).to(device)
guest_model = [guest_feature_extractor, guest_classifier]
host_model = [None, None]
elif process_id == 1:
host_feature_extractor = VFLFeatureExtractor(input_dim=Xb_train.shape[1], output_dim=10).to(device)
host_classifier = VFLClassifier(host_feature_extractor.get_output_dim(), 1).to(device)
host_model = [host_feature_extractor, host_classifier]
guest_model = [None, None]
elif process_id == 2:
host_feature_extractor = VFLFeatureExtractor(input_dim=Xc_train.shape[1], output_dim=10).to(device)
host_classifier = VFLClassifier(host_feature_extractor.get_output_dim(), 1).to(device)
host_model = [host_feature_extractor, host_classifier]
guest_model = [None, None]
else:
guest_model = [None, None]
host_model = [None, None]
FedML_VFL_distributed(process_id, worker_number, comm, args, device, guest_data, guest_model, host_data, host_model)
|
glance/tests/unit/common/test_swift_store_utils.py | daespinel/glance | 309 | 12629072 | # Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from glance.common import exception
from glance.common import swift_store_utils
from glance.tests.unit import base
class TestSwiftParams(base.IsolatedUnitTest):
def setUp(self):
super(TestSwiftParams, self).setUp()
conf_file = "glance-swift.conf"
test_dir = self.useFixture(fixtures.TempDir()).path
self.swift_config_file = self._copy_data_file(conf_file, test_dir)
self.config(swift_store_config_file=self.swift_config_file)
def test_multiple_swift_account_enabled(self):
self.config(swift_store_config_file="glance-swift.conf")
self.assertTrue(
swift_store_utils.is_multiple_swift_store_accounts_enabled())
def test_multiple_swift_account_disabled(self):
self.config(swift_store_config_file=None)
self.assertFalse(
swift_store_utils.is_multiple_swift_store_accounts_enabled())
def test_swift_config_file_doesnt_exist(self):
self.config(swift_store_config_file='fake-file.conf')
self.assertRaises(exception.InvalidSwiftStoreConfiguration,
swift_store_utils.SwiftParams)
def test_swift_config_uses_default_values_multiple_account_disabled(self):
default_user = 'user_default'
default_key = 'key_default'
default_auth_address = 'auth@default.<EMAIL>'
default_account_reference = 'ref_default'
confs = {'swift_store_config_file': None,
'swift_store_user': default_user,
'swift_store_key': default_key,
'swift_store_auth_address': default_auth_address,
'default_swift_reference': default_account_reference}
self.config(**confs)
swift_params = swift_store_utils.SwiftParams().params
self.assertEqual(1, len(swift_params.keys()))
self.assertEqual(default_user,
swift_params[default_account_reference]['user']
)
self.assertEqual(default_key,
swift_params[default_account_reference]['key']
)
self.assertEqual(default_auth_address,
swift_params[default_account_reference]
['auth_address']
)
def test_swift_store_config_validates_for_creds_auth_address(self):
swift_params = swift_store_utils.SwiftParams().params
self.assertEqual('tenant:user1',
swift_params['ref1']['user']
)
self.assertEqual('key1',
swift_params['ref1']['key']
)
self.assertEqual('example.com',
swift_params['ref1']['auth_address'])
self.assertEqual('user2',
swift_params['ref2']['user'])
self.assertEqual('key2',
swift_params['ref2']['key'])
self.assertEqual('http://example.com',
swift_params['ref2']['auth_address']
)
|
plenum/test/simulation/sim_random.py | jandayanan/indy-plenum | 148 | 12629085 | import string
from abc import ABC, abstractmethod
from random import Random
from typing import Any, Iterable, List, Optional
class SimRandom(ABC):
@abstractmethod
def integer(self, min_value: int, max_value: int) -> int:
pass
@abstractmethod
def float(self, min_value: float, max_value: float) -> float:
pass
@abstractmethod
def string(self, min_len: int, max_len: Optional[int] = None,
alphabet: Optional[str] = string.ascii_letters + string.digits) -> str:
pass
@abstractmethod
def choice(self, *args) -> Any:
pass
@abstractmethod
def sample(self, population: List, num: int) -> List:
pass
@abstractmethod
def shuffle(self, items: List) -> List:
pass
class DefaultSimRandom(SimRandom):
def __init__(self, seed=0):
self._random = Random(seed)
def integer(self, min_value: int, max_value: int) -> int:
return self._random.randint(min_value, max_value)
def float(self, min_value: float, max_value: float) -> float:
return self._random.uniform(min_value, max_value)
def string(self, min_len: int, max_len: Optional[int] = None,
alphabet: Optional[str] = string.ascii_letters + string.digits) -> str:
if max_len is None:
max_len = min_len
_len = self.integer(min_len, max_len)
return ''.join(self.choice(*alphabet) for _ in range(_len))
def choice(self, *args) -> Any:
return self._random.choice(args)
def sample(self, population: Iterable, num: int) -> List:
return self._random.sample(population, num)
def shuffle(self, items: List) -> List:
result = items.copy()
self._random.shuffle(result)
return result
|
Chapter13/lib/ksy/rfp_server.py | feiwang20/DRLHandsOn-Playground | 2,497 | 12629090 | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
import array
import struct
import zlib
from enum import Enum
from pkg_resources import parse_version
from kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO
if parse_version(ks_version) < parse_version('0.7'):
raise Exception("Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version))
class RfpServer(KaitaiStruct):
class MessageType(Enum):
fb_update = 0
set_colormap = 1
bell = 2
cut_text = 3
class Encoding(Enum):
raw = 0
copy_rect = 1
rre = 2
zrle = 16
cursor = 4294967057
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.header = self._root.Header(self._io, self, self._root)
self.messages = []
while not self._io.is_eof():
self.messages.append(self._root.Message(self._io, self, self._root))
class RectZrleEncoding(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.length = self._io.read_u4be()
self.data = self._io.read_bytes(self.length)
class RectCursorPseudoEncoding(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.data = self._io.read_bytes((((self._parent.header.width * self._parent.header.height) * self._root.header.server_init.pixel_format.bpp // 8) + (self._parent.header.height * ((self._parent.header.width + 7) >> 3))))
class RectCopyRectEncoding(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.data = self._io.read_bytes(4)
class RectRawEncoding(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.data = self._io.read_bytes(((self._parent.header.width * self._parent.header.height) * self._root.header.server_init.pixel_format.bpp // 8))
class PixelFormat(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.bpp = self._io.read_u1()
self.depth = self._io.read_u1()
self.big_endian = self._io.read_u1()
self.true_color = self._io.read_u1()
self.red_max = self._io.read_u2be()
self.green_max = self._io.read_u2be()
self.blue_max = self._io.read_u2be()
self.red_shift = self._io.read_u1()
self.green_shift = self._io.read_u1()
self.blue_shift = self._io.read_u1()
self.padding = self._io.read_bytes(3)
class RectHeader(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.pos_x = self._io.read_u2be()
self.pos_y = self._io.read_u2be()
self.width = self._io.read_u2be()
self.height = self._io.read_u2be()
self.encoding = self._root.Encoding(self._io.read_u4be())
class RectRreEncoding(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.subrects_count = self._io.read_u4be()
self.background = self._io.read_bytes(self._root.header.server_init.pixel_format.bpp // 8)
self.data = self._io.read_bytes((self.subrects_count * (self._root.header.server_init.pixel_format.bpp // 8 + 8)))
class MsgSetColormap(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.padding = self._io.read_bytes(1)
self.first_color = self._io.read_u2be()
self.number_colors = self._io.read_u2be()
self.data = self._io.read_bytes((self.number_colors * 6))
class MsgBell(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.empty = self._io.read_bytes(0)
class MsgCutText(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.padding = self._io.read_bytes(3)
self.length = self._io.read_u4be()
self.text = (self._io.read_bytes(self.length)).decode(u"ascii")
class Header(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.magic = (self._io.read_bytes_term(10, False, True, True)).decode(u"ascii")
self.some_data = self._io.read_bytes(4)
self.challenge = self._io.read_bytes(16)
self.security_status = self._io.read_u4be()
self.server_init = self._root.ServerInit(self._io, self, self._root)
class Message(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.message_type = self._root.MessageType(self._io.read_u1())
_on = self.message_type
if _on == self._root.MessageType.fb_update:
self.message_body = self._root.MsgFbUpdate(self._io, self, self._root)
elif _on == self._root.MessageType.set_colormap:
self.message_body = self._root.MsgSetColormap(self._io, self, self._root)
elif _on == self._root.MessageType.bell:
self.message_body = self._root.MsgBell(self._io, self, self._root)
elif _on == self._root.MessageType.cut_text:
self.message_body = self._root.MsgCutText(self._io, self, self._root)
class MsgFbUpdate(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.padding = self._io.read_u1()
self.rects_count = self._io.read_u2be()
self.rects = [None] * (self.rects_count)
for i in range(self.rects_count):
self.rects[i] = self._root.Rectangle(self._io, self, self._root)
class Rectangle(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.header = self._root.RectHeader(self._io, self, self._root)
_on = self.header.encoding
if _on == self._root.Encoding.rre:
self.body = self._root.RectRreEncoding(self._io, self, self._root)
elif _on == self._root.Encoding.raw:
self.body = self._root.RectRawEncoding(self._io, self, self._root)
elif _on == self._root.Encoding.cursor:
self.body = self._root.RectCursorPseudoEncoding(self._io, self, self._root)
elif _on == self._root.Encoding.copy_rect:
self.body = self._root.RectCopyRectEncoding(self._io, self, self._root)
elif _on == self._root.Encoding.zrle:
self.body = self._root.RectZrleEncoding(self._io, self, self._root)
class ServerInit(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.width = self._io.read_u2be()
self.height = self._io.read_u2be()
self.pixel_format = self._root.PixelFormat(self._io, self, self._root)
self.name_len = self._io.read_u4be()
self.name = (self._io.read_bytes(self.name_len)).decode(u"ascii")
|
etl/parsers/etw/Microsoft_Windows_NlaSvc.py | IMULMUL/etl-parser | 104 | 12629106 | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-NlaSvc
GUID : 63b530f8-29c9-4880-a5b4-b8179096e7b8
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4001, version=0)
class Microsoft_Windows_NlaSvc_4001_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"CurrentOrNextState" / Int8ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4002, version=0)
class Microsoft_Windows_NlaSvc_4002_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"CurrentOrNextState" / Int8ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4101, version=0)
class Microsoft_Windows_NlaSvc_4101_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"AdapterName" / WString
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4102, version=0)
class Microsoft_Windows_NlaSvc_4102_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"AdapterName" / WString
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4103, version=0)
class Microsoft_Windows_NlaSvc_4103_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"MibNotificationType" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4104, version=0)
class Microsoft_Windows_NlaSvc_4104_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"MibNotificationType" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4203, version=0)
class Microsoft_Windows_NlaSvc_4203_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"GatewayIpAddress" / WString
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4204, version=0)
class Microsoft_Windows_NlaSvc_4204_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"GatewayIpAddress" / WString,
"ErrorCode" / Int32ul,
"NlnsState" / Int32ul,
"MacAddrLen" / Int16ul,
"MacAddr" / Bytes(lambda this: this.MacAddrLen)
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4205, version=0)
class Microsoft_Windows_NlaSvc_4205_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"GatewayIpAddress" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4251, version=0)
class Microsoft_Windows_NlaSvc_4251_0(Etw):
pattern = Struct(
"PluginName" / WString,
"EntityName" / WString,
"IndicatedRowCount" / Int16ul,
"RowsWithInterfacesIndicatedCount" / Int16ul,
"RowInterfaceGuid" / Guid
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4261, version=0)
class Microsoft_Windows_NlaSvc_4261_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"NlaState" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4311, version=0)
class Microsoft_Windows_NlaSvc_4311_0(Etw):
pattern = Struct(
"DnsSuffix" / WString,
"Flags" / Int32ul,
"ErrorCode" / Int32ul,
"RetrievedDomain" / WString,
"RetrievedForest" / WString
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4312, version=0)
class Microsoft_Windows_NlaSvc_4312_0(Etw):
pattern = Struct(
"DnsSuffix" / WString,
"Flags" / Int32ul,
"ErrorCode" / Int32ul,
"RetrievedDomain" / WString,
"RetrievedForest" / WString
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4313, version=0)
class Microsoft_Windows_NlaSvc_4313_0(Etw):
pattern = Struct(
"DnsSuffix" / WString,
"Flags" / Int32ul,
"ErrorCode" / Int32ul,
"RetrievedDomain" / WString,
"RetrievedForest" / WString
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4321, version=0)
class Microsoft_Windows_NlaSvc_4321_0(Etw):
pattern = Struct(
"DnsSuffix" / WString,
"Flags" / Int32ul,
"ErrorCode" / Int32ul,
"RetrievedDomain" / WString,
"RetrievedForest" / WString
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4322, version=0)
class Microsoft_Windows_NlaSvc_4322_0(Etw):
pattern = Struct(
"DnsSuffix" / WString,
"Flags" / Int32ul,
"ErrorCode" / Int32ul,
"RetrievedDomain" / WString,
"RetrievedForest" / WString
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4323, version=0)
class Microsoft_Windows_NlaSvc_4323_0(Etw):
pattern = Struct(
"DnsSuffix" / WString,
"Flags" / Int32ul,
"ErrorCode" / Int32ul,
"RetrievedDomain" / WString,
"RetrievedForest" / WString
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4331, version=0)
class Microsoft_Windows_NlaSvc_4331_0(Etw):
pattern = Struct(
"DnsSuffix" / WString,
"Flags" / Int32ul,
"ErrorCode" / Int32ul,
"RetrievedDomain" / WString,
"RetrievedForest" / WString
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4332, version=0)
class Microsoft_Windows_NlaSvc_4332_0(Etw):
pattern = Struct(
"DnsSuffix" / WString,
"Flags" / Int32ul,
"ErrorCode" / Int32ul,
"RetrievedDomain" / WString,
"RetrievedForest" / WString
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4333, version=0)
class Microsoft_Windows_NlaSvc_4333_0(Etw):
pattern = Struct(
"DnsSuffix" / WString,
"Flags" / Int32ul,
"ErrorCode" / Int32ul,
"RetrievedDomain" / WString,
"RetrievedForest" / WString
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4341, version=0)
class Microsoft_Windows_NlaSvc_4341_0(Etw):
pattern = Struct(
"InterfaceName" / WString,
"Addresses" / WString,
"TryCount" / Int32ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4342, version=0)
class Microsoft_Windows_NlaSvc_4342_0(Etw):
pattern = Struct(
"InterfaceName" / WString,
"Addresses" / WString,
"TryCount" / Int32ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4343, version=0)
class Microsoft_Windows_NlaSvc_4343_0(Etw):
pattern = Struct(
"InterfaceName" / WString,
"Addresses" / WString,
"TryCount" / Int32ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4351, version=0)
class Microsoft_Windows_NlaSvc_4351_0(Etw):
pattern = Struct(
"Addresses" / WString,
"DcName" / WString,
"TryNumber" / Int32ul,
"TryCount" / Int32ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4352, version=0)
class Microsoft_Windows_NlaSvc_4352_0(Etw):
pattern = Struct(
"Addresses" / WString,
"DcName" / WString,
"TryNumber" / Int32ul,
"TryCount" / Int32ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4353, version=0)
class Microsoft_Windows_NlaSvc_4353_0(Etw):
pattern = Struct(
"Addresses" / WString,
"DcName" / WString,
"TryNumber" / Int32ul,
"TryCount" / Int32ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4354, version=0)
class Microsoft_Windows_NlaSvc_4354_0(Etw):
pattern = Struct(
"Addresses" / WString,
"DcName" / WString,
"TryNumber" / Int32ul,
"TryCount" / Int32ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4355, version=0)
class Microsoft_Windows_NlaSvc_4355_0(Etw):
pattern = Struct(
"Addresses" / WString,
"DcName" / WString,
"TryNumber" / Int32ul,
"TryCount" / Int32ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4356, version=0)
class Microsoft_Windows_NlaSvc_4356_0(Etw):
pattern = Struct(
"Addresses" / WString,
"DcName" / WString,
"TryNumber" / Int32ul,
"TryCount" / Int32ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4401, version=0)
class Microsoft_Windows_NlaSvc_4401_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"SignatureLength" / Int16ul,
"Signature" / Bytes(lambda this: this.SignatureLength),
"SignatureSource" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4402, version=0)
class Microsoft_Windows_NlaSvc_4402_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"SignatureLength" / Int16ul,
"Signature" / Bytes(lambda this: this.SignatureLength),
"SignatureSource" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4403, version=0)
class Microsoft_Windows_NlaSvc_4403_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"SignatureLength" / Int16ul,
"Signature" / Bytes(lambda this: this.SignatureLength),
"SignatureSource" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4404, version=0)
class Microsoft_Windows_NlaSvc_4404_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"SignatureLength" / Int16ul,
"Signature" / Bytes(lambda this: this.SignatureLength),
"SignatureSource" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4405, version=0)
class Microsoft_Windows_NlaSvc_4405_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"SignatureLength" / Int16ul,
"Signature" / Bytes(lambda this: this.SignatureLength),
"SignatureSource" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4407, version=0)
class Microsoft_Windows_NlaSvc_4407_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"AdapterName" / WString
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4408, version=0)
class Microsoft_Windows_NlaSvc_4408_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"AdapterName" / WString
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4409, version=0)
class Microsoft_Windows_NlaSvc_4409_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"AdapterName" / WString
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4410, version=0)
class Microsoft_Windows_NlaSvc_4410_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"SignatureLength" / Int16ul,
"Signature" / Bytes(lambda this: this.SignatureLength),
"SignatureSource" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4411, version=0)
class Microsoft_Windows_NlaSvc_4411_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"SignatureLength" / Int16ul,
"Signature" / Bytes(lambda this: this.SignatureLength),
"SignatureSource" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=4451, version=0)
class Microsoft_Windows_NlaSvc_4451_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"AuthCapUnlikelyReason" / Int32ul,
"SpeculativeTimeout" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=5001, version=0)
class Microsoft_Windows_NlaSvc_5001_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"NlaState" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=5002, version=0)
class Microsoft_Windows_NlaSvc_5002_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"SignatureSource" / Int32ul,
"SignatureCharacteristics" / Int32ul
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=6101, version=0)
class Microsoft_Windows_NlaSvc_6101_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"AdapterName" / WString
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=6102, version=0)
class Microsoft_Windows_NlaSvc_6102_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"AdapterName" / WString
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=6103, version=0)
class Microsoft_Windows_NlaSvc_6103_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"AdapterName" / WString
)
@declare(guid=guid("63b530f8-29c9-4880-a5b4-b8179096e7b8"), event_id=6104, version=0)
class Microsoft_Windows_NlaSvc_6104_0(Etw):
pattern = Struct(
"InterfaceGuid" / Guid,
"AdapterName" / WString
)
|
tests/ignite/handlers/test_stores.py | Juddd/ignite | 4,119 | 12629126 | import pytest
from ignite.engine.engine import Engine, Events
from ignite.handlers import EpochOutputStore
@pytest.fixture
def dummy_evaluator():
def dummy_process_function(engine, batch):
return 1, 0
dummy_evaluator = Engine(dummy_process_function)
return dummy_evaluator
@pytest.fixture
def eos():
return EpochOutputStore()
def test_no_transform(dummy_evaluator, eos):
eos.attach(dummy_evaluator)
dummy_evaluator.run(range(1))
assert eos.data == [(1, 0)]
def test_transform(dummy_evaluator):
eos = EpochOutputStore(output_transform=lambda x: x[0])
eos.attach(dummy_evaluator)
dummy_evaluator.run(range(1))
assert eos.data == [1]
def test_reset(dummy_evaluator, eos):
eos.attach(dummy_evaluator)
dummy_evaluator.run(range(2))
eos.reset()
assert eos.data == []
def test_update_one_iteration(dummy_evaluator, eos):
eos.attach(dummy_evaluator)
dummy_evaluator.run(range(1))
assert len(eos.data) == 1
def test_update_five_iterations(dummy_evaluator, eos):
eos.attach(dummy_evaluator)
dummy_evaluator.run(range(5))
assert len(eos.data) == 5
def test_attatch(dummy_evaluator, eos):
eos.attach(dummy_evaluator)
assert dummy_evaluator.has_event_handler(eos.reset, Events.EPOCH_STARTED)
assert dummy_evaluator.has_event_handler(eos.update, Events.ITERATION_COMPLETED)
def test_store_data(dummy_evaluator, eos):
eos.attach(dummy_evaluator, name="eval_data")
dummy_evaluator.run(range(1))
assert dummy_evaluator.state.eval_data == eos.data
|
tests/_apis/team_fight_tactics/test_SummonerApi.py | TheBoringBakery/Riot-Watcher | 489 | 12629149 | from unittest.mock import MagicMock
import pytest
from riotwatcher._apis.team_fight_tactics import SummonerApi
@pytest.mark.tft
@pytest.mark.unit
class TestSummonerApi:
def test_by_account(self):
mock_base_api = MagicMock()
expected_return = object()
mock_base_api.raw_request.return_value = expected_return
summoner = SummonerApi(mock_base_api)
region = "afas"
encrypted_account_id = "15asf2-54321"
ret = summoner.by_account(region, encrypted_account_id)
mock_base_api.raw_request.assert_called_once_with(
SummonerApi.__name__,
summoner.by_account.__name__,
region,
f"https://{region}.api.riotgames.com/tft/summoner/v1/summoners/by-account/{encrypted_account_id}",
{},
)
assert ret is expected_return
def test_by_name(self):
mock_base_api = MagicMock()
expected_return = object()
mock_base_api.raw_request.return_value = expected_return
summoner = SummonerApi(mock_base_api)
region = "afas"
summoner_name = "pseudonym117"
ret = summoner.by_name(region, summoner_name)
mock_base_api.raw_request.assert_called_once_with(
SummonerApi.__name__,
summoner.by_name.__name__,
region,
f"https://{region}.api.riotgames.com/tft/summoner/v1/summoners/by-name/{summoner_name}",
{},
)
assert ret is expected_return
def test_by_puuid(self):
mock_base_api = MagicMock()
expected_return = object()
mock_base_api.raw_request.return_value = expected_return
summoner = SummonerApi(mock_base_api)
region = "afas"
puuid = "15462gsfg321"
ret = summoner.by_puuid(region, puuid)
mock_base_api.raw_request.assert_called_once_with(
SummonerApi.__name__,
summoner.by_puuid.__name__,
region,
f"https://{region}.api.riotgames.com/tft/summoner/v1/summoners/by-puuid/{puuid}",
{},
)
assert ret is expected_return
def test_by_id(self):
mock_base_api = MagicMock()
expected_return = object()
mock_base_api.raw_request.return_value = expected_return
summoner = SummonerApi(mock_base_api)
region = "afas"
encrypted_summoner_id = "sdfgasg222"
ret = summoner.by_id(region, encrypted_summoner_id)
mock_base_api.raw_request.assert_called_once_with(
SummonerApi.__name__,
summoner.by_id.__name__,
region,
f"https://{region}.api.riotgames.com/tft/summoner/v1/summoners/{encrypted_summoner_id}",
{},
)
assert ret is expected_return
|
lhotse/recipes/commonvoice.py | stachu86/lhotse | 353 | 12629166 | """
Official description from the "about" page of the Mozilla CommonVoice project
(source link: https://commonvoice.mozilla.org/en/about)
Why Common Voice?
Mozilla Common Voice is an initiative to help teach machines how real people speak.
This project is an effort to bridge the digital speech divide. Voice recognition technologies bring a human dimension to our devices, but developers need an enormous amount of voice data to build them. Currently, most of that data is expensive and proprietary. We want to make voice data freely and publicly available, and make sure the data represents the diversity of real people. Together we can make voice recognition better for everyone.
How does it work?
We’re crowdsourcing an open-source dataset of voices. Donate your voice, validate the accuracy of other people’s clips, make the dataset better for everyone.
"""
import logging
import shutil
import tarfile
import warnings
from collections import defaultdict
from pathlib import Path
from typing import Any, Dict, Iterable, Optional, Sequence, Tuple, Union
from tqdm.auto import tqdm
from lhotse import load_manifest, validate_recordings_and_supervisions
from lhotse.audio import Recording, RecordingSet
from lhotse.supervision import SupervisionSegment, SupervisionSet
from lhotse.utils import Pathlike, is_module_available, urlretrieve_progress
DEFAULT_COMMONVOICE_URL = "https://voice-prod-bundler-ee1969a6ce8178826482b88e843c335139bd3fb4.s3.amazonaws.com"
DEFAULT_COMMONVOICE_RELEASE = "cv-corpus-5.1-2020-06-22"
COMMONVOICE_LANGS = "en de fr cy tt kab ca zh-TW it fa eu es ru tr nl eo zh-CN rw pt zh-HK cs pl uk".split()
COMMONVOICE_SPLITS = ("train", "dev", "test", "validated", "invalidated", "other")
COMMONVOICE_DEFAULT_SPLITS = ("train", "dev", "test")
# TODO: a mapping from language codes (e.g., "en") to actual language names (e.g., "US English")
COMMONVOICE_CODE2LANG = {}
def download_commonvoice(
target_dir: Pathlike = ".",
languages: Union[str, Iterable[str]] = "all",
force_download: bool = False,
base_url: str = DEFAULT_COMMONVOICE_URL,
release: str = DEFAULT_COMMONVOICE_RELEASE,
) -> None:
"""
Download and untar the CommonVoice dataset.
    :param target_dir: Pathlike, the path of the dir to store the dataset.
:param languages: one of: 'all' (downloads all known languages); a single language code (e.g., 'en'),
or a list of language codes.
    :param force_download: Bool, if True, download the tars even if they already exist.
:param base_url: str, the base URL for CommonVoice.
:param release: str, the name of the CommonVoice release (e.g., "cv-corpus-5.1-2020-06-22").
It is used as part of the download URL.
"""
# note(pzelasko): This code should work in general if we supply the right URL,
# but the URL stopped working during the development of this script --
# I'm not going to fight this, maybe somebody else would be interested to pick it up.
raise NotImplementedError(
"CommonVoice requires you to enter e-mail to download the data"
"-- please download it manually for now. "
"We are open to contributions to support downloading CV via lhotse."
)
target_dir = Path(target_dir)
target_dir.mkdir(parents=True, exist_ok=True)
url = f"{base_url}/{release}"
if languages == "all":
languages = COMMONVOICE_LANGS
elif isinstance(languages, str):
languages = [languages]
else:
languages = list(languages)
logging.info(
f"About to download {len(languages)} CommonVoice languages: {languages}"
)
for lang in tqdm(languages, desc="Downloading CommonVoice languages"):
logging.info(f"Language: {lang}")
        # Split directory exists and seems valid? Skip this split.
part_dir = target_dir / release / lang
completed_detector = part_dir / ".completed"
if completed_detector.is_file():
logging.info(f"Skipping {lang} because {completed_detector} exists.")
continue
# Maybe-download the archive.
tar_name = f"{lang}.tar.gz"
tar_path = target_dir / tar_name
if force_download or not tar_path.is_file():
            urlretrieve_progress(f"{url}/{tar_name}", filename=tar_path, desc=f"Downloading {tar_name}")
logging.info(f"Downloading finished: {lang}")
# Remove partial unpacked files, if any, and unpack everything.
logging.info(f"Unpacking archive: {lang}")
shutil.rmtree(part_dir, ignore_errors=True)
with tarfile.open(tar_path) as tar:
tar.extractall(path=target_dir)
completed_detector.touch()
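# A minimal invocation sketch for download_commonvoice() with hypothetical
# arguments; note that the function above currently raises NotImplementedError,
# so in practice the archives must be fetched manually and extracted into
# target_dir/<release>/<lang>.
def _example_download_commonvoice():
    download_commonvoice(
        target_dir="/data/commonvoice",
        languages=["pl", "cs"],
        release=DEFAULT_COMMONVOICE_RELEASE,
    )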
def prepare_commonvoice(
corpus_dir: Pathlike,
output_dir: Pathlike,
languages: Union[str, Sequence[str]] = "auto",
splits: Union[str, Sequence[str]] = COMMONVOICE_DEFAULT_SPLITS,
num_jobs: int = 1,
) -> Dict[str, Dict[str, Dict[str, Union[RecordingSet, SupervisionSet]]]]:
"""
Returns the manifests which consist of the Recordings and Supervisions.
When all the manifests are available in the ``output_dir``, it will simply read and return them.
This function expects the input directory structure of::
>>> metadata_path = corpus_dir / language_code / "{train,dev,test}.tsv"
>>> # e.g. pl_train_metadata_path = "/path/to/cv-corpus-7.0-2021-07-21/pl/train.tsv"
>>> audio_path = corpus_dir / language_code / "clips"
>>> # e.g. pl_audio_path = "/path/to/cv-corpus-7.0-2021-07-21/pl/clips"
Returns a dict with 3-level structure (lang -> split -> manifest-type)::
>>> {'en/fr/pl/...': {'train/dev/test': {'recordings/supervisions': manifest}}}
:param corpus_dir: Pathlike, the path to the downloaded corpus.
:param output_dir: Pathlike, the path where to write the manifests.
:param languages: 'auto' (prepare all discovered data) or a list of language codes.
:param splits: by default ``['train', 'dev', 'test']``, can also include
``'validated'``, ``'invalidated'``, and ``'other'``.
:param num_jobs: How many concurrent workers to use for scanning of the audio files.
    :return: a dict with manifests for all specified languages and their train/dev/test splits.
"""
if not is_module_available("pandas"):
raise ValueError(
"To prepare CommonVoice data, please 'pip install pandas' first."
)
if num_jobs > 1:
warnings.warn(
"num_jobs>1 currently not supported for CommonVoice data prep;"
"setting to 1."
)
corpus_dir = Path(corpus_dir)
assert corpus_dir.is_dir(), f"No such directory: {corpus_dir}"
assert output_dir is not None, (
"CommonVoice recipe requires to specify the output "
"manifest directory (output_dir cannot be None)."
)
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
if languages == "auto":
languages = set(COMMONVOICE_LANGS).intersection(
path.name for path in corpus_dir.glob("*")
)
if not languages:
raise ValueError(
f"Could not find any of CommonVoice languages in: {corpus_dir}"
)
elif isinstance(languages, str):
languages = [languages]
manifests = {}
for lang in tqdm(languages, desc="Processing CommonVoice languages"):
logging.info(f"Language: {lang}")
lang_path = corpus_dir / lang
# Maybe the manifests already exist: we can read them and save a bit of preparation time.
# Pattern: "cv_recordings_en_train.jsonl.gz" / "cv_supervisions_en_train.jsonl.gz"
lang_manifests = read_cv_manifests_if_cached(
output_dir=output_dir, language=lang
)
for part in splits:
logging.info(f"Split: {part}")
if part in lang_manifests:
logging.info(
f"CommonVoice language: {lang} already prepared - skipping."
)
continue
recording_set, supervision_set = prepare_single_commonvoice_tsv(
lang=lang,
part=part,
output_dir=output_dir,
lang_path=lang_path,
)
lang_manifests[part] = {
"supervisions": supervision_set,
"recordings": recording_set,
}
manifests[lang] = lang_manifests
return manifests
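# A minimal usage sketch for prepare_commonvoice() with hypothetical paths; it
# assumes the corpus was already downloaded and extracted in the layout shown
# in the docstring above, and illustrates how the nested lang -> split ->
# manifest-type result is accessed.
def _example_prepare_commonvoice():
    manifests = prepare_commonvoice(
        corpus_dir="/data/cv-corpus-7.0-2021-07-21",
        output_dir="/data/manifests",
        languages=["pl"],
        splits=("train", "dev", "test"),
    )
    recordings = manifests["pl"]["train"]["recordings"]
    supervisions = manifests["pl"]["train"]["supervisions"]
    return recordings, supervisions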
def prepare_single_commonvoice_tsv(
lang: str,
part: str,
output_dir: Pathlike,
lang_path: Pathlike,
) -> Tuple[RecordingSet, SupervisionSet]:
"""
Prepares part of CommonVoice data from a single TSV file.
:param lang: string language code (e.g., "en").
:param part: which split to prepare (e.g., "train", "validated", etc.).
:param output_dir: path to directory where we will store the manifests.
:param lang_path: path to a CommonVoice directory for a specific language
(e.g., "/path/to/cv-corpus-7.0-2021-07-21/pl").
:return: a tuple of (RecordingSet, SupervisionSet) objects opened in lazy mode,
as CommonVoice manifests may be fairly large in memory.
"""
if not is_module_available("pandas"):
raise ValueError(
"To prepare CommonVoice data, please 'pip install pandas' first."
)
import pandas as pd
lang_path = Path(lang_path)
output_dir = Path(output_dir)
tsv_path = lang_path / f"{part}.tsv"
# Read the metadata
df = pd.read_csv(tsv_path, sep="\t")
# Scan all the audio files
with RecordingSet.open_writer(
output_dir / f"cv_recordings_{lang}_{part}.jsonl.gz",
overwrite=False,
) as recs_writer, SupervisionSet.open_writer(
output_dir / f"cv_supervisions_{lang}_{part}.jsonl.gz",
overwrite=False,
) as sups_writer:
for idx, row in tqdm(
df.iterrows(),
desc="Processing audio files",
total=len(df),
):
try:
result = parse_utterance(row, lang_path, lang)
if result is None:
continue
recording, segment = result
validate_recordings_and_supervisions(recording, segment)
recs_writer.write(recording)
sups_writer.write(segment)
except Exception as e:
logging.error(
f"Error when processing TSV file: line no. {idx}: '{row}'.\n"
f"Original error type: '{type(e)}' and message: {e}"
)
continue
recordings = RecordingSet.from_jsonl_lazy(recs_writer.path)
supervisions = SupervisionSet.from_jsonl_lazy(sups_writer.path)
return recordings, supervisions
def parse_utterance(
row: Any, lang_path: Path, language: str
) -> Tuple[Recording, SupervisionSegment]:
# Create the Recording first
audio_path = lang_path / "clips" / row.path
if not audio_path.is_file():
raise ValueError(f"No such file: {audio_path}")
recording_id = Path(row.path).stem
recording = Recording.from_file(audio_path, recording_id=recording_id)
# Then, create the corresponding supervisions
segment = SupervisionSegment(
id=recording_id,
recording_id=recording_id,
start=0.0,
duration=recording.duration,
channel=0,
# Look up language code => language name mapping (it is empty at the time of writing this comment)
# if the language code is unknown, fall back to using the language code.
language=COMMONVOICE_CODE2LANG.get(language, language),
speaker=row.client_id,
text=row.sentence.strip(),
gender=row.gender if row.gender != "nan" else None,
custom={
"age": row.age if row.age != "nan" else None,
"accent": row.accent if row.accent != "nan" else None,
},
)
return recording, segment
def read_cv_manifests_if_cached(
output_dir: Optional[Pathlike],
language: str,
) -> Dict[str, Dict[str, Union[RecordingSet, SupervisionSet]]]:
"""
Returns:
{'train': {'recordings': ..., 'supervisions': ...}, 'dev': ..., 'test': ...}
"""
if output_dir is None:
return {}
manifests = defaultdict(dict)
for part in ["train", "dev", "test"]:
for manifest in ["recordings", "supervisions"]:
path = output_dir / f"cv_{manifest}_{language}_{part}.jsonl.gz"
if not path.is_file():
continue
manifests[part][manifest] = load_manifest(path)
return manifests
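# A small sketch (hypothetical paths) of using the cache lookup above to skip
# re-preparation when the manifests already exist on disk.
def _example_read_cached_manifests():
    cached = read_cv_manifests_if_cached(output_dir=Path("/data/manifests"), language="pl")
    if "train" in cached:
        return cached["train"]["recordings"], cached["train"]["supervisions"]
    return None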
|
examples/render/render_euler_spiral.py | jkjt/ezdxf | 515 | 12629169 | <reponame>jkjt/ezdxf
# Copyright (c) 2010-2021, <NAME>
# License: MIT License
from pathlib import Path
from math import radians
import ezdxf
from ezdxf.render import EulerSpiral
from ezdxf.math import Matrix44
NAME = "euler_spiral.dxf"
DIR = Path("~/Desktop/Outbox").expanduser()
def four_c(curvature, length, rotation):
spiral = EulerSpiral(curvature=curvature)
render(
spiral, length, tmatrix(2, 2, angle=rotation), dxfattribs={"color": 1}
)
# scaling sx=-1 is mirror about y-axis
render(
spiral,
length,
tmatrix(2, 2, sx=-1, sy=1, angle=rotation),
dxfattribs={"color": 2},
)
# scaling sy=-1 is mirror about x-axis
render(
spiral,
length,
tmatrix(2, 2, sx=1, sy=-1, angle=rotation),
dxfattribs={"color": 3},
)
render(
spiral,
length,
tmatrix(2, 2, sx=-1, sy=-1, angle=rotation),
dxfattribs={"color": 4},
)
def render(spiral, length, matrix, dxfattribs):
spiral.render_polyline(
msp, length, segments=100, matrix=matrix, dxfattribs=dxfattribs
)
spiral.render_spline(
msp,
length,
fit_points=10,
matrix=matrix,
dxfattribs={"color": 6, "linetype": "DASHED"},
)
def tmatrix(dx, dy, sx=1, sy=1, angle=0):
return Matrix44.chain(
Matrix44.scale(sx=sx, sy=sy, sz=1),
Matrix44.z_rotate(radians(angle)),
Matrix44.translate(dx, dy, 0),
)
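def _tmatrix_example():
    # Illustrative sketch only (not called by this script): the chained matrix
    # applies the scaling/mirroring first, then the rotation about the z-axis,
    # and finally the translation to (dx, dy); the values here are arbitrary.
    m = tmatrix(2, 2, sx=-1, sy=1, angle=30)
    return m.transform((1, 0, 0))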
doc = ezdxf.new("R2000", setup=True)
msp = doc.modelspace()
msp.add_line((-20, 0), (20, 0), dxfattribs={"linetype": "PHANTOM"})
msp.add_line((0, -20), (0, 20), dxfattribs={"linetype": "PHANTOM"})
for rotation in [0, 30, 45, 60, 75, 90]:
four_c(10.0, 25, rotation)
fname = DIR / NAME
doc.saveas(fname)
print(f"created: {fname}\n")
|
spytest/tests/routing/BGP/test_bgp_4node.py | shubav/sonic-mgmt | 132 | 12629195 | # BGP 4 node topology test cases
import pytest
from spytest import st, utils
import apis.routing.ip as ipapi
import apis.routing.bgp as bgpapi
import BGP.bgp4nodelib as bgp4nodelib
from spytest.utils import exec_all
from utilities.common import poll_wait
@pytest.fixture(scope="module", autouse=True)
def bgp_module_hooks(request):
bgp_pre_config()
yield
bgp_pre_config_cleanup()
# bgp module level pre config function
def bgp_pre_config():
global topo
st.banner("BGP MODULE CONFIG - START")
st.log("Ensure minimum linear 4-node topology")
st.ensure_min_topology('D1D2:1', 'D2D3:1', 'D3D4:1')
bgp4nodelib.l3_ipv4v6_address_config_unconfig(config='yes', config_type='all')
# Ping Verification
if not bgp4nodelib.l3tc_vrfipv4v6_address_ping_test(config_type='all', ping_count=3):
st.error("Ping failed between DUTs")
st.report_fail('test_case_failed')
topo = bgp4nodelib.get_confed_topology_info()
st.log(topo)
st.banner("BGP MODULE CONFIG - END")
# bgp module level pre config cleanup function
def bgp_pre_config_cleanup():
st.banner("BGP MODULE CONFIG CLEANUP - START")
bgp4nodelib.l3_ipv4v6_address_config_unconfig(config='no')
st.banner("BGP MODULE CONFIG CLEANUP - END")
@pytest.fixture(scope="function")
def bgp_func_hooks(request):
yield
@pytest.mark.bgp_ft
@pytest.mark.community
@pytest.mark.community_pass
def test_ft_bgp_ebgp_multihop_4byteASN():
"""
Verify the functioning of ebgp multihop command with 4 byte ASN
"""
# On DUT1 and DUT3, create BGP with 4byte ASN
dut1_as = 6500001
dut1 = topo['dut_list'][0]
dut3_as = 6500002
dut3 = topo['dut_list'][2]
result = 0
wait_timer = 150
st.banner("Verify the ebgp multihop functionality with 4 byte AS Number")
# Configure bgp on DUT1 and configure DUT3 as neighbor with ebgp-multihop ttl set to 5
st.log("Configure eBGP on DUT1 with Neighbor as DUT3 with multihop set to maximum hops of 5")
bgpapi.config_bgp(dut1, local_as=dut1_as, neighbor=topo['D3D2P1_ipv4'], remote_as=dut3_as, config_type_list=["neighbor","ebgp_mhop"], ebgp_mhop='5')
# Add static route towards neighbor DUT3
st.log("Add static route towards DUT3")
ipapi.create_static_route(dut1, topo['D1D2P1_neigh_ipv4'], "{}/24".format(topo['D3D2P1_ipv4']))
# Configure bgp on DUT3 and configure DUT1 as neighbor with ebgp-multihop ttl set to 5
st.log("Configure eBGP on DUT3 with DUT1 as Neighbor with multihop set to maximum hops of 5")
bgpapi.config_bgp(dut3, local_as=dut3_as, neighbor=topo['D1D2P1_ipv4'], remote_as=dut1_as, config_type_list=["neighbor","ebgp_mhop"], ebgp_mhop='5')
# Add static route towards neighbor DUT1
st.log("Add static route towards DUT1")
ipapi.create_static_route(dut3, topo['D3D2P1_neigh_ipv4'], "{}/24".format(topo['D1D2P1_ipv4']))
st.log("Verify BGP neighborship on DUT1")
#result = bgpapi.verify_bgp_summary(dut1, family='ipv4', neighbor=topo['D3D2P1_ipv4'], state='Established')
if not utils.poll_wait(bgpapi.verify_bgp_summary, wait_timer, dut1, family='ipv4', neighbor=topo['D3D2P1_ipv4'],
state='Established'):
st.log("Failed to form BGP eBGP multihop peering with 4byte ASN")
result += 1
if result == 0:
st.log("Pass: BGP neighborship established between DUT1 and DUT3")
else:
st.error("Fail: BGP neighborship not established between DUT1 and DUT3")
st.banner("Collecting techsupport")
exec_all(True, [[st.generate_tech_support, topo['dut_list'][0], "test_ft_bgp_ebgp_multihop_4byteASN"],
[st.generate_tech_support, topo['dut_list'][1], "test_ft_bgp_ebgp_multihop_4byteASN"],
[st.generate_tech_support, topo['dut_list'][2], "test_ft_bgp_ebgp_multihop_4byteASN"]])
#Clear applied configs
st.banner("Cleanup for TestFunction")
bgpapi.cleanup_router_bgp(dut1)
bgpapi.cleanup_router_bgp(dut3)
ipapi.delete_static_route(dut1, topo['D1D2P1_neigh_ipv4'], "{}/24".format(topo['D3D2P1_ipv4']))
ipapi.delete_static_route(dut3, topo['D3D2P1_neigh_ipv4'], "{}/24".format(topo['D1D2P1_ipv4']))
if result == 0:
st.report_pass("test_case_passed")
else:
st.report_fail("test_case_failed")
################################################################################
# BGP Confederation test cases - START
def bgp_confed_pre_config():
st.banner("BGP CONFED CLASS CONFIG - START")
bgp4nodelib.l3tc_vrfipv4v6_confed_bgp_config(config='yes')
# BGP Neighbour Verification
if not poll_wait(bgp4nodelib.l3tc_vrfipv4v6_address_confed_bgp_check, 10, config_type='all'):
st.error("Neighborship failed to Establish between DUTs")
st.report_fail('test_case_failed')
st.log("Getting all topology info related to connectivity / TG and other parameters between duts")
st.banner("BGP CONFED CLASS CONFIG - END")
def bgp_confed_pre_config_cleanup():
st.banner("BGP CONFED CLASS CONFIG CLEANUP - START")
bgp4nodelib.l3tc_vrfipv4v6_confed_bgp_config(config='no')
st.banner("BGP RIF CLASS CONFIG CLEANUP - END")
@pytest.fixture(scope='class')
def bgp_confed_class_hook(request):
bgp_confed_pre_config()
yield
bgp_confed_pre_config_cleanup()
# TestBGPConfed class
@pytest.mark.usefixtures('bgp_confed_class_hook')
class TestBGPConfed():
@pytest.mark.bgp_ft
@pytest.mark.community
@pytest.mark.community_pass
def test_ipv6_confed_route_distribution(self):
st.banner("Verify the config of BGP v6 confederation and router advertisement")
st.log("Advertise a network from DUT1 and check if it is learnt on confederation peer DUT3")
dut1_name = topo['dut_list'][0]
dut3_name = topo['dut_list'][2]
network_ipv4 = '192.168.127.12/24'
network_ipv6 = 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/64'
# Advertise a network to peer
bgpapi.config_bgp_network_advertise(dut1_name, topo['D1_as'], network_ipv4, network_import_check=True)
bgpapi.config_bgp_network_advertise(dut1_name, topo['D1_as'], network_ipv6, addr_family='ipv6', config='yes', network_import_check=True)
entries = bgpapi.get_ip_bgp_route(dut3_name, family="ipv4", network=network_ipv4)
entries1 = bgpapi.get_ip_bgp_route(dut3_name, family="ipv6", network="2000:1::/64")
if entries and entries1:
st.log("Pass: Routes advertised by DUT1 found on DUT3")
else:
st.error("Fail: Route advertised by DUT1 not found on DUT3")
st.banner("Collecting techsupport")
exec_all(True, [[st.generate_tech_support, topo['dut_list'][0], "test_ipv6_confed_route_distribution"],
[st.generate_tech_support, topo['dut_list'][1], "test_ipv6_confed_route_distribution"],
[st.generate_tech_support, topo['dut_list'][2], "test_ipv6_confed_route_distribution"]])
# Clear applied configs
st.banner("Cleanup for TestFunction")
bgpapi.config_bgp_network_advertise(dut1_name, topo['D1_as'], network_ipv4, config='no' )
bgpapi.config_bgp_network_advertise(dut1_name, topo['D1_as'], network_ipv6, addr_family='ipv6', config='no')
if entries and entries1:
st.report_pass("test_case_passed")
else:
st.report_fail("test_case_failed")
@pytest.mark.bgp_ft
@pytest.mark.community
@pytest.mark.community_pass
def test_ipv6_confed_with_rr(self):
st.banner("Verify Route Reflector behavior within a confederation of BGP v6 peers")
st.banner("Consider the right confederation iBGP AS and check Route Reflector functionality between the 3 iBGP Routers")
network_ipv4 = '172.16.58.3/24'
network_ipv6 = 'fc00:e968:6179::de52:7100/64'
# iBGP AS is one of D2/D3/D4 ASN
iBGP_as=topo['D2_as']
st.log("Advertise an IPv4 and an IPv6 network from DUT2 through BGP")
bgpapi.config_bgp_network_advertise(topo['dut_list'][1], iBGP_as, network_ipv4, network_import_check=True)
bgpapi.config_bgp_network_advertise(topo['dut_list'][1], iBGP_as, network_ipv6, addr_family='ipv6', config='yes', network_import_check=True)
st.log("Check the network on the 3rd iBGP peer DUT4 is not learnt because Route Reflector is not configured on peer DUT3")
entries = bgpapi.get_ip_bgp_route(topo['dut_list'][3], family="ipv4", network=network_ipv4)
entries1 = bgpapi.get_ip_bgp_route(topo['dut_list'][3], family="ipv6", network="3000:1::/64")
if not entries and not entries1:
st.log("Pass: DUT4 did not learn routes without configuring Route Reflector on peer DUT3")
else:
st.error("Fail: DUT4 learned route without configuring Route Reflector on peer DUT3")
st.banner("Collecting techsupport")
exec_all(True, [[st.generate_tech_support, topo['dut_list'][1], "test_ipv6_confed_with_rr"],
[st.generate_tech_support, topo['dut_list'][2], "test_ipv6_confed_with_rr"],
[st.generate_tech_support, topo['dut_list'][3], "test_ipv6_confed_with_rr"]])
# Clear applied configurations
st.banner("Cleanup for TestFunction")
bgpapi.config_bgp_network_advertise(topo['dut_list'][1], iBGP_as, network_ipv4, config='no' )
bgpapi.config_bgp_network_advertise(topo['dut_list'][1], iBGP_as, network_ipv6, addr_family='ipv6', config='no')
st.report_fail("test_case_failed")
st.log("Now configure Route Reflector on DUT3")
bgpapi.create_bgp_route_reflector_client(topo.dut_list[2], iBGP_as, 'ipv4', topo['D3D4P1_neigh_ipv4'], 'yes')
bgpapi.create_bgp_route_reflector_client(topo.dut_list[2], iBGP_as, 'ipv6', topo['D3D4P1_neigh_ipv6'], 'yes')
st.wait(10)
st.log("Now the routes should be learnt on the 3rd IBGP peer DUT4")
entries2 = bgpapi.get_ip_bgp_route(topo['dut_list'][3], family="ipv4", network=network_ipv4)
entries3 = bgpapi.get_ip_bgp_route(topo['dut_list'][3], family="ipv6", network="3000:1::/64")
if entries2 and entries3:
st.log("Pass: DUT4 learned the routes advertised by peer DUT2")
else:
st.error("Fail: DUT4 did not learn the routes advertised by peer DUT2")
st.banner("Collecting techsupport")
exec_all(True, [[st.generate_tech_support, topo['dut_list'][1], "test_ipv6_confed_with_rr"],
[st.generate_tech_support, topo['dut_list'][2], "test_ipv6_confed_with_rr"],
[st.generate_tech_support, topo['dut_list'][3], "test_ipv6_confed_with_rr"]])
# Clear applied configurations
st.banner("Cleanup for TestFunction")
bgpapi.config_bgp_network_advertise(topo['dut_list'][1], iBGP_as, network_ipv4, config='no' )
bgpapi.config_bgp_network_advertise(topo['dut_list'][1], iBGP_as, network_ipv6, addr_family='ipv6', config='no')
bgpapi.create_bgp_route_reflector_client(topo.dut_list[2], iBGP_as, 'ipv4', topo['D3D4P1_neigh_ipv4'], 'no')
bgpapi.create_bgp_route_reflector_client(topo.dut_list[2], iBGP_as, 'ipv6', topo['D3D4P1_neigh_ipv6'], 'no')
if entries2 and entries3:
st.report_pass("test_case_passed")
else:
st.report_fail("test_case_failed")
@pytest.mark.rmap
@pytest.mark.bgp_ft
@pytest.mark.community
@pytest.mark.community_fail
def test_confed_route_distribution_with_rmap(self):
st.banner("Verify the behavior of Route-Maps over confederation peers")
result = False
network1 = '172.16.58.3/24'
network2 = '172.16.17.32/24'
network3 = '192.168.127.12'
as_path = '200'
access_list1 = 'test-access-list1'
access_list2 = 'test-access-list2'
access_list3 = 'test-access-list3'
st.log("Create access-lists and a route-map in DUT1, add to it permit, deny and AS-path prepending policies")
# Create access-list test-access-list1
ipapi.config_access_list(topo['dut_list'][0], access_list1, network3+'/24', 'permit', seq_num="1")
# Create route-map and permit network3
ipapi.config_route_map_match_ip_address(topo['dut_list'][0], 'test-rmap', 'permit', '10', access_list1)
# Add set option to prepend as-path 200
ipapi.config_route_map_set_aspath(topo['dut_list'][0], 'test-rmap', 'permit', '10', as_path)
# Create access-list test-access-list2
ipapi.config_access_list(topo['dut_list'][0], access_list2, network1, 'deny', seq_num="2")
# In route-map, deny network1
ipapi.config_route_map_match_ip_address(topo['dut_list'][0], 'test-rmap', 'deny', '20', access_list2)
# Create access-list test-access-list3
ipapi.config_access_list(topo['dut_list'][0], access_list3, network2, 'permit', seq_num="3")
# In route-map, permit network2
ipapi.config_route_map_match_ip_address(topo['dut_list'][0], 'test-rmap', 'permit', '30', access_list3)
# Advertise three networks from leaf
st.log("Advertise the networks from DUT1 through BGP and associate with the route-map")
bgpapi.advertise_bgp_network(topo['dut_list'][0], topo['D1_as'], network1, 'test-rmap', network_import_check=True)
bgpapi.advertise_bgp_network(topo['dut_list'][0], topo['D1_as'], network2, 'test-rmap', network_import_check=True)
bgpapi.advertise_bgp_network(topo['dut_list'][0], topo['D1_as'], network3+'/24', 'test-rmap', network_import_check=True)
st.log("Verify in peer DUT2 the network configured in {} has the AS-path prepended".format(access_list1))
# Verify that the neighbor has the as-path prepended
output = bgpapi.show_bgp_ipvx_prefix(topo['dut_list'][1], prefix=network3, masklen=topo['D1_as'])
        for x in output:  # each entry is a dict of parsed BGP prefix attributes
peer_asn = x['peerasn']
peer_asn = peer_asn.split()
for each in peer_asn:
if each == as_path:
result = True
if result:
st.log("Pass: AS-Path {} found to be prepended with network {}/24".format(as_path, network3))
else:
st.error("Fail: AS-Path {} not found to be prepended".format(as_path))
# Verify that network1 is not present in ip route table
st.log("Verify that peer DUT2 not learnt the network configured as 'deny' in {}".format(access_list2))
n1 = ipapi.verify_ip_route(topo['dut_list'][1], ip_address=network1)
if n1 is False:
result = result & True
st.log("Pass: DUT2 did not learn network {}".format(network1))
else:
result = result & False
st.error("Fail: DUT2 learned the network {}".format(network1))
# Verify that network2 is present in ip route table
st.log("Verify that peer DUT2 learnt the network configured as 'permit' in {}".format(access_list3))
n2 = ipapi.verify_ip_route(topo['dut_list'][1], ip_address=network2)
if n2:
result = result & True
st.log("Pass: DUT2 learned the network {}".format(network2))
else:
result = result & False
st.error("Fail: DUT2 did not learn network {}".format(network2))
if not result:
st.banner("Collecting techsupport")
exec_all(True, [[st.generate_tech_support, topo['dut_list'][0], "test_confed_route_distribution_with_rmap"],
[st.generate_tech_support, topo['dut_list'][1], "test_confed_route_distribution_with_rmap"]])
ipapi.config_route_map_mode(topo['dut_list'][0], 'test-rmap', 'permit', '10', config='no')
# Clear applied configurations
st.banner("Cleanup for TestFunction")
ipapi.config_access_list(topo['dut_list'][0], 'test-access-list3', network2, 'permit', config='no', seq_num="3")
ipapi.config_access_list(topo['dut_list'][0], 'test-access-list2', network1, 'deny', config='no', seq_num="2")
ipapi.config_access_list(topo['dut_list'][0], 'test-access-list1', network3+'/24', 'permit', config='no', seq_num="1")
bgpapi.advertise_bgp_network(topo['dut_list'][0], topo['D1_as'], network1, 'test-rmap', config='no')
bgpapi.advertise_bgp_network(topo['dut_list'][0], topo['D1_as'], network2, 'test-rmap', config='no')
bgpapi.advertise_bgp_network(topo['dut_list'][0], topo['D1_as'], network3+'/24', 'test-rmap', config='no')
if result:
st.report_pass("test_case_passed")
else:
st.report_fail("test_case_failed")
# BGP Confederation test cases - END
################################################################################
|
converter/ch_ppocr_v2_rec_converter.py | JimEverest/PaddleOCR2Pytorch | 364 | 12629201 | # https://zhuanlan.zhihu.com/p/335753926
import os, sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from collections import OrderedDict
import numpy as np
import cv2
import torch
from pytorchocr.base_ocr_v20 import BaseOCRV20
class PPOCRv2RecConverter(BaseOCRV20):
def __init__(self, config, paddle_pretrained_model_path, **kwargs):
para_state_dict, opti_state_dict = self.read_paddle_weights(paddle_pretrained_model_path)
out_channels = list(para_state_dict.values())[-1].shape[0]
print('out_channels: ', out_channels)
print(type(kwargs), kwargs)
kwargs['out_channels'] = out_channels
super(PPOCRv2RecConverter, self).__init__(config, **kwargs)
# self.load_paddle_weights(paddle_pretrained_model_path)
self.load_paddle_weights([para_state_dict, opti_state_dict])
print('model is loaded: {}'.format(paddle_pretrained_model_path))
self.net.eval()
def load_paddle_weights(self, paddle_weights):
para_state_dict, opti_state_dict = paddle_weights
        for k, v in para_state_dict.items():
            print('paddle: {} ---- {}'.format(k, v.shape))
        for k, v in self.net.state_dict().items():
            print('pytorch: {} ---- {}'.format(k, v.shape))
for k,v in self.net.state_dict().items():
if k.endswith('num_batches_tracked'):
continue
ppname = k
ppname = ppname.replace('.running_mean', '._mean')
ppname = ppname.replace('.running_var', '._variance')
if k.startswith('backbone.'):
ppname = ppname.replace('backbone.', 'Student.backbone.')
elif k.startswith('neck.'):
ppname = ppname.replace('neck.', 'Student.neck.')
elif k.startswith('head.'):
ppname = ppname.replace('head.', 'Student.head.')
else:
print('Redundance:')
print(k)
raise ValueError
try:
if ppname.endswith('fc1.weight') or ppname.endswith('fc2.weight'):
self.net.state_dict()[k].copy_(torch.Tensor(para_state_dict[ppname].T))
else:
self.net.state_dict()[k].copy_(torch.Tensor(para_state_dict[ppname]))
except Exception as e:
print('pytorch: {}, {}'.format(k, v.size()))
print('paddle: {}, {}'.format(ppname, para_state_dict[ppname].shape))
raise e
print('model is loaded.')
if __name__ == '__main__':
import argparse, json, textwrap, sys, os
parser = argparse.ArgumentParser()
parser.add_argument("--src_model_path", type=str, help='Assign the paddleOCR trained model(best_accuracy)')
args = parser.parse_args()
cfg = {'model_type':'rec',
'algorithm':'CRNN',
'Transform':None,
'Backbone':{'name':'MobileNetV1Enhance', 'scale':0.5},
'Neck':{'name':'SequenceEncoder', 'hidden_size':64, 'encoder_type':'rnn'},
'Head':{'name':'CTCHead', 'mid_channels': 96, 'fc_decay': 2e-05}}
paddle_pretrained_model_path = os.path.join(os.path.abspath(args.src_model_path), 'best_accuracy')
converter = PPOCRv2RecConverter(cfg, paddle_pretrained_model_path)
np.random.seed(666)
inputs = np.random.randn(1,3,32,320).astype(np.float32)
inp = torch.from_numpy(inputs)
out = converter.net(inp)
out = out.data.numpy()
print('out:', np.sum(out), np.mean(out), np.max(out), np.min(out))
# save
converter.save_pytorch_weights('ch_ptocr_v2_rec_infer.pth')
print('done.')
|
paver/hg.py | timgates42/paver | 270 | 12629220 | <filename>paver/hg.py
"""Convenience functions for working with mercurial
This module does not include any tasks, only functions.
At this point, these functions do not use any kind of library. They require
the hg binary on the PATH."""
from paver.easy import sh
def clone(url, dest_folder, rev=None):
"""Clone a mercurial repository.
Parameters:
url (string): The path to clone the repository from. Could be local
or remote.
dest_folder (string): The local folder where the repository will be
cloned.
rev=None (string or None): If specified, the revision to clone to.
If omitted or `None`, all changes will be cloned.
Returns:
None"""
rev_string = ''
if rev:
rev_string = ' -r %s' % rev
sh('hg clone{rev} {url} {dest}'.format(
rev=rev_string, url=url, dest=dest_folder))
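# A minimal usage sketch with a hypothetical repository URL and destination;
# like the functions in this module it shells out to hg, so mercurial must be
# installed and on the PATH.
def _example_clone():
    clone("https://example.com/hg/project", "build/project", rev="1.0")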
def pull(repo_path, rev=None, url=None):
"""Pull changes into a mercurial repository.
Parameters:
repo_path (string): The local path to a mercurial repository.
rev=None (string or None): If specified, the revision to pull to.
If omitted or `None`, all changes will be pulled.
url=None (string or None): If specified, the repository to pull from.
If omitted or `None`, the default location of the repository will
be used.
Returns:
None"""
rev_string = ''
if rev:
rev_string = ' -r %s' % rev
url_string = ''
if url:
url_string = ' ' + url
sh('hg pull{rev} -R {repo}{url}'.format(rev=rev_string,
repo=repo_path,
url=url_string))
def latest_tag(repo_path, relative_to='tip'):
"""Get the latest tag from a mercurial repository.
Parameters:
repo_path (string): The local path to a mercurial repository.
relative_to='tip' (string): If provided, the revision to use as
a reference. Defaults to 'tip'.
Returns:
The string name of the latest tag."""
stdout = sh('hg log --template "{{latesttag}}" -r {rev} -R {repo}'.format(
rev=relative_to, repo=repo_path), capture=True)
return stdout.strip()
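# A small sketch (hypothetical repository path) showing the optional
# relative_to argument, which resolves the latest tag reachable from a
# specific revision instead of tip.
def _example_latest_tag():
    return latest_tag("build/project", relative_to="default")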
def update(repo_path, rev='tip', clean=False):
"""Update a mercurial repository to a revision.
Parameters:
repo_path (string): The local path to a mercurial repository.
rev='tip' (string): If provided, the revision to update to. If
omitted, 'tip' will be used.
clean=False (bool): If `True`, the update will discard uncommitted
changes.
Returns:
None"""
clean_string = ''
if clean:
clean_string = ' --clean'
sh('hg update -r {rev} -R {repo}{clean}'.format(
rev=rev, repo=repo_path, clean=clean_string))
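# A minimal sketch (hypothetical repository path and tag) that combines pull()
# and update() to sync a working copy to a tagged revision, discarding any
# local modifications.
def _example_sync_to_tag():
    pull("build/project")
    update("build/project", rev="1.0", clean=True)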
def branches(repo_path, closed=False):
"""List branches for the target repository.
Parameters:
repo_path (string): The local path to a mercurial repository.
closed=False (bool): Whether to include closed branches in the
branch list.
Returns:
A python tuple. The first item of the tuple is the current branch.
The second item of the tuple is a list of the branches"""
current_branch = sh('hg branch -R {repo}'.format(repo=repo_path),
capture=True).strip()
closed_string = ''
if closed:
closed_string = ' --closed'
stdout_string = sh('hg branches -R {repo}{closed}'.format(
repo=repo_path, closed=closed_string), capture=True)
# Branch list comes out in the format:
# <branchname> <revnum>:<sha1>
branches = [line.split()[0] for line in stdout_string.split('\n')
if len(line) > 0]
return current_branch, branches
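# A short sketch (hypothetical repository path) of consuming the
# (current_branch, branches) tuple returned above.
def _example_branches():
    current, all_branches = branches("build/project", closed=True)
    return current in all_branches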
|
tests/trainers/lightning/test_validation.py | facebookresearch/pythia | 3,252 | 12629227 | <filename>tests/trainers/lightning/test_validation.py
# Copyright (c) Facebook, Inc. and its affiliates.
import gc
import unittest
from typing import Any, Dict, Optional
from unittest.mock import MagicMock, patch
from mmf.common.meter import Meter
from mmf.trainers.callbacks.logistics import LogisticsCallback
from mmf.trainers.lightning_core.loop_callback import LightningLoopCallback
from mmf.trainers.lightning_core.loop_callback_with_torchmetrics import (
LightningTorchMetricsCallback,
)
from mmf.trainers.lightning_core.torchmetric import LightningTorchMetrics
from mmf.utils.logger import TensorboardLogger
from mmf.utils.timer import Timer
from tests.trainers.test_utils import (
get_config_with_defaults,
get_lightning_trainer,
get_mmf_trainer,
run_lightning_trainer,
)
class TestLightningTrainerValidation(unittest.TestCase):
def setUp(self):
self.ground_truths = [
{
"current_iteration": 3,
"num_updates": 3,
"max_updates": 8,
"avg_loss": 9705.2953125,
},
{
"current_iteration": 6,
"num_updates": 6,
"max_updates": 8,
"avg_loss": 9703.29765625,
},
{
"current_iteration": 8,
"num_updates": 8,
"max_updates": 8,
"avg_loss": 9701.88046875,
},
]
    def tearDown(self):
del self.ground_truths
gc.collect()
@patch("mmf.common.test_reporter.PathManager.mkdirs")
@patch("mmf.trainers.lightning_trainer.get_mmf_env", return_value="")
def test_validation(self, log_dir, mkdirs):
config = self._get_config(
max_steps=8,
batch_size=2,
val_check_interval=3,
log_every_n_steps=9, # turn it off
limit_val_batches=1.0,
)
trainer = get_lightning_trainer(config=config, prepare_trainer=False)
callback = LightningLoopCallback(trainer)
trainer.callbacks.append(callback)
lightning_values = []
def log_values(
current_iteration: int,
num_updates: int,
max_updates: int,
meter: Meter,
extra: Dict[str, Any],
tb_writer: TensorboardLogger,
):
lightning_values.append(
{
"current_iteration": current_iteration,
"num_updates": num_updates,
"max_updates": max_updates,
"avg_loss": meter.loss.avg,
}
)
with patch(
"mmf.trainers.lightning_core.loop_callback.summarize_report",
side_effect=log_values,
):
run_lightning_trainer(trainer)
self.assertEqual(len(self.ground_truths), len(lightning_values))
for gt, lv in zip(self.ground_truths, lightning_values):
keys = list(gt.keys())
self.assertListEqual(keys, list(lv.keys()))
for key in keys:
if key == "num_updates" and gt[key] == self.ground_truths[-1][key]:
# After training, in the last evaluation run, mmf's num updates is 8
# while lightning's num updates is 9, this is due to a hack to
# assign the lightning num_updates to be the trainer.global_step+1.
#
# This is necessary because of a lightning bug: trainer.global_step
# is 1 off less than the actual step count. When on_train_batch_end
# is called for the first time, the trainer.global_step should be 1,
# rather than 0, since 1 update/step has already been done.
#
# When lightning fixes its bug, we will update this test to remove
# the hack. # issue: 6997 in pytorch lightning
self.assertAlmostEqual(gt[key], lv[key] - 1, 1)
else:
self.assertAlmostEqual(gt[key], lv[key], 1)
# TODO: update test function with avg_loss
@patch("mmf.common.test_reporter.PathManager.mkdirs")
@patch("mmf.trainers.lightning_trainer.get_mmf_env", return_value="")
def test_validation_torchmetrics(self, log_dir, mkdirs):
config = self._get_config(
max_steps=8,
batch_size=2,
val_check_interval=3,
log_every_n_steps=9, # turn it off
limit_val_batches=1.0,
)
trainer = get_lightning_trainer(config=config, prepare_trainer=False)
trainer.torchmetrics = LightningTorchMetrics([])
callback = LightningTorchMetricsCallback(trainer)
trainer.callbacks.append(callback)
lightning_values = []
def log_values(
extra: Optional[Dict],
num_updates: int,
max_updates: int,
log_type: str = "train",
):
lightning_values.append(
{"num_updates": num_updates, "max_updates": max_updates}
)
with patch(
"mmf.trainers.lightning_core.loop_callback_with_torchmetrics"
+ ".LightningTorchMetricsCallback._log_metrics_and_extra",
side_effect=log_values,
):
run_lightning_trainer(trainer)
self.assertEqual(len(self.ground_truths), len(lightning_values))
for gt, lv in zip(self.ground_truths, lightning_values):
for key in ["num_updates", "max_updates"]:
if key == "num_updates" and gt[key] == self.ground_truths[-1][key]:
# to understand the reason of using lv[key] - 1 (intead of lv[key])
# see comments in test_validation
self.assertAlmostEqual(gt[key], lv[key] - 1, 1)
else:
self.assertAlmostEqual(gt[key], lv[key], 1)
@patch("mmf.common.test_reporter.PathManager.mkdirs")
@patch("torch.utils.tensorboard.SummaryWriter")
@patch("mmf.common.test_reporter.get_mmf_env", return_value="")
@patch("mmf.trainers.callbacks.logistics.summarize_report")
def test_validation_parity(self, summarize_report_fn, test_reporter, sw, mkdirs):
config = self._get_mmf_config(
max_updates=8, max_epochs=None, batch_size=2, evaluation_interval=3
)
mmf_trainer = get_mmf_trainer(config=config)
mmf_trainer.load_metrics()
logistics_callback = LogisticsCallback(mmf_trainer.config, mmf_trainer)
logistics_callback.snapshot_timer = Timer()
logistics_callback.train_timer = Timer()
mmf_trainer.logistics_callback = logistics_callback
mmf_trainer.callbacks.append(logistics_callback)
mmf_trainer.early_stop_callback = MagicMock(return_value=None)
mmf_trainer.on_validation_end = logistics_callback.on_validation_end
mmf_trainer.training_loop()
calls = summarize_report_fn.call_args_list
self.assertEqual(3, len(calls))
self.assertEqual(len(self.ground_truths), len(calls))
self._check_values(calls)
def _check_values(self, calls):
for (_, kwargs), gt in zip(calls, self.ground_truths):
for key, value in gt.items():
if key == "avg_loss":
self.assertAlmostEqual(kwargs["meter"].loss.avg, value, 1)
else:
self.assertAlmostEqual(kwargs[key], value, 1)
def _get_config(
self,
max_steps,
batch_size,
val_check_interval,
log_every_n_steps,
limit_val_batches,
):
config = {
"trainer": {
"params": {
"max_steps": max_steps,
"log_every_n_steps": log_every_n_steps,
"val_check_interval": val_check_interval,
"limit_val_batches": limit_val_batches,
}
},
"training": {"batch_size": batch_size},
}
return get_config_with_defaults(config)
def _get_mmf_config(self, max_updates, max_epochs, batch_size, evaluation_interval):
config = {
"training": {
"max_updates": max_updates,
"max_epochs": max_epochs,
"batch_size": batch_size,
"evaluation_interval": evaluation_interval,
}
}
return get_config_with_defaults(config)
|