docstring (string) | function (string) | __index_level_0__ (int64)
---|---|---|
Check if ``jsondata`` has the structure of a keystore file version 3.
Note that this test is not complete, e.g. it doesn't check key derivation or cipher parameters.
Copied from https://github.com/vbuterin/pybitcointools
Args:
jsondata: Dictionary containing the data from the json file
Returns:
`True` if the data appears to be valid, otherwise `False` | def check_keystore_json(jsondata: Dict) -> bool:
if 'crypto' not in jsondata and 'Crypto' not in jsondata:
return False
if 'version' not in jsondata:
return False
if jsondata['version'] != 3:
return False
crypto = jsondata.get('crypto', jsondata.get('Crypto'))
if 'cipher' not in crypto:
return False
if 'ciphertext' not in crypto:
return False
if 'kdf' not in crypto:
return False
if 'mac' not in crypto:
return False
return True | 181,202 |
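A minimal usage sketch for the check above; the keystore path is hypothetical and only `check_keystore_json` itself comes from the row above.

```python
import json

# Hypothetical path to a keystore file exported by geth/parity.
keyfile_path = 'keystore/UTC--2018-01-01T00-00-00--00112233445566778899aabbccddeeff00112233'

with open(keyfile_path) as f:
    jsondata = json.load(f)

if check_keystore_json(jsondata):
    print('file has the structure of a v3 keystore')
else:
    print('not a valid v3 keystore')
```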
Find the keystore file for an account, unlock it and get the private key
Args:
address: The Ethereum address for which to find the keyfile in the system
password: The password to decrypt the keystore file. If it is not
provided, the user is queried for one interactively. Passing the
password directly is mostly useful for testing.
Returns:
The private key associated with the address | def get_privkey(self, address: AddressHex, password: str) -> PrivateKey:
address = add_0x_prefix(address).lower()
if not self.address_in_keystore(address):
raise ValueError('Keystore file not found for %s' % address)
with open(self.accounts[address]) as data_file:
data = json.load(data_file)
acc = Account(data, password, self.accounts[address])
return acc.privkey | 181,205 |
Load an account from a keystore file.
Args:
path: full path to the keyfile
password: the password to decrypt the key file or `None` to leave it encrypted | def load(cls, path: str, password: str = None) -> 'Account':
with open(path) as f:
keystore = json.load(f)
if not check_keystore_json(keystore):
raise ValueError('Invalid keystore file')
return Account(keystore, password, path=path) | 181,207 |
Consume tokens.
Args:
tokens (float): number of transport tokens to consume
Returns:
wait_time (float): waiting time for the consumer | def consume(self, tokens):
wait_time = 0.
self.tokens -= tokens
if self.tokens < 0:
self._get_tokens()
if self.tokens < 0:
wait_time = -self.tokens / self.fill_rate
return wait_time | 181,301 |
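The `consume` method above relies on `fill_rate`, `tokens` and `_get_tokens()` which are not shown. A minimal sketch of how the surrounding token bucket could look, assuming time-based refilling up to a fixed capacity; only the attribute names follow the snippet, everything else is an assumption.

```python
import time

class TokenBucket:
    """Sketch of a token-bucket rate limiter that consume() above would fit into."""

    def __init__(self, capacity: float, fill_rate: float):
        self.capacity = capacity      # maximum number of tokens held (assumed)
        self.fill_rate = fill_rate    # tokens added per second
        self.tokens = capacity
        self.timestamp = time.time()

    def _get_tokens(self):
        # Refill proportionally to the elapsed time, capped at capacity (assumed behaviour).
        now = time.time()
        self.tokens = min(self.capacity, self.tokens + self.fill_rate * (now - self.timestamp))
        self.timestamp = now
```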
Checks whether the account has enough balance to handle the lifecycles of all
open channels as well as the channels that are about to be created.
Note: This is just an estimation.
Args:
raiden: A raiden service instance
channels_to_open: The number of new channels that should be opened
Returns:
Tuple of a boolean denoting if the account has enough balance for
the remaining lifecycle events and the estimate for the remaining
lifecycle cost | def has_enough_gas_reserve(
raiden,
channels_to_open: int = 0,
) -> Tuple[bool, int]:
secure_reserve_estimate = get_reserve_estimate(raiden, channels_to_open)
current_account_balance = raiden.chain.client.balance(raiden.chain.client.address)
return secure_reserve_estimate <= current_account_balance, secure_reserve_estimate | 181,307 |
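A hedged usage sketch of the gas-reserve check; `raiden` stands for a running RaidenService instance and the error text is illustrative.

```python
has_enough, estimated_reserve = has_enough_gas_reserve(raiden, channels_to_open=3)
if not has_enough:
    raise RuntimeError(
        f'Balance too low: at least {estimated_reserve} wei are required to '
        f'safely settle the existing channels plus the 3 new ones.'
    )
```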
Initializes a new `LogFilter`
Args:
config: Dictionary mapping module names to logging level
default_level: The default logging level | def __init__(self, config: Dict[str, str], default_level: str):
self._should_log: Dict[Tuple[str, str], bool] = {}
# the empty module is not matched, so set it here
self._default_level = config.get('', default_level)
self._log_rules = [
(logger.split('.') if logger else list(), level)
for logger, level in config.items()
] | 181,331 |
Creates a new channel in the TokenNetwork contract.
Args:
partner: The peer to open the channel with.
settle_timeout: The settle timeout to use for this channel.
given_block_identifier: The block identifier of the state change that
prompted this proxy action
Returns:
The ChannelID of the new netting channel. | def new_netting_channel(
self,
partner: Address,
settle_timeout: int,
given_block_identifier: BlockSpecification,
) -> ChannelID:
checking_block = self.client.get_checking_block()
self._new_channel_preconditions(
partner=partner,
settle_timeout=settle_timeout,
block_identifier=given_block_identifier,
)
log_details = {
'peer1': pex(self.node_address),
'peer2': pex(partner),
}
gas_limit = self.proxy.estimate_gas(
checking_block,
'openChannel',
participant1=self.node_address,
participant2=partner,
settle_timeout=settle_timeout,
)
if not gas_limit:
self.proxy.jsonrpc_client.check_for_insufficient_eth(
transaction_name='openChannel',
transaction_executed=False,
required_gas=GAS_REQUIRED_FOR_OPEN_CHANNEL,
block_identifier=checking_block,
)
self._new_channel_postconditions(
partner=partner,
block=checking_block,
)
log.critical('new_netting_channel call will fail', **log_details)
raise RaidenUnrecoverableError('Creating a new channel will fail')
log.debug('new_netting_channel called', **log_details)
# Prevent concurrent attempts to open a channel with the same token and
# partner address.
if gas_limit and partner not in self.open_channel_transactions:
new_open_channel_transaction = AsyncResult()
self.open_channel_transactions[partner] = new_open_channel_transaction
gas_limit = safe_gas_limit(gas_limit, GAS_REQUIRED_FOR_OPEN_CHANNEL)
try:
transaction_hash = self.proxy.transact(
'openChannel',
gas_limit,
participant1=self.node_address,
participant2=partner,
settle_timeout=settle_timeout,
)
self.client.poll(transaction_hash)
receipt_or_none = check_transaction_threw(self.client, transaction_hash)
if receipt_or_none:
self._new_channel_postconditions(
partner=partner,
block=receipt_or_none['blockNumber'],
)
log.critical('new_netting_channel failed', **log_details)
raise RaidenUnrecoverableError('creating new channel failed')
except Exception as e:
log.critical('new_netting_channel failed', **log_details)
new_open_channel_transaction.set_exception(e)
raise
else:
new_open_channel_transaction.set(transaction_hash)
finally:
self.open_channel_transactions.pop(partner, None)
else:
# All other concurrent threads should block on the result of opening this channel
self.open_channel_transactions[partner].get()
channel_identifier: ChannelID = self._detail_channel(
participant1=self.node_address,
participant2=partner,
block_identifier='latest',
).channel_identifier
log_details['channel_identifier'] = str(channel_identifier)
log.info('new_netting_channel successful', **log_details)
return channel_identifier | 181,364 |
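A hedged call-site sketch; `token_network` is assumed to be an instance of the proxy class defining `new_netting_channel`, and the concrete values are illustrative.

```python
channel_identifier = token_network.new_netting_channel(
    partner=partner_address,          # 20-byte address of the peer (assumed variable)
    settle_timeout=500,               # settle window, in blocks
    given_block_identifier='latest',  # block used for the precondition checks
)
print('channel opened with id', channel_identifier)
```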
Install a new filter for an array of topics emitted by the contract.
Args:
topics: A list of event ids to filter for. Can also be None,
in which case all events are queried.
from_block: The block number at which to start looking for events.
to_block: The block number at which to stop looking for events.
Return:
Filter: The filter instance. | def events_filter(
self,
topics: List[str] = None,
from_block: BlockSpecification = None,
to_block: BlockSpecification = None,
) -> StatelessFilter:
return self.client.new_filter(
self.address,
topics=topics,
from_block=from_block,
to_block=to_block,
) | 181,386 |
Install a new filter for all the events emitted by the current token network contract
Args:
from_block: Create filter starting from this block number (default: 0).
to_block: Create filter stopping at this block number (default: 'latest').
Return:
The filter instance. | def all_events_filter(
self,
from_block: BlockSpecification = GENESIS_BLOCK_NUMBER,
to_block: BlockSpecification = 'latest',
) -> StatelessFilter:
return self.events_filter(None, from_block, to_block) | 181,387 |
Register a token with the raiden token manager.
Args:
registry_address_hex: a hex encoded registry address.
token_address_hex (string): a hex encoded token address.
Returns:
The token network proxy. | def register_token(
self,
registry_address_hex: typing.AddressHex,
token_address_hex: typing.AddressHex,
retry_timeout: typing.NetworkTimeout = DEFAULT_RETRY_TIMEOUT,
) -> TokenNetwork:
registry_address = decode_hex(registry_address_hex)
token_address = decode_hex(token_address_hex)
registry = self._raiden.chain.token_network_registry(registry_address)
contracts_version = self._raiden.contract_manager.contracts_version
if contracts_version == DEVELOPMENT_CONTRACT_VERSION:
token_network_address = registry.add_token_with_limits(
token_address=token_address,
channel_participant_deposit_limit=UINT256_MAX,
token_network_deposit_limit=UINT256_MAX,
)
else:
token_network_address = registry.add_token_without_limits(
token_address=token_address,
)
# Register the channel manager with the raiden registry
waiting.wait_for_payment_network(
self._raiden,
registry.address,
token_address,
retry_timeout,
)
return self._raiden.chain.token_network(token_network_address) | 181,413 |
Wait until a contract is mined
Args:
contract_address_hex (string): hex encoded address of the contract
timeout (int): time to wait for the contract to get mined
Returns:
True if the contract got mined, false otherwise | def wait_for_contract(self, contract_address_hex, timeout=None):
contract_address = decode_hex(contract_address_hex)
start_time = time.time()
result = self._raiden.chain.client.web3.eth.getCode(
to_checksum_address(contract_address),
)
current_time = time.time()
while not result:
if timeout and current_time > start_time + timeout:
return False
result = self._raiden.chain.client.web3.eth.getCode(
to_checksum_address(contract_address),
)
gevent.sleep(0.5)
current_time = time.time()
return len(result) > 0 | 181,415 |
Save events.
Args:
state_change_identifier: Id of the state change that generated these events.
events: List of event rows, each a tuple of (identifier, source_statechange_id, log_time, data). | def write_events(self, events):
with self.write_lock, self.conn:
self.conn.executemany(
'INSERT INTO state_events('
' identifier, source_statechange_id, log_time, data'
') VALUES(?, ?, ?, ?)',
events,
) | 181,425 |
Delete state changes.
Args:
state_changes_to_delete: List of ids to delete. | def delete_state_changes(self, state_changes_to_delete: List[int]) -> None:
with self.write_lock, self.conn:
self.conn.executemany(
'DELETE FROM state_events WHERE identifier = ?',
state_changes_to_delete,
) | 181,426 |
Save events.
Args:
state_change_identifier: Id of the state change that generated these events.
events: List of Event objects. | def write_events(self, state_change_identifier, events, log_time):
events_data = [
(None, state_change_identifier, log_time, self.serializer.serialize(event))
for event in events
]
return super().write_events(events_data) | 181,450 |
Open a port for the raiden service (listening at `internal_port`) through
UPnP.
Args:
internal_port (int): the target port of the raiden service
external_start_port (int): query for an external port starting here
(default: internal_port)
Returns:
external_ip_address, external_port (tuple(str, int)) if successful, otherwise False | def open_port(upnp, internal_port, external_start_port=None):
if external_start_port is None:
external_start_port = internal_port
if upnp is None:
return False
def register(internal, external):
# test existing mappings
mapping = upnp.getspecificportmapping(external, 'UDP')
if mapping is not None:
# FIXME: figure out semantics of the last two values
lanaddr, internal_mapped, name, _, _ = mapping
is_valid_mapping = (
lanaddr == upnp.lanaddr and
name == RAIDEN_IDENTIFICATOR and
internal_mapped == internal
)
is_not_our_mapping = (
internal_mapped != internal and
name != RAIDEN_IDENTIFICATOR
)
is_previous_mapping = (
internal_mapped != internal and
name == RAIDEN_IDENTIFICATOR and
lanaddr == upnp.lanaddr
)
if is_valid_mapping:
log.debug(
'keeping pre-existing portmapping',
internal=internal,
external=external,
lanaddr=lanaddr,
)
return True
elif lanaddr != upnp.lanaddr:
# don't touch other people's mappings
log.debug(
'ignoring existing mapping for other IP',
internal=internal,
external=external,
other_ip=lanaddr,
our_ip=upnp.lanaddr,
)
return False
elif is_not_our_mapping:
log.debug(
'ignoring existing mapping for other program',
name=name,
)
# some other program uses our port
return False
elif is_previous_mapping:
# we ran before on a different internal port
log.debug('releasing previous port mapping')
upnp.deleteportmapping(external, 'UDP')
log.debug('trying to create new port mapping', internal=internal, external=external)
return upnp.addportmapping(
external,
'UDP',
upnp.lanaddr,
internal,
RAIDEN_IDENTIFICATOR,
'',
)
external_port = external_start_port
success = register(internal_port, external_port)
while not success and external_port <= MAX_PORT:
external_port += 1
log.debug('trying', external=external_port)
success = register(internal_port, external_port)
if success:
return upnp.externalipaddress(), external_port
else:
log.error(
'could not register a port-mapping',
location='FIXME',
)
return False
return False | 181,496 |
Try to release the port mapping for `external_port`.
Args:
external_port (int): the port that was previously forwarded to.
Returns:
success (boolean): if the release was successful. | def release_port(upnp, external_port):
mapping = upnp.getspecificportmapping(external_port, 'UDP')
if mapping is None:
log.error('could not find a port mapping', external=external_port)
return False
else:
log.debug('found existing port mapping', mapping=mapping)
if upnp.deleteportmapping(external_port, 'UDP'):
log.info('successfully released port mapping', external=external_port)
return True
log.warning(
'could not release port mapping, check your router for stale mappings',
)
return False | 181,497 |
Calculate a blocktime estimate based on some past blocks.
Args:
oldest: delta in block numbers to go back.
Return:
average block time in seconds | def estimate_blocktime(self, oldest: int = 256) -> float:
last_block_number = self.block_number()
# around genesis block there is nothing to estimate
if last_block_number < 1:
return 15
# if there are less than `oldest` blocks available, start at block 1
if last_block_number < oldest:
interval = (last_block_number - 1) or 1
else:
interval = last_block_number - oldest
assert interval > 0
last_timestamp = self.get_block_header(last_block_number)['timestamp']
first_timestamp = self.get_block_header(last_block_number - interval)['timestamp']
delta = last_timestamp - first_timestamp
return delta / interval | 181,502 |
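To make the arithmetic concrete, a small example with made-up header timestamps (not taken from any real chain):

```python
# Suppose the chain head is block 1_000 and the block 256 blocks earlier was
# mined 3_840 seconds before it; the estimate is simply the average spacing.
interval = 256
delta = 3_840            # seconds between the two block headers
print(delta / interval)  # -> 15.0 seconds per block
```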
Return a proxy for interacting with a smart contract.
Args:
contract_interface: The contract interface as defined by the json.
contract_address: The contract's address. | def new_contract_proxy(self, contract_interface, contract_address: Address):
return ContractProxy(
self,
contract=self.new_contract(contract_interface, contract_address),
) | 181,550 |
Wait until the `transaction_hash` is applied or rejected.
Args:
transaction_hash: Transaction hash that we are waiting for. | def poll(
self,
transaction_hash: bytes,
):
if len(transaction_hash) != 32:
raise ValueError(
'transaction_hash must be a 32 byte hash',
)
transaction_hash = encode_hex(transaction_hash)
# used to check if the transaction was removed, this could happen
# if gas price is too low:
#
# > Transaction (acbca3d6) below gas price (tx=1 Wei ask=18
# > Shannon). All sequential txs from this address(7d0eae79)
# > will be ignored
#
last_result = None
while True:
# Could return None for a short period of time, until the
# transaction is added to the pool
transaction = self.web3.eth.getTransaction(transaction_hash)
# if the transaction was added to the pool and then removed
if transaction is None and last_result is not None:
raise Exception('invalid transaction, check gas price')
# the transaction was added to the pool and mined
if transaction and transaction['blockNumber'] is not None:
last_result = transaction
# this will wait for both APPLIED and REVERTED transactions
transaction_block = transaction['blockNumber']
confirmation_block = transaction_block + self.default_block_num_confirmations
block_number = self.block_number()
if block_number >= confirmation_block:
return transaction
gevent.sleep(1.0) | 181,555 |
Helper function to unpack event data using a provided ABI
Args:
abi: The ABI of the contract, not the ABI of the event
log_: The raw event data
Returns:
The decoded event | def decode_event(abi: ABI, log_: Dict) -> Dict:
if isinstance(log_['topics'][0], str):
log_['topics'][0] = decode_hex(log_['topics'][0])
elif isinstance(log_['topics'][0], int):
log_['topics'][0] = decode_hex(hex(log_['topics'][0]))
event_id = log_['topics'][0]
events = filter_by_type('event', abi)
topic_to_event_abi = {
event_abi_to_log_topic(event_abi): event_abi
for event_abi in events
}
event_abi = topic_to_event_abi[event_id]
return get_event_data(event_abi, log_) | 181,610 |
Returns events emitted by a contract for a given event name, within a certain range.
Args:
web3: A Web3 instance
contract_manager: A contract manager
contract_address: The address of the contract to be filtered, can be `None`
contract_name: The name of the contract
topics: The topics to filter for
from_block: The block at which to start searching for events
to_block: The block to stop searching for events
Returns:
All matching events | def query_blockchain_events(
web3: Web3,
contract_manager: ContractManager,
contract_address: Address,
contract_name: str,
topics: List,
from_block: BlockNumber,
to_block: BlockNumber,
) -> List[Dict]:
filter_params = {
'fromBlock': from_block,
'toBlock': to_block,
'address': to_checksum_address(contract_address),
'topics': topics,
}
events = web3.eth.getLogs(filter_params)
contract_abi = contract_manager.get_contract_abi(contract_name)
return [
decode_event(
abi=contract_abi,
log_=raw_event,
)
for raw_event in events
] | 181,611 |
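A hedged usage sketch; the contract name, address and block range are illustrative, and the exact web3.py attribute names may vary by version.

```python
events = query_blockchain_events(
    web3=web3,
    contract_manager=contract_manager,
    contract_address=token_network_address,  # illustrative address variable
    contract_name='TokenNetwork',
    topics=None,            # no topic filter: return every event of the contract
    from_block=0,
    to_block='latest',      # or a concrete block number
)
for event in events:
    print(event['event'], dict(event['args']))
```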
Returns the path with the highest `version` number.
Raises:
AssertionError: If any of the `paths` in the list is an invalid name.
Args:
paths: A list of file names. | def latest_db_file(paths: List[str]) -> Optional[str]:
dbs = {}
for db_path in paths:
matches = VERSION_RE.match(os.path.basename(db_path))
assert matches, f'Invalid path name {db_path}'
try:
version = int(matches.group(1))
except ValueError:
continue
dbs[version] = db_path
if dbs:
highest_version = sorted(dbs)[-1]
return dbs[highest_version]
return None | 181,643 |
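A small illustrative example, assuming `VERSION_RE` captures the version number from names like `v17_log.db` (the exact pattern is not shown in the snippet):

```python
paths = ['v16_log.db', 'v17_log.db', 'v9_log.db']
print(latest_db_file(paths))  # -> 'v17_log.db', the highest version
print(latest_db_file([]))     # -> None when no databases are found
```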
Returns a filtered list of `paths`, where every name matches our format.
Args:
paths: A list of file names. | def filter_db_names(paths: List[str]) -> List[str]:
return [
db_path
for db_path in paths
if VERSION_RE.match(os.path.basename(db_path))
] | 181,644 |
Automatically maintain channels open for the given token network.
Args:
registry_address: the payment network registry to use.
token_address: the ERC20 token network to connect to.
funds: the amount of funds that can be used by the ConnectionManager.
initial_channel_target: number of channels to open proactively.
joinable_funds_target: fraction of the funds that will be used to join
channels opened by other participants. | def token_network_connect(
self,
registry_address: PaymentNetworkID,
token_address: TokenAddress,
funds: TokenAmount,
initial_channel_target: int = 3,
joinable_funds_target: float = 0.4,
) -> None:
if not is_binary_address(registry_address):
raise InvalidAddress('registry_address must be a valid address in binary')
if not is_binary_address(token_address):
raise InvalidAddress('token_address must be a valid address in binary')
token_network_identifier = views.get_token_network_identifier_by_token_address(
chain_state=views.state_from_raiden(self.raiden),
payment_network_id=registry_address,
token_address=token_address,
)
connection_manager = self.raiden.connection_manager_for_token_network(
token_network_identifier,
)
has_enough_reserve, estimated_required_reserve = has_enough_gas_reserve(
raiden=self.raiden,
channels_to_open=initial_channel_target,
)
if not has_enough_reserve:
raise InsufficientGasReserve((
'The account balance is below the estimated amount necessary to '
'finish the lifecycles of all active channels. A balance of at '
f'least {estimated_required_reserve} wei is required.'
))
connection_manager.connect(
funds=funds,
initial_channel_target=initial_channel_target,
joinable_funds_target=joinable_funds_target,
) | 181,725 |
After Raiden learns about a new block this function must be called to
handle expiration of the hash time locks.
Args:
mediator_state: The current mediator transfer state.
state_change: The Block state change that triggered this handler.
Return:
TransitionResult: The resulting iteration | def handle_block(
mediator_state: MediatorTransferState,
state_change: Block,
channelidentifiers_to_channels: ChannelMap,
pseudo_random_generator: random.Random,
) -> TransitionResult[MediatorTransferState]:
expired_locks_events = events_to_remove_expired_locks(
mediator_state,
channelidentifiers_to_channels,
state_change.block_number,
pseudo_random_generator,
)
secret_reveal_events = events_for_onchain_secretreveal_if_dangerzone(
channelmap=channelidentifiers_to_channels,
secrethash=mediator_state.secrethash,
transfers_pair=mediator_state.transfers_pair,
block_number=state_change.block_number,
block_hash=state_change.block_hash,
)
unlock_fail_events = events_for_expired_pairs(
channelidentifiers_to_channels=channelidentifiers_to_channels,
transfers_pair=mediator_state.transfers_pair,
waiting_transfer=mediator_state.waiting_transfer,
block_number=state_change.block_number,
)
iteration = TransitionResult(
mediator_state,
unlock_fail_events + secret_reveal_events + expired_locks_events,
)
return iteration | 181,880 |
Keep listening for events forever.
Args:
timeout_ms: How long to poll the Home Server for before retrying.
exception_handler: Optional exception handler function which can
be used to handle exceptions in the caller thread.
bad_sync_timeout: Base time to wait after an error before retrying.
Will be increased according to exponential backoff. | def listen_forever(
self,
timeout_ms: int = 30000,
exception_handler: Callable[[Exception], None] = None,
bad_sync_timeout: int = 5,
):
_bad_sync_timeout = bad_sync_timeout
self.should_listen = True
while self.should_listen:
try:
# may be killed and raise exception from _handle_thread
self._sync(timeout_ms)
_bad_sync_timeout = bad_sync_timeout
except MatrixRequestError as e:
log.warning('A MatrixRequestError occurred during sync.')
if e.code >= 500:
log.warning(
'Problem occurred server-side. Waiting',
wait_for=_bad_sync_timeout,
)
gevent.sleep(_bad_sync_timeout)
_bad_sync_timeout = min(_bad_sync_timeout * 2, self.bad_sync_timeout_limit)
else:
raise
except MatrixHttpLibError:
log.exception('A MatrixHttpLibError occurred during sync.')
if self.should_listen:
gevent.sleep(_bad_sync_timeout)
_bad_sync_timeout = min(_bad_sync_timeout * 2, self.bad_sync_timeout_limit)
except Exception as e:
log.exception('Exception thrown during sync')
if exception_handler is not None:
exception_handler(e)
else:
raise | 182,225 |
Start a listener greenlet to listen for events in the background.
Args:
timeout_ms: How long to poll the Home Server for before retrying.
exception_handler: Optional exception handler function which can
be used to handle exceptions in the caller thread. | def start_listener_thread(self, timeout_ms: int = 30000, exception_handler: Callable = None):
assert not self.should_listen and self.sync_thread is None, 'Already running'
self.should_listen = True
self.sync_thread = gevent.spawn(self.listen_forever, timeout_ms, exception_handler)
self.sync_thread.name = f'GMatrixClient.listen_forever user_id:{self.user_id}' | 182,226 |
Search user directory for a given term, returning a list of users
Args:
term: term to be searched for
Returns:
user_list: list of users returned by server-side search | def search_user_directory(self, term: str) -> List[User]:
response = self.api._send(
'POST',
'/user_directory/search',
{
'search_term': term,
},
)
try:
return [
User(self.api, _user['user_id'], _user['display_name'])
for _user in response['results']
]
except KeyError:
return [] | 182,228 |
Send typing event directly to api
Args:
room: room to send typing event to
timeout: timeout for the event, in ms | def typing(self, room: Room, timeout: int = 5000):
path = f'/rooms/{quote(room.room_id)}/typing/{quote(self.user_id)}'
return self.api._send('PUT', path, {'typing': True, 'timeout': timeout}) | 182,231 |
Initialize the state manager.
Args:
state_transition: function that can apply a StateChange message.
current_state: current application state. | def __init__(self, state_transition: Callable, current_state: Optional[State]) -> None:
if not callable(state_transition):
raise ValueError('state_transition must be a callable')
self.state_transition = state_transition
self.current_state = current_state | 182,304 |
Apply the `state_change` in the current machine and return the
resulting events.
Args:
state_change: An object representation of a state
change.
Return:
A list of events produced by the state transition.
It's the upper layer's responsibility to decide how to handle
these events. | def dispatch(self, state_change: StateChange) -> List[Event]:
assert isinstance(state_change, StateChange)
# the state objects must be treated as immutable, so make a copy of the
# current state and pass the copy to the state machine to be modified.
next_state = deepcopy(self.current_state)
# update the current state by applying the change
iteration = self.state_transition(
next_state,
state_change,
)
assert isinstance(iteration, TransitionResult)
self.current_state = iteration.new_state
events = iteration.events
assert isinstance(self.current_state, (State, type(None)))
assert all(isinstance(e, Event) for e in events)
return events | 182,305 |
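A hedged usage sketch of the state manager; the transition function, state change and event handler names are illustrative stand-ins for the real types.

```python
state_manager = StateManager(
    state_transition=node_state_transition,  # callable(state, state_change) -> TransitionResult (assumed)
    current_state=None,
)
events = state_manager.dispatch(new_block_state_change)  # illustrative state change
for event in events:
    handle_event(event)  # the upper layer decides what to do with each event
```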
Sorts a list of servers by http round-trip time
Params:
servers: sequence of http server urls
Returns:
sequence of pairs of url,rtt in seconds, sorted by rtt, excluding failed servers
(possibly empty) | def sort_servers_closest(servers: Sequence[str]) -> Sequence[Tuple[str, float]]:
if not {urlparse(url).scheme for url in servers}.issubset({'http', 'https'}):
raise TransportError('Invalid server urls')
get_rtt_jobs = set(
gevent.spawn(lambda url: (url, get_http_rtt(url)), server_url)
for server_url
in servers
)
# these tasks should never raise, returns None on errors
gevent.joinall(get_rtt_jobs, raise_error=False) # block and wait tasks
sorted_servers: List[Tuple[str, float]] = sorted(
(job.value for job in get_rtt_jobs if job.value[1] is not None),
key=itemgetter(1),
)
log.debug('Matrix homeserver RTT times', rtt_times=sorted_servers)
return sorted_servers | 182,408 |
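A hedged usage sketch with illustrative homeserver URLs:

```python
servers = [
    'https://matrix.example.org',
    'https://homeserver.example.net',
]
for url, rtt in sort_servers_closest(servers):
    print(f'{url}: {rtt:.3f}s round trip')
```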
Given a list of possible servers, chooses the closest available one and creates a GMatrixClient
Params:
servers: list of servers urls, with scheme (http or https)
Rest of args and kwargs are forwarded to GMatrixClient constructor
Returns:
GMatrixClient instance for one of the available servers | def make_client(servers: Sequence[str], *args, **kwargs) -> GMatrixClient:
if len(servers) > 1:
sorted_servers = [
server_url
for (server_url, _) in sort_servers_closest(servers)
]
log.info(
'Automatically selecting matrix homeserver based on RTT',
sorted_servers=sorted_servers,
)
elif len(servers) == 1:
sorted_servers = servers
else:
raise TransportError('No valid servers list given')
last_ex = None
for server_url in sorted_servers:
server_url: str = server_url
client = GMatrixClient(server_url, *args, **kwargs)
try:
client.api._send('GET', '/versions', api_path='/_matrix/client')
except MatrixError as ex:
log.warning('Selected server not usable', server_url=server_url, _exception=ex)
last_ex = ex
else:
break
else:
raise TransportError(
'Unable to find a reachable Matrix server. Please check your network connectivity.',
) from last_ex
return client | 182,409 |
Sends a message to one of the global rooms
These rooms aren't being listened on and therefore no reply could be heard, so these
messages are sent in a send-and-forget async way.
The actual room name is composed from the suffix given as parameter and chain name or id
e.g.: raiden_ropsten_discovery
Params:
room: name suffix as passed in config['global_rooms'] list
message: Message instance to be serialized and sent | def send_global(self, room: str, message: Message) -> None:
self._global_send_queue.put((room, message))
self._global_send_event.set() | 182,459 |
Helper function to unpack event data using a provided ABI
Args:
abi: The ABI of the contract, not the ABI of the event
log: The raw event data
Returns:
The decoded event | def decode_event(abi: Dict, log: Dict):
if isinstance(log['topics'][0], str):
log['topics'][0] = decode_hex(log['topics'][0])
elif isinstance(log['topics'][0], int):
log['topics'][0] = decode_hex(hex(log['topics'][0]))
event_id = log['topics'][0]
events = filter_by_type('event', abi)
topic_to_event_abi = {
event_abi_to_log_topic(event_abi): event_abi
for event_abi in events
}
event_abi = topic_to_event_abi[event_id]
return get_event_data(event_abi, log) | 182,717 |
Resample the dataset.
Args:
seed (int, optional): Seed for resampling. By default no seed is
used. | def resample(self, seed=None):
if seed is not None:
gen = torch.manual_seed(seed)
else:
gen = torch.default_generator
if self.replacement:
self.perm = torch.LongTensor(len(self)).random_(
len(self.dataset), generator=gen)
else:
self.perm = torch.randperm(
len(self.dataset), generator=gen).narrow(0, 0, len(self)) | 182,751 |
Outputs a function which will log the arguments to Visdom in an appropriate way.
Args:
vis_fn: A function, such as self.vis.image | def _viz_prototype(self, vis_fn):
def _viz_logger(*args, **kwargs):
self.win = vis_fn(*args,
win=self.win,
env=self.env,
opts=self.opts,
**kwargs)
return _viz_logger | 182,794 |
Multiple lines can be added to the same plot with the "name" attribute (see example)
Args:
fields: Currently unused
plot_type: {scatter, line}
Examples:
>>> plot_logger = VisdomPlotLogger('line')
>>> plot_logger.log(stats['epoch'], loss_meter.value()[0], name="train")
>>> plot_logger.log(stats['epoch'], loss_meter.value()[0], name="test") | def __init__(self, plot_type, fields=None, win=None, env=None, opts={}, port=8097, server="localhost", name=None):
super(VisdomPlotLogger, self).__init__(fields, win, env, opts, port, server)
valid_plot_types = {
"scatter": self.viz.scatter,
"line": self.viz.line}
self.plot_type = plot_type
# Set chart type
if plot_type not in valid_plot_types.keys():
raise ValueError("plot_type \'{}\' not found. Must be one of {}".format(
plot_type, valid_plot_types.keys()))
self.chart = valid_plot_types[plot_type] | 182,798 |
Update the results file with new information.
Args:
task_name (str): Name of the currently running task. A previously unseen
``task_name`` will create a new entry in both :attr:`tasks`
and :attr:`results`.
result: This will be appended to the list in :attr:`results` which
corresponds to ``task_name`` in :attr:`tasks`. | def update(self, task_name, result):
with open(self.filepath, 'rb') as f:
existing_results = pickle.load(f)
if task_name not in self.tasks:
self._add_task(task_name)
existing_results['tasks'].append(task_name)
existing_results['results'].append([])
task_name_idx = existing_results['tasks'].index(task_name)
results = existing_results['results'][task_name_idx]
results.append(result)
with open(self.filepath, 'wb') as f:
pickle.dump(existing_results, f) | 182,821 |
Function to sample a new configuration
This function is called inside Hyperband to query a new configuration
Parameters:
-----------
budget: float
the budget for which this configuration is scheduled
returns: config
should return a valid configuration | def get_config(self, budget):
self.logger.debug('start sampling a new configuration.')
sample = None
info_dict = {}
# If no model is available, sample from prior
# also mix in a fraction of random configs
if len(self.kde_models.keys()) == 0 or np.random.rand() < self.random_fraction:
sample = self.configspace.sample_configuration()
info_dict['model_based_pick'] = False
best = np.inf
best_vector = None
if sample is None:
try:
#sample from largest budget
budget = max(self.kde_models.keys())
l = self.kde_models[budget]['good'].pdf
g = self.kde_models[budget]['bad' ].pdf
minimize_me = lambda x: max(1e-32, g(x))/max(l(x),1e-32)
kde_good = self.kde_models[budget]['good']
kde_bad = self.kde_models[budget]['bad']
for i in range(self.num_samples):
idx = np.random.randint(0, len(kde_good.data))
datum = kde_good.data[idx]
vector = []
for m,bw,t in zip(datum, kde_good.bw, self.vartypes):
bw = max(bw, self.min_bandwidth)
if t == 0:
bw = self.bw_factor*bw
try:
vector.append(sps.truncnorm.rvs(-m/bw,(1-m)/bw, loc=m, scale=bw))
except:
self.logger.warning("Truncated Normal failed for:\ndatum=%s\nbandwidth=%s\nfor entry with value %s"%(datum, kde_good.bw, m))
self.logger.warning("data in the KDE:\n%s"%kde_good.data)
else:
if np.random.rand() < (1-bw):
vector.append(int(m))
else:
vector.append(np.random.randint(t))
val = minimize_me(vector)
if not np.isfinite(val):
self.logger.warning('sampled vector: %s has EI value %s'%(vector, val))
self.logger.warning("data in the KDEs:\n%s\n%s"%(kde_good.data, kde_bad.data))
self.logger.warning("bandwidth of the KDEs:\n%s\n%s"%(kde_good.bw, kde_bad.bw))
self.logger.warning("l(x) = %s"%(l(vector)))
self.logger.warning("g(x) = %s"%(g(vector)))
# right now, this happens because a KDE does not contain all values for a categorical parameter
# this cannot be fixed with the statsmodels KDE, so for now, we are just going to evaluate this one
# if the good_kde has a finite value, i.e. there is no config with that value in the bad kde, so it shouldn't be terrible.
if np.isfinite(l(vector)):
best_vector = vector
break
if val < best:
best = val
best_vector = vector
if best_vector is None:
self.logger.debug("Sampling based optimization with %i samples failed -> using random configuration"%self.num_samples)
sample = self.configspace.sample_configuration().get_dictionary()
info_dict['model_based_pick'] = False
else:
self.logger.debug('best_vector: {}, {}, {}, {}'.format(best_vector, best, l(best_vector), g(best_vector)))
for i, hp_value in enumerate(best_vector):
if isinstance(
self.configspace.get_hyperparameter(
self.configspace.get_hyperparameter_by_idx(i)
),
ConfigSpace.hyperparameters.CategoricalHyperparameter
):
best_vector[i] = int(np.rint(best_vector[i]))
sample = ConfigSpace.Configuration(self.configspace, vector=best_vector).get_dictionary()
try:
sample = ConfigSpace.util.deactivate_inactive_hyperparameters(
configuration_space=self.configspace,
configuration=sample
)
info_dict['model_based_pick'] = True
except Exception as e:
self.logger.warning(("="*50 + "\n")*3 +\
"Error converting configuration:\n%s"%sample+\
"\n here is a traceback:" +\
traceback.format_exc())
raise(e)
except:
self.logger.warning("Sampling based optimization with %i samples failed\n %s \nUsing random configuration"%(self.num_samples, traceback.format_exc()))
sample = self.configspace.sample_configuration()
info_dict['model_based_pick'] = False
try:
sample = ConfigSpace.util.deactivate_inactive_hyperparameters(
configuration_space=self.configspace,
configuration=sample.get_dictionary()
).get_dictionary()
except Exception as e:
self.logger.warning("Error (%s) converting configuration: %s -> "
"using random configuration!",
e,
sample)
sample = self.configspace.sample_configuration().get_dictionary()
self.logger.debug('done sampling a new configuration.')
return sample, info_dict | 182,930 |
Function to sample a new configuration
This function is called inside Hyperband to query a new configuration
Parameters:
-----------
budget: float
the budget for which this configuration is scheduled
returns: config
should return a valid configuration | def get_config(self, budget):
sample = None
info_dict = {}
# If no model is available, sample from prior
# also mix in a fraction of random configs
if len(self.kde_models.keys()) == 0 or np.random.rand() < self.random_fraction:
sample = self.configspace.sample_configuration()
info_dict['model_based_pick'] = False
if sample is None:
try:
#import pdb; pdb.set_trace()
samples = self.kde_models[budget]['good'].sample(self.num_samples)
ei = self.kde_models[budget]['good'].pdf(samples)/self.kde_models[budget]['bad'].pdf(samples)
best_idx = np.argmax(ei)
best_vector = samples[best_idx]
sample = ConfigSpace.Configuration(self.configspace, vector=best_vector)
sample = ConfigSpace.util.deactivate_inactive_hyperparameters(
configuration_space=self.configspace,
configuration=sample.get_dictionary()
)
info_dict['model_based_pick'] = True
except Exception as e:
self.logger.warning(("="*50 + "\n")*3 +\
"Error sampling a configuration!\n"+\
"Models for budgets: %s"%(self.kde_models.keys()) +\
"\n here is a traceback:" +\
traceback.format_exc())
for b,l in self.losses.items():
self.logger.debug("budget: {}\nlosses:{}".format(b,l))
sample = self.configspace.sample_configuration()
info_dict['model_based_pick'] = False
return sample.get_dictionary(), info_dict | 182,949 |
function to register finished runs
Every time a run has finished, this function should be called
to register it with the result logger. If overwritten, make
sure to call this method from the base class to ensure proper
logging.
Parameters:
-----------
job: hpbandster.distributed.dispatcher.Job object
contains all the info about the run | def new_result(self, job, update_model=True):
super().new_result(job)
if job.result is None:
# One could skip crashed results, but we decided to
# assign a +inf loss and count them as bad configurations
loss = np.inf
else:
loss = job.result["loss"]
budget = job.kwargs["budget"]
if budget not in self.configs.keys():
self.configs[budget] = []
self.losses[budget] = []
if len(self.configs.keys()) == 1:
min_num_points = 6
else:
min_num_points = self.min_points_in_model
# skip model building if we already have a bigger model
if max(list(self.kde_models.keys()) + [-np.inf]) > budget:
return
# We want to get a numerical representation of the configuration in the original space
conf = ConfigSpace.Configuration(self.configspace, job.kwargs["config"]).get_array().tolist()
#import pdb; pdb.set_trace()
if conf in self.configs[budget]:
i = self.configs[budget].index(conf)
self.losses[budget][i].append(loss)
print('-'*50)
print('ran config %s with loss %f again'%(conf, loss))
else:
self.configs[budget].append(conf)
self.losses[budget].append([loss])
# skip model building:
# a) if not enough points are available
tmp = np.array([np.mean(r) for r in self.losses[budget]])
if np.sum(np.isfinite(tmp)) < min_num_points:
self.logger.debug("Only %i successful run(s) for budget %f available, need more than %s -> can't build model!"%(np.sum(np.isfinite(tmp)), budget, min_num_points))
return
# b) during warm starting, when we feed previous results in and only update once
if not update_model:
return
if budget not in self.kde_models.keys():
self.kde_models[budget] = {
'good': MultivariateKDE(self.configspace, min_bandwidth=self.min_bandwidth, fully_dimensional=self.fully_dimensional),
'bad' : MultivariateKDE(self.configspace, min_bandwidth=self.min_bandwidth, fully_dimensional=self.fully_dimensional)
}
#import pdb; pdb.set_trace()
num_configs = len(self.losses[budget])
train_configs = np.array(self.configs[budget][-num_configs:])
train_losses = np.array(list(map(np.mean, self.losses[budget][-num_configs:])))
n_good= max(3,(num_configs * self.top_n_percent) // 100)
n_bad = num_configs-n_good
# Refit KDE for the current budget
idx = np.argsort(train_losses)
train_data_good = self.impute_conditional_data(train_configs[idx[:n_good]])
train_data_bad = self.impute_conditional_data(train_configs[idx[n_good:n_good+n_bad+1]])
self.kde_models[budget]['bad'].fit(train_data_bad, bw_estimator=self.bw_estimator)
self.kde_models[budget]['good'].fit(train_data_good, bw_estimator=self.bw_estimator)
if self.bw_estimator in ['mlcv'] and n_good < 3:
self.kde_models[budget]['good'].bandwidths[:] = self.kde_models[budget]['bad'].bandwidths
# update probs for the categorical parameters for later sampling
self.logger.debug('done building a new model for budget %f based on %i/%i split\nBest loss for this budget:%f\n\n\n\n\n'%(budget, n_good, n_bad, np.min(train_losses))) | 182,950 |
Iteration class that resamples new configurations alongside keeping the good ones
in SuccessiveHalving.
Parameters:
-----------
resampling_rate: float
fraction of configurations that are resampled at each stage
min_samples_advance:int
number of samples that are guaranteed to proceed to the next
stage regardless of the fraction. | def __init__(self, *args, resampling_rate = 0.5, min_samples_advance = 1, **kwargs):
self.resampling_rate = resampling_rate
self.min_samples_advance = min_samples_advance | 182,955 |
BO-HB uses (just like Hyperband) SuccessiveHalving for each iteration.
See Li et al. (2016) for reference.
Parameters:
-----------
iteration: int
the index of the iteration to be instantiated
Returns:
--------
SuccessiveHalving: the SuccessiveHalving iteration with the
corresponding number of configurations | def get_next_iteration(self, iteration, iteration_kwargs={}):
min_budget = max( self.min_budget, self.config_generator.largest_budget_with_model())
max_budget = self.max_budget
eta = self.eta
# precompute some HB stuff
max_SH_iter = -int(np.log(min_budget/max_budget)/np.log(eta)) + 1
budgets = max_budget * np.power(eta, -np.linspace(max_SH_iter-1, 0, max_SH_iter))
# number of 'SH rungs'
s = max_SH_iter - 1
# number of configurations in that bracket
n0 = int(np.floor((self.max_SH_iter)/(s+1)) * eta**s)
ns = np.array([max(int(n0*(eta**(-i))), 1) for i in range(s+1)])
while (ns * budgets[-s-1:]).sum() <= self.budget_per_iteration:
n0 += 1
ns = np.array([max(int(n0*(eta**(-i))), 1) for i in range(s+1)])
n0 -= 1
ns = np.array([max(int(n0*(eta**(-i))), 1) for i in range(s+1)])
assert (ns * budgets[-s-1:]).sum() <= self.budget_per_iteration, 'Sampled iteration exceeds the budget per iteration!'
return(SuccessiveHalving(HPB_iter=iteration, num_configs=ns, budgets=budgets, config_sampler=self.config_generator.get_config, **iteration_kwargs)) | 182,958 |
Function to sample a new configuration
This function is called inside Hyperband to query a new configuration
Parameters:
-----------
budget: float
the budget for which this configuration is scheduled
returns: config
should return a valid configuration | def get_config(self, budget):
# No observations available for this budget sample from the prior
if len(self.kde_models.keys()) == 0:
return self.configspace.sample_configuration().get_dictionary()
# If we haven't seen anything with this budget, we sample from the kde trained on the highest budget
if budget not in self.kde_models.keys():
budget = sorted(self.kde_models.keys())[-1]
# TODO: This only works in continuous space and with gaussian kernels
kde = self.kde_models[budget]
idx = np.random.randint(0, len(self.kde_models[budget].data))
vector = [sps.truncnorm.rvs(-m/bw,(1-m)/bw, loc=m, scale=bw) for m,bw in zip(self.kde_models[budget].data[idx], kde.bw)]
if np.any(np.array(vector)>1) or np.any(np.array(vector)<0):
raise RuntimeError("truncated normal sampling problems!")
sample = ConfigSpace.Configuration(self.configspace, vector=vector)
return sample.get_dictionary(), {} | 182,960 |
function to register finished runs
Every time a run has finished, this function should be called
to register it with the result logger. If overwritten, make
sure to call this method from the base class to ensure proper
logging.
Parameters:
-----------
job: hpbandster.distributed.dispatcher.Job object
contains all the info about the run; its result is a dict with
the keys 'loss' and 'info' | def new_result(self, job):
super(KernelDensityEstimator, self).new_result(job)
budget = job.kwargs["budget"]
if budget not in self.configs.keys():
self.configs[budget] = []
self.losses[budget] = []
# We want to get a numerical representation of the configuration in the original space
conf = ConfigSpace.Configuration(self.configspace, job.kwargs['config'])
self.configs[budget].append(conf.get_array())
self.losses[budget].append(job.result['result']["loss"])
# Check if we have enough data points to fit a KDE
if len(self.configs[budget]) % self.update_after_n_points == 0:
train_configs, train_losses = [], []
train_configs.extend(self.configs[budget])
train_losses.extend(self.losses[budget])
n = int(self.top_n_percent * len(train_configs) / 100.)
remaining_budgets = list(self.configs.keys())
remaining_budgets.remove(budget)
remaining_budgets.sort(reverse=True)
for b in remaining_budgets:
if n >= self.min_points_in_model: break
train_configs.extend(self.configs[b])
train_losses.extend(self.losses[b])
n = int(self.top_n_percent * len(train_configs) / 100.)
if len(train_losses) < self.min_points_in_model:
return
n = max(self.min_points_in_model, n)
# Refit KDE for the current budget
idx = np.argsort(train_losses)
train_data = (np.array(train_configs)[idx])[:n]
self.kde_models[budget] = sm.nonparametric.KDEMultivariate(data=train_data,
var_type=self.var_type,
bw='cv_ls') | 182,961 |
predict the loss of an unseen configuration
Parameters:
-----------
times: numpy array
times where to predict the loss
config: numpy array
the numerical representation of the config
Returns:
--------
mean and variance prediction at input times for the given config | def predict_unseen(self, times, config):
assert np.all(times > 0) and np.all(times <= self.max_num_epochs)
x = np.array(config)[None, :]
idx = times / self.max_num_epochs
x = np.repeat(x, idx.shape[0], axis=0)
x = np.concatenate((x, idx[:, None]), axis=1)
mean, var = self.model.predict(x)
return 1 - mean, var | 182,982 |
function to sample a new configuration
This function is called inside Hyperband to query a new configuration
Parameters:
-----------
budget: float
the budget for which this configuration is scheduled
returns: config
should return a valid configuration | def get_config(self, budget):
self.lock.acquire()
if not self.is_trained:
c = self.config_space.sample_configuration().get_array()
else:
candidates = np.array([self.config_space.sample_configuration().get_array()
for _ in range(self.n_candidates)])
# We are only interested in the asymptotic value
projected_candidates = np.concatenate((candidates, np.ones([self.n_candidates, 1])), axis=1)
# Compute the upper confidence bound of the function at the asymptote
m, v = self.model.predict(projected_candidates)
ucb_values = m + self.delta * np.sqrt(v)
print(ucb_values)
# Sample a configuration based on the ucb values
p = np.ones(self.n_candidates) * (ucb_values / np.sum(ucb_values))
idx = np.random.choice(self.n_candidates, 1, False, p)
c = candidates[idx][0]
config = ConfigSpace.Configuration(self.config_space, vector=c)
self.lock.release()
return config.get_dictionary(), {} | 183,046 |
starts a Pyro4 nameserver in a daemon thread
Parameters:
-----------
host: str
the hostname to use for the nameserver
port: int
the port to be used. Default =0 means a random port
nic_name: str
name of the network interface to use
Returns:
--------
tuple (str, int):
the host name and the used port | def start_local_nameserver(host=None, port=0, nic_name=None):
if host is None:
if nic_name is None:
host = 'localhost'
else:
host = nic_name_to_host(nic_name)
uri, ns, _ = Pyro4.naming.startNS(host=host, port=port)
host, port = ns.locationStr.split(':')
thread = threading.Thread(target=ns.requestLoop, name='Pyro4 nameserver started by HpBandSter')
thread.daemon=True
thread.start()
return(host, int(port)) | 183,048 |
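A hedged usage sketch of the helper above:

```python
# Start a nameserver on the loopback interface on a random free port.
ns_host, ns_port = start_local_nameserver(host='127.0.0.1')
print(f'Pyro4 nameserver running at {ns_host}:{ns_port}')
```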
Connect to the device.
Args:
banner: See protocol_handler.Connect.
**kwargs: See protocol_handler.Connect and adb_commands.ConnectDevice for kwargs.
Includes handle, rsa_keys, and auth_timeout_ms.
Returns:
An instance of this class if the device connected successfully. | def _Connect(self, banner=None, **kwargs):
if not banner:
banner = socket.gethostname().encode()
conn_str = self.protocol_handler.Connect(self._handle, banner=banner, **kwargs)
# Remove banner and colons after device state (state::banner)
parts = conn_str.split(b'::')
self._device_state = parts[0]
# Break out the build prop info
self.build_props = str(parts[1].split(b';'))
return True | 183,205 |
Removes a package from the device.
Args:
package_name: Package name of target package.
keep_data: whether to keep the data and cache directories
timeout_ms: Expected timeout for the uninstall.
Returns:
The pm uninstall output. | def Uninstall(self, package_name, keep_data=False, timeout_ms=None):
cmd = ['pm uninstall']
if keep_data:
cmd.append('-k')
cmd.append('"%s"' % package_name)
return self.Shell(' '.join(cmd), timeout_ms=timeout_ms) | 183,207 |
Return a directory listing of the given path.
Args:
device_path: Directory to list. | def List(self, device_path):
connection = self.protocol_handler.Open(self._handle, destination=b'sync:')
listing = self.filesync_handler.List(connection, device_path)
connection.Close()
return listing | 183,211 |
Reboot the device.
Args:
destination: Specify 'bootloader' for fastboot. | def Reboot(self, destination=b''):
self.protocol_handler.Open(self._handle, b'reboot:%s' % destination) | 183,212 |
Run command on the device, returning the output.
Args:
command: Shell command to run
timeout_ms: Maximum time to allow the command to run. | def Shell(self, command, timeout_ms=None):
return self.protocol_handler.Command(
self._handle, service=b'shell', command=command,
timeout_ms=timeout_ms) | 183,213 |
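A hedged usage sketch; `device` is assumed to be an already-connected instance of the class defining `Shell`, and the shell commands are illustrative.

```python
# List the sdcard contents and print the device's kernel version.
print(device.Shell('ls /sdcard', timeout_ms=5000))
print(device.Shell('uname -a'))
```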
Run command on the device, yielding each line of output.
Args:
command: Command to run on the target.
timeout_ms: Maximum time to allow the command to run.
Yields:
The responses from the shell command. | def StreamingShell(self, command, timeout_ms=None):
return self.protocol_handler.StreamingCommand(
self._handle, service=b'shell', command=command,
timeout_ms=timeout_ms) | 183,214 |
Initialize USB Handle.
Arguments:
device: libusb_device to connect to.
setting: libusb setting with the correct endpoints to communicate with.
usb_info: String describing the usb path/serial/device, for debugging.
timeout_ms: Timeout in milliseconds for all I/O. | def __init__(self, device, setting, usb_info=None, timeout_ms=None):
self._setting = setting
self._device = device
self._handle = None
self._usb_info = usb_info or ''
self._timeout_ms = timeout_ms if timeout_ms else DEFAULT_TIMEOUT_MS
self._max_read_packet_len = 0 | 183,220 |
Find and return the first matching device.
Args:
setting_matcher: See cls.FindDevices.
device_matcher: See cls.FindDevices.
**kwargs: See cls.FindDevices.
Returns:
An instance of UsbHandle.
Raises:
DeviceNotFoundError: Raised if the device is not available. | def FindFirst(cls, setting_matcher, device_matcher=None, **kwargs):
try:
return next(cls.FindDevices(
setting_matcher, device_matcher=device_matcher, **kwargs))
except StopIteration:
raise usb_exceptions.DeviceNotFoundError(
'No device available, or it is in the wrong configuration.') | 183,230 |
Initialize the TCP Handle.
Arguments:
serial: Android device serial of the form host or host:port.
Host may be an IP address or a host name. | def __init__(self, serial, timeout_ms=None):
# if necessary, convert serial to a unicode string
if isinstance(serial, (bytes, bytearray)):
serial = serial.decode('utf-8')
if ':' in serial:
self.host, self.port = serial.split(':')
else:
self.host = serial
self.port = 5555
self._connection = None
self._serial_number = '%s:%s' % (self.host, self.port)
self._timeout_ms = float(timeout_ms) if timeout_ms else None
self._connect() | 183,232 |
Constructs a FastbootProtocol instance.
Args:
usb: UsbHandle instance.
chunk_kb: Packet size. For older devices, 4 may be required. | def __init__(self, usb, chunk_kb=1024):
self.usb = usb
self.chunk_kb = chunk_kb | 183,259 |
Sends a command to the device.
Args:
command: The command to send.
arg: Optional argument to the command. | def SendCommand(self, command, arg=None):
if arg is not None:
if not isinstance(arg, bytes):
arg = arg.encode('utf8')
command = b'%s:%s' % (command, arg)
self._Write(io.BytesIO(command), len(command)) | 183,260 |
Accepts normal responses from the device.
Args:
timeout_ms: Timeout in milliseconds to wait for each response.
info_cb: Optional callback for text sent from the bootloader.
Returns:
OKAY packet's message. | def HandleSimpleResponses(
self, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):
return self._AcceptResponses(b'OKAY', info_cb, timeout_ms=timeout_ms) | 183,261 |
Flashes a partition from the file on disk.
Args:
partition: Partition name to flash to.
source_file: Filename to download to the device.
source_len: Optional length of source_file, uses os.stat if not provided.
info_cb: See Download.
progress_callback: See Download.
Returns:
Download and flash responses, normally nothing. | def FlashFromFile(self, partition, source_file, source_len=0,
info_cb=DEFAULT_MESSAGE_CALLBACK, progress_callback=None):
if source_len == 0:
# Fall back to stat.
source_len = os.stat(source_file).st_size
download_response = self.Download(
source_file, source_len=source_len, info_cb=info_cb,
progress_callback=progress_callback)
flash_response = self.Flash(partition, info_cb=info_cb)
return download_response + flash_response | 183,267 |
Flashes the last downloaded file to the given partition.
Args:
partition: Partition to overwrite with the new image.
timeout_ms: Optional timeout in milliseconds to wait for it to finish.
info_cb: See Download. Usually no messages.
Returns:
Response to a download request, normally nothing. | def Flash(self, partition, timeout_ms=0, info_cb=DEFAULT_MESSAGE_CALLBACK):
return self._SimpleCommand(b'flash', arg=partition, info_cb=info_cb,
timeout_ms=timeout_ms) | 183,269 |
Erases the given partition.
Args:
partition: Partition to clear. | def Erase(self, partition, timeout_ms=None):
self._SimpleCommand(b'erase', arg=partition, timeout_ms=timeout_ms) | 183,270 |
Returns the given variable's definition.
Args:
var: A variable the bootloader tracks. Use 'all' to get them all.
info_cb: See Download. Usually no messages.
Returns:
Value of var according to the current bootloader. | def Getvar(self, var, info_cb=DEFAULT_MESSAGE_CALLBACK):
return self._SimpleCommand(b'getvar', arg=var, info_cb=info_cb) | 183,271 |
Executes an OEM command on the device.
Args:
command: Command to execute, such as 'poweroff' or 'bootconfig read'.
timeout_ms: Optional timeout in milliseconds to wait for a response.
info_cb: See Download. Messages vary based on command.
Returns:
The final response from the device. | def Oem(self, command, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):
if not isinstance(command, bytes):
command = command.encode('utf8')
return self._SimpleCommand(
b'oem %s' % command, timeout_ms=timeout_ms, info_cb=info_cb) | 183,272 |
Reboots the device.
Args:
target_mode: Normal reboot when unspecified. Can specify other target
modes such as 'recovery' or 'bootloader'.
timeout_ms: Optional timeout in milliseconds to wait for a response.
Returns:
Usually the empty string. Depends on the bootloader and the target_mode. | def Reboot(self, target_mode=b'', timeout_ms=None):
return self._SimpleCommand(
b'reboot', arg=target_mode or None, timeout_ms=timeout_ms) | 183,273 |
Push a file-like object to the device.
Args:
connection: ADB connection
datafile: File-like object for reading from
filename: Filename to push to
st_mode: stat mode for filename
mtime: modification time
progress_callback: callback method that accepts filename, bytes_written and total_bytes
Raises:
PushFailedError: Raised on push failure. | def Push(cls, connection, datafile, filename,
st_mode=DEFAULT_PUSH_MODE, mtime=0, progress_callback=None):
fileinfo = ('{},{}'.format(filename, int(st_mode))).encode('utf-8')
cnxn = FileSyncConnection(connection, b'<2I')
cnxn.Send(b'SEND', fileinfo)
if progress_callback:
try:  # real on-disk files expose a usable fileno(); in-memory streams raise here
    total_bytes = os.fstat(datafile.fileno()).st_size
except (AttributeError, OSError):
    total_bytes = -1
progress = cls._HandleProgress(lambda current: progress_callback(filename, current, total_bytes))
next(progress)
while True:
data = datafile.read(MAX_PUSH_DATA)
if data:
cnxn.Send(b'DATA', data)
if progress_callback:
progress.send(len(data))
else:
break
if mtime == 0:
mtime = int(time.time())
# DONE doesn't send data, but it hides the last bit of data in the size
# field.
cnxn.Send(b'DONE', size=mtime)
for cmd_id, _, data in cnxn.ReadUntil((), b'OKAY', b'FAIL'):
if cmd_id == b'OKAY':
return
raise PushFailedError(data) | 183,285 |
Send/buffer FileSync packets.
Packets are buffered and only flushed when this connection is read from. All
messages have a response from the device, so this will always get flushed.
Args:
command_id: Command to send.
data: Optional data to send, must set data or size.
size: Optionally override size from len(data). | def Send(self, command_id, data=b'', size=0):
if data:
if not isinstance(data, bytes):
data = data.encode('utf8')
size = len(data)
if not self._CanAddToSendBuffer(len(data)):
self._Flush()
buf = struct.pack(b'<2I', self.id_to_wire[command_id], size) + data
self.send_buffer[self.send_idx:self.send_idx + len(buf)] = buf
self.send_idx += len(buf) | 183,287 |
Prints a directory listing.
Args:
device_path: Directory to list. | def List(device, device_path):
files = device.List(device_path)
files.sort(key=lambda x: x.filename)
maxname = max(len(f.filename) for f in files)
maxsize = max(len(str(f.size)) for f in files)
for f in files:
mode = (
('d' if stat.S_ISDIR(f.mode) else '-') +
('r' if f.mode & stat.S_IRUSR else '-') +
('w' if f.mode & stat.S_IWUSR else '-') +
('x' if f.mode & stat.S_IXUSR else '-') +
('r' if f.mode & stat.S_IRGRP else '-') +
('w' if f.mode & stat.S_IWGRP else '-') +
('x' if f.mode & stat.S_IXGRP else '-') +
('r' if f.mode & stat.S_IROTH else '-') +
('w' if f.mode & stat.S_IWOTH else '-') +
('x' if f.mode & stat.S_IXOTH else '-'))
t = time.gmtime(f.mtime)
yield '%s %*d %04d-%02d-%02d %02d:%02d:%02d %-*s\n' % (
mode, maxsize, f.size,
t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec,
maxname, f.filename) | 183,294 |
Runs a command on the device and prints the stdout.
Args:
command: Command to run on the target. | def Shell(device, *command):
if command:
return device.StreamingShell(' '.join(command))
else:
# Retrieve the initial terminal prompt to use as a delimiter for future reads
terminal_prompt = device.InteractiveShell()
print(terminal_prompt.decode('utf-8'))
# Accept user input in a loop and write it to the interactive shell's stdin, then print the output
while True:
cmd = input('> ')
if not cmd:
continue
elif cmd == 'exit':
break
else:
stdout = device.InteractiveShell(cmd, strip_cmd=True, delim=terminal_prompt, strip_delim=True)
if stdout:
if isinstance(stdout, bytes):
stdout = stdout.decode('utf-8')
print(stdout)
device.Close() | 183,295 |
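A non-interactive counterpart to the CLI helper above, sketched against python-adb's AdbCommands wrapper (Shell for buffered output, StreamingShell for streamed output); the key path is a placeholder.

```python
# Hedged sketch: one-shot and streaming shell calls through python-adb.
from adb import adb_commands, sign_m2crypto

signer = sign_m2crypto.M2CryptoSigner('/path/to/adbkey')   # placeholder path
device = adb_commands.AdbCommands()
device.ConnectDevice(rsa_keys=[signer])

print(device.Shell('ls /sdcard'))            # buffered output as one string
for chunk in device.StreamingShell('logcat -d'):   # streamed output (assumed API)
    print(chunk, end='')
device.Close()
```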
Computes the expected number of false positives caused by using
u to approximate set sizes in the interval [l, u], assuming uniform
distribution of set sizes within the interval.
Args:
l: the lower bound on set sizes.
u: the upper bound on set sizes.
cum_counts: the complete cumulative distribution of set sizes.
sizes: the complete domain of set sizes.
Return (float): the expected number of false positives. | def _compute_nfp_uniform(l, u, cum_counts, sizes):
if l > u:
raise ValueError("l must be less or equal to u")
if l == 0:
n = cum_counts[u]
else:
n = cum_counts[u]-cum_counts[l-1]
return n * float(sizes[u] - sizes[l]) / float(2*sizes[u]) | 183,565 |
Computes the matrix of expected false positives for all possible
sub-intervals of the complete domain of set sizes, assuming uniform
distribution of set sizes within each sub-interval.
Args:
cum_counts: the complete cumulative distribution of set sizes.
sizes: the complete domain of set sizes.
Return (np.array): the 2-D array of expected number of false positives
for every pair of [l, u] interval, where l is axis-0 and u is
axis-1. | def _compute_nfps_uniform(cum_counts, sizes):
nfps = np.zeros((len(sizes), len(sizes)))
# All u and l are inclusive bounds for intervals.
# Compute p = 1, the NFPs
for l in range(len(sizes)):
for u in range(l, len(sizes)):
nfps[l, u] = _compute_nfp_uniform(l, u, cum_counts, sizes)
return nfps | 183,566 |
Computes the expected number of false positives caused by using
u to approximate set sizes in the interval [l, u], using the real
set size distribution.
Args:
l: the lower bound on set sizes.
u: the upper bound on set sizes.
counts: the complete distribution of set sizes.
sizes: the complete domain of set sizes.
Return (float): the expected number of false positives. | def _compute_nfp_real(l, u, counts, sizes):
if l > u:
raise ValueError("l must be less or equal to u")
return np.sum((float(sizes[u])-sizes[l:u+1])/float(sizes[u])*counts[l:u+1]) | 183,567 |
Computes the matrix of expected false positives for all possible
sub-intervals of the complete domain of set sizes.
Args:
counts: the complete distribution of set sizes.
sizes: the complete domain of set sizes.
Return (np.array): the 2-D array of expected number of false positives
for every pair of [l, u] interval, where l is axis-0 and u is
axis-1. | def _compute_nfps_real(counts, sizes):
nfps = np.zeros((len(sizes), len(sizes)))
# All u and l are inclusive bounds for intervals.
# Compute p = 1, the NFPs
for l in range(len(sizes)):
for u in range(l, len(sizes)):
nfps[l, u] = _compute_nfp_real(l, u, counts, sizes)
return nfps | 183,568 |
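A self-contained demo of the same expected-false-positive computation, using a made-up size histogram; it mirrors the double loop above without importing the private helpers.

```python
# Demo: expected-false-positive matrix for an illustrative set-size histogram.
import numpy as np

sizes = np.array([10, 20, 40, 80])   # domain of set sizes
counts = np.array([5, 3, 2, 1])      # how many sets have each size

def nfp_real(l, u):
    # Expected false positives when every set in [l, u] is approximated by u.
    return np.sum((float(sizes[u]) - sizes[l:u+1]) / float(sizes[u]) * counts[l:u+1])

nfps = np.zeros((len(sizes), len(sizes)))
for l in range(len(sizes)):
    for u in range(l, len(sizes)):
        nfps[l, u] = nfp_real(l, u)
print(nfps)   # upper-triangular matrix; the diagonal is all zeros
```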
Initialize the slots of the LeanMinHash.
Args:
seed (int): The random seed controls the set of random
permutation functions generated for this LeanMinHash.
hashvalues: The hash values are the internal state of the LeanMinHash. | def _initialize_slots(self, seed, hashvalues):
self.seed = seed
self.hashvalues = self._parse_hashvalues(hashvalues) | 183,605 |
Compute the byte size after serialization.
Args:
byteorder (str, optional): This is byte order of the serialized data. Use one
of the `byte order characters
<https://docs.python.org/3/library/struct.html#byte-order-size-and-alignment>`_:
``@``, ``=``, ``<``, ``>``, and ``!``.
Default is ``@`` -- the native order.
Returns:
int: Size in number of bytes after serialization. | def bytesize(self, byteorder='@'):
# Use 8 bytes to store the seed integer
seed_size = struct.calcsize(byteorder+'q')
# Use 4 bytes to store the number of hash values
length_size = struct.calcsize(byteorder+'i')
# Use 4 bytes to store each hash value as we are using the lower 32 bit
hashvalue_size = struct.calcsize(byteorder+'I')
return seed_size + length_size + len(self) * hashvalue_size | 183,607 |
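A round-trip sketch using datasketch's LeanMinHash serialization helpers, where bytesize() sizes the buffer that serialize() fills:

```python
# Sketch: serialize a LeanMinHash into a pre-sized buffer and read it back.
from datasketch import MinHash, LeanMinHash

m = MinHash(num_perm=128)
for token in ["a", "b", "c"]:
    m.update(token.encode('utf8'))

lean = LeanMinHash(m)
buf = bytearray(lean.bytesize())    # size reported by the method above
lean.serialize(buf)
restored = LeanMinHash.deserialize(buf)
assert restored == lean
```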
Estimate the `Jaccard similarity`_ (resemblance) between the sets
represented by this MinHash and the other.
Args:
other (datasketch.MinHash): The other MinHash.
Returns:
float: The Jaccard similarity, which is between 0.0 and 1.0. | def jaccard(self, other):
if other.seed != self.seed:
raise ValueError("Cannot compute Jaccard given MinHash with\
different seeds")
if len(self) != len(other):
raise ValueError("Cannot compute Jaccard given MinHash with\
different numbers of permutation functions")
return np.float(np.count_nonzero(self.hashvalues==other.hashvalues)) /\
np.float(len(self)) | 183,616 |
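A minimal sketch of the jaccard() estimate above, comparing two small token sets:

```python
# Sketch: estimate Jaccard similarity between two small token sets.
from datasketch import MinHash

def make_minhash(tokens, num_perm=128):
    m = MinHash(num_perm=num_perm)
    for t in tokens:
        m.update(t.encode('utf8'))
    return m

m1 = make_minhash(["apple", "banana", "cherry"])
m2 = make_minhash(["banana", "cherry", "durian"])
print(m1.jaccard(m2))   # estimate of the true Jaccard similarity (0.5 here)
```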
Merge the other MinHash with this one, making this one the union
of both.
Args:
other (datasketch.MinHash): The other MinHash. | def merge(self, other):
if other.seed != self.seed:
raise ValueError("Cannot merge MinHash with\
different seeds")
if len(self) != len(other):
raise ValueError("Cannot merge MinHash with\
different numbers of permutation functions")
self.hashvalues = np.minimum(other.hashvalues, self.hashvalues) | 183,618 |
Create a MinHash which is the union of the MinHash objects passed as arguments.
Args:
*mhs: The MinHash objects to be united. The argument list length is variable,
but must be at least 2.
Returns:
datasketch.MinHash: A new union MinHash. | def union(cls, *mhs):
if len(mhs) < 2:
raise ValueError("Cannot union less than 2 MinHash")
num_perm = len(mhs[0])
seed = mhs[0].seed
if any((seed != m.seed or num_perm != len(m)) for m in mhs):
raise ValueError("The unioning MinHash must have the\
same seed and number of permutation functions")
hashvalues = np.minimum.reduce([m.hashvalues for m in mhs])
permutations = mhs[0].permutations
return cls(num_perm=num_perm, seed=seed, hashvalues=hashvalues,
permutations=permutations) | 183,621 |
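A short sketch contrasting the class-level union() with the in-place merge() above:

```python
# Sketch: union several MinHashes; merge() does the same in place for two.
from datasketch import MinHash

m1, m2, m3 = (MinHash(num_perm=128) for _ in range(3))
for m, tokens in zip((m1, m2, m3), (["a", "b"], ["b", "c"], ["c", "d"])):
    for t in tokens:
        m.update(t.encode('utf8'))

u = MinHash.union(m1, m2, m3)    # new MinHash representing the union
m1.merge(m2)                     # in-place alternative for two sketches
print(u.count(), m1.count())     # cardinality estimates of the unions
```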
Index all sets given their keys, MinHashes, and sizes.
It can be called only once after the index is created.
Args:
entries (`iterable` of `tuple`): An iterable of tuples, each must be
in the form of `(key, minhash, size)`, where `key` is the unique
identifier of a set, `minhash` is the MinHash of the set,
and `size` is the size or number of unique items in the set.
Note:
`size` must be positive. | def index(self, entries):
if not self.is_empty():
raise ValueError("Cannot call index again on a non-empty index")
if not isinstance(entries, list):
queue = deque([])
for key, minhash, size in entries:
if size <= 0:
raise ValueError("Set size must be positive")
queue.append((key, minhash, size))
entries = list(queue)
if len(entries) == 0:
raise ValueError("entries is empty")
# Create optimal partitions.
sizes, counts = np.array(sorted(
Counter(e[2] for e in entries).most_common())).T
partitions = optimal_partitions(sizes, counts, len(self.indexes))
for i, (lower, upper) in enumerate(partitions):
self.lowers[i], self.uppers[i] = lower, upper
# Insert into partitions.
entries.sort(key=lambda e : e[2])
curr_part = 0
for key, minhash, size in entries:
if size > self.uppers[curr_part]:
curr_part += 1
for r in self.indexes[curr_part]:
self.indexes[curr_part][r].insert(key, minhash) | 183,627 |
Given the MinHash and size of the query set, retrieve
keys that reference sets whose containment with respect to
the query set is greater than the threshold.
Args:
minhash (datasketch.MinHash): The MinHash of the query set.
size (int): The size (number of unique items) of the query set.
Returns:
`iterator` of keys. | def query(self, minhash, size):
for i, index in enumerate(self.indexes):
u = self.uppers[i]
if u is None:
continue
b, r = self._get_optimal_param(u, size)
for key in index[r]._query_b(minhash, b):
yield key | 183,628 |
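An end-to-end sketch of the LSH Ensemble index/query pair above; the threshold, number of partitions, and example sets are illustrative:

```python
# Sketch: containment search with MinHashLSHEnsemble (sizes are exact set sizes).
from datasketch import MinHash, MinHashLSHEnsemble

sets = {"s1": {"a", "b", "c", "d"}, "s2": {"c", "d", "e"}}
minhashes = {}
for key, s in sets.items():
    m = MinHash(num_perm=128)
    for item in s:
        m.update(item.encode('utf8'))
    minhashes[key] = m

ensemble = MinHashLSHEnsemble(threshold=0.5, num_perm=128, num_part=16)
ensemble.index((key, minhashes[key], len(sets[key])) for key in sets)

query = {"c", "d"}
mq = MinHash(num_perm=128)
for item in query:
    mq.update(item.encode('utf8'))
print(list(ensemble.query(mq, len(query))))   # keys with containment >= 0.5
```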
Estimate the `weighted Jaccard similarity`_ between the
multi-sets represented by this weighted MinHash and the other.
Args:
other (datasketch.WeightedMinHash): The other weighted MinHash.
Returns:
float: The weighted Jaccard similarity between 0.0 and 1.0.
.. _`weighted Jaccard similarity`: http://mathoverflow.net/questions/123339/weighted-jaccard-similarity | def jaccard(self, other):
if other.seed != self.seed:
raise ValueError("Cannot compute Jaccard given WeightedMinHash objects with\
different seeds")
if len(self) != len(other):
raise ValueError("Cannot compute Jaccard given WeightedMinHash objects with\
different numbers of hash values")
# Check how many pairs of (k, t) hashvalues are equal
intersection = 0
for this, that in zip(self.hashvalues, other.hashvalues):
if np.array_equal(this, that):
intersection += 1
return float(intersection) / float(len(self)) | 183,632 |
Create a new weighted MinHash given a weighted Jaccard vector.
Each dimension is an integer
frequency of the corresponding element in the multi-set represented
by the vector.
Args:
v (numpy.array): The Jaccard vector. | def minhash(self, v):
if not isinstance(v, collections.Iterable):
raise TypeError("Input vector must be an iterable")
if not len(v) == self.dim:
raise ValueError("Input dimension mismatch, expecting %d" % self.dim)
if not isinstance(v, np.ndarray):
v = np.array(v, dtype=np.float32)
elif v.dtype != np.float32:
v = v.astype(np.float32)
hashvalues = np.zeros((self.sample_size, 2), dtype=np.int)
vzeros = (v == 0)
if vzeros.all():
raise ValueError("Input is all zeros")
v[vzeros] = np.nan
vlog = np.log(v)
for i in range(self.sample_size):
t = np.floor((vlog / self.rs[i]) + self.betas[i])
ln_y = (t - self.betas[i]) * self.rs[i]
ln_a = self.ln_cs[i] - ln_y - self.rs[i]
k = np.nanargmin(ln_a)
hashvalues[i][0], hashvalues[i][1] = k, int(t[k])
return WeightedMinHash(self.seed, hashvalues) | 183,634 |
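A sketch of generating and comparing weighted MinHashes with WeightedMinHashGenerator; the weight vectors are illustrative:

```python
# Sketch: weighted MinHash of two non-negative weight vectors of equal dimension.
import numpy as np
from datasketch import WeightedMinHashGenerator

v1 = np.array([1, 3, 0, 2, 0, 5], dtype=np.float32)
v2 = np.array([1, 2, 0, 2, 1, 4], dtype=np.float32)

wmg = WeightedMinHashGenerator(dim=len(v1), sample_size=256, seed=12)
wm1 = wmg.minhash(v1)
wm2 = wmg.minhash(v2)
print(wm1.jaccard(wm2))   # estimate of the weighted Jaccard similarity
```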
Remove the key from the index.
Args:
key (hashable): The unique identifier of a set. | def remove(self, key):
if self.prepickle:
key = pickle.dumps(key)
if key not in self.keys:
raise ValueError("The given key does not exist")
for H, hashtable in zip(self.keys[key], self.hashtables):
hashtable.remove_val(H, key)
if not hashtable.get(H):
hashtable.remove(H)
self.keys.remove(key) | 183,648 |
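A small sketch showing remove() undoing an earlier insert() on a MinHashLSH index:

```python
# Sketch: insert a key into MinHashLSH, query it, then remove it again.
from datasketch import MinHash, MinHashLSH

m = MinHash(num_perm=128)
for t in ["x", "y", "z"]:
    m.update(t.encode('utf8'))

lsh = MinHashLSH(threshold=0.5, num_perm=128)
lsh.insert("doc-1", m)
print(lsh.query(m))     # ['doc-1']
lsh.remove("doc-1")
print(lsh.query(m))     # []
```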
Returns the bucket allocation counts (see :func:`~datasketch.MinHashLSH.get_counts` above)
restricted to the list of keys given.
Args:
keys (hashable) : the keys for which to get the bucket allocation
counts | def get_subset_counts(self, *keys):
if self.prepickle:
key_set = [pickle.dumps(key) for key in set(keys)]
else:
key_set = list(set(keys))
hashtables = [unordered_storage({'type': 'dict'}) for _ in
range(self.b)]
Hss = self.keys.getmany(*key_set)
for key, Hs in zip(key_set, Hss):
for H, hashtable in zip(Hs, hashtables):
hashtable.insert(H, key)
return [hashtable.itemcounts() for hashtable in hashtables] | 183,650 |
Merge the other HyperLogLog with this one, making this the union of the
two.
Args:
other (datasketch.HyperLogLog): | def merge(self, other):
if self.m != other.m or self.p != other.p:
raise ValueError("Cannot merge HyperLogLog with different\
precisions.")
self.reg = np.maximum(self.reg, other.reg) | 183,667 |
Check equivalence between two HyperLogLogs
Args:
other (datasketch.HyperLogLog):
Returns:
bool: True if both have the same internal state. | def __eq__(self, other):
return type(self) is type(other) and \
self.p == other.p and \
self.m == other.m and \
np.array_equal(self.reg, other.reg) | 183,669 |
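A sketch of merging two HyperLogLog sketches built with the same precision; the ranges and counts are illustrative:

```python
# Sketch: merge two HyperLogLog sketches and estimate the union cardinality.
from datasketch import HyperLogLog

h1, h2 = HyperLogLog(p=12), HyperLogLog(p=12)
for i in range(1000):
    h1.update(str(i).encode('utf8'))
for i in range(500, 1500):
    h2.update(str(i).encode('utf8'))

h1.merge(h2)          # h1 now approximates the union (~1500 uniques)
print(h1.count())
print(h1 == h2)       # False: the registers differ after the one-sided merge
```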
Add a unique key, together
with a MinHash (or weighted MinHash) of the set referenced by the key.
Note:
The key won't be searchable until the
:func:`datasketch.MinHashLSHForest.index` method is called.
Args:
key (hashable): The unique identifier of the set.
minhash (datasketch.MinHash): The MinHash of the set. | def add(self, key, minhash):
if len(minhash) < self.k*self.l:
raise ValueError("The num_perm of MinHash out of range")
if key in self.keys:
raise ValueError("The given key has already been added")
self.keys[key] = [self._H(minhash.hashvalues[start:end])
for start, end in self.hashranges]
for H, hashtable in zip(self.keys[key], self.hashtables):
hashtable[H].append(key) | 183,687 |
Return the approximate top-k keys that have the highest
Jaccard similarities to the query set.
Args:
minhash (datasketch.MinHash): The MinHash of the query set.
k (int): The maximum number of keys to return.
Returns:
`list` of at most k keys. | def query(self, minhash, k):
if k <= 0:
raise ValueError("k must be positive")
if len(minhash) < self.k*self.l:
raise ValueError("The num_perm of MinHash out of range")
results = set()
r = self.k
while r > 0:
for key in self._query(minhash, r, self.l):
results.add(key)
if len(results) >= k:
return list(results)
r -= 1
return list(results) | 183,690 |
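An end-to-end sketch of the forest add/index/query flow; note that index() must be called before query(), and the documents are illustrative:

```python
# Sketch: approximate top-k query with MinHashLSHForest.
from datasketch import MinHash, MinHashLSHForest

docs = {"d1": ["a", "b", "c"], "d2": ["b", "c", "d"], "d3": ["x", "y"]}
forest = MinHashLSHForest(num_perm=128)
for key, tokens in docs.items():
    m = MinHash(num_perm=128)
    for t in tokens:
        m.update(t.encode('utf8'))
    forest.add(key, m)
forest.index()                      # keys are not searchable before this call

mq = MinHash(num_perm=128)
for t in ["b", "c"]:
    mq.update(t.encode('utf8'))
print(forest.query(mq, 2))          # approximate top-2 most similar keys
```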
Find matching expectations within _expectations_config.
Args:
expectation_type=None : The name of the expectation type to be matched.
column=None : The name of the column to be matched.
expectation_kwargs=None : A dictionary of kwargs to match against.
Returns:
A list of indexes for matching expectation objects.
If there are no matches, the list will be empty. | def find_expectation_indexes(self,
expectation_type=None,
column=None,
expectation_kwargs=None
):
if expectation_kwargs == None:
expectation_kwargs = {}
if "column" in expectation_kwargs and column != None and column != expectation_kwargs["column"]:
raise ValueError("Conflicting column names in remove_expectation: %s and %s" % (
column, expectation_kwargs["column"]))
if column != None:
expectation_kwargs["column"] = column
match_indexes = []
for i, exp in enumerate(self._expectations_config.expectations):
if expectation_type == None or (expectation_type == exp['expectation_type']):
# if column == None or ('column' not in exp['kwargs']) or (exp['kwargs']['column'] == column) or (exp['kwargs']['column']==:
match = True
for k, v in expectation_kwargs.items():
if k in exp['kwargs'] and exp['kwargs'][k] == v:
continue
else:
match = False
if match:
match_indexes.append(i)
return match_indexes | 183,849 |
Get an evaluation parameter value that has been stored in meta.
Args:
parameter_name (string): The name of the parameter to retrieve.
default_value (any): The default value to be returned if the parameter is not found.
Returns:
The current value of the evaluation parameter. | def get_evaluation_parameter(self, parameter_name, default_value=None):
if "evaluation_parameters" in self._expectations_config and \
parameter_name in self._expectations_config['evaluation_parameters']:
return self._expectations_config['evaluation_parameters'][parameter_name]
else:
return default_value | 183,856 |
Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate
parameterized expectations.
Args:
parameter_name (string): The name of the kwarg to be replaced at evaluation time
parameter_value (any): The value to be used | def set_evaluation_parameter(self, parameter_name, parameter_value):
if 'evaluation_parameters' not in self._expectations_config:
self._expectations_config['evaluation_parameters'] = {}
self._expectations_config['evaluation_parameters'].update(
{parameter_name: parameter_value}) | 183,857 |
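A hedged sketch of storing and reading back an evaluation parameter, assuming the older great_expectations API these methods come from and a placeholder CSV file:

```python
# Hedged sketch: store and reuse an evaluation parameter on a GE data asset.
import great_expectations as ge

df = ge.read_csv("my_data.csv")                 # placeholder file name
df.set_evaluation_parameter("max_order_total", 1000)
print(df.get_evaluation_parameter("max_order_total"))                  # 1000
print(df.get_evaluation_parameter("missing_param", default_value=0))   # 0
```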
Convenience method for creating weights from categorical data.
Args:
data (list-like): The data from which to construct the estimate.
Returns:
A new partition object::
{
"partition": (list) The categorical values present in the data
"weights": (list) The weights of the values in the partition.
} | def categorical_partition_data(data):
# Make dropna explicit (even though it defaults to true)
series = pd.Series(data)
value_counts = series.value_counts(dropna=True)
# Compute weights using denominator only of nonnull values
null_indexes = series.isnull()
nonnull_count = (null_indexes == False).sum()
weights = value_counts.values / nonnull_count
return {
"values": value_counts.index.tolist(),
"weights": weights
} | 183,867 |
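A hedged sketch of the partition helper above; the import path (great_expectations.dataset.util) is assumed for this release and may differ elsewhere:

```python
# Hedged sketch: build categorical values and weights from a small sample.
from great_expectations.dataset.util import categorical_partition_data  # assumed path

data = ["red", "red", "blue", "green", "blue", "red", None]
partition = categorical_partition_data(data)
print(partition["values"])    # e.g. ['red', 'blue', 'green']
print(partition["weights"])   # weights over the non-null values, summing to 1
```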
This function will take a dataset and add expectations that each column present exists.
Args:
inspect_dataset (great_expectations.dataset): The dataset to inspect and to which to add expectations. | def columns_exist(inspect_dataset):
# Attempt to get column names. For pandas, columns is just a list of strings
if not hasattr(inspect_dataset, "columns"):
warnings.warn(
"No columns list found in dataset; no autoinspection performed.")
return
elif isinstance(inspect_dataset.columns[0], string_types):
columns = inspect_dataset.columns
elif isinstance(inspect_dataset.columns[0], dict) and "name" in inspect_dataset.columns[0]:
columns = [col['name'] for col in inspect_dataset.columns]
else:
raise AutoInspectError(
"Unable to determine column names for this dataset.")
create_multiple_expectations(
inspect_dataset, columns, "expect_column_to_exist") | 183,877 |
Helper function to convert a dict object to one that is serializable
Args:
test_obj: an object to attempt to convert to a corresponding json-serializable object
Returns:
(dict) A converted test_object
Warning:
test_obj may also be converted in place. | def recursively_convert_to_json_serializable(test_obj):
# Validate that all arguments are of approved types, coerce if it's easy, else exception
# print(type(test_obj), test_obj)
# Note: Not 100% sure I've resolved this correctly...
try:
if not isinstance(test_obj, list) and np.isnan(test_obj):
# np.isnan is functionally vectorized, but we only want to apply this to single objects
# Hence, why we test for `not isinstance(test_obj, list)`
return None
except TypeError:
pass
except ValueError:
pass
if isinstance(test_obj, (string_types, integer_types, float, bool)):
# No problem to encode json
return test_obj
elif isinstance(test_obj, dict):
new_dict = {}
for key in test_obj:
# A pandas index can be numeric, and a dict key can be numeric, but a json key must be a string
new_dict[str(key)] = recursively_convert_to_json_serializable(
test_obj[key])
return new_dict
elif isinstance(test_obj, (list, tuple, set)):
new_list = []
for val in test_obj:
new_list.append(recursively_convert_to_json_serializable(val))
return new_list
elif isinstance(test_obj, (np.ndarray, pd.Index)):
#test_obj[key] = test_obj[key].tolist()
# If we have an array or index, convert it first to a list--causing coercion to float--and then round
# to the number of digits for which the string representation will equal the float representation
return [recursively_convert_to_json_serializable(x) for x in test_obj.tolist()]
# Note: This clause has to come after checking for np.ndarray or we get:
# `ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()`
elif test_obj is None:
# No problem to encode json
return test_obj
elif isinstance(test_obj, (datetime.datetime, datetime.date)):
return str(test_obj)
# Use built in base type from numpy, https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html
# https://github.com/numpy/numpy/pull/9505
elif np.issubdtype(type(test_obj), np.bool_):
return bool(test_obj)
elif np.issubdtype(type(test_obj), np.integer) or np.issubdtype(type(test_obj), np.uint):
return int(test_obj)
elif np.issubdtype(type(test_obj), np.floating):
# Note: Use np.floating to avoid FutureWarning from numpy
return float(round(test_obj, sys.float_info.dig))
elif isinstance(test_obj, pd.DataFrame):
return recursively_convert_to_json_serializable(test_obj.to_dict(orient='records'))
# elif np.issubdtype(type(test_obj), np.complexfloating):
# Note: Use np.complexfloating to avoid Future Warning from numpy
# Complex numbers consist of two floating point numbers
# return complex(
# float(round(test_obj.real, sys.float_info.dig)),
# float(round(test_obj.imag, sys.float_info.dig)))
elif isinstance(test_obj, decimal.Decimal):
return float(test_obj)
else:
raise TypeError('%s is of type %s which cannot be serialized.' % (
str(test_obj), type(test_obj).__name__)) | 183,902 |
Read a file using Pandas read_excel and return a great_expectations dataset.
Args:
filename (string): path to file to read
dataset_class (Dataset class): class to which to convert resulting Pandas df
expectations_config (string): path to great_expectations config file
Returns:
great_expectations dataset or ordered dict of great_expectations datasets,
if multiple worksheets are imported | def read_excel(
filename,
dataset_class=dataset.pandas_dataset.PandasDataset,
expectations_config=None,
autoinspect_func=None,
*args, **kwargs
):
df = pd.read_excel(filename, *args, **kwargs)
if isinstance(df, dict):
for key in df:
df[key] = _convert_to_dataset_class(
df[key], dataset_class, expectations_config, autoinspect_func)
else:
df = _convert_to_dataset_class(
df, dataset_class, expectations_config, autoinspect_func)
return df | 183,913 |
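A hedged sketch of read_excel for both the single-sheet and multi-sheet cases; file and column names are placeholders:

```python
# Hedged sketch: load a spreadsheet as a great_expectations dataset.
import great_expectations as ge

df = ge.read_excel("sales.xlsx")               # single sheet -> one dataset
df.expect_column_to_exist("order_id")          # placeholder column name

# sheet_name=None is passed through to pandas and yields a dict of datasets.
sheets = ge.read_excel("sales.xlsx", sheet_name=None)
for name, sheet_df in sheets.items():
    print(name, type(sheet_df))
```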
Context manager for creating a unix-domain socket and listen for
ffmpeg progress events.
The socket filename is yielded from the context manager and the
socket is closed when the context manager is exited.
Args:
handler: a function to be called when progress events are
received; receives a ``key`` argument and ``value``
argument. (The example ``show_progress`` below uses tqdm)
Yields:
socket_filename: the name of the socket file. | def _watch_progress(handler):
with _tmpdir_scope() as tmpdir:
socket_filename = os.path.join(tmpdir, 'sock')
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
with contextlib.closing(sock):
sock.bind(socket_filename)
sock.listen(1)
child = gevent.spawn(_do_watch_progress, socket_filename, sock, handler)
try:
yield socket_filename
except:
gevent.kill(child)
raise | 187,664 |
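A hedged sketch of wiring the socket into an ffmpeg run, following the ffmpeg-python progress example this helper comes from; it assumes _watch_progress is in scope, a gevent-friendly environment, and an ffmpeg build that accepts -progress with a unix socket URL:

```python
# Hedged sketch: feed ffmpeg's -progress output into a handler via the socket
# created by _watch_progress. File names are placeholders.
import ffmpeg

def on_progress(key, value):
    if key == 'out_time_ms':
        print('processed up to {} ms'.format(value))

with _watch_progress(on_progress) as socket_filename:
    (ffmpeg
     .input('in.mp4')
     .output('out.mp4')
     .global_args('-progress', 'unix://{}'.format(socket_filename))
     .overwrite_output()
     .run())
```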
Change the PTS (presentation timestamp) of the input frames.
Args:
expr: The expression which is evaluated for each frame to construct its timestamp.
Official documentation: `setpts, asetpts <https://ffmpeg.org/ffmpeg-filters.html#setpts_002c-asetpts>`__ | def setpts(stream, expr):
return FilterNode(stream, setpts.__name__, args=[expr]).stream() | 187,673 |
Crop the input video.
Args:
x: The horizontal position, in the input video, of the left edge of
the output video.
y: The vertical position, in the input video, of the top edge of the
output video.
width: The width of the output video. Must be greater than 0.
height: The height of the output video. Must be greater than 0.
Official documentation: `crop <https://ffmpeg.org/ffmpeg-filters.html#crop>`__ | def crop(stream, x, y, width, height, **kwargs):
return FilterNode(
stream,
crop.__name__,
args=[width, height, x, y],
kwargs=kwargs
).stream() | 187,676 |
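A short sketch combining the crop and setpts filters above with ffmpeg-python's input/output/run helpers; file names are placeholders:

```python
# Sketch: crop a region and reset timestamps with ffmpeg-python.
import ffmpeg

stream = ffmpeg.input('in.mp4')
stream = ffmpeg.crop(stream, x=100, y=50, width=640, height=360)
stream = ffmpeg.setpts(stream, 'PTS-STARTPTS')   # restart timestamps at zero
stream = ffmpeg.output(stream, 'out.mp4')
ffmpeg.run(stream)
```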