Columns: docstring (string, lengths 52 to 499), function (string, lengths 67 to 35.2k), __index_level_0__ (int64, values 52.6k to 1.16M)
Returns, for each of the given addresses, the value at that address if it was an input to the txn but was never set. None is returned for an address that was never set in the merkle database, or that is not within the context. Args: addresses (list of str): The full 70 character addresses. Returns: (list): the bytes at each address that was an input but was not set within the context
def get_if_not_set(self, addresses): with self._lock: results = [] for add in addresses: results.append(self._get_if_not_set(add)) return results
163,861
Create futures needed before starting the process of reading the address's value from the merkle tree. Args: addresses (list of str): addresses in the txn's inputs that aren't in any base context (or any in the chain).
def create_prefetch(self, addresses): with self._lock: for add in addresses: self._state[add] = _ContextFuture(address=add, wait_for_tree=True)
163,864
Create futures from inputs, seeded with the current value for each address at the start of this context. Args: address_values (list of tuple): (address, value) pairs, where address is a str and value is bytes.
def create_initial(self, address_values): with self._lock: for add, val in address_values: self._state[add] = _ContextFuture(address=add, result=val)
163,865
Set the result for each future at the given addresses with the value stored in the merkle database. Args: address_value_dict (dict of str: bytes): A mapping of unique full addresses to the bytes values to set.
def set_from_tree(self, address_value_dict): for address, value in address_value_dict.items(): if address in self._state: self._state[address].set_result(result=value, from_tree=True)
163,866
Called in the context manager's delete method to either mark an entry for deletion, or create a new future and immediately mark it as deleted. Args: addresses (list of str): The unique full addresses. Raises: AuthorizationException
def delete_direct(self, addresses): with self._lock: for address in addresses: self._validate_write(address) if address in self._state: self._state[address].set_deleted() else: fut = _ContextFuture(address=address) self._state[address] = fut fut.set_deleted()
163,867
Called in the context manager's set method to either overwrite the value for an address, or create a new future and immediately set a value in the future. Args: address_value_dict (dict of str: bytes): A mapping of unique full addresses to the bytes to set at each address. Raises: AuthorizationException
def set_direct(self, address_value_dict): with self._lock: for address, value in address_value_dict.items(): self._validate_write(address) if address in self._state: self._state[address].set_result(result=value) else: fut = _ContextFuture(address=address) self._state[address] = fut fut.set_result(result=value)
163,868
Raises an exception if the address is not allowed to be read in this context, based on txn inputs. Args: address (str): An address to be validated. Returns: None Raises: AuthorizationException
def validate_read(self, address): if not any(address.startswith(ns) for ns in self._read_list): raise AuthorizationException(address=address)
163,870
Set the address's value unless the future has been declared read only. Args: result (bytes): The value at an address. from_tree (bool): Whether the value is being set by a read from the merkle tree. Returns: None
def set_result(self, result, from_tree=False): if self._read_only: if not from_tree: LOGGER.warning("Tried to set address %s on a" " read-only context.", self.address) return with self._condition: if self._read_only: if not from_tree: LOGGER.warning("Tried to set address %s on a" " read-only context.", self.address) return if from_tree: # If the result has not been set in the context, overwrite the # value with the value from the merkle tree. Otherwise, do # nothing. if not self._result_set_in_context: self._result = result self._tree_has_set = True else: self._result = result self._result_set_in_context = True self._deleted = False self._condition.notify_all()
163,874
Runs the transaction list or show command, printing to the console Args: args: The parsed arguments sent to the command at runtime
def do_transaction(args): rest_client = RestClient(args.url, args.user) if args.subcommand == 'list': transactions = rest_client.list_transactions() keys = ('transaction_id', 'family', 'version', 'size', 'payload') headers = tuple(k.upper() if k != 'version' else 'VERS' for k in keys) def parse_txn_row(transaction, decode=True): decoded = b64decode(transaction['payload']) return ( transaction['header_signature'], transaction['header']['family_name'], transaction['header']['family_version'], len(decoded), str(decoded) if decode else transaction['payload']) if args.format == 'default': fmt.print_terminal_table(headers, transactions, parse_txn_row) elif args.format == 'csv': fmt.print_csv(headers, transactions, parse_txn_row) elif args.format == 'json' or args.format == 'yaml': data = [{k: d for k, d in zip(keys, parse_txn_row(b, False))} for b in transactions] if args.format == 'yaml': fmt.print_yaml(data) elif args.format == 'json': fmt.print_json(data) else: raise AssertionError('Missing handler: {}'.format(args.format)) else: raise AssertionError('Missing handler: {}'.format(args.format)) if args.subcommand == 'show': output = rest_client.get_transaction(args.transaction_id) if args.key: if args.key == 'payload': output = b64decode(output['payload']) elif args.key in output: output = output[args.key] elif args.key in output['header']: output = output['header'][args.key] else: raise CliException( 'Key "{}" not found in transaction or header'.format( args.key)) if args.format == 'yaml': fmt.print_yaml(output) elif args.format == 'json': fmt.print_json(output) else: raise AssertionError('Missing handler: {}'.format(args.format))
163,893
Returns the TransactionReceipt Args: txn_id (str): the id of the transaction for which the receipt should be retrieved. Returns: TransactionReceipt: The receipt for the given transaction id. Raises: KeyError: if the transaction id is unknown.
def get(self, txn_id): if txn_id not in self._receipt_db: raise KeyError('Unknown transaction id {}'.format(txn_id)) txn_receipt_bytes = self._receipt_db[txn_id] txn_receipt = TransactionReceipt() txn_receipt.ParseFromString(txn_receipt_bytes) return txn_receipt
163,896
Sends a list of batches to the validator. Args: batch_list (:obj:`BatchList`): the list of batches Returns: dict: the json result data, as a dict
def send_batches(self, batch_list): if isinstance(batch_list, BaseMessage): batch_list = batch_list.SerializeToString() return self._post('/batches', batch_list)
163,903
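A short usage note, hedged: because send_batches checks for a BaseMessage, callers may pass either the BatchList protobuf itself or its already-serialized bytes. In the sketch below, `client` is a RestClient as constructed elsewhere in this table, and the protobuf import path is an assumption about the SDK layout, not something shown above.

from sawtooth_sdk.protobuf.batch_pb2 import BatchList   # assumed module path

batch_list = BatchList(batches=[])                       # batches built elsewhere
client.send_batches(batch_list)                          # serialized internally
client.send_batches(batch_list.SerializeToString())      # or pre-serialized bytes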
Adds arguments parsers for the block list and block show commands Args: subparsers: Add parsers to this subparser object parent_parser: The parent argparse.ArgumentParser object
def add_block_parser(subparsers, parent_parser): parser = subparsers.add_parser( 'block', description='Provides subcommands to display information about the ' 'blocks in the current blockchain.', help='Displays information on blocks in the current blockchain') grand_parsers = parser.add_subparsers( title='subcommands', dest='subcommand') grand_parsers.required = True description = ( 'Displays information for all blocks on the current ' 'blockchain, including the block id and number, public keys of ' 'all signers, and number of transactions and batches.') list_parser = grand_parsers.add_parser( 'list', help='Displays information for all blocks on the current blockchain', description=description, parents=[base_http_parser(), base_list_parser()], formatter_class=argparse.RawDescriptionHelpFormatter) list_parser.add_argument( '-n', '--count', default=100, type=int, help='the number of blocks to list', ) description = ( 'Displays information about the specified block on ' 'the current blockchain') show_parser = grand_parsers.add_parser( 'show', help=description, description=description + '.', parents=[base_http_parser(), base_show_parser()], formatter_class=argparse.RawDescriptionHelpFormatter) show_parser.add_argument( 'block_id', type=str, help='id (header_signature) of the block')
163,926
Runs the block list or block show command, printing output to the console Args: args: The parsed arguments sent to the command at runtime
def do_block(args): rest_client = RestClient(args.url, args.user) if args.subcommand == 'list': block_generator = rest_client.list_blocks() blocks = [] left = args.count for block in block_generator: blocks.append(block) left -= 1 if left <= 0: break keys = ('num', 'block_id', 'batches', 'txns', 'signer') headers = tuple(k.upper() if k != 'batches' else 'BATS' for k in keys) def parse_block_row(block): batches = block.get('batches', []) txns = [t for b in batches for t in b['transactions']] return ( block['header'].get('block_num', 0), block['header_signature'], len(batches), len(txns), block['header']['signer_public_key']) if args.format == 'default': fmt.print_terminal_table(headers, blocks, parse_block_row) elif args.format == 'csv': fmt.print_csv(headers, blocks, parse_block_row) elif args.format == 'json' or args.format == 'yaml': data = [{k: d for k, d in zip(keys, parse_block_row(b))} for b in blocks] if args.format == 'yaml': fmt.print_yaml(data) elif args.format == 'json': fmt.print_json(data) else: raise AssertionError('Missing handler: {}'.format(args.format)) else: raise AssertionError('Missing handler: {}'.format(args.format)) if args.subcommand == 'show': output = rest_client.get_block(args.block_id) if args.key: if args.key in output: output = output[args.key] elif args.key in output['header']: output = output['header'][args.key] else: raise CliException( 'key "{}" not found in block or header'.format(args.key)) if args.format == 'yaml': fmt.print_yaml(output) elif args.format == 'json': fmt.print_json(output) else: raise AssertionError('Missing handler: {}'.format(args.format))
163,927
Register a callback for a specific connection state change. Register a callback to be triggered when the connection changes to the specified state, signified by a ConnectionEvent. The callback must be a coroutine. Args: event_type (ConnectionEvent): the connection event to listen for callback (coroutine): a coroutine to call on the event occurrence
def on_connection_state_change(self, event_type, callback): listeners = self._connection_state_listeners.get(event_type, []) listeners.append(callback) self._connection_state_listeners[event_type] = listeners
163,942
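A small usage sketch of registering a listener; the object it is called on and the ConnectionEvent member name are placeholders, since only the method and the enum's existence are shown above.

import logging

LOGGER = logging.getLogger(__name__)

async def _on_state_change(*args):
    # The callback must be a coroutine, per the docstring above; its exact
    # argument signature is not specified here, so accept anything.
    LOGGER.info('connection state changed: %s', args)

# ConnectionEvent.DISCONNECTED is a hypothetical member name.
connection.on_connection_state_change(ConnectionEvent.DISCONNECTED, _on_state_change)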
Constructor for the LMDBNoLockDatabase class. Args: filename (str): The filename of the database file. flag (str): a flag indicating the mode for opening the database. Refer to the documentation for anydbm.open().
def __init__(self, filename, flag): super(LMDBNoLockDatabase, self).__init__() create = bool(flag == 'c') if flag == 'n': if os.path.isfile(filename): os.remove(filename) create = True self._lmdb = lmdb.Environment( path=filename, map_size=1024**4, map_async=True, writemap=True, readahead=False, subdir=False, create=create, lock=True)
163,950
Removes a key:value from the database Args: key (str): The key to remove.
def delete(self, key): with self._lmdb.begin(write=True, buffers=True) as txn: txn.delete(key.encode())
163,955
Checks if a status enum matches the trigger originally set, and if so, raises the appropriate error. Args: status (int, enum): A protobuf enum response status to check. Raises: AssertionError: If trigger or error were not set. _ApiError: If the status matches the trigger. Do not catch. Will be caught automatically and sent back to the client.
def check(cls, status): assert cls.trigger is not None, 'Invalid ErrorTrap, trigger not set' assert cls.error is not None, 'Invalid ErrorTrap, error not set' if status == cls.trigger: # pylint: disable=not-callable # cls.error will be callable at runtime raise cls.error()
163,989
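To show how check() is wired up, a minimal self-contained sketch of a subclass; the subclass name, trigger value, and error type below are hypothetical, and the base class simply restates the check() shown above.

class ErrorTrap:
    # Minimal stand-in for the class whose check() is shown above.
    trigger = None
    error = None

    @classmethod
    def check(cls, status):
        assert cls.trigger is not None, 'Invalid ErrorTrap, trigger not set'
        assert cls.error is not None, 'Invalid ErrorTrap, error not set'
        if status == cls.trigger:
            raise cls.error()

class BatchNotFound(Exception):
    # Hypothetical client-facing error raised when the trap fires.
    pass

class MissingBatchTrap(ErrorTrap):
    trigger = 4            # hypothetical protobuf status value
    error = BatchNotFound

MissingBatchTrap.check(0)   # status differs from trigger, so nothing happens
try:
    MissingBatchTrap.check(4)   # matches the trigger, so BatchNotFound is raised
except BatchNotFound:
    pass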
Checks whether the tree contains an address. Args: item (str): An address. Returns: (bool): True if the tree contains the address, False otherwise.
def __contains__(self, item): try: _libexec('merkle_db_contains', self.pointer, item.encode()) # No error implies found return True except KeyError: return False
163,994
Do the initialization necessary for the consensus to claim a block; this may include initiating voting activities, starting proof of work hash generation, or creating a PoET wait timer. Args: block_header (BlockHeader): the BlockHeader to initialize. Returns: True
def initialize_block(self, block_header): # Using the current chain head, we need to create a state view so we # can get our config values. state_view = \ BlockWrapper.state_view_for_block( self._block_cache.block_store.chain_head, self._state_view_factory) settings_view = SettingsView(state_view) self._min_wait_time = settings_view.get_setting( "sawtooth.consensus.min_wait_time", self._min_wait_time, int) self._max_wait_time = settings_view.get_setting( "sawtooth.consensus.max_wait_time", self._max_wait_time, int) self._valid_block_publishers = settings_view.get_setting( "sawtooth.consensus.valid_block_publishers", self._valid_block_publishers, list) block_header.consensus = b"Devmode" self._start_time = time.time() self._wait_time = random.uniform( self._min_wait_time, self._max_wait_time) return True
164,007
The longest chain is selected. If the chains are equal in length, then a hash of each fork head's signer public key and previous block id is computed, and the lowest result value is the winning block. Args: cur_fork_head: The current head of the block chain. new_fork_head: The head of the fork that is being evaluated. Returns: bool: True if choosing the new chain head, False if choosing the current chain head.
def compare_forks(self, cur_fork_head, new_fork_head): # If the new fork head is not DevMode consensus, bail out. This should # never happen, but we need to protect against it. if new_fork_head.consensus != b"Devmode": raise \ TypeError( 'New fork head {} is not a DevMode block'.format( new_fork_head.identifier[:8])) # If the current fork head is not DevMode consensus, check the new fork # head to see if its immediate predecessor is the current fork head. If # so that means that consensus mode is changing. If not, we are again # in a situation that should never happen, but we need to guard # against. if cur_fork_head.consensus != b"Devmode": if new_fork_head.previous_block_id == cur_fork_head.identifier: LOGGER.info( 'Choose new fork %s: New fork head switches consensus to ' 'DevMode', new_fork_head.identifier[:8]) return True raise \ TypeError( 'Trying to compare a DevMode block {} to a non-DevMode ' 'block {} that is not the direct predecessor'.format( new_fork_head.identifier[:8], cur_fork_head.identifier[:8])) if new_fork_head.block_num == cur_fork_head.block_num: cur_fork_hash = self.hash_signer_public_key( cur_fork_head.header.signer_public_key, cur_fork_head.header.previous_block_id) new_fork_hash = self.hash_signer_public_key( new_fork_head.header.signer_public_key, new_fork_head.header.previous_block_id) result = new_fork_hash < cur_fork_hash else: result = new_fork_head.block_num > cur_fork_head.block_num return result
164,010
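For the equal-height tie-break, a hedged sketch of what hash_signer_public_key might look like; the actual hash algorithm is not shown in this table, so SHA-256 is an assumption used only to illustrate the deterministic comparison.

import hashlib

def hash_signer_public_key(signer_public_key, previous_block_id):
    # Assumption: SHA-256 over the two concatenated values; any fixed digest
    # works, as long as every validator computes it the same way.
    digest = hashlib.sha256()
    digest.update(signer_public_key.encode())
    digest.update(previous_block_id.encode())
    return digest.hexdigest()

# With equal block_num, the fork whose digest string compares lower wins,
# mirroring `new_fork_hash < cur_fork_hash` above.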
Check the public key of a node on the network to see if it is permitted to participate. The roles are checked in the following order: "network", then "default". The first role that is set is the one used to decide whether the node is allowed. Args: public_key (string): The public key belonging to a node on the network
def check_network_role(self, public_key): state_root = self._current_root_func() if state_root == INIT_ROOT_KEY: LOGGER.debug("Chain head is not set yet. Permit all.") return True self._cache.update_view(state_root) role = self._cache.get_role("network", state_root) if role is None: policy_name = "default" else: policy_name = role.policy_name policy = self._cache.get_policy(policy_name, state_root) if policy is not None: if not self._allowed(public_key, policy): LOGGER.debug("Node is not permitted: %s.", public_key) return False return True
164,016
Used to retrieve an identity role. Args: item (string): the name of the role to be fetched state_root (string): The state root of the previous block. from_state (bool): Whether the identity value should be read directly from state, instead of using the cached values. This should be used when the state_root passed is not from the current chain head.
def get_role(self, item, state_root, from_state=False): if from_state: # if from state use identity_view and do not add to cache if self._identity_view is None: self.update_view(state_root) value = self._identity_view.get_role(item) return value value = self._cache.get(item) if value is None: if self._identity_view is None: self.update_view(state_root) value = self._identity_view.get_role(item) self._cache[item] = value return value
164,021
Takes a statuses dict and formats it for transmission with Protobuf and ZMQ. Args: statuses (dict of int): Dict with batch ids as the key, status as value batch_ids (list of str): The batch ids in their original order tracker (BatchTracker): A batch tracker with access to invalid info
def _format_batch_statuses(statuses, batch_ids, tracker): proto_statuses = [] for batch_id in batch_ids: if statuses[batch_id] == \ client_batch_submit_pb2.ClientBatchStatus.INVALID: invalid_txns = tracker.get_invalid_txn_info(batch_id) for txn_info in invalid_txns: try: txn_info['transaction_id'] = txn_info.pop('id') except KeyError as e: LOGGER.debug(e) else: invalid_txns = None proto_statuses.append( client_batch_submit_pb2.ClientBatchStatus( batch_id=batch_id, status=statuses[batch_id], invalid_transactions=invalid_txns)) return proto_statuses
164,022
Handles parsing incoming requests, and wrapping the final response. Args: connection_id (str): ZMQ identity sent over ZMQ socket message_content (bytes): Byte encoded request protobuf to be parsed Returns: HandlerResult: result to be sent in response back to client
def handle(self, connection_id, message_content): try: request = self._request_proto() request.ParseFromString(message_content) except DecodeError: LOGGER.info('Protobuf %s failed to deserialize', request) return self._wrap_result(self._status.INTERNAL_ERROR) try: response = self._respond(request) except _ResponseFailed as e: response = e.status return self._wrap_result(response)
164,025
Wraps child's response in a HandlerResult to be sent back to client. Args: response (enum or dict): Either an integer status enum, or a dict of attributes to be added to the protobuf response.
def _wrap_result(self, response): if isinstance(response, int): response = self._wrap_response(response) return HandlerResult( status=HandlerStatus.RETURN, message_out=self._response_proto(**response), message_type=self._response_type)
164,026
Convenience method to wrap a status with any keyword args. Args: status (enum): enum response status, defaults to OK Returns: dict: includes a 'status' attribute and any keyword arguments
def _wrap_response(self, status=None, **kwargs): kwargs['status'] = status if status is not None else self._status.OK return kwargs
164,027
Fetches the head block specified by the request, or the chain head if no head id was requested. Note: This method will fail if `_block_store` has not been set Args: request (object): The parsed protobuf request object Returns: Block: the block object at the head of the requested chain Raises: ResponseFailed: Failed to retrieve a head block
def _get_head_block(self, request): if request.head_id: if self._id_regex.fullmatch(request.head_id) is None: LOGGER.debug('Invalid head id requested: %s', request.head_id) raise _ResponseFailed(self._status.NO_ROOT) try: return self._block_store[request.head_id] except KeyError as e: LOGGER.debug('Unable to find block "%s" in store', e) raise _ResponseFailed(self._status.NO_ROOT) else: return self._get_chain_head()
164,028
Sets the root of the merkle tree, returning any head id used. Note: This method will fail if `_tree` has not been set Args: request (object): The parsed protobuf request object Returns: str: the state root of the head block used to specify the root Raises: ResponseFailed: Failed to set the root of the merkle tree
def _set_root(self, request): if request.state_root: root = request.state_root else: head = self._get_chain_head() root = head.state_root_hash try: self._tree.set_merkle_root(root) except KeyError as e: LOGGER.debug('Unable to find root "%s" in database', e) raise _ResponseFailed(self._status.NO_ROOT) return root
164,030
Validates a list of ids, raising a ResponseFailed error if invalid. Args: resource_ids (list of str): The ids to validate Raises: ResponseFailed: An id was invalid, and a status of INVALID_ID will be sent with the response.
def _validate_ids(self, resource_ids): for resource_id in resource_ids: if self._id_regex.fullmatch(resource_id) is None: LOGGER.debug('Invalid resource id requested: %s', resource_id) raise _ResponseFailed(self._status.INVALID_ID)
164,032
Validates a state root, raising a ResponseFailed error if invalid. Args: state_root (str): The state_root to validate Raises: ResponseFailed: The state_root was invalid, and a status of INVALID_ROOT will be sent with the response.
def _validate_state_root(self, state_root): if self._state_root_regex.fullmatch(state_root) is None: LOGGER.debug('Invalid state root: %s', state_root) raise _ResponseFailed(self._status.INVALID_ROOT)
164,033
Validates a namespace, raising a ResponseFailed error if invalid. Args: namespace (str): The namespace to validate Raises: ResponseFailed: The namespace was invalid, and a status of INVALID_ADDRESS will be sent with the response.
def _validate_namespace(self, namespace): if self._namespace_regex.fullmatch(namespace) is None: LOGGER.debug('Invalid namespace: %s', namespace) raise _ResponseFailed(self._status.INVALID_ADDRESS)
164,034
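For orientation, a hedged sketch of the kind of fullmatch patterns these validators rely on; the 70-character address length comes from the get_if_not_set docstring earlier in this table, while the namespace pattern (any hex prefix up to 70 characters) is purely an assumption, not the validator's actual regex.

import re

# Hedged sketch; these are not the validator's actual compiled regexes.
ADDRESS_REGEX = re.compile(r'[0-9a-f]{70}')       # full state addresses: 70 hex characters
NAMESPACE_REGEX = re.compile(r'[0-9a-f]{0,70}')   # assumption: any hex prefix up to 70 chars

def is_valid_address(value):
    return ADDRESS_REGEX.fullmatch(value) is not None

print(is_valid_address('a' * 70))   # True
print(is_valid_address('xyz'))      # False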
Truncates a list of resources based on ClientPagingControls Args: request (object): The parsed protobuf request object resources (list of objects): The resources to be paginated on_fail_status (enum): The status to raise if the paging controls are invalid Returns: list: The paginated list of resources object: The ClientPagingResponse to be sent back to the client
def paginate_resources(cls, request, resources, on_fail_status): if not resources: return (resources, client_list_control_pb2.ClientPagingResponse()) paging = request.paging limit = min(paging.limit, MAX_PAGE_SIZE) or DEFAULT_PAGE_SIZE # Find the start index from the location marker sent try: if paging.start: start_index = cls.index_by_id(paging.start, resources) else: start_index = 0 if start_index < 0 or start_index >= len(resources): raise AssertionError except AssertionError: raise _ResponseFailed(on_fail_status) paged_resources = resources[start_index: start_index + limit] if start_index + limit < len(resources): paging_response = client_list_control_pb2.ClientPagingResponse( next=cls.id_by_index(start_index + limit, resources), start=cls.id_by_index(start_index, resources), limit=limit) else: paging_response = client_list_control_pb2.ClientPagingResponse( start=cls.id_by_index(start_index, resources), limit=limit) return paged_resources, paging_response
164,035
Helper method to fetch the index of a resource by its id or address Args: resources (list of objects): The resources to be paginated target_id (string): The address or header_signature of the resource Returns: integer: The index of the target resource Raises: AssertionError: Raised if the target is not found
def index_by_id(cls, target_id, resources): for index in range(len(resources)): if cls.id_by_index(index, resources) == target_id: return index raise AssertionError
164,036
Helper method to fetch the id or address of a resource by its index Args: resources (list of objects): The resources to be paginated index (integer): The index of the target resource Returns: str: The address or header_signature of the resource, returns an empty string if not found
def id_by_index(index, resources): if index < 0 or index >= len(resources): return '' try: return resources[index].header_signature except AttributeError: return resources[index].address
164,037
Sorts a list of resources based on a list of sort controls Args: request (object): The parsed protobuf request object resources (list of objects): The resources to be sorted fail_enum (int, enum): The enum status to raise with invalid keys header_proto (class): Class to decode a resource's header Returns: list: The sorted list of resources
def sort_resources(cls, request, resources, fail_enum, header_proto=None): if not request.sorting: return resources value_handlers = cls._get_handler_set(request, fail_enum, header_proto) def sorter(resource_a, resource_b): for handler in value_handlers: val_a, val_b = handler.get_sort_values(resource_a, resource_b) if val_a < val_b: return handler.xform_result(-1) if val_a > val_b: return handler.xform_result(1) return 0 return sorted(resources, key=cmp_to_key(sorter))
164,038
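The comparator-plus-cmp_to_key pattern above is standard-library Python; here is a self-contained sketch with plain dict resources standing in for the handler's value handlers.

from functools import cmp_to_key

resources = [
    {'family': 'intkey', 'size': 21},
    {'family': 'intkey', 'size': 7},
    {'family': 'xo', 'size': 3},
]

def sorter(resource_a, resource_b):
    # Compare on successive keys until one differs, mirroring the loop above.
    for key in ('family', 'size'):
        if resource_a[key] < resource_b[key]:
            return -1
        if resource_a[key] > resource_b[key]:
            return 1
    return 0

print(sorted(resources, key=cmp_to_key(sorter)))
# [{'family': 'intkey', 'size': 7}, {'family': 'intkey', 'size': 21}, {'family': 'xo', 'size': 3}]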
Called by the BatchTracker the _BatchWaiter is observing. Should not be called by handlers. Args: statuses (dict of int): A dict with keys of batch ids, and values of status enums
def notify_batches_finished(self, statuses): with self._wait_condition: self._statuses = statuses self._wait_condition.notify()
164,041
Blocks until a list of batch ids is committed to the block chain or a timeout is exceeded. Returns the statuses of those batches. Args: batch_ids (list of str): The ids of the batches to wait for timeout (int): Maximum time in seconds to wait for Returns: list of BatchStatus: BatchStatuses to send back to client
def wait_for_batches(self, batch_ids, timeout=None): self._batch_tracker.watch_statuses(self, batch_ids) timeout = timeout or DEFAULT_TIMEOUT start_time = time() with self._wait_condition: while True: if self._statuses is not None: return _format_batch_statuses( self._statuses, batch_ids, self._batch_tracker) if time() - start_time > timeout: statuses = self._batch_tracker.get_statuses(batch_ids) return _format_batch_statuses( statuses, batch_ids, self._batch_tracker) self._wait_condition.wait(timeout - (time() - start_time))
164,042
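The loop above is the usual condition-variable wait-with-deadline pattern; a self-contained sketch of the same idea, independent of the batch tracker.

import threading
import time

_condition = threading.Condition()
_result = None

def wait_for_result(timeout=5):
    start = time.time()
    with _condition:
        while _result is None:
            remaining = timeout - (time.time() - start)
            if remaining <= 0:
                return None              # deadline reached; fall back, as above
            _condition.wait(remaining)
        return _result

def publish(value):
    global _result
    with _condition:
        _result = value
        _condition.notify()              # mirrors notify_batches_finished above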
Verify that the directory exists and is readable and writable. Args: path (str): a directory which should exist and be writable human_readable_name (str): a human readable string for the directory which is used in logging statements Returns: bool: False if an error exists, True otherwise.
def check_directory(path, human_readable_name): if not os.path.exists(path): LOGGER.error("%s directory does not exist: %s", human_readable_name, path) return False if not os.path.isdir(path): LOGGER.error("%s directory is not a directory: %s", human_readable_name, path) return False errors = True if not os.access(path, os.R_OK): LOGGER.error("%s directory is not readable: %s", human_readable_name, path) errors = False if not os.access(path, os.W_OK): LOGGER.error("%s directory is not writable: %s", human_readable_name, path) errors = False return errors
164,072
Reads the given file as a hex key. Args: key_filename: The filename where the key is stored. If None, defaults to the default key for the current user. Returns: Signer: the signer Raises: CliException: If unable to read the file.
def _read_signer(key_filename): filename = key_filename if filename is None: filename = os.path.join(os.path.expanduser('~'), '.sawtooth', 'keys', getpass.getuser() + '.priv') try: with open(filename, 'r') as key_file: signing_key = key_file.read().strip() except IOError as e: raise CliException('Unable to read key file: {}'.format(str(e))) try: private_key = Secp256k1PrivateKey.from_hex(signing_key) except ParseError as e: raise CliException('Unable to read key in file: {}'.format(str(e))) context = create_context('secp256k1') crypto_factory = CryptoFactory(context) return crypto_factory.new_signer(private_key)
164,082
Creates a batch from a list of transactions and a public key, and signs the resulting batch with the given signing key. Args: signer (:obj:`Signer`): The cryptographic signer transactions (list of `Transaction`): The transactions to add to the batch. Returns: `Batch`: The constructed and signed batch.
def _create_batch(signer, transactions): txn_ids = [txn.header_signature for txn in transactions] batch_header = BatchHeader( signer_public_key=signer.get_public_key().as_hex(), transaction_ids=txn_ids).SerializeToString() return Batch( header=batch_header, header_signature=signer.sign(batch_header), transactions=transactions)
164,083
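Putting the two helpers above together; the BatchList import path is an assumption about the SDK layout, and the transaction list itself is elided.

from sawtooth_sdk.protobuf.batch_pb2 import BatchList   # assumed module path

signer = _read_signer(None)                  # default key for the current user
transactions = []                            # Transaction messages built elsewhere
batch = _create_batch(signer, transactions)  # header signed with the same signer
batch_list = BatchList(batches=[batch])
# batch_list (or batch_list.SerializeToString()) can then go to send_batches().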
Create an ExecutionContext to run a transaction against. Args: state_hash (str): Merkle root to base state on. base_contexts (list of str): Context ids of contexts that will have their state applied to make this context. inputs (list of str): Addresses that can be read from. outputs (list of str): Addresses that can be written to. Returns: context_id (str): the unique context_id of the session
def create_context(self, state_hash, base_contexts, inputs, outputs): for address in inputs: if not self.namespace_is_valid(address): raise CreateContextException( "Address or namespace {} listed in inputs is not " "valid".format(address)) for address in outputs: if not self.namespace_is_valid(address): raise CreateContextException( "Address or namespace {} listed in outputs is not " "valid".format(address)) addresses_to_find = [add for add in inputs if len(add) == 70] address_values, reads = self._find_address_values_in_chain( base_contexts=base_contexts, addresses_to_find=addresses_to_find) context = ExecutionContext( state_hash=state_hash, read_list=inputs, write_list=outputs, base_context_ids=base_contexts) contexts_asked_not_found = [cid for cid in base_contexts if cid not in self._contexts] if contexts_asked_not_found: raise KeyError( "Basing a new context off of context ids {} " "that are not in context manager".format( contexts_asked_not_found)) context.create_initial(address_values) self._contexts[context.session_id] = context if reads: context.create_prefetch(reads) self._address_queue.put_nowait( (context.session_id, state_hash, reads)) return context.session_id
164,087
Breadth first search through the chain of contexts searching for the bytes values at the addresses in addresses_to_find. Args: base_contexts (list of str): The context ids to start with. addresses_to_find (list of str): Addresses to find values in the chain of contexts. Returns: tuple of found address_values and still not found addresses
def _find_address_values_in_chain(self, base_contexts, addresses_to_find): contexts_in_chain = deque() contexts_in_chain.extend(base_contexts) reads = list(addresses_to_find) address_values = [] context_ids_already_searched = [] context_ids_already_searched.extend(base_contexts) # There are two loop exit conditions, either all the addresses that # are being searched for have been found, or we run out of contexts # in the chain of contexts. while reads: try: current_c_id = contexts_in_chain.popleft() except IndexError: # There aren't any more contexts known about. break current_context = self._contexts[current_c_id] # First, check for addresses that have been deleted. deleted_addresses = current_context.get_if_deleted(reads) for address in deleted_addresses: if address is not None: address_values.append((address, None)) reads = list(set(reads) - set(deleted_addresses)) # Second, check for addresses that have been set in the context, # and remove those addresses from being asked about again. Here # any value of None means the address hasn't been set. values = current_context.get_if_set(reads) addresses_not_found = [] for address, value in zip(reads, values): if value is not None: address_values.append((address, value)) else: addresses_not_found.append(address) reads = addresses_not_found # Next check for addresses that might be in a context # because they were inputs. addresses_in_inputs = [address for address in reads if address in current_context] values = current_context.get_if_not_set(addresses_in_inputs) address_values.extend(list(zip(addresses_in_inputs, values))) for add in addresses_in_inputs: reads.remove(add) for c_id in current_context.base_contexts: if c_id not in context_ids_already_searched: contexts_in_chain.append(c_id) context_ids_already_searched.append(c_id) return address_values, reads
164,088
Delete contexts from the ContextManager. Args: context_id_list (list): a list of context ids Returns: None
def delete_contexts(self, context_id_list): for c_id in context_id_list: if c_id in self._contexts: del self._contexts[c_id]
164,089
Within a context, append data to the execution result. Args: context_id (str): the context id returned by create_context data (bytes): data to append Returns: (bool): True if the operation is successful, False if the context_id doesn't reference a known context.
def add_execution_data(self, context_id, data): if context_id not in self._contexts: LOGGER.warning("Context_id not in contexts, %s", context_id) return False context = self._contexts.get(context_id) context.add_execution_data(data) return True
164,095
Within a context, append an event to the execution result. Args: context_id (str): the context id returned by create_context event: the event to append Returns: (bool): True if the operation is successful, False if the context_id doesn't reference a known context.
def add_execution_event(self, context_id, event): if context_id not in self._contexts: LOGGER.warning("Context_id not in contexts, %s", context_id) return False context = self._contexts.get(context_id) context.add_execution_event(event) return True
164,096
Applies the given puts and deletes atomically. Args: puts (:iterable:`tuple`): an iterable of key/value pairs to insert deletes (:iterable:`str`): an iterable of keys to delete
def update(self, puts, deletes): with self._lmdb.begin(write=True, buffers=True) as txn: cursor = txn.cursor(self._main_db) # Process deletes first, to handle the case of new items replacing # old index locations for key in deletes: if not cursor.set_key(key.encode()): # value doesn't exist continue value = self._deserializer(bytes(cursor.value())) cursor.delete() for (index_db, index_key_fn) in self._indexes.values(): index_keys = index_key_fn(value) index_cursor = txn.cursor(index_db) for idx_key in index_keys: if index_cursor.set_key(idx_key): index_cursor.delete() # process all the inserts for key, value in puts: packed = self._serializer(value) cursor.put(key.encode(), packed, overwrite=True) for (index_db, index_key_fn) in self._indexes.values(): index_keys = index_key_fn(value) index_cursor = txn.cursor(index_db) for idx_key in index_keys: index_cursor.put(idx_key, key.encode()) self.sync()
164,127
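Stripped of the serializer and secondary indexes, the atomic put/delete pattern above boils down to a single lmdb write transaction; a minimal self-contained sketch with the lmdb package.

import lmdb

env = lmdb.open('/tmp/example.lmdb', subdir=False, map_size=1024 ** 3)

def update(puts, deletes):
    # One write transaction applies every delete and put, so the whole
    # update either commits or aborts together.
    with env.begin(write=True, buffers=True) as txn:
        for key in deletes:
            txn.delete(key.encode())
        for key, value in puts:
            txn.put(key.encode(), value, overwrite=True)

update(puts=[('a', b'1'), ('b', b'2')], deletes=['stale'])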
Walk to ADDRESS, creating nodes if necessary, and set the data there to UPDATER(data). Arguments: address (str): the address to be updated updater (function): takes the existing data and returns the new data prune (bool): if True, remove the node's children after updating
def update(self, address, updater, prune=False): node = self._get_or_create(address) node.data = updater(node.data) if prune: node.children.clear()
164,261
Remove all children (and descendants) below ADDRESS. Arguments: address (str): the address to be pruned
def prune(self, address): try: for step in self._walk_to_address(address): node = step except AddressNotInTree: return node.children.clear()
164,262
Returns a stream of pairs of node addresses and data, raising AddressNotInTree if ADDRESS is not in the tree. First the ancestors of ADDRESS (including itself) are yielded, earliest to latest, and then the descendants of ADDRESS are yielded in an unspecified order. Arguments: address (str): the address to be walked
def walk(self, address): for step in self._walk_to_address(address): node = step yield node.address, node.data to_process = deque() to_process.extendleft( node.children) while to_process: node = to_process.pop() yield node.address, node.data if node.children: to_process.extendleft( node.children)
164,263
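A hedged usage sketch tying update, prune, and walk together; `tree` is assumed to be an instance of the class these methods belong to (the class itself is not shown in this table), keyed by radix addresses.

# `tree` is assumed to expose the three methods shown above.
tree.update('ab', lambda data: (data or []) + ['reader-1'])    # creates the node if needed
tree.update('abcd', lambda data: (data or []) + ['reader-2'])

for address, data in tree.walk('abcd'):
    # Ancestors of 'abcd' (including itself) come first, then its descendants.
    print(address, data)

tree.prune('ab')    # removes every node (and its data) below 'ab'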
Returns all predecessor transaction ids for a write of the provided address. Arguments: address (str): the radix address Returns: a set of transaction ids
def find_write_predecessors(self, address): # A write operation must be preceded by: # - The "enclosing writer", which is the writer at the address or # the nearest writer higher (closer to the root) in the tree. # - The "enclosing readers", which are the readers at the address # or higher in the tree. # - The "children writers", which include all writers which are # lower in the tree than the address. # - The "children readers", which include all readers which are # lower in the tree than the address. # # The enclosing writer must be added as it may have modified a node # which must not happen after the current write. # # Writers which are higher in the tree than the enclosing writer may # have modified a node at or under the given address. However, we do # not need to include them here as they will have been considered a # predecessor to the enclosing writer. # # Enclosing readers must be included. Technically, we only need to add # enclosing readers which occurred after the enclosing writer, since # the readers preceding the writer will have been considered a # predecessor of the enclosing writer. However, with the current # data structure we can not determine the difference between readers # so we specify them all; this is mostly harmless as it will not change # the eventual sort order generated by the scheduler. # # Children readers must be added, since their reads must happen prior # to the write. predecessors = set() enclosing_writer = None node_stream = self._tree.walk(address) address_len = len(address) # First, walk down from the root to the address, collecting all readers # and updating the enclosing_writer if needed. try: for node_address, node in node_stream: if node is not None: predecessors.update(node.readers) if node.writer is not None: enclosing_writer = node.writer if len(node_address) >= address_len: break # If the address isn't on the tree, then there aren't any # predecessors below the node to worry about (because there # isn't anything at all), so return the predecessors that have # already been collected. except AddressNotInTree as err: if err.match is not None: return self.find_write_predecessors(err.match) return predecessors finally: if enclosing_writer is not None: predecessors.add(enclosing_writer) # Next, descend down the tree starting at the address node and # find all descendant readers and writers. for _, node in node_stream: if node is not None: if node.writer is not None: predecessors.add(node.writer) predecessors.update(node.readers) return predecessors
164,268
Add a predecessor-successor relationship between one txn id and a set of predecessors. Args: txn_id (str): The transaction id of the transaction. predecessors (set): The transaction ids of the transaction's predecessors Returns: None
def add_relationship(self, txn_id, predecessors): all_pred = set(predecessors) for pred in predecessors: all_pred.update(self._predecessors_by_id[pred]) self._predecessors_by_id[txn_id] = all_pred
164,269
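Because each new entry folds in its predecessors' own predecessor sets, the relation stays transitive without any graph traversal at query time; a self-contained sketch (the wrapper class name here is ours) using only the two methods shown in this table.

class PredecessorTracker:
    # Minimal stand-in holding only the dict the two methods above rely on.
    def __init__(self):
        self._predecessors_by_id = {}

    def add_relationship(self, txn_id, predecessors):
        all_pred = set(predecessors)
        for pred in predecessors:
            all_pred.update(self._predecessors_by_id[pred])
        self._predecessors_by_id[txn_id] = all_pred

    def is_predecessor_of_other(self, predecessor, others):
        return any(predecessor in self._predecessors_by_id[o] for o in others)

tracker = PredecessorTracker()
tracker.add_relationship('t1', set())
tracker.add_relationship('t2', {'t1'})
tracker.add_relationship('t3', {'t2'})
print(tracker.is_predecessor_of_other('t1', ['t3']))   # True, inherited through t2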
Returns whether the predecessor is a predecessor, or a predecessor of a predecessor..., of any of the others. Args: predecessor (str): The txn id of the predecessor. others (list of str): The txn ids of the possible successors. Returns: (bool)
def is_predecessor_of_other(self, predecessor, others): return any(predecessor in self._predecessors_by_id[o] for o in others)
164,270
Starting with the batch referenced by batch_signature, iterate back through the batches and for each valid batch collect the context_id. At the end, remove contexts for txns that are other txns' predecessors. Args: batch_signature (str): The batch to start from, moving back through the batches in the scheduler Returns: (list): Context ids that haven't previously been base contexts.
def _get_contexts_for_squash(self, batch_signature): batch = self._batches_by_id[batch_signature].batch index = self._batches.index(batch) contexts = [] txns_added_predecessors = [] for b in self._batches[index::-1]: batch_is_valid = True contexts_from_batch = [] for txn in b.transactions[::-1]: result = self._txn_results[txn.header_signature] if not result.is_valid: batch_is_valid = False break else: txn_id = txn.header_signature if txn_id not in txns_added_predecessors: txns_added_predecessors.append( self._txn_predecessors[txn_id]) contexts_from_batch.append(result.context_id) if batch_is_valid: contexts.extend(contexts_from_batch) return contexts
164,277
Decide if possible_successor should be replayed. Args: txn_id (str): Id of txn in failed batch. possible_successor (str): Id of txn to possibly replay. already_seen (list): A list of possible_successors that have been replayed. Returns: (bool): If the possible_successor should be replayed.
def _is_txn_to_replay(self, txn_id, possible_successor, already_seen): is_successor = self._is_predecessor_of_possible_successor( txn_id, possible_successor) in_different_batch = not self._is_in_same_batch(txn_id, possible_successor) has_not_been_seen = possible_successor not in already_seen return is_successor and in_different_batch and has_not_been_seen
164,281
Remove transactions from scheduled and txn_results for successors of txns in a failed batch. These transactions will be rescheduled, now or in the future, by next_transaction, giving a replay ability. Args: sig (str): Transaction header signature
def _remove_subsequent_result_because_of_batch_failure(self, sig): batch = self._batches_by_txn_id[sig] seen = [] for txn in batch.transactions: txn_id = txn.header_signature for poss_successor in self._scheduled.copy(): if not self.is_transaction_in_schedule(poss_successor): continue if self._is_txn_to_replay(txn_id, poss_successor, seen): if self._txn_has_result(poss_successor): del self._txn_results[poss_successor] self._scheduled.remove(poss_successor) self._txns_available[poss_successor] = \ self._transactions[poss_successor] else: self._outstanding.add(poss_successor) seen.append(poss_successor)
164,282
Set the first batch id that doesn't have all results. Args: txn_signature (str): The txn identifier of the transaction with results being set.
def _set_least_batch_id(self, txn_signature): batch = self._batches_by_txn_id[txn_signature] least_index = self._index_of_batch( self._batches_by_id[self._least_batch_id_wo_results].batch) current_index = self._index_of_batch(batch) all_prior = False if current_index <= least_index: return # Test to see if all batches from the least_batch to # the prior batch to the current batch have results. if all( all(t.header_signature in self._txn_results for t in b.transactions) for b in self._batches[least_index:current_index]): all_prior = True if not all_prior: return possible_least = self._batches[current_index].header_signature # Find the first batch from the current batch on, that doesn't have # all results. for b in self._batches[current_index:]: if not all(t.header_signature in self._txn_results for t in b.transactions): possible_least = b.header_signature break self._least_batch_id_wo_results = possible_least
164,285
Returns whether the transaction is in a valid batch. Args: txn_id (str): The transaction header signature. Returns: (bool): True if the txn's batch is valid, False otherwise.
def _txn_is_in_valid_batch(self, txn_id): batch = self._batches_by_txn_id[txn_id] # Return whether every transaction in the batch with a # transaction result is valid return all( self._txn_results[sig].is_valid for sig in set(self._txn_results).intersection( (txn.header_signature for txn in batch.transactions)))
164,288
Executes the key generation operation, given the parsed arguments. Args: args (:obj:`Namespace`): The parsed args.
def do_keygen(args): if args.key_name is not None: key_name = args.key_name else: key_name = 'validator' key_dir = get_key_dir() if not os.path.exists(key_dir): raise CliException("Key directory does not exist: {}".format(key_dir)) priv_filename = os.path.join(key_dir, key_name + '.priv') pub_filename = os.path.join(key_dir, key_name + '.pub') if not args.force: file_exists = False for filename in [priv_filename, pub_filename]: if os.path.exists(filename): file_exists = True print('file exists: {}'.format(filename), file=sys.stderr) if file_exists: raise CliException( 'files exist, rerun with --force to overwrite existing files') context = create_context('secp256k1') private_key = context.new_random_private_key() public_key = context.get_public_key(private_key) try: priv_exists = os.path.exists(priv_filename) with open(priv_filename, 'w') as priv_fd: if not args.quiet: if priv_exists: print('overwriting file: {}'.format(priv_filename)) else: print('writing file: {}'.format(priv_filename)) priv_fd.write(private_key.as_hex()) priv_fd.write('\n') # Get the uid and gid of the key directory keydir_info = os.stat(key_dir) keydir_gid = keydir_info.st_gid keydir_uid = keydir_info.st_uid # Set user and group on keys to the user/group of the key directory os.chown(priv_filename, keydir_uid, keydir_gid) # Set the private key u+rw g+r os.chmod(priv_filename, 0o640) pub_exists = os.path.exists(pub_filename) with open(pub_filename, 'w') as pub_fd: if not args.quiet: if pub_exists: print('overwriting file: {}'.format(pub_filename)) else: print('writing file: {}'.format(pub_filename)) pub_fd.write(public_key.as_hex()) pub_fd.write('\n') # Set user and group on keys to the user/group of the key directory os.chown(pub_filename, keydir_uid, keydir_gid) # Set the public key u+rw g+r o+r os.chmod(pub_filename, 0o644) except IOError as ioe: raise CliException('IOError: {}'.format(str(ioe)))
164,303
Sends a message containing our peers to the connection identified by connection_id. Args: connection_id (str): A unique identifier which identifies a connection on the network server socket.
def send_peers(self, connection_id): with self._lock: # Needs to actually be the list of advertised endpoints of # our peers peer_endpoints = list(self._peers.values()) if self._endpoint: peer_endpoints.append(self._endpoint) peers_response = GetPeersResponse(peer_endpoints=peer_endpoints) try: # Send a one_way message because the connection will be closed # if this is a temp connection. self._network.send( validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE, peers_response.SerializeToString(), connection_id, one_way=True) except ValueError: LOGGER.debug("Connection disconnected: %s", connection_id)
164,306
Adds candidate endpoints to the list of endpoints to attempt to peer with. Args: peer_endpoints ([str]): A list of public URIs which the validator can attempt to peer with.
def add_candidate_peer_endpoints(self, peer_endpoints): if self._topology: self._topology.add_candidate_peer_endpoints(peer_endpoints) else: LOGGER.debug("Could not add peer endpoints to topology. " "ConnectionManager does not exist.")
164,307
Registers a connected connection_id. Args: connection_id (str): A unique identifier which identifies a connection on the network server socket. endpoint (str): The publicly reachable endpoint of the new peer
def register_peer(self, connection_id, endpoint): with self._lock: if len(self._peers) < self._maximum_peer_connectivity: self._peers[connection_id] = endpoint self._topology.set_connection_status(connection_id, PeerStatus.PEER) LOGGER.debug("Added connection_id %s with endpoint %s, " "connected identities are now %s", connection_id, endpoint, self._peers) else: raise PeeringException( "At maximum configured number of peers: {} " "Rejecting peering request from {}.".format( self._maximum_peer_connectivity, endpoint)) public_key = self.peer_to_public_key(connection_id) if public_key: self._consensus_notifier.notify_peer_connected(public_key)
164,309
Removes a connection_id from the registry. Args: connection_id (str): A unique identifier which identifies a connection on the network server socket.
def unregister_peer(self, connection_id): public_key = self.peer_to_public_key(connection_id) if public_key: self._consensus_notifier.notify_peer_disconnected(public_key) with self._lock: if connection_id in self._peers: del self._peers[connection_id] LOGGER.debug("Removed connection_id %s, " "connected identities are now %s", connection_id, self._peers) self._topology.set_connection_status(connection_id, PeerStatus.TEMP) else: LOGGER.warning("Connection unregister failed as connection " "was not registered: %s", connection_id)
164,310
Sends a message via the network. Args: message_type (str): The type of the message. message (bytes): The message to be sent. connection_id (str): The connection to send it to.
def send(self, message_type, message, connection_id, one_way=False): try: self._network.send(message_type, message, connection_id, one_way=one_way) except ValueError: LOGGER.debug("Connection %s is no longer valid. " "Removing from list of peers.", connection_id) if connection_id in self._peers: del self._peers[connection_id]
164,320
Broadcast gossip messages. Broadcast the message to all peers unless they are in the excluded list. Args: gossip_message: The message to be broadcast. message_type: Type of the message. exclude: A list of connection_ids that should be excluded from this broadcast.
def broadcast(self, gossip_message, message_type, exclude=None): with self._lock: if exclude is None: exclude = [] for connection_id in self._peers.copy(): if connection_id not in exclude and \ self._network.is_connection_handshake_complete( connection_id): self.send( message_type, gossip_message.SerializeToString(), connection_id, one_way=True)
164,321
Adds candidate endpoints to the list of endpoints to attempt to peer with. Args: peer_endpoints ([str]): A list of public URIs which the validator can attempt to peer with.
def add_candidate_peer_endpoints(self, peer_endpoints): with self._lock: for endpoint in peer_endpoints: if endpoint not in self._candidate_peer_endpoints: self._candidate_peer_endpoints.append(endpoint)
164,331
Constructs an owned pointer. Initializing the pointer is left to the extending classes Args: drop_ffi_call_fn (str): the name of the FFI function to call on drop or garbage collection. initialized_ptr (ctypes.c_void_p:optional): a preinitialized pointer to the native memory
def __init__(self, drop_ffi_call_fn, initialized_ptr=None): if initialized_ptr is not None: self._ptr = initialized_ptr else: self._ptr = ctypes.c_void_p() self._drop_ffi_fn = drop_ffi_call_fn
164,348
Starts the genesis block creation process. Will call the given `on_done` callback on successful completion. Args: on_done (function): a function called on completion Raises: InvalidGenesisStateError: raised if a genesis block cannot be produced, or if the resulting block-chain-id cannot be saved.
def start(self, on_done): genesis_file = os.path.join(self._data_dir, 'genesis.batch') try: with open(genesis_file, 'rb') as batch_file: genesis_data = genesis_pb2.GenesisData() genesis_data.ParseFromString(batch_file.read()) LOGGER.info('Producing genesis block from %s', genesis_file) except IOError: raise InvalidGenesisStateError( "Genesis File {} specified, but unreadable".format( genesis_file)) initial_state_root = self._context_manager.get_first_root() genesis_batches = [batch for batch in genesis_data.batches] if genesis_batches: scheduler = SerialScheduler( self._context_manager.get_squash_handler(), initial_state_root, always_persist=True) LOGGER.debug('Adding %s batches', len(genesis_data.batches)) for batch in genesis_data.batches: scheduler.add_batch(batch) self._transaction_executor.execute(scheduler) scheduler.finalize() scheduler.complete(block=True) txn_receipts = [] state_hash = initial_state_root for batch in genesis_batches: result = scheduler.get_batch_execution_result( batch.header_signature) if result is None or not result.is_valid: raise InvalidGenesisStateError( 'Unable to create genesis block, due to batch {}' .format(batch.header_signature)) if result.state_hash is not None: state_hash = result.state_hash txn_results = scheduler.get_transaction_execution_results( batch.header_signature) txn_receipts += self._make_receipts(txn_results) settings_view = SettingsView( self._state_view_factory.create_view(state_hash)) name = settings_view.get_setting('sawtooth.consensus.algorithm.name') version = settings_view.get_setting( 'sawtooth.consensus.algorithm.version') if name is None or version is None: raise LocalConfigurationError( 'Unable to start validator; sawtooth.consensus.algorithm.name ' 'and sawtooth.consensus.algorithm.version must be set in the ' 'genesis block.') LOGGER.debug('Produced state hash %s for genesis block.', state_hash) block_builder = self._generate_genesis_block() block_builder.add_batches(genesis_batches) block_builder.set_state_hash(state_hash) block_publisher = self._get_block_publisher(initial_state_root) if not block_publisher.initialize_block(block_builder.block_header): LOGGER.error('Consensus refused to initialize consensus block.') raise InvalidGenesisConsensusError( 'Consensus refused to initialize genesis block.') if not block_publisher.finalize_block(block_builder.block_header): LOGGER.error('Consensus refused to finalize genesis block.') raise InvalidGenesisConsensusError( 'Consensus refused to finalize genesis block.') self._sign_block(block_builder) block = block_builder.build_block() blkw = BlockWrapper(block=block) LOGGER.info('Genesis block created: %s', blkw) self._block_manager.put([blkw.block]) self._block_manager.persist(blkw.identifier, "commit_store") self._txn_receipt_store.chain_update(block, txn_receipts) self._chain_id_manager.save_block_chain_id(block.header_signature) LOGGER.debug('Deleting genesis data.') os.remove(genesis_file) if on_done is not None: on_done()
164,361
Returns the block publisher based on the consensus module set by the "sawtooth_settings" transaction family. Args: state_hash (str): The current state root hash for reading settings. Raises: InvalidGenesisStateError: if any errors occur getting the BlockPublisher.
def _get_block_publisher(self, state_hash): state_view = self._state_view_factory.create_view(state_hash) try: class BatchPublisher: def send(self, transactions): # Consensus implementations are expected to have handling # in place for genesis operation. This should include # adding any authorization and registrations required # for the genesis node to the Genesis Batch list and # detecting validation of the Genesis Block and handling it # correctly. Batch publication is not allowed during # genesis operation since there is no network to validate # the batch yet. raise InvalidGenesisConsensusError( 'Consensus cannot send transactions during genesis.') consensus = ConsensusFactory.get_configured_consensus_module( NULL_BLOCK_IDENTIFIER, state_view) return consensus.BlockPublisher( BlockCache(self._block_store), state_view_factory=self._state_view_factory, batch_publisher=BatchPublisher(), data_dir=self._data_dir, config_dir=self._config_dir, validator_id=self._identity_signer.get_public_key().as_hex()) except UnknownConsensusModuleError as e: raise InvalidGenesisStateError(e)
164,362
Adds argument parser for the peer command Args: subparsers: Add parsers to this subparser object parent_parser: The parent argparse.ArgumentParser object
def add_peer_parser(subparsers, parent_parser): parser = subparsers.add_parser( 'peer', help='Displays information about validator peers', description="Provides a subcommand to list a validator's peers") grand_parsers = parser.add_subparsers(title='subcommands', dest='subcommand') grand_parsers.required = True add_peer_list_parser(grand_parsers, parent_parser)
164,383
Set up the asyncio event loop. Args: socket_type (int from zmq.*): One of zmq.DEALER or zmq.ROUTER complete_or_error_queue (queue.Queue): A way to propagate errors back to the calling thread. Needed since this function is directly used in Thread. Returns: None
def setup(self, socket_type, complete_or_error_queue): try: if self._secured: if self._server_public_key is None or \ self._server_private_key is None: raise LocalConfigurationError( "Attempting to start socket in secure mode, " "but complete server keys were not provided") self._event_loop = zmq.asyncio.ZMQEventLoop() asyncio.set_event_loop(self._event_loop) self._context = zmq.asyncio.Context() self._socket = self._context.socket(socket_type) self._socket.set(zmq.TCP_KEEPALIVE, 1) self._socket.set(zmq.TCP_KEEPALIVE_IDLE, self._connection_timeout) self._socket.set(zmq.TCP_KEEPALIVE_INTVL, self._heartbeat_interval) if socket_type == zmq.DEALER: self._socket.identity = "{}-{}".format( self._zmq_identity, hashlib.sha512(uuid.uuid4().hex.encode() ).hexdigest()[:23]).encode('ascii') if self._secured: # Generate ephemeral certificates for this connection public_key, secretkey = zmq.curve_keypair() self._socket.curve_publickey = public_key self._socket.curve_secretkey = secretkey self._socket.curve_serverkey = self._server_public_key self._socket.connect(self._address) elif socket_type == zmq.ROUTER: if self._secured: auth = AsyncioAuthenticator(self._context) self._auth = auth auth.start() auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY) self._socket.curve_secretkey = self._server_private_key self._socket.curve_publickey = self._server_public_key self._socket.curve_server = True try: self._socket.bind(self._address) except zmq.error.ZMQError as e: raise LocalConfigurationError( "Can't bind to {}: {}".format(self._address, str(e))) else: LOGGER.info("Listening on %s", self._address) self._dispatcher.add_send_message(self._connection, self.send_message) self._dispatcher.add_send_last_message(self._connection, self.send_last_message) asyncio.ensure_future(self._remove_expired_futures(), loop=self._event_loop) asyncio.ensure_future(self._receive_message(), loop=self._event_loop) asyncio.ensure_future(self._dispatch_message(), loop=self._event_loop) self._dispatcher_queue = asyncio.Queue() if self._monitor: self._monitor_fd = "inproc://monitor.s-{}".format( _generate_id()[0:5]) self._monitor_sock = self._socket.get_monitor_socket( zmq.EVENT_DISCONNECTED, addr=self._monitor_fd) asyncio.ensure_future(self._monitor_disconnects(), loop=self._event_loop) except Exception as e: # Put the exception on the queue where in start we are waiting # for it. complete_or_error_queue.put_nowait(e) self._close_sockets() raise if self._heartbeat: asyncio.ensure_future(self._do_heartbeat(), loop=self._event_loop) # Put a 'complete with the setup tasks' sentinel on the queue. complete_or_error_queue.put_nowait(_STARTUP_COMPLETE_SENTINEL) asyncio.ensure_future(self._notify_started(), loop=self._event_loop) self._event_loop.run_forever() # event_loop.stop called elsewhere will cause the loop to break out # of run_forever then it can be closed and the context destroyed. self._event_loop.close() self._close_sockets()
164,416
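The setup method above is meant to run on its own thread; the caller learns about success or failure through complete_or_error_queue. A minimal caller-side sketch of that pattern follows; only the queue protocol and the sentinel name come from the code above, while the thread wiring and the 10-second timeout are assumptions.

# Hedged sketch: start the event-loop thread and block until setup()
# reports success (the sentinel) or failure (an exception instance).
import queue
from threading import Thread

import zmq


def start(send_receive):
    complete_or_error = queue.Queue()
    thread = Thread(
        target=send_receive.setup,
        args=(zmq.ROUTER, complete_or_error))
    thread.daemon = True
    thread.start()

    result = complete_or_error.get(timeout=10)
    if isinstance(result, Exception):
        raise result
    return thread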
Adds an outbound connection to the network. Args: uri (str): The zmq-style (e.g. tcp://hostname:port) uri to attempt to connect to.
def add_outbound_connection(self, uri): LOGGER.debug("Adding connection to %s", uri) conn = OutboundConnection( connections=self._connections, endpoint=uri, dispatcher=self._dispatcher, zmq_identity=self._zmq_identity, secured=self._secured, server_public_key=self._server_public_key, server_private_key=self._server_private_key, future_callback_threadpool=self._future_callback_threadpool, heartbeat=True, connection_timeout=self._connection_timeout) self.outbound_connections[uri] = conn conn.start() self._add_connection(conn, uri) connect_message = ConnectionRequest(endpoint=self._public_endpoint) conn.send( validator_pb2.Message.NETWORK_CONNECT, connect_message.SerializeToString(), callback=partial( self._connect_callback, connection=conn, )) return conn
164,428
Returns the connection id associated with a publicly reachable endpoint or raises KeyError if the endpoint is not found. Args: endpoint (str): A zmq-style uri which identifies a publicly reachable endpoint.
def get_connection_id_by_endpoint(self, endpoint): with self._connections_lock: for connection_id in self._connections: connection_info = self._connections[connection_id] if connection_info.uri == endpoint: return connection_id raise KeyError()
164,439
Adds the endpoint to the connection definition. When the connection is created by the send/receive thread, we do not yet have the endpoint of the remote node. That is not known until we process the incoming ConnectionRequest. Args: connection_id (str): The identifier for the connection. endpoint (str): A zmq-style uri which identifies a publicly reachable endpoint.
def update_connection_endpoint(self, connection_id, endpoint): if connection_id in self._connections: connection_info = self._connections[connection_id] self._connections[connection_id] = \ ConnectionInfo(connection_info.connection_type, connection_info.connection, endpoint, connection_info.status, connection_info.public_key) else: LOGGER.debug("Could not update the endpoint %s for " "connection_id %s. The connection does not " "exist.", endpoint, connection_id)
164,440
Adds the public_key to the connection definition. Args: connection_id (str): The identifier for the connection. public_key (str): The public key used to enforce permissions on connections.
def update_connection_public_key(self, connection_id, public_key): if connection_id in self._connections: connection_info = self._connections[connection_id] self._connections[connection_id] = \ ConnectionInfo(connection_info.connection_type, connection_info.connection, connection_info.uri, connection_info.status, public_key) else: LOGGER.debug("Could not update the public key %s for " "connection_id %s. The connection does not " "exist.", public_key, connection_id)
164,441
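Both update methods above rebuild the record field by field. If ConnectionInfo is a namedtuple, as that pattern suggests, the same update can be written more compactly with _replace(); the sketch below is an illustration under that assumption, not the project's code.

# Hedged sketch: namedtuple _replace() as an alternative to field-by-field
# reconstruction of ConnectionInfo.
from collections import namedtuple

ConnectionInfo = namedtuple(
    'ConnectionInfo',
    ['connection_type', 'connection', 'uri', 'status', 'public_key'])


def update_endpoint(connections, connection_id, endpoint):
    info = connections.get(connection_id)
    if info is not None:
        connections[connection_id] = info._replace(uri=endpoint)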
Sends a message of message_type. Args: message_type (validator_pb2.Message): enum value data (bytes): serialized protobuf callback (function): a callback function to call when a response to this message is received one_way (bool): if True, no future is registered to wait for a response Returns: future.Future
def send(self, message_type, data, callback=None, one_way=False): message = validator_pb2.Message( correlation_id=_generate_id(), content=data, message_type=message_type) fut = future.Future(message.correlation_id, message.content, callback, timeout=self._connection_timeout) if not one_way: self._futures.put(fut) self._send_receive_thread.send_message(message) return fut
164,449
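A hedged usage sketch for send(): the connection object, the message type, the payload, and the callback signature are all placeholders chosen for illustration rather than taken from this excerpt.

# Hedged sketch of sending a message and reacting to its reply.
from sawtooth_validator.protobuf import validator_pb2  # assumed import path


def _on_reply(request, result):
    # Invoked once a matching response arrives (signature assumed).
    print("reply received")


fut = connection.send(
    validator_pb2.Message.PING_REQUEST,   # placeholder message type
    b'',                                  # placeholder payload
    callback=_on_reply)

# Fire-and-forget variant: no future is registered for a response.
connection.send(validator_pb2.Message.PING_REQUEST, b'', one_way=True)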
Returns the state view for an arbitrary block. Args: block_wrapper (BlockWrapper): The block for which a state view is to be returned state_view_factory (StateViewFactory): The state view factory used to create the StateView object Returns: StateView object associated with the block
def state_view_for_block(block_wrapper, state_view_factory): state_root_hash = \ block_wrapper.state_root_hash \ if block_wrapper is not None else None return state_view_factory.create_view(state_root_hash)
164,451
Returns the settings view for an arbitrary block. Args: block_wrapper (BlockWrapper): The block for which a settings view is to be returned settings_view_factory (SettingsViewFactory): The settings view factory used to create the SettingsView object Returns: SettingsView object associated with the block
def settings_view_for_block(block_wrapper, settings_view_factory): state_root_hash = \ block_wrapper.state_root_hash \ if block_wrapper is not None else None return settings_view_factory.create_settings_view(state_root_hash)
164,452
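A hedged sketch of calling the two helpers above as written; chain_head and the two factories are assumed to come from elsewhere in the application, and the setting name is illustrative.

# Both helpers tolerate a None block (e.g. before genesis), in which case
# the factory is asked for a view over the default state root.
state_view = state_view_for_block(chain_head, state_view_factory)
genesis_state_view = state_view_for_block(None, state_view_factory)

settings_view = settings_view_for_block(chain_head, settings_view_factory)
algorithm = settings_view.get_setting('sawtooth.consensus.algorithm')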
Remove spines of axis. Parameters: ax: axes to operate on sides: list of sides: top, left, bottom, right Examples: remove_spines(ax, ['top']) remove_spines(ax, ['top', 'bottom', 'right', 'left'])
def remove_spines(ax, sides): for side in sides: ax.spines[side].set_visible(False) return ax
165,344
Move the entire spine relative to the figure. Parameters: ax: axes to operate on sides: list of sides to move. Sides: top, left, bottom, right dists: list of float distances to move. Should match sides in length. Example: move_spines(ax, sides=['left', 'bottom'], dists=[-0.02, 0.1])
def move_spines(ax, sides, dists): for side, dist in zip(sides, dists): ax.spines[side].set_position(("axes", dist)) return ax
165,345
Remove ticks from axis. Parameters: ax: axes to work on x: if True, remove xticks. Default False. y: if True, remove yticks. Default False. Examples: remove_ticks(ax, x=True) remove_ticks(ax, x=True, y=True)
def remove_ticks(ax, x=False, y=False): if x: ax.xaxis.set_ticks_position("none") if y: ax.yaxis.set_ticks_position("none") return ax
165,346
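The three axis helpers above compose naturally. A small usage sketch follows; matplotlib itself is assumed, since it is not imported in this excerpt.

# Hedged sketch: declutter a simple plot with the helpers above.
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])

remove_spines(ax, ['top', 'right'])
move_spines(ax, sides=['left', 'bottom'], dists=[-0.02, -0.02])
remove_ticks(ax, x=True)

plt.show()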
Normalize embeddings matrix row-wise. Args: ord: normalization order. Possible values {1, 2, 'inf', '-inf'} inplace (bool): if True, overwrite the stored vectors; otherwise return a new Embedding.
def normalize_words(self, ord=2, inplace=False): if ord == 2: ord = None # numpy uses this flag to indicate l2. vectors = self.vectors.T / np.linalg.norm(self.vectors, ord, axis=1) if inplace: self.vectors = vectors.T return self return Embedding(vectors=vectors.T, vocabulary=self.vocabulary)
165,393
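A quick check of the behaviour described above; `embedding` stands for an already loaded Embedding instance (an assumption for the sketch).

# Hedged sketch: after L2 normalization every row should have unit norm.
import numpy as np

normalized = embedding.normalize_words(ord=2)        # returns a new Embedding
row_norms = np.linalg.norm(normalized.vectors, axis=1)
assert np.allclose(row_norms, 1.0)

embedding.normalize_words(ord=2, inplace=True)       # or modify in place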
Return the nearest k words to the given `word`. Args: word (string): single word. top_k (integer): decides how many neighbors to report. Returns: A list of words sorted by the distances. The closest is the first. Note: L2 metric is used to calculate distances.
def nearest_neighbors(self, word, top_k=10): #TODO(rmyeid): Use scikit ball tree, if scikit is available point = self[word] diff = self.vectors - point distances = np.linalg.norm(diff, axis=1) top_ids = distances.argsort()[1:top_k+1] return [self.vocabulary.id_word[i] for i in top_ids]
165,394
Calculate Euclidean pairwise distances between `word` and `words`. Args: word (string): single word. words (list): list of strings. Returns: numpy array of the distances. Note: L2 metric is used to calculate distances.
def distances(self, word, words): point = self[word] vectors = np.asarray([self[w] for w in words]) diff = vectors - point distances = np.linalg.norm(diff, axis=1) return distances
165,395
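A hedged sketch exercising nearest_neighbors and distances together; `embedding` is an already loaded Embedding and the query word is a placeholder.

# Hedged sketch: query the nearest words, then show their distances.
neighbors = embedding.nearest_neighbors("green", top_k=5)   # closest first
dists = embedding.distances("green", neighbors)

for word, dist in zip(neighbors, dists):
    print(word, float(dist))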
Return the collection that represents a specific language or task. Args: lang (string): Language code. task (string): Task name.
def get_collection(self, lang=None, task=None): if lang: id = "{}{}".format(Downloader.LANG_PREFIX, lang) elif task: id = "{}{}".format(Downloader.TASK_PREFIX, task) else: raise ValueError("You should pass either the task or the lang") try: return self.info(id) except ValueError as e: if lang: raise LanguageNotSupported("Language {} is not supported".format(id)) if task: raise TaskNotSupported("Task {} is not supported".format(id))
165,435
Return True if polyglot supports the language. Args: lang (string): Language code.
def supported_language(self, lang): try: self.get_collection(lang=lang) return True except LanguageNotSupported: return False
165,436
Languages that are covered by a specific task. Args: task (string): Task name.
def supported_languages(self, task=None): if task: collection = self.get_collection(task=task) return [isoLangs[x.id.split('.')[1]]["name"] for x in collection.packages] else: return [x.name.split()[0] for x in self.collections() if Downloader.LANG_PREFIX in x.id]
165,437
Tasks that are covered by a specific language. Args: lang (string): Language code.
def supported_tasks(self, lang=None): if lang: collection = self.get_collection(lang=lang) return [x.id.split('.')[0] for x in collection.packages] else: return [x.name.split()[0] for x in self.collections() if Downloader.TASK_PREFIX in x.id]
165,439
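A hedged sketch of the Downloader queries above; the language and task identifiers are placeholders, and the Downloader constructor arguments are not shown in this excerpt.

# Hedged sketch: check coverage before downloading anything.
downloader = Downloader()

if downloader.supported_language("ar"):
    print(downloader.supported_tasks(lang="ar"))     # tasks packaged for Arabic

print(downloader.supported_languages(task="ner2"))   # languages with this task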
Concatenates two text objects the same way Python strings are concatenated. Arguments: - `other`: a string or a text object
def __add__(self, other): if isinstance(other, basestring): return self.__class__(self.raw + other) elif isinstance(other, BaseBlob): return self.__class__(self.raw + other.raw) else: raise TypeError('Operands must be either strings or {0} objects' .format(self.__class__.__name__))
165,467
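A hedged usage sketch: concatenation mirrors str concatenation and returns a new object of the same class. `Text` is assumed here to be a BaseBlob subclass, as in the surrounding library.

blob = Text("The quick brown fox")
combined = blob + " jumps over the lazy dog."
print(type(combined).__name__)   # same class as blob
print(combined.raw)

# blob + 42 would raise TypeError, as the final branch above shows.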
Detector of the language used in `text`. Args: text (string): unicode string.
def __init__(self, text, quiet=False): self.__text = text self.reliable = True self.quiet = quiet self.detect(text)
165,495
Decide which language is used to write the text. The method first tries to detect the language with high reliability. If that is not possible, it switches to a best-effort strategy. Args: text (string): A snippet of text; the longer it is, the more reliably the language can be detected.
def detect(self, text): t = text.encode("utf-8") reliable, index, top_3_choices = cld2.detect(t, bestEffort=False) if not reliable: self.reliable = False reliable, index, top_3_choices = cld2.detect(t, bestEffort=True) if not self.quiet: if not reliable: raise UnknownLanguage("Try passing a longer snippet of text") else: logger.warning("Detector is not able to detect the language reliably.") self.languages = [Language(x) for x in top_3_choices] self.language = self.languages[0] return self.language
165,496
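A hedged usage sketch for the Detector above; the sample sentences are placeholders.

d = Detector("Bonjour tout le monde, comment allez-vous aujourd'hui ?")
print(d.language.code, d.language.confidence)   # best guess from cld2
for candidate in d.languages:                   # top-3 candidates
    print(candidate)

# quiet=True suppresses the UnknownLanguage error on very short input;
# the detector then only records reliable=False.
short = Detector("ok", quiet=True)
print(short.reliable)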
Return the filename that contains a specific language resource. Args: name (string): Name of the resource. lang (string): Language code to be loaded.
def locate_resource(name, lang, filter=None): task_dir = resource_dir.get(name, name) package_id = u"{}.{}".format(task_dir, lang) p = path.join(polyglot_path, task_dir, lang) if not path.isdir(p): if downloader.status(package_id) != downloader.INSTALLED: raise ValueError("This resource is available in the index " "but not downloaded, yet. Try to run\n\n" "polyglot download {}".format(package_id)) return path.join(p, os.listdir(p)[0])
165,507
Return a word embeddings object for `lang` and of type `type`. Args: lang (string): language code. task (string): parameters that define task. type (string): skipgram, cw, cbow ... normalize (boolean): returns normalized word embedding vectors.
def load_embeddings(lang="en", task="embeddings", type="cw", normalize=False): src_dir = "_".join((type, task)) if type else task p = locate_resource(src_dir, lang) e = Embedding.load(p) if type == "cw": e.apply_expansion(CaseExpander) e.apply_expansion(DigitExpander) if type == "sgns": e.apply_expansion(CaseExpander) if type == "ue": e.apply_expansion(CaseExpander) if normalize: e.normalize_words(inplace=True) return e
165,508
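A hedged usage sketch for load_embeddings; it assumes the English embeddings package has already been fetched (e.g. with `polyglot download embeddings2.en`).

# Hedged sketch: load normalized English embeddings and query them.
embeddings = load_embeddings(lang="en", type="cw", normalize=True)

print(embeddings["book"][:5])                       # first few vector components
print(embeddings.nearest_neighbors("book", top_k=5))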
Return a CountedVocabulary object. Args: lang (string): language code. type (string): wiki,...
def load_vocabulary(lang="en", type="wiki"): src_dir = "{}_vocab".format(type) p = locate_resource(src_dir, lang) return CountedVocabulary.from_vocabfile(p)
165,509
Return named entity extractor parameters for `lang` and version `version`. Args: lang (string): language code. version (string): version of the parameters to be used.
def load_ner_model(lang="en", version="2"): src_dir = "ner{}".format(version) p = locate_resource(src_dir, lang) fh = _open(p) try: return pickle.load(fh) except UnicodeDecodeError: fh.seek(0) return pickle.load(fh, encoding='latin1')
165,510
Return part-of-speech tagger parameters for `lang` and version `version`. Args: lang (string): language code. version (string): version of the parameters to be used.
def load_pos_model(lang="en", version="2"): src_dir = "pos{}".format(version) p = locate_resource(src_dir, lang) fh = _open(p) return dict(np.load(fh))
165,511
Return a morfessor model for `lang` and version `version`. Args: lang (string): language code. version (string): version of the parameters to be used.
def load_morfessor_model(lang="en", version="2"): src_dir = "morph{}".format(version) p = locate_resource(src_dir, lang) file_handler = _open(p) tmp_file_ = NamedTemporaryFile(delete=False) tmp_file_.write(file_handler.read()) tmp_file_.close() io = morfessor.MorfessorIO() model = io.read_any_model(tmp_file_.name) os.remove(tmp_file_.name) return model
165,513
Return a transliteration table for `lang` and version `version`. Args: lang (string): language code. version (string): version of the parameters to be used.
def load_transliteration_table(lang="en", version="2"): src_dir = "transliteration{}".format(version) p = locate_resource(src_dir, lang) file_handler = _open(p) return pickle.load(file_handler)
165,514
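A hedged sketch exercising the four loaders above; each call assumes the corresponding package has already been downloaded, and the package ids in the comment are assumptions.

# e.g. polyglot download ner2.en pos2.en morph2.en transliteration2.ar
ner_params = load_ner_model(lang="en", version="2")
pos_params = load_pos_model(lang="en", version="2")
segmenter = load_morfessor_model(lang="en", version="2")
table = load_transliteration_table(lang="ar", version="2")

# morfessor models expose viterbi_segment(word) -> (morphemes, score)
print(segmenter.viterbi_segment("unsupervised")[0])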
Build attributes word_id and id_word from input. Args: words (list): list of words sorted by frequency.
def __init__(self, words=None): words = self.sanitize_words(words) self.word_id = {w:i for i, w in enumerate(words)} self.id_word = {i:w for w,i in iteritems(self.word_id)}
165,539
Build attributes word_id and id_word from input. Args: word_count (dictionary): A dictionary of the type word:count or list of tuples of the type (word, count).
def __init__(self, word_count=None): if isinstance(word_count, dict): word_count = iteritems(word_count) sorted_counts = list(sorted(word_count, key=lambda wc: wc[1], reverse=True)) words = [w for w,c in sorted_counts] super(CountedVocabulary, self).__init__(words=words) self.word_count = dict(sorted_counts)
165,540
Returns a vocabulary with the most frequent `k` words. Args: k (integer): specifies the top k most frequent words to be returned.
def most_frequent(self, k): word_count = {w:self.word_count[w] for w in self.words[:k]} return CountedVocabulary(word_count=word_count)
165,543
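A hedged sketch of the vocabulary classes above; it assumes the `words` property used by most_frequent (not shown in this excerpt) returns words ordered by id.

# Hedged sketch: build a counted vocabulary and take its top entries.
counts = {"the": 120, "cat": 7, "sat": 5, "mat": 3}
vocab = CountedVocabulary(word_count=counts)

print(vocab.word_id["the"])       # 0 -- most frequent word gets the lowest id
print(vocab.id_word[1])           # 'cat'

top_two = vocab.most_frequent(2)
print(top_two.word_count)         # {'the': 120, 'cat': 7}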