Columns: docstring (string, lengths 52 to 499), function (string, lengths 67 to 35.2k), __index_level_0__ (int64, values 52.6k to 1.16M)
Combine package specifications' limitations. Args: specs (list of PackageSpec): the package specifications. Returns: list of PackageSpec: the new, merged list of PackageSpec.
def combine(specs):
    new_specs = {}
    for spec in specs:
        if new_specs.get(spec, None) is None:
            new_specs[spec] = spec
        else:
            new_specs[spec].add(spec)
    return list(new_specs.values())
891,176
Find method. Args: package (str): package to find. **kwargs (): additional keyword arguments. Returns: PackageSpec: the PackageSpec corresponding to the package, or None.
def find(self, package, **kwargs):
    if not exists(package):
        return None
    name, path = None, None
    enforce_init = kwargs.pop('enforce_init', True)
    if isdir(package):
        if isfile(join(package, '__init__.py')) or not enforce_init:
            name, path = basename(package), package
    elif isfile(package) and package.endswith('.py'):
        name, path = splitext(basename(package))[0], package
    if name and path:
        return PackageSpec(name, path)
    return None
891,177
Find method. Args: package (str): package to find. **kwargs (): additional keyword arguments. Returns: PackageSpec: the PackageSpec corresponding to the package, or None.
def find(self, package, **kwargs):
    spec = find_spec(package)
    if spec is None:
        return None
    limit = []
    if '.' in package:
        package, limit = package.split('.', 1)
        limit = [limit]
        spec = find_spec(package)
    if spec is not None:
        if spec.submodule_search_locations:
            path = spec.submodule_search_locations[0]
        elif spec.origin and spec.origin != 'built-in':
            path = spec.origin
        else:
            return None
        return PackageSpec(spec.name, path, limit)
    return None
891,178
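This finder delegates to the standard library's importlib machinery. A minimal standalone sketch of the calls it relies on (standard library only, no project objects involved):

from importlib.util import find_spec

spec = find_spec('json')
print(spec.name, spec.origin)  # 'json' and the path to json/__init__.py
# Packages expose their directory through submodule_search_locations:
print(list(find_spec('xml').submodule_search_locations))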
Initialization method. Args: finders (list of classes): list of package finder classes (not instances) in a specific order. Default: [LocalPackageFinder, InstalledPackageFinder].
def __init__(self, finders=None):
    if finders is None:
        self.finders = [LocalPackageFinder(), InstalledPackageFinder()]
    else:
        self.finders = [f() for f in finders]
891,179
Find a package using package finders. Return the first package found. Args: package (str): package to find. **kwargs (): additional keyword arguments used by finders. Returns: PackageSpec: the package spec if found, else None.
def find(self, package, **kwargs):
    for finder in self.finders:
        package_spec = finder.find(package, **kwargs)
        if package_spec:
            return package_spec
    return None
891,180
Retrieves environment variables from a namespace. Args: prefix (str): The prefix, without a trailing underscore. Returns: generator: Pairs of (unprefixed key, value) for each matching environment variable.
def get_environ(cls, prefix):
    return ((key[len(prefix) + 1:], value)
            for key, value in os.environ.items()
            if key.startswith('%s_' % prefix))
891,361
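A quick sketch of the prefix-stripping behaviour using only the standard library; the class wrapper is omitted and the MYAPP prefix is made up:

import os

os.environ['MYAPP_DEBUG'] = 'true'
os.environ['MYAPP_PORT'] = '8080'

pairs = ((key[len('MYAPP') + 1:], value)
         for key, value in os.environ.items()
         if key.startswith('MYAPP_'))
print(dict(pairs))  # {'DEBUG': 'true', 'PORT': '8080'}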
Retrieves an environment variable value as a dictionary. Args: name (str): The case-insensitive, unprefixed variable name. default: If provided, a default value will be returned instead of throwing ``EnvironmentError``. Returns: dict: The environment variable's value as a ``dict``. Raises: EnvironmentError: If the environment variable does not exist, and ``default`` was not provided.
def get_dict(self, name, default=None):
    if name not in self:
        if default is not None:
            return default
        raise EnvironmentError.not_found(self._prefix, name)
    return dict(**self.get(name))
891,363
Create new storage service client. Arguments: access_token(str): The access token used to authenticate with the service. environment(str): The service environment to be used for the client, 'prod' or 'dev'. Returns: A storage_service.Client instance
def new(cls, access_token, environment='prod'):
    api_client = ApiClient.new(access_token, environment)
    return cls(api_client)
891,483
Check if a certain path exists in the storage service. Args: path (str): The path to be checked Returns: True if the path exists, False otherwise Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes
def exists(self, path):
    self.__validate_storage_path(path)
    try:
        metadata = self.api_client.get_entity_by_query(path=path)
    except StorageNotFoundException:
        return False
    return metadata and 'uuid' in metadata
891,486
Get the parent entity of the entity pointed by the given path. Args: path (str): The path of the entity whose parent is needed Returns: A JSON object of the parent entity if found. Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes
def get_parent(self, path):
    self.__validate_storage_path(path, projects_allowed=False)
    path_steps = [step for step in path.split('/') if step]
    del path_steps[-1]
    parent_path = '/{0}'.format('/'.join(path_steps))
    return self.api_client.get_entity_by_query(path=parent_path)
891,487
Create a folder in the storage service pointed by the given path. Args: path (str): The path of the folder to be created Returns: None Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes
def mkdir(self, path):
    self.__validate_storage_path(path, projects_allowed=False)
    parent_metadata = self.get_parent(path)
    self.api_client.create_folder(path.split('/')[-1], parent_metadata['uuid'])
891,488
Delete an entity from the storage service using its path. Args: path (str): The path of the entity to be deleted. Returns: None Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes
def delete(self, path):
    self.__validate_storage_path(path, projects_allowed=False)
    entity = self.api_client.get_entity_by_query(path=path)
    if entity['entity_type'] in self.__BROWSABLE_TYPES:
        # At this point it can only be a folder
        contents = self.api_client.list_folder_content(entity['uuid'])
        if contents['count'] > 0:
            raise StorageArgumentException(
                'This method cannot delete non-empty folder. '
                'Please empty the folder first.')
        self.api_client.delete_folder(entity['uuid'])
    elif entity['entity_type'] == 'file':
        self.api_client.delete_file(entity['uuid'])
891,490
Initialization method. Args: *packages (args): list of packages to search for. build_tree (bool): auto-build the tree or not. build_dependencies (bool): auto-build the dependencies or not. enforce_init (bool): if True, only treat directories if they contain an ``__init__.py`` file.
def __init__(self, *packages, build_tree=True,
             build_dependencies=True, enforce_init=True):
    self.finder = Finder()
    self.specs = []
    self.not_found = []
    self.enforce_init = enforce_init

    specs = []
    for package in packages:
        spec = self.finder.find(package, enforce_init=enforce_init)
        if spec:
            specs.append(spec)
        else:
            self.not_found.append(package)

    if not specs:
        print('** dependenpy: DSM empty.', file=sys.stderr)

    self.specs = PackageSpec.combine(specs)
    for m in self.not_found:
        print('** dependenpy: Not found: %s.' % m, file=sys.stderr)

    super().__init__(build_tree)

    if build_tree and build_dependencies:
        self.build_dependencies()
891,666
Return the number of dependencies of this package to the given node. Args: to (Package/Module): target node. Returns: int: number of dependencies.
def cardinal(self, to):
    return sum(m.cardinal(to) for m in self.submodules)
891,671
Initialization method. Args: name (str): name of the module. path (str): path to the module. dsm (DSM): parent DSM. package (Package): parent Package.
def __init__(self, name, path, dsm=None, package=None):
    super().__init__()
    self.name = name
    self.path = path
    self.package = package
    self.dsm = dsm
    self.dependencies = []
891,672
Whether given item is contained inside this module. Args: item (Package/Module): a package or module. Returns: bool: True if self is item, or if item is self's package and self is an ``__init__`` module.
def __contains__(self, item):
    if self is item:
        return True
    elif self.package is item and self.name == '__init__':
        return True
    return False
891,673
Return all the import statements given an AST body (AST nodes). Args: ast_body (compiled code's body): the body to filter. Returns: list of dict: the import statements.
def get_imports(self, ast_body):
    imports = []
    for node in ast_body:
        if isinstance(node, ast.Import):
            imports.extend({'target': name.name, 'lineno': node.lineno}
                           for name in node.names)
        elif isinstance(node, ast.ImportFrom):
            for name in node.names:
                name = (
                    self.absolute_name(self.depth - node.level) + '.'
                    if node.level > 0 else ''
                ) + (
                    node.module + '.' if node.module else ''
                ) + name.name
                imports.append({'target': name, 'lineno': node.lineno})
        elif isinstance(node, Module.RECURSIVE_NODES):
            imports.extend(self.get_imports(node.body))
            if isinstance(node, ast.Try):
                imports.extend(self.get_imports(node.finalbody))
    return imports
891,680
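A standalone sketch of the same AST walk using only the standard library; it recurses only into ast.Try here, whereas the method above handles all of its RECURSIVE_NODES:

import ast

source = '''
import os
from collections import OrderedDict
try:
    import ujson as json
except ImportError:
    import json
'''
tree = ast.parse(source)

def walk_imports(body):
    for node in body:
        if isinstance(node, ast.Import):
            for alias in node.names:
                print(node.lineno, alias.name)
        elif isinstance(node, ast.ImportFrom):
            for alias in node.names:
                print(node.lineno, '%s.%s' % (node.module, alias.name))
        elif isinstance(node, ast.Try):
            walk_imports(node.body)
            for handler in node.handlers:
                walk_imports(handler.body)
            walk_imports(node.finalbody)

walk_imports(tree.body)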
Return the number of dependencies of this module to the given node. Args: to (Package/Module): the target node. Returns: int: number of dependencies.
def cardinal(self, to):
    return sum(1 for _ in filter(
        lambda d: not d.external and d.target in to,
        self.dependencies))
891,681
Initialization method. Args: source (Module): source Module. lineno (int): number of line at which import statement occurs. target (str/Module/Package): the target node. what (str): what is imported (optional).
def __init__(self, source, lineno, target, what=None):
    self.source = source
    self.lineno = lineno
    self.target = target
    self.what = what
891,682
Add SSH public key to a user's profile. Args: username: Username to attach SSH public key to. user_api: User API instance used to look up the user. filename: Filename containing keys to add (optional). Raises: ldap3.core.exceptions.LDAPNoSuchAttributeResult: ldapPublicKey isn't attached to objectClass
def add(self, username, user_api, filename=None):
    keys = API.__get_keys(filename)
    user = user_api.find(username)[0]
    distinguished_name = user.entry_dn
    if 'ldapPublicKey' not in user.objectClass:
        raise ldap3.core.exceptions.LDAPNoSuchAttributeResult(
            'LDAP Public Key Object Class not found. ' +
            'Please ensure user was created correctly.')
    else:
        for key in list(set(keys)):  # prevents duplicate insertion
            print(key)
            try:
                SSHKey(key).parse()
            except Exception as err:
                raise err from None
            else:
                operation = {'sshPublicKey': [(ldap3.MODIFY_ADD, [key])]}
                self.client.modify(distinguished_name, operation)
891,829
Fetch keys from ldap. Args: username: Username associated with keys to fetch (optional). Returns: Dictionary in {username: [public keys]} format.
def get_keys_from_ldap(self, username=None):
    result_dict = {}
    filter = ['(sshPublicKey=*)']
    if username is not None:
        filter.append('(uid={})'.format(username))
    attributes = ['uid', 'sshPublicKey']
    results = self.client.search(filter, attributes)
    for result in results:
        result_dict[result.uid.value] = result.sshPublicKey.values
    return result_dict
891,832
Read a package list from a given file path. Args: filepath: file path of the package list. Returns: a list of package names.
def load_requires_from_file(filepath):
    with open(filepath) as fp:
        return [pkg_name.strip() for pkg_name in fp.readlines()]
891,869
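A usage sketch feeding the result to setuptools; the requirements.txt content shown is hypothetical:

# requirements.txt contains one package name per line, e.g.:
#   requests
#   numpy>=1.20
install_requires = load_requires_from_file('requirements.txt')
# e.g. setup(..., install_requires=install_requires)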
Constructor. Args: element: Object to add into the node.
def __init__(self, element=None):
    super(TreeMapNode, self).__init__()
    self._element = element
    self._nodes = dict()
    self._parent = None
    self._depth = -1
892,048
Attach node to its parent. Args: node: Parent node. Note: ``node`` can be ``None``. In that case, the node is detached from its previous parent.
def set_parent(self, node):
    self._parent = node
    if node is None:
        # detach from parent
        self._depth = 0
    else:
        self._depth = node.get_depth() + 1
892,049
Add one child node to this node. Args: name (str): Name of the child. node (TreeMapNode): Node to add. Warning: No test is done to see whether or not a node was already attached with that name. If this is the case, the new node takes the place of the old one that is now unreachable. See :meth:`set_unique_child_node`.
def set_child_node(self, name, node):
    assert isinstance(node, TreeMapNode)
    self._nodes[name] = node
    node.set_parent(self)
892,050
Add one child node to this node. Args: name (str): Name of the child. node (TreeMapNode): Node to add. Note: The name must **not** be in use.
def set_unique_child_node(self, name, node):
    if name in self._nodes:
        raise RuntimeError("Name '%s' is already used for child node" % name)
    self.set_child_node(name, node)
892,051
Constructor. Args: element: object to attach to this root.
def __init__(self, element=None):
    super(RootTreeMapNode, self).__init__(element)
    self._depth = 0
892,056
Container to store an "action". Every file generation is considered an action. Args: file_pattern: fnmatch pattern. action_function: Callback without arguments. See documentation.
def __init__(self, file_pattern, action_function):
    super(GeneratorAction, self).__init__()
    self.__file_pattern = file_pattern
    self.__action_function = action_function
892,084
Return the **first** compatible :class:`GeneratorAction` for a given filename or ``None`` if none is found. Args: filename (str): The filename of the template to process.
def get_compatible_generator_action(self, filename):
    # find first compatible generator action
    for action in self.__generator_actions:
        if action.act_on_file(filename):
            return action
    return None
892,086
Constructor of a :program:`cygenja` template machine. Args: directory (str): Absolute or relative base directory. Everything happens in that directory and sub-directories. jinja2_environment: :program:`Jinja2` environment. logger: A logger (from the standard ``logging`` module) or ``None`` if no logging is wanted. raise_exception_on_warning (bool): If set to ``True``, raise a ``RuntimeError`` when logging a warning.
def __init__(self, directory, jinja2_environment, logger=None,
             raise_exception_on_warning=False):
    super(Generator, self).__init__()
    # before all the rest, prepare logging
    self.__logger = logger
    self.__raise_exception_on_warning = raise_exception_on_warning
    # test if directory exists
    if not os.path.isdir(directory):
        self.log_error("Main directory '%s' does not exist!" % directory)
    self.__root_directory = os.path.abspath(directory)  # main base directory
    self.__jinja2_environment = jinja2_environment
    self.__jinja2_predefined_filters = self.__jinja2_environment.filters.keys()
    self.__extensions = {}
    self.__actions = TreeMap()
    self.__default_action = None
892,087
Log a warning if ``logger`` exists. Args: msg: Warning to log. Warning: Can raise a ``RuntimeError`` if this was asked in the constructor.
def log_warning(self, msg):
    if self.__logger:
        self.__logger.warning(msg)
    if self.__raise_exception_on_warning:
        raise RuntimeError(msg)
892,088
Log an error and raise an exception. Args: msg: Error message to log. Raises: RuntimeError: With the message.
def log_error(self, msg):
    if self.__logger:
        self.__logger.error(msg)
    raise RuntimeError(msg)
892,089
Add/register one filter. Args: filter_name (str): Filter name used inside :program:`Jinja2` tags. filter_ref: Reference to the filter itself, i.e. the corresponding :program:`Python` function. force (bool): If set to ``True``, forces the registration of a filter no matter if it already exists or not. Note: The list of user added/registered filters can be retrieved with :meth:`registered_filters_list`.
def register_filter(self, filter_name, filter_ref, force=False):
    if not force and (filter_name in self.filters_list()):
        self.log_warning("Filter %s already exists, ignore redefinition."
                         % filter_name)
        return
    self.__jinja2_environment.filters[filter_name] = filter_ref
892,090
Add/register filters. Args: filters (dict): Dictionary of Python functions to use as :program:`Jinja2` filters. force (bool): If set to ``True``, forces the registration of a filter no matter if it already exists or not.
def register_filters(self, filters, force=False):
    for filter_name, filter_ref in filters.items():
        self.register_filter(filter_name, filter_ref, force)
892,091
Add/register a file extension. Args: ext_in (str): Extension of input files. ext_out (str): Extension of corresponding output files. force (bool): If ``force`` is set to ``True``, simply overwrite existing extensions, otherwise do nothing. If the ``logger`` is set, log a warning about the duplicate extension if ``force == False``.
def register_extension(self, ext_in, ext_out, force=False):
    if not force and (ext_in in self.__extensions.keys()):
        self.log_warning("Extension %s already exists, ignore redefinition."
                         % ext_in)
        return
    self.__extensions[ext_in] = ext_out
892,094
Add/register extensions. Args: exts (dict): Dictionary mapping input extensions to output extensions. force (bool): If ``force`` is set to ``True``, simply overwrite existing extensions, otherwise do nothing. If the ``logger`` is set, log a warning about the duplicate extension if ``force == False``.
def register_extensions(self, exts, force=False):
    for ext_in, ext_out in exts.items():
        self.register_extension(ext_in, ext_out, force)
892,095
Add action into the dictionary of actions. Args: relative_directory: Relative directory under which the action is registered. action: The :class:`GeneratorAction` to add.
def __add_action(self, relative_directory, action):
    generator_action_container = self.__actions.retrieve_element_or_default(
        relative_directory, None)
    if generator_action_container is None:
        generator_action_container = GeneratorActionContainer()
        generator_action_container.add_generator_action(action)
        self.__actions.add_element(location=relative_directory,
                                   element=generator_action_container)
    else:
        generator_action_container.add_generator_action(action)
892,096
Detect if given function is really an action function. Args: action_function: Function to test. Note: We don't care if the variable refers to a function but rather if it is callable or not.
def __is_function_action(self, action_function):
    # test if function returns a couple of values
    is_function_action = True
    if not hasattr(action_function, '__call__'):
        return False
    # OK, callable. Do we receive the right arguments?
    try:
        for end_string, context in action_function():
            if not isinstance(end_string, str):
                self.log_error("Action function must return end of filename "
                               "as a string as first argument")
            if not isinstance(context, dict):
                self.log_error("Action function must return context "
                               "as a dict as second argument")
            break
    except Exception:
        is_function_action = False
    return is_function_action
892,097
Create a new storage service REST client. Arguments: environment: The service environment to be used for the client access_token: The access token used to authenticate with the service Returns: A storage_service.api.ApiClient instance Example: >>> storage_client = ApiClient.new(my_access_token)
def new(cls, access_token, environment='prod'):
    request = RequestBuilder \
        .request(environment) \
        .to_service(cls.SERVICE_NAME, cls.SERVICE_VERSION) \
        .throw(
            StorageForbiddenException,
            lambda resp: 'You are forbidden to do this.'
            if resp.status_code == 403 else None
        ) \
        .throw(
            StorageNotFoundException,
            lambda resp: 'The entity is not found'
            if resp.status_code == 404 else None
        ) \
        .throw(
            StorageException,
            lambda resp: 'Server response: {0} - {1}'.format(
                resp.status_code, resp.text)
            if not resp.ok else None
        )
    authenticated_request = request.with_token(access_token)
    return cls(request, authenticated_request)
892,368
Delete a project. It will recursively delete all the content. Args: project (str): The UUID of the project to be deleted. Returns: None Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: 403 StorageNotFoundException: 404 HTTPError: other non-20x error codes
def delete_project(self, project):
    if not is_valid_uuid(project):
        raise StorageArgumentException(
            'Invalid UUID for project: {0}'.format(project))
    self._authenticated_request \
        .to_endpoint('project/{}/'.format(project)) \
        .delete()
892,379
Delete a folder. It will recursively delete all the content. Args: folder (str): The UUID of the folder to be deleted. Returns: None Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: 403 StorageNotFoundException: 404 HTTPError: other non-20x error codes
def delete_folder(self, folder):
    if not is_valid_uuid(folder):
        raise StorageArgumentException(
            'Invalid UUID for folder: {0}'.format(folder))
    self._authenticated_request \
        .to_endpoint('folder/{}/'.format(folder)) \
        .delete()
892,383
Copy file content from source file to target file. Args: file_id (str): The UUID of the file whose content is written. source_file (str): The UUID of the file whose content is copied. Returns: None Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes
def copy_file_content(self, file_id, source_file):
    if not is_valid_uuid(file_id):
        raise StorageArgumentException(
            'Invalid UUID for file_id: {0}'.format(file_id))
    if not is_valid_uuid(source_file):
        raise StorageArgumentException(
            'Invalid UUID for source_file: {0}'.format(source_file))
    self._authenticated_request \
        .to_endpoint('file/{}/content/'.format(file_id)) \
        .with_headers({'X-Copy-From': source_file}) \
        .put()
892,385
Delete a file. Args: file_id (str): The UUID of the file to delete. Returns: None Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes
def delete_file(self, file_id):
    if not is_valid_uuid(file_id):
        raise StorageArgumentException(
            'Invalid UUID for file_id: {0}'.format(file_id))
    self._authenticated_request \
        .to_endpoint('file/{}/'.format(file_id)) \
        .delete()
892,388
Determine whether two SQL items are equal. See https://docs.djangoproject.com/en/1.8/ref/migration-operations/#runsql. Args: sqls1, sqls2: SQL items in the same format as supported by Django's RunSQL operation. Returns: (bool) `True` if equal, otherwise `False`.
def is_sql_equal(sqls1, sqls2):
    is_seq1 = isinstance(sqls1, (list, tuple))
    is_seq2 = isinstance(sqls2, (list, tuple))
    if not is_seq1:
        sqls1 = (sqls1,)
    if not is_seq2:
        sqls2 = (sqls2,)
    if len(sqls1) != len(sqls2):
        return False
    for sql1, sql2 in zip(sqls1, sqls2):
        sql1, params1 = _sql_params(sql1)
        sql2, params2 = _sql_params(sql2)
        if sql1 != sql2 or params1 != params2:
            return False
    return True
892,415
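A sketch of the normalisation this comparison performs, assuming _sql_params is the project's helper that splits a RunSQL-style item into a (sql, params) pair:

# A bare string and a one-element list of the same SQL compare equal:
is_sql_equal('SELECT 1;', ['SELECT 1;'])                    # True
# Items with different parameters do not:
is_sql_equal([('SELECT %s;', [1])], [('SELECT %s;', [2])])  # False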
Generate forward operations for changing/creating SQL item dependencies. Dependencies are only in-memory and should reflect database dependencies, so changing them in SQL config does not alter the database. Such actions are persisted in a separate operation type - `AlterSQLState`. Args: dep_changed_keys (list): Data about keys that have their dependencies changed. List of tuples (key, removed_dependencies, added_dependencies).
def _generate_altered_sql_dependencies(self, dep_changed_keys):
    for key, removed_deps, added_deps in dep_changed_keys:
        app_label, sql_name = key
        operation = AlterSQLState(sql_name,
                                  add_dependencies=tuple(added_deps),
                                  remove_dependencies=tuple(removed_deps))
        sql_deps = [key]
        self.add_sql_operation(app_label, sql_name, operation, sql_deps)
892,421
Consume and process arguments and store the result. ARGS: argv <list str>: The argument list to parse. usedname <str>: The string used by the user to invoke the option. location <str>: A user friendly string describing where the parser got this data from.
def parse(self, argv, usedname, location):
    try:
        value = self.format.parse(argv)
    except formats.BadNumberOfArguments as e:
        raise BadNumberOfArguments(usedname, e.required, e.supplied)
    except formats.BadArgument as e:
        raise BadArgument(usedname, e.argument, e.message)
    if self.recurring:
        self.value.append(value)
    else:
        self.value = value
    self.location = location
892,460
Parse a string lexically and store the result. ARGS: argsstr <str>: The string to parse. usedname <str>: The string used by the user to invoke the option. location <str>: A user friendly string describing where the parser got this data from.
def parsestr(self, argsstr, usedname, location):
    try:
        value = self.format.parsestr(argsstr)
    except formats.BadNumberOfArguments as e:
        raise BadNumberOfArguments(usedname, e.required, e.supplied)
    except formats.BadArgument as e:
        raise BadArgument(usedname, e.argument, e.message)
    if self.recurring:
        self.value.append(value)
    else:
        self.value = value
    self.location = location
892,461
Textwrap an indented paragraph. ARGS: width = 0 <int>: Maximum allowed page width. 0 means use default from self.iMaxHelpWidth.
def _wrap(self, text, indent=0, width=0):
    text = _list(text)
    if not width:
        width = self.width
    paragraph = text[0].lstrip()
    s = ' ' * (len(text[0]) - len(paragraph) + indent)
    wrapped = textwrap.wrap(paragraph.strip(), width,
                            initial_indent=s, subsequent_indent=s)
    return '\n'.join(wrapped)
892,480
Textwrap usage instructions. ARGS: width = 0 <int>: Maximum allowed page width. 0 means use default from self.iMaxHelpWidth.
def _wrapusage(self, usage=None, width=0):
    if not width:
        width = self.width
    return textwrap.fill('USAGE: ' + self.format_usage(usage),
                         width=width,
                         subsequent_indent=' ...')
892,483
Return brief help containing Title and usage instructions. ARGS: width = 0 <int>: Maximum allowed page width. 0 means use default from self.iMaxHelpWidth.
def shorthelp(self, width=0):
    out = []
    out.append(self._wrap(self.docs['title'], width=width))
    if self.docs['description']:
        out.append(self._wrap(self.docs['description'], indent=2, width=width))
    out.append('')
    out.append(self._wrapusage(width=width))
    out.append('')
    return '\n'.join(out)
892,484
Get node corresponding to last location in a :class:`LocationDescriptor` object. Args: loc_descriptor: A :class:`LocationDescriptor` object. create_non_existing_nodes (bool): Do we create non existing nodes along the way (including the last node)? Raises: RuntimeError: if a node along the path given by the :class:`LocationDescriptor` object does not exist **and** ``create_non_existing_nodes`` is set to ``False``.
def _get_node(self, loc_descriptor, create_non_existing_nodes=False):
    node = self._root_node
    for location in loc_descriptor.generate_all_sub_locations():
        child = node.get_child_node_or_default(location, None)
        if child is None:
            if not create_non_existing_nodes:
                raise RuntimeError("Node at location '%s' in '%s' does not exist!"
                                   % (location, loc_descriptor.to_string()))
            else:
                # create empty node
                child = TreeMapNode(None)
                node.set_child_node(location, child)
                self._nbr_of_nodes += 1
        node = child
    return node
892,496
Sets the service name and version the request should target Args: service (str): The name of the service as displayed in the services.json file version (str): The version of the service as displayed in the services.json file Returns: The request builder instance in order to chain calls
def to_service(self, service, version):
    service_url = self._service_locator.get_service_url(service, version)
    return self.__copy_and_set('service_url',
                               self.__strip_trailing_slashes(service_url))
892,932
Adds headers to the request Args: headers (dict): The headers to add to the request headers Returns: The request builder instance in order to chain calls
def with_headers(self, headers):
    copy = headers.copy()
    copy.update(self._headers)
    return self.__copy_and_set('headers', copy)
892,933
Adds parameters to the request params Args: params (dict): The parameters to add to the request params Returns: The request builder instance in order to chain calls
def with_params(self, params):
    copy = params.copy()
    copy.update(self._params)
    return self.__copy_and_set('params', copy)
892,934
Defines whether an exception should be thrown after the request is sent Args: exception_class (class): The class of the exception to instantiate should_throw (function): The predicate that should indicate if the exception should be thrown. This function will be called with the response as a parameter Returns: The request builder instance in order to chain calls
def throw(self, exception_class, should_throw):
    return self.__copy_and_set('throws',
                               self._throws + [(exception_class, should_throw)])
892,935
Initialization method. Args: build_tree (bool): whether to immediately build the tree or not.
def __init__(self, build_tree=True):
    self._target_cache = {}
    self._item_cache = {}
    self._contains_cache = {}
    self._matrix_cache = {}
    self._graph_cache = {}
    self._treemap_cache = None
    self.modules = []
    self.packages = []
    if build_tree:
        self.build_tree()
892,986
Get result of _contains, cache it and return it. Args: item (Package/Module): a package or module. Returns: bool: True if self contains item, False otherwise.
def __contains__(self, item):
    if item not in self._contains_cache:
        self._contains_cache[item] = self._contains(item)
    return self._contains_cache[item]
892,987
Return the corresponding Package or Module object. Args: item (str): name of the package/module, dot-separated. Returns: Package/Module: corresponding object.
def __getitem__(self, item):
    depth = item.count('.') + 1
    parts = item.split('.', 1)
    for m in self.modules:
        if parts[0] == m.name:
            if depth == 1:
                return m
    for p in self.packages:
        if parts[0] == p.name:
            if depth == 1:
                return p
            item = p.get(parts[1])
            if item:
                return item
    raise KeyError(item)
892,988
Whether given item is contained inside the node modules/packages. Args: item (Package/Module): a package or module. Returns: bool: True if self is item or item in self's packages/modules.
def _contains(self, item):
    if self is item:
        return True
    for m in self.modules:
        if item in m:
            return True
    for p in self.packages:
        if item in p:
            return True
    return False
892,990
Get item through ``__getitem__`` and cache the result. Args: item (str): name of package or module. Returns: Package/Module: the corresponding object.
def get(self, item):
    if item not in self._item_cache:
        try:
            item = self.__getitem__(item)
        except KeyError:
            item = None
        self._item_cache[item] = item
    return self._item_cache[item]
892,991
Get the result of _get_target, cache it and return it. Args: target (str): target to find. Returns: Package/Module: package containing target or corresponding module.
def get_target(self, target):
    if target not in self._target_cache:
        self._target_cache[target] = self._get_target(target)
    return self._target_cache[target]
892,992
Get the Package or Module related to given target. Args: target (str): target to find. Returns: Package/Module: package containing target or corresponding module.
def _get_target(self, target):
    depth = target.count('.') + 1
    parts = target.split('.', 1)
    for m in self.modules:
        if parts[0] == m.name:
            if depth < 3:
                return m
    for p in self.packages:
        if parts[0] == p.name:
            if depth == 1:
                return p
            # pylama:ignore=W0212
            target = p._get_target(parts[1])
            if target:
                return target
            # FIXME: can lead to internal dep instead of external
            # see example with django.contrib.auth.forms
            # importing forms from django
            # Idea: when parsing files with ast, record what objects
            # are defined in the module. Then check here if the given
            # part is one of these objects.
            if depth < 3:
                return p
    return None
892,993
Print the graph for self's nodes. Args: format (str): output format (csv, json or text). output (file): file descriptor on which to write. depth (int): depth of the graph.
def print_graph(self, format=None, output=sys.stdout, depth=0, **kwargs):
    graph = self.as_graph(depth=depth)
    graph.print(format=format, output=output, **kwargs)
892,995
Print the matrix for self's nodes. Args: format (str): output format (csv, json or text). output (file): file descriptor on which to write. depth (int): depth of the matrix.
def print_matrix(self, format=None, output=sys.stdout, depth=0, **kwargs):
    matrix = self.as_matrix(depth=depth)
    matrix.print(format=format, output=output, **kwargs)
892,996
Print the treemap for self's nodes. Args: format (str): output format (csv, json or text). output (file): file descriptor on which to write.
def print_treemap(self, format=None, output=sys.stdout, **kwargs):
    treemap = self.as_treemap()
    treemap.print(format=format, output=output, **kwargs)
892,997
Create a graph with self as node, cache it, return it. Args: depth (int): depth of the graph. Returns: Graph: an instance of Graph.
def as_graph(self, depth=0):
    if depth in self._graph_cache:
        return self._graph_cache[depth]
    self._graph_cache[depth] = graph = Graph(self, depth=depth)
    return graph
893,001
Create a matrix with self as node, cache it, return it. Args: depth (int): depth of the matrix. Returns: Matrix: an instance of Matrix.
def as_matrix(self, depth=0):
    if depth in self._matrix_cache:
        return self._matrix_cache[depth]
    self._matrix_cache[depth] = matrix = Matrix(self, depth=depth)
    return matrix
893,002
Return the absolute name of the node. Concatenate names from root to self within depth. Args: depth (int): maximum depth to go to. Returns: str: absolute name of the node (until given depth is reached).
def absolute_name(self, depth=0):
    node, node_depth = self, self.depth
    if depth < 1:
        depth = node_depth
    while node_depth > depth and node.package is not None:
        node = node.package
        node_depth -= 1
    names = []
    while node is not None:
        names.append(node.name)
        node = node.package
    return '.'.join(reversed(names))
893,006
Returns the best **real** type for a **real** sum for a given type. For instance: INT32_t -> FLOAT64_t Args: cysparse_type (str): A CySparse type string, e.g. 'INT32_t'.
def cysparse_type_to_real_sum_cysparse_type(cysparse_type):
    r_type = None
    if cysparse_type in ['INT32_t', 'UINT32_t', 'INT64_t', 'UINT64_t']:
        r_type = 'FLOAT64_t'
    elif cysparse_type in ['FLOAT32_t', 'FLOAT64_t']:
        r_type = 'FLOAT64_t'
    elif cysparse_type in ['FLOAT128_t']:
        r_type = 'FLOAT128_t'
    elif cysparse_type in ['COMPLEX64_t', 'COMPLEX128_t']:
        r_type = 'FLOAT64_t'
    elif cysparse_type in ['COMPLEX256_t']:
        r_type = 'FLOAT128_t'
    else:
        raise TypeError("Not a recognized type")
    assert r_type in ['FLOAT64_t', 'FLOAT128_t']
    return r_type
893,069
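A few spot checks of the mapping above, runnable once the function is in scope:

assert cysparse_type_to_real_sum_cysparse_type('INT32_t') == 'FLOAT64_t'
assert cysparse_type_to_real_sum_cysparse_type('COMPLEX128_t') == 'FLOAT64_t'
assert cysparse_type_to_real_sum_cysparse_type('COMPLEX256_t') == 'FLOAT128_t'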
Returns the **real** type for the real or imaginary part of a **real** complex type. For instance: COMPLEX128_t -> FLOAT64_t Args: cysparse_type (str): A CySparse complex type string.
def cysparse_real_type_from_real_cysparse_complex_type(cysparse_type):
    r_type = None
    if cysparse_type in ['COMPLEX64_t']:
        r_type = 'FLOAT32_t'
    elif cysparse_type in ['COMPLEX128_t']:
        r_type = 'FLOAT64_t'
    elif cysparse_type in ['COMPLEX256_t']:
        r_type = 'FLOAT128_t'
    else:
        raise TypeError("Not a recognized complex type")
    return r_type
893,070
Main function. This function is the command line entry point. Args: args (list of str): the arguments passed to the program. Returns: int: return code being 0 (OK), 1 (dsm empty) or 2 (error).
def main(args=None):
    parser = get_parser()
    args = parser.parse_args(args=args)

    if not (args.matrix or args.dependencies or args.treemap or args.graph):
        args.matrix = True

    # split comma-separated args
    packages = []
    for arg in args.packages:
        if ',' in arg:
            for package in arg.split(','):
                if package not in packages:
                    packages.append(package)
        elif arg not in packages:
            packages.append(arg)

    # guess convenient depth
    depth = args.depth
    if depth is None:
        depth = guess_depth(packages)

    # open file if not stdout
    output = args.output
    if isinstance(output, str):
        output = open(output, 'w')

    dsm = DSM(*packages, build_tree=True, build_dependencies=True,
              enforce_init=not args.greedy)
    if dsm.empty:
        return 1

    indent = args.indent
    if indent is None:
        if args.format == CSV:
            indent = 0
        else:
            indent = 2
    elif indent < 0 and args.format == JSON:
        # special case for json.dumps indent argument
        indent = None

    try:
        if args.dependencies:
            dsm.print(format=args.format, output=output, indent=indent)
        elif args.matrix:
            dsm.print_matrix(format=args.format, output=output,
                             depth=depth, indent=indent)
        elif args.treemap:
            dsm.print_treemap(format=args.format, output=output)
        elif args.graph:
            dsm.print_graph(format=args.format, output=output,
                            depth=depth, indent=indent)
    except BrokenPipeError:
        # avoid traceback
        return 2
    return 0
893,126
Outputs the XML content (string) into a file. If `output_loc` is supplied and it's a file (not directory), the output will be saved there and the `filename` is ignored. Args: xml_str: string with XML document output_loc: file or directory for saving the file filename: file name that will be used if `output_loc` is directory If it is needed and is not supplied, it will be generated
def write_xml(xml_str, output_loc=None, filename=None):
    if not xml_str:
        raise Dump2PolarionException("No data to write.")
    filename_fin = _get_filename(output_loc=output_loc, filename=filename)
    with io.open(filename_fin, "w", encoding="utf-8") as xml_file:
        xml_file.write(get_unicode_str(xml_str))
    logger.info("Data written to '%s'", filename_fin)
893,417
Outputs the XML content (from XML element) into a file. If `output_loc` is supplied and it's a file (not directory), the output will be saved there and the `filename` is ignored. Args: xml_root: root element of the XML document output_loc: file or directory for saving the file filename: file name that will be used if `output_loc` is directory If it is needed and is not supplied, it will be generated
def write_xml_root(xml_root, output_loc=None, filename=None):
    if xml_root is None:
        raise Dump2PolarionException("No data to write.")
    filename_fin = _get_filename(output_loc=output_loc, filename=filename)
    et = etree.ElementTree(xml_root)
    et.write(filename_fin, xml_declaration=True, pretty_print=True,
             encoding="utf-8")
    logger.info("Data written to '%s'", filename_fin)
893,418
Yield files with their base directories, recursively or not. Yields: (base_directory, filename) tuples. Args: directory: base directory to start the search. pattern: fnmatch pattern for filenames. recursively: do we recurse or not?
def find_files(directory, pattern, recursively=True):
    for root, dirs, files in os.walk(directory):
        for basename in files:
            if fnmatch.fnmatch(basename, pattern):
                yield root, basename
        if not recursively:
            break
893,606
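A usage sketch printing full paths for non-recursive matches in the current directory:

import os

for base, name in find_files('.', '*.py', recursively=False):
    print(os.path.join(base, name))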
Find user with given username. Args: username: Username of the user to search for. Raises: ldap_tools.exceptions.NoUserFound: No users returned by LDAP ldap_tools.exceptions.TooManyResults: Multiple users returned by LDAP
def find(self, username):
    filter = ['(uid={})'.format(username)]
    results = self.client.search(filter)
    if len(results) < 1:
        raise ldap_tools.exceptions.NoUserFound(
            'User ({}) not found'.format(username))
        return  # pragma: no cover
    elif len(results) > 1:
        raise ldap_tools.exceptions.TooManyResults(
            'Multiple users found. Please narrow your search.')
        return  # pragma: no cover
    else:
        return results
893,800
Constructor. Args: locations: Can be either a string with sub-strings joined by the separation character or a list of strings, each giving a location. separation_char: Separation character in the location string. Raises: TypeError: if argument is not recognized as either a string, a list of strings or ``None``. Notes: Empty :class:`LocationDescriptor`s **are** allowed and empty locations are also allowed.
def __init__(self, locations=None, separation_char=os.sep):
    super(LocationDescriptor, self).__init__()
    self._separation_char = separation_char
    # type tests
    if isinstance(locations, list):
        self._locations_list = list(locations)
    elif isinstance(locations, str):  # 'unicode' was also accepted under Python 2
        self._locations_list = locations.split(self._separation_char)
    elif locations is None:
        self._locations_list = list()
    else:
        raise TypeError("Argument in constructor not recognized.")
893,938
Return a given sub location, 0-based. Args: nbr: 0-based index of the sub location. Returns: The sub location string at that index.
def sub_location(self, nbr):
    assert nbr > -1, "Sub location number must be greater or equal to 0!"
    assert nbr < self.nbr_of_sub_locations(), \
        "Sub location number must be lower than %d!" % self.nbr_of_sub_locations()
    return self._locations_list[nbr]
893,939
Return the internal location list. Args: lower_bound: Index of the first sub location to return (default 0). upper_bound: Index one past the last sub location to return (default: the end of the list). Returns: The corresponding slice of the internal location list.
def get_locations_list(self, lower_bound=0, upper_bound=None):
    real_upper_bound = upper_bound
    if upper_bound is None:
        real_upper_bound = self.nbr_of_sub_locations()
    try:
        return self._locations_list[lower_bound:real_upper_bound]
    except Exception:
        return list()
893,940
Create a **new** :class:`LocationDescriptor` object that is the sum of this one and another. Args: self: This :class:`LocationDescriptor` object. other: Another :class:`LocationDescriptor` object. Returns: Sum of both :class:`LocationDescriptor` objects.
def __add__(self, other):
    # sanity tests
    assert isinstance(other, LocationDescriptor), \
        "You can only add LocationDescriptor together."
    assert self._separation_char == other._separation_char, \
        "You can only add LocationDescriptor together " \
        "if they share the same separator character."
    new_location_string_list = self.get_locations_list() + other.get_locations_list()
    return LocationDescriptor(new_location_string_list)
893,941
**Extend** an existing :class:`LocationDescriptor` object by another. Args: self: This :class:`LocationDescriptor` object. other: Another :class:`LocationDescriptor` object. Returns: The updated :class:`LocationDescriptor` object itself.
def __iadd__(self, other):
    # sanity tests
    assert isinstance(other, LocationDescriptor), \
        "You can only add LocationDescriptor together."
    assert self._separation_char == other._separation_char, \
        "You can only add LocationDescriptor together " \
        "if they share the same separator character."
    self._locations_list.extend(other._locations_list)
    return self
893,942
Detect if another object is equal to this :class:`LocationDescriptor` object. Args: other: object to test.
def __eq__(self, other):
    if not isinstance(other, LocationDescriptor):
        return False
    nbr_of_sub_locations = self.nbr_of_sub_locations()
    if nbr_of_sub_locations != other.nbr_of_sub_locations():
        return False
    for i in range(nbr_of_sub_locations):
        if self._locations_list[i] != other._locations_list[i]:
            return False
    return True
893,943
String representation of :class:`LocationDescriptor` object. Args: other_separation_char: If needed, another separation character can be used. Returns: The sub locations joined by the separation character.
def to_string(self, other_separation_char=None):
    separation_char = self._separation_char
    if other_separation_char is not None:
        separation_char = other_separation_char
    return separation_char.join(self._locations_list)
893,944
Serialize structured data into a stream in JSON, YAML, or LHA format. If stream is None, return the produced string instead. Parameters: - fmt: should be 'json' (default), 'yaml', or 'lha' - stream: if None, return string
def dump(d, fmt='json', stream=None):
    if fmt == 'json':
        return _dump_json(d, stream=stream)
    elif fmt == 'yaml':
        return yaml.dump(d, stream)
    elif fmt == 'lha':
        s = _dump_lha(d)
        if stream is None:
            return s
        else:
            return stream.write(s)
893,974
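The YAML branch illustrates the shared convention here (return a string when stream is None, otherwise write to the stream); a runnable sketch with PyYAML alone:

import yaml

data = {'alpha': 0.1, 'beta': [1, 2, 3]}
print(yaml.dump(data))          # no stream: a string is returned
with open('out.yaml', 'w') as f:
    yaml.dump(data, f)          # stream given: written to the file instead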
Calculates the fraction of the correctly classified samples over all. Parameters: ----------- y : vector, shape (n_samples,) The target labels. y_pred : vector, shape (n_samples,) The predicted labels. Returns: -------- accuracy : float number, the fraction of the correctly classified samples over all
def accuracy_score(y, y_pred):
    y, y_pred = convert_assert(y, y_pred)
    return np.count_nonzero(y == y_pred) / y.size
894,046
True-positives Parameters: ----------- y : vector, shape (n_samples,) The target labels. y_pred : vector, shape (n_samples,) The predicted labels. Returns: -------- tp : integer, the number of true-positives
def true_positives(y, y_pred):
    y, y_pred = convert_assert(y, y_pred)
    assert_binary_problem(y)
    return np.count_nonzero(y_pred[y == 1] == 1)
894,047
False-positives Parameters: ----------- y : vector, shape (n_samples,) The target labels. y_pred : vector, shape (n_samples,) The predicted labels. Returns: -------- fp : integer, the number of false-positives
def false_positives(y, y_pred):
    y, y_pred = convert_assert(y, y_pred)
    assert_binary_problem(y)
    return np.count_nonzero(y_pred[y == 0] == 1)
894,048
True-negatives Parameters: ----------- y : vector, shape (n_samples,) The target labels. y_pred : vector, shape (n_samples,) The predicted labels. Returns: -------- tn : integer, the number of true-negatives
def true_negatives(y, y_pred):
    y, y_pred = convert_assert(y, y_pred)
    assert_binary_problem(y)
    return np.count_nonzero(y_pred[y == 0] == 0)
894,049
False-negatives Parameters: ----------- y : vector, shape (n_samples,) The target labels. y_pred : vector, shape (n_samples,) The predicted labels. Returns: -------- fn : integer, the number of false-negatives
def false_negatives(y, y_pred):
    y, y_pred = convert_assert(y, y_pred)
    assert_binary_problem(y)
    return np.count_nonzero(y_pred[y == 1] == 0)
894,050
Precision score precision = true_positives / (true_positives + false_positives) Parameters: ----------- y : vector, shape (n_samples,) The target labels. y_pred : vector, shape (n_samples,) The predicted labels. Returns: -------- precision : float
def precision(y, y_pred):
    tp = true_positives(y, y_pred)
    fp = false_positives(y, y_pred)
    return tp / (tp + fp)
894,051
Recall score recall = true_positives / (true_positives + false_negatives) Parameters: ----------- y : vector, shape (n_samples,) The target labels. y_pred : vector, shape (n_samples,) The predicted labels. Returns: -------- recall : float
def recall(y, y_pred):
    tp = true_positives(y, y_pred)
    fn = false_negatives(y, y_pred)
    return tp / (tp + fn)
894,052
F1 score f1_score = 2 * precision*recall / (precision + recall) Parameters: ----------- y : vector, shape (n_samples,) The target labels. y_pred : vector, shape (n_samples,) The predicted labels. Returns: -------- f1_score : float
def f1_score(y, y_pred):
    p = precision(y, y_pred)
    r = recall(y, y_pred)
    return 2 * p * r / (p + r)
894,053
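A small worked example tying the binary metrics together; the counts and scores below are computed by hand from the definitions above:

import numpy as np

y      = np.array([1, 0, 1, 1, 0])
y_pred = np.array([1, 0, 0, 1, 1])
# tp = 2, fp = 1, fn = 1
# precision = 2/3, recall = 2/3
# f1 = 2 * (2/3) * (2/3) / (4/3) = 2/3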
Extract request timeout from e.g. ``Retry-After`` header. Notes: Per :rfc:`2616#section-14.37`, the ``Retry-After`` header can be either an integer number of seconds or an HTTP date. This function can handle either. Arguments: http_date (:py:class:`str`): The date to parse. Returns: :py:class:`int`: The timeout, in seconds.
from datetime import datetime, timezone
from dateutil.parser import parse  # assumed source of 'parse'

def calculate_timeout(http_date):
    try:
        return int(http_date)
    except ValueError:
        date_after = parse(http_date)
        utc_now = datetime.now(tz=timezone.utc)
        return int((date_after - utc_now).total_seconds())
894,398
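A usage sketch covering both Retry-After forms; the HTTP-date result depends on the clock at call time, so it is illustrative only:

calculate_timeout('120')  # -> 120 (delta-seconds form)
# HTTP-date form: seconds from now until the given date
# (negative if the date is already in the past):
calculate_timeout('Wed, 21 Oct 2025 07:28:00 GMT')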
Node constructor Args: job: parents: children: ...
def __init__(self, job,
             parents=None,
             children=None,
             pre_script=None,
             pre_script_args=None,
             post_script=None,
             post_script_args=None,
             variables=None,  # VARS JobName macroname="string" [macroname="string"... ]
             priority=None,   # PRIORITY JobName PriorityValue
             category=None,   # CATEGORY JobName CategoryName
             retry=None,      # JobName NumberOfRetries [UNLESS-EXIT value]
             retry_unless_exit_value=None,
             pre_skip=None,   # JobName non-zero-exit-code
             abort_dag_on=None,  # JobName AbortExitValue [RETURN DAGReturnValue]
             abort_dag_on_return_value=None,
             dir=None,
             noop=None,
             done=None):
    self.job = job
    self._parent_nodes = parents or set()
    self._link_parent_nodes()
    self._child_nodes = children or set()
    self._link_child_nodes()
    self.pre_script = pre_script
    self.pre_script_args = pre_script_args
    self.post_script = post_script
    self.post_script_args = post_script_args
    self.vars = variables or dict()
    self.priority = priority
    self.category = category
    self.retry = retry
    self.retry_unless_exit_value = retry_unless_exit_value
    self.pre_skip = pre_skip
    self.abort_dag_on = abort_dag_on
    self.abort_dag_on_return_value = abort_dag_on_return_value
    self.dir = dir
    self.noop = noop
    self.done = done
894,412
Calculates the mean samples per class Parameters: ----------- X : array-like, shape (m, n) - the samples y : array-like, shape (m, ) - the class labels Returns: -------- mean_vectors : list of k arrays, each of shape (n, ) Those are the mean samples from each of the k classes.
def calculate_mean_vectors(X, y):
    return [np.mean(X[y == cl, :], axis=0) for cl in np.unique(y)]
894,465
Calculates the Within-Class Scatter matrix Parameters: ----------- X : array-like, shape (m, n) - the samples y : array-like, shape (m, ) - the class labels Returns: -------- within_class_scatter_matrix : array-like, shape (n, n)
def calculate_within_class_scatter_matrix(X, y):
    mean_vectors = calculate_mean_vectors(X, y)
    n_features = X.shape[1]
    Sw = np.zeros((n_features, n_features))
    for cl, m in zip(np.unique(y), mean_vectors):
        Si = np.zeros((n_features, n_features))
        m = m.reshape(n_features, 1)
        for x in X[y == cl, :]:
            v = x.reshape(n_features, 1) - m
            Si += v @ v.T
        Sw += Si
    return Sw
894,466
Calculates the Between-Class Scatter matrix Parameters: ----------- X : array-like, shape (m, n) - the samples y : array-like, shape (m, ) - the class labels Returns: -------- between_class_scatter_matrix : array-like, shape (n, n)
def calculate_between_class_scatter_matrix(X, y):
    mean_vectors = calculate_mean_vectors(X, y)
    n_features = X.shape[1]
    Sb = np.zeros((n_features, n_features))
    m = np.mean(X, axis=0).reshape(n_features, 1)
    for cl, m_i in zip(np.unique(y), mean_vectors):
        v = m_i.reshape(n_features, 1) - m
        Sb += X[y == cl, :].shape[0] * v @ v.T
    return Sb
894,467
Calculates the Variance-Covariance matrix Parameters: ----------- X : array-like, shape (m, n) - the data Returns: -------- variance_covariance_matrix : array-like, shape(n, n)
def calculate_covariance_matrix(X):
    n_features = X.shape[1]
    S = np.zeros((n_features, n_features))
    m = np.mean(X, axis=0).reshape(n_features, 1)
    for x in X:
        v = x.reshape(n_features, 1) - m
        S += v @ v.T
    return 1 / (X.shape[0] - 1) * S
894,468
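A quick numerical check against NumPy's built-in estimator, which uses the same 1/(m-1) normalisation by default:

import numpy as np

X = np.array([[1.0, 2.0],
              [3.0, 4.0],
              [5.0, 7.0]])
S = calculate_covariance_matrix(X)
assert np.allclose(S, np.cov(X, rowvar=False))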
Returns the specified parameters for the current preprocessor. Parameters: ----------- keys : variable sized list, containing the names of the requested parameters Returns: -------- values : list or dictionary, if any `keys` are specified those named parameters' values are returned, otherwise all parameters are returned as a dictionary
def get_params(self, *keys):
    if len(keys) == 0:
        return vars(self)
    else:
        return [vars(self)[k] for k in keys]
894,494
Sets new values to the specified parameters. Parameters: ----------- params : variable sized dictionary, n key-word arguments Example: ``` scaler.set_params(std=0.30) ``` Returns: -------- void : void, returns nothing
def set_params(self, **params):
    for k, v in params.items():
        vars(self)[k] = v
894,495
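A usage sketch on a toy preprocessor; the Scaler class is made up, and the two methods above are assumed available as plain functions that get attached to it:

class Scaler:
    def __init__(self):
        self.mean = 0.0
        self.std = 1.0

    get_params = get_params
    set_params = set_params

scaler = Scaler()
scaler.set_params(std=0.3)
assert scaler.get_params('std') == [0.3]
assert scaler.get_params() == {'mean': 0.0, 'std': 0.3}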
This function can be used to look up the EPSG spatial reference system using the web service available at: http://prj2epsg.org Args: wellKnownText (str): The Well Known Text definition of the spatial reference system. Returns: int: Spatial Reference ID
def lookupSpatialReferenceID(wellKnownText):
    payload = {'mode': 'wkt', 'terms': wellKnownText}

    try:
        r = requests.get('http://prj2epsg.org/search.json', params=payload)
    except requests.exceptions.ConnectionError:
        print("SRID Lookup Error: Could not automatically determine spatial "
              "reference ID, because there is no internet connection. "
              "Please check connection and try again.")
        exit(1)

    if r.status_code == 200:
        json = r.json()
        # return the first matching EPSG code
        for code in json['codes']:
            return code['code']
894,596