Dataset schema. Each row pairs a Python function with its docstring and
source metadata; string columns report min/max length, and `id` reports
its value range.

column            type        min    max
id                int32       0      252k
repo              string      7      55
path              string      4      127
func_name         string      1      88
original_string   string      75     19.8k
language          string      1 class (python)
code              string      51     19.8k
code_tokens       sequence    -      -
docstring         string      3      17.3k
docstring_tokens  sequence    -      -
sha               string      40     40
url               string      87     242
id: 247500
repo: PyFilesystem/pyfilesystem2
path: fs/walk.py
func_name: Walker._check_scan_dir
language: python
code:

    def _check_scan_dir(self, fs, path, info, depth):
        # type: (FS, Text, Info, int) -> bool
        """Check if a directory contents should be scanned."""
        if self.max_depth is not None and depth >= self.max_depth:
            return False
        return self.check_scan_dir(fs, path, info)

sha: 047f3593f297d1442194cda3da7a7335bcc9c14a
url: https://github.com/PyFilesystem/pyfilesystem2/blob/047f3593f297d1442194cda3da7a7335bcc9c14a/fs/walk.py#L228-L233
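A minimal usage sketch (assuming the pyfilesystem2 package is installed; the in-memory filesystem and directory names are illustrative). `max_depth` is the knob that `_check_scan_dir` enforces: once the walk reaches that depth, directories are no longer scanned.

    from fs import open_fs
    from fs.walk import Walker

    mem_fs = open_fs("mem://")        # throwaway in-memory filesystem
    mem_fs.makedirs("/a/b/c/d")

    # With max_depth=2, _check_scan_dir returns False at depth 2,
    # so nothing below '/a/b' is scanned or reported.
    walker = Walker(max_depth=2)
    print(list(walker.dirs(mem_fs)))  # expect ['/a', '/a/b']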
id: 247501
repo: PyFilesystem/pyfilesystem2
path: fs/walk.py
func_name: Walker.check_file
language: python
code:

    def check_file(self, fs, info):
        # type: (FS, Info) -> bool
        """Check if a filename should be included.

        Override to exclude files from the walk.

        Arguments:
            fs (FS): A filesystem instance.
            info (Info): A resource info object.

        Returns:
            bool: `True` if the file should be included.

        """
        if self.exclude is not None and fs.match(self.exclude, info.name):
            return False
        return fs.match(self.filter, info.name)

sha: 047f3593f297d1442194cda3da7a7335bcc9c14a
url: https://github.com/PyFilesystem/pyfilesystem2/blob/047f3593f297d1442194cda3da7a7335bcc9c14a/fs/walk.py#L254-L271
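The docstring invites subclassing, so here is a hedged sketch of a custom walker (the class name and the 'test_' convention are illustrative, not part of the library):

    from fs.walk import Walker

    class NoTestsWalker(Walker):
        """Walker that drops files whose names start with 'test_'."""

        def check_file(self, fs, info):
            if info.name.startswith("test_"):
                return False
            # Defer to the stock filter/exclude matching for the rest.
            return super(NoTestsWalker, self).check_file(fs, info)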
id: 247502
repo: PyFilesystem/pyfilesystem2
path: fs/walk.py
func_name: Walker._scan
language: python
code:

    def _scan(
        self,
        fs,  # type: FS
        dir_path,  # type: Text
        namespaces=None,  # type: Optional[Collection[Text]]
    ):
        # type: (...) -> Iterator[Info]
        """Get an iterator of `Info` objects for a directory path.

        Arguments:
            fs (FS): A filesystem instance.
            dir_path (str): A path to a directory on the filesystem.
            namespaces (list): A list of additional namespaces to
                include in the `Info` objects.

        Returns:
            ~collections.Iterator: iterator of `Info` objects for
                resources within the given path.

        """
        try:
            for info in fs.scandir(dir_path, namespaces=namespaces):
                yield info
        except FSError as error:
            if not self.on_error(dir_path, error):
                six.reraise(type(error), error)

sha: 047f3593f297d1442194cda3da7a7335bcc9c14a
url: https://github.com/PyFilesystem/pyfilesystem2/blob/047f3593f297d1442194cda3da7a7335bcc9c14a/fs/walk.py#L273-L298
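Because `_scan` consults `self.on_error` before re-raising, a walker can be made error-tolerant. A hedged sketch (the callback name is illustrative; `ignore_errors`/`on_error` are the keyword arguments documented on the walk methods later in this dump):

    from fs import open_fs
    from fs.walk import Walker

    def log_and_continue(path, error):
        # Returning True tells _scan to swallow the FSError;
        # returning False would make it re-raise.
        print("skipping %s: %r" % (path, error))
        return True

    walker = Walker(on_error=log_and_continue)
    with open_fs("mem://") as mem_fs:
        for path, info in walker.info(mem_fs):
            print(path)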
id: 247503
repo: PyFilesystem/pyfilesystem2
path: fs/walk.py
func_name: BoundWalker._make_walker
language: python
code:

    def _make_walker(self, *args, **kwargs):
        # type: (*Any, **Any) -> Walker
        """Create a walker instance."""
        walker = self.walker_class(*args, **kwargs)
        return walker

sha: 047f3593f297d1442194cda3da7a7335bcc9c14a
url: https://github.com/PyFilesystem/pyfilesystem2/blob/047f3593f297d1442194cda3da7a7335bcc9c14a/fs/walk.py#L529-L534
id: 247504
repo: PyFilesystem/pyfilesystem2
path: fs/walk.py
func_name: BoundWalker.dirs
language: python
code:

    def dirs(self, path="/", **kwargs):
        # type: (Text, **Any) -> Iterator[Text]
        """Walk a filesystem, yielding absolute paths to directories.

        Arguments:
            path (str): A path to a directory.

        Keyword Arguments:
            ignore_errors (bool): If `True`, any errors reading a
                directory will be ignored, otherwise exceptions will
                be raised.
            on_error (callable): If ``ignore_errors`` is `False`, then
                this callable will be invoked with a path and the
                exception object. It should return `True` to ignore
                the error, or `False` to re-raise it.
            search (str): If ``'breadth'`` then the directory will be
                walked *top down*. Set to ``'depth'`` to walk
                *bottom up*.
            filter_dirs (list, optional): A list of patterns that will
                be used to match directories paths. The walk will only
                open directories that match at least one of these
                patterns.
            exclude_dirs (list): A list of patterns that will be used
                to filter out directories from the walk, e.g.
                ``['*.svn', '*.git']``.
            max_depth (int, optional): Maximum directory depth to walk.

        Returns:
            ~collections.Iterator: an iterator over directory paths
                (absolute from the filesystem root).

        This method invokes `Walker.dirs` with the bound `FS` object.

        """
        walker = self._make_walker(**kwargs)
        return walker.dirs(self.fs, path=path)

sha: 047f3593f297d1442194cda3da7a7335bcc9c14a
url: https://github.com/PyFilesystem/pyfilesystem2/blob/047f3593f297d1442194cda3da7a7335bcc9c14a/fs/walk.py#L641-L674
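A hedged usage sketch: every `FS` object exposes this bound walker as `.walk`, and the keyword arguments are forwarded to the `Walker` constructor (the directory layout here is illustrative):

    from fs import open_fs

    with open_fs("mem://") as mem_fs:
        mem_fs.makedirs("/src/.git/objects")
        mem_fs.makedirs("/src/pkg")
        # Directories matching '*.git' are filtered out of the walk.
        for dir_path in mem_fs.walk.dirs(exclude_dirs=["*.git"]):
            print(dir_path)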
id: 247505
repo: PyFilesystem/pyfilesystem2
path: fs/walk.py
func_name: BoundWalker.info
language: python
code:

    def info(
        self,
        path="/",  # type: Text
        namespaces=None,  # type: Optional[Collection[Text]]
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterator[Tuple[Text, Info]]
        """Walk a filesystem, yielding path and `Info` of resources.

        Arguments:
            path (str): A path to a directory.
            namespaces (list, optional): A list of namespaces to
                include in the resource information, e.g.
                ``['basic', 'access']`` (defaults to ``['basic']``).

        Keyword Arguments:
            ignore_errors (bool): If `True`, any errors reading a
                directory will be ignored, otherwise exceptions will
                be raised.
            on_error (callable): If ``ignore_errors`` is `False`, then
                this callable will be invoked with a path and the
                exception object. It should return `True` to ignore
                the error, or `False` to re-raise it.
            search (str): If ``'breadth'`` then the directory will be
                walked *top down*. Set to ``'depth'`` to walk
                *bottom up*.
            filter (list): If supplied, this parameter should be a
                list of file name patterns, e.g. ``['*.py']``. Files
                will only be returned if the final component matches
                one of the patterns.
            exclude (list, optional): If supplied, this parameter
                should be a list of filename patterns, e.g.
                ``['~*', '.*']``. Files matching any of these patterns
                will be removed from the walk.
            filter_dirs (list, optional): A list of patterns that will
                be used to match directories paths. The walk will only
                open directories that match at least one of these
                patterns.
            exclude_dirs (list): A list of patterns that will be used
                to filter out directories from the walk, e.g.
                ``['*.svn', '*.git']``.
            max_depth (int, optional): Maximum directory depth to walk.

        Returns:
            ~collections.Iterable: an iterable yielding tuples of
                ``(<absolute path>, <resource info>)``.

        This method invokes `Walker.info` with the bound `FS` object.

        """
        walker = self._make_walker(**kwargs)
        return walker.info(self.fs, path=path, namespaces=namespaces)

sha: 047f3593f297d1442194cda3da7a7335bcc9c14a
url: https://github.com/PyFilesystem/pyfilesystem2/blob/047f3593f297d1442194cda3da7a7335bcc9c14a/fs/walk.py#L676-L724
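A companion sketch for `info` (again assuming pyfilesystem2 is installed; the 'details' namespace adds size/modified metadata to each `Info`):

    from fs import open_fs

    with open_fs("mem://") as mem_fs:
        mem_fs.makedirs("/data")
        mem_fs.writetext("/data/hello.txt", "hi")
        for path, info in mem_fs.walk.info(namespaces=["details"]):
            print(path, info.is_dir)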
id: 247506
repo: PyFilesystem/pyfilesystem2
path: fs/tools.py
func_name: remove_empty
language: python
code:

    def remove_empty(fs, path):
        # type: (FS, Text) -> None
        """Remove all empty parents.

        Arguments:
            fs (FS): A filesystem instance.
            path (str): Path to a directory on the filesystem.

        """
        path = abspath(normpath(path))
        try:
            while path not in ("", "/"):
                fs.removedir(path)
                path = dirname(path)
        except DirectoryNotEmpty:
            pass

sha: 047f3593f297d1442194cda3da7a7335bcc9c14a
url: https://github.com/PyFilesystem/pyfilesystem2/blob/047f3593f297d1442194cda3da7a7335bcc9c14a/fs/tools.py#L23-L38
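A hedged usage sketch (in-memory filesystem and paths are illustrative): `remove_empty` walks upward from the given path, deleting directories until one turns out to be non-empty or the root is reached.

    from fs import open_fs
    from fs.tools import remove_empty

    with open_fs("mem://") as mem_fs:
        mem_fs.makedirs("/a/b/c")
        remove_empty(mem_fs, "/a/b/c")
        print(mem_fs.exists("/a"))   # False: the whole empty chain is gone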
id: 247507
repo: PyFilesystem/pyfilesystem2
path: fs/tools.py
func_name: copy_file_data
language: python
code:

    def copy_file_data(src_file, dst_file, chunk_size=None):
        # type: (IO, IO, Optional[int]) -> None
        """Copy data from one file object to another.

        Arguments:
            src_file (io.IOBase): File open for reading.
            dst_file (io.IOBase): File open for writing.
            chunk_size (int): Number of bytes to copy at a time
                (or `None` to use sensible default).

        """
        _chunk_size = 1024 * 1024 if chunk_size is None else chunk_size
        read = src_file.read
        write = dst_file.write
        # The 'or None' is so that it works with binary and text files
        for chunk in iter(lambda: read(_chunk_size) or None, None):
            write(chunk)

sha: 047f3593f297d1442194cda3da7a7335bcc9c14a
url: https://github.com/PyFilesystem/pyfilesystem2/blob/047f3593f297d1442194cda3da7a7335bcc9c14a/fs/tools.py#L41-L57
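The function only needs two file-like objects, so it is easy to exercise with `io.BytesIO` (the sizes below are arbitrary):

    import io
    from fs.tools import copy_file_data

    src = io.BytesIO(b"x" * (3 * 1024 * 1024))
    dst = io.BytesIO()
    # Copies in 1 MiB chunks by default; chunk_size tunes it.
    copy_file_data(src, dst, chunk_size=64 * 1024)
    assert dst.getvalue() == src.getvalue()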
id: 247508
repo: PyFilesystem/pyfilesystem2
path: fs/tools.py
func_name: get_intermediate_dirs
language: python
code:

    def get_intermediate_dirs(fs, dir_path):
        # type: (FS, Text) -> List[Text]
        """Get a list of non-existing intermediate directories.

        Arguments:
            fs (FS): A filesystem instance.
            dir_path (str): A path to a new directory on the filesystem.

        Returns:
            list: A list of non-existing paths.

        Raises:
            ~fs.errors.DirectoryExpected: If a path component
                references a file and not a directory.

        """
        intermediates = []
        with fs.lock():
            for path in recursepath(abspath(dir_path), reverse=True):
                try:
                    resource = fs.getinfo(path)
                except ResourceNotFound:
                    intermediates.append(abspath(path))
                else:
                    if resource.is_dir:
                        break
                    raise errors.DirectoryExpected(dir_path)
        return intermediates[::-1][:-1]

sha: 047f3593f297d1442194cda3da7a7335bcc9c14a
url: https://github.com/PyFilesystem/pyfilesystem2/blob/047f3593f297d1442194cda3da7a7335bcc9c14a/fs/tools.py#L60-L87
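A hedged sketch of the behaviour (paths illustrative). Note the trailing `[:-1]`: the target directory itself is excluded, so only the missing parents are returned.

    from fs import open_fs
    from fs.tools import get_intermediate_dirs

    with open_fs("mem://") as mem_fs:
        mem_fs.makedirs("/a")
        # '/a' exists, '/a/b' does not, and '/a/b/c' is the target.
        print(get_intermediate_dirs(mem_fs, "/a/b/c"))   # ['/a/b']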
id: 247509
repo: soynatan/django-easy-audit
path: easyaudit/admin_helpers.py
func_name: prettify_json
language: python
code:

    def prettify_json(json_string):
        """Given a JSON string, it returns it as a safe formatted HTML"""
        try:
            data = json.loads(json_string)
            html = '<pre>' + json.dumps(data, sort_keys=True, indent=4) + '</pre>'
        except:
            html = json_string
        return mark_safe(html)

sha: 03e05bc94beb29fc3e4ff86e313a6fef4b766b4b
url: https://github.com/soynatan/django-easy-audit/blob/03e05bc94beb29fc3e4ff86e313a6fef4b766b4b/easyaudit/admin_helpers.py#L21-L29
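A standalone variant of the same idea without the Django dependency, as a hedged sketch: narrowing the bare `except` to `ValueError` (the base class of `json.JSONDecodeError`) keeps unrelated errors visible, where the original's bare `except` would silently swallow them.

    import json

    def prettify_json_sketch(json_string):
        """Pretty-print a JSON string as HTML, falling back to the input."""
        try:
            data = json.loads(json_string)
            return "<pre>" + json.dumps(data, sort_keys=True, indent=4) + "</pre>"
        except ValueError:
            return json_string

    print(prettify_json_sketch('{"b": 1, "a": 2}'))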
id: 247510
repo: soynatan/django-easy-audit
path: easyaudit/admin_helpers.py
func_name: EasyAuditModelAdmin.purge_objects
language: python
code:

    def purge_objects(self, request):
        """
        Removes all objects in this table.
        This action first displays a confirmation page;
        next, it deletes all objects and redirects back to the change list.
        """

        def truncate_table(model):
            if settings.TRUNCATE_TABLE_SQL_STATEMENT:
                from django.db import connection
                sql = settings.TRUNCATE_TABLE_SQL_STATEMENT.format(db_table=model._meta.db_table)
                cursor = connection.cursor()
                cursor.execute(sql)
            else:
                model.objects.all().delete()

        modeladmin = self
        opts = modeladmin.model._meta

        # Check that the user has delete permission for the actual model
        if not request.user.is_superuser:
            raise PermissionDenied
        if not modeladmin.has_delete_permission(request):
            raise PermissionDenied

        # If the user has already confirmed or cancelled the deletion,
        # (eventually) do the deletion and return to the change list view again.
        if request.method == 'POST':
            if 'btn-confirm' in request.POST:
                try:
                    n = modeladmin.model.objects.count()
                    truncate_table(modeladmin.model)
                    modeladmin.message_user(request, _("Successfully removed %d rows" % n), messages.SUCCESS);
                except Exception as e:
                    modeladmin.message_user(request, _(u'ERROR') + ': %r' % e, messages.ERROR)
            else:
                modeladmin.message_user(request, _("Action cancelled by user"), messages.SUCCESS);
            return HttpResponseRedirect(reverse('admin:%s_%s_changelist' % (opts.app_label, opts.model_name)))

        context = {
            "title": _("Purge all %s ... are you sure?") % opts.verbose_name_plural,
            "opts": opts,
            "app_label": opts.app_label,
        }
        # Display the confirmation page
        return render(
            request,
            'admin/easyaudit/purge_confirmation.html',
            context
        )

sha: 03e05bc94beb29fc3e4ff86e313a6fef4b766b4b
url: https://github.com/soynatan/django-easy-audit/blob/03e05bc94beb29fc3e4ff86e313a6fef4b766b4b/easyaudit/admin_helpers.py#L66-L116
id: 247511
repo: soynatan/django-easy-audit
path: easyaudit/settings.py
func_name: get_model_list
language: python
code:

    def get_model_list(class_list):
        """
        Receives a list of strings with app_name.model_name format
        and turns them into classes. If an item is already a class
        it ignores it.
        """
        for idx, item in enumerate(class_list):
            if isinstance(item, six.string_types):
                model_class = apps.get_model(item)
                class_list[idx] = model_class

sha: 03e05bc94beb29fc3e4ff86e313a6fef4b766b4b
url: https://github.com/soynatan/django-easy-audit/blob/03e05bc94beb29fc3e4ff86e313a6fef4b766b4b/easyaudit/settings.py#L15-L24
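A hedged usage sketch; it assumes a configured Django project (settings loaded, `django.setup()` done, the `auth` app installed). Note the function mutates the list in place rather than returning a new one:

    from easyaudit.settings import get_model_list

    watched = ["auth.User", "auth.Group"]
    get_model_list(watched)
    # watched now holds the User and Group model classes, not strings.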
id: 247512
repo: soynatan/django-easy-audit
path: easyaudit/signals/model_signals.py
func_name: should_audit
language: python
code:

    def should_audit(instance):
        """Returns True or False to indicate whether the instance
        should be audited or not, depending on the project settings."""

        # do not audit any model listed in UNREGISTERED_CLASSES
        for unregistered_class in UNREGISTERED_CLASSES:
            if isinstance(instance, unregistered_class):
                return False

        # only audit models listed in REGISTERED_CLASSES (if it's set)
        if len(REGISTERED_CLASSES) > 0:
            for registered_class in REGISTERED_CLASSES:
                if isinstance(instance, registered_class):
                    break
            else:
                return False

        # all good
        return True

sha: 03e05bc94beb29fc3e4ff86e313a6fef4b766b4b
url: https://github.com/soynatan/django-easy-audit/blob/03e05bc94beb29fc3e4ff86e313a6fef4b766b4b/easyaudit/signals/model_signals.py#L23-L41
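The `for ... else` in the middle is easy to misread: the `else` suite runs only when the loop finishes without hitting `break`. A minimal standalone illustration of the same pattern:

    def matches_any(instance, classes):
        """Mimics the for/else membership test used by should_audit."""
        for cls in classes:
            if isinstance(instance, cls):
                break          # found a match: the else suite is skipped
        else:
            return False       # loop exhausted without a break
        return True

    print(matches_any(3, [str, int]))    # True
    print(matches_any(3.0, [str, int]))  # False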
id: 247513
repo: soynatan/django-easy-audit
path: easyaudit/signals/model_signals.py
func_name: _m2m_rev_field_name
language: python
code:

    def _m2m_rev_field_name(model1, model2):
        """Gets the name of the reverse m2m accessor from `model1` to `model2`

        For example, if User has a ManyToManyField connected to Group,
        `_m2m_rev_field_name(Group, User)` retrieves the name of the field on
        Group that lists a group's Users. (By default, this field is called
        `user_set`, but the name can be overridden).
        """
        m2m_field_names = [
            rel.get_accessor_name() for rel in model1._meta.get_fields()
            if rel.many_to_many
            and rel.auto_created
            and rel.related_model == model2
        ]
        return m2m_field_names[0]

sha: 03e05bc94beb29fc3e4ff86e313a6fef4b766b4b
url: https://github.com/soynatan/django-easy-audit/blob/03e05bc94beb29fc3e4ff86e313a6fef4b766b4b/easyaudit/signals/model_signals.py#L174-L188
id: 247514
repo: wookayin/gpustat
path: gpustat/core.py
func_name: GPUStatCollection.new_query
language: python
code:

    def new_query():
        """Query the information of all the GPUs on local machine"""

        N.nvmlInit()

        def _decode(b):
            if isinstance(b, bytes):
                return b.decode()    # for python3, to unicode
            return b

        def get_gpu_info(handle):
            """Get one GPU information specified by nvml handle"""

            def get_process_info(nv_process):
                """Get the process information of specific pid"""
                process = {}
                ps_process = psutil.Process(pid=nv_process.pid)
                process['username'] = ps_process.username()
                # cmdline returns full path;
                # as in `ps -o comm`, get short cmdnames.
                _cmdline = ps_process.cmdline()
                if not _cmdline:
                    # sometimes, zombie or unknown (e.g. [kworker/8:2H])
                    process['command'] = '?'
                else:
                    process['command'] = os.path.basename(_cmdline[0])
                # Bytes to MBytes
                process['gpu_memory_usage'] = nv_process.usedGpuMemory // MB
                process['pid'] = nv_process.pid
                return process

            name = _decode(N.nvmlDeviceGetName(handle))
            uuid = _decode(N.nvmlDeviceGetUUID(handle))

            try:
                temperature = N.nvmlDeviceGetTemperature(
                    handle, N.NVML_TEMPERATURE_GPU
                )
            except N.NVMLError:
                temperature = None    # Not supported

            try:
                memory = N.nvmlDeviceGetMemoryInfo(handle)    # in Bytes
            except N.NVMLError:
                memory = None    # Not supported

            try:
                utilization = N.nvmlDeviceGetUtilizationRates(handle)
            except N.NVMLError:
                utilization = None    # Not supported

            try:
                power = N.nvmlDeviceGetPowerUsage(handle)
            except N.NVMLError:
                power = None

            try:
                power_limit = N.nvmlDeviceGetEnforcedPowerLimit(handle)
            except N.NVMLError:
                power_limit = None

            try:
                nv_comp_processes = \
                    N.nvmlDeviceGetComputeRunningProcesses(handle)
            except N.NVMLError:
                nv_comp_processes = None    # Not supported
            try:
                nv_graphics_processes = \
                    N.nvmlDeviceGetGraphicsRunningProcesses(handle)
            except N.NVMLError:
                nv_graphics_processes = None    # Not supported

            if nv_comp_processes is None and nv_graphics_processes is None:
                processes = None
            else:
                processes = []
                nv_comp_processes = nv_comp_processes or []
                nv_graphics_processes = nv_graphics_processes or []
                for nv_process in nv_comp_processes + nv_graphics_processes:
                    # TODO: could be more information such as system memory
                    # usage, CPU percentage, create time etc.
                    try:
                        process = get_process_info(nv_process)
                        processes.append(process)
                    except psutil.NoSuchProcess:
                        # TODO: add some reminder for NVML broken context
                        # e.g. nvidia-smi reset or reboot the system
                        pass

            index = N.nvmlDeviceGetIndex(handle)
            gpu_info = {
                'index': index,
                'uuid': uuid,
                'name': name,
                'temperature.gpu': temperature,
                'utilization.gpu': utilization.gpu if utilization else None,
                'power.draw': power // 1000 if power is not None else None,
                'enforced.power.limit': power_limit // 1000
                if power_limit is not None else None,
                # Convert bytes into MBytes
                'memory.used': memory.used // MB if memory else None,
                'memory.total': memory.total // MB if memory else None,
                'processes': processes,
            }
            return gpu_info

        # 1. get the list of gpu and status
        gpu_list = []
        device_count = N.nvmlDeviceGetCount()

        for index in range(device_count):
            handle = N.nvmlDeviceGetHandleByIndex(index)
            gpu_info = get_gpu_info(handle)
            gpu_stat = GPUStat(gpu_info)
            gpu_list.append(gpu_stat)

        # 2. additional info (driver version, etc).
        try:
            driver_version = _decode(N.nvmlSystemGetDriverVersion())
        except N.NVMLError:
            driver_version = None    # N/A

        N.nvmlShutdown()
        return GPUStatCollection(gpu_list, driver_version=driver_version)

sha: 28299cdcf55dd627fdd9800cf344988b43188ee8
url: https://github.com/wookayin/gpustat/blob/28299cdcf55dd627fdd9800cf344988b43188ee8/gpustat/core.py#L262-L385
id: 247515
repo: wookayin/gpustat
path: gpustat/__main__.py
func_name: print_gpustat
language: python
code:

    def print_gpustat(json=False, debug=False, **kwargs):
        '''
        Display the GPU query results into standard output.
        '''
        try:
            gpu_stats = GPUStatCollection.new_query()
        except Exception as e:
            sys.stderr.write('Error on querying NVIDIA devices.'
                             ' Use --debug flag for details\n')
            if debug:
                try:
                    import traceback
                    traceback.print_exc(file=sys.stderr)
                except Exception:
                    # NVMLError can't be processed by traceback:
                    # https://bugs.python.org/issue28603
                    # as a workaround, simply re-throw the exception
                    raise e
            sys.exit(1)

        if json:
            gpu_stats.print_json(sys.stdout)
        else:
            gpu_stats.print_formatted(sys.stdout, **kwargs)

sha: 28299cdcf55dd627fdd9800cf344988b43188ee8
url: https://github.com/wookayin/gpustat/blob/28299cdcf55dd627fdd9800cf344988b43188ee8/gpustat/__main__.py#L14-L37
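Programmatic usage mirrors what this CLI helper does internally (this requires an NVIDIA driver with NVML available; it will raise otherwise):

    import sys
    from gpustat.core import GPUStatCollection

    stats = GPUStatCollection.new_query()
    stats.print_formatted(sys.stdout)   # human-readable table
    stats.print_json(sys.stdout)        # machine-readable dump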
id: 247516
repo: westonplatter/fast_arrow
path: fast_arrow/resources/option.py
func_name: Option.fetch_list
language: python
code:

    def fetch_list(cls, client, ids):
        """
        fetch instruments by ids
        """
        results = []
        request_url = "https://api.robinhood.com/options/instruments/"

        for _ids in chunked_list(ids, 50):
            params = {"ids": ",".join(_ids)}
            data = client.get(request_url, params=params)
            partial_results = data["results"]

            while data["next"]:
                data = client.get(data["next"])
                partial_results.extend(data["results"])

            results.extend(partial_results)

        return results

sha: 514cbca4994f52a97222058167830a302e313d04
url: https://github.com/westonplatter/fast_arrow/blob/514cbca4994f52a97222058167830a302e313d04/fast_arrow/resources/option.py#L44-L62
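The chunk-then-paginate pattern in isolation, as a hedged sketch: `chunked_list` re-implements the project's helper of the same name, and `client` stands for any object whose `get` returns the `{"results": [...], "next": url-or-None}` shape used above.

    def chunked_list(items, size):
        """Yield successive fixed-size slices of a list."""
        for i in range(0, len(items), size):
            yield items[i:i + size]

    def fetch_all(client, url, ids, chunk_size=50):
        """Batch ids into query params, then follow 'next' links
        until the API reports no further page."""
        results = []
        for batch in chunked_list(list(ids), chunk_size):
            data = client.get(url, params={"ids": ",".join(batch)})
            results.extend(data["results"])
            while data["next"]:
                data = client.get(data["next"])
                results.extend(data["results"])
        return results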
id: 247517
repo: westonplatter/fast_arrow
path: fast_arrow/resources/option.py
func_name: Option.in_chain
language: python
code:

    def in_chain(cls, client, chain_id, expiration_dates=[]):
        """
        fetch all option instruments in an options chain
        - expiration_dates = optionally scope
        """
        request_url = "https://api.robinhood.com/options/instruments/"
        params = {
            "chain_id": chain_id,
            "expiration_dates": ",".join(expiration_dates)
        }
        data = client.get(request_url, params=params)
        results = data['results']

        while data['next']:
            data = client.get(data['next'])
            results.extend(data['results'])

        return results

sha: 514cbca4994f52a97222058167830a302e313d04
url: https://github.com/westonplatter/fast_arrow/blob/514cbca4994f52a97222058167830a302e313d04/fast_arrow/resources/option.py#L65-L83
247,518
westonplatter/fast_arrow
fast_arrow/option_strategies/iron_condor.py
IronCondor.generate_by_deltas
def generate_by_deltas(cls, options, width, put_inner_lte_delta, call_inner_lte_delta): """ totally just playing around ideas for the API. this IC sells - credit put spread - credit call spread the approach - set width for the wing spread (eg, 1, ie, 1 unit width spread) - set delta for inner leg of the put credit spread (eg, -0.2) - set delta for inner leg of the call credit spread (eg, 0.1) """ raise Exception("Not Implemented starting at the 0.3.0 release") # # put credit spread # put_options_unsorted = list( filter(lambda x: x['type'] == 'put', options)) put_options = cls.sort_by_strike_price(put_options_unsorted) deltas_as_strings = [x['delta'] for x in put_options] deltas = cls.strings_to_np_array(deltas_as_strings) put_inner_index = np.argmin(deltas >= put_inner_lte_delta) - 1 put_outer_index = put_inner_index - width put_inner_leg = cls.gen_leg( put_options[put_inner_index]["instrument"], "sell") put_outer_leg = cls.gen_leg( put_options[put_outer_index]["instrument"], "buy") # # call credit spread # call_options_unsorted = list( filter(lambda x: x['type'] == 'call', options)) call_options = cls.sort_by_strike_price(call_options_unsorted) deltas_as_strings = [x['delta'] for x in call_options] x = np.array(deltas_as_strings) deltas = x.astype(np.float) # because deep ITM call options have a delta that comes up as NaN, # but are approximately 0.99 or 1.0, I'm replacing Nan with 1.0 # so np.argmax is able to walk up the index until it finds # "call_inner_lte_delta" # @TODO change this so (put credit / call credit) spreads work the same where_are_NaNs = np.isnan(deltas) deltas[where_are_NaNs] = 1.0 call_inner_index = np.argmax(deltas <= call_inner_lte_delta) call_outer_index = call_inner_index + width call_inner_leg = cls.gen_leg( call_options[call_inner_index]["instrument"], "sell") call_outer_leg = cls.gen_leg( call_options[call_outer_index]["instrument"], "buy") legs = [put_outer_leg, put_inner_leg, call_inner_leg, call_outer_leg] # # price calcs # price = ( - Decimal(put_options[put_outer_index]['adjusted_mark_price']) + Decimal(put_options[put_inner_index]['adjusted_mark_price']) + Decimal(call_options[call_inner_index]['adjusted_mark_price']) - Decimal(call_options[call_outer_index]['adjusted_mark_price']) ) # # provide max bid ask spread diff # ic_options = [ put_options[put_outer_index], put_options[put_inner_index], call_options[call_inner_index], call_options[call_outer_index] ] max_bid_ask_spread = cls.max_bid_ask_spread(ic_options) return {"legs": legs, "price": price, "max_bid_ask_spread": max_bid_ask_spread}
python
def generate_by_deltas(cls, options, width, put_inner_lte_delta, call_inner_lte_delta): raise Exception("Not Implemented starting at the 0.3.0 release") # # put credit spread # put_options_unsorted = list( filter(lambda x: x['type'] == 'put', options)) put_options = cls.sort_by_strike_price(put_options_unsorted) deltas_as_strings = [x['delta'] for x in put_options] deltas = cls.strings_to_np_array(deltas_as_strings) put_inner_index = np.argmin(deltas >= put_inner_lte_delta) - 1 put_outer_index = put_inner_index - width put_inner_leg = cls.gen_leg( put_options[put_inner_index]["instrument"], "sell") put_outer_leg = cls.gen_leg( put_options[put_outer_index]["instrument"], "buy") # # call credit spread # call_options_unsorted = list( filter(lambda x: x['type'] == 'call', options)) call_options = cls.sort_by_strike_price(call_options_unsorted) deltas_as_strings = [x['delta'] for x in call_options] x = np.array(deltas_as_strings) deltas = x.astype(np.float) # because deep ITM call options have a delta that comes up as NaN, # but are approximately 0.99 or 1.0, I'm replacing Nan with 1.0 # so np.argmax is able to walk up the index until it finds # "call_inner_lte_delta" # @TODO change this so (put credit / call credit) spreads work the same where_are_NaNs = np.isnan(deltas) deltas[where_are_NaNs] = 1.0 call_inner_index = np.argmax(deltas <= call_inner_lte_delta) call_outer_index = call_inner_index + width call_inner_leg = cls.gen_leg( call_options[call_inner_index]["instrument"], "sell") call_outer_leg = cls.gen_leg( call_options[call_outer_index]["instrument"], "buy") legs = [put_outer_leg, put_inner_leg, call_inner_leg, call_outer_leg] # # price calcs # price = ( - Decimal(put_options[put_outer_index]['adjusted_mark_price']) + Decimal(put_options[put_inner_index]['adjusted_mark_price']) + Decimal(call_options[call_inner_index]['adjusted_mark_price']) - Decimal(call_options[call_outer_index]['adjusted_mark_price']) ) # # provide max bid ask spread diff # ic_options = [ put_options[put_outer_index], put_options[put_inner_index], call_options[call_inner_index], call_options[call_outer_index] ] max_bid_ask_spread = cls.max_bid_ask_spread(ic_options) return {"legs": legs, "price": price, "max_bid_ask_spread": max_bid_ask_spread}
[ "def", "generate_by_deltas", "(", "cls", ",", "options", ",", "width", ",", "put_inner_lte_delta", ",", "call_inner_lte_delta", ")", ":", "raise", "Exception", "(", "\"Not Implemented starting at the 0.3.0 release\"", ")", "#", "# put credit spread", "#", "put_options_unsorted", "=", "list", "(", "filter", "(", "lambda", "x", ":", "x", "[", "'type'", "]", "==", "'put'", ",", "options", ")", ")", "put_options", "=", "cls", ".", "sort_by_strike_price", "(", "put_options_unsorted", ")", "deltas_as_strings", "=", "[", "x", "[", "'delta'", "]", "for", "x", "in", "put_options", "]", "deltas", "=", "cls", ".", "strings_to_np_array", "(", "deltas_as_strings", ")", "put_inner_index", "=", "np", ".", "argmin", "(", "deltas", ">=", "put_inner_lte_delta", ")", "-", "1", "put_outer_index", "=", "put_inner_index", "-", "width", "put_inner_leg", "=", "cls", ".", "gen_leg", "(", "put_options", "[", "put_inner_index", "]", "[", "\"instrument\"", "]", ",", "\"sell\"", ")", "put_outer_leg", "=", "cls", ".", "gen_leg", "(", "put_options", "[", "put_outer_index", "]", "[", "\"instrument\"", "]", ",", "\"buy\"", ")", "#", "# call credit spread", "#", "call_options_unsorted", "=", "list", "(", "filter", "(", "lambda", "x", ":", "x", "[", "'type'", "]", "==", "'call'", ",", "options", ")", ")", "call_options", "=", "cls", ".", "sort_by_strike_price", "(", "call_options_unsorted", ")", "deltas_as_strings", "=", "[", "x", "[", "'delta'", "]", "for", "x", "in", "call_options", "]", "x", "=", "np", ".", "array", "(", "deltas_as_strings", ")", "deltas", "=", "x", ".", "astype", "(", "np", ".", "float", ")", "# because deep ITM call options have a delta that comes up as NaN,", "# but are approximately 0.99 or 1.0, I'm replacing Nan with 1.0", "# so np.argmax is able to walk up the index until it finds", "# \"call_inner_lte_delta\"", "# @TODO change this so (put credit / call credit) spreads work the same", "where_are_NaNs", "=", "np", ".", "isnan", "(", "deltas", ")", "deltas", "[", "where_are_NaNs", "]", "=", "1.0", "call_inner_index", "=", "np", ".", "argmax", "(", "deltas", "<=", "call_inner_lte_delta", ")", "call_outer_index", "=", "call_inner_index", "+", "width", "call_inner_leg", "=", "cls", ".", "gen_leg", "(", "call_options", "[", "call_inner_index", "]", "[", "\"instrument\"", "]", ",", "\"sell\"", ")", "call_outer_leg", "=", "cls", ".", "gen_leg", "(", "call_options", "[", "call_outer_index", "]", "[", "\"instrument\"", "]", ",", "\"buy\"", ")", "legs", "=", "[", "put_outer_leg", ",", "put_inner_leg", ",", "call_inner_leg", ",", "call_outer_leg", "]", "#", "# price calcs", "#", "price", "=", "(", "-", "Decimal", "(", "put_options", "[", "put_outer_index", "]", "[", "'adjusted_mark_price'", "]", ")", "+", "Decimal", "(", "put_options", "[", "put_inner_index", "]", "[", "'adjusted_mark_price'", "]", ")", "+", "Decimal", "(", "call_options", "[", "call_inner_index", "]", "[", "'adjusted_mark_price'", "]", ")", "-", "Decimal", "(", "call_options", "[", "call_outer_index", "]", "[", "'adjusted_mark_price'", "]", ")", ")", "#", "# provide max bid ask spread diff", "#", "ic_options", "=", "[", "put_options", "[", "put_outer_index", "]", ",", "put_options", "[", "put_inner_index", "]", ",", "call_options", "[", "call_inner_index", "]", ",", "call_options", "[", "call_outer_index", "]", "]", "max_bid_ask_spread", "=", "cls", ".", "max_bid_ask_spread", "(", "ic_options", ")", "return", "{", "\"legs\"", ":", "legs", ",", "\"price\"", ":", "price", ",", "\"max_bid_ask_spread\"", ":", "max_bid_ask_spread", "}" ]
totally just playing around with ideas for the API.

this IC sells
- credit put spread
- credit call spread

the approach
- set width for the wing spread (eg, 1, ie, 1 unit width spread)
- set delta for inner leg of the put credit spread (eg, -0.2)
- set delta for inner leg of the call credit spread (eg, 0.1)
[ "totally", "just", "playing", "around", "ideas", "for", "the", "API", "." ]
514cbca4994f52a97222058167830a302e313d04
https://github.com/westonplatter/fast_arrow/blob/514cbca4994f52a97222058167830a302e313d04/fast_arrow/option_strategies/iron_condor.py#L41-L127
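The record above is a good place to see the delta-selection step in isolation. Below is a self-contained sketch with made-up delta values (illustrative, not market data). Note that np.float, which the original passes to astype, was removed in NumPy 1.24, so plain float is used here; the NaN-to-1.0 substitution mirrors the comment in the source.

import numpy as np

# Toy deltas, sorted by ascending strike: put deltas fall toward -1,
# call deltas fall from ~1 (NaN when deep ITM) toward 0.
put_deltas = np.array(["-0.05", "-0.12", "-0.21", "-0.30", "-0.45"]).astype(float)
call_deltas = np.array(["nan", "0.85", "0.40", "0.22", "0.11", "0.04"]).astype(float)

put_inner_lte_delta = -0.2    # short put: last strike with delta >= -0.2
call_inner_lte_delta = 0.2    # short call: first strike with delta <= 0.2
width = 1                     # wings sit one strike further out

# False sorts before True, so argmin finds the first strike whose delta
# drops below the threshold; stepping back one lands on the short put.
put_inner = int(np.argmin(put_deltas >= put_inner_lte_delta)) - 1
put_outer = put_inner - width

# Deep ITM calls report delta as NaN; pinning NaN to 1.0 keeps the mask
# False there, so argmax walks up to the first delta <= threshold.
call_deltas[np.isnan(call_deltas)] = 1.0
call_inner = int(np.argmax(call_deltas <= call_inner_lte_delta))
call_outer = call_inner + width

print(put_outer, put_inner)    # 0 1 -> buy index 0, sell index 1
print(call_inner, call_outer)  # 4 5 -> sell index 4, buy index 5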
247,519
westonplatter/fast_arrow
fast_arrow/resources/option_chain.py
OptionChain.fetch
def fetch(cls, client, _id, symbol): """ fetch option chain for instrument """ url = "https://api.robinhood.com/options/chains/" params = { "equity_instrument_ids": _id, "state": "active", "tradability": "tradable" } data = client.get(url, params=params) def filter_func(x): return x["symbol"] == symbol results = list(filter(filter_func, data["results"])) return results[0]
python
def fetch(cls, client, _id, symbol): url = "https://api.robinhood.com/options/chains/" params = { "equity_instrument_ids": _id, "state": "active", "tradability": "tradable" } data = client.get(url, params=params) def filter_func(x): return x["symbol"] == symbol results = list(filter(filter_func, data["results"])) return results[0]
[ "def", "fetch", "(", "cls", ",", "client", ",", "_id", ",", "symbol", ")", ":", "url", "=", "\"https://api.robinhood.com/options/chains/\"", "params", "=", "{", "\"equity_instrument_ids\"", ":", "_id", ",", "\"state\"", ":", "\"active\"", ",", "\"tradability\"", ":", "\"tradable\"", "}", "data", "=", "client", ".", "get", "(", "url", ",", "params", "=", "params", ")", "def", "filter_func", "(", "x", ")", ":", "return", "x", "[", "\"symbol\"", "]", "==", "symbol", "results", "=", "list", "(", "filter", "(", "filter_func", ",", "data", "[", "\"results\"", "]", ")", ")", "return", "results", "[", "0", "]" ]
fetch option chain for instrument
[ "fetch", "option", "chain", "for", "instrument" ]
514cbca4994f52a97222058167830a302e313d04
https://github.com/westonplatter/fast_arrow/blob/514cbca4994f52a97222058167830a302e313d04/fast_arrow/resources/option_chain.py#L4-L19
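The results[0] at the end of fetch raises a bare IndexError when no chain matches the symbol. A sketch of the same request with an explicit miss case follows; fetch_chain_for_symbol is a hypothetical helper, not part of the library, and it assumes a client.get like the one in this repo.

def fetch_chain_for_symbol(client, _id, symbol):
    # Illustrative variant of OptionChain.fetch, same request and params.
    url = "https://api.robinhood.com/options/chains/"
    params = {
        "equity_instrument_ids": _id,
        "state": "active",
        "tradability": "tradable"
    }
    data = client.get(url, params=params)
    # next() with a default surfaces "no matching chain" explicitly
    # instead of an IndexError from results[0].
    chain = next((c for c in data["results"] if c["symbol"] == symbol), None)
    if chain is None:
        raise ValueError("no active option chain found for {0}".format(symbol))
    return chain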
247,520
westonplatter/fast_arrow
fast_arrow/client.py
Client.authenticate
def authenticate(self): ''' Authenticate using data in `options` ''' if "username" in self.options and "password" in self.options: self.login_oauth2( self.options["username"], self.options["password"], self.options.get('mfa_code')) elif "access_token" in self.options: if "refresh_token" in self.options: self.access_token = self.options["access_token"] self.refresh_token = self.options["refresh_token"] self.__set_account_info() else: self.authenticated = False return self.authenticated
python
def authenticate(self): ''' Authenticate using data in `options` ''' if "username" in self.options and "password" in self.options: self.login_oauth2( self.options["username"], self.options["password"], self.options.get('mfa_code')) elif "access_token" in self.options: if "refresh_token" in self.options: self.access_token = self.options["access_token"] self.refresh_token = self.options["refresh_token"] self.__set_account_info() else: self.authenticated = False return self.authenticated
[ "def", "authenticate", "(", "self", ")", ":", "if", "\"username\"", "in", "self", ".", "options", "and", "\"password\"", "in", "self", ".", "options", ":", "self", ".", "login_oauth2", "(", "self", ".", "options", "[", "\"username\"", "]", ",", "self", ".", "options", "[", "\"password\"", "]", ",", "self", ".", "options", ".", "get", "(", "'mfa_code'", ")", ")", "elif", "\"access_token\"", "in", "self", ".", "options", ":", "if", "\"refresh_token\"", "in", "self", ".", "options", ":", "self", ".", "access_token", "=", "self", ".", "options", "[", "\"access_token\"", "]", "self", ".", "refresh_token", "=", "self", ".", "options", "[", "\"refresh_token\"", "]", "self", ".", "__set_account_info", "(", ")", "else", ":", "self", ".", "authenticated", "=", "False", "return", "self", ".", "authenticated" ]
Authenticate using data in `options`
[ "Authenticate", "using", "data", "in", "options" ]
514cbca4994f52a97222058167830a302e313d04
https://github.com/westonplatter/fast_arrow/blob/514cbca4994f52a97222058167830a302e313d04/fast_arrow/client.py#L27-L43
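A minimal sketch of the two login paths authenticate() accepts, assuming the Client constructor stashes its keyword arguments in self.options (which is what the method reads). The credential values are placeholders.

from fast_arrow import Client

# Path 1: username/password, optionally with an MFA code.
client = Client(username="user@example.com",
                password="not-a-real-password",
                mfa_code="123456")
client.authenticate()

# Path 2: previously issued OAuth2 tokens. Note the method only uses
# access_token when refresh_token is present too; an access_token alone
# falls through without setting self.authenticated.
client = Client(access_token="<access token>",
                refresh_token="<refresh token>")
client.authenticate()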
247,521
westonplatter/fast_arrow
fast_arrow/client.py
Client.get
def get(self, url=None, params=None, retry=True): ''' Execute HTTP GET ''' headers = self._gen_headers(self.access_token, url) attempts = 1 while attempts <= HTTP_ATTEMPTS_MAX: try: res = requests.get(url, headers=headers, params=params, timeout=15, verify=self.certs) res.raise_for_status() return res.json() except requests.exceptions.RequestException as e: attempts += 1 if res.status_code in [400]: raise e elif retry and res.status_code in [403]: self.relogin_oauth2()
python
def get(self, url=None, params=None, retry=True): ''' Execute HTTP GET ''' headers = self._gen_headers(self.access_token, url) attempts = 1 while attempts <= HTTP_ATTEMPTS_MAX: try: res = requests.get(url, headers=headers, params=params, timeout=15, verify=self.certs) res.raise_for_status() return res.json() except requests.exceptions.RequestException as e: attempts += 1 if res.status_code in [400]: raise e elif retry and res.status_code in [403]: self.relogin_oauth2()
[ "def", "get", "(", "self", ",", "url", "=", "None", ",", "params", "=", "None", ",", "retry", "=", "True", ")", ":", "headers", "=", "self", ".", "_gen_headers", "(", "self", ".", "access_token", ",", "url", ")", "attempts", "=", "1", "while", "attempts", "<=", "HTTP_ATTEMPTS_MAX", ":", "try", ":", "res", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "headers", ",", "params", "=", "params", ",", "timeout", "=", "15", ",", "verify", "=", "self", ".", "certs", ")", "res", ".", "raise_for_status", "(", ")", "return", "res", ".", "json", "(", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "attempts", "+=", "1", "if", "res", ".", "status_code", "in", "[", "400", "]", ":", "raise", "e", "elif", "retry", "and", "res", ".", "status_code", "in", "[", "403", "]", ":", "self", ".", "relogin_oauth2", "(", ")" ]
Execute HTTP GET
[ "Execute", "HTTP", "GET" ]
514cbca4994f52a97222058167830a302e313d04
https://github.com/westonplatter/fast_arrow/blob/514cbca4994f52a97222058167830a302e313d04/fast_arrow/client.py#L45-L65
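Two edge cases in get() are worth noting: if requests.get itself raises (connection error or timeout), res is never bound and res.status_code in the except block becomes a NameError; and exhausting the loop falls through, returning None. A sketch of the same retry flow with those cases closed; this is a free function for illustration, not the library's API.

import requests

HTTP_ATTEMPTS_MAX = 2

def get_json(url, headers=None, params=None, relogin=None):
    # Illustrative rework of Client.get, not fast_arrow's implementation.
    last_error = None
    for _ in range(HTTP_ATTEMPTS_MAX):
        res = None
        try:
            res = requests.get(url, headers=headers, params=params, timeout=15)
            res.raise_for_status()
            return res.json()
        except requests.exceptions.RequestException as e:
            last_error = e
            # res stays None when the request never completed.
            status = res.status_code if res is not None else None
            if status == 400:
                raise            # a client error will not improve on retry
            if status == 403 and relogin is not None:
                relogin()        # token likely expired; refresh and retry
    raise last_error             # surface the failure instead of returning None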
247,522
westonplatter/fast_arrow
fast_arrow/client.py
Client._gen_headers
def _gen_headers(self, bearer, url):
    '''
    Generate headers, adding in Oauth2 bearer token if present
    '''
    headers = {
        "Accept": "*/*",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": ("en;q=1, fr;q=0.9, de;q=0.8, ja;q=0.7, " +
                            "nl;q=0.6, it;q=0.5"),
        "User-Agent": ("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) " +
                       "AppleWebKit/537.36 (KHTML, like Gecko) " +
                       "Chrome/68.0.3440.106 Safari/537.36"),
    }
    if bearer:
        headers["Authorization"] = "Bearer {0}".format(bearer)
    if url == "https://api.robinhood.com/options/orders/":
        headers["Content-Type"] = "application/json; charset=utf-8"
    return headers
python
def _gen_headers(self, bearer, url):
    '''
    Generate headers, adding in Oauth2 bearer token if present
    '''
    headers = {
        "Accept": "*/*",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": ("en;q=1, fr;q=0.9, de;q=0.8, ja;q=0.7, " +
                            "nl;q=0.6, it;q=0.5"),
        "User-Agent": ("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) " +
                       "AppleWebKit/537.36 (KHTML, like Gecko) " +
                       "Chrome/68.0.3440.106 Safari/537.36"),
    }
    if bearer:
        headers["Authorization"] = "Bearer {0}".format(bearer)
    if url == "https://api.robinhood.com/options/orders/":
        headers["Content-Type"] = "application/json; charset=utf-8"
    return headers
[ "def", "_gen_headers", "(", "self", ",", "bearer", ",", "url", ")", ":", "headers", "=", "{", "\"Accept\"", ":", "\"*/*\"", ",", "\"Accept-Encoding\"", ":", "\"gzip, deflate\"", ",", "\"Accept-Language\"", ":", "(", "\"en;q=1, fr;q=0.9, de;q=0.8, ja;q=0.7, \"", "+", "\"nl;q=0.6, it;q=0.5\"", ")", ",", "\"User-Agent\"", ":", "(", "\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) \"", "+", "\"AppleWebKit/537.36 (KHTML, like Gecko) \"", "+", "\"Chrome/68.0.3440.106 Safari/537.36\"", ")", ",", "}", "if", "bearer", ":", "headers", "[", "\"Authorization\"", "]", "=", "\"Bearer {0}\"", ".", "format", "(", "bearer", ")", "if", "url", "==", "\"https://api.robinhood.com/options/orders/\"", ":", "headers", "[", "\"Content-Type\"", "]", "=", "\"application/json; charset=utf-8\"", "return", "headers" ]
Generate headers, adding in Oauth2 bearer token if present
[ "Generate", "headders", "adding", "in", "Oauth2", "bearer", "token", "if", "present" ]
514cbca4994f52a97222058167830a302e313d04
https://github.com/westonplatter/fast_arrow/blob/514cbca4994f52a97222058167830a302e313d04/fast_arrow/client.py#L89-L107
247,523
westonplatter/fast_arrow
fast_arrow/client.py
Client.logout_oauth2
def logout_oauth2(self): ''' Logout for given Oauth2 bearer token ''' url = "https://api.robinhood.com/oauth2/revoke_token/" data = { "client_id": CLIENT_ID, "token": self.refresh_token, } res = self.post(url, payload=data) if res is None: self.account_id = None self.account_url = None self.access_token = None self.refresh_token = None self.mfa_code = None self.scope = None self.authenticated = False return True else: raise AuthenticationError("fast_arrow could not log out.")
python
def logout_oauth2(self): ''' Logout for given Oauth2 bearer token ''' url = "https://api.robinhood.com/oauth2/revoke_token/" data = { "client_id": CLIENT_ID, "token": self.refresh_token, } res = self.post(url, payload=data) if res is None: self.account_id = None self.account_url = None self.access_token = None self.refresh_token = None self.mfa_code = None self.scope = None self.authenticated = False return True else: raise AuthenticationError("fast_arrow could not log out.")
[ "def", "logout_oauth2", "(", "self", ")", ":", "url", "=", "\"https://api.robinhood.com/oauth2/revoke_token/\"", "data", "=", "{", "\"client_id\"", ":", "CLIENT_ID", ",", "\"token\"", ":", "self", ".", "refresh_token", ",", "}", "res", "=", "self", ".", "post", "(", "url", ",", "payload", "=", "data", ")", "if", "res", "is", "None", ":", "self", ".", "account_id", "=", "None", "self", ".", "account_url", "=", "None", "self", ".", "access_token", "=", "None", "self", ".", "refresh_token", "=", "None", "self", ".", "mfa_code", "=", "None", "self", ".", "scope", "=", "None", "self", ".", "authenticated", "=", "False", "return", "True", "else", ":", "raise", "AuthenticationError", "(", "\"fast_arrow could not log out.\"", ")" ]
Logout for given Oauth2 bearer token
[ "Logout", "for", "given", "Oauth2", "bearer", "token" ]
514cbca4994f52a97222058167830a302e313d04
https://github.com/westonplatter/fast_arrow/blob/514cbca4994f52a97222058167830a302e313d04/fast_arrow/client.py#L178-L198
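The revoke call above follows the OAuth2 token-revocation flow (RFC 7009): POST the refresh token along with the client id, and a successful revocation comes back with an empty body, which is why res is None is treated as the success case (assuming Client.post returns None for empty responses). Stripped down to raw requests, the same call looks roughly like this; revoke_token is an illustrative helper, not part of the library.

import requests

def revoke_token(client_id, refresh_token):
    # Illustrative sketch of the RFC 7009 revocation request above.
    res = requests.post(
        "https://api.robinhood.com/oauth2/revoke_token/",
        data={"client_id": client_id, "token": refresh_token},
        timeout=15,
    )
    res.raise_for_status()
    # RFC 7009: the server responds 200 with no body on success.
    return res.status_code == 200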
247,524
westonplatter/fast_arrow
fast_arrow/resources/stock.py
Stock.fetch
def fetch(cls, client, symbol): """ fetch data for stock """ assert(type(symbol) is str) url = ("https://api.robinhood.com/instruments/?symbol={0}". format(symbol)) data = client.get(url) return data["results"][0]
python
def fetch(cls, client, symbol): assert(type(symbol) is str) url = ("https://api.robinhood.com/instruments/?symbol={0}". format(symbol)) data = client.get(url) return data["results"][0]
[ "def", "fetch", "(", "cls", ",", "client", ",", "symbol", ")", ":", "assert", "(", "type", "(", "symbol", ")", "is", "str", ")", "url", "=", "(", "\"https://api.robinhood.com/instruments/?symbol={0}\"", ".", "format", "(", "symbol", ")", ")", "data", "=", "client", ".", "get", "(", "url", ")", "return", "data", "[", "\"results\"", "]", "[", "0", "]" ]
fetch data for stock
[ "fetch", "data", "for", "stock" ]
514cbca4994f52a97222058167830a302e313d04
https://github.com/westonplatter/fast_arrow/blob/514cbca4994f52a97222058167830a302e313d04/fast_arrow/resources/stock.py#L7-L16
247,525
westonplatter/fast_arrow
fast_arrow/option_strategies/vertical.py
Vertical.gen_df
def gen_df(cls, options, width, spread_type="call", spread_kind="buy"):
    """
    Generate Pandas DataFrame of Vertical

    :param options: list of option dicts.
    :param width: offset for spread. Must be integer.
    :param spread_type: call or put. defaults to "call".
    :param spread_kind: buy or sell. defaults to "buy".
    """
    assert type(width) is int
    assert spread_type in ["call", "put"]
    assert spread_kind in ["buy", "sell"]

    # get CALLs or PUTs
    options = list(filter(lambda x: x["type"] == spread_type, options))

    coef = (1 if spread_type == "put" else -1)
    shift = width * coef

    df = pd.DataFrame.from_dict(options)
    df['expiration_date'] = pd.to_datetime(
        df['expiration_date'], format="%Y-%m-%d")
    df['adjusted_mark_price'] = pd.to_numeric(df['adjusted_mark_price'])
    df['strike_price'] = pd.to_numeric(df['strike_price'])
    df.sort_values(["expiration_date", "strike_price"], inplace=True)

    for k, v in df.groupby("expiration_date"):
        sdf = v.shift(shift)
        df.loc[v.index, "strike_price_shifted"] = sdf["strike_price"]
        df.loc[v.index, "delta_shifted"] = sdf["delta"]
        df.loc[v.index, "volume_shifted"] = sdf["volume"]
        df.loc[v.index, "open_interest_shifted"] = sdf["open_interest"]
        df.loc[v.index, "instrument_shifted"] = sdf["instrument"]
        df.loc[v.index, "adjusted_mark_price_shift"] = \
            sdf["adjusted_mark_price"]
        if spread_kind == "sell":
            df.loc[v.index, "margin"] = \
                abs(sdf["strike_price"] - v["strike_price"])
        else:
            df.loc[v.index, "margin"] = 0.0
        if spread_kind == "buy":
            df.loc[v.index, "premium_adjusted_mark_price"] = (
                v["adjusted_mark_price"] - sdf["adjusted_mark_price"])
        elif spread_kind == "sell":
            df.loc[v.index, "premium_adjusted_mark_price"] = (
                sdf["adjusted_mark_price"] - v["adjusted_mark_price"])
    return df
python
def gen_df(cls, options, width, spread_type="call", spread_kind="buy"): assert type(width) is int assert spread_type in ["call", "put"] assert spread_kind in ["buy", "sell"] # get CALLs or PUTs options = list(filter(lambda x: x["type"] == spread_type, options)) coef = (1 if spread_type == "put" else -1) shift = width * coef df = pd.DataFrame.from_dict(options) df['expiration_date'] = pd.to_datetime( df['expiration_date'], format="%Y-%m-%d") df['adjusted_mark_price'] = pd.to_numeric(df['adjusted_mark_price']) df['strike_price'] = pd.to_numeric(df['strike_price']) df.sort_values(["expiration_date", "strike_price"], inplace=True) for k, v in df.groupby("expiration_date"): sdf = v.shift(shift) df.loc[v.index, "strike_price_shifted"] = sdf["strike_price"] df.loc[v.index, "delta_shifted"] = sdf["delta"] df.loc[v.index, "volume_shifted"] = sdf["volume"] df.loc[v.index, "open_interest_shifted"] = sdf["open_interest"] df.loc[v.index, "instrument_shifted"] = sdf["instrument"] df.loc[v.index, "adjusted_mark_price_shift"] = \ sdf["adjusted_mark_price"] if spread_kind == "sell": df.loc[v.index, "margin"] = \ abs(sdf["strike_price"] - v["strike_price"]) else: df.loc[v.index, "margin"] = 0.0 if spread_kind == "buy": df.loc[v.index, "premium_adjusted_mark_price"] = ( v["adjusted_mark_price"] - sdf["adjusted_mark_price"]) elif spread_kind == "sell": df.loc[v.index, "premium_adjusted_mark_price"] = ( sdf["adjusted_mark_price"] - v["adjusted_mark_price"]) return df
[ "def", "gen_df", "(", "cls", ",", "options", ",", "width", ",", "spread_type", "=", "\"call\"", ",", "spread_kind", "=", "\"buy\"", ")", ":", "assert", "type", "(", "width", ")", "is", "int", "assert", "spread_type", "in", "[", "\"call\"", ",", "\"put\"", "]", "assert", "spread_kind", "in", "[", "\"buy\"", ",", "\"sell\"", "]", "# get CALLs or PUTs", "options", "=", "list", "(", "filter", "(", "lambda", "x", ":", "x", "[", "\"type\"", "]", "==", "spread_type", ",", "options", ")", ")", "coef", "=", "(", "1", "if", "spread_type", "==", "\"put\"", "else", "-", "1", ")", "shift", "=", "width", "*", "coef", "df", "=", "pd", ".", "DataFrame", ".", "from_dict", "(", "options", ")", "df", "[", "'expiration_date'", "]", "=", "pd", ".", "to_datetime", "(", "df", "[", "'expiration_date'", "]", ",", "format", "=", "\"%Y-%m-%d\"", ")", "df", "[", "'adjusted_mark_price'", "]", "=", "pd", ".", "to_numeric", "(", "df", "[", "'adjusted_mark_price'", "]", ")", "df", "[", "'strike_price'", "]", "=", "pd", ".", "to_numeric", "(", "df", "[", "'strike_price'", "]", ")", "df", ".", "sort_values", "(", "[", "\"expiration_date\"", ",", "\"strike_price\"", "]", ",", "inplace", "=", "True", ")", "for", "k", ",", "v", "in", "df", ".", "groupby", "(", "\"expiration_date\"", ")", ":", "sdf", "=", "v", ".", "shift", "(", "shift", ")", "df", ".", "loc", "[", "v", ".", "index", ",", "\"strike_price_shifted\"", "]", "=", "sdf", "[", "\"strike_price\"", "]", "df", ".", "loc", "[", "v", ".", "index", ",", "\"delta_shifted\"", "]", "=", "sdf", "[", "\"delta\"", "]", "df", ".", "loc", "[", "v", ".", "index", ",", "\"volume_shifted\"", "]", "=", "sdf", "[", "\"volume\"", "]", "df", ".", "loc", "[", "v", ".", "index", ",", "\"open_interest_shifted\"", "]", "=", "sdf", "[", "\"open_interest\"", "]", "df", ".", "loc", "[", "v", ".", "index", ",", "\"instrument_shifted\"", "]", "=", "sdf", "[", "\"instrument\"", "]", "df", ".", "loc", "[", "v", ".", "index", ",", "\"adjusted_mark_price_shift\"", "]", "=", "sdf", "[", "\"adjusted_mark_price\"", "]", "if", "spread_kind", "==", "\"sell\"", ":", "df", ".", "loc", "[", "v", ".", "index", ",", "\"margin\"", "]", "=", "abs", "(", "sdf", "[", "\"strike_price\"", "]", "-", "v", "[", "\"strike_price\"", "]", ")", "else", ":", "df", ".", "loc", "[", "v", ".", "index", ",", "\"margin\"", "]", "=", "0.0", "if", "spread_kind", "==", "\"buy\"", ":", "df", ".", "loc", "[", "v", ".", "index", ",", "\"premium_adjusted_mark_price\"", "]", "=", "(", "v", "[", "\"adjusted_mark_price\"", "]", "-", "sdf", "[", "\"adjusted_mark_price\"", "]", ")", "elif", "spread_kind", "==", "\"sell\"", ":", "df", ".", "loc", "[", "v", ".", "index", ",", "\"premium_adjusted_mark_price\"", "]", "=", "(", "sdf", "[", "\"adjusted_mark_price\"", "]", "-", "v", "[", "\"adjusted_mark_price\"", "]", ")", "return", "df" ]
Generate Pandas DataFrame of Vertical

:param options: list of option dicts.
:param width: offset for spread. Must be integer.
:param spread_type: call or put. defaults to "call".
:param spread_kind: buy or sell. defaults to "buy".
[ "Generate", "Pandas", "Dataframe", "of", "Vertical" ]
514cbca4994f52a97222058167830a302e313d04
https://github.com/westonplatter/fast_arrow/blob/514cbca4994f52a97222058167830a302e313d04/fast_arrow/option_strategies/vertical.py#L7-L57
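The core trick in gen_df is the per-expiration shift: within each expiration group, shifting the frame by width rows (sign flipped between calls and puts) lines every option up with its spread partner width strikes away. A toy run with four call strikes and made-up prices:

import pandas as pd

df = pd.DataFrame({
    "expiration_date": ["2019-01-18"] * 4,
    "strike_price": [100.0, 101.0, 102.0, 103.0],
    "adjusted_mark_price": [3.10, 2.40, 1.80, 1.30],
})

width, coef = 1, -1          # calls pair upward, so coef = -1
shift = width * coef
for _, v in df.groupby("expiration_date"):
    sdf = v.shift(shift)     # row i now carries row i+width's values
    df.loc[v.index, "strike_price_shifted"] = sdf["strike_price"]
    # debit of a buy spread: long leg price minus the shifted short leg
    df.loc[v.index, "premium"] = v["adjusted_mark_price"] - sdf["adjusted_mark_price"]

print(df)
# Strike 100 pairs with 101 for a 0.70 debit, 101 with 102 for 0.60, and
# so on; the top strike has no partner, so its shifted columns are NaN.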
247,526
westonplatter/fast_arrow
fast_arrow/resources/stock_marketdata.py
StockMarketdata.quote_by_instruments
def quote_by_instruments(cls, client, ids): """ create instrument urls, fetch, return results """ base_url = "https://api.robinhood.com/instruments" id_urls = ["{}/{}/".format(base_url, _id) for _id in ids] return cls.quotes_by_instrument_urls(client, id_urls)
python
def quote_by_instruments(cls, client, ids): base_url = "https://api.robinhood.com/instruments" id_urls = ["{}/{}/".format(base_url, _id) for _id in ids] return cls.quotes_by_instrument_urls(client, id_urls)
[ "def", "quote_by_instruments", "(", "cls", ",", "client", ",", "ids", ")", ":", "base_url", "=", "\"https://api.robinhood.com/instruments\"", "id_urls", "=", "[", "\"{}/{}/\"", ".", "format", "(", "base_url", ",", "_id", ")", "for", "_id", "in", "ids", "]", "return", "cls", ".", "quotes_by_instrument_urls", "(", "client", ",", "id_urls", ")" ]
create instrument urls, fetch, return results
[ "create", "instrument", "urls", "fetch", "return", "results" ]
514cbca4994f52a97222058167830a302e313d04
https://github.com/westonplatter/fast_arrow/blob/514cbca4994f52a97222058167830a302e313d04/fast_arrow/resources/stock_marketdata.py#L13-L19
247,527
westonplatter/fast_arrow
fast_arrow/resources/stock_marketdata.py
StockMarketdata.quotes_by_instrument_urls
def quotes_by_instrument_urls(cls, client, urls): """ fetch and return results """ instruments = ",".join(urls) params = {"instruments": instruments} url = "https://api.robinhood.com/marketdata/quotes/" data = client.get(url, params=params) results = data["results"] while "next" in data and data["next"]: data = client.get(data["next"]) results.extend(data["results"]) return results
python
def quotes_by_instrument_urls(cls, client, urls): instruments = ",".join(urls) params = {"instruments": instruments} url = "https://api.robinhood.com/marketdata/quotes/" data = client.get(url, params=params) results = data["results"] while "next" in data and data["next"]: data = client.get(data["next"]) results.extend(data["results"]) return results
[ "def", "quotes_by_instrument_urls", "(", "cls", ",", "client", ",", "urls", ")", ":", "instruments", "=", "\",\"", ".", "join", "(", "urls", ")", "params", "=", "{", "\"instruments\"", ":", "instruments", "}", "url", "=", "\"https://api.robinhood.com/marketdata/quotes/\"", "data", "=", "client", ".", "get", "(", "url", ",", "params", "=", "params", ")", "results", "=", "data", "[", "\"results\"", "]", "while", "\"next\"", "in", "data", "and", "data", "[", "\"next\"", "]", ":", "data", "=", "client", ".", "get", "(", "data", "[", "\"next\"", "]", ")", "results", ".", "extend", "(", "data", "[", "\"results\"", "]", ")", "return", "results" ]
fetch and return results
[ "fetch", "and", "return", "results" ]
514cbca4994f52a97222058167830a302e313d04
https://github.com/westonplatter/fast_arrow/blob/514cbca4994f52a97222058167830a302e313d04/fast_arrow/resources/stock_marketdata.py#L22-L34
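The while loop above is the standard cursor walk for these paginated endpoints; the check `"next" in data and data["next"]` is equivalent to data.get("next"). As a reusable sketch, where get_json is any callable like the client.get in this repo (fetch_all_pages itself is a hypothetical helper):

def fetch_all_pages(get_json, url, params=None):
    # Illustrative cursor-pagination helper, not part of fast_arrow.
    data = get_json(url, params=params)
    results = list(data["results"])
    while data.get("next"):          # follow the cursor until it runs out
        data = get_json(data["next"])
        results.extend(data["results"])
    return results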
247,528
westonplatter/fast_arrow
fast_arrow/resources/option_position.py
OptionPosition.all
def all(cls, client, **kwargs): """ fetch all option positions """ max_date = kwargs['max_date'] if 'max_date' in kwargs else None max_fetches = \ kwargs['max_fetches'] if 'max_fetches' in kwargs else None url = 'https://api.robinhood.com/options/positions/' params = {} data = client.get(url, params=params) results = data["results"] if is_max_date_gt(max_date, results[-1]['updated_at'][0:10]): return results if max_fetches == 1: return results fetches = 1 while data["next"]: fetches = fetches + 1 data = client.get(data["next"]) results.extend(data["results"]) if is_max_date_gt(max_date, results[-1]['updated_at'][0:10]): return results if max_fetches and (fetches >= max_fetches): return results return results
python
def all(cls, client, **kwargs): max_date = kwargs['max_date'] if 'max_date' in kwargs else None max_fetches = \ kwargs['max_fetches'] if 'max_fetches' in kwargs else None url = 'https://api.robinhood.com/options/positions/' params = {} data = client.get(url, params=params) results = data["results"] if is_max_date_gt(max_date, results[-1]['updated_at'][0:10]): return results if max_fetches == 1: return results fetches = 1 while data["next"]: fetches = fetches + 1 data = client.get(data["next"]) results.extend(data["results"]) if is_max_date_gt(max_date, results[-1]['updated_at'][0:10]): return results if max_fetches and (fetches >= max_fetches): return results return results
[ "def", "all", "(", "cls", ",", "client", ",", "*", "*", "kwargs", ")", ":", "max_date", "=", "kwargs", "[", "'max_date'", "]", "if", "'max_date'", "in", "kwargs", "else", "None", "max_fetches", "=", "kwargs", "[", "'max_fetches'", "]", "if", "'max_fetches'", "in", "kwargs", "else", "None", "url", "=", "'https://api.robinhood.com/options/positions/'", "params", "=", "{", "}", "data", "=", "client", ".", "get", "(", "url", ",", "params", "=", "params", ")", "results", "=", "data", "[", "\"results\"", "]", "if", "is_max_date_gt", "(", "max_date", ",", "results", "[", "-", "1", "]", "[", "'updated_at'", "]", "[", "0", ":", "10", "]", ")", ":", "return", "results", "if", "max_fetches", "==", "1", ":", "return", "results", "fetches", "=", "1", "while", "data", "[", "\"next\"", "]", ":", "fetches", "=", "fetches", "+", "1", "data", "=", "client", ".", "get", "(", "data", "[", "\"next\"", "]", ")", "results", ".", "extend", "(", "data", "[", "\"results\"", "]", ")", "if", "is_max_date_gt", "(", "max_date", ",", "results", "[", "-", "1", "]", "[", "'updated_at'", "]", "[", "0", ":", "10", "]", ")", ":", "return", "results", "if", "max_fetches", "and", "(", "fetches", ">=", "max_fetches", ")", ":", "return", "results", "return", "results" ]
fetch all option positions
[ "fetch", "all", "option", "positions" ]
514cbca4994f52a97222058167830a302e313d04
https://github.com/westonplatter/fast_arrow/blob/514cbca4994f52a97222058167830a302e313d04/fast_arrow/resources/option_position.py#L10-L37
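all() layers two early exits on top of the same cursor walk: a date cutoff (is_max_date_gt compares the last record's updated_at date against max_date) and a page budget. A condensed sketch with the same stop conditions, assuming updated_at is an ISO timestamp so its first ten characters compare lexicographically as dates; fetch_positions is illustrative only.

def fetch_positions(get_json, url, max_date=None, max_fetches=None):
    # Illustrative rework of OptionPosition.all's early-exit pagination.
    data = get_json(url)
    results = list(data["results"])
    fetches = 1
    while data.get("next"):
        # Results arrive newest-first, so once the last record on a page
        # predates max_date, everything on later pages is older still.
        if max_date and results and results[-1]["updated_at"][:10] < max_date:
            break
        if max_fetches and fetches >= max_fetches:
            break
        data = get_json(data["next"])
        results.extend(data["results"])
        fetches += 1
    return results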
247,529
westonplatter/fast_arrow
fast_arrow/resources/option_position.py
OptionPosition.mergein_marketdata_list
def mergein_marketdata_list(cls, client, option_positions): """ Fetch and merge in Marketdata for each option position """ ids = cls._extract_ids(option_positions) mds = OptionMarketdata.quotes_by_instrument_ids(client, ids) results = [] for op in option_positions: # @TODO optimize this so it's better than O(n^2) md = [x for x in mds if x['instrument'] == op['option']][0] # there is no overlap in keys so this is fine merged_dict = dict(list(op.items()) + list(md.items())) results.append(merged_dict) return results
python
def mergein_marketdata_list(cls, client, option_positions): ids = cls._extract_ids(option_positions) mds = OptionMarketdata.quotes_by_instrument_ids(client, ids) results = [] for op in option_positions: # @TODO optimize this so it's better than O(n^2) md = [x for x in mds if x['instrument'] == op['option']][0] # there is no overlap in keys so this is fine merged_dict = dict(list(op.items()) + list(md.items())) results.append(merged_dict) return results
[ "def", "mergein_marketdata_list", "(", "cls", ",", "client", ",", "option_positions", ")", ":", "ids", "=", "cls", ".", "_extract_ids", "(", "option_positions", ")", "mds", "=", "OptionMarketdata", ".", "quotes_by_instrument_ids", "(", "client", ",", "ids", ")", "results", "=", "[", "]", "for", "op", "in", "option_positions", ":", "# @TODO optimize this so it's better than O(n^2)", "md", "=", "[", "x", "for", "x", "in", "mds", "if", "x", "[", "'instrument'", "]", "==", "op", "[", "'option'", "]", "]", "[", "0", "]", "# there is no overlap in keys so this is fine", "merged_dict", "=", "dict", "(", "list", "(", "op", ".", "items", "(", ")", ")", "+", "list", "(", "md", ".", "items", "(", ")", ")", ")", "results", ".", "append", "(", "merged_dict", ")", "return", "results" ]
Fetch and merge in Marketdata for each option position
[ "Fetch", "and", "merge", "in", "Marketdata", "for", "each", "option", "position" ]
514cbca4994f52a97222058167830a302e313d04
https://github.com/westonplatter/fast_arrow/blob/514cbca4994f52a97222058167830a302e313d04/fast_arrow/resources/option_position.py#L47-L61
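The @TODO above asks for better than O(n^2). Indexing the marketdata by instrument URL first makes each lookup O(1), so the merge becomes O(n) overall; and since the source comment notes the key sets do not overlap, a plain dict merge is safe. A sketch (merge_marketdata is a hypothetical helper):

def merge_marketdata(option_positions, marketdatas):
    # Illustrative O(n) version of the merge above.
    by_instrument = {md["instrument"]: md for md in marketdatas}
    merged = []
    for op in option_positions:
        md = by_instrument[op["option"]]   # O(1) instead of a list scan
        merged.append({**op, **md})        # key sets are disjoint
    return merged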
247,530
etingof/snmpsim
snmpsim/record/snmprec.py
SnmprecRecord.evaluateRawString
def evaluateRawString(self, escaped): """Evaluates raw Python string like `ast.literal_eval` does""" unescaped = [] hexdigit = None escape = False for char in escaped: number = ord(char) if hexdigit is not None: if hexdigit: number = (int(hexdigit, 16) << 4) + int(char, 16) hexdigit = None else: hexdigit = char continue if escape: escape = False try: number = self.ESCAPE_CHARS[number] except KeyError: if number == 120: hexdigit = '' continue raise ValueError('Unknown escape character %c' % char) elif number == 92: # '\' escape = True continue unescaped.append(number) return unescaped
python
def evaluateRawString(self, escaped): unescaped = [] hexdigit = None escape = False for char in escaped: number = ord(char) if hexdigit is not None: if hexdigit: number = (int(hexdigit, 16) << 4) + int(char, 16) hexdigit = None else: hexdigit = char continue if escape: escape = False try: number = self.ESCAPE_CHARS[number] except KeyError: if number == 120: hexdigit = '' continue raise ValueError('Unknown escape character %c' % char) elif number == 92: # '\' escape = True continue unescaped.append(number) return unescaped
[ "def", "evaluateRawString", "(", "self", ",", "escaped", ")", ":", "unescaped", "=", "[", "]", "hexdigit", "=", "None", "escape", "=", "False", "for", "char", "in", "escaped", ":", "number", "=", "ord", "(", "char", ")", "if", "hexdigit", "is", "not", "None", ":", "if", "hexdigit", ":", "number", "=", "(", "int", "(", "hexdigit", ",", "16", ")", "<<", "4", ")", "+", "int", "(", "char", ",", "16", ")", "hexdigit", "=", "None", "else", ":", "hexdigit", "=", "char", "continue", "if", "escape", ":", "escape", "=", "False", "try", ":", "number", "=", "self", ".", "ESCAPE_CHARS", "[", "number", "]", "except", "KeyError", ":", "if", "number", "==", "120", ":", "hexdigit", "=", "''", "continue", "raise", "ValueError", "(", "'Unknown escape character %c'", "%", "char", ")", "elif", "number", "==", "92", ":", "# '\\'", "escape", "=", "True", "continue", "unescaped", ".", "append", "(", "number", ")", "return", "unescaped" ]
Evaluates raw Python string like `ast.literal_eval` does
[ "Evaluates", "raw", "Python", "string", "like", "ast", ".", "literal_eval", "does" ]
c6a2c2c6db39620dcdd4f60c79cd545962a1493a
https://github.com/etingof/snmpsim/blob/c6a2c2c6db39620dcdd4f60c79cd545962a1493a/snmpsim/record/snmprec.py#L42-L80
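The scanner above is a small state machine: a backslash arms the escape state, a following x arms a two-digit hex collector, and every other character is emitted as a raw byte value. A standalone version with a minimal escape table follows; the real ESCAPE_CHARS lives on SnmprecRecord, and this subset is assumed for illustration only.

ESCAPE_CHARS = {ord("n"): 10, ord("t"): 9, ord("\\"): 92}  # assumed subset

def unescape(escaped):
    unescaped, hexdigit, escape = [], None, False
    for char in escaped:
        number = ord(char)
        if hexdigit is not None:           # inside a \xHH escape
            if hexdigit:                   # second digit: combine and emit
                number = (int(hexdigit, 16) << 4) + int(char, 16)
                hexdigit = None
            else:                          # first digit: buffer it
                hexdigit = char
                continue
        elif escape:
            escape = False
            try:
                number = ESCAPE_CHARS[number]
            except KeyError:
                if number == 120:          # 'x' opens a hex escape
                    hexdigit = ""
                    continue
                raise ValueError("Unknown escape character %c" % char)
        elif number == 92:                 # '\' arms the escape state
            escape = True
            continue
        unescaped.append(number)
    return unescaped

print(unescape("a\\n\\x41"))  # [97, 10, 65], i.e. 'a', newline, 'A'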
247,531
basler/pypylon
scripts/builddoxy2swig/doxy2swig/doxy2swig.py
Doxy2SWIG.subnode_parse
def subnode_parse(self, node, pieces=None, indent=0, ignore=[], restrict=None): """Parse the subnodes of a given node. Subnodes with tags in the `ignore` list are ignored. If pieces is given, use this as target for the parse results instead of self.pieces. Indent all lines by the amount given in `indent`. Note that the initial content in `pieces` is not indented. The final result is in any case added to self.pieces.""" if pieces is not None: old_pieces, self.pieces = self.pieces, pieces else: old_pieces = [] if type(indent) is int: indent = indent * ' ' if len(indent) > 0: pieces = ''.join(self.pieces) i_piece = pieces[:len(indent)] if self.pieces[-1:] == ['']: self.pieces = [pieces[len(indent):]] + [''] elif self.pieces != []: self.pieces = [pieces[len(indent):]] self.indent += len(indent) for n in node.childNodes: if restrict is not None: if n.nodeType == n.ELEMENT_NODE and n.tagName in restrict: self.parse(n) elif n.nodeType != n.ELEMENT_NODE or n.tagName not in ignore: self.parse(n) if len(indent) > 0: self.pieces = shift(self.pieces, indent, i_piece) self.indent -= len(indent) old_pieces.extend(self.pieces) self.pieces = old_pieces
python
def subnode_parse(self, node, pieces=None, indent=0, ignore=[], restrict=None): if pieces is not None: old_pieces, self.pieces = self.pieces, pieces else: old_pieces = [] if type(indent) is int: indent = indent * ' ' if len(indent) > 0: pieces = ''.join(self.pieces) i_piece = pieces[:len(indent)] if self.pieces[-1:] == ['']: self.pieces = [pieces[len(indent):]] + [''] elif self.pieces != []: self.pieces = [pieces[len(indent):]] self.indent += len(indent) for n in node.childNodes: if restrict is not None: if n.nodeType == n.ELEMENT_NODE and n.tagName in restrict: self.parse(n) elif n.nodeType != n.ELEMENT_NODE or n.tagName not in ignore: self.parse(n) if len(indent) > 0: self.pieces = shift(self.pieces, indent, i_piece) self.indent -= len(indent) old_pieces.extend(self.pieces) self.pieces = old_pieces
[ "def", "subnode_parse", "(", "self", ",", "node", ",", "pieces", "=", "None", ",", "indent", "=", "0", ",", "ignore", "=", "[", "]", ",", "restrict", "=", "None", ")", ":", "if", "pieces", "is", "not", "None", ":", "old_pieces", ",", "self", ".", "pieces", "=", "self", ".", "pieces", ",", "pieces", "else", ":", "old_pieces", "=", "[", "]", "if", "type", "(", "indent", ")", "is", "int", ":", "indent", "=", "indent", "*", "' '", "if", "len", "(", "indent", ")", ">", "0", ":", "pieces", "=", "''", ".", "join", "(", "self", ".", "pieces", ")", "i_piece", "=", "pieces", "[", ":", "len", "(", "indent", ")", "]", "if", "self", ".", "pieces", "[", "-", "1", ":", "]", "==", "[", "''", "]", ":", "self", ".", "pieces", "=", "[", "pieces", "[", "len", "(", "indent", ")", ":", "]", "]", "+", "[", "''", "]", "elif", "self", ".", "pieces", "!=", "[", "]", ":", "self", ".", "pieces", "=", "[", "pieces", "[", "len", "(", "indent", ")", ":", "]", "]", "self", ".", "indent", "+=", "len", "(", "indent", ")", "for", "n", "in", "node", ".", "childNodes", ":", "if", "restrict", "is", "not", "None", ":", "if", "n", ".", "nodeType", "==", "n", ".", "ELEMENT_NODE", "and", "n", ".", "tagName", "in", "restrict", ":", "self", ".", "parse", "(", "n", ")", "elif", "n", ".", "nodeType", "!=", "n", ".", "ELEMENT_NODE", "or", "n", ".", "tagName", "not", "in", "ignore", ":", "self", ".", "parse", "(", "n", ")", "if", "len", "(", "indent", ")", ">", "0", ":", "self", ".", "pieces", "=", "shift", "(", "self", ".", "pieces", ",", "indent", ",", "i_piece", ")", "self", ".", "indent", "-=", "len", "(", "indent", ")", "old_pieces", ".", "extend", "(", "self", ".", "pieces", ")", "self", ".", "pieces", "=", "old_pieces" ]
Parse the subnodes of a given node. Subnodes with tags in the `ignore` list are ignored. If pieces is given, use this as target for the parse results instead of self.pieces. Indent all lines by the amount given in `indent`. Note that the initial content in `pieces` is not indented. The final result is in any case added to self.pieces.
[ "Parse", "the", "subnodes", "of", "a", "given", "node", ".", "Subnodes", "with", "tags", "in", "the", "ignore", "list", "are", "ignored", ".", "If", "pieces", "is", "given", "use", "this", "as", "target", "for", "the", "parse", "results", "instead", "of", "self", ".", "pieces", ".", "Indent", "all", "lines", "by", "the", "amount", "given", "in", "indent", ".", "Note", "that", "the", "initial", "content", "in", "pieces", "is", "not", "indented", ".", "The", "final", "result", "is", "in", "any", "case", "added", "to", "self", ".", "pieces", "." ]
d3510fa419b1c2b17f3f0b80a5fbb720c7b84008
https://github.com/basler/pypylon/blob/d3510fa419b1c2b17f3f0b80a5fbb720c7b84008/scripts/builddoxy2swig/doxy2swig/doxy2swig.py#L224-L254
247,532
basler/pypylon
scripts/builddoxy2swig/doxy2swig/doxy2swig.py
Doxy2SWIG.surround_parse
def surround_parse(self, node, pre_char, post_char):
    """Parse the subnodes of a given node. Prepend `pre_char` and
    append `post_char` to the output in self.pieces."""
    self.add_text(pre_char)
    self.subnode_parse(node)
    self.add_text(post_char)
python
def surround_parse(self, node, pre_char, post_char): self.add_text(pre_char) self.subnode_parse(node) self.add_text(post_char)
[ "def", "surround_parse", "(", "self", ",", "node", ",", "pre_char", ",", "post_char", ")", ":", "self", ".", "add_text", "(", "pre_char", ")", "self", ".", "subnode_parse", "(", "node", ")", "self", ".", "add_text", "(", "post_char", ")" ]
Parse the subnodes of a given node. Prepend `pre_char` and append `post_char` to the output in self.pieces.
[ "Parse", "the", "subnodes", "of", "a", "given", "node", ".", "Subnodes", "with", "tags", "in", "the", "ignore", "list", "are", "ignored", ".", "Prepend", "pre_char", "and", "append", "post_char", "to", "the", "output", "in", "self", ".", "pieces", "." ]
d3510fa419b1c2b17f3f0b80a5fbb720c7b84008
https://github.com/basler/pypylon/blob/d3510fa419b1c2b17f3f0b80a5fbb720c7b84008/scripts/builddoxy2swig/doxy2swig/doxy2swig.py#L256-L262
247,533
basler/pypylon
scripts/builddoxy2swig/doxy2swig/doxy2swig.py
Doxy2SWIG.get_specific_subnodes
def get_specific_subnodes(self, node, name, recursive=0): """Given a node and a name, return a list of child `ELEMENT_NODEs`, that have a `tagName` matching the `name`. Search recursively for `recursive` levels. """ children = [x for x in node.childNodes if x.nodeType == x.ELEMENT_NODE] ret = [x for x in children if x.tagName == name] if recursive > 0: for x in children: ret.extend(self.get_specific_subnodes(x, name, recursive-1)) return ret
python
def get_specific_subnodes(self, node, name, recursive=0): children = [x for x in node.childNodes if x.nodeType == x.ELEMENT_NODE] ret = [x for x in children if x.tagName == name] if recursive > 0: for x in children: ret.extend(self.get_specific_subnodes(x, name, recursive-1)) return ret
[ "def", "get_specific_subnodes", "(", "self", ",", "node", ",", "name", ",", "recursive", "=", "0", ")", ":", "children", "=", "[", "x", "for", "x", "in", "node", ".", "childNodes", "if", "x", ".", "nodeType", "==", "x", ".", "ELEMENT_NODE", "]", "ret", "=", "[", "x", "for", "x", "in", "children", "if", "x", ".", "tagName", "==", "name", "]", "if", "recursive", ">", "0", ":", "for", "x", "in", "children", ":", "ret", ".", "extend", "(", "self", ".", "get_specific_subnodes", "(", "x", ",", "name", ",", "recursive", "-", "1", ")", ")", "return", "ret" ]
Given a node and a name, return a list of child `ELEMENT_NODEs`, that have a `tagName` matching the `name`. Search recursively for `recursive` levels.
[ "Given", "a", "node", "and", "a", "name", "return", "a", "list", "of", "child", "ELEMENT_NODEs", "that", "have", "a", "tagName", "matching", "the", "name", ".", "Search", "recursively", "for", "recursive", "levels", "." ]
d3510fa419b1c2b17f3f0b80a5fbb720c7b84008
https://github.com/basler/pypylon/blob/d3510fa419b1c2b17f3f0b80a5fbb720c7b84008/scripts/builddoxy2swig/doxy2swig/doxy2swig.py#L265-L275
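A quick minidom check of how the recursive parameter bounds the search depth. This is a standalone copy of the method body, since Doxy2SWIG itself expects a full Doxygen XML tree; the XML snippet is made up for illustration.

from xml.dom import minidom

def get_specific_subnodes(node, name, recursive=0):
    # Same logic as the method above, lifted out of the class.
    children = [x for x in node.childNodes if x.nodeType == x.ELEMENT_NODE]
    ret = [x for x in children if x.tagName == name]
    if recursive > 0:
        for x in children:
            ret.extend(get_specific_subnodes(x, name, recursive - 1))
    return ret

root = minidom.parseString("<a><p/><b><p/><b><p/></b></b></a>").documentElement
print(len(get_specific_subnodes(root, "p")))               # 1: direct children only
print(len(get_specific_subnodes(root, "p", recursive=2)))  # 3: two levels down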
247,534
basler/pypylon
scripts/builddoxy2swig/doxy2swig/doxy2swig.py
Doxy2SWIG.get_specific_nodes
def get_specific_nodes(self, node, names): """Given a node and a sequence of strings in `names`, return a dictionary containing the names as keys and child `ELEMENT_NODEs`, that have a `tagName` equal to the name. """ nodes = [(x.tagName, x) for x in node.childNodes if x.nodeType == x.ELEMENT_NODE and x.tagName in names] return dict(nodes)
python
def get_specific_nodes(self, node, names): nodes = [(x.tagName, x) for x in node.childNodes if x.nodeType == x.ELEMENT_NODE and x.tagName in names] return dict(nodes)
[ "def", "get_specific_nodes", "(", "self", ",", "node", ",", "names", ")", ":", "nodes", "=", "[", "(", "x", ".", "tagName", ",", "x", ")", "for", "x", "in", "node", ".", "childNodes", "if", "x", ".", "nodeType", "==", "x", ".", "ELEMENT_NODE", "and", "x", ".", "tagName", "in", "names", "]", "return", "dict", "(", "nodes", ")" ]
Given a node and a sequence of strings in `names`, return a dictionary containing the names as keys and child `ELEMENT_NODEs`, that have a `tagName` equal to the name.
[ "Given", "a", "node", "and", "a", "sequence", "of", "strings", "in", "names", "return", "a", "dictionary", "containing", "the", "names", "as", "keys", "and", "child", "ELEMENT_NODEs", "that", "have", "a", "tagName", "equal", "to", "the", "name", "." ]
d3510fa419b1c2b17f3f0b80a5fbb720c7b84008
https://github.com/basler/pypylon/blob/d3510fa419b1c2b17f3f0b80a5fbb720c7b84008/scripts/builddoxy2swig/doxy2swig/doxy2swig.py#L277-L286
247,535
basler/pypylon
scripts/builddoxy2swig/doxy2swig/doxy2swig.py
Doxy2SWIG.add_text
def add_text(self, value): """Adds text corresponding to `value` into `self.pieces`.""" if isinstance(value, (list, tuple)): self.pieces.extend(value) else: self.pieces.append(value)
python
def add_text(self, value): if isinstance(value, (list, tuple)): self.pieces.extend(value) else: self.pieces.append(value)
[ "def", "add_text", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", ":", "self", ".", "pieces", ".", "extend", "(", "value", ")", "else", ":", "self", ".", "pieces", ".", "append", "(", "value", ")" ]
Adds text corresponding to `value` into `self.pieces`.
[ "Adds", "text", "corresponding", "to", "value", "into", "self", ".", "pieces", "." ]
d3510fa419b1c2b17f3f0b80a5fbb720c7b84008
https://github.com/basler/pypylon/blob/d3510fa419b1c2b17f3f0b80a5fbb720c7b84008/scripts/builddoxy2swig/doxy2swig/doxy2swig.py#L288-L293
247,536
basler/pypylon
scripts/builddoxy2swig/doxy2swig/doxy2swig.py
Doxy2SWIG.start_new_paragraph
def start_new_paragraph(self): """Make sure to create an empty line. This is overridden, if the previous text ends with the special marker ''. In that case, nothing is done. """ if self.pieces[-1:] == ['']: # respect special marker return elif self.pieces == []: # first paragraph, add '\n', override with '' self.pieces = ['\n'] elif self.pieces[-1][-1:] != '\n': # previous line not ended self.pieces.extend([' \n' ,'\n']) else: #default self.pieces.append('\n')
python
def start_new_paragraph(self): if self.pieces[-1:] == ['']: # respect special marker return elif self.pieces == []: # first paragraph, add '\n', override with '' self.pieces = ['\n'] elif self.pieces[-1][-1:] != '\n': # previous line not ended self.pieces.extend([' \n' ,'\n']) else: #default self.pieces.append('\n')
[ "def", "start_new_paragraph", "(", "self", ")", ":", "if", "self", ".", "pieces", "[", "-", "1", ":", "]", "==", "[", "''", "]", ":", "# respect special marker", "return", "elif", "self", ".", "pieces", "==", "[", "]", ":", "# first paragraph, add '\\n', override with ''", "self", ".", "pieces", "=", "[", "'\\n'", "]", "elif", "self", ".", "pieces", "[", "-", "1", "]", "[", "-", "1", ":", "]", "!=", "'\\n'", ":", "# previous line not ended", "self", ".", "pieces", ".", "extend", "(", "[", "' \\n'", ",", "'\\n'", "]", ")", "else", ":", "#default", "self", ".", "pieces", ".", "append", "(", "'\\n'", ")" ]
Make sure to create an empty line. This is overridden, if the previous text ends with the special marker ''. In that case, nothing is done.
[ "Make", "sure", "to", "create", "an", "empty", "line", ".", "This", "is", "overridden", "if", "the", "previous", "text", "ends", "with", "the", "special", "marker", ".", "In", "that", "case", "nothing", "is", "done", "." ]
d3510fa419b1c2b17f3f0b80a5fbb720c7b84008
https://github.com/basler/pypylon/blob/d3510fa419b1c2b17f3f0b80a5fbb720c7b84008/scripts/builddoxy2swig/doxy2swig/doxy2swig.py#L295-L306
247,537
basler/pypylon
scripts/builddoxy2swig/doxy2swig/doxy2swig.py
Doxy2SWIG.add_line_with_subsequent_indent
def add_line_with_subsequent_indent(self, line, indent=4): """Add line of text and wrap such that subsequent lines are indented by `indent` spaces. """ if isinstance(line, (list, tuple)): line = ''.join(line) line = line.strip() width = self.textwidth-self.indent-indent wrapped_lines = textwrap.wrap(line[indent:], width=width) for i in range(len(wrapped_lines)): if wrapped_lines[i] != '': wrapped_lines[i] = indent * ' ' + wrapped_lines[i] self.pieces.append(line[:indent] + '\n'.join(wrapped_lines)[indent:] + ' \n')
python
def add_line_with_subsequent_indent(self, line, indent=4): if isinstance(line, (list, tuple)): line = ''.join(line) line = line.strip() width = self.textwidth-self.indent-indent wrapped_lines = textwrap.wrap(line[indent:], width=width) for i in range(len(wrapped_lines)): if wrapped_lines[i] != '': wrapped_lines[i] = indent * ' ' + wrapped_lines[i] self.pieces.append(line[:indent] + '\n'.join(wrapped_lines)[indent:] + ' \n')
[ "def", "add_line_with_subsequent_indent", "(", "self", ",", "line", ",", "indent", "=", "4", ")", ":", "if", "isinstance", "(", "line", ",", "(", "list", ",", "tuple", ")", ")", ":", "line", "=", "''", ".", "join", "(", "line", ")", "line", "=", "line", ".", "strip", "(", ")", "width", "=", "self", ".", "textwidth", "-", "self", ".", "indent", "-", "indent", "wrapped_lines", "=", "textwrap", ".", "wrap", "(", "line", "[", "indent", ":", "]", ",", "width", "=", "width", ")", "for", "i", "in", "range", "(", "len", "(", "wrapped_lines", ")", ")", ":", "if", "wrapped_lines", "[", "i", "]", "!=", "''", ":", "wrapped_lines", "[", "i", "]", "=", "indent", "*", "' '", "+", "wrapped_lines", "[", "i", "]", "self", ".", "pieces", ".", "append", "(", "line", "[", ":", "indent", "]", "+", "'\\n'", ".", "join", "(", "wrapped_lines", ")", "[", "indent", ":", "]", "+", "' \\n'", ")" ]
Add line of text and wrap such that subsequent lines are indented by `indent` spaces.
[ "Add", "line", "of", "text", "and", "wrap", "such", "that", "subsequent", "lines", "are", "indented", "by", "indent", "spaces", "." ]
d3510fa419b1c2b17f3f0b80a5fbb720c7b84008
https://github.com/basler/pypylon/blob/d3510fa419b1c2b17f3f0b80a5fbb720c7b84008/scripts/builddoxy2swig/doxy2swig/doxy2swig.py#L308-L320
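The wrapping trick above is easier to see in isolation: textwrap wraps the tail of the line to the available width, every wrapped line is pushed right by indent spaces, and the final join splices the original first indent characters back onto the front so only the continuation lines hang. A toy run with arbitrary widths (self.indent taken as 0):

import textwrap

line = "* a long bullet item that will certainly not fit on one output line"
indent, textwidth = 4, 40

wrapped = textwrap.wrap(line[indent:], width=textwidth - indent)
wrapped = [indent * " " + w for w in wrapped if w != ""]
print(line[:indent] + "\n".join(wrapped)[indent:])
# * a long bullet item that will certainly
#     not fit on one output line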
247,538
basler/pypylon
scripts/builddoxy2swig/doxy2swig/doxy2swig.py
Doxy2SWIG.extract_text
def extract_text(self, node): """Return the string representation of the node or list of nodes by parsing the subnodes, but returning the result as a string instead of adding it to `self.pieces`. Note that this allows extracting text even if the node is in the ignore list. """ if not isinstance(node, (list, tuple)): node = [node] pieces, self.pieces = self.pieces, [''] for n in node: for sn in n.childNodes: self.parse(sn) ret = ''.join(self.pieces) self.pieces = pieces return ret
python
def extract_text(self, node): if not isinstance(node, (list, tuple)): node = [node] pieces, self.pieces = self.pieces, [''] for n in node: for sn in n.childNodes: self.parse(sn) ret = ''.join(self.pieces) self.pieces = pieces return ret
[ "def", "extract_text", "(", "self", ",", "node", ")", ":", "if", "not", "isinstance", "(", "node", ",", "(", "list", ",", "tuple", ")", ")", ":", "node", "=", "[", "node", "]", "pieces", ",", "self", ".", "pieces", "=", "self", ".", "pieces", ",", "[", "''", "]", "for", "n", "in", "node", ":", "for", "sn", "in", "n", ".", "childNodes", ":", "self", ".", "parse", "(", "sn", ")", "ret", "=", "''", ".", "join", "(", "self", ".", "pieces", ")", "self", ".", "pieces", "=", "pieces", "return", "ret" ]
Return the string representation of the node or list of nodes by parsing the subnodes, but returning the result as a string instead of adding it to `self.pieces`. Note that this allows extracting text even if the node is in the ignore list.
[ "Return", "the", "string", "representation", "of", "the", "node", "or", "list", "of", "nodes", "by", "parsing", "the", "subnodes", "but", "returning", "the", "result", "as", "a", "string", "instead", "of", "adding", "it", "to", "self", ".", "pieces", ".", "Note", "that", "this", "allows", "extracting", "text", "even", "if", "the", "node", "is", "in", "the", "ignore", "list", "." ]
d3510fa419b1c2b17f3f0b80a5fbb720c7b84008
https://github.com/basler/pypylon/blob/d3510fa419b1c2b17f3f0b80a5fbb720c7b84008/scripts/builddoxy2swig/doxy2swig/doxy2swig.py#L322-L335
247,539
basler/pypylon
scripts/builddoxy2swig/doxy2swig/doxy2swig.py
Doxy2SWIG.get_function_signature
def get_function_signature(self, node): """Returns the function signature string for memberdef nodes.""" name = self.extract_text(self.get_specific_subnodes(node, 'name')) if self.with_type_info: argsstring = self.extract_text(self.get_specific_subnodes(node, 'argsstring')) else: argsstring = [] param_id = 1 for n_param in self.get_specific_subnodes(node, 'param'): declname = self.extract_text(self.get_specific_subnodes(n_param, 'declname')) if not declname: declname = 'arg' + str(param_id) defval = self.extract_text(self.get_specific_subnodes(n_param, 'defval')) if defval: defval = '=' + defval argsstring.append(declname + defval) param_id = param_id + 1 argsstring = '(' + ', '.join(argsstring) + ')' type = self.extract_text(self.get_specific_subnodes(node, 'type')) function_definition = name + argsstring if type != '' and type != 'void': function_definition = function_definition + ' -> ' + type return '`' + function_definition + '` '
python
def get_function_signature(self, node): name = self.extract_text(self.get_specific_subnodes(node, 'name')) if self.with_type_info: argsstring = self.extract_text(self.get_specific_subnodes(node, 'argsstring')) else: argsstring = [] param_id = 1 for n_param in self.get_specific_subnodes(node, 'param'): declname = self.extract_text(self.get_specific_subnodes(n_param, 'declname')) if not declname: declname = 'arg' + str(param_id) defval = self.extract_text(self.get_specific_subnodes(n_param, 'defval')) if defval: defval = '=' + defval argsstring.append(declname + defval) param_id = param_id + 1 argsstring = '(' + ', '.join(argsstring) + ')' type = self.extract_text(self.get_specific_subnodes(node, 'type')) function_definition = name + argsstring if type != '' and type != 'void': function_definition = function_definition + ' -> ' + type return '`' + function_definition + '` '
[ "def", "get_function_signature", "(", "self", ",", "node", ")", ":", "name", "=", "self", ".", "extract_text", "(", "self", ".", "get_specific_subnodes", "(", "node", ",", "'name'", ")", ")", "if", "self", ".", "with_type_info", ":", "argsstring", "=", "self", ".", "extract_text", "(", "self", ".", "get_specific_subnodes", "(", "node", ",", "'argsstring'", ")", ")", "else", ":", "argsstring", "=", "[", "]", "param_id", "=", "1", "for", "n_param", "in", "self", ".", "get_specific_subnodes", "(", "node", ",", "'param'", ")", ":", "declname", "=", "self", ".", "extract_text", "(", "self", ".", "get_specific_subnodes", "(", "n_param", ",", "'declname'", ")", ")", "if", "not", "declname", ":", "declname", "=", "'arg'", "+", "str", "(", "param_id", ")", "defval", "=", "self", ".", "extract_text", "(", "self", ".", "get_specific_subnodes", "(", "n_param", ",", "'defval'", ")", ")", "if", "defval", ":", "defval", "=", "'='", "+", "defval", "argsstring", ".", "append", "(", "declname", "+", "defval", ")", "param_id", "=", "param_id", "+", "1", "argsstring", "=", "'('", "+", "', '", ".", "join", "(", "argsstring", ")", "+", "')'", "type", "=", "self", ".", "extract_text", "(", "self", ".", "get_specific_subnodes", "(", "node", ",", "'type'", ")", ")", "function_definition", "=", "name", "+", "argsstring", "if", "type", "!=", "''", "and", "type", "!=", "'void'", ":", "function_definition", "=", "function_definition", "+", "' -> '", "+", "type", "return", "'`'", "+", "function_definition", "+", "'` '" ]
Returns the function signature string for memberdef nodes.
[ "Returns", "the", "function", "signature", "string", "for", "memberdef", "nodes", "." ]
d3510fa419b1c2b17f3f0b80a5fbb720c7b84008
https://github.com/basler/pypylon/blob/d3510fa419b1c2b17f3f0b80a5fbb720c7b84008/scripts/builddoxy2swig/doxy2swig/doxy2swig.py#L337-L359
247,540
basler/pypylon
scripts/builddoxy2swig/doxy2swig/doxy2swig.py
Doxy2SWIG.handle_typical_memberdefs_no_overload
def handle_typical_memberdefs_no_overload(self, signature, memberdef_nodes): """Produce standard documentation for memberdef_nodes.""" for n in memberdef_nodes: self.add_text(['\n', '%feature("docstring") ', signature, ' "', '\n']) if self.with_function_signature: self.add_line_with_subsequent_indent(self.get_function_signature(n)) self.subnode_parse(n, pieces=[], ignore=['definition', 'name']) self.add_text(['";', '\n'])
python
def handle_typical_memberdefs_no_overload(self, signature, memberdef_nodes): for n in memberdef_nodes: self.add_text(['\n', '%feature("docstring") ', signature, ' "', '\n']) if self.with_function_signature: self.add_line_with_subsequent_indent(self.get_function_signature(n)) self.subnode_parse(n, pieces=[], ignore=['definition', 'name']) self.add_text(['";', '\n'])
[ "def", "handle_typical_memberdefs_no_overload", "(", "self", ",", "signature", ",", "memberdef_nodes", ")", ":", "for", "n", "in", "memberdef_nodes", ":", "self", ".", "add_text", "(", "[", "'\\n'", ",", "'%feature(\"docstring\") '", ",", "signature", ",", "' \"'", ",", "'\\n'", "]", ")", "if", "self", ".", "with_function_signature", ":", "self", ".", "add_line_with_subsequent_indent", "(", "self", ".", "get_function_signature", "(", "n", ")", ")", "self", ".", "subnode_parse", "(", "n", ",", "pieces", "=", "[", "]", ",", "ignore", "=", "[", "'definition'", ",", "'name'", "]", ")", "self", ".", "add_text", "(", "[", "'\";'", ",", "'\\n'", "]", ")" ]
Produce standard documentation for memberdef_nodes.
[ "Produce", "standard", "documentation", "for", "memberdef_nodes", "." ]
d3510fa419b1c2b17f3f0b80a5fbb720c7b84008
https://github.com/basler/pypylon/blob/d3510fa419b1c2b17f3f0b80a5fbb720c7b84008/scripts/builddoxy2swig/doxy2swig/doxy2swig.py#L431-L438
247,541
basler/pypylon
scripts/builddoxy2swig/doxy2swig/doxy2swig.py
Doxy2SWIG.handle_typical_memberdefs
def handle_typical_memberdefs(self, signature, memberdef_nodes): """Produces docstring entries containing an "Overloaded function" section with the documentation for each overload, if the function is overloaded and self.with_overloaded_functions is set. Else, produce normal documentation. """ if len(memberdef_nodes) == 1 or not self.with_overloaded_functions: self.handle_typical_memberdefs_no_overload(signature, memberdef_nodes) return self.add_text(['\n', '%feature("docstring") ', signature, ' "', '\n']) if self.with_function_signature: for n in memberdef_nodes: self.add_line_with_subsequent_indent(self.get_function_signature(n)) self.add_text('\n') self.add_text(['Overloaded function', '\n', '-------------------']) for n in memberdef_nodes: self.add_text('\n') self.add_line_with_subsequent_indent('* ' + self.get_function_signature(n)) self.subnode_parse(n, pieces=[], indent=4, ignore=['definition', 'name']) self.add_text(['";', '\n'])
python
def handle_typical_memberdefs(self, signature, memberdef_nodes): if len(memberdef_nodes) == 1 or not self.with_overloaded_functions: self.handle_typical_memberdefs_no_overload(signature, memberdef_nodes) return self.add_text(['\n', '%feature("docstring") ', signature, ' "', '\n']) if self.with_function_signature: for n in memberdef_nodes: self.add_line_with_subsequent_indent(self.get_function_signature(n)) self.add_text('\n') self.add_text(['Overloaded function', '\n', '-------------------']) for n in memberdef_nodes: self.add_text('\n') self.add_line_with_subsequent_indent('* ' + self.get_function_signature(n)) self.subnode_parse(n, pieces=[], indent=4, ignore=['definition', 'name']) self.add_text(['";', '\n'])
[ "def", "handle_typical_memberdefs", "(", "self", ",", "signature", ",", "memberdef_nodes", ")", ":", "if", "len", "(", "memberdef_nodes", ")", "==", "1", "or", "not", "self", ".", "with_overloaded_functions", ":", "self", ".", "handle_typical_memberdefs_no_overload", "(", "signature", ",", "memberdef_nodes", ")", "return", "self", ".", "add_text", "(", "[", "'\\n'", ",", "'%feature(\"docstring\") '", ",", "signature", ",", "' \"'", ",", "'\\n'", "]", ")", "if", "self", ".", "with_function_signature", ":", "for", "n", "in", "memberdef_nodes", ":", "self", ".", "add_line_with_subsequent_indent", "(", "self", ".", "get_function_signature", "(", "n", ")", ")", "self", ".", "add_text", "(", "'\\n'", ")", "self", ".", "add_text", "(", "[", "'Overloaded function'", ",", "'\\n'", ",", "'-------------------'", "]", ")", "for", "n", "in", "memberdef_nodes", ":", "self", ".", "add_text", "(", "'\\n'", ")", "self", ".", "add_line_with_subsequent_indent", "(", "'* '", "+", "self", ".", "get_function_signature", "(", "n", ")", ")", "self", ".", "subnode_parse", "(", "n", ",", "pieces", "=", "[", "]", ",", "indent", "=", "4", ",", "ignore", "=", "[", "'definition'", ",", "'name'", "]", ")", "self", ".", "add_text", "(", "[", "'\";'", ",", "'\\n'", "]", ")" ]
Produces docstring entries containing an "Overloaded function" section with the documentation for each overload, if the function is overloaded and self.with_overloaded_functions is set. Else, produce normal documentation.
[ "Produces", "docstring", "entries", "containing", "an", "Overloaded", "function", "section", "with", "the", "documentation", "for", "each", "overload", "if", "the", "function", "is", "overloaded", "and", "self", ".", "with_overloaded_functions", "is", "set", ".", "Else", "produce", "normal", "documentation", "." ]
d3510fa419b1c2b17f3f0b80a5fbb720c7b84008
https://github.com/basler/pypylon/blob/d3510fa419b1c2b17f3f0b80a5fbb720c7b84008/scripts/builddoxy2swig/doxy2swig/doxy2swig.py#L440-L461
247,542
basler/pypylon
scripts/builddoxy2swig/doxy2swig/doxy2swig.py
Doxy2SWIG.do_memberdef
def do_memberdef(self, node): """Handle cases outside of class, struct, file or namespace. These are now dealt with by `handle_overloaded_memberfunction`. Do these even exist??? """ prot = node.attributes['prot'].value id = node.attributes['id'].value kind = node.attributes['kind'].value tmp = node.parentNode.parentNode.parentNode compdef = tmp.getElementsByTagName('compounddef')[0] cdef_kind = compdef.attributes['kind'].value if cdef_kind in ('file', 'namespace', 'class', 'struct'): # These cases are now handled by `handle_typical_memberdefs` return if prot != 'public': return first = self.get_specific_nodes(node, ('definition', 'name')) name = self.extract_text(first['name']) if name[:8] == 'operator': # Don't handle operators yet. return if not 'definition' in first or kind in ['variable', 'typedef']: return data = self.extract_text(first['definition']) self.add_text('\n') self.add_text(['/* where did this entry come from??? */', '\n']) self.add_text('%feature("docstring") %s "\n%s' % (data, data)) for n in node.childNodes: if n not in first.values(): self.parse(n) self.add_text(['";', '\n'])
python
def do_memberdef(self, node): prot = node.attributes['prot'].value id = node.attributes['id'].value kind = node.attributes['kind'].value tmp = node.parentNode.parentNode.parentNode compdef = tmp.getElementsByTagName('compounddef')[0] cdef_kind = compdef.attributes['kind'].value if cdef_kind in ('file', 'namespace', 'class', 'struct'): # These cases are now handled by `handle_typical_memberdefs` return if prot != 'public': return first = self.get_specific_nodes(node, ('definition', 'name')) name = self.extract_text(first['name']) if name[:8] == 'operator': # Don't handle operators yet. return if not 'definition' in first or kind in ['variable', 'typedef']: return data = self.extract_text(first['definition']) self.add_text('\n') self.add_text(['/* where did this entry come from??? */', '\n']) self.add_text('%feature("docstring") %s "\n%s' % (data, data)) for n in node.childNodes: if n not in first.values(): self.parse(n) self.add_text(['";', '\n'])
[ "def", "do_memberdef", "(", "self", ",", "node", ")", ":", "prot", "=", "node", ".", "attributes", "[", "'prot'", "]", ".", "value", "id", "=", "node", ".", "attributes", "[", "'id'", "]", ".", "value", "kind", "=", "node", ".", "attributes", "[", "'kind'", "]", ".", "value", "tmp", "=", "node", ".", "parentNode", ".", "parentNode", ".", "parentNode", "compdef", "=", "tmp", ".", "getElementsByTagName", "(", "'compounddef'", ")", "[", "0", "]", "cdef_kind", "=", "compdef", ".", "attributes", "[", "'kind'", "]", ".", "value", "if", "cdef_kind", "in", "(", "'file'", ",", "'namespace'", ",", "'class'", ",", "'struct'", ")", ":", "# These cases are now handled by `handle_typical_memberdefs`", "return", "if", "prot", "!=", "'public'", ":", "return", "first", "=", "self", ".", "get_specific_nodes", "(", "node", ",", "(", "'definition'", ",", "'name'", ")", ")", "name", "=", "self", ".", "extract_text", "(", "first", "[", "'name'", "]", ")", "if", "name", "[", ":", "8", "]", "==", "'operator'", ":", "# Don't handle operators yet.", "return", "if", "not", "'definition'", "in", "first", "or", "kind", "in", "[", "'variable'", ",", "'typedef'", "]", ":", "return", "data", "=", "self", ".", "extract_text", "(", "first", "[", "'definition'", "]", ")", "self", ".", "add_text", "(", "'\\n'", ")", "self", ".", "add_text", "(", "[", "'/* where did this entry come from??? */'", ",", "'\\n'", "]", ")", "self", ".", "add_text", "(", "'%feature(\"docstring\") %s \"\\n%s'", "%", "(", "data", ",", "data", ")", ")", "for", "n", "in", "node", ".", "childNodes", ":", "if", "n", "not", "in", "first", ".", "values", "(", ")", ":", "self", ".", "parse", "(", "n", ")", "self", ".", "add_text", "(", "[", "'\";'", ",", "'\\n'", "]", ")" ]
Handle cases outside of class, struct, file or namespace. These are now dealt with by `handle_typical_memberdefs`. Do these even exist???
[ "Handle", "cases", "outside", "of", "class", "struct", "file", "or", "namespace", ".", "These", "are", "now", "dealt", "with", "by", "handle_typical_memberdefs", ".", "Do", "these", "even", "exist???" ]
d3510fa419b1c2b17f3f0b80a5fbb720c7b84008
https://github.com/basler/pypylon/blob/d3510fa419b1c2b17f3f0b80a5fbb720c7b84008/scripts/builddoxy2swig/doxy2swig/doxy2swig.py#L699-L730
247,543
basler/pypylon
scripts/builddoxy2swig/doxy2swig/doxy2swig.py
Doxy2SWIG.do_header
def do_header(self, node): """For a user-defined sectiondef, a header field is present which should not be printed as such, so we comment it out in the output.""" data = self.extract_text(node) self.add_text('\n/*\n %s \n*/\n' % data) # If our immediate sibling is a 'description' node then we # should comment that out also and remove it from the parent # node's children. parent = node.parentNode idx = parent.childNodes.index(node) if len(parent.childNodes) >= idx + 2: nd = parent.childNodes[idx + 2] if nd.nodeName == 'description': nd = parent.removeChild(nd) self.add_text('\n/*') self.subnode_parse(nd) self.add_text('\n*/\n')
python
def do_header(self, node): data = self.extract_text(node) self.add_text('\n/*\n %s \n*/\n' % data) # If our immediate sibling is a 'description' node then we # should comment that out also and remove it from the parent # node's children. parent = node.parentNode idx = parent.childNodes.index(node) if len(parent.childNodes) >= idx + 2: nd = parent.childNodes[idx + 2] if nd.nodeName == 'description': nd = parent.removeChild(nd) self.add_text('\n/*') self.subnode_parse(nd) self.add_text('\n*/\n')
[ "def", "do_header", "(", "self", ",", "node", ")", ":", "data", "=", "self", ".", "extract_text", "(", "node", ")", "self", ".", "add_text", "(", "'\\n/*\\n %s \\n*/\\n'", "%", "data", ")", "# If our immediate sibling is a 'description' node then we", "# should comment that out also and remove it from the parent", "# node's children.", "parent", "=", "node", ".", "parentNode", "idx", "=", "parent", ".", "childNodes", ".", "index", "(", "node", ")", "if", "len", "(", "parent", ".", "childNodes", ")", ">=", "idx", "+", "2", ":", "nd", "=", "parent", ".", "childNodes", "[", "idx", "+", "2", "]", "if", "nd", ".", "nodeName", "==", "'description'", ":", "nd", "=", "parent", ".", "removeChild", "(", "nd", ")", "self", ".", "add_text", "(", "'\\n/*'", ")", "self", ".", "subnode_parse", "(", "nd", ")", "self", ".", "add_text", "(", "'\\n*/\\n'", ")" ]
For a user-defined sectiondef, a header field is present which should not be printed as such, so we comment it out in the output.
[ "For", "a", "user", "-", "defined", "sectiondef", "a", "header", "field", "is", "present", "which", "should", "not", "be", "printed", "as", "such", "so", "we", "comment", "it", "out", "in", "the", "output", "." ]
d3510fa419b1c2b17f3f0b80a5fbb720c7b84008
https://github.com/basler/pypylon/blob/d3510fa419b1c2b17f3f0b80a5fbb720c7b84008/scripts/builddoxy2swig/doxy2swig/doxy2swig.py#L738-L755
247,544
basler/pypylon
scripts/generatedoc/generatedoc.py
visiblename
def visiblename(name, all=None, obj=None): """Decide whether to show documentation on a variable.""" # Certain special names are redundant or internal. # XXX Remove __initializing__? if name in {'__author__', '__builtins__', '__cached__', '__credits__', '__date__', '__doc__', '__file__', '__spec__', '__loader__', '__module__', '__name__', '__package__', '__path__', '__qualname__', '__slots__', '__version__'}: return 0 if name.endswith("_swigregister"): return 0 if name.startswith("__swig"): return 0 # Private names are hidden, but special names are displayed. if name.startswith('__') and name.endswith('__'): return 1 # Namedtuples have public fields and methods with a single leading underscore if name.startswith('_') and hasattr(obj, '_fields'): return True if all is not None: # only document that which the programmer exported in __all__ return name in all else: return not name.startswith('_')
python
def visiblename(name, all=None, obj=None): # Certain special names are redundant or internal. # XXX Remove __initializing__? if name in {'__author__', '__builtins__', '__cached__', '__credits__', '__date__', '__doc__', '__file__', '__spec__', '__loader__', '__module__', '__name__', '__package__', '__path__', '__qualname__', '__slots__', '__version__'}: return 0 if name.endswith("_swigregister"): return 0 if name.startswith("__swig"): return 0 # Private names are hidden, but special names are displayed. if name.startswith('__') and name.endswith('__'): return 1 # Namedtuples have public fields and methods with a single leading underscore if name.startswith('_') and hasattr(obj, '_fields'): return True if all is not None: # only document that which the programmer exported in __all__ return name in all else: return not name.startswith('_')
[ "def", "visiblename", "(", "name", ",", "all", "=", "None", ",", "obj", "=", "None", ")", ":", "# Certain special names are redundant or internal.", "# XXX Remove __initializing__?", "if", "name", "in", "{", "'__author__'", ",", "'__builtins__'", ",", "'__cached__'", ",", "'__credits__'", ",", "'__date__'", ",", "'__doc__'", ",", "'__file__'", ",", "'__spec__'", ",", "'__loader__'", ",", "'__module__'", ",", "'__name__'", ",", "'__package__'", ",", "'__path__'", ",", "'__qualname__'", ",", "'__slots__'", ",", "'__version__'", "}", ":", "return", "0", "if", "name", ".", "endswith", "(", "\"_swigregister\"", ")", ":", "return", "0", "if", "name", ".", "startswith", "(", "\"__swig\"", ")", ":", "return", "0", "# Private names are hidden, but special names are displayed.", "if", "name", ".", "startswith", "(", "'__'", ")", "and", "name", ".", "endswith", "(", "'__'", ")", ":", "return", "1", "# Namedtuples have public fields and methods with a single leading underscore", "if", "name", ".", "startswith", "(", "'_'", ")", "and", "hasattr", "(", "obj", ",", "'_fields'", ")", ":", "return", "True", "if", "all", "is", "not", "None", ":", "# only document that which the programmer exported in __all__", "return", "name", "in", "all", "else", ":", "return", "not", "name", ".", "startswith", "(", "'_'", ")" ]
Decide whether to show documentation on a variable.
[ "Decide", "whether", "to", "show", "documentation", "on", "a", "variable", "." ]
d3510fa419b1c2b17f3f0b80a5fbb720c7b84008
https://github.com/basler/pypylon/blob/d3510fa419b1c2b17f3f0b80a5fbb720c7b84008/scripts/generatedoc/generatedoc.py#L1-L43
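A quick usage sketch against a throwaway module object (all names made up), assuming visiblename from the record above is in scope:

import types

mod = types.ModuleType("fake_swig_module")
mod.Camera = type("Camera", (), {})
mod.Camera_swigregister = lambda x: None  # hidden: *_swigregister helper
mod._internal = 1                         # hidden: plain private name
mod.__version__ = "1.0"                   # hidden: redundant special name

public = [n for n in dir(mod) if visiblename(n, all=None, obj=mod)]
print(public)  # ['Camera']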
247,545
SFDO-Tooling/CumulusCI
cumulusci/tasks/bulkdata.py
_download_file
def _download_file(uri, bulk_api): """Download the bulk API result file for a single batch""" resp = requests.get(uri, headers=bulk_api.headers(), stream=True) with tempfile.TemporaryFile("w+b") as f: for chunk in resp.iter_content(chunk_size=None): f.write(chunk) f.seek(0) yield f
python
def _download_file(uri, bulk_api): resp = requests.get(uri, headers=bulk_api.headers(), stream=True) with tempfile.TemporaryFile("w+b") as f: for chunk in resp.iter_content(chunk_size=None): f.write(chunk) f.seek(0) yield f
[ "def", "_download_file", "(", "uri", ",", "bulk_api", ")", ":", "resp", "=", "requests", ".", "get", "(", "uri", ",", "headers", "=", "bulk_api", ".", "headers", "(", ")", ",", "stream", "=", "True", ")", "with", "tempfile", ".", "TemporaryFile", "(", "\"w+b\"", ")", "as", "f", ":", "for", "chunk", "in", "resp", ".", "iter_content", "(", "chunk_size", "=", "None", ")", ":", "f", ".", "write", "(", "chunk", ")", "f", ".", "seek", "(", "0", ")", "yield", "f" ]
Download the bulk API result file for a single batch
[ "Download", "the", "bulk", "API", "result", "file", "for", "a", "single", "batch" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/tasks/bulkdata.py#L801-L808
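Judging by its use in _store_inserted_ids below, this generator runs under contextlib.contextmanager (the decorator is outside this snippet); a self-contained sketch of the same spool-to-temp-file pattern with a hypothetical URL:

import contextlib
import tempfile

import requests

@contextlib.contextmanager
def download_to_tempfile(uri, headers=None):
    # Stream the body into an anonymous temp file, then rewind so the
    # caller reads a complete payload even if the connection is slow.
    resp = requests.get(uri, headers=headers or {}, stream=True)
    with tempfile.TemporaryFile("w+b") as f:
        for chunk in resp.iter_content(chunk_size=None):
            f.write(chunk)
        f.seek(0)
        yield f

# with download_to_tempfile("https://example.com/batch.csv") as f:
#     data = f.read()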
247,546
SFDO-Tooling/CumulusCI
cumulusci/tasks/bulkdata.py
LoadData._load_mapping
def _load_mapping(self, mapping): """Load data for a single step.""" mapping["oid_as_pk"] = bool(mapping.get("fields", {}).get("Id")) job_id, local_ids_for_batch = self._create_job(mapping) result = self._wait_for_job(job_id) # We store inserted ids even if some batches failed self._store_inserted_ids(mapping, job_id, local_ids_for_batch) return result
python
def _load_mapping(self, mapping): mapping["oid_as_pk"] = bool(mapping.get("fields", {}).get("Id")) job_id, local_ids_for_batch = self._create_job(mapping) result = self._wait_for_job(job_id) # We store inserted ids even if some batches failed self._store_inserted_ids(mapping, job_id, local_ids_for_batch) return result
[ "def", "_load_mapping", "(", "self", ",", "mapping", ")", ":", "mapping", "[", "\"oid_as_pk\"", "]", "=", "bool", "(", "mapping", ".", "get", "(", "\"fields\"", ",", "{", "}", ")", ".", "get", "(", "\"Id\"", ")", ")", "job_id", ",", "local_ids_for_batch", "=", "self", ".", "_create_job", "(", "mapping", ")", "result", "=", "self", ".", "_wait_for_job", "(", "job_id", ")", "# We store inserted ids even if some batches failed", "self", ".", "_store_inserted_ids", "(", "mapping", ",", "job_id", ",", "local_ids_for_batch", ")", "return", "result" ]
Load data for a single step.
[ "Load", "data", "for", "a", "single", "step", "." ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/tasks/bulkdata.py#L268-L275
247,547
SFDO-Tooling/CumulusCI
cumulusci/tasks/bulkdata.py
LoadData._create_job
def _create_job(self, mapping): """Initiate a bulk insert and upload batches to run in parallel.""" job_id = self.bulk.create_insert_job(mapping["sf_object"], contentType="CSV") self.logger.info(" Created bulk job {}".format(job_id)) # Upload batches local_ids_for_batch = {} for batch_file, local_ids in self._get_batches(mapping): batch_id = self.bulk.post_batch(job_id, batch_file) local_ids_for_batch[batch_id] = local_ids self.logger.info(" Uploaded batch {}".format(batch_id)) self.bulk.close_job(job_id) return job_id, local_ids_for_batch
python
def _create_job(self, mapping): job_id = self.bulk.create_insert_job(mapping["sf_object"], contentType="CSV") self.logger.info(" Created bulk job {}".format(job_id)) # Upload batches local_ids_for_batch = {} for batch_file, local_ids in self._get_batches(mapping): batch_id = self.bulk.post_batch(job_id, batch_file) local_ids_for_batch[batch_id] = local_ids self.logger.info(" Uploaded batch {}".format(batch_id)) self.bulk.close_job(job_id) return job_id, local_ids_for_batch
[ "def", "_create_job", "(", "self", ",", "mapping", ")", ":", "job_id", "=", "self", ".", "bulk", ".", "create_insert_job", "(", "mapping", "[", "\"sf_object\"", "]", ",", "contentType", "=", "\"CSV\"", ")", "self", ".", "logger", ".", "info", "(", "\" Created bulk job {}\"", ".", "format", "(", "job_id", ")", ")", "# Upload batches", "local_ids_for_batch", "=", "{", "}", "for", "batch_file", ",", "local_ids", "in", "self", ".", "_get_batches", "(", "mapping", ")", ":", "batch_id", "=", "self", ".", "bulk", ".", "post_batch", "(", "job_id", ",", "batch_file", ")", "local_ids_for_batch", "[", "batch_id", "]", "=", "local_ids", "self", ".", "logger", ".", "info", "(", "\" Uploaded batch {}\"", ".", "format", "(", "batch_id", ")", ")", "self", ".", "bulk", ".", "close_job", "(", "job_id", ")", "return", "job_id", ",", "local_ids_for_batch" ]
Initiate a bulk insert and upload batches to run in parallel.
[ "Initiate", "a", "bulk", "insert", "and", "upload", "batches", "to", "run", "in", "parallel", "." ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/tasks/bulkdata.py#L277-L290
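A runnable miniature of the same job choreography with a stub client standing in for salesforce_bulk, to show the create, post, close ordering:

import io

class StubBulk:
    # Stand-in exposing the three calls used above.
    def create_insert_job(self, sf_object, contentType="CSV"):
        return "JOB-1"
    def post_batch(self, job_id, batch_file):
        return "BATCH-%d" % len(batch_file.read())
    def close_job(self, job_id):
        pass  # after this, no further batches may be added

def create_job(bulk, sf_object, batches):
    job_id = bulk.create_insert_job(sf_object, contentType="CSV")
    local_ids_for_batch = {}
    for batch_file, local_ids in batches:
        local_ids_for_batch[bulk.post_batch(job_id, batch_file)] = local_ids
    bulk.close_job(job_id)
    return job_id, local_ids_for_batch

batches = [(io.BytesIO(b"Id,Name\n,Acme\n"), ["local-1"])]
print(create_job(StubBulk(), "Account", batches))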
247,548
SFDO-Tooling/CumulusCI
cumulusci/tasks/bulkdata.py
LoadData._get_batches
def _get_batches(self, mapping, batch_size=10000): """Get data from the local db""" action = mapping.get("action", "insert") fields = mapping.get("fields", {}).copy() static = mapping.get("static", {}) lookups = mapping.get("lookups", {}) record_type = mapping.get("record_type") # Skip Id field on insert if action == "insert" and "Id" in fields: del fields["Id"] # Build the list of fields to import columns = [] columns.extend(fields.keys()) columns.extend(lookups.keys()) columns.extend(static.keys()) if record_type: columns.append("RecordTypeId") # default to the profile assigned recordtype if we can't find any # query for the RT by developer name query = ( "SELECT Id FROM RecordType WHERE SObjectType='{0}'" "AND DeveloperName = '{1}' LIMIT 1" ) record_type_id = self.sf.query( query.format(mapping.get("sf_object"), record_type) )["records"][0]["Id"] query = self._query_db(mapping) total_rows = 0 batch_num = 1 def start_batch(): batch_file = io.BytesIO() writer = unicodecsv.writer(batch_file) writer.writerow(columns) batch_ids = [] return batch_file, writer, batch_ids batch_file, writer, batch_ids = start_batch() for row in query.yield_per(batch_size): total_rows += 1 # Add static values to row pkey = row[0] row = list(row[1:]) + list(static.values()) if record_type: row.append(record_type_id) writer.writerow([self._convert(value) for value in row]) batch_ids.append(pkey) # Yield and start a new file every [batch_size] rows if not total_rows % batch_size: batch_file.seek(0) self.logger.info(" Processing batch {}".format(batch_num)) yield batch_file, batch_ids batch_file, writer, batch_ids = start_batch() batch_num += 1 # Yield result file for final batch if batch_ids: batch_file.seek(0) yield batch_file, batch_ids self.logger.info( " Prepared {} rows for import to {}".format( total_rows, mapping["sf_object"] ) )
python
def _get_batches(self, mapping, batch_size=10000): action = mapping.get("action", "insert") fields = mapping.get("fields", {}).copy() static = mapping.get("static", {}) lookups = mapping.get("lookups", {}) record_type = mapping.get("record_type") # Skip Id field on insert if action == "insert" and "Id" in fields: del fields["Id"] # Build the list of fields to import columns = [] columns.extend(fields.keys()) columns.extend(lookups.keys()) columns.extend(static.keys()) if record_type: columns.append("RecordTypeId") # default to the profile assigned recordtype if we can't find any # query for the RT by developer name query = ( "SELECT Id FROM RecordType WHERE SObjectType='{0}'" "AND DeveloperName = '{1}' LIMIT 1" ) record_type_id = self.sf.query( query.format(mapping.get("sf_object"), record_type) )["records"][0]["Id"] query = self._query_db(mapping) total_rows = 0 batch_num = 1 def start_batch(): batch_file = io.BytesIO() writer = unicodecsv.writer(batch_file) writer.writerow(columns) batch_ids = [] return batch_file, writer, batch_ids batch_file, writer, batch_ids = start_batch() for row in query.yield_per(batch_size): total_rows += 1 # Add static values to row pkey = row[0] row = list(row[1:]) + list(static.values()) if record_type: row.append(record_type_id) writer.writerow([self._convert(value) for value in row]) batch_ids.append(pkey) # Yield and start a new file every [batch_size] rows if not total_rows % batch_size: batch_file.seek(0) self.logger.info(" Processing batch {}".format(batch_num)) yield batch_file, batch_ids batch_file, writer, batch_ids = start_batch() batch_num += 1 # Yield result file for final batch if batch_ids: batch_file.seek(0) yield batch_file, batch_ids self.logger.info( " Prepared {} rows for import to {}".format( total_rows, mapping["sf_object"] ) )
[ "def", "_get_batches", "(", "self", ",", "mapping", ",", "batch_size", "=", "10000", ")", ":", "action", "=", "mapping", ".", "get", "(", "\"action\"", ",", "\"insert\"", ")", "fields", "=", "mapping", ".", "get", "(", "\"fields\"", ",", "{", "}", ")", ".", "copy", "(", ")", "static", "=", "mapping", ".", "get", "(", "\"static\"", ",", "{", "}", ")", "lookups", "=", "mapping", ".", "get", "(", "\"lookups\"", ",", "{", "}", ")", "record_type", "=", "mapping", ".", "get", "(", "\"record_type\"", ")", "# Skip Id field on insert", "if", "action", "==", "\"insert\"", "and", "\"Id\"", "in", "fields", ":", "del", "fields", "[", "\"Id\"", "]", "# Build the list of fields to import", "columns", "=", "[", "]", "columns", ".", "extend", "(", "fields", ".", "keys", "(", ")", ")", "columns", ".", "extend", "(", "lookups", ".", "keys", "(", ")", ")", "columns", ".", "extend", "(", "static", ".", "keys", "(", ")", ")", "if", "record_type", ":", "columns", ".", "append", "(", "\"RecordTypeId\"", ")", "# default to the profile assigned recordtype if we can't find any", "# query for the RT by developer name", "query", "=", "(", "\"SELECT Id FROM RecordType WHERE SObjectType='{0}'\"", "\"AND DeveloperName = '{1}' LIMIT 1\"", ")", "record_type_id", "=", "self", ".", "sf", ".", "query", "(", "query", ".", "format", "(", "mapping", ".", "get", "(", "\"sf_object\"", ")", ",", "record_type", ")", ")", "[", "\"records\"", "]", "[", "0", "]", "[", "\"Id\"", "]", "query", "=", "self", ".", "_query_db", "(", "mapping", ")", "total_rows", "=", "0", "batch_num", "=", "1", "def", "start_batch", "(", ")", ":", "batch_file", "=", "io", ".", "BytesIO", "(", ")", "writer", "=", "unicodecsv", ".", "writer", "(", "batch_file", ")", "writer", ".", "writerow", "(", "columns", ")", "batch_ids", "=", "[", "]", "return", "batch_file", ",", "writer", ",", "batch_ids", "batch_file", ",", "writer", ",", "batch_ids", "=", "start_batch", "(", ")", "for", "row", "in", "query", ".", "yield_per", "(", "batch_size", ")", ":", "total_rows", "+=", "1", "# Add static values to row", "pkey", "=", "row", "[", "0", "]", "row", "=", "list", "(", "row", "[", "1", ":", "]", ")", "+", "list", "(", "static", ".", "values", "(", ")", ")", "if", "record_type", ":", "row", ".", "append", "(", "record_type_id", ")", "writer", ".", "writerow", "(", "[", "self", ".", "_convert", "(", "value", ")", "for", "value", "in", "row", "]", ")", "batch_ids", ".", "append", "(", "pkey", ")", "# Yield and start a new file every [batch_size] rows", "if", "not", "total_rows", "%", "batch_size", ":", "batch_file", ".", "seek", "(", "0", ")", "self", ".", "logger", ".", "info", "(", "\" Processing batch {}\"", ".", "format", "(", "batch_num", ")", ")", "yield", "batch_file", ",", "batch_ids", "batch_file", ",", "writer", ",", "batch_ids", "=", "start_batch", "(", ")", "batch_num", "+=", "1", "# Yield result file for final batch", "if", "batch_ids", ":", "batch_file", ".", "seek", "(", "0", ")", "yield", "batch_file", ",", "batch_ids", "self", ".", "logger", ".", "info", "(", "\" Prepared {} rows for import to {}\"", ".", "format", "(", "total_rows", ",", "mapping", "[", "\"sf_object\"", "]", ")", ")" ]
Get data from the local db
[ "Get", "data", "from", "the", "local", "db" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/tasks/bulkdata.py#L292-L364
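The core of the batching loop reduces to a generator that starts a fresh CSV buffer every batch_size rows; a minimal sketch with the standard csv module in place of unicodecsv:

import csv
import io

def batch_rows(rows, columns, batch_size=3):
    # Yield (csv_text, primary_keys) tuples, one per batch of rows.
    def start_batch():
        buf = io.StringIO()
        writer = csv.writer(buf)
        writer.writerow(columns)
        return buf, writer, []

    buf, writer, pkeys = start_batch()
    for total, (pkey, *values) in enumerate(rows, start=1):
        writer.writerow(values)
        pkeys.append(pkey)
        if total % batch_size == 0:      # batch is full: hand it off
            yield buf.getvalue(), pkeys
            buf, writer, pkeys = start_batch()
    if pkeys:                            # final, partially filled batch
        yield buf.getvalue(), pkeys

rows = [(i, "Name%d" % i) for i in range(5)]
for text, ids in batch_rows(rows, ["Name"]):
    print(ids)   # [0, 1, 2] then [3, 4]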
247,549
SFDO-Tooling/CumulusCI
cumulusci/tasks/bulkdata.py
LoadData._query_db
def _query_db(self, mapping): """Build a query to retrieve data from the local db. Includes columns from the mapping as well as joining to the id tables to get real SF ids for lookups. """ model = self.models[mapping.get("table")] # Use primary key instead of the field mapped to SF Id fields = mapping.get("fields", {}).copy() if mapping["oid_as_pk"]: del fields["Id"] id_column = model.__table__.primary_key.columns.keys()[0] columns = [getattr(model, id_column)] for f in fields.values(): columns.append(model.__table__.columns[f]) lookups = mapping.get("lookups", {}).copy() for lookup in lookups.values(): lookup["aliased_table"] = aliased( self.metadata.tables["{}_sf_ids".format(lookup["table"])] ) columns.append(lookup["aliased_table"].columns.sf_id) query = self.session.query(*columns) if "record_type" in mapping and hasattr(model, "record_type"): query = query.filter(model.record_type == mapping["record_type"]) if "filters" in mapping: filter_args = [] for f in mapping["filters"]: filter_args.append(text(f)) query = query.filter(*filter_args) for sf_field, lookup in lookups.items(): # Outer join with lookup ids table: # returns main obj even if lookup is null key_field = get_lookup_key_field(lookup, sf_field) value_column = getattr(model, key_field) query = query.outerjoin( lookup["aliased_table"], lookup["aliased_table"].columns.id == value_column, ) # Order by foreign key to minimize lock contention # by trying to keep lookup targets in the same batch lookup_column = getattr(model, key_field) query = query.order_by(lookup_column) self.logger.info(str(query)) return query
python
def _query_db(self, mapping): model = self.models[mapping.get("table")] # Use primary key instead of the field mapped to SF Id fields = mapping.get("fields", {}).copy() if mapping["oid_as_pk"]: del fields["Id"] id_column = model.__table__.primary_key.columns.keys()[0] columns = [getattr(model, id_column)] for f in fields.values(): columns.append(model.__table__.columns[f]) lookups = mapping.get("lookups", {}).copy() for lookup in lookups.values(): lookup["aliased_table"] = aliased( self.metadata.tables["{}_sf_ids".format(lookup["table"])] ) columns.append(lookup["aliased_table"].columns.sf_id) query = self.session.query(*columns) if "record_type" in mapping and hasattr(model, "record_type"): query = query.filter(model.record_type == mapping["record_type"]) if "filters" in mapping: filter_args = [] for f in mapping["filters"]: filter_args.append(text(f)) query = query.filter(*filter_args) for sf_field, lookup in lookups.items(): # Outer join with lookup ids table: # returns main obj even if lookup is null key_field = get_lookup_key_field(lookup, sf_field) value_column = getattr(model, key_field) query = query.outerjoin( lookup["aliased_table"], lookup["aliased_table"].columns.id == value_column, ) # Order by foreign key to minimize lock contention # by trying to keep lookup targets in the same batch lookup_column = getattr(model, key_field) query = query.order_by(lookup_column) self.logger.info(str(query)) return query
[ "def", "_query_db", "(", "self", ",", "mapping", ")", ":", "model", "=", "self", ".", "models", "[", "mapping", ".", "get", "(", "\"table\"", ")", "]", "# Use primary key instead of the field mapped to SF Id", "fields", "=", "mapping", ".", "get", "(", "\"fields\"", ",", "{", "}", ")", ".", "copy", "(", ")", "if", "mapping", "[", "\"oid_as_pk\"", "]", ":", "del", "fields", "[", "\"Id\"", "]", "id_column", "=", "model", ".", "__table__", ".", "primary_key", ".", "columns", ".", "keys", "(", ")", "[", "0", "]", "columns", "=", "[", "getattr", "(", "model", ",", "id_column", ")", "]", "for", "f", "in", "fields", ".", "values", "(", ")", ":", "columns", ".", "append", "(", "model", ".", "__table__", ".", "columns", "[", "f", "]", ")", "lookups", "=", "mapping", ".", "get", "(", "\"lookups\"", ",", "{", "}", ")", ".", "copy", "(", ")", "for", "lookup", "in", "lookups", ".", "values", "(", ")", ":", "lookup", "[", "\"aliased_table\"", "]", "=", "aliased", "(", "self", ".", "metadata", ".", "tables", "[", "\"{}_sf_ids\"", ".", "format", "(", "lookup", "[", "\"table\"", "]", ")", "]", ")", "columns", ".", "append", "(", "lookup", "[", "\"aliased_table\"", "]", ".", "columns", ".", "sf_id", ")", "query", "=", "self", ".", "session", ".", "query", "(", "*", "columns", ")", "if", "\"record_type\"", "in", "mapping", "and", "hasattr", "(", "model", ",", "\"record_type\"", ")", ":", "query", "=", "query", ".", "filter", "(", "model", ".", "record_type", "==", "mapping", "[", "\"record_type\"", "]", ")", "if", "\"filters\"", "in", "mapping", ":", "filter_args", "=", "[", "]", "for", "f", "in", "mapping", "[", "\"filters\"", "]", ":", "filter_args", ".", "append", "(", "text", "(", "f", ")", ")", "query", "=", "query", ".", "filter", "(", "*", "filter_args", ")", "for", "sf_field", ",", "lookup", "in", "lookups", ".", "items", "(", ")", ":", "# Outer join with lookup ids table:", "# returns main obj even if lookup is null", "key_field", "=", "get_lookup_key_field", "(", "lookup", ",", "sf_field", ")", "value_column", "=", "getattr", "(", "model", ",", "key_field", ")", "query", "=", "query", ".", "outerjoin", "(", "lookup", "[", "\"aliased_table\"", "]", ",", "lookup", "[", "\"aliased_table\"", "]", ".", "columns", ".", "id", "==", "value_column", ",", ")", "# Order by foreign key to minimize lock contention", "# by trying to keep lookup targets in the same batch", "lookup_column", "=", "getattr", "(", "model", ",", "key_field", ")", "query", "=", "query", ".", "order_by", "(", "lookup_column", ")", "self", ".", "logger", ".", "info", "(", "str", "(", "query", ")", ")", "return", "query" ]
Build a query to retrieve data from the local db. Includes columns from the mapping as well as joining to the id tables to get real SF ids for lookups.
[ "Build", "a", "query", "to", "retrieve", "data", "from", "the", "local", "db", "." ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/tasks/bulkdata.py#L366-L413
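The lookup handling condenses to one join per foreign key; a sketch of that step alone, assuming the SQLAlchemy 1.x style used above and a caller that supplies the model, key field, and the matching *_sf_ids table:

from sqlalchemy.orm import aliased

def add_lookup_join(query, model, key_field, sf_ids_table):
    # Outer join, so rows whose lookup is null are still returned;
    # order by the foreign key to keep lookup targets in one batch.
    ids = aliased(sf_ids_table)
    value_column = getattr(model, key_field)
    query = query.outerjoin(ids, ids.columns.id == value_column)
    return query.order_by(value_column), ids.columns.sf_id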
247,550
SFDO-Tooling/CumulusCI
cumulusci/tasks/bulkdata.py
LoadData._store_inserted_ids
def _store_inserted_ids(self, mapping, job_id, local_ids_for_batch): """Get the job results and store inserted SF Ids in a new table""" id_table_name = self._reset_id_table(mapping) conn = self.session.connection() for batch_id, local_ids in local_ids_for_batch.items(): try: results_url = "{}/job/{}/batch/{}/result".format( self.bulk.endpoint, job_id, batch_id ) # Download entire result file to a temporary file first # to avoid the server dropping connections with _download_file(results_url, self.bulk) as f: self.logger.info( " Downloaded results for batch {}".format(batch_id) ) self._store_inserted_ids_for_batch( f, local_ids, id_table_name, conn ) self.logger.info( " Updated {} for batch {}".format(id_table_name, batch_id) ) except Exception: # pragma: nocover # If we can't download one result file, # don't let that stop us from downloading the others self.logger.error( "Could not download batch results: {}".format(batch_id) ) continue self.session.commit()
python
def _store_inserted_ids(self, mapping, job_id, local_ids_for_batch): id_table_name = self._reset_id_table(mapping) conn = self.session.connection() for batch_id, local_ids in local_ids_for_batch.items(): try: results_url = "{}/job/{}/batch/{}/result".format( self.bulk.endpoint, job_id, batch_id ) # Download entire result file to a temporary file first # to avoid the server dropping connections with _download_file(results_url, self.bulk) as f: self.logger.info( " Downloaded results for batch {}".format(batch_id) ) self._store_inserted_ids_for_batch( f, local_ids, id_table_name, conn ) self.logger.info( " Updated {} for batch {}".format(id_table_name, batch_id) ) except Exception: # pragma: nocover # If we can't download one result file, # don't let that stop us from downloading the others self.logger.error( "Could not download batch results: {}".format(batch_id) ) continue self.session.commit()
[ "def", "_store_inserted_ids", "(", "self", ",", "mapping", ",", "job_id", ",", "local_ids_for_batch", ")", ":", "id_table_name", "=", "self", ".", "_reset_id_table", "(", "mapping", ")", "conn", "=", "self", ".", "session", ".", "connection", "(", ")", "for", "batch_id", ",", "local_ids", "in", "local_ids_for_batch", ".", "items", "(", ")", ":", "try", ":", "results_url", "=", "\"{}/job/{}/batch/{}/result\"", ".", "format", "(", "self", ".", "bulk", ".", "endpoint", ",", "job_id", ",", "batch_id", ")", "# Download entire result file to a temporary file first", "# to avoid the server dropping connections", "with", "_download_file", "(", "results_url", ",", "self", ".", "bulk", ")", "as", "f", ":", "self", ".", "logger", ".", "info", "(", "\" Downloaded results for batch {}\"", ".", "format", "(", "batch_id", ")", ")", "self", ".", "_store_inserted_ids_for_batch", "(", "f", ",", "local_ids", ",", "id_table_name", ",", "conn", ")", "self", ".", "logger", ".", "info", "(", "\" Updated {} for batch {}\"", ".", "format", "(", "id_table_name", ",", "batch_id", ")", ")", "except", "Exception", ":", "# pragma: nocover", "# If we can't download one result file,", "# don't let that stop us from downloading the others", "self", ".", "logger", ".", "error", "(", "\"Could not download batch results: {}\"", ".", "format", "(", "batch_id", ")", ")", "continue", "self", ".", "session", ".", "commit", "(", ")" ]
Get the job results and store inserted SF Ids in a new table
[ "Get", "the", "job", "results", "and", "store", "inserted", "SF", "Ids", "in", "a", "new", "table" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/tasks/bulkdata.py#L421-L449
247,551
SFDO-Tooling/CumulusCI
cumulusci/tasks/bulkdata.py
LoadData._reset_id_table
def _reset_id_table(self, mapping): """Create an empty table to hold the inserted SF Ids""" if not hasattr(self, "_initialized_id_tables"): self._initialized_id_tables = set() id_table_name = "{}_sf_ids".format(mapping["table"]) if id_table_name not in self._initialized_id_tables: if id_table_name in self.metadata.tables: self.metadata.remove(self.metadata.tables[id_table_name]) id_table = Table( id_table_name, self.metadata, Column("id", Unicode(255), primary_key=True), Column("sf_id", Unicode(18)), ) if id_table.exists(): id_table.drop() id_table.create() self._initialized_id_tables.add(id_table_name) return id_table_name
python
def _reset_id_table(self, mapping): if not hasattr(self, "_initialized_id_tables"): self._initialized_id_tables = set() id_table_name = "{}_sf_ids".format(mapping["table"]) if id_table_name not in self._initialized_id_tables: if id_table_name in self.metadata.tables: self.metadata.remove(self.metadata.tables[id_table_name]) id_table = Table( id_table_name, self.metadata, Column("id", Unicode(255), primary_key=True), Column("sf_id", Unicode(18)), ) if id_table.exists(): id_table.drop() id_table.create() self._initialized_id_tables.add(id_table_name) return id_table_name
[ "def", "_reset_id_table", "(", "self", ",", "mapping", ")", ":", "if", "not", "hasattr", "(", "self", ",", "\"_initialized_id_tables\"", ")", ":", "self", ".", "_initialized_id_tables", "=", "set", "(", ")", "id_table_name", "=", "\"{}_sf_ids\"", ".", "format", "(", "mapping", "[", "\"table\"", "]", ")", "if", "id_table_name", "not", "in", "self", ".", "_initialized_id_tables", ":", "if", "id_table_name", "in", "self", ".", "metadata", ".", "tables", ":", "self", ".", "metadata", ".", "remove", "(", "self", ".", "metadata", ".", "tables", "[", "id_table_name", "]", ")", "id_table", "=", "Table", "(", "id_table_name", ",", "self", ".", "metadata", ",", "Column", "(", "\"id\"", ",", "Unicode", "(", "255", ")", ",", "primary_key", "=", "True", ")", ",", "Column", "(", "\"sf_id\"", ",", "Unicode", "(", "18", ")", ")", ",", ")", "if", "id_table", ".", "exists", "(", ")", ":", "id_table", ".", "drop", "(", ")", "id_table", ".", "create", "(", ")", "self", ".", "_initialized_id_tables", ".", "add", "(", "id_table_name", ")", "return", "id_table_name" ]
Create an empty table to hold the inserted SF Ids
[ "Create", "an", "empty", "table", "to", "hold", "the", "inserted", "SF", "Ids" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/tasks/bulkdata.py#L451-L469
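A runnable miniature of the drop-and-recreate step against in-memory SQLite, assuming the SQLAlchemy 1.x API used above (Table.exists/drop/create on bound metadata):

from sqlalchemy import Column, MetaData, Table, Unicode, create_engine

engine = create_engine("sqlite://")
metadata = MetaData(bind=engine)

id_table = Table(
    "account_sf_ids", metadata,          # hypothetical table name
    Column("id", Unicode(255), primary_key=True),
    Column("sf_id", Unicode(18)),
)
if id_table.exists():  # stale table from an earlier run: start fresh
    id_table.drop()
id_table.create()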
247,552
SFDO-Tooling/CumulusCI
cumulusci/tasks/bulkdata.py
QueryData._get_mapping_for_table
def _get_mapping_for_table(self, table): """ Returns the first mapping for a table name """ for mapping in self.mappings.values(): if mapping["table"] == table: return mapping
python
def _get_mapping_for_table(self, table): for mapping in self.mappings.values(): if mapping["table"] == table: return mapping
[ "def", "_get_mapping_for_table", "(", "self", ",", "table", ")", ":", "for", "mapping", "in", "self", ".", "mappings", ".", "values", "(", ")", ":", "if", "mapping", "[", "\"table\"", "]", "==", "table", ":", "return", "mapping" ]
Returns the first mapping for a table name
[ "Returns", "the", "first", "mapping", "for", "a", "table", "name" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/tasks/bulkdata.py#L690-L694
247,553
SFDO-Tooling/CumulusCI
cumulusci/core/config/BaseProjectConfig.py
BaseProjectConfig._load_config
def _load_config(self): """ Loads the configuration from YAML, if no override config was passed in initially. """ if ( self.config ): # any config being pre-set at init will short circuit out, but not a plain {} return # Verify that we're in a project repo_root = self.repo_root if not repo_root: raise NotInProject( "No git repository was found in the current path. You must be in a git repository to set up and use CCI for a project." ) # Verify that the project's root has a config file if not self.config_project_path: raise ProjectConfigNotFound( "The file {} was not found in the repo root: {}. Are you in a CumulusCI Project directory?".format( self.config_filename, repo_root ) ) # Load the project's yaml config file with open(self.config_project_path, "r") as f_config: project_config = ordered_yaml_load(f_config) if project_config: self.config_project.update(project_config) # Load the local project yaml config file if it exists if self.config_project_local_path: with open(self.config_project_local_path, "r") as f_local_config: local_config = ordered_yaml_load(f_local_config) if local_config: self.config_project_local.update(local_config) # merge in any additional yaml that was passed along if self.additional_yaml: additional_yaml_config = ordered_yaml_load(self.additional_yaml) if additional_yaml_config: self.config_additional_yaml.update(additional_yaml_config) self.config = merge_config( OrderedDict( [ ("global_config", self.config_global), ("global_local", self.config_global_local), ("project_config", self.config_project), ("project_local_config", self.config_project_local), ("additional_yaml", self.config_additional_yaml), ] ) )
python
def _load_config(self): if ( self.config ): # any config being pre-set at init will short circuit out, but not a plain {} return # Verify that we're in a project repo_root = self.repo_root if not repo_root: raise NotInProject( "No git repository was found in the current path. You must be in a git repository to set up and use CCI for a project." ) # Verify that the project's root has a config file if not self.config_project_path: raise ProjectConfigNotFound( "The file {} was not found in the repo root: {}. Are you in a CumulusCI Project directory?".format( self.config_filename, repo_root ) ) # Load the project's yaml config file with open(self.config_project_path, "r") as f_config: project_config = ordered_yaml_load(f_config) if project_config: self.config_project.update(project_config) # Load the local project yaml config file if it exists if self.config_project_local_path: with open(self.config_project_local_path, "r") as f_local_config: local_config = ordered_yaml_load(f_local_config) if local_config: self.config_project_local.update(local_config) # merge in any additional yaml that was passed along if self.additional_yaml: additional_yaml_config = ordered_yaml_load(self.additional_yaml) if additional_yaml_config: self.config_additional_yaml.update(additional_yaml_config) self.config = merge_config( OrderedDict( [ ("global_config", self.config_global), ("global_local", self.config_global_local), ("project_config", self.config_project), ("project_local_config", self.config_project_local), ("additional_yaml", self.config_additional_yaml), ] ) )
[ "def", "_load_config", "(", "self", ")", ":", "if", "(", "self", ".", "config", ")", ":", "# any config being pre-set at init will short circuit out, but not a plain {}", "return", "# Verify that we're in a project", "repo_root", "=", "self", ".", "repo_root", "if", "not", "repo_root", ":", "raise", "NotInProject", "(", "\"No git repository was found in the current path. You must be in a git repository to set up and use CCI for a project.\"", ")", "# Verify that the project's root has a config file", "if", "not", "self", ".", "config_project_path", ":", "raise", "ProjectConfigNotFound", "(", "\"The file {} was not found in the repo root: {}. Are you in a CumulusCI Project directory?\"", ".", "format", "(", "self", ".", "config_filename", ",", "repo_root", ")", ")", "# Load the project's yaml config file", "with", "open", "(", "self", ".", "config_project_path", ",", "\"r\"", ")", "as", "f_config", ":", "project_config", "=", "ordered_yaml_load", "(", "f_config", ")", "if", "project_config", ":", "self", ".", "config_project", ".", "update", "(", "project_config", ")", "# Load the local project yaml config file if it exists", "if", "self", ".", "config_project_local_path", ":", "with", "open", "(", "self", ".", "config_project_local_path", ",", "\"r\"", ")", "as", "f_local_config", ":", "local_config", "=", "ordered_yaml_load", "(", "f_local_config", ")", "if", "local_config", ":", "self", ".", "config_project_local", ".", "update", "(", "local_config", ")", "# merge in any additional yaml that was passed along", "if", "self", ".", "additional_yaml", ":", "additional_yaml_config", "=", "ordered_yaml_load", "(", "self", ".", "additional_yaml", ")", "if", "additional_yaml_config", ":", "self", ".", "config_additional_yaml", ".", "update", "(", "additional_yaml_config", ")", "self", ".", "config", "=", "merge_config", "(", "OrderedDict", "(", "[", "(", "\"global_config\"", ",", "self", ".", "config_global", ")", ",", "(", "\"global_local\"", ",", "self", ".", "config_global_local", ")", ",", "(", "\"project_config\"", ",", "self", ".", "config_project", ")", ",", "(", "\"project_local_config\"", ",", "self", ".", "config_project_local", ")", ",", "(", "\"additional_yaml\"", ",", "self", ".", "config_additional_yaml", ")", ",", "]", ")", ")" ]
Loads the configuration from YAML, if no override config was passed in initially.
[ "Loads", "the", "configuration", "from", "YAML", "if", "no", "override", "config", "was", "passed", "in", "initially", "." ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/config/BaseProjectConfig.py#L58-L111
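A toy illustration of the precedence this produces; merge_layers below is a hypothetical shallow stand-in for CumulusCI's real merge_config, which merges recursively:

from collections import OrderedDict

def merge_layers(layers):
    # Shallow merge: later layers override earlier ones, key by key.
    merged = {}
    for name, layer in layers.items():
        merged.update(layer or {})
    return merged

config = merge_layers(OrderedDict([
    ("global_config", {"api_version": "45.0", "color": True}),
    ("project_config", {"api_version": "46.0"}),   # cumulusci.yml
    ("project_local_config", {"color": False}),    # local override
]))
print(config)  # {'api_version': '46.0', 'color': False}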
247,554
SFDO-Tooling/CumulusCI
cumulusci/core/config/BaseProjectConfig.py
BaseProjectConfig.init_sentry
def init_sentry(self,): """ Initializes sentry.io error logging for this session """ if not self.use_sentry: return sentry_config = self.keychain.get_service("sentry") tags = { "repo": self.repo_name, "branch": self.repo_branch, "commit": self.repo_commit, "cci version": cumulusci.__version__, } tags.update(self.config.get("sentry_tags", {})) env = self.config.get("sentry_environment", "CumulusCI CLI") self.sentry = raven.Client( dsn=sentry_config.dsn, environment=env, tags=tags, processors=("raven.processors.SanitizePasswordsProcessor",), )
python
def init_sentry(self,): if not self.use_sentry: return sentry_config = self.keychain.get_service("sentry") tags = { "repo": self.repo_name, "branch": self.repo_branch, "commit": self.repo_commit, "cci version": cumulusci.__version__, } tags.update(self.config.get("sentry_tags", {})) env = self.config.get("sentry_environment", "CumulusCI CLI") self.sentry = raven.Client( dsn=sentry_config.dsn, environment=env, tags=tags, processors=("raven.processors.SanitizePasswordsProcessor",), )
[ "def", "init_sentry", "(", "self", ",", ")", ":", "if", "not", "self", ".", "use_sentry", ":", "return", "sentry_config", "=", "self", ".", "keychain", ".", "get_service", "(", "\"sentry\"", ")", "tags", "=", "{", "\"repo\"", ":", "self", ".", "repo_name", ",", "\"branch\"", ":", "self", ".", "repo_branch", ",", "\"commit\"", ":", "self", ".", "repo_commit", ",", "\"cci version\"", ":", "cumulusci", ".", "__version__", ",", "}", "tags", ".", "update", "(", "self", ".", "config", ".", "get", "(", "\"sentry_tags\"", ",", "{", "}", ")", ")", "env", "=", "self", ".", "config", ".", "get", "(", "\"sentry_environment\"", ",", "\"CumulusCI CLI\"", ")", "self", ".", "sentry", "=", "raven", ".", "Client", "(", "dsn", "=", "sentry_config", ".", "dsn", ",", "environment", "=", "env", ",", "tags", "=", "tags", ",", "processors", "=", "(", "\"raven.processors.SanitizePasswordsProcessor\"", ",", ")", ",", ")" ]
Initializes sentry.io error logging for this session
[ "Initializes", "sentry", ".", "io", "error", "logging", "for", "this", "session" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/config/BaseProjectConfig.py#L361-L383
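raven here is the legacy Sentry client; a minimal sketch of the same setup with a hypothetical DSN (constructing the client sends nothing by itself):

import raven

client = raven.Client(
    dsn="https://key@sentry.example.com/1",  # hypothetical DSN
    environment="CumulusCI CLI",
    tags={"repo": "MyRepo", "branch": "master"},
    processors=("raven.processors.SanitizePasswordsProcessor",),
)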
247,555
SFDO-Tooling/CumulusCI
cumulusci/core/config/BaseProjectConfig.py
BaseProjectConfig.get_previous_version
def get_previous_version(self): """Query GitHub releases to find the previous production release""" gh = self.get_github_api() repo = gh.repository(self.repo_owner, self.repo_name) most_recent = None for release in repo.releases(): # Return the second release that matches the release prefix if release.tag_name.startswith(self.project__git__prefix_release): if most_recent is None: most_recent = release else: return LooseVersion(self.get_version_for_tag(release.tag_name))
python
def get_previous_version(self): gh = self.get_github_api() repo = gh.repository(self.repo_owner, self.repo_name) most_recent = None for release in repo.releases(): # Return the second release that matches the release prefix if release.tag_name.startswith(self.project__git__prefix_release): if most_recent is None: most_recent = release else: return LooseVersion(self.get_version_for_tag(release.tag_name))
[ "def", "get_previous_version", "(", "self", ")", ":", "gh", "=", "self", ".", "get_github_api", "(", ")", "repo", "=", "gh", ".", "repository", "(", "self", ".", "repo_owner", ",", "self", ".", "repo_name", ")", "most_recent", "=", "None", "for", "release", "in", "repo", ".", "releases", "(", ")", ":", "# Return the second release that matches the release prefix", "if", "release", ".", "tag_name", ".", "startswith", "(", "self", ".", "project__git__prefix_release", ")", ":", "if", "most_recent", "is", "None", ":", "most_recent", "=", "release", "else", ":", "return", "LooseVersion", "(", "self", ".", "get_version_for_tag", "(", "release", ".", "tag_name", ")", ")" ]
Query GitHub releases to find the previous production release
[ "Query", "GitHub", "releases", "to", "find", "the", "previous", "production", "release" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/config/BaseProjectConfig.py#L418-L429
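The LooseVersion wrapper matters because plain string comparison misorders version numbers; a two-line illustration:

from distutils.version import LooseVersion

print("1.10" > "1.9")                              # False: string compare
print(LooseVersion("1.10") > LooseVersion("1.9"))  # True: version compare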
247,556
SFDO-Tooling/CumulusCI
cumulusci/core/config/BaseProjectConfig.py
BaseProjectConfig.get_static_dependencies
def get_static_dependencies(self, dependencies=None, include_beta=None): """Resolves the project -> dependencies section of cumulusci.yml to convert dynamic github dependencies into static dependencies by inspecting the referenced repositories Keyword arguments: :param dependencies: a list of dependencies to resolve :param include_beta: when true, return the latest github release, even if pre-release; else return the latest stable release """ if not dependencies: dependencies = self.project__dependencies if not dependencies: return [] static_dependencies = [] for dependency in dependencies: if "github" not in dependency: static_dependencies.append(dependency) else: static = self.process_github_dependency( dependency, include_beta=include_beta ) static_dependencies.extend(static) return static_dependencies
python
def get_static_dependencies(self, dependencies=None, include_beta=None): if not dependencies: dependencies = self.project__dependencies if not dependencies: return [] static_dependencies = [] for dependency in dependencies: if "github" not in dependency: static_dependencies.append(dependency) else: static = self.process_github_dependency( dependency, include_beta=include_beta ) static_dependencies.extend(static) return static_dependencies
[ "def", "get_static_dependencies", "(", "self", ",", "dependencies", "=", "None", ",", "include_beta", "=", "None", ")", ":", "if", "not", "dependencies", ":", "dependencies", "=", "self", ".", "project__dependencies", "if", "not", "dependencies", ":", "return", "[", "]", "static_dependencies", "=", "[", "]", "for", "dependency", "in", "dependencies", ":", "if", "\"github\"", "not", "in", "dependency", ":", "static_dependencies", ".", "append", "(", "dependency", ")", "else", ":", "static", "=", "self", ".", "process_github_dependency", "(", "dependency", ",", "include_beta", "=", "include_beta", ")", "static_dependencies", ".", "extend", "(", "static", ")", "return", "static_dependencies" ]
Resolves the project -> dependencies section of cumulusci.yml to convert dynamic github dependencies into static dependencies by inspecting the referenced repositories Keyword arguments: :param dependencies: a list of dependencies to resolve :param include_beta: when true, return the latest github release, even if pre-release; else return the latest stable release
[ "Resolves", "the", "project", "-", ">", "dependencies", "section", "of", "cumulusci", ".", "yml", "to", "convert", "dynamic", "github", "dependencies", "into", "static", "dependencies", "by", "inspecting", "the", "referenced", "repositories" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/config/BaseProjectConfig.py#L496-L521
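A toy illustration of the static/dynamic split, with a stub resolver standing in for process_github_dependency:

def resolve(dependencies, resolver):
    static = []
    for dep in dependencies:
        if "github" not in dep:
            static.append(dep)            # already static: keep as-is
        else:
            static.extend(resolver(dep))  # expand the repo's packages
    return static

deps = [
    {"namespace": "npsp", "version": "3.0"},
    {"github": "https://github.com/SalesforceFoundation/HEDAP"},
]
stub = lambda dep: [{"namespace": "hed", "version": "1.0"}]
print(resolve(deps, stub))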
247,557
SFDO-Tooling/CumulusCI
cumulusci/core/tasks.py
BaseTask._init_logger
def _init_logger(self): """ Initializes self.logger """ if self.flow: self.logger = self.flow.logger.getChild(self.__class__.__name__) else: self.logger = logging.getLogger(__name__)
python
def _init_logger(self): if self.flow: self.logger = self.flow.logger.getChild(self.__class__.__name__) else: self.logger = logging.getLogger(__name__)
[ "def", "_init_logger", "(", "self", ")", ":", "if", "self", ".", "flow", ":", "self", ".", "logger", "=", "self", ".", "flow", ".", "logger", ".", "getChild", "(", "self", ".", "__class__", ".", "__name__", ")", "else", ":", "self", ".", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")" ]
Initializes self.logger
[ "Initializes", "self", ".", "logger" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/tasks.py#L79-L84
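A small demonstration of the getChild naming scheme used here, with hypothetical logger names:

import logging

flow_logger = logging.getLogger("cumulusci.flow")
task_logger = flow_logger.getChild("Deploy")
print(task_logger.name)  # cumulusci.flow.Deploy
# The child propagates records to its parent, so flow-level handlers
# also receive everything the task logs.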
247,558
SFDO-Tooling/CumulusCI
cumulusci/core/tasks.py
BaseTask._init_options
def _init_options(self, kwargs): """ Initializes self.options """ self.options = self.task_config.options if self.options is None: self.options = {} if kwargs: self.options.update(kwargs) # Handle dynamic lookup of project_config values via $project_config.attr for option, value in list(self.options.items()): try: if value.startswith("$project_config."): attr = value.replace("$project_config.", "", 1) self.options[option] = getattr(self.project_config, attr, None) except AttributeError: pass
python
def _init_options(self, kwargs): self.options = self.task_config.options if self.options is None: self.options = {} if kwargs: self.options.update(kwargs) # Handle dynamic lookup of project_config values via $project_config.attr for option, value in list(self.options.items()): try: if value.startswith("$project_config."): attr = value.replace("$project_config.", "", 1) self.options[option] = getattr(self.project_config, attr, None) except AttributeError: pass
[ "def", "_init_options", "(", "self", ",", "kwargs", ")", ":", "self", ".", "options", "=", "self", ".", "task_config", ".", "options", "if", "self", ".", "options", "is", "None", ":", "self", ".", "options", "=", "{", "}", "if", "kwargs", ":", "self", ".", "options", ".", "update", "(", "kwargs", ")", "# Handle dynamic lookup of project_config values via $project_config.attr", "for", "option", ",", "value", "in", "list", "(", "self", ".", "options", ".", "items", "(", ")", ")", ":", "try", ":", "if", "value", ".", "startswith", "(", "\"$project_config.\"", ")", ":", "attr", "=", "value", ".", "replace", "(", "\"$project_config.\"", ",", "\"\"", ",", "1", ")", "self", ".", "options", "[", "option", "]", "=", "getattr", "(", "self", ".", "project_config", ",", "attr", ",", "None", ")", "except", "AttributeError", ":", "pass" ]
Initializes self.options
[ "Initializes", "self", ".", "options" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/tasks.py#L86-L101
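A runnable miniature of the $project_config lookup, using a stub in place of a real project config:

class StubProjectConfig:
    project__package__name = "MyPackage"

def resolve_options(options, project_config):
    for option, value in list(options.items()):
        try:
            if value.startswith("$project_config."):
                attr = value.replace("$project_config.", "", 1)
                options[option] = getattr(project_config, attr, None)
        except AttributeError:  # non-string values have no startswith
            pass
    return options

opts = {"name": "$project_config.project__package__name", "count": 3}
print(resolve_options(opts, StubProjectConfig()))
# {'name': 'MyPackage', 'count': 3}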
247,559
SFDO-Tooling/CumulusCI
cumulusci/core/tasks.py
BaseTask._log_begin
def _log_begin(self): """ Log the beginning of the task execution """ self.logger.info("Beginning task: %s", self.__class__.__name__) if self.salesforce_task and not self.flow: self.logger.info("%15s %s", "As user:", self.org_config.username) self.logger.info("%15s %s", "In org:", self.org_config.org_id) self.logger.info("")
python
def _log_begin(self): self.logger.info("Beginning task: %s", self.__class__.__name__) if self.salesforce_task and not self.flow: self.logger.info("%15s %s", "As user:", self.org_config.username) self.logger.info("%15s %s", "In org:", self.org_config.org_id) self.logger.info("")
[ "def", "_log_begin", "(", "self", ")", ":", "self", ".", "logger", ".", "info", "(", "\"Beginning task: %s\"", ",", "self", ".", "__class__", ".", "__name__", ")", "if", "self", ".", "salesforce_task", "and", "not", "self", ".", "flow", ":", "self", ".", "logger", ".", "info", "(", "\"%15s %s\"", ",", "\"As user:\"", ",", "self", ".", "org_config", ".", "username", ")", "self", ".", "logger", ".", "info", "(", "\"%15s %s\"", ",", "\"In org:\"", ",", "self", ".", "org_config", ".", "org_id", ")", "self", ".", "logger", ".", "info", "(", "\"\"", ")" ]
Log the beginning of the task execution
[ "Log", "the", "beginning", "of", "the", "task", "execution" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/tasks.py#L166-L172
247,560
SFDO-Tooling/CumulusCI
cumulusci/core/tasks.py
BaseTask._poll
def _poll(self): """ poll for a result in a loop """ while True: self.poll_count += 1 self._poll_action() if self.poll_complete: break time.sleep(self.poll_interval_s) self._poll_update_interval()
python
def _poll(self): while True: self.poll_count += 1 self._poll_action() if self.poll_complete: break time.sleep(self.poll_interval_s) self._poll_update_interval()
[ "def", "_poll", "(", "self", ")", ":", "while", "True", ":", "self", ".", "poll_count", "+=", "1", "self", ".", "_poll_action", "(", ")", "if", "self", ".", "poll_complete", ":", "break", "time", ".", "sleep", "(", "self", ".", "poll_interval_s", ")", "self", ".", "_poll_update_interval", "(", ")" ]
poll for a result in a loop
[ "poll", "for", "a", "result", "in", "a", "loop" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/tasks.py#L204-L212
247,561
SFDO-Tooling/CumulusCI
cumulusci/core/tasks.py
BaseTask._poll_update_interval
def _poll_update_interval(self): """ update the polling interval to be used next iteration """ # Increase by 1 second every 3 polls if old_div(self.poll_count, 3) > self.poll_interval_level: self.poll_interval_level += 1 self.poll_interval_s += 1 self.logger.info( "Increased polling interval to %d seconds", self.poll_interval_s )
python
def _poll_update_interval(self): # Increase by 1 second every 3 polls if old_div(self.poll_count, 3) > self.poll_interval_level: self.poll_interval_level += 1 self.poll_interval_s += 1 self.logger.info( "Increased polling interval to %d seconds", self.poll_interval_s )
[ "def", "_poll_update_interval", "(", "self", ")", ":", "# Increase by 1 second every 3 polls", "if", "old_div", "(", "self", ".", "poll_count", ",", "3", ")", ">", "self", ".", "poll_interval_level", ":", "self", ".", "poll_interval_level", "+=", "1", "self", ".", "poll_interval_s", "+=", "1", "self", ".", "logger", ".", "info", "(", "\"Increased polling interval to %d seconds\"", ",", "self", ".", "poll_interval_s", ")" ]
update the polling interval to be used next iteration
[ "update", "the", "polling", "interval", "to", "be", "used", "next", "iteration" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/tasks.py#L221-L229
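old_div here is floor division (it comes from the python-future compatibility layer), so the delay grows by one second after every third poll; a sketch reproducing the schedule:

def poll_schedule(polls, interval=1):
    # Reproduce the backoff: the level rises once per three polls.
    level, out = 0, []
    for count in range(1, polls + 1):
        if count // 3 > level:  # same test as old_div(count, 3) > level
            level += 1
            interval += 1
        out.append(interval)
    return out

print(poll_schedule(10))  # [1, 1, 2, 2, 2, 3, 3, 3, 4, 4]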
247,562
SFDO-Tooling/CumulusCI
cumulusci/core/config/BaseGlobalConfig.py
BaseGlobalConfig.get_project_config
def get_project_config(self, *args, **kwargs): """ Returns a ProjectConfig for the given project """ warnings.warn( "BaseGlobalConfig.get_project_config is pending deprecation", DeprecationWarning, ) return self.project_config_class(self, *args, **kwargs)
python
def get_project_config(self, *args, **kwargs): warnings.warn( "BaseGlobalConfig.get_project_config is pending deprecation", DeprecationWarning, ) return self.project_config_class(self, *args, **kwargs)
[ "def", "get_project_config", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "\"BaseGlobalConfig.get_project_config is pending deprecation\"", ",", "DeprecationWarning", ",", ")", "return", "self", ".", "project_config_class", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Returns a ProjectConfig for the given project
[ "Returns", "a", "ProjectConfig", "for", "the", "given", "project" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/config/BaseGlobalConfig.py#L25-L31
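DeprecationWarning is ignored by default outside tests; a quick way to surface the kind of warning this method emits (old_api below is illustrative, not the real class):

import warnings

def old_api():
    warnings.warn("old_api is pending deprecation", DeprecationWarning)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    old_api()
    print(caught[0].category.__name__)  # DeprecationWarning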
247,563
SFDO-Tooling/CumulusCI
cumulusci/core/config/BaseGlobalConfig.py
BaseGlobalConfig._load_config
def _load_config(self):
    """ Loads the local configuration """
    # load the global config
    with open(self.config_global_path, "r") as f_config:
        config = ordered_yaml_load(f_config)
    self.config_global = config

    # Load the local config
    if self.config_global_local_path:
        config = ordered_yaml_load(open(self.config_global_local_path, "r"))
        self.config_global_local = config

    self.config = merge_config(
        OrderedDict(
            [
                ("global_config", self.config_global),
                ("global_local", self.config_global_local),
            ]
        )
    )
python
def _load_config(self):
    # load the global config
    with open(self.config_global_path, "r") as f_config:
        config = ordered_yaml_load(f_config)
    self.config_global = config

    # Load the local config
    if self.config_global_local_path:
        config = ordered_yaml_load(open(self.config_global_local_path, "r"))
        self.config_global_local = config

    self.config = merge_config(
        OrderedDict(
            [
                ("global_config", self.config_global),
                ("global_local", self.config_global_local),
            ]
        )
    )
[ "def", "_load_config", "(", "self", ")", ":", "# load the global config", "with", "open", "(", "self", ".", "config_global_path", ",", "\"r\"", ")", "as", "f_config", ":", "config", "=", "ordered_yaml_load", "(", "f_config", ")", "self", ".", "config_global", "=", "config", "# Load the local config", "if", "self", ".", "config_global_local_path", ":", "config", "=", "ordered_yaml_load", "(", "open", "(", "self", ".", "config_global_local_path", ",", "\"r\"", ")", ")", "self", ".", "config_global_local", "=", "config", "self", ".", "config", "=", "merge_config", "(", "OrderedDict", "(", "[", "(", "\"global_config\"", ",", "self", ".", "config_global", ")", ",", "(", "\"global_local\"", ",", "self", ".", "config_global_local", ")", ",", "]", ")", ")" ]
Loads the global configuration and merges in the local configuration overrides.
[ "Loads", "the", "global", "configuration", "and", "merges", "in", "the", "local", "configuration", "overrides", "." ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/config/BaseGlobalConfig.py#L51-L70
247,564
SFDO-Tooling/CumulusCI
cumulusci/core/config/OrgConfig.py
OrgConfig.username
def username(self):
    """ Username for the org connection. """
    username = self.config.get("username")
    if not username:
        username = self.userinfo__preferred_username
    return username
python
def username(self):
    username = self.config.get("username")
    if not username:
        username = self.userinfo__preferred_username
    return username
[ "def", "username", "(", "self", ")", ":", "username", "=", "self", ".", "config", ".", "get", "(", "\"username\"", ")", "if", "not", "username", ":", "username", "=", "self", ".", "userinfo__preferred_username", "return", "username" ]
Username for the org connection.
[ "Username", "for", "the", "org", "connection", "." ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/config/OrgConfig.py#L67-L72
247,565
SFDO-Tooling/CumulusCI
cumulusci/cli/cci.py
timestamp_file
def timestamp_file():
    """Opens a file for tracking the time of the last version check"""
    config_dir = os.path.join(
        os.path.expanduser("~"), BaseGlobalConfig.config_local_dir
    )
    if not os.path.exists(config_dir):
        os.mkdir(config_dir)
    timestamp_file = os.path.join(config_dir, "cumulus_timestamp")

    try:
        with open(timestamp_file, "r+") as f:
            yield f
    except IOError:  # file does not exist
        with open(timestamp_file, "w+") as f:
            yield f
python
def timestamp_file():
    config_dir = os.path.join(
        os.path.expanduser("~"), BaseGlobalConfig.config_local_dir
    )
    if not os.path.exists(config_dir):
        os.mkdir(config_dir)
    timestamp_file = os.path.join(config_dir, "cumulus_timestamp")

    try:
        with open(timestamp_file, "r+") as f:
            yield f
    except IOError:  # file does not exist
        with open(timestamp_file, "w+") as f:
            yield f
[ "def", "timestamp_file", "(", ")", ":", "config_dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "expanduser", "(", "\"~\"", ")", ",", "BaseGlobalConfig", ".", "config_local_dir", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "config_dir", ")", ":", "os", ".", "mkdir", "(", "config_dir", ")", "timestamp_file", "=", "os", ".", "path", ".", "join", "(", "config_dir", ",", "\"cumulus_timestamp\"", ")", "try", ":", "with", "open", "(", "timestamp_file", ",", "\"r+\"", ")", "as", "f", ":", "yield", "f", "except", "IOError", ":", "# file does not exist", "with", "open", "(", "timestamp_file", ",", "\"w+\"", ")", "as", "f", ":", "yield", "f" ]
Opens a file for tracking the time of the last version check
[ "Opens", "a", "file", "for", "tracking", "the", "time", "of", "the", "last", "version", "check" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/cli/cci.py#L53-L69
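The function is a generator, so it presumably carries a contextlib.contextmanager decorator in the source (the decorator is not shown in this record). A hedged usage sketch under that assumption:

import time

# Assumes timestamp_file is wrapped with @contextlib.contextmanager.
with timestamp_file() as f:
    stamp = f.read().strip()
    last_check = float(stamp) if stamp else 0  # 0 if the file was just created
    f.seek(0)
    f.write(str(time.time()))  # record this version check
print("seconds since last check:", time.time() - last_check)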
247,566
SFDO-Tooling/CumulusCI
cumulusci/cli/cci.py
pass_config
def pass_config(func=None, **config_kw):
    """Decorator which passes the CCI config object as the first arg to a click command."""

    def decorate(func):
        def new_func(*args, **kw):
            config = load_config(**config_kw)
            func(config, *args, **kw)

        return functools.update_wrapper(new_func, func)

    if func is None:
        return decorate
    else:
        return decorate(func)
python
def pass_config(func=None, **config_kw):
    def decorate(func):
        def new_func(*args, **kw):
            config = load_config(**config_kw)
            func(config, *args, **kw)

        return functools.update_wrapper(new_func, func)

    if func is None:
        return decorate
    else:
        return decorate(func)
[ "def", "pass_config", "(", "func", "=", "None", ",", "*", "*", "config_kw", ")", ":", "def", "decorate", "(", "func", ")", ":", "def", "new_func", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "config", "=", "load_config", "(", "*", "*", "config_kw", ")", "func", "(", "config", ",", "*", "args", ",", "*", "*", "kw", ")", "return", "functools", ".", "update_wrapper", "(", "new_func", ",", "func", ")", "if", "func", "is", "None", ":", "return", "decorate", "else", ":", "return", "decorate", "(", "func", ")" ]
Decorator which passes the CCI config object as the first arg to a click command.
[ "Decorator", "which", "passes", "the", "CCI", "config", "object", "as", "the", "first", "arg", "to", "a", "click", "command", "." ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/cli/cci.py#L213-L226
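A hypothetical usage sketch; load_config is defined elsewhere in cci.py, and the keychain attribute on the config it returns is an assumption made here for illustration:

import click

@click.command(name="demo-orgs")
@pass_config  # injects the loaded config as the first positional argument
def demo_orgs(config):
    # `config.keychain` is assumed for illustration.
    for org in config.keychain.list_orgs():
        click.echo(org)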
247,567
SFDO-Tooling/CumulusCI
cumulusci/cli/cci.py
ConnectServiceCommand.list_commands
def list_commands(self, ctx):
    """ list the services that can be configured """
    config = load_config(**self.load_config_kwargs)
    services = self._get_services_config(config)
    return sorted(services.keys())
python
def list_commands(self, ctx):
    config = load_config(**self.load_config_kwargs)
    services = self._get_services_config(config)
    return sorted(services.keys())
[ "def", "list_commands", "(", "self", ",", "ctx", ")", ":", "config", "=", "load_config", "(", "*", "*", "self", ".", "load_config_kwargs", ")", "services", "=", "self", ".", "_get_services_config", "(", "config", ")", "return", "sorted", "(", "services", ".", "keys", "(", ")", ")" ]
list the services that can be configured
[ "list", "the", "services", "that", "can", "be", "configured" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/cli/cci.py#L612-L616
247,568
SFDO-Tooling/CumulusCI
cumulusci/utils.py
parse_api_datetime
def parse_api_datetime(value):
    """ parse a datetime returned from the salesforce API.

    in python 3 we should just use a strptime %z, but until then we're just
    going to assert that its a fixed offset of +0000 since thats the observed
    behavior. getting python 2 to support fixed offset parsing is too
    complicated for what we need imo."""
    dt = datetime.strptime(value[0:DATETIME_LEN], API_DATE_FORMAT)
    offset_str = value[DATETIME_LEN:]
    assert offset_str in ["+0000", "Z"], "The Salesforce API returned a weird timezone."
    return dt
python
def parse_api_datetime(value):
    dt = datetime.strptime(value[0:DATETIME_LEN], API_DATE_FORMAT)
    offset_str = value[DATETIME_LEN:]
    assert offset_str in ["+0000", "Z"], "The Salesforce API returned a weird timezone."
    return dt
[ "def", "parse_api_datetime", "(", "value", ")", ":", "dt", "=", "datetime", ".", "strptime", "(", "value", "[", "0", ":", "DATETIME_LEN", "]", ",", "API_DATE_FORMAT", ")", "offset_str", "=", "value", "[", "DATETIME_LEN", ":", "]", "assert", "offset_str", "in", "[", "\"+0000\"", ",", "\"Z\"", "]", ",", "\"The Salesforce API returned a weird timezone.\"", "return", "dt" ]
Parse a datetime returned from the Salesforce API. In Python 3 we should just use strptime with %z, but until then we're just going to assert that it's a fixed offset of +0000, since that's the observed behavior. Getting Python 2 to support fixed-offset parsing is too complicated for what we need.
[ "Parse", "a", "datetime", "returned", "from", "the", "Salesforce", "API", "." ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/utils.py#L38-L47
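A usage sketch. DATETIME_LEN and API_DATE_FORMAT are module constants not shown in this record; the values below are assumptions consistent with the slicing and the "+0000"/"Z" suffix check:

from datetime import datetime

API_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"       # assumed value
DATETIME_LEN = len("2018-08-07T16:00:56.000")  # assumed value: 23

print(parse_api_datetime("2018-08-07T16:00:56.000+0000"))
# 2018-08-07 16:00:56 -- a naive datetime; the offset is only asserted, not applied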
247,569
SFDO-Tooling/CumulusCI
cumulusci/utils.py
removeXmlElement
def removeXmlElement(name, directory, file_pattern, logger=None):
    """ Recursively walk a directory and remove XML elements """
    for path, dirs, files in os.walk(os.path.abspath(directory)):
        for filename in fnmatch.filter(files, file_pattern):
            filepath = os.path.join(path, filename)
            remove_xml_element_file(name, filepath)
python
def removeXmlElement(name, directory, file_pattern, logger=None):
    for path, dirs, files in os.walk(os.path.abspath(directory)):
        for filename in fnmatch.filter(files, file_pattern):
            filepath = os.path.join(path, filename)
            remove_xml_element_file(name, filepath)
[ "def", "removeXmlElement", "(", "name", ",", "directory", ",", "file_pattern", ",", "logger", "=", "None", ")", ":", "for", "path", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "os", ".", "path", ".", "abspath", "(", "directory", ")", ")", ":", "for", "filename", "in", "fnmatch", ".", "filter", "(", "files", ",", "file_pattern", ")", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "path", ",", "filename", ")", "remove_xml_element_file", "(", "name", ",", "filepath", ")" ]
Recursively walk a directory and remove XML elements
[ "Recursively", "walk", "a", "directory", "and", "remove", "XML", "elements" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/utils.py#L100-L105
247,570
SFDO-Tooling/CumulusCI
cumulusci/utils.py
remove_xml_element_file
def remove_xml_element_file(name, path):
    """ Remove XML elements from a single file """
    ET.register_namespace("", "http://soap.sforce.com/2006/04/metadata")
    tree = elementtree_parse_file(path)
    tree = remove_xml_element(name, tree)
    return tree.write(path, encoding=UTF8, xml_declaration=True)
python
def remove_xml_element_file(name, path):
    ET.register_namespace("", "http://soap.sforce.com/2006/04/metadata")
    tree = elementtree_parse_file(path)
    tree = remove_xml_element(name, tree)
    return tree.write(path, encoding=UTF8, xml_declaration=True)
[ "def", "remove_xml_element_file", "(", "name", ",", "path", ")", ":", "ET", ".", "register_namespace", "(", "\"\"", ",", "\"http://soap.sforce.com/2006/04/metadata\"", ")", "tree", "=", "elementtree_parse_file", "(", "path", ")", "tree", "=", "remove_xml_element", "(", "name", ",", "tree", ")", "return", "tree", ".", "write", "(", "path", ",", "encoding", "=", "UTF8", ",", "xml_declaration", "=", "True", ")" ]
Remove XML elements from a single file
[ "Remove", "XML", "elements", "from", "a", "single", "file" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/utils.py#L108-L113
247,571
SFDO-Tooling/CumulusCI
cumulusci/utils.py
remove_xml_element_string
def remove_xml_element_string(name, content):
    """ Remove XML elements from a string """
    ET.register_namespace("", "http://soap.sforce.com/2006/04/metadata")
    tree = ET.fromstring(content)
    tree = remove_xml_element(name, tree)
    clean_content = ET.tostring(tree, encoding=UTF8)
    return clean_content
python
def remove_xml_element_string(name, content):
    ET.register_namespace("", "http://soap.sforce.com/2006/04/metadata")
    tree = ET.fromstring(content)
    tree = remove_xml_element(name, tree)
    clean_content = ET.tostring(tree, encoding=UTF8)
    return clean_content
[ "def", "remove_xml_element_string", "(", "name", ",", "content", ")", ":", "ET", ".", "register_namespace", "(", "\"\"", ",", "\"http://soap.sforce.com/2006/04/metadata\"", ")", "tree", "=", "ET", ".", "fromstring", "(", "content", ")", "tree", "=", "remove_xml_element", "(", "name", ",", "tree", ")", "clean_content", "=", "ET", ".", "tostring", "(", "tree", ",", "encoding", "=", "UTF8", ")", "return", "clean_content" ]
Remove XML elements from a string
[ "Remove", "XML", "elements", "from", "a", "string" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/utils.py#L116-L122
247,572
SFDO-Tooling/CumulusCI
cumulusci/utils.py
remove_xml_element
def remove_xml_element(name, tree):
    """ Removes XML elements from an ElementTree content tree """
    # root = tree.getroot()
    remove = tree.findall(
        ".//{{http://soap.sforce.com/2006/04/metadata}}{}".format(name)
    )
    if not remove:
        return tree

    parent_map = {c: p for p in tree.iter() for c in p}

    for elem in remove:
        parent = parent_map[elem]
        parent.remove(elem)

    return tree
python
def remove_xml_element(name, tree):
    # root = tree.getroot()
    remove = tree.findall(
        ".//{{http://soap.sforce.com/2006/04/metadata}}{}".format(name)
    )
    if not remove:
        return tree

    parent_map = {c: p for p in tree.iter() for c in p}

    for elem in remove:
        parent = parent_map[elem]
        parent.remove(elem)

    return tree
[ "def", "remove_xml_element", "(", "name", ",", "tree", ")", ":", "# root = tree.getroot()", "remove", "=", "tree", ".", "findall", "(", "\".//{{http://soap.sforce.com/2006/04/metadata}}{}\"", ".", "format", "(", "name", ")", ")", "if", "not", "remove", ":", "return", "tree", "parent_map", "=", "{", "c", ":", "p", "for", "p", "in", "tree", ".", "iter", "(", ")", "for", "c", "in", "p", "}", "for", "elem", "in", "remove", ":", "parent", "=", "parent_map", "[", "elem", "]", "parent", ".", "remove", "(", "elem", ")", "return", "tree" ]
Removes XML elements from an ElementTree content tree
[ "Removes", "XML", "elements", "from", "an", "ElementTree", "content", "tree" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/utils.py#L125-L140
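The dict comprehension exists because xml.etree.ElementTree elements keep no reference to their parent, so removal requires building a child-to-parent map first. A self-contained demonstration of the same trick without the Salesforce namespace:

import xml.etree.ElementTree as ET

root = ET.fromstring("<root><keep/><drop/><branch><drop/></branch></root>")
# ElementTree has no getparent(), so map every child to its parent up front.
parent_map = {c: p for p in root.iter() for c in p}
for elem in root.findall(".//drop"):
    parent_map[elem].remove(elem)
print(ET.tostring(root).decode())  # <root><keep /><branch /></root>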
247,573
SFDO-Tooling/CumulusCI
cumulusci/utils.py
temporary_dir
def temporary_dir():
    """Context manager that creates a temporary directory and chdirs to it.

    When the context manager exits it returns to the previous cwd
    and deletes the temporary directory.
    """
    d = tempfile.mkdtemp()
    try:
        with cd(d):
            yield d
    finally:
        if os.path.exists(d):
            shutil.rmtree(d)
python
def temporary_dir():
    d = tempfile.mkdtemp()
    try:
        with cd(d):
            yield d
    finally:
        if os.path.exists(d):
            shutil.rmtree(d)
[ "def", "temporary_dir", "(", ")", ":", "d", "=", "tempfile", ".", "mkdtemp", "(", ")", "try", ":", "with", "cd", "(", "d", ")", ":", "yield", "d", "finally", ":", "if", "os", ".", "path", ".", "exists", "(", "d", ")", ":", "shutil", ".", "rmtree", "(", "d", ")" ]
Context manager that creates a temporary directory and chdirs to it. When the context manager exits it returns to the previous cwd and deletes the temporary directory.
[ "Context", "manager", "that", "creates", "a", "temporary", "directory", "and", "chdirs", "to", "it", "." ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/utils.py#L448-L460
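cd is a companion context manager in the same module (not shown in this record) that chdirs and restores the previous working directory on exit; the function itself is presumably decorated with contextlib.contextmanager. Usage then looks like:

import os

with temporary_dir() as d:
    # cwd is now the temp dir (realpath guards against symlinked temp roots)
    assert os.getcwd() == os.path.realpath(d)
    with open("scratch.txt", "w") as f:
        f.write("only exists inside the with block")
# back in the original cwd; d and its contents are gone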
247,574
SFDO-Tooling/CumulusCI
cumulusci/utils.py
in_directory
def in_directory(filepath, dirpath):
    """Returns a boolean for whether filepath is contained in dirpath.

    Normalizes the paths (e.g. resolving symlinks and ..)
    so this is the safe way to make sure a user-configured path
    is located inside the user's project repo.
    """
    filepath = os.path.realpath(filepath)
    dirpath = os.path.realpath(dirpath)
    return filepath == dirpath or filepath.startswith(os.path.join(dirpath, ""))
python
def in_directory(filepath, dirpath):
    filepath = os.path.realpath(filepath)
    dirpath = os.path.realpath(dirpath)
    return filepath == dirpath or filepath.startswith(os.path.join(dirpath, ""))
[ "def", "in_directory", "(", "filepath", ",", "dirpath", ")", ":", "filepath", "=", "os", ".", "path", ".", "realpath", "(", "filepath", ")", "dirpath", "=", "os", ".", "path", ".", "realpath", "(", "dirpath", ")", "return", "filepath", "==", "dirpath", "or", "filepath", ".", "startswith", "(", "os", ".", "path", ".", "join", "(", "dirpath", ",", "\"\"", ")", ")" ]
Returns a boolean for whether filepath is contained in dirpath. Normalizes the paths (e.g. resolving symlinks and ..) so this is the safe way to make sure a user-configured path is located inside the user's project repo.
[ "Returns", "a", "boolean", "for", "whether", "filepath", "is", "contained", "in", "dirpath", "." ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/utils.py#L469-L478
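A short illustration of why the normalization matters, and of the os.path.join(dirpath, "") idiom, which appends a trailing separator so that a sibling like /repo2 is not mistaken for a child of /repo:

import os

# A naive prefix check is fooled by ".." traversal:
print("/repo/../etc/passwd".startswith("/repo/"))          # True (wrong answer)
# realpath collapses the traversal before the comparison:
print(os.path.realpath("/repo/../etc/passwd"))              # /etc/passwd
# The trailing separator keeps sibling directories out:
print("/repo2/file".startswith(os.path.join("/repo", "")))  # False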
247,575
SFDO-Tooling/CumulusCI
cumulusci/utils.py
log_progress
def log_progress(
    iterable,
    logger,
    batch_size=10000,
    progress_message="Processing... ({})",
    done_message="Done! (Total: {})",
):
    """Log progress while iterating."""
    i = 0
    for x in iterable:
        yield x
        i += 1
        if not i % batch_size:
            logger.info(progress_message.format(i))
    logger.info(done_message.format(i))
python
def log_progress(
    iterable,
    logger,
    batch_size=10000,
    progress_message="Processing... ({})",
    done_message="Done! (Total: {})",
):
    i = 0
    for x in iterable:
        yield x
        i += 1
        if not i % batch_size:
            logger.info(progress_message.format(i))
    logger.info(done_message.format(i))
[ "def", "log_progress", "(", "iterable", ",", "logger", ",", "batch_size", "=", "10000", ",", "progress_message", "=", "\"Processing... ({})\"", ",", "done_message", "=", "\"Done! (Total: {})\"", ",", ")", ":", "i", "=", "0", "for", "x", "in", "iterable", ":", "yield", "x", "i", "+=", "1", "if", "not", "i", "%", "batch_size", ":", "logger", ".", "info", "(", "progress_message", ".", "format", "(", "i", ")", ")", "logger", ".", "info", "(", "done_message", ".", "format", "(", "i", ")", ")" ]
Log progress while iterating.
[ "Log", "progress", "while", "iterating", "." ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/utils.py#L481-L496
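Because the function is a generator, the progress logging only happens as the wrapped iterable is actually consumed; a small usage sketch:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("demo")

for _ in log_progress(range(25), logger, batch_size=10):
    pass
# INFO:demo:Processing... (10)
# INFO:demo:Processing... (20)
# INFO:demo:Done! (Total: 25)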
247,576
SFDO-Tooling/CumulusCI
cumulusci/robotframework/CumulusCI.py
CumulusCI.login_url
def login_url(self, org=None):
    """ Returns the login url which will automatically log into the target
    Salesforce org. By default, the org_name passed to the library constructor
    is used but this can be overridden with the org option to log into a
    different org.
    """
    if org is None:
        org = self.org
    else:
        org = self.keychain.get_org(org)
    return org.start_url
python
def login_url(self, org=None):
    if org is None:
        org = self.org
    else:
        org = self.keychain.get_org(org)
    return org.start_url
[ "def", "login_url", "(", "self", ",", "org", "=", "None", ")", ":", "if", "org", "is", "None", ":", "org", "=", "self", ".", "org", "else", ":", "org", "=", "self", ".", "keychain", ".", "get_org", "(", "org", ")", "return", "org", ".", "start_url" ]
Returns the login url which will automatically log into the target Salesforce org. By default, the org_name passed to the library constructor is used but this can be overridden with the org option to log into a different org.
[ "Returns", "the", "login", "url", "which", "will", "automatically", "log", "into", "the", "target", "Salesforce", "org", ".", "By", "default", "the", "org_name", "passed", "to", "the", "library", "constructor", "is", "used", "but", "this", "can", "be", "overridden", "with", "the", "org", "option", "to", "log", "into", "a", "different", "org", "." ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/robotframework/CumulusCI.py#L105-L115
247,577
SFDO-Tooling/CumulusCI
cumulusci/robotframework/CumulusCI.py
CumulusCI.run_task
def run_task(self, task_name, **options):
    """ Runs a named CumulusCI task for the current project with optional
    support for overriding task options via kwargs.

    Examples:
    | =Keyword= | =task_name= | =task_options=             | =comment=                        |
    | Run Task  | deploy      |                            | Run deploy with standard options |
    | Run Task  | deploy      | path=path/to/some/metadata | Run deploy with custom path      |
    """
    task_config = self.project_config.get_task(task_name)
    class_path = task_config.class_path
    logger.console("\n")
    task_class, task_config = self._init_task(class_path, options, task_config)
    return self._run_task(task_class, task_config)
python
def run_task(self, task_name, **options):
    task_config = self.project_config.get_task(task_name)
    class_path = task_config.class_path
    logger.console("\n")
    task_class, task_config = self._init_task(class_path, options, task_config)
    return self._run_task(task_class, task_config)
[ "def", "run_task", "(", "self", ",", "task_name", ",", "*", "*", "options", ")", ":", "task_config", "=", "self", ".", "project_config", ".", "get_task", "(", "task_name", ")", "class_path", "=", "task_config", ".", "class_path", "logger", ".", "console", "(", "\"\\n\"", ")", "task_class", ",", "task_config", "=", "self", ".", "_init_task", "(", "class_path", ",", "options", ",", "task_config", ")", "return", "self", ".", "_run_task", "(", "task_class", ",", "task_config", ")" ]
Runs a named CumulusCI task for the current project with optional support for overriding task options via kwargs.

Examples:
| =Keyword= | =task_name= | =task_options=             | =comment=                        |
| Run Task  | deploy      |                            | Run deploy with standard options |
| Run Task  | deploy      | path=path/to/some/metadata | Run deploy with custom path      |
[ "Runs", "a", "named", "CumulusCI", "task", "for", "the", "current", "project", "with", "optional", "support", "for", "overriding", "task", "options", "via", "kwargs", "." ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/robotframework/CumulusCI.py#L137-L150
247,578
SFDO-Tooling/CumulusCI
cumulusci/robotframework/CumulusCI.py
CumulusCI.run_task_class
def run_task_class(self, class_path, **options):
    """ Runs a CumulusCI task class with task options via kwargs.

    Use this keyword to run logic from CumulusCI tasks which have not
    been configured in the project's cumulusci.yml file. This is most
    useful in cases where a test needs to use task logic for logic
    unique to the test and thus not worth making into a named task for
    the project

    Examples:
    | =Keyword=      | =task_class=                     | =task_options=                            |
    | Run Task Class | cumulusci.task.utils.DownloadZip | url=http://test.com/test.zip dir=test_zip |
    """
    logger.console("\n")
    task_class, task_config = self._init_task(class_path, options, TaskConfig())
    return self._run_task(task_class, task_config)
python
def run_task_class(self, class_path, **options):
    logger.console("\n")
    task_class, task_config = self._init_task(class_path, options, TaskConfig())
    return self._run_task(task_class, task_config)
[ "def", "run_task_class", "(", "self", ",", "class_path", ",", "*", "*", "options", ")", ":", "logger", ".", "console", "(", "\"\\n\"", ")", "task_class", ",", "task_config", "=", "self", ".", "_init_task", "(", "class_path", ",", "options", ",", "TaskConfig", "(", ")", ")", "return", "self", ".", "_run_task", "(", "task_class", ",", "task_config", ")" ]
Runs a CumulusCI task class with task options via kwargs.

Use this keyword to run logic from CumulusCI tasks which have not been configured in the project's cumulusci.yml file. This is most useful in cases where a test needs task logic for behavior unique to the test, and thus not worth making into a named task for the project.

Examples:
| =Keyword=      | =task_class=                     | =task_options=                            |
| Run Task Class | cumulusci.task.utils.DownloadZip | url=http://test.com/test.zip dir=test_zip |
[ "Runs", "a", "CumulusCI", "task", "class", "with", "task", "options", "via", "kwargs", "." ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/robotframework/CumulusCI.py#L152-L167
247,579
SFDO-Tooling/CumulusCI
cumulusci/core/config/BaseTaskFlowConfig.py
BaseTaskFlowConfig.get_task
def get_task(self, name):
    """ Returns a TaskConfig """
    config = getattr(self, "tasks__{}".format(name))
    if not config:
        raise TaskNotFoundError("Task not found: {}".format(name))
    return TaskConfig(config)
python
def get_task(self, name):
    config = getattr(self, "tasks__{}".format(name))
    if not config:
        raise TaskNotFoundError("Task not found: {}".format(name))
    return TaskConfig(config)
[ "def", "get_task", "(", "self", ",", "name", ")", ":", "config", "=", "getattr", "(", "self", ",", "\"tasks__{}\"", ".", "format", "(", "name", ")", ")", "if", "not", "config", ":", "raise", "TaskNotFoundError", "(", "\"Task not found: {}\"", ".", "format", "(", "name", ")", ")", "return", "TaskConfig", "(", "config", ")" ]
Returns a TaskConfig
[ "Returns", "a", "TaskConfig" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/config/BaseTaskFlowConfig.py#L32-L37
247,580
SFDO-Tooling/CumulusCI
cumulusci/core/config/BaseTaskFlowConfig.py
BaseTaskFlowConfig.get_flow
def get_flow(self, name):
    """ Returns a FlowConfig """
    config = getattr(self, "flows__{}".format(name))
    if not config:
        raise FlowNotFoundError("Flow not found: {}".format(name))
    return FlowConfig(config)
python
def get_flow(self, name):
    config = getattr(self, "flows__{}".format(name))
    if not config:
        raise FlowNotFoundError("Flow not found: {}".format(name))
    return FlowConfig(config)
[ "def", "get_flow", "(", "self", ",", "name", ")", ":", "config", "=", "getattr", "(", "self", ",", "\"flows__{}\"", ".", "format", "(", "name", ")", ")", "if", "not", "config", ":", "raise", "FlowNotFoundError", "(", "\"Flow not found: {}\"", ".", "format", "(", "name", ")", ")", "return", "FlowConfig", "(", "config", ")" ]
Returns a FlowConfig
[ "Returns", "a", "FlowConfig" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/config/BaseTaskFlowConfig.py#L43-L48
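Both getters lean on the config object's double-underscore attribute convention, where an attribute like tasks__deploy resolves through the nested config dict. A minimal sketch of that resolution, offered as an assumption about the BaseConfig behavior rather than its actual implementation:

class DunderLookup(object):
    # Hypothetical stand-in for the "__"-separated lookup these getters rely on.
    def __init__(self, config):
        self.config = config

    def __getattr__(self, name):
        value = self.config
        for key in name.split("__"):
            value = value.get(key) if isinstance(value, dict) else None
            if value is None:
                return None
        return value

cfg = DunderLookup({"tasks": {"deploy": {"class_path": "cumulusci.tasks.salesforce.Deploy"}}})
print(cfg.tasks__deploy)   # {'class_path': 'cumulusci.tasks.salesforce.Deploy'}
print(cfg.tasks__missing)  # None -> get_task would raise TaskNotFoundError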
247,581
SFDO-Tooling/CumulusCI
cumulusci/tasks/release_notes/generator.py
BaseReleaseNotesGenerator.render
def render(self):
    """ Returns the rendered release notes from all parsers as a string """
    release_notes = []
    for parser in self.parsers:
        parser_content = parser.render()
        if parser_content is not None:
            release_notes.append(parser_content)
    return u"\r\n\r\n".join(release_notes)
python
def render(self):
    release_notes = []
    for parser in self.parsers:
        parser_content = parser.render()
        if parser_content is not None:
            release_notes.append(parser_content)
    return u"\r\n\r\n".join(release_notes)
[ "def", "render", "(", "self", ")", ":", "release_notes", "=", "[", "]", "for", "parser", "in", "self", ".", "parsers", ":", "parser_content", "=", "parser", ".", "render", "(", ")", "if", "parser_content", "is", "not", "None", ":", "release_notes", ".", "append", "(", "parser_content", ")", "return", "u\"\\r\\n\\r\\n\"", ".", "join", "(", "release_notes", ")" ]
Returns the rendered release notes from all parsers as a string
[ "Returns", "the", "rendered", "release", "notes", "from", "all", "parsers", "as", "a", "string" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/tasks/release_notes/generator.py#L52-L59
247,582
SFDO-Tooling/CumulusCI
cumulusci/tasks/release_notes/generator.py
GithubReleaseNotesGenerator._update_release_content
def _update_release_content(self, release, content):
    """Merge existing and new release content."""
    if release.body:
        new_body = []
        current_parser = None
        is_start_line = False
        for parser in self.parsers:
            parser.replaced = False

        # update existing sections
        for line in release.body.splitlines():

            if current_parser:
                if current_parser._is_end_line(current_parser._process_line(line)):
                    parser_content = current_parser.render()
                    if parser_content:
                        # replace existing section with new content
                        new_body.append(parser_content + "\r\n")
                    current_parser = None

            for parser in self.parsers:
                if (
                    parser._render_header().strip()
                    == parser._process_line(line).strip()
                ):
                    parser.replaced = True
                    current_parser = parser
                    is_start_line = True
                    break
                else:
                    is_start_line = False

            if is_start_line:
                continue

            if current_parser:
                continue
            else:
                # preserve existing sections
                new_body.append(line.strip())

        # catch section without end line
        if current_parser:
            new_body.append(current_parser.render())

        # add new sections at bottom
        for parser in self.parsers:
            parser_content = parser.render()
            if parser_content and not parser.replaced:
                new_body.append(parser_content + "\r\n")

        content = u"\r\n".join(new_body)
    return content
python
def _update_release_content(self, release, content):
    if release.body:
        new_body = []
        current_parser = None
        is_start_line = False
        for parser in self.parsers:
            parser.replaced = False

        # update existing sections
        for line in release.body.splitlines():

            if current_parser:
                if current_parser._is_end_line(current_parser._process_line(line)):
                    parser_content = current_parser.render()
                    if parser_content:
                        # replace existing section with new content
                        new_body.append(parser_content + "\r\n")
                    current_parser = None

            for parser in self.parsers:
                if (
                    parser._render_header().strip()
                    == parser._process_line(line).strip()
                ):
                    parser.replaced = True
                    current_parser = parser
                    is_start_line = True
                    break
                else:
                    is_start_line = False

            if is_start_line:
                continue

            if current_parser:
                continue
            else:
                # preserve existing sections
                new_body.append(line.strip())

        # catch section without end line
        if current_parser:
            new_body.append(current_parser.render())

        # add new sections at bottom
        for parser in self.parsers:
            parser_content = parser.render()
            if parser_content and not parser.replaced:
                new_body.append(parser_content + "\r\n")

        content = u"\r\n".join(new_body)
    return content
[ "def", "_update_release_content", "(", "self", ",", "release", ",", "content", ")", ":", "if", "release", ".", "body", ":", "new_body", "=", "[", "]", "current_parser", "=", "None", "is_start_line", "=", "False", "for", "parser", "in", "self", ".", "parsers", ":", "parser", ".", "replaced", "=", "False", "# update existing sections", "for", "line", "in", "release", ".", "body", ".", "splitlines", "(", ")", ":", "if", "current_parser", ":", "if", "current_parser", ".", "_is_end_line", "(", "current_parser", ".", "_process_line", "(", "line", ")", ")", ":", "parser_content", "=", "current_parser", ".", "render", "(", ")", "if", "parser_content", ":", "# replace existing section with new content", "new_body", ".", "append", "(", "parser_content", "+", "\"\\r\\n\"", ")", "current_parser", "=", "None", "for", "parser", "in", "self", ".", "parsers", ":", "if", "(", "parser", ".", "_render_header", "(", ")", ".", "strip", "(", ")", "==", "parser", ".", "_process_line", "(", "line", ")", ".", "strip", "(", ")", ")", ":", "parser", ".", "replaced", "=", "True", "current_parser", "=", "parser", "is_start_line", "=", "True", "break", "else", ":", "is_start_line", "=", "False", "if", "is_start_line", ":", "continue", "if", "current_parser", ":", "continue", "else", ":", "# preserve existing sections", "new_body", ".", "append", "(", "line", ".", "strip", "(", ")", ")", "# catch section without end line", "if", "current_parser", ":", "new_body", ".", "append", "(", "current_parser", ".", "render", "(", ")", ")", "# add new sections at bottom", "for", "parser", "in", "self", ".", "parsers", ":", "parser_content", "=", "parser", ".", "render", "(", ")", "if", "parser_content", "and", "not", "parser", ".", "replaced", ":", "new_body", ".", "append", "(", "parser_content", "+", "\"\\r\\n\"", ")", "content", "=", "u\"\\r\\n\"", ".", "join", "(", "new_body", ")", "return", "content" ]
Merge existing and new release content.
[ "Merge", "existing", "and", "new", "release", "content", "." ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/tasks/release_notes/generator.py#L139-L191
247,583
SFDO-Tooling/CumulusCI
cumulusci/core/runtime.py
BaseCumulusCI.get_flow
def get_flow(self, name, options=None):
    """ Get a primed and readytogo flow coordinator. """
    config = self.project_config.get_flow(name)
    callbacks = self.callback_class()
    coordinator = FlowCoordinator(
        self.project_config,
        config,
        name=name,
        options=options,
        skip=None,
        callbacks=callbacks,
    )
    return coordinator
python
def get_flow(self, name, options=None):
    config = self.project_config.get_flow(name)
    callbacks = self.callback_class()
    coordinator = FlowCoordinator(
        self.project_config,
        config,
        name=name,
        options=options,
        skip=None,
        callbacks=callbacks,
    )
    return coordinator
[ "def", "get_flow", "(", "self", ",", "name", ",", "options", "=", "None", ")", ":", "config", "=", "self", ".", "project_config", ".", "get_flow", "(", "name", ")", "callbacks", "=", "self", ".", "callback_class", "(", ")", "coordinator", "=", "FlowCoordinator", "(", "self", ".", "project_config", ",", "config", ",", "name", "=", "name", ",", "options", "=", "options", ",", "skip", "=", "None", ",", "callbacks", "=", "callbacks", ",", ")", "return", "coordinator" ]
Get a primed and ready-to-go flow coordinator.
[ "Get", "a", "primed", "and", "ready-to-go", "flow", "coordinator", "." ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/runtime.py#L94-L106
247,584
SFDO-Tooling/CumulusCI
cumulusci/core/keychain/EnvironmentProjectKeychain.py
EnvironmentProjectKeychain._get_env
def _get_env(self):
    """ loads the environment variables as unicode if ascii """
    env = {}
    for k, v in os.environ.items():
        k = k.decode() if isinstance(k, bytes) else k
        v = v.decode() if isinstance(v, bytes) else v
        env[k] = v
    return list(env.items())
python
def _get_env(self):
    env = {}
    for k, v in os.environ.items():
        k = k.decode() if isinstance(k, bytes) else k
        v = v.decode() if isinstance(v, bytes) else v
        env[k] = v
    return list(env.items())
[ "def", "_get_env", "(", "self", ")", ":", "env", "=", "{", "}", "for", "k", ",", "v", "in", "os", ".", "environ", ".", "items", "(", ")", ":", "k", "=", "k", ".", "decode", "(", ")", "if", "isinstance", "(", "k", ",", "bytes", ")", "else", "k", "v", "=", "v", ".", "decode", "(", ")", "if", "isinstance", "(", "v", ",", "bytes", ")", "else", "v", "env", "[", "k", "]", "=", "v", "return", "list", "(", "env", ".", "items", "(", ")", ")" ]
Loads the environment variables, decoding byte strings to unicode.
[ "Loads", "the", "environment", "variables", "decoding", "byte", "strings", "to", "unicode", "." ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/keychain/EnvironmentProjectKeychain.py#L20-L27
247,585
SFDO-Tooling/CumulusCI
cumulusci/core/utils.py
import_class
def import_class(path):
    """ Import a class from a string module class path """
    components = path.split(".")
    module = components[:-1]
    module = ".".join(module)
    mod = __import__(module, fromlist=[native_str(components[-1])])
    return getattr(mod, native_str(components[-1]))
python
def import_class(path):
    components = path.split(".")
    module = components[:-1]
    module = ".".join(module)
    mod = __import__(module, fromlist=[native_str(components[-1])])
    return getattr(mod, native_str(components[-1]))
[ "def", "import_class", "(", "path", ")", ":", "components", "=", "path", ".", "split", "(", "\".\"", ")", "module", "=", "components", "[", ":", "-", "1", "]", "module", "=", "\".\"", ".", "join", "(", "module", ")", "mod", "=", "__import__", "(", "module", ",", "fromlist", "=", "[", "native_str", "(", "components", "[", "-", "1", "]", ")", "]", ")", "return", "getattr", "(", "mod", ",", "native_str", "(", "components", "[", "-", "1", "]", ")", ")" ]
Import a class from a string module class path
[ "Import", "a", "class", "from", "a", "string", "module", "class", "path" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/utils.py#L24-L30
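A usage sketch; native_str is a Python 2/3 compatibility shim imported elsewhere in the module. The fromlist argument makes __import__ return the leaf module rather than the top-level package, so getattr can pull the class off it directly:

cls = import_class("collections.OrderedDict")
print(cls is __import__("collections").OrderedDict)  # True
print(cls(a=1))                                      # OrderedDict([('a', 1)])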
247,586
SFDO-Tooling/CumulusCI
cumulusci/core/utils.py
parse_datetime
def parse_datetime(dt_str, format):
    """Create a timezone-aware datetime object from a datetime string."""
    t = time.strptime(dt_str, format)
    return datetime(t[0], t[1], t[2], t[3], t[4], t[5], t[6], pytz.UTC)
python
def parse_datetime(dt_str, format):
    t = time.strptime(dt_str, format)
    return datetime(t[0], t[1], t[2], t[3], t[4], t[5], t[6], pytz.UTC)
[ "def", "parse_datetime", "(", "dt_str", ",", "format", ")", ":", "t", "=", "time", ".", "strptime", "(", "dt_str", ",", "format", ")", "return", "datetime", "(", "t", "[", "0", "]", ",", "t", "[", "1", "]", ",", "t", "[", "2", "]", ",", "t", "[", "3", "]", ",", "t", "[", "4", "]", ",", "t", "[", "5", "]", ",", "t", "[", "6", "]", ",", "pytz", ".", "UTC", ")" ]
Create a timezone-aware datetime object from a datetime string.
[ "Create", "a", "timezone", "-", "aware", "datetime", "object", "from", "a", "datetime", "string", "." ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/utils.py#L33-L36
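One thing to watch when reading the tuple indexing: index 6 of a time.struct_time is tm_wday (the day of the week), not microseconds, so the weekday value lands in the datetime's microsecond field. A sketch of an equivalent formulation that avoids the positional indexing; this is an alternative for illustration, not the project's code:

from datetime import datetime
import pytz

def parse_datetime_alt(dt_str, format):
    # Parse, then attach UTC; sidesteps struct_time indexing entirely.
    return datetime.strptime(dt_str, format).replace(tzinfo=pytz.UTC)

print(parse_datetime_alt("2019-03-01 12:00:00", "%Y-%m-%d %H:%M:%S"))
# 2019-03-01 12:00:00+00:00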
247,587
SFDO-Tooling/CumulusCI
cumulusci/core/utils.py
process_list_arg
def process_list_arg(arg):
    """ Parse a string into a list separated by commas with whitespace stripped """
    if isinstance(arg, list):
        return arg
    elif isinstance(arg, basestring):
        args = []
        for part in arg.split(","):
            args.append(part.strip())
        return args
python
def process_list_arg(arg):
    if isinstance(arg, list):
        return arg
    elif isinstance(arg, basestring):
        args = []
        for part in arg.split(","):
            args.append(part.strip())
        return args
[ "def", "process_list_arg", "(", "arg", ")", ":", "if", "isinstance", "(", "arg", ",", "list", ")", ":", "return", "arg", "elif", "isinstance", "(", "arg", ",", "basestring", ")", ":", "args", "=", "[", "]", "for", "part", "in", "arg", ".", "split", "(", "\",\"", ")", ":", "args", ".", "append", "(", "part", ".", "strip", "(", ")", ")", "return", "args" ]
Parse a comma-separated string into a list, with whitespace stripped from each item.
[ "Parse", "a", "comma", "-", "separated", "string", "into", "a", "list", "with", "whitespace", "stripped", "from", "each", "item", "." ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/utils.py#L50-L58
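A usage sketch; note that basestring implies Python 2 (or a compatibility builtin), and that an argument that is neither a list nor a string falls through both branches and returns None implicitly:

print(process_list_arg("deploy, run_tests ,update_admin_profile"))
# ['deploy', 'run_tests', 'update_admin_profile']
print(process_list_arg(["already", "a", "list"]))
# ['already', 'a', 'list']
print(process_list_arg(42))
# None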
247,588
SFDO-Tooling/CumulusCI
cumulusci/core/utils.py
decode_to_unicode
def decode_to_unicode(content):
    """ decode ISO-8859-1 to unicode, when using sf api """
    if content and not isinstance(content, str):
        try:
            # Try to decode ISO-8859-1 to unicode
            return content.decode("ISO-8859-1")
        except UnicodeEncodeError:
            # Assume content is unicode already
            return content
    return content
python
def decode_to_unicode(content):
    if content and not isinstance(content, str):
        try:
            # Try to decode ISO-8859-1 to unicode
            return content.decode("ISO-8859-1")
        except UnicodeEncodeError:
            # Assume content is unicode already
            return content
    return content
[ "def", "decode_to_unicode", "(", "content", ")", ":", "if", "content", "and", "not", "isinstance", "(", "content", ",", "str", ")", ":", "try", ":", "# Try to decode ISO-8859-1 to unicode", "return", "content", ".", "decode", "(", "\"ISO-8859-1\"", ")", "except", "UnicodeEncodeError", ":", "# Assume content is unicode already", "return", "content", "return", "content" ]
Decode ISO-8859-1 content to unicode when using the Salesforce API.
[ "Decode", "ISO", "-", "8859", "-", "1", "content", "to", "unicode", "when", "using", "the", "Salesforce", "API", "." ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/utils.py#L61-L70
247,589
SFDO-Tooling/CumulusCI
cumulusci/tasks/metadeploy.py
Publish._find_or_create_version
def _find_or_create_version(self, product):
    """Create a Version in MetaDeploy if it doesn't already exist"""
    tag = self.options["tag"]
    label = self.project_config.get_version_for_tag(tag)
    result = self._call_api(
        "GET", "/versions", params={"product": product["id"], "label": label}
    )
    if len(result["data"]) == 0:
        version = self._call_api(
            "POST",
            "/versions",
            json={
                "product": product["url"],
                "label": label,
                "description": self.options.get("description", ""),
                "is_production": True,
                "commit_ish": tag,
                "is_listed": False,
            },
        )
        self.logger.info("Created {}".format(version["url"]))
    else:
        version = result["data"][0]
        self.logger.info("Found {}".format(version["url"]))
    return version
python
def _find_or_create_version(self, product):
    tag = self.options["tag"]
    label = self.project_config.get_version_for_tag(tag)
    result = self._call_api(
        "GET", "/versions", params={"product": product["id"], "label": label}
    )
    if len(result["data"]) == 0:
        version = self._call_api(
            "POST",
            "/versions",
            json={
                "product": product["url"],
                "label": label,
                "description": self.options.get("description", ""),
                "is_production": True,
                "commit_ish": tag,
                "is_listed": False,
            },
        )
        self.logger.info("Created {}".format(version["url"]))
    else:
        version = result["data"][0]
        self.logger.info("Found {}".format(version["url"]))
    return version
[ "def", "_find_or_create_version", "(", "self", ",", "product", ")", ":", "tag", "=", "self", ".", "options", "[", "\"tag\"", "]", "label", "=", "self", ".", "project_config", ".", "get_version_for_tag", "(", "tag", ")", "result", "=", "self", ".", "_call_api", "(", "\"GET\"", ",", "\"/versions\"", ",", "params", "=", "{", "\"product\"", ":", "product", "[", "\"id\"", "]", ",", "\"label\"", ":", "label", "}", ")", "if", "len", "(", "result", "[", "\"data\"", "]", ")", "==", "0", ":", "version", "=", "self", ".", "_call_api", "(", "\"POST\"", ",", "\"/versions\"", ",", "json", "=", "{", "\"product\"", ":", "product", "[", "\"url\"", "]", ",", "\"label\"", ":", "label", ",", "\"description\"", ":", "self", ".", "options", ".", "get", "(", "\"description\"", ",", "\"\"", ")", ",", "\"is_production\"", ":", "True", ",", "\"commit_ish\"", ":", "tag", ",", "\"is_listed\"", ":", "False", ",", "}", ",", ")", "self", ".", "logger", ".", "info", "(", "\"Created {}\"", ".", "format", "(", "version", "[", "\"url\"", "]", ")", ")", "else", ":", "version", "=", "result", "[", "\"data\"", "]", "[", "0", "]", "self", ".", "logger", ".", "info", "(", "\"Found {}\"", ".", "format", "(", "version", "[", "\"url\"", "]", ")", ")", "return", "version" ]
Create a Version in MetaDeploy if it doesn't already exist
[ "Create", "a", "Version", "in", "MetaDeploy", "if", "it", "doesn", "t", "already", "exist" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/tasks/metadeploy.py#L168-L194
247,590
SFDO-Tooling/CumulusCI
cumulusci/tasks/robotframework/libdoc.py
RobotLibDoc._render_html
def _render_html(self, libraries):
    """Generate the html. `libraries` is a list of LibraryDocumentation objects"""
    title = self.options.get("title", "Keyword Documentation")
    date = time.strftime("%A %B %d, %I:%M %p")
    cci_version = cumulusci.__version__

    stylesheet_path = os.path.join(os.path.dirname(__file__), "stylesheet.css")
    with open(stylesheet_path) as f:
        stylesheet = f.read()

    jinjaenv = jinja2.Environment(
        loader=jinja2.FileSystemLoader(os.path.dirname(__file__)), autoescape=False
    )
    jinjaenv.filters["robot_html"] = robot.utils.html_format
    template = jinjaenv.get_template("template.html")
    return template.render(
        libraries=libraries,
        title=title,
        cci_version=cci_version,
        stylesheet=stylesheet,
        date=date,
    )
python
def _render_html(self, libraries):
    title = self.options.get("title", "Keyword Documentation")
    date = time.strftime("%A %B %d, %I:%M %p")
    cci_version = cumulusci.__version__

    stylesheet_path = os.path.join(os.path.dirname(__file__), "stylesheet.css")
    with open(stylesheet_path) as f:
        stylesheet = f.read()

    jinjaenv = jinja2.Environment(
        loader=jinja2.FileSystemLoader(os.path.dirname(__file__)), autoescape=False
    )
    jinjaenv.filters["robot_html"] = robot.utils.html_format
    template = jinjaenv.get_template("template.html")
    return template.render(
        libraries=libraries,
        title=title,
        cci_version=cci_version,
        stylesheet=stylesheet,
        date=date,
    )
[ "def", "_render_html", "(", "self", ",", "libraries", ")", ":", "title", "=", "self", ".", "options", ".", "get", "(", "\"title\"", ",", "\"Keyword Documentation\"", ")", "date", "=", "time", ".", "strftime", "(", "\"%A %B %d, %I:%M %p\"", ")", "cci_version", "=", "cumulusci", ".", "__version__", "stylesheet_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "\"stylesheet.css\"", ")", "with", "open", "(", "stylesheet_path", ")", "as", "f", ":", "stylesheet", "=", "f", ".", "read", "(", ")", "jinjaenv", "=", "jinja2", ".", "Environment", "(", "loader", "=", "jinja2", ".", "FileSystemLoader", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", ",", "autoescape", "=", "False", ")", "jinjaenv", ".", "filters", "[", "\"robot_html\"", "]", "=", "robot", ".", "utils", ".", "html_format", "template", "=", "jinjaenv", ".", "get_template", "(", "\"template.html\"", ")", "return", "template", ".", "render", "(", "libraries", "=", "libraries", ",", "title", "=", "title", ",", "cci_version", "=", "cci_version", ",", "stylesheet", "=", "stylesheet", ",", "date", "=", "date", ",", ")" ]
Generate the html. `libraries` is a list of LibraryDocumentation objects
[ "Generate", "the", "html", ".", "libraries", "is", "a", "list", "of", "LibraryDocumentation", "objects" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/tasks/robotframework/libdoc.py#L84-L106
247,591
SFDO-Tooling/CumulusCI
cumulusci/core/config/ScratchOrgConfig.py
ScratchOrgConfig.generate_password
def generate_password(self):
    """Generates an org password with the sfdx utility."""
    if self.password_failed:
        self.logger.warning("Skipping resetting password since last attempt failed")
        return

    # Set a random password so it's available via cci org info
    command = sarge.shell_format(
        "sfdx force:user:password:generate -u {0}", self.username
    )
    self.logger.info(
        "Generating scratch org user password with command {}".format(command)
    )
    p = sarge.Command(
        command,
        stdout=sarge.Capture(buffer_size=-1),
        stderr=sarge.Capture(buffer_size=-1),
        shell=True,
    )
    p.run()

    stderr = io.TextIOWrapper(p.stderr).readlines()
    stdout = io.TextIOWrapper(p.stdout).readlines()

    if p.returncode:
        self.config["password_failed"] = True
        # Don't throw an exception because of failure creating the
        # password, just notify in a log message
        self.logger.warning(
            "Failed to set password: \n{}\n{}".format(
                "\n".join(stdout), "\n".join(stderr)
            )
        )
python
def generate_password(self):
    if self.password_failed:
        self.logger.warning("Skipping resetting password since last attempt failed")
        return

    # Set a random password so it's available via cci org info
    command = sarge.shell_format(
        "sfdx force:user:password:generate -u {0}", self.username
    )
    self.logger.info(
        "Generating scratch org user password with command {}".format(command)
    )
    p = sarge.Command(
        command,
        stdout=sarge.Capture(buffer_size=-1),
        stderr=sarge.Capture(buffer_size=-1),
        shell=True,
    )
    p.run()

    stderr = io.TextIOWrapper(p.stderr).readlines()
    stdout = io.TextIOWrapper(p.stdout).readlines()

    if p.returncode:
        self.config["password_failed"] = True
        # Don't throw an exception because of failure creating the
        # password, just notify in a log message
        self.logger.warning(
            "Failed to set password: \n{}\n{}".format(
                "\n".join(stdout), "\n".join(stderr)
            )
        )
[ "def", "generate_password", "(", "self", ")", ":", "if", "self", ".", "password_failed", ":", "self", ".", "logger", ".", "warning", "(", "\"Skipping resetting password since last attempt failed\"", ")", "return", "# Set a random password so it's available via cci org info", "command", "=", "sarge", ".", "shell_format", "(", "\"sfdx force:user:password:generate -u {0}\"", ",", "self", ".", "username", ")", "self", ".", "logger", ".", "info", "(", "\"Generating scratch org user password with command {}\"", ".", "format", "(", "command", ")", ")", "p", "=", "sarge", ".", "Command", "(", "command", ",", "stdout", "=", "sarge", ".", "Capture", "(", "buffer_size", "=", "-", "1", ")", ",", "stderr", "=", "sarge", ".", "Capture", "(", "buffer_size", "=", "-", "1", ")", ",", "shell", "=", "True", ",", ")", "p", ".", "run", "(", ")", "stderr", "=", "io", ".", "TextIOWrapper", "(", "p", ".", "stderr", ")", ".", "readlines", "(", ")", "stdout", "=", "io", ".", "TextIOWrapper", "(", "p", ".", "stdout", ")", ".", "readlines", "(", ")", "if", "p", ".", "returncode", ":", "self", ".", "config", "[", "\"password_failed\"", "]", "=", "True", "# Don't throw an exception because of failure creating the", "# password, just notify in a log message", "self", ".", "logger", ".", "warning", "(", "\"Failed to set password: \\n{}\\n{}\"", ".", "format", "(", "\"\\n\"", ".", "join", "(", "stdout", ")", ",", "\"\\n\"", ".", "join", "(", "stderr", ")", ")", ")" ]
Generates an org password with the sfdx utility.
[ "Generates", "an", "org", "password", "with", "the", "sfdx", "utility", "." ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/config/ScratchOrgConfig.py#L241-L274
247,592
SFDO-Tooling/CumulusCI
cumulusci/core/keychain/BaseProjectKeychain.py
BaseProjectKeychain._convert_connected_app
def _convert_connected_app(self):
    """Convert Connected App to service"""
    if self.services and "connected_app" in self.services:
        # already a service
        return
    connected_app = self.get_connected_app()
    if not connected_app:
        # not configured
        return
    self.logger.warning(
        "Reading Connected App info from deprecated config."
        " Connected App should be changed to a service."
        " If using environment keychain, update the environment variable."
        " Otherwise, it has been handled automatically and you should not"
        " see this message again."
    )
    ca_config = ServiceConfig(
        {
            "callback_url": connected_app.callback_url,
            "client_id": connected_app.client_id,
            "client_secret": connected_app.client_secret,
        }
    )
    self.set_service("connected_app", ca_config)
python
def _convert_connected_app(self):
    if self.services and "connected_app" in self.services:
        # already a service
        return
    connected_app = self.get_connected_app()
    if not connected_app:
        # not configured
        return
    self.logger.warning(
        "Reading Connected App info from deprecated config."
        " Connected App should be changed to a service."
        " If using environment keychain, update the environment variable."
        " Otherwise, it has been handled automatically and you should not"
        " see this message again."
    )
    ca_config = ServiceConfig(
        {
            "callback_url": connected_app.callback_url,
            "client_id": connected_app.client_id,
            "client_secret": connected_app.client_secret,
        }
    )
    self.set_service("connected_app", ca_config)
[ "def", "_convert_connected_app", "(", "self", ")", ":", "if", "self", ".", "services", "and", "\"connected_app\"", "in", "self", ".", "services", ":", "# already a service", "return", "connected_app", "=", "self", ".", "get_connected_app", "(", ")", "if", "not", "connected_app", ":", "# not configured", "return", "self", ".", "logger", ".", "warning", "(", "\"Reading Connected App info from deprecated config.\"", "\" Connected App should be changed to a service.\"", "\" If using environment keychain, update the environment variable.\"", "\" Otherwise, it has been handled automatically and you should not\"", "\" see this message again.\"", ")", "ca_config", "=", "ServiceConfig", "(", "{", "\"callback_url\"", ":", "connected_app", ".", "callback_url", ",", "\"client_id\"", ":", "connected_app", ".", "client_id", ",", "\"client_secret\"", ":", "connected_app", ".", "client_secret", ",", "}", ")", "self", ".", "set_service", "(", "\"connected_app\"", ",", "ca_config", ")" ]
Convert Connected App to service
[ "Convert", "Connected", "App", "to", "service" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/keychain/BaseProjectKeychain.py#L22-L45
247,593
SFDO-Tooling/CumulusCI
cumulusci/core/keychain/BaseProjectKeychain.py
BaseProjectKeychain._load_scratch_orgs
def _load_scratch_orgs(self):
    """ Creates all scratch org configs for the project in the keychain
    if a keychain org doesn't already exist """
    current_orgs = self.list_orgs()
    if not self.project_config.orgs__scratch:
        return
    for config_name in self.project_config.orgs__scratch.keys():
        if config_name in current_orgs:
            # Don't overwrite an existing keychain org
            continue
        self.create_scratch_org(config_name, config_name)
python
def _load_scratch_orgs(self):
    current_orgs = self.list_orgs()
    if not self.project_config.orgs__scratch:
        return
    for config_name in self.project_config.orgs__scratch.keys():
        if config_name in current_orgs:
            # Don't overwrite an existing keychain org
            continue
        self.create_scratch_org(config_name, config_name)
[ "def", "_load_scratch_orgs", "(", "self", ")", ":", "current_orgs", "=", "self", ".", "list_orgs", "(", ")", "if", "not", "self", ".", "project_config", ".", "orgs__scratch", ":", "return", "for", "config_name", "in", "self", ".", "project_config", ".", "orgs__scratch", ".", "keys", "(", ")", ":", "if", "config_name", "in", "current_orgs", ":", "# Don't overwrite an existing keychain org", "continue", "self", ".", "create_scratch_org", "(", "config_name", ",", "config_name", ")" ]
Creates all scratch org configs for the project in the keychain if a keychain org doesn't already exist
[ "Creates", "all", "scratch", "org", "configs", "for", "the", "project", "in", "the", "keychain", "if", "a", "keychain", "org", "doesn", "t", "already", "exist" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/keychain/BaseProjectKeychain.py#L59-L69
247,594
SFDO-Tooling/CumulusCI
cumulusci/core/keychain/BaseProjectKeychain.py
BaseProjectKeychain.change_key
def change_key(self, key):
    """ re-encrypt stored services and orgs with the new key """
    services = {}
    for service_name in self.list_services():
        services[service_name] = self.get_service(service_name)

    orgs = {}
    for org_name in self.list_orgs():
        orgs[org_name] = self.get_org(org_name)

    self.key = key
    if orgs:
        for org_name, org_config in list(orgs.items()):
            self.set_org(org_config)

    if services:
        for service_name, service_config in list(services.items()):
            self.set_service(service_name, service_config)

    self._convert_connected_app()
python
def change_key(self, key):
    services = {}
    for service_name in self.list_services():
        services[service_name] = self.get_service(service_name)

    orgs = {}
    for org_name in self.list_orgs():
        orgs[org_name] = self.get_org(org_name)

    self.key = key
    if orgs:
        for org_name, org_config in list(orgs.items()):
            self.set_org(org_config)

    if services:
        for service_name, service_config in list(services.items()):
            self.set_service(service_name, service_config)

    self._convert_connected_app()
[ "def", "change_key", "(", "self", ",", "key", ")", ":", "services", "=", "{", "}", "for", "service_name", "in", "self", ".", "list_services", "(", ")", ":", "services", "[", "service_name", "]", "=", "self", ".", "get_service", "(", "service_name", ")", "orgs", "=", "{", "}", "for", "org_name", "in", "self", ".", "list_orgs", "(", ")", ":", "orgs", "[", "org_name", "]", "=", "self", ".", "get_org", "(", "org_name", ")", "self", ".", "key", "=", "key", "if", "orgs", ":", "for", "org_name", ",", "org_config", "in", "list", "(", "orgs", ".", "items", "(", ")", ")", ":", "self", ".", "set_org", "(", "org_config", ")", "if", "services", ":", "for", "service_name", ",", "service_config", "in", "list", "(", "services", ".", "items", "(", ")", ")", ":", "self", ".", "set_service", "(", "service_name", ",", "service_config", ")", "self", ".", "_convert_connected_app", "(", ")" ]
re-encrypt stored services and orgs with the new key
[ "re", "-", "encrypt", "stored", "services", "and", "orgs", "with", "the", "new", "key" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/keychain/BaseProjectKeychain.py#L95-L116
247,595
SFDO-Tooling/CumulusCI
cumulusci/core/keychain/BaseProjectKeychain.py
BaseProjectKeychain.get_default_org
def get_default_org(self):
    """ retrieve the name and configuration of the default org """
    for org in self.list_orgs():
        org_config = self.get_org(org)
        if org_config.default:
            return org, org_config
    return None, None
python
def get_default_org(self):
    for org in self.list_orgs():
        org_config = self.get_org(org)
        if org_config.default:
            return org, org_config
    return None, None
[ "def", "get_default_org", "(", "self", ")", ":", "for", "org", "in", "self", ".", "list_orgs", "(", ")", ":", "org_config", "=", "self", ".", "get_org", "(", "org", ")", "if", "org_config", ".", "default", ":", "return", "org", ",", "org_config", "return", "None", ",", "None" ]
retrieve the name and configuration of the default org
[ "retrieve", "the", "name", "and", "configuration", "of", "the", "default", "org" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/keychain/BaseProjectKeychain.py#L143-L149
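A short sketch of get_default_org's return convention (the (None, None) fallback makes the no-default case explicit to callers):

name, org_config = keychain.get_default_org()
if name is None:
    print("no default org is set")
else:
    print("default org:", name)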
247,596
SFDO-Tooling/CumulusCI
cumulusci/core/keychain/BaseProjectKeychain.py
BaseProjectKeychain.set_default_org
def set_default_org(self, name):
    """ set the default org for tasks by name key """
    org = self.get_org(name)
    self.unset_default_org()
    org.config["default"] = True
    self.set_org(org)
python
def set_default_org(self, name):
    org = self.get_org(name)
    self.unset_default_org()
    org.config["default"] = True
    self.set_org(org)
[ "def", "set_default_org", "(", "self", ",", "name", ")", ":", "org", "=", "self", ".", "get_org", "(", "name", ")", "self", ".", "unset_default_org", "(", ")", "org", ".", "config", "[", "\"default\"", "]", "=", "True", "self", ".", "set_org", "(", "org", ")" ]
set the default org for tasks by name key
[ "set", "the", "default", "org", "for", "tasks", "by", "name", "key" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/keychain/BaseProjectKeychain.py#L151-L156
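A sketch pairing set_default_org with get_default_org ("dev" is a hypothetical org name; it must already exist in the keychain, since set_default_org starts by calling get_org):

keychain.set_default_org("dev")  # clears any previous default first
name, _ = keychain.get_default_org()
assert name == "dev"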
247,597
SFDO-Tooling/CumulusCI
cumulusci/core/keychain/BaseProjectKeychain.py
BaseProjectKeychain.unset_default_org
def unset_default_org(self):
    """ unset the default orgs for tasks """
    for org in self.list_orgs():
        org_config = self.get_org(org)
        if org_config.default:
            del org_config.config["default"]
            self.set_org(org_config)
python
def unset_default_org(self):
    for org in self.list_orgs():
        org_config = self.get_org(org)
        if org_config.default:
            del org_config.config["default"]
            self.set_org(org_config)
[ "def", "unset_default_org", "(", "self", ")", ":", "for", "org", "in", "self", ".", "list_orgs", "(", ")", ":", "org_config", "=", "self", ".", "get_org", "(", "org", ")", "if", "org_config", ".", "default", ":", "del", "org_config", ".", "config", "[", "\"default\"", "]", "self", ".", "set_org", "(", "org_config", ")" ]
unset the default orgs for tasks
[ "unset", "the", "default", "orgs", "for", "tasks" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/keychain/BaseProjectKeychain.py#L158-L164
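A sketch of unset_default_org's effect (continues the hypothetical keychain state from the previous sketch):

keychain.unset_default_org()  # strips the default flag from every org
assert keychain.get_default_org() == (None, None)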
247,598
SFDO-Tooling/CumulusCI
cumulusci/core/keychain/BaseProjectKeychain.py
BaseProjectKeychain.get_org
def get_org(self, name):
    """ retrieve an org configuration by name key """
    if name not in self.orgs:
        self._raise_org_not_found(name)
    return self._get_org(name)
python
def get_org(self, name):
    if name not in self.orgs:
        self._raise_org_not_found(name)
    return self._get_org(name)
[ "def", "get_org", "(", "self", ",", "name", ")", ":", "if", "name", "not", "in", "self", ".", "orgs", ":", "self", ".", "_raise_org_not_found", "(", "name", ")", "return", "self", ".", "_get_org", "(", "name", ")" ]
retrieve an org configuration by name key
[ "retrieve", "an", "org", "configuration", "by", "name", "key" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/keychain/BaseProjectKeychain.py#L166-L170
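A hedged sketch of get_org's lookup-or-raise behavior ("dev" is a hypothetical org name; the import assumes _raise_org_not_found raises cumulusci.core.exceptions.OrgNotFound, which should be treated as an assumption, not confirmed by the record above):

from cumulusci.core.exceptions import OrgNotFound  # assumed exception type

try:
    org = keychain.get_org("dev")
except OrgNotFound:
    print("no org named 'dev' in this keychain")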
247,599
SFDO-Tooling/CumulusCI
cumulusci/core/keychain/BaseProjectKeychain.py
BaseProjectKeychain.list_orgs
def list_orgs(self):
    """ list the orgs configured in the keychain """
    orgs = list(self.orgs.keys())
    orgs.sort()
    return orgs
python
def list_orgs(self):
    orgs = list(self.orgs.keys())
    orgs.sort()
    return orgs
[ "def", "list_orgs", "(", "self", ")", ":", "orgs", "=", "list", "(", "self", ".", "orgs", ".", "keys", "(", ")", ")", "orgs", ".", "sort", "(", ")", "return", "orgs" ]
list the orgs configured in the keychain
[ "list", "the", "orgs", "configured", "in", "the", "keychain" ]
e19047921ca771a297e045f22f0bb201651bb6f7
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/keychain/BaseProjectKeychain.py#L178-L182
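A one-line sketch for list_orgs (names come back alphabetically sorted, which keeps listings stable across calls; the example names are hypothetical):

for name in keychain.list_orgs():
    print(name)  # e.g. "dev", "feature", "packaging"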