Dataset schema:
repo: string (7–55 chars)
path: string (4–223 chars)
url: string (87–315 chars)
code: string (75–104k chars)
code_tokens: list
docstring: string (1–46.9k chars)
docstring_tokens: list
language: string (1 value: python)
partition: string (3 values: train/valid/test)
avg_line_len: float64 (7.91–980)
titusjan/argos
argos/repo/baserti.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/repo/baserti.py#L261-L275
def decoration(self):
    """ The displayed icon.

        Shows open icon when node was visited (children are fetched). This allows
        users for instance to collapse a directory node but still see that it was
        visited, which may be useful if there is a huge list of directories.
    """
    rtiIconFactory = RtiIconFactory.singleton()

    if self._exception:
        return rtiIconFactory.getIcon(
            rtiIconFactory.ERROR, isOpen=False,
            color=rtiIconFactory.COLOR_ERROR)
    else:
        return rtiIconFactory.getIcon(
            self.iconGlyph, isOpen=not self.canFetchChildren(),
            color=self.iconColor)
The displayed icon. Shows open icon when node was visited (children are fetched). This allows users for instance to collapse a directory node but still see that it was visited, which may be useful if there is a huge list of directories.
python
train
47.466667
spacetelescope/stsci.tools
lib/stsci/tools/logutil.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/logutil.py#L274-L310
def find_actual_caller(self):
    """
    Returns the fully-qualified module name, full pathname, line number, and
    function in which `StreamTeeLogger.write()` was called.  For example, if
    this instance is used to replace `sys.stdout`, this will return the
    location of any print statement.
    """

    # Gleaned from code in the logging module itself...
    try:
        f = sys._getframe(1)
        ##f = inspect.currentframe(1)
    except Exception:
        f = None

    # On some versions of IronPython, currentframe() returns None if
    # IronPython isn't run with -X:Frames.
    if f is not None:
        f = f.f_back

    rv = "(unknown module)", "(unknown file)", 0, "(unknown function)"
    while hasattr(f, "f_code"):
        co = f.f_code
        filename = os.path.normcase(co.co_filename)
        mod = inspect.getmodule(f)
        if mod is None:
            modname = '__main__'
        else:
            modname = mod.__name__
        if modname == __name__:
            # Crawl back until the first frame outside of this module
            f = f.f_back
            continue
        rv = (modname, filename, f.f_lineno, co.co_name)
        break
    return rv
Returns the fully-qualified module name, full pathname, line number, and function in which `StreamTeeLogger.write()` was called. For example, if this instance is used to replace `sys.stdout`, this will return the location of any print statement.
python
train
34.621622
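A minimal stdlib sketch of the frame-introspection mechanism the function above relies on (sys._getframe plus f_back walking); the helper names here are hypothetical, not part of stsci.tools:

import sys

def inner():
    caller = sys._getframe(1)  # our caller's frame, one level up the stack
    return caller.f_code.co_name, caller.f_lineno

def outer():
    return inner()

print(outer())  # ('outer', <line number of the "return inner()" statement>)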
bukun/TorCMS
torcms/handlers/page_handler.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/page_handler.py#L197-L212
def add_page(self, slug):
    '''
    Add new page.
    '''
    post_data = self.get_post_data()
    post_data['user_name'] = self.userinfo.user_name

    if MWiki.get_by_uid(slug):
        self.set_status(400)
        return False
    else:
        MWiki.create_page(slug, post_data)
        tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)
        self.redirect('/page/{0}'.format(slug))
Add new page.
python
train
27.6875
MaxStrange/AudioSegment
algorithms/asa.py
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L857-L870
def _update_segmentation_mask_if_overlap(toupdate, other, id, otherid):
    """
    Merges the segments specified by `id` (found in `toupdate`) and `otherid`
    (found in `other`) if they overlap at all. Updates `toupdate` accordingly.
    """
    # If there is any overlap or touching, merge the two, otherwise just return
    yourmask = other == otherid
    mymask = toupdate == id
    overlap_exists = np.any(yourmask & mymask)
    if not overlap_exists:
        return
    yourfidxs, yoursidxs = np.where(other == otherid)
    toupdate[yourfidxs, yoursidxs] = id
Merges the segments specified by `id` (found in `toupdate`) and `otherid` (found in `other`) if they overlap at all. Updates `toupdate` accordingly.
python
test
39.642857
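A self-contained demo of the mask-merge above on a tiny label array; the label values are illustrative:

import numpy as np

toupdate = np.array([[1, 1, 0],
                     [0, 0, 0]])
other = np.array([[0, 2, 2],
                  [0, 0, 2]])
# Segment 2 in `other` touches segment 1 in `toupdate` at cell (0, 1),
# so every cell of segment 2 is relabelled as segment 1.
if np.any((other == 2) & (toupdate == 1)):
    toupdate[np.where(other == 2)] = 1
print(toupdate)  # [[1 1 1]
                 #  [0 0 1]]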
pyviz/holoviews
holoviews/util/__init__.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/util/__init__.py#L115-L148
def _group_kwargs_to_options(cls, obj, kwargs):
    "Format option group kwargs into canonical options format"
    groups = Options._option_groups
    if set(kwargs.keys()) - set(groups):
        raise Exception("Keyword options %s must be one of %s" %
                        (groups, ','.join(repr(g) for g in groups)))
    elif not all(isinstance(v, dict) for v in kwargs.values()):
        raise Exception("The %s options must be specified using dictionary groups" %
                        ','.join(repr(k) for k in kwargs.keys()))

    # Check whether the user is specifying targets (such as 'Image.Foo')
    targets = [grp and all(k[0].isupper() for k in grp) for grp in kwargs.values()]
    if any(targets) and not all(targets):
        raise Exception("Cannot mix target specification keys such as 'Image' with non-target keywords.")
    elif not any(targets):
        # Not targets specified - add current object as target
        sanitized_group = util.group_sanitizer(obj.group)
        if obj.label:
            identifier = ('%s.%s.%s' % (
                obj.__class__.__name__, sanitized_group,
                util.label_sanitizer(obj.label)))
        elif sanitized_group != obj.__class__.__name__:
            identifier = '%s.%s' % (obj.__class__.__name__, sanitized_group)
        else:
            identifier = obj.__class__.__name__
        options = {identifier: {grp: kws for (grp, kws) in kwargs.items()}}
    else:
        dfltdict = defaultdict(dict)
        for grp, entries in kwargs.items():
            for identifier, kws in entries.items():
                dfltdict[identifier][grp] = kws
        options = dict(dfltdict)
    return options
Format option group kwargs into canonical options format
python
train
51.911765
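A standalone sketch of the defaultdict inversion in the target branch above, with made-up option names and values:

from collections import defaultdict

kwargs = {'plot': {'Image': {'width': 300}},
          'style': {'Image': {'cmap': 'viridis'}}}
dfltdict = defaultdict(dict)
for grp, entries in kwargs.items():
    for identifier, kws in entries.items():
        dfltdict[identifier][grp] = kws
print(dict(dfltdict))
# {'Image': {'plot': {'width': 300}, 'style': {'cmap': 'viridis'}}}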
dw/mitogen
ansible_mitogen/connection.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/ansible_mitogen/connection.py#L314-L328
def _connect_mitogen_su(spec):
    """
    Return ContextService arguments for su as a first class connection.
    """
    return {
        'method': 'su',
        'kwargs': {
            'username': spec.remote_user(),
            'password': spec.password(),
            'python_path': spec.python_path(),
            'su_path': spec.become_exe(),
            'connect_timeout': spec.timeout(),
            'remote_name': get_remote_name(spec),
        }
    }
Return ContextService arguments for su as a first class connection.
python
train
29.866667
knipknap/SpiffWorkflow
SpiffWorkflow/specs/base.py
https://github.com/knipknap/SpiffWorkflow/blob/f0af7f59a332e0619e4f3c00a7d4a3d230760e00/SpiffWorkflow/specs/base.py#L196-L205
def connect(self, taskspec):
    """
    Connect the *following* task to this one. In other words, the given
    task is added as an output task.

    :type  taskspec: TaskSpec
    :param taskspec: The new output task.
    """
    self.outputs.append(taskspec)
    taskspec._connect_notify(self)
Connect the *following* task to this one. In other words, the given task is added as an output task. :type taskspec: TaskSpec :param taskspec: The new output task.
python
valid
31.8
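A usage sketch, assuming the WorkflowSpec and Simple task spec classes from the same package; import paths may differ between SpiffWorkflow versions:

from SpiffWorkflow.specs import Simple, WorkflowSpec

spec = WorkflowSpec()
build = Simple(spec, 'build')
test = Simple(spec, 'test')
spec.start.connect(build)  # 'build' becomes an output of the start task
build.connect(test)        # 'test' becomes an output of 'build'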
senaite/senaite.core
bika/lims/upgrade/utils.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/upgrade/utils.py#L130-L204
def makeMigrator(context, portal_type, remove_old_value=True):
    """ generate a migrator for the given at-based portal type """
    meta_type = portal_type

    class BlobMigrator(BaseInlineMigrator):
        """in-place migrator for archetypes based content that copies
        file/image data from old non-blob fields to new fields with the same
        name provided by archetypes.schemaextender.

        see `plone3 to 4 migration guide`__

        .. __: https://plone.org/documentation/manual/upgrade-guide/version
        /upgrading-plone-3-x-to-4.0/updating-add-on-products-for-plone-4.0
        /use-plone.app.blob-based-blob-storage
        """
        src_portal_type = portal_type
        src_meta_type = meta_type
        dst_portal_type = portal_type
        dst_meta_type = meta_type
        fields = []

        def getFields(self, obj):
            if not self.fields:
                # get the blob fields to migrate from the first object
                for field in ISchema(obj).fields():
                    if IBlobField.providedBy(field):
                        self.fields.append(field.getName())
            return self.fields

        @property
        def fields_map(self):
            fields = self.getFields(None)
            return dict([(name, None) for name in fields])

        def migrate_data(self):
            fields = self.getFields(self.obj)
            for name in fields:
                # access old field by not using schemaextender
                oldfield = self.obj.schema[name]
                is_imagefield = False
                if hasattr(oldfield, 'removeScales'):
                    # clean up old image scales
                    is_imagefield = True
                    oldfield.removeScales(self.obj)
                value = oldfield.get(self.obj)

                if not value:
                    # no image/file data: don't copy it over to blob field
                    # this way it's safe to run migration multiple times w/o
                    # overwriting existing data
                    continue

                if isinstance(aq_base(value), BlobWrapper):
                    # already a blob field, no need to migrate it
                    continue

                # access new field via schemaextender
                field = self.obj.getField(name)
                field.getMutator(self.obj)(value)

                if remove_old_value:
                    # Remove data from old field to not end up with data
                    # stored twice - in ZODB and blobstorage
                    if is_imagefield:
                        oldfield.set(self.obj, 'DELETE_IMAGE')
                    else:
                        oldfield.set(self.obj, 'DELETE_FILE')

        def last_migrate_reindex(self):
            # The original method checks the modification date in order to
            # keep the old one, but we don't care about it.
            self.obj.reindexObject()

    return BlobMigrator
generate a migrator for the given at-based portal type
python
train
38.72
PrefPy/prefpy
prefpy/gmm_mixpl_moments.py
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/gmm_mixpl_moments.py#L49-L92
def top2_full(votes):
    """
    Description:
        Top 2 alternatives 16 moment conditions values calculation
    Parameters:
        votes: ordinal preference data (numpy ndarray of integers)
    """
    res = np.zeros(16)
    for vote in votes:
        # the top ranked alternative is in vote[0][0], second in vote[1][0]
        if vote[0][0] == 0:  # i.e. the first alt is ranked first
            res[0] += 1
            if vote[1][0] == 1:  # i.e. the second alt is ranked second
                res[4] += 1
            elif vote[1][0] == 2:
                res[5] += 1
            elif vote[1][0] == 3:
                res[6] += 1
        elif vote[0][0] == 1:
            res[1] += 1
            if vote[1][0] == 0:
                res[7] += 1
            elif vote[1][0] == 2:
                res[8] += 1
            elif vote[1][0] == 3:
                res[9] += 1
        elif vote[0][0] == 2:
            res[2] += 1
            if vote[1][0] == 0:
                res[10] += 1
            elif vote[1][0] == 1:
                res[11] += 1
            elif vote[1][0] == 3:
                res[12] += 1
        elif vote[0][0] == 3:
            res[3] += 1
            if vote[1][0] == 0:
                res[13] += 1
            elif vote[1][0] == 1:
                res[14] += 1
            elif vote[1][0] == 2:
                res[15] += 1
    res /= len(votes)
    return res
Description: Top 2 alternatives 16 moment conditions values calculation Parameters: votes: ordinal preference data (numpy ndarray of integers)
python
train
30.727273
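A usage sketch; the input shape is inferred from the vote[0][0]/vote[1][0] accesses above (vote[i][0] is the alternative ranked at position i) and the vote values are made up:

import numpy as np

votes = np.array([
    [[0], [1], [2], [3]],
    [[0], [2], [1], [3]],
    [[2], [0], [1], [3]],
])
res = top2_full(votes)
print(res[0])  # fraction of votes ranking alternative 0 first: 2/3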
aws/aws-encryption-sdk-python
src/aws_encryption_sdk/internal/formatting/deserialize.py
https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/src/aws_encryption_sdk/internal/formatting/deserialize.py#L325-L368
def deserialize_frame(stream, header, verifier=None):
    """Deserializes a frame from a body.

    :param stream: Source data stream
    :type stream: io.BytesIO
    :param header: Deserialized header
    :type header: aws_encryption_sdk.structures.MessageHeader
    :param verifier: Signature verifier object (optional)
    :type verifier: aws_encryption_sdk.internal.crypto.Verifier
    :returns: Deserialized frame and a boolean stating if this is the final frame
    :rtype: :class:`aws_encryption_sdk.internal.structures.MessageFrameBody` and bool
    """
    _LOGGER.debug("Starting frame deserialization")
    frame_data = {}
    final_frame = False
    (sequence_number,) = unpack_values(">I", stream, verifier)
    if sequence_number == SequenceIdentifier.SEQUENCE_NUMBER_END.value:
        _LOGGER.debug("Deserializing final frame")
        (sequence_number,) = unpack_values(">I", stream, verifier)
        final_frame = True
    else:
        _LOGGER.debug("Deserializing frame sequence number %d", int(sequence_number))
    frame_data["final_frame"] = final_frame
    frame_data["sequence_number"] = sequence_number
    (frame_iv,) = unpack_values(">{iv_len}s".format(iv_len=header.algorithm.iv_len), stream, verifier)
    frame_data["iv"] = frame_iv
    if final_frame is True:
        (content_length,) = unpack_values(">I", stream, verifier)
        if content_length >= header.frame_length:
            raise SerializationError(
                "Invalid final frame length: {final} >= {normal}".format(
                    final=content_length, normal=header.frame_length
                )
            )
    else:
        content_length = header.frame_length
    (frame_content, frame_tag) = unpack_values(
        ">{content_len}s{auth_len}s".format(content_len=content_length, auth_len=header.algorithm.auth_len),
        stream,
        verifier,
    )
    frame_data["ciphertext"] = frame_content
    frame_data["tag"] = frame_tag
    return MessageFrameBody(**frame_data), final_frame
Deserializes a frame from a body. :param stream: Source data stream :type stream: io.BytesIO :param header: Deserialized header :type header: aws_encryption_sdk.structures.MessageHeader :param verifier: Signature verifier object (optional) :type verifier: aws_encryption_sdk.internal.crypto.Verifier :returns: Deserialized frame and a boolean stating if this is the final frame :rtype: :class:`aws_encryption_sdk.internal.structures.MessageFrameBody` and bool
python
train
44.636364
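A stdlib-only sketch of the big-endian frame layout the reads above imply (sequence number, IV, ciphertext, auth tag). The iv_len/frame_length/auth_len values are assumptions for illustration, and struct stands in for the SDK's unpack_values helper:

import io
import struct

frame = struct.pack(">I", 1) + b"\x00" * 12 + b"ct" + b"0123456789abcdef"
stream = io.BytesIO(frame)
(sequence_number,) = struct.unpack(">I", stream.read(4))  # ">I" as above
iv = stream.read(12)          # header.algorithm.iv_len, assumed 12
ciphertext = stream.read(2)   # header.frame_length, assumed 2
tag = stream.read(16)         # header.algorithm.auth_len, assumed 16
print(sequence_number, len(iv), len(tag))  # 1 12 16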
LogicalDash/LiSE
LiSE/LiSE/examples/sickle.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/LiSE/LiSE/examples/sickle.py#L30-L162
def install(
        engine,
        n_creatures=5,
        n_sickles=3,
        malaria_chance=.05,
        mate_chance=.05,
        mapsize=(1, 1),
        startpos=(0, 0)
):
    """Natural Selection on Sickle Cell Anemia

    If anyone carries a pair of sickle betaglobin genes, they die of
    sickle cell anemia.

    Individuals with 1x betaglobin, 1x sickle betaglobin are immune to
    malaria.
    """
    initmap = nx.grid_2d_graph(*mapsize)
    phys = engine.new_character("physical", data=initmap)
    species = engine.new_character(
        "species",
        mate_chance=mate_chance,
        malaria_chance=malaria_chance,
        n_creatures=n_creatures,
    )
    for n in range(0, n_creatures):
        name = "critter" + str(n)
        phys.add_thing(
            name=name,
            location=startpos,
            sickle_a=(n < n_sickles),
            sickle_b=False,
            male=engine.coinflip(),
            last_mate_turn=-1
        )
        assert name in phys.thing
        assert name not in phys.place
        assert name in phys.node, "couldn't add node {} to phys.node".format(name)
        assert hasattr(phys.node[name], 'location')
        species.add_avatar("physical", name)
        assert hasattr(species.avatar['physical'][name], 'location')

    # putting dieoff earlier in the code than mate means that dieoff will
    # be followed before mate is
    @species.avatar.rule
    def dieoff(critter):
        critter.delete()
        assert (critter.name not in critter.character.node)
        if critter['from_malaria']:
            return 'malaria'
        else:
            return 'anemia'

    @species.avatar.rule
    def mate(critter):
        """If I share my location with another critter, attempt to mate"""
        suitors = list(
            oc for oc in critter.location.contents()
            if oc['male'] != critter['male']
        )
        assert (len(suitors) > 0)
        other_critter = critter.engine.choice(suitors)
        sickles = [
            critter['sickle_a'],
            critter['sickle_b'],
            other_critter['sickle_a'],
            other_critter['sickle_b']
        ]
        engine.shuffle(sickles)
        name = "critter" + str(species.stat["n_creatures"])
        species.stat["n_creatures"] += 1
        engine.character["physical"].add_thing(
            name,
            critter["location"],
            sickle_a=sickles.pop(),
            sickle_b=sickles.pop(),
            male=engine.coinflip(),
            last_mate_turn=engine.turn
        )
        species.add_avatar("physical", name)
        critter['last_mate_turn'] = other_critter['last_mate_turn'] = \
            engine.turn
        return 'mated'

    @mate.prereq
    def once_per_turn(critter):
        return critter['last_mate_turn'] < critter.engine.turn

    @mate.prereq
    def mate_present(critter):
        for oc in critter.location.contents():
            if oc['male'] != critter['male']:
                return True
        return False

    @mate.trigger
    def in_the_mood(critter):
        return critter.engine.random() < critter.user.stat['mate_chance']

    @dieoff.trigger
    def sickle2(critter):
        r = critter['sickle_a'] and critter['sickle_b']
        if r:
            critter['from_malaria'] = False
        return r

    @dieoff.trigger
    def malaria(critter):
        r = (
            critter.engine.random() < critter.user.stat['malaria_chance']
            and not (critter['sickle_a'] or critter['sickle_b'])
        )
        if r:
            critter['from_malaria'] = True
        return r

    # it would make more sense to keep using species.avatar.rule, this
    # is just a test
    @phys.thing.rule
    def wander(critter):
        dests = list(critter.character.place.keys())
        dests.remove(critter['location'])
        dest = critter.engine.choice(dests)
        critter.travel_to(dest)

    @wander.trigger
    def not_travelling(critter):
        return critter.next_location is None

    @wander.prereq
    def big_map(critter):
        return len(critter.character.place) > 1
Natural Selection on Sickle Cell Anemia If anyone carries a pair of sickle betaglobin genes, they die of sickle cell anemia. Individuals with 1x betaglobin, 1x sickle betaglobin are immune to malaria.
python
train
29.834586
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L6344-L6352
def RelaxNGValidateCtxt(self, reader, options):
    """Use RelaxNG schema context to validate the document as it is
    processed. Activation is only possible before the first
    Read(). If @ctxt is None, then RelaxNG schema validation is
    deactivated. """
    if reader is None:
        reader__o = None
    else:
        reader__o = reader._o
    ret = libxml2mod.xmlTextReaderRelaxNGValidateCtxt(reader__o, self._o, options)
    return ret
Use RelaxNG schema context to validate the document as it is processed. Activation is only possible before the first Read(). If @ctxt is None, then RelaxNG schema validation is deactivated.
python
train
51.222222
xi/ldif3
ldif3.py
https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L261-L277
def _iter_unfolded_lines(self):
    """Iterate over unfolded input lines. Skip comments."""
    line = self._input_file.readline()
    while line:
        self.line_counter += 1
        self.byte_counter += len(line)
        line = self._strip_line_sep(line)

        nextline = self._input_file.readline()
        while nextline and nextline[:1] == b' ':
            line += self._strip_line_sep(nextline)[1:]
            nextline = self._input_file.readline()

        if not line.startswith(b'#'):
            yield line
        line = nextline
Iterate over unfolded input lines. Skip comments.
python
train
33.823529
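A stdlib-only demo of the unfolding loop above (LDIF continuation lines start with a space, '#' lines are comments); the input bytes are made up:

import io

raw = io.BytesIO(
    b"dn: cn=Alice,dc=example,dc=org\n"
    b"description: a long value\n"
    b" , folded across two lines\n"
    b"# a comment that is skipped\n"
)
line = raw.readline()
while line:
    line = line.rstrip(b"\r\n")
    nextline = raw.readline()
    while nextline and nextline[:1] == b" ":
        line += nextline.rstrip(b"\r\n")[1:]  # drop the leading fold space
        nextline = raw.readline()
    if not line.startswith(b"#"):
        print(line)
    line = nextline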
Cymmetria/honeycomb
honeycomb/utils/plugin_utils.py
https://github.com/Cymmetria/honeycomb/blob/33ea91b5cf675000e4e85dd02efe580ea6e95c86/honeycomb/utils/plugin_utils.py#L330-L339
def get_select_items(items):
    """Return list of possible select items."""
    option_items = list()
    for item in items:
        if isinstance(item, dict) and defs.VALUE in item and defs.LABEL in item:
            option_items.append(item[defs.VALUE])
        else:
            raise exceptions.ParametersFieldError(item, "a dictionary with {} and {}"
                                                  .format(defs.LABEL, defs.VALUE))
    return option_items
Return list of possible select items.
python
train
45.4
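A usage sketch, assuming defs.VALUE and defs.LABEL resolve to the strings "value" and "label" (not verified here):

items = [{"value": "http", "label": "HTTP"},
         {"value": "ssh", "label": "SSH"}]
print(get_select_items(items))  # ['http', 'ssh']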
StackStorm/pybind
pybind/nos/v6_0_2f/ip/dns/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/ip/dns/__init__.py#L127-L148
def _set_name_server(self, v, load=False):
    """
    Setter method for name_server, mapped from YANG variable /ip/dns/name_server (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_name_server is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_name_server() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGListType("name_server_ip",name_server.name_server, yang_name="name-server", rest_name="name-server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name-server-ip', extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'cli-suppress-mode': None, u'callpoint': u'IpadmNameSrvCallpoint', u'info': u' Name Server configurations', u'cli-suppress-list-no': None}}), is_container='list', yang_name="name-server", rest_name="name-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'cli-suppress-mode': None, u'callpoint': u'IpadmNameSrvCallpoint', u'info': u' Name Server configurations', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-administration', defining_module='brocade-ip-administration', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """name_server must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("name_server_ip",name_server.name_server, yang_name="name-server", rest_name="name-server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name-server-ip', extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'cli-suppress-mode': None, u'callpoint': u'IpadmNameSrvCallpoint', u'info': u' Name Server configurations', u'cli-suppress-list-no': None}}), is_container='list', yang_name="name-server", rest_name="name-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'cli-suppress-mode': None, u'callpoint': u'IpadmNameSrvCallpoint', u'info': u' Name Server configurations', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-administration', defining_module='brocade-ip-administration', yang_type='list', is_config=True)""",
        })

    self.__name_server = t
    if hasattr(self, '_set'):
        self._set()
Setter method for name_server, mapped from YANG variable /ip/dns/name_server (list) If this variable is read-only (config: false) in the source YANG file, then _set_name_server is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_name_server() directly.
python
train
120.227273
hsolbrig/pyjsg
pyjsg/parser_impl/jsg_lexerruleblock_parser.py
https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/jsg_lexerruleblock_parser.py#L89-L93
def visitLexerBlock(self, ctx: jsgParser.LexerBlockContext):
    """ lexerBlock: OPREN lexeraltList CPREN """
    self._rulePattern += '('
    self.visitChildren(ctx)
    self._rulePattern += ')'
lexerBlock: OPREN lexeraltList CPREN
python
train
41.4
materialsproject/pymatgen
pymatgen/alchemy/materials.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/alchemy/materials.py#L260-L297
def from_cif_string(cif_string, transformations=None, primitive=True,
                    occupancy_tolerance=1.):
    """
    Generates TransformedStructure from a cif string.

    Args:
        cif_string (str): Input cif string. Should contain only one
            structure. For cifs containing multiple structures, please use
            CifTransmuter.
        transformations ([Transformations]): Sequence of transformations
            to be applied to the input structure.
        primitive (bool): Option to set if the primitive cell should be
            extracted. Defaults to True. However, there are certain
            instances where you might want to use a non-primitive cell,
            e.g., if you are trying to generate all possible orderings of
            partial removals or order a disordered structure.
        occupancy_tolerance (float): If total occupancy of a site is
            between 1 and occupancy_tolerance, the occupancies will be
            scaled down to 1.

    Returns:
        TransformedStructure
    """
    parser = CifParser.from_string(cif_string, occupancy_tolerance)
    raw_string = re.sub(r"'", "\"", cif_string)
    cif_dict = parser.as_dict()
    cif_keys = list(cif_dict.keys())
    s = parser.get_structures(primitive)[0]
    partial_cif = cif_dict[cif_keys[0]]
    if "_database_code_ICSD" in partial_cif:
        source = partial_cif["_database_code_ICSD"] + "-ICSD"
    else:
        source = "uploaded cif"
    source_info = {"source": source,
                   "datetime": str(datetime.datetime.now()),
                   "original_file": raw_string,
                   "cif_data": cif_dict[cif_keys[0]]}
    return TransformedStructure(s, transformations, history=[source_info])
Generates TransformedStructure from a cif string. Args: cif_string (str): Input cif string. Should contain only one structure. For cifs containing multiple structures, please use CifTransmuter. transformations ([Transformations]): Sequence of transformations to be applied to the input structure. primitive (bool): Option to set if the primitive cell should be extracted. Defaults to True. However, there are certain instances where you might want to use a non-primitive cell, e.g., if you are trying to generate all possible orderings of partial removals or order a disordered structure. occupancy_tolerance (float): If total occupancy of a site is between 1 and occupancy_tolerance, the occupancies will be scaled down to 1. Returns: TransformedStructure
python
train
48.552632
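A usage sketch; the CIF filename is hypothetical:

from pymatgen.alchemy.materials import TransformedStructure

with open("my_structure.cif") as f:  # hypothetical local CIF file
    ts = TransformedStructure.from_cif_string(f.read(), primitive=False)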
pyca/pynacl
src/nacl/bindings/crypto_kx.py
https://github.com/pyca/pynacl/blob/0df0c2c7693fa5d316846111ce510702756f5feb/src/nacl/bindings/crypto_kx.py#L55-L77
def crypto_kx_seed_keypair(seed):
    """
    Generate a keypair with a given seed.
    This is functionally the same as crypto_box_seed_keypair, however
    it uses the blake2b hash primitive instead of sha512.
    It is included mainly for api consistency when using crypto_kx.
    :param seed: random seed
    :type seed: bytes
    :return: (public_key, secret_key)
    :rtype: (bytes, bytes)
    """
    public_key = ffi.new("unsigned char[]", crypto_kx_PUBLIC_KEY_BYTES)
    secret_key = ffi.new("unsigned char[]", crypto_kx_SECRET_KEY_BYTES)

    ensure(isinstance(seed, bytes) and
           len(seed) == crypto_kx_SEED_BYTES,
           'Seed must be a {0} byte long bytes sequence'.format(
               crypto_kx_SEED_BYTES),
           raising=exc.TypeError)

    res = lib.crypto_kx_seed_keypair(public_key, secret_key, seed)
    ensure(res == 0, "Key generation failed.", raising=exc.CryptoError)

    return (ffi.buffer(public_key, crypto_kx_PUBLIC_KEY_BYTES)[:],
            ffi.buffer(secret_key, crypto_kx_SECRET_KEY_BYTES)[:])
Generate a keypair with a given seed. This is functionally the same as crypto_box_seed_keypair, however it uses the blake2b hash primitive instead of sha512. It is included mainly for api consistency when using crypto_kx. :param seed: random seed :type seed: bytes :return: (public_key, secret_key) :rtype: (bytes, bytes)
python
train
44.391304
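A usage sketch, assuming nacl.bindings re-exports crypto_kx_seed_keypair and crypto_kx_SEED_BYTES as in recent PyNaCl releases:

import os
from nacl.bindings import crypto_kx_seed_keypair, crypto_kx_SEED_BYTES

seed = os.urandom(crypto_kx_SEED_BYTES)
pk, sk = crypto_kx_seed_keypair(seed)
assert (pk, sk) == crypto_kx_seed_keypair(seed)  # deterministic in the seed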
ArduPilot/MAVProxy
MAVProxy/mavproxy.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/mavproxy.py#L950-L959
def input_loop():
    '''wait for user input'''
    while mpstate.status.exit != True:
        try:
            if mpstate.status.exit != True:
                line = input(mpstate.rl.prompt)
        except EOFError:
            mpstate.status.exit = True
            sys.exit(1)
        mpstate.input_queue.put(line)
wait for user input
python
train
30.8
raiden-network/raiden-contracts
raiden_contracts/contract_manager.py
https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/contract_manager.py#L145-L167
def merge_deployment_data(dict1: DeployedContracts, dict2: DeployedContracts) -> DeployedContracts:
    """ Take contents of two deployment JSON files and merge them

    The dictionary under 'contracts' key will be merged. The 'contracts'
    contents from different JSON files must not overlap. The contents under
    other keys must be identical.
    """
    if not dict1:
        return dict2
    if not dict2:
        return dict1
    common_contracts: Dict[str, DeployedContract] = deepcopy(dict1['contracts'])
    assert not common_contracts.keys() & dict2['contracts'].keys()
    common_contracts.update(dict2['contracts'])
    assert dict2['chain_id'] == dict1['chain_id']
    assert dict2['contracts_version'] == dict1['contracts_version']
    return {
        'contracts': common_contracts,
        'chain_id': dict1['chain_id'],
        'contracts_version': dict1['contracts_version'],
    }
Take contents of two deployment JSON files and merge them The dictionary under 'contracts' key will be merged. The 'contracts' contents from different JSON files must not overlap. The contents under other keys must be identical.
[ "Take", "contents", "of", "two", "deployment", "JSON", "files", "and", "merge", "them" ]
python
train
38.391304
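To make the merge semantics above concrete, here is a small hedged example with hand-built dictionaries; the shape of DeployedContracts (keys 'contracts', 'chain_id', 'contracts_version') is inferred from the function body, and the addresses are invented.

# Two deployment dicts for the same chain/version, with disjoint contract sets.
dict1 = {'contracts': {'TokenNetworkRegistry': {'address': '0xaa'}},
         'chain_id': 1, 'contracts_version': '0.9.0'}
dict2 = {'contracts': {'SecretRegistry': {'address': '0xbb'}},
         'chain_id': 1, 'contracts_version': '0.9.0'}

merged = merge_deployment_data(dict1, dict2)
# merged['contracts'] now carries both entries; an overlapping contract name or a
# mismatched chain_id/contracts_version would trip the assertions instead.
assert set(merged['contracts']) == {'TokenNetworkRegistry', 'SecretRegistry'}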
deepmind/sonnet
sonnet/python/modules/conv.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/conv.py#L1517-L1545
def transpose(self, name=None): """Returns matching `Conv1D` module. Args: name: Optional string assigning name of transpose module. The default name is constructed by appending "_transpose" to `self.name`. Returns: `Conv1D` module. """ if name is None: name = self.module_name + "_transpose" if self._data_format == DATA_FORMAT_NWC: stride = self._stride[1:-1] else: # self._data_format == DATA_FORMAT_NCW stride = self._stride[2:] return Conv1D(output_channels=lambda: self.input_channels, kernel_shape=self.kernel_shape, stride=stride, padding=self.padding, use_bias=self._use_bias, initializers=self.initializers, partitioners=self.partitioners, regularizers=self.regularizers, data_format=self._data_format, custom_getter=self._custom_getter, name=name)
[ "def", "transpose", "(", "self", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "self", ".", "module_name", "+", "\"_transpose\"", "if", "self", ".", "_data_format", "==", "DATA_FORMAT_NWC", ":", "stride", "=", "self", ".", "_stride", "[", "1", ":", "-", "1", "]", "else", ":", "# self._data_format == DATA_FORMAT_NCW", "stride", "=", "self", ".", "_stride", "[", "2", ":", "]", "return", "Conv1D", "(", "output_channels", "=", "lambda", ":", "self", ".", "input_channels", ",", "kernel_shape", "=", "self", ".", "kernel_shape", ",", "stride", "=", "stride", ",", "padding", "=", "self", ".", "padding", ",", "use_bias", "=", "self", ".", "_use_bias", ",", "initializers", "=", "self", ".", "initializers", ",", "partitioners", "=", "self", ".", "partitioners", ",", "regularizers", "=", "self", ".", "regularizers", ",", "data_format", "=", "self", ".", "_data_format", ",", "custom_getter", "=", "self", ".", "_custom_getter", ",", "name", "=", "name", ")" ]
Returns matching `Conv1D` module. Args: name: Optional string assigning name of transpose module. The default name is constructed by appending "_transpose" to `self.name`. Returns: `Conv1D` module.
[ "Returns", "matching", "Conv1D", "module", "." ]
python
train
33.965517
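A hedged usage sketch for the transpose method above, assuming Sonnet v1 on TensorFlow 1.x graph mode; the placeholder shapes and module name are illustrative. Note that output_channels is passed as a lambda, so it resolves to the original module's input_channels only once the transposed module is connected.

import tensorflow as tf
import sonnet as snt

inputs = tf.placeholder(tf.float32, [None, 100, 8])   # [batch, width, channels]
conv = snt.Conv1D(output_channels=16, kernel_shape=3, name="enc")
hidden = conv(inputs)        # connecting the module fixes input_channels = 8
mirror = conv.transpose()    # a Conv1D named "enc_transpose"
back = mirror(hidden)        # output_channels lazily resolves to conv's 8 inputs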
boriel/zxbasic
arch/zx48k/backend/__8bit.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__8bit.py#L244-L290
def _mul8(ins): """ Multiplies 2 last values from the stack. Optimizations: * If any of the ops is ZERO, then do A = 0 ==> XOR A, cause A * 0 = 0 * A = 0 * If any of the ops is ONE, do NOTHING A * 1 = 1 * A = A """ op1, op2 = tuple(ins.quad[2:]) if _int_ops(op1, op2) is not None: op1, op2 = _int_ops(op1, op2) output = _8bit_oper(op1) if op2 == 1: # A * 1 = 1 * A = A output.append('push af') return output if op2 == 0: output.append('xor a') output.append('push af') return output if op2 == 2: # A * 2 == A SLA 1 output.append('add a, a') output.append('push af') return output if op2 == 4: # A * 4 == A SLA 2 output.append('add a, a') output.append('add a, a') output.append('push af') return output output.append('ld h, %i' % int8(op2)) else: if op2[0] == '_': # stack optimization op1, op2 = op2, op1 output = _8bit_oper(op1, op2) output.append('call __MUL8_FAST') # Immediate output.append('push af') REQUIRES.add('mul8.asm') return output
[ "def", "_mul8", "(", "ins", ")", ":", "op1", ",", "op2", "=", "tuple", "(", "ins", ".", "quad", "[", "2", ":", "]", ")", "if", "_int_ops", "(", "op1", ",", "op2", ")", "is", "not", "None", ":", "op1", ",", "op2", "=", "_int_ops", "(", "op1", ",", "op2", ")", "output", "=", "_8bit_oper", "(", "op1", ")", "if", "op2", "==", "1", ":", "# A * 1 = 1 * A = A", "output", ".", "append", "(", "'push af'", ")", "return", "output", "if", "op2", "==", "0", ":", "output", ".", "append", "(", "'xor a'", ")", "output", ".", "append", "(", "'push af'", ")", "return", "output", "if", "op2", "==", "2", ":", "# A * 2 == A SLA 1", "output", ".", "append", "(", "'add a, a'", ")", "output", ".", "append", "(", "'push af'", ")", "return", "output", "if", "op2", "==", "4", ":", "# A * 4 == A SLA 2", "output", ".", "append", "(", "'add a, a'", ")", "output", ".", "append", "(", "'add a, a'", ")", "output", ".", "append", "(", "'push af'", ")", "return", "output", "output", ".", "append", "(", "'ld h, %i'", "%", "int8", "(", "op2", ")", ")", "else", ":", "if", "op2", "[", "0", "]", "==", "'_'", ":", "# stack optimization", "op1", ",", "op2", "=", "op2", ",", "op1", "output", "=", "_8bit_oper", "(", "op1", ",", "op2", ")", "output", ".", "append", "(", "'call __MUL8_FAST'", ")", "# Immediate", "output", ".", "append", "(", "'push af'", ")", "REQUIRES", ".", "add", "(", "'mul8.asm'", ")", "return", "output" ]
Multiplies 2 last values from the stack. Optimizations: * If any of the ops is ZERO, then do A = 0 ==> XOR A, cause A * 0 = 0 * A = 0 * If any of the ops is ONE, do NOTHING A * 1 = 1 * A = A
[ "Multiplies", "2", "last", "values", "from", "the", "stack", "." ]
python
train
25.744681
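The strength-reduction rules in _mul8 (A*0 via xor a, A*1 as a no-op, A*2 and A*4 via repeated add a, a) can be sanity-checked in plain Python; this hedged sketch mimics only the constant-folding decisions, without the backend's _8bit_oper plumbing.

# Toy model of the constant cases handled by _mul8 (Z80 semantics, mod 256).
def mul8_asm_for_constant(op2):
    """Return the Z80 instructions _mul8 would emit for A * op2, op2 a constant."""
    if op2 == 0:
        return ['xor a', 'push af']                 # A * 0 == 0, so just clear A
    if op2 == 1:
        return ['push af']                          # A * 1 == A, no work needed
    if op2 == 2:
        return ['add a, a', 'push af']              # one left shift
    if op2 == 4:
        return ['add a, a', 'add a, a', 'push af']  # two left shifts
    return ['ld h, %i' % (op2 & 0xFF), 'call __MUL8_FAST', 'push af']

assert mul8_asm_for_constant(4)[:2] == ['add a, a', 'add a, a']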
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/html/notebook/kernelmanager.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/html/notebook/kernelmanager.py#L324-L327
def create_iopub_stream(self, kernel_id): """Create a new iopub stream.""" self._check_kernel_id(kernel_id) return super(MappingKernelManager, self).create_iopub_stream(kernel_id)
[ "def", "create_iopub_stream", "(", "self", ",", "kernel_id", ")", ":", "self", ".", "_check_kernel_id", "(", "kernel_id", ")", "return", "super", "(", "MappingKernelManager", ",", "self", ")", ".", "create_iopub_stream", "(", "kernel_id", ")" ]
Create a new iopub stream.
[ "Create", "a", "new", "iopub", "stream", "." ]
python
test
50
theislab/scanpy
scanpy/plotting/_anndata.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/plotting/_anndata.py#L1320-L1620
def dotplot(adata, var_names, groupby=None, use_raw=None, log=False, num_categories=7, expression_cutoff=0., mean_only_expressed=False, color_map='Reds', dot_max=None, dot_min=None, figsize=None, dendrogram=False, gene_symbols=None, var_group_positions=None, standard_scale=None, smallest_dot=0., var_group_labels=None, var_group_rotation=None, layer=None, show=None, save=None, **kwds): """\ Makes a *dot plot* of the expression values of `var_names`. For each var_name and each `groupby` category a dot is plotted. Each dot represents two values: mean expression within each category (visualized by color) and fraction of cells expressing the var_name in the category (visualized by the size of the dot). If groupby is not given, the dotplot assumes that all data belongs to a single category. **Note**: A gene is considered expressed if the expression value in the adata (or adata.raw) is above the specified threshold which is zero by default. An example of dotplot usage is to visualize, for multiple marker genes, the mean value and the percentage of cells expressing the gene across multiple clusters. Parameters ---------- {common_plot_args} expression_cutoff : `float` (default: `0.`) Expression cutoff that is used for binarizing the gene expression and determining the fraction of cells expressing given genes. A gene is expressed only if the expression value is greater than this threshold. mean_only_expressed : `bool` (default: `False`) If True, gene expression is averaged only over the cells expressing the given genes. color_map : `str`, optional (default: `Reds`) String denoting matplotlib color map. dot_max : `float` optional (default: `None`) If none, the maximum dot size is set to the maximum fraction value found (e.g. 0.6). If given, the value should be a number between 0 and 1. All fractions larger than dot_max are clipped to this value. dot_min : `float` optional (default: `None`) If none, the minimum dot size is set to 0. If given, the value should be a number between 0 and 1. All fractions smaller than dot_min are clipped to this value. standard_scale : {{'var', 'group'}}, optional (default: None) Whether or not to standardize that dimension between 0 and 1, meaning for each variable or group, subtract the minimum and divide each by its maximum. smallest_dot : `float` optional (default: 0.) If none, the smallest dot has size 0. All expression levels with `dot_min` are plotted with `smallest_dot` dot size. {show_save_ax} **kwds : keyword arguments Are passed to `matplotlib.pyplot.scatter`. Returns ------- List of :class:`~matplotlib.axes.Axes` Examples ------- >>> adata = sc.datasets.pbmc68k_reduced() >>> sc.pl.dotplot(adata, ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ'], ... groupby='bulk_labels', dendrogram=True) """ if use_raw is None and adata.raw is not None: use_raw = True if isinstance(var_names, str): var_names = [var_names] categories, obs_tidy = _prepare_dataframe(adata, var_names, groupby, use_raw, log, num_categories, layer=layer, gene_symbols=gene_symbols) # for if category defined by groupby (if any) compute for each var_name # 1. the fraction of cells in the category having a value > expression_cutoff # 2. the mean value over the category # 1. compute fraction of cells having value > expression_cutoff # transform obs_tidy into boolean matrix using the expression_cutoff obs_bool = obs_tidy > expression_cutoff # compute the sum per group which in the boolean matrix this is the number # of values > expression_cutoff, and divide the result by the total number of values # in the group (given by `count()`) fraction_obs = obs_bool.groupby(level=0).sum() / obs_bool.groupby(level=0).count() # 2. compute mean value if mean_only_expressed: mean_obs = obs_tidy.mask(~obs_bool).groupby(level=0).mean().fillna(0) else: mean_obs = obs_tidy.groupby(level=0).mean() if standard_scale == 'group': mean_obs = mean_obs.sub(mean_obs.min(1), axis=0) mean_obs = mean_obs.div(mean_obs.max(1), axis=0).fillna(0) elif standard_scale == 'var': mean_obs -= mean_obs.min(0) mean_obs = (mean_obs / mean_obs.max(0)).fillna(0) elif standard_scale is None: pass else: logg.warn('Unknown type for standard_scale, ignored') dendro_width = 0.8 if dendrogram else 0 colorbar_width = 0.2 colorbar_width_spacer = 0.5 size_legend_width = 0.25 if figsize is None: height = len(categories) * 0.3 + 1 # +1 for labels # if the number of categories is small (eg 1 or 2) use # a larger height height = max([1.5, height]) heatmap_width = len(var_names) * 0.35 width = heatmap_width + colorbar_width + size_legend_width + dendro_width + colorbar_width_spacer else: width, height = figsize heatmap_width = width - (colorbar_width + size_legend_width + dendro_width + colorbar_width_spacer) # colorbar ax width should not change with differences in the width of the image # otherwise can become too small if var_group_positions is not None and len(var_group_positions) > 0: # add some space in case 'brackets' want to be plotted on top of the image height_ratios = [0.5, 10] else: height_ratios = [0, 10.5] # define a layout of 2 rows x 5 columns # first row is for 'brackets' (if no brackets needed, the height of this row is zero) # second row is for main content. This second row # is divided into 4 axes: # first ax is for the main figure # second ax is for dendrogram (if present) # third ax is for the color bar legend # fourth ax is for a spacer that avoids the ticks # from the color bar to be hidden beneath the size legend axis # fifth ax is to plot the size legend fig = pl.figure(figsize=(width, height)) axs = gridspec.GridSpec(nrows=2, ncols=5, wspace=0.02, hspace=0.04, width_ratios=[heatmap_width, dendro_width, colorbar_width, colorbar_width_spacer, size_legend_width], height_ratios=height_ratios) if len(categories) < 4: # when few categories are shown, the colorbar and size legend # need to be larger than the main plot, otherwise they would look # compressed. For this, the dotplot ax is split into two: axs2 = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=axs[1, 0], height_ratios=[len(categories) * 0.3, 1]) dot_ax = fig.add_subplot(axs2[0]) else: dot_ax = fig.add_subplot(axs[1, 0]) color_legend = fig.add_subplot(axs[1, 2]) if groupby is None or len(categories) <= 1: # dendrogram can only be computed between groupby categories dendrogram = False if dendrogram: dendro_data = _reorder_categories_after_dendrogram(adata, groupby, dendrogram, var_names=var_names, var_group_labels=var_group_labels, var_group_positions=var_group_positions) var_group_labels = dendro_data['var_group_labels'] var_group_positions = dendro_data['var_group_positions'] # reorder matrix if dendro_data['var_names_idx_ordered'] is not None: # reorder columns (usually genes) if needed. This only happens when # var_group_positions and var_group_labels is set mean_obs = mean_obs.iloc[:,dendro_data['var_names_idx_ordered']] fraction_obs = fraction_obs.iloc[:, dendro_data['var_names_idx_ordered']] # reorder rows (categories) to match the dendrogram order mean_obs = mean_obs.iloc[dendro_data['categories_idx_ordered'], :] fraction_obs = fraction_obs.iloc[dendro_data['categories_idx_ordered'], :] y_ticks = range(mean_obs.shape[0]) dendro_ax = fig.add_subplot(axs[1, 1], sharey=dot_ax) _plot_dendrogram(dendro_ax, adata, groupby, dendrogram_key=dendrogram, ticks=y_ticks) # to keep the size_legend of about the same height, irrespective # of the number of categories, the fourth ax is subdivided into two parts size_legend_height = min(1.3, height) # wspace is proportional to the width but a constant value is # needed such that the spacing is the same for thinner or wider images. wspace = 10.5 / width axs3 = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=axs[1, 4], wspace=wspace, height_ratios=[size_legend_height / height, (height - size_legend_height) / height]) # make scatter plot in which # x = var_names # y = groupby category # size = fraction # color = mean expression y, x = np.indices(mean_obs.shape) y = y.flatten() x = x.flatten() frac = fraction_obs.values.flatten() mean_flat = mean_obs.values.flatten() cmap = pl.get_cmap(color_map) if dot_max is None: dot_max = np.ceil(max(frac) * 10) / 10 else: if dot_max < 0 or dot_max > 1: raise ValueError("`dot_max` value has to be between 0 and 1") if dot_min is None: dot_min = 0 else: if dot_min < 0 or dot_min > 1: raise ValueError("`dot_min` value has to be between 0 and 1") if dot_min != 0 or dot_max != 1: # clip frac between dot_min and dot_max frac = np.clip(frac, dot_min, dot_max) old_range = dot_max - dot_min # re-scale frac between 0 and 1 frac = ((frac - dot_min) / old_range) size = (frac * 10) ** 2 size += smallest_dot import matplotlib.colors normalize = matplotlib.colors.Normalize(vmin=kwds.get('vmin'), vmax=kwds.get('vmax')) colors = cmap(normalize(mean_flat)) dot_ax.scatter(x, y, color=colors, s=size, cmap=cmap, norm=None, edgecolor='none', **kwds) y_ticks = range(mean_obs.shape[0]) dot_ax.set_yticks(y_ticks) dot_ax.set_yticklabels([mean_obs.index[idx] for idx in y_ticks]) x_ticks = range(mean_obs.shape[1]) dot_ax.set_xticks(x_ticks) dot_ax.set_xticklabels([mean_obs.columns[idx] for idx in x_ticks], rotation=90) dot_ax.tick_params(axis='both', labelsize='small') dot_ax.grid(False) dot_ax.set_xlim(-0.5, len(var_names) + 0.5) dot_ax.set_ylabel(groupby) # to be consistent with the heatmap plot, is better to # invert the order of the y-axis, such that the first group is on # top ymin, ymax = dot_ax.get_ylim() dot_ax.set_ylim(ymax+0.5, ymin - 0.5) dot_ax.set_xlim(-1, len(var_names)) # plot group legends on top of dot_ax (if given) if var_group_positions is not None and len(var_group_positions) > 0: gene_groups_ax = fig.add_subplot(axs[0, 0], sharex=dot_ax) _plot_gene_groups_brackets(gene_groups_ax, group_positions=var_group_positions, group_labels=var_group_labels, rotation=var_group_rotation) # plot colorbar import matplotlib.colorbar matplotlib.colorbar.ColorbarBase(color_legend, cmap=cmap, norm=normalize) # for the dot size legend, use step between dot_max and dot_min # based on how different they are. diff = dot_max - dot_min if 0.3 < diff <= 0.6: step = 0.1 elif diff <= 0.3: step = 0.05 else: step = 0.2 # a descending range that is afterwards inverted is used # to guarantee that dot_max is in the legend. fracs_legends = np.arange(dot_max, dot_min, step * -1)[::-1] if dot_min != 0 or dot_max != 1: fracs_values = ((fracs_legends - dot_min) / old_range) else: fracs_values = fracs_legends size = (fracs_values * 10) ** 2 size += smallest_dot color = [cmap(normalize(value)) for value in np.repeat(max(mean_flat) * 0.7, len(size))] # plot size bar size_legend = fig.add_subplot(axs3[0]) size_legend.scatter(np.repeat(0, len(size)), range(len(size)), s=size, color=color) size_legend.set_yticks(range(len(size))) labels = ["{:.0%}".format(x) for x in fracs_legends] if dot_max < 1: labels[-1] = ">" + labels[-1] size_legend.set_yticklabels(labels) size_legend.tick_params(axis='y', left=False, labelleft=False, labelright=True) # remove x ticks and labels size_legend.tick_params(axis='x', bottom=False, labelbottom=False) # remove surrounding lines size_legend.spines['right'].set_visible(False) size_legend.spines['top'].set_visible(False) size_legend.spines['left'].set_visible(False) size_legend.spines['bottom'].set_visible(False) size_legend.grid(False) ymin, ymax = size_legend.get_ylim() size_legend.set_ylim(ymin, ymax+0.5) utils.savefig_or_show('dotplot', show=show, save=save) return axs
[ "def", "dotplot", "(", "adata", ",", "var_names", ",", "groupby", "=", "None", ",", "use_raw", "=", "None", ",", "log", "=", "False", ",", "num_categories", "=", "7", ",", "expression_cutoff", "=", "0.", ",", "mean_only_expressed", "=", "False", ",", "color_map", "=", "'Reds'", ",", "dot_max", "=", "None", ",", "dot_min", "=", "None", ",", "figsize", "=", "None", ",", "dendrogram", "=", "False", ",", "gene_symbols", "=", "None", ",", "var_group_positions", "=", "None", ",", "standard_scale", "=", "None", ",", "smallest_dot", "=", "0.", ",", "var_group_labels", "=", "None", ",", "var_group_rotation", "=", "None", ",", "layer", "=", "None", ",", "show", "=", "None", ",", "save", "=", "None", ",", "*", "*", "kwds", ")", ":", "if", "use_raw", "is", "None", "and", "adata", ".", "raw", "is", "not", "None", ":", "use_raw", "=", "True", "if", "isinstance", "(", "var_names", ",", "str", ")", ":", "var_names", "=", "[", "var_names", "]", "categories", ",", "obs_tidy", "=", "_prepare_dataframe", "(", "adata", ",", "var_names", ",", "groupby", ",", "use_raw", ",", "log", ",", "num_categories", ",", "layer", "=", "layer", ",", "gene_symbols", "=", "gene_symbols", ")", "# for if category defined by groupby (if any) compute for each var_name", "# 1. the fraction of cells in the category having a value > expression_cutoff", "# 2. the mean value over the category", "# 1. compute fraction of cells having value > expression_cutoff", "# transform obs_tidy into boolean matrix using the expression_cutoff", "obs_bool", "=", "obs_tidy", ">", "expression_cutoff", "# compute the sum per group which in the boolean matrix this is the number", "# of values > expression_cutoff, and divide the result by the total number of values", "# in the group (given by `count()`)", "fraction_obs", "=", "obs_bool", ".", "groupby", "(", "level", "=", "0", ")", ".", "sum", "(", ")", "/", "obs_bool", ".", "groupby", "(", "level", "=", "0", ")", ".", "count", "(", ")", "# 2. compute mean value", "if", "mean_only_expressed", ":", "mean_obs", "=", "obs_tidy", ".", "mask", "(", "~", "obs_bool", ")", ".", "groupby", "(", "level", "=", "0", ")", ".", "mean", "(", ")", ".", "fillna", "(", "0", ")", "else", ":", "mean_obs", "=", "obs_tidy", ".", "groupby", "(", "level", "=", "0", ")", ".", "mean", "(", ")", "if", "standard_scale", "==", "'group'", ":", "mean_obs", "=", "mean_obs", ".", "sub", "(", "mean_obs", ".", "min", "(", "1", ")", ",", "axis", "=", "0", ")", "mean_obs", "=", "mean_obs", ".", "div", "(", "mean_obs", ".", "max", "(", "1", ")", ",", "axis", "=", "0", ")", ".", "fillna", "(", "0", ")", "elif", "standard_scale", "==", "'var'", ":", "mean_obs", "-=", "mean_obs", ".", "min", "(", "0", ")", "mean_obs", "=", "(", "mean_obs", "/", "mean_obs", ".", "max", "(", "0", ")", ")", ".", "fillna", "(", "0", ")", "elif", "standard_scale", "is", "None", ":", "pass", "else", ":", "logg", ".", "warn", "(", "'Unknown type for standard_scale, ignored'", ")", "dendro_width", "=", "0.8", "if", "dendrogram", "else", "0", "colorbar_width", "=", "0.2", "colorbar_width_spacer", "=", "0.5", "size_legend_width", "=", "0.25", "if", "figsize", "is", "None", ":", "height", "=", "len", "(", "categories", ")", "*", "0.3", "+", "1", "# +1 for labels", "# if the number of categories is small (eg 1 or 2) use", "# a larger height", "height", "=", "max", "(", "[", "1.5", ",", "height", "]", ")", "heatmap_width", "=", "len", "(", "var_names", ")", "*", "0.35", "width", "=", "heatmap_width", "+", "colorbar_width", "+", "size_legend_width", "+", "dendro_width", "+", "colorbar_width_spacer", "else", ":", "width", ",", "height", "=", "figsize", "heatmap_width", "=", "width", "-", "(", "colorbar_width", "+", "size_legend_width", "+", "dendro_width", "+", "colorbar_width_spacer", ")", "# colorbar ax width should not change with differences in the width of the image", "# otherwise can become too small", "if", "var_group_positions", "is", "not", "None", "and", "len", "(", "var_group_positions", ")", ">", "0", ":", "# add some space in case 'brackets' want to be plotted on top of the image", "height_ratios", "=", "[", "0.5", ",", "10", "]", "else", ":", "height_ratios", "=", "[", "0", ",", "10.5", "]", "# define a layout of 2 rows x 5 columns", "# first row is for 'brackets' (if no brackets needed, the height of this row is zero)", "# second row is for main content. This second row", "# is divided into 4 axes:", "# first ax is for the main figure", "# second ax is for dendrogram (if present)", "# third ax is for the color bar legend", "# fourth ax is for a spacer that avoids the ticks", "# from the color bar to be hidden beneath the size legend axis", "# fifth ax is to plot the size legend", "fig", "=", "pl", ".", "figure", "(", "figsize", "=", "(", "width", ",", "height", ")", ")", "axs", "=", "gridspec", ".", "GridSpec", "(", "nrows", "=", "2", ",", "ncols", "=", "5", ",", "wspace", "=", "0.02", ",", "hspace", "=", "0.04", ",", "width_ratios", "=", "[", "heatmap_width", ",", "dendro_width", ",", "colorbar_width", ",", "colorbar_width_spacer", ",", "size_legend_width", "]", ",", "height_ratios", "=", "height_ratios", ")", "if", "len", "(", "categories", ")", "<", "4", ":", "# when few categories are shown, the colorbar and size legend", "# need to be larger than the main plot, otherwise they would look", "# compressed. For this, the dotplot ax is split into two:", "axs2", "=", "gridspec", ".", "GridSpecFromSubplotSpec", "(", "2", ",", "1", ",", "subplot_spec", "=", "axs", "[", "1", ",", "0", "]", ",", "height_ratios", "=", "[", "len", "(", "categories", ")", "*", "0.3", ",", "1", "]", ")", "dot_ax", "=", "fig", ".", "add_subplot", "(", "axs2", "[", "0", "]", ")", "else", ":", "dot_ax", "=", "fig", ".", "add_subplot", "(", "axs", "[", "1", ",", "0", "]", ")", "color_legend", "=", "fig", ".", "add_subplot", "(", "axs", "[", "1", ",", "2", "]", ")", "if", "groupby", "is", "None", "or", "len", "(", "categories", ")", "<=", "1", ":", "# dendrogram can only be computed between groupby categories", "dendrogram", "=", "False", "if", "dendrogram", ":", "dendro_data", "=", "_reorder_categories_after_dendrogram", "(", "adata", ",", "groupby", ",", "dendrogram", ",", "var_names", "=", "var_names", ",", "var_group_labels", "=", "var_group_labels", ",", "var_group_positions", "=", "var_group_positions", ")", "var_group_labels", "=", "dendro_data", "[", "'var_group_labels'", "]", "var_group_positions", "=", "dendro_data", "[", "'var_group_positions'", "]", "# reorder matrix", "if", "dendro_data", "[", "'var_names_idx_ordered'", "]", "is", "not", "None", ":", "# reorder columns (usually genes) if needed. This only happens when", "# var_group_positions and var_group_labels is set", "mean_obs", "=", "mean_obs", ".", "iloc", "[", ":", ",", "dendro_data", "[", "'var_names_idx_ordered'", "]", "]", "fraction_obs", "=", "fraction_obs", ".", "iloc", "[", ":", ",", "dendro_data", "[", "'var_names_idx_ordered'", "]", "]", "# reorder rows (categories) to match the dendrogram order", "mean_obs", "=", "mean_obs", ".", "iloc", "[", "dendro_data", "[", "'categories_idx_ordered'", "]", ",", ":", "]", "fraction_obs", "=", "fraction_obs", ".", "iloc", "[", "dendro_data", "[", "'categories_idx_ordered'", "]", ",", ":", "]", "y_ticks", "=", "range", "(", "mean_obs", ".", "shape", "[", "0", "]", ")", "dendro_ax", "=", "fig", ".", "add_subplot", "(", "axs", "[", "1", ",", "1", "]", ",", "sharey", "=", "dot_ax", ")", "_plot_dendrogram", "(", "dendro_ax", ",", "adata", ",", "groupby", ",", "dendrogram_key", "=", "dendrogram", ",", "ticks", "=", "y_ticks", ")", "# to keep the size_legend of about the same height, irrespective", "# of the number of categories, the fourth ax is subdivided into two parts", "size_legend_height", "=", "min", "(", "1.3", ",", "height", ")", "# wspace is proportional to the width but a constant value is", "# needed such that the spacing is the same for thinner or wider images.", "wspace", "=", "10.5", "/", "width", "axs3", "=", "gridspec", ".", "GridSpecFromSubplotSpec", "(", "2", ",", "1", ",", "subplot_spec", "=", "axs", "[", "1", ",", "4", "]", ",", "wspace", "=", "wspace", ",", "height_ratios", "=", "[", "size_legend_height", "/", "height", ",", "(", "height", "-", "size_legend_height", ")", "/", "height", "]", ")", "# make scatter plot in which", "# x = var_names", "# y = groupby category", "# size = fraction", "# color = mean expression", "y", ",", "x", "=", "np", ".", "indices", "(", "mean_obs", ".", "shape", ")", "y", "=", "y", ".", "flatten", "(", ")", "x", "=", "x", ".", "flatten", "(", ")", "frac", "=", "fraction_obs", ".", "values", ".", "flatten", "(", ")", "mean_flat", "=", "mean_obs", ".", "values", ".", "flatten", "(", ")", "cmap", "=", "pl", ".", "get_cmap", "(", "color_map", ")", "if", "dot_max", "is", "None", ":", "dot_max", "=", "np", ".", "ceil", "(", "max", "(", "frac", ")", "*", "10", ")", "/", "10", "else", ":", "if", "dot_max", "<", "0", "or", "dot_max", ">", "1", ":", "raise", "ValueError", "(", "\"`dot_max` value has to be between 0 and 1\"", ")", "if", "dot_min", "is", "None", ":", "dot_min", "=", "0", "else", ":", "if", "dot_min", "<", "0", "or", "dot_min", ">", "1", ":", "raise", "ValueError", "(", "\"`dot_min` value has to be between 0 and 1\"", ")", "if", "dot_min", "!=", "0", "or", "dot_max", "!=", "1", ":", "# clip frac between dot_min and dot_max", "frac", "=", "np", ".", "clip", "(", "frac", ",", "dot_min", ",", "dot_max", ")", "old_range", "=", "dot_max", "-", "dot_min", "# re-scale frac between 0 and 1", "frac", "=", "(", "(", "frac", "-", "dot_min", ")", "/", "old_range", ")", "size", "=", "(", "frac", "*", "10", ")", "**", "2", "size", "+=", "smallest_dot", "import", "matplotlib", ".", "colors", "normalize", "=", "matplotlib", ".", "colors", ".", "Normalize", "(", "vmin", "=", "kwds", ".", "get", "(", "'vmin'", ")", ",", "vmax", "=", "kwds", ".", "get", "(", "'vmax'", ")", ")", "colors", "=", "cmap", "(", "normalize", "(", "mean_flat", ")", ")", "dot_ax", ".", "scatter", "(", "x", ",", "y", ",", "color", "=", "colors", ",", "s", "=", "size", ",", "cmap", "=", "cmap", ",", "norm", "=", "None", ",", "edgecolor", "=", "'none'", ",", "*", "*", "kwds", ")", "y_ticks", "=", "range", "(", "mean_obs", ".", "shape", "[", "0", "]", ")", "dot_ax", ".", "set_yticks", "(", "y_ticks", ")", "dot_ax", ".", "set_yticklabels", "(", "[", "mean_obs", ".", "index", "[", "idx", "]", "for", "idx", "in", "y_ticks", "]", ")", "x_ticks", "=", "range", "(", "mean_obs", ".", "shape", "[", "1", "]", ")", "dot_ax", ".", "set_xticks", "(", "x_ticks", ")", "dot_ax", ".", "set_xticklabels", "(", "[", "mean_obs", ".", "columns", "[", "idx", "]", "for", "idx", "in", "x_ticks", "]", ",", "rotation", "=", "90", ")", "dot_ax", ".", "tick_params", "(", "axis", "=", "'both'", ",", "labelsize", "=", "'small'", ")", "dot_ax", ".", "grid", "(", "False", ")", "dot_ax", ".", "set_xlim", "(", "-", "0.5", ",", "len", "(", "var_names", ")", "+", "0.5", ")", "dot_ax", ".", "set_ylabel", "(", "groupby", ")", "# to be consistent with the heatmap plot, is better to", "# invert the order of the y-axis, such that the first group is on", "# top", "ymin", ",", "ymax", "=", "dot_ax", ".", "get_ylim", "(", ")", "dot_ax", ".", "set_ylim", "(", "ymax", "+", "0.5", ",", "ymin", "-", "0.5", ")", "dot_ax", ".", "set_xlim", "(", "-", "1", ",", "len", "(", "var_names", ")", ")", "# plot group legends on top of dot_ax (if given)", "if", "var_group_positions", "is", "not", "None", "and", "len", "(", "var_group_positions", ")", ">", "0", ":", "gene_groups_ax", "=", "fig", ".", "add_subplot", "(", "axs", "[", "0", ",", "0", "]", ",", "sharex", "=", "dot_ax", ")", "_plot_gene_groups_brackets", "(", "gene_groups_ax", ",", "group_positions", "=", "var_group_positions", ",", "group_labels", "=", "var_group_labels", ",", "rotation", "=", "var_group_rotation", ")", "# plot colorbar", "import", "matplotlib", ".", "colorbar", "matplotlib", ".", "colorbar", ".", "ColorbarBase", "(", "color_legend", ",", "cmap", "=", "cmap", ",", "norm", "=", "normalize", ")", "# for the dot size legend, use step between dot_max and dot_min", "# based on how different they are.", "diff", "=", "dot_max", "-", "dot_min", "if", "0.3", "<", "diff", "<=", "0.6", ":", "step", "=", "0.1", "elif", "diff", "<=", "0.3", ":", "step", "=", "0.05", "else", ":", "step", "=", "0.2", "# a descending range that is afterwards inverted is used", "# to guarantee that dot_max is in the legend.", "fracs_legends", "=", "np", ".", "arange", "(", "dot_max", ",", "dot_min", ",", "step", "*", "-", "1", ")", "[", ":", ":", "-", "1", "]", "if", "dot_min", "!=", "0", "or", "dot_max", "!=", "1", ":", "fracs_values", "=", "(", "(", "fracs_legends", "-", "dot_min", ")", "/", "old_range", ")", "else", ":", "fracs_values", "=", "fracs_legends", "size", "=", "(", "fracs_values", "*", "10", ")", "**", "2", "size", "+=", "smallest_dot", "color", "=", "[", "cmap", "(", "normalize", "(", "value", ")", ")", "for", "value", "in", "np", ".", "repeat", "(", "max", "(", "mean_flat", ")", "*", "0.7", ",", "len", "(", "size", ")", ")", "]", "# plot size bar", "size_legend", "=", "fig", ".", "add_subplot", "(", "axs3", "[", "0", "]", ")", "size_legend", ".", "scatter", "(", "np", ".", "repeat", "(", "0", ",", "len", "(", "size", ")", ")", ",", "range", "(", "len", "(", "size", ")", ")", ",", "s", "=", "size", ",", "color", "=", "color", ")", "size_legend", ".", "set_yticks", "(", "range", "(", "len", "(", "size", ")", ")", ")", "labels", "=", "[", "\"{:.0%}\"", ".", "format", "(", "x", ")", "for", "x", "in", "fracs_legends", "]", "if", "dot_max", "<", "1", ":", "labels", "[", "-", "1", "]", "=", "\">\"", "+", "labels", "[", "-", "1", "]", "size_legend", ".", "set_yticklabels", "(", "labels", ")", "size_legend", ".", "tick_params", "(", "axis", "=", "'y'", ",", "left", "=", "False", ",", "labelleft", "=", "False", ",", "labelright", "=", "True", ")", "# remove x ticks and labels", "size_legend", ".", "tick_params", "(", "axis", "=", "'x'", ",", "bottom", "=", "False", ",", "labelbottom", "=", "False", ")", "# remove surrounding lines", "size_legend", ".", "spines", "[", "'right'", "]", ".", "set_visible", "(", "False", ")", "size_legend", ".", "spines", "[", "'top'", "]", ".", "set_visible", "(", "False", ")", "size_legend", ".", "spines", "[", "'left'", "]", ".", "set_visible", "(", "False", ")", "size_legend", ".", "spines", "[", "'bottom'", "]", ".", "set_visible", "(", "False", ")", "size_legend", ".", "grid", "(", "False", ")", "ymin", ",", "ymax", "=", "size_legend", ".", "get_ylim", "(", ")", "size_legend", ".", "set_ylim", "(", "ymin", ",", "ymax", "+", "0.5", ")", "utils", ".", "savefig_or_show", "(", "'dotplot'", ",", "show", "=", "show", ",", "save", "=", "save", ")", "return", "axs" ]
\ Makes a *dot plot* of the expression values of `var_names`. For each var_name and each `groupby` category a dot is plotted. Each dot represents two values: mean expression within each category (visualized by color) and fraction of cells expressing the var_name in the category (visualized by the size of the dot). If groupby is not given, the dotplot assumes that all data belongs to a single category. **Note**: A gene is considered expressed if the expression value in the adata (or adata.raw) is above the specified threshold which is zero by default. An example of dotplot usage is to visualize, for multiple marker genes, the mean value and the percentage of cells expressing the gene across multiple clusters. Parameters ---------- {common_plot_args} expression_cutoff : `float` (default: `0.`) Expression cutoff that is used for binarizing the gene expression and determining the fraction of cells expressing given genes. A gene is expressed only if the expression value is greater than this threshold. mean_only_expressed : `bool` (default: `False`) If True, gene expression is averaged only over the cells expressing the given genes. color_map : `str`, optional (default: `Reds`) String denoting matplotlib color map. dot_max : `float` optional (default: `None`) If none, the maximum dot size is set to the maximum fraction value found (e.g. 0.6). If given, the value should be a number between 0 and 1. All fractions larger than dot_max are clipped to this value. dot_min : `float` optional (default: `None`) If none, the minimum dot size is set to 0. If given, the value should be a number between 0 and 1. All fractions smaller than dot_min are clipped to this value. standard_scale : {{'var', 'group'}}, optional (default: None) Whether or not to standardize that dimension between 0 and 1, meaning for each variable or group, subtract the minimum and divide each by its maximum. smallest_dot : `float` optional (default: 0.) If none, the smallest dot has size 0. All expression levels with `dot_min` are plotted with `smallest_dot` dot size. {show_save_ax} **kwds : keyword arguments Are passed to `matplotlib.pyplot.scatter`. Returns ------- List of :class:`~matplotlib.axes.Axes` Examples ------- >>> adata = sc.datasets.pbmc68k_reduced() >>> sc.pl.dotplot(adata, ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ'], ... groupby='bulk_labels', dendrogram=True)
[ "\\", "Makes", "a", "*", "dot", "plot", "*", "of", "the", "expression", "values", "of", "var_names", "." ]
python
train
43.790698
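The dot-size mapping buried in the function above is easy to lose in the flattened source, so here is a hedged, self-contained restatement of just that arithmetic: clip the fraction to [dot_min, dot_max], rescale to [0, 1], then square a 10x-scaled value and add smallest_dot. The helper name dot_sizes is ours, not scanpy's.

import numpy as np

def dot_sizes(frac, dot_min=0.0, dot_max=1.0, smallest_dot=0.0):
    """Reproduce dotplot's fraction -> scatter-size mapping (sketch)."""
    frac = np.asarray(frac, dtype=float)
    if dot_min != 0 or dot_max != 1:
        frac = np.clip(frac, dot_min, dot_max)
        frac = (frac - dot_min) / (dot_max - dot_min)   # rescale to [0, 1]
    return (frac * 10) ** 2 + smallest_dot              # area passed to scatter(s=...)

print(dot_sizes([0.0, 0.25, 0.5, 1.0]))   # [0. 6.25 25. 100.]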
kislyuk/aegea
aegea/packages/github3/github.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/github.py#L771-L797
def iter_starred(self, login=None, sort=None, direction=None, number=-1, etag=None): """Iterate over repositories starred by ``login`` or the authenticated user. .. versionchanged:: 0.5 Added sort and direction parameters (optional) as per the change in GitHub's API. :param str login: (optional), name of user whose stars you want to see :param str sort: (optional), either 'created' (when the star was created) or 'updated' (when the repository was last pushed to) :param str direction: (optional), either 'asc' or 'desc'. Default: 'desc' :param int number: (optional), number of repositories to return. Default: -1 returns all repositories :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`Repository <github3.repos.Repository>` """ if login: return self.user(login).iter_starred(sort, direction) params = {'sort': sort, 'direction': direction} self._remove_none(params) url = self._build_url('user', 'starred') return self._iter(int(number), url, Repository, params, etag)
[ "def", "iter_starred", "(", "self", ",", "login", "=", "None", ",", "sort", "=", "None", ",", "direction", "=", "None", ",", "number", "=", "-", "1", ",", "etag", "=", "None", ")", ":", "if", "login", ":", "return", "self", ".", "user", "(", "login", ")", ".", "iter_starred", "(", "sort", ",", "direction", ")", "params", "=", "{", "'sort'", ":", "sort", ",", "'direction'", ":", "direction", "}", "self", ".", "_remove_none", "(", "params", ")", "url", "=", "self", ".", "_build_url", "(", "'user'", ",", "'starred'", ")", "return", "self", ".", "_iter", "(", "int", "(", "number", ")", ",", "url", ",", "Repository", ",", "params", ",", "etag", ")" ]
Iterate over repositories starred by ``login`` or the authenticated user. .. versionchanged:: 0.5 Added sort and direction parameters (optional) as per the change in GitHub's API. :param str login: (optional), name of user whose stars you want to see :param str sort: (optional), either 'created' (when the star was created) or 'updated' (when the repository was last pushed to) :param str direction: (optional), either 'asc' or 'desc'. Default: 'desc' :param int number: (optional), number of repositories to return. Default: -1 returns all repositories :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`Repository <github3.repos.Repository>`
[ "Iterate", "over", "repositories", "starred", "by", "login", "or", "the", "authenticated", "user", "." ]
python
train
45.777778
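A hedged usage sketch against the github3.py 0.x API this record comes from; the credentials are placeholders. Note a detail visible in the code above: when `login` is given, `sort` and `direction` are forwarded to the other user's iterator, but `number` and `etag` are not.

from github3 import login as gh_login

gh = gh_login('someuser', password='...')   # hypothetical credentials
# Ten most recently starred repositories of the authenticated user:
for repo in gh.iter_starred(sort='created', direction='desc', number=10):
    print(repo)
# Stars of another user (number/etag are ignored on this path):
for repo in gh.iter_starred(login='octocat'):
    print(repo)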
IBMStreams/pypi.streamsx
streamsx/topology/topology.py
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/topology/topology.py#L1425-L1440
def set_consistent(self, consistent_config): """ Indicates that the stream is the start of a consistent region. Args: consistent_config(consistent.ConsistentRegionConfig): the configuration of the consistent region. Returns: Stream: Returns this stream. .. versionadded:: 1.11 """ # add job control plane if needed self.topology._add_job_control_plane() self.oport.operator.consistent(consistent_config) return self._make_placeable()
[ "def", "set_consistent", "(", "self", ",", "consistent_config", ")", ":", "# add job control plane if needed", "self", ".", "topology", ".", "_add_job_control_plane", "(", ")", "self", ".", "oport", ".", "operator", ".", "consistent", "(", "consistent_config", ")", "return", "self", ".", "_make_placeable", "(", ")" ]
Indicates that the stream is the start of a consistent region. Args: consistent_config(consistent.ConsistentRegionConfig): the configuration of the consistent region. Returns: Stream: Returns this stream. .. versionadded:: 1.11
[ "Indicates", "that", "the", "stream", "is", "the", "start", "of", "a", "consistent", "region", "." ]
python
train
32.375
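A hedged sketch of starting a consistent region with the method above, assuming the IBM Streams `streamsx` package; the module path for ConsistentRegionConfig is taken from the record's own docstring and its `periodic` constructor is an assumption about that API.

from streamsx.topology.topology import Topology
from streamsx.topology.consistent import ConsistentRegionConfig  # assumed module path

topo = Topology('checkpointed_app')
src = topo.source(['a', 'b', 'c'])
# Make the source the start of a periodically driven consistent region:
src.set_consistent(ConsistentRegionConfig.periodic(period=30))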
vaexio/vaex
packages/vaex-core/vaex/dataframe.py
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L3322-L3325
def delete_variable(self, name): """Deletes a variable from a DataFrame.""" del self.variables[name] self.signal_variable_changed.emit(self, name, "delete")
[ "def", "delete_variable", "(", "self", ",", "name", ")", ":", "del", "self", ".", "variables", "[", "name", "]", "self", ".", "signal_variable_changed", ".", "emit", "(", "self", ",", "name", ",", "\"delete\"", ")" ]
Deletes a variable from a DataFrame.
[ "Deletes", "a", "variable", "from", "a", "DataFrame", "." ]
python
test
44.25
kislyuk/aegea
aegea/audit.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/audit.py#L54-L60
def audit_1_1(self): """1.1 Avoid the use of the "root" account (Scored)""" for row in self.credential_report: if row["user"] == "<root_account>": for field in "password_last_used", "access_key_1_last_used_date", "access_key_2_last_used_date": if row[field] != "N/A" and self.parse_date(row[field]) > datetime.now(tzutc()) - timedelta(days=1): raise Exception("Root account last used less than a day ago ({})".format(field))
[ "def", "audit_1_1", "(", "self", ")", ":", "for", "row", "in", "self", ".", "credential_report", ":", "if", "row", "[", "\"user\"", "]", "==", "\"<root_account>\"", ":", "for", "field", "in", "\"password_last_used\"", ",", "\"access_key_1_last_used_date\"", ",", "\"access_key_2_last_used_date\"", ":", "if", "row", "[", "field", "]", "!=", "\"N/A\"", "and", "self", ".", "parse_date", "(", "row", "[", "field", "]", ")", ">", "datetime", ".", "now", "(", "tzutc", "(", ")", ")", "-", "timedelta", "(", "days", "=", "1", ")", ":", "raise", "Exception", "(", "\"Root account last used less than a day ago ({})\"", ".", "format", "(", "field", ")", ")" ]
1.1 Avoid the use of the "root" account (Scored)
[ "1", ".", "1", "Avoid", "the", "use", "of", "the", "root", "account", "(", "Scored", ")" ]
python
train
72.285714
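The check above reduces to date arithmetic over the root row of the IAM credential report. This hedged sketch shows the same comparison in isolation; the row contents are invented and dateutil is assumed, matching the parse/tzutc calls in the record.

from datetime import datetime, timedelta
from dateutil.parser import parse as parse_date
from dateutil.tz import tzutc

row = {'user': '<root_account>',                      # invented sample row
       'password_last_used': '2019-01-01T00:00:00+00:00',
       'access_key_1_last_used_date': 'N/A',
       'access_key_2_last_used_date': 'N/A'}

cutoff = datetime.now(tzutc()) - timedelta(days=1)
for field in ('password_last_used', 'access_key_1_last_used_date',
              'access_key_2_last_used_date'):
    if row[field] != 'N/A' and parse_date(row[field]) > cutoff:
        raise Exception('Root account last used less than a day ago ({})'.format(field))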
erocarrera/pefile
pefile.py
https://github.com/erocarrera/pefile/blob/8a78a2e251a3f2336c232bf411133927b479edf2/pefile.py#L741-L747
def add_lines(self, txt, indent=0): """Adds a list of lines. The list can be indented with the optional argument 'indent'. """ for line in txt: self.add_line(line, indent)
[ "def", "add_lines", "(", "self", ",", "txt", ",", "indent", "=", "0", ")", ":", "for", "line", "in", "txt", ":", "self", ".", "add_line", "(", "line", ",", "indent", ")" ]
Adds a list of lines. The list can be indented with the optional argument 'indent'.
[ "Adds", "a", "list", "of", "lines", "." ]
python
train
30
tdryer/hangups
docs/generate_proto_docs.py
https://github.com/tdryer/hangups/blob/85c0bf0a57698d077461283895707260f9dbf931/docs/generate_proto_docs.py#L132-L177
def generate_message_doc(message_descriptor, locations, path, name_prefix=''): """Generate docs for message and nested messages and enums. Args: message_descriptor: descriptor_pb2.DescriptorProto instance for message to generate docs for. locations: Dictionary of location paths tuples to descriptor_pb2.SourceCodeInfo.Location instances. path: Path tuple to the message definition. name_prefix: Optional prefix for this message's name. """ # message_type is 4 prefixed_name = name_prefix + message_descriptor.name print(make_subsection(prefixed_name)) location = locations[path] if location.HasField('leading_comments'): print(textwrap.dedent(location.leading_comments)) row_tuples = [] for field_index, field in enumerate(message_descriptor.field): field_location = locations[path + (2, field_index)] if field.type not in [11, 14]: type_str = TYPE_TO_STR[field.type] else: type_str = make_link(field.type_name.lstrip('.')) row_tuples.append(( make_code(field.name), field.number, type_str, LABEL_TO_STR[field.label], textwrap.fill(get_comment_from_location(field_location), INFINITY), )) print_table(('Field', 'Number', 'Type', 'Label', 'Description'), row_tuples) # Generate nested messages nested_types = enumerate(message_descriptor.nested_type) for index, nested_message_desc in nested_types: generate_message_doc(nested_message_desc, locations, path + (3, index), name_prefix=prefixed_name + '.') # Generate nested enums for index, nested_enum_desc in enumerate(message_descriptor.enum_type): generate_enum_doc(nested_enum_desc, locations, path + (4, index), name_prefix=prefixed_name + '.')
[ "def", "generate_message_doc", "(", "message_descriptor", ",", "locations", ",", "path", ",", "name_prefix", "=", "''", ")", ":", "# message_type is 4", "prefixed_name", "=", "name_prefix", "+", "message_descriptor", ".", "name", "print", "(", "make_subsection", "(", "prefixed_name", ")", ")", "location", "=", "locations", "[", "path", "]", "if", "location", ".", "HasField", "(", "'leading_comments'", ")", ":", "print", "(", "textwrap", ".", "dedent", "(", "location", ".", "leading_comments", ")", ")", "row_tuples", "=", "[", "]", "for", "field_index", ",", "field", "in", "enumerate", "(", "message_descriptor", ".", "field", ")", ":", "field_location", "=", "locations", "[", "path", "+", "(", "2", ",", "field_index", ")", "]", "if", "field", ".", "type", "not", "in", "[", "11", ",", "14", "]", ":", "type_str", "=", "TYPE_TO_STR", "[", "field", ".", "type", "]", "else", ":", "type_str", "=", "make_link", "(", "field", ".", "type_name", ".", "lstrip", "(", "'.'", ")", ")", "row_tuples", ".", "append", "(", "(", "make_code", "(", "field", ".", "name", ")", ",", "field", ".", "number", ",", "type_str", ",", "LABEL_TO_STR", "[", "field", ".", "label", "]", ",", "textwrap", ".", "fill", "(", "get_comment_from_location", "(", "field_location", ")", ",", "INFINITY", ")", ",", ")", ")", "print_table", "(", "(", "'Field'", ",", "'Number'", ",", "'Type'", ",", "'Label'", ",", "'Description'", ")", ",", "row_tuples", ")", "# Generate nested messages", "nested_types", "=", "enumerate", "(", "message_descriptor", ".", "nested_type", ")", "for", "index", ",", "nested_message_desc", "in", "nested_types", ":", "generate_message_doc", "(", "nested_message_desc", ",", "locations", ",", "path", "+", "(", "3", ",", "index", ")", ",", "name_prefix", "=", "prefixed_name", "+", "'.'", ")", "# Generate nested enums", "for", "index", ",", "nested_enum_desc", "in", "enumerate", "(", "message_descriptor", ".", "enum_type", ")", ":", "generate_enum_doc", "(", "nested_enum_desc", ",", "locations", ",", "path", "+", "(", "4", ",", "index", ")", ",", "name_prefix", "=", "prefixed_name", "+", "'.'", ")" ]
Generate docs for message and nested messages and enums. Args: message_descriptor: descriptor_pb2.DescriptorProto instance for message to generate docs for. locations: Dictionary of location paths tuples to descriptor_pb2.SourceCodeInfo.Location instances. path: Path tuple to the message definition. name_prefix: Optional prefix for this message's name.
[ "Generate", "docs", "for", "message", "and", "nested", "messages", "and", "enums", "." ]
python
valid
41.847826
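The path tuples the function above indexes with come from protobuf's SourceCodeInfo convention: within a DescriptorProto, (2, i) addresses its i-th field, (3, i) its i-th nested message, and (4, i) its i-th nested enum, matching DescriptorProto's own field numbers; that is why the recursion appends (3, index) and (4, index). A hedged sketch of building the `locations` mapping the function expects:

def build_locations(file_descriptor_proto):
    """Map SourceCodeInfo path tuples -> Location messages (where comments live)."""
    return {tuple(loc.path): loc
            for loc in file_descriptor_proto.source_code_info.location}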
oceanprotocol/squid-py
squid_py/ocean/ocean_agreements.py
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/ocean/ocean_agreements.py#L273-L296
def is_access_granted(self, agreement_id, did, consumer_address): """ Check permission for the agreement. Verify on-chain that the `consumer_address` has permission to access the given asset `did` according to the `agreement_id`. :param agreement_id: id of the agreement, hex str :param did: DID, str :param consumer_address: ethereum account address of consumer, hex str :return: bool True if user has permission """ agreement_consumer = self._keeper.escrow_access_secretstore_template.get_agreement_consumer( agreement_id) if agreement_consumer != consumer_address: logger.warning(f'Invalid consumer address {consumer_address} and/or ' f'service agreement id {agreement_id} (did {did})' f', agreement consumer is {agreement_consumer}') return False document_id = did_to_id(did) return self._keeper.access_secret_store_condition.check_permissions( document_id, consumer_address )
[ "def", "is_access_granted", "(", "self", ",", "agreement_id", ",", "did", ",", "consumer_address", ")", ":", "agreement_consumer", "=", "self", ".", "_keeper", ".", "escrow_access_secretstore_template", ".", "get_agreement_consumer", "(", "agreement_id", ")", "if", "agreement_consumer", "!=", "consumer_address", ":", "logger", ".", "warning", "(", "f'Invalid consumer address {consumer_address} and/or '", "f'service agreement id {agreement_id} (did {did})'", "f', agreement consumer is {agreement_consumer}'", ")", "return", "False", "document_id", "=", "did_to_id", "(", "did", ")", "return", "self", ".", "_keeper", ".", "access_secret_store_condition", ".", "check_permissions", "(", "document_id", ",", "consumer_address", ")" ]
Check permission for the agreement. Verify on-chain that the `consumer_address` has permission to access the given asset `did` according to the `agreement_id`. :param agreement_id: id of the agreement, hex str :param did: DID, str :param consumer_address: ethereum account address of consumer, hex str :return: bool True if user has permission
[ "Check", "permission", "for", "the", "agreement", "." ]
python
train
44.75
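A hedged usage sketch for the permission check above; the top-level Ocean and Config imports follow squid-py's documented entry points but may differ between releases, and every identifier value here (config file, agreement id, DID, address) is a placeholder.

from squid_py import Ocean, Config

ocean = Ocean(Config('config.ini'))            # hypothetical config file
granted = ocean.agreements.is_access_granted(
    agreement_id='0x...',                       # hex str of the service agreement
    did='did:op:...',
    consumer_address='0x...')
if not granted:
    print('consumer is not authorized for this agreement')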
sirfoga/pyhal
hal/help.py
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/help.py#L76-L84
def as_sql(self): """Gets report as SQL table :return: SQL-formatted report """ labels, data = self._get_table() table = SqlTable(labels, data, "{:.3f}", "\n") return str(table)
[ "def", "as_sql", "(", "self", ")", ":", "labels", ",", "data", "=", "self", ".", "_get_table", "(", ")", "table", "=", "SqlTable", "(", "labels", ",", "data", ",", "\"{:.3f}\"", ",", "\"\\n\"", ")", "return", "str", "(", "table", ")" ]
Gets report as SQL table :return: SQL-formatted report
[ "Gets", "report", "as", "SQL", "table" ]
python
train
23.888889
jpablo128/simplystatic
simplystatic/s2site.py
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/simplystatic/s2site.py#L79-L99
def discover_base_dir(start_dir): '''Return start_dir or the parent dir that has the s2 marker. Starting from the specified directory, and going up the parent chain, check each directory to see if it's a base_dir (contains the "marker" directory *s2*) and return it. Otherwise, return the start_dir. ''' if is_base_dir(start_dir): return start_dir pcl = start_dir.split('/') #path component list found_base_dir = None for i in range(1, len(pcl)+1): d2c = '/'.join(pcl[:-i]) if (d2c == ''): d2c = '/' if is_base_dir(d2c): found_base_dir = d2c break return found_base_dir
[ "def", "discover_base_dir", "(", "start_dir", ")", ":", "if", "is_base_dir", "(", "start_dir", ")", ":", "return", "start_dir", "pcl", "=", "start_dir", ".", "split", "(", "'/'", ")", "#path component list", "found_base_dir", "=", "None", "for", "i", "in", "range", "(", "1", ",", "len", "(", "pcl", ")", "+", "1", ")", ":", "d2c", "=", "'/'", ".", "join", "(", "pcl", "[", ":", "-", "i", "]", ")", "if", "(", "d2c", "==", "''", ")", ":", "d2c", "=", "'/'", "if", "is_base_dir", "(", "d2c", ")", ":", "found_base_dir", "=", "d2c", "break", "return", "found_base_dir" ]
Return start_dir or the parent dir that has the s2 marker. Starting from the specified directory, and going up the parent chain, check each directory to see if it's a base_dir (contains the "marker" directory *s2*) and return it. Otherwise, return the start_dir.
[ "Return", "start_dir", "or", "the", "parent", "dir", "that", "has", "the", "s2", "marker", "." ]
python
train
31.619048
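The parent-chain walk above hand-rolls path splitting; a hedged equivalent with os.path shows the same upward search more directly. It assumes POSIX separators and an absolute start_dir, like the original, and is_base_dir is the predicate provided elsewhere in s2site; this is a sketch, not a drop-in replacement.

import os.path

def discover_base_dir_alt(start_dir):
    """Walk up from start_dir; return the first dir for which is_base_dir holds, else None."""
    d = start_dir
    while True:
        if is_base_dir(d):                        # marker check from s2site
            return d
        parent = os.path.dirname(d.rstrip('/')) or '/'
        if parent == d:                           # reached the filesystem root
            return None
        d = parent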
DEIB-GECO/PyGMQL
gmql/managers.py
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/managers.py#L205-L233
def login(): """ Enables the user to login to the remote GMQL service. If no session is stored for the configured remote address, the user is connected as guest. """ from .RemoteConnection.RemoteManager import RemoteManager global __remote_manager, __session_manager logger = logging.getLogger() remote_address = get_remote_address() res = __session_manager.get_session(remote_address) if res is None: # there is no session for this address, let's login as guest warnings.warn("There is no active session for address {}. Logging as Guest user".format(remote_address)) rm = RemoteManager(address=remote_address) rm.login() session_type = "guest" else: # there is a previous session for this address, let's do an auto login # using that access token logger.info("Logging using stored authentication token") rm = RemoteManager(address=remote_address, auth_token=res[1]) # if the access token is not valid anymore (therefore we are in guest mode) # the auto_login function will perform a guest login from scratch session_type = rm.auto_login(how=res[2]) # store the new session __remote_manager = rm access_time = int(time.time()) auth_token = rm.auth_token __session_manager.add_session(remote_address, auth_token, access_time, session_type)
[ "def", "login", "(", ")", ":", "from", ".", "RemoteConnection", ".", "RemoteManager", "import", "RemoteManager", "global", "__remote_manager", ",", "__session_manager", "logger", "=", "logging", ".", "getLogger", "(", ")", "remote_address", "=", "get_remote_address", "(", ")", "res", "=", "__session_manager", ".", "get_session", "(", "remote_address", ")", "if", "res", "is", "None", ":", "# there is no session for this address, let's login as guest", "warnings", ".", "warn", "(", "\"There is no active session for address {}. Logging as Guest user\"", ".", "format", "(", "remote_address", ")", ")", "rm", "=", "RemoteManager", "(", "address", "=", "remote_address", ")", "rm", ".", "login", "(", ")", "session_type", "=", "\"guest\"", "else", ":", "# there is a previous session for this address, let's do an auto login", "# using that access token", "logger", ".", "info", "(", "\"Logging using stored authentication token\"", ")", "rm", "=", "RemoteManager", "(", "address", "=", "remote_address", ",", "auth_token", "=", "res", "[", "1", "]", ")", "# if the access token is not valid anymore (therefore we are in guest mode)", "# the auto_login function will perform a guest login from scratch", "session_type", "=", "rm", ".", "auto_login", "(", "how", "=", "res", "[", "2", "]", ")", "# store the new session", "__remote_manager", "=", "rm", "access_time", "=", "int", "(", "time", ".", "time", "(", ")", ")", "auth_token", "=", "rm", ".", "auth_token", "__session_manager", ".", "add_session", "(", "remote_address", ",", "auth_token", ",", "access_time", ",", "session_type", ")" ]
Enables the user to login to the remote GMQL service. If no session is stored for the configured remote address, the user is connected as guest.
[ "Enables", "the", "user", "to", "login", "to", "the", "remote", "GMQL", "service", ".", "If", "no", "session", "is", "stored", "for", "the", "configured", "remote", "address", "the", "user", "is", "connected", "as", "guest", "." ]
python
train
46.655172
jssimporter/python-jss
jss/jamf_software_server.py
https://github.com/jssimporter/python-jss/blob/b95185d74e0c0531b0b563f280d4129e21d5fe5d/jss/jamf_software_server.py#L988-L997
def _build_jss_object_list(self, response, obj_class): """Build a JSSListData object from response.""" response_objects = [item for item in response if item is not None and item.tag != "size"] objects = [ JSSListData(obj_class, {i.tag: i.text for i in response_object}, self) for response_object in response_objects] return JSSObjectList(self, obj_class, objects)
[ "def", "_build_jss_object_list", "(", "self", ",", "response", ",", "obj_class", ")", ":", "response_objects", "=", "[", "item", "for", "item", "in", "response", "if", "item", "is", "not", "None", "and", "item", ".", "tag", "!=", "\"size\"", "]", "objects", "=", "[", "JSSListData", "(", "obj_class", ",", "{", "i", ".", "tag", ":", "i", ".", "text", "for", "i", "in", "response_object", "}", ",", "self", ")", "for", "response_object", "in", "response_objects", "]", "return", "JSSObjectList", "(", "self", ",", "obj_class", ",", "objects", ")" ]
Build a JSSListData object from response.
[ "Build", "a", "JSSListData", "object", "from", "response", "." ]
python
train
47.9
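The comprehension above is ElementTree-to-dict flattening; a hedged standalone version makes the shape visible without the JSS wrapper classes. The XML is invented for illustration and only mimics the size-plus-items layout the method filters.

import xml.etree.ElementTree as ET

xml = """<computers><size>2</size>
           <computer><id>1</id><name>mac-01</name></computer>
           <computer><id>2</id><name>mac-02</name></computer>
         </computers>"""
response = ET.fromstring(xml)

# Same filtering/flattening as _build_jss_object_list, minus the JSS classes:
response_objects = [item for item in response if item is not None and item.tag != 'size']
objects = [{i.tag: i.text for i in obj} for obj in response_objects]
print(objects)   # [{'id': '1', 'name': 'mac-01'}, {'id': '2', 'name': 'mac-02'}]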
gawel/irc3
irc3/base.py
https://github.com/gawel/irc3/blob/cd27840a5809a1f803dc620860fe75d83d2a2ec8/irc3/base.py#L173-L193
def detach_events(self, *events): """Detach one or more events from the bot instance""" reg = self.registry delete = defaultdict(list) # remove from self.events all_events = reg.events for e in events: regexp = getattr(e.regexp, 're', e.regexp) iotype = e.iotype if e in all_events[iotype].get(regexp, []): all_events[iotype][regexp].remove(e) if not all_events[iotype][regexp]: del all_events[iotype][regexp] # need to delete from self.events_re delete[iotype].append(regexp) # delete from events_re for iotype, regexps in delete.items(): reg.events_re[iotype] = [r for r in reg.events_re[iotype] if r[0] not in regexps]
[ "def", "detach_events", "(", "self", ",", "*", "events", ")", ":", "reg", "=", "self", ".", "registry", "delete", "=", "defaultdict", "(", "list", ")", "# remove from self.events", "all_events", "=", "reg", ".", "events", "for", "e", "in", "events", ":", "regexp", "=", "getattr", "(", "e", ".", "regexp", ",", "'re'", ",", "e", ".", "regexp", ")", "iotype", "=", "e", ".", "iotype", "if", "e", "in", "all_events", "[", "iotype", "]", ".", "get", "(", "regexp", ",", "[", "]", ")", ":", "all_events", "[", "iotype", "]", "[", "regexp", "]", ".", "remove", "(", "e", ")", "if", "not", "all_events", "[", "iotype", "]", "[", "regexp", "]", ":", "del", "all_events", "[", "iotype", "]", "[", "regexp", "]", "# need to delete from self.events_re", "delete", "[", "iotype", "]", ".", "append", "(", "regexp", ")", "# delete from events_re", "for", "iotype", ",", "regexps", "in", "delete", ".", "items", "(", ")", ":", "reg", ".", "events_re", "[", "iotype", "]", "=", "[", "r", "for", "r", "in", "reg", ".", "events_re", "[", "iotype", "]", "if", "r", "[", "0", "]", "not", "in", "regexps", "]" ]
Detach one or more events from the bot instance
[ "Detach", "one", "or", "more", "events", "from", "the", "bot", "instance" ]
python
train
40.190476
Qiskit/qiskit-terra
qiskit/transpiler/passes/merge_adjacent_barriers.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/transpiler/passes/merge_adjacent_barriers.py#L76-L143
def _collect_potential_merges(dag, barriers): """ Returns a dict of DAGNode : Barrier objects, where the barrier needs to be inserted where the corresponding DAGNode appears in the main DAG """ # if only got 1 or 0 barriers then can't merge if len(barriers) < 2: return None # mapping from the node that will be the main barrier to the # barrier object that gets built up node_to_barrier_qubits = {} # Start from the first barrier current_barrier = barriers[0] end_of_barrier = current_barrier current_barrier_nodes = [current_barrier] current_qubits = set(current_barrier.qargs) current_ancestors = dag.ancestors(current_barrier) current_descendants = dag.descendants(current_barrier) barrier_to_add = Barrier(len(current_qubits)) for next_barrier in barriers[1:]: # Remove all barriers that have already been included in this new barrier from the set # of ancestors/descendants as they will be removed from the new DAG when it is created next_ancestors = {nd for nd in dag.ancestors(next_barrier) if nd not in current_barrier_nodes} next_descendants = {nd for nd in dag.descendants(next_barrier) if nd not in current_barrier_nodes} next_qubits = set(next_barrier.qargs) if ( not current_qubits.isdisjoint(next_qubits) and current_ancestors.isdisjoint(next_descendants) and current_descendants.isdisjoint(next_ancestors) ): # can be merged current_ancestors = current_ancestors | next_ancestors current_descendants = current_descendants | next_descendants current_qubits = current_qubits | next_qubits # update the barrier that will be added back to include this barrier barrier_to_add = Barrier(len(current_qubits)) else: # store the previously made barrier if barrier_to_add: node_to_barrier_qubits[end_of_barrier] = current_qubits # reset the properties current_qubits = set(next_barrier.qargs) current_ancestors = dag.ancestors(next_barrier) current_descendants = dag.descendants(next_barrier) barrier_to_add = Barrier(len(current_qubits)) current_barrier_nodes = [] end_of_barrier = next_barrier current_barrier_nodes.append(end_of_barrier) if barrier_to_add: node_to_barrier_qubits[end_of_barrier] = current_qubits return node_to_barrier_qubits
[ "def", "_collect_potential_merges", "(", "dag", ",", "barriers", ")", ":", "# if only got 1 or 0 barriers then can't merge", "if", "len", "(", "barriers", ")", "<", "2", ":", "return", "None", "# mapping from the node that will be the main barrier to the", "# barrier object that gets built up", "node_to_barrier_qubits", "=", "{", "}", "# Start from the first barrier", "current_barrier", "=", "barriers", "[", "0", "]", "end_of_barrier", "=", "current_barrier", "current_barrier_nodes", "=", "[", "current_barrier", "]", "current_qubits", "=", "set", "(", "current_barrier", ".", "qargs", ")", "current_ancestors", "=", "dag", ".", "ancestors", "(", "current_barrier", ")", "current_descendants", "=", "dag", ".", "descendants", "(", "current_barrier", ")", "barrier_to_add", "=", "Barrier", "(", "len", "(", "current_qubits", ")", ")", "for", "next_barrier", "in", "barriers", "[", "1", ":", "]", ":", "# Remove all barriers that have already been included in this new barrier from the set", "# of ancestors/descendants as they will be removed from the new DAG when it is created", "next_ancestors", "=", "{", "nd", "for", "nd", "in", "dag", ".", "ancestors", "(", "next_barrier", ")", "if", "nd", "not", "in", "current_barrier_nodes", "}", "next_descendants", "=", "{", "nd", "for", "nd", "in", "dag", ".", "descendants", "(", "next_barrier", ")", "if", "nd", "not", "in", "current_barrier_nodes", "}", "next_qubits", "=", "set", "(", "next_barrier", ".", "qargs", ")", "if", "(", "not", "current_qubits", ".", "isdisjoint", "(", "next_qubits", ")", "and", "current_ancestors", ".", "isdisjoint", "(", "next_descendants", ")", "and", "current_descendants", ".", "isdisjoint", "(", "next_ancestors", ")", ")", ":", "# can be merged", "current_ancestors", "=", "current_ancestors", "|", "next_ancestors", "current_descendants", "=", "current_descendants", "|", "next_descendants", "current_qubits", "=", "current_qubits", "|", "next_qubits", "# update the barrier that will be added back to include this barrier", "barrier_to_add", "=", "Barrier", "(", "len", "(", "current_qubits", ")", ")", "else", ":", "# store the previously made barrier", "if", "barrier_to_add", ":", "node_to_barrier_qubits", "[", "end_of_barrier", "]", "=", "current_qubits", "# reset the properties", "current_qubits", "=", "set", "(", "next_barrier", ".", "qargs", ")", "current_ancestors", "=", "dag", ".", "ancestors", "(", "next_barrier", ")", "current_descendants", "=", "dag", ".", "descendants", "(", "next_barrier", ")", "barrier_to_add", "=", "Barrier", "(", "len", "(", "current_qubits", ")", ")", "current_barrier_nodes", "=", "[", "]", "end_of_barrier", "=", "next_barrier", "current_barrier_nodes", ".", "append", "(", "end_of_barrier", ")", "if", "barrier_to_add", ":", "node_to_barrier_qubits", "[", "end_of_barrier", "]", "=", "current_qubits", "return", "node_to_barrier_qubits" ]
Returns a dict of DAGNode : Barrier objects; each barrier needs to be inserted at the point where its corresponding DAGNode appears in the main DAG
[ "Returns", "a", "dict", "of", "DAGNode", ":", "Barrier", "objects", "where", "the", "barrier", "needs", "to", "be", "inserted", "where", "the", "corresponding", "DAGNode", "appears", "in", "the", "main", "DAG" ]
python
test
40.720588
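A minimal sketch of the merge condition used in _collect_potential_merges above, with plain Python tuples standing in for DAG barriers (the (qubits, ancestors, descendants) triples below are hypothetical stand-ins, not qiskit objects): two barriers may merge only when they share a qubit and neither is strictly ordered before the other.

# Each stand-in "barrier" is a (qubits, ancestors, descendants) triple.
b1 = ({0, 1}, {"g1"}, {"g3"})
b2 = ({1, 2}, {"g2"}, {"g4"})

def can_merge(a, b):
    # Same three disjointness tests as the pass above: shared qubits,
    # and no ancestor/descendant overlap in either direction.
    return (not a[0].isdisjoint(b[0])
            and a[1].isdisjoint(b[2])
            and a[2].isdisjoint(b[1]))

print(can_merge(b1, b2))  # True: qubit 1 is shared and there is no ordering conflict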
DLR-RM/RAFCON
source/rafcon/gui/models/container_state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/models/container_state.py#L371-L380
def get_scoped_variable_m(self, data_port_id): """Returns the scoped variable model for the given data port id :param data_port_id: The data port id to search for :return: The model of the scoped variable with the given id """ for scoped_variable_m in self.scoped_variables: if scoped_variable_m.scoped_variable.data_port_id == data_port_id: return scoped_variable_m return None
[ "def", "get_scoped_variable_m", "(", "self", ",", "data_port_id", ")", ":", "for", "scoped_variable_m", "in", "self", ".", "scoped_variables", ":", "if", "scoped_variable_m", ".", "scoped_variable", ".", "data_port_id", "==", "data_port_id", ":", "return", "scoped_variable_m", "return", "None" ]
Returns the scoped variable model for the given data port id :param data_port_id: The data port id to search for :return: The model of the scoped variable with the given id
[ "Returns", "the", "scoped", "variable", "model", "for", "the", "given", "data", "port", "id" ]
python
train
44.6
vahtras/findifftool
findifftool/core.py
https://github.com/vahtras/findifftool/blob/d1b36cc852acc2594c95a4bf7a786d68369802b3/findifftool/core.py#L94-L123
def ndhess(f, delta=DELTA): """ Returns numerical hessian function of given input function Input: f, scalar function of an numpy array object delta(optional), finite difference step Output: hessian function object """ def hess_f(*args, **kwargs): x = args[0] hess_val = numpy.zeros(x.shape + x.shape) it = numpy.nditer(x, op_flags=['readwrite'], flags=['multi_index']) for xi in it: i = it.multi_index jt = numpy.nditer(x, op_flags=['readwrite'], flags=['multi_index']) for xj in jt: j = jt.multi_index xi += delta/2 xj += delta/2 fpp = f(x) xj -= delta fpm = f(x) xi -= delta fmm = f(x) xj += delta fmp = f(x) xi += delta/2 xj -= delta/2 hess_val[i + j] = (fpp + fmm - fpm - fmp)/delta**2 return hess_val return hess_f
[ "def", "ndhess", "(", "f", ",", "delta", "=", "DELTA", ")", ":", "def", "hess_f", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "x", "=", "args", "[", "0", "]", "hess_val", "=", "numpy", ".", "zeros", "(", "x", ".", "shape", "+", "x", ".", "shape", ")", "it", "=", "numpy", ".", "nditer", "(", "x", ",", "op_flags", "=", "[", "'readwrite'", "]", ",", "flags", "=", "[", "'multi_index'", "]", ")", "for", "xi", "in", "it", ":", "i", "=", "it", ".", "multi_index", "jt", "=", "numpy", ".", "nditer", "(", "x", ",", "op_flags", "=", "[", "'readwrite'", "]", ",", "flags", "=", "[", "'multi_index'", "]", ")", "for", "xj", "in", "jt", ":", "j", "=", "jt", ".", "multi_index", "xi", "+=", "delta", "/", "2", "xj", "+=", "delta", "/", "2", "fpp", "=", "f", "(", "x", ")", "xj", "-=", "delta", "fpm", "=", "f", "(", "x", ")", "xi", "-=", "delta", "fmm", "=", "f", "(", "x", ")", "xj", "+=", "delta", "fmp", "=", "f", "(", "x", ")", "xi", "+=", "delta", "/", "2", "xj", "-=", "delta", "/", "2", "hess_val", "[", "i", "+", "j", "]", "=", "(", "fpp", "+", "fmm", "-", "fpm", "-", "fmp", ")", "/", "delta", "**", "2", "return", "hess_val", "return", "hess_f" ]
Returns numerical Hessian function of given input function
    Input: f, scalar function of a numpy array object
           delta (optional), finite difference step
    Output: Hessian function object
[ "Returns", "numerical", "Hessian", "function", "of", "given", "input", "function", "Input", ":", "f", "scalar", "function", "of", "a", "numpy", "array", "object", "delta", "(", "optional", ")", "finite", "difference", "step", "Output", ":", "Hessian", "function", "object" ]
python
test
33.766667
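A usage sketch for ndhess above, checking the finite-difference result against an analytic Hessian (assumes numpy and that ndhess as defined above is in scope; the step 1e-4 is an arbitrary example value):

import numpy

def f(x):
    return x[0]**2 * x[1] + x[1]**3

hess = ndhess(f, delta=1e-4)
x0 = numpy.array([1.0, 2.0])
print(hess(x0))
# Analytic Hessian is [[2*x1, 2*x0], [2*x0, 6*x1]] = [[4, 2], [2, 12]] at (1, 2),
# so the printed values should agree to several decimal places.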
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/utils/traitlets.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/traitlets.py#L120-L140
def parse_notifier_name(name): """Convert the name argument to a list of names. Examples -------- >>> parse_notifier_name('a') ['a'] >>> parse_notifier_name(['a','b']) ['a', 'b'] >>> parse_notifier_name(None) ['anytrait'] """ if isinstance(name, str): return [name] elif name is None: return ['anytrait'] elif isinstance(name, (list, tuple)): for n in name: assert isinstance(n, str), "names must be strings" return name
[ "def", "parse_notifier_name", "(", "name", ")", ":", "if", "isinstance", "(", "name", ",", "str", ")", ":", "return", "[", "name", "]", "elif", "name", "is", "None", ":", "return", "[", "'anytrait'", "]", "elif", "isinstance", "(", "name", ",", "(", "list", ",", "tuple", ")", ")", ":", "for", "n", "in", "name", ":", "assert", "isinstance", "(", "n", ",", "str", ")", ",", "\"names must be strings\"", "return", "name" ]
Convert the name argument to a list of names. Examples -------- >>> parse_notifier_name('a') ['a'] >>> parse_notifier_name(['a','b']) ['a', 'b'] >>> parse_notifier_name(None) ['anytrait']
[ "Convert", "the", "name", "argument", "to", "a", "list", "of", "names", "." ]
python
test
23.714286
dh1tw/pyhamtools
pyhamtools/locator.py
https://github.com/dh1tw/pyhamtools/blob/ee7e4b8732e23c298da10e07163748156c16d0fa/pyhamtools/locator.py#L239-L266
def calculate_heading_longpath(locator1, locator2): """calculates the heading from the first to the second locator (long path) Args: locator1 (string): Locator, either 4 or 6 characters locator2 (string): Locator, either 4 or 6 characters Returns: float: Long path heading in deg Raises: ValueError: When called with wrong or invalid input arg AttributeError: When args are not a string Example: The following calculates the long path heading from locator1 to locator2 >>> from pyhamtools.locator import calculate_heading_longpath >>> calculate_heading_longpath("JN48QM", "QF67bf") 254.3136 """ heading = calculate_heading(locator1, locator2) lp = (heading + 180)%360 return lp
[ "def", "calculate_heading_longpath", "(", "locator1", ",", "locator2", ")", ":", "heading", "=", "calculate_heading", "(", "locator1", ",", "locator2", ")", "lp", "=", "(", "heading", "+", "180", ")", "%", "360", "return", "lp" ]
calculates the heading from the first to the second locator (long path) Args: locator1 (string): Locator, either 4 or 6 characters locator2 (string): Locator, either 4 or 6 characters Returns: float: Long path heading in deg Raises: ValueError: When called with wrong or invalid input arg AttributeError: When args are not a string Example: The following calculates the long path heading from locator1 to locator2 >>> from pyhamtools.locator import calculate_heading_longpath >>> calculate_heading_longpath("JN48QM", "QF67bf") 254.3136
[ "calculates", "the", "heading", "from", "the", "first", "to", "the", "second", "locator", "(", "long", "path", ")" ]
python
train
29.107143
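Since the long-path heading is just the short-path heading rotated by 180 degrees modulo 360, the two functions agree up to that rotation; a small usage sketch (assuming both functions are importable from pyhamtools.locator, as the docstring example suggests):

from pyhamtools.locator import calculate_heading, calculate_heading_longpath

sp = calculate_heading("JN48QM", "QF67bf")         # short-path heading
lp = calculate_heading_longpath("JN48QM", "QF67bf")
assert lp == (sp + 180) % 360
print(sp, lp)  # roughly 74.31 and 254.31 degrees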
f3at/feat
src/feat/models/getter.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/models/getter.py#L250-L262
def value_attr(attr_name): """ Creates a getter that will retrieve value's attribute with specified name. @param attr_name: the name of an attribute belonging to the value. @type attr_name: str """ def value_attr(value, context, **_params): value = getattr(value, attr_name) return _attr(value) return value_attr
[ "def", "value_attr", "(", "attr_name", ")", ":", "def", "value_attr", "(", "value", ",", "context", ",", "*", "*", "_params", ")", ":", "value", "=", "getattr", "(", "value", ",", "attr_name", ")", "return", "_attr", "(", "value", ")", "return", "value_attr" ]
Creates a getter that will retrieve the value's attribute with the specified name.

@param attr_name: the name of an attribute belonging to the value.
@type attr_name: str
[ "Creates", "a", "getter", "that", "will", "retrieve", "the", "value", "s", "attribute", "with", "the", "specified", "name", "." ]
python
train
26.923077
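The factory above returns a closure that captures attr_name; a generic standalone sketch of the same pattern, without feat's internal _attr wrapper (make_attr_getter and Point are illustrative names, not part of the library):

def make_attr_getter(attr_name):
    # Returns a getter closed over attr_name, mirroring value_attr above.
    def getter(value, context=None, **_params):
        return getattr(value, attr_name)
    return getter

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

get_x = make_attr_getter("x")
print(get_x(Point(3, 4)))  # 3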
ihgazni2/elist
elist/elist.py
https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L3946-L3984
def uniqualize(l,**kwargs):
    '''
    from elist.elist import *
    l = [1, 2, 2]
    new = uniqualize(l)
    new
    id(l)
    id(new)
    ####
    l = [1, 2, 2]
    rslt = uniqualize(l,mode="original")
    rslt
    id(l)
    id(rslt)
    '''
    if('mode' in kwargs):
        mode = kwargs['mode']
    else:
        mode = 'new'
    pt = copy.deepcopy(l)
    seqs =[]
    freq = {}
    for i in range(0,pt.__len__()):
        v = pt[i]
        if(v in freq):
            freq[v] = freq[v] + 1
        else:
            freq[v] = 0
            seqs.append(i)
    ##### the part below is the performance bottleneck: append is particularly slow
    npt = select_seqs(pt,seqs)
    ########################
    pt = npt
    if(mode == 'new'):
        return(npt)
    else:
        l.clear()
        l.extend(npt)
        return(l)
[ "def", "uniqualize", "(", "l", ",", "*", "*", "kwargs", ")", ":", "if", "(", "'mode'", "in", "kwargs", ")", ":", "mode", "=", "kwargs", "[", "'mode'", "]", "else", ":", "mode", "=", "'new'", "pt", "=", "copy", ".", "deepcopy", "(", "l", ")", "seqs", "=", "[", "]", "freq", "=", "{", "}", "for", "i", "in", "range", "(", "0", ",", "pt", ".", "__len__", "(", ")", ")", ":", "v", "=", "pt", "[", "i", "]", "if", "(", "v", "in", "freq", ")", ":", "freq", "[", "v", "]", "=", "freq", "[", "v", "]", "+", "1", "else", ":", "freq", "[", "v", "]", "=", "0", "seqs", ".", "append", "(", "i", ")", "#####下面是影响速度的关键,append特别耗时", "npt", "=", "select_seqs", "(", "pt", ",", "seqs", ")", "########################", "pt", "=", "npt", "if", "(", "mode", "==", "'new'", ")", ":", "return", "(", "npt", ")", "else", ":", "l", ".", "clear", "(", ")", "l", ".", "extend", "(", "npt", ")", "return", "(", "l", ")" ]
from elist.elist import * l = [1, 2, 2] new = uniqualize(l) new id(l) id(new) #### l = [1, 2, 2] rslt = uniqualize(l,mode="original") rslt id(l) id(rslt)
[ "from", "elist", ".", "elist", "import", "*", "l", "=", "[", "1", "2", "2", "]", "new", "=", "uniqualize", "(", "l", ")", "new", "id", "(", "l", ")", "id", "(", "new", ")", "####", "l", "=", "[", "1", "2", "2", "]", "rslt", "=", "uniqualize", "(", "l", "mode", "=", "original", ")", "rslt", "id", "(", "l", ")", "id", "(", "rslt", ")" ]
python
valid
20.153846
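A usage sketch for uniqualize above, contrasting the two modes (assumes the function is importable, as the docstring's own example shows):

from elist.elist import uniqualize

l = [1, 2, 2, 3, 1]
print(uniqualize(l))                   # [1, 2, 3]; l itself is unchanged
print(uniqualize(l, mode="original"))  # [1, 2, 3]; l is now mutated in place

# For reference, the standard-library idiom for order-preserving dedup:
print(list(dict.fromkeys([1, 2, 2, 3, 1])))  # [1, 2, 3]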
project-ncl/pnc-cli
pnc_cli/buildrecords.py
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/buildrecords.py#L13-L19
def list_build_records(page_size=200, page_index=0, sort="", q=""): """ List all BuildRecords """ data = list_build_records_raw(page_size, page_index, sort, q) if data: return utils.format_json_list(data)
[ "def", "list_build_records", "(", "page_size", "=", "200", ",", "page_index", "=", "0", ",", "sort", "=", "\"\"", ",", "q", "=", "\"\"", ")", ":", "data", "=", "list_build_records_raw", "(", "page_size", ",", "page_index", ",", "sort", ",", "q", ")", "if", "data", ":", "return", "utils", ".", "format_json_list", "(", "data", ")" ]
List all BuildRecords
[ "List", "all", "BuildRecords" ]
python
train
32.285714
jhermann/pygments-markdown-lexer
src/pygments_markdown_lexer/__init__.py
https://github.com/jhermann/pygments-markdown-lexer/blob/e651a9a3f664285b01451eb39232b1ad9af65956/src/pygments_markdown_lexer/__init__.py#L34-L43
def setup(app): """ Initializer for Sphinx extension API. See http://www.sphinx-doc.org/en/stable/extdev/index.html#dev-extensions. """ lexer = MarkdownLexer() for alias in lexer.aliases: app.add_lexer(alias, lexer) return dict(version=__version__)
[ "def", "setup", "(", "app", ")", ":", "lexer", "=", "MarkdownLexer", "(", ")", "for", "alias", "in", "lexer", ".", "aliases", ":", "app", ".", "add_lexer", "(", "alias", ",", "lexer", ")", "return", "dict", "(", "version", "=", "__version__", ")" ]
Initializer for Sphinx extension API. See http://www.sphinx-doc.org/en/stable/extdev/index.html#dev-extensions.
[ "Initializer", "for", "Sphinx", "extension", "API", "." ]
python
valid
27.7
espressif/esptool
ecdsa/rfc6979.py
https://github.com/espressif/esptool/blob/c583756c118039cfcfe256f7a3285618914d16a5/ecdsa/rfc6979.py#L54-L103
def generate_k(order, secexp, hash_func, data): ''' order - order of the DSA generator used in the signature secexp - secure exponent (private key) in numeric form hash_func - reference to the same hash function used for generating hash data - hash in binary form of the signing data ''' qlen = bit_length(order) holen = hash_func().digest_size rolen = (qlen + 7) / 8 bx = number_to_string(secexp, order) + bits2octets(data, order) # Step B v = b('\x01') * holen # Step C k = b('\x00') * holen # Step D k = hmac.new(k, v+b('\x00')+bx, hash_func).digest() # Step E v = hmac.new(k, v, hash_func).digest() # Step F k = hmac.new(k, v+b('\x01')+bx, hash_func).digest() # Step G v = hmac.new(k, v, hash_func).digest() # Step H while True: # Step H1 t = b('') # Step H2 while len(t) < rolen: v = hmac.new(k, v, hash_func).digest() t += v # Step H3 secret = bits2int(t, qlen) if secret >= 1 and secret < order: return secret k = hmac.new(k, v+b('\x00'), hash_func).digest() v = hmac.new(k, v, hash_func).digest()
[ "def", "generate_k", "(", "order", ",", "secexp", ",", "hash_func", ",", "data", ")", ":", "qlen", "=", "bit_length", "(", "order", ")", "holen", "=", "hash_func", "(", ")", ".", "digest_size", "rolen", "=", "(", "qlen", "+", "7", ")", "/", "8", "bx", "=", "number_to_string", "(", "secexp", ",", "order", ")", "+", "bits2octets", "(", "data", ",", "order", ")", "# Step B", "v", "=", "b", "(", "'\\x01'", ")", "*", "holen", "# Step C", "k", "=", "b", "(", "'\\x00'", ")", "*", "holen", "# Step D", "k", "=", "hmac", ".", "new", "(", "k", ",", "v", "+", "b", "(", "'\\x00'", ")", "+", "bx", ",", "hash_func", ")", ".", "digest", "(", ")", "# Step E", "v", "=", "hmac", ".", "new", "(", "k", ",", "v", ",", "hash_func", ")", ".", "digest", "(", ")", "# Step F", "k", "=", "hmac", ".", "new", "(", "k", ",", "v", "+", "b", "(", "'\\x01'", ")", "+", "bx", ",", "hash_func", ")", ".", "digest", "(", ")", "# Step G", "v", "=", "hmac", ".", "new", "(", "k", ",", "v", ",", "hash_func", ")", ".", "digest", "(", ")", "# Step H", "while", "True", ":", "# Step H1", "t", "=", "b", "(", "''", ")", "# Step H2", "while", "len", "(", "t", ")", "<", "rolen", ":", "v", "=", "hmac", ".", "new", "(", "k", ",", "v", ",", "hash_func", ")", ".", "digest", "(", ")", "t", "+=", "v", "# Step H3", "secret", "=", "bits2int", "(", "t", ",", "qlen", ")", "if", "secret", ">=", "1", "and", "secret", "<", "order", ":", "return", "secret", "k", "=", "hmac", ".", "new", "(", "k", ",", "v", "+", "b", "(", "'\\x00'", ")", ",", "hash_func", ")", ".", "digest", "(", ")", "v", "=", "hmac", ".", "new", "(", "k", ",", "v", ",", "hash_func", ")", ".", "digest", "(", ")" ]
order - order of the DSA generator used in the signature secexp - secure exponent (private key) in numeric form hash_func - reference to the same hash function used for generating hash data - hash in binary form of the signing data
[ "order", "-", "order", "of", "the", "DSA", "generator", "used", "in", "the", "signature", "secexp", "-", "secure", "exponent", "(", "private", "key", ")", "in", "numeric", "form", "hash_func", "-", "reference", "to", "the", "same", "hash", "function", "used", "for", "generating", "hash", "data", "-", "hash", "in", "binary", "form", "of", "the", "signing", "data" ]
python
train
23.76
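A usage sketch for generate_k above, illustrating the deterministic property that RFC 6979 guarantees (Python 2, like the source; the P-256 group order and the toy private exponent below are example inputs only, and the module's helpers such as bits2int are assumed to be in scope):

import hashlib

order = 0xFFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551
secexp = 0x1D  # toy private key, for illustration only
msg_hash = hashlib.sha256(b"sample").digest()

k1 = generate_k(order, secexp, hashlib.sha256, msg_hash)
k2 = generate_k(order, secexp, hashlib.sha256, msg_hash)
assert k1 == k2         # same key and message always give the same nonce
assert 1 <= k1 < order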
veeti/decent
decent/error.py
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/error.py#L28-L37
def as_dict(self, join='.'): """ Returns the error as a path to message dictionary. Paths are joined with the ``join`` string. """ if self.path: path = [str(node) for node in self.path] else: path = '' return { join.join(path): self.message }
[ "def", "as_dict", "(", "self", ",", "join", "=", "'.'", ")", ":", "if", "self", ".", "path", ":", "path", "=", "[", "str", "(", "node", ")", "for", "node", "in", "self", ".", "path", "]", "else", ":", "path", "=", "''", "return", "{", "join", ".", "join", "(", "path", ")", ":", "self", ".", "message", "}" ]
Returns the error as a path-to-message dictionary. Paths are joined with the ``join`` string.
[ "Returns", "the", "error", "as", "a", "path", "to", "message", "dictionary", ".", "Paths", "are", "joined", "with", "the", "join", "string", "." ]
python
train
31.3
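A standalone sketch of what as_dict above produces for a nested path (the path and message values are made up for illustration):

path = ['user', 'email']
message = 'Invalid address'
join = '.'
# Same computation as the method body above:
print({join.join(str(node) for node in path): message})
# {'user.email': 'Invalid address'}; an empty path would give {'': message}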
slightlynybbled/tk_tools
tk_tools/groups.py
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L148-L198
def add_row(self, data: list=None): """ Add a row of data to the current widget, add a <Tab> \ binding to the last element of the last row, and set \ the focus at the beginning of the next row. :param data: a row of data :return: None """ # validation if self.headers and data: if len(self.headers) != len(data): raise ValueError offset = 0 if not self.headers else 1 row = list() if data: for i, element in enumerate(data): contents = '' if element is None else str(element) entry = ttk.Entry(self) entry.insert(0, contents) entry.grid(row=len(self._rows) + offset, column=i, sticky='E,W') row.append(entry) else: for i in range(self.num_of_columns): entry = ttk.Entry(self) entry.grid(row=len(self._rows) + offset, column=i, sticky='E,W') row.append(entry) self._rows.append(row) # clear all bindings for row in self._rows: for widget in row: widget.unbind('<Tab>') def add(e): self.add_row() last_entry = self._rows[-1][-1] last_entry.bind('<Tab>', add) e = self._rows[-1][0] e.focus_set() self._redraw()
[ "def", "add_row", "(", "self", ",", "data", ":", "list", "=", "None", ")", ":", "# validation", "if", "self", ".", "headers", "and", "data", ":", "if", "len", "(", "self", ".", "headers", ")", "!=", "len", "(", "data", ")", ":", "raise", "ValueError", "offset", "=", "0", "if", "not", "self", ".", "headers", "else", "1", "row", "=", "list", "(", ")", "if", "data", ":", "for", "i", ",", "element", "in", "enumerate", "(", "data", ")", ":", "contents", "=", "''", "if", "element", "is", "None", "else", "str", "(", "element", ")", "entry", "=", "ttk", ".", "Entry", "(", "self", ")", "entry", ".", "insert", "(", "0", ",", "contents", ")", "entry", ".", "grid", "(", "row", "=", "len", "(", "self", ".", "_rows", ")", "+", "offset", ",", "column", "=", "i", ",", "sticky", "=", "'E,W'", ")", "row", ".", "append", "(", "entry", ")", "else", ":", "for", "i", "in", "range", "(", "self", ".", "num_of_columns", ")", ":", "entry", "=", "ttk", ".", "Entry", "(", "self", ")", "entry", ".", "grid", "(", "row", "=", "len", "(", "self", ".", "_rows", ")", "+", "offset", ",", "column", "=", "i", ",", "sticky", "=", "'E,W'", ")", "row", ".", "append", "(", "entry", ")", "self", ".", "_rows", ".", "append", "(", "row", ")", "# clear all bindings", "for", "row", "in", "self", ".", "_rows", ":", "for", "widget", "in", "row", ":", "widget", ".", "unbind", "(", "'<Tab>'", ")", "def", "add", "(", "e", ")", ":", "self", ".", "add_row", "(", ")", "last_entry", "=", "self", ".", "_rows", "[", "-", "1", "]", "[", "-", "1", "]", "last_entry", ".", "bind", "(", "'<Tab>'", ",", "add", ")", "e", "=", "self", ".", "_rows", "[", "-", "1", "]", "[", "0", "]", "e", ".", "focus_set", "(", ")", "self", ".", "_redraw", "(", ")" ]
Add a row of data to the current widget, add a <Tab> \ binding to the last element of the last row, and set \ the focus at the beginning of the next row. :param data: a row of data :return: None
[ "Add", "a", "row", "of", "data", "to", "the", "current", "widget", "add", "a", "<Tab", ">", "\\", "binding", "to", "the", "last", "element", "of", "the", "last", "row", "and", "set", "\\", "the", "focus", "at", "the", "beginning", "of", "the", "next", "row", "." ]
python
train
28.823529
serge-sans-paille/pythran
pythran/backend.py
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/backend.py#L688-L758
def visit_For(self, node): """ Create For representation for Cxx generation. Examples -------- >> for i in xrange(10): >> ... work ... Becomes >> typename returnable<decltype(__builtin__.xrange(10))>::type __iterX = __builtin__.xrange(10); >> ... possible container size reservation ... >> for (auto&& i: __iterX) >> ... the work ... This function also handle assignment for local variables. We can notice that three kind of loop are possible: - Normal for loop on iterator - Autofor loop. - Normal for loop using integer variable iteration Kind of loop used depend on OpenMP, yield use and variable scope. """ if not isinstance(node.target, ast.Name): raise PythranSyntaxError( "Using something other than an identifier as loop target", node.target) target = self.visit(node.target) # Handle the body of the for loop loop_body = Block([self.visit(stmt) for stmt in node.body]) # Declare local variables at the top of the loop body loop_body = self.process_locals(node, loop_body, node.target.id) iterable = self.visit(node.iter) if self.can_use_c_for(node): header, loop = self.gen_c_for(node, target, loop_body) else: if self.can_use_autofor(node): header = [] self.ldecls.remove(node.target.id) autofor = AutoFor(target, iterable, loop_body) loop = [self.process_omp_attachements(node, autofor)] else: # Iterator declaration local_iter = "__iter{0}".format(id(node)) local_iter_decl = self.types.builder.Assignable( self.types[node.iter]) self.handle_omp_for(node, local_iter) # Assign iterable # For C loop, it avoids issues # if the upper bound is assigned in the loop asgnt = self.make_assign(local_iter_decl, local_iter, iterable) header = [Statement(asgnt)] loop = self.gen_for(node, target, local_iter, local_iter_decl, loop_body) # For xxxComprehension, it is replaced by a for loop. In this case, # pre-allocate size of container. for comp in metadata.get(node, metadata.Comprehension): header.append(Statement("pythonic::utils::reserve({0},{1})".format( comp.target, iterable))) return Block(header + loop)
[ "def", "visit_For", "(", "self", ",", "node", ")", ":", "if", "not", "isinstance", "(", "node", ".", "target", ",", "ast", ".", "Name", ")", ":", "raise", "PythranSyntaxError", "(", "\"Using something other than an identifier as loop target\"", ",", "node", ".", "target", ")", "target", "=", "self", ".", "visit", "(", "node", ".", "target", ")", "# Handle the body of the for loop", "loop_body", "=", "Block", "(", "[", "self", ".", "visit", "(", "stmt", ")", "for", "stmt", "in", "node", ".", "body", "]", ")", "# Declare local variables at the top of the loop body", "loop_body", "=", "self", ".", "process_locals", "(", "node", ",", "loop_body", ",", "node", ".", "target", ".", "id", ")", "iterable", "=", "self", ".", "visit", "(", "node", ".", "iter", ")", "if", "self", ".", "can_use_c_for", "(", "node", ")", ":", "header", ",", "loop", "=", "self", ".", "gen_c_for", "(", "node", ",", "target", ",", "loop_body", ")", "else", ":", "if", "self", ".", "can_use_autofor", "(", "node", ")", ":", "header", "=", "[", "]", "self", ".", "ldecls", ".", "remove", "(", "node", ".", "target", ".", "id", ")", "autofor", "=", "AutoFor", "(", "target", ",", "iterable", ",", "loop_body", ")", "loop", "=", "[", "self", ".", "process_omp_attachements", "(", "node", ",", "autofor", ")", "]", "else", ":", "# Iterator declaration", "local_iter", "=", "\"__iter{0}\"", ".", "format", "(", "id", "(", "node", ")", ")", "local_iter_decl", "=", "self", ".", "types", ".", "builder", ".", "Assignable", "(", "self", ".", "types", "[", "node", ".", "iter", "]", ")", "self", ".", "handle_omp_for", "(", "node", ",", "local_iter", ")", "# Assign iterable", "# For C loop, it avoids issues", "# if the upper bound is assigned in the loop", "asgnt", "=", "self", ".", "make_assign", "(", "local_iter_decl", ",", "local_iter", ",", "iterable", ")", "header", "=", "[", "Statement", "(", "asgnt", ")", "]", "loop", "=", "self", ".", "gen_for", "(", "node", ",", "target", ",", "local_iter", ",", "local_iter_decl", ",", "loop_body", ")", "# For xxxComprehension, it is replaced by a for loop. In this case,", "# pre-allocate size of container.", "for", "comp", "in", "metadata", ".", "get", "(", "node", ",", "metadata", ".", "Comprehension", ")", ":", "header", ".", "append", "(", "Statement", "(", "\"pythonic::utils::reserve({0},{1})\"", ".", "format", "(", "comp", ".", "target", ",", "iterable", ")", ")", ")", "return", "Block", "(", "header", "+", "loop", ")" ]
Create For representation for Cxx generation. Examples -------- >> for i in xrange(10): >> ... work ... Becomes >> typename returnable<decltype(__builtin__.xrange(10))>::type __iterX = __builtin__.xrange(10); >> ... possible container size reservation ... >> for (auto&& i: __iterX) >> ... the work ... This function also handle assignment for local variables. We can notice that three kind of loop are possible: - Normal for loop on iterator - Autofor loop. - Normal for loop using integer variable iteration Kind of loop used depend on OpenMP, yield use and variable scope.
[ "Create", "For", "representation", "for", "Cxx", "generation", "." ]
python
train
37.070423
Cue/scales
src/greplin/scales/aggregation.py
https://github.com/Cue/scales/blob/0aced26eb050ceb98ee9d5d6cdca8db448666986/src/greplin/scales/aggregation.py#L255-L258
def result(self): """Formats the result.""" self.__result.sort(cmp = self.__cmp, key = self.__key, reverse = self.__reverse) return self.__result
[ "def", "result", "(", "self", ")", ":", "self", ".", "__result", ".", "sort", "(", "cmp", "=", "self", ".", "__cmp", ",", "key", "=", "self", ".", "__key", ",", "reverse", "=", "self", ".", "__reverse", ")", "return", "self", ".", "__result" ]
Formats the result.
[ "Formats", "the", "result", "." ]
python
train
38.5
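The cmp keyword in the sort above exists only in Python 2; under Python 3 the same comparator-based sort is written with functools.cmp_to_key (the comparator below is a stand-in for the aggregation's private one):

import functools

def cmp_func(a, b):  # stand-in comparator: ordinary ascending order
    return (a > b) - (a < b)

result = [3, 1, 2]
result.sort(key=functools.cmp_to_key(cmp_func), reverse=False)
print(result)  # [1, 2, 3]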
geertj/gruvi
lib/gruvi/process.py
https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/process.py#L309-L350
def communicate(self, input=None, timeout=-1): """Communicate with the child and return its output. If *input* is provided, it is sent to the client. Concurrent with sending the input, the child's standard output and standard error are read, until the child exits. The return value is a tuple ``(stdout_data, stderr_data)`` containing the data read from standard output and standard error. """ if self._process is None: raise RuntimeError('no child process') if timeout == -1: timeout = self._timeout output = [[], []] def writer(stream, data): offset = 0 while offset < len(data): buf = data[offset:offset+4096] stream.write(buf) offset += len(buf) stream.close() def reader(stream, data): while True: if self._encoding: buf = stream.read(4096) else: buf = stream.read1() if not buf: break data.append(buf) if self.stdin: fibers.spawn(writer, self.stdin, input or b'') if self.stdout: fibers.spawn(reader, self.stdout, output[0]) if self.stderr: fibers.spawn(reader, self.stderr, output[1]) self.wait(timeout) empty = '' if self._encoding else b'' stdout_data = empty.join(output[0]) stderr_data = empty.join(output[1]) return (stdout_data, stderr_data)
[ "def", "communicate", "(", "self", ",", "input", "=", "None", ",", "timeout", "=", "-", "1", ")", ":", "if", "self", ".", "_process", "is", "None", ":", "raise", "RuntimeError", "(", "'no child process'", ")", "if", "timeout", "==", "-", "1", ":", "timeout", "=", "self", ".", "_timeout", "output", "=", "[", "[", "]", ",", "[", "]", "]", "def", "writer", "(", "stream", ",", "data", ")", ":", "offset", "=", "0", "while", "offset", "<", "len", "(", "data", ")", ":", "buf", "=", "data", "[", "offset", ":", "offset", "+", "4096", "]", "stream", ".", "write", "(", "buf", ")", "offset", "+=", "len", "(", "buf", ")", "stream", ".", "close", "(", ")", "def", "reader", "(", "stream", ",", "data", ")", ":", "while", "True", ":", "if", "self", ".", "_encoding", ":", "buf", "=", "stream", ".", "read", "(", "4096", ")", "else", ":", "buf", "=", "stream", ".", "read1", "(", ")", "if", "not", "buf", ":", "break", "data", ".", "append", "(", "buf", ")", "if", "self", ".", "stdin", ":", "fibers", ".", "spawn", "(", "writer", ",", "self", ".", "stdin", ",", "input", "or", "b''", ")", "if", "self", ".", "stdout", ":", "fibers", ".", "spawn", "(", "reader", ",", "self", ".", "stdout", ",", "output", "[", "0", "]", ")", "if", "self", ".", "stderr", ":", "fibers", ".", "spawn", "(", "reader", ",", "self", ".", "stderr", ",", "output", "[", "1", "]", ")", "self", ".", "wait", "(", "timeout", ")", "empty", "=", "''", "if", "self", ".", "_encoding", "else", "b''", "stdout_data", "=", "empty", ".", "join", "(", "output", "[", "0", "]", ")", "stderr_data", "=", "empty", ".", "join", "(", "output", "[", "1", "]", ")", "return", "(", "stdout_data", ",", "stderr_data", ")" ]
Communicate with the child and return its output.

If *input* is provided, it is sent to the child process. Concurrent with sending the input, the child's standard output and standard error are read, until the child exits.

The return value is a tuple ``(stdout_data, stderr_data)`` containing the data read from standard output and standard error.
[ "Communicate", "with", "the", "child", "and", "return", "its", "output", "." ]
python
train
37.02381
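The writer fiber above streams input to the child in fixed-size chunks; the same pattern shown standalone, with io.BytesIO standing in for the child's stdin stream:

import io

def write_chunked(stream, data, chunk=4096):
    # Write data in chunk-sized slices, as the writer above does.
    offset = 0
    while offset < len(data):
        buf = data[offset:offset + chunk]
        stream.write(buf)
        offset += len(buf)

out = io.BytesIO()
write_chunked(out, b"x" * 10000)
assert out.getvalue() == b"x" * 10000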
dhermes/bezier
src/bezier/surface.py
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/surface.py#L554-L598
def evaluate_cartesian(self, s, t, _verify=True): r"""Compute a point on the surface. Evaluates :math:`B\left(1 - s - t, s, t\right)` by calling :meth:`evaluate_barycentric`: This method acts as a (partial) inverse to :meth:`locate`. .. testsetup:: surface-cartesian import numpy as np import bezier .. doctest:: surface-cartesian :options: +NORMALIZE_WHITESPACE >>> nodes = np.asfortranarray([ ... [0.0, 0.5, 1.0 , 0.0, 0.5, 0.25], ... [0.0, 0.5, 0.625, 0.5, 0.5, 1.0 ], ... ]) >>> surface = bezier.Surface(nodes, degree=2) >>> point = surface.evaluate_cartesian(0.125, 0.375) >>> point array([[0.16015625], [0.44726562]]) >>> surface.evaluate_barycentric(0.5, 0.125, 0.375) array([[0.16015625], [0.44726562]]) Args: s (float): Parameter along the reference triangle. t (float): Parameter along the reference triangle. _verify (Optional[bool]): Indicates if the coordinates should be verified inside of the reference triangle. Defaults to :data:`True`. Returns: numpy.ndarray: The point on the surface (as a two dimensional NumPy array). """ if _verify: self._verify_cartesian(s, t) return _surface_helpers.evaluate_barycentric( self._nodes, self._degree, 1.0 - s - t, s, t )
[ "def", "evaluate_cartesian", "(", "self", ",", "s", ",", "t", ",", "_verify", "=", "True", ")", ":", "if", "_verify", ":", "self", ".", "_verify_cartesian", "(", "s", ",", "t", ")", "return", "_surface_helpers", ".", "evaluate_barycentric", "(", "self", ".", "_nodes", ",", "self", ".", "_degree", ",", "1.0", "-", "s", "-", "t", ",", "s", ",", "t", ")" ]
r"""Compute a point on the surface. Evaluates :math:`B\left(1 - s - t, s, t\right)` by calling :meth:`evaluate_barycentric`: This method acts as a (partial) inverse to :meth:`locate`. .. testsetup:: surface-cartesian import numpy as np import bezier .. doctest:: surface-cartesian :options: +NORMALIZE_WHITESPACE >>> nodes = np.asfortranarray([ ... [0.0, 0.5, 1.0 , 0.0, 0.5, 0.25], ... [0.0, 0.5, 0.625, 0.5, 0.5, 1.0 ], ... ]) >>> surface = bezier.Surface(nodes, degree=2) >>> point = surface.evaluate_cartesian(0.125, 0.375) >>> point array([[0.16015625], [0.44726562]]) >>> surface.evaluate_barycentric(0.5, 0.125, 0.375) array([[0.16015625], [0.44726562]]) Args: s (float): Parameter along the reference triangle. t (float): Parameter along the reference triangle. _verify (Optional[bool]): Indicates if the coordinates should be verified inside of the reference triangle. Defaults to :data:`True`. Returns: numpy.ndarray: The point on the surface (as a two dimensional NumPy array).
[ "r", "Compute", "a", "point", "on", "the", "surface", "." ]
python
train
34.222222
jwodder/doapi
doapi/ssh_key.py
https://github.com/jwodder/doapi/blob/b1306de86a01d8ae7b9c1fe2699765bb82e4f310/doapi/ssh_key.py#L58-L68
def fetch(self): """ Fetch & return a new `SSHKey` object representing the SSH key's current state :rtype: SSHKey :raises DOAPIError: if the API endpoint replies with an error (e.g., if the SSH key no longer exists) """ api = self.doapi_manager return api._ssh_key(api.request(self.url)["ssh_key"])
[ "def", "fetch", "(", "self", ")", ":", "api", "=", "self", ".", "doapi_manager", "return", "api", ".", "_ssh_key", "(", "api", ".", "request", "(", "self", ".", "url", ")", "[", "\"ssh_key\"", "]", ")" ]
Fetch & return a new `SSHKey` object representing the SSH key's current state :rtype: SSHKey :raises DOAPIError: if the API endpoint replies with an error (e.g., if the SSH key no longer exists)
[ "Fetch", "&", "return", "a", "new", "SSHKey", "object", "representing", "the", "SSH", "key", "s", "current", "state" ]
python
train
33.181818
lrq3000/pyFileFixity
pyFileFixity/lib/distance/distance/_simpledists.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/distance/distance/_simpledists.py#L27-L34
def jaccard(seq1, seq2): """Compute the Jaccard distance between the two sequences `seq1` and `seq2`. They should contain hashable items. The return value is a float between 0 and 1, where 0 means equal, and 1 totally different. """ set1, set2 = set(seq1), set(seq2) return 1 - len(set1 & set2) / float(len(set1 | set2))
[ "def", "jaccard", "(", "seq1", ",", "seq2", ")", ":", "set1", ",", "set2", "=", "set", "(", "seq1", ")", ",", "set", "(", "seq2", ")", "return", "1", "-", "len", "(", "set1", "&", "set2", ")", "/", "float", "(", "len", "(", "set1", "|", "set2", ")", ")" ]
Compute the Jaccard distance between the two sequences `seq1` and `seq2`. They should contain hashable items. The return value is a float between 0 and 1, where 0 means equal, and 1 totally different.
[ "Compute", "the", "Jaccard", "distance", "between", "the", "two", "sequences", "seq1", "and", "seq2", ".", "They", "should", "contain", "hashable", "items", ".", "The", "return", "value", "is", "a", "float", "between", "0", "and", "1", "where", "0", "means", "equal", "and", "1", "totally", "different", "." ]
python
train
40.125
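A quick usage sketch for jaccard above (with the function as defined in scope; strings work because they are iterables of hashable characters):

print(jaccard("abc", "abd"))    # 2 shared of 4 distinct items -> 0.5
print(jaccard([1, 2], [2, 1]))  # 0.0: identical as sets, order ignored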
openstack/pyghmi
pyghmi/ipmi/oem/lenovo/handler.py
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/oem/lenovo/handler.py#L359-L371
def has_tsm(self): """True if this particular server have a TSM based service processor """ if (self.oemid['manufacturer_id'] == 19046 and self.oemid['device_id'] == 32): try: self.ipmicmd.xraw_command(netfn=0x3a, command=0xf) except pygexc.IpmiException as ie: if ie.ipmicode == 193: return False raise return True return False
[ "def", "has_tsm", "(", "self", ")", ":", "if", "(", "self", ".", "oemid", "[", "'manufacturer_id'", "]", "==", "19046", "and", "self", ".", "oemid", "[", "'device_id'", "]", "==", "32", ")", ":", "try", ":", "self", ".", "ipmicmd", ".", "xraw_command", "(", "netfn", "=", "0x3a", ",", "command", "=", "0xf", ")", "except", "pygexc", ".", "IpmiException", "as", "ie", ":", "if", "ie", ".", "ipmicode", "==", "193", ":", "return", "False", "raise", "return", "True", "return", "False" ]
True if this particular server has a TSM based service processor
[ "True", "if", "this", "particular", "server", "has", "a", "TSM", "based", "service", "processor" ]
python
train
36
finklabs/metrics
metrics/metrics_utils.py
https://github.com/finklabs/metrics/blob/fd9974af498831664b9ae8e8f3834e1ec2e8a699/metrics/metrics_utils.py#L168-L224
def summary(processors, metrics, context): """Print the summary""" # display aggregated metric values on language level def display_header(processors, before='', after=''): """Display the header for the summary results.""" print(before, end=' ') for processor in processors: processor.display_header() print(after) def display_separator(processors, before='', after=''): """Display the header for the summary results.""" print(before, end=' ') for processor in processors: processor.display_separator() print(after) def display_metrics(processors, before='', after='', metrics=[]): """Display the header for the summary results.""" print(before, end=' ') for processor in processors: processor.display_metrics(metrics) print(after) summary = {} for m in metrics: lang = metrics[m]['language'] has_key = lang in summary if not has_key: summary[lang] = {'file_count': 0, 'language': lang} summary[lang]['file_count'] += 1 for i in metrics[m]: if i not in ['sloc', 'comments', 'mccabe']: # include metrics to be used continue if not has_key: summary[lang][i] = 0 summary[lang][i] += metrics[m][i] total = {'language': 'Total'} for m in summary: for i in summary[m]: if i == 'language': continue if i not in total: total[i] = 0 total[i] += summary[m][i] print('Metrics Summary:') display_header(processors, 'Files', '') display_separator(processors, '-'*5, '') for k in sorted(summary.keys(), key=str.lower): display_metrics(processors, '%5d' % summary[k]['file_count'], '', summary[k]) display_separator(processors, '-'*5, '') display_metrics(processors, '%5d' % total['file_count'], '', total)
[ "def", "summary", "(", "processors", ",", "metrics", ",", "context", ")", ":", "# display aggregated metric values on language level", "def", "display_header", "(", "processors", ",", "before", "=", "''", ",", "after", "=", "''", ")", ":", "\"\"\"Display the header for the summary results.\"\"\"", "print", "(", "before", ",", "end", "=", "' '", ")", "for", "processor", "in", "processors", ":", "processor", ".", "display_header", "(", ")", "print", "(", "after", ")", "def", "display_separator", "(", "processors", ",", "before", "=", "''", ",", "after", "=", "''", ")", ":", "\"\"\"Display the header for the summary results.\"\"\"", "print", "(", "before", ",", "end", "=", "' '", ")", "for", "processor", "in", "processors", ":", "processor", ".", "display_separator", "(", ")", "print", "(", "after", ")", "def", "display_metrics", "(", "processors", ",", "before", "=", "''", ",", "after", "=", "''", ",", "metrics", "=", "[", "]", ")", ":", "\"\"\"Display the header for the summary results.\"\"\"", "print", "(", "before", ",", "end", "=", "' '", ")", "for", "processor", "in", "processors", ":", "processor", ".", "display_metrics", "(", "metrics", ")", "print", "(", "after", ")", "summary", "=", "{", "}", "for", "m", "in", "metrics", ":", "lang", "=", "metrics", "[", "m", "]", "[", "'language'", "]", "has_key", "=", "lang", "in", "summary", "if", "not", "has_key", ":", "summary", "[", "lang", "]", "=", "{", "'file_count'", ":", "0", ",", "'language'", ":", "lang", "}", "summary", "[", "lang", "]", "[", "'file_count'", "]", "+=", "1", "for", "i", "in", "metrics", "[", "m", "]", ":", "if", "i", "not", "in", "[", "'sloc'", ",", "'comments'", ",", "'mccabe'", "]", ":", "# include metrics to be used", "continue", "if", "not", "has_key", ":", "summary", "[", "lang", "]", "[", "i", "]", "=", "0", "summary", "[", "lang", "]", "[", "i", "]", "+=", "metrics", "[", "m", "]", "[", "i", "]", "total", "=", "{", "'language'", ":", "'Total'", "}", "for", "m", "in", "summary", ":", "for", "i", "in", "summary", "[", "m", "]", ":", "if", "i", "==", "'language'", ":", "continue", "if", "i", "not", "in", "total", ":", "total", "[", "i", "]", "=", "0", "total", "[", "i", "]", "+=", "summary", "[", "m", "]", "[", "i", "]", "print", "(", "'Metrics Summary:'", ")", "display_header", "(", "processors", ",", "'Files'", ",", "''", ")", "display_separator", "(", "processors", ",", "'-'", "*", "5", ",", "''", ")", "for", "k", "in", "sorted", "(", "summary", ".", "keys", "(", ")", ",", "key", "=", "str", ".", "lower", ")", ":", "display_metrics", "(", "processors", ",", "'%5d'", "%", "summary", "[", "k", "]", "[", "'file_count'", "]", ",", "''", ",", "summary", "[", "k", "]", ")", "display_separator", "(", "processors", ",", "'-'", "*", "5", ",", "''", ")", "display_metrics", "(", "processors", ",", "'%5d'", "%", "total", "[", "'file_count'", "]", ",", "''", ",", "total", ")" ]
Print the summary
[ "Print", "the", "summary" ]
python
train
34.77193
dwavesystems/dimod
dimod/binary_quadratic_model.py
https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L1497-L1520
def energies(self, samples_like, dtype=np.float): """Determine the energies of the given samples. Args: samples_like (samples_like): A collection of raw samples. `samples_like` is an extension of NumPy's array_like structure. See :func:`.as_samples`. dtype (:class:`numpy.dtype`): The data type of the returned energies. Returns: :obj:`numpy.ndarray`: The energies. """ samples, labels = as_samples(samples_like) if all(v == idx for idx, v in enumerate(labels)): ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(dtype=dtype) else: ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(variable_order=labels, dtype=dtype) energies = samples.dot(ldata) + (samples[:, irow]*samples[:, icol]).dot(qdata) + offset return np.asarray(energies, dtype=dtype)
[ "def", "energies", "(", "self", ",", "samples_like", ",", "dtype", "=", "np", ".", "float", ")", ":", "samples", ",", "labels", "=", "as_samples", "(", "samples_like", ")", "if", "all", "(", "v", "==", "idx", "for", "idx", ",", "v", "in", "enumerate", "(", "labels", ")", ")", ":", "ldata", ",", "(", "irow", ",", "icol", ",", "qdata", ")", ",", "offset", "=", "self", ".", "to_numpy_vectors", "(", "dtype", "=", "dtype", ")", "else", ":", "ldata", ",", "(", "irow", ",", "icol", ",", "qdata", ")", ",", "offset", "=", "self", ".", "to_numpy_vectors", "(", "variable_order", "=", "labels", ",", "dtype", "=", "dtype", ")", "energies", "=", "samples", ".", "dot", "(", "ldata", ")", "+", "(", "samples", "[", ":", ",", "irow", "]", "*", "samples", "[", ":", ",", "icol", "]", ")", ".", "dot", "(", "qdata", ")", "+", "offset", "return", "np", ".", "asarray", "(", "energies", ",", "dtype", "=", "dtype", ")" ]
Determine the energies of the given samples. Args: samples_like (samples_like): A collection of raw samples. `samples_like` is an extension of NumPy's array_like structure. See :func:`.as_samples`. dtype (:class:`numpy.dtype`): The data type of the returned energies. Returns: :obj:`numpy.ndarray`: The energies.
[ "Determine", "the", "energies", "of", "the", "given", "samples", "." ]
python
train
38.625
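The vectorized expression above reduces to E = samples.dot(ldata) + (samples[:, irow] * samples[:, icol]).dot(qdata) + offset; a standalone numpy sketch with toy vectors (the bias values below are made-up examples):

import numpy as np

samples = np.array([[1, -1], [1, 1]])       # two spin samples over 2 variables
ldata = np.array([0.5, -0.5])               # linear biases
irow, icol = np.array([0]), np.array([1])   # one quadratic term on pair (0, 1)
qdata = np.array([1.0])
offset = 0.25

energies = samples.dot(ldata) + (samples[:, irow] * samples[:, icol]).dot(qdata) + offset
print(energies)  # [0.25, 1.25]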
belbio/bel
bel/nanopub/pubmed.py
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/pubmed.py#L302-L319
def get_pubmed_for_beleditor(pmid: str) -> Mapping[str, Any]: """Get fully annotated pubmed doc with Pubtator and full entity/annotation_types Args: pmid: Pubmed PMID Returns: Mapping[str, Any]: pubmed dictionary """ pubmed = get_pubmed(pmid) pubtator = get_pubtator(pmid) pubmed["annotations"] = copy.deepcopy(pubtator["annotations"]) # Add entity types and annotation types to annotations pubmed = enhance_pubmed_annotations(pubmed) return pubmed
[ "def", "get_pubmed_for_beleditor", "(", "pmid", ":", "str", ")", "->", "Mapping", "[", "str", ",", "Any", "]", ":", "pubmed", "=", "get_pubmed", "(", "pmid", ")", "pubtator", "=", "get_pubtator", "(", "pmid", ")", "pubmed", "[", "\"annotations\"", "]", "=", "copy", ".", "deepcopy", "(", "pubtator", "[", "\"annotations\"", "]", ")", "# Add entity types and annotation types to annotations", "pubmed", "=", "enhance_pubmed_annotations", "(", "pubmed", ")", "return", "pubmed" ]
Get fully annotated pubmed doc with Pubtator and full entity/annotation_types Args: pmid: Pubmed PMID Returns: Mapping[str, Any]: pubmed dictionary
[ "Get", "fully", "annotated", "pubmed", "doc", "with", "Pubtator", "and", "full", "entity", "/", "annotation_types" ]
python
train
27.333333
elkiwy/paynter
paynter/image.py
https://github.com/elkiwy/paynter/blob/f73cb5bb010a6b32ee41640a50396ed0bae8d496/paynter/image.py#L54-L62
def newLayer(self, effect=''): """ Creates a new :py:class:`Layer` and set that as the active. :param effect: A string with the blend mode for that layer that will be used when during the rendering process. The accepted values are: :code:`'soft_light','lighten','screen','dodge','addition','darken','multiply','hard_light','difference','subtract','grain_extract','grain_merge','divide','overlay'`. :rtype: Nothing. """ self.layers.append(Layer(effect = effect)) self.activeLayer = len(self.layers)-1
[ "def", "newLayer", "(", "self", ",", "effect", "=", "''", ")", ":", "self", ".", "layers", ".", "append", "(", "Layer", "(", "effect", "=", "effect", ")", ")", "self", ".", "activeLayer", "=", "len", "(", "self", ".", "layers", ")", "-", "1" ]
Creates a new :py:class:`Layer` and sets it as the active layer.

:param effect: A string with the blend mode for that layer that will be used during the rendering process. The accepted values are: :code:`'soft_light','lighten','screen','dodge','addition','darken','multiply','hard_light','difference','subtract','grain_extract','grain_merge','divide','overlay'`.
:rtype: Nothing.
[ "Creates", "a", "new", ":", "py", ":", "class", ":", "Layer", "and", "sets", "it", "as", "the", "active", "layer", ".", ":", "param", "effect", ":", "A", "string", "with", "the", "blend", "mode", "for", "that", "layer", "that", "will", "be", "used", "during", "the", "rendering", "process", ".", "The", "accepted", "values", "are", ":", ":", "code", ":", "soft_light", "lighten", "screen", "dodge", "addition", "darken", "multiply", "hard_light", "difference", "subtract", "grain_extract", "grain_merge", "divide", "overlay", ".", ":", "rtype", ":", "Nothing", "." ]
python
train
56.444444
RudolfCardinal/pythonlib
cardinal_pythonlib/django/mail.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/django/mail.py#L93-L126
def open(self) -> bool: """ Ensures we have a connection to the email server. Returns whether or not a new connection was required (True or False). """ if self.connection: # Nothing to do if the connection is already open. return False connection_params = {'local_hostname': DNS_NAME.get_fqdn()} if self.timeout is not None: connection_params['timeout'] = self.timeout try: self.connection = smtplib.SMTP(self.host, self.port, **connection_params) # TLS context = ssl.SSLContext(self._protocol()) if self.ssl_certfile: context.load_cert_chain(certfile=self.ssl_certfile, keyfile=self.ssl_keyfile) self.connection.ehlo() self.connection.starttls(context=context) self.connection.ehlo() if self.username and self.password: self.connection.login(self.username, self.password) log.debug("Successful SMTP connection/login") else: log.debug("Successful SMTP connection (without login)") return True except smtplib.SMTPException: log.debug("SMTP connection and/or login failed") if not self.fail_silently: raise
[ "def", "open", "(", "self", ")", "->", "bool", ":", "if", "self", ".", "connection", ":", "# Nothing to do if the connection is already open.", "return", "False", "connection_params", "=", "{", "'local_hostname'", ":", "DNS_NAME", ".", "get_fqdn", "(", ")", "}", "if", "self", ".", "timeout", "is", "not", "None", ":", "connection_params", "[", "'timeout'", "]", "=", "self", ".", "timeout", "try", ":", "self", ".", "connection", "=", "smtplib", ".", "SMTP", "(", "self", ".", "host", ",", "self", ".", "port", ",", "*", "*", "connection_params", ")", "# TLS", "context", "=", "ssl", ".", "SSLContext", "(", "self", ".", "_protocol", "(", ")", ")", "if", "self", ".", "ssl_certfile", ":", "context", ".", "load_cert_chain", "(", "certfile", "=", "self", ".", "ssl_certfile", ",", "keyfile", "=", "self", ".", "ssl_keyfile", ")", "self", ".", "connection", ".", "ehlo", "(", ")", "self", ".", "connection", ".", "starttls", "(", "context", "=", "context", ")", "self", ".", "connection", ".", "ehlo", "(", ")", "if", "self", ".", "username", "and", "self", ".", "password", ":", "self", ".", "connection", ".", "login", "(", "self", ".", "username", ",", "self", ".", "password", ")", "log", ".", "debug", "(", "\"Successful SMTP connection/login\"", ")", "else", ":", "log", ".", "debug", "(", "\"Successful SMTP connection (without login)\"", ")", "return", "True", "except", "smtplib", ".", "SMTPException", ":", "log", ".", "debug", "(", "\"SMTP connection and/or login failed\"", ")", "if", "not", "self", ".", "fail_silently", ":", "raise" ]
Ensures we have a connection to the email server. Returns whether or not a new connection was required (True or False).
[ "Ensures", "we", "have", "a", "connection", "to", "the", "email", "server", ".", "Returns", "whether", "or", "not", "a", "new", "connection", "was", "required", "(", "True", "or", "False", ")", "." ]
python
train
40.823529
rodluger/everest
everest/missions/k2/utils.py
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/utils.py#L315-L331
def Module(EPIC, campaign=None): ''' Returns the module number for a given EPIC target. ''' channel = Channel(EPIC, campaign=campaign) nums = {2: 1, 3: 5, 4: 9, 6: 13, 7: 17, 8: 21, 9: 25, 10: 29, 11: 33, 12: 37, 13: 41, 14: 45, 15: 49, 16: 53, 17: 57, 18: 61, 19: 65, 20: 69, 22: 73, 23: 77, 24: 81} for c in [channel, channel - 1, channel - 2, channel - 3]: if c in nums.values(): for mod, chan in nums.items(): if chan == c: return mod return None
[ "def", "Module", "(", "EPIC", ",", "campaign", "=", "None", ")", ":", "channel", "=", "Channel", "(", "EPIC", ",", "campaign", "=", "campaign", ")", "nums", "=", "{", "2", ":", "1", ",", "3", ":", "5", ",", "4", ":", "9", ",", "6", ":", "13", ",", "7", ":", "17", ",", "8", ":", "21", ",", "9", ":", "25", ",", "10", ":", "29", ",", "11", ":", "33", ",", "12", ":", "37", ",", "13", ":", "41", ",", "14", ":", "45", ",", "15", ":", "49", ",", "16", ":", "53", ",", "17", ":", "57", ",", "18", ":", "61", ",", "19", ":", "65", ",", "20", ":", "69", ",", "22", ":", "73", ",", "23", ":", "77", ",", "24", ":", "81", "}", "for", "c", "in", "[", "channel", ",", "channel", "-", "1", ",", "channel", "-", "2", ",", "channel", "-", "3", "]", ":", "if", "c", "in", "nums", ".", "values", "(", ")", ":", "for", "mod", ",", "chan", "in", "nums", ".", "items", "(", ")", ":", "if", "chan", "==", "c", ":", "return", "mod", "return", "None" ]
Returns the module number for a given EPIC target.
[ "Returns", "the", "module", "number", "for", "a", "given", "EPIC", "target", "." ]
python
train
32.705882
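The loop above scans up to four channels because each K2 module spans four consecutive channels; a hedged equivalent that inverts the channel map once (module_for_channel is an illustrative name, not the library's API):

nums = {2: 1, 3: 5, 4: 9, 6: 13, 7: 17, 8: 21, 9: 25, 10: 29,
        11: 33, 12: 37, 13: 41, 14: 45, 15: 49, 16: 53, 17: 57,
        18: 61, 19: 65, 20: 69, 22: 73, 23: 77, 24: 81}
chan_to_mod = {chan: mod for mod, chan in nums.items()}

def module_for_channel(channel):
    # Each module's four channels start at nums[mod], so walk back up to 3.
    for c in (channel, channel - 1, channel - 2, channel - 3):
        if c in chan_to_mod:
            return chan_to_mod[c]
    return None

print(module_for_channel(7))  # 3 (channels 5-8 belong to module 3)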
bspaans/python-mingus
mingus/core/chords.py
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/core/chords.py#L1021-L1082
def determine_extended_chord5(chord, shorthand=False, no_inversions=False, no_polychords=False): """Determine the names of an extended chord.""" if len(chord) != 5: # warning raise exeption: not an extended chord return False def inversion_exhauster(chord, shorthand, tries, result, polychords): """Recursive helper function.""" def add_result(short): result.append((short, tries, chord[0])) triads = determine_triad(chord[:3], True, True) sevenths = determine_seventh(chord[:4], True, True, True) # Determine polychords if tries == 1 and not no_polychords: polychords += determine_polychords(chord, shorthand) intval4 = intervals.determine(chord[0], chord[4]) for seventh in sevenths: seventh = seventh[len(chord[0]):] if seventh == 'M7': if intval4 == 'major second': add_result('M9') elif seventh == 'm7': if intval4 == 'major second': add_result('m9') elif intval4 == 'perfect fourth': add_result('m11') elif seventh == '7': if intval4 == 'major second': add_result('9') elif intval4 == 'minor second': add_result('7b9') elif intval4 == 'augmented second': add_result('7#9') elif intval4 == 'minor third': add_result('7b12') elif intval4 == 'augmented fourth': add_result('7#11') elif intval4 == 'major sixth': add_result('13') elif seventh == 'M6': if intval4 == 'major second': add_result('6/9') elif intval4 == 'minor seventh': add_result('6/7') if tries != 5 and not no_inversions: return inversion_exhauster([chord[-1]] + chord[:-1], shorthand, tries + 1, result, polychords) else: res = [] for r in result: if shorthand: res.append(r[2] + r[0]) else: res.append(r[2] + chord_shorthand_meaning[r[0]] + int_desc(r[1])) return res + polychords return inversion_exhauster(chord, shorthand, 1, [], [])
[ "def", "determine_extended_chord5", "(", "chord", ",", "shorthand", "=", "False", ",", "no_inversions", "=", "False", ",", "no_polychords", "=", "False", ")", ":", "if", "len", "(", "chord", ")", "!=", "5", ":", "# warning raise exeption: not an extended chord", "return", "False", "def", "inversion_exhauster", "(", "chord", ",", "shorthand", ",", "tries", ",", "result", ",", "polychords", ")", ":", "\"\"\"Recursive helper function.\"\"\"", "def", "add_result", "(", "short", ")", ":", "result", ".", "append", "(", "(", "short", ",", "tries", ",", "chord", "[", "0", "]", ")", ")", "triads", "=", "determine_triad", "(", "chord", "[", ":", "3", "]", ",", "True", ",", "True", ")", "sevenths", "=", "determine_seventh", "(", "chord", "[", ":", "4", "]", ",", "True", ",", "True", ",", "True", ")", "# Determine polychords", "if", "tries", "==", "1", "and", "not", "no_polychords", ":", "polychords", "+=", "determine_polychords", "(", "chord", ",", "shorthand", ")", "intval4", "=", "intervals", ".", "determine", "(", "chord", "[", "0", "]", ",", "chord", "[", "4", "]", ")", "for", "seventh", "in", "sevenths", ":", "seventh", "=", "seventh", "[", "len", "(", "chord", "[", "0", "]", ")", ":", "]", "if", "seventh", "==", "'M7'", ":", "if", "intval4", "==", "'major second'", ":", "add_result", "(", "'M9'", ")", "elif", "seventh", "==", "'m7'", ":", "if", "intval4", "==", "'major second'", ":", "add_result", "(", "'m9'", ")", "elif", "intval4", "==", "'perfect fourth'", ":", "add_result", "(", "'m11'", ")", "elif", "seventh", "==", "'7'", ":", "if", "intval4", "==", "'major second'", ":", "add_result", "(", "'9'", ")", "elif", "intval4", "==", "'minor second'", ":", "add_result", "(", "'7b9'", ")", "elif", "intval4", "==", "'augmented second'", ":", "add_result", "(", "'7#9'", ")", "elif", "intval4", "==", "'minor third'", ":", "add_result", "(", "'7b12'", ")", "elif", "intval4", "==", "'augmented fourth'", ":", "add_result", "(", "'7#11'", ")", "elif", "intval4", "==", "'major sixth'", ":", "add_result", "(", "'13'", ")", "elif", "seventh", "==", "'M6'", ":", "if", "intval4", "==", "'major second'", ":", "add_result", "(", "'6/9'", ")", "elif", "intval4", "==", "'minor seventh'", ":", "add_result", "(", "'6/7'", ")", "if", "tries", "!=", "5", "and", "not", "no_inversions", ":", "return", "inversion_exhauster", "(", "[", "chord", "[", "-", "1", "]", "]", "+", "chord", "[", ":", "-", "1", "]", ",", "shorthand", ",", "tries", "+", "1", ",", "result", ",", "polychords", ")", "else", ":", "res", "=", "[", "]", "for", "r", "in", "result", ":", "if", "shorthand", ":", "res", ".", "append", "(", "r", "[", "2", "]", "+", "r", "[", "0", "]", ")", "else", ":", "res", ".", "append", "(", "r", "[", "2", "]", "+", "chord_shorthand_meaning", "[", "r", "[", "0", "]", "]", "+", "int_desc", "(", "r", "[", "1", "]", ")", ")", "return", "res", "+", "polychords", "return", "inversion_exhauster", "(", "chord", ",", "shorthand", ",", "1", ",", "[", "]", ",", "[", "]", ")" ]
Determine the names of an extended chord.
[ "Determine", "the", "names", "of", "an", "extended", "chord", "." ]
python
train
39.064516
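A usage sketch, assuming the public mingus entry point chords.determine(), which dispatches to determine_extended_chord5 for five-note chords; the expected shorthand below is a plausible result, not a verified one:

from mingus.core import chords

# C major ninth spelled C-E-G-B-D; a five-note chord exercises
# determine_extended_chord5 under the hood.
names = chords.determine(["C", "E", "G", "B", "D"], shorthand=True)
print(names)  # expected to contain 'CM9' among the recognized names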
dw/mitogen
ansible_mitogen/runner.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/ansible_mitogen/runner.py#L920-L928
def _get_args_contents(self): """ Mimic the argument formatting behaviour of ActionBase._execute_module(). """ return ' '.join( '%s=%s' % (key, shlex_quote(str(self.args[key]))) for key in self.args ) + ' '
[ "def", "_get_args_contents", "(", "self", ")", ":", "return", "' '", ".", "join", "(", "'%s=%s'", "%", "(", "key", ",", "shlex_quote", "(", "str", "(", "self", ".", "args", "[", "key", "]", ")", ")", ")", "for", "key", "in", "self", ".", "args", ")", "+", "' '" ]
Mimic the argument formatting behaviour of ActionBase._execute_module().
[ "Mimic", "the", "argument", "formatting", "behaviour", "of", "ActionBase", ".", "_execute_module", "()", "." ]
python
train
30
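The quoting behaviour is easy to check in isolation; this sketch re-implements the join with the stdlib quote function standing in for mitogen's shlex_quote alias, and the argument dict is made up:

try:
    from shlex import quote as shlex_quote   # Python 3
except ImportError:
    from pipes import quote as shlex_quote   # Python 2 fallback

args = {"state": "present", "name": "httpd service"}   # hypothetical args
contents = " ".join(
    "%s=%s" % (key, shlex_quote(str(args[key]))) for key in args
) + " "
print(repr(contents))  # "state=present name='httpd service' "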
JarryShaw/PyPCAPKit
src/const/ipx/socket.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/const/ipx/socket.py#L37-L56
def _missing_(cls, value): """Lookup function used when value is not found.""" if not (isinstance(value, int) and 0x0000 <= value <= 0xFFFF): raise ValueError('%r is not a valid %s' % (value, cls.__name__)) if 0x0001 <= value <= 0x0BB8: extend_enum(cls, 'Registered by Xerox [0x%s]' % hex(value)[2:].upper().zfill(4), value) return cls(value) if 0x0020 <= value <= 0x003F: extend_enum(cls, 'Experimental [0x%s]' % hex(value)[2:].upper().zfill(4), value) return cls(value) if 0x0BB9 <= value <= 0xFFFF: extend_enum(cls, 'Dynamically Assigned [0x%s]' % hex(value)[2:].upper().zfill(4), value) return cls(value) if 0x4000 <= value <= 0x4FFF: extend_enum(cls, 'Dynamically Assigned Socket Numbers [0x%s]' % hex(value)[2:].upper().zfill(4), value) return cls(value) if 0x8000 <= value <= 0xFFFF: extend_enum(cls, 'Statically Assigned Socket Numbers [0x%s]' % hex(value)[2:].upper().zfill(4), value) return cls(value) super()._missing_(value)
[ "def", "_missing_", "(", "cls", ",", "value", ")", ":", "if", "not", "(", "isinstance", "(", "value", ",", "int", ")", "and", "0x0000", "<=", "value", "<=", "0xFFFF", ")", ":", "raise", "ValueError", "(", "'%r is not a valid %s'", "%", "(", "value", ",", "cls", ".", "__name__", ")", ")", "if", "0x0001", "<=", "value", "<=", "0x0BB8", ":", "extend_enum", "(", "cls", ",", "'Registered by Xerox [0x%s]'", "%", "hex", "(", "value", ")", "[", "2", ":", "]", ".", "upper", "(", ")", ".", "zfill", "(", "4", ")", ",", "value", ")", "return", "cls", "(", "value", ")", "if", "0x0020", "<=", "value", "<=", "0x003F", ":", "extend_enum", "(", "cls", ",", "'Experimental [0x%s]'", "%", "hex", "(", "value", ")", "[", "2", ":", "]", ".", "upper", "(", ")", ".", "zfill", "(", "4", ")", ",", "value", ")", "return", "cls", "(", "value", ")", "if", "0x0BB9", "<=", "value", "<=", "0xFFFF", ":", "extend_enum", "(", "cls", ",", "'Dynamically Assigned [0x%s]'", "%", "hex", "(", "value", ")", "[", "2", ":", "]", ".", "upper", "(", ")", ".", "zfill", "(", "4", ")", ",", "value", ")", "return", "cls", "(", "value", ")", "if", "0x4000", "<=", "value", "<=", "0x4FFF", ":", "extend_enum", "(", "cls", ",", "'Dynamically Assigned Socket Numbers [0x%s]'", "%", "hex", "(", "value", ")", "[", "2", ":", "]", ".", "upper", "(", ")", ".", "zfill", "(", "4", ")", ",", "value", ")", "return", "cls", "(", "value", ")", "if", "0x8000", "<=", "value", "<=", "0xFFFF", ":", "extend_enum", "(", "cls", ",", "'Statically Assigned Socket Numbers [0x%s]'", "%", "hex", "(", "value", ")", "[", "2", ":", "]", ".", "upper", "(", ")", ".", "zfill", "(", "4", ")", ",", "value", ")", "return", "cls", "(", "value", ")", "super", "(", ")", ".", "_missing_", "(", "value", ")" ]
Lookup function used when value is not found.
[ "Lookup", "function", "used", "when", "value", "is", "not", "found", "." ]
python
train
55.65
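A hedged sketch of the same dynamic-member pattern, assuming extend_enum comes from the aenum package, whose extend_enum(cls, name, value) adds a member to an existing enumeration at runtime; the Socket class and lookup value here are illustrative:

from aenum import IntEnum, extend_enum

class Socket(IntEnum):                      # toy stand-in with one member
    NETWARE_CORE = 0x0451

    @classmethod
    def _missing_(cls, value):
        if not (isinstance(value, int) and 0x0000 <= value <= 0xFFFF):
            raise ValueError('%r is not a valid %s' % (value, cls.__name__))
        # Register the unknown value on the fly, then look it up again.
        extend_enum(cls, 'Unassigned [0x%s]' % hex(value)[2:].upper().zfill(4),
                    value)
        return cls(value)

print(Socket(0x9999).name)                  # Unassigned [0x9999]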
wrobstory/vincent
vincent/visualization.py
https://github.com/wrobstory/vincent/blob/c5a06e50179015fbb788a7a42e4570ff4467a9e9/vincent/visualization.py#L153-L161
def marks(value): """list or KeyedList of ``Mark`` : Mark definitions Marks are the visual objects (such as lines, bars, etc.) that represent the data in the visualization space. See the :class:`Mark` class for details. """ for i, entry in enumerate(value): _assert_is_type('marks[{0}]'.format(i), entry, Mark)
[ "def", "marks", "(", "value", ")", ":", "for", "i", ",", "entry", "in", "enumerate", "(", "value", ")", ":", "_assert_is_type", "(", "'marks[{0}]'", ".", "format", "(", "i", ")", ",", "entry", ",", "Mark", ")" ]
list or KeyedList of ``Mark`` : Mark definitions Marks are the visual objects (such as lines, bars, etc.) that represent the data in the visualization space. See the :class:`Mark` class for details.
[ "list", "or", "KeyedList", "of", "Mark", ":", "Mark", "definitions" ]
python
train
40.333333
jobovy/galpy
galpy/potential/TwoPowerSphericalPotential.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/TwoPowerSphericalPotential.py#L897-L915
def _Rforce(self,R,z,phi=0.,t=0.): """ NAME: _Rforce PURPOSE: evaluate the radial force for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: the radial force HISTORY: 2010-07-09 - Written - Bovy (NYU) """ Rz= R**2.+z**2. sqrtRz= numpy.sqrt(Rz) return R*(1./Rz/(self.a+sqrtRz)-numpy.log(1.+sqrtRz/self.a)/sqrtRz/Rz)
[ "def", "_Rforce", "(", "self", ",", "R", ",", "z", ",", "phi", "=", "0.", ",", "t", "=", "0.", ")", ":", "Rz", "=", "R", "**", "2.", "+", "z", "**", "2.", "sqrtRz", "=", "numpy", ".", "sqrt", "(", "Rz", ")", "return", "R", "*", "(", "1.", "/", "Rz", "/", "(", "self", ".", "a", "+", "sqrtRz", ")", "-", "numpy", ".", "log", "(", "1.", "+", "sqrtRz", "/", "self", ".", "a", ")", "/", "sqrtRz", "/", "Rz", ")" ]
NAME: _Rforce PURPOSE: evaluate the radial force for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: the radial force HISTORY: 2010-07-09 - Written - Bovy (NYU)
[ "NAME", ":", "_Rforce", "PURPOSE", ":", "evaluate", "the", "radial", "force", "for", "this", "potential", "INPUT", ":", "R", "-", "Galactocentric", "cylindrical", "radius", "z", "-", "vertical", "height", "phi", "-", "azimuth", "t", "-", "time", "OUTPUT", ":", "the", "radial", "force", "HISTORY", ":", "2010", "-", "07", "-", "09", "-", "Written", "-", "Bovy", "(", "NYU", ")" ]
python
train
27.684211
mfcloud/python-zvm-sdk
zvmsdk/vmops.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/zvmsdk/vmops.py#L140-L144
def guest_reset(self, userid): """Reset z/VM instance.""" LOG.info("Begin to reset vm %s", userid) self._smtclient.guest_reset(userid) LOG.info("Complete reset vm %s", userid)
[ "def", "guest_reset", "(", "self", ",", "userid", ")", ":", "LOG", ".", "info", "(", "\"Begin to reset vm %s\"", ",", "userid", ")", "self", ".", "_smtclient", ".", "guest_reset", "(", "userid", ")", "LOG", ".", "info", "(", "\"Complete reset vm %s\"", ",", "userid", ")" ]
Reset z/VM instance.
[ "Reset", "z", "/", "VM", "instance", "." ]
python
train
40.6
econ-ark/HARK
HARK/ConsumptionSaving/ConsMarkovModel.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsMarkovModel.py#L245-L263
def calcEndOfPrdvPP(self): ''' Calculates end-of-period marginal marginal value using a pre-defined array of next period market resources in self.mNrmNext. Parameters ---------- none Returns ------- EndOfPrdvPP : np.array End-of-period marginal marginal value of assets at each value in the grid of assets. ''' EndOfPrdvPP = self.DiscFacEff*self.Rfree*self.Rfree*self.PermGroFac**(-self.CRRA-1.0)*\ np.sum(self.PermShkVals_temp**(-self.CRRA-1.0)*self.vPPfuncNext(self.mNrmNext) *self.ShkPrbs_temp,axis=0) return EndOfPrdvPP
[ "def", "calcEndOfPrdvPP", "(", "self", ")", ":", "EndOfPrdvPP", "=", "self", ".", "DiscFacEff", "*", "self", ".", "Rfree", "*", "self", ".", "Rfree", "*", "self", ".", "PermGroFac", "**", "(", "-", "self", ".", "CRRA", "-", "1.0", ")", "*", "np", ".", "sum", "(", "self", ".", "PermShkVals_temp", "**", "(", "-", "self", ".", "CRRA", "-", "1.0", ")", "*", "self", ".", "vPPfuncNext", "(", "self", ".", "mNrmNext", ")", "*", "self", ".", "ShkPrbs_temp", ",", "axis", "=", "0", ")", "return", "EndOfPrdvPP" ]
Calculates end-of-period marginal marginal value using a pre-defined array of next period market resources in self.mNrmNext. Parameters ---------- none Returns ------- EndOfPrdvPP : np.array End-of-period marginal marginal value of assets at each value in the grid of assets.
[ "Calculates", "end", "-", "of", "-", "period", "marginal", "marginal", "value", "using", "a", "pre", "-", "defined", "array", "of", "next", "period", "market", "resources", "in", "self", ".", "mNrmNext", "." ]
python
train
35.315789
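Reading the expression off the code: writing R for Rfree, \(\Gamma\) for PermGroFac, \(\rho\) for CRRA, \(\hat{\beta}\) for DiscFacEff, and letting the permanent shocks \(\psi_i\) (PermShkVals_temp) carry probabilities \(p_i\) (ShkPrbs_temp), the sum computed above is

\[
\mathtt{EndOfPrdvPP}(a) \;=\; \hat{\beta}\, R^{2}\, \Gamma^{-\rho-1}
\sum_{i} p_i\, \psi_i^{-\rho-1}\, v''_{t+1}\!\bigl(m_{t+1,i}\bigr),
\]

evaluated at the next-period market resources \(m_{t+1,i}\) stored in self.mNrmNext.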
ABI-Software/MeshParser
src/meshparser/base/parser.py
https://github.com/ABI-Software/MeshParser/blob/08dc0ce7c44d0149b443261ff6d3708e28a928e7/src/meshparser/base/parser.py#L41-L69
def getElements(self, zero_based=True, pared=False): """ Get the elements of the mesh as a list of point index list. :param zero_based: use zero based index of points if true otherwise use 1-based index of points. :param pared: use the pared down list of points :return: A list of point index lists """ points = self._points[:] elements = self._elements[:] offset = 0 if not zero_based: offset = 1 np = None if pared: np = NodePare() np.addPoints(points) np.parePoints() if pared or not zero_based: modified_elements = [] for element in elements: modified_element = [index + offset if np is None else np.getParedIndex(index) + offset for index in element] modified_elements.append(modified_element) elements = modified_elements return elements
[ "def", "getElements", "(", "self", ",", "zero_based", "=", "True", ",", "pared", "=", "False", ")", ":", "points", "=", "self", ".", "_points", "[", ":", "]", "elements", "=", "self", ".", "_elements", "[", ":", "]", "offset", "=", "0", "if", "not", "zero_based", ":", "offset", "=", "1", "np", "=", "None", "if", "pared", ":", "np", "=", "NodePare", "(", ")", "np", ".", "addPoints", "(", "points", ")", "np", ".", "parePoints", "(", ")", "if", "pared", "or", "not", "zero_based", ":", "modified_elements", "=", "[", "]", "for", "element", "in", "elements", ":", "modified_element", "=", "[", "index", "+", "offset", "if", "np", "is", "None", "else", "np", ".", "getParedIndex", "(", "index", ")", "+", "offset", "for", "index", "in", "element", "]", "modified_elements", ".", "append", "(", "modified_element", ")", "elements", "=", "modified_elements", "return", "elements" ]
Get the elements of the mesh as a list of point index list. :param zero_based: use zero based index of points if true otherwise use 1-based index of points. :param pared: use the pared down list of points :return: A list of point index lists
[ "Get", "the", "elements", "of", "the", "mesh", "as", "a", "list", "of", "point", "index", "list", ".", ":", "param", "zero_based", ":", "use", "zero", "based", "index", "of", "points", "if", "true", "otherwise", "use", "1", "-", "based", "index", "of", "points", ".", ":", "param", "pared", ":", "use", "the", "pared", "down", "list", "of", "points", ":", "return", ":", "A", "list", "of", "point", "index", "lists" ]
python
train
33.896552
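A small sketch of the index-offset behaviour with hypothetical element data (two triangles sharing an edge), mirroring the zero_based=False path of getElements():

# Two triangles over four points, zero-based as stored internally.
elements = [[0, 1, 2], [2, 1, 3]]
offset = 1                                   # zero_based=False adds 1
one_based = [[index + offset for index in element] for element in elements]
print(one_based)                             # [[1, 2, 3], [3, 2, 4]]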
dixudx/rtcclient
rtcclient/template.py
https://github.com/dixudx/rtcclient/blob/1721dd0b047478f5bdd6359b07a2c503cfafd86f/rtcclient/template.py#L43-L81
def render(self, template, **kwargs):
        """Renders the template

        :param template: The template to render.
            The template is actually a file, which is usually generated
            by :class:`rtcclient.template.Templater.getTemplate` and can also
            be modified by the user accordingly.
        :param kwargs: The `kwargs` dict is used to fill the template.
            These two parameters are mandatory:

                * description
                * title

            Some of the parameters below (which may not be included in some
            customized workitem types) are mandatory if `keep` (parameter in
            :class:`rtcclient.template.Templater.getTemplate`) is set to
            `False`, and optional otherwise.

                * teamArea (Team Area)
                * ownedBy (Owned By)
                * plannedFor(Planned For)
                * severity(Severity)
                * priority(Priority)
                * filedAgainst(Filed Against)

            All these needed keywords/attributes/fields can be
            retrieved by :class:`rtcclient.template.Templater.listFields`

        :return: the :class:`string` object
        :rtype: string
        """

        try:
            temp = self.environment.get_template(template)
            return temp.render(**kwargs)
        except AttributeError:
            err_msg = "Invalid value for 'template'"
            self.log.error(err_msg)
            raise exception.BadValue(err_msg)
[ "def", "render", "(", "self", ",", "template", ",", "*", "*", "kwargs", ")", ":", "try", ":", "temp", "=", "self", ".", "environment", ".", "get_template", "(", "template", ")", "return", "temp", ".", "render", "(", "*", "*", "kwargs", ")", "except", "AttributeError", ":", "err_msg", "=", "\"Invalid value for 'template'\"", "self", ".", "log", ".", "error", "(", "err_msg", ")", "raise", "exception", ".", "BadValue", "(", "err_msg", ")" ]
Renders the template

:param template: The template to render.
    The template is actually a file, which is usually generated
    by :class:`rtcclient.template.Templater.getTemplate` and can also
    be modified by the user accordingly.
:param kwargs: The `kwargs` dict is used to fill the template.
    These two parameters are mandatory:

        * description
        * title

    Some of the parameters below (which may not be included in some
    customized workitem types) are mandatory if `keep` (parameter in
    :class:`rtcclient.template.Templater.getTemplate`) is set to
    `False`, and optional otherwise.

        * teamArea (Team Area)
        * ownedBy (Owned By)
        * plannedFor(Planned For)
        * severity(Severity)
        * priority(Priority)
        * filedAgainst(Filed Against)

    All these needed keywords/attributes/fields can be
    retrieved by :class:`rtcclient.template.Templater.listFields`

:return: the :class:`string` object
:rtype: string
[ "Renders", "the", "template" ]
python
train
37.512821
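What render() does under the hood can be sketched with plain Jinja2 (the Templater wraps a jinja2 Environment and converts template-lookup failures into BadValue); the template name and field values below are made up:

from jinja2 import Environment, DictLoader

env = Environment(loader=DictLoader({
    # Hypothetical workitem template; real ones come from getTemplate().
    "workitem.xml": "<title>{{ title }}</title><desc>{{ description }}</desc>",
}))
temp = env.get_template("workitem.xml")
print(temp.render(title="Fix login bug", description="Steps to reproduce"))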
nornir-automation/nornir
nornir/core/connections.py
https://github.com/nornir-automation/nornir/blob/3425c47fd870db896cb80f619bae23bd98d50c74/nornir/core/connections.py#L63-L82
def register(cls, name: str, plugin: Type[ConnectionPlugin]) -> None: """Registers a connection plugin with a specified name Args: name: name of the connection plugin to register plugin: defined connection plugin class Raises: :obj:`nornir.core.exceptions.ConnectionPluginAlreadyRegistered` if another plugin with the specified name was already registered """ existing_plugin = cls.available.get(name) if existing_plugin is None: cls.available[name] = plugin elif existing_plugin != plugin: raise ConnectionPluginAlreadyRegistered( f"Connection plugin {plugin.__name__} can't be registered as " f"{name!r} because plugin {existing_plugin.__name__} " f"was already registered under this name" )
[ "def", "register", "(", "cls", ",", "name", ":", "str", ",", "plugin", ":", "Type", "[", "ConnectionPlugin", "]", ")", "->", "None", ":", "existing_plugin", "=", "cls", ".", "available", ".", "get", "(", "name", ")", "if", "existing_plugin", "is", "None", ":", "cls", ".", "available", "[", "name", "]", "=", "plugin", "elif", "existing_plugin", "!=", "plugin", ":", "raise", "ConnectionPluginAlreadyRegistered", "(", "f\"Connection plugin {plugin.__name__} can't be registered as \"", "f\"{name!r} because plugin {existing_plugin.__name__} \"", "f\"was already registered under this name\"", ")" ]
Registers a connection plugin with a specified name Args: name: name of the connection plugin to register plugin: defined connection plugin class Raises: :obj:`nornir.core.exceptions.ConnectionPluginAlreadyRegistered` if another plugin with the specified name was already registered
[ "Registers", "a", "connection", "plugin", "with", "a", "specified", "name" ]
python
train
43.4
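A self-contained sketch of the registry semantics; the names below are stand-ins for the real nornir classes, and register() is shown as a plain function rather than a classmethod:

class ConnectionPluginAlreadyRegistered(Exception):
    pass

available = {}                               # mirrors cls.available

def register(name, plugin):
    existing = available.get(name)
    if existing is None:
        available[name] = plugin             # first registration wins
    elif existing != plugin:
        raise ConnectionPluginAlreadyRegistered(
            f"{plugin.__name__} can't be registered as {name!r}"
        )

class Dummy:
    pass

register("dummy", Dummy)
register("dummy", Dummy)                     # idempotent for the same class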
CyberReboot/vent
vent/extras/rmq_es_connector/rmq_es_connector.py
https://github.com/CyberReboot/vent/blob/9956a09146b11a89a0eabab3bc7ce8906d124885/vent/extras/rmq_es_connector/rmq_es_connector.py#L33-L58
def connections(self, wait): """ wait for connections to both rabbitmq and elasticsearch to be made before binding a routing key to a channel and sending messages to elasticsearch """ while wait: try: params = pika.ConnectionParameters(host=self.rmq_host, port=self.rmq_port) connection = pika.BlockingConnection(params) self.channel = connection.channel() self.channel.exchange_declare(exchange='topic_recs', exchange_type='topic') result = self.channel.queue_declare() self.queue_name = result.method.queue self.es_conn = Elasticsearch([{'host': self.es_host, 'port': self.es_port}]) wait = False print('connected to rabbitmq and elasticsearch...') except Exception as e: # pragma: no cover print(str(e)) print('waiting for connection to rabbitmq...' + str(e)) time.sleep(2) wait = True
[ "def", "connections", "(", "self", ",", "wait", ")", ":", "while", "wait", ":", "try", ":", "params", "=", "pika", ".", "ConnectionParameters", "(", "host", "=", "self", ".", "rmq_host", ",", "port", "=", "self", ".", "rmq_port", ")", "connection", "=", "pika", ".", "BlockingConnection", "(", "params", ")", "self", ".", "channel", "=", "connection", ".", "channel", "(", ")", "self", ".", "channel", ".", "exchange_declare", "(", "exchange", "=", "'topic_recs'", ",", "exchange_type", "=", "'topic'", ")", "result", "=", "self", ".", "channel", ".", "queue_declare", "(", ")", "self", ".", "queue_name", "=", "result", ".", "method", ".", "queue", "self", ".", "es_conn", "=", "Elasticsearch", "(", "[", "{", "'host'", ":", "self", ".", "es_host", ",", "'port'", ":", "self", ".", "es_port", "}", "]", ")", "wait", "=", "False", "print", "(", "'connected to rabbitmq and elasticsearch...'", ")", "except", "Exception", "as", "e", ":", "# pragma: no cover", "print", "(", "str", "(", "e", ")", ")", "print", "(", "'waiting for connection to rabbitmq...'", "+", "str", "(", "e", ")", ")", "time", ".", "sleep", "(", "2", ")", "wait", "=", "True" ]
wait for connections to both rabbitmq and elasticsearch to be made before binding a routing key to a channel and sending messages to elasticsearch
[ "wait", "for", "connections", "to", "both", "rabbitmq", "and", "elasticsearch", "to", "be", "made", "before", "binding", "a", "routing", "key", "to", "a", "channel", "and", "sending", "messages", "to", "elasticsearch" ]
python
train
45.730769
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L951-L970
def sph_coords_to_pose(theta, psi): """ Convert spherical coordinates to a pose. Parameters ---------- theta : float azimuth angle psi : float elevation angle Returns ------- :obj:`RigidTransformation` rigid transformation corresponding to rotation with no translation """ # rotate about the z and y axes individually rot_z = RigidTransform.z_axis_rotation(theta) rot_y = RigidTransform.y_axis_rotation(psi) R = rot_y.dot(rot_z) return RigidTransform(rotation=R)
[ "def", "sph_coords_to_pose", "(", "theta", ",", "psi", ")", ":", "# rotate about the z and y axes individually", "rot_z", "=", "RigidTransform", ".", "z_axis_rotation", "(", "theta", ")", "rot_y", "=", "RigidTransform", ".", "y_axis_rotation", "(", "psi", ")", "R", "=", "rot_y", ".", "dot", "(", "rot_z", ")", "return", "RigidTransform", "(", "rotation", "=", "R", ")" ]
Convert spherical coordinates to a pose. Parameters ---------- theta : float azimuth angle psi : float elevation angle Returns ------- :obj:`RigidTransformation` rigid transformation corresponding to rotation with no translation
[ "Convert", "spherical", "coordinates", "to", "a", "pose", ".", "Parameters", "----------", "theta", ":", "float", "azimuth", "angle", "psi", ":", "float", "elevation", "angle" ]
python
train
30.05
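A numerical sketch of the composition R = R_y(psi) . R_z(theta) performed above, assuming the standard right-handed axis-rotation matrices for RigidTransform's z_axis_rotation and y_axis_rotation helpers:

import numpy as np

def rot_z(t):
    return np.array([[np.cos(t), -np.sin(t), 0.0],
                     [np.sin(t),  np.cos(t), 0.0],
                     [0.0,        0.0,       1.0]])

def rot_y(p):
    return np.array([[ np.cos(p), 0.0, np.sin(p)],
                     [ 0.0,       1.0, 0.0      ],
                     [-np.sin(p), 0.0, np.cos(p)]])

theta, psi = np.pi / 4, np.pi / 6            # sample azimuth and elevation
R = rot_y(psi).dot(rot_z(theta))             # same order as the function
assert np.allclose(R.dot(R.T), np.eye(3))    # composition is still a rotation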
mikedh/trimesh
trimesh/bounds.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/bounds.py#L373-L407
def corners(bounds): """ Given a pair of axis aligned bounds, return all 8 corners of the bounding box. Parameters ---------- bounds : (2,3) or (2,2) float Axis aligned bounds Returns ---------- corners : (8,3) float Corner vertices of the cube """ bounds = np.asanyarray(bounds, dtype=np.float64) if util.is_shape(bounds, (2, 2)): bounds = np.column_stack((bounds, [0, 0])) elif not util.is_shape(bounds, (2, 3)): raise ValueError('bounds must be (2,2) or (2,3)!') minx, miny, minz, maxx, maxy, maxz = np.arange(6) corner_index = np.array([minx, miny, minz, maxx, miny, minz, maxx, maxy, minz, minx, maxy, minz, minx, miny, maxz, maxx, miny, maxz, maxx, maxy, maxz, minx, maxy, maxz]).reshape((-1, 3)) corners = bounds.reshape(-1)[corner_index] return corners
[ "def", "corners", "(", "bounds", ")", ":", "bounds", "=", "np", ".", "asanyarray", "(", "bounds", ",", "dtype", "=", "np", ".", "float64", ")", "if", "util", ".", "is_shape", "(", "bounds", ",", "(", "2", ",", "2", ")", ")", ":", "bounds", "=", "np", ".", "column_stack", "(", "(", "bounds", ",", "[", "0", ",", "0", "]", ")", ")", "elif", "not", "util", ".", "is_shape", "(", "bounds", ",", "(", "2", ",", "3", ")", ")", ":", "raise", "ValueError", "(", "'bounds must be (2,2) or (2,3)!'", ")", "minx", ",", "miny", ",", "minz", ",", "maxx", ",", "maxy", ",", "maxz", "=", "np", ".", "arange", "(", "6", ")", "corner_index", "=", "np", ".", "array", "(", "[", "minx", ",", "miny", ",", "minz", ",", "maxx", ",", "miny", ",", "minz", ",", "maxx", ",", "maxy", ",", "minz", ",", "minx", ",", "maxy", ",", "minz", ",", "minx", ",", "miny", ",", "maxz", ",", "maxx", ",", "miny", ",", "maxz", ",", "maxx", ",", "maxy", ",", "maxz", ",", "minx", ",", "maxy", ",", "maxz", "]", ")", ".", "reshape", "(", "(", "-", "1", ",", "3", ")", ")", "corners", "=", "bounds", ".", "reshape", "(", "-", "1", ")", "[", "corner_index", "]", "return", "corners" ]
Given a pair of axis aligned bounds, return all 8 corners of the bounding box. Parameters ---------- bounds : (2,3) or (2,2) float Axis aligned bounds Returns ---------- corners : (8,3) float Corner vertices of the cube
[ "Given", "a", "pair", "of", "axis", "aligned", "bounds", "return", "all", "8", "corners", "of", "the", "bounding", "box", "." ]
python
train
29.457143
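Usage sketch: expanding the unit box's axis-aligned bounds into its eight corner vertices with the function above.

import numpy as np
from trimesh.bounds import corners

bounds = np.array([[0.0, 0.0, 0.0],          # min corner
                   [1.0, 1.0, 1.0]])         # max corner
box_corners = corners(bounds)
print(box_corners.shape)                     # (8, 3)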
saltstack/salt
salt/loader.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/loader.py#L1096-L1155
def _inject_into_mod(mod, name, value, force_lock=False):
    '''
    Inject a variable into a module. This is used to inject "globals" like
    ``__salt__``, ``__pillar``, or ``grains``.

    Instead of injecting the value directly, a ``ThreadLocalProxy`` is
    created. If such a proxy is already present under the specified name, it
    is updated with the new value. This update only affects the current
    thread, so that the same name can refer to different values depending
    on the thread of execution.

    This is important for data that is not truly global. For example, pillar
    data might be dynamically overridden through function parameters and
    thus the actual values available in pillar might depend on the thread
    that is calling a module.

    mod:
        module object into which the value is going to be injected.

    name:
        name of the variable that is injected into the module.

    value:
        value that is injected into the variable. The value is not injected
        directly, but instead set as the new reference of the proxy that has
        been created for the variable.

    force_lock:
        whether the lock should be acquired before checking whether a proxy
        object for the specified name has already been injected into the
        module. If ``False`` (the default), this function checks for the
        module's variable without acquiring the lock and only acquires the
        lock if a new proxy has to be created and injected.
    '''
    old_value = getattr(mod, name, None)
    # We use a double-checked locking scheme in order to avoid taking the lock
    # when a proxy object has already been injected.
    # In most programming languages, double-checked locking is considered
    # unsafe when used without explicit memory barriers because one might read
    # an uninitialized value. In CPython it is safe due to the global
    # interpreter lock (GIL). In Python implementations that do not have the
    # GIL, it could be unsafe, but at least Jython also guarantees that (for
    # Python objects) memory is not corrupted when writing and reading without
    # explicit synchronization
    # (http://www.jython.org/jythonbook/en/1.0/Concurrency.html).
    # Please note that in order to make this code safe in a runtime environment
    # that does not make this guarantees, it is not sufficient. The
    # ThreadLocalProxy must also be created with fallback_to_shared set to
    # False or a lock must be added to the ThreadLocalProxy.
    if force_lock:
        with _inject_into_mod.lock:
            if isinstance(old_value, ThreadLocalProxy):
                ThreadLocalProxy.set_reference(old_value, value)
            else:
                setattr(mod, name, ThreadLocalProxy(value, True))
    else:
        if isinstance(old_value, ThreadLocalProxy):
            ThreadLocalProxy.set_reference(old_value, value)
        else:
            _inject_into_mod(mod, name, value, True)
[ "def", "_inject_into_mod", "(", "mod", ",", "name", ",", "value", ",", "force_lock", "=", "False", ")", ":", "old_value", "=", "getattr", "(", "mod", ",", "name", ",", "None", ")", "# We use a double-checked locking scheme in order to avoid taking the lock", "# when a proxy object has already been injected.", "# In most programming languages, double-checked locking is considered", "# unsafe when used without explicit memory barriers because one might read", "# an uninitialized value. In CPython it is safe due to the global", "# interpreter lock (GIL). In Python implementations that do not have the", "# GIL, it could be unsafe, but at least Jython also guarantees that (for", "# Python objects) memory is not corrupted when writing and reading without", "# explicit synchronization", "# (http://www.jython.org/jythonbook/en/1.0/Concurrency.html).", "# Please note that in order to make this code safe in a runtime environment", "# that does not make this guarantees, it is not sufficient. The", "# ThreadLocalProxy must also be created with fallback_to_shared set to", "# False or a lock must be added to the ThreadLocalProxy.", "if", "force_lock", ":", "with", "_inject_into_mod", ".", "lock", ":", "if", "isinstance", "(", "old_value", ",", "ThreadLocalProxy", ")", ":", "ThreadLocalProxy", ".", "set_reference", "(", "old_value", ",", "value", ")", "else", ":", "setattr", "(", "mod", ",", "name", ",", "ThreadLocalProxy", "(", "value", ",", "True", ")", ")", "else", ":", "if", "isinstance", "(", "old_value", ",", "ThreadLocalProxy", ")", ":", "ThreadLocalProxy", ".", "set_reference", "(", "old_value", ",", "value", ")", "else", ":", "_inject_into_mod", "(", "mod", ",", "name", ",", "value", ",", "True", ")" ]
Inject a variable into a module. This is used to inject "globals" like
``__salt__``, ``__pillar``, or ``grains``.

Instead of injecting the value directly, a ``ThreadLocalProxy`` is
created. If such a proxy is already present under the specified name, it
is updated with the new value. This update only affects the current
thread, so that the same name can refer to different values depending
on the thread of execution.

This is important for data that is not truly global. For example, pillar
data might be dynamically overridden through function parameters and
thus the actual values available in pillar might depend on the thread
that is calling a module.

mod:
    module object into which the value is going to be injected.

name:
    name of the variable that is injected into the module.

value:
    value that is injected into the variable. The value is not injected
    directly, but instead set as the new reference of the proxy that has
    been created for the variable.

force_lock:
    whether the lock should be acquired before checking whether a proxy
    object for the specified name has already been injected into the
    module. If ``False`` (the default), this function checks for the
    module's variable without acquiring the lock and only acquires the
    lock if a new proxy has to be created and injected.
[ "Inject", "a", "variable", "into", "a", "module", ".", "This", "is", "used", "to", "inject", "globals", "like", "__salt__", "__pillar", "or", "grains", "." ]
python
train
48.45
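A generic, self-contained sketch of the double-checked locking idiom the docstring describes; ProxyType and its instance-method set_reference stand in for Salt's ThreadLocalProxy API and are not the real classes:

import threading

_lock = threading.Lock()

class ProxyType:                             # stand-in for ThreadLocalProxy
    def __init__(self, value):
        self.ref = value
    def set_reference(self, value):
        self.ref = value

def inject(mod, name, value):
    old = getattr(mod, name, None)
    if isinstance(old, ProxyType):           # fast path: no lock needed
        old.set_reference(value)
        return
    with _lock:                              # slow path: re-check under lock
        old = getattr(mod, name, None)
        if isinstance(old, ProxyType):
            old.set_reference(value)
        else:
            setattr(mod, name, ProxyType(value))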
PyCQA/pylint-django
pylint_django/augmentations/__init__.py
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L483-L498
def _attribute_is_magic(node, attrs, parents): """Checks that node is an attribute used inside one of allowed parents""" if node.attrname not in attrs: return False if not node.last_child(): return False try: for cls in node.last_child().inferred(): if isinstance(cls, Super): cls = cls._self_class # pylint: disable=protected-access if node_is_subclass(cls, *parents) or cls.qname() in parents: return True except InferenceError: pass return False
[ "def", "_attribute_is_magic", "(", "node", ",", "attrs", ",", "parents", ")", ":", "if", "node", ".", "attrname", "not", "in", "attrs", ":", "return", "False", "if", "not", "node", ".", "last_child", "(", ")", ":", "return", "False", "try", ":", "for", "cls", "in", "node", ".", "last_child", "(", ")", ".", "inferred", "(", ")", ":", "if", "isinstance", "(", "cls", ",", "Super", ")", ":", "cls", "=", "cls", ".", "_self_class", "# pylint: disable=protected-access", "if", "node_is_subclass", "(", "cls", ",", "*", "parents", ")", "or", "cls", ".", "qname", "(", ")", "in", "parents", ":", "return", "True", "except", "InferenceError", ":", "pass", "return", "False" ]
Checks that node is an attribute used inside one of allowed parents
[ "Checks", "that", "node", "is", "an", "attribute", "used", "inside", "one", "of", "allowed", "parents" ]
python
train
34.1875
olitheolix/qtmacs
qtmacs/auxiliary.py
https://github.com/olitheolix/qtmacs/blob/36253b082b82590f183fe154b053eb3a1e741be2/qtmacs/auxiliary.py#L1057-L1101
def qteIsQtmacsWidget(widgetObj):
    """
    Determine if a widget is part of Qtmacs widget hierarchy.

    A widget belongs to the Qtmacs hierarchy if it, or one of its
    parents, has a "_qteAdmin" attribute (added via
    ``qteAddWidget``). Since every applet has this attribute, it is
    guaranteed that the function returns **True** if the widget is
    embedded somewhere inside an applet.

    |Args|

    * ``widgetObj`` (**QWidget**): the widget to test.

    |Returns|

    * **bool**: **True** if the widget, or one of its ancestors in
      the Qt hierarchy, has a '_qteAdmin' attribute.

    |Raises|

    * **None**
    """
    if widgetObj is None:
        return False

    if hasattr(widgetObj, '_qteAdmin'):
        return True

    # Keep track of the already visited objects to avoid infinite loops.
    visited = [widgetObj]

    # Traverse the hierarchy until a parent features the '_qteAdmin'
    # attribute, the parent is None, or the parent is an already
    # visited widget.
    wid = widgetObj.parent()
    while wid not in visited:
        if hasattr(wid, '_qteAdmin'):
            return True
        elif wid is None:
            return False
        else:
            visited.append(wid)
            wid = wid.parent()
    return False
[ "def", "qteIsQtmacsWidget", "(", "widgetObj", ")", ":", "if", "widgetObj", "is", "None", ":", "return", "False", "if", "hasattr", "(", "widgetObj", ",", "'_qteAdmin'", ")", ":", "return", "True", "# Keep track of the already visited objects to avoid infinite loops.", "visited", "=", "[", "widgetObj", "]", "# Traverse the hierarchy until a parent features the '_qteAdmin'", "# attribute, the parent is None, or the parent is an already", "# visited widget.", "wid", "=", "widgetObj", ".", "parent", "(", ")", "while", "wid", "not", "in", "visited", ":", "if", "hasattr", "(", "wid", ",", "'_qteAdmin'", ")", ":", "return", "True", "elif", "wid", "is", "None", ":", "return", "False", "else", ":", "visited", ".", "append", "(", "wid", ")", "wid", "=", "wid", ".", "parent", "(", ")", "return", "False" ]
Determine if a widget is part of Qtmacs widget hierarchy.

A widget belongs to the Qtmacs hierarchy if it, or one of its
parents, has a "_qteAdmin" attribute (added via
``qteAddWidget``). Since every applet has this attribute, it is
guaranteed that the function returns **True** if the widget is
embedded somewhere inside an applet.

|Args|

* ``widgetObj`` (**QWidget**): the widget to test.

|Returns|

* **bool**: **True** if the widget, or one of its ancestors in
  the Qt hierarchy, has a '_qteAdmin' attribute.

|Raises|

* **None**
[ "Determine", "if", "a", "widget", "is", "part", "of", "Qtmacs", "widget", "hierarchy", "." ]
python
train
26.955556
Locu/chronology
kronos/kronos/storage/base.py
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/kronos/kronos/storage/base.py#L57-L77
def retrieve(self, namespace, stream, start_time, end_time, start_id, configuration, order=ResultOrder.ASCENDING, limit=sys.maxint): """ Retrieves all the events for `stream` from `start_time` (inclusive) till `end_time` (inclusive). Alternatively to `start_time`, `start_id` can be provided, and then all events from `start_id` (exclusive) till `end_time` (inclusive) are returned. `start_id` should be used in cases when the client got disconnected from the server before all the events in the requested time window had been returned. `order` can be one of ResultOrder.ASCENDING or ResultOrder.DESCENDING. Returns an iterator over all JSON serialized (strings) events. """ if not start_id: start_id = uuid_from_kronos_time(start_time, _type=UUIDType.LOWEST) else: start_id = TimeUUID(start_id) if uuid_to_kronos_time(start_id) > end_time: return [] return self._retrieve(namespace, stream, start_id, end_time, order, limit, configuration)
[ "def", "retrieve", "(", "self", ",", "namespace", ",", "stream", ",", "start_time", ",", "end_time", ",", "start_id", ",", "configuration", ",", "order", "=", "ResultOrder", ".", "ASCENDING", ",", "limit", "=", "sys", ".", "maxint", ")", ":", "if", "not", "start_id", ":", "start_id", "=", "uuid_from_kronos_time", "(", "start_time", ",", "_type", "=", "UUIDType", ".", "LOWEST", ")", "else", ":", "start_id", "=", "TimeUUID", "(", "start_id", ")", "if", "uuid_to_kronos_time", "(", "start_id", ")", ">", "end_time", ":", "return", "[", "]", "return", "self", ".", "_retrieve", "(", "namespace", ",", "stream", ",", "start_id", ",", "end_time", ",", "order", ",", "limit", ",", "configuration", ")" ]
Retrieves all the events for `stream` from `start_time` (inclusive) till `end_time` (inclusive). Alternatively to `start_time`, `start_id` can be provided, and then all events from `start_id` (exclusive) till `end_time` (inclusive) are returned. `start_id` should be used in cases when the client got disconnected from the server before all the events in the requested time window had been returned. `order` can be one of ResultOrder.ASCENDING or ResultOrder.DESCENDING. Returns an iterator over all JSON serialized (strings) events.
[ "Retrieves", "all", "the", "events", "for", "stream", "from", "start_time", "(", "inclusive", ")", "till", "end_time", "(", "inclusive", ")", ".", "Alternatively", "to", "start_time", "start_id", "can", "be", "provided", "and", "then", "all", "events", "from", "start_id", "(", "exclusive", ")", "till", "end_time", "(", "inclusive", ")", "are", "returned", ".", "start_id", "should", "be", "used", "in", "cases", "when", "the", "client", "got", "disconnected", "from", "the", "server", "before", "all", "the", "events", "in", "the", "requested", "time", "window", "had", "been", "returned", ".", "order", "can", "be", "one", "of", "ResultOrder", ".", "ASCENDING", "or", "ResultOrder", ".", "DESCENDING", "." ]
python
train
49.333333
arviz-devs/arviz
arviz/utils.py
https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/utils.py#L52-L82
def conditional_jit(function=None, **kwargs): # noqa: D202 """Use numba's jit decorator if numba is installed. Notes ----- If called without arguments then return wrapped function. @conditional_jit def my_func(): return else called with arguments @conditional_jit(nopython=True) def my_func(): return """ def wrapper(function): try: numba = importlib.import_module("numba") return numba.jit(**kwargs)(function) except ImportError: return function if function: return wrapper(function) else: return wrapper
[ "def", "conditional_jit", "(", "function", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# noqa: D202", "def", "wrapper", "(", "function", ")", ":", "try", ":", "numba", "=", "importlib", ".", "import_module", "(", "\"numba\"", ")", "return", "numba", ".", "jit", "(", "*", "*", "kwargs", ")", "(", "function", ")", "except", "ImportError", ":", "return", "function", "if", "function", ":", "return", "wrapper", "(", "function", ")", "else", ":", "return", "wrapper" ]
Use numba's jit decorator if numba is installed. Notes ----- If called without arguments then return wrapped function. @conditional_jit def my_func(): return else called with arguments @conditional_jit(nopython=True) def my_func(): return
[ "Use", "numba", "s", "jit", "decorator", "if", "numba", "is", "installed", "." ]
python
train
21.064516
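With conditional_jit in scope, both decoration forms from the docstring behave identically whether or not numba is installed:

@conditional_jit
def plain(x):
    return x + 1

@conditional_jit(nopython=True)
def typed(x):
    return x + 1

print(plain(1), typed(2))   # 2 3, with or without numba available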
googleapis/google-cloud-python
bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py#L542-L652
def check_and_mutate_row( self, table_name, row_key, app_profile_id=None, predicate_filter=None, true_mutations=None, false_mutations=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Mutates a row atomically based on the output of a predicate Reader filter. Example: >>> from google.cloud import bigtable_v2 >>> >>> client = bigtable_v2.BigtableClient() >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> >>> # TODO: Initialize `row_key`: >>> row_key = b'' >>> >>> response = client.check_and_mutate_row(table_name, row_key) Args: table_name (str): The unique name of the table to which the conditional mutation should be applied. Values are of the form ``projects/<project>/instances/<instance>/tables/<table>``. row_key (bytes): The key of the row to which the conditional mutation should be applied. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. Depending on whether or not any results are yielded, either ``true_mutations`` or ``false_mutations`` will be executed. If unset, checks that the row contains any values at all. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.RowFilter` true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if ``predicate_filter`` yields at least one cell when applied to ``row_key``. Entries are applied in order, meaning that earlier mutations can be masked by later ones. Must contain at least one entry if ``false_mutations`` is empty, and at most 100000. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Mutation` false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if ``predicate_filter`` does not yield any cells when applied to ``row_key``. Entries are applied in order, meaning that earlier mutations can be masked by later ones. Must contain at least one entry if ``true_mutations`` is empty, and at most 100000. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Mutation` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.bigtable_v2.types.CheckAndMutateRowResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
if "check_and_mutate_row" not in self._inner_api_calls: self._inner_api_calls[ "check_and_mutate_row" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.check_and_mutate_row, default_retry=self._method_configs["CheckAndMutateRow"].retry, default_timeout=self._method_configs["CheckAndMutateRow"].timeout, client_info=self._client_info, ) request = bigtable_pb2.CheckAndMutateRowRequest( table_name=table_name, row_key=row_key, app_profile_id=app_profile_id, predicate_filter=predicate_filter, true_mutations=true_mutations, false_mutations=false_mutations, ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("table_name", table_name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["check_and_mutate_row"]( request, retry=retry, timeout=timeout, metadata=metadata )
[ "def", "check_and_mutate_row", "(", "self", ",", "table_name", ",", "row_key", ",", "app_profile_id", "=", "None", ",", "predicate_filter", "=", "None", ",", "true_mutations", "=", "None", ",", "false_mutations", "=", "None", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "metadata", "=", "None", ",", ")", ":", "# Wrap the transport method to add retry and timeout logic.", "if", "\"check_and_mutate_row\"", "not", "in", "self", ".", "_inner_api_calls", ":", "self", ".", "_inner_api_calls", "[", "\"check_and_mutate_row\"", "]", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "wrap_method", "(", "self", ".", "transport", ".", "check_and_mutate_row", ",", "default_retry", "=", "self", ".", "_method_configs", "[", "\"CheckAndMutateRow\"", "]", ".", "retry", ",", "default_timeout", "=", "self", ".", "_method_configs", "[", "\"CheckAndMutateRow\"", "]", ".", "timeout", ",", "client_info", "=", "self", ".", "_client_info", ",", ")", "request", "=", "bigtable_pb2", ".", "CheckAndMutateRowRequest", "(", "table_name", "=", "table_name", ",", "row_key", "=", "row_key", ",", "app_profile_id", "=", "app_profile_id", ",", "predicate_filter", "=", "predicate_filter", ",", "true_mutations", "=", "true_mutations", ",", "false_mutations", "=", "false_mutations", ",", ")", "if", "metadata", "is", "None", ":", "metadata", "=", "[", "]", "metadata", "=", "list", "(", "metadata", ")", "try", ":", "routing_header", "=", "[", "(", "\"table_name\"", ",", "table_name", ")", "]", "except", "AttributeError", ":", "pass", "else", ":", "routing_metadata", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "routing_header", ".", "to_grpc_metadata", "(", "routing_header", ")", "metadata", ".", "append", "(", "routing_metadata", ")", "return", "self", ".", "_inner_api_calls", "[", "\"check_and_mutate_row\"", "]", "(", "request", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ",", "metadata", "=", "metadata", ")" ]
Mutates a row atomically based on the output of a predicate Reader filter. Example: >>> from google.cloud import bigtable_v2 >>> >>> client = bigtable_v2.BigtableClient() >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> >>> # TODO: Initialize `row_key`: >>> row_key = b'' >>> >>> response = client.check_and_mutate_row(table_name, row_key) Args: table_name (str): The unique name of the table to which the conditional mutation should be applied. Values are of the form ``projects/<project>/instances/<instance>/tables/<table>``. row_key (bytes): The key of the row to which the conditional mutation should be applied. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. Depending on whether or not any results are yielded, either ``true_mutations`` or ``false_mutations`` will be executed. If unset, checks that the row contains any values at all. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.RowFilter` true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if ``predicate_filter`` yields at least one cell when applied to ``row_key``. Entries are applied in order, meaning that earlier mutations can be masked by later ones. Must contain at least one entry if ``false_mutations`` is empty, and at most 100000. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Mutation` false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if ``predicate_filter`` does not yield any cells when applied to ``row_key``. Entries are applied in order, meaning that earlier mutations can be masked by later ones. Must contain at least one entry if ``true_mutations`` is empty, and at most 100000. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Mutation` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.bigtable_v2.types.CheckAndMutateRowResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
[ "Mutates", "a", "row", "atomically", "based", "on", "the", "output", "of", "a", "predicate", "Reader", "filter", "." ]
python
train
48.810811
onecodex/onecodex
onecodex/taxonomy.py
https://github.com/onecodex/onecodex/blob/326a0a1af140e3a57ccf31c3c9c5e17a5775c13d/onecodex/taxonomy.py#L5-L42
def tree_build(self): """Build a tree from the taxonomy data present in this `ClassificationsDataFrame` or `SampleCollection`. Returns ------- `skbio.tree.TreeNode`, the root node of a tree that contains all the taxa in the current analysis and their parents leading back to the root node. """ from skbio.tree import TreeNode # build all the nodes nodes = {} for tax_id in self.taxonomy.index: node = TreeNode(name=tax_id, length=1) node.tax_name = self.taxonomy["name"][tax_id] node.rank = self.taxonomy["rank"][tax_id] node.parent_tax_id = self.taxonomy["parent_tax_id"][tax_id] nodes[tax_id] = node # generate all the links for tax_id in self.taxonomy.index: try: parent = nodes[nodes[tax_id].parent_tax_id] except KeyError: if tax_id != "1": warnings.warn( "tax_id={} has parent_tax_id={} which is not in tree" "".format(tax_id, nodes[tax_id].parent_tax_id) ) continue parent.append(nodes[tax_id]) return nodes["1"]
[ "def", "tree_build", "(", "self", ")", ":", "from", "skbio", ".", "tree", "import", "TreeNode", "# build all the nodes", "nodes", "=", "{", "}", "for", "tax_id", "in", "self", ".", "taxonomy", ".", "index", ":", "node", "=", "TreeNode", "(", "name", "=", "tax_id", ",", "length", "=", "1", ")", "node", ".", "tax_name", "=", "self", ".", "taxonomy", "[", "\"name\"", "]", "[", "tax_id", "]", "node", ".", "rank", "=", "self", ".", "taxonomy", "[", "\"rank\"", "]", "[", "tax_id", "]", "node", ".", "parent_tax_id", "=", "self", ".", "taxonomy", "[", "\"parent_tax_id\"", "]", "[", "tax_id", "]", "nodes", "[", "tax_id", "]", "=", "node", "# generate all the links", "for", "tax_id", "in", "self", ".", "taxonomy", ".", "index", ":", "try", ":", "parent", "=", "nodes", "[", "nodes", "[", "tax_id", "]", ".", "parent_tax_id", "]", "except", "KeyError", ":", "if", "tax_id", "!=", "\"1\"", ":", "warnings", ".", "warn", "(", "\"tax_id={} has parent_tax_id={} which is not in tree\"", "\"\"", ".", "format", "(", "tax_id", ",", "nodes", "[", "tax_id", "]", ".", "parent_tax_id", ")", ")", "continue", "parent", ".", "append", "(", "nodes", "[", "tax_id", "]", ")", "return", "nodes", "[", "\"1\"", "]" ]
Build a tree from the taxonomy data present in this `ClassificationsDataFrame` or `SampleCollection`. Returns ------- `skbio.tree.TreeNode`, the root node of a tree that contains all the taxa in the current analysis and their parents leading back to the root node.
[ "Build", "a", "tree", "from", "the", "taxonomy", "data", "present", "in", "this", "ClassificationsDataFrame", "or", "SampleCollection", "." ]
python
train
32.473684
galactics/beyond
beyond/orbits/ephem.py
https://github.com/galactics/beyond/blob/7a7590ff0fd4c0bac3e8e383ecca03caa98e5742/beyond/orbits/ephem.py#L316-L325
def ephem(self, *args, **kwargs): """Create an Ephem object which is a subset of this one Take the same keyword arguments as :py:meth:`ephemeris` Return: Ephem: """ return self.__class__(self.ephemeris(*args, **kwargs))
[ "def", "ephem", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "__class__", "(", "self", ".", "ephemeris", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")" ]
Create an Ephem object which is a subset of this one Take the same keyword arguments as :py:meth:`ephemeris` Return: Ephem:
[ "Create", "an", "Ephem", "object", "which", "is", "a", "subset", "of", "this", "one" ]
python
train
26.5
log2timeline/plaso
plaso/filters/file_entry.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/filters/file_entry.py#L246-L254
def Print(self, output_writer): """Prints a human readable version of the filter. Args: output_writer (CLIOutputWriter): output writer. """ if self._names: output_writer.Write('\tnames: {0:s}\n'.format( ', '.join(self._names)))
[ "def", "Print", "(", "self", ",", "output_writer", ")", ":", "if", "self", ".", "_names", ":", "output_writer", ".", "Write", "(", "'\\tnames: {0:s}\\n'", ".", "format", "(", "', '", ".", "join", "(", "self", ".", "_names", ")", ")", ")" ]
Prints a human readable version of the filter. Args: output_writer (CLIOutputWriter): output writer.
[ "Prints", "a", "human", "readable", "version", "of", "the", "filter", "." ]
python
train
28.666667
vladcalin/gemstone
gemstone/event/transport/base.py
https://github.com/vladcalin/gemstone/blob/325a49d17621b9d45ffd2b5eca6f0de284de8ba4/gemstone/event/transport/base.py#L69-L83
def run_on_main_thread(self, func, args=None, kwargs=None): """ Runs the ``func`` callable on the main thread, by using the provided microservice instance's IOLoop. :param func: callable to run on the main thread :param args: tuple or list with the positional arguments. :param kwargs: dict with the keyword arguments. :return: """ if not args: args = () if not kwargs: kwargs = {} self.microservice.get_io_loop().add_callback(func, *args, **kwargs)
[ "def", "run_on_main_thread", "(", "self", ",", "func", ",", "args", "=", "None", ",", "kwargs", "=", "None", ")", ":", "if", "not", "args", ":", "args", "=", "(", ")", "if", "not", "kwargs", ":", "kwargs", "=", "{", "}", "self", ".", "microservice", ".", "get_io_loop", "(", ")", ".", "add_callback", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Runs the ``func`` callable on the main thread, by using the provided microservice instance's IOLoop. :param func: callable to run on the main thread :param args: tuple or list with the positional arguments. :param kwargs: dict with the keyword arguments. :return:
[ "Runs", "the", "func", "callable", "on", "the", "main", "thread", "by", "using", "the", "provided", "microservice", "instance", "s", "IOLoop", "." ]
python
train
36.533333
pydata/xarray
xarray/core/indexing.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/indexing.py#L260-L279
def slice_slice(old_slice, applied_slice, size): """Given a slice and the size of the dimension to which it will be applied, index it with another slice to return a new slice equivalent to applying the slices sequentially """ step = (old_slice.step or 1) * (applied_slice.step or 1) # For now, use the hack of turning old_slice into an ndarray to reconstruct # the slice start and stop. This is not entirely ideal, but it is still # definitely better than leaving the indexer as an array. items = _expand_slice(old_slice, size)[applied_slice] if len(items) > 0: start = items[0] stop = items[-1] + int(np.sign(step)) if stop < 0: stop = None else: start = 0 stop = 0 return slice(start, stop, step)
[ "def", "slice_slice", "(", "old_slice", ",", "applied_slice", ",", "size", ")", ":", "step", "=", "(", "old_slice", ".", "step", "or", "1", ")", "*", "(", "applied_slice", ".", "step", "or", "1", ")", "# For now, use the hack of turning old_slice into an ndarray to reconstruct", "# the slice start and stop. This is not entirely ideal, but it is still", "# definitely better than leaving the indexer as an array.", "items", "=", "_expand_slice", "(", "old_slice", ",", "size", ")", "[", "applied_slice", "]", "if", "len", "(", "items", ")", ">", "0", ":", "start", "=", "items", "[", "0", "]", "stop", "=", "items", "[", "-", "1", "]", "+", "int", "(", "np", ".", "sign", "(", "step", ")", ")", "if", "stop", "<", "0", ":", "stop", "=", "None", "else", ":", "start", "=", "0", "stop", "=", "0", "return", "slice", "(", "start", ",", "stop", ",", "step", ")" ]
Given a slice and the size of the dimension to which it will be applied, index it with another slice to return a new slice equivalent to applying the slices sequentially
[ "Given", "a", "slice", "and", "the", "size", "of", "the", "dimension", "to", "which", "it", "will", "be", "applied", "index", "it", "with", "another", "slice", "to", "return", "a", "new", "slice", "equivalent", "to", "applying", "the", "slices", "sequentially" ]
python
train
39
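A quick check of the contract above — composing the two slices lazily must match applying them one after another. slice_slice lives in xarray's private indexing module, so the import path may shift between versions:

    import numpy as np
    from xarray.core.indexing import slice_slice  # private API; path may change

    size = 10
    old, applied = slice(1, 9, 2), slice(1, None, 2)  # picks 1,3,5,7, then every 2nd
    combined = slice_slice(old, applied, size)        # -> slice(3, 8, 4)

    x = np.arange(size)
    assert (x[old][applied] == x[combined]).all()     # [3, 7] both ways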
aliyun/aliyun-odps-python-sdk
odps/models/instance.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/models/instance.py#L385-L414
def get_task_cost(self, task_name): """ Get task cost :param task_name: name of the task :return: task cost :rtype: Instance.TaskCost :Example: >>> cost = instance.get_task_cost(instance.get_task_names()[0]) >>> cost.cpu_cost 200 >>> cost.memory_cost 4096 >>> cost.input_size 0 """ summary = self.get_task_summary(task_name) if summary is None: return None if 'Cost' in summary: task_cost = summary['Cost'] cpu_cost = task_cost.get('CPU') memory = task_cost.get('Memory') input_size = task_cost.get('Input') return Instance.TaskCost(cpu_cost, memory, input_size)
[ "def", "get_task_cost", "(", "self", ",", "task_name", ")", ":", "summary", "=", "self", ".", "get_task_summary", "(", "task_name", ")", "if", "summary", "is", "None", ":", "return", "None", "if", "'Cost'", "in", "summary", ":", "task_cost", "=", "summary", "[", "'Cost'", "]", "cpu_cost", "=", "task_cost", ".", "get", "(", "'CPU'", ")", "memory", "=", "task_cost", ".", "get", "(", "'Memory'", ")", "input_size", "=", "task_cost", ".", "get", "(", "'Input'", ")", "return", "Instance", ".", "TaskCost", "(", "cpu_cost", ",", "memory", ",", "input_size", ")" ]
Get task cost :param task_name: name of the task :return: task cost :rtype: Instance.TaskCost :Example: >>> cost = instance.get_task_cost(instance.get_task_names()[0]) >>> cost.cpu_cost 200 >>> cost.memory_cost 4096 >>> cost.input_size 0
[ "Get", "task", "cost" ]
python
train
24.9
MacHu-GWU/rolex-project
rolex/generator.py
https://github.com/MacHu-GWU/rolex-project/blob/a1111b410ed04b4b6eddd81df110fa2dacfa6537/rolex/generator.py#L354-L365
def rnd_datetime_array(size, start=datetime(1970, 1, 1), end=None): """ Array or Matrix of random datetime generator. :returns: 1d or 2d array of datetime.datetime """ if end is None: end = datetime.now() start = parser.parse_datetime(start) end = parser.parse_datetime(end) _assert_correct_start_end(start, end) return _randn(size, _rnd_datetime, start, end)
[ "def", "rnd_datetime_array", "(", "size", ",", "start", "=", "datetime", "(", "1970", ",", "1", ",", "1", ")", ",", "end", "=", "None", ")", ":", "if", "end", "is", "None", ":", "end", "=", "datetime", ".", "now", "(", ")", "start", "=", "parser", ".", "parse_datetime", "(", "start", ")", "end", "=", "parser", ".", "parse_datetime", "(", "end", ")", "_assert_correct_start_end", "(", "start", ",", "end", ")", "return", "_randn", "(", "size", ",", "_rnd_datetime", ",", "start", ",", "end", ")" ]
Array or Matrix of random datetime generator. :returns: 1d or 2d array of datetime.datetime
[ "Array", "or", "Matrix", "of", "random", "datetime", "generator", "." ]
python
train
32.416667
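A usage sketch, assuming the generator functions are re-exported at the rolex package level and that `size` accepts a (rows, cols) tuple for the 2-d case — the docstring's "Array or Matrix" suggests this, but both points are assumptions:

    from datetime import datetime
    import rolex  # assumed to re-export rnd_datetime_array

    # 1-d: five random datetimes between 2020-01-01 and now
    samples = rolex.rnd_datetime_array(5, start=datetime(2020, 1, 1))

    # 2-d: a 3 x 4 matrix over an explicit range; strings are parsed
    # by parser.parse_datetime per the implementation above
    grid = rolex.rnd_datetime_array((3, 4), start="2020-01-01", end="2020-12-31")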
klahnakoski/pyLibrary
mo_collections/matrix.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_collections/matrix.py#L179-L206
def groupby(self, io_select): """ SLICE THIS MATRIX INTO ONES WITH LESS DIMENSIONALITY io_select - 1 IF GROUPING BY THIS DIMENSION, 0 IF FLATTENING return - """ # offsets WILL SERVE TO MASK DIMS WE ARE NOT GROUPING BY, AND SERVE AS RELATIVE INDEX FOR EACH COORDINATE offsets = [] new_dim = [] acc = 1 for i, d in reversed(list(enumerate(self.dims))): if not io_select[i]: new_dim.insert(0, d) offsets.insert(0, acc * io_select[i]) acc *= d if not new_dim: # WHEN groupby ALL DIMENSIONS, ONLY THE VALUES REMAIN # RETURN AN ITERATOR OF PAIRS (c, v), WHERE # c - COORDINATES INTO THE CUBE # v - VALUE AT GIVEN COORDINATES return ((c, self[c]) for c in self._all_combos()) else: output = [[None, Matrix(dims=new_dim)] for i in range(acc)] _groupby(self.cube, 0, offsets, 0, output, tuple(), []) return output
[ "def", "groupby", "(", "self", ",", "io_select", ")", ":", "# offsets WILL SERVE TO MASK DIMS WE ARE NOT GROUPING BY, AND SERVE AS RELATIVE INDEX FOR EACH COORDINATE", "offsets", "=", "[", "]", "new_dim", "=", "[", "]", "acc", "=", "1", "for", "i", ",", "d", "in", "reversed", "(", "enumerate", "(", "self", ".", "dims", ")", ")", ":", "if", "not", "io_select", "[", "i", "]", ":", "new_dim", ".", "insert", "(", "0", ",", "d", ")", "offsets", ".", "insert", "(", "0", ",", "acc", "*", "io_select", "[", "i", "]", ")", "acc", "*=", "d", "if", "not", "new_dim", ":", "# WHEN groupby ALL DIMENSIONS, ONLY THE VALUES REMAIN", "# RETURN AN ITERATOR OF PAIRS (c, v), WHERE", "# c - COORDINATES INTO THE CUBE", "# v - VALUE AT GIVEN COORDINATES", "return", "(", "(", "c", ",", "self", "[", "c", "]", ")", "for", "c", "in", "self", ".", "_all_combos", "(", ")", ")", "else", ":", "output", "=", "[", "[", "None", ",", "Matrix", "(", "dims", "=", "new_dim", ")", "]", "for", "i", "in", "range", "(", "acc", ")", "]", "_groupby", "(", "self", ".", "cube", ",", "0", ",", "offsets", ",", "0", ",", "output", ",", "tuple", "(", ")", ",", "[", "]", ")", "return", "output" ]
SLICE THIS MATRIX INTO ONES WITH LESS DIMENSIONALITY io_select - 1 IF GROUPING BY THIS DIMENSION, 0 IF FLATTENING return -
[ "SLICE", "THIS", "MATRIX", "INTO", "ONES", "WITH", "LESS", "DIMENSIONALITY", "io_select", "-", "1", "IF", "GROUPING", "BY", "THIS", "DIMENSION", "0", "IF", "FLATTENING", "return", "-" ]
python
train
36.214286
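A sketch of how this groupby is meant to be driven; the fill loop below leans on Matrix item assignment and _all_combos() as used elsewhere in the record, so treat it as an illustration rather than a verified recipe:

    from mo_collections.matrix import Matrix  # path taken from the record

    m = Matrix(dims=[2, 3])          # a 2 x 3 cube
    for c in m._all_combos():
        m[c] = sum(c)                # deterministic fill

    # io_select == [1, 0]: group by axis 0, flatten axis 1 into
    # one sub-matrix of dims [3] per index along axis 0
    for key, sub in m.groupby([1, 0]):
        print(key, sub)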
gatkin/declxml
declxml.py
https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L558-L576
def named_tuple( element_name, # type: Text tuple_type, # type: Type[Tuple] child_processors, # type: List[Processor] required=True, # type: bool alias=None, # type: Optional[Text] hooks=None # type: Optional[Hooks] ): # type: (...) -> RootProcessor """ Create a processor for namedtuple values. :param tuple_type: The namedtuple type. See also :func:`declxml.dictionary` """ converter = _named_tuple_converter(tuple_type) processor = _Aggregate(element_name, converter, child_processors, required, alias) return _processor_wrap_if_hooks(processor, hooks)
[ "def", "named_tuple", "(", "element_name", ",", "# type: Text", "tuple_type", ",", "# type: Type[Tuple]", "child_processors", ",", "# type: List[Processor]", "required", "=", "True", ",", "# type: bool", "alias", "=", "None", ",", "# type: Optional[Text]", "hooks", "=", "None", "# type: Optional[Hooks]", ")", ":", "# type: (...) -> RootProcessor", "converter", "=", "_named_tuple_converter", "(", "tuple_type", ")", "processor", "=", "_Aggregate", "(", "element_name", ",", "converter", ",", "child_processors", ",", "required", ",", "alias", ")", "return", "_processor_wrap_if_hooks", "(", "processor", ",", "hooks", ")" ]
Create a processor for namedtuple values. :param tuple_type: The namedtuple type. See also :func:`declxml.dictionary`
[ "Create", "a", "processor", "for", "namedtuple", "values", "." ]
python
train
33.105263
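Paired with declxml's primitive processors, this reads a namedtuple straight out of XML; the <point> document shape here is invented for illustration:

    import collections
    import declxml as xml

    Point = collections.namedtuple('Point', ['x', 'y'])

    point_processor = xml.named_tuple('point', Point, [
        xml.integer('x'),
        xml.integer('y'),
    ])

    data = xml.parse_from_string(point_processor,
                                 '<point><x>1</x><y>2</y></point>')
    # data == Point(x=1, y=2)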
kwikteam/phy
phy/io/context.py
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/context.py#L106-L121
def memcache(self, f): """Cache a function in memory using an internal dictionary.""" name = _fullname(f) cache = self.load_memcache(name) @wraps(f) def memcached(*args): """Cache the function in memory.""" # The arguments need to be hashable. Much faster than using hash(). h = args out = cache.get(h, None) if out is None: out = f(*args) cache[h] = out return out return memcached
[ "def", "memcache", "(", "self", ",", "f", ")", ":", "name", "=", "_fullname", "(", "f", ")", "cache", "=", "self", ".", "load_memcache", "(", "name", ")", "@", "wraps", "(", "f", ")", "def", "memcached", "(", "*", "args", ")", ":", "\"\"\"Cache the function in memory.\"\"\"", "# The arguments need to be hashable. Much faster than using hash().", "h", "=", "args", "out", "=", "cache", ".", "get", "(", "h", ",", "None", ")", "if", "out", "is", "None", ":", "out", "=", "f", "(", "*", "args", ")", "cache", "[", "h", "]", "=", "out", "return", "out", "return", "memcached" ]
Cache a function in memory using an internal dictionary.
[ "Cache", "a", "function", "in", "memory", "using", "an", "internal", "dictionary", "." ]
python
train
32.5
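A usage sketch; `ctx` is assumed to be an instance of the Context class this method belongs to, and the decorated function is hypothetical:

    # Hypothetical usage; the decorated function's positional arguments
    # become the cache key, so they must be hashable.
    @ctx.memcache
    def spikes_in_cluster(cluster_id):
        return expensive_lookup(cluster_id)  # hypothetical slow call

    spikes_in_cluster(3)  # computed once, stored under key (3,)
    spikes_in_cluster(3)  # served from the in-memory dict; f is not called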
bitesofcode/projexui
projexui/widgets/xorbquerywidget/xorbquerywidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbquerywidget/xorbquerywidget.py#L89-L97
def cleanupContainers(self): """ Cleans up all containers to the right of the current one. """ for i in range(self.count() - 1, self.currentIndex(), -1): widget = self.widget(i) widget.close() widget.setParent(None) widget.deleteLater()
[ "def", "cleanupContainers", "(", "self", ")", ":", "for", "i", "in", "range", "(", "self", ".", "count", "(", ")", "-", "1", ",", "self", ".", "currentIndex", "(", ")", ",", "-", "1", ")", ":", "widget", "=", "self", ".", "widget", "(", "i", ")", "widget", ".", "close", "(", ")", "widget", ".", "setParent", "(", "None", ")", "widget", ".", "deleteLater", "(", ")" ]
Cleans up all containers to the right of the current one.
[ "Cleans", "up", "all", "containers", "to", "the", "right", "of", "the", "current", "one", "." ]
python
train
35.111111
ilevkivskyi/typing_inspect
typing_inspect.py
https://github.com/ilevkivskyi/typing_inspect/blob/fd81278cc440b6003f8298bcb22d5bc0f82ee3cd/typing_inspect.py#L257-L279
def get_last_args(tp): """Get last arguments of (multiply) subscripted type. Parameters for Callable are flattened. Examples:: get_last_args(int) == () get_last_args(Union) == () get_last_args(ClassVar[int]) == (int,) get_last_args(Union[T, int]) == (T, int) get_last_args(Iterable[Tuple[T, S]][int, T]) == (int, T) get_last_args(Callable[[T], int]) == (T, int) get_last_args(Callable[[], int]) == (int,) """ if NEW_TYPING: raise ValueError('This function is only supported in Python 3.6,' ' use get_args instead') if is_classvar(tp): return (tp.__type__,) if tp.__type__ is not None else () if ( is_generic_type(tp) or is_union_type(tp) or is_callable_type(tp) or is_tuple_type(tp) ): return tp.__args__ if tp.__args__ is not None else () return ()
[ "def", "get_last_args", "(", "tp", ")", ":", "if", "NEW_TYPING", ":", "raise", "ValueError", "(", "'This function is only supported in Python 3.6,'", "' use get_args instead'", ")", "if", "is_classvar", "(", "tp", ")", ":", "return", "(", "tp", ".", "__type__", ",", ")", "if", "tp", ".", "__type__", "is", "not", "None", "else", "(", ")", "if", "(", "is_generic_type", "(", "tp", ")", "or", "is_union_type", "(", "tp", ")", "or", "is_callable_type", "(", "tp", ")", "or", "is_tuple_type", "(", "tp", ")", ")", ":", "return", "tp", ".", "__args__", "if", "tp", ".", "__args__", "is", "not", "None", "else", "(", ")", "return", "(", ")" ]
Get last arguments of (multiply) subscripted type. Parameters for Callable are flattened. Examples:: get_last_args(int) == () get_last_args(Union) == () get_last_args(ClassVar[int]) == (int,) get_last_args(Union[T, int]) == (T, int) get_last_args(Iterable[Tuple[T, S]][int, T]) == (int, T) get_last_args(Callable[[T], int]) == (T, int) get_last_args(Callable[[], int]) == (int,)
[ "Get", "last", "arguments", "of", "(", "multiply", ")", "subscripted", "type", ".", "Parameters", "for", "Callable", "are", "flattened", ".", "Examples", "::" ]
python
train
38.434783
ska-sa/katcp-python
katcp/core.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/core.py#L1155-L1177
def string(cls, name, description=None, unit='', default=None, initial_status=None): """Instantiate a new string sensor object. Parameters ---------- name : str The name of the sensor. description : str A short description of the sensor. unit : str The units of the sensor value. May be the empty string if there are no applicable units. default : string An initial value for the sensor. Defaults to the empty string. initial_status : int enum or None An initial status for the sensor. If None, defaults to Sensor.UNKNOWN. `initial_status` must be one of the keys in Sensor.STATUSES """ return cls(cls.STRING, name, description, unit, None, default, initial_status)
[ "def", "string", "(", "cls", ",", "name", ",", "description", "=", "None", ",", "unit", "=", "''", ",", "default", "=", "None", ",", "initial_status", "=", "None", ")", ":", "return", "cls", "(", "cls", ".", "STRING", ",", "name", ",", "description", ",", "unit", ",", "None", ",", "default", ",", "initial_status", ")" ]
Instantiate a new string sensor object. Parameters ---------- name : str The name of the sensor. description : str A short description of the sensor. unit : str The units of the sensor value. May be the empty string if there are no applicable units. default : string An initial value for the sensor. Defaults to the empty string. initial_status : int enum or None An initial status for the sensor. If None, defaults to Sensor.UNKNOWN. `initial_status` must be one of the keys in Sensor.STATUSES
[ "Instantiate", "a", "new", "string", "sensor", "object", "." ]
python
train
37.217391
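A construction sketch, assuming katcp exports Sensor at package level and that Sensor.NOMINAL is one of the keys in Sensor.STATUSES:

    from katcp import Sensor  # assumed package-level export

    status = Sensor.string(
        'device-status',
        description='Current device state',
        default='idle',
        initial_status=Sensor.NOMINAL,  # assumed status constant
    )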
mardiros/pyshop
pyshop/views/xmlrpc.py
https://github.com/mardiros/pyshop/blob/b42510b9c3fa16e0e5710457401ac38fea5bf7a0/pyshop/views/xmlrpc.py#L19-L26
def list_packages(request): """ Retrieve a list of the package names registered with the package index. Returns a list of name strings. """ session = DBSession() names = [p.name for p in Package.all(session, order_by=Package.name)] return names
[ "def", "list_packages", "(", "request", ")", ":", "session", "=", "DBSession", "(", ")", "names", "=", "[", "p", ".", "name", "for", "p", "in", "Package", ".", "all", "(", "session", ",", "order_by", "=", "Package", ".", "name", ")", "]", "return", "names" ]
Retrieve a list of the package names registered with the package index. Returns a list of name strings.
[ "Retrieve", "a", "list", "of", "the", "package", "names", "registered", "with", "the", "package", "index", ".", "Returns", "a", "list", "of", "name", "strings", "." ]
python
train
33.125
bitesofcode/projexui
projexui/widgets/xnodewidget/xnodescene.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xnodewidget/xnodescene.py#L440-L468
def autoLayoutSelected( self, padX = None, padY = None, direction = Qt.Horizontal, layout = 'Layered', animate = 0, centerOn = None, center = None): """ Automatically lays out all the selected nodes in the scene using the \ autoLayoutNodes method. :param padX | <int> || None | default is 2 * cell width padY | <int> || None | default is 2 * cell height direction | <Qt.Direction> layout | <str> | name of the layout plugin to use animate | <int> | number of seconds to animate over :return {<XNode>: <QRectF>, ..} | new rects per node """ nodes = self.selectedNodes() return self.autoLayoutNodes(nodes, padX, padY, direction, layout, animate, centerOn, center)
[ "def", "autoLayoutSelected", "(", "self", ",", "padX", "=", "None", ",", "padY", "=", "None", ",", "direction", "=", "Qt", ".", "Horizontal", ",", "layout", "=", "'Layered'", ",", "animate", "=", "0", ",", "centerOn", "=", "None", ",", "center", "=", "None", ")", ":", "nodes", "=", "self", ".", "selectedNodes", "(", ")", "return", "self", ".", "autoLayoutNodes", "(", "nodes", ",", "padX", ",", "padY", ",", "direction", ",", "layout", ",", "animate", ",", "centerOn", ",", "center", ")" ]
Automatically lays out all the selected nodes in the scene using the \ autoLayoutNodes method. :param padX | <int> || None | default is 2 * cell width padY | <int> || None | default is 2 * cell height direction | <Qt.Direction> layout | <str> | name of the layout plugin to use animate | <int> | number of seconds to animate over :return {<XNode>: <QRectF>, ..} | new rects per node
[ "Automatically", "lays", "out", "all", "the", "selected", "nodes", "in", "the", "scene", "using", "the", "\\", "autoLayoutNodes", "method", ".", ":", "param", "padX", "|", "<int", ">", "||", "None", "|", "default", "is", "2", "*", "cell", "width", "padY", "|", "<int", ">", "||", "None", "|", "default", "is", "2", "*", "cell", "height", "direction", "|", "<Qt", ".", "Direction", ">", "layout", "|", "<str", ">", "|", "name", "of", "the", "layout", "plugin", "to", "use", "animate", "|", "<int", ">", "|", "number", "of", "seconds", "to", "animate", "over", ":", "return", "{", "<XNode", ">", ":", "<QRectF", ">", "..", "}", "|", "new", "rects", "per", "node" ]
python
train
43.62069
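A hedged sketch of driving this from an existing scene; `scene` and the padding values are illustrative, and a running Qt application with some nodes selected is assumed:

    # Hypothetical sketch: `scene` is an XNodeScene with nodes selected.
    rects = scene.autoLayoutSelected(padX=20, padY=20, animate=1)

    # the return value maps each laid-out node to its new rectangle
    for node, rect in rects.items():
        print(node, '->', rect)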
arviz-devs/arviz
arviz/data/inference_data.py
https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/data/inference_data.py#L66-L95
def to_netcdf(self, filename, compress=True): """Write InferenceData to file using netcdf4. Parameters ---------- filename : str Location to write to compress : bool Whether to compress result. Note this saves disk space, but may make saving and loading somewhat slower (default: True). Returns ------- str Location of netcdf file """ mode = "w" # overwrite first, then append if self._groups: # checks whether a group is present or not. for group in self._groups: data = getattr(self, group) kwargs = {} if compress: kwargs["encoding"] = {var_name: {"zlib": True} for var_name in data.variables} data.to_netcdf(filename, mode=mode, group=group, **kwargs) data.close() mode = "a" else: # creates a netcdf file for an empty InferenceData object. empty_netcdf_file = nc.Dataset(filename, mode="w", format="NETCDF4") empty_netcdf_file.close() return filename
[ "def", "to_netcdf", "(", "self", ",", "filename", ",", "compress", "=", "True", ")", ":", "mode", "=", "\"w\"", "# overwrite first, then append", "if", "self", ".", "_groups", ":", "# check's whether a group is present or not.", "for", "group", "in", "self", ".", "_groups", ":", "data", "=", "getattr", "(", "self", ",", "group", ")", "kwargs", "=", "{", "}", "if", "compress", ":", "kwargs", "[", "\"encoding\"", "]", "=", "{", "var_name", ":", "{", "\"zlib\"", ":", "True", "}", "for", "var_name", "in", "data", ".", "variables", "}", "data", ".", "to_netcdf", "(", "filename", ",", "mode", "=", "mode", ",", "group", "=", "group", ",", "*", "*", "kwargs", ")", "data", ".", "close", "(", ")", "mode", "=", "\"a\"", "else", ":", "# creates a netcdf file for an empty InferenceData object.", "empty_netcdf_file", "=", "nc", ".", "Dataset", "(", "filename", ",", "mode", "=", "\"w\"", ",", "format", "=", "\"NETCDF4\"", ")", "empty_netcdf_file", ".", "close", "(", ")", "return", "filename" ]
Write InferenceData to file using netcdf4. Parameters ---------- filename : str Location to write to compress : bool Whether to compress result. Note this saves disk space, but may make saving and loading somewhat slower (default: True). Returns ------- str Location of netcdf file
[ "Write", "InferenceData", "to", "file", "using", "netcdf4", "." ]
python
train
37.866667
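A minimal round trip with arviz's public constructors; the (4, 100) shape below is just chains x draws for illustration:

    import numpy as np
    import arviz as az

    idata = az.from_dict(posterior={"mu": np.random.randn(4, 100)})
    path = idata.to_netcdf("trace.nc")  # zlib-compresses every variable
    idata2 = az.from_netcdf(path)       # read it back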
globocom/GloboNetworkAPI-client-python
networkapiclient/ApiEquipment.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ApiEquipment.py#L36-L44
def get_equipment(self, **kwargs): """ Return a list of equipment. """ uri = 'api/v3/equipment/' uri = self.prepare_url(uri, kwargs) return super(ApiEquipment, self).get(uri)
[ "def", "get_equipment", "(", "self", ",", "*", "*", "kwargs", ")", ":", "uri", "=", "'api/v3/equipment/'", "uri", "=", "self", ".", "prepare_url", "(", "uri", ",", "kwargs", ")", "return", "super", "(", "ApiEquipment", ",", "self", ")", ".", "get", "(", "uri", ")" ]
Return a list of equipment.
[ "Return", "list", "environments", "related", "with", "environment", "vip" ]
python
train
26.888889
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/display.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/display.py#L167-L184
def display_latex(*objs, **kwargs): """Display the LaTeX representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw latex data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False] """ raw = kwargs.pop('raw', False) if raw: for obj in objs: publish_latex(obj) else: display(*objs, include=['text/plain', 'text/latex'])
[ "def", "display_latex", "(", "*", "objs", ",", "*", "*", "kwargs", ")", ":", "raw", "=", "kwargs", ".", "pop", "(", "'raw'", ",", "False", ")", "if", "raw", ":", "for", "obj", "in", "objs", ":", "publish_latex", "(", "obj", ")", "else", ":", "display", "(", "*", "objs", ",", "include", "=", "[", "'text/plain'", ",", "'text/latex'", "]", ")" ]
Display the LaTeX representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw latex data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False]
[ "Display", "the", "LaTeX", "representation", "of", "an", "object", "." ]
python
test
29.888889
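In an IPython/Jupyter frontend this renders LaTeX inline; with raw=True the string is published as-is instead of being run through the formatter:

    from IPython.display import display_latex

    display_latex(r'$e^{i\pi} + 1 = 0$', raw=True)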
EnigmaBridge/jbossply
jbossply/jbossparser.py
https://github.com/EnigmaBridge/jbossply/blob/44b30b15982cae781f0c356fab7263751b20b4d0/jbossply/jbossparser.py#L291-L299
def p_members(self, p): """members : | members member VALUE_SEPARATOR | members member""" if len(p) == 1: p[0] = list() else: p[1].append(p[2]) p[0] = p[1]
[ "def", "p_members", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "1", ":", "p", "[", "0", "]", "=", "list", "(", ")", "else", ":", "p", "[", "1", "]", ".", "append", "(", "p", "[", "2", "]", ")", "p", "[", "0", "]", "=", "p", "[", "1", "]" ]
members : | members member VALUE_SEPARATOR | members member
[ "members", ":", "|", "members", "member", "VALUE_SEPARATOR", "|", "members", "member" ]
python
train
27.222222
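The rule above is PLY's standard left-recursive list idiom. A standalone sketch of the same pattern follows — the NUMBER/COMMA tokens are invented for illustration and are not jbossparser's real grammar:

    import ply.lex as lex
    import ply.yacc as yacc

    tokens = ('NUMBER', 'COMMA')
    t_COMMA = r','
    t_ignore = ' '

    def t_NUMBER(t):
        r'\d+'
        t.value = int(t.value)
        return t

    def t_error(t):
        t.lexer.skip(1)

    def p_items(p):
        """items :
                 | items NUMBER COMMA
                 | items NUMBER"""
        # same accumulation as p_members: start empty, append each item
        p[0] = list() if len(p) == 1 else p[1] + [p[2]]

    def p_error(p):
        pass

    lexer = lex.lex()
    parser = yacc.yacc()
    print(parser.parse('1, 2, 3', lexer=lexer))  # -> [1, 2, 3]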
zsimic/runez
src/runez/logsetup.py
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/logsetup.py#L355-L364
def silence(cls, *modules, **kwargs): """ Args: *modules: Modules, or names of modules to silence (by setting their log level to WARNING or above) **kwargs: Pass as kwargs due to python 2.7, would be level=logging.WARNING otherwise """ level = kwargs.pop("level", logging.WARNING) for mod in modules: name = mod.__name__ if hasattr(mod, "__name__") else mod logging.getLogger(name).setLevel(level)
[ "def", "silence", "(", "cls", ",", "*", "modules", ",", "*", "*", "kwargs", ")", ":", "level", "=", "kwargs", ".", "pop", "(", "\"level\"", ",", "logging", ".", "WARNING", ")", "for", "mod", "in", "modules", ":", "name", "=", "mod", ".", "__name__", "if", "hasattr", "(", "mod", ",", "\"__name__\"", ")", "else", "mod", "logging", ".", "getLogger", "(", "name", ")", ".", "setLevel", "(", "level", ")" ]
Args: *modules: Modules, or names of modules to silence (by setting their log level to WARNING or above) **kwargs: Pass as kwargs due to python 2.7, would be level=logging.WARNING otherwise
[ "Args", ":", "*", "modules", ":", "Modules", "or", "names", "of", "modules", "to", "silence", "(", "by", "setting", "their", "log", "level", "to", "WARNING", "or", "above", ")", "**", "kwargs", ":", "Pass", "as", "kwargs", "due", "to", "python", "2", ".", "7", "would", "be", "level", "=", "logging", ".", "WARNING", "otherwise" ]
python
train
47.6
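Typical calls, assuming runez exposes LogManager as runez.log (its documented convention):

    import logging
    import runez

    # accepts module objects or dotted names; default level is WARNING
    runez.log.silence("requests", "urllib3")
    runez.log.silence("boto3", level=logging.ERROR)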