Dataset schema (column name, type, observed value range):

repo              string, length 7 to 54
path              string, length 4 to 192
url               string, length 87 to 284
code              string, length 78 to 104k
code_tokens       list
docstring         string, length 1 to 46.9k
docstring_tokens  list
language          string, 1 distinct value
partition         string, 3 distinct values
nicolargo/glances
glances/cpu_percent.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/cpu_percent.py#L62-L93
def __get_percpu(self):
    """Update and/or return the per CPU list using the psutil library."""
    # Never update more than 1 time per cached_time
    if self.timer_percpu.finished():
        self.percpu_percent = []
        for cpu_number, cputimes in enumerate(psutil.cpu_times_percent(interval=0.0, percpu=True)):
            cpu = {'key': self.get_key(),
                   'cpu_number': cpu_number,
                   'total': round(100 - cputimes.idle, 1),
                   'user': cputimes.user,
                   'system': cputimes.system,
                   'idle': cputimes.idle}
            # The following stats are for API purposes only
            if hasattr(cputimes, 'nice'):
                cpu['nice'] = cputimes.nice
            if hasattr(cputimes, 'iowait'):
                cpu['iowait'] = cputimes.iowait
            if hasattr(cputimes, 'irq'):
                cpu['irq'] = cputimes.irq
            if hasattr(cputimes, 'softirq'):
                cpu['softirq'] = cputimes.softirq
            if hasattr(cputimes, 'steal'):
                cpu['steal'] = cputimes.steal
            if hasattr(cputimes, 'guest'):
                cpu['guest'] = cputimes.guest
            if hasattr(cputimes, 'guest_nice'):
                cpu['guest_nice'] = cputimes.guest_nice
            # Append new CPU to the list
            self.percpu_percent.append(cpu)
        # Reset timer for cache
        self.timer_percpu = Timer(self.cached_time)
    return self.percpu_percent
[ "def", "__get_percpu", "(", "self", ")", ":", "# Never update more than 1 time per cached_time", "if", "self", ".", "timer_percpu", ".", "finished", "(", ")", ":", "self", ".", "percpu_percent", "=", "[", "]", "for", "cpu_number", ",", "cputimes", "in", "enumerate", "(", "psutil", ".", "cpu_times_percent", "(", "interval", "=", "0.0", ",", "percpu", "=", "True", ")", ")", ":", "cpu", "=", "{", "'key'", ":", "self", ".", "get_key", "(", ")", ",", "'cpu_number'", ":", "cpu_number", ",", "'total'", ":", "round", "(", "100", "-", "cputimes", ".", "idle", ",", "1", ")", ",", "'user'", ":", "cputimes", ".", "user", ",", "'system'", ":", "cputimes", ".", "system", ",", "'idle'", ":", "cputimes", ".", "idle", "}", "# The following stats are for API purposes only", "if", "hasattr", "(", "cputimes", ",", "'nice'", ")", ":", "cpu", "[", "'nice'", "]", "=", "cputimes", ".", "nice", "if", "hasattr", "(", "cputimes", ",", "'iowait'", ")", ":", "cpu", "[", "'iowait'", "]", "=", "cputimes", ".", "iowait", "if", "hasattr", "(", "cputimes", ",", "'irq'", ")", ":", "cpu", "[", "'irq'", "]", "=", "cputimes", ".", "irq", "if", "hasattr", "(", "cputimes", ",", "'softirq'", ")", ":", "cpu", "[", "'softirq'", "]", "=", "cputimes", ".", "softirq", "if", "hasattr", "(", "cputimes", ",", "'steal'", ")", ":", "cpu", "[", "'steal'", "]", "=", "cputimes", ".", "steal", "if", "hasattr", "(", "cputimes", ",", "'guest'", ")", ":", "cpu", "[", "'guest'", "]", "=", "cputimes", ".", "guest", "if", "hasattr", "(", "cputimes", ",", "'guest_nice'", ")", ":", "cpu", "[", "'guest_nice'", "]", "=", "cputimes", ".", "guest_nice", "# Append new CPU to the list", "self", ".", "percpu_percent", ".", "append", "(", "cpu", ")", "# Reset timer for cache", "self", ".", "timer_percpu", "=", "Timer", "(", "self", ".", "cached_time", ")", "return", "self", ".", "percpu_percent" ]
Update and/or return the per CPU list using the psutil library.
[ "Update", "and", "/", "or", "return", "the", "per", "CPU", "list", "using", "the", "psutil", "library", "." ]
python
train
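The record above caches per-CPU percentages from psutil. A minimal standalone sketch of the underlying psutil call (not Glances' own API; the dict layout here only mimics what the method builds):

import psutil

# The first call primes psutil's internal counters; subsequent calls with
# interval=0.0 compute deltas against the previous call.
psutil.cpu_times_percent(interval=0.0, percpu=True)

percpu = []
for cpu_number, cputimes in enumerate(
        psutil.cpu_times_percent(interval=0.1, percpu=True)):
    stat = {'cpu_number': cpu_number,
            'total': round(100 - cputimes.idle, 1),
            'user': cputimes.user,
            'system': cputimes.system,
            'idle': cputimes.idle}
    # Fields like 'iowait' or 'steal' exist only on some platforms,
    # hence the hasattr() guards in the Glances code above.
    if hasattr(cputimes, 'iowait'):
        stat['iowait'] = cputimes.iowait
    percpu.append(stat)

print(percpu)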
liftoff/pyminifier
pyminifier/minification.py
https://github.com/liftoff/pyminifier/blob/087ea7b0c8c964f1f907c3f350f5ce281798db86/pyminifier/minification.py#L31-L72
def remove_comments(tokens):
    """
    Removes comments from *tokens* which is expected to be a list
    equivalent of tokenize.generate_tokens() (so we can update in-place).

    .. note::

        * If the comment makes up the whole line, the newline will also be
          removed (so you don't end up with lots of blank lines).
        * Preserves shebangs and encoding strings.
    """
    preserved_shebang = ""
    preserved_encoding = ""
    # This (short) loop preserves shebangs and encoding strings:
    for tok in tokens[0:4]:  # Will always be in the first four tokens
        line = tok[4]
        # Save the first comment line if it starts with a shebang
        # (e.g. '#!/usr/bin/env python')
        if analyze.shebang.match(line):  # Must be first line
            preserved_shebang = line
        # Save the encoding string (must be first or second line in file)
        # (e.g. '# -*- coding: utf-8 -*-')
        elif analyze.encoding.match(line):
            preserved_encoding = line
    # Now remove comments:
    prev_tok_type = 0
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        if token_type == tokenize.COMMENT:
            tokens[index][1] = ''  # Making it an empty string removes it
        # TODO: Figure out a way to make this work
        #elif prev_tok_type == tokenize.COMMENT:
            #if token_type == tokenize.NL:
                #tokens[index][1] = ''  # Remove trailing newline
        prev_tok_type = token_type
    # Prepend our preserved items back into the token list:
    if preserved_shebang:  # Have to re-tokenize them
        io_obj = io.StringIO(preserved_shebang + preserved_encoding)
        preserved = [list(a) for a in tokenize.generate_tokens(io_obj.readline)]
        preserved.pop()  # Get rid of ENDMARKER
        preserved.reverse()  # Round and round we go!
        for item in preserved:
            tokens.insert(0, item)
[ "def", "remove_comments", "(", "tokens", ")", ":", "preserved_shebang", "=", "\"\"", "preserved_encoding", "=", "\"\"", "# This (short) loop preserves shebangs and encoding strings:", "for", "tok", "in", "tokens", "[", "0", ":", "4", "]", ":", "# Will always be in the first four tokens", "line", "=", "tok", "[", "4", "]", "# Save the first comment line if it starts with a shebang", "# (e.g. '#!/usr/bin/env python')", "if", "analyze", ".", "shebang", ".", "match", "(", "line", ")", ":", "# Must be first line", "preserved_shebang", "=", "line", "# Save the encoding string (must be first or second line in file)", "# (e.g. '# -*- coding: utf-8 -*-')", "elif", "analyze", ".", "encoding", ".", "match", "(", "line", ")", ":", "preserved_encoding", "=", "line", "# Now remove comments:", "prev_tok_type", "=", "0", "for", "index", ",", "tok", "in", "enumerate", "(", "tokens", ")", ":", "token_type", "=", "tok", "[", "0", "]", "if", "token_type", "==", "tokenize", ".", "COMMENT", ":", "tokens", "[", "index", "]", "[", "1", "]", "=", "''", "# Making it an empty string removes it", "# TODO: Figure out a way to make this work", "#elif prev_tok_type == tokenize.COMMENT:", "#if token_type == tokenize.NL:", "#tokens[index][1] = '' # Remove trailing newline", "prev_tok_type", "=", "token_type", "# Prepend our preserved items back into the token list:", "if", "preserved_shebang", ":", "# Have to re-tokenize them", "io_obj", "=", "io", ".", "StringIO", "(", "preserved_shebang", "+", "preserved_encoding", ")", "preserved", "=", "[", "list", "(", "a", ")", "for", "a", "in", "tokenize", ".", "generate_tokens", "(", "io_obj", ".", "readline", ")", "]", "preserved", ".", "pop", "(", ")", "# Get rid of ENDMARKER", "preserved", ".", "reverse", "(", ")", "# Round and round we go!", "for", "item", "in", "preserved", ":", "tokens", ".", "insert", "(", "0", ",", "item", ")" ]
Removes comments from *tokens* which is expected to be a list equivalent of tokenize.generate_tokens() (so we can update in-place).

.. note::

    * If the comment makes up the whole line, the newline will also be removed (so you don't end up with lots of blank lines).
    * Preserves shebangs and encoding strings.
[ "Removes", "comments", "from", "*", "tokens", "*", "which", "is", "expected", "to", "be", "a", "list", "equivalent", "of", "tokenize", ".", "generate_tokens", "()", "(", "so", "we", "can", "update", "in", "-", "place", ")", "." ]
python
train
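remove_comments above operates on mutable token lists from tokenize.generate_tokens. A stdlib-only sketch of the same blank-out-the-COMMENT-token idea, using tokenize.untokenize to rebuild source (pyminifier's own helpers such as analyze.shebang are not reproduced; untokenize with 2-tuples may alter spacing):

import io
import tokenize

def strip_comments(source):
    """Blank out COMMENT tokens and rebuild the source text."""
    tokens = [list(tok) for tok in
              tokenize.generate_tokens(io.StringIO(source).readline)]
    for tok in tokens:
        if tok[0] == tokenize.COMMENT:
            tok[1] = ''  # an empty token string removes the comment
    return tokenize.untokenize([tuple(tok[:2]) for tok in tokens])

print(strip_comments("x = 1  # the answer\n"))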
archman/beamline
beamline/models.py
https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/models.py#L146-L159
def getAllConfig(self, fmt='json'):
    """ return all element configurations as json string file.
        could be further processed by beamline.Lattice class

    :param fmt: 'json' (default) or 'dict'
    """
    for e in self.getCtrlConf(msgout=False):
        self._lattice_confdict.update(e.dumpConfig(type='simu'))
    self._lattice_confdict.update(self._lattice.dumpConfig())
    if fmt == 'json':
        return json.dumps(self._lattice_confdict)
    else:
        return self._lattice_confdict
[ "def", "getAllConfig", "(", "self", ",", "fmt", "=", "'json'", ")", ":", "for", "e", "in", "self", ".", "getCtrlConf", "(", "msgout", "=", "False", ")", ":", "self", ".", "_lattice_confdict", ".", "update", "(", "e", ".", "dumpConfig", "(", "type", "=", "'simu'", ")", ")", "self", ".", "_lattice_confdict", ".", "update", "(", "self", ".", "_lattice", ".", "dumpConfig", "(", ")", ")", "if", "fmt", "==", "'json'", ":", "return", "json", ".", "dumps", "(", "self", ".", "_lattice_confdict", ")", "else", ":", "return", "self", ".", "_lattice_confdict" ]
return all element configurations as json string file. could be further processed by beamline.Lattice class :param fmt: 'json' (default) or 'dict'
[ "return", "all", "element", "configurations", "as", "json", "string", "file", ".", "could", "be", "further", "processed", "by", "beamline", ".", "Lattice", "class" ]
python
train
zeth/inputs
inputs.py
https://github.com/zeth/inputs/blob/a46681dbf77d6ab07834f550e5855c1f50701f99/inputs.py#L3202-L3208
def _update_all_devices(self):
    """Update the all_devices list."""
    self.all_devices = []
    self.all_devices.extend(self.keyboards)
    self.all_devices.extend(self.mice)
    self.all_devices.extend(self.gamepads)
    self.all_devices.extend(self.other_devices)
[ "def", "_update_all_devices", "(", "self", ")", ":", "self", ".", "all_devices", "=", "[", "]", "self", ".", "all_devices", ".", "extend", "(", "self", ".", "keyboards", ")", "self", ".", "all_devices", ".", "extend", "(", "self", ".", "mice", ")", "self", ".", "all_devices", ".", "extend", "(", "self", ".", "gamepads", ")", "self", ".", "all_devices", ".", "extend", "(", "self", ".", "other_devices", ")" ]
Update the all_devices list.
[ "Update", "the", "all_devices", "list", "." ]
python
train
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/jinja2/environment.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/jinja2/environment.py#L375-L390
def getitem(self, obj, argument):
    """Get an item or attribute of an object but prefer the item."""
    try:
        return obj[argument]
    except (TypeError, LookupError):
        if isinstance(argument, string_types):
            try:
                attr = str(argument)
            except Exception:
                pass
            else:
                try:
                    return getattr(obj, attr)
                except AttributeError:
                    pass
    return self.undefined(obj=obj, name=argument)
[ "def", "getitem", "(", "self", ",", "obj", ",", "argument", ")", ":", "try", ":", "return", "obj", "[", "argument", "]", "except", "(", "TypeError", ",", "LookupError", ")", ":", "if", "isinstance", "(", "argument", ",", "string_types", ")", ":", "try", ":", "attr", "=", "str", "(", "argument", ")", "except", "Exception", ":", "pass", "else", ":", "try", ":", "return", "getattr", "(", "obj", ",", "attr", ")", "except", "AttributeError", ":", "pass", "return", "self", ".", "undefined", "(", "obj", "=", "obj", ",", "name", "=", "argument", ")" ]
Get an item or attribute of an object but prefer the item.
[ "Get", "an", "item", "or", "attribute", "of", "an", "object", "but", "prefer", "the", "item", "." ]
python
test
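The fallback order in getitem (item first, then attribute) is what lets a template expression like {{ obj.x }} work on both dicts and objects. A hedged standalone demonstration of the same lookup order, without Jinja2's undefined machinery:

def lookup(obj, argument):
    """Prefer obj[argument]; fall back to getattr(); else return None."""
    try:
        return obj[argument]
    except (TypeError, LookupError):
        try:
            return getattr(obj, argument)
        except (TypeError, AttributeError):
            return None

class Point:
    x = 1

print(lookup({'x': 42}, 'x'))  # 42, via item access
print(lookup(Point(), 'x'))    # 1, via attribute fallback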
docker/docker-py
docker/api/container.py
https://github.com/docker/docker-py/blob/613d6aad83acc9931ff2ecfd6a6c7bd8061dc125/docker/api/container.py#L1057-L1092
def start(self, container, *args, **kwargs):
    """
    Start a container. Similar to the ``docker start`` command, but
    doesn't support attach options.

    **Deprecation warning:** Passing configuration options in ``start``
    is no longer supported. Users are expected to provide host config
    options in the ``host_config`` parameter of
    :py:meth:`~ContainerApiMixin.create_container`.

    Args:
        container (str): The container to start

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
        :py:class:`docker.errors.DeprecatedMethod`
            If any argument besides ``container`` are provided.

    Example:
        >>> container = cli.create_container(
        ...     image='busybox:latest',
        ...     command='/bin/sleep 30')
        >>> cli.start(container=container.get('Id'))
    """
    if args or kwargs:
        raise errors.DeprecatedMethod(
            'Providing configuration in the start() method is no longer '
            'supported. Use the host_config param in create_container '
            'instead.'
        )
    url = self._url("/containers/{0}/start", container)
    res = self._post(url)
    self._raise_for_status(res)
[ "def", "start", "(", "self", ",", "container", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "args", "or", "kwargs", ":", "raise", "errors", ".", "DeprecatedMethod", "(", "'Providing configuration in the start() method is no longer '", "'supported. Use the host_config param in create_container '", "'instead.'", ")", "url", "=", "self", ".", "_url", "(", "\"/containers/{0}/start\"", ",", "container", ")", "res", "=", "self", ".", "_post", "(", "url", ")", "self", ".", "_raise_for_status", "(", "res", ")" ]
Start a container. Similar to the ``docker start`` command, but doesn't support attach options.

**Deprecation warning:** Passing configuration options in ``start`` is no longer supported. Users are expected to provide host config options in the ``host_config`` parameter of :py:meth:`~ContainerApiMixin.create_container`.

Args:
    container (str): The container to start

Raises:
    :py:class:`docker.errors.APIError`
        If the server returns an error.
    :py:class:`docker.errors.DeprecatedMethod`
        If any argument besides ``container`` are provided.

Example:
    >>> container = cli.create_container(
    ...     image='busybox:latest',
    ...     command='/bin/sleep 30')
    >>> cli.start(container=container.get('Id'))
[ "Start", "a", "container", ".", "Similar", "to", "the", "docker", "start", "command", "but", "doesn", "t", "support", "attach", "options", "." ]
python
train
biocommons/hgvs
hgvs/variantmapper.py
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/variantmapper.py#L336-L368
def n_to_c(self, var_n):
    """Given a parsed n. variant, return a c. variant on the specified
    transcript using the specified alignment method (default is
    "transcript" indicating a self alignment).

    :param hgvs.sequencevariant.SequenceVariant var_n: a variant object
    :returns: variant object (:class:`hgvs.sequencevariant.SequenceVariant`)
    :raises HGVSInvalidVariantError: if var_n is not of type "n"
    """
    if not (var_n.type == "n"):
        raise HGVSInvalidVariantError("Expected n. variant; got " + str(var_n))
    if self._validator:
        self._validator.validate(var_n)
    var_n.fill_ref(self.hdp)
    tm = self._fetch_AlignmentMapper(
        tx_ac=var_n.ac, alt_ac=var_n.ac, alt_aln_method="transcript")
    pos_c = tm.n_to_c(var_n.posedit.pos)
    if (isinstance(var_n.posedit.edit, hgvs.edit.NARefAlt)
            or isinstance(var_n.posedit.edit, hgvs.edit.Dup)
            or isinstance(var_n.posedit.edit, hgvs.edit.Inv)):
        edit_c = copy.deepcopy(var_n.posedit.edit)
    else:
        raise HGVSUnsupportedOperationError(
            "Only NARefAlt/Dup/Inv types are currently implemented")
    var_c = hgvs.sequencevariant.SequenceVariant(
        ac=var_n.ac, type="c", posedit=hgvs.posedit.PosEdit(pos_c, edit_c))
    if self.replace_reference:
        self._replace_reference(var_c)
    if self.add_gene_symbol:
        self._update_gene_symbol(var_c, var_n.gene)
    return var_c
[ "def", "n_to_c", "(", "self", ",", "var_n", ")", ":", "if", "not", "(", "var_n", ".", "type", "==", "\"n\"", ")", ":", "raise", "HGVSInvalidVariantError", "(", "\"Expected n. variant; got \"", "+", "str", "(", "var_n", ")", ")", "if", "self", ".", "_validator", ":", "self", ".", "_validator", ".", "validate", "(", "var_n", ")", "var_n", ".", "fill_ref", "(", "self", ".", "hdp", ")", "tm", "=", "self", ".", "_fetch_AlignmentMapper", "(", "tx_ac", "=", "var_n", ".", "ac", ",", "alt_ac", "=", "var_n", ".", "ac", ",", "alt_aln_method", "=", "\"transcript\"", ")", "pos_c", "=", "tm", ".", "n_to_c", "(", "var_n", ".", "posedit", ".", "pos", ")", "if", "(", "isinstance", "(", "var_n", ".", "posedit", ".", "edit", ",", "hgvs", ".", "edit", ".", "NARefAlt", ")", "or", "isinstance", "(", "var_n", ".", "posedit", ".", "edit", ",", "hgvs", ".", "edit", ".", "Dup", ")", "or", "isinstance", "(", "var_n", ".", "posedit", ".", "edit", ",", "hgvs", ".", "edit", ".", "Inv", ")", ")", ":", "edit_c", "=", "copy", ".", "deepcopy", "(", "var_n", ".", "posedit", ".", "edit", ")", "else", ":", "raise", "HGVSUnsupportedOperationError", "(", "\"Only NARefAlt/Dup/Inv types are currently implemented\"", ")", "var_c", "=", "hgvs", ".", "sequencevariant", ".", "SequenceVariant", "(", "ac", "=", "var_n", ".", "ac", ",", "type", "=", "\"c\"", ",", "posedit", "=", "hgvs", ".", "posedit", ".", "PosEdit", "(", "pos_c", ",", "edit_c", ")", ")", "if", "self", ".", "replace_reference", ":", "self", ".", "_replace_reference", "(", "var_c", ")", "if", "self", ".", "add_gene_symbol", ":", "self", ".", "_update_gene_symbol", "(", "var_c", ",", "var_n", ".", "gene", ")", "return", "var_c" ]
Given a parsed n. variant, return a c. variant on the specified transcript using the specified alignment method (default is "transcript" indicating a self alignment).

:param hgvs.sequencevariant.SequenceVariant var_n: a variant object
:returns: variant object (:class:`hgvs.sequencevariant.SequenceVariant`)
:raises HGVSInvalidVariantError: if var_n is not of type "n"
[ "Given", "a", "parsed", "n", ".", "variant", "return", "a", "c", ".", "variant", "on", "the", "specified", "transcript", "using", "the", "specified", "alignment", "method", "(", "default", "is", "transcript", "indicating", "a", "self", "alignment", ")", "." ]
python
train
adamziel/python_translate
python_translate/translations.py
https://github.com/adamziel/python_translate/blob/0aee83f434bd2d1b95767bcd63adb7ac7036c7df/python_translate/translations.py#L132-L140
def replace(self, messages, domain='messages'):
    """
    Sets translations for a given domain.
    """
    assert isinstance(messages, (dict, CaseInsensitiveDict))
    assert isinstance(domain, (str, unicode))

    self.messages[domain] = CaseInsensitiveDict({})
    self.add(messages, domain)
[ "def", "replace", "(", "self", ",", "messages", ",", "domain", "=", "'messages'", ")", ":", "assert", "isinstance", "(", "messages", ",", "(", "dict", ",", "CaseInsensitiveDict", ")", ")", "assert", "isinstance", "(", "domain", ",", "(", "str", ",", "unicode", ")", ")", "self", ".", "messages", "[", "domain", "]", "=", "CaseInsensitiveDict", "(", "{", "}", ")", "self", ".", "add", "(", "messages", ",", "domain", ")" ]
Sets translations for a given domain.
[ "Sets", "translations", "for", "a", "given", "domain", "." ]
python
train
pycontribs/pyrax
pyrax/autoscale.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/autoscale.py#L569-L603
def update_policy(self, scaling_group, policy, name=None, policy_type=None,
        cooldown=None, change=None, is_percent=False,
        desired_capacity=None, args=None):
    """
    Updates the specified policy. One or more of the parameters may be
    specified.
    """
    uri = "/%s/%s/policies/%s" % (self.uri_base,
            utils.get_id(scaling_group), utils.get_id(policy))
    if not isinstance(policy, AutoScalePolicy):
        # Received an ID
        policy = self.get_policy(scaling_group, policy)
    body = {"name": name or policy.name,
            "type": policy_type or policy.type,
            "cooldown": cooldown or policy.cooldown,
            }
    if desired_capacity is not None:
        body["desiredCapacity"] = desired_capacity
    elif change is not None:
        if is_percent:
            body["changePercent"] = change
        else:
            body["change"] = change
    else:
        if getattr(policy, "changePercent", None) is not None:
            body["changePercent"] = policy.changePercent
        elif getattr(policy, "change", None) is not None:
            body["change"] = policy.change
        elif getattr(policy, "desiredCapacity", None) is not None:
            body["desiredCapacity"] = policy.desiredCapacity
    args = args or getattr(policy, "args", None)
    if args is not None:
        body["args"] = args
    resp, resp_body = self.api.method_put(uri, body=body)
    return None
[ "def", "update_policy", "(", "self", ",", "scaling_group", ",", "policy", ",", "name", "=", "None", ",", "policy_type", "=", "None", ",", "cooldown", "=", "None", ",", "change", "=", "None", ",", "is_percent", "=", "False", ",", "desired_capacity", "=", "None", ",", "args", "=", "None", ")", ":", "uri", "=", "\"/%s/%s/policies/%s\"", "%", "(", "self", ".", "uri_base", ",", "utils", ".", "get_id", "(", "scaling_group", ")", ",", "utils", ".", "get_id", "(", "policy", ")", ")", "if", "not", "isinstance", "(", "policy", ",", "AutoScalePolicy", ")", ":", "# Received an ID", "policy", "=", "self", ".", "get_policy", "(", "scaling_group", ",", "policy", ")", "body", "=", "{", "\"name\"", ":", "name", "or", "policy", ".", "name", ",", "\"type\"", ":", "policy_type", "or", "policy", ".", "type", ",", "\"cooldown\"", ":", "cooldown", "or", "policy", ".", "cooldown", ",", "}", "if", "desired_capacity", "is", "not", "None", ":", "body", "[", "\"desiredCapacity\"", "]", "=", "desired_capacity", "elif", "change", "is", "not", "None", ":", "if", "is_percent", ":", "body", "[", "\"changePercent\"", "]", "=", "change", "else", ":", "body", "[", "\"change\"", "]", "=", "change", "else", ":", "if", "getattr", "(", "policy", ",", "\"changePercent\"", ",", "None", ")", "is", "not", "None", ":", "body", "[", "\"changePercent\"", "]", "=", "policy", ".", "changePercent", "elif", "getattr", "(", "policy", ",", "\"change\"", ",", "None", ")", "is", "not", "None", ":", "body", "[", "\"change\"", "]", "=", "policy", ".", "change", "elif", "getattr", "(", "policy", ",", "\"desiredCapacity\"", ",", "None", ")", "is", "not", "None", ":", "body", "[", "\"desiredCapacity\"", "]", "=", "policy", ".", "desiredCapacity", "args", "=", "args", "or", "getattr", "(", "policy", ",", "\"args\"", ",", "None", ")", "if", "args", "is", "not", "None", ":", "body", "[", "\"args\"", "]", "=", "args", "resp", ",", "resp_body", "=", "self", ".", "api", ".", "method_put", "(", "uri", ",", "body", "=", "body", ")", "return", "None" ]
Updates the specified policy. One or more of the parameters may be specified.
[ "Updates", "the", "specified", "policy", ".", "One", "or", "more", "of", "the", "parameters", "may", "be", "specified", "." ]
python
train
aeguana/PyFileMaker
PyFileMaker/FMServer.py
https://github.com/aeguana/PyFileMaker/blob/ef269b52a97e329d91da3c4851ddac800d7fd7e6/PyFileMaker/FMServer.py#L346-L359
def getDbNames(self):
    """This function returns the list of open databases"""

    request = []
    request.append(uu({'-dbnames': '' }))
    result = self._doRequest(request)
    result = FMResultset.FMResultset(result)
    dbNames = []

    for dbName in result.resultset:
        dbNames.append(string.lower(dbName['DATABASE_NAME']))

    return dbNames
[ "def", "getDbNames", "(", "self", ")", ":", "request", "=", "[", "]", "request", ".", "append", "(", "uu", "(", "{", "'-dbnames'", ":", "''", "}", ")", ")", "result", "=", "self", ".", "_doRequest", "(", "request", ")", "result", "=", "FMResultset", ".", "FMResultset", "(", "result", ")", "dbNames", "=", "[", "]", "for", "dbName", "in", "result", ".", "resultset", ":", "dbNames", ".", "append", "(", "string", ".", "lower", "(", "dbName", "[", "'DATABASE_NAME'", "]", ")", ")", "return", "dbNames" ]
This function returns the list of open databases
[ "This", "function", "returns", "the", "list", "of", "open", "databases" ]
python
train
plandes/actioncli
src/python/zensols/actioncli/factory.py
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/factory.py#L133-L141
def _find_class(self, class_name):
    "Resolve the class from the name."
    classes = {}
    classes.update(globals())
    classes.update(self.INSTANCE_CLASSES)
    logger.debug(f'looking up class: {class_name}')
    cls = classes[class_name]
    logger.debug(f'found class: {cls}')
    return cls
[ "def", "_find_class", "(", "self", ",", "class_name", ")", ":", "classes", "=", "{", "}", "classes", ".", "update", "(", "globals", "(", ")", ")", "classes", ".", "update", "(", "self", ".", "INSTANCE_CLASSES", ")", "logger", ".", "debug", "(", "f'looking up class: {class_name}'", ")", "cls", "=", "classes", "[", "class_name", "]", "logger", ".", "debug", "(", "f'found class: {cls}'", ")", "return", "cls" ]
Resolve the class from the name.
[ "Resolve", "the", "class", "from", "the", "name", "." ]
python
train
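_find_class above resolves a class object from its name by merging globals() with a registry dict. The same idea in isolation (Widget and REGISTRY are made-up names for the sketch):

class Widget:
    pass

REGISTRY = {'Mapping': dict}  # assumed extra name -> class registry

def find_class(class_name):
    """Resolve a class object from its name, as in _find_class above."""
    classes = {}
    classes.update(globals())   # everything defined at module level
    classes.update(REGISTRY)    # explicit registry overrides on clashes
    return classes[class_name]  # raises KeyError for unknown names

assert find_class('Widget') is Widget
assert find_class('Mapping') is dict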
pypa/pipenv
pipenv/vendor/requests/utils.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requests/utils.py#L259-L281
def from_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. Unless it can not be represented as such, return an
    OrderedDict, e.g.,

    ::

        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list('string')
        ValueError: cannot encode objects that are not 2-tuples
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])

    :rtype: OrderedDict
    """
    if value is None:
        return None

    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')

    return OrderedDict(value)
[ "def", "from_key_val_list", "(", "value", ")", ":", "if", "value", "is", "None", ":", "return", "None", "if", "isinstance", "(", "value", ",", "(", "str", ",", "bytes", ",", "bool", ",", "int", ")", ")", ":", "raise", "ValueError", "(", "'cannot encode objects that are not 2-tuples'", ")", "return", "OrderedDict", "(", "value", ")" ]
Take an object and test to see if it can be represented as a dictionary. Unless it can not be represented as such, return an OrderedDict, e.g.,

::

    >>> from_key_val_list([('key', 'val')])
    OrderedDict([('key', 'val')])
    >>> from_key_val_list('string')
    ValueError: cannot encode objects that are not 2-tuples
    >>> from_key_val_list({'key': 'val'})
    OrderedDict([('key', 'val')])

:rtype: OrderedDict
[ "Take", "an", "object", "and", "test", "to", "see", "if", "it", "can", "be", "represented", "as", "a", "dictionary", ".", "Unless", "it", "can", "not", "be", "represented", "as", "such", "return", "an", "OrderedDict", "e", ".", "g", "." ]
python
train
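As the doctest shows, the helper just rejects scalar types and delegates to OrderedDict, which accepts both mappings and iterables of 2-tuples; a quick check of that delegated behavior:

from collections import OrderedDict

print(OrderedDict([('key', 'val')]))  # from a list of 2-tuples
print(OrderedDict({'key': 'val'}))    # from an existing mapping
# OrderedDict('string') would raise ValueError on its own (each character
# is a 1-element sequence, not a 2-tuple), but with a confusing message;
# the explicit isinstance() guard above gives the clearer error instead.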
shveenkov/aiotarantool-queue-python
aiotarantool_queue/queue.py
https://github.com/shveenkov/aiotarantool-queue-python/blob/b84a1e704f63f7b8cb14cbca5ec99ab8047d1715/aiotarantool_queue/queue.py#L151-L155
def cmd(self, cmd_name):
    """
    Returns tarantool queue command name for current tube.
    """
    return "{0}.tube.{1}:{2}".format(self.queue.lua_queue_name, self.name, cmd_name)
[ "def", "cmd", "(", "self", ",", "cmd_name", ")", ":", "return", "\"{0}.tube.{1}:{2}\"", ".", "format", "(", "self", ".", "queue", ".", "lua_queue_name", ",", "self", ".", "name", ",", "cmd_name", ")" ]
Returns tarantool queue command name for current tube.
[ "Returns", "tarantool", "queue", "command", "name", "for", "current", "tube", "." ]
python
train
dhylands/rshell
rshell/main.py
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L354-L361
def escape(str):
    """Precede all special characters with a backslash."""
    out = ''
    for char in str:
        if char in '\\ ':
            out += '\\'
        out += char
    return out
[ "def", "escape", "(", "str", ")", ":", "out", "=", "''", "for", "char", "in", "str", ":", "if", "char", "in", "'\\\\ '", ":", "out", "+=", "'\\\\'", "out", "+=", "char", "return", "out" ]
Precede all special characters with a backslash.
[ "Precede", "all", "special", "characters", "with", "a", "backslash", "." ]
python
train
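escape above treats only backslash and space as special. A behavior check (the function is restated verbatim so the snippet runs on its own):

def escape(s):
    """Precede backslashes and spaces with a backslash."""
    out = ''
    for char in s:
        if char in '\\ ':
            out += '\\'
        out += char
    return out

assert escape('my file.txt') == 'my\\ file.txt'  # space gets escaped
assert escape('a\\b') == 'a\\\\b'                # backslash is doubled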
googleads/googleads-python-lib
googleads/common.py
https://github.com/googleads/googleads-python-lib/blob/aa3b1b474b0f9789ca55ca46f4b2b57aeae38874/googleads/common.py#L348-L376
def _ExtractProxyConfig(product_yaml_key, proxy_config_data):
    """Returns an initialized ProxyConfig using the given proxy_config_data.

    Args:
        product_yaml_key: a string indicating the client being loaded.
        proxy_config_data: a dict containing the contents of proxy_config from
            the YAML file.

    Returns:
        If there is a proxy to configure in proxy_config, this will return a
        ProxyConfig instance with those settings. Otherwise, it will return
        None.

    Raises:
        A GoogleAdsValueError if one of the required keys specified by
        _PROXY_KEYS is missing.
    """
    cafile = proxy_config_data.get('cafile', None)
    disable_certificate_validation = proxy_config_data.get(
        'disable_certificate_validation', False)
    http_proxy = proxy_config_data.get(_HTTP_PROXY_YAML_KEY)
    https_proxy = proxy_config_data.get(_HTTPS_PROXY_YAML_KEY)
    proxy_config = ProxyConfig(
        http_proxy=http_proxy,
        https_proxy=https_proxy,
        cafile=cafile,
        disable_certificate_validation=disable_certificate_validation)

    return proxy_config
[ "def", "_ExtractProxyConfig", "(", "product_yaml_key", ",", "proxy_config_data", ")", ":", "cafile", "=", "proxy_config_data", ".", "get", "(", "'cafile'", ",", "None", ")", "disable_certificate_validation", "=", "proxy_config_data", ".", "get", "(", "'disable_certificate_validation'", ",", "False", ")", "http_proxy", "=", "proxy_config_data", ".", "get", "(", "_HTTP_PROXY_YAML_KEY", ")", "https_proxy", "=", "proxy_config_data", ".", "get", "(", "_HTTPS_PROXY_YAML_KEY", ")", "proxy_config", "=", "ProxyConfig", "(", "http_proxy", "=", "http_proxy", ",", "https_proxy", "=", "https_proxy", ",", "cafile", "=", "cafile", ",", "disable_certificate_validation", "=", "disable_certificate_validation", ")", "return", "proxy_config" ]
Returns an initialized ProxyConfig using the given proxy_config_data.

Args:
    product_yaml_key: a string indicating the client being loaded.
    proxy_config_data: a dict containing the contents of proxy_config from the YAML file.

Returns:
    If there is a proxy to configure in proxy_config, this will return a ProxyConfig instance with those settings. Otherwise, it will return None.

Raises:
    A GoogleAdsValueError if one of the required keys specified by _PROXY_KEYS is missing.
[ "Returns", "an", "initialized", "ProxyConfig", "using", "the", "given", "proxy_config_data", "." ]
python
train
aerkalov/ebooklib
ebooklib/epub.py
https://github.com/aerkalov/ebooklib/blob/305f2dd7f02923ffabf9586a5d16266113d00c4a/ebooklib/epub.py#L319-L326
def get_links_of_type(self, link_type):
    """
    Returns list of additional links of specific type.

    :Returns:
      As tuple returns list of links.
    """
    return (link for link in self.links if link.get('type', '') == link_type)
[ "def", "get_links_of_type", "(", "self", ",", "link_type", ")", ":", "return", "(", "link", "for", "link", "in", "self", ".", "links", "if", "link", ".", "get", "(", "'type'", ",", "''", ")", "==", "link_type", ")" ]
Returns list of additional links of specific type. :Returns: As tuple returns list of links.
[ "Returns", "list", "of", "additional", "links", "of", "specific", "type", "." ]
python
train
gem/oq-engine
openquake/hazardlib/correlation.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/correlation.py#L92-L113
def get_lower_triangle_correlation_matrix(self, sites, imt):
    """
    Get lower-triangle matrix as a result of Cholesky-decomposition
    of correlation matrix.

    The resulting matrix should have zeros on values above
    the main diagonal.

    The actual implementations of :class:`BaseCorrelationModel` interface
    might calculate the matrix considering site collection and IMT (like
    :class:`JB2009CorrelationModel` does) or might have it pre-constructed
    for a specific site collection and IMT, in which case they will need
    to make sure that parameters to this function match parameters that
    were used to pre-calculate decomposed correlation matrix.

    :param sites:
        :class:`~openquake.hazardlib.site.SiteCollection` to create
        correlation matrix for.
    :param imt:
        Intensity measure type object, see :mod:`openquake.hazardlib.imt`.
    """
    return numpy.linalg.cholesky(self._get_correlation_matrix(sites, imt))
[ "def", "get_lower_triangle_correlation_matrix", "(", "self", ",", "sites", ",", "imt", ")", ":", "return", "numpy", ".", "linalg", ".", "cholesky", "(", "self", ".", "_get_correlation_matrix", "(", "sites", ",", "imt", ")", ")" ]
Get lower-triangle matrix as a result of Cholesky-decomposition of correlation matrix.

The resulting matrix should have zeros on values above the main diagonal.

The actual implementations of :class:`BaseCorrelationModel` interface might calculate the matrix considering site collection and IMT (like :class:`JB2009CorrelationModel` does) or might have it pre-constructed for a specific site collection and IMT, in which case they will need to make sure that parameters to this function match parameters that were used to pre-calculate decomposed correlation matrix.

:param sites:
    :class:`~openquake.hazardlib.site.SiteCollection` to create correlation matrix for.
:param imt:
    Intensity measure type object, see :mod:`openquake.hazardlib.imt`.
[ "Get", "lower", "-", "triangle", "matrix", "as", "a", "result", "of", "Cholesky", "-", "decomposition", "of", "correlation", "matrix", "." ]
python
train
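The Cholesky factor L of a correlation matrix C (C = L L^T) is the standard tool for turning independent samples into correlated ones, which is why ground-motion correlation models expose it. A minimal numpy sketch of that use, on an assumed toy 3-site correlation matrix (not the JB2009 model itself):

import numpy

# Assumed toy correlation matrix for three sites: symmetric,
# positive definite, ones on the diagonal.
corr = numpy.array([[1.0, 0.6, 0.3],
                    [0.6, 1.0, 0.6],
                    [0.3, 0.6, 1.0]])

lower = numpy.linalg.cholesky(corr)           # zeros above the main diagonal
assert numpy.allclose(lower @ lower.T, corr)

# Independent standard normal draws -> correlated draws.
rng = numpy.random.default_rng(0)
samples = lower @ rng.standard_normal((3, 10000))
print(numpy.corrcoef(samples))                # approximately `corr`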
kakwa/ldapcherry
ldapcherry/__init__.py
https://github.com/kakwa/ldapcherry/blob/b5e7cb6a44065abc30d164e72981b3713a172dda/ldapcherry/__init__.py#L231-L276
def _set_access_log(self, config, level):
    """ Configure access logs """
    access_handler = self._get_param(
        'global',
        'log.access_handler',
        config,
        'syslog',
    )

    # log format for syslog
    syslog_formatter = logging.Formatter(
        "ldapcherry[%(process)d]: %(message)s"
    )

    # replace access log handler by a syslog handler
    if access_handler == 'syslog':
        cherrypy.log.access_log.handlers = []
        handler = logging.handlers.SysLogHandler(
            address='/dev/log',
            facility='user',
        )
        handler.setFormatter(syslog_formatter)
        cherrypy.log.access_log.addHandler(handler)

    # if stdout, open a logger on stdout
    elif access_handler == 'stdout':
        cherrypy.log.access_log.handlers = []
        handler = logging.StreamHandler(sys.stdout)
        formatter = logging.Formatter(
            'ldapcherry.access - %(levelname)s - %(message)s'
        )
        handler.setFormatter(formatter)
        cherrypy.log.access_log.addHandler(handler)

    # if file, we keep the default
    elif access_handler == 'file':
        pass

    # replace access log handler by a null handler
    elif access_handler == 'none':
        cherrypy.log.access_log.handlers = []
        handler = logging.NullHandler()
        cherrypy.log.access_log.addHandler(handler)

    # set log level
    cherrypy.log.access_log.setLevel(level)
[ "def", "_set_access_log", "(", "self", ",", "config", ",", "level", ")", ":", "access_handler", "=", "self", ".", "_get_param", "(", "'global'", ",", "'log.access_handler'", ",", "config", ",", "'syslog'", ",", ")", "# log format for syslog", "syslog_formatter", "=", "logging", ".", "Formatter", "(", "\"ldapcherry[%(process)d]: %(message)s\"", ")", "# replace access log handler by a syslog handler", "if", "access_handler", "==", "'syslog'", ":", "cherrypy", ".", "log", ".", "access_log", ".", "handlers", "=", "[", "]", "handler", "=", "logging", ".", "handlers", ".", "SysLogHandler", "(", "address", "=", "'/dev/log'", ",", "facility", "=", "'user'", ",", ")", "handler", ".", "setFormatter", "(", "syslog_formatter", ")", "cherrypy", ".", "log", ".", "access_log", ".", "addHandler", "(", "handler", ")", "# if stdout, open a logger on stdout", "elif", "access_handler", "==", "'stdout'", ":", "cherrypy", ".", "log", ".", "access_log", ".", "handlers", "=", "[", "]", "handler", "=", "logging", ".", "StreamHandler", "(", "sys", ".", "stdout", ")", "formatter", "=", "logging", ".", "Formatter", "(", "'ldapcherry.access - %(levelname)s - %(message)s'", ")", "handler", ".", "setFormatter", "(", "formatter", ")", "cherrypy", ".", "log", ".", "access_log", ".", "addHandler", "(", "handler", ")", "# if file, we keep the default", "elif", "access_handler", "==", "'file'", ":", "pass", "# replace access log handler by a null handler", "elif", "access_handler", "==", "'none'", ":", "cherrypy", ".", "log", ".", "access_log", ".", "handlers", "=", "[", "]", "handler", "=", "logging", ".", "NullHandler", "(", ")", "cherrypy", ".", "log", ".", "access_log", ".", "addHandler", "(", "handler", ")", "# set log level", "cherrypy", ".", "log", ".", "access_log", ".", "setLevel", "(", "level", ")" ]
Configure access logs
[ "Configure", "access", "logs" ]
python
train
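The handler-swapping pattern above is plain stdlib logging; the same idea independent of CherryPy, as a sketch (the 'demo.access' logger name is made up):

import logging
import sys

def set_handler(log, kind):
    """Point `log` at stdout, a null sink, or leave the default."""
    if kind == 'stdout':
        log.handlers = []  # drop whatever handlers were attached before
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(
            logging.Formatter('%(name)s - %(levelname)s - %(message)s'))
        log.addHandler(handler)
    elif kind == 'none':
        log.handlers = []
        log.addHandler(logging.NullHandler())

log = logging.getLogger('demo.access')
set_handler(log, 'stdout')
log.setLevel(logging.INFO)
log.info('hello')  # printed to stdout with the formatter above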
klen/makesite
makesite/install.py
https://github.com/klen/makesite/blob/f6f77a43a04a256189e8fffbeac1ffd63f35a10c/makesite/install.py#L102-L120
def _get_source(self):
    " Get source from CVS or filepath. "
    source_dir = op.join(self.deploy_dir, 'source')

    for tp, cmd in settings.SRC_CLONE:
        if self.src.startswith(tp + '+'):
            program = which(tp)
            assert program, '%s not found.' % tp
            cmd = cmd % dict(src=self.src[len(tp) + 1:],
                             source_dir=source_dir, branch=self.branch)
            cmd = "sudo -u %s %s" % (self['src_user'], cmd)
            call(cmd, shell=True)
            self.templates.append('src-%s' % tp)
            break
    else:
        self.templates.append('src-dir')
        copytree(self.src, source_dir)

    return source_dir
[ "def", "_get_source", "(", "self", ")", ":", "source_dir", "=", "op", ".", "join", "(", "self", ".", "deploy_dir", ",", "'source'", ")", "for", "tp", ",", "cmd", "in", "settings", ".", "SRC_CLONE", ":", "if", "self", ".", "src", ".", "startswith", "(", "tp", "+", "'+'", ")", ":", "program", "=", "which", "(", "tp", ")", "assert", "program", ",", "'%s not found.'", "%", "tp", "cmd", "=", "cmd", "%", "dict", "(", "src", "=", "self", ".", "src", "[", "len", "(", "tp", ")", "+", "1", ":", "]", ",", "source_dir", "=", "source_dir", ",", "branch", "=", "self", ".", "branch", ")", "cmd", "=", "\"sudo -u %s %s\"", "%", "(", "self", "[", "'src_user'", "]", ",", "cmd", ")", "call", "(", "cmd", ",", "shell", "=", "True", ")", "self", ".", "templates", ".", "append", "(", "'src-%s'", "%", "tp", ")", "break", "else", ":", "self", ".", "templates", ".", "append", "(", "'src-dir'", ")", "copytree", "(", "self", ".", "src", ",", "source_dir", ")", "return", "source_dir" ]
Get source from CVS or filepath.
[ "Get", "source", "from", "CVS", "or", "filepath", "." ]
python
train
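_get_source above relies on Python's for/else: the else branch runs only when the loop completes without hitting break. A tiny self-contained demonstration of that control flow (the prefixes and URL are made up):

def pick(prefixes, src):
    """for/else: the else block runs only if the loop was not break-ed."""
    for p in prefixes:
        if src.startswith(p + '+'):
            choice = 'clone via ' + p
            break
    else:
        choice = 'copy from directory'
    return choice

print(pick(['git', 'hg', 'svn'], 'git+https://example.org/repo'))  # clone via git
print(pick(['git', 'hg', 'svn'], '/local/path'))                   # copy from directory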
a1ezzz/wasp-general
wasp_general/task/thread.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/task/thread.py#L421-L427
def thread_stopped(self):
    """ :meth:`.WThreadTask._polling_iteration` implementation
    """
    if self.__current_task is not None:
        task = self.__task_chain[self.__current_task]
        task.stop()
        self.__current_task = None
[ "def", "thread_stopped", "(", "self", ")", ":", "if", "self", ".", "__current_task", "is", "not", "None", ":", "task", "=", "self", ".", "__task_chain", "[", "self", ".", "__current_task", "]", "task", ".", "stop", "(", ")", "self", ".", "__current_task", "=", "None" ]
:meth:`.WThreadTask._polling_iteration` implementation
[ ":", "meth", ":", ".", "WThreadTask", ".", "_polling_iteration", "implementation" ]
python
train
waqasbhatti/astrobase
astrobase/lcproc/lcvfeatures.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcproc/lcvfeatures.py#L376-L454
def parallel_varfeatures(lclist, outdir, maxobjects=None,
                         timecols=None, magcols=None, errcols=None,
                         mindet=1000, lcformat='hat-sql', lcformatdir=None,
                         nworkers=NCPUS):
    '''This runs variable feature extraction in parallel for all LCs in
    `lclist`.

    Parameters
    ----------

    lclist : list of str
        The list of light curve file names to process.

    outdir : str
        The directory where the output varfeatures pickle files will be
        written.

    maxobjects : int
        The number of LCs to process from `lclist`.

    timecols : list of str or None
        The timecol keys to use from the lcdict in calculating the features.

    magcols : list of str or None
        The magcol keys to use from the lcdict in calculating the features.

    errcols : list of str or None
        The errcol keys to use from the lcdict in calculating the features.

    mindet : int
        The minimum number of LC points required to generate variability
        features.

    lcformat : str
        This is the `formatkey` associated with your light curve format, which
        you previously passed in to the `lcproc.register_lcformat`
        function. This will be used to look up how to find and read the light
        curves specified in `basedir` or `use_list_of_filenames`.

    lcformatdir : str or None
        If this is provided, gives the path to a directory when you've stored
        your lcformat description JSONs, other than the usual directories
        lcproc knows to search for them in. Use this along with `lcformat` to
        specify an LC format JSON file that's not currently registered with
        lcproc.

    nworkers : int
        The number of parallel workers to launch.

    Returns
    -------

    dict
        A dict with key:val pairs of input LC file name : the generated
        variability features pickles for each of the input LCs, with results
        for each magcol in the input `magcol` or light curve format's default
        `magcol` list.

    '''
    # make sure to make the output directory if it doesn't exist
    if not os.path.exists(outdir):
        os.makedirs(outdir)

    if maxobjects:
        lclist = lclist[:maxobjects]

    tasks = [(x, outdir, timecols, magcols, errcols, mindet,
              lcformat, lcformatdir) for x in lclist]

    with ProcessPoolExecutor(max_workers=nworkers) as executor:
        resultfutures = executor.map(varfeatures_worker, tasks)

    results = [x for x in resultfutures]
    resdict = {os.path.basename(x):y for (x,y) in zip(lclist, results)}

    return resdict
[ "def", "parallel_varfeatures", "(", "lclist", ",", "outdir", ",", "maxobjects", "=", "None", ",", "timecols", "=", "None", ",", "magcols", "=", "None", ",", "errcols", "=", "None", ",", "mindet", "=", "1000", ",", "lcformat", "=", "'hat-sql'", ",", "lcformatdir", "=", "None", ",", "nworkers", "=", "NCPUS", ")", ":", "# make sure to make the output directory if it doesn't exist", "if", "not", "os", ".", "path", ".", "exists", "(", "outdir", ")", ":", "os", ".", "makedirs", "(", "outdir", ")", "if", "maxobjects", ":", "lclist", "=", "lclist", "[", ":", "maxobjects", "]", "tasks", "=", "[", "(", "x", ",", "outdir", ",", "timecols", ",", "magcols", ",", "errcols", ",", "mindet", ",", "lcformat", ",", "lcformatdir", ")", "for", "x", "in", "lclist", "]", "with", "ProcessPoolExecutor", "(", "max_workers", "=", "nworkers", ")", "as", "executor", ":", "resultfutures", "=", "executor", ".", "map", "(", "varfeatures_worker", ",", "tasks", ")", "results", "=", "[", "x", "for", "x", "in", "resultfutures", "]", "resdict", "=", "{", "os", ".", "path", ".", "basename", "(", "x", ")", ":", "y", "for", "(", "x", ",", "y", ")", "in", "zip", "(", "lclist", ",", "results", ")", "}", "return", "resdict" ]
This runs variable feature extraction in parallel for all LCs in `lclist`.

Parameters
----------

lclist : list of str
    The list of light curve file names to process.

outdir : str
    The directory where the output varfeatures pickle files will be written.

maxobjects : int
    The number of LCs to process from `lclist`.

timecols : list of str or None
    The timecol keys to use from the lcdict in calculating the features.

magcols : list of str or None
    The magcol keys to use from the lcdict in calculating the features.

errcols : list of str or None
    The errcol keys to use from the lcdict in calculating the features.

mindet : int
    The minimum number of LC points required to generate variability features.

lcformat : str
    This is the `formatkey` associated with your light curve format, which you previously passed in to the `lcproc.register_lcformat` function. This will be used to look up how to find and read the light curves specified in `basedir` or `use_list_of_filenames`.

lcformatdir : str or None
    If this is provided, gives the path to a directory when you've stored your lcformat description JSONs, other than the usual directories lcproc knows to search for them in. Use this along with `lcformat` to specify an LC format JSON file that's not currently registered with lcproc.

nworkers : int
    The number of parallel workers to launch.

Returns
-------

dict
    A dict with key:val pairs of input LC file name : the generated variability features pickles for each of the input LCs, with results for each magcol in the input `magcol` or light curve format's default `magcol` list.
[ "This", "runs", "variable", "feature", "extraction", "in", "parallel", "for", "all", "LCs", "in", "lclist", "." ]
python
valid
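The parallel pattern in parallel_varfeatures is a plain ProcessPoolExecutor.map over pre-built task tuples. The skeleton on its own, with a stand-in worker (varfeatures_worker itself lives in astrobase; the file names here are invented):

from concurrent.futures import ProcessPoolExecutor
import os

def worker(task):
    """Stand-in for varfeatures_worker: unpack the tuple, do the work."""
    path, outdir = task
    return len(path)  # placeholder result

if __name__ == '__main__':
    lclist = ['lc-0001.fits', 'lc-0002.fits']
    tasks = [(x, '/tmp/out') for x in lclist]
    # Each task tuple arrives at the worker as a single argument.
    with ProcessPoolExecutor(max_workers=2) as executor:
        results = list(executor.map(worker, tasks))
    resdict = {os.path.basename(x): y for (x, y) in zip(lclist, results)}
    print(resdict)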
connectordb/connectordb-python
connectordb/_stream.py
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_stream.py#L162-L173
def unsubscribe(self, transform="", downlink=False):
    """Unsubscribes from a previously subscribed stream. Note that the same
    values of transform and downlink must be passed in order to do the correct
    unsubscribe::

        s.subscribe(callback, transform="if last")
        s.unsubscribe(transform="if last")
    """
    streampath = self.path
    if downlink:
        streampath += "/downlink"
    return self.db.unsubscribe(streampath, transform)
[ "def", "unsubscribe", "(", "self", ",", "transform", "=", "\"\"", ",", "downlink", "=", "False", ")", ":", "streampath", "=", "self", ".", "path", "if", "downlink", ":", "streampath", "+=", "\"/downlink\"", "return", "self", ".", "db", ".", "unsubscribe", "(", "streampath", ",", "transform", ")" ]
Unsubscribes from a previously subscribed stream. Note that the same values of transform and downlink must be passed in order to do the correct unsubscribe:: s.subscribe(callback,transform="if last") s.unsubscribe(transform="if last")
[ "Unsubscribes", "from", "a", "previously", "subscribed", "stream", ".", "Note", "that", "the", "same", "values", "of", "transform", "and", "downlink", "must", "be", "passed", "in", "order", "to", "do", "the", "correct", "unsubscribe", "::" ]
python
test
rocky/python3-trepan
trepan/lib/disassemble.py
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/lib/disassemble.py#L35-L128
def dis(msg, msg_nocr, section, errmsg, x=None, start_line=-1, end_line=None,
        relative_pos=False, highlight='light', start_offset=0,
        end_offset=None, include_header=False):
    """Disassemble classes, methods, functions, or code.

    With no argument, disassemble the last traceback.
    """
    lasti = -1
    if x is None:
        distb()
        return None, None
    if start_offset is None:
        start_offset = 0
    mess = ''
    if start_line > 1:
        mess += "from line %d " % start_line
    elif start_offset > 1:
        mess = "from offset %d " % start_offset
    if end_line:
        mess += "to line %d" % end_line
    elif end_offset:
        mess += "to offset %d" % end_offset

    sectioned = False

    # Try to dogpaddle to the code object for the type setting x
    if hasattr(types, 'InstanceType') and isinstance(x, types.InstanceType):
        x = x.__class__
    if inspect.ismethod(x):
        section("Disassembly of %s: %s" % (x, mess))
        sectioned = True
        x = x.im_func
    elif inspect.isfunction(x) or inspect.isgeneratorfunction(x):
        section("Disassembly of %s: %s" % (x, mess))
        x = x.func_code
        sectioned = True
    elif inspect.isgenerator(x):
        section("Disassembly of %s: %s" % (x, mess))
        frame = x.gi_frame
        lasti = frame.f_last_i
        x = x.gi_code
        sectioned = True
    elif inspect.isframe(x):
        section("Disassembly of %s: %s" % (x, mess))
        sectioned = True
        if hasattr(x, 'f_lasti'):
            lasti = x.f_lasti
            if lasti == -1:
                lasti = 0
            pass
        opc = get_opcode(PYTHON_VERSION, IS_PYPY)
        x = x.f_code
        if include_header:
            header_lines = Bytecode(x, opc).info().split("\n")
            header = '\n'.join([format_token(Mformat.Comment, h)
                                for h in header_lines])
            msg(header)
            pass
    elif inspect.iscode(x):
        pass

    if hasattr(x, '__dict__'):  # Class or module
        items = sorted(x.__dict__.items())
        for name, x1 in items:
            if isinstance(x1, _have_code):
                if not sectioned:
                    section("Disassembly of %s: " % x)
                try:
                    dis(msg, msg_nocr, section, errmsg, x1,
                        start_line=start_line, end_line=end_line,
                        relative_pos=relative_pos)
                    msg("")
                except TypeError:
                    _, msg, _ = sys.exc_info()
                    errmsg("Sorry:", msg)
                    pass
                pass
            pass
        pass
    elif hasattr(x, 'co_code'):  # Code object
        if not sectioned:
            section("Disassembly of %s: " % x)
        return disassemble(msg, msg_nocr, section, x, lasti=lasti,
                           start_line=start_line, end_line=end_line,
                           relative_pos=relative_pos, highlight=highlight,
                           start_offset=start_offset, end_offset=end_offset)
    elif isinstance(x, str):  # Source code
        return disassemble_string(msg, msg_nocr, x,)
    else:
        errmsg("Don't know how to disassemble %s objects." %
               type(x).__name__)
    return None, None
[ "def", "dis", "(", "msg", ",", "msg_nocr", ",", "section", ",", "errmsg", ",", "x", "=", "None", ",", "start_line", "=", "-", "1", ",", "end_line", "=", "None", ",", "relative_pos", "=", "False", ",", "highlight", "=", "'light'", ",", "start_offset", "=", "0", ",", "end_offset", "=", "None", ",", "include_header", "=", "False", ")", ":", "lasti", "=", "-", "1", "if", "x", "is", "None", ":", "distb", "(", ")", "return", "None", ",", "None", "if", "start_offset", "is", "None", ":", "start_offset", "=", "0", "mess", "=", "''", "if", "start_line", ">", "1", ":", "mess", "+=", "\"from line %d \"", "%", "start_line", "elif", "start_offset", ">", "1", ":", "mess", "=", "\"from offset %d \"", "%", "start_offset", "if", "end_line", ":", "mess", "+=", "\"to line %d\"", "%", "end_line", "elif", "end_offset", ":", "mess", "+=", "\"to offset %d\"", "%", "end_offset", "sectioned", "=", "False", "# Try to dogpaddle to the code object for the type setting x", "if", "hasattr", "(", "types", ",", "'InstanceType'", ")", "and", "isinstance", "(", "x", ",", "types", ".", "InstanceType", ")", ":", "x", "=", "x", ".", "__class__", "if", "inspect", ".", "ismethod", "(", "x", ")", ":", "section", "(", "\"Disassembly of %s: %s\"", "%", "(", "x", ",", "mess", ")", ")", "sectioned", "=", "True", "x", "=", "x", ".", "im_func", "elif", "inspect", ".", "isfunction", "(", "x", ")", "or", "inspect", ".", "isgeneratorfunction", "(", "x", ")", ":", "section", "(", "\"Disassembly of %s: %s\"", "%", "(", "x", ",", "mess", ")", ")", "x", "=", "x", ".", "func_code", "sectioned", "=", "True", "elif", "inspect", ".", "isgenerator", "(", "x", ")", ":", "section", "(", "\"Disassembly of %s: %s\"", "%", "(", "x", ",", "mess", ")", ")", "frame", "=", "x", ".", "gi_frame", "lasti", "=", "frame", ".", "f_last_i", "x", "=", "x", ".", "gi_code", "sectioned", "=", "True", "elif", "inspect", ".", "isframe", "(", "x", ")", ":", "section", "(", "\"Disassembly of %s: %s\"", "%", "(", "x", ",", "mess", ")", ")", "sectioned", "=", "True", "if", "hasattr", "(", "x", ",", "'f_lasti'", ")", ":", "lasti", "=", "x", ".", "f_lasti", "if", "lasti", "==", "-", "1", ":", "lasti", "=", "0", "pass", "opc", "=", "get_opcode", "(", "PYTHON_VERSION", ",", "IS_PYPY", ")", "x", "=", "x", ".", "f_code", "if", "include_header", ":", "header_lines", "=", "Bytecode", "(", "x", ",", "opc", ")", ".", "info", "(", ")", ".", "split", "(", "\"\\n\"", ")", "header", "=", "'\\n'", ".", "join", "(", "[", "format_token", "(", "Mformat", ".", "Comment", ",", "h", ")", "for", "h", "in", "header_lines", "]", ")", "msg", "(", "header", ")", "pass", "elif", "inspect", ".", "iscode", "(", "x", ")", ":", "pass", "if", "hasattr", "(", "x", ",", "'__dict__'", ")", ":", "# Class or module", "items", "=", "sorted", "(", "x", ".", "__dict__", ".", "items", "(", ")", ")", "for", "name", ",", "x1", "in", "items", ":", "if", "isinstance", "(", "x1", ",", "_have_code", ")", ":", "if", "not", "sectioned", ":", "section", "(", "\"Disassembly of %s: \"", "%", "x", ")", "try", ":", "dis", "(", "msg", ",", "msg_nocr", ",", "section", ",", "errmsg", ",", "x1", ",", "start_line", "=", "start_line", ",", "end_line", "=", "end_line", ",", "relative_pos", "=", "relative_pos", ")", "msg", "(", "\"\"", ")", "except", "TypeError", ":", "_", ",", "msg", ",", "_", "=", "sys", ".", "exc_info", "(", ")", "errmsg", "(", "\"Sorry:\"", ",", "msg", ")", "pass", "pass", "pass", "pass", "elif", "hasattr", "(", "x", ",", "'co_code'", ")", ":", "# Code object", "if", "not", "sectioned", ":", "section", "(", 
"\"Disassembly of %s: \"", "%", "x", ")", "return", "disassemble", "(", "msg", ",", "msg_nocr", ",", "section", ",", "x", ",", "lasti", "=", "lasti", ",", "start_line", "=", "start_line", ",", "end_line", "=", "end_line", ",", "relative_pos", "=", "relative_pos", ",", "highlight", "=", "highlight", ",", "start_offset", "=", "start_offset", ",", "end_offset", "=", "end_offset", ")", "elif", "isinstance", "(", "x", ",", "str", ")", ":", "# Source code", "return", "disassemble_string", "(", "msg", ",", "msg_nocr", ",", "x", ",", ")", "else", ":", "errmsg", "(", "\"Don't know how to disassemble %s objects.\"", "%", "type", "(", "x", ")", ".", "__name__", ")", "return", "None", ",", "None" ]
Disassemble classes, methods, functions, or code. With no argument, disassemble the last traceback.
[ "Disassemble", "classes", "methods", "functions", "or", "code", "." ]
python
test
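trepan's dis wraps its own cross-version disassembler; for comparison, the stdlib dis module covers the common cases (functions, methods, code objects, source strings) with a much smaller surface:

import dis

def add(a, b):
    return a + b

dis.dis(add)          # disassemble a function's bytecode
dis.dis("x = 1 + 2")  # a source string is compiled, then disassembled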
nderkach/airbnb-python
airbnb/api.py
https://github.com/nderkach/airbnb-python/blob/0b3ed69518e41383eca93ae11b24247f3cc69a27/airbnb/api.py#L55-L64
def randomizable(function):
    """
    A decorator which randomizes requests if needed
    """
    @functools.wraps(function)
    def wrapper(self, *args, **kwargs):
        if self.randomize:
            self.randomize_headers()
        return function(self, *args, **kwargs)
    return wrapper
[ "def", "randomizable", "(", "function", ")", ":", "@", "functools", ".", "wraps", "(", "function", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "randomize", ":", "self", ".", "randomize_headers", "(", ")", "return", "function", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
A decorator which randomizes requests if needed
[ "A", "decorator", "which", "randomizes", "requests", "if", "needed" ]
python
train
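randomizable is the standard functools.wraps decorator pattern for methods. A self-contained sketch of the same shape with a toy class (Client, get, and the printed message are made up; randomize_headers is just a stand-in for the method named in the record above):

import functools

def randomizable(function):
    """Run self.randomize_headers() before the call when enabled."""
    @functools.wraps(function)
    def wrapper(self, *args, **kwargs):
        if self.randomize:
            self.randomize_headers()
        return function(self, *args, **kwargs)
    return wrapper

class Client:
    def __init__(self, randomize=True):
        self.randomize = randomize

    def randomize_headers(self):
        print('headers shuffled')

    @randomizable
    def get(self, url):
        return 'GET ' + url

print(Client().get('/listings'))  # prints 'headers shuffled' first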
PMEAL/OpenPNM
openpnm/algorithms/ReactiveTransport.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/algorithms/ReactiveTransport.py#L54-L120
def setup(self, phase=None, quantity='', conductance='',
          r_tolerance=None, max_iter=None, relaxation_source=None,
          relaxation_quantity=None, **kwargs):
    r"""
    This method takes several arguments that are essential to running the
    algorithm and adds them to the settings

    Parameters
    ----------
    phase : OpenPNM Phase object
        The phase on which the algorithm is to be run. If no value is
        given, the existing value is kept.

    quantity : string
        The name of the physical quantity to be calculated such as
        ``'pore.xxx'``.

    conductance : string
        The name of the pore-scale transport conductance values. These
        are typically calculated by a model attached to a *Physics* object
        associated with the given *Phase*. Example; ``'throat.yyy'``.

    r_tolerance : scalar
        Tolerance to achieve. The solver returns a solution when 'residual'
        falls below 'r_tolerance'. The default value is 0.001.

    max_iter : scalar
        The maximum number of iterations the solver can perform to find
        a solution. The default value is 5000.

    relaxation_source : scalar, between 0 and 1
        A relaxation factor to control under-relaxation of the source term.
        Factor approaching 0 : improved stability but slow simulation.
        Factor approaching 1 : fast simulation but may be unstable.
        Default value is 1 (no under-relaxation).

    relaxation_quantity : scalar, between 0 and 1
        A relaxation factor to control under-relaxation for the quantity
        solving for.
        Factor approaching 0 : improved stability but slow simulation.
        Factor approaching 1 : fast simulation but may be unstable.
        Default value is 1 (no under-relaxation).

    Notes
    -----
    Under-relaxation is a technique used for improving stability of a
    computation, particularly in the presence of highly non-linear terms.
    Under-relaxation used here limits the change in a variable from one
    iteration to the next. An optimum choice of the relaxation factor is
    one that is small enough to ensure stable simulation and large enough
    to speed up the computation.
    """
    if phase:
        self.settings['phase'] = phase.name
    if quantity:
        self.settings['quantity'] = quantity
    if conductance:
        self.settings['conductance'] = conductance
    if r_tolerance:
        self.settings['r_tolerance'] = r_tolerance
    if max_iter:
        self.settings['max_iter'] = max_iter
    if relaxation_source:
        self.settings['relaxation_source'] = relaxation_source
    if relaxation_quantity:
        self.settings['relaxation_quantity'] = relaxation_quantity
    super().setup(**kwargs)
[ "def", "setup", "(", "self", ",", "phase", "=", "None", ",", "quantity", "=", "''", ",", "conductance", "=", "''", ",", "r_tolerance", "=", "None", ",", "max_iter", "=", "None", ",", "relaxation_source", "=", "None", ",", "relaxation_quantity", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "phase", ":", "self", ".", "settings", "[", "'phase'", "]", "=", "phase", ".", "name", "if", "quantity", ":", "self", ".", "settings", "[", "'quantity'", "]", "=", "quantity", "if", "conductance", ":", "self", ".", "settings", "[", "'conductance'", "]", "=", "conductance", "if", "r_tolerance", ":", "self", ".", "settings", "[", "'r_tolerance'", "]", "=", "r_tolerance", "if", "max_iter", ":", "self", ".", "settings", "[", "'max_iter'", "]", "=", "max_iter", "if", "relaxation_source", ":", "self", ".", "settings", "[", "'relaxation_source'", "]", "=", "relaxation_source", "if", "relaxation_quantity", ":", "self", ".", "settings", "[", "'relaxation_quantity'", "]", "=", "relaxation_quantity", "super", "(", ")", ".", "setup", "(", "*", "*", "kwargs", ")" ]
r""" This method takes several arguments that are essential to running the algorithm and adds them to the settings Parameters ---------- phase : OpenPNM Phase object The phase on which the algorithm is to be run. If no value is given, the existing value is kept. quantity : string The name of the physical quantity to be calcualted such as ``'pore.xxx'``. conductance : string The name of the pore-scale transport conductance values. These are typically calculated by a model attached to a *Physics* object associated with the given *Phase*. Example; ``'throat.yyy'``. r_tolerance : scalar Tolerance to achieve. The solver returns a solution when 'residual' falls below 'r_tolerance'. The default value is 0.001. max_iter : scalar The maximum number of iterations the solver can perform to find a solution. The default value is 5000. relaxation_source : scalar, between 0 and 1 A relaxation factor to control under-relaxation of the source term. Factor approaching 0 : improved stability but slow simulation. Factor approaching 1 : fast simulation but may be unstable. Default value is 1 (no under-relaxation). relaxation_quantity : scalar, between 0 and 1 A relaxation factor to control under-relaxation for the quantity solving for. Factor approaching 0 : improved stability but slow simulation. Factor approaching 1 : fast simulation but may be unstable. Default value is 1 (no under-relaxation). Notes ----- Under-relaxation is a technique used for improving stability of a computation, particularly in the presence of highly non-linear terms. Under-relaxation used here limits the change in a variable from one iteration to the next. An optimum choice of the relaxation factor is one that is small enough to ensure stable simulation and large enough to speed up the computation.
[ "r", "This", "method", "takes", "several", "arguments", "that", "are", "essential", "to", "running", "the", "algorithm", "and", "adds", "them", "to", "the", "settings" ]
python
train
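A minimal usage sketch for the setup() method above. The network and phase construction and the module paths follow common OpenPNM 2.x examples and are assumptions, not taken from this record.

# Illustrative only: op.network.Cubic, op.phases.Water and
# op.algorithms.ReactiveTransport are assumed from typical OpenPNM 2.x usage.
import openpnm as op

pn = op.network.Cubic(shape=[10, 10, 10])
water = op.phases.Water(network=pn)
alg = op.algorithms.ReactiveTransport(network=pn, phase=water)
# Arguments left at their defaults keep whatever value settings already holds.
alg.setup(phase=water,
          quantity='pore.concentration',
          conductance='throat.diffusive_conductance',
          r_tolerance=1e-4,
          max_iter=5000,
          relaxation_source=0.9,
          relaxation_quantity=0.9)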
accraze/pymtranslate
pymtranslate/translator.py
https://github.com/accraze/pymtranslate/blob/b18f9d7e8ef1583c988e8beb6c3304d362a4d979/pymtranslate/translator.py#L103-L179
def iterateEM(self, count):
    '''
    Iterate through all transmissions of English to foreign words.
    Keep count of repeated occurrences

    do until convergence
      set count(e|f) to 0 for all e,f
      set total(f) to 0 for all f
      for all sentence pairs (e_s,f_s)
        set total_s(e) = 0 for all e
        for all words e in e_s
          for all words f in f_s
            total_s(e) += t(e|f)
        for all words e in e_s
          for all words f in f_s
            count(e|f) += t(e|f) / total_s(e)
            total(f) += t(e|f) / total_s(e)
      for all f
        for all e
          t(e|f) = count(e|f) / total(f)
    '''
    for iter in range(count):
        countef = {}
        totalf = {}

        # set the count of the words to zero
        for word in self.en_words:
            if(word not in self.probs):
                continue
            word_probs = self.probs[word]
            count = dict([(w, 0) for w in word_probs])
            countef[word] = count
            totalf[word] = 0

        self.countef = countef
        self.totalf = totalf

        # NOW iterate over each word pair
        for (es, ds) in self.sent_pairs:
            es_split = es.split()
            ds_split = ds.split()

            for d in ds_split:
                self.totals[d] = 0
                for e in es_split:
                    if (e not in self.transmissions):
                        continue
                    e_trans = self.transmissions[e]
                    if (d not in e_trans):
                        continue
                    self.totals[d] += e_trans[d]

                # Get count(e|f) and total(f)
                for e in es_split:
                    if(e not in self.transmissions):
                        continue
                    if (d not in self.transmissions[e]):
                        continue
                    self.countef[e][d] += \
                        self.transmissions[e][d] / self.totals[d]
                    self.totalf[e] += \
                        self.transmissions[e][d] / self.totals[d]

        for e in self.en_words:
            if (e not in self.probs):
                continue
            e_prob = self.probs[e]
            for d in e_prob:
                self.transmissions[e][d] = \
                    self.countef[e][d] / self.totalf[e]
[ "def", "iterateEM", "(", "self", ",", "count", ")", ":", "for", "iter", "in", "range", "(", "count", ")", ":", "countef", "=", "{", "}", "totalf", "=", "{", "}", "# set the count of the words to zero", "for", "word", "in", "self", ".", "en_words", ":", "if", "(", "word", "not", "in", "self", ".", "probs", ")", ":", "continue", "word_probs", "=", "self", ".", "probs", "[", "word", "]", "count", "=", "dict", "(", "[", "(", "w", ",", "0", ")", "for", "w", "in", "word_probs", "]", ")", "countef", "[", "word", "]", "=", "count", "totalf", "[", "word", "]", "=", "0", "self", ".", "countef", "=", "countef", "self", ".", "totalf", "=", "totalf", "# NOW iterate over each word pair", "for", "(", "es", ",", "ds", ")", "in", "self", ".", "sent_pairs", ":", "es_split", "=", "es", ".", "split", "(", ")", "ds_split", "=", "ds", ".", "split", "(", ")", "for", "d", "in", "ds_split", ":", "self", ".", "totals", "[", "d", "]", "=", "0", "for", "e", "in", "es_split", ":", "if", "(", "e", "not", "in", "self", ".", "transmissions", ")", ":", "continue", "e_trans", "=", "self", ".", "transmissions", "[", "e", "]", "if", "(", "d", "not", "in", "e_trans", ")", ":", "continue", "self", ".", "totals", "[", "d", "]", "+=", "e_trans", "[", "d", "]", "# Get count(e|f) and total(f)", "for", "e", "in", "es_split", ":", "if", "(", "e", "not", "in", "self", ".", "transmissions", ")", ":", "continue", "if", "(", "d", "not", "in", "self", ".", "transmissions", "[", "e", "]", ")", ":", "continue", "self", ".", "countef", "[", "e", "]", "[", "d", "]", "+=", "self", ".", "transmissions", "[", "e", "]", "[", "d", "]", "/", "self", ".", "totals", "[", "d", "]", "self", ".", "totalf", "[", "e", "]", "+=", "self", ".", "transmissions", "[", "e", "]", "[", "d", "]", "/", "self", ".", "totals", "[", "d", "]", "for", "e", "in", "self", ".", "en_words", ":", "if", "(", "e", "not", "in", "self", ".", "probs", ")", ":", "continue", "e_prob", "=", "self", ".", "probs", "[", "e", "]", "for", "d", "in", "e_prob", ":", "self", ".", "transmissions", "[", "e", "]", "[", "d", "]", "=", "self", ".", "countef", "[", "e", "]", "[", "d", "]", "/", "self", ".", "totalf", "[", "e", "]" ]
Iterate through all transmissions of English to foreign words.
Keep count of repeated occurrences

do until convergence
  set count(e|f) to 0 for all e,f
  set total(f) to 0 for all f
  for all sentence pairs (e_s,f_s)
    set total_s(e) = 0 for all e
    for all words e in e_s
      for all words f in f_s
        total_s(e) += t(e|f)
    for all words e in e_s
      for all words f in f_s
        count(e|f) += t(e|f) / total_s(e)
        total(f) += t(e|f) / total_s(e)
  for all f
    for all e
      t(e|f) = count(e|f) / total(f)
[ "Iterate", "through", "all", "transmissions", "of", "english", "to", "foreign", "words", ".", "keep", "count", "of", "repeated", "occurences", "do", "until", "convergence", "set", "count", "(", "e|f", ")", "to", "0", "for", "all", "e", "f", "set", "total", "(", "f", ")", "to", "0", "for", "all", "f", "for", "all", "sentence", "pairs", "(", "e_s", "f_s", ")", "set", "total_s", "(", "e", ")", "=", "0", "for", "all", "e", "for", "all", "words", "e", "in", "e_s", "for", "all", "words", "f", "in", "f_s", "total_s", "(", "e", ")", "+", "=", "t", "(", "e|f", ")", "for", "all", "words", "e", "in", "e_s", "for", "all", "words", "f", "in", "f_s", "count", "(", "e|f", ")", "+", "=", "t", "(", "e|f", ")", "/", "total_s", "(", "e", ")", "total", "(", "f", ")", "+", "=", "t", "(", "e|f", ")", "/", "total_s", "(", "e", ")", "for", "all", "f", "for", "all", "e", "t", "(", "e|f", ")", "=", "count", "(", "e|f", ")", "/", "total", "(", "f", ")" ]
python
test
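The docstring's pseudo-code is the standard IBM Model 1 EM update. A self-contained sketch of one pass, independent of pymtranslate's internal state; the dict-of-pairs layout for t is an assumption chosen for brevity.

from collections import defaultdict

def em_step(sent_pairs, t):
    # t maps (e, f) pairs to the current translation probabilities t(e|f).
    count = defaultdict(float)   # count(e|f)
    total = defaultdict(float)   # total(f)
    for es, fs in sent_pairs:
        for e in es.split():
            # total_s(e) = sum over all f in the sentence of t(e|f)
            total_s = sum(t[(e, f)] for f in fs.split())
            for f in fs.split():
                count[(e, f)] += t[(e, f)] / total_s
                total[f] += t[(e, f)] / total_s
    # M-step: renormalize the counts per foreign word
    return {(e, f): count[(e, f)] / total[f] for (e, f) in t}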
bsmurphy/PyKrige
pykrige/uk.py
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/uk.py#L859-L1076
def execute(self, style, xpoints, ypoints, mask=None, backend='vectorized',
            specified_drift_arrays=None):
    """Calculates a kriged grid and the associated variance.
    Includes drift terms.

    This is now the method that performs the main kriging calculation.
    Note that currently measurements (i.e., z values) are considered
    'exact'. This means that, when a specified coordinate for interpolation
    is exactly the same as one of the data points, the variogram evaluated
    at the point is forced to be zero. Also, the diagonal of the kriging
    matrix is always forced to be zero. In forcing the variogram
    evaluated at data points to be zero, we are effectively saying that
    there is no variance at that point (no uncertainty,
    so the value is 'exact').

    In the future, the code may include an extra 'exact_values' boolean
    flag that can be adjusted to specify whether to treat the measurements
    as 'exact'. Setting the flag to false would indicate that the variogram
    should not be forced to be zero at zero distance (i.e., when evaluated
    at data points). Instead, the uncertainty in the point will be equal to
    the nugget. This would mean that the diagonal of the kriging matrix
    would be set to the nugget instead of to zero.

    Parameters
    ----------
    style : str
        Specifies how to treat input kriging points. Specifying 'grid'
        treats xpoints and ypoints as two arrays of x and y coordinates
        that define a rectangular grid. Specifying 'points' treats
        xpoints and ypoints as two arrays that provide coordinate pairs
        at which to solve the kriging system. Specifying 'masked' treats
        xpoints and ypoints as two arrays of x and y coordinates that
        define a rectangular grid and uses mask to only evaluate specific
        points in the grid.
    xpoints : array_like, shape (N,) or (N, 1)
        If style is specified as 'grid' or 'masked', x-coordinates of
        MxN grid. If style is specified as 'points', x-coordinates of
        specific points at which to solve kriging system.
    ypoints : array-like, shape (M,) or (M, 1)
        If style is specified as 'grid' or 'masked', y-coordinates of
        MxN grid. If style is specified as 'points', y-coordinates of
        specific points at which to solve kriging system.
        Note that in this case, xpoints and ypoints must have the same
        dimensions (i.e., M = N).
    mask : boolean array, shape (M, N), optional
        Specifies the points in the rectangular grid defined by xpoints
        and ypoints that are to be excluded in the kriging calculations.
        Must be provided if style is specified as 'masked'. False
        indicates that the point should not be masked, so the kriging
        system will be solved at the point. True indicates that the point
        should be masked, so the kriging system will not be solved at
        the point.
    backend : str, optional
        Specifies which approach to use in kriging. Specifying
        'vectorized' will solve the entire kriging problem at once in a
        vectorized operation. This approach is faster but also can consume
        a significant amount of memory for large grids and/or large
        datasets. Specifying 'loop' will loop through each point at which
        the kriging system is to be solved. This approach is slower but
        also less memory-intensive. Default is 'vectorized'.
        Note that Cython backend is not supported for UK.
    specified_drift_arrays : list of array-like objects, optional
        Specifies the drift values at the points at which the kriging
        system is to be evaluated. Required if 'specified' drift provided
        in the list of drift terms when instantiating the UniversalKriging
        class. Must be a list of arrays in the same order as the list
        provided when instantiating the kriging object.
Array(s) must be the same dimension as the specified grid or have the same number of points as the specified points; i.e., the arrays either must be shape (M, N), where M is the number of y grid-points and N is the number of x grid-points, or shape (M, ) or (N, 1), where M is the number of points at which to evaluate the kriging system. Returns ------- zvalues : ndarray, shape (M, N) or (N, 1) Z-values of specified grid or at the specified set of points. If style was specified as 'masked', zvalues will be a numpy masked array. sigmasq : ndarray, shape (M, N) or (N, 1) Variance at specified grid points or at the specified set of points. If style was specified as 'masked', sigmasq will be a numpy masked array. """ if self.verbose: print("Executing Universal Kriging...\n") if style != 'grid' and style != 'masked' and style != 'points': raise ValueError("style argument must be 'grid', 'points', " "or 'masked'") n = self.X_ADJUSTED.shape[0] n_withdrifts = n xpts = np.atleast_1d(np.squeeze(np.array(xpoints, copy=True))) ypts = np.atleast_1d(np.squeeze(np.array(ypoints, copy=True))) nx = xpts.size ny = ypts.size if self.regional_linear_drift: n_withdrifts += 2 if self.point_log_drift: n_withdrifts += self.point_log_array.shape[0] if self.external_Z_drift: n_withdrifts += 1 if self.specified_drift: n_withdrifts += len(self.specified_drift_data_arrays) if self.functional_drift: n_withdrifts += len(self.functional_drift_terms) a = self._get_kriging_matrix(n, n_withdrifts) if style in ['grid', 'masked']: if style == 'masked': if mask is None: raise IOError("Must specify boolean masking array when " "style is 'masked'.") if mask.shape[0] != ny or mask.shape[1] != nx: if mask.shape[0] == nx and mask.shape[1] == ny: mask = mask.T else: raise ValueError("Mask dimensions do not match " "specified grid dimensions.") mask = mask.flatten() npt = ny*nx grid_x, grid_y = np.meshgrid(xpts, ypts) xpts = grid_x.flatten() ypts = grid_y.flatten() elif style == 'points': if xpts.size != ypts.size: raise ValueError("xpoints and ypoints must have same " "dimensions when treated as listing " "discrete points.") npt = nx else: raise ValueError("style argument must be 'grid', 'points', " "or 'masked'") if specified_drift_arrays is None: specified_drift_arrays = [] spec_drift_grids = [] if self.specified_drift: if len(specified_drift_arrays) == 0: raise ValueError("Must provide drift values for kriging points " "when using 'specified' drift capability.") if type(specified_drift_arrays) is not list: raise TypeError("Arrays for specified drift terms must be " "encapsulated in a list.") for spec in specified_drift_arrays: if style in ['grid', 'masked']: if spec.ndim < 2: raise ValueError("Dimensions of drift values array do " "not match specified grid dimensions.") elif spec.shape[0] != ny or spec.shape[1] != nx: if spec.shape[0] == nx and spec.shape[1] == ny: spec_drift_grids.append(np.squeeze(spec.T)) else: raise ValueError("Dimensions of drift values array " "do not match specified grid " "dimensions.") else: spec_drift_grids.append(np.squeeze(spec)) elif style == 'points': if spec.ndim != 1: raise ValueError("Dimensions of drift values array do " "not match specified grid dimensions.") elif spec.shape[0] != xpts.size: raise ValueError("Number of supplied drift values in " "array do not match specified number " "of kriging points.") else: spec_drift_grids.append(np.squeeze(spec)) if len(spec_drift_grids) != len(self.specified_drift_data_arrays): raise ValueError("Inconsistent number of specified drift " "terms supplied.") 
else: if len(specified_drift_arrays) != 0: warnings.warn("Provided specified drift values, but " "'specified' drift was not initialized during " "instantiation of UniversalKriging class.", RuntimeWarning) xy_points_original = \ np.concatenate((xpts[:, np.newaxis], ypts[:, np.newaxis]), axis=1) xpts, ypts = _adjust_for_anisotropy(np.vstack((xpts, ypts)).T, [self.XCENTER, self.YCENTER], [self.anisotropy_scaling], [self.anisotropy_angle]).T xy_points = \ np.concatenate((xpts[:, np.newaxis], ypts[:, np.newaxis]), axis=1) xy_data = np.concatenate((self.X_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis]), axis=1) if style != 'masked': mask = np.zeros(npt, dtype='bool') bd = cdist(xy_points, xy_data, 'euclidean') if backend == 'vectorized': zvalues, sigmasq = self._exec_vector(a, bd, xy_points, xy_points_original, mask, n_withdrifts, spec_drift_grids) elif backend == 'loop': zvalues, sigmasq = self._exec_loop(a, bd, xy_points, xy_points_original, mask, n_withdrifts, spec_drift_grids) else: raise ValueError('Specified backend {} is not supported ' 'for 2D universal kriging.'.format(backend)) if style == 'masked': zvalues = np.ma.array(zvalues, mask=mask) sigmasq = np.ma.array(sigmasq, mask=mask) if style in ['masked', 'grid']: zvalues = zvalues.reshape((ny, nx)) sigmasq = sigmasq.reshape((ny, nx)) return zvalues, sigmasq
[ "def", "execute", "(", "self", ",", "style", ",", "xpoints", ",", "ypoints", ",", "mask", "=", "None", ",", "backend", "=", "'vectorized'", ",", "specified_drift_arrays", "=", "None", ")", ":", "if", "self", ".", "verbose", ":", "print", "(", "\"Executing Universal Kriging...\\n\"", ")", "if", "style", "!=", "'grid'", "and", "style", "!=", "'masked'", "and", "style", "!=", "'points'", ":", "raise", "ValueError", "(", "\"style argument must be 'grid', 'points', \"", "\"or 'masked'\"", ")", "n", "=", "self", ".", "X_ADJUSTED", ".", "shape", "[", "0", "]", "n_withdrifts", "=", "n", "xpts", "=", "np", ".", "atleast_1d", "(", "np", ".", "squeeze", "(", "np", ".", "array", "(", "xpoints", ",", "copy", "=", "True", ")", ")", ")", "ypts", "=", "np", ".", "atleast_1d", "(", "np", ".", "squeeze", "(", "np", ".", "array", "(", "ypoints", ",", "copy", "=", "True", ")", ")", ")", "nx", "=", "xpts", ".", "size", "ny", "=", "ypts", ".", "size", "if", "self", ".", "regional_linear_drift", ":", "n_withdrifts", "+=", "2", "if", "self", ".", "point_log_drift", ":", "n_withdrifts", "+=", "self", ".", "point_log_array", ".", "shape", "[", "0", "]", "if", "self", ".", "external_Z_drift", ":", "n_withdrifts", "+=", "1", "if", "self", ".", "specified_drift", ":", "n_withdrifts", "+=", "len", "(", "self", ".", "specified_drift_data_arrays", ")", "if", "self", ".", "functional_drift", ":", "n_withdrifts", "+=", "len", "(", "self", ".", "functional_drift_terms", ")", "a", "=", "self", ".", "_get_kriging_matrix", "(", "n", ",", "n_withdrifts", ")", "if", "style", "in", "[", "'grid'", ",", "'masked'", "]", ":", "if", "style", "==", "'masked'", ":", "if", "mask", "is", "None", ":", "raise", "IOError", "(", "\"Must specify boolean masking array when \"", "\"style is 'masked'.\"", ")", "if", "mask", ".", "shape", "[", "0", "]", "!=", "ny", "or", "mask", ".", "shape", "[", "1", "]", "!=", "nx", ":", "if", "mask", ".", "shape", "[", "0", "]", "==", "nx", "and", "mask", ".", "shape", "[", "1", "]", "==", "ny", ":", "mask", "=", "mask", ".", "T", "else", ":", "raise", "ValueError", "(", "\"Mask dimensions do not match \"", "\"specified grid dimensions.\"", ")", "mask", "=", "mask", ".", "flatten", "(", ")", "npt", "=", "ny", "*", "nx", "grid_x", ",", "grid_y", "=", "np", ".", "meshgrid", "(", "xpts", ",", "ypts", ")", "xpts", "=", "grid_x", ".", "flatten", "(", ")", "ypts", "=", "grid_y", ".", "flatten", "(", ")", "elif", "style", "==", "'points'", ":", "if", "xpts", ".", "size", "!=", "ypts", ".", "size", ":", "raise", "ValueError", "(", "\"xpoints and ypoints must have same \"", "\"dimensions when treated as listing \"", "\"discrete points.\"", ")", "npt", "=", "nx", "else", ":", "raise", "ValueError", "(", "\"style argument must be 'grid', 'points', \"", "\"or 'masked'\"", ")", "if", "specified_drift_arrays", "is", "None", ":", "specified_drift_arrays", "=", "[", "]", "spec_drift_grids", "=", "[", "]", "if", "self", ".", "specified_drift", ":", "if", "len", "(", "specified_drift_arrays", ")", "==", "0", ":", "raise", "ValueError", "(", "\"Must provide drift values for kriging points \"", "\"when using 'specified' drift capability.\"", ")", "if", "type", "(", "specified_drift_arrays", ")", "is", "not", "list", ":", "raise", "TypeError", "(", "\"Arrays for specified drift terms must be \"", "\"encapsulated in a list.\"", ")", "for", "spec", "in", "specified_drift_arrays", ":", "if", "style", "in", "[", "'grid'", ",", "'masked'", "]", ":", "if", "spec", ".", "ndim", "<", "2", ":", "raise", "ValueError", "(", "\"Dimensions 
of drift values array do \"", "\"not match specified grid dimensions.\"", ")", "elif", "spec", ".", "shape", "[", "0", "]", "!=", "ny", "or", "spec", ".", "shape", "[", "1", "]", "!=", "nx", ":", "if", "spec", ".", "shape", "[", "0", "]", "==", "nx", "and", "spec", ".", "shape", "[", "1", "]", "==", "ny", ":", "spec_drift_grids", ".", "append", "(", "np", ".", "squeeze", "(", "spec", ".", "T", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Dimensions of drift values array \"", "\"do not match specified grid \"", "\"dimensions.\"", ")", "else", ":", "spec_drift_grids", ".", "append", "(", "np", ".", "squeeze", "(", "spec", ")", ")", "elif", "style", "==", "'points'", ":", "if", "spec", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"Dimensions of drift values array do \"", "\"not match specified grid dimensions.\"", ")", "elif", "spec", ".", "shape", "[", "0", "]", "!=", "xpts", ".", "size", ":", "raise", "ValueError", "(", "\"Number of supplied drift values in \"", "\"array do not match specified number \"", "\"of kriging points.\"", ")", "else", ":", "spec_drift_grids", ".", "append", "(", "np", ".", "squeeze", "(", "spec", ")", ")", "if", "len", "(", "spec_drift_grids", ")", "!=", "len", "(", "self", ".", "specified_drift_data_arrays", ")", ":", "raise", "ValueError", "(", "\"Inconsistent number of specified drift \"", "\"terms supplied.\"", ")", "else", ":", "if", "len", "(", "specified_drift_arrays", ")", "!=", "0", ":", "warnings", ".", "warn", "(", "\"Provided specified drift values, but \"", "\"'specified' drift was not initialized during \"", "\"instantiation of UniversalKriging class.\"", ",", "RuntimeWarning", ")", "xy_points_original", "=", "np", ".", "concatenate", "(", "(", "xpts", "[", ":", ",", "np", ".", "newaxis", "]", ",", "ypts", "[", ":", ",", "np", ".", "newaxis", "]", ")", ",", "axis", "=", "1", ")", "xpts", ",", "ypts", "=", "_adjust_for_anisotropy", "(", "np", ".", "vstack", "(", "(", "xpts", ",", "ypts", ")", ")", ".", "T", ",", "[", "self", ".", "XCENTER", ",", "self", ".", "YCENTER", "]", ",", "[", "self", ".", "anisotropy_scaling", "]", ",", "[", "self", ".", "anisotropy_angle", "]", ")", ".", "T", "xy_points", "=", "np", ".", "concatenate", "(", "(", "xpts", "[", ":", ",", "np", ".", "newaxis", "]", ",", "ypts", "[", ":", ",", "np", ".", "newaxis", "]", ")", ",", "axis", "=", "1", ")", "xy_data", "=", "np", ".", "concatenate", "(", "(", "self", ".", "X_ADJUSTED", "[", ":", ",", "np", ".", "newaxis", "]", ",", "self", ".", "Y_ADJUSTED", "[", ":", ",", "np", ".", "newaxis", "]", ")", ",", "axis", "=", "1", ")", "if", "style", "!=", "'masked'", ":", "mask", "=", "np", ".", "zeros", "(", "npt", ",", "dtype", "=", "'bool'", ")", "bd", "=", "cdist", "(", "xy_points", ",", "xy_data", ",", "'euclidean'", ")", "if", "backend", "==", "'vectorized'", ":", "zvalues", ",", "sigmasq", "=", "self", ".", "_exec_vector", "(", "a", ",", "bd", ",", "xy_points", ",", "xy_points_original", ",", "mask", ",", "n_withdrifts", ",", "spec_drift_grids", ")", "elif", "backend", "==", "'loop'", ":", "zvalues", ",", "sigmasq", "=", "self", ".", "_exec_loop", "(", "a", ",", "bd", ",", "xy_points", ",", "xy_points_original", ",", "mask", ",", "n_withdrifts", ",", "spec_drift_grids", ")", "else", ":", "raise", "ValueError", "(", "'Specified backend {} is not supported '", "'for 2D universal kriging.'", ".", "format", "(", "backend", ")", ")", "if", "style", "==", "'masked'", ":", "zvalues", "=", "np", ".", "ma", ".", "array", "(", "zvalues", ",", "mask", "=", "mask", ")", 
"sigmasq", "=", "np", ".", "ma", ".", "array", "(", "sigmasq", ",", "mask", "=", "mask", ")", "if", "style", "in", "[", "'masked'", ",", "'grid'", "]", ":", "zvalues", "=", "zvalues", ".", "reshape", "(", "(", "ny", ",", "nx", ")", ")", "sigmasq", "=", "sigmasq", ".", "reshape", "(", "(", "ny", ",", "nx", ")", ")", "return", "zvalues", ",", "sigmasq" ]
Calculates a kriged grid and the associated variance.
Includes drift terms.

This is now the method that performs the main kriging calculation.
Note that currently measurements (i.e., z values) are considered
'exact'. This means that, when a specified coordinate for interpolation
is exactly the same as one of the data points, the variogram evaluated
at the point is forced to be zero. Also, the diagonal of the kriging
matrix is always forced to be zero. In forcing the variogram
evaluated at data points to be zero, we are effectively saying that
there is no variance at that point (no uncertainty,
so the value is 'exact').

In the future, the code may include an extra 'exact_values' boolean
flag that can be adjusted to specify whether to treat the measurements
as 'exact'. Setting the flag to false would indicate that the variogram
should not be forced to be zero at zero distance (i.e., when evaluated
at data points). Instead, the uncertainty in the point will be equal to
the nugget. This would mean that the diagonal of the kriging matrix
would be set to the nugget instead of to zero.

Parameters
----------
style : str
    Specifies how to treat input kriging points. Specifying 'grid'
    treats xpoints and ypoints as two arrays of x and y coordinates
    that define a rectangular grid. Specifying 'points' treats xpoints
    and ypoints as two arrays that provide coordinate pairs at which
    to solve the kriging system. Specifying 'masked' treats xpoints and
    ypoints as two arrays of x and y coordinates that define a
    rectangular grid and uses mask to only evaluate specific points
    in the grid.
xpoints : array_like, shape (N,) or (N, 1)
    If style is specified as 'grid' or 'masked', x-coordinates of
    MxN grid. If style is specified as 'points', x-coordinates of
    specific points at which to solve kriging system.
ypoints : array-like, shape (M,) or (M, 1)
    If style is specified as 'grid' or 'masked', y-coordinates of
    MxN grid. If style is specified as 'points', y-coordinates of
    specific points at which to solve kriging system.
    Note that in this case, xpoints and ypoints must have the same
    dimensions (i.e., M = N).
mask : boolean array, shape (M, N), optional
    Specifies the points in the rectangular grid defined by xpoints and
    ypoints that are to be excluded in the kriging calculations.
    Must be provided if style is specified as 'masked'. False indicates
    that the point should not be masked, so the kriging system will be
    solved at the point. True indicates that the point should be masked,
    so the kriging system will not be solved at the point.
backend : str, optional
    Specifies which approach to use in kriging. Specifying 'vectorized'
    will solve the entire kriging problem at once in a vectorized
    operation. This approach is faster but also can consume a
    significant amount of memory for large grids and/or large datasets.
    Specifying 'loop' will loop through each point at which the kriging
    system is to be solved. This approach is slower but also less
    memory-intensive. Default is 'vectorized'.
    Note that Cython backend is not supported for UK.
specified_drift_arrays : list of array-like objects, optional
    Specifies the drift values at the points at which the kriging
    system is to be evaluated. Required if 'specified' drift provided
    in the list of drift terms when instantiating the UniversalKriging
    class. Must be a list of arrays in the same order as the list
    provided when instantiating the kriging object.
Array(s) must be the same dimension as the specified grid or have the same number of points as the specified points; i.e., the arrays either must be shape (M, N), where M is the number of y grid-points and N is the number of x grid-points, or shape (M, ) or (N, 1), where M is the number of points at which to evaluate the kriging system. Returns ------- zvalues : ndarray, shape (M, N) or (N, 1) Z-values of specified grid or at the specified set of points. If style was specified as 'masked', zvalues will be a numpy masked array. sigmasq : ndarray, shape (M, N) or (N, 1) Variance at specified grid points or at the specified set of points. If style was specified as 'masked', sigmasq will be a numpy masked array.
[ "Calculates", "a", "kriged", "grid", "and", "the", "associated", "variance", ".", "Includes", "drift", "terms", ".", "This", "is", "now", "the", "method", "that", "performs", "the", "main", "kriging", "calculation", ".", "Note", "that", "currently", "measurements", "(", "i", ".", "e", ".", "z", "values", ")", "are", "considered", "exact", ".", "This", "means", "that", "when", "a", "specified", "coordinate", "for", "interpolation", "is", "exactly", "the", "same", "as", "one", "of", "the", "data", "points", "the", "variogram", "evaluated", "at", "the", "point", "is", "forced", "to", "be", "zero", ".", "Also", "the", "diagonal", "of", "the", "kriging", "matrix", "is", "also", "always", "forced", "to", "be", "zero", ".", "In", "forcing", "the", "variogram", "evaluated", "at", "data", "points", "to", "be", "zero", "we", "are", "effectively", "saying", "that", "there", "is", "no", "variance", "at", "that", "point", "(", "no", "uncertainty", "so", "the", "value", "is", "exact", ")", ".", "In", "the", "future", "the", "code", "may", "include", "an", "extra", "exact_values", "boolean", "flag", "that", "can", "be", "adjusted", "to", "specify", "whether", "to", "treat", "the", "measurements", "as", "exact", ".", "Setting", "the", "flag", "to", "false", "would", "indicate", "that", "the", "variogram", "should", "not", "be", "forced", "to", "be", "zero", "at", "zero", "distance", "(", "i", ".", "e", ".", "when", "evaluated", "at", "data", "points", ")", ".", "Instead", "the", "uncertainty", "in", "the", "point", "will", "be", "equal", "to", "the", "nugget", ".", "This", "would", "mean", "that", "the", "diagonal", "of", "the", "kriging", "matrix", "would", "be", "set", "to", "the", "nugget", "instead", "of", "to", "zero", ".", "Parameters", "----------", "style", ":", "str", "Specifies", "how", "to", "treat", "input", "kriging", "points", ".", "Specifying", "grid", "treats", "xpoints", "and", "ypoints", "as", "two", "arrays", "of", "x", "and", "y", "coordinates", "that", "define", "a", "rectangular", "grid", ".", "Specifying", "points", "treats", "xpoints", "and", "ypoints", "as", "two", "arrays", "that", "provide", "coordinate", "pairs", "at", "which", "to", "solve", "the", "kriging", "system", ".", "Specifying", "masked", "treats", "xpoints", "and", "ypoints", "as", "two", "arrays", "of", "x", "and", "y", "coordinates", "that", "define", "a", "rectangular", "grid", "and", "uses", "mask", "to", "only", "evaluate", "specific", "points", "in", "the", "grid", ".", "xpoints", ":", "array_like", "shape", "(", "N", ")", "or", "(", "N", "1", ")", "If", "style", "is", "specific", "as", "grid", "or", "masked", "x", "-", "coordinates", "of", "MxN", "grid", ".", "If", "style", "is", "specified", "as", "points", "x", "-", "coordinates", "of", "specific", "points", "at", "which", "to", "solve", "kriging", "system", ".", "ypoints", ":", "array", "-", "like", "shape", "(", "M", ")", "or", "(", "M", "1", ")", "If", "style", "is", "specified", "as", "grid", "or", "masked", "y", "-", "coordinates", "of", "MxN", "grid", ".", "If", "style", "is", "specified", "as", "points", "y", "-", "coordinates", "of", "specific", "points", "at", "which", "to", "solve", "kriging", "system", ".", "Note", "that", "in", "this", "case", "xpoints", "and", "ypoints", "must", "have", "the", "same", "dimensions", "(", "i", ".", "e", ".", "M", "=", "N", ")", ".", "mask", ":", "boolean", "array", "shape", "(", "M", "N", ")", "optional", "Specifies", "the", "points", "in", "the", "rectangular", "grid", "defined", "by", "xpoints", "and", 
"ypoints", "that", "are", "to", "be", "excluded", "in", "the", "kriging", "calculations", ".", "Must", "be", "provided", "if", "style", "is", "specified", "as", "masked", ".", "False", "indicates", "that", "the", "point", "should", "not", "be", "masked", "so", "the", "kriging", "system", "will", "be", "solved", "at", "the", "point", ".", "True", "indicates", "that", "the", "point", "should", "be", "masked", "so", "the", "kriging", "system", "should", "will", "not", "be", "solved", "at", "the", "point", ".", "backend", ":", "str", "optional", "Specifies", "which", "approach", "to", "use", "in", "kriging", ".", "Specifying", "vectorized", "will", "solve", "the", "entire", "kriging", "problem", "at", "once", "in", "a", "vectorized", "operation", ".", "This", "approach", "is", "faster", "but", "also", "can", "consume", "a", "significant", "amount", "of", "memory", "for", "large", "grids", "and", "/", "or", "large", "datasets", ".", "Specifying", "loop", "will", "loop", "through", "each", "point", "at", "which", "the", "kriging", "system", "is", "to", "be", "solved", ".", "This", "approach", "is", "slower", "but", "also", "less", "memory", "-", "intensive", ".", "Default", "is", "vectorized", ".", "Note", "that", "Cython", "backend", "is", "not", "supported", "for", "UK", ".", "specified_drift_arrays", ":", "list", "of", "array", "-", "like", "objects", "optional", "Specifies", "the", "drift", "values", "at", "the", "points", "at", "which", "the", "kriging", "system", "is", "to", "be", "evaluated", ".", "Required", "if", "specified", "drift", "provided", "in", "the", "list", "of", "drift", "terms", "when", "instantiating", "the", "UniversalKriging", "class", ".", "Must", "be", "a", "list", "of", "arrays", "in", "the", "same", "order", "as", "the", "list", "provided", "when", "instantiating", "the", "kriging", "object", ".", "Array", "(", "s", ")", "must", "be", "the", "same", "dimension", "as", "the", "specified", "grid", "or", "have", "the", "same", "number", "of", "points", "as", "the", "specified", "points", ";", "i", ".", "e", ".", "the", "arrays", "either", "must", "be", "shape", "(", "M", "N", ")", "where", "M", "is", "the", "number", "of", "y", "grid", "-", "points", "and", "N", "is", "the", "number", "of", "x", "grid", "-", "points", "or", "shape", "(", "M", ")", "or", "(", "N", "1", ")", "where", "M", "is", "the", "number", "of", "points", "at", "which", "to", "evaluate", "the", "kriging", "system", ".", "Returns", "-------", "zvalues", ":", "ndarray", "shape", "(", "M", "N", ")", "or", "(", "N", "1", ")", "Z", "-", "values", "of", "specified", "grid", "or", "at", "the", "specified", "set", "of", "points", ".", "If", "style", "was", "specified", "as", "masked", "zvalues", "will", "be", "a", "numpy", "masked", "array", ".", "sigmasq", ":", "ndarray", "shape", "(", "M", "N", ")", "or", "(", "N", "1", ")", "Variance", "at", "specified", "grid", "points", "or", "at", "the", "specified", "set", "of", "points", ".", "If", "style", "was", "specified", "as", "masked", "sigmasq", "will", "be", "a", "numpy", "masked", "array", "." ]
python
train
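A usage sketch for execute() in 'grid' mode, assuming pykrige is installed; the data arrays are invented.

import numpy as np
from pykrige.uk import UniversalKriging

np.random.seed(0)
x, y, z = np.random.rand(3, 50)  # scattered sample points (invented data)
uk = UniversalKriging(x, y, z, variogram_model='linear',
                      drift_terms=['regional_linear'])
gridx = np.linspace(0.0, 1.0, 20)
gridy = np.linspace(0.0, 1.0, 25)
# zvalues/sigmasq come back with shape (ny, nx) = (25, 20) for 'grid' style
zvalues, sigmasq = uk.execute('grid', gridx, gridy, backend='vectorized')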
DAI-Lab/Copulas
copulas/multivariate/vine.py
https://github.com/DAI-Lab/Copulas/blob/821df61c3d36a6b81ef2883935f935c2eaaa862c/copulas/multivariate/vine.py#L82-L107
def fit(self, X, truncated=3):
    """Fit a vine model to the data.

    Args:
        X(pandas.DataFrame): data to be fitted.
        truncated(int): max level to build the vine.
    """
    self.n_sample, self.n_var = X.shape
    self.columns = X.columns
    self.tau_mat = X.corr(method='kendall').values
    self.u_matrix = np.empty([self.n_sample, self.n_var])

    self.truncated = truncated
    self.depth = self.n_var - 1
    self.trees = []

    self.unis, self.ppfs = [], []
    for i, col in enumerate(X):
        uni = self.model()
        uni.fit(X[col])
        self.u_matrix[:, i] = uni.cumulative_distribution(X[col])
        self.unis.append(uni)
        self.ppfs.append(uni.percent_point)

    self.train_vine(self.vine_type)
    self.fitted = True
[ "def", "fit", "(", "self", ",", "X", ",", "truncated", "=", "3", ")", ":", "self", ".", "n_sample", ",", "self", ".", "n_var", "=", "X", ".", "shape", "self", ".", "columns", "=", "X", ".", "columns", "self", ".", "tau_mat", "=", "X", ".", "corr", "(", "method", "=", "'kendall'", ")", ".", "values", "self", ".", "u_matrix", "=", "np", ".", "empty", "(", "[", "self", ".", "n_sample", ",", "self", ".", "n_var", "]", ")", "self", ".", "truncated", "=", "truncated", "self", ".", "depth", "=", "self", ".", "n_var", "-", "1", "self", ".", "trees", "=", "[", "]", "self", ".", "unis", ",", "self", ".", "ppfs", "=", "[", "]", ",", "[", "]", "for", "i", ",", "col", "in", "enumerate", "(", "X", ")", ":", "uni", "=", "self", ".", "model", "(", ")", "uni", ".", "fit", "(", "X", "[", "col", "]", ")", "self", ".", "u_matrix", "[", ":", ",", "i", "]", "=", "uni", ".", "cumulative_distribution", "(", "X", "[", "col", "]", ")", "self", ".", "unis", ".", "append", "(", "uni", ")", "self", ".", "ppfs", ".", "append", "(", "uni", ".", "percent_point", ")", "self", ".", "train_vine", "(", "self", ".", "vine_type", ")", "self", ".", "fitted", "=", "True" ]
Fit a vine model to the data.

Args:
    X(pandas.DataFrame): data to be fitted.
    truncated(int): max level to build the vine.
[ "Fit", "a", "vine", "model", "to", "the", "data", "." ]
python
train
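A usage sketch for fit(); the method relies on pandas APIs (X.columns, X.corr), so a DataFrame is passed. The VineCopula import path and the 'regular' vine type follow the Copulas library's public API as I understand it and should be treated as assumptions.

import pandas as pd
from copulas.multivariate import VineCopula  # import path assumed

data = pd.DataFrame({'a': [0.1, 0.4, 0.8, 0.3],
                     'b': [0.2, 0.5, 0.9, 0.1],
                     'c': [0.3, 0.1, 0.7, 0.6]})
vine = VineCopula('regular')   # vine_type is consumed by train_vine()
vine.fit(data, truncated=3)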
moonlitesolutions/SolrClient
SolrClient/helpers/reindexer.py
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/helpers/reindexer.py#L99-L139
def _from_solr(self, fq=[], report_frequency = 25): ''' Method for retrieving batch data from Solr. ''' cursor = '*' stime = datetime.now() query_count = 0 while True: #Get data with starting cursorMark query = self._get_query(cursor) #Add FQ to the query. This is used by resume to filter on date fields and when specifying document subset. #Not included in _get_query for more flexibiilty. if fq: if 'fq' in query: [query['fq'].append(x) for x in fq] else: query['fq'] = fq results = self._source.query(self._source_coll, query) query_count += 1 if query_count % report_frequency == 0: self.log.info("Processed {} Items in {} Seconds. Apprximately {} items/minute".format( self._items_processed, int((datetime.now()-stime).seconds), str(int(self._items_processed / ((datetime.now()-stime).seconds/60))) )) if results.get_results_count(): #If we got items back, get the new cursor and yield the docs self._items_processed += results.get_results_count() cursor = results.get_cursor() #Remove ignore fields docs = self._trim_fields(results.docs) yield docs if results.get_results_count() < self._rows: #Less results than asked, probably done break else: #No Results, probably done :) self.log.debug("Got zero Results with cursor: {}".format(cursor)) break
[ "def", "_from_solr", "(", "self", ",", "fq", "=", "[", "]", ",", "report_frequency", "=", "25", ")", ":", "cursor", "=", "'*'", "stime", "=", "datetime", ".", "now", "(", ")", "query_count", "=", "0", "while", "True", ":", "#Get data with starting cursorMark\r", "query", "=", "self", ".", "_get_query", "(", "cursor", ")", "#Add FQ to the query. This is used by resume to filter on date fields and when specifying document subset.\r", "#Not included in _get_query for more flexibiilty.\r", "if", "fq", ":", "if", "'fq'", "in", "query", ":", "[", "query", "[", "'fq'", "]", ".", "append", "(", "x", ")", "for", "x", "in", "fq", "]", "else", ":", "query", "[", "'fq'", "]", "=", "fq", "results", "=", "self", ".", "_source", ".", "query", "(", "self", ".", "_source_coll", ",", "query", ")", "query_count", "+=", "1", "if", "query_count", "%", "report_frequency", "==", "0", ":", "self", ".", "log", ".", "info", "(", "\"Processed {} Items in {} Seconds. Apprximately {} items/minute\"", ".", "format", "(", "self", ".", "_items_processed", ",", "int", "(", "(", "datetime", ".", "now", "(", ")", "-", "stime", ")", ".", "seconds", ")", ",", "str", "(", "int", "(", "self", ".", "_items_processed", "/", "(", "(", "datetime", ".", "now", "(", ")", "-", "stime", ")", ".", "seconds", "/", "60", ")", ")", ")", ")", ")", "if", "results", ".", "get_results_count", "(", ")", ":", "#If we got items back, get the new cursor and yield the docs\r", "self", ".", "_items_processed", "+=", "results", ".", "get_results_count", "(", ")", "cursor", "=", "results", ".", "get_cursor", "(", ")", "#Remove ignore fields\r", "docs", "=", "self", ".", "_trim_fields", "(", "results", ".", "docs", ")", "yield", "docs", "if", "results", ".", "get_results_count", "(", ")", "<", "self", ".", "_rows", ":", "#Less results than asked, probably done\r", "break", "else", ":", "#No Results, probably done :)\r", "self", ".", "log", ".", "debug", "(", "\"Got zero Results with cursor: {}\"", ".", "format", "(", "cursor", ")", ")", "break" ]
Method for retrieving batch data from Solr.
[ "Method", "for", "retrieving", "batch", "data", "from", "Solr", "." ]
python
train
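A consumption sketch for the paging generator above. _from_solr() is private and normally driven by the helper's public reindex flow; the constructor arguments shown here are assumptions, not a verified signature.

from SolrClient import SolrClient
from SolrClient.helpers import Reindexer

source = SolrClient('http://localhost:8983/solr')
dest = SolrClient('http://localhost:8983/solr')
# Constructor arguments are assumptions based on the helper's attributes.
reindexer = Reindexer(source, dest, source_coll='old_coll',
                      dest_coll='new_coll')
for batch in reindexer._from_solr(fq=['date:[2015-01-01T00:00:00Z TO *]']):
    print(len(batch), 'docs in this cursorMark page')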
carlosp420/dataset-creator
dataset_creator/base_dataset.py
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/base_dataset.py#L332-L344
def make_slash_number(self): """ Charset lines have \2 or \3 depending on type of partitioning and codon positions requested for our dataset. :return: """ if self.partitioning == 'by codon position' and self.codon_positions == '1st-2nd': return '\\2' elif self.partitioning in ['by codon position', '1st-2nd, 3rd'] and self.codon_positions in ['ALL', None]: return '\\3' else: return ''
[ "def", "make_slash_number", "(", "self", ")", ":", "if", "self", ".", "partitioning", "==", "'by codon position'", "and", "self", ".", "codon_positions", "==", "'1st-2nd'", ":", "return", "'\\\\2'", "elif", "self", ".", "partitioning", "in", "[", "'by codon position'", ",", "'1st-2nd, 3rd'", "]", "and", "self", ".", "codon_positions", "in", "[", "'ALL'", ",", "None", "]", ":", "return", "'\\\\3'", "else", ":", "return", "''" ]
Charset lines have \2 or \3 depending on type of partitioning and codon positions requested for our dataset. :return:
[ "Charset", "lines", "have", "\\", "2", "or", "\\", "3", "depending", "on", "type", "of", "partitioning", "and", "codon", "positions", "requested", "for", "our", "dataset", "." ]
python
train
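A pure-function restatement of the branch logic above, to make the expected charset suffixes concrete without constructing a dataset object.

def slash_number(partitioning, codon_positions):
    # Mirrors make_slash_number() without needing an instance.
    if partitioning == 'by codon position' and codon_positions == '1st-2nd':
        return '\\2'
    elif partitioning in ['by codon position', '1st-2nd, 3rd'] and \
            codon_positions in ['ALL', None]:
        return '\\3'
    return ''

assert slash_number('by codon position', '1st-2nd') == '\\2'  # charset ... \2
assert slash_number('by codon position', 'ALL') == '\\3'      # charset ... \3
assert slash_number('by gene', 'ALL') == ''                   # no suffix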
PyMySQL/Tornado-MySQL
tornado_mysql/converters.py
https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/converters.py#L98-L129
def convert_datetime(obj):
    """Returns a DATETIME or TIMESTAMP column value as a datetime object:

      >>> convert_datetime('2007-02-25 23:06:20')
      datetime.datetime(2007, 2, 25, 23, 6, 20)
      >>> convert_datetime('2007-02-25T23:06:20')
      datetime.datetime(2007, 2, 25, 23, 6, 20)

    Illegal values are returned as None:

      >>> convert_datetime('2007-02-31T23:06:20') is None
      True
      >>> convert_datetime('0000-00-00 00:00:00') is None
      True

    """
    if ' ' in obj:
        sep = ' '
    elif 'T' in obj:
        sep = 'T'
    else:
        return convert_date(obj)

    try:
        ymd, hms = obj.split(sep, 1)
        usecs = '0'
        if '.' in hms:
            hms, usecs = hms.split('.')
            usecs = float('0.' + usecs) * 1e6
        return datetime.datetime(*[ int(x) for x in
                                    ymd.split('-')+hms.split(':')+[usecs] ])
    except ValueError:
        return convert_date(obj)
[ "def", "convert_datetime", "(", "obj", ")", ":", "if", "' '", "in", "obj", ":", "sep", "=", "' '", "elif", "'T'", "in", "obj", ":", "sep", "=", "'T'", "else", ":", "return", "convert_date", "(", "obj", ")", "try", ":", "ymd", ",", "hms", "=", "obj", ".", "split", "(", "sep", ",", "1", ")", "usecs", "=", "'0'", "if", "'.'", "in", "hms", ":", "hms", ",", "usecs", "=", "hms", ".", "split", "(", "'.'", ")", "usecs", "=", "float", "(", "'0.'", "+", "usecs", ")", "*", "1e6", "return", "datetime", ".", "datetime", "(", "*", "[", "int", "(", "x", ")", "for", "x", "in", "ymd", ".", "split", "(", "'-'", ")", "+", "hms", ".", "split", "(", "':'", ")", "+", "[", "usecs", "]", "]", ")", "except", "ValueError", ":", "return", "convert_date", "(", "obj", ")" ]
Returns a DATETIME or TIMESTAMP column value as a datetime object:

  >>> convert_datetime('2007-02-25 23:06:20')
  datetime.datetime(2007, 2, 25, 23, 6, 20)
  >>> convert_datetime('2007-02-25T23:06:20')
  datetime.datetime(2007, 2, 25, 23, 6, 20)

Illegal values are returned as None:

  >>> convert_datetime('2007-02-31T23:06:20') is None
  True
  >>> convert_datetime('0000-00-00 00:00:00') is None
  True
[ "Returns", "a", "DATETIME", "or", "TIMESTAMP", "column", "value", "as", "a", "datetime", "object", ":" ]
python
train
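Quick checks mirroring the doctests above, plus the fractional-seconds branch; assumes tornado_mysql is importable.

from tornado_mysql.converters import convert_datetime

print(convert_datetime('2007-02-25 23:06:20'))    # datetime(2007, 2, 25, 23, 6, 20)
print(convert_datetime('2007-02-25T23:06:20.5'))  # fraction becomes 500000 microseconds
print(convert_datetime('0000-00-00 00:00:00'))    # None, per the docstring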
tanghaibao/jcvi
jcvi/formats/sbt.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/sbt.py#L108-L182
def names(args):
    """
    %prog names namelist templatefile

    Generate name blocks from the `namelist` file. The `namelist` file is
    tab-delimited and contains >=4 columns of data. Three columns are
    mandatory. First name, middle initial and last name. First row is
    table header. For the extra columns, the first column will go in the
    `$N0` field in the template file, second to the `$N1` field, etc.

    In the alternative mode, the namelist just contains several sections.
    First row will go in the `$N0` in the template file, second to the
    `$N1` field. The namelist may look like:

    [Sequence]
    Bruce A. Roe, Frederic Debelle, Giles Oldroyd, Rene Geurts
    [Manuscript]
    Haibao Tang1, Vivek Krishnakumar1, Shelby Bidwell1, Benjamin Rosen1

    Then in this example Sequence section goes into N0, Manuscript goes
    into N1.

    Useful hints for constructing the template file can be found in:
    <http://www.ncbi.nlm.nih.gov/IEB/ToolBox/CPP_DOC/asn_spec/seq.asn.html>

    Often the template file can be retrieved from web form:
    <http://www.ncbi.nlm.nih.gov/WebSub/template.cgi>
    """
    p = OptionParser(names.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(p.print_help())

    namelist, templatefile = args

    # First check the alternative format
    if open(namelist).read()[0] == '[':
        out = parse_names(namelist)
        make_template(templatefile, out)
        return

    reader = csv.reader(open(namelist), delimiter="\t")
    header = next(reader)
    ncols = len(header)
    assert ncols > 3
    nextras = ncols - 3

    blocks = []
    bools = []
    for row in reader:
        first, middle, last = row[:3]
        extras = row[3:]
        bools.append([(x.upper() == 'Y') for x in extras])
        middle = middle.strip()
        if middle != "":
            middle = middle.rstrip('.') + '.'
        initials = "{0}.{1}".format(first[0], middle)
        suffix = ""
        nameblock = NameTemplate.format(last=last, first=first,
                                        initials=initials, suffix=suffix)
        blocks.append(nameblock)

    selected_idx = zip(*bools)
    out = [] * nextras
    for i, sbools in enumerate(selected_idx):
        selected = []
        for b, ss in zip(blocks, sbools):
            if ss:
                selected.append(b)
        bigblock = ",\n".join(selected)
        out.append(bigblock)
        logging.debug("List N{0} contains a total of {1} names.".format(i,
                      len(selected)))

    make_template(templatefile, out)
[ "def", "names", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "names", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "p", ".", "print_help", "(", ")", ")", "namelist", ",", "templatefile", "=", "args", "# First check the alternative format", "if", "open", "(", "namelist", ")", ".", "read", "(", ")", "[", "0", "]", "==", "'['", ":", "out", "=", "parse_names", "(", "namelist", ")", "make_template", "(", "templatefile", ",", "out", ")", "return", "reader", "=", "csv", ".", "reader", "(", "open", "(", "namelist", ")", ",", "delimiter", "=", "\"\\t\"", ")", "header", "=", "next", "(", "reader", ")", "ncols", "=", "len", "(", "header", ")", "assert", "ncols", ">", "3", "nextras", "=", "ncols", "-", "3", "blocks", "=", "[", "]", "bools", "=", "[", "]", "for", "row", "in", "reader", ":", "first", ",", "middle", ",", "last", "=", "row", "[", ":", "3", "]", "extras", "=", "row", "[", "3", ":", "]", "bools", ".", "append", "(", "[", "(", "x", ".", "upper", "(", ")", "==", "'Y'", ")", "for", "x", "in", "extras", "]", ")", "middle", "=", "middle", ".", "strip", "(", ")", "if", "middle", "!=", "\"\"", ":", "middle", "=", "middle", ".", "rstrip", "(", "'.'", ")", "+", "'.'", "initials", "=", "\"{0}.{1}\"", ".", "format", "(", "first", "[", "0", "]", ",", "middle", ")", "suffix", "=", "\"\"", "nameblock", "=", "NameTemplate", ".", "format", "(", "last", "=", "last", ",", "first", "=", "first", ",", "initials", "=", "initials", ",", "suffix", "=", "suffix", ")", "blocks", ".", "append", "(", "nameblock", ")", "selected_idx", "=", "zip", "(", "*", "bools", ")", "out", "=", "[", "]", "*", "nextras", "for", "i", ",", "sbools", "in", "enumerate", "(", "selected_idx", ")", ":", "selected", "=", "[", "]", "for", "b", ",", "ss", "in", "zip", "(", "blocks", ",", "sbools", ")", ":", "if", "ss", ":", "selected", ".", "append", "(", "b", ")", "bigblock", "=", "\",\\n\"", ".", "join", "(", "selected", ")", "out", ".", "append", "(", "bigblock", ")", "logging", ".", "debug", "(", "\"List N{0} contains a total of {1} names.\"", ".", "format", "(", "i", ",", "len", "(", "selected", ")", ")", ")", "make_template", "(", "templatefile", ",", "out", ")" ]
%prog names namelist templatefile

Generate name blocks from the `namelist` file. The `namelist` file is
tab-delimited and contains >=4 columns of data. Three columns are
mandatory. First name, middle initial and last name. First row is
table header. For the extra columns, the first column will go in the
`$N0` field in the template file, second to the `$N1` field, etc.

In the alternative mode, the namelist just contains several sections.
First row will go in the `$N0` in the template file, second to the
`$N1` field. The namelist may look like:

[Sequence]
Bruce A. Roe, Frederic Debelle, Giles Oldroyd, Rene Geurts
[Manuscript]
Haibao Tang1, Vivek Krishnakumar1, Shelby Bidwell1, Benjamin Rosen1

Then in this example Sequence section goes into N0, Manuscript goes into N1.

Useful hints for constructing the template file can be found in:
<http://www.ncbi.nlm.nih.gov/IEB/ToolBox/CPP_DOC/asn_spec/seq.asn.html>

Often the template file can be retrieved from web form:
<http://www.ncbi.nlm.nih.gov/WebSub/template.cgi>
[ "%prog", "names", "namelist", "templatefile" ]
python
train
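An illustrative input for the tab-delimited mode described above; the file name and column labels are invented. Columns 4 and up hold Y/N flags selecting membership in the N0, N1, ... lists, and the command would be run as: %prog names namelist.tsv template.sbt

namelist.tsv (columns separated by tabs):

First    Middle    Last            N0    N1
Haibao             Tang            Y     Y
Vivek    K         Krishnakumar    Y     N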
devassistant/devassistant
devassistant/gui/gui_helper.py
https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/gui/gui_helper.py#L78-L95
def button_with_image(self, description, image=None, sensitive=True):
    """
    The function creates a button with an image
    """
    btn = self.create_button()
    btn.set_sensitive(sensitive)
    h_box = self.create_box()
    try:
        img = self.create_image(image_name=image,
                                scale_ratio=btn.get_scale_factor(),
                                window=btn.get_window())
    except:
        # Older GTK+ than 3.10
        img = self.create_image(image_name=image)
    h_box.pack_start(img, False, False, 12)
    label = self.create_label(description)
    h_box.pack_start(label, False, False, 0)
    btn.add(h_box)
    return btn
[ "def", "button_with_image", "(", "self", ",", "description", ",", "image", "=", "None", ",", "sensitive", "=", "True", ")", ":", "btn", "=", "self", ".", "create_button", "(", ")", "btn", ".", "set_sensitive", "(", "sensitive", ")", "h_box", "=", "self", ".", "create_box", "(", ")", "try", ":", "img", "=", "self", ".", "create_image", "(", "image_name", "=", "image", ",", "scale_ratio", "=", "btn", ".", "get_scale_factor", "(", ")", ",", "window", "=", "btn", ".", "get_window", "(", ")", ")", "except", ":", "# Older GTK+ than 3.10", "img", "=", "self", ".", "create_image", "(", "image_name", "=", "image", ")", "h_box", ".", "pack_start", "(", "img", ",", "False", ",", "False", ",", "12", ")", "label", "=", "self", ".", "create_label", "(", "description", ")", "h_box", ".", "pack_start", "(", "label", ",", "False", ",", "False", ",", "0", ")", "btn", ".", "add", "(", "h_box", ")", "return", "btn" ]
The function creates a button with an image
[ "The", "function", "creates", "a", "button", "with", "image" ]
python
train
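A usage sketch from inside a DevAssistant window class that holds a GuiHelper instance; the attribute wiring (self.gui_helper, the box being packed, the icon name) is assumed, not taken from this record.

btn = self.gui_helper.button_with_image('Create project',
                                        image='devassistant',
                                        sensitive=True)
# Falls back to an unscaled image on GTK+ older than 3.10, per the
# try/except in the method above.
box.pack_start(btn, False, False, 6)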
Vital-Fernandez/dazer
bin/lib/CodeTools/various.py
https://github.com/Vital-Fernandez/dazer/blob/3c9ae8ae6d40ea33f22cc20dc11365d6d6e65244/bin/lib/CodeTools/various.py#L75-L77
def ufloatDict_nominal(self, ufloat_dict): 'This gives us a dictionary of nominal values from a dictionary of uncertainties' return OrderedDict(izip(ufloat_dict.keys(), map(lambda x: x.nominal_value, ufloat_dict.values())))
[ "def", "ufloatDict_nominal", "(", "self", ",", "ufloat_dict", ")", ":", "return", "OrderedDict", "(", "izip", "(", "ufloat_dict", ".", "keys", "(", ")", ",", "map", "(", "lambda", "x", ":", "x", ".", "nominal_value", ",", "ufloat_dict", ".", "values", "(", ")", ")", ")", ")" ]
This gives us a dictionary of nominal values from a dictionary of uncertainties
[ "This", "gives", "us", "a", "dictionary", "of", "nominal", "values", "from", "a", "dictionary", "of", "uncertainties" ]
python
train
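A usage sketch with the uncertainties package, which supplies the .nominal_value attribute that the method strips out of each entry.

from collections import OrderedDict
from uncertainties import ufloat

d = OrderedDict([('Te', ufloat(12500, 300)), ('ne', ufloat(150.0, 25.0))])
# ufloatDict_nominal(d) -> OrderedDict([('Te', 12500.0), ('ne', 150.0)])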
manns/pyspread
pyspread/src/gui/_grid_renderer.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_grid_renderer.py#L301-L365
def Draw(self, grid, attr, dc, rect, row, col, isSelected): """Draws the cell border and content using pycairo""" key = row, col, grid.current_table # If cell is merge draw the merging cell if invisibile if grid.code_array.cell_attributes[key]["merge_area"]: key = self.get_merging_cell(grid, key) drawn_rect = self._get_drawn_rect(grid, key, rect) if drawn_rect is None: return cell_cache_key = self._get_draw_cache_key(grid, key, drawn_rect, isSelected) mdc = wx.MemoryDC() if vlc is not None and key in self.video_cells and \ grid.code_array.cell_attributes[key]["panel_cell"]: # Update video position of previously created video panel self.video_cells[key].SetClientRect(drawn_rect) elif cell_cache_key in self.cell_cache: mdc.SelectObject(self.cell_cache[cell_cache_key]) else: code = grid.code_array(key) if vlc is not None and code is not None and \ grid.code_array.cell_attributes[key]["panel_cell"]: try: # A panel is to be displayed panel_cls = grid.code_array[key] # Assert that we have a subclass of a wxPanel that we # can instantiate assert issubclass(panel_cls, wx.Panel) video_panel = panel_cls(grid) video_panel.SetClientRect(drawn_rect) # Register video cell self.video_cells[key] = video_panel return except Exception, err: # Someting is wrong with the panel to be displayed post_command_event(grid.main_window, self.StatusBarMsg, text=unicode(err)) bmp = self._get_cairo_bmp(mdc, key, drawn_rect, isSelected, grid._view_frozen) else: bmp = self._get_cairo_bmp(mdc, key, drawn_rect, isSelected, grid._view_frozen) # Put resulting bmp into cache self.cell_cache[cell_cache_key] = bmp dc.Blit(drawn_rect.x, drawn_rect.y, drawn_rect.width, drawn_rect.height, mdc, 0, 0, wx.COPY) # Draw cursor if grid.actions.cursor[:2] == (row, col): self.update_cursor(dc, grid, row, col)
[ "def", "Draw", "(", "self", ",", "grid", ",", "attr", ",", "dc", ",", "rect", ",", "row", ",", "col", ",", "isSelected", ")", ":", "key", "=", "row", ",", "col", ",", "grid", ".", "current_table", "# If cell is merge draw the merging cell if invisibile", "if", "grid", ".", "code_array", ".", "cell_attributes", "[", "key", "]", "[", "\"merge_area\"", "]", ":", "key", "=", "self", ".", "get_merging_cell", "(", "grid", ",", "key", ")", "drawn_rect", "=", "self", ".", "_get_drawn_rect", "(", "grid", ",", "key", ",", "rect", ")", "if", "drawn_rect", "is", "None", ":", "return", "cell_cache_key", "=", "self", ".", "_get_draw_cache_key", "(", "grid", ",", "key", ",", "drawn_rect", ",", "isSelected", ")", "mdc", "=", "wx", ".", "MemoryDC", "(", ")", "if", "vlc", "is", "not", "None", "and", "key", "in", "self", ".", "video_cells", "and", "grid", ".", "code_array", ".", "cell_attributes", "[", "key", "]", "[", "\"panel_cell\"", "]", ":", "# Update video position of previously created video panel", "self", ".", "video_cells", "[", "key", "]", ".", "SetClientRect", "(", "drawn_rect", ")", "elif", "cell_cache_key", "in", "self", ".", "cell_cache", ":", "mdc", ".", "SelectObject", "(", "self", ".", "cell_cache", "[", "cell_cache_key", "]", ")", "else", ":", "code", "=", "grid", ".", "code_array", "(", "key", ")", "if", "vlc", "is", "not", "None", "and", "code", "is", "not", "None", "and", "grid", ".", "code_array", ".", "cell_attributes", "[", "key", "]", "[", "\"panel_cell\"", "]", ":", "try", ":", "# A panel is to be displayed", "panel_cls", "=", "grid", ".", "code_array", "[", "key", "]", "# Assert that we have a subclass of a wxPanel that we", "# can instantiate", "assert", "issubclass", "(", "panel_cls", ",", "wx", ".", "Panel", ")", "video_panel", "=", "panel_cls", "(", "grid", ")", "video_panel", ".", "SetClientRect", "(", "drawn_rect", ")", "# Register video cell", "self", ".", "video_cells", "[", "key", "]", "=", "video_panel", "return", "except", "Exception", ",", "err", ":", "# Someting is wrong with the panel to be displayed", "post_command_event", "(", "grid", ".", "main_window", ",", "self", ".", "StatusBarMsg", ",", "text", "=", "unicode", "(", "err", ")", ")", "bmp", "=", "self", ".", "_get_cairo_bmp", "(", "mdc", ",", "key", ",", "drawn_rect", ",", "isSelected", ",", "grid", ".", "_view_frozen", ")", "else", ":", "bmp", "=", "self", ".", "_get_cairo_bmp", "(", "mdc", ",", "key", ",", "drawn_rect", ",", "isSelected", ",", "grid", ".", "_view_frozen", ")", "# Put resulting bmp into cache", "self", ".", "cell_cache", "[", "cell_cache_key", "]", "=", "bmp", "dc", ".", "Blit", "(", "drawn_rect", ".", "x", ",", "drawn_rect", ".", "y", ",", "drawn_rect", ".", "width", ",", "drawn_rect", ".", "height", ",", "mdc", ",", "0", ",", "0", ",", "wx", ".", "COPY", ")", "# Draw cursor", "if", "grid", ".", "actions", ".", "cursor", "[", ":", "2", "]", "==", "(", "row", ",", "col", ")", ":", "self", ".", "update_cursor", "(", "dc", ",", "grid", ",", "row", ",", "col", ")" ]
Draws the cell border and content using pycairo
[ "Draws", "the", "cell", "border", "and", "content", "using", "pycairo" ]
python
train
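Draw() memoizes rendered bitmaps under a (cell, rect, selection, ...) cache key and only re-renders on a miss. A wx-free sketch of that caching pattern; the names are illustrative, not pyspread's API.

cell_cache = {}

def draw(key, rect, selected, render):
    cache_key = (key, tuple(rect), selected)
    if cache_key not in cell_cache:
        # Expensive path: render once per distinct cell/rect/selection state.
        cell_cache[cache_key] = render(key, rect, selected)
    # Cheap path: reuse (blit) the cached bitmap.
    return cell_cache[cache_key]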
randomdude999/rule_n
rule_n.py
https://github.com/randomdude999/rule_n/blob/4d8d72e71a9f1eaacb193d5b4383fba9f8cf67a6/rule_n.py#L206-L231
def process(self, state): """Process a state and return the next state Usage: out = rule_110.process([True, False, True]) len(out) # 5, because a False is added to either side out == [True, True, True, True, False] out = rule_110.process([False, True, False, True]) len(out) # still 5, because leading / trailing False's are removed out2 = rule_110.process([1, 0, 1]) # Any data type in the list is okay, as # long as it's boolean value is correct out == out2 """ if not isinstance(state, list): raise TypeError("state must be list") if self.finite_canvas: state = _crop_list_to_size(state, self.canvas_size) else: state = _remove_lead_trail_false(state) state.insert(0, self.default_val) state.append(self.default_val) new_state = [] for i in range(0, len(state)): result = _process_cell(i, state, finite=self.finite_canvas) new_state.append(self.rules[result]) return new_state
[ "def", "process", "(", "self", ",", "state", ")", ":", "if", "not", "isinstance", "(", "state", ",", "list", ")", ":", "raise", "TypeError", "(", "\"state must be list\"", ")", "if", "self", ".", "finite_canvas", ":", "state", "=", "_crop_list_to_size", "(", "state", ",", "self", ".", "canvas_size", ")", "else", ":", "state", "=", "_remove_lead_trail_false", "(", "state", ")", "state", ".", "insert", "(", "0", ",", "self", ".", "default_val", ")", "state", ".", "append", "(", "self", ".", "default_val", ")", "new_state", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "state", ")", ")", ":", "result", "=", "_process_cell", "(", "i", ",", "state", ",", "finite", "=", "self", ".", "finite_canvas", ")", "new_state", ".", "append", "(", "self", ".", "rules", "[", "result", "]", ")", "return", "new_state" ]
Process a state and return the next state Usage: out = rule_110.process([True, False, True]) len(out) # 5, because a False is added to either side out == [True, True, True, True, False] out = rule_110.process([False, True, False, True]) len(out) # still 5, because leading / trailing False's are removed out2 = rule_110.process([1, 0, 1]) # Any data type in the list is okay, as # long as it's boolean value is correct out == out2
[ "Process", "a", "state", "and", "return", "the", "next", "state", "Usage", ":" ]
python
train
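The process method above computes one generation of an elementary cellular automaton. For reference, a self-contained sketch of the same update rule, independent of the rule_n package (rule_bit and step are my names, not the library's):

RULE = 110  # the rule number encodes the output for each 3-cell neighborhood

def rule_bit(left, center, right):
    # The neighborhood (l, c, r) indexes one bit of the rule number.
    index = (left << 2) | (center << 1) | right
    return (RULE >> index) & 1

def step(state):
    # Pad with zeros on each side, mirroring the default_val padding above.
    padded = [0, 0] + state + [0, 0]
    return [rule_bit(padded[i - 1], padded[i], padded[i + 1])
            for i in range(1, len(padded) - 1)]

print(step([1, 0, 1]))  # [1, 1, 1, 1, 0]

The output matches the docstring's example: a cell is grown on each side, so a 3-cell input yields the 5-cell result [1, 1, 1, 1, 0].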
apetrynet/pyfilemail
pyfilemail/transfer.py
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/transfer.py#L523-L550
def rename_file(self, fmfile, newname): """Rename file in transfer. :param fmfile: file data from filemail containing fileid :param newname: new file name :type fmfile: ``dict`` :type newname: ``str`` or ``unicode`` :rtype: ``bool`` """ if not isinstance(fmfile, dict): raise FMBaseError('fmfile must be a <dict>') method, url = get_URL('file_rename') payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken'), 'fileid': fmfile.get('fileid'), 'filename': newname } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: self._complete = True return True hellraiser(res)
[ "def", "rename_file", "(", "self", ",", "fmfile", ",", "newname", ")", ":", "if", "not", "isinstance", "(", "fmfile", ",", "dict", ")", ":", "raise", "FMBaseError", "(", "'fmfile must be a <dict>'", ")", "method", ",", "url", "=", "get_URL", "(", "'file_rename'", ")", "payload", "=", "{", "'apikey'", ":", "self", ".", "config", ".", "get", "(", "'apikey'", ")", ",", "'logintoken'", ":", "self", ".", "session", ".", "cookies", ".", "get", "(", "'logintoken'", ")", ",", "'fileid'", ":", "fmfile", ".", "get", "(", "'fileid'", ")", ",", "'filename'", ":", "newname", "}", "res", "=", "getattr", "(", "self", ".", "session", ",", "method", ")", "(", "url", ",", "params", "=", "payload", ")", "if", "res", ".", "status_code", "==", "200", ":", "self", ".", "_complete", "=", "True", "return", "True", "hellraiser", "(", "res", ")" ]
Rename file in transfer. :param fmfile: file data from filemail containing fileid :param newname: new file name :type fmfile: ``dict`` :type newname: ``str`` or ``unicode`` :rtype: ``bool``
[ "Rename", "file", "in", "transfer", "." ]
python
train
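rename_file follows a pattern used throughout this client: look up an HTTP verb and URL for a named endpoint, build a payload of query parameters, and dispatch by calling getattr on a requests session. A hedged sketch of that dispatch idiom (call_api, the URL, and the parameter names are illustrative, not filemail's real API):

import requests

def call_api(session, method, url, **params):
    # Dispatch 'get'/'post'/... by name, as the transfer code does with
    # getattr(self.session, method).
    response = getattr(session, method)(url, params=params)
    # Raise on 4xx/5xx, playing the role of hellraiser(res) above.
    response.raise_for_status()
    return response

# session = requests.Session()
# call_api(session, 'get', 'https://example.com/api/file_rename',
#          fileid='abc123', filename='new_name.txt')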
nilp0inter/cpe
cpe/comp/cpecomp_simple.py
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp_simple.py#L184-L195
def _is_valid_language(self): """ Return True if the value of component in attribute "language" is valid, and otherwise False. :returns: True if value is valid, False otherwise :rtype: boolean """ comp_str = self._encoded_value.lower() lang_rxc = re.compile(CPEComponentSimple._LANGTAG_PATTERN) return lang_rxc.match(comp_str) is not None
[ "def", "_is_valid_language", "(", "self", ")", ":", "comp_str", "=", "self", ".", "_encoded_value", ".", "lower", "(", ")", "lang_rxc", "=", "re", ".", "compile", "(", "CPEComponentSimple", ".", "_LANGTAG_PATTERN", ")", "return", "lang_rxc", ".", "match", "(", "comp_str", ")", "is", "not", "None" ]
Return True if the value of component in attribute "language" is valid, and otherwise False. :returns: True if value is valid, False otherwise :rtype: boolean
[ "Return", "True", "if", "the", "value", "of", "component", "in", "attribute", "language", "is", "valid", "and", "otherwise", "False", "." ]
python
train
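_is_valid_language lowercases the value and matches it against a compiled language-tag pattern. A small self-contained sketch of the same shape, using a deliberately simplified subset of RFC 5646 (this regex is my assumption and is NOT cpe's _LANGTAG_PATTERN):

import re

# Illustrative subset of a language tag: a 2-3 letter primary subtag
# plus an optional 2-3 character region subtag.
LANGTAG = re.compile(r"^[a-z]{2,3}(?:-[a-z0-9]{2,3})?$")

def is_valid_language(value):
    return LANGTAG.match(value.lower()) is not None

print(is_valid_language("en-US"))    # True
print(is_valid_language("english"))  # False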
transifex/transifex-python-library
txlib/http/auth.py
https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/http/auth.py#L74-L86
def populate_request_data(self, request_args): """Add the authentication info to the supplied dictionary. We use the `requests.HTTPBasicAuth` class as the `auth` param. Args: `request_args`: The arguments that will be passed to the request. Returns: The updated arguments for the request. """ request_args['auth'] = HTTPBasicAuth( self._username, self._password) return request_args
[ "def", "populate_request_data", "(", "self", ",", "request_args", ")", ":", "request_args", "[", "'auth'", "]", "=", "HTTPBasicAuth", "(", "self", ".", "_username", ",", "self", ".", "_password", ")", "return", "request_args" ]
Add the authentication info to the supplied dictionary. We use the `requests.HTTPBasicAuth` class as the `auth` param. Args: `request_args`: The arguments that will be passed to the request. Returns: The updated arguments for the request.
[ "Add", "the", "authentication", "info", "to", "the", "supplied", "dictionary", "." ]
python
train
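populate_request_data simply attaches a requests.auth.HTTPBasicAuth instance under the 'auth' key, which requests accepts directly as a keyword argument. A minimal usage sketch (URL and credentials are placeholders):

import requests
from requests.auth import HTTPBasicAuth

request_args = {'timeout': 10}
request_args['auth'] = HTTPBasicAuth('username', 'secret')

# requests unpacks the auth object straight from the kwargs:
# response = requests.get('https://example.com/api/resource', **request_args)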
bastikr/boolean.py
boolean/boolean.py
https://github.com/bastikr/boolean.py/blob/e984df480afc60605e9501a0d3d54d667e8f7dbf/boolean/boolean.py#L483-L503
def _rdistributive(self, expr, op_example):
    """
    Recursively flatten the `expr` expression for the `op_example`
    AND or OR operation instance example.
    """
    if expr.isliteral:
        return expr
    expr_class = expr.__class__
    args = (self._rdistributive(arg, op_example) for arg in expr.args)
    args = tuple(arg.simplify() for arg in args)
    if len(args) == 1:
        return args[0]
    expr = expr_class(*args)
    dualoperation = op_example.dual
    if isinstance(expr, dualoperation):
        expr = expr.distributive()
    return expr
[ "def", "_rdistributive", "(", "self", ",", "expr", ",", "op_example", ")", ":", "if", "expr", ".", "isliteral", ":", "return", "expr", "expr_class", "=", "expr", ".", "__class__", "args", "=", "(", "self", ".", "_rdistributive", "(", "arg", ",", "op_example", ")", "for", "arg", "in", "expr", ".", "args", ")", "args", "=", "tuple", "(", "arg", ".", "simplify", "(", ")", "for", "arg", "in", "args", ")", "if", "len", "(", "args", ")", "==", "1", ":", "return", "args", "[", "0", "]", "expr", "=", "expr_class", "(", "*", "args", ")", "dualoperation", "=", "op_example", ".", "dual", "if", "isinstance", "(", "expr", ",", "dualoperation", ")", ":", "expr", "=", "expr", ".", "distributive", "(", ")", "return", "expr" ]
Recursively flatten the `expr` expression for the `op_example` AND or OR operation instance example.
[ "Recursively", "flatten", "the", "expr", "expression", "for", "the", "op_example", "AND", "or", "OR", "operation", "instance", "exmaple", "." ]
python
train
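_rdistributive recurses into the children first, simplifies them, and only then applies the distributive law at the current node. A self-contained toy version over nested tuples shows the same recursion shape without boolean.py's classes (the tuple encoding and rdistribute are mine):

# Tiny stand-in expression tree: ('and', ...) / ('or', ...) / variable names.
def rdistribute(expr):
    # Leaves pass through; children are processed first, then the node itself.
    if isinstance(expr, str):
        return expr
    op, args = expr[0], [rdistribute(a) for a in expr[1:]]
    if op == 'and':
        for i, a in enumerate(args):
            if isinstance(a, tuple) and a[0] == 'or':
                rest = args[:i] + args[i + 1:]
                # AND distributes over OR: a & (b | c) -> (a & b) | (a & c)
                return rdistribute(
                    ('or',) + tuple(('and', term) + tuple(rest)
                                    for term in a[1:]))
    return (op,) + tuple(args)

print(rdistribute(('and', 'a', ('or', 'b', 'c'))))
# ('or', ('and', 'b', 'a'), ('and', 'c', 'a'))

Distributing AND over OR this way is one step toward disjunctive normal form, which is what the surrounding library uses this helper for.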
bhmm/bhmm
bhmm/hmm/generic_hmm.py
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/hmm/generic_hmm.py#L398-L431
def collect_observations_in_state(self, observations, state_index): # TODO: this would work well in a subclass with data """Collect a vector of all observations belonging to a specified hidden state. Parameters ---------- observations : list of numpy.array List of observed trajectories. state_index : int The index of the hidden state for which corresponding observations are to be retrieved. dtype : numpy.dtype, optional, default=numpy.float64 The numpy dtype to use to store the collected observations. Returns ------- collected_observations : numpy.array with shape (nsamples,) The collected vector of observations belonging to the specified hidden state. Raises ------ RuntimeError A RuntimeError is raised if the HMM model does not yet have a hidden state trajectory associated with it. """ if not self.hidden_state_trajectories: raise RuntimeError('HMM model does not have a hidden state trajectory.') dtype = observations[0].dtype collected_observations = np.array([], dtype=dtype) for (s_t, o_t) in zip(self.hidden_state_trajectories, observations): indices = np.where(s_t == state_index)[0] collected_observations = np.append(collected_observations, o_t[indices]) return collected_observations
[ "def", "collect_observations_in_state", "(", "self", ",", "observations", ",", "state_index", ")", ":", "# TODO: this would work well in a subclass with data", "if", "not", "self", ".", "hidden_state_trajectories", ":", "raise", "RuntimeError", "(", "'HMM model does not have a hidden state trajectory.'", ")", "dtype", "=", "observations", "[", "0", "]", ".", "dtype", "collected_observations", "=", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "dtype", ")", "for", "(", "s_t", ",", "o_t", ")", "in", "zip", "(", "self", ".", "hidden_state_trajectories", ",", "observations", ")", ":", "indices", "=", "np", ".", "where", "(", "s_t", "==", "state_index", ")", "[", "0", "]", "collected_observations", "=", "np", ".", "append", "(", "collected_observations", ",", "o_t", "[", "indices", "]", ")", "return", "collected_observations" ]
Collect a vector of all observations belonging to a specified hidden state. Parameters ---------- observations : list of numpy.array List of observed trajectories. state_index : int The index of the hidden state for which corresponding observations are to be retrieved. dtype : numpy.dtype, optional, default=numpy.float64 The numpy dtype to use to store the collected observations. Returns ------- collected_observations : numpy.array with shape (nsamples,) The collected vector of observations belonging to the specified hidden state. Raises ------ RuntimeError A RuntimeError is raised if the HMM model does not yet have a hidden state trajectory associated with it.
[ "Collect", "a", "vector", "of", "all", "observations", "belonging", "to", "a", "specified", "hidden", "state", "." ]
python
train
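collect_observations_in_state gathers, per trajectory, the observations emitted while the hidden state equals state_index. The same gather can be written with NumPy boolean masks and a single concatenate (a self-contained sketch with made-up data):

import numpy as np

hidden_state_trajectories = [np.array([0, 1, 1, 0]), np.array([1, 1, 0, 0])]
observations = [np.array([0.1, 2.3, 2.1, 0.2]), np.array([1.9, 2.2, 0.3, 0.1])]

state_index = 1
# Boolean masking replaces the np.where + np.append loop above and avoids
# repeatedly reallocating the output array.
collected = np.concatenate(
    [o_t[s_t == state_index]
     for s_t, o_t in zip(hidden_state_trajectories, observations)])
print(collected)  # [2.3 2.1 1.9 2.2]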
wummel/linkchecker
doc/examples/filter_xml_output.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/doc/examples/filter_xml_output.py#L35-L45
def filter_tree(tree): """Filter all 401 errors.""" to_remove = [] for elem in tree.findall('urldata'): valid = elem.find('valid') if valid is not None and valid.text == '0' and \ valid.attrib.get('result', '').startswith('401'): to_remove.append(elem) root = tree.getroot() for elem in to_remove: root.remove(elem)
[ "def", "filter_tree", "(", "tree", ")", ":", "to_remove", "=", "[", "]", "for", "elem", "in", "tree", ".", "findall", "(", "'urldata'", ")", ":", "valid", "=", "elem", ".", "find", "(", "'valid'", ")", "if", "valid", "is", "not", "None", "and", "valid", ".", "text", "==", "'0'", "and", "valid", ".", "attrib", ".", "get", "(", "'result'", ",", "''", ")", ".", "startswith", "(", "'401'", ")", ":", "to_remove", ".", "append", "(", "elem", ")", "root", "=", "tree", ".", "getroot", "(", ")", "for", "elem", "in", "to_remove", ":", "root", ".", "remove", "(", "elem", ")" ]
Filter all 401 errors.
[ "Filter", "all", "401", "errors", "." ]
python
train
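filter_tree collects matching elements first and removes them afterwards, because removing children while iterating over findall results can skip elements. A runnable sketch of the same two-phase removal on an inline XML snippet (the sample document is invented):

import xml.etree.ElementTree as ET

xml = ("<linkchecker>"
       "<urldata><valid result='401 Unauthorized'>0</valid></urldata>"
       "<urldata><valid result='200 OK'>1</valid></urldata>"
       "</linkchecker>")
root = ET.fromstring(xml)

def is_401(elem):
    valid = elem.find('valid')
    return (valid is not None and valid.text == '0'
            and valid.attrib.get('result', '').startswith('401'))

# Phase 1: collect; phase 2: remove -- never mutate while iterating.
to_remove = [e for e in root.findall('urldata') if is_401(e)]
for elem in to_remove:
    root.remove(elem)
print(len(root.findall('urldata')))  # 1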
Microsoft/ApplicationInsights-Python
applicationinsights/channel/contracts/Location.py
https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/channel/contracts/Location.py#L31-L40
def ip(self, value): """The ip property. Args: value (string). the property value. """ if value == self._defaults['ai.location.ip'] and 'ai.location.ip' in self._values: del self._values['ai.location.ip'] else: self._values['ai.location.ip'] = value
[ "def", "ip", "(", "self", ",", "value", ")", ":", "if", "value", "==", "self", ".", "_defaults", "[", "'ai.location.ip'", "]", "and", "'ai.location.ip'", "in", "self", ".", "_values", ":", "del", "self", ".", "_values", "[", "'ai.location.ip'", "]", "else", ":", "self", ".", "_values", "[", "'ai.location.ip'", "]", "=", "value" ]
The ip property. Args: value (string). the property value.
[ "The", "ip", "property", ".", "Args", ":", "value", "(", "string", ")", ".", "the", "property", "value", "." ]
python
train
aiogram/aiogram
aiogram/types/message.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/types/message.py#L252-L263
def url(self) -> str: """ Get URL for the message :return: str """ if self.chat.type not in [ChatType.SUPER_GROUP, ChatType.CHANNEL]: raise TypeError('Invalid chat type!') elif not self.chat.username: raise TypeError('This chat does not have @username') return f"https://t.me/{self.chat.username}/{self.message_id}"
[ "def", "url", "(", "self", ")", "->", "str", ":", "if", "self", ".", "chat", ".", "type", "not", "in", "[", "ChatType", ".", "SUPER_GROUP", ",", "ChatType", ".", "CHANNEL", "]", ":", "raise", "TypeError", "(", "'Invalid chat type!'", ")", "elif", "not", "self", ".", "chat", ".", "username", ":", "raise", "TypeError", "(", "'This chat does not have @username'", ")", "return", "f\"https://t.me/{self.chat.username}/{self.message_id}\"" ]
Get URL for the message :return: str
[ "Get", "URL", "for", "the", "message" ]
python
train
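The url property builds a t.me permalink from the chat's public username and the message id. A standalone sketch of the same checks, with plain strings standing in for aiogram's ChatType constants:

def message_url(chat_type, username, message_id):
    # Mirrors the property above: only public supergroups/channels
    # have t.me permalinks.
    if chat_type not in ('supergroup', 'channel'):
        raise TypeError('Invalid chat type!')
    if not username:
        raise TypeError('This chat does not have @username')
    return 'https://t.me/{}/{}'.format(username, message_id)

print(message_url('channel', 'example_channel', 42))
# https://t.me/example_channel/42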
googleapis/google-cloud-python
storage/google/cloud/storage/notification.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/notification.py#L291-L321
def reload(self, client=None):
    """Update this notification from the server configuration.

    See:
    https://cloud.google.com/storage/docs/json_api/v1/notifications/get

    If :attr:`user_project` is set on the bucket, bills the API request
    to that project.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: Optional. The client to use. If not passed, falls back
                   to the ``client`` stored on the current bucket.

    :rtype: bool
    :returns: True, if the notification exists, else False.
    :raises ValueError: if the notification has no ID.
    """
    if self.notification_id is None:
        raise ValueError("Notification not initialized by server")

    client = self._require_client(client)

    query_params = {}
    if self.bucket.user_project is not None:
        query_params["userProject"] = self.bucket.user_project

    response = client._connection.api_request(
        method="GET", path=self.path, query_params=query_params
    )
    self._set_properties(response)
[ "def", "reload", "(", "self", ",", "client", "=", "None", ")", ":", "if", "self", ".", "notification_id", "is", "None", ":", "raise", "ValueError", "(", "\"Notification not intialized by server\"", ")", "client", "=", "self", ".", "_require_client", "(", "client", ")", "query_params", "=", "{", "}", "if", "self", ".", "bucket", ".", "user_project", "is", "not", "None", ":", "query_params", "[", "\"userProject\"", "]", "=", "self", ".", "bucket", ".", "user_project", "response", "=", "client", ".", "_connection", ".", "api_request", "(", "method", "=", "\"GET\"", ",", "path", "=", "self", ".", "path", ",", "query_params", "=", "query_params", ")", "self", ".", "_set_properties", "(", "response", ")" ]
Update this notification from the server configuration. See: https://cloud.google.com/storage/docs/json_api/v1/notifications/get If :attr:`user_project` is set on the bucket, bills the API request to that project. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :rtype: bool :returns: True, if the notification exists, else False. :raises ValueError: if the notification has no ID.
[ "Update", "this", "notification", "from", "the", "server", "configuration", "." ]
python
train
ubyssey/dispatch
dispatch/modules/content/mixins.py
https://github.com/ubyssey/dispatch/blob/8da6084fe61726f20e9cf675190480cfc45ee764/dispatch/modules/content/mixins.py#L27-L49
def get_author_string(self, links=False): saved_args = locals() saved_args = saved_args['links'] """Returns list of authors as a comma-separated string (with 'and' before last author).""" def format_author(author): if links and author.person.slug: return '<a href="/authors/%s/">%s</a>' % (author.person.slug, author.person.full_name) return author.person.full_name if links == True or links == False: authors = map(format_author, self.authors.all()) else: authors = map(format_author, saved_args) if not authors: return "" elif len(authors) == 1: # If this is the only author, just return author name return authors[0] return ", ".join(authors[0:-1]) + " and " + authors[-1]
[ "def", "get_author_string", "(", "self", ",", "links", "=", "False", ")", ":", "saved_args", "=", "locals", "(", ")", "saved_args", "=", "saved_args", "[", "'links'", "]", "def", "format_author", "(", "author", ")", ":", "if", "links", "and", "author", ".", "person", ".", "slug", ":", "return", "'<a href=\"/authors/%s/\">%s</a>'", "%", "(", "author", ".", "person", ".", "slug", ",", "author", ".", "person", ".", "full_name", ")", "return", "author", ".", "person", ".", "full_name", "if", "links", "==", "True", "or", "links", "==", "False", ":", "authors", "=", "map", "(", "format_author", ",", "self", ".", "authors", ".", "all", "(", ")", ")", "else", ":", "authors", "=", "map", "(", "format_author", ",", "saved_args", ")", "if", "not", "authors", ":", "return", "\"\"", "elif", "len", "(", "authors", ")", "==", "1", ":", "# If this is the only author, just return author name", "return", "authors", "[", "0", "]", "return", "\", \"", ".", "join", "(", "authors", "[", "0", ":", "-", "1", "]", ")", "+", "\" and \"", "+", "authors", "[", "-", "1", "]" ]
Returns list of authors as a comma-separated string (with 'and' before last author).
[ "Returns", "list", "of", "authors", "as", "a", "comma", "-", "separated", "string", "(", "with", "and", "before", "last", "author", ")", "." ]
python
test
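get_author_string implements the common "A, B and C" join. A standalone version of that join (join_with_and is my name); note that the original indexes and slices the result of map(), which returns a list on Python 2 but only an iterator on Python 3:

def join_with_and(names):
    # "" / "A" / "A and B" / "A, B and C" -- same shape as get_author_string.
    names = list(names)
    if not names:
        return ""
    if len(names) == 1:
        return names[0]
    return ", ".join(names[:-1]) + " and " + names[-1]

print(join_with_and(["Ada", "Grace", "Edsger"]))  # Ada, Grace and Edsger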
StackStorm/pybind
pybind/nos/v6_0_2f/qos/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/qos/__init__.py#L164-L185
def _set_queue(self, v, load=False): """ Setter method for queue, mapped from YANG variable /qos/queue (container) If this variable is read-only (config: false) in the source YANG file, then _set_queue is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_queue() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=queue.queue, is_container='container', presence=False, yang_name="queue", rest_name="queue", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure egress queueing', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """queue must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=queue.queue, is_container='container', presence=False, yang_name="queue", rest_name="queue", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure egress queueing', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='container', is_config=True)""", }) self.__queue = t if hasattr(self, '_set'): self._set()
[ "def", "_set_queue", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "queue", ".", "queue", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"queue\"", ",", "rest_name", "=", "\"queue\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Configure egress queueing'", ",", "u'cli-incomplete-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-qos'", ",", "defining_module", "=", "'brocade-qos'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"queue must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=queue.queue, is_container='container', presence=False, yang_name=\"queue\", rest_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure egress queueing', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__queue", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for queue, mapped from YANG variable /qos/queue (container) If this variable is read-only (config: false) in the source YANG file, then _set_queue is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_queue() directly.
[ "Setter", "method", "for", "queue", "mapped", "from", "YANG", "variable", "/", "qos", "/", "queue", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_queue", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_queue", "()", "directly", "." ]
python
train
log2timeline/dfvfs
dfvfs/file_io/gzip_file_io.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/gzip_file_io.py#L117-L144
def read(self, size=None): """Reads a byte string from the gzip file at the current offset. The function will read a byte string up to the specified size or all of the remaining data if no size was specified. Args: size (Optional[int]): number of bytes to read, where None is all remaining data. Returns: bytes: data read. Raises: IOError: if the read failed. OSError: if the read failed. """ data = b'' while ((size and len(data) < size) and self._current_offset < self.uncompressed_data_size): member = self._GetMemberForOffset(self._current_offset) member_offset = self._current_offset - member.uncompressed_data_offset data_read = member.ReadAtOffset(member_offset, size) if data_read: self._current_offset += len(data_read) data = b''.join([data, data_read]) return data
[ "def", "read", "(", "self", ",", "size", "=", "None", ")", ":", "data", "=", "b''", "while", "(", "(", "size", "and", "len", "(", "data", ")", "<", "size", ")", "and", "self", ".", "_current_offset", "<", "self", ".", "uncompressed_data_size", ")", ":", "member", "=", "self", ".", "_GetMemberForOffset", "(", "self", ".", "_current_offset", ")", "member_offset", "=", "self", ".", "_current_offset", "-", "member", ".", "uncompressed_data_offset", "data_read", "=", "member", ".", "ReadAtOffset", "(", "member_offset", ",", "size", ")", "if", "data_read", ":", "self", ".", "_current_offset", "+=", "len", "(", "data_read", ")", "data", "=", "b''", ".", "join", "(", "[", "data", ",", "data_read", "]", ")", "return", "data" ]
Reads a byte string from the gzip file at the current offset. The function will read a byte string up to the specified size or all of the remaining data if no size was specified. Args: size (Optional[int]): number of bytes to read, where None is all remaining data. Returns: bytes: data read. Raises: IOError: if the read failed. OSError: if the read failed.
[ "Reads", "a", "byte", "string", "from", "the", "gzip", "file", "at", "the", "current", "offset", "." ]
python
train
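read() loops, pulling data member by member until the requested size is reached or the stream is exhausted. The same accumulate-until-done loop, written against any file-like object (read_upto and the 8 KiB chunk size are illustrative):

import io

def read_upto(stream, size=None):
    # Accumulate chunks until `size` bytes are collected or the stream
    # ends; size=None means "read everything", as in the gzip reader above.
    chunks = []
    remaining = size
    while remaining is None or remaining > 0:
        chunk = stream.read(8192 if remaining is None
                            else min(8192, remaining))
        if not chunk:
            break
        chunks.append(chunk)
        if remaining is not None:
            remaining -= len(chunk)
    return b''.join(chunks)

print(read_upto(io.BytesIO(b'abcdefgh'), 5))  # b'abcde'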
jason-weirather/py-seq-tools
seqtools/graph/__init__.py
https://github.com/jason-weirather/py-seq-tools/blob/f642c2c73ffef2acc83656a78059a476fc734ca1/seqtools/graph/__init__.py#L57-L66
def get_root_graph(self,root):
    """Return a graph containing just the root and its children"""
    children = self.get_children(root)
    g = Graph()
    nodes = [root]+children
    for node in nodes: g.add_node(node)
    node_ids = [x.id for x in nodes]
    edges = [x for x in self._edges.values() if x.node1.id in node_ids and x.node2.id in node_ids]
    for e in edges: g.add_edge(e)
    return g
[ "def", "get_root_graph", "(", "self", ",", "root", ")", ":", "children", "=", "self", ".", "get_children", "(", "root", ")", "g", "=", "Graph", "(", ")", "nodes", "=", "[", "root", "]", "+", "children", "for", "node", "in", "nodes", ":", "g", ".", "add_node", "(", "node", ")", "node_ids", "=", "[", "x", ".", "id", "for", "x", "in", "nodes", "]", "edges", "=", "[", "x", "for", "x", "in", "self", ".", "_edges", ".", "values", "(", ")", "if", "x", ".", "node1", ".", "id", "in", "node_ids", "and", "x", ".", "node2", ".", "id", "in", "node_ids", "]", "for", "e", "in", "edges", ":", "g", ".", "add_edge", "(", "e", ")", "return", "g" ]
Return a graph containing just the root and its children
[ "Return", "back", "a", "graph", "containing", "just", "the", "root", "and", "children" ]
python
train
itamarst/eliot
eliot/_traceback.py
https://github.com/itamarst/eliot/blob/c03c96520c5492fadfc438b4b0f6336e2785ba2d/eliot/_traceback.py#L101-L126
def writeFailure(failure, logger=None): """ Write a L{twisted.python.failure.Failure} to the log. This is for situations where you got an unexpected exception and want to log a traceback. For example, if you have C{Deferred} that might error, you'll want to wrap it with a L{eliot.twisted.DeferredContext} and then add C{writeFailure} as the error handler to get the traceback logged: d = DeferredContext(dostuff()) d.addCallback(process) # Final error handler. d.addErrback(writeFailure) @param failure: L{Failure} to write to the log. @type logger: L{eliot.ILogger}. Will be deprecated at some point, so just ignore it. @return: None """ # Failure.getBriefTraceback does not include source code, so does not do # I/O. _writeTracebackMessage( logger, failure.value.__class__, failure.value, failure.getBriefTraceback())
[ "def", "writeFailure", "(", "failure", ",", "logger", "=", "None", ")", ":", "# Failure.getBriefTraceback does not include source code, so does not do", "# I/O.", "_writeTracebackMessage", "(", "logger", ",", "failure", ".", "value", ".", "__class__", ",", "failure", ".", "value", ",", "failure", ".", "getBriefTraceback", "(", ")", ")" ]
Write a L{twisted.python.failure.Failure} to the log. This is for situations where you got an unexpected exception and want to log a traceback. For example, if you have C{Deferred} that might error, you'll want to wrap it with a L{eliot.twisted.DeferredContext} and then add C{writeFailure} as the error handler to get the traceback logged: d = DeferredContext(dostuff()) d.addCallback(process) # Final error handler. d.addErrback(writeFailure) @param failure: L{Failure} to write to the log. @type logger: L{eliot.ILogger}. Will be deprecated at some point, so just ignore it. @return: None
[ "Write", "a", "L", "{", "twisted", ".", "python", ".", "failure", ".", "Failure", "}", "to", "the", "log", "." ]
python
train
svenkreiss/pysparkling
pysparkling/streaming/context.py
https://github.com/svenkreiss/pysparkling/blob/596d0ef2793100f7115efe228ff9bfc17beaa08d/pysparkling/streaming/context.py#L208-L221
def stop(self, stopSparkContext=True, stopGraceFully=False):
    """Stop processing streams.

    :param stopSparkContext: stop the SparkContext (NOT IMPLEMENTED)
    :param stopGraceFully: stop gracefully (NOT IMPLEMENTED)
    """
    while self._on_stop_cb:
        cb = self._on_stop_cb.pop()
        log.debug('calling on_stop_cb {}'.format(cb))
        cb()

    IOLoop.current().stop()

    StreamingContext._activeContext = None
[ "def", "stop", "(", "self", ",", "stopSparkContext", "=", "True", ",", "stopGraceFully", "=", "False", ")", ":", "while", "self", ".", "_on_stop_cb", ":", "cb", "=", "self", ".", "_on_stop_cb", ".", "pop", "(", ")", "log", ".", "debug", "(", "'calling on_stop_cb {}'", ".", "format", "(", "cb", ")", ")", "cb", "(", ")", "IOLoop", ".", "current", "(", ")", ".", "stop", "(", ")", "StreamingContext", ".", "_activeContext", "=", "None" ]
Stop processing streams. :param stopSparkContext: stop the SparkContext (NOT IMPLEMENTED) :param stopGracefully: stop gracefully (NOT IMPLEMENTED)
[ "Stop", "processing", "streams", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/controller/heartmonitor.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/controller/heartmonitor.py#L153-L166
def handle_pong(self, msg): "a heart just beat" current = str_to_bytes(str(self.lifetime)) last = str_to_bytes(str(self.last_ping)) if msg[1] == current: delta = time.time()-self.tic # self.log.debug("heartbeat::heart %r took %.2f ms to respond"%(msg[0], 1000*delta)) self.responses.add(msg[0]) elif msg[1] == last: delta = time.time()-self.tic + (self.lifetime-self.last_ping) self.log.warn("heartbeat::heart %r missed a beat, and took %.2f ms to respond", msg[0], 1000*delta) self.responses.add(msg[0]) else: self.log.warn("heartbeat::got bad heartbeat (possibly old?): %s (current=%.3f)", msg[1], self.lifetime)
[ "def", "handle_pong", "(", "self", ",", "msg", ")", ":", "current", "=", "str_to_bytes", "(", "str", "(", "self", ".", "lifetime", ")", ")", "last", "=", "str_to_bytes", "(", "str", "(", "self", ".", "last_ping", ")", ")", "if", "msg", "[", "1", "]", "==", "current", ":", "delta", "=", "time", ".", "time", "(", ")", "-", "self", ".", "tic", "# self.log.debug(\"heartbeat::heart %r took %.2f ms to respond\"%(msg[0], 1000*delta))", "self", ".", "responses", ".", "add", "(", "msg", "[", "0", "]", ")", "elif", "msg", "[", "1", "]", "==", "last", ":", "delta", "=", "time", ".", "time", "(", ")", "-", "self", ".", "tic", "+", "(", "self", ".", "lifetime", "-", "self", ".", "last_ping", ")", "self", ".", "log", ".", "warn", "(", "\"heartbeat::heart %r missed a beat, and took %.2f ms to respond\"", ",", "msg", "[", "0", "]", ",", "1000", "*", "delta", ")", "self", ".", "responses", ".", "add", "(", "msg", "[", "0", "]", ")", "else", ":", "self", ".", "log", ".", "warn", "(", "\"heartbeat::got bad heartbeat (possibly old?): %s (current=%.3f)\"", ",", "msg", "[", "1", "]", ",", "self", ".", "lifetime", ")" ]
a heart just beat
[ "a", "heart", "just", "beat" ]
python
test
ayust/kitnirc
kitnirc/client.py
https://github.com/ayust/kitnirc/blob/cf19fe39219da75f053e1a3976bf21331b6fefea/kitnirc/client.py#L332-L341
def send(self, *args): """Sends a single raw message to the IRC server. Arguments are automatically joined by spaces. No newlines are allowed. """ msg = " ".join(a.nick if isinstance(a, User) else str(a) for a in args) if "\n" in msg: raise ValueError("Cannot send() a newline. Args: %s" % repr(args)) _log.debug("%s <-- %s", self.server.host, msg) self.socket.send(msg + "\r\n")
[ "def", "send", "(", "self", ",", "*", "args", ")", ":", "msg", "=", "\" \"", ".", "join", "(", "a", ".", "nick", "if", "isinstance", "(", "a", ",", "User", ")", "else", "str", "(", "a", ")", "for", "a", "in", "args", ")", "if", "\"\\n\"", "in", "msg", ":", "raise", "ValueError", "(", "\"Cannot send() a newline. Args: %s\"", "%", "repr", "(", "args", ")", ")", "_log", ".", "debug", "(", "\"%s <-- %s\"", ",", "self", ".", "server", ".", "host", ",", "msg", ")", "self", ".", "socket", ".", "send", "(", "msg", "+", "\"\\r\\n\"", ")" ]
Sends a single raw message to the IRC server. Arguments are automatically joined by spaces. No newlines are allowed.
[ "Sends", "a", "single", "raw", "message", "to", "the", "IRC", "server", "." ]
python
train
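send() joins its arguments with spaces and rejects embedded newlines, since a newline inside one send() could smuggle a second IRC command onto the wire. A standalone sketch of that guard, slightly stricter in that it also rejects bare carriage returns (make_irc_line is my name, not kitnirc's API):

def make_irc_line(*args):
    # Join arguments and refuse line breaks, which would allow
    # protocol injection; IRC lines are terminated with CRLF.
    msg = " ".join(str(a) for a in args)
    if "\n" in msg or "\r" in msg:
        raise ValueError("Cannot send() a newline. Args: %r" % (args,))
    return (msg + "\r\n").encode("utf-8")

print(make_irc_line("PRIVMSG", "#channel", ":hello"))
# b'PRIVMSG #channel :hello\r\n'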
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_fcoe_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_fcoe_ext.py#L481-L495
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_tx_accepts(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") fcoe_get_interface = ET.Element("fcoe_get_interface") config = fcoe_get_interface output = ET.SubElement(fcoe_get_interface, "output") fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list") fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id") fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id') fcoe_intf_tx_accepts = ET.SubElement(fcoe_intf_list, "fcoe-intf-tx-accepts") fcoe_intf_tx_accepts.text = kwargs.pop('fcoe_intf_tx_accepts') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_tx_accepts", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "fcoe_get_interface", "=", "ET", ".", "Element", "(", "\"fcoe_get_interface\"", ")", "config", "=", "fcoe_get_interface", "output", "=", "ET", ".", "SubElement", "(", "fcoe_get_interface", ",", "\"output\"", ")", "fcoe_intf_list", "=", "ET", ".", "SubElement", "(", "output", ",", "\"fcoe-intf-list\"", ")", "fcoe_intf_fcoe_port_id_key", "=", "ET", ".", "SubElement", "(", "fcoe_intf_list", ",", "\"fcoe-intf-fcoe-port-id\"", ")", "fcoe_intf_fcoe_port_id_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'fcoe_intf_fcoe_port_id'", ")", "fcoe_intf_tx_accepts", "=", "ET", ".", "SubElement", "(", "fcoe_intf_list", ",", "\"fcoe-intf-tx-accepts\"", ")", "fcoe_intf_tx_accepts", ".", "text", "=", "kwargs", ".", "pop", "(", "'fcoe_intf_tx_accepts'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
molmod/molmod
molmod/zmatrix.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/zmatrix.py#L89-L118
def _get_new_ref(self, existing_refs): """Get a new reference atom for a row in the ZMatrix The reference atoms should obey the following conditions: - They must be different - They must be neighbours in the bond graph - They must have an index lower than the current atom If multiple candidate refs can be found, take the heaviest atom """ # ref0 is the atom whose position is defined by the current row in the # zmatrix. ref0 = existing_refs[0] for ref in existing_refs: # try to find a neighbor of the ref that can serve as the new ref result = None for n in sorted(self.graph.neighbors[ref]): if self.new_index[n] > self.new_index[ref0]: # index is too high, zmatrix rows can't refer to future # atoms continue if n in existing_refs: # ref is already in use continue if result is None or self.graph.numbers[n] <= self.graph.numbers[result]: # acceptable ref, prefer heaviest atom result = n if result is not None: return result raise RuntimeError("Could not find new reference.")
[ "def", "_get_new_ref", "(", "self", ",", "existing_refs", ")", ":", "# ref0 is the atom whose position is defined by the current row in the", "# zmatrix.", "ref0", "=", "existing_refs", "[", "0", "]", "for", "ref", "in", "existing_refs", ":", "# try to find a neighbor of the ref that can serve as the new ref", "result", "=", "None", "for", "n", "in", "sorted", "(", "self", ".", "graph", ".", "neighbors", "[", "ref", "]", ")", ":", "if", "self", ".", "new_index", "[", "n", "]", ">", "self", ".", "new_index", "[", "ref0", "]", ":", "# index is too high, zmatrix rows can't refer to future", "# atoms", "continue", "if", "n", "in", "existing_refs", ":", "# ref is already in use", "continue", "if", "result", "is", "None", "or", "self", ".", "graph", ".", "numbers", "[", "n", "]", "<=", "self", ".", "graph", ".", "numbers", "[", "result", "]", ":", "# acceptable ref, prefer heaviest atom", "result", "=", "n", "if", "result", "is", "not", "None", ":", "return", "result", "raise", "RuntimeError", "(", "\"Could not find new reference.\"", ")" ]
Get a new reference atom for a row in the ZMatrix The reference atoms should obey the following conditions: - They must be different - They must be neighbours in the bond graph - They must have an index lower than the current atom If multiple candidate refs can be found, take the heaviest atom
[ "Get", "a", "new", "reference", "atom", "for", "a", "row", "in", "the", "ZMatrix" ]
python
train
althonos/moclo
moclo/moclo/core/parts.py
https://github.com/althonos/moclo/blob/28a03748df8a2fa43f0c0c8098ca64d11559434e/moclo/moclo/core/parts.py#L102-L112
def characterize(cls, record): """Load the record in a concrete subclass of this type. """ classes = list(cls.__subclasses__()) if not isabstract(cls): classes.append(cls) for subclass in classes: entity = subclass(record) if entity.is_valid(): return entity raise RuntimeError("could not find the type for '{}'".format(record.id))
[ "def", "characterize", "(", "cls", ",", "record", ")", ":", "classes", "=", "list", "(", "cls", ".", "__subclasses__", "(", ")", ")", "if", "not", "isabstract", "(", "cls", ")", ":", "classes", ".", "append", "(", "cls", ")", "for", "subclass", "in", "classes", ":", "entity", "=", "subclass", "(", "record", ")", "if", "entity", ".", "is_valid", "(", ")", ":", "return", "entity", "raise", "RuntimeError", "(", "\"could not find the type for '{}'\"", ".", "format", "(", "record", ".", "id", ")", ")" ]
Load the record in a concrete subclass of this type.
[ "Load", "the", "record", "in", "a", "concrete", "subclass", "of", "this", "type", "." ]
python
train
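characterize tries each concrete subclass in turn and returns the first instance whose is_valid() check passes. A self-contained toy showing that subclass-dispatch pattern with plain strings instead of sequence records (all class names here are invented, and the isabstract handling is omitted):

class Part(object):
    def __init__(self, record):
        self.record = record

    @classmethod
    def characterize(cls, record):
        # Try each registered subclass until one accepts the record.
        for subclass in cls.__subclasses__():
            entity = subclass(record)
            if entity.is_valid():
                return entity
        raise RuntimeError("could not find the type for %r" % (record,))

class Promoter(Part):
    def is_valid(self):
        return self.record.startswith("P")

class Terminator(Part):
    def is_valid(self):
        return self.record.startswith("T")

print(type(Part.characterize("T-block")).__name__)  # Terminator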
osrg/ryu
ryu/services/protocols/bgp/utils/bgp.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/utils/bgp.py#L204-L229
def create_v4flowspec_actions(actions=None): """ Create list of traffic filtering actions for Ipv4 Flow Specification and VPNv4 Flow Specification. `` actions`` specifies Traffic Filtering Actions of Flow Specification as a dictionary type value. Returns a list of extended community values. """ from ryu.services.protocols.bgp.api.prefix import ( FLOWSPEC_ACTION_TRAFFIC_RATE, FLOWSPEC_ACTION_TRAFFIC_ACTION, FLOWSPEC_ACTION_REDIRECT, FLOWSPEC_ACTION_TRAFFIC_MARKING, ) # Supported action type for IPv4 and VPNv4. action_types = { FLOWSPEC_ACTION_TRAFFIC_RATE: BGPFlowSpecTrafficRateCommunity, FLOWSPEC_ACTION_TRAFFIC_ACTION: BGPFlowSpecTrafficActionCommunity, FLOWSPEC_ACTION_REDIRECT: BGPFlowSpecRedirectCommunity, FLOWSPEC_ACTION_TRAFFIC_MARKING: BGPFlowSpecTrafficMarkingCommunity, } return _create_actions(actions, action_types)
[ "def", "create_v4flowspec_actions", "(", "actions", "=", "None", ")", ":", "from", "ryu", ".", "services", ".", "protocols", ".", "bgp", ".", "api", ".", "prefix", "import", "(", "FLOWSPEC_ACTION_TRAFFIC_RATE", ",", "FLOWSPEC_ACTION_TRAFFIC_ACTION", ",", "FLOWSPEC_ACTION_REDIRECT", ",", "FLOWSPEC_ACTION_TRAFFIC_MARKING", ",", ")", "# Supported action type for IPv4 and VPNv4.", "action_types", "=", "{", "FLOWSPEC_ACTION_TRAFFIC_RATE", ":", "BGPFlowSpecTrafficRateCommunity", ",", "FLOWSPEC_ACTION_TRAFFIC_ACTION", ":", "BGPFlowSpecTrafficActionCommunity", ",", "FLOWSPEC_ACTION_REDIRECT", ":", "BGPFlowSpecRedirectCommunity", ",", "FLOWSPEC_ACTION_TRAFFIC_MARKING", ":", "BGPFlowSpecTrafficMarkingCommunity", ",", "}", "return", "_create_actions", "(", "actions", ",", "action_types", ")" ]
Create list of traffic filtering actions for Ipv4 Flow Specification and VPNv4 Flow Specification. `` actions`` specifies Traffic Filtering Actions of Flow Specification as a dictionary type value. Returns a list of extended community values.
[ "Create", "list", "of", "traffic", "filtering", "actions", "for", "Ipv4", "Flow", "Specification", "and", "VPNv4", "Flow", "Specification", "." ]
python
train
danilobellini/audiolazy
audiolazy/lazy_math.py
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_math.py#L95-L109
def factorial(n): """ Factorial function that works with really big numbers. """ if isinstance(n, float): if n.is_integer(): n = int(n) if not isinstance(n, INT_TYPES): raise TypeError("Non-integer input (perhaps you need Euler Gamma " "function or Gauss Pi function)") if n < 0: raise ValueError("Input shouldn't be negative") return reduce(operator.mul, it.takewhile(lambda m: m <= n, it.count(2)), 1)
[ "def", "factorial", "(", "n", ")", ":", "if", "isinstance", "(", "n", ",", "float", ")", ":", "if", "n", ".", "is_integer", "(", ")", ":", "n", "=", "int", "(", "n", ")", "if", "not", "isinstance", "(", "n", ",", "INT_TYPES", ")", ":", "raise", "TypeError", "(", "\"Non-integer input (perhaps you need Euler Gamma \"", "\"function or Gauss Pi function)\"", ")", "if", "n", "<", "0", ":", "raise", "ValueError", "(", "\"Input shouldn't be negative\"", ")", "return", "reduce", "(", "operator", ".", "mul", ",", "it", ".", "takewhile", "(", "lambda", "m", ":", "m", "<=", "n", ",", "it", ".", "count", "(", "2", ")", ")", ",", "1", ")" ]
Factorial function that works with really big numbers.
[ "Factorial", "function", "that", "works", "with", "really", "big", "numbers", "." ]
python
train
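The factorial above builds the product 2*3*...*n with reduce over an unbounded itertools.count stopped by takewhile, so it returns exact arbitrary-precision integers. A runnable copy of just that core expression (the input validation from the original is omitted here):

from functools import reduce
import itertools as it
import operator

def factorial(n):
    # Product of 2..n; the empty product (n < 2) falls back to the initial 1.
    return reduce(operator.mul, it.takewhile(lambda m: m <= n, it.count(2)), 1)

print(factorial(0))   # 1 (empty product)
print(factorial(25))  # 15511210043330985984000000 -- exceeds 64-bit range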
SoCo/SoCo
soco/snapshot.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/snapshot.py#L276-L291
def _restore_queue(self): """Restore the previous state of the queue. Note: The restore currently adds the items back into the queue using the URI, for items the Sonos system already knows about this is OK, but for other items, they may be missing some of their metadata as it will not be automatically picked up. """ if self.queue is not None: # Clear the queue so that it can be reset self.device.clear_queue() # Now loop around all the queue entries adding them for queue_group in self.queue: for queue_item in queue_group: self.device.add_uri_to_queue(queue_item.uri)
[ "def", "_restore_queue", "(", "self", ")", ":", "if", "self", ".", "queue", "is", "not", "None", ":", "# Clear the queue so that it can be reset", "self", ".", "device", ".", "clear_queue", "(", ")", "# Now loop around all the queue entries adding them", "for", "queue_group", "in", "self", ".", "queue", ":", "for", "queue_item", "in", "queue_group", ":", "self", ".", "device", ".", "add_uri_to_queue", "(", "queue_item", ".", "uri", ")" ]
Restore the previous state of the queue. Note: The restore currently adds the items back into the queue using the URI, for items the Sonos system already knows about this is OK, but for other items, they may be missing some of their metadata as it will not be automatically picked up.
[ "Restore", "the", "previous", "state", "of", "the", "queue", "." ]
python
train
optimizely/python-sdk
optimizely/decision_service.py
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/decision_service.py#L79-L103
def get_stored_variation(self, experiment, user_profile): """ Determine if the user has a stored variation available for the given experiment and return that. Args: experiment: Object representing the experiment for which user is to be bucketed. user_profile: UserProfile object representing the user's profile. Returns: Variation if available. None otherwise. """ user_id = user_profile.user_id variation_id = user_profile.get_variation_for_experiment(experiment.id) if variation_id: variation = self.config.get_variation_from_id(experiment.key, variation_id) if variation: self.logger.info('Found a stored decision. User "%s" is in variation "%s" of experiment "%s".' % ( user_id, variation.key, experiment.key )) return variation return None
[ "def", "get_stored_variation", "(", "self", ",", "experiment", ",", "user_profile", ")", ":", "user_id", "=", "user_profile", ".", "user_id", "variation_id", "=", "user_profile", ".", "get_variation_for_experiment", "(", "experiment", ".", "id", ")", "if", "variation_id", ":", "variation", "=", "self", ".", "config", ".", "get_variation_from_id", "(", "experiment", ".", "key", ",", "variation_id", ")", "if", "variation", ":", "self", ".", "logger", ".", "info", "(", "'Found a stored decision. User \"%s\" is in variation \"%s\" of experiment \"%s\".'", "%", "(", "user_id", ",", "variation", ".", "key", ",", "experiment", ".", "key", ")", ")", "return", "variation", "return", "None" ]
Determine if the user has a stored variation available for the given experiment and return that. Args: experiment: Object representing the experiment for which user is to be bucketed. user_profile: UserProfile object representing the user's profile. Returns: Variation if available. None otherwise.
[ "Determine", "if", "the", "user", "has", "a", "stored", "variation", "available", "for", "the", "given", "experiment", "and", "return", "that", "." ]
python
train
gawel/irc3
irc3/base.py
https://github.com/gawel/irc3/blob/cd27840a5809a1f803dc620860fe75d83d2a2ec8/irc3/base.py#L230-L262
def reload(self, *modules): """Reload one or more plugins""" self.notify('before_reload') if 'configfiles' in self.config: # reload configfiles self.log.info('Reloading configuration...') cfg = utils.parse_config( self.server and 'server' or 'bot', *self.config['configfiles']) self.config.update(cfg) self.log.info('Reloading python code...') if not modules: modules = self.registry.includes scanned = list(reversed(self.registry.scanned)) # reset includes and events self.registry.reset() to_scan = [] for module_name, categories in scanned: if module_name in modules: module = utils.maybedotted(module_name) reload_module(module) to_scan.append((module_name, categories)) # rescan all modules for module_name, categories in to_scan: self.include(module_name, venusian_categories=categories) self.registry.reloading = {} self.notify('after_reload')
[ "def", "reload", "(", "self", ",", "*", "modules", ")", ":", "self", ".", "notify", "(", "'before_reload'", ")", "if", "'configfiles'", "in", "self", ".", "config", ":", "# reload configfiles", "self", ".", "log", ".", "info", "(", "'Reloading configuration...'", ")", "cfg", "=", "utils", ".", "parse_config", "(", "self", ".", "server", "and", "'server'", "or", "'bot'", ",", "*", "self", ".", "config", "[", "'configfiles'", "]", ")", "self", ".", "config", ".", "update", "(", "cfg", ")", "self", ".", "log", ".", "info", "(", "'Reloading python code...'", ")", "if", "not", "modules", ":", "modules", "=", "self", ".", "registry", ".", "includes", "scanned", "=", "list", "(", "reversed", "(", "self", ".", "registry", ".", "scanned", ")", ")", "# reset includes and events", "self", ".", "registry", ".", "reset", "(", ")", "to_scan", "=", "[", "]", "for", "module_name", ",", "categories", "in", "scanned", ":", "if", "module_name", "in", "modules", ":", "module", "=", "utils", ".", "maybedotted", "(", "module_name", ")", "reload_module", "(", "module", ")", "to_scan", ".", "append", "(", "(", "module_name", ",", "categories", ")", ")", "# rescan all modules", "for", "module_name", ",", "categories", "in", "to_scan", ":", "self", ".", "include", "(", "module_name", ",", "venusian_categories", "=", "categories", ")", "self", ".", "registry", ".", "reloading", "=", "{", "}", "self", ".", "notify", "(", "'after_reload'", ")" ]
Reload one or more plugins
[ "Reload", "one", "or", "more", "plugins" ]
python
train
spyder-ide/spyder
spyder/plugins/editor/widgets/base.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/base.py#L1168-L1190
def mousePressEvent(self, event): """Reimplement Qt method""" if sys.platform.startswith('linux') and event.button() == Qt.MidButton: self.calltip_widget.hide() self.setFocus() event = QMouseEvent(QEvent.MouseButtonPress, event.pos(), Qt.LeftButton, Qt.LeftButton, Qt.NoModifier) QPlainTextEdit.mousePressEvent(self, event) QPlainTextEdit.mouseReleaseEvent(self, event) # Send selection text to clipboard to be able to use # the paste method and avoid the strange Issue 1445 # NOTE: This issue seems a focusing problem but it # seems really hard to track mode_clip = QClipboard.Clipboard mode_sel = QClipboard.Selection text_clip = QApplication.clipboard().text(mode=mode_clip) text_sel = QApplication.clipboard().text(mode=mode_sel) QApplication.clipboard().setText(text_sel, mode=mode_clip) self.paste() QApplication.clipboard().setText(text_clip, mode=mode_clip) else: self.calltip_widget.hide() QPlainTextEdit.mousePressEvent(self, event)
[ "def", "mousePressEvent", "(", "self", ",", "event", ")", ":", "if", "sys", ".", "platform", ".", "startswith", "(", "'linux'", ")", "and", "event", ".", "button", "(", ")", "==", "Qt", ".", "MidButton", ":", "self", ".", "calltip_widget", ".", "hide", "(", ")", "self", ".", "setFocus", "(", ")", "event", "=", "QMouseEvent", "(", "QEvent", ".", "MouseButtonPress", ",", "event", ".", "pos", "(", ")", ",", "Qt", ".", "LeftButton", ",", "Qt", ".", "LeftButton", ",", "Qt", ".", "NoModifier", ")", "QPlainTextEdit", ".", "mousePressEvent", "(", "self", ",", "event", ")", "QPlainTextEdit", ".", "mouseReleaseEvent", "(", "self", ",", "event", ")", "# Send selection text to clipboard to be able to use\r", "# the paste method and avoid the strange Issue 1445\r", "# NOTE: This issue seems a focusing problem but it\r", "# seems really hard to track\r", "mode_clip", "=", "QClipboard", ".", "Clipboard", "mode_sel", "=", "QClipboard", ".", "Selection", "text_clip", "=", "QApplication", ".", "clipboard", "(", ")", ".", "text", "(", "mode", "=", "mode_clip", ")", "text_sel", "=", "QApplication", ".", "clipboard", "(", ")", ".", "text", "(", "mode", "=", "mode_sel", ")", "QApplication", ".", "clipboard", "(", ")", ".", "setText", "(", "text_sel", ",", "mode", "=", "mode_clip", ")", "self", ".", "paste", "(", ")", "QApplication", ".", "clipboard", "(", ")", ".", "setText", "(", "text_clip", ",", "mode", "=", "mode_clip", ")", "else", ":", "self", ".", "calltip_widget", ".", "hide", "(", ")", "QPlainTextEdit", ".", "mousePressEvent", "(", "self", ",", "event", ")" ]
Reimplement Qt method
[ "Reimplement", "Qt", "method" ]
python
train
boriel/zxbasic
arch/zx48k/backend/__init__.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__init__.py#L1433-L1457
def _call(ins): """ Calls a function XXXX (or address XXXX) 2nd parameter contains size of the returning result if any, and will be pushed onto the stack. """ output = [] output.append('call %s' % str(ins.quad[1])) try: val = int(ins.quad[2]) if val == 1: output.append('push af') # Byte else: if val > 4: output.extend(_fpush()) else: if val > 2: output.append('push de') if val > 1: output.append('push hl') except ValueError: pass return output
[ "def", "_call", "(", "ins", ")", ":", "output", "=", "[", "]", "output", ".", "append", "(", "'call %s'", "%", "str", "(", "ins", ".", "quad", "[", "1", "]", ")", ")", "try", ":", "val", "=", "int", "(", "ins", ".", "quad", "[", "2", "]", ")", "if", "val", "==", "1", ":", "output", ".", "append", "(", "'push af'", ")", "# Byte", "else", ":", "if", "val", ">", "4", ":", "output", ".", "extend", "(", "_fpush", "(", ")", ")", "else", ":", "if", "val", ">", "2", ":", "output", ".", "append", "(", "'push de'", ")", "if", "val", ">", "1", ":", "output", ".", "append", "(", "'push hl'", ")", "except", "ValueError", ":", "pass", "return", "output" ]
Calls a function XXXX (or address XXXX) 2nd parameter contains size of the returning result if any, and will be pushed onto the stack.
[ "Calls", "a", "function", "XXXX", "(", "or", "address", "XXXX", ")", "2nd", "parameter", "contains", "size", "of", "the", "returning", "result", "if", "any", "and", "will", "be", "pushed", "onto", "the", "stack", "." ]
python
train
StackStorm/pybind
pybind/nos/v6_0_2f/mac_group/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/mac_group/__init__.py#L131-L152
def _set_mac_group_entry(self, v, load=False): """ Setter method for mac_group_entry, mapped from YANG variable /mac_group/mac_group_entry (list) If this variable is read-only (config: false) in the source YANG file, then _set_mac_group_entry is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_mac_group_entry() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("entry_address",mac_group_entry.mac_group_entry, yang_name="mac-group-entry", rest_name="mac", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='entry-address', extensions={u'tailf-common': {u'info': u'Add mac-address to the mac-group.\nMac mask is optional.', u'cli-no-key-completion': None, u'callpoint': u'mac-group-entry-config', u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'alt-name': u'mac'}}), is_container='list', yang_name="mac-group-entry", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Add mac-address to the mac-group.\nMac mask is optional.', u'cli-no-key-completion': None, u'callpoint': u'mac-group-entry-config', u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'alt-name': u'mac'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """mac_group_entry must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("entry_address",mac_group_entry.mac_group_entry, yang_name="mac-group-entry", rest_name="mac", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='entry-address', extensions={u'tailf-common': {u'info': u'Add mac-address to the mac-group.\nMac mask is optional.', u'cli-no-key-completion': None, u'callpoint': u'mac-group-entry-config', u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'alt-name': u'mac'}}), is_container='list', yang_name="mac-group-entry", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Add mac-address to the mac-group.\nMac mask is optional.', u'cli-no-key-completion': None, u'callpoint': u'mac-group-entry-config', u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'alt-name': u'mac'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='list', is_config=True)""", }) self.__mac_group_entry = t if hasattr(self, '_set'): self._set()
[ "def", "_set_mac_group_entry", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"entry_address\"", ",", "mac_group_entry", ".", "mac_group_entry", ",", "yang_name", "=", "\"mac-group-entry\"", ",", "rest_name", "=", "\"mac\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'entry-address'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Add mac-address to the mac-group.\\nMac mask is optional.'", ",", "u'cli-no-key-completion'", ":", "None", ",", "u'callpoint'", ":", "u'mac-group-entry-config'", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'cli-compact-syntax'", ":", "None", ",", "u'cli-sequence-commands'", ":", "None", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'alt-name'", ":", "u'mac'", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"mac-group-entry\"", ",", "rest_name", "=", "\"mac\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Add mac-address to the mac-group.\\nMac mask is optional.'", ",", "u'cli-no-key-completion'", ":", "None", ",", "u'callpoint'", ":", "u'mac-group-entry-config'", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'cli-compact-syntax'", ":", "None", ",", "u'cli-sequence-commands'", ":", "None", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'alt-name'", ":", "u'mac'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-mac-address-table'", ",", "defining_module", "=", "'brocade-mac-address-table'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"mac_group_entry must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"entry_address\",mac_group_entry.mac_group_entry, yang_name=\"mac-group-entry\", rest_name=\"mac\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='entry-address', extensions={u'tailf-common': {u'info': u'Add mac-address to the mac-group.\\nMac mask is optional.', u'cli-no-key-completion': None, u'callpoint': u'mac-group-entry-config', u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'alt-name': u'mac'}}), is_container='list', yang_name=\"mac-group-entry\", rest_name=\"mac\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Add mac-address to the mac-group.\\nMac mask is optional.', u'cli-no-key-completion': None, u'callpoint': u'mac-group-entry-config', u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'alt-name': u'mac'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__mac_group_entry", "=", "t", 
"if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for mac_group_entry, mapped from YANG variable /mac_group/mac_group_entry (list) If this variable is read-only (config: false) in the source YANG file, then _set_mac_group_entry is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_mac_group_entry() directly.
[ "Setter", "method", "for", "mac_group_entry", "mapped", "from", "YANG", "variable", "/", "mac_group", "/", "mac_group_entry", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_mac_group_entry", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_mac_group_entry", "()", "directly", "." ]
python
train
aliyun/aliyun-odps-python-sdk
odps/lib/cloudpickle.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/lib/cloudpickle.py#L354-L363
def save_module(self, obj): """ Save a module as an import """ self.modules.add(obj) if _is_dynamic(obj): self.save_reduce(dynamic_subimport, (obj.__name__, vars(obj)), obj=obj) else: self.save_reduce(subimport, (obj.__name__,), obj=obj)
[ "def", "save_module", "(", "self", ",", "obj", ")", ":", "self", ".", "modules", ".", "add", "(", "obj", ")", "if", "_is_dynamic", "(", "obj", ")", ":", "self", ".", "save_reduce", "(", "dynamic_subimport", ",", "(", "obj", ".", "__name__", ",", "vars", "(", "obj", ")", ")", ",", "obj", "=", "obj", ")", "else", ":", "self", ".", "save_reduce", "(", "subimport", ",", "(", "obj", ".", "__name__", ",", ")", ",", "obj", "=", "obj", ")" ]
Save a module as an import
[ "Save", "a", "module", "as", "an", "import" ]
python
train
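The cloudpickle `save_module` entry above reduces a dynamic module to its name plus `vars()`; a minimal standalone sketch of that round trip, using only the standard library (`rebuild_dynamic` is a hypothetical stand-in for `dynamic_subimport`):

import types

def rebuild_dynamic(name, vars_dict):
    # Recreate a runtime-built module from its captured namespace,
    # the same shape of reconstruction the reduce call above sets up.
    mod = types.ModuleType(name)
    mod.__dict__.update(vars_dict)
    return mod

dyn = types.ModuleType("scratch")  # dynamic: not importable by name
dyn.answer = 42
clone = rebuild_dynamic(dyn.__name__, vars(dyn))
assert clone.answer == 42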
klen/muffin-admin
muffin_admin/peewee.py
https://github.com/klen/muffin-admin/blob/404dc8e5107e943b7c42fa21c679c34ddb4de1d5/muffin_admin/peewee.py#L233-L235
def filter_query(self, query, field, value): """Filter a query.""" return query.where(field ** "%{}%".format(value.lower()))
[ "def", "filter_query", "(", "self", ",", "query", ",", "field", ",", "value", ")", ":", "return", "query", ".", "where", "(", "field", "**", "\"%{}%\"", ".", "format", "(", "value", ".", "lower", "(", ")", ")", ")" ]
Filter a query.
[ "Filter", "a", "query", "." ]
python
train
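In peewee the `**` operator used by `filter_query` above compiles to a case-insensitive LIKE, so the filter behaves as sketched below; the `User` model and in-memory database are illustrative assumptions:

from peewee import CharField, Model, SqliteDatabase

db = SqliteDatabase(":memory:")

class User(Model):
    name = CharField()
    class Meta:
        database = db

db.create_tables([User])
User.create(name="Alice")

value = "ALI"
# Equivalent of filter_query(User.select(), User.name, value):
query = User.select().where(User.name ** "%{}%".format(value.lower()))
print([u.name for u in query])  # ['Alice']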
databio/pypiper
pypiper/utils.py
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/utils.py#L360-L370
def is_in_file_tree(fpath, folder): """ Determine whether a file is in a folder. :param str fpath: filepath to investigate :param folder: path to folder to query :return bool: whether the path indicated is in the folder indicated """ file_folder, _ = os.path.split(fpath) other_folder = os.path.join(folder, "") return other_folder.startswith(file_folder)
[ "def", "is_in_file_tree", "(", "fpath", ",", "folder", ")", ":", "file_folder", ",", "_", "=", "os", ".", "path", ".", "split", "(", "fpath", ")", "other_folder", "=", "os", ".", "path", ".", "join", "(", "folder", ",", "\"\"", ")", "return", "other_folder", ".", "startswith", "(", "file_folder", ")" ]
Determine whether a file is in a folder. :param str fpath: filepath to investigate :param folder: path to folder to query :return bool: whether the path indicated is in the folder indicated
[ "Determine", "whether", "a", "file", "is", "in", "a", "folder", "." ]
python
train
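A self-contained run of the pypiper helper above; note that, as written, it answers whether `folder` sits at or below the file's own directory, which is the direction to keep in mind when reading its docstring:

import os

def is_in_file_tree(fpath, folder):
    file_folder, _ = os.path.split(fpath)
    other_folder = os.path.join(folder, "")  # force a trailing separator
    return other_folder.startswith(file_folder)

print(is_in_file_tree("/a/b/c.txt", "/a/b"))    # True: same directory
print(is_in_file_tree("/a/b/c.txt", "/a/b/d"))  # True: folder below the file's directory
print(is_in_file_tree("/a/b/c.txt", "/a"))      # False: parent of the file's directory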
meejah/txtorcon
txtorcon/addrmap.py
https://github.com/meejah/txtorcon/blob/14053b95adf0b4bd9dd9c317bece912a26578a93/txtorcon/addrmap.py#L37-L90
def update(self, *args): """ deals with an update from Tor; see parsing logic in torcontroller """ gmtexpires = None (name, ip, expires) = args[:3] for arg in args: if arg.lower().startswith('expires='): gmtexpires = arg[8:] if gmtexpires is None: if len(args) == 3: gmtexpires = expires else: if args[2] == 'NEVER': gmtexpires = args[2] else: gmtexpires = args[3] self.name = name # "www.example.com" self.ip = maybe_ip_addr(ip) # IPV4Address instance, or string if self.ip == '<error>': self._expire() return fmt = "%Y-%m-%d %H:%M:%S" # if we already have expiry times, etc then we want to # properly delay our timeout oldexpires = self.expires if gmtexpires.upper() == 'NEVER': # FIXME can I just select a date 100 years in the future instead? self.expires = None else: self.expires = datetime.datetime.strptime(gmtexpires, fmt) self.created = datetime.datetime.utcnow() if self.expires is not None: if oldexpires is None: if self.expires <= self.created: diff = datetime.timedelta(seconds=0) else: diff = self.expires - self.created self.expiry = self.map.scheduler.callLater(diff.seconds, self._expire) else: diff = self.expires - oldexpires self.expiry.delay(diff.seconds)
[ "def", "update", "(", "self", ",", "*", "args", ")", ":", "gmtexpires", "=", "None", "(", "name", ",", "ip", ",", "expires", ")", "=", "args", "[", ":", "3", "]", "for", "arg", "in", "args", ":", "if", "arg", ".", "lower", "(", ")", ".", "startswith", "(", "'expires='", ")", ":", "gmtexpires", "=", "arg", "[", "8", ":", "]", "if", "gmtexpires", "is", "None", ":", "if", "len", "(", "args", ")", "==", "3", ":", "gmtexpires", "=", "expires", "else", ":", "if", "args", "[", "2", "]", "==", "'NEVER'", ":", "gmtexpires", "=", "args", "[", "2", "]", "else", ":", "gmtexpires", "=", "args", "[", "3", "]", "self", ".", "name", "=", "name", "# \"www.example.com\"", "self", ".", "ip", "=", "maybe_ip_addr", "(", "ip", ")", "# IPV4Address instance, or string", "if", "self", ".", "ip", "==", "'<error>'", ":", "self", ".", "_expire", "(", ")", "return", "fmt", "=", "\"%Y-%m-%d %H:%M:%S\"", "# if we already have expiry times, etc then we want to", "# properly delay our timeout", "oldexpires", "=", "self", ".", "expires", "if", "gmtexpires", ".", "upper", "(", ")", "==", "'NEVER'", ":", "# FIXME can I just select a date 100 years in the future instead?", "self", ".", "expires", "=", "None", "else", ":", "self", ".", "expires", "=", "datetime", ".", "datetime", ".", "strptime", "(", "gmtexpires", ",", "fmt", ")", "self", ".", "created", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "if", "self", ".", "expires", "is", "not", "None", ":", "if", "oldexpires", "is", "None", ":", "if", "self", ".", "expires", "<=", "self", ".", "created", ":", "diff", "=", "datetime", ".", "timedelta", "(", "seconds", "=", "0", ")", "else", ":", "diff", "=", "self", ".", "expires", "-", "self", ".", "created", "self", ".", "expiry", "=", "self", ".", "map", ".", "scheduler", ".", "callLater", "(", "diff", ".", "seconds", ",", "self", ".", "_expire", ")", "else", ":", "diff", "=", "self", ".", "expires", "-", "oldexpires", "self", ".", "expiry", ".", "delay", "(", "diff", ".", "seconds", ")" ]
deals with an update from Tor; see parsing logic in torcontroller
[ "deals", "with", "an", "update", "from", "Tor", ";", "see", "parsing", "logic", "in", "torcontroller" ]
python
train
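The expiry extraction at the top of the txtorcon `update` above, isolated for a quick check; the sample tuples imitate Tor ADDRMAP event arguments:

def extract_gmt_expires(args):
    # Same precedence as update(): an explicit EXPIRES= argument wins,
    # otherwise fall back on argument count and the NEVER sentinel.
    gmtexpires = None
    for arg in args:
        if arg.lower().startswith('expires='):
            gmtexpires = arg[8:]
    if gmtexpires is None:
        if len(args) == 3:
            gmtexpires = args[2]
        elif args[2] == 'NEVER':
            gmtexpires = args[2]
        else:
            gmtexpires = args[3]
    return gmtexpires

print(extract_gmt_expires(('www.example.com', '1.2.3.4', '2016-11-29 09:00:00')))
# 2016-11-29 09:00:00
print(extract_gmt_expires(('www.example.com', '1.2.3.4', 'NEVER', 'CACHED="YES"')))
# NEVER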
computational-metabolomics/msp2db
msp2db/utils.py
https://github.com/computational-metabolomics/msp2db/blob/f86f01efca26fd2745547c9993f97337c6bef123/msp2db/utils.py#L6-L28
def get_precursor_mz(exact_mass, precursor_type): """ Calculate precursor mz based on exact mass and precursor type Args: exact_mass (float): exact mass of compound of interest precursor_type (str): Precursor type (currently only works with '[M-H]-', '[M+H]+' and '[M+H-H2O]+') Return: precursor mz of compound """ # these are just taken from what was present in the massbank .msp file for those missing the exact mass d = {'[M-H]-': -1.007276, '[M+H]+': 1.007276, '[M+H-H2O]+': 1.007276 - ((1.007276 * 2) + 15.9949) } try: return exact_mass + d[precursor_type] except KeyError as e: print(e) return False
[ "def", "get_precursor_mz", "(", "exact_mass", ",", "precursor_type", ")", ":", "# these are just taken from what was present in the massbank .msp file for those missing the exact mass", "d", "=", "{", "'[M-H]-'", ":", "-", "1.007276", ",", "'[M+H]+'", ":", "1.007276", ",", "'[M+H-H2O]+'", ":", "1.007276", "-", "(", "(", "1.007276", "*", "2", ")", "+", "15.9949", ")", "}", "try", ":", "return", "exact_mass", "+", "d", "[", "precursor_type", "]", "except", "KeyError", "as", "e", ":", "print", "(", "e", ")", "return", "False" ]
Calculate precursor mz based on exact mass and precursor type Args: exact_mass (float): exact mass of compound of interest precursor_type (str): Precursor type (currently only works with '[M-H]-', '[M+H]+' and '[M+H-H2O]+') Return: precursor mz of compound
[ "Calculate", "precursor", "mz", "based", "on", "exact", "mass", "and", "precursor", "type" ]
python
train
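Worked numbers for the msp2db entry above; the input is glucose's monoisotopic mass, chosen purely for illustration:

d = {'[M-H]-': -1.007276,
     '[M+H]+': 1.007276,
     '[M+H-H2O]+': 1.007276 - ((1.007276 * 2) + 15.9949)}

exact_mass = 180.06339  # monoisotopic mass of glucose (C6H12O6)
print(round(exact_mass + d['[M+H]+'], 5))      # 181.07067
print(round(exact_mass + d['[M-H]-'], 5))      # 179.05611
print(round(exact_mass + d['[M+H-H2O]+'], 5))  # 163.06121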
joke2k/faker
faker/providers/company/fr_FR/__init__.py
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/company/fr_FR/__init__.py#L97-L111
def _is_catch_phrase_valid(self, catch_phrase): """ Validates a french catch phrase. :param catch_phrase: The catch phrase to validate. """ for word in self.words_which_should_not_appear_twice: # Fastest way to check if a piece of word does not appear twice. begin_pos = catch_phrase.find(word) end_pos = catch_phrase.find(word, begin_pos + 1) if begin_pos != -1 and begin_pos != end_pos: return False return True
[ "def", "_is_catch_phrase_valid", "(", "self", ",", "catch_phrase", ")", ":", "for", "word", "in", "self", ".", "words_which_should_not_appear_twice", ":", "# Fastest way to check if a piece of word does not appear twice.", "begin_pos", "=", "catch_phrase", ".", "find", "(", "word", ")", "end_pos", "=", "catch_phrase", ".", "find", "(", "word", ",", "begin_pos", "+", "1", ")", "if", "begin_pos", "!=", "-", "1", "and", "begin_pos", "!=", "end_pos", ":", "return", "False", "return", "True" ]
Validates a french catch phrase. :param catch_phrase: The catch phrase to validate.
[ "Validates", "a", "french", "catch", "phrase", "." ]
python
train
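A standalone run of the faker validity check above, with a hypothetical banned-word list; note that the second `find` returns -1 when a word occurs only once, so even a single occurrence differs from `begin_pos` and fails the test:

words_which_should_not_appear_twice = ['secured', 'global']  # hypothetical list

def is_catch_phrase_valid(catch_phrase):
    for word in words_which_should_not_appear_twice:
        begin_pos = catch_phrase.find(word)
        end_pos = catch_phrase.find(word, begin_pos + 1)
        if begin_pos != -1 and begin_pos != end_pos:
            return False
    return True

print(is_catch_phrase_valid("optimized logistical synergy"))     # True: no listed word
print(is_catch_phrase_valid("global reach for global markets"))  # False: 'global' twice
print(is_catch_phrase_valid("secured synergy"))                  # False: one hit, second find() is -1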
frejanordsiek/GeminiMotorDrive
GeminiMotorDrive/drivers.py
https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/drivers.py#L529-L631
def send_commands(self, commands, timeout=1.0, max_retries=1, eor=('\n', '\n- ')): """ Send a sequence of commands to the drive and collect output. Takes a sequence of many commands and executes them one by one till either all are executed or one runs out of retries (`max_retries`). Retries are optionally performed if a command's response indicates that there was an error. Remaining commands are not executed. The processed output of the final execution (last try or retry) of each command that was actually executed is returned. This function basically feeds commands one by one to ``send_command`` and collates the outputs. Parameters ---------- commands : iterable of str Iterable of commands to send to the drive. Each command must be a ``str``. timeout : float or None, optional Optional timeout in seconds to use when reading the response. A negative value or ``None`` indicates that an infinite timeout should be used. max_retries : int, optional Maximum number of retries to do per command in the case of errors. eor : str or iterable of str, optional End Of Response. An EOR is either a ``str`` or an iterable of ``str`` that denote the possible endings of a response. 'eor' can be a single EOR, in which case it is used for all commands, or it can be an iterable of EOR to use for each individual command. For most commands, it should be ``('\\n', '\\n- ')``, but for running a program, it should be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``. Returns ------- outputs : list of lists ``list`` composed of the processed responses of each command in the order that they were done up to and including the last command executed. See ``send_command`` for the format of processed responses. See Also -------- send_command : Send a single command. Examples -------- A sequence of commands to energize the motor, move it a bit away from the starting position, and then do 4 forward/reverse cycles, and de-energize the motor. **DO NOT** try these specific movement distances without checking that the motion won't damage something (very motor and application specific). >>> from GeminiMotorDrive.drivers import ASCII_RS232 >>> ra = ASCII_RS232('/dev/ttyS1') >>> ra.send_commands(['DRIVE1', 'D-10000', 'GO'] ... + ['D-10000','GO','D10000','GO']*4 ... + [ 'DRIVE0']) [['DRIVE1', 'DRIVE1\\r', 'DRIVE1', None, []], ['D-10000', 'D-10000\\r', 'D-10000', None, []], ['GO', 'GO\\r', 'GO', None, []], ['D-10000', 'D-10000\\r', 'D-10000', None, []], ['GO', 'GO\\r', 'GO', None, []], ['D10000', 'D10000\\r', 'D10000', None, []], ['GO', 'GO\\r', 'GO', None, []], ['D-10000', 'D-10000\\r', 'D-10000', None, []], ['GO', 'GO\\r', 'GO', None, []], ['D10000', 'D10000\\r', 'D10000', None, []], ['GO', 'GO\\r', 'GO', None, []], ['D-10000', 'D-10000\\r', 'D-10000', None, []], ['GO', 'GO\\r', 'GO', None, []], ['D10000', 'D10000\\r', 'D10000', None, []], ['GO', 'GO\\r', 'GO', None, []], ['D-10000', 'D-10000\\r', 'D-10000', None, []], ['GO', 'GO\\r', 'GO', None, []], ['D10000', 'D10000\\r', 'D10000', None, []], ['GO', 'GO\\r', 'GO', None, []], ['DRIVE0', 'DRIVE0\\r', 'DRIVE0', None, []]] """ # If eor is not a list, make a list of it replicated enough for # every command. if not isinstance(eor, list): eor = [eor]*len(commands) # Do every command one by one, collecting the responses and # stuffing them in a list. Commands that failed are retried, and # we stop if the last retry is exhausted. responses = [] for i, command in enumerate(commands): rsp = self.send_command(command, timeout=timeout, max_retries=max_retries, eor=eor[i]) responses.append(rsp) if self.command_error(rsp): break # Put in a slight pause so the drive has a bit of breathing # time between commands. time.sleep(0.25) return responses
[ "def", "send_commands", "(", "self", ",", "commands", ",", "timeout", "=", "1.0", ",", "max_retries", "=", "1", ",", "eor", "=", "(", "'\\n'", ",", "'\\n- '", ")", ")", ":", "# If eor is not a list, make a list of it replicated enough for", "# every command.", "if", "not", "isinstance", "(", "eor", ",", "list", ")", ":", "eor", "=", "[", "eor", "]", "*", "len", "(", "commands", ")", "# Do every command one by one, collecting the responses and", "# stuffing them in a list. Commands that failed are retried, and", "# we stop if the last retry is exhausted.", "responses", "=", "[", "]", "for", "i", ",", "command", "in", "enumerate", "(", "commands", ")", ":", "rsp", "=", "self", ".", "send_command", "(", "command", ",", "timeout", "=", "timeout", ",", "max_retries", "=", "max_retries", ",", "eor", "=", "eor", "[", "i", "]", ")", "responses", ".", "append", "(", "rsp", ")", "if", "self", ".", "command_error", "(", "rsp", ")", ":", "break", "# Put in a slight pause so the drive has a bit of breathing", "# time between commands.", "time", ".", "sleep", "(", "0.25", ")", "return", "responses" ]
Send a sequence of commands to the drive and collect output. Takes a sequence of many commands and executes them one by one till either all are executed or one runs out of retries (`max_retries`). Retries are optionally performed if a command's response indicates that there was an error. Remaining commands are not executed. The processed output of the final execution (last try or retry) of each command that was actually executed is returned. This function basically feeds commands one by one to ``send_command`` and collates the outputs. Parameters ---------- commands : iterable of str Iterable of commands to send to the drive. Each command must be a ``str``. timeout : float or None, optional Optional timeout in seconds to use when reading the response. A negative value or ``None`` indicates that an infinite timeout should be used. max_retries : int, optional Maximum number of retries to do per command in the case of errors. eor : str or iterable of str, optional End Of Response. An EOR is either a ``str`` or an iterable of ``str`` that denote the possible endings of a response. 'eor' can be a single EOR, in which case it is used for all commands, or it can be an iterable of EOR to use for each individual command. For most commands, it should be ``('\\n', '\\n- ')``, but for running a program, it should be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``. Returns ------- outputs : list of lists ``list`` composed of the processed responses of each command in the order that they were done up to and including the last command executed. See ``send_command`` for the format of processed responses. See Also -------- send_command : Send a single command. Examples -------- A sequence of commands to energize the motor, move it a bit away from the starting position, and then do 4 forward/reverse cycles, and de-energize the motor. **DO NOT** try these specific movement distances without checking that the motion won't damage something (very motor and application specific). >>> from GeminiMotorDrive.drivers import ASCII_RS232 >>> ra = ASCII_RS232('/dev/ttyS1') >>> ra.send_commands(['DRIVE1', 'D-10000', 'GO'] ... + ['D-10000','GO','D10000','GO']*4 ... + [ 'DRIVE0']) [['DRIVE1', 'DRIVE1\\r', 'DRIVE1', None, []], ['D-10000', 'D-10000\\r', 'D-10000', None, []], ['GO', 'GO\\r', 'GO', None, []], ['D-10000', 'D-10000\\r', 'D-10000', None, []], ['GO', 'GO\\r', 'GO', None, []], ['D10000', 'D10000\\r', 'D10000', None, []], ['GO', 'GO\\r', 'GO', None, []], ['D-10000', 'D-10000\\r', 'D-10000', None, []], ['GO', 'GO\\r', 'GO', None, []], ['D10000', 'D10000\\r', 'D10000', None, []], ['GO', 'GO\\r', 'GO', None, []], ['D-10000', 'D-10000\\r', 'D-10000', None, []], ['GO', 'GO\\r', 'GO', None, []], ['D10000', 'D10000\\r', 'D10000', None, []], ['GO', 'GO\\r', 'GO', None, []], ['D-10000', 'D-10000\\r', 'D-10000', None, []], ['GO', 'GO\\r', 'GO', None, []], ['D10000', 'D10000\\r', 'D10000', None, []], ['GO', 'GO\\r', 'GO', None, []], ['DRIVE0', 'DRIVE0\\r', 'DRIVE0', None, []]]
[ "Send", "a", "sequence", "of", "commands", "to", "the", "drive", "and", "collect", "output", "." ]
python
train
heitzmann/gdspy
gdspy/__init__.py
https://github.com/heitzmann/gdspy/blob/2c8d1313248c544e2066d19095b7ad7158c79bc9/gdspy/__init__.py#L2217-L2243
def add(self, element): """ Add a new element or list of elements to this cell. Parameters ---------- element : object, list The element or list of elements to be inserted in this cell. Returns ------- out : ``Cell`` This cell. """ if isinstance(element, list): for e in element: if isinstance(e, Label): self.labels.append(e) else: self.elements.append(e) else: if isinstance(element, Label): self.labels.append(element) else: self.elements.append(element) self._bb_valid = False return self
[ "def", "add", "(", "self", ",", "element", ")", ":", "if", "isinstance", "(", "element", ",", "list", ")", ":", "for", "e", "in", "element", ":", "if", "isinstance", "(", "e", ",", "Label", ")", ":", "self", ".", "labels", ".", "append", "(", "e", ")", "else", ":", "self", ".", "elements", ".", "append", "(", "e", ")", "else", ":", "if", "isinstance", "(", "element", ",", "Label", ")", ":", "self", ".", "labels", ".", "append", "(", "element", ")", "else", ":", "self", ".", "elements", ".", "append", "(", "element", ")", "self", ".", "_bb_valid", "=", "False", "return", "self" ]
Add a new element or list of elements to this cell. Parameters ---------- element : object, list The element or list of elements to be inserted in this cell. Returns ------- out : ``Cell`` This cell.
[ "Add", "a", "new", "element", "or", "list", "of", "elements", "to", "this", "cell", "." ]
python
train
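Typical use of `Cell.add` above, assuming the gdspy 1.x API shown in this record; the geometry is arbitrary:

import gdspy

cell = gdspy.Cell('TOP')
cell.add(gdspy.Rectangle((0, 0), (2, 1)))    # goes to cell.elements
cell.add([gdspy.Label('origin', (0, 0)),
          gdspy.Rectangle((3, 0), (4, 1))])  # labels and elements routed apart
print(len(cell.elements), len(cell.labels))  # 2 1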
click-contrib/click-repl
click_repl/__init__.py
https://github.com/click-contrib/click-repl/blob/2d78dc520eb0bb5b813bad3b72344edbd22a7f4e/click_repl/__init__.py#L168-L257
def repl( # noqa: C901 old_ctx, prompt_kwargs=None, allow_system_commands=True, allow_internal_commands=True, ): """ Start an interactive shell. All subcommands are available in it. :param old_ctx: The current Click context. :param prompt_kwargs: Parameters passed to :py:func:`prompt_toolkit.shortcuts.prompt`. If stdin is not a TTY, no prompt will be printed, but only commands read from stdin. """ # parent should be available, but we're not going to bother if not group_ctx = old_ctx.parent or old_ctx group = group_ctx.command isatty = sys.stdin.isatty() # Delete the REPL command from those available, as we don't want to allow # nesting REPLs (note: pass `None` to `pop` as we don't want to error if # REPL command already not present for some reason). repl_command_name = old_ctx.command.name if isinstance(group_ctx.command, click.CommandCollection): available_commands = { cmd_name: cmd_obj for source in group_ctx.command.sources for cmd_name, cmd_obj in source.commands.items() } else: available_commands = group_ctx.command.commands available_commands.pop(repl_command_name, None) prompt_kwargs = bootstrap_prompt(prompt_kwargs, group) if isatty: def get_command(): return prompt(**prompt_kwargs) else: get_command = sys.stdin.readline while True: try: command = get_command() except KeyboardInterrupt: continue except EOFError: break if not command: if isatty: continue else: break if allow_system_commands and dispatch_repl_commands(command): continue if allow_internal_commands: try: result = handle_internal_commands(command) if isinstance(result, six.string_types): click.echo(result) continue except ExitReplException: break try: args = shlex.split(command) except ValueError as e: click.echo("{}: {}".format(type(e).__name__, e)) continue try: with group.make_context(None, args, parent=group_ctx) as ctx: group.invoke(ctx) ctx.exit() except click.ClickException as e: e.show() except ClickExit: pass except SystemExit: pass except ExitReplException: break
[ "def", "repl", "(", "# noqa: C901", "old_ctx", ",", "prompt_kwargs", "=", "None", ",", "allow_system_commands", "=", "True", ",", "allow_internal_commands", "=", "True", ",", ")", ":", "# parent should be available, but we're not going to bother if not", "group_ctx", "=", "old_ctx", ".", "parent", "or", "old_ctx", "group", "=", "group_ctx", ".", "command", "isatty", "=", "sys", ".", "stdin", ".", "isatty", "(", ")", "# Delete the REPL command from those available, as we don't want to allow", "# nesting REPLs (note: pass `None` to `pop` as we don't want to error if", "# REPL command already not present for some reason).", "repl_command_name", "=", "old_ctx", ".", "command", ".", "name", "if", "isinstance", "(", "group_ctx", ".", "command", ",", "click", ".", "CommandCollection", ")", ":", "available_commands", "=", "{", "cmd_name", ":", "cmd_obj", "for", "source", "in", "group_ctx", ".", "command", ".", "sources", "for", "cmd_name", ",", "cmd_obj", "in", "source", ".", "commands", ".", "items", "(", ")", "}", "else", ":", "available_commands", "=", "group_ctx", ".", "command", ".", "commands", "available_commands", ".", "pop", "(", "repl_command_name", ",", "None", ")", "prompt_kwargs", "=", "bootstrap_prompt", "(", "prompt_kwargs", ",", "group", ")", "if", "isatty", ":", "def", "get_command", "(", ")", ":", "return", "prompt", "(", "*", "*", "prompt_kwargs", ")", "else", ":", "get_command", "=", "sys", ".", "stdin", ".", "readline", "while", "True", ":", "try", ":", "command", "=", "get_command", "(", ")", "except", "KeyboardInterrupt", ":", "continue", "except", "EOFError", ":", "break", "if", "not", "command", ":", "if", "isatty", ":", "continue", "else", ":", "break", "if", "allow_system_commands", "and", "dispatch_repl_commands", "(", "command", ")", ":", "continue", "if", "allow_internal_commands", ":", "try", ":", "result", "=", "handle_internal_commands", "(", "command", ")", "if", "isinstance", "(", "result", ",", "six", ".", "string_types", ")", ":", "click", ".", "echo", "(", "result", ")", "continue", "except", "ExitReplException", ":", "break", "try", ":", "args", "=", "shlex", ".", "split", "(", "command", ")", "except", "ValueError", "as", "e", ":", "click", ".", "echo", "(", "\"{}: {}\"", ".", "format", "(", "type", "(", "e", ")", ".", "__name__", ",", "e", ")", ")", "continue", "try", ":", "with", "group", ".", "make_context", "(", "None", ",", "args", ",", "parent", "=", "group_ctx", ")", "as", "ctx", ":", "group", ".", "invoke", "(", "ctx", ")", "ctx", ".", "exit", "(", ")", "except", "click", ".", "ClickException", "as", "e", ":", "e", ".", "show", "(", ")", "except", "ClickExit", ":", "pass", "except", "SystemExit", ":", "pass", "except", "ExitReplException", ":", "break" ]
Start an interactive shell. All subcommands are available in it. :param old_ctx: The current Click context. :param prompt_kwargs: Parameters passed to :py:func:`prompt_toolkit.shortcuts.prompt`. If stdin is not a TTY, no prompt will be printed, but only commands read from stdin.
[ "Start", "an", "interactive", "shell", ".", "All", "subcommands", "are", "available", "in", "it", "." ]
python
train
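The usual entry point for the click-repl function above is `register_repl`, which installs `repl` as a subcommand of a click group; a minimal sketch:

import click
from click_repl import register_repl

@click.group()
def cli():
    pass

@cli.command()
def hello():
    click.echo("Hello!")

register_repl(cli)  # adds a 'repl' command that drops into the loop above

if __name__ == "__main__":
    cli()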
DataONEorg/d1_python
gmn/src/d1_gmn/app/revision.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/revision.py#L230-L246
def _set_chain_sid(chain_model, sid): """Set or update SID for chain. If the chain already has a SID, ``sid`` must either be None or match the existing SID. """ if not sid: return if chain_model.sid and chain_model.sid.did != sid: raise d1_common.types.exceptions.ServiceFailure( 0, 'Attempted to modify existing SID. ' 'existing_sid="{}", new_sid="{}"'.format(chain_model.sid.did, sid), ) chain_model.sid = d1_gmn.app.did.get_or_create_did(sid) chain_model.save()
[ "def", "_set_chain_sid", "(", "chain_model", ",", "sid", ")", ":", "if", "not", "sid", ":", "return", "if", "chain_model", ".", "sid", "and", "chain_model", ".", "sid", ".", "did", "!=", "sid", ":", "raise", "d1_common", ".", "types", ".", "exceptions", ".", "ServiceFailure", "(", "0", ",", "'Attempted to modify existing SID. '", "'existing_sid=\"{}\", new_sid=\"{}\"'", ".", "format", "(", "chain_model", ".", "sid", ".", "did", ",", "sid", ")", ",", ")", "chain_model", ".", "sid", "=", "d1_gmn", ".", "app", ".", "did", ".", "get_or_create_did", "(", "sid", ")", "chain_model", ".", "save", "(", ")" ]
Set or update SID for chain. If the chain already has a SID, ``sid`` must either be None or match the existing SID.
[ "Set", "or", "update", "SID", "for", "chain", "." ]
python
train
aws/sagemaker-python-sdk
src/sagemaker/logs.py
https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/logs.py#L80-L113
def multi_stream_iter(client, log_group, streams, positions=None): """Iterate over the available events coming from a set of log streams in a single log group interleaving the events from each stream so they're yielded in timestamp order. Args: client (boto3 client): The boto client for logs. log_group (str): The name of the log group. streams (list of str): A list of the log stream names. The position of the stream in this list is the stream number. positions: (list of Positions): A list of pairs of (timestamp, skip) which represents the last record read from each stream. Yields: A tuple of (stream number, cloudwatch log event). """ positions = positions or {s: Position(timestamp=0, skip=0) for s in streams} event_iters = [log_stream(client, log_group, s, positions[s].timestamp, positions[s].skip) for s in streams] events = [] for s in event_iters: if not s: events.append(None) continue try: events.append(next(s)) except StopIteration: events.append(None) while some(events): i = argmin(events, lambda x: x['timestamp'] if x else 9999999999) yield (i, events[i]) try: events[i] = next(event_iters[i]) except StopIteration: events[i] = None
[ "def", "multi_stream_iter", "(", "client", ",", "log_group", ",", "streams", ",", "positions", "=", "None", ")", ":", "positions", "=", "positions", "or", "{", "s", ":", "Position", "(", "timestamp", "=", "0", ",", "skip", "=", "0", ")", "for", "s", "in", "streams", "}", "event_iters", "=", "[", "log_stream", "(", "client", ",", "log_group", ",", "s", ",", "positions", "[", "s", "]", ".", "timestamp", ",", "positions", "[", "s", "]", ".", "skip", ")", "for", "s", "in", "streams", "]", "events", "=", "[", "]", "for", "s", "in", "event_iters", ":", "if", "not", "s", ":", "events", ".", "append", "(", "None", ")", "continue", "try", ":", "events", ".", "append", "(", "next", "(", "s", ")", ")", "except", "StopIteration", ":", "events", ".", "append", "(", "None", ")", "while", "some", "(", "events", ")", ":", "i", "=", "argmin", "(", "events", ",", "lambda", "x", ":", "x", "[", "'timestamp'", "]", "if", "x", "else", "9999999999", ")", "yield", "(", "i", ",", "events", "[", "i", "]", ")", "try", ":", "events", "[", "i", "]", "=", "next", "(", "event_iters", "[", "i", "]", ")", "except", "StopIteration", ":", "events", "[", "i", "]", "=", "None" ]
Iterate over the available events coming from a set of log streams in a single log group interleaving the events from each stream so they're yielded in timestamp order. Args: client (boto3 client): The boto client for logs. log_group (str): The name of the log group. streams (list of str): A list of the log stream names. The position of the stream in this list is the stream number. positions: (list of Positions): A list of pairs of (timestamp, skip) which represents the last record read from each stream. Yields: A tuple of (stream number, cloudwatch log event).
[ "Iterate", "over", "the", "available", "events", "coming", "from", "a", "set", "of", "log", "streams", "in", "a", "single", "log", "group", "interleaving", "the", "events", "from", "each", "stream", "so", "they", "re", "yielded", "in", "timestamp", "order", "." ]
python
train
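The sagemaker iterator above is a k-way merge keyed on event timestamps; the same interleaving shown with plain lists standing in for CloudWatch streams (`some` and `argmin` re-implemented so the sketch runs on its own):

def some(xs):
    return any(x is not None for x in xs)

def argmin(xs, key):
    return min(range(len(xs)), key=lambda i: key(xs[i]))

streams = [[{'timestamp': 1, 'message': 'a'}, {'timestamp': 5, 'message': 'c'}],
           [{'timestamp': 3, 'message': 'b'}]]
iters = [iter(s) for s in streams]
events = [next(it, None) for it in iters]

while some(events):
    i = argmin(events, lambda x: x['timestamp'] if x else 9999999999)
    print(i, events[i]['message'])  # (0, a) (1, b) (0, c): timestamp order
    events[i] = next(iters[i], None)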
iotile/coretools
transport_plugins/awsiot/iotile_transport_awsiot/gateway_agent.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/awsiot/iotile_transport_awsiot/gateway_agent.py#L159-L170
def _publish_response(self, slug, message): """Publish a response message for a device Args: slug (string): The device slug that we are publishing on behalf of message (dict): A set of key value pairs that are used to create the message that is sent. """ resp_topic = self.topics.gateway_topic(slug, 'data/response') self._logger.debug("Publishing response message: (topic=%s) (message=%s)", resp_topic, message) self.client.publish(resp_topic, message)
[ "def", "_publish_response", "(", "self", ",", "slug", ",", "message", ")", ":", "resp_topic", "=", "self", ".", "topics", ".", "gateway_topic", "(", "slug", ",", "'data/response'", ")", "self", ".", "_logger", ".", "debug", "(", "\"Publishing response message: (topic=%s) (message=%s)\"", ",", "resp_topic", ",", "message", ")", "self", ".", "client", ".", "publish", "(", "resp_topic", ",", "message", ")" ]
Publish a response message for a device Args: slug (string): The device slug that we are publishing on behalf of message (dict): A set of key value pairs that are used to create the message that is sent.
[ "Publish", "a", "response", "message", "for", "a", "device" ]
python
train
RadhikaG/markdown-magic
magic/gifAPI.py
https://github.com/RadhikaG/markdown-magic/blob/af99549b033269d861ea13f0541cb4f894057c47/magic/gifAPI.py#L5-L49
def processGif(searchStr): ''' This function returns the url of the gif searched for with the given search parameters using the Giphy API. Thanks! Fails gracefully when it can't find a gif by returning an appropriate image url with the failure message on it. ''' # Sanitizing searchStr # TODO: Find a better way to do this searchStr.replace('| ', ' ') searchStr.replace('|', ' ') searchStr.replace(', ', ' ') searchStr.replace(',', ' ') searchStr.rstrip() searchStr = searchStr.strip('./?\'!,') searchStr = searchStr.replace(' ', '+') if searchStr is None or searchStr == '': print("No search parameters specified!") return no_search_params api_url = 'http://api.giphy.com/v1/gifs/search' api_key = 'dc6zaTOxFJmzC' payload = { 'q': searchStr, 'limit': 1, 'api_key': api_key, } r = requests.get(api_url, params=payload) parsed_json = json.loads(r.text) # print(parsed_json) if len(parsed_json['data']) == 0: print("Couldn't find suitable match for gif! :(") return -1 else: # Success! imgURL = parsed_json['data'][0]['images']['fixed_height']['url'] # print(imgURL) return imgURL
[ "def", "processGif", "(", "searchStr", ")", ":", "# Sanitizing searchStr", "# TODO: Find a better way to do this", "searchStr", ".", "replace", "(", "'| '", ",", "' '", ")", "searchStr", ".", "replace", "(", "'|'", ",", "' '", ")", "searchStr", ".", "replace", "(", "', '", ",", "' '", ")", "searchStr", ".", "replace", "(", "','", ",", "' '", ")", "searchStr", ".", "rstrip", "(", ")", "searchStr", "=", "searchStr", ".", "strip", "(", "'./?\\'!,'", ")", "searchStr", "=", "searchStr", ".", "replace", "(", "' '", ",", "'+'", ")", "if", "searchStr", "is", "None", "or", "searchStr", "==", "''", ":", "print", "(", "\"No search parameters specified!\"", ")", "return", "no_search_params", "api_url", "=", "'http://api.giphy.com/v1/gifs/search'", "api_key", "=", "'dc6zaTOxFJmzC'", "payload", "=", "{", "'q'", ":", "searchStr", ",", "'limit'", ":", "1", ",", "'api_key'", ":", "api_key", ",", "}", "r", "=", "requests", ".", "get", "(", "api_url", ",", "params", "=", "payload", ")", "parsed_json", "=", "json", ".", "loads", "(", "r", ".", "text", ")", "# print(parsed_json)", "if", "len", "(", "parsed_json", "[", "'data'", "]", ")", "==", "0", ":", "print", "(", "\"Couldn't find suitable match for gif! :(\"", ")", "return", "-", "1", "else", ":", "# Success!", "imgURL", "=", "parsed_json", "[", "'data'", "]", "[", "0", "]", "[", "'images'", "]", "[", "'fixed_height'", "]", "[", "'url'", "]", "# print(imgURL)", "return", "imgURL" ]
This function returns the url of the gif searched for with the given search parameters using the Giphy API. Thanks! Fails gracefully when it can't find a gif by returning an appropriate image url with the failure message on it.
[ "This", "function", "returns", "the", "url", "of", "the", "gif", "searched", "for", "with", "the", "given", "search", "parameters", "using", "the", "Giphy", "API", ".", "Thanks!" ]
python
train
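The request at the core of `processGif` above, runnable on its own; the endpoint and public beta key come straight from the source, and live network access to Giphy is required:

import json
import requests

payload = {'q': 'cat', 'limit': 1, 'api_key': 'dc6zaTOxFJmzC'}
r = requests.get('http://api.giphy.com/v1/gifs/search', params=payload)
data = json.loads(r.text)['data']
print(data[0]['images']['fixed_height']['url'] if data else 'no match')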
swharden/SWHLab
doc/oldcode/swhlab/core/ap.py
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/doc/oldcode/swhlab/core/ap.py#L153-L215
def analyzeAP(Y,dY,I,rate,verbose=False): """ given a sweep and a time point, return the AP array for that AP. APs will be centered in time by their maximum upslope. """ Ims = int(rate/1000) #Is per MS IsToLook=5*Ims #TODO: clarify this, ms until downslope is over upslope=np.max(dY[I:I+IsToLook]) #maximum rise velocity upslopeI=np.where(dY[I:I+IsToLook]==upslope)[0][0]+I I=upslopeI #center sweep at the upslope downslope=np.min(dY[I:I+IsToLook]) #maximum fall velocity downslopeI=np.where(dY[I:I+IsToLook]==downslope)[0][0]+I peak=np.max(Y[I:I+IsToLook]) #find peak value (mV) peakI=np.where(Y[I:I+IsToLook]==peak)[0][0]+I #find peak I thresholdI=I-np.where(dY[I:I+IsToLook:--1]<10)[0] #detect <10V/S if not len(thresholdI): return False thresholdI=thresholdI[0] threshold=Y[thresholdI] # mV where >10mV/S height=peak-threshold # height (mV) from threshold to peak halfwidthPoint=np.average((threshold,peak)) halfwidth=np.where(Y[I-IsToLook:I+IsToLook]>halfwidthPoint)[0] if not len(halfwidth): return False #doesn't look like a real AP halfwidthI1=halfwidth[0]+I-IsToLook halfwidthI2=halfwidth[-1]+I-IsToLook if Y[halfwidthI1-1]>halfwidthPoint or Y[halfwidthI2+1]>halfwidthPoint: return False #doesn't look like a real AP halfwidth=len(halfwidth)/rate*1000 #now in MS riseTime=(peakI-thresholdI)*1000/rate # time (ms) from threshold to peak IsToLook=100*Ims #TODO: max prediction until AHP reaches nadir AHPchunk=np.diff(Y[downslopeI:downslopeI+IsToLook]) #first inflection AHPI=np.where(AHPchunk>0)[0] if len(AHPI)==0: AHPI=np.nan else: AHPI=AHPI[0]+downslopeI AHPchunk=Y[AHPI:AHPI+IsToLook] if max(AHPchunk)>threshold: #if another AP is coming, cut it out AHPchunk=AHPchunk[:np.where(AHPchunk>threshold)[0][0]] if len(AHPchunk): AHP=np.nanmin(AHPchunk) AHPI=np.where(AHPchunk==AHP)[0][0]+AHPI AHPheight=threshold-AHP # AHP magnitude from threshold (mV) IsToLook=500*Ims #TODO: max prediction until AHP reaches threshold AHPreturn=np.average((AHP,threshold)) #half of threshold AHPreturnI=np.where(Y[AHPI:AHPI+IsToLook]>AHPreturn)[0] if len(AHPreturnI): #not having a clean decay won't cause AP to crash AHPreturnI=AHPreturnI[0]+AHPI AHPrisetime=(AHPreturnI-AHPI)*2/rate*1000 #predicted return time (ms) AHPupslope=AHPheight/AHPrisetime #mV/ms = V/S AHPreturnFullI=(AHPreturnI-AHPI)*2+AHPI else: #make them nan so you can do averages later AHPreturnI,AHPrisetime,AHPupslope=np.nan,np.nan,np.nan downslope=np.nan #fasttime (10V/S to 10V/S) #TODO: #dpp (deriv peak to peak) #TODO: sweepI,sweepT=I,I/rate # clean up variable names del IsToLook,I, Y, dY, Ims, AHPchunk, verbose #delete what we don't need return locals()
[ "def", "analyzeAP", "(", "Y", ",", "dY", ",", "I", ",", "rate", ",", "verbose", "=", "False", ")", ":", "Ims", "=", "int", "(", "rate", "/", "1000", ")", "#Is per MS", "IsToLook", "=", "5", "*", "Ims", "#TODO: clarify this, ms until downslope is over", "upslope", "=", "np", ".", "max", "(", "dY", "[", "I", ":", "I", "+", "IsToLook", "]", ")", "#maximum rise velocity", "upslopeI", "=", "np", ".", "where", "(", "dY", "[", "I", ":", "I", "+", "IsToLook", "]", "==", "upslope", ")", "[", "0", "]", "[", "0", "]", "+", "I", "I", "=", "upslopeI", "#center sweep at the upslope", "downslope", "=", "np", ".", "min", "(", "dY", "[", "I", ":", "I", "+", "IsToLook", "]", ")", "#maximum fall velocity", "downslopeI", "=", "np", ".", "where", "(", "dY", "[", "I", ":", "I", "+", "IsToLook", "]", "==", "downslope", ")", "[", "0", "]", "[", "0", "]", "+", "I", "peak", "=", "np", ".", "max", "(", "Y", "[", "I", ":", "I", "+", "IsToLook", "]", ")", "#find peak value (mV)", "peakI", "=", "np", ".", "where", "(", "Y", "[", "I", ":", "I", "+", "IsToLook", "]", "==", "peak", ")", "[", "0", "]", "[", "0", "]", "+", "I", "#find peak I", "thresholdI", "=", "I", "-", "np", ".", "where", "(", "dY", "[", "I", ":", "I", "+", "IsToLook", ":", "-", "-", "1", "]", "<", "10", ")", "[", "0", "]", "#detect <10V/S", "if", "not", "len", "(", "thresholdI", ")", ":", "return", "False", "thresholdI", "=", "thresholdI", "[", "0", "]", "threshold", "=", "Y", "[", "thresholdI", "]", "# mV where >10mV/S", "height", "=", "peak", "-", "threshold", "# height (mV) from threshold to peak", "halfwidthPoint", "=", "np", ".", "average", "(", "(", "threshold", ",", "peak", ")", ")", "halfwidth", "=", "np", ".", "where", "(", "Y", "[", "I", "-", "IsToLook", ":", "I", "+", "IsToLook", "]", ">", "halfwidthPoint", ")", "[", "0", "]", "if", "not", "len", "(", "halfwidth", ")", ":", "return", "False", "#doesn't look like a real AP", "halfwidthI1", "=", "halfwidth", "[", "0", "]", "+", "I", "-", "IsToLook", "halfwidthI2", "=", "halfwidth", "[", "-", "1", "]", "+", "I", "-", "IsToLook", "if", "Y", "[", "halfwidthI1", "-", "1", "]", ">", "halfwidthPoint", "or", "Y", "[", "halfwidthI2", "+", "1", "]", ">", "halfwidthPoint", ":", "return", "False", "#doesn't look like a real AP", "halfwidth", "=", "len", "(", "halfwidth", ")", "/", "rate", "*", "1000", "#now in MS", "riseTime", "=", "(", "peakI", "-", "thresholdI", ")", "*", "1000", "/", "rate", "# time (ms) from threshold to peak", "IsToLook", "=", "100", "*", "Ims", "#TODO: max prediction until AHP reaches nadir", "AHPchunk", "=", "np", ".", "diff", "(", "Y", "[", "downslopeI", ":", "downslopeI", "+", "IsToLook", "]", ")", "#first inflection", "AHPI", "=", "np", ".", "where", "(", "AHPchunk", ">", "0", ")", "[", "0", "]", "if", "len", "(", "AHPI", ")", "==", "0", ":", "AHPI", "=", "np", ".", "nan", "else", ":", "AHPI", "=", "AHPI", "[", "0", "]", "+", "downslopeI", "AHPchunk", "=", "Y", "[", "AHPI", ":", "AHPI", "+", "IsToLook", "]", "if", "max", "(", "AHPchunk", ")", ">", "threshold", ":", "#if another AP is coming, cut it out", "AHPchunk", "=", "AHPchunk", "[", ":", "np", ".", "where", "(", "AHPchunk", ">", "threshold", ")", "[", "0", "]", "[", "0", "]", "]", "if", "len", "(", "AHPchunk", ")", ":", "AHP", "=", "np", ".", "nanmin", "(", "AHPchunk", ")", "AHPI", "=", "np", ".", "where", "(", "AHPchunk", "==", "AHP", ")", "[", "0", "]", "[", "0", "]", "+", "AHPI", "AHPheight", "=", "threshold", "-", "AHP", "# AHP magnitude from threshold (mV)", "IsToLook", "=", "500", "*", "Ims", "#TODO: max prediction 
until AHP reaches threshold", "AHPreturn", "=", "np", ".", "average", "(", "(", "AHP", ",", "threshold", ")", ")", "#half of threshold", "AHPreturnI", "=", "np", ".", "where", "(", "Y", "[", "AHPI", ":", "AHPI", "+", "IsToLook", "]", ">", "AHPreturn", ")", "[", "0", "]", "if", "len", "(", "AHPreturnI", ")", ":", "#not having a clean decay won't cause AP to crash", "AHPreturnI", "=", "AHPreturnI", "[", "0", "]", "+", "AHPI", "AHPrisetime", "=", "(", "AHPreturnI", "-", "AHPI", ")", "*", "2", "/", "rate", "*", "1000", "#predicted return time (ms)", "AHPupslope", "=", "AHPheight", "/", "AHPrisetime", "#mV/ms = V/S", "AHPreturnFullI", "=", "(", "AHPreturnI", "-", "AHPI", ")", "*", "2", "+", "AHPI", "else", ":", "#make them nan so you can do averages later", "AHPreturnI", ",", "AHPrisetime", ",", "AHPupslope", "=", "np", ".", "nan", ",", "np", ".", "nan", ",", "np", ".", "nan", "downslope", "=", "np", ".", "nan", "#fasttime (10V/S to 10V/S) #TODO:", "#dpp (deriv peak to peak) #TODO:", "sweepI", ",", "sweepT", "=", "I", ",", "I", "/", "rate", "# clean up variable names", "del", "IsToLook", ",", "I", ",", "Y", ",", "dY", ",", "Ims", ",", "AHPchunk", ",", "verbose", "#delete what we don't need", "return", "locals", "(", ")" ]
given a sweep and a time point, return the AP array for that AP. APs will be centered in time by their maximum upslope.
[ "given", "a", "sweep", "and", "a", "time", "point", "return", "the", "AP", "array", "for", "that", "AP", ".", "APs", "will", "be", "centered", "in", "time", "by", "their", "maximum", "upslope", "." ]
python
valid
mrsarm/mongotail
mongotail/err.py
https://github.com/mrsarm/mongotail/blob/82ba74e32eff92faa320833a8d19c58555f9cd49/mongotail/err.py#L42-L48
def error_parsing(msg="unknown options"): """ Print any parsing error and exit with status EINVAL """ sys.stderr.write("Error parsing command line: %s\ntry 'mongotail --help' for more information\n" % msg) sys.stderr.flush() exit(EINVAL)
[ "def", "error_parsing", "(", "msg", "=", "\"unknown options\"", ")", ":", "sys", ".", "stderr", ".", "write", "(", "\"Error parsing command line: %s\\ntry 'mongotail --help' for more information\\n\"", "%", "msg", ")", "sys", ".", "stderr", ".", "flush", "(", ")", "exit", "(", "EINVAL", ")" ]
Print any parsing error and exit with status EINVAL
[ "Print", "any", "parsing", "error", "and", "exit", "with", "status", "-", "1" ]
python
test
Dallinger/Dallinger
dallinger/models.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/models.py#L1663-L1673
def json_data(self): """The json representation of a transmission.""" return { "vector_id": self.vector_id, "origin_id": self.origin_id, "destination_id": self.destination_id, "info_id": self.info_id, "network_id": self.network_id, "receive_time": self.receive_time, "status": self.status, }
[ "def", "json_data", "(", "self", ")", ":", "return", "{", "\"vector_id\"", ":", "self", ".", "vector_id", ",", "\"origin_id\"", ":", "self", ".", "origin_id", ",", "\"destination_id\"", ":", "self", ".", "destination_id", ",", "\"info_id\"", ":", "self", ".", "info_id", ",", "\"network_id\"", ":", "self", ".", "network_id", ",", "\"receive_time\"", ":", "self", ".", "receive_time", ",", "\"status\"", ":", "self", ".", "status", ",", "}" ]
The json representation of a transmission.
[ "The", "json", "representation", "of", "a", "transmissions", "." ]
python
train
twidi/py-dataql
dataql/solvers/resources.py
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/solvers/resources.py#L261-L316
def coerce(self, value, resource): """ Coerce the value to an acceptable one. Only these kinds of values are returned as is: - str - int - float - True - False - None For all other values, it will be coerced using ``self.coerce_default`` (which converts the value to a string in the default implementation). Arguments --------- value : ? The value to be coerced. resource : dataql.resources.Resource The ``Resource`` object used to obtain this value from the original one. Returns ------- str | int | float | True | False | None The coerced value. Example ------- >>> from dataql.solvers.registry import Registry >>> registry = Registry() >>> from datetime import date >>> registry.register(date) >>> solver = AttributeSolver(registry) >>> solver.coerce('foo', None) 'foo' >>> solver.coerce(11, None) 11 >>> solver.coerce(1.1, None) 1.1 >>> solver.coerce(True, None) True >>> solver.coerce(False, None) False >>> solver.coerce(date(2015, 6, 1), None) '2015-06-01' >>> solver.coerce(None, None) """ if value in (True, False, None): return value if isinstance(value, (int, float)): return value if isinstance(value, str): return value return self.coerce_default(value, resource)
[ "def", "coerce", "(", "self", ",", "value", ",", "resource", ")", ":", "if", "value", "in", "(", "True", ",", "False", ",", "None", ")", ":", "return", "value", "if", "isinstance", "(", "value", ",", "(", "int", ",", "float", ")", ")", ":", "return", "value", "if", "isinstance", "(", "value", ",", "str", ")", ":", "return", "value", "return", "self", ".", "coerce_default", "(", "value", ",", "resource", ")" ]
Coerce the value to an acceptable one. Only these kinds of values are returned as is: - str - int - float - True - False - None For all other values, it will be coerced using ``self.coerce_default`` (which converts the value to a string in the default implementation). Arguments --------- value : ? The value to be coerced. resource : dataql.resources.Resource The ``Resource`` object used to obtain this value from the original one. Returns ------- str | int | float | True | False | None The coerced value. Example ------- >>> from dataql.solvers.registry import Registry >>> registry = Registry() >>> from datetime import date >>> registry.register(date) >>> solver = AttributeSolver(registry) >>> solver.coerce('foo', None) 'foo' >>> solver.coerce(11, None) 11 >>> solver.coerce(1.1, None) 1.1 >>> solver.coerce(True, None) True >>> solver.coerce(False, None) False >>> solver.coerce(date(2015, 6, 1), None) '2015-06-01' >>> solver.coerce(None, None)
[ "Coerce", "the", "value", "to", "an", "acceptable", "one", "." ]
python
train
google/grr
grr/client/grr_response_client/comms.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/comms.py#L431-L440
def Wait(self, timeout): """Wait for the specified timeout.""" time.sleep(timeout - int(timeout)) # Split a long sleep interval into 1 second intervals so we can heartbeat. for _ in range(int(timeout)): time.sleep(1) if self.heart_beat_cb: self.heart_beat_cb()
[ "def", "Wait", "(", "self", ",", "timeout", ")", ":", "time", ".", "sleep", "(", "timeout", "-", "int", "(", "timeout", ")", ")", "# Split a long sleep interval into 1 second intervals so we can heartbeat.", "for", "_", "in", "range", "(", "int", "(", "timeout", ")", ")", ":", "time", ".", "sleep", "(", "1", ")", "if", "self", ".", "heart_beat_cb", ":", "self", ".", "heart_beat_cb", "(", ")" ]
Wait for the specified timeout.
[ "Wait", "for", "the", "specified", "timeout", "." ]
python
train
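The grr `Wait` above splits a long sleep into one-second slices so a heartbeat can fire between them; the same pattern as a standalone function with a stub callback:

import time

def wait_with_heartbeat(timeout, heart_beat_cb=None):
    time.sleep(timeout - int(timeout))  # sleep the fractional part first
    for _ in range(int(timeout)):
        time.sleep(1)
        if heart_beat_cb:
            heart_beat_cb()

wait_with_heartbeat(2.5, lambda: print("beat"))  # prints "beat" twice over ~2.5 s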
shoebot/shoebot
lib/web/yahoo.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/web/yahoo.py#L256-L289
def sort(words, context="", strict=True, relative=True, service=YAHOO_SEARCH, wait=10, asynchronous=False, cached=False): """Performs a Yahoo sort on the given list. Sorts the items in the list according to the result count Yahoo yields on an item. Setting a context sorts the items according to their relation to this context; for example sorting [red, green, blue] by "love" yields red as the highest results, likely because red is the color commonly associated with love. """ results = [] for word in words: q = word + " " + context q.strip() if strict: q = "\""+q+"\"" r = YahooSearch(q, 1, 1, service, context, wait, asynchronous, cached) results.append(r) results.sort(YahooResults.__cmp__) results.reverse() if relative and len(results) > 0: sum = 0.000000000000000001 for r in results: sum += r.total for r in results: r.total /= float(sum) results = [(r.query, r.total) for r in results] return results
[ "def", "sort", "(", "words", ",", "context", "=", "\"\"", ",", "strict", "=", "True", ",", "relative", "=", "True", ",", "service", "=", "YAHOO_SEARCH", ",", "wait", "=", "10", ",", "asynchronous", "=", "False", ",", "cached", "=", "False", ")", ":", "results", "=", "[", "]", "for", "word", "in", "words", ":", "q", "=", "word", "+", "\" \"", "+", "context", "q", ".", "strip", "(", ")", "if", "strict", ":", "q", "=", "\"\\\"\"", "+", "q", "+", "\"\\\"\"", "r", "=", "YahooSearch", "(", "q", ",", "1", ",", "1", ",", "service", ",", "context", ",", "wait", ",", "asynchronous", ",", "cached", ")", "results", ".", "append", "(", "r", ")", "results", ".", "sort", "(", "YahooResults", ".", "__cmp__", ")", "results", ".", "reverse", "(", ")", "if", "relative", "and", "len", "(", "results", ")", ">", "0", ":", "sum", "=", "0.000000000000000001", "for", "r", "in", "results", ":", "sum", "+=", "r", ".", "total", "for", "r", "in", "results", ":", "r", ".", "total", "/=", "float", "(", "sum", ")", "results", "=", "[", "(", "r", ".", "query", ",", "r", ".", "total", ")", "for", "r", "in", "results", "]", "return", "results" ]
Performs a Yahoo sort on the given list. Sorts the items in the list according to the result count Yahoo yields on an item. Setting a context sorts the items according to their relation to this context; for example sorting [red, green, blue] by "love" yields red as the highest results, likely because red is the color commonly associated with love.
[ "Performs", "a", "Yahoo", "sort", "on", "the", "given", "list", ".", "Sorts", "the", "items", "in", "the", "list", "according", "to", "the", "result", "count", "Yahoo", "yields", "on", "an", "item", ".", "Setting", "a", "context", "sorts", "the", "items", "according", "to", "their", "relation", "to", "this", "context", ";", "for", "example", "sorting", "[", "red", "green", "blue", "]", "by", "love", "yields", "red", "as", "the", "highest", "results", "likely", "because", "red", "is", "the", "color", "commonly", "associated", "with", "love", "." ]
python
valid
calmjs/calmjs.parse
src/calmjs/parse/parsers/es5.py
https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/parsers/es5.py#L1223-L1229
def p_iteration_statement_4(self, p): """ iteration_statement \ : FOR LPAREN left_hand_side_expr IN expr RPAREN statement """ p[0] = self.asttypes.ForIn(item=p[3], iterable=p[5], statement=p[7]) p[0].setpos(p)
[ "def", "p_iteration_statement_4", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "self", ".", "asttypes", ".", "ForIn", "(", "item", "=", "p", "[", "3", "]", ",", "iterable", "=", "p", "[", "5", "]", ",", "statement", "=", "p", "[", "7", "]", ")", "p", "[", "0", "]", ".", "setpos", "(", "p", ")" ]
iteration_statement \ : FOR LPAREN left_hand_side_expr IN expr RPAREN statement
[ "iteration_statement", "\\", ":", "FOR", "LPAREN", "left_hand_side_expr", "IN", "expr", "RPAREN", "statement" ]
python
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L373-L380
def exists(self, using=None, **kwargs): """ Returns ``True`` if the index already exists in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.exists`` unchanged. """ return self._get_connection(using).indices.exists(index=self._name, **kwargs)
[ "def", "exists", "(", "self", ",", "using", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_connection", "(", "using", ")", ".", "indices", ".", "exists", "(", "index", "=", "self", ".", "_name", ",", "*", "*", "kwargs", ")" ]
Returns ``True`` if the index already exists in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.exists`` unchanged.
[ "Returns", "True", "if", "the", "index", "already", "exists", "in", "elasticsearch", "." ]
python
train
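Typical use of `Index.exists` above; the host and index name are illustrative, and a reachable Elasticsearch cluster is assumed:

from elasticsearch_dsl import Index, connections

connections.create_connection(hosts=['localhost'])  # registers the default alias
blog = Index('blog')
if not blog.exists():  # equivalent to client.indices.exists(index='blog')
    blog.create()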
i3visio/osrframework
osrframework/thirdparties/pipl_com/lib/containers.py
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/thirdparties/pipl_com/lib/containers.py#L63-L67
def all_fields(self): """A list with all the fields contained in this object.""" return [field for container in FieldsContainer.class_container.values() for field in getattr(self, container)]
[ "def", "all_fields", "(", "self", ")", ":", "return", "[", "field", "for", "container", "in", "FieldsContainer", ".", "class_container", ".", "values", "(", ")", "for", "field", "in", "getattr", "(", "self", ",", "container", ")", "]" ]
A list with all the fields contained in this object.
[ "A", "list", "with", "all", "the", "fields", "contained", "in", "this", "object", "." ]
python
train
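A toy stand-in makes the double comprehension in the pipl entry above concrete; the container names and field values are hypothetical:

class FieldsContainer(object):
    class_container = {'Name': 'names', 'Address': 'addresses'}

    def __init__(self):
        self.names = ['Alice']
        self.addresses = ['10 Main St', '2 Side Rd']

    @property
    def all_fields(self):
        return [field
                for container in FieldsContainer.class_container.values()
                for field in getattr(self, container)]

print(FieldsContainer().all_fields)  # ['Alice', '10 Main St', '2 Side Rd']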
lambdamusic/Ontospy
ontospy/ontodocs/viz_factory.py
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/ontodocs/viz_factory.py#L92-L105
def _buildTemplates(self): """ do all the things necessary to build the viz should be adapted to work for single-file viz, or multi-files etc. :param output_path: :return: """ # in this case we only have one contents = self._renderTemplate(self.template_name, extraContext=None) # the main url used for opening viz f = self.main_file_name main_url = self._save2File(contents, f, self.output_path) return main_url
[ "def", "_buildTemplates", "(", "self", ")", ":", "# in this case we only have one", "contents", "=", "self", ".", "_renderTemplate", "(", "self", ".", "template_name", ",", "extraContext", "=", "None", ")", "# the main url used for opening viz", "f", "=", "self", ".", "main_file_name", "main_url", "=", "self", ".", "_save2File", "(", "contents", ",", "f", ",", "self", ".", "output_path", ")", "return", "main_url" ]
do all the things necessary to build the viz should be adapted to work for single-file viz, or multi-files etc. :param output_path: :return:
[ "do", "all", "the", "things", "necessary", "to", "build", "the", "viz", "should", "be", "adapted", "to", "work", "for", "single", "-", "file", "viz", "or", "multi", "-", "files", "etc", "." ]
python
train
quantopian/serializable-traitlets
straitlets/serializable.py
https://github.com/quantopian/serializable-traitlets/blob/dd334366d1130825aea55d3dfecd6756973594e0/straitlets/serializable.py#L167-L175
def write_example_yaml(cls, dest, skip=()): """ Write a file containing an example yaml string for a Serializable subclass. """ # Make sure we can make an instance before we open a file. inst = cls.example_instance(skip=skip) with open(dest, 'w') as f: inst.to_yaml(stream=f, skip=skip)
[ "def", "write_example_yaml", "(", "cls", ",", "dest", ",", "skip", "=", "(", ")", ")", ":", "# Make sure we can make an instance before we open a file.", "inst", "=", "cls", ".", "example_instance", "(", "skip", "=", "skip", ")", "with", "open", "(", "dest", ",", "'w'", ")", "as", "f", ":", "inst", ".", "to_yaml", "(", "stream", "=", "f", ",", "skip", "=", "skip", ")" ]
Write a file containing an example yaml string for a Serializable subclass.
[ "Write", "a", "file", "containing", "an", "example", "yaml", "string", "for", "a", "Serializable", "subclass", "." ]
python
train
pytroll/satpy
satpy/readers/seviri_l1b_hrit.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/seviri_l1b_hrit.py#L534-L578
def calibrate(self, data, calibration): """Calibrate the data.""" tic = datetime.now() channel_name = self.channel_name if calibration == 'counts': res = data elif calibration in ['radiance', 'reflectance', 'brightness_temperature']: # Choose calibration coefficients # a) Internal: Nominal or GSICS? band_idx = self.mda['spectral_channel_id'] - 1 if self.calib_mode != 'GSICS' or self.channel_name in VIS_CHANNELS: # you cant apply GSICS values to the VIS channels coefs = self.prologue["RadiometricProcessing"]["Level15ImageCalibration"] int_gain = coefs['CalSlope'][band_idx] int_offset = coefs['CalOffset'][band_idx] else: coefs = self.prologue["RadiometricProcessing"]['MPEFCalFeedback'] int_gain = coefs['GSICSCalCoeff'][band_idx] int_offset = coefs['GSICSOffsetCount'][band_idx] # b) Internal or external? External takes precedence. gain = self.ext_calib_coefs.get(self.channel_name, {}).get('gain', int_gain) offset = self.ext_calib_coefs.get(self.channel_name, {}).get('offset', int_offset) # Convert to radiance data = data.where(data > 0) res = self._convert_to_radiance(data.astype(np.float32), gain, offset) line_mask = self.mda['image_segment_line_quality']['line_validity'] >= 2 line_mask &= self.mda['image_segment_line_quality']['line_validity'] <= 3 line_mask &= self.mda['image_segment_line_quality']['line_radiometric_quality'] == 4 line_mask &= self.mda['image_segment_line_quality']['line_geometric_quality'] == 4 res *= np.choose(line_mask, [1, np.nan])[:, np.newaxis].astype(np.float32) if calibration == 'reflectance': solar_irradiance = CALIB[self.platform_id][channel_name]["F"] res = self._vis_calibrate(res, solar_irradiance) elif calibration == 'brightness_temperature': cal_type = self.prologue['ImageDescription'][ 'Level15ImageProduction']['PlannedChanProcessing'][self.mda['spectral_channel_id']] res = self._ir_calibrate(res, channel_name, cal_type) logger.debug("Calibration time " + str(datetime.now() - tic)) return res
[ "def", "calibrate", "(", "self", ",", "data", ",", "calibration", ")", ":", "tic", "=", "datetime", ".", "now", "(", ")", "channel_name", "=", "self", ".", "channel_name", "if", "calibration", "==", "'counts'", ":", "res", "=", "data", "elif", "calibration", "in", "[", "'radiance'", ",", "'reflectance'", ",", "'brightness_temperature'", "]", ":", "# Choose calibration coefficients", "# a) Internal: Nominal or GSICS?", "band_idx", "=", "self", ".", "mda", "[", "'spectral_channel_id'", "]", "-", "1", "if", "self", ".", "calib_mode", "!=", "'GSICS'", "or", "self", ".", "channel_name", "in", "VIS_CHANNELS", ":", "# you cant apply GSICS values to the VIS channels", "coefs", "=", "self", ".", "prologue", "[", "\"RadiometricProcessing\"", "]", "[", "\"Level15ImageCalibration\"", "]", "int_gain", "=", "coefs", "[", "'CalSlope'", "]", "[", "band_idx", "]", "int_offset", "=", "coefs", "[", "'CalOffset'", "]", "[", "band_idx", "]", "else", ":", "coefs", "=", "self", ".", "prologue", "[", "\"RadiometricProcessing\"", "]", "[", "'MPEFCalFeedback'", "]", "int_gain", "=", "coefs", "[", "'GSICSCalCoeff'", "]", "[", "band_idx", "]", "int_offset", "=", "coefs", "[", "'GSICSOffsetCount'", "]", "[", "band_idx", "]", "# b) Internal or external? External takes precedence.", "gain", "=", "self", ".", "ext_calib_coefs", ".", "get", "(", "self", ".", "channel_name", ",", "{", "}", ")", ".", "get", "(", "'gain'", ",", "int_gain", ")", "offset", "=", "self", ".", "ext_calib_coefs", ".", "get", "(", "self", ".", "channel_name", ",", "{", "}", ")", ".", "get", "(", "'offset'", ",", "int_offset", ")", "# Convert to radiance", "data", "=", "data", ".", "where", "(", "data", ">", "0", ")", "res", "=", "self", ".", "_convert_to_radiance", "(", "data", ".", "astype", "(", "np", ".", "float32", ")", ",", "gain", ",", "offset", ")", "line_mask", "=", "self", ".", "mda", "[", "'image_segment_line_quality'", "]", "[", "'line_validity'", "]", ">=", "2", "line_mask", "&=", "self", ".", "mda", "[", "'image_segment_line_quality'", "]", "[", "'line_validity'", "]", "<=", "3", "line_mask", "&=", "self", ".", "mda", "[", "'image_segment_line_quality'", "]", "[", "'line_radiometric_quality'", "]", "==", "4", "line_mask", "&=", "self", ".", "mda", "[", "'image_segment_line_quality'", "]", "[", "'line_geometric_quality'", "]", "==", "4", "res", "*=", "np", ".", "choose", "(", "line_mask", ",", "[", "1", ",", "np", ".", "nan", "]", ")", "[", ":", ",", "np", ".", "newaxis", "]", ".", "astype", "(", "np", ".", "float32", ")", "if", "calibration", "==", "'reflectance'", ":", "solar_irradiance", "=", "CALIB", "[", "self", ".", "platform_id", "]", "[", "channel_name", "]", "[", "\"F\"", "]", "res", "=", "self", ".", "_vis_calibrate", "(", "res", ",", "solar_irradiance", ")", "elif", "calibration", "==", "'brightness_temperature'", ":", "cal_type", "=", "self", ".", "prologue", "[", "'ImageDescription'", "]", "[", "'Level15ImageProduction'", "]", "[", "'PlannedChanProcessing'", "]", "[", "self", ".", "mda", "[", "'spectral_channel_id'", "]", "]", "res", "=", "self", ".", "_ir_calibrate", "(", "res", ",", "channel_name", ",", "cal_type", ")", "logger", ".", "debug", "(", "\"Calibration time \"", "+", "str", "(", "datetime", ".", "now", "(", ")", "-", "tic", ")", ")", "return", "res" ]
Calibrate the data.
[ "Calibrate", "the", "data", "." ]
python
train
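This calibrate() is normally invoked for you when SEVIRI HRIT segments are loaded through Satpy's Scene API; a sketch, with the file glob as a placeholder:

from glob import glob
from satpy import Scene

# The 'seviri_l1b_hrit' reader routes raw counts through calibrate() above.
scn = Scene(filenames=glob('/data/hrit/*-201802281200-*'), reader='seviri_l1b_hrit')
scn.load(['IR_108'], calibration='brightness_temperature')
print(scn['IR_108'])  # calibrated brightness temperatures as an xarray DataArray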
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L228-L235
def _get_general_coverage(data, itype): """Retrieve coverage information from new shared SV bins. """ work_bam = dd.get_align_bam(data) or dd.get_work_bam(data) return [{"bam": work_bam, "file": tz.get_in(["depth", "bins", "target"], data), "cnntype": "target", "itype": itype, "sample": dd.get_sample_name(data)}, {"bam": work_bam, "file": tz.get_in(["depth", "bins", "antitarget"], data), "cnntype": "antitarget", "itype": itype, "sample": dd.get_sample_name(data)}]
[ "def", "_get_general_coverage", "(", "data", ",", "itype", ")", ":", "work_bam", "=", "dd", ".", "get_align_bam", "(", "data", ")", "or", "dd", ".", "get_work_bam", "(", "data", ")", "return", "[", "{", "\"bam\"", ":", "work_bam", ",", "\"file\"", ":", "tz", ".", "get_in", "(", "[", "\"depth\"", ",", "\"bins\"", ",", "\"target\"", "]", ",", "data", ")", ",", "\"cnntype\"", ":", "\"target\"", ",", "\"itype\"", ":", "itype", ",", "\"sample\"", ":", "dd", ".", "get_sample_name", "(", "data", ")", "}", ",", "{", "\"bam\"", ":", "work_bam", ",", "\"file\"", ":", "tz", ".", "get_in", "(", "[", "\"depth\"", ",", "\"bins\"", ",", "\"antitarget\"", "]", ",", "data", ")", ",", "\"cnntype\"", ":", "\"antitarget\"", ",", "\"itype\"", ":", "itype", ",", "\"sample\"", ":", "dd", ".", "get_sample_name", "(", "data", ")", "}", "]" ]
Retrieve coverage information from new shared SV bins.
[ "Retrieve", "coverage", "information", "from", "new", "shared", "SV", "bins", "." ]
python
train
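A sketch of the helper's input and output shape; the data dictionary below is a hand-built stand-in, and the exact keys read by dd.get_align_bam and dd.get_sample_name are assumptions:

from bcbio.structural.cnvkit import _get_general_coverage

# Hand-built stand-in for a bcbio sample dictionary (keys assumed).
data = {
    'align_bam': '/work/NA12878-ready.bam',
    'depth': {'bins': {'target': 'sv-target.bed', 'antitarget': 'sv-antitarget.bed'}},
    'rgnames': {'sample': 'NA12878'},
}
# Two dicts come back, one per bin type, each pairing the BAM with a coverage file.
for cov in _get_general_coverage(data, 'evaluate'):
    print(cov['cnntype'], cov['file'])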
blakev/python-syncthing
syncthing/__init__.py
https://github.com/blakev/python-syncthing/blob/a7f4930f86f7543cd96990277945467896fb523d/syncthing/__init__.py#L714-L735
def scan(self, folder, sub=None, next_=None): """ Request immediate rescan of a folder, or a specific path within a folder. Args: folder (str): Folder ID. sub (str): Path relative to the folder root. If sub is omitted the entire folder is scanned for changes, otherwise only the given path children are scanned. next_ (int): Delays Syncthing's automated rescan interval for a given number of seconds. Returns: str """ if not sub: sub = '' assert isinstance(sub, string_types) assert isinstance(next_, int) or next_ is None return self.post('scan', params={'folder': folder, 'sub': sub, 'next': next_})
[ "def", "scan", "(", "self", ",", "folder", ",", "sub", "=", "None", ",", "next_", "=", "None", ")", ":", "if", "not", "sub", ":", "sub", "=", "''", "assert", "isinstance", "(", "sub", ",", "string_types", ")", "assert", "isinstance", "(", "next_", ",", "int", ")", "or", "next_", "is", "None", "return", "self", ".", "post", "(", "'scan'", ",", "params", "=", "{", "'folder'", ":", "folder", ",", "'sub'", ":", "sub", ",", "'next'", ":", "next_", "}", ")" ]
Request immediate rescan of a folder, or a specific path within a folder. Args: folder (str): Folder ID. sub (str): Path relative to the folder root. If sub is omitted the entire folder is scanned for changes, otherwise only the given path children are scanned. next_ (int): Delays Syncthing's automated rescan interval for a given number of seconds. Returns: str
[ "Request", "immediate", "rescan", "of", "a", "folder", "or", "a", "specific", "path", "within", "a", "folder", "." ]
python
train
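Typical use goes through the package's top-level client; the API key is a placeholder, and the name of the sub-client that carries scan() is an assumption:

from syncthing import Syncthing

sync = Syncthing('ABCDEF-APIKEY', host='localhost', port=8384)  # placeholders
# Rescan only the 'photos' subtree of folder 'default' and push the next
# automatic rescan out by 60 seconds.
sync.database.scan('default', sub='photos', next_=60)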
Erotemic/utool
utool/util_hash.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L438-L498
def hash_data(data, hashlen=None, alphabet=None): r""" Get a unique hash depending on the state of the data. Args: data (object): any sort of loosely organized data hashlen (None): (default = None) alphabet (None): (default = None) Returns: str: text - hash string CommandLine: python -m utool.util_hash hash_data Example: >>> # ENABLE_DOCTEST >>> from utool.util_hash import * # NOQA >>> import utool as ut >>> counter = [0] >>> failed = [] >>> def check_hash(input_, want=None): >>> count = counter[0] = counter[0] + 1 >>> got = ut.hash_data(input_) >>> print('({}) {}'.format(count, got)) >>> if want is not None and not got.startswith(want): >>> failed.append((got, input_, count, want)) >>> check_hash('1', 'wuvrng') >>> check_hash(['1'], 'dekbfpby') >>> check_hash(tuple(['1']), 'dekbfpby') >>> check_hash(b'12', 'marreflbv') >>> check_hash([b'1', b'2'], 'nwfs') >>> check_hash(['1', '2', '3'], 'arfrp') >>> check_hash(['1', np.array([1,2,3]), '3'], 'uyqwcq') >>> check_hash('123', 'ehkgxk') >>> check_hash(zip([1, 2, 3], [4, 5, 6]), 'mjcpwa') >>> import numpy as np >>> rng = np.random.RandomState(0) >>> check_hash(rng.rand(100000), 'bdwosuey') >>> for got, input_, count, want in failed: >>> print('failed {} on {}'.format(count, input_)) >>> print('got={}, want={}'.format(got, want)) >>> assert not failed """ if alphabet is None: alphabet = ALPHABET_27 if hashlen is None: hashlen = HASH_LEN2 if isinstance(data, stringlike) and len(data) == 0: # Make a special hash for empty data text = (alphabet[0] * hashlen) else: hasher = hashlib.sha512() _update_hasher(hasher, data) # Get a 128 character hex string text = hasher.hexdigest() # Shorten length of string (by increasing base) hashstr2 = convert_hexstr_to_bigbase(text, alphabet, bigbase=len(alphabet)) # Truncate text = hashstr2[:hashlen] return text
[ "def", "hash_data", "(", "data", ",", "hashlen", "=", "None", ",", "alphabet", "=", "None", ")", ":", "if", "alphabet", "is", "None", ":", "alphabet", "=", "ALPHABET_27", "if", "hashlen", "is", "None", ":", "hashlen", "=", "HASH_LEN2", "if", "isinstance", "(", "data", ",", "stringlike", ")", "and", "len", "(", "data", ")", "==", "0", ":", "# Make a special hash for empty data", "text", "=", "(", "alphabet", "[", "0", "]", "*", "hashlen", ")", "else", ":", "hasher", "=", "hashlib", ".", "sha512", "(", ")", "_update_hasher", "(", "hasher", ",", "data", ")", "# Get a 128 character hex string", "text", "=", "hasher", ".", "hexdigest", "(", ")", "# Shorten length of string (by increasing base)", "hashstr2", "=", "convert_hexstr_to_bigbase", "(", "text", ",", "alphabet", ",", "bigbase", "=", "len", "(", "alphabet", ")", ")", "# Truncate", "text", "=", "hashstr2", "[", ":", "hashlen", "]", "return", "text" ]
r""" Get a unique hash depending on the state of the data. Args: data (object): any sort of loosely organized data hashlen (None): (default = None) alphabet (None): (default = None) Returns: str: text - hash string CommandLine: python -m utool.util_hash hash_data Example: >>> # ENABLE_DOCTEST >>> from utool.util_hash import * # NOQA >>> import utool as ut >>> counter = [0] >>> failed = [] >>> def check_hash(input_, want=None): >>> count = counter[0] = counter[0] + 1 >>> got = ut.hash_data(input_) >>> print('({}) {}'.format(count, got)) >>> if want is not None and not got.startswith(want): >>> failed.append((got, input_, count, want)) >>> check_hash('1', 'wuvrng') >>> check_hash(['1'], 'dekbfpby') >>> check_hash(tuple(['1']), 'dekbfpby') >>> check_hash(b'12', 'marreflbv') >>> check_hash([b'1', b'2'], 'nwfs') >>> check_hash(['1', '2', '3'], 'arfrp') >>> check_hash(['1', np.array([1,2,3]), '3'], 'uyqwcq') >>> check_hash('123', 'ehkgxk') >>> check_hash(zip([1, 2, 3], [4, 5, 6]), 'mjcpwa') >>> import numpy as np >>> rng = np.random.RandomState(0) >>> check_hash(rng.rand(100000), 'bdwosuey') >>> for got, input_, count, want in failed: >>> print('failed {} on {}'.format(count, input_)) >>> print('got={}, want={}'.format(got, want)) >>> assert not failed
[ "r", "Get", "a", "unique", "hash", "depending", "on", "the", "state", "of", "the", "data", "." ]
python
train
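Beyond the doctest, usage is direct; the hash prefixes shown in comments are the ones the doctest above asserts:

import numpy as np
import utool as ut

print(ut.hash_data('123'))                    # starts with 'ehkgxk' per the doctest
print(ut.hash_data(['1', '2', '3']))          # lists hash by content: 'arfrp...'
print(ut.hash_data(np.arange(5), hashlen=8))  # truncate the digest to 8 characters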
rmed/pyemtmad
pyemtmad/api/parking.py
https://github.com/rmed/pyemtmad/blob/c21c42d0c7b50035dfed29540d7e64ab67833728/pyemtmad/api/parking.py#L230-L254
def list_features(self, **kwargs): """Obtain a list of parking features. Args: lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[ParkingFeature]), or message string in case of error. """ # Endpoint parameters params = { 'language': util.language_code(kwargs.get('lang')), 'publicData': True } # Request result = self.make_request('list_features', {}, **params) if not util.check_result(result): return False, result.get('message', 'UNKNOWN ERROR') # Parse values = util.response_list(result, 'Data') return True, [emtype.ParkingFeature(**a) for a in values]
[ "def", "list_features", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# Endpoint parameters", "params", "=", "{", "'language'", ":", "util", ".", "language_code", "(", "kwargs", ".", "get", "(", "'lang'", ")", ")", ",", "'publicData'", ":", "True", "}", "# Request", "result", "=", "self", ".", "make_request", "(", "'list_features'", ",", "{", "}", ",", "*", "*", "params", ")", "if", "not", "util", ".", "check_result", "(", "result", ")", ":", "return", "False", ",", "result", ".", "get", "(", "'message'", ",", "'UNKNOWN ERROR'", ")", "# Parse", "values", "=", "util", ".", "response_list", "(", "result", ",", "'Data'", ")", "return", "True", ",", "[", "emtype", ".", "ParkingFeature", "(", "*", "*", "a", ")", "for", "a", "in", "values", "]" ]
Obtain a list of parking features. Args: lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[ParkingFeature]), or message string in case of error.
[ "Obtain", "a", "list", "of", "parking", "features", "." ]
python
train
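A sketch of calling this through the package wrapper; the import path, constructor arguments, and the parking attribute are assumed from the library's docs, and the credentials are placeholders:

from pyemtmad.wrapper import Wrapper  # import path assumed

wrapper = Wrapper('MY_EMT_ID', 'MY_EMT_PASS')
ok, result = wrapper.parking.list_features(lang='en')
if ok:
    for feature in result:  # emtype.ParkingFeature instances
        print(feature)
else:
    print('EMT error:', result)  # on failure, result is the message string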
Cornices/cornice.ext.swagger
cornice_swagger/views.py
https://github.com/Cornices/cornice.ext.swagger/blob/c31a5cc8d5dd112b11dc41ccb6d09b423b537abc/cornice_swagger/views.py#L61-L76
def swagger_ui_script_template(request, **kwargs): """ :param request: :return: Generates the <script> code that bootstraps Swagger UI; it will be injected into the index template """ swagger_spec_url = request.route_url('cornice_swagger.open_api_path') template = pkg_resources.resource_string( 'cornice_swagger', 'templates/index_script_template.html' ).decode('utf8') return Template(template).safe_substitute( swagger_spec_url=swagger_spec_url, )
[ "def", "swagger_ui_script_template", "(", "request", ",", "*", "*", "kwargs", ")", ":", "swagger_spec_url", "=", "request", ".", "route_url", "(", "'cornice_swagger.open_api_path'", ")", "template", "=", "pkg_resources", ".", "resource_string", "(", "'cornice_swagger'", ",", "'templates/index_script_template.html'", ")", ".", "decode", "(", "'utf8'", ")", "return", "Template", "(", "template", ")", ".", "safe_substitute", "(", "swagger_spec_url", "=", "swagger_spec_url", ",", ")" ]
:param request: :return: Generates the <script> code that bootstraps Swagger UI; it will be injected into the index template
[ ":", "param", "request", ":", ":", "return", ":" ]
python
valid
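The view is wired in by cornice_swagger's Pyramid directives rather than called directly; a sketch of an app that ends up serving it (paths are examples):

from pyramid.config import Configurator

def main(global_config, **settings):
    config = Configurator(settings=settings)
    config.include('cornice')
    config.include('cornice_swagger')
    # First the spec, then the explorer page whose <script> block is
    # rendered by swagger_ui_script_template() above.
    config.cornice_enable_openapi_view(api_path='/api-explorer/swagger.json')
    config.cornice_enable_openapi_explorer(api_explorer_path='/api-explorer')
    return config.make_wsgi_app()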
gem/oq-engine
openquake/hazardlib/gsim/cauzzi_faccioli_2008.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/cauzzi_faccioli_2008.py#L153-L175
def _compute_mean(self, C, mag, dists, vs30, rake, imt): """ Compute mean value for PGV, PGA and Displacement response spectrum, as given in equation 2, page 462 with the addition of the faulting style term as given in equation 5, page 465. Also converts displacement response spectrum values to SA. """ mean = (self._compute_term_1_2(C, mag) + self._compute_term_3(C, dists.rhypo) + self._compute_site_term(C, vs30) + self._compute_faulting_style_term(C, rake)) # convert from cm/s**2 to g for SA and from m/s**2 to g for PGA (PGV # is already in cm/s) and also convert from base 10 to base e. if imt.name == "PGA": mean = np.log((10 ** mean) / g) elif imt.name == "SA": mean = np.log((10 ** mean) * ((2 * np.pi / imt.period) ** 2) * 1e-2 / g) else: mean = np.log(10 ** mean) return mean
[ "def", "_compute_mean", "(", "self", ",", "C", ",", "mag", ",", "dists", ",", "vs30", ",", "rake", ",", "imt", ")", ":", "mean", "=", "(", "self", ".", "_compute_term_1_2", "(", "C", ",", "mag", ")", "+", "self", ".", "_compute_term_3", "(", "C", ",", "dists", ".", "rhypo", ")", "+", "self", ".", "_compute_site_term", "(", "C", ",", "vs30", ")", "+", "self", ".", "_compute_faulting_style_term", "(", "C", ",", "rake", ")", ")", "# convert from cm/s**2 to g for SA and from m/s**2 to g for PGA (PGV", "# is already in cm/s) and also convert from base 10 to base e.", "if", "imt", ".", "name", "==", "\"PGA\"", ":", "mean", "=", "np", ".", "log", "(", "(", "10", "**", "mean", ")", "/", "g", ")", "elif", "imt", ".", "name", "==", "\"SA\"", ":", "mean", "=", "np", ".", "log", "(", "(", "10", "**", "mean", ")", "*", "(", "(", "2", "*", "np", ".", "pi", "/", "imt", ".", "period", ")", "**", "2", ")", "*", "1e-2", "/", "g", ")", "else", ":", "mean", "=", "np", ".", "log", "(", "10", "**", "mean", ")", "return", "mean" ]
Compute mean value for PGV, PGA and Displacement response spectrum, as given in equation 2, page 462 with the addition of the faulting style term as given in equation 5, page 465. Also converts displacement response spectrum values to SA.
[ "Compute", "mean", "value", "for", "PGV", "PGA", "and", "Displacement", "response", "spectrum", "as", "given", "in", "equation", "2", "page", "462", "with", "the", "addition", "of", "the", "faulting", "style", "term", "as", "given", "in", "equation", "5", "page", "465", ".", "Also", "converts", "displacement", "response", "spectrum", "values", "to", "SA", "." ]
python
train
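The SA branch's unit handling can be checked in isolation: log10 displacement (cm) becomes pseudo-spectral acceleration via (2*pi/T)**2, is scaled to m/s**2 by 1e-2, divided by g, and moved from base 10 to natural log. A sketch with an invented value:

import numpy as np
from scipy.constants import g  # 9.80665 m/s**2

log10_dsr = 0.5  # invented log10 displacement response value, in cm
period = 1.0     # spectral period T, in seconds
# cm -> cm/s**2 via (2*pi/T)**2, then *1e-2 -> m/s**2, /g -> g, log10 -> ln
ln_sa_g = np.log((10 ** log10_dsr) * ((2 * np.pi / period) ** 2) * 1e-2 / g)
print(ln_sa_g)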
krukas/Trionyx
trionyx/trionyx/views/core.py
https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L68-L72
def get_model_config(self): """Get Trionyx model config""" if not hasattr(self, '__config'): setattr(self, '__config', models_config.get_config(self.get_model_class())) return getattr(self, '__config', None)
[ "def", "get_model_config", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'__config'", ")", ":", "setattr", "(", "self", ",", "'__config'", ",", "models_config", ".", "get_config", "(", "self", ".", "get_model_class", "(", ")", ")", ")", "return", "getattr", "(", "self", ",", "'__config'", ",", "None", ")" ]
Get Trionyx model config
[ "Get", "Trionyx", "model", "config" ]
python
train
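The method is a small memoize-on-the-instance idiom: compute once, stash the result on the instance, and return the cached value afterwards. The same pattern in isolation, with expensive_lookup standing in for models_config.get_config:

class ConfigCacheMixin:
    def get_model_config(self):
        # First call computes and caches; later calls hit the cache.
        if not hasattr(self, '_cached_config'):
            self._cached_config = expensive_lookup(self.get_model_class())  # stand-in
        return self._cached_config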
airspeed-velocity/asv
asv/extern/asizeof.py
https://github.com/airspeed-velocity/asv/blob/d23bb8b74e8adacbfa3cf5724bda55fb39d56ba6/asv/extern/asizeof.py#L1638-L1658
def _sizes(self, objs, sized=None): '''Return the size or an **Asized** instance for each given object and the total size. The total includes the size of duplicates only once. ''' self.exclude_refs(*objs) # skip refs to objs s, t = {}, [] for o in objs: i = id(o) if i in s: # duplicate self._seen[i] += 1 self._duplicate += 1 else: s[i] = self._sizer(o, 0, sized) t.append(s[i]) if sized: s = _sum([i.size for i in _values(s)]) # [] for Python 2.2 else: s = _sum(_values(s)) self._total += s # accumulate return s, tuple(t)
[ "def", "_sizes", "(", "self", ",", "objs", ",", "sized", "=", "None", ")", ":", "self", ".", "exclude_refs", "(", "*", "objs", ")", "# skip refs to objs", "s", ",", "t", "=", "{", "}", ",", "[", "]", "for", "o", "in", "objs", ":", "i", "=", "id", "(", "o", ")", "if", "i", "in", "s", ":", "# duplicate", "self", ".", "_seen", "[", "i", "]", "+=", "1", "self", ".", "_duplicate", "+=", "1", "else", ":", "s", "[", "i", "]", "=", "self", ".", "_sizer", "(", "o", ",", "0", ",", "sized", ")", "t", ".", "append", "(", "s", "[", "i", "]", ")", "if", "sized", ":", "s", "=", "_sum", "(", "[", "i", ".", "size", "for", "i", "in", "_values", "(", "s", ")", "]", ")", "# [] for Python 2.2", "else", ":", "s", "=", "_sum", "(", "_values", "(", "s", ")", ")", "self", ".", "_total", "+=", "s", "# accumulate", "return", "s", ",", "tuple", "(", "t", ")" ]
Return the size or an **Asized** instance for each given object and the total size. The total includes the size of duplicates only once.
[ "Return", "the", "size", "or", "an", "**", "Asized", "**", "instance", "for", "each", "given", "object", "and", "the", "total", "size", ".", "The", "total", "includes", "the", "size", "of", "duplicates", "only", "once", "." ]
python
train
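_sizes() is internal, but its duplicates-count-once behaviour shows through the module's public asizeof() helper; a sketch:

from asv.extern.asizeof import asizeof

shared = list(range(1000))
# Passing the same list three times sizes it once: the combined total is far
# smaller than three separate asizeof(shared) results added together.
print(asizeof(shared))
print(asizeof(shared, shared, shared))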
saltstack/salt
salt/modules/saltutil.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/saltutil.py#L362-L392
def refresh_grains(**kwargs): ''' .. versionadded:: 2016.3.6,2016.11.4,2017.7.0 Refresh the minion's grains without syncing custom grains modules from ``salt://_grains``. .. note:: The available execution modules will be reloaded as part of this process, as grains can affect which modules are available. refresh_pillar : True Set to ``False`` to keep pillar data from being refreshed. CLI Examples: .. code-block:: bash salt '*' saltutil.refresh_grains ''' kwargs = salt.utils.args.clean_kwargs(**kwargs) _refresh_pillar = kwargs.pop('refresh_pillar', True) if kwargs: salt.utils.args.invalid_kwargs(kwargs) # Modules and pillar need to be refreshed in case grains changes affected # them, and the module refresh process reloads the grains and assigns the # newly-reloaded grains to each execution module's __grains__ dunder. refresh_modules() if _refresh_pillar: refresh_pillar() return True
[ "def", "refresh_grains", "(", "*", "*", "kwargs", ")", ":", "kwargs", "=", "salt", ".", "utils", ".", "args", ".", "clean_kwargs", "(", "*", "*", "kwargs", ")", "_refresh_pillar", "=", "kwargs", ".", "pop", "(", "'refresh_pillar'", ",", "True", ")", "if", "kwargs", ":", "salt", ".", "utils", ".", "args", ".", "invalid_kwargs", "(", "kwargs", ")", "# Modules and pillar need to be refreshed in case grains changes affected", "# them, and the module refresh process reloads the grains and assigns the", "# newly-reloaded grains to each execution module's __grains__ dunder.", "refresh_modules", "(", ")", "if", "_refresh_pillar", ":", "refresh_pillar", "(", ")", "return", "True" ]
.. versionadded:: 2016.3.6,2016.11.4,2017.7.0 Refresh the minion's grains without syncing custom grains modules from ``salt://_grains``. .. note:: The available execution modules will be reloaded as part of this process, as grains can affect which modules are available. refresh_pillar : True Set to ``False`` to keep pillar data from being refreshed. CLI Examples: .. code-block:: bash salt '*' saltutil.refresh_grains
[ "..", "versionadded", "::", "2016", ".", "3", ".", "6", "2016", ".", "11", ".", "4", "2017", ".", "7", ".", "0" ]
python
train
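The docstring gives the CLI form; the same call from Python goes through Salt's LocalClient (this assumes a running master with accepted minions):

import salt.client

local = salt.client.LocalClient()
# Equivalent of: salt '*' saltutil.refresh_grains refresh_pillar=False
ret = local.cmd('*', 'saltutil.refresh_grains', kwarg={'refresh_pillar': False})
print(ret)  # {minion_id: True, ...}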
log2timeline/dftimewolf
dftimewolf/lib/containers/interface.py
https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/containers/interface.py#L45-L54
def copy_from_dict(self, attributes): """Copies the attribute container from a dictionary. Args: attributes (dict[str, object]): attribute values per name. """ for attribute_name, attribute_value in attributes.items(): # Not using startswith to improve performance. if attribute_name[0] == '_': continue setattr(self, attribute_name, attribute_value)
[ "def", "copy_from_dict", "(", "self", ",", "attributes", ")", ":", "for", "attribute_name", ",", "attribute_value", "in", "attributes", ".", "items", "(", ")", ":", "# Not using startswith to improve performance.", "if", "attribute_name", "[", "0", "]", "==", "'_'", ":", "continue", "setattr", "(", "self", ",", "attribute_name", ",", "attribute_value", ")" ]
Copies the attribute container from a dictionary. Args: attributes (dict[str, object]): attribute values per name.
[ "Copies", "the", "attribute", "container", "from", "a", "dictionary", ".", "Args", ":", "attributes", "(", "dict", "[", "str", "object", "]", ")", ":", "attribute", "values", "per", "name", "." ]
python
train
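A sketch of the round trip; the attribute names in the dictionary are invented:

from dftimewolf.lib.containers.interface import AttributeContainer

container = AttributeContainer()
container.copy_from_dict({'name': 'evidence1', '_internal': 'skipped'})
print(container.name)                   # 'evidence1'
print(hasattr(container, '_internal'))  # False: underscore-prefixed keys are ignored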
bunq/sdk_python
bunq/sdk/model/generated/endpoint.py
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/generated/endpoint.py#L7569-L7660
def create(cls, cash_register_id, description, status, amount_total, monetary_account_id=None, allow_amount_higher=None, allow_amount_lower=None, want_tip=None, minimum_age=None, require_address=None, redirect_url=None, visibility=None, expiration=None, tab_attachment=None, custom_headers=None): """ Create a TabUsageMultiple. On creation the status must be set to OPEN :type user_id: int :type monetary_account_id: int :type cash_register_id: int :param description: The description of the TabUsageMultiple. Maximum 9000 characters. Field is required but can be an empty string. :type description: str :param status: The status of the TabUsageMultiple. On creation the status must be set to OPEN. You can change the status from OPEN to PAYABLE. If the TabUsageMultiple gets paid the status will remain PAYABLE. :type status: str :param amount_total: The total amount of the Tab. Must be a positive amount. As long as the tab has the status OPEN you can change the total amount. This amount is not affected by the amounts of the TabItems. However, if you've created any TabItems for a Tab the sum of the amounts of these items must be equal to the total_amount of the Tab when you change its status to PAYABLE :type amount_total: object_.Amount :param allow_amount_higher: [DEPRECATED] Whether or not a higher amount can be paid. :type allow_amount_higher: bool :param allow_amount_lower: [DEPRECATED] Whether or not a lower amount can be paid. :type allow_amount_lower: bool :param want_tip: [DEPRECATED] Whether or not the user paying the Tab should be asked if he wants to give a tip. When want_tip is set to true, allow_amount_higher must also be set to true and allow_amount_lower must be false. :type want_tip: bool :param minimum_age: The minimum age of the user paying the Tab. :type minimum_age: int :param require_address: Whether a billing and shipping address must be provided when paying the Tab. Possible values are: BILLING, SHIPPING, BILLING_SHIPPING, NONE, OPTIONAL. Default is NONE. :type require_address: str :param redirect_url: The URL which the user is sent to after paying the Tab. :type redirect_url: str :param visibility: The visibility of a Tab. A Tab can be visible through NearPay, the QR code of the CashRegister and its own QR code. :type visibility: object_.TabVisibility :param expiration: The moment when this Tab expires. Can be at most 365 days into the future. :type expiration: str :param tab_attachment: An array of attachments that describe the tab. Uploaded through the POST /user/{userid}/attachment-tab endpoint. :type tab_attachment: list[object_.BunqId] :type custom_headers: dict[str, str]|None :rtype: BunqResponseStr """ if custom_headers is None: custom_headers = {} request_map = { cls.FIELD_DESCRIPTION: description, cls.FIELD_STATUS: status, cls.FIELD_AMOUNT_TOTAL: amount_total, cls.FIELD_ALLOW_AMOUNT_HIGHER: allow_amount_higher, cls.FIELD_ALLOW_AMOUNT_LOWER: allow_amount_lower, cls.FIELD_WANT_TIP: want_tip, cls.FIELD_MINIMUM_AGE: minimum_age, cls.FIELD_REQUIRE_ADDRESS: require_address, cls.FIELD_REDIRECT_URL: redirect_url, cls.FIELD_VISIBILITY: visibility, cls.FIELD_EXPIRATION: expiration, cls.FIELD_TAB_ATTACHMENT: tab_attachment } request_map_string = converter.class_to_json(request_map) request_map_string = cls._remove_field_for_request(request_map_string) api_client = client.ApiClient(cls._get_api_context()) request_bytes = request_map_string.encode() endpoint_url = cls._ENDPOINT_URL_CREATE.format(cls._determine_user_id(), cls._determine_monetary_account_id( monetary_account_id), cash_register_id) response_raw = api_client.post(endpoint_url, request_bytes, custom_headers) return BunqResponseStr.cast_from_bunq_response( cls._process_for_uuid(response_raw) )
[ "def", "create", "(", "cls", ",", "cash_register_id", ",", "description", ",", "status", ",", "amount_total", ",", "monetary_account_id", "=", "None", ",", "allow_amount_higher", "=", "None", ",", "allow_amount_lower", "=", "None", ",", "want_tip", "=", "None", ",", "minimum_age", "=", "None", ",", "require_address", "=", "None", ",", "redirect_url", "=", "None", ",", "visibility", "=", "None", ",", "expiration", "=", "None", ",", "tab_attachment", "=", "None", ",", "custom_headers", "=", "None", ")", ":", "if", "custom_headers", "is", "None", ":", "custom_headers", "=", "{", "}", "request_map", "=", "{", "cls", ".", "FIELD_DESCRIPTION", ":", "description", ",", "cls", ".", "FIELD_STATUS", ":", "status", ",", "cls", ".", "FIELD_AMOUNT_TOTAL", ":", "amount_total", ",", "cls", ".", "FIELD_ALLOW_AMOUNT_HIGHER", ":", "allow_amount_higher", ",", "cls", ".", "FIELD_ALLOW_AMOUNT_LOWER", ":", "allow_amount_lower", ",", "cls", ".", "FIELD_WANT_TIP", ":", "want_tip", ",", "cls", ".", "FIELD_MINIMUM_AGE", ":", "minimum_age", ",", "cls", ".", "FIELD_REQUIRE_ADDRESS", ":", "require_address", ",", "cls", ".", "FIELD_REDIRECT_URL", ":", "redirect_url", ",", "cls", ".", "FIELD_VISIBILITY", ":", "visibility", ",", "cls", ".", "FIELD_EXPIRATION", ":", "expiration", ",", "cls", ".", "FIELD_TAB_ATTACHMENT", ":", "tab_attachment", "}", "request_map_string", "=", "converter", ".", "class_to_json", "(", "request_map", ")", "request_map_string", "=", "cls", ".", "_remove_field_for_request", "(", "request_map_string", ")", "api_client", "=", "client", ".", "ApiClient", "(", "cls", ".", "_get_api_context", "(", ")", ")", "request_bytes", "=", "request_map_string", ".", "encode", "(", ")", "endpoint_url", "=", "cls", ".", "_ENDPOINT_URL_CREATE", ".", "format", "(", "cls", ".", "_determine_user_id", "(", ")", ",", "cls", ".", "_determine_monetary_account_id", "(", "monetary_account_id", ")", ",", "cash_register_id", ")", "response_raw", "=", "api_client", ".", "post", "(", "endpoint_url", ",", "request_bytes", ",", "custom_headers", ")", "return", "BunqResponseStr", ".", "cast_from_bunq_response", "(", "cls", ".", "_process_for_uuid", "(", "response_raw", ")", ")" ]
Create a TabUsageMultiple. On creation the status must be set to OPEN :type user_id: int :type monetary_account_id: int :type cash_register_id: int :param description: The description of the TabUsageMultiple. Maximum 9000 characters. Field is required but can be an empty string. :type description: str :param status: The status of the TabUsageMultiple. On creation the status must be set to OPEN. You can change the status from OPEN to PAYABLE. If the TabUsageMultiple gets paid the status will remain PAYABLE. :type status: str :param amount_total: The total amount of the Tab. Must be a positive amount. As long as the tab has the status OPEN you can change the total amount. This amount is not affected by the amounts of the TabItems. However, if you've created any TabItems for a Tab the sum of the amounts of these items must be equal to the total_amount of the Tab when you change its status to PAYABLE :type amount_total: object_.Amount :param allow_amount_higher: [DEPRECATED] Whether or not a higher amount can be paid. :type allow_amount_higher: bool :param allow_amount_lower: [DEPRECATED] Whether or not a lower amount can be paid. :type allow_amount_lower: bool :param want_tip: [DEPRECATED] Whether or not the user paying the Tab should be asked if he wants to give a tip. When want_tip is set to true, allow_amount_higher must also be set to true and allow_amount_lower must be false. :type want_tip: bool :param minimum_age: The minimum age of the user paying the Tab. :type minimum_age: int :param require_address: Whether a billing and shipping address must be provided when paying the Tab. Possible values are: BILLING, SHIPPING, BILLING_SHIPPING, NONE, OPTIONAL. Default is NONE. :type require_address: str :param redirect_url: The URL which the user is sent to after paying the Tab. :type redirect_url: str :param visibility: The visibility of a Tab. A Tab can be visible through NearPay, the QR code of the CashRegister and its own QR code. :type visibility: object_.TabVisibility :param expiration: The moment when this Tab expires. Can be at most 365 days into the future. :type expiration: str :param tab_attachment: An array of attachments that describe the tab. Uploaded through the POST /user/{userid}/attachment-tab endpoint. :type tab_attachment: list[object_.BunqId] :type custom_headers: dict[str, str]|None :rtype: BunqResponseStr
[ "Create", "a", "TabUsageMultiple", ".", "On", "creation", "the", "status", "must", "be", "set", "to", "OPEN" ]
python
train
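A sketch of opening a Tab with the endpoint above; the context bootstrap is abbreviated, and the API key, device description, register ID, and amount are placeholders (see the bunq SDK docs for the full flow):

from bunq.sdk.context import ApiContext, ApiEnvironmentType, BunqContext
from bunq.sdk.model.generated import endpoint
from bunq.sdk.model.generated.object_ import Amount

api_context = ApiContext(ApiEnvironmentType.SANDBOX, 'API_KEY', 'my device')
BunqContext.load_api_context(api_context)

tab_uuid = endpoint.TabUsageMultiple.create(
    cash_register_id=123,
    description='Table 7',
    status='OPEN',                       # must be OPEN on creation
    amount_total=Amount('24.95', 'EUR'),
).value
print(tab_uuid)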