Dataset columns:
repo: string, lengths 7 to 54
path: string, lengths 4 to 192
url: string, lengths 87 to 284
code: string, lengths 78 to 104k
code_tokens: sequence
docstring: string, lengths 1 to 46.9k
docstring_tokens: sequence
language: string, 1 distinct value
partition: string, 3 distinct values
tnkteja/myhelp
virtualEnvironment/lib/python2.7/site-packages/coverage/data.py
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/data.py#L213-L220
def add_arc_data(self, arc_data): """Add measured arc data. `arc_data` is { filename: { (l1,l2): None, ... }, ...} """ for filename, arcs in iitems(arc_data): self.arcs.setdefault(filename, {}).update(arcs)
[ "def", "add_arc_data", "(", "self", ",", "arc_data", ")", ":", "for", "filename", ",", "arcs", "in", "iitems", "(", "arc_data", ")", ":", "self", ".", "arcs", ".", "setdefault", "(", "filename", ",", "{", "}", ")", ".", "update", "(", "arcs", ")" ]
Add measured arc data. `arc_data` is { filename: { (l1,l2): None, ... }, ...}
[ "Add", "measured", "arc", "data", "." ]
python
test
PMBio/limix-backup
limix/varDecomp/varianceDecomposition.py
https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/varDecomp/varianceDecomposition.py#L900-L915
def _getScalesRand(self): """ Internal function for parameter initialization Return a vector of random scales """ if self.P>1: scales = [] for term_i in range(self.n_randEffs): _scales = sp.randn(self.diag[term_i].shape[0]) if self.jitter[term_i]>0: _scales = sp.concatenate((_scales,sp.array([sp.sqrt(self.jitter[term_i])]))) scales.append(_scales) scales = sp.concatenate(scales) else: scales=sp.randn(self.vd.getNumberScales()) return scales
[ "def", "_getScalesRand", "(", "self", ")", ":", "if", "self", ".", "P", ">", "1", ":", "scales", "=", "[", "]", "for", "term_i", "in", "range", "(", "self", ".", "n_randEffs", ")", ":", "_scales", "=", "sp", ".", "randn", "(", "self", ".", "diag", "[", "term_i", "]", ".", "shape", "[", "0", "]", ")", "if", "self", ".", "jitter", "[", "term_i", "]", ">", "0", ":", "_scales", "=", "sp", ".", "concatenate", "(", "(", "_scales", ",", "sp", ".", "array", "(", "[", "sp", ".", "sqrt", "(", "self", ".", "jitter", "[", "term_i", "]", ")", "]", ")", ")", ")", "scales", ".", "append", "(", "_scales", ")", "scales", "=", "sp", ".", "concatenate", "(", "scales", ")", "else", ":", "scales", "=", "sp", ".", "randn", "(", "self", ".", "vd", ".", "getNumberScales", "(", ")", ")", "return", "scales" ]
Internal function for parameter initialization Return a vector of random scales
[ "Internal", "function", "for", "parameter", "initialization", "Return", "a", "vector", "of", "random", "scales" ]
python
train
mathiasertl/django-ca
ca/django_ca/models.py
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/models.py#L306-L308
def subject(self): """The certificates subject as :py:class:`~django_ca.subject.Subject`.""" return Subject([(s.oid, s.value) for s in self.x509.subject])
[ "def", "subject", "(", "self", ")", ":", "return", "Subject", "(", "[", "(", "s", ".", "oid", ",", "s", ".", "value", ")", "for", "s", "in", "self", ".", "x509", ".", "subject", "]", ")" ]
The certificates subject as :py:class:`~django_ca.subject.Subject`.
[ "The", "certificates", "subject", "as", ":", "py", ":", "class", ":", "~django_ca", ".", "subject", ".", "Subject", "." ]
python
train
ThreatConnect-Inc/tcex
tcex/tcex_resources.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_resources.py#L524-L547
def copy(self): """Return a "clean" copy of this instance. Return: (instance): A clean copy of this instance. """ resource = copy.copy(self) # workaround for bytes/str issue in Py3 with copy of instance # TypeError: a bytes-like object is required, not 'str' (ssl.py) resource._request = self.tcex.request(self.tcex.session) # reset properties of resource resource.copy_reset() # Preserve settings resource.http_method = self.http_method if self._request.payload.get('owner') is not None: resource.owner = self._request.payload.get('owner') # future bcs - these should not need to be reset. correct? # resource._request_entity = self._api_entity # resource._request_uri = self._api_uri return resource
[ "def", "copy", "(", "self", ")", ":", "resource", "=", "copy", ".", "copy", "(", "self", ")", "# workaround for bytes/str issue in Py3 with copy of instance", "# TypeError: a bytes-like object is required, not 'str' (ssl.py)", "resource", ".", "_request", "=", "self", ".", "tcex", ".", "request", "(", "self", ".", "tcex", ".", "session", ")", "# reset properties of resource", "resource", ".", "copy_reset", "(", ")", "# Preserve settings", "resource", ".", "http_method", "=", "self", ".", "http_method", "if", "self", ".", "_request", ".", "payload", ".", "get", "(", "'owner'", ")", "is", "not", "None", ":", "resource", ".", "owner", "=", "self", ".", "_request", ".", "payload", ".", "get", "(", "'owner'", ")", "# future bcs - these should not need to be reset. correct?", "# resource._request_entity = self._api_entity", "# resource._request_uri = self._api_uri", "return", "resource" ]
Return a "clean" copy of this instance. Return: (instance): A clean copy of this instance.
[ "Return", "a", "clean", "copy", "of", "this", "instance", "." ]
python
train
loganasherjones/yapconf
yapconf/spec.py
https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/spec.py#L329-L357
def spawn_watcher(self, label, target=None, eternal=False): """Spawns a config watcher in a separate daemon thread. If a particular config value changes, and the item has a ``watch_target`` defined, then that method will be called. If a ``target`` is passed in, then it will call the ``target`` anytime the config changes. Args: label (str): Should match a label added through ``add_source`` target (func): Should be a function that takes two arguments, the old configuration and the new configuration. eternal (bool): Determines if watcher threads should be restarted if they die. Returns: The thread that was spawned. """ if label not in self._sources: raise YapconfSourceError( 'Cannot watch %s no source named %s' % (label, label) ) current_config = self._sources[label].get_data() handler = ConfigChangeHandler(current_config, self, target) return self._sources[label].watch(handler, eternal)
[ "def", "spawn_watcher", "(", "self", ",", "label", ",", "target", "=", "None", ",", "eternal", "=", "False", ")", ":", "if", "label", "not", "in", "self", ".", "_sources", ":", "raise", "YapconfSourceError", "(", "'Cannot watch %s no source named %s'", "%", "(", "label", ",", "label", ")", ")", "current_config", "=", "self", ".", "_sources", "[", "label", "]", ".", "get_data", "(", ")", "handler", "=", "ConfigChangeHandler", "(", "current_config", ",", "self", ",", "target", ")", "return", "self", ".", "_sources", "[", "label", "]", ".", "watch", "(", "handler", ",", "eternal", ")" ]
Spawns a config watcher in a separate daemon thread. If a particular config value changes, and the item has a ``watch_target`` defined, then that method will be called. If a ``target`` is passed in, then it will call the ``target`` anytime the config changes. Args: label (str): Should match a label added through ``add_source`` target (func): Should be a function that takes two arguments, the old configuration and the new configuration. eternal (bool): Determines if watcher threads should be restarted if they die. Returns: The thread that was spawned.
[ "Spawns", "a", "config", "watcher", "in", "a", "separate", "daemon", "thread", "." ]
python
train
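The spawn_watcher row above documents a callback that receives the old and new configuration. A minimal usage sketch, assuming yapconf's documented YapconfSpec / add_source / load_config API; the item definition, source label, and filename are illustrative placeholders, not values from the dataset.

```python
from yapconf import YapconfSpec

# Hypothetical spec and source; the item definition, label, and filename are placeholders.
spec = YapconfSpec({'foo': {'type': 'str', 'default': 'bar'}})
spec.add_source('my-yaml', 'yaml', filename='config.yaml')
spec.load_config('my-yaml')

def on_change(old_config, new_config):
    # Called with the previous and the updated configuration dicts.
    print('config changed:', old_config, '->', new_config)

# Watch the source in a daemon thread; eternal=True restarts the watcher if it dies.
watcher_thread = spec.spawn_watcher('my-yaml', target=on_change, eternal=True)
```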
fhcrc/taxtastic
taxtastic/refpkg.py
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L227-L231
def open(self, name, *mode): """ Return an open file object for a file in the reference package. """ return self.file_factory(self.file_path(name), *mode)
[ "def", "open", "(", "self", ",", "name", ",", "*", "mode", ")", ":", "return", "self", ".", "file_factory", "(", "self", ".", "file_path", "(", "name", ")", ",", "*", "mode", ")" ]
Return an open file object for a file in the reference package.
[ "Return", "an", "open", "file", "object", "for", "a", "file", "in", "the", "reference", "package", "." ]
python
train
geophysics-ubonn/crtomo_tools
src/sens_center_plot.py
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/sens_center_plot.py#L293-L328
def compute_sens(self, elem_file, elec_file, configs): """ Compute the sensitivities for the given input data. A CRMod instance is called to create the sensitivity files. """ CRMod_config = CRMod.config() # activate 2D mode and set sink nr if self.options.sink is not None: print('2D mode with sink {0}'.format(self.options.sink)) CRMod_config['2D'] = 0 CRMod_config['fictitious_sink'] = 'T' CRMod_config['sink_node'] = self.options.sink CRMod_config['write_sens'] = 'T' CRMod_instance = CRMod.CRMod(CRMod_config) CRMod_instance.elemfile = elem_file CRMod_instance.elecfile = elec_file CRMod_instance.configdata = configs resistivity = 100 # get number of elements fid = open(elem_file, 'r') fid.readline() elements = int(fid.readline().strip().split()[1]) fid.close() # create rho.dat file rhodata = '{0}\n'.format(elements) for i in range(0, elements): rhodata += '{0} 0\n'.format(resistivity) CRMod_instance.rhodata = rhodata CRMod_instance.run_in_tempdir() volt_file = CRMod_instance.volt_file sens_files = CRMod_instance.sens_files return sens_files, volt_file, CRMod_instance.temp_dir
[ "def", "compute_sens", "(", "self", ",", "elem_file", ",", "elec_file", ",", "configs", ")", ":", "CRMod_config", "=", "CRMod", ".", "config", "(", ")", "# activate 2D mode and set sink nr", "if", "self", ".", "options", ".", "sink", "is", "not", "None", ":", "print", "(", "'2D mode with sink {0}'", ".", "format", "(", "self", ".", "options", ".", "sink", ")", ")", "CRMod_config", "[", "'2D'", "]", "=", "0", "CRMod_config", "[", "'fictitious_sink'", "]", "=", "'T'", "CRMod_config", "[", "'sink_node'", "]", "=", "self", ".", "options", ".", "sink", "CRMod_config", "[", "'write_sens'", "]", "=", "'T'", "CRMod_instance", "=", "CRMod", ".", "CRMod", "(", "CRMod_config", ")", "CRMod_instance", ".", "elemfile", "=", "elem_file", "CRMod_instance", ".", "elecfile", "=", "elec_file", "CRMod_instance", ".", "configdata", "=", "configs", "resistivity", "=", "100", "# get number of elements", "fid", "=", "open", "(", "elem_file", ",", "'r'", ")", "fid", ".", "readline", "(", ")", "elements", "=", "int", "(", "fid", ".", "readline", "(", ")", ".", "strip", "(", ")", ".", "split", "(", ")", "[", "1", "]", ")", "fid", ".", "close", "(", ")", "# create rho.dat file", "rhodata", "=", "'{0}\\n'", ".", "format", "(", "elements", ")", "for", "i", "in", "range", "(", "0", ",", "elements", ")", ":", "rhodata", "+=", "'{0} 0\\n'", ".", "format", "(", "resistivity", ")", "CRMod_instance", ".", "rhodata", "=", "rhodata", "CRMod_instance", ".", "run_in_tempdir", "(", ")", "volt_file", "=", "CRMod_instance", ".", "volt_file", "sens_files", "=", "CRMod_instance", ".", "sens_files", "return", "sens_files", ",", "volt_file", ",", "CRMod_instance", ".", "temp_dir" ]
Compute the sensitivities for the given input data. A CRMod instance is called to create the sensitivity files.
[ "Compute", "the", "sensitivities", "for", "the", "given", "input", "data", ".", "A", "CRMod", "instance", "is", "called", "to", "create", "the", "sensitivity", "files", "." ]
python
train
Nukesor/pueue
pueue/daemon/process_handler.py
https://github.com/Nukesor/pueue/blob/f1d276360454d4dd2738658a13df1e20caa4b926/pueue/daemon/process_handler.py#L226-L238
def start_process(self, key): """Start a specific processes.""" if key in self.processes and key in self.paused: os.killpg(os.getpgid(self.processes[key].pid), signal.SIGCONT) self.queue[key]['status'] = 'running' self.paused.remove(key) return True elif key not in self.processes: if self.queue[key]['status'] in ['queued', 'stashed']: self.spawn_new(key) return True return False
[ "def", "start_process", "(", "self", ",", "key", ")", ":", "if", "key", "in", "self", ".", "processes", "and", "key", "in", "self", ".", "paused", ":", "os", ".", "killpg", "(", "os", ".", "getpgid", "(", "self", ".", "processes", "[", "key", "]", ".", "pid", ")", ",", "signal", ".", "SIGCONT", ")", "self", ".", "queue", "[", "key", "]", "[", "'status'", "]", "=", "'running'", "self", ".", "paused", ".", "remove", "(", "key", ")", "return", "True", "elif", "key", "not", "in", "self", ".", "processes", ":", "if", "self", ".", "queue", "[", "key", "]", "[", "'status'", "]", "in", "[", "'queued'", ",", "'stashed'", "]", ":", "self", ".", "spawn_new", "(", "key", ")", "return", "True", "return", "False" ]
Start a specific processes.
[ "Start", "a", "specific", "processes", "." ]
python
train
aio-libs/aiodocker
aiodocker/nodes.py
https://github.com/aio-libs/aiodocker/blob/88d0285ddba8e606ff684278e0a831347209189c/aiodocker/nodes.py#L43-L69
async def update( self, *, node_id: str, version: int, spec: Mapping[str, Any] ) -> Mapping[str, Any]: """ Update the spec of a node. Args: node_id: The ID or name of the node version: version number of the node being updated spec: fields to be updated """ params = {"version": version} if "Role" in spec: assert spec["Role"] in {"worker", "manager"} if "Availability" in spec: assert spec["Availability"] in {"active", "pause", "drain"} response = await self.docker._query_json( "nodes/{node_id}/update".format(node_id=node_id), method="POST", params=params, data=spec, ) return response
[ "async", "def", "update", "(", "self", ",", "*", ",", "node_id", ":", "str", ",", "version", ":", "int", ",", "spec", ":", "Mapping", "[", "str", ",", "Any", "]", ")", "->", "Mapping", "[", "str", ",", "Any", "]", ":", "params", "=", "{", "\"version\"", ":", "version", "}", "if", "\"Role\"", "in", "spec", ":", "assert", "spec", "[", "\"Role\"", "]", "in", "{", "\"worker\"", ",", "\"manager\"", "}", "if", "\"Availability\"", "in", "spec", ":", "assert", "spec", "[", "\"Availability\"", "]", "in", "{", "\"active\"", ",", "\"pause\"", ",", "\"drain\"", "}", "response", "=", "await", "self", ".", "docker", ".", "_query_json", "(", "\"nodes/{node_id}/update\"", ".", "format", "(", "node_id", "=", "node_id", ")", ",", "method", "=", "\"POST\"", ",", "params", "=", "params", ",", "data", "=", "spec", ",", ")", "return", "response" ]
Update the spec of a node. Args: node_id: The ID or name of the node version: version number of the node being updated spec: fields to be updated
[ "Update", "the", "spec", "of", "a", "node", "." ]
python
train
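A sketch of calling the update method from the row above to drain a swarm node. It assumes aiodocker's Docker client exposes the nodes API shown here and that node payloads follow the Docker Engine API (ID, Version.Index, Spec, Description.Hostname); the hostname is a placeholder.

```python
import asyncio
import aiodocker

async def drain_node(hostname: str) -> None:
    docker = aiodocker.Docker()
    try:
        # Find the node and read its current spec and version index,
        # both of which are required by the update call.
        nodes = await docker.nodes.list()
        node = next(n for n in nodes if n["Description"]["Hostname"] == hostname)
        spec = dict(node["Spec"])
        spec["Availability"] = "drain"  # must be one of: active, pause, drain
        await docker.nodes.update(
            node_id=node["ID"],
            version=node["Version"]["Index"],
            spec=spec,
        )
    finally:
        await docker.close()

# asyncio.run(drain_node("worker-1"))
```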
bids-standard/pybids
bids/layout/layout.py
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/layout.py#L31-L74
def parse_file_entities(filename, entities=None, config=None, include_unmatched=False): """ Parse the passed filename for entity/value pairs. Args: filename (str): The filename to parse for entity values entities (list): An optional list of Entity instances to use in extraction. If passed, the config argument is ignored. config (str, Config, list): One or more Config objects or names of configurations to use in matching. Each element must be a Config object, or a valid Config name (e.g., 'bids' or 'derivatives'). If None, all available configs are used. include_unmatched (bool): If True, unmatched entities are included in the returned dict, with values set to None. If False (default), unmatched entities are ignored. Returns: A dict, where keys are Entity names and values are the values extracted from the filename. """ # Load Configs if needed if entities is None: if config is None: config = ['bids', 'derivatives'] config = [Config.load(c) if not isinstance(c, Config) else c for c in listify(config)] # Consolidate entities from all Configs into a single dict entities = {} for c in config: entities.update(c.entities) entities = entities.values() # Extract matches bf = BIDSFile(filename) ent_vals = {} for ent in entities: match = ent.match_file(bf) if match is not None or include_unmatched: ent_vals[ent.name] = match return ent_vals
[ "def", "parse_file_entities", "(", "filename", ",", "entities", "=", "None", ",", "config", "=", "None", ",", "include_unmatched", "=", "False", ")", ":", "# Load Configs if needed", "if", "entities", "is", "None", ":", "if", "config", "is", "None", ":", "config", "=", "[", "'bids'", ",", "'derivatives'", "]", "config", "=", "[", "Config", ".", "load", "(", "c", ")", "if", "not", "isinstance", "(", "c", ",", "Config", ")", "else", "c", "for", "c", "in", "listify", "(", "config", ")", "]", "# Consolidate entities from all Configs into a single dict", "entities", "=", "{", "}", "for", "c", "in", "config", ":", "entities", ".", "update", "(", "c", ".", "entities", ")", "entities", "=", "entities", ".", "values", "(", ")", "# Extract matches", "bf", "=", "BIDSFile", "(", "filename", ")", "ent_vals", "=", "{", "}", "for", "ent", "in", "entities", ":", "match", "=", "ent", ".", "match_file", "(", "bf", ")", "if", "match", "is", "not", "None", "or", "include_unmatched", ":", "ent_vals", "[", "ent", ".", "name", "]", "=", "match", "return", "ent_vals" ]
Parse the passed filename for entity/value pairs. Args: filename (str): The filename to parse for entity values entities (list): An optional list of Entity instances to use in extraction. If passed, the config argument is ignored. config (str, Config, list): One or more Config objects or names of configurations to use in matching. Each element must be a Config object, or a valid Config name (e.g., 'bids' or 'derivatives'). If None, all available configs are used. include_unmatched (bool): If True, unmatched entities are included in the returned dict, with values set to None. If False (default), unmatched entities are ignored. Returns: A dict, where keys are Entity names and values are the values extracted from the filename.
[ "Parse", "the", "passed", "filename", "for", "entity", "/", "value", "pairs", "." ]
python
train
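A small usage sketch for parse_file_entities with a typical BIDS filename; the import path and the exact keys returned can differ between pybids versions, so treat the expected output as an approximation.

```python
from bids.layout import parse_file_entities  # import path may vary by pybids version

entities = parse_file_entities('sub-01/func/sub-01_task-rest_run-1_bold.nii.gz')
# With the default 'bids' and 'derivatives' configs this yields something like:
# {'subject': '01', 'task': 'rest', 'run': 1, 'suffix': 'bold', 'extension': 'nii.gz'}
print(entities)
```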
gamechanger/mongothon
mongothon/model.py
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/model.py#L267-L270
def static_method(cls, f): """Decorator which dynamically binds static methods to the model for later use.""" setattr(cls, f.__name__, staticmethod(f)) return f
[ "def", "static_method", "(", "cls", ",", "f", ")", ":", "setattr", "(", "cls", ",", "f", ".", "__name__", ",", "staticmethod", "(", "f", ")", ")", "return", "f" ]
Decorator which dynamically binds static methods to the model for later use.
[ "Decorator", "which", "dynamically", "binds", "static", "methods", "to", "the", "model", "for", "later", "use", "." ]
python
train
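The decorator simply attaches the wrapped function to the model class as a staticmethod. A self-contained sketch of that behaviour; the Model and Car classes below are stand-ins, not the real mongothon model machinery.

```python
class Model(object):
    @classmethod
    def static_method(cls, f):
        """Decorator which dynamically binds static methods to the model for later use."""
        setattr(cls, f.__name__, staticmethod(f))
        return f

class Car(Model):
    pass

@Car.static_method
def normalize_plate(plate):
    # A plain function, bound onto the model as a staticmethod by the decorator.
    return plate.replace(' ', '').upper()

assert Car.normalize_plate('ab 123 cd') == 'AB123CD'
```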
rikrd/inspire
inspirespeech/__init__.py
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/__init__.py#L325-L371
def what_task(self, token_id, presented_pronunciation, index, phonemes, phonemes_probability, warn=True, default=True): """Provide the prediction of the what task. This function is used to predict the probability of a given phoneme being reported at a given index for a given token. :param token_id: The token for which the prediction is provided :param index: The index of the token for which the prediction is provided :param phonemes: The phoneme or phoneme sequence for which the prediction is being made (as a space separated string) :param phonemes_probability: The probability of the phoneme or phoneme sequence :param warn: Set to False in order to avoid warnings about 0 or 1 probabilities :param default: Set to False in order to avoid generating the default probabilities """ if phonemes_probability is not None and not 0. < phonemes_probability < 1. and warn: logging.warning('Setting a probability of [{}] to phonemes [{}] for token [{}].\n ' 'Using probabilities of 0.0 or 1.0 ' 'may lead to likelihoods of -Infinity'.format(phonemes_probability, phonemes, token_id)) default_preds = self._what_default(presented_pronunciation) if default else {} self['tokens'].setdefault(token_id, {}) \ .setdefault('what', default_preds) if index is not None: self['tokens'][token_id]['what'].setdefault(str(index), {}) if phonemes is not None: if phonemes_probability is not None and index is not None: self['tokens'][token_id]['what'][str(index)][phonemes] = phonemes_probability else: if index is not None: if phonemes in default_preds[str(index)]: self['tokens'][token_id]['what'][str(index)][phonemes] = default_preds[str(index)][phonemes] else: self['tokens'][token_id]['what'][str(index)].pop(phonemes) else: if str(index) in default_preds: self['tokens'][token_id]['what'][str(index)] = default_preds[str(index)] else: self['tokens'][token_id]['what'].pop(str(index))
[ "def", "what_task", "(", "self", ",", "token_id", ",", "presented_pronunciation", ",", "index", ",", "phonemes", ",", "phonemes_probability", ",", "warn", "=", "True", ",", "default", "=", "True", ")", ":", "if", "phonemes_probability", "is", "not", "None", "and", "not", "0.", "<", "phonemes_probability", "<", "1.", "and", "warn", ":", "logging", ".", "warning", "(", "'Setting a probability of [{}] to phonemes [{}] for token [{}].\\n '", "'Using probabilities of 0.0 or 1.0 '", "'may lead to likelihoods of -Infinity'", ".", "format", "(", "phonemes_probability", ",", "phonemes", ",", "token_id", ")", ")", "default_preds", "=", "self", ".", "_what_default", "(", "presented_pronunciation", ")", "if", "default", "else", "{", "}", "self", "[", "'tokens'", "]", ".", "setdefault", "(", "token_id", ",", "{", "}", ")", ".", "setdefault", "(", "'what'", ",", "default_preds", ")", "if", "index", "is", "not", "None", ":", "self", "[", "'tokens'", "]", "[", "token_id", "]", "[", "'what'", "]", ".", "setdefault", "(", "str", "(", "index", ")", ",", "{", "}", ")", "if", "phonemes", "is", "not", "None", ":", "if", "phonemes_probability", "is", "not", "None", "and", "index", "is", "not", "None", ":", "self", "[", "'tokens'", "]", "[", "token_id", "]", "[", "'what'", "]", "[", "str", "(", "index", ")", "]", "[", "phonemes", "]", "=", "phonemes_probability", "else", ":", "if", "index", "is", "not", "None", ":", "if", "phonemes", "in", "default_preds", "[", "str", "(", "index", ")", "]", ":", "self", "[", "'tokens'", "]", "[", "token_id", "]", "[", "'what'", "]", "[", "str", "(", "index", ")", "]", "[", "phonemes", "]", "=", "default_preds", "[", "str", "(", "index", ")", "]", "[", "phonemes", "]", "else", ":", "self", "[", "'tokens'", "]", "[", "token_id", "]", "[", "'what'", "]", "[", "str", "(", "index", ")", "]", ".", "pop", "(", "phonemes", ")", "else", ":", "if", "str", "(", "index", ")", "in", "default_preds", ":", "self", "[", "'tokens'", "]", "[", "token_id", "]", "[", "'what'", "]", "[", "str", "(", "index", ")", "]", "=", "default_preds", "[", "str", "(", "index", ")", "]", "else", ":", "self", "[", "'tokens'", "]", "[", "token_id", "]", "[", "'what'", "]", ".", "pop", "(", "str", "(", "index", ")", ")" ]
Provide the prediction of the what task. This function is used to predict the probability of a given phoneme being reported at a given index for a given token. :param token_id: The token for which the prediction is provided :param index: The index of the token for which the prediction is provided :param phonemes: The phoneme or phoneme sequence for which the prediction is being made (as a space separated string) :param phonemes_probability: The probability of the phoneme or phoneme sequence :param warn: Set to False in order to avoid warnings about 0 or 1 probabilities :param default: Set to False in order to avoid generating the default probabilities
[ "Provide", "the", "prediction", "of", "the", "what", "task", "." ]
python
train
trailofbits/manticore
manticore/native/cpu/x86.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/x86.py#L4632-L4650
def PXOR(cpu, dest, src): """ Logical exclusive OR. Performs a bitwise logical exclusive-OR (XOR) operation on the quadword source (second) and destination (first) operands and stores the result in the destination operand location. The source operand can be an MMX(TM) technology register or a quadword memory location; the destination operand must be an MMX register. Each bit of the result is 1 if the corresponding bits of the two operands are different; each bit is 0 if the corresponding bits of the operands are the same:: DEST = DEST XOR SRC; :param cpu: current CPU. :param dest: destination operand. :param src: quadword source operand. """ res = dest.write(dest.read() ^ src.read())
[ "def", "PXOR", "(", "cpu", ",", "dest", ",", "src", ")", ":", "res", "=", "dest", ".", "write", "(", "dest", ".", "read", "(", ")", "^", "src", ".", "read", "(", ")", ")" ]
Logical exclusive OR. Performs a bitwise logical exclusive-OR (XOR) operation on the quadword source (second) and destination (first) operands and stores the result in the destination operand location. The source operand can be an MMX(TM) technology register or a quadword memory location; the destination operand must be an MMX register. Each bit of the result is 1 if the corresponding bits of the two operands are different; each bit is 0 if the corresponding bits of the operands are the same:: DEST = DEST XOR SRC; :param cpu: current CPU. :param dest: destination operand. :param src: quadword source operand.
[ "Logical", "exclusive", "OR", "." ]
python
valid
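The emulation reduces to a single bitwise XOR of the two operands. A plain-Python illustration of the DEST = DEST XOR SRC semantics on a quadword (the values are chosen arbitrarily):

```python
# Bitwise XOR of two quadwords, mirroring DEST = DEST XOR SRC.
dest = 0xFF00FF00FF00FF00
src  = 0x0F0F0F0F0F0F0F0F
dest = (dest ^ src) & 0xFFFFFFFFFFFFFFFF  # keep the result within 64 bits
print(hex(dest))  # 0xf00ff00ff00ff00f: each result bit is 1 where the operand bits differ
```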
wal-e/wal-e
wal_e/worker/upload_pool.py
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/upload_pool.py#L56-L69
def _wait(self): """Block until an upload finishes Raise an exception if that tar volume failed with an error. """ val = self.wait_change.get() if isinstance(val, Exception): # Don't other uncharging, because execution is going to stop raise val else: # Uncharge for resources. self.member_burden -= len(val) self.concurrency_burden -= 1
[ "def", "_wait", "(", "self", ")", ":", "val", "=", "self", ".", "wait_change", ".", "get", "(", ")", "if", "isinstance", "(", "val", ",", "Exception", ")", ":", "# Don't other uncharging, because execution is going to stop", "raise", "val", "else", ":", "# Uncharge for resources.", "self", ".", "member_burden", "-=", "len", "(", "val", ")", "self", ".", "concurrency_burden", "-=", "1" ]
Block until an upload finishes Raise an exception if that tar volume failed with an error.
[ "Block", "until", "an", "upload", "finishes" ]
python
train
lemieuxl/pyGenClean
pyGenClean/DupSNPs/duplicated_snps.py
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/DupSNPs/duplicated_snps.py#L727-L930
def printProblems(completion, concordance, tped, snps, frequencies, prefix, diffFreq): """Print the statistics. :param completion: the completion of each duplicated markers. :param concordance: the pairwise concordance between duplicated markers. :param tped: a representation of the ``tped`` of duplicated markers. :param snps: the positions of the duplicated markers in the ``tped`` :param frequencies: the frequency of each of the duplicated markers. :param prefix: the prefix of the output files. :param diffFreq: the frequency difference threshold. :type completion: numpy.array :type concordance: dict :type tped: numpy.array :type snps: dict :type frequencies: dict :type prefix: str :type diffFreq: float :returns: a :py:class:`set` containing duplicated markers to complete. Creates a summary file (``prefix.summary``) containing information about duplicated markers: chromosome, position, name, alleles, status, completion percentage, completion number and mean concordance. The frequency and the minor allele are used to be certain that two duplicated markers are exactly the same marker (and not a tri-allelic one, for example). For each duplicated markers: 1. Constructs the set of available alleles for the first marker. 2. Constructs the set of available alleles for the second marker. 3. If the two sets are different, but the number of alleles is the same, we try to flip one of the marker. If the two sets are the same, but the number of alleles is 1, we set the status to ``homo_flip``. If the markers are heterozygous, we set the status to ``flip``. 4. If there is a difference in the number of alleles (one is homozygous, the other, heterozygous), and that there is on allele in common, we set the status to ``homo_hetero``. If there are no allele in common, we try to flip one. If the new sets have one allele in common, we set the status to ``homo_hetero_flip``. 5. If the sets of available alleles are the same (without flip), we check the frequency and the minor alleles. If the minor allele is different, we set the status to ``diff_minor_allele``. If the difference in frequencies is higher than a threshold, we set the status to ``diff_frequency``. 6. If all of the above fail, we set the status to ``problem``. Problems are written in the ``prefix.problems`` file, and contains the following columns: chromosome, position, name and status. This file contains all the markers with a status, as explained above. 
""" completionPercentage = np.true_divide(completion[0], completion[1]) outSummary = None try: outSummary = open(prefix + ".summary", "w") except IOError: msg = "%s: can't write file" % prefix + ".summary" raise ProgramError # Prints the header of the summary file print >>outSummary, "\t".join(["chr", "pos", "name", "alleles", "status", "% completion", "completion", "mean concordance"]) # The data structure containing the problems problems = {} for snpID, indexes in snps.iteritems(): for i, index in enumerate(indexes): # The SNP information (chromosome and position) toPrint = list(snpID) # The name of the SNP snpName = tped[index, 1] toPrint.append(snpName) # The frequency of the SNP snpFreq, mafAlleles = frequencies[snpName] # A list of the other SNP name with problems otherSnpNameWithProblem = set() # The alleles alleles = set() otherAlleles = set() status = [] for genotype in np.unique(tped[index, 4:]): alleles |= set(genotype.split(" ")) if "0" in alleles: alleles.remove("0") for j in xrange(i+1, len(indexes)): otherIndex = indexes[j] otherSnpName = tped[otherIndex, 1] # The frequency of the other SNP otherSnpFreq, otherMafAlleles = frequencies[otherSnpName] # Checking the alleles for genotype in np.unique(tped[otherIndex, 4:]): otherAlleles |= set(genotype.split(" ")) if "0" in otherAlleles: otherAlleles.remove("0") if alleles != otherAlleles: if len(alleles) == len(otherAlleles): # Same number of alleles # Try the flipped ones otherAlleles = flipGenotype(otherAlleles) if alleles == otherAlleles: if len(alleles) == 1: status.append("homo_flip") otherSnpNameWithProblem.add(otherSnpName) else: status.append("flip") otherSnpNameWithProblem.add(otherSnpName) else: status.append("problem") otherSnpNameWithProblem.add(otherSnpName) else: # Different number of alleles if len(alleles & otherAlleles) == 1: status.append("homo_hetero") otherSnpNameWithProblem.add(otherSnpName) else: # Try the flipped one otherAlleles = flipGenotype(otherAlleles) if len(alleles & otherAlleles) == 1: status.append("homo_hetero_flip") otherSnpNameWithProblem.add(otherSnpName) else: status.append("problem") otherSnpNameWithProblem.add(otherSnpName) else: # The alleles are the same, so we check the frequency if mafAlleles[0] != otherMafAlleles[0]: # They don't have the same minor allele status.append("diff_minor_allele") otherSnpNameWithProblem.add(otherSnpName) elif math.fabs(snpFreq - otherSnpFreq) > diffFreq: # They don't have same frequency status.append("diff_frequency") otherSnpNameWithProblem.add(otherSnpName) alleles = list(alleles) alleles.sort() if len(alleles) == 1: alleles.append(alleles[0]) toPrint.append(" ".join(alleles)) toPrint.append(";".join(status)) # The completion toPrint.append("%.8f" % completionPercentage[index]) toPrint.append("%d/%d" % (completion[0][index], completion[1][index])) # The concordance indexToKeep = list(set(range(len(indexes))) - set([i])) currConcordance = np.true_divide( concordance[snpID][0][i, indexToKeep], concordance[snpID][1][i, indexToKeep], ) currConcordance = np.mean(currConcordance) toPrint.append("%.8f" % currConcordance) print >>outSummary, "\t".join(toPrint) # Now updating the problems data structure if len(status) != len(otherSnpNameWithProblem): msg = "There is a problem with the problematic SNPs" raise ProgramError(msg) if len(status) > 0: if snpID not in problems: tmp = {"snpNames": {snpName}, "problems": set()} problems[snpID] = tmp # We have problems problems[snpID]["snpNames"] |= otherSnpNameWithProblem problems[snpID]["problems"] |= set(status) 
outSummary.close() outProblems = None try: outProblems = open(prefix + ".problems", "w") except IOError: msg = "%s: can't write file" % prefix + ".problems" raise ProgramError # Printing the header of the problem file... print >>outProblems, "\t".join(["chr", "pos", "name", "status"]) for snpID in problems.iterkeys(): toPrint = list(snpID) toPrint.append(";".join(list(problems[snpID]["snpNames"]))) toPrint.append(";".join(list(problems[snpID]["problems"]))) print >>outProblems, "\t".join(toPrint) outProblems.close() # Returning the SNPs to complete return set(snps.keys()) - set(problems.keys())
[ "def", "printProblems", "(", "completion", ",", "concordance", ",", "tped", ",", "snps", ",", "frequencies", ",", "prefix", ",", "diffFreq", ")", ":", "completionPercentage", "=", "np", ".", "true_divide", "(", "completion", "[", "0", "]", ",", "completion", "[", "1", "]", ")", "outSummary", "=", "None", "try", ":", "outSummary", "=", "open", "(", "prefix", "+", "\".summary\"", ",", "\"w\"", ")", "except", "IOError", ":", "msg", "=", "\"%s: can't write file\"", "%", "prefix", "+", "\".summary\"", "raise", "ProgramError", "# Prints the header of the summary file", "print", ">>", "outSummary", ",", "\"\\t\"", ".", "join", "(", "[", "\"chr\"", ",", "\"pos\"", ",", "\"name\"", ",", "\"alleles\"", ",", "\"status\"", ",", "\"% completion\"", ",", "\"completion\"", ",", "\"mean concordance\"", "]", ")", "# The data structure containing the problems", "problems", "=", "{", "}", "for", "snpID", ",", "indexes", "in", "snps", ".", "iteritems", "(", ")", ":", "for", "i", ",", "index", "in", "enumerate", "(", "indexes", ")", ":", "# The SNP information (chromosome and position)", "toPrint", "=", "list", "(", "snpID", ")", "# The name of the SNP", "snpName", "=", "tped", "[", "index", ",", "1", "]", "toPrint", ".", "append", "(", "snpName", ")", "# The frequency of the SNP", "snpFreq", ",", "mafAlleles", "=", "frequencies", "[", "snpName", "]", "# A list of the other SNP name with problems", "otherSnpNameWithProblem", "=", "set", "(", ")", "# The alleles", "alleles", "=", "set", "(", ")", "otherAlleles", "=", "set", "(", ")", "status", "=", "[", "]", "for", "genotype", "in", "np", ".", "unique", "(", "tped", "[", "index", ",", "4", ":", "]", ")", ":", "alleles", "|=", "set", "(", "genotype", ".", "split", "(", "\" \"", ")", ")", "if", "\"0\"", "in", "alleles", ":", "alleles", ".", "remove", "(", "\"0\"", ")", "for", "j", "in", "xrange", "(", "i", "+", "1", ",", "len", "(", "indexes", ")", ")", ":", "otherIndex", "=", "indexes", "[", "j", "]", "otherSnpName", "=", "tped", "[", "otherIndex", ",", "1", "]", "# The frequency of the other SNP", "otherSnpFreq", ",", "otherMafAlleles", "=", "frequencies", "[", "otherSnpName", "]", "# Checking the alleles", "for", "genotype", "in", "np", ".", "unique", "(", "tped", "[", "otherIndex", ",", "4", ":", "]", ")", ":", "otherAlleles", "|=", "set", "(", "genotype", ".", "split", "(", "\" \"", ")", ")", "if", "\"0\"", "in", "otherAlleles", ":", "otherAlleles", ".", "remove", "(", "\"0\"", ")", "if", "alleles", "!=", "otherAlleles", ":", "if", "len", "(", "alleles", ")", "==", "len", "(", "otherAlleles", ")", ":", "# Same number of alleles", "# Try the flipped ones", "otherAlleles", "=", "flipGenotype", "(", "otherAlleles", ")", "if", "alleles", "==", "otherAlleles", ":", "if", "len", "(", "alleles", ")", "==", "1", ":", "status", ".", "append", "(", "\"homo_flip\"", ")", "otherSnpNameWithProblem", ".", "add", "(", "otherSnpName", ")", "else", ":", "status", ".", "append", "(", "\"flip\"", ")", "otherSnpNameWithProblem", ".", "add", "(", "otherSnpName", ")", "else", ":", "status", ".", "append", "(", "\"problem\"", ")", "otherSnpNameWithProblem", ".", "add", "(", "otherSnpName", ")", "else", ":", "# Different number of alleles", "if", "len", "(", "alleles", "&", "otherAlleles", ")", "==", "1", ":", "status", ".", "append", "(", "\"homo_hetero\"", ")", "otherSnpNameWithProblem", ".", "add", "(", "otherSnpName", ")", "else", ":", "# Try the flipped one", "otherAlleles", "=", "flipGenotype", "(", "otherAlleles", ")", "if", "len", "(", "alleles", "&", "otherAlleles", ")", 
"==", "1", ":", "status", ".", "append", "(", "\"homo_hetero_flip\"", ")", "otherSnpNameWithProblem", ".", "add", "(", "otherSnpName", ")", "else", ":", "status", ".", "append", "(", "\"problem\"", ")", "otherSnpNameWithProblem", ".", "add", "(", "otherSnpName", ")", "else", ":", "# The alleles are the same, so we check the frequency", "if", "mafAlleles", "[", "0", "]", "!=", "otherMafAlleles", "[", "0", "]", ":", "# They don't have the same minor allele", "status", ".", "append", "(", "\"diff_minor_allele\"", ")", "otherSnpNameWithProblem", ".", "add", "(", "otherSnpName", ")", "elif", "math", ".", "fabs", "(", "snpFreq", "-", "otherSnpFreq", ")", ">", "diffFreq", ":", "# They don't have same frequency", "status", ".", "append", "(", "\"diff_frequency\"", ")", "otherSnpNameWithProblem", ".", "add", "(", "otherSnpName", ")", "alleles", "=", "list", "(", "alleles", ")", "alleles", ".", "sort", "(", ")", "if", "len", "(", "alleles", ")", "==", "1", ":", "alleles", ".", "append", "(", "alleles", "[", "0", "]", ")", "toPrint", ".", "append", "(", "\" \"", ".", "join", "(", "alleles", ")", ")", "toPrint", ".", "append", "(", "\";\"", ".", "join", "(", "status", ")", ")", "# The completion", "toPrint", ".", "append", "(", "\"%.8f\"", "%", "completionPercentage", "[", "index", "]", ")", "toPrint", ".", "append", "(", "\"%d/%d\"", "%", "(", "completion", "[", "0", "]", "[", "index", "]", ",", "completion", "[", "1", "]", "[", "index", "]", ")", ")", "# The concordance", "indexToKeep", "=", "list", "(", "set", "(", "range", "(", "len", "(", "indexes", ")", ")", ")", "-", "set", "(", "[", "i", "]", ")", ")", "currConcordance", "=", "np", ".", "true_divide", "(", "concordance", "[", "snpID", "]", "[", "0", "]", "[", "i", ",", "indexToKeep", "]", ",", "concordance", "[", "snpID", "]", "[", "1", "]", "[", "i", ",", "indexToKeep", "]", ",", ")", "currConcordance", "=", "np", ".", "mean", "(", "currConcordance", ")", "toPrint", ".", "append", "(", "\"%.8f\"", "%", "currConcordance", ")", "print", ">>", "outSummary", ",", "\"\\t\"", ".", "join", "(", "toPrint", ")", "# Now updating the problems data structure", "if", "len", "(", "status", ")", "!=", "len", "(", "otherSnpNameWithProblem", ")", ":", "msg", "=", "\"There is a problem with the problematic SNPs\"", "raise", "ProgramError", "(", "msg", ")", "if", "len", "(", "status", ")", ">", "0", ":", "if", "snpID", "not", "in", "problems", ":", "tmp", "=", "{", "\"snpNames\"", ":", "{", "snpName", "}", ",", "\"problems\"", ":", "set", "(", ")", "}", "problems", "[", "snpID", "]", "=", "tmp", "# We have problems", "problems", "[", "snpID", "]", "[", "\"snpNames\"", "]", "|=", "otherSnpNameWithProblem", "problems", "[", "snpID", "]", "[", "\"problems\"", "]", "|=", "set", "(", "status", ")", "outSummary", ".", "close", "(", ")", "outProblems", "=", "None", "try", ":", "outProblems", "=", "open", "(", "prefix", "+", "\".problems\"", ",", "\"w\"", ")", "except", "IOError", ":", "msg", "=", "\"%s: can't write file\"", "%", "prefix", "+", "\".problems\"", "raise", "ProgramError", "# Printing the header of the problem file...", "print", ">>", "outProblems", ",", "\"\\t\"", ".", "join", "(", "[", "\"chr\"", ",", "\"pos\"", ",", "\"name\"", ",", "\"status\"", "]", ")", "for", "snpID", "in", "problems", ".", "iterkeys", "(", ")", ":", "toPrint", "=", "list", "(", "snpID", ")", "toPrint", ".", "append", "(", "\";\"", ".", "join", "(", "list", "(", "problems", "[", "snpID", "]", "[", "\"snpNames\"", "]", ")", ")", ")", "toPrint", ".", "append", "(", "\";\"", ".", 
"join", "(", "list", "(", "problems", "[", "snpID", "]", "[", "\"problems\"", "]", ")", ")", ")", "print", ">>", "outProblems", ",", "\"\\t\"", ".", "join", "(", "toPrint", ")", "outProblems", ".", "close", "(", ")", "# Returning the SNPs to complete", "return", "set", "(", "snps", ".", "keys", "(", ")", ")", "-", "set", "(", "problems", ".", "keys", "(", ")", ")" ]
Print the statistics. :param completion: the completion of each duplicated markers. :param concordance: the pairwise concordance between duplicated markers. :param tped: a representation of the ``tped`` of duplicated markers. :param snps: the positions of the duplicated markers in the ``tped`` :param frequencies: the frequency of each of the duplicated markers. :param prefix: the prefix of the output files. :param diffFreq: the frequency difference threshold. :type completion: numpy.array :type concordance: dict :type tped: numpy.array :type snps: dict :type frequencies: dict :type prefix: str :type diffFreq: float :returns: a :py:class:`set` containing duplicated markers to complete. Creates a summary file (``prefix.summary``) containing information about duplicated markers: chromosome, position, name, alleles, status, completion percentage, completion number and mean concordance. The frequency and the minor allele are used to be certain that two duplicated markers are exactly the same marker (and not a tri-allelic one, for example). For each duplicated markers: 1. Constructs the set of available alleles for the first marker. 2. Constructs the set of available alleles for the second marker. 3. If the two sets are different, but the number of alleles is the same, we try to flip one of the marker. If the two sets are the same, but the number of alleles is 1, we set the status to ``homo_flip``. If the markers are heterozygous, we set the status to ``flip``. 4. If there is a difference in the number of alleles (one is homozygous, the other, heterozygous), and that there is on allele in common, we set the status to ``homo_hetero``. If there are no allele in common, we try to flip one. If the new sets have one allele in common, we set the status to ``homo_hetero_flip``. 5. If the sets of available alleles are the same (without flip), we check the frequency and the minor alleles. If the minor allele is different, we set the status to ``diff_minor_allele``. If the difference in frequencies is higher than a threshold, we set the status to ``diff_frequency``. 6. If all of the above fail, we set the status to ``problem``. Problems are written in the ``prefix.problems`` file, and contains the following columns: chromosome, position, name and status. This file contains all the markers with a status, as explained above.
[ "Print", "the", "statistics", "." ]
python
train
mitsei/dlkit
dlkit/services/relationship.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/relationship.py#L905-L913
def use_comparative_relationship_view(self): """Pass through to provider RelationshipLookupSession.use_comparative_relationship_view""" self._object_views['relationship'] = COMPARATIVE # self._get_provider_session('relationship_lookup_session') # To make sure the session is tracked for session in self._get_provider_sessions(): try: session.use_comparative_relationship_view() except AttributeError: pass
[ "def", "use_comparative_relationship_view", "(", "self", ")", ":", "self", ".", "_object_views", "[", "'relationship'", "]", "=", "COMPARATIVE", "# self._get_provider_session('relationship_lookup_session') # To make sure the session is tracked", "for", "session", "in", "self", ".", "_get_provider_sessions", "(", ")", ":", "try", ":", "session", ".", "use_comparative_relationship_view", "(", ")", "except", "AttributeError", ":", "pass" ]
Pass through to provider RelationshipLookupSession.use_comparative_relationship_view
[ "Pass", "through", "to", "provider", "RelationshipLookupSession", ".", "use_comparative_relationship_view" ]
python
train
OpenTreeOfLife/peyotl
peyotl/utility/get_config.py
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/utility/get_config.py#L315-L324
def get_config_object(): """Thread-safe accessor for the immutable default ConfigWrapper object""" global _DEFAULT_CONFIG_WRAPPER if _DEFAULT_CONFIG_WRAPPER is not None: return _DEFAULT_CONFIG_WRAPPER with _DEFAULT_CONFIG_WRAPPER_LOCK: if _DEFAULT_CONFIG_WRAPPER is not None: return _DEFAULT_CONFIG_WRAPPER _DEFAULT_CONFIG_WRAPPER = ConfigWrapper() return _DEFAULT_CONFIG_WRAPPER
[ "def", "get_config_object", "(", ")", ":", "global", "_DEFAULT_CONFIG_WRAPPER", "if", "_DEFAULT_CONFIG_WRAPPER", "is", "not", "None", ":", "return", "_DEFAULT_CONFIG_WRAPPER", "with", "_DEFAULT_CONFIG_WRAPPER_LOCK", ":", "if", "_DEFAULT_CONFIG_WRAPPER", "is", "not", "None", ":", "return", "_DEFAULT_CONFIG_WRAPPER", "_DEFAULT_CONFIG_WRAPPER", "=", "ConfigWrapper", "(", ")", "return", "_DEFAULT_CONFIG_WRAPPER" ]
Thread-safe accessor for the immutable default ConfigWrapper object
[ "Thread", "-", "safe", "accessor", "for", "the", "immutable", "default", "ConfigWrapper", "object" ]
python
train
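get_config_object is a double-checked locking accessor: a lock-free fast path, then a re-check under the lock so only one thread constructs the wrapper. A generic sketch of the same idiom; the names below are illustrative, not peyotl's.

```python
import threading

_INSTANCE = None
_INSTANCE_LOCK = threading.Lock()

def get_instance():
    global _INSTANCE
    if _INSTANCE is not None:          # fast path: already built, no lock needed
        return _INSTANCE
    with _INSTANCE_LOCK:               # slow path: serialize construction
        if _INSTANCE is not None:      # another thread may have built it meanwhile
            return _INSTANCE
        _INSTANCE = object()           # stand-in for ConfigWrapper()
        return _INSTANCE
```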
lsst-sqre/ltd-conveyor
ltdconveyor/keeper/build.py
https://github.com/lsst-sqre/ltd-conveyor/blob/c492937c4c1e050ccc4a0b9dcc38f9980d57e305/ltdconveyor/keeper/build.py#L60-L87
def confirm_build(build_url, keeper_token): """Confirm a build upload is complete. Wraps ``PATCH /builds/{build}``. Parameters ---------- build_url : `str` URL of the build resource. Given a build resource, this URL is available from the ``self_url`` field. keeper_token : `str` Auth token (`ltdconveyor.keeper.get_keeper_token`). Raises ------ ltdconveyor.keeper.KeeperError Raised if there is an error communicating with the LTD Keeper API. """ data = { 'uploaded': True } r = requests.patch( build_url, auth=(keeper_token, ''), json=data) if r.status_code != 200: raise KeeperError(r)
[ "def", "confirm_build", "(", "build_url", ",", "keeper_token", ")", ":", "data", "=", "{", "'uploaded'", ":", "True", "}", "r", "=", "requests", ".", "patch", "(", "build_url", ",", "auth", "=", "(", "keeper_token", ",", "''", ")", ",", "json", "=", "data", ")", "if", "r", ".", "status_code", "!=", "200", ":", "raise", "KeeperError", "(", "r", ")" ]
Confirm a build upload is complete. Wraps ``PATCH /builds/{build}``. Parameters ---------- build_url : `str` URL of the build resource. Given a build resource, this URL is available from the ``self_url`` field. keeper_token : `str` Auth token (`ltdconveyor.keeper.get_keeper_token`). Raises ------ ltdconveyor.keeper.KeeperError Raised if there is an error communicating with the LTD Keeper API.
[ "Confirm", "a", "build", "upload", "is", "complete", "." ]
python
test
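A minimal usage sketch for confirm_build; the build URL and token below are placeholders, and the import path simply follows the module location given in the row.

```python
from ltdconveyor.keeper.build import confirm_build

# Placeholders: in practice the URL comes from the build resource's ``self_url``
# field and the token from ltdconveyor.keeper.get_keeper_token().
build_url = 'https://keeper.example.org/builds/1234'
keeper_token = 'example-token'

confirm_build(build_url, keeper_token)  # raises KeeperError on a non-200 response
```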
myaooo/pysbrl
pysbrl/train.py
https://github.com/myaooo/pysbrl/blob/74bba8c6913a7f82e32313108f8c3e025b89d9c7/pysbrl/train.py#L11-L46
def train_sbrl(data_file, label_file, lambda_=20, eta=2, max_iters=300000, n_chains=20, alpha=1, seed=None, verbose=0): """ The basic training function of the scalable bayesian rule list. Users are suggested to use SBRL instead of this function. It takes the paths of the pre-processed data and label files as input, and return the parameters of the trained rule list. Check pysbrl.utils:categorical2pysbrl_data to see how to convert categorical data to the required format :param data_file: The data file :param label_file: The label file :param lambda_: A hyper parameter, the prior representing the expected length of the rule list :param eta: A hyper parameter, the prior representing the expected length of each rule :param max_iters: The maximum iteration of the algo :param n_chains: The number of markov chains to run :param alpha: The prior of the output probability distribution, see the paper for more detail. :return: A tuple of (`rule_ids`, `outputs`, `rule_strings`) `rule_ids`: the list of ids of rules `outputs`: the outputs matrix (prob distribution as a vector per rule) `rule_strings`: the whole list of rules in the format of strings like `u'{c2=x,c4=o,c5=b}'`. """ if isinstance(alpha, int): alphas = np.array([alpha], dtype=np.int32) elif isinstance(alpha, list): for a in alpha: assert isinstance(a, int) alphas = np.array(alpha, dtype=np.int32) else: raise ValueError('the argument alpha can only be int or List[int]') if seed is None: seed = -1 if not os.path.isfile(data_file): raise FileNotFoundError('data file %s does not exists!' % data_file) if not os.path.isfile(label_file): raise FileNotFoundError('label file %s does not exists!' % label_file) return _train(data_file, label_file, lambda_, eta, max_iters, n_chains, alphas, seed, verbose)
[ "def", "train_sbrl", "(", "data_file", ",", "label_file", ",", "lambda_", "=", "20", ",", "eta", "=", "2", ",", "max_iters", "=", "300000", ",", "n_chains", "=", "20", ",", "alpha", "=", "1", ",", "seed", "=", "None", ",", "verbose", "=", "0", ")", ":", "if", "isinstance", "(", "alpha", ",", "int", ")", ":", "alphas", "=", "np", ".", "array", "(", "[", "alpha", "]", ",", "dtype", "=", "np", ".", "int32", ")", "elif", "isinstance", "(", "alpha", ",", "list", ")", ":", "for", "a", "in", "alpha", ":", "assert", "isinstance", "(", "a", ",", "int", ")", "alphas", "=", "np", ".", "array", "(", "alpha", ",", "dtype", "=", "np", ".", "int32", ")", "else", ":", "raise", "ValueError", "(", "'the argument alpha can only be int or List[int]'", ")", "if", "seed", "is", "None", ":", "seed", "=", "-", "1", "if", "not", "os", ".", "path", ".", "isfile", "(", "data_file", ")", ":", "raise", "FileNotFoundError", "(", "'data file %s does not exists!'", "%", "data_file", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "label_file", ")", ":", "raise", "FileNotFoundError", "(", "'label file %s does not exists!'", "%", "label_file", ")", "return", "_train", "(", "data_file", ",", "label_file", ",", "lambda_", ",", "eta", ",", "max_iters", ",", "n_chains", ",", "alphas", ",", "seed", ",", "verbose", ")" ]
The basic training function of the scalable bayesian rule list. Users are suggested to use SBRL instead of this function. It takes the paths of the pre-processed data and label files as input, and return the parameters of the trained rule list. Check pysbrl.utils:categorical2pysbrl_data to see how to convert categorical data to the required format :param data_file: The data file :param label_file: The label file :param lambda_: A hyper parameter, the prior representing the expected length of the rule list :param eta: A hyper parameter, the prior representing the expected length of each rule :param max_iters: The maximum iteration of the algo :param n_chains: The number of markov chains to run :param alpha: The prior of the output probability distribution, see the paper for more detail. :return: A tuple of (`rule_ids`, `outputs`, `rule_strings`) `rule_ids`: the list of ids of rules `outputs`: the outputs matrix (prob distribution as a vector per rule) `rule_strings`: the whole list of rules in the format of strings like `u'{c2=x,c4=o,c5=b}'`.
[ "The", "basic", "training", "function", "of", "the", "scalable", "bayesian", "rule", "list", ".", "Users", "are", "suggested", "to", "use", "SBRL", "instead", "of", "this", "function", ".", "It", "takes", "the", "paths", "of", "the", "pre", "-", "processed", "data", "and", "label", "files", "as", "input", "and", "return", "the", "parameters", "of", "the", "trained", "rule", "list", "." ]
python
train
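A usage sketch for train_sbrl; the file paths are placeholders and must already be in the pysbrl input format (see pysbrl.utils, categorical2pysbrl_data), and the way the returned values are inspected here is only indicative.

```python
from pysbrl.train import train_sbrl

# Placeholder paths; both files must already be in the pre-processed pysbrl format.
rule_ids, outputs, rule_strings = train_sbrl(
    'data/tictactoe.out',
    'data/tictactoe.label',
    lambda_=20, eta=2, max_iters=300000, n_chains=20, alpha=1, seed=42,
)
print(len(rule_ids), 'rules in the learned list')
print(rule_strings[:3])  # rules rendered as strings like u'{c2=x,c4=o,c5=b}'
```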
numat/midas
midas/util.py
https://github.com/numat/midas/blob/c3a97a6cd67df1283831c3c78bf3f984212e97a8/midas/util.py#L36-L43
async def _connect(self): """Start asynchronous reconnect loop.""" self.waiting = True await self.client.start(self.ip) self.waiting = False if self.client.protocol is None: raise IOError("Could not connect to '{}'.".format(self.ip)) self.open = True
[ "async", "def", "_connect", "(", "self", ")", ":", "self", ".", "waiting", "=", "True", "await", "self", ".", "client", ".", "start", "(", "self", ".", "ip", ")", "self", ".", "waiting", "=", "False", "if", "self", ".", "client", ".", "protocol", "is", "None", ":", "raise", "IOError", "(", "\"Could not connect to '{}'.\"", ".", "format", "(", "self", ".", "ip", ")", ")", "self", ".", "open", "=", "True" ]
Start asynchronous reconnect loop.
[ "Start", "asynchronous", "reconnect", "loop", "." ]
python
train
monarch-initiative/dipper
dipper/utils/CurieUtil.py
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/utils/CurieUtil.py#L46-L60
def get_uri(self, curie): ''' Get a URI from a CURIE ''' if curie is None: return None parts = curie.split(':') if len(parts) == 1: if curie != '': LOG.error("Not a properly formed curie: \"%s\"", curie) return None prefix = parts[0] if prefix in self.curie_map: return '%s%s' % (self.curie_map.get(prefix), curie[(curie.index(':') + 1):]) LOG.error("Curie prefix not defined for %s", curie) return None
[ "def", "get_uri", "(", "self", ",", "curie", ")", ":", "if", "curie", "is", "None", ":", "return", "None", "parts", "=", "curie", ".", "split", "(", "':'", ")", "if", "len", "(", "parts", ")", "==", "1", ":", "if", "curie", "!=", "''", ":", "LOG", ".", "error", "(", "\"Not a properly formed curie: \\\"%s\\\"\"", ",", "curie", ")", "return", "None", "prefix", "=", "parts", "[", "0", "]", "if", "prefix", "in", "self", ".", "curie_map", ":", "return", "'%s%s'", "%", "(", "self", ".", "curie_map", ".", "get", "(", "prefix", ")", ",", "curie", "[", "(", "curie", ".", "index", "(", "':'", ")", "+", "1", ")", ":", "]", ")", "LOG", ".", "error", "(", "\"Curie prefix not defined for %s\"", ",", "curie", ")", "return", "None" ]
Get a URI from a CURIE
[ "Get", "a", "URI", "from", "a", "CURIE" ]
python
train
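A usage sketch for get_uri; it assumes the CurieUtil constructor accepts a prefix-to-IRI mapping (as the method's use of self.curie_map suggests), and the map below is a single illustrative entry.

```python
from dipper.utils.CurieUtil import CurieUtil

# Assumed constructor argument: a dict mapping CURIE prefixes to base IRIs.
curie_map = {'GO': 'http://purl.obolibrary.org/obo/GO_'}
cu = CurieUtil(curie_map)

print(cu.get_uri('GO:0008150'))  # -> http://purl.obolibrary.org/obo/GO_0008150
print(cu.get_uri('FOO:123'))     # unknown prefix: logs an error and returns None
```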
mfussenegger/cr8
cr8/clients.py
https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/clients.py#L146-L176
def _to_dsn(hosts): """Convert a host URI into a dsn for aiopg. >>> _to_dsn('aiopg://myhostname:4242/mydb') 'postgres://crate@myhostname:4242/mydb' >>> _to_dsn('aiopg://myhostname:4242') 'postgres://crate@myhostname:4242/doc' >>> _to_dsn('aiopg://hoschi:pw@myhostname:4242/doc?sslmode=require') 'postgres://hoschi:pw@myhostname:4242/doc?sslmode=require' >>> _to_dsn('aiopg://myhostname') 'postgres://crate@myhostname:5432/doc' """ p = urlparse(hosts) try: user_and_pw, netloc = p.netloc.split('@', maxsplit=1) except ValueError: netloc = p.netloc user_and_pw = 'crate' try: host, port = netloc.split(':', maxsplit=1) except ValueError: host = netloc port = 5432 dbname = p.path[1:] if p.path else 'doc' dsn = f'postgres://{user_and_pw}@{host}:{port}/{dbname}' if p.query: dsn += '?' + '&'.join(k + '=' + v[0] for k, v in parse_qs(p.query).items()) return dsn
[ "def", "_to_dsn", "(", "hosts", ")", ":", "p", "=", "urlparse", "(", "hosts", ")", "try", ":", "user_and_pw", ",", "netloc", "=", "p", ".", "netloc", ".", "split", "(", "'@'", ",", "maxsplit", "=", "1", ")", "except", "ValueError", ":", "netloc", "=", "p", ".", "netloc", "user_and_pw", "=", "'crate'", "try", ":", "host", ",", "port", "=", "netloc", ".", "split", "(", "':'", ",", "maxsplit", "=", "1", ")", "except", "ValueError", ":", "host", "=", "netloc", "port", "=", "5432", "dbname", "=", "p", ".", "path", "[", "1", ":", "]", "if", "p", ".", "path", "else", "'doc'", "dsn", "=", "f'postgres://{user_and_pw}@{host}:{port}/{dbname}'", "if", "p", ".", "query", ":", "dsn", "+=", "'?'", "+", "'&'", ".", "join", "(", "k", "+", "'='", "+", "v", "[", "0", "]", "for", "k", ",", "v", "in", "parse_qs", "(", "p", ".", "query", ")", ".", "items", "(", ")", ")", "return", "dsn" ]
Convert a host URI into a dsn for aiopg. >>> _to_dsn('aiopg://myhostname:4242/mydb') 'postgres://crate@myhostname:4242/mydb' >>> _to_dsn('aiopg://myhostname:4242') 'postgres://crate@myhostname:4242/doc' >>> _to_dsn('aiopg://hoschi:pw@myhostname:4242/doc?sslmode=require') 'postgres://hoschi:pw@myhostname:4242/doc?sslmode=require' >>> _to_dsn('aiopg://myhostname') 'postgres://crate@myhostname:5432/doc'
[ "Convert", "a", "host", "URI", "into", "a", "dsn", "for", "aiopg", "." ]
python
train
clalancette/pycdlib
pycdlib/pycdlib.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L1887-L1907
def _add_to_ptr_size(self, ptr): # type: (path_table_record.PathTableRecord) -> int ''' An internal method to add a PTR to a VD, adding space to the VD if necessary. Parameters: ptr - The PTR to add to the vd. Returns: The number of additional bytes that are needed to fit the new PTR (this may be zero). ''' num_bytes_to_add = 0 for pvd in self.pvds: # The add_to_ptr_size() method returns True if the PVD needs # additional space in the PTR to store this directory. We always # add 4 additional extents for that (2 for LE, 2 for BE). if pvd.add_to_ptr_size(path_table_record.PathTableRecord.record_length(ptr.len_di)): num_bytes_to_add += 4 * self.pvd.logical_block_size() return num_bytes_to_add
[ "def", "_add_to_ptr_size", "(", "self", ",", "ptr", ")", ":", "# type: (path_table_record.PathTableRecord) -> int", "num_bytes_to_add", "=", "0", "for", "pvd", "in", "self", ".", "pvds", ":", "# The add_to_ptr_size() method returns True if the PVD needs", "# additional space in the PTR to store this directory. We always", "# add 4 additional extents for that (2 for LE, 2 for BE).", "if", "pvd", ".", "add_to_ptr_size", "(", "path_table_record", ".", "PathTableRecord", ".", "record_length", "(", "ptr", ".", "len_di", ")", ")", ":", "num_bytes_to_add", "+=", "4", "*", "self", ".", "pvd", ".", "logical_block_size", "(", ")", "return", "num_bytes_to_add" ]
An internal method to add a PTR to a VD, adding space to the VD if necessary. Parameters: ptr - The PTR to add to the vd. Returns: The number of additional bytes that are needed to fit the new PTR (this may be zero).
[ "An", "internal", "method", "to", "add", "a", "PTR", "to", "a", "VD", "adding", "space", "to", "the", "VD", "if", "necessary", "." ]
python
train
pantsbuild/pants
src/python/pants/task/scm_publish_mixin.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/task/scm_publish_mixin.py#L212-L221
def publish_pushdb_changes_to_remote_scm(self, pushdb_file, coordinate, tag_name, tag_message, postscript=None): """Push pushdb changes to the remote scm repository, and then tag the commit if it succeeds.""" self._add_pushdb(pushdb_file) self.commit_pushdb(coordinate, postscript=postscript) self._push_and_tag_changes( tag_name=tag_name, tag_message='{message}{postscript}'.format(message=tag_message, postscript=postscript or '') )
[ "def", "publish_pushdb_changes_to_remote_scm", "(", "self", ",", "pushdb_file", ",", "coordinate", ",", "tag_name", ",", "tag_message", ",", "postscript", "=", "None", ")", ":", "self", ".", "_add_pushdb", "(", "pushdb_file", ")", "self", ".", "commit_pushdb", "(", "coordinate", ",", "postscript", "=", "postscript", ")", "self", ".", "_push_and_tag_changes", "(", "tag_name", "=", "tag_name", ",", "tag_message", "=", "'{message}{postscript}'", ".", "format", "(", "message", "=", "tag_message", ",", "postscript", "=", "postscript", "or", "''", ")", ")" ]
Push pushdb changes to the remote scm repository, and then tag the commit if it succeeds.
[ "Push", "pushdb", "changes", "to", "the", "remote", "scm", "repository", "and", "then", "tag", "the", "commit", "if", "it", "succeeds", "." ]
python
train
balloob/pychromecast
pychromecast/socket_client.py
https://github.com/balloob/pychromecast/blob/831b09c4fed185a7bffe0ea330b7849d5f4e36b6/pychromecast/socket_client.py#L550-L584
def _route_message(self, message, data): """ Route message to any handlers on the message namespace """ # route message to handlers if message.namespace in self._handlers: # debug messages if message.namespace != NS_HEARTBEAT: self.logger.debug( "[%s:%s] Received: %s", self.fn or self.host, self.port, _message_to_string(message, data)) # message handlers try: handled = \ self._handlers[message.namespace].receive_message( message, data) if not handled: if data.get(REQUEST_ID) not in self._request_callbacks: self.logger.debug( "[%s:%s] Message unhandled: %s", self.fn or self.host, self.port, _message_to_string(message, data)) except Exception: # pylint: disable=broad-except self.logger.exception( ("[%s:%s] Exception caught while sending message to " "controller %s: %s"), self.fn or self.host, self.port, type(self._handlers[message.namespace]).__name__, _message_to_string(message, data)) else: self.logger.debug( "[%s:%s] Received unknown namespace: %s", self.fn or self.host, self.port, _message_to_string(message, data))
[ "def", "_route_message", "(", "self", ",", "message", ",", "data", ")", ":", "# route message to handlers", "if", "message", ".", "namespace", "in", "self", ".", "_handlers", ":", "# debug messages", "if", "message", ".", "namespace", "!=", "NS_HEARTBEAT", ":", "self", ".", "logger", ".", "debug", "(", "\"[%s:%s] Received: %s\"", ",", "self", ".", "fn", "or", "self", ".", "host", ",", "self", ".", "port", ",", "_message_to_string", "(", "message", ",", "data", ")", ")", "# message handlers", "try", ":", "handled", "=", "self", ".", "_handlers", "[", "message", ".", "namespace", "]", ".", "receive_message", "(", "message", ",", "data", ")", "if", "not", "handled", ":", "if", "data", ".", "get", "(", "REQUEST_ID", ")", "not", "in", "self", ".", "_request_callbacks", ":", "self", ".", "logger", ".", "debug", "(", "\"[%s:%s] Message unhandled: %s\"", ",", "self", ".", "fn", "or", "self", ".", "host", ",", "self", ".", "port", ",", "_message_to_string", "(", "message", ",", "data", ")", ")", "except", "Exception", ":", "# pylint: disable=broad-except", "self", ".", "logger", ".", "exception", "(", "(", "\"[%s:%s] Exception caught while sending message to \"", "\"controller %s: %s\"", ")", ",", "self", ".", "fn", "or", "self", ".", "host", ",", "self", ".", "port", ",", "type", "(", "self", ".", "_handlers", "[", "message", ".", "namespace", "]", ")", ".", "__name__", ",", "_message_to_string", "(", "message", ",", "data", ")", ")", "else", ":", "self", ".", "logger", ".", "debug", "(", "\"[%s:%s] Received unknown namespace: %s\"", ",", "self", ".", "fn", "or", "self", ".", "host", ",", "self", ".", "port", ",", "_message_to_string", "(", "message", ",", "data", ")", ")" ]
Route message to any handlers on the message namespace
[ "Route", "message", "to", "any", "handlers", "on", "the", "message", "namespace" ]
python
train
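The record above routes each incoming message to a handler registered for its namespace. A generic, self-contained sketch of that dispatch pattern; the EchoHandler class and the namespace string are invented for illustration and are not pychromecast's actual classes:

class EchoHandler:
    def receive_message(self, message, data):
        print("handled:", data)
        return True                        # True means the message was consumed

handlers = {"urn:x-cast:demo": EchoHandler()}   # hypothetical namespace map

def route_message(namespace, message, data):
    handler = handlers.get(namespace)
    if handler is None:
        print("unknown namespace:", namespace)
        return False
    return handler.receive_message(message, data)

route_message("urn:x-cast:demo", None, {"type": "PING"})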
underworldcode/stripy
stripy-src/stripy/cartesian.py
https://github.com/underworldcode/stripy/blob/d4c3480c3e58c88489ded695eadbe7cd5bf94b48/stripy-src/stripy/cartesian.py#L507-L575
def interpolate_cubic(self, xi, yi, zdata, gradz=None, derivatives=False): """ Cubic spline interpolation/extrapolation to arbirary point(s). This method has C^1 continuity. Parameters ---------- xi : float / array of floats, shape (l,) x coordinates on the Cartesian plane yi : float / array of floats, shape (l,) y coordinates on the Cartesian plane zdata : array of floats, shape (n,) value at each point in the triangulation must be the same size of the mesh gradz (optional) : array of floats, shape (2,n) derivative at each point in the triangulation in the x-direction (first row), y-direction (second row) if not supplied it is evaluated using self.gradient derivatives (optional) : bool (default: False) optionally returns the first derivatives at point(s) (xi,yi) Returns ------- zi : float / array of floats, shape (l,) interpolated value(s) of (xi,yi) err : int / array of ints, shape (l,) whether interpolation (0), extrapolation (1) or error (other) dzx, dzy (optional) : float, array of floats, shape(l,) first partial derivatives in x and y direction at (xi,yi) """ if zdata.size != self.npoints: raise ValueError('zdata should be same size as mesh') if type(gradz) == type(None): gradX, gradY = self.gradient(zdata) gradX, gradY = self._shuffle_field(gradX, gradY) elif np.array(gradz).shape == (2,self.npoints): gradX, gradY = self._shuffle_field(gradz[0], gradz[1]) else: raise ValueError("gradz must be of shape {}".format((2,self.npoints))) iflgs = 0 dflag = 1 sigma = 0.0 xi = np.array(xi) yi = np.array(yi) size = xi.size zi = np.empty(size) dzx = np.empty(size) dzy = np.empty(size) zierr = np.empty(size, dtype=np.int) gradZ = np.vstack([gradX, gradY]) zdata = self._shuffle_field(zdata) for i in range(0, size): ist = np.abs(self._x - xi[i]).argmin() + 1 zi[i], dzx[i], dzy[i], zierr[i] = _srfpack.intrc1(xi[i], yi[i], self._x, self._y, zdata,\ self.lst, self.lptr, self.lend, iflgs, sigma, gradZ, dflag, ist) if derivatives: return zi, zierr, (dzx, dzy) else: return zi, zierr
[ "def", "interpolate_cubic", "(", "self", ",", "xi", ",", "yi", ",", "zdata", ",", "gradz", "=", "None", ",", "derivatives", "=", "False", ")", ":", "if", "zdata", ".", "size", "!=", "self", ".", "npoints", ":", "raise", "ValueError", "(", "'zdata should be same size as mesh'", ")", "if", "type", "(", "gradz", ")", "==", "type", "(", "None", ")", ":", "gradX", ",", "gradY", "=", "self", ".", "gradient", "(", "zdata", ")", "gradX", ",", "gradY", "=", "self", ".", "_shuffle_field", "(", "gradX", ",", "gradY", ")", "elif", "np", ".", "array", "(", "gradz", ")", ".", "shape", "==", "(", "2", ",", "self", ".", "npoints", ")", ":", "gradX", ",", "gradY", "=", "self", ".", "_shuffle_field", "(", "gradz", "[", "0", "]", ",", "gradz", "[", "1", "]", ")", "else", ":", "raise", "ValueError", "(", "\"gradz must be of shape {}\"", ".", "format", "(", "(", "2", ",", "self", ".", "npoints", ")", ")", ")", "iflgs", "=", "0", "dflag", "=", "1", "sigma", "=", "0.0", "xi", "=", "np", ".", "array", "(", "xi", ")", "yi", "=", "np", ".", "array", "(", "yi", ")", "size", "=", "xi", ".", "size", "zi", "=", "np", ".", "empty", "(", "size", ")", "dzx", "=", "np", ".", "empty", "(", "size", ")", "dzy", "=", "np", ".", "empty", "(", "size", ")", "zierr", "=", "np", ".", "empty", "(", "size", ",", "dtype", "=", "np", ".", "int", ")", "gradZ", "=", "np", ".", "vstack", "(", "[", "gradX", ",", "gradY", "]", ")", "zdata", "=", "self", ".", "_shuffle_field", "(", "zdata", ")", "for", "i", "in", "range", "(", "0", ",", "size", ")", ":", "ist", "=", "np", ".", "abs", "(", "self", ".", "_x", "-", "xi", "[", "i", "]", ")", ".", "argmin", "(", ")", "+", "1", "zi", "[", "i", "]", ",", "dzx", "[", "i", "]", ",", "dzy", "[", "i", "]", ",", "zierr", "[", "i", "]", "=", "_srfpack", ".", "intrc1", "(", "xi", "[", "i", "]", ",", "yi", "[", "i", "]", ",", "self", ".", "_x", ",", "self", ".", "_y", ",", "zdata", ",", "self", ".", "lst", ",", "self", ".", "lptr", ",", "self", ".", "lend", ",", "iflgs", ",", "sigma", ",", "gradZ", ",", "dflag", ",", "ist", ")", "if", "derivatives", ":", "return", "zi", ",", "zierr", ",", "(", "dzx", ",", "dzy", ")", "else", ":", "return", "zi", ",", "zierr" ]
Cubic spline interpolation/extrapolation to arbirary point(s). This method has C^1 continuity. Parameters ---------- xi : float / array of floats, shape (l,) x coordinates on the Cartesian plane yi : float / array of floats, shape (l,) y coordinates on the Cartesian plane zdata : array of floats, shape (n,) value at each point in the triangulation must be the same size of the mesh gradz (optional) : array of floats, shape (2,n) derivative at each point in the triangulation in the x-direction (first row), y-direction (second row) if not supplied it is evaluated using self.gradient derivatives (optional) : bool (default: False) optionally returns the first derivatives at point(s) (xi,yi) Returns ------- zi : float / array of floats, shape (l,) interpolated value(s) of (xi,yi) err : int / array of ints, shape (l,) whether interpolation (0), extrapolation (1) or error (other) dzx, dzy (optional) : float, array of floats, shape(l,) first partial derivatives in x and y direction at (xi,yi)
[ "Cubic", "spline", "interpolation", "/", "extrapolation", "to", "arbirary", "point", "(", "s", ")", ".", "This", "method", "has", "C^1", "continuity", "." ]
python
train
IDSIA/sacred
sacred/experiment.py
https://github.com/IDSIA/sacred/blob/72633776bed9b5bddf93ae7d215188e61970973a/sacred/experiment.py#L408-L418
def _gather(self, func): """ Removes the experiment's path (prefix) from the names of the gathered items. This means that, for example, 'experiment.print_config' becomes 'print_config'. """ for ingredient, _ in self.traverse_ingredients(): for name, item in func(ingredient): if ingredient == self: name = name[len(self.path) + 1:] yield name, item
[ "def", "_gather", "(", "self", ",", "func", ")", ":", "for", "ingredient", ",", "_", "in", "self", ".", "traverse_ingredients", "(", ")", ":", "for", "name", ",", "item", "in", "func", "(", "ingredient", ")", ":", "if", "ingredient", "==", "self", ":", "name", "=", "name", "[", "len", "(", "self", ".", "path", ")", "+", "1", ":", "]", "yield", "name", ",", "item" ]
Removes the experiment's path (prefix) from the names of the gathered items. This means that, for example, 'experiment.print_config' becomes 'print_config'.
[ "Removes", "the", "experiment", "s", "path", "(", "prefix", ")", "from", "the", "names", "of", "the", "gathered", "items", ".", "This", "means", "that", "for", "example", "experiment", ".", "print_config", "becomes", "print_config", "." ]
python
train
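The record above strips the experiment's own path prefix from gathered item names with a slice. A tiny sketch of that slicing, using a hypothetical path value:

path = "experiment"                        # hypothetical experiment path
name = "experiment.print_config"
stripped = name[len(path) + 1:]            # drop "<path>." from the front
print(stripped)                            # -> print_config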
Skype4Py/Skype4Py
Skype4Py/skype.py
https://github.com/Skype4Py/Skype4Py/blob/c48d83f7034109fe46315d45a066126002c6e0d4/Skype4Py/skype.py#L588-L600
def CreateSms(self, MessageType, *TargetNumbers): """Creates an SMS message. :Parameters: MessageType : `enums`.smsMessageType* Message type. TargetNumbers : str One or more target SMS numbers. :return: An sms message object. :rtype: `SmsMessage` """ return SmsMessage(self, chop(self._DoCommand('CREATE SMS %s %s' % (MessageType, ', '.join(TargetNumbers))), 2)[1])
[ "def", "CreateSms", "(", "self", ",", "MessageType", ",", "*", "TargetNumbers", ")", ":", "return", "SmsMessage", "(", "self", ",", "chop", "(", "self", ".", "_DoCommand", "(", "'CREATE SMS %s %s'", "%", "(", "MessageType", ",", "', '", ".", "join", "(", "TargetNumbers", ")", ")", ")", ",", "2", ")", "[", "1", "]", ")" ]
Creates an SMS message. :Parameters: MessageType : `enums`.smsMessageType* Message type. TargetNumbers : str One or more target SMS numbers. :return: An sms message object. :rtype: `SmsMessage`
[ "Creates", "an", "SMS", "message", "." ]
python
train
dh1tw/pyhamtools
pyhamtools/callinfo.py
https://github.com/dh1tw/pyhamtools/blob/ee7e4b8732e23c298da10e07163748156c16d0fa/pyhamtools/callinfo.py#L341-L376
def get_lat_long(self, callsign, timestamp=timestamp_now): """ Returns Latitude and Longitude for a callsign Args: callsign (str): Amateur Radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: dict: Containing Latitude and Longitude Raises: KeyError: No data found for callsign Example: The following code returns Latitude & Longitude for "DH1TW" >>> from pyhamtools import LookupLib, Callinfo >>> my_lookuplib = LookupLib(lookuptype="countryfile") >>> cic = Callinfo(my_lookuplib) >>> cic.get_lat_long("DH1TW") { 'latitude': 51.0, 'longitude': -10.0 } Note: Unfortunately, in most cases the returned Latitude and Longitude are not very precise. Clublog and Country-files.com use the country's capital coordinates in most cases, if no dedicated entry in the database exists. Best results will be retrieved with QRZ.com Lookup. """ callsign_data = self.get_all(callsign, timestamp=timestamp) return { const.LATITUDE: callsign_data[const.LATITUDE], const.LONGITUDE: callsign_data[const.LONGITUDE] }
[ "def", "get_lat_long", "(", "self", ",", "callsign", ",", "timestamp", "=", "timestamp_now", ")", ":", "callsign_data", "=", "self", ".", "get_all", "(", "callsign", ",", "timestamp", "=", "timestamp", ")", "return", "{", "const", ".", "LATITUDE", ":", "callsign_data", "[", "const", ".", "LATITUDE", "]", ",", "const", ".", "LONGITUDE", ":", "callsign_data", "[", "const", ".", "LONGITUDE", "]", "}" ]
Returns Latitude and Longitude for a callsign Args: callsign (str): Amateur Radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: dict: Containing Latitude and Longitude Raises: KeyError: No data found for callsign Example: The following code returns Latitude & Longitude for "DH1TW" >>> from pyhamtools import LookupLib, Callinfo >>> my_lookuplib = LookupLib(lookuptype="countryfile") >>> cic = Callinfo(my_lookuplib) >>> cic.get_lat_long("DH1TW") { 'latitude': 51.0, 'longitude': -10.0 } Note: Unfortunately, in most cases the returned Latitude and Longitude are not very precise. Clublog and Country-files.com use the country's capital coordinates in most cases, if no dedicated entry in the database exists. Best results will be retrieved with QRZ.com Lookup.
[ "Returns", "Latitude", "and", "Longitude", "for", "a", "callsign" ]
python
train
daskol/nls
nls/animation.py
https://github.com/daskol/nls/blob/00bb4555e4f56e222dc6f54faf2e286567519626/nls/animation.py#L30-L41
def render(self, filename): """Perform initialization of render, set quality and size video attributes and then call template method that is defined in child class. """ self.elapsed_time = -time() dpi = 100 fig = figure(figsize=(16, 9), dpi=dpi) with self.writer.saving(fig, filename, dpi): for frame_id in xrange(self.frames + 1): self.renderFrame(frame_id) self.writer.grab_frame() self.elapsed_time += time()
[ "def", "render", "(", "self", ",", "filename", ")", ":", "self", ".", "elapsed_time", "=", "-", "time", "(", ")", "dpi", "=", "100", "fig", "=", "figure", "(", "figsize", "=", "(", "16", ",", "9", ")", ",", "dpi", "=", "dpi", ")", "with", "self", ".", "writer", ".", "saving", "(", "fig", ",", "filename", ",", "dpi", ")", ":", "for", "frame_id", "in", "xrange", "(", "self", ".", "frames", "+", "1", ")", ":", "self", ".", "renderFrame", "(", "frame_id", ")", "self", ".", "writer", ".", "grab_frame", "(", ")", "self", ".", "elapsed_time", "+=", "time", "(", ")" ]
Perform initialization of render, set quality and size video attributes and then call template method that is defined in child class.
[ "Perform", "initialization", "of", "render", "set", "quality", "and", "size", "video", "attributes", "and", "then", "call", "template", "method", "that", "is", "defined", "in", "child", "class", "." ]
python
train
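The record above drives a matplotlib movie writer: open a saving() context on a figure, then call grab_frame() once per rendered frame. A small self-contained sketch of the same loop with matplotlib's FFMpegWriter; the sine-wave drawing is a stand-in for the project's renderFrame, and an ffmpeg binary is assumed to be installed:

import numpy as np
import matplotlib
matplotlib.use("Agg")                      # render off-screen
import matplotlib.pyplot as plt
from matplotlib.animation import FFMpegWriter

fig, ax = plt.subplots(figsize=(16, 9), dpi=100)
writer = FFMpegWriter(fps=25)
x = np.linspace(0, 2 * np.pi, 200)

with writer.saving(fig, "demo.mp4", dpi=100):
    for frame_id in range(50):             # analogous to renderFrame(frame_id)
        ax.clear()
        ax.plot(x, np.sin(x + 0.1 * frame_id))
        writer.grab_frame()                # append the current figure as a frame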
wreckage/django-happenings
happenings/utils/calendars.py
https://github.com/wreckage/django-happenings/blob/7bca5576efa6cd4c4e87356bf9e5b8cd538ae91d/happenings/utils/calendars.py#L380-L398
def formatday(self, day, weekday): """Return a day as a table cell.""" super(MiniEventCalendar, self).formatday(day, weekday) now = get_now() self.day = day if day == 0: return '<td class="noday">&nbsp;</td>' # day outside month elif now.month == self.mo and now.year == self.yr and day == now.day: if day in self.count: self.popover_helper() return self.wkday_today + self.anch + self.cal_event + self.end else: return self.wkday_today + self.anch + self.end elif day in self.count: self.popover_helper() return self.wkday_not_today + self.anch + self.cal_event + self.end else: return self.wkday_not_today + self.anch + self.end
[ "def", "formatday", "(", "self", ",", "day", ",", "weekday", ")", ":", "super", "(", "MiniEventCalendar", ",", "self", ")", ".", "formatday", "(", "day", ",", "weekday", ")", "now", "=", "get_now", "(", ")", "self", ".", "day", "=", "day", "if", "day", "==", "0", ":", "return", "'<td class=\"noday\">&nbsp;</td>'", "# day outside month", "elif", "now", ".", "month", "==", "self", ".", "mo", "and", "now", ".", "year", "==", "self", ".", "yr", "and", "day", "==", "now", ".", "day", ":", "if", "day", "in", "self", ".", "count", ":", "self", ".", "popover_helper", "(", ")", "return", "self", ".", "wkday_today", "+", "self", ".", "anch", "+", "self", ".", "cal_event", "+", "self", ".", "end", "else", ":", "return", "self", ".", "wkday_today", "+", "self", ".", "anch", "+", "self", ".", "end", "elif", "day", "in", "self", ".", "count", ":", "self", ".", "popover_helper", "(", ")", "return", "self", ".", "wkday_not_today", "+", "self", ".", "anch", "+", "self", ".", "cal_event", "+", "self", ".", "end", "else", ":", "return", "self", ".", "wkday_not_today", "+", "self", ".", "anch", "+", "self", ".", "end" ]
Return a day as a table cell.
[ "Return", "a", "day", "as", "a", "table", "cell", "." ]
python
test
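The record above overrides formatday() to emit one HTML table cell per calendar day. The same hook exists on the standard library's calendar.HTMLCalendar; a minimal stdlib-only sketch of the pattern (not the django-happenings classes):

from calendar import HTMLCalendar
from datetime import date

class HighlightToday(HTMLCalendar):
    def formatday(self, day, weekday):
        if day == 0:
            return '<td class="noday">&nbsp;</td>'   # day outside month
        css = "today" if day == date.today().day else self.cssclasses[weekday]
        return '<td class="%s">%d</td>' % (css, day)

print(HighlightToday().formatmonth(2024, 1)[:80])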
flowersteam/explauto
explauto/models/pydmps/dmp_rhythmic.py
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/models/pydmps/dmp_rhythmic.py#L61-L75
def gen_goal(self, y_des): """Generate the goal for path imitation. For rhythmic DMPs the goal is the average of the desired trajectory. y_des np.array: the desired trajectory to follow """ goal = np.zeros(self.dmps) for n in range(self.dmps): num_idx = ~np.isnan(y_des[n]) # ignore nan's when calculating goal goal[n] = .5 * (y_des[n,num_idx].min() + \ y_des[n,num_idx].max()) return goal
[ "def", "gen_goal", "(", "self", ",", "y_des", ")", ":", "goal", "=", "np", ".", "zeros", "(", "self", ".", "dmps", ")", "for", "n", "in", "range", "(", "self", ".", "dmps", ")", ":", "num_idx", "=", "~", "np", ".", "isnan", "(", "y_des", "[", "n", "]", ")", "# ignore nan's when calculating goal", "goal", "[", "n", "]", "=", ".5", "*", "(", "y_des", "[", "n", ",", "num_idx", "]", ".", "min", "(", ")", "+", "y_des", "[", "n", ",", "num_idx", "]", ".", "max", "(", ")", ")", "return", "goal" ]
Generate the goal for path imitation. For rhythmic DMPs the goal is the average of the desired trajectory. y_des np.array: the desired trajectory to follow
[ "Generate", "the", "goal", "for", "path", "imitation", ".", "For", "rhythmic", "DMPs", "the", "goal", "is", "the", "average", "of", "the", "desired", "trajectory", ".", "y_des", "np", ".", "array", ":", "the", "desired", "trajectory", "to", "follow" ]
python
train
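The record above computes each DMP dimension's goal as the midpoint between the trajectory's minimum and maximum, skipping NaN samples. A small numpy sketch of that calculation with made-up data:

import numpy as np

# y_des has one row per DMP dimension; NaNs mark missing samples.
y_des = np.array([[0.0, 1.0, np.nan, 3.0],
                  [2.0, np.nan, 6.0, 4.0]])

goal = np.zeros(y_des.shape[0])
for n in range(y_des.shape[0]):
    valid = ~np.isnan(y_des[n])            # ignore NaNs, as in the record
    goal[n] = 0.5 * (y_des[n, valid].min() + y_des[n, valid].max())

print(goal)                                # midpoints per row: 1.5 and 4.0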
ampl/amplpy
amplpy/ampl.py
https://github.com/ampl/amplpy/blob/39df6954049a11a8f666aed26853259b4687099a/amplpy/ampl.py#L710-L729
def setOutputHandler(self, outputhandler): """ Sets a new output handler. Args: outputhandler: The function handling the AMPL output derived from interpreting user commands. """ class OutputHandlerInternal(amplpython.OutputHandler): def output(self, kind, msg): outputhandler.output(kind, msg) self._outputhandler = outputhandler self._outputhandler_internal = OutputHandlerInternal() lock_and_call( lambda: self._impl.setOutputHandler( self._outputhandler_internal ), self._lock )
[ "def", "setOutputHandler", "(", "self", ",", "outputhandler", ")", ":", "class", "OutputHandlerInternal", "(", "amplpython", ".", "OutputHandler", ")", ":", "def", "output", "(", "self", ",", "kind", ",", "msg", ")", ":", "outputhandler", ".", "output", "(", "kind", ",", "msg", ")", "self", ".", "_outputhandler", "=", "outputhandler", "self", ".", "_outputhandler_internal", "=", "OutputHandlerInternal", "(", ")", "lock_and_call", "(", "lambda", ":", "self", ".", "_impl", ".", "setOutputHandler", "(", "self", ".", "_outputhandler_internal", ")", ",", "self", ".", "_lock", ")" ]
Sets a new output handler. Args: outputhandler: The function handling the AMPL output derived from interpreting user commands.
[ "Sets", "a", "new", "output", "handler", "." ]
python
train
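The record above forwards AMPL output to a user-supplied handler whose output(kind, msg) method is called for each chunk. A hedged usage sketch: the handler interface is taken from the record, while the AMPL() entry point and eval() call are assumed from amplpy's public API and require an AMPL installation:

from amplpy import AMPL                    # assumes amplpy and AMPL are installed

class PrintHandler(object):
    # Only an output(kind, msg) method is required by the wrapper in the record.
    def output(self, kind, msg):
        print("AMPL says:", msg, end="")

ampl = AMPL()
ampl.setOutputHandler(PrintHandler())
ampl.eval("option version;")               # produces output through the handler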
CivicSpleen/ambry
ambry/orm/source.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/source.py#L271-L310
def update_table(self, unknown_type='str'): """Update the source table from the datafile""" from ambry_sources.intuit import TypeIntuiter st = self.source_table if self.reftype == 'partition': for c in self.partition.table.columns: st.add_column(c.sequence_id, source_header=c.name, dest_header=c.name, datatype=c.datatype, description = c.description) elif self.datafile.exists: with self.datafile.reader as r: names = set() for col in r.columns: name = col['name'] if name in names: # Handle duplicate names. name = name+"_"+str(col['pos']) names.add(name) c = st.column(name) dt = col['resolved_type'] if col['resolved_type'] != 'unknown' else unknown_type if c: c.datatype = TypeIntuiter.promote_type(c.datatype, col['resolved_type']) else: c = st.add_column(col['pos'], source_header=name, dest_header=name, datatype=col['resolved_type'], description=col['description'], has_codes=col['has_codes'])
[ "def", "update_table", "(", "self", ",", "unknown_type", "=", "'str'", ")", ":", "from", "ambry_sources", ".", "intuit", "import", "TypeIntuiter", "st", "=", "self", ".", "source_table", "if", "self", ".", "reftype", "==", "'partition'", ":", "for", "c", "in", "self", ".", "partition", ".", "table", ".", "columns", ":", "st", ".", "add_column", "(", "c", ".", "sequence_id", ",", "source_header", "=", "c", ".", "name", ",", "dest_header", "=", "c", ".", "name", ",", "datatype", "=", "c", ".", "datatype", ",", "description", "=", "c", ".", "description", ")", "elif", "self", ".", "datafile", ".", "exists", ":", "with", "self", ".", "datafile", ".", "reader", "as", "r", ":", "names", "=", "set", "(", ")", "for", "col", "in", "r", ".", "columns", ":", "name", "=", "col", "[", "'name'", "]", "if", "name", "in", "names", ":", "# Handle duplicate names.", "name", "=", "name", "+", "\"_\"", "+", "str", "(", "col", "[", "'pos'", "]", ")", "names", ".", "add", "(", "name", ")", "c", "=", "st", ".", "column", "(", "name", ")", "dt", "=", "col", "[", "'resolved_type'", "]", "if", "col", "[", "'resolved_type'", "]", "!=", "'unknown'", "else", "unknown_type", "if", "c", ":", "c", ".", "datatype", "=", "TypeIntuiter", ".", "promote_type", "(", "c", ".", "datatype", ",", "col", "[", "'resolved_type'", "]", ")", "else", ":", "c", "=", "st", ".", "add_column", "(", "col", "[", "'pos'", "]", ",", "source_header", "=", "name", ",", "dest_header", "=", "name", ",", "datatype", "=", "col", "[", "'resolved_type'", "]", ",", "description", "=", "col", "[", "'description'", "]", ",", "has_codes", "=", "col", "[", "'has_codes'", "]", ")" ]
Update the source table from the datafile
[ "Update", "the", "source", "table", "from", "the", "datafile" ]
python
train
coldfix/udiskie
udiskie/tray.py
https://github.com/coldfix/udiskie/blob/804c9d27df6f7361fec3097c432398f2d702f911/udiskie/tray.py#L337-L342
def _create_statusicon(self): """Return a new Gtk.StatusIcon.""" statusicon = Gtk.StatusIcon() statusicon.set_from_gicon(self._icons.get_gicon('media')) statusicon.set_tooltip_text(_("udiskie")) return statusicon
[ "def", "_create_statusicon", "(", "self", ")", ":", "statusicon", "=", "Gtk", ".", "StatusIcon", "(", ")", "statusicon", ".", "set_from_gicon", "(", "self", ".", "_icons", ".", "get_gicon", "(", "'media'", ")", ")", "statusicon", ".", "set_tooltip_text", "(", "_", "(", "\"udiskie\"", ")", ")", "return", "statusicon" ]
Return a new Gtk.StatusIcon.
[ "Return", "a", "new", "Gtk", ".", "StatusIcon", "." ]
python
train
saltstack/salt
salt/modules/netbsd_sysctl.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netbsd_sysctl.py#L30-L66
def show(config_file=False): ''' Return a list of sysctl parameters for this minion CLI Example: .. code-block:: bash salt '*' sysctl.show ''' roots = ( 'kern', 'vm', 'vfs', 'net', 'hw', 'machdep', 'user', 'ddb', 'proc', 'emul', 'security', 'init' ) cmd = 'sysctl -ae' ret = {} out = __salt__['cmd.run'](cmd, output_loglevel='trace') comps = [''] for line in out.splitlines(): if any([line.startswith('{0}.'.format(root)) for root in roots]): comps = re.split('[=:]', line, 1) ret[comps[0]] = comps[1] elif comps[0]: ret[comps[0]] += '{0}\n'.format(line) else: continue return ret
[ "def", "show", "(", "config_file", "=", "False", ")", ":", "roots", "=", "(", "'kern'", ",", "'vm'", ",", "'vfs'", ",", "'net'", ",", "'hw'", ",", "'machdep'", ",", "'user'", ",", "'ddb'", ",", "'proc'", ",", "'emul'", ",", "'security'", ",", "'init'", ")", "cmd", "=", "'sysctl -ae'", "ret", "=", "{", "}", "out", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "output_loglevel", "=", "'trace'", ")", "comps", "=", "[", "''", "]", "for", "line", "in", "out", ".", "splitlines", "(", ")", ":", "if", "any", "(", "[", "line", ".", "startswith", "(", "'{0}.'", ".", "format", "(", "root", ")", ")", "for", "root", "in", "roots", "]", ")", ":", "comps", "=", "re", ".", "split", "(", "'[=:]'", ",", "line", ",", "1", ")", "ret", "[", "comps", "[", "0", "]", "]", "=", "comps", "[", "1", "]", "elif", "comps", "[", "0", "]", ":", "ret", "[", "comps", "[", "0", "]", "]", "+=", "'{0}\\n'", ".", "format", "(", "line", ")", "else", ":", "continue", "return", "ret" ]
Return a list of sysctl parameters for this minion CLI Example: .. code-block:: bash salt '*' sysctl.show
[ "Return", "a", "list", "of", "sysctl", "parameters", "for", "this", "minion" ]
python
train
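The record above parses `sysctl -ae` output by splitting each recognized line once on '=' or ':' and folding continuation lines into the previous key's value. A self-contained sketch of that parsing on canned output, so no Salt or NetBSD is needed:

import re

sample = """kern.ostype=NetBSD
kern.version=NetBSD 9.3 (GENERIC)
 with a wrapped continuation line
hw.ncpu=4"""

roots = ("kern", "vm", "vfs", "net", "hw", "machdep", "user",
         "ddb", "proc", "emul", "security", "init")

ret, comps = {}, [""]
for line in sample.splitlines():
    if any(line.startswith(root + ".") for root in roots):
        comps = re.split("[=:]", line, maxsplit=1)   # split key from value once
        ret[comps[0]] = comps[1]
    elif comps[0]:
        ret[comps[0]] += "\n" + line                 # continuation of previous value

print(ret["hw.ncpu"])
print(ret["kern.version"])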
moonso/vcf_parser
vcf_parser/header_parser.py
https://github.com/moonso/vcf_parser/blob/8e2b6724e31995e0d43af501f25974310c6b843b/vcf_parser/header_parser.py#L238-L254
def add_info(self, info_id, number, entry_type, description): """ Add an info line to the header. Arguments: info_id (str): The id of the info line number (str): Integer or any of [A,R,G,.] entry_type (str): Any of [Integer,Float,Flag,Character,String] description (str): A description of the info line """ info_line = '##INFO=<ID={0},Number={1},Type={2},Description="{3}">'.format( info_id, number, entry_type, description ) self.logger.info("Adding info line to vcf: {0}".format(info_line)) self.parse_meta_data(info_line) return
[ "def", "add_info", "(", "self", ",", "info_id", ",", "number", ",", "entry_type", ",", "description", ")", ":", "info_line", "=", "'##INFO=<ID={0},Number={1},Type={2},Description=\"{3}\">'", ".", "format", "(", "info_id", ",", "number", ",", "entry_type", ",", "description", ")", "self", ".", "logger", ".", "info", "(", "\"Adding info line to vcf: {0}\"", ".", "format", "(", "info_line", ")", ")", "self", ".", "parse_meta_data", "(", "info_line", ")", "return" ]
Add an info line to the header. Arguments: info_id (str): The id of the info line number (str): Integer or any of [A,R,G,.] entry_type (str): Any of [Integer,Float,Flag,Character,String] description (str): A description of the info line
[ "Add", "an", "info", "line", "to", "the", "header", ".", "Arguments", ":", "info_id", "(", "str", ")", ":", "The", "id", "of", "the", "info", "line", "number", "(", "str", ")", ":", "Integer", "or", "any", "of", "[", "A", "R", "G", ".", "]", "entry_type", "(", "str", ")", ":", "Any", "of", "[", "Integer", "Float", "Flag", "Character", "String", "]", "description", "(", "str", ")", ":", "A", "description", "of", "the", "info", "line" ]
python
train
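The record above assembles a VCF ##INFO meta line with str.format before handing it to the header parser. A tiny sketch showing the exact line produced for sample arguments:

info_line = '##INFO=<ID={0},Number={1},Type={2},Description="{3}">'.format(
    "DP", "1", "Integer", "Total read depth")
print(info_line)
# ##INFO=<ID=DP,Number=1,Type=Integer,Description="Total read depth">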
IdentityPython/SATOSA
src/satosa/frontends/saml2.py
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/frontends/saml2.py#L439-L464
def _register_endpoints(self, providers): """ Register methods to endpoints :type providers: list[str] :rtype: list[(str, ((satosa.context.Context, Any) -> satosa.response.Response, Any))] :param providers: A list of backend providers :return: A list of endpoint/method pairs """ url_map = [] for endp_category in self.endpoints: for binding, endp in self.endpoints[endp_category].items(): valid_providers = "" for provider in providers: valid_providers = "{}|^{}".format(valid_providers, provider) valid_providers = valid_providers.lstrip("|") parsed_endp = urlparse(endp) url_map.append(("(%s)/%s$" % (valid_providers, parsed_endp.path), functools.partial(self.handle_authn_request, binding_in=binding))) if self.expose_entityid_endpoint(): parsed_entity_id = urlparse(self.idp.config.entityid) url_map.append(("^{0}".format(parsed_entity_id.path[1:]), self._metadata_endpoint)) return url_map
[ "def", "_register_endpoints", "(", "self", ",", "providers", ")", ":", "url_map", "=", "[", "]", "for", "endp_category", "in", "self", ".", "endpoints", ":", "for", "binding", ",", "endp", "in", "self", ".", "endpoints", "[", "endp_category", "]", ".", "items", "(", ")", ":", "valid_providers", "=", "\"\"", "for", "provider", "in", "providers", ":", "valid_providers", "=", "\"{}|^{}\"", ".", "format", "(", "valid_providers", ",", "provider", ")", "valid_providers", "=", "valid_providers", ".", "lstrip", "(", "\"|\"", ")", "parsed_endp", "=", "urlparse", "(", "endp", ")", "url_map", ".", "append", "(", "(", "\"(%s)/%s$\"", "%", "(", "valid_providers", ",", "parsed_endp", ".", "path", ")", ",", "functools", ".", "partial", "(", "self", ".", "handle_authn_request", ",", "binding_in", "=", "binding", ")", ")", ")", "if", "self", ".", "expose_entityid_endpoint", "(", ")", ":", "parsed_entity_id", "=", "urlparse", "(", "self", ".", "idp", ".", "config", ".", "entityid", ")", "url_map", ".", "append", "(", "(", "\"^{0}\"", ".", "format", "(", "parsed_entity_id", ".", "path", "[", "1", ":", "]", ")", ",", "self", ".", "_metadata_endpoint", ")", ")", "return", "url_map" ]
Register methods to endpoints :type providers: list[str] :rtype: list[(str, ((satosa.context.Context, Any) -> satosa.response.Response, Any))] :param providers: A list of backend providers :return: A list of endpoint/method pairs
[ "Register", "methods", "to", "endpoints", ":", "type", "providers", ":", "list", "[", "str", "]", ":", "rtype", ":", "list", "[", "(", "str", "((", "satosa", ".", "context", ".", "Context", "Any", ")", "-", ">", "satosa", ".", "response", ".", "Response", "Any", "))", "]", ":", "param", "providers", ":", "A", "list", "of", "backend", "providers", ":", "return", ":", "A", "list", "of", "endpoint", "/", "method", "pairs" ]
python
train
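The record above builds one regex per endpoint that matches any backend provider name as a path prefix. A small sketch of how that "(^prov1|^prov2)/path$" pattern is assembled and used; the provider names and endpoint path are invented, and only stdlib string and regex work is involved:

import re
from urllib.parse import urlparse

providers = ["saml2", "oidc"]              # hypothetical backend provider names
endpoint = "sso/redirect"                  # hypothetical endpoint path

valid_providers = ""
for provider in providers:                 # same accumulation as in the record
    valid_providers = "{}|^{}".format(valid_providers, provider)
valid_providers = valid_providers.lstrip("|")

pattern = "(%s)/%s$" % (valid_providers, urlparse(endpoint).path)
print(pattern)                                          # (^saml2|^oidc)/sso/redirect$
print(bool(re.match(pattern, "saml2/sso/redirect")))    # True
print(bool(re.match(pattern, "other/sso/redirect")))    # False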
talkincode/txradius
txradius/radius/packet.py
https://github.com/talkincode/txradius/blob/b86fdbc9be41183680b82b07d3a8e8ea10926e01/txradius/radius/packet.py#L564-L571
def CreateReply(self, **attributes): """Create a new packet as a reply to this one. This method makes sure the authenticator and secret are copied over to the new instance. """ return CoAPacket(CoAACK, self.id, self.secret, self.authenticator, dict=self.dict, **attributes)
[ "def", "CreateReply", "(", "self", ",", "*", "*", "attributes", ")", ":", "return", "CoAPacket", "(", "CoAACK", ",", "self", ".", "id", ",", "self", ".", "secret", ",", "self", ".", "authenticator", ",", "dict", "=", "self", ".", "dict", ",", "*", "*", "attributes", ")" ]
Create a new packet as a reply to this one. This method makes sure the authenticator and secret are copied over to the new instance.
[ "Create", "a", "new", "packet", "as", "a", "reply", "to", "this", "one", ".", "This", "method", "makes", "sure", "the", "authenticator", "and", "secret", "are", "copied", "over", "to", "the", "new", "instance", "." ]
python
train
mitsei/dlkit
dlkit/records/assessment/qti/ordered_choice_records.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/qti/ordered_choice_records.py#L40-L45
def is_response_correct(self, response): """returns True if response evaluates to an Item Answer that is 100 percent correct""" for answer in self.my_osid_object.get_answers(): if self._is_match(response, answer): return True return False
[ "def", "is_response_correct", "(", "self", ",", "response", ")", ":", "for", "answer", "in", "self", ".", "my_osid_object", ".", "get_answers", "(", ")", ":", "if", "self", ".", "_is_match", "(", "response", ",", "answer", ")", ":", "return", "True", "return", "False" ]
returns True if response evaluates to an Item Answer that is 100 percent correct
[ "returns", "True", "if", "response", "evaluates", "to", "an", "Item", "Answer", "that", "is", "100", "percent", "correct" ]
python
train
PagerDuty/pagerduty-api-python-client
pypd/models/notification.py
https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/notification.py#L14-L46
def find(cls, *args, **kwargs): """ Find notifications. Optional kwargs are: since: datetime instance until: datetime instance If not specified, until will default to now(), and since will default to 30 days prior to until. As per PD spec, date range must not exceed 1 month. """ seconds = 60 * 60 * 24 * 30 # seconds in 30 days until = kwargs.pop('until', None) since = kwargs.pop('since', None) if until is None: until = datetime.datetime.now() if since is None: since = until - datetime.timedelta(seconds=seconds) dt = until - since if dt > datetime.timedelta(seconds=seconds): raise InvalidArguments(until, since) kwargs['since'] = since.isoformat() kwargs['until'] = until.isoformat() return getattr(Entity, 'find').__func__(cls, *args, **kwargs)
[ "def", "find", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "seconds", "=", "60", "*", "60", "*", "24", "*", "30", "# seconds in 30 days", "until", "=", "kwargs", ".", "pop", "(", "'until'", ",", "None", ")", "since", "=", "kwargs", ".", "pop", "(", "'since'", ",", "None", ")", "if", "until", "is", "None", ":", "until", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "if", "since", "is", "None", ":", "since", "=", "until", "-", "datetime", ".", "timedelta", "(", "seconds", "=", "seconds", ")", "dt", "=", "until", "-", "since", "if", "dt", ">", "datetime", ".", "timedelta", "(", "seconds", "=", "seconds", ")", ":", "raise", "InvalidArguments", "(", "until", ",", "since", ")", "kwargs", "[", "'since'", "]", "=", "since", ".", "isoformat", "(", ")", "kwargs", "[", "'until'", "]", "=", "until", ".", "isoformat", "(", ")", "return", "getattr", "(", "Entity", ",", "'find'", ")", ".", "__func__", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Find notifications. Optional kwargs are: since: datetime instance until: datetime instance If not specified, until will default to now(), and since will default to 30 days prior to until. As per PD spec, date range must not exceed 1 month.
[ "Find", "notifications", "." ]
python
train
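The record above defaults `until` to now, defaults `since` to 30 days earlier, and rejects windows longer than a month. A small stdlib sketch of that windowing logic; the project's InvalidArguments exception is replaced by ValueError to keep the sketch self-contained:

import datetime

def clamp_window(since=None, until=None):
    month = datetime.timedelta(seconds=60 * 60 * 24 * 30)   # 30 days
    until = until or datetime.datetime.now()
    since = since or until - month
    if until - since > month:
        raise ValueError("date range must not exceed one month")
    return since.isoformat(), until.isoformat()

print(clamp_window())                       # defaults to the last 30 days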
ucbvislab/radiotool
radiotool/composer/composition.py
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/composition.py#L98-L105
def add_segments(self, segments): """Add a list of segments to the composition :param segments: Segments to add to composition :type segments: list of :py:class:`radiotool.composer.Segment` """ self.tracks.update([seg.track for seg in segments]) self.segments.extend(segments)
[ "def", "add_segments", "(", "self", ",", "segments", ")", ":", "self", ".", "tracks", ".", "update", "(", "[", "seg", ".", "track", "for", "seg", "in", "segments", "]", ")", "self", ".", "segments", ".", "extend", "(", "segments", ")" ]
Add a list of segments to the composition :param segments: Segments to add to composition :type segments: list of :py:class:`radiotool.composer.Segment`
[ "Add", "a", "list", "of", "segments", "to", "the", "composition" ]
python
train
Esri/ArcREST
src/arcrest/hostedservice/service.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/hostedservice/service.py#L535-L552
def refresh(self, serviceDefinition=True): """ The refresh operation refreshes a service, which clears the web server cache for the service. """ url = self._url + "/MapServer/refresh" params = { "f" : "json", "serviceDefinition" : serviceDefinition } res = self._post(url=self._url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port) self.__init() return res
[ "def", "refresh", "(", "self", ",", "serviceDefinition", "=", "True", ")", ":", "url", "=", "self", ".", "_url", "+", "\"/MapServer/refresh\"", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"serviceDefinition\"", ":", "serviceDefinition", "}", "res", "=", "self", ".", "_post", "(", "url", "=", "self", ".", "_url", ",", "param_dict", "=", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")", "self", ".", "__init", "(", ")", "return", "res" ]
The refresh operation refreshes a service, which clears the web server cache for the service.
[ "The", "refresh", "operation", "refreshes", "a", "service", "which", "clears", "the", "web", "server", "cache", "for", "the", "service", "." ]
python
train
census-instrumentation/opencensus-python
contrib/opencensus-ext-google-cloud-clientlibs/opencensus/ext/google_cloud_clientlibs/trace.py
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-google-cloud-clientlibs/opencensus/ext/google_cloud_clientlibs/trace.py#L50-L76
def trace_grpc(tracer=None): """Integrate with gRPC.""" # Wrap google.cloud._helpers.make_secure_channel make_secure_channel_func = getattr(_helpers, MAKE_SECURE_CHANNEL) make_secure_channel_wrapped = wrap_make_secure_channel( make_secure_channel_func, tracer) setattr( _helpers, MAKE_SECURE_CHANNEL, make_secure_channel_wrapped) # Wrap the grpc.insecure_channel. insecure_channel_func = getattr(grpc, INSECURE_CHANNEL) insecure_channel_wrapped = wrap_insecure_channel( insecure_channel_func, tracer) setattr( grpc, INSECURE_CHANNEL, insecure_channel_wrapped) # Wrap google.api_core.grpc_helpers.create_channel create_channel_func = getattr(grpc_helpers, CREATE_CHANNEL) create_channel_wrapped = wrap_create_channel(create_channel_func, tracer) setattr( grpc_helpers, CREATE_CHANNEL, create_channel_wrapped)
[ "def", "trace_grpc", "(", "tracer", "=", "None", ")", ":", "# Wrap google.cloud._helpers.make_secure_channel", "make_secure_channel_func", "=", "getattr", "(", "_helpers", ",", "MAKE_SECURE_CHANNEL", ")", "make_secure_channel_wrapped", "=", "wrap_make_secure_channel", "(", "make_secure_channel_func", ",", "tracer", ")", "setattr", "(", "_helpers", ",", "MAKE_SECURE_CHANNEL", ",", "make_secure_channel_wrapped", ")", "# Wrap the grpc.insecure_channel.", "insecure_channel_func", "=", "getattr", "(", "grpc", ",", "INSECURE_CHANNEL", ")", "insecure_channel_wrapped", "=", "wrap_insecure_channel", "(", "insecure_channel_func", ",", "tracer", ")", "setattr", "(", "grpc", ",", "INSECURE_CHANNEL", ",", "insecure_channel_wrapped", ")", "# Wrap google.api_core.grpc_helpers.create_channel", "create_channel_func", "=", "getattr", "(", "grpc_helpers", ",", "CREATE_CHANNEL", ")", "create_channel_wrapped", "=", "wrap_create_channel", "(", "create_channel_func", ",", "tracer", ")", "setattr", "(", "grpc_helpers", ",", "CREATE_CHANNEL", ",", "create_channel_wrapped", ")" ]
Integrate with gRPC.
[ "Integrate", "with", "gRPC", "." ]
python
train
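The record above instruments channel factories by fetching each function with getattr, wrapping it, and writing the wrapper back with setattr. A minimal generic sketch of that monkey-patching pattern; a toy module stands in for grpc and the google.cloud helpers:

import types

toy = types.ModuleType("toy")                      # stand-in for the real module
toy.insecure_channel = lambda target: "channel to %s" % target

def wrap_insecure_channel(func):
    def wrapped(target):
        print("tracing channel creation for", target)   # instrumentation hook
        return func(target)
    return wrapped

original = getattr(toy, "insecure_channel")
setattr(toy, "insecure_channel", wrap_insecure_channel(original))
print(toy.insecure_channel("localhost:50051"))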
StorjOld/plowshare-wrapper
plowshare/plowshare.py
https://github.com/StorjOld/plowshare-wrapper/blob/edb38d01fd1decabf92cc4f536d7404dca6a977c/plowshare/plowshare.py#L258-L273
def parse_output(self, hostname, output): """Parse plowup's output. For now, we just return the last line. :param hostname: Name of host you are working with. :type hostname: str :param output: Dictionary containing information about a plowshare action. :type output: dict :returns: Parsed and decoded output list. :rtype: list """ if isinstance(output, bytes): output = output.decode('utf-8') return output.split()[-1]
[ "def", "parse_output", "(", "self", ",", "hostname", ",", "output", ")", ":", "if", "isinstance", "(", "output", ",", "bytes", ")", ":", "output", "=", "output", ".", "decode", "(", "'utf-8'", ")", "return", "output", ".", "split", "(", ")", "[", "-", "1", "]" ]
Parse plowup's output. For now, we just return the last line. :param hostname: Name of host you are working with. :type hostname: str :param output: Dictionary containing information about a plowshare action. :type output: dict :returns: Parsed and decoded output list. :rtype: list
[ "Parse", "plowup", "s", "output", "." ]
python
train
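The record above decodes bytes output and returns text.split()[-1]. A short sketch with made-up plowup output; note that .split() is whitespace-based, so the result is the last token rather than literally the last line the docstring mentions:

raw = b"100% uploaded\nhttps://multiupload.example/abc123\n"
text = raw.decode("utf-8") if isinstance(raw, bytes) else raw
print(text.split()[-1])        # https://multiupload.example/abc123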
mila-iqia/fuel
fuel/converters/ilsvrc2010.py
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/ilsvrc2010.py#L390-L426
def load_from_tar_or_patch(tar, image_filename, patch_images): """Do everything necessary to process an image inside a TAR. Parameters ---------- tar : `TarFile` instance The tar from which to read `image_filename`. image_filename : str Fully-qualified path inside of `tar` from which to read an image file. patch_images : dict A dictionary containing filenames (without path) of replacements to be substituted in place of the version of the same file found in `tar`. Returns ------- image_data : bytes The JPEG bytes representing either the image from the TAR archive or its replacement from the patch dictionary. patched : bool True if the image was retrieved from the patch dictionary. False if it was retrieved from the TAR file. """ patched = True image_bytes = patch_images.get(os.path.basename(image_filename), None) if image_bytes is None: patched = False try: image_bytes = tar.extractfile(image_filename).read() numpy.array(Image.open(io.BytesIO(image_bytes))) except (IOError, OSError): with gzip.GzipFile(fileobj=tar.extractfile(image_filename)) as gz: image_bytes = gz.read() numpy.array(Image.open(io.BytesIO(image_bytes))) return image_bytes, patched
[ "def", "load_from_tar_or_patch", "(", "tar", ",", "image_filename", ",", "patch_images", ")", ":", "patched", "=", "True", "image_bytes", "=", "patch_images", ".", "get", "(", "os", ".", "path", ".", "basename", "(", "image_filename", ")", ",", "None", ")", "if", "image_bytes", "is", "None", ":", "patched", "=", "False", "try", ":", "image_bytes", "=", "tar", ".", "extractfile", "(", "image_filename", ")", ".", "read", "(", ")", "numpy", ".", "array", "(", "Image", ".", "open", "(", "io", ".", "BytesIO", "(", "image_bytes", ")", ")", ")", "except", "(", "IOError", ",", "OSError", ")", ":", "with", "gzip", ".", "GzipFile", "(", "fileobj", "=", "tar", ".", "extractfile", "(", "image_filename", ")", ")", "as", "gz", ":", "image_bytes", "=", "gz", ".", "read", "(", ")", "numpy", ".", "array", "(", "Image", ".", "open", "(", "io", ".", "BytesIO", "(", "image_bytes", ")", ")", ")", "return", "image_bytes", ",", "patched" ]
Do everything necessary to process an image inside a TAR. Parameters ---------- tar : `TarFile` instance The tar from which to read `image_filename`. image_filename : str Fully-qualified path inside of `tar` from which to read an image file. patch_images : dict A dictionary containing filenames (without path) of replacements to be substituted in place of the version of the same file found in `tar`. Returns ------- image_data : bytes The JPEG bytes representing either the image from the TAR archive or its replacement from the patch dictionary. patched : bool True if the image was retrieved from the patch dictionary. False if it was retrieved from the TAR file.
[ "Do", "everything", "necessary", "to", "process", "an", "image", "inside", "a", "TAR", "." ]
python
train
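The record above prefers a patched replacement, otherwise reads the member from the TAR and falls back to gunzipping it when the raw bytes are not a readable image. A self-contained sketch of that TAR-then-gzip fallback; the PIL validation step is replaced here by a simple JPEG magic-number check, and the in-memory archive and file names are invented:

import gzip, io, tarfile

# Build a tiny in-memory TAR whose member was gzip-compressed by mistake,
# mimicking the corrupted-member case the record guards against.
payload = b"\xff\xd8\xff\xe0 fake JPEG bytes"
buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w") as tar:
    data = gzip.compress(payload)
    info = tarfile.TarInfo("n01440764_123.JPEG")
    info.size = len(data)
    tar.addfile(info, io.BytesIO(data))
buf.seek(0)

patch_images = {}                                  # no patched replacement available
with tarfile.open(fileobj=buf) as tar:
    raw = tar.extractfile("n01440764_123.JPEG").read()
    image_bytes = patch_images.get("n01440764_123.JPEG")
    if image_bytes is None:
        # Stand-in for the PIL check in the record: keep the bytes if they look
        # like a JPEG, otherwise gunzip the member and use the result.
        image_bytes = raw if raw[:2] == b"\xff\xd8" else gzip.decompress(raw)
print(image_bytes[:4])                             # b'\xff\xd8\xff\xe0'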
apache/incubator-heron
third_party/python/cpplint/cpplint.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/third_party/python/cpplint/cpplint.py#L5479-L5498
def ExpectingFunctionArgs(clean_lines, linenum): """Checks whether where function type arguments are expected. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if the line at 'linenum' is inside something that expects arguments of function types. """ line = clean_lines.elided[linenum] return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or (linenum >= 2 and (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$', clean_lines.elided[linenum - 1]) or Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$', clean_lines.elided[linenum - 2]) or Search(r'\bstd::m?function\s*\<\s*$', clean_lines.elided[linenum - 1]))))
[ "def", "ExpectingFunctionArgs", "(", "clean_lines", ",", "linenum", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "return", "(", "Match", "(", "r'^\\s*MOCK_(CONST_)?METHOD\\d+(_T)?\\('", ",", "line", ")", "or", "(", "linenum", ">=", "2", "and", "(", "Match", "(", "r'^\\s*MOCK_(?:CONST_)?METHOD\\d+(?:_T)?\\((?:\\S+,)?\\s*$'", ",", "clean_lines", ".", "elided", "[", "linenum", "-", "1", "]", ")", "or", "Match", "(", "r'^\\s*MOCK_(?:CONST_)?METHOD\\d+(?:_T)?\\(\\s*$'", ",", "clean_lines", ".", "elided", "[", "linenum", "-", "2", "]", ")", "or", "Search", "(", "r'\\bstd::m?function\\s*\\<\\s*$'", ",", "clean_lines", ".", "elided", "[", "linenum", "-", "1", "]", ")", ")", ")", ")" ]
Checks whether where function type arguments are expected. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if the line at 'linenum' is inside something that expects arguments of function types.
[ "Checks", "whether", "where", "function", "type", "arguments", "are", "expected", "." ]
python
valid
townsenddw/jhubctl
jhubctl/hubs/hubs.py
https://github.com/townsenddw/jhubctl/blob/c8c20f86a16e9d01dd90e4607d81423417cc773b/jhubctl/hubs/hubs.py#L22-L41
def get_hubs(self): """Get a list of hubs names. Returns ------- hubs : list List of hub names """ # Use helm to get a list of hubs. output = helm( 'list', '-q' ) # Check if an error occurred. if output.returncode != 0: print("Something went wrong!") print(output.stderr) else: hubs = output.stdout.split() return hubs
[ "def", "get_hubs", "(", "self", ")", ":", "# Use helm to get a list of hubs.", "output", "=", "helm", "(", "'list'", ",", "'-q'", ")", "# Check if an error occurred.", "if", "output", ".", "returncode", "!=", "0", ":", "print", "(", "\"Something went wrong!\"", ")", "print", "(", "output", ".", "stderr", ")", "else", ":", "hubs", "=", "output", ".", "stdout", ".", "split", "(", ")", "return", "hubs" ]
Get a list of hubs names. Returns ------- hubs : list List of hub names
[ "Get", "a", "list", "of", "hubs", "names", ".", "Returns", "-------", "hubs", ":", "list", "List", "of", "hub", "names" ]
python
train
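The record above shells out to `helm list -q` and splits stdout into hub names. A stdlib sketch of the same call with subprocess; the record itself uses an sh-style helm() wrapper, which is assumed to behave like this, and a working helm binary is required for meaningful output:

import subprocess

def get_hubs():
    # Equivalent of the record's helm('list', '-q') call, via subprocess.
    output = subprocess.run(["helm", "list", "-q"],
                            capture_output=True, text=True)
    if output.returncode != 0:
        print("Something went wrong!")
        print(output.stderr)
        return None
    return output.stdout.split()

print(get_hubs())          # e.g. ['jhub-dev', 'jhub-prod'] on a cluster with hubs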
BD2KGenomics/protect
src/protect/addons/assess_immunotherapy_resistance.py
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/addons/assess_immunotherapy_resistance.py#L40-L130
def assess_itx_resistance(job, gene_expression, univ_options, reports_options): """ Assess the prevalence of the various genes in various cancer pathways and return a report in the txt format. :param toil.fileStore.FileID gene_expression: fsID for the rsem gene expression file :param dict univ_options: Dict of universal options used by almost all tools :param dict reports_options: Options specific to reporting modules :return: The fsID for the itx resistance report file :rtype: toil.fileStore.FileID """ work_dir = os.getcwd() tumor_type = univ_options['tumor_type'] # Get the input files input_files = { 'rsem_quant.tsv': gene_expression, 'itx_resistance.tsv.tar.gz': reports_options['itx_resistance_file'], 'immune_resistance_pathways.json.tar.gz': reports_options['immune_resistance_pathways_file']} input_files = get_files_from_filestore(job, input_files, work_dir, docker=False) input_files['itx_resistance.tsv'] = untargz(input_files['itx_resistance.tsv.tar.gz'], work_dir) input_files['immune_resistance_pathways.json'] = untargz(input_files['immune_resistance_pathways.json.tar.gz'], work_dir) full_data = pd.read_table(input_files['itx_resistance.tsv'], index_col=0) # Read pathways descriptions and cancer pathway data with open(input_files['immune_resistance_pathways.json']) as json_file: json_data = json.load(json_file) # Read patient file patient_df = pd.read_csv('rsem_quant.tsv', sep=' ', delimiter='\t', header='infer', index_col=0) patient_df.index = (patient_df.index).str.replace('\\..*$', '') with open('immunotherapy_resistance_report.txt', 'w') as report_file: # Check if data exsits for specified tumor type try: pathways = json_data['Cancer_to_pathway'][tumor_type] except KeyError: print('Data not available for ' + tumor_type, file=report_file) else: # If data exists, write a report for pathway in pathways: up_is_good = json_data['Pathways'][pathway]['up_is_good'] if up_is_good: comp_fn = lambda x, y: x >= y else: comp_fn = lambda x, y: x < y # Describe pathway and genes for it print('Pathway: ' + pathway + '\n', file=report_file) print ('Papers: ' + json_data['Pathways'][pathway]['paper'], file=report_file) description = json_data['Pathways'][pathway]['description'] print('Description of pathway:\n' + textwrap.fill(description, width=100), file=report_file) print('Pathway genes: ', file=report_file) print('\t{:10}{:<20}{:<20}{:<12}'.format('Gene', 'GTEX Median', 'TCGA N Median', 'Observed'), file=report_file) status = [] # Write TCGA, GTEX, and observed values for gene in json_data['Pathways'][pathway]['genes']: gtex = '{0:.2f}'.format( float(full_data.loc[gene, TCGAToGTEx[tumor_type]])) \ if gene in full_data.index else 'NA' tcga = '{0:.2f}'.format( float(full_data.loc[gene, tumor_type + ' normal'])) \ if gene in full_data.index else 'NA' tpm_value = '{0:.2f}'.format(float(patient_df.loc[gene, 'TPM'])) \ if gene in patient_df.index else 'NA' ensg = json_data['Pathways'][pathway]['genes'][gene] print('\t{:10}{:<20}{:<20}{:<12}'.format(ensg, gtex, tcga, tpm_value), file=report_file) if gtex != 'NA' and tpm_value != 'NA': tcga_bool = comp_fn(float(tpm_value), float(tcga)) gtex_bool = comp_fn(float(tpm_value), float(gtex)) status.append(tcga_bool and gtex_bool) else: status.append(False) # Based on the number of genes with expression values above normal, assess the status print ('Status: ' + json_data['Pathways'][pathway]['status'][ str(sum(status) >= 0.75 * len(status))] + '\n', file=report_file) output_file = job.fileStore.writeGlobalFile(report_file.name) export_results(job, 
output_file, report_file.name, univ_options, subfolder='reports') job.fileStore.logToMaster('Ran create immunotherapy resistance report on %s successfully' % univ_options['patient']) return output_file
[ "def", "assess_itx_resistance", "(", "job", ",", "gene_expression", ",", "univ_options", ",", "reports_options", ")", ":", "work_dir", "=", "os", ".", "getcwd", "(", ")", "tumor_type", "=", "univ_options", "[", "'tumor_type'", "]", "# Get the input files", "input_files", "=", "{", "'rsem_quant.tsv'", ":", "gene_expression", ",", "'itx_resistance.tsv.tar.gz'", ":", "reports_options", "[", "'itx_resistance_file'", "]", ",", "'immune_resistance_pathways.json.tar.gz'", ":", "reports_options", "[", "'immune_resistance_pathways_file'", "]", "}", "input_files", "=", "get_files_from_filestore", "(", "job", ",", "input_files", ",", "work_dir", ",", "docker", "=", "False", ")", "input_files", "[", "'itx_resistance.tsv'", "]", "=", "untargz", "(", "input_files", "[", "'itx_resistance.tsv.tar.gz'", "]", ",", "work_dir", ")", "input_files", "[", "'immune_resistance_pathways.json'", "]", "=", "untargz", "(", "input_files", "[", "'immune_resistance_pathways.json.tar.gz'", "]", ",", "work_dir", ")", "full_data", "=", "pd", ".", "read_table", "(", "input_files", "[", "'itx_resistance.tsv'", "]", ",", "index_col", "=", "0", ")", "# Read pathways descriptions and cancer pathway data", "with", "open", "(", "input_files", "[", "'immune_resistance_pathways.json'", "]", ")", "as", "json_file", ":", "json_data", "=", "json", ".", "load", "(", "json_file", ")", "# Read patient file", "patient_df", "=", "pd", ".", "read_csv", "(", "'rsem_quant.tsv'", ",", "sep", "=", "' '", ",", "delimiter", "=", "'\\t'", ",", "header", "=", "'infer'", ",", "index_col", "=", "0", ")", "patient_df", ".", "index", "=", "(", "patient_df", ".", "index", ")", ".", "str", ".", "replace", "(", "'\\\\..*$'", ",", "''", ")", "with", "open", "(", "'immunotherapy_resistance_report.txt'", ",", "'w'", ")", "as", "report_file", ":", "# Check if data exsits for specified tumor type", "try", ":", "pathways", "=", "json_data", "[", "'Cancer_to_pathway'", "]", "[", "tumor_type", "]", "except", "KeyError", ":", "print", "(", "'Data not available for '", "+", "tumor_type", ",", "file", "=", "report_file", ")", "else", ":", "# If data exists, write a report", "for", "pathway", "in", "pathways", ":", "up_is_good", "=", "json_data", "[", "'Pathways'", "]", "[", "pathway", "]", "[", "'up_is_good'", "]", "if", "up_is_good", ":", "comp_fn", "=", "lambda", "x", ",", "y", ":", "x", ">=", "y", "else", ":", "comp_fn", "=", "lambda", "x", ",", "y", ":", "x", "<", "y", "# Describe pathway and genes for it", "print", "(", "'Pathway: '", "+", "pathway", "+", "'\\n'", ",", "file", "=", "report_file", ")", "print", "(", "'Papers: '", "+", "json_data", "[", "'Pathways'", "]", "[", "pathway", "]", "[", "'paper'", "]", ",", "file", "=", "report_file", ")", "description", "=", "json_data", "[", "'Pathways'", "]", "[", "pathway", "]", "[", "'description'", "]", "print", "(", "'Description of pathway:\\n'", "+", "textwrap", ".", "fill", "(", "description", ",", "width", "=", "100", ")", ",", "file", "=", "report_file", ")", "print", "(", "'Pathway genes: '", ",", "file", "=", "report_file", ")", "print", "(", "'\\t{:10}{:<20}{:<20}{:<12}'", ".", "format", "(", "'Gene'", ",", "'GTEX Median'", ",", "'TCGA N Median'", ",", "'Observed'", ")", ",", "file", "=", "report_file", ")", "status", "=", "[", "]", "# Write TCGA, GTEX, and observed values", "for", "gene", "in", "json_data", "[", "'Pathways'", "]", "[", "pathway", "]", "[", "'genes'", "]", ":", "gtex", "=", "'{0:.2f}'", ".", "format", "(", "float", "(", "full_data", ".", "loc", "[", "gene", ",", "TCGAToGTEx", "[", 
"tumor_type", "]", "]", ")", ")", "if", "gene", "in", "full_data", ".", "index", "else", "'NA'", "tcga", "=", "'{0:.2f}'", ".", "format", "(", "float", "(", "full_data", ".", "loc", "[", "gene", ",", "tumor_type", "+", "' normal'", "]", ")", ")", "if", "gene", "in", "full_data", ".", "index", "else", "'NA'", "tpm_value", "=", "'{0:.2f}'", ".", "format", "(", "float", "(", "patient_df", ".", "loc", "[", "gene", ",", "'TPM'", "]", ")", ")", "if", "gene", "in", "patient_df", ".", "index", "else", "'NA'", "ensg", "=", "json_data", "[", "'Pathways'", "]", "[", "pathway", "]", "[", "'genes'", "]", "[", "gene", "]", "print", "(", "'\\t{:10}{:<20}{:<20}{:<12}'", ".", "format", "(", "ensg", ",", "gtex", ",", "tcga", ",", "tpm_value", ")", ",", "file", "=", "report_file", ")", "if", "gtex", "!=", "'NA'", "and", "tpm_value", "!=", "'NA'", ":", "tcga_bool", "=", "comp_fn", "(", "float", "(", "tpm_value", ")", ",", "float", "(", "tcga", ")", ")", "gtex_bool", "=", "comp_fn", "(", "float", "(", "tpm_value", ")", ",", "float", "(", "gtex", ")", ")", "status", ".", "append", "(", "tcga_bool", "and", "gtex_bool", ")", "else", ":", "status", ".", "append", "(", "False", ")", "# Based on the number of genes with expression values above normal, assess the status", "print", "(", "'Status: '", "+", "json_data", "[", "'Pathways'", "]", "[", "pathway", "]", "[", "'status'", "]", "[", "str", "(", "sum", "(", "status", ")", ">=", "0.75", "*", "len", "(", "status", ")", ")", "]", "+", "'\\n'", ",", "file", "=", "report_file", ")", "output_file", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "report_file", ".", "name", ")", "export_results", "(", "job", ",", "output_file", ",", "report_file", ".", "name", ",", "univ_options", ",", "subfolder", "=", "'reports'", ")", "job", ".", "fileStore", ".", "logToMaster", "(", "'Ran create immunotherapy resistance report on %s successfully'", "%", "univ_options", "[", "'patient'", "]", ")", "return", "output_file" ]
Assess the prevalence of the various genes in various cancer pathways and return a report in the txt format. :param toil.fileStore.FileID gene_expression: fsID for the rsem gene expression file :param dict univ_options: Dict of universal options used by almost all tools :param dict reports_options: Options specific to reporting modules :return: The fsID for the itx resistance report file :rtype: toil.fileStore.FileID
[ "Assess", "the", "prevalence", "of", "the", "various", "genes", "in", "various", "cancer", "pathways", "and", "return", "a", "report", "in", "the", "txt", "format", "." ]
python
train
woolfson-group/isambard
isambard/optimisation/evo_optimizers.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/optimisation/evo_optimizers.py#L366-L374
def _initialize_pop(self, pop_size): """Assigns indices to individuals in population.""" self.toolbox.register("individual", self._generate) self.toolbox.register("population", tools.initRepeat, list, self.toolbox.individual) self.population = self.toolbox.population(n=pop_size) self.assign_fitnesses(self.population) self._model_count += len(self.population) return
[ "def", "_initialize_pop", "(", "self", ",", "pop_size", ")", ":", "self", ".", "toolbox", ".", "register", "(", "\"individual\"", ",", "self", ".", "_generate", ")", "self", ".", "toolbox", ".", "register", "(", "\"population\"", ",", "tools", ".", "initRepeat", ",", "list", ",", "self", ".", "toolbox", ".", "individual", ")", "self", ".", "population", "=", "self", ".", "toolbox", ".", "population", "(", "n", "=", "pop_size", ")", "self", ".", "assign_fitnesses", "(", "self", ".", "population", ")", "self", ".", "_model_count", "+=", "len", "(", "self", ".", "population", ")", "return" ]
Assigns indices to individuals in population.
[ "Assigns", "indices", "to", "individuals", "in", "population", "." ]
python
train
kgiusti/pyngus
pyngus/connection.py
https://github.com/kgiusti/pyngus/blob/5392392046989f1bb84ba938c30e4d48311075f1/pyngus/connection.py#L856-L861
def _ep_active(self): """Both ends of the Endpoint have become active.""" LOG.debug("Connection is up") if self._handler: with self._callback_lock: self._handler.connection_active(self)
[ "def", "_ep_active", "(", "self", ")", ":", "LOG", ".", "debug", "(", "\"Connection is up\"", ")", "if", "self", ".", "_handler", ":", "with", "self", ".", "_callback_lock", ":", "self", ".", "_handler", ".", "connection_active", "(", "self", ")" ]
Both ends of the Endpoint have become active.
[ "Both", "ends", "of", "the", "Endpoint", "have", "become", "active", "." ]
python
test
seequent/properties
properties/base/base.py
https://github.com/seequent/properties/blob/096b07012fff86b0a880c8c018320c3b512751b9/properties/base/base.py#L592-L603
def _deserialize_class(cls, input_cls_name, trusted, strict): """Returns the HasProperties class to use for deserialization""" if not input_cls_name or input_cls_name == cls.__name__: return cls if trusted and input_cls_name in cls._REGISTRY: return cls._REGISTRY[input_cls_name] if strict: raise ValueError( 'Class name {} from deserialization input dictionary does ' 'not match input class {}'.format(input_cls_name, cls.__name__) ) return cls
[ "def", "_deserialize_class", "(", "cls", ",", "input_cls_name", ",", "trusted", ",", "strict", ")", ":", "if", "not", "input_cls_name", "or", "input_cls_name", "==", "cls", ".", "__name__", ":", "return", "cls", "if", "trusted", "and", "input_cls_name", "in", "cls", ".", "_REGISTRY", ":", "return", "cls", ".", "_REGISTRY", "[", "input_cls_name", "]", "if", "strict", ":", "raise", "ValueError", "(", "'Class name {} from deserialization input dictionary does '", "'not match input class {}'", ".", "format", "(", "input_cls_name", ",", "cls", ".", "__name__", ")", ")", "return", "cls" ]
Returns the HasProperties class to use for deserialization
[ "Returns", "the", "HasProperties", "class", "to", "use", "for", "deserialization" ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/tools/MAVExplorer.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/tools/MAVExplorer.py#L74-L89
def menu_callback(m): '''called on menu selection''' if m.returnkey.startswith('# '): cmd = m.returnkey[2:] if m.handler is not None: if m.handler_result is None: return cmd += m.handler_result process_stdin(cmd) elif m.returnkey == 'menuSettings': wxsettings.WXSettings(mestate.settings) elif m.returnkey.startswith("mode-"): idx = int(m.returnkey[5:]) mestate.flightmode_selections[idx] = m.IsChecked() else: print('Unknown menu selection: %s' % m.returnkey)
[ "def", "menu_callback", "(", "m", ")", ":", "if", "m", ".", "returnkey", ".", "startswith", "(", "'# '", ")", ":", "cmd", "=", "m", ".", "returnkey", "[", "2", ":", "]", "if", "m", ".", "handler", "is", "not", "None", ":", "if", "m", ".", "handler_result", "is", "None", ":", "return", "cmd", "+=", "m", ".", "handler_result", "process_stdin", "(", "cmd", ")", "elif", "m", ".", "returnkey", "==", "'menuSettings'", ":", "wxsettings", ".", "WXSettings", "(", "mestate", ".", "settings", ")", "elif", "m", ".", "returnkey", ".", "startswith", "(", "\"mode-\"", ")", ":", "idx", "=", "int", "(", "m", ".", "returnkey", "[", "5", ":", "]", ")", "mestate", ".", "flightmode_selections", "[", "idx", "]", "=", "m", ".", "IsChecked", "(", ")", "else", ":", "print", "(", "'Unknown menu selection: %s'", "%", "m", ".", "returnkey", ")" ]
called on menu selection
[ "called", "on", "menu", "selection" ]
python
train
rueckstiess/mtools
mtools/util/logevent.py
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logevent.py#L282-L333
def _match_datetime_pattern(self, tokens): """ Match the datetime pattern at the beginning of the token list. There are several formats that this method needs to understand and distinguish between (see MongoDB's SERVER-7965): ctime-pre2.4 Wed Dec 31 19:00:00 ctime Wed Dec 31 19:00:00.000 iso8601-utc 1970-01-01T00:00:00.000Z iso8601-local 1969-12-31T19:00:00.000+0500 """ # first check: less than 4 tokens can't be ctime assume_iso8601_format = len(tokens) < 4 # check for ctime-pre-2.4 or ctime format if not assume_iso8601_format: weekday, month, day, time = tokens[:4] if (len(tokens) < 4 or (weekday not in self.weekdays) or (month not in self.months) or not day.isdigit()): assume_iso8601_format = True if assume_iso8601_format: # sanity check, because the dateutil parser could interpret # any numbers as a valid date if not re.match(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}', tokens[0]): return None # convinced that this is a ISO-8601 format, the dateutil parser # will do the rest dt = dateutil.parser.parse(tokens[0]) self._datetime_format = "iso8601-utc" \ if tokens[0].endswith('Z') else "iso8601-local" else: # assume current year unless self.year_rollover # is set (from LogFile) year = datetime.now().year dt = dateutil.parser.parse(' '.join(tokens[: 4]), default=datetime(year, 1, 1)) if dt.tzinfo is None: dt = dt.replace(tzinfo=tzutc()) if self._year_rollover and dt > self._year_rollover: dt = dt.replace(year=year - 1) self._datetime_format = "ctime" \ if '.' in tokens[3] else "ctime-pre2.4" return dt
[ "def", "_match_datetime_pattern", "(", "self", ",", "tokens", ")", ":", "# first check: less than 4 tokens can't be ctime", "assume_iso8601_format", "=", "len", "(", "tokens", ")", "<", "4", "# check for ctime-pre-2.4 or ctime format", "if", "not", "assume_iso8601_format", ":", "weekday", ",", "month", ",", "day", ",", "time", "=", "tokens", "[", ":", "4", "]", "if", "(", "len", "(", "tokens", ")", "<", "4", "or", "(", "weekday", "not", "in", "self", ".", "weekdays", ")", "or", "(", "month", "not", "in", "self", ".", "months", ")", "or", "not", "day", ".", "isdigit", "(", ")", ")", ":", "assume_iso8601_format", "=", "True", "if", "assume_iso8601_format", ":", "# sanity check, because the dateutil parser could interpret", "# any numbers as a valid date", "if", "not", "re", ".", "match", "(", "r'\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.\\d{3}'", ",", "tokens", "[", "0", "]", ")", ":", "return", "None", "# convinced that this is a ISO-8601 format, the dateutil parser", "# will do the rest", "dt", "=", "dateutil", ".", "parser", ".", "parse", "(", "tokens", "[", "0", "]", ")", "self", ".", "_datetime_format", "=", "\"iso8601-utc\"", "if", "tokens", "[", "0", "]", ".", "endswith", "(", "'Z'", ")", "else", "\"iso8601-local\"", "else", ":", "# assume current year unless self.year_rollover", "# is set (from LogFile)", "year", "=", "datetime", ".", "now", "(", ")", ".", "year", "dt", "=", "dateutil", ".", "parser", ".", "parse", "(", "' '", ".", "join", "(", "tokens", "[", ":", "4", "]", ")", ",", "default", "=", "datetime", "(", "year", ",", "1", ",", "1", ")", ")", "if", "dt", ".", "tzinfo", "is", "None", ":", "dt", "=", "dt", ".", "replace", "(", "tzinfo", "=", "tzutc", "(", ")", ")", "if", "self", ".", "_year_rollover", "and", "dt", ">", "self", ".", "_year_rollover", ":", "dt", "=", "dt", ".", "replace", "(", "year", "=", "year", "-", "1", ")", "self", ".", "_datetime_format", "=", "\"ctime\"", "if", "'.'", "in", "tokens", "[", "3", "]", "else", "\"ctime-pre2.4\"", "return", "dt" ]
Match the datetime pattern at the beginning of the token list. There are several formats that this method needs to understand and distinguish between (see MongoDB's SERVER-7965): ctime-pre2.4 Wed Dec 31 19:00:00 ctime Wed Dec 31 19:00:00.000 iso8601-utc 1970-01-01T00:00:00.000Z iso8601-local 1969-12-31T19:00:00.000+0500
[ "Match", "the", "datetime", "pattern", "at", "the", "beginning", "of", "the", "token", "list", "." ]
python
train
mozilla/socorrolib
socorrolib/lib/transform_rules.py
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/transform_rules.py#L254-L278
def act(self, *args, **kwargs): """gather a rules parameters together and run the predicate. If that returns True, then go on and run the action function returns: a tuple indicating the results of applying the predicate and the action function: (False, None) - the predicate failed, action function not run (True, True) - the predicate and action functions succeeded (True, False) - the predicate succeeded, but the action function failed""" pred_args = tuple(args) + tuple(self.predicate_args) pred_kwargs = kwargs.copy() pred_kwargs.update(self.predicate_kwargs) if self.function_invocation_proxy(self.predicate, pred_args, pred_kwargs): act_args = tuple(args) + tuple(self.action_args) act_kwargs = kwargs.copy() act_kwargs.update(self.action_kwargs) bool_result = self.function_invocation_proxy(self.action, act_args, act_kwargs) return (True, bool_result) else: return (False, None)
[ "def", "act", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "pred_args", "=", "tuple", "(", "args", ")", "+", "tuple", "(", "self", ".", "predicate_args", ")", "pred_kwargs", "=", "kwargs", ".", "copy", "(", ")", "pred_kwargs", ".", "update", "(", "self", ".", "predicate_kwargs", ")", "if", "self", ".", "function_invocation_proxy", "(", "self", ".", "predicate", ",", "pred_args", ",", "pred_kwargs", ")", ":", "act_args", "=", "tuple", "(", "args", ")", "+", "tuple", "(", "self", ".", "action_args", ")", "act_kwargs", "=", "kwargs", ".", "copy", "(", ")", "act_kwargs", ".", "update", "(", "self", ".", "action_kwargs", ")", "bool_result", "=", "self", ".", "function_invocation_proxy", "(", "self", ".", "action", ",", "act_args", ",", "act_kwargs", ")", "return", "(", "True", ",", "bool_result", ")", "else", ":", "return", "(", "False", ",", "None", ")" ]
gather a rule's parameters together and run the predicate. If that returns True, then go on and run the action function. returns: a tuple indicating the results of applying the predicate and the action function: (False, None) - the predicate failed, action function not run (True, True) - the predicate and action functions succeeded (True, False) - the predicate succeeded, but the action function failed
[ "gather", "a", "rules", "parameters", "together", "and", "run", "the", "predicate", ".", "If", "that", "returns", "True", "then", "go", "on", "and", "run", "the", "action", "function" ]
python
train
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/certificates/certificates.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/certificates/certificates.py#L176-L195
def update_certificate(self, certificate_id, **kwargs): """Update a certificate. :param str certificate_id: The certificate id (Required) :param str certificate_data: X509.v3 trusted certificate in PEM format. :param str signature: This parameter has been DEPRECATED in the API and does not need to be provided. :param str type: type of the certificate. Values: lwm2m or bootstrap. :param str status: Status of the certificate. Allowed values: "ACTIVE" | "INACTIVE". :param str description: Human readable description of this certificate, not longer than 500 characters. :returns: Certificate object :rtype: Certificate """ api = self._get_api(iam.DeveloperApi) cert = Certificate._create_request_map(kwargs) body = iam.TrustedCertificateReq(**cert) certificate = Certificate(api.update_certificate(certificate_id, body)) return self.get_certificate(certificate.id)
[ "def", "update_certificate", "(", "self", ",", "certificate_id", ",", "*", "*", "kwargs", ")", ":", "api", "=", "self", ".", "_get_api", "(", "iam", ".", "DeveloperApi", ")", "cert", "=", "Certificate", ".", "_create_request_map", "(", "kwargs", ")", "body", "=", "iam", ".", "TrustedCertificateReq", "(", "*", "*", "cert", ")", "certificate", "=", "Certificate", "(", "api", ".", "update_certificate", "(", "certificate_id", ",", "body", ")", ")", "return", "self", ".", "get_certificate", "(", "certificate", ".", "id", ")" ]
Update a certificate. :param str certificate_id: The certificate id (Required) :param str certificate_data: X509.v3 trusted certificate in PEM format. :param str signature: This parameter has been DEPRECATED in the API and does not need to be provided. :param str type: type of the certificate. Values: lwm2m or bootstrap. :param str status: Status of the certificate. Allowed values: "ACTIVE" | "INACTIVE". :param str description: Human readable description of this certificate, not longer than 500 characters. :returns: Certificate object :rtype: Certificate
[ "Update", "a", "certificate", "." ]
python
train
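A minimal usage sketch for the update_certificate call documented above. The import path, the certificate id and the keyword values are illustrative assumptions, not taken from this record:

from mbed_cloud.certificates import CertificatesAPI  # assumed import path for the class holding update_certificate

api = CertificatesAPI()
# deactivate an existing trusted certificate and refresh its description
cert = api.update_certificate("015c64f76a73a0480a01412c00000000",  # placeholder id
                              status="INACTIVE",
                              description="rotated out of service")
print(cert.id, cert.status)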
mlperf/training
translation/tensorflow/transformer/transformer_main.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/translation/tensorflow/transformer/transformer_main.py#L187-L289
def train_schedule( estimator, train_eval_iterations, single_iteration_train_steps=None, single_iteration_train_epochs=None, bleu_source=None, bleu_ref=None, bleu_threshold=None): """Train and evaluate model, and optionally compute model's BLEU score. **Step vs. Epoch vs. Iteration** Steps and epochs are canonical terms used in TensorFlow and general machine learning. They are used to describe running a single process (train/eval): - Step refers to running the process through a single or batch of examples. - Epoch refers to running the process through an entire dataset. E.g. training a dataset with 100 examples. The dataset is divided into 20 batches with 5 examples per batch. A single training step trains the model on one batch. After 20 training steps, the model will have trained on every batch in the dataset, or, in other words, one epoch. Meanwhile, iteration is used in this implementation to describe running multiple processes (training and eval). - A single iteration: 1. trains the model for a specific number of steps or epochs. 2. evaluates the model. 3. (if source and ref files are provided) compute BLEU score. This function runs through multiple train+eval+bleu iterations. Args: estimator: tf.Estimator containing model to train. train_eval_iterations: Number of times to repeat the train+eval iteration. single_iteration_train_steps: Number of steps to train in one iteration. single_iteration_train_epochs: Number of epochs to train in one iteration. bleu_source: File containing text to be translated for BLEU calculation. bleu_ref: File containing reference translations for BLEU calculation. bleu_threshold: minimum BLEU score before training is stopped. Raises: ValueError: if both or none of single_iteration_train_steps and single_iteration_train_epochs were defined. """ # Ensure that exactly one of single_iteration_train_steps and # single_iteration_train_epochs is defined. if single_iteration_train_steps is None: if single_iteration_train_epochs is None: raise ValueError( "Exactly one of single_iteration_train_steps or " "single_iteration_train_epochs must be defined. Both were none.") else: if single_iteration_train_epochs is not None: raise ValueError( "Exactly one of single_iteration_train_steps or " "single_iteration_train_epochs must be defined. Both were defined.") evaluate_bleu = bleu_source is not None and bleu_ref is not None # Print out training schedule print("Training schedule:") if single_iteration_train_epochs is not None: print("\t1. Train for %d epochs." % single_iteration_train_epochs) else: print("\t1. Train for %d steps." % single_iteration_train_steps) print("\t2. Evaluate model.") if evaluate_bleu: print("\t3. Compute BLEU score.") if bleu_threshold is not None: print("Repeat above steps until the BLEU score reaches", bleu_threshold) if not evaluate_bleu or bleu_threshold is None: print("Repeat above steps %d times." % train_eval_iterations) if evaluate_bleu: # Set summary writer to log bleu score. bleu_writer = tf.summary.FileWriter( os.path.join(estimator.model_dir, BLEU_DIR)) if bleu_threshold is not None: # Change loop stopping condition if bleu_threshold is defined. 
train_eval_iterations = INF # Loop training/evaluation/bleu cycles mlperf_log.transformer_print(key=mlperf_log.TRAIN_LOOP) for i in xrange(train_eval_iterations): print("Starting iteration", i + 1) mlperf_log.transformer_print(key=mlperf_log.TRAIN_EPOCH, value=i * single_iteration_train_epochs + 1) # Train the model for single_iteration_train_steps or until the input fn # runs out of examples (if single_iteration_train_steps is None). estimator.train(dataset.train_input_fn, steps=single_iteration_train_steps) mlperf_log.transformer_print(key=mlperf_log.EVAL_START) eval_results = estimator.evaluate(dataset.eval_input_fn) print("Evaluation results (iter %d/%d):" % (i + 1, train_eval_iterations), eval_results) if evaluate_bleu: uncased_score, _ = evaluate_and_log_bleu( estimator, bleu_writer, bleu_source, bleu_ref) if bleu_threshold is not None and uncased_score > bleu_threshold: bleu_writer.close() break mlperf_log.transformer_print(key=mlperf_log.EVAL_TARGET, value=bleu_threshold) mlperf_log.transformer_print(key=mlperf_log.EVAL_ACCURACY, value=uncased_score) mlperf_log.transformer_print(key=mlperf_log.EVAL_STOP)
[ "def", "train_schedule", "(", "estimator", ",", "train_eval_iterations", ",", "single_iteration_train_steps", "=", "None", ",", "single_iteration_train_epochs", "=", "None", ",", "bleu_source", "=", "None", ",", "bleu_ref", "=", "None", ",", "bleu_threshold", "=", "None", ")", ":", "# Ensure that exactly one of single_iteration_train_steps and", "# single_iteration_train_epochs is defined.", "if", "single_iteration_train_steps", "is", "None", ":", "if", "single_iteration_train_epochs", "is", "None", ":", "raise", "ValueError", "(", "\"Exactly one of single_iteration_train_steps or \"", "\"single_iteration_train_epochs must be defined. Both were none.\"", ")", "else", ":", "if", "single_iteration_train_epochs", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Exactly one of single_iteration_train_steps or \"", "\"single_iteration_train_epochs must be defined. Both were defined.\"", ")", "evaluate_bleu", "=", "bleu_source", "is", "not", "None", "and", "bleu_ref", "is", "not", "None", "# Print out training schedule", "print", "(", "\"Training schedule:\"", ")", "if", "single_iteration_train_epochs", "is", "not", "None", ":", "print", "(", "\"\\t1. Train for %d epochs.\"", "%", "single_iteration_train_epochs", ")", "else", ":", "print", "(", "\"\\t1. Train for %d steps.\"", "%", "single_iteration_train_steps", ")", "print", "(", "\"\\t2. Evaluate model.\"", ")", "if", "evaluate_bleu", ":", "print", "(", "\"\\t3. Compute BLEU score.\"", ")", "if", "bleu_threshold", "is", "not", "None", ":", "print", "(", "\"Repeat above steps until the BLEU score reaches\"", ",", "bleu_threshold", ")", "if", "not", "evaluate_bleu", "or", "bleu_threshold", "is", "None", ":", "print", "(", "\"Repeat above steps %d times.\"", "%", "train_eval_iterations", ")", "if", "evaluate_bleu", ":", "# Set summary writer to log bleu score.", "bleu_writer", "=", "tf", ".", "summary", ".", "FileWriter", "(", "os", ".", "path", ".", "join", "(", "estimator", ".", "model_dir", ",", "BLEU_DIR", ")", ")", "if", "bleu_threshold", "is", "not", "None", ":", "# Change loop stopping condition if bleu_threshold is defined.", "train_eval_iterations", "=", "INF", "# Loop training/evaluation/bleu cycles", "mlperf_log", ".", "transformer_print", "(", "key", "=", "mlperf_log", ".", "TRAIN_LOOP", ")", "for", "i", "in", "xrange", "(", "train_eval_iterations", ")", ":", "print", "(", "\"Starting iteration\"", ",", "i", "+", "1", ")", "mlperf_log", ".", "transformer_print", "(", "key", "=", "mlperf_log", ".", "TRAIN_EPOCH", ",", "value", "=", "i", "*", "single_iteration_train_epochs", "+", "1", ")", "# Train the model for single_iteration_train_steps or until the input fn", "# runs out of examples (if single_iteration_train_steps is None).", "estimator", ".", "train", "(", "dataset", ".", "train_input_fn", ",", "steps", "=", "single_iteration_train_steps", ")", "mlperf_log", ".", "transformer_print", "(", "key", "=", "mlperf_log", ".", "EVAL_START", ")", "eval_results", "=", "estimator", ".", "evaluate", "(", "dataset", ".", "eval_input_fn", ")", "print", "(", "\"Evaluation results (iter %d/%d):\"", "%", "(", "i", "+", "1", ",", "train_eval_iterations", ")", ",", "eval_results", ")", "if", "evaluate_bleu", ":", "uncased_score", ",", "_", "=", "evaluate_and_log_bleu", "(", "estimator", ",", "bleu_writer", ",", "bleu_source", ",", "bleu_ref", ")", "if", "bleu_threshold", "is", "not", "None", "and", "uncased_score", ">", "bleu_threshold", ":", "bleu_writer", ".", "close", "(", ")", "break", "mlperf_log", ".", "transformer_print", "(", 
"key", "=", "mlperf_log", ".", "EVAL_TARGET", ",", "value", "=", "bleu_threshold", ")", "mlperf_log", ".", "transformer_print", "(", "key", "=", "mlperf_log", ".", "EVAL_ACCURACY", ",", "value", "=", "uncased_score", ")", "mlperf_log", ".", "transformer_print", "(", "key", "=", "mlperf_log", ".", "EVAL_STOP", ")" ]
Train and evaluate model, and optionally compute model's BLEU score. **Step vs. Epoch vs. Iteration** Steps and epochs are canonical terms used in TensorFlow and general machine learning. They are used to describe running a single process (train/eval): - Step refers to running the process through a single or batch of examples. - Epoch refers to running the process through an entire dataset. E.g. training a dataset with 100 examples. The dataset is divided into 20 batches with 5 examples per batch. A single training step trains the model on one batch. After 20 training steps, the model will have trained on every batch in the dataset, or, in other words, one epoch. Meanwhile, iteration is used in this implementation to describe running multiple processes (training and eval). - A single iteration: 1. trains the model for a specific number of steps or epochs. 2. evaluates the model. 3. (if source and ref files are provided) compute BLEU score. This function runs through multiple train+eval+bleu iterations. Args: estimator: tf.Estimator containing model to train. train_eval_iterations: Number of times to repeat the train+eval iteration. single_iteration_train_steps: Number of steps to train in one iteration. single_iteration_train_epochs: Number of epochs to train in one iteration. bleu_source: File containing text to be translated for BLEU calculation. bleu_ref: File containing reference translations for BLEU calculation. bleu_threshold: minimum BLEU score before training is stopped. Raises: ValueError: if both or none of single_iteration_train_steps and single_iteration_train_epochs were defined.
[ "Train", "and", "evaluate", "model", "and", "optionally", "compute", "model", "s", "BLEU", "score", "." ]
python
train
saltstack/salt
salt/modules/openvswitch.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L607-L635
def db_set(table, record, column, value, if_exists=False): ''' Sets a column's value for a specific record. Args: table: A string - name of the database table. record: A string - identifier of the record. column: A string - name of the column. value: A string - the value to be set if_exists: A boolean - if True, it is not an error if the record does not exist. Returns: None on success and an error message on failure. CLI Example: .. code-block:: bash salt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07 ''' cmd = ['ovs-vsctl'] if if_exists: cmd += ['--if-exists'] cmd += ['set', table, record, '{0}={1}'.format(column, json.dumps(value))] result = __salt__['cmd.run_all'](cmd) if result['retcode'] != 0: return result['stderr'] else: return None
[ "def", "db_set", "(", "table", ",", "record", ",", "column", ",", "value", ",", "if_exists", "=", "False", ")", ":", "cmd", "=", "[", "'ovs-vsctl'", "]", "if", "if_exists", ":", "cmd", "+=", "[", "'--if-exists'", "]", "cmd", "+=", "[", "'set'", ",", "table", ",", "record", ",", "'{0}={1}'", ".", "format", "(", "column", ",", "json", ".", "dumps", "(", "value", ")", ")", "]", "result", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ")", "if", "result", "[", "'retcode'", "]", "!=", "0", ":", "return", "result", "[", "'stderr'", "]", "else", ":", "return", "None" ]
Sets a column's value for a specific record. Args: table: A string - name of the database table. record: A string - identifier of the record. column: A string - name of the column. value: A string - the value to be set if_exists: A boolean - if True, it is not an error if the record does not exist. Returns: None on success and an error message on failure. CLI Example: .. code-block:: bash salt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07
[ "Sets", "a", "column", "s", "value", "for", "a", "specific", "record", "." ]
python
train
huge-success/sanic
sanic/blueprints.py
https://github.com/huge-success/sanic/blob/6a4a3f617fdbe1d3ee8bdc9d1b12ad2d0b34acdd/sanic/blueprints.py#L260-L282
def websocket( self, uri, host=None, strict_slashes=None, version=None, name=None ): """Create a blueprint websocket route from a decorated function. :param uri: endpoint at which the route will be accessible. :param host: IP Address of FQDN for the sanic server to use. :param strict_slashes: Enforce the API urls are requested with a training */* :param version: Blueprint Version :param name: Unique name to identify the Websocket Route """ if strict_slashes is None: strict_slashes = self.strict_slashes def decorator(handler): route = FutureRoute( handler, uri, [], host, strict_slashes, False, version, name ) self.websocket_routes.append(route) return handler return decorator
[ "def", "websocket", "(", "self", ",", "uri", ",", "host", "=", "None", ",", "strict_slashes", "=", "None", ",", "version", "=", "None", ",", "name", "=", "None", ")", ":", "if", "strict_slashes", "is", "None", ":", "strict_slashes", "=", "self", ".", "strict_slashes", "def", "decorator", "(", "handler", ")", ":", "route", "=", "FutureRoute", "(", "handler", ",", "uri", ",", "[", "]", ",", "host", ",", "strict_slashes", ",", "False", ",", "version", ",", "name", ")", "self", ".", "websocket_routes", ".", "append", "(", "route", ")", "return", "handler", "return", "decorator" ]
Create a blueprint websocket route from a decorated function. :param uri: endpoint at which the route will be accessible. :param host: IP Address or FQDN for the sanic server to use. :param strict_slashes: Enforce the API urls are requested with a trailing */* :param version: Blueprint Version :param name: Unique name to identify the Websocket Route
[ "Create", "a", "blueprint", "websocket", "route", "from", "a", "decorated", "function", "." ]
python
train
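A short usage sketch for the blueprint websocket decorator shown above; the blueprint name, route path and handler body are invented for illustration:

from sanic import Sanic
from sanic.blueprints import Blueprint

bp = Blueprint("feeds")

@bp.websocket("/live", name="live_feed")
async def live_feed(request, ws):
    # echo every frame back to the client
    while True:
        data = await ws.recv()
        await ws.send(data)

app = Sanic("example")
app.blueprint(bp)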
ssato/python-anyconfig
src/anyconfig/utils.py
https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/utils.py#L192-L217
def is_path_like_object(obj, marker='*'): """ Is given object 'obj' a path string, a pathlib.Path, a file / file-like (stream) or IOInfo namedtuple object? :param obj: a path string, pathlib.Path object, a file / file-like or 'IOInfo' object :return: True if 'obj' is a path string or a pathlib.Path object or a file (stream) object >>> assert is_path_like_object(__file__) >>> assert not is_path_like_object("/a/b/c/*.json", '*') >>> from anyconfig.compat import pathlib >>> if pathlib is not None: ... assert is_path_like_object(pathlib.Path("a.ini")) ... assert not is_path_like_object(pathlib.Path("x.ini"), 'x') >>> assert is_path_like_object(open(__file__)) """ return ((is_path(obj) and marker not in obj) or (is_path_obj(obj) and marker not in obj.as_posix()) or is_file_stream(obj) or is_ioinfo(obj))
[ "def", "is_path_like_object", "(", "obj", ",", "marker", "=", "'*'", ")", ":", "return", "(", "(", "is_path", "(", "obj", ")", "and", "marker", "not", "in", "obj", ")", "or", "(", "is_path_obj", "(", "obj", ")", "and", "marker", "not", "in", "obj", ".", "as_posix", "(", ")", ")", "or", "is_file_stream", "(", "obj", ")", "or", "is_ioinfo", "(", "obj", ")", ")" ]
Is given object 'obj' a path string, a pathlib.Path, a file / file-like (stream) or IOInfo namedtuple object? :param obj: a path string, pathlib.Path object, a file / file-like or 'IOInfo' object :return: True if 'obj' is a path string or a pathlib.Path object or a file (stream) object >>> assert is_path_like_object(__file__) >>> assert not is_path_like_object("/a/b/c/*.json", '*') >>> from anyconfig.compat import pathlib >>> if pathlib is not None: ... assert is_path_like_object(pathlib.Path("a.ini")) ... assert not is_path_like_object(pathlib.Path("x.ini"), 'x') >>> assert is_path_like_object(open(__file__))
[ "Is", "given", "object", "obj", "a", "path", "string", "a", "pathlib", ".", "Path", "a", "file", "/", "file", "-", "like", "(", "stream", ")", "or", "IOInfo", "namedtuple", "object?" ]
python
train
thespacedoctor/fundamentals
fundamentals/mysql/database.py
https://github.com/thespacedoctor/fundamentals/blob/1d2c007ac74442ec2eabde771cfcacdb9c1ab382/fundamentals/mysql/database.py#L85-L125
def connect(self): """connect to the database **Return:** - ``dbConn`` -- the database connection See the class docstring for usage """ self.log.debug('starting the ``get`` method') dbSettings = self.dbSettings port = False if "tunnel" in dbSettings and dbSettings["tunnel"]: port = self._setup_tunnel( tunnelParameters=dbSettings["tunnel"] ) # SETUP A DATABASE CONNECTION host = dbSettings["host"] user = dbSettings["user"] passwd = dbSettings["password"] dbName = dbSettings["db"] dbConn = ms.connect( host=host, user=user, passwd=passwd, db=dbName, port=port, use_unicode=True, charset='utf8', local_infile=1, client_flag=ms.constants.CLIENT.MULTI_STATEMENTS, connect_timeout=36000, max_allowed_packet=51200000 ) if self.autocommit: dbConn.autocommit(True) self.log.debug('completed the ``get`` method') return dbConn
[ "def", "connect", "(", "self", ")", ":", "self", ".", "log", ".", "debug", "(", "'starting the ``get`` method'", ")", "dbSettings", "=", "self", ".", "dbSettings", "port", "=", "False", "if", "\"tunnel\"", "in", "dbSettings", "and", "dbSettings", "[", "\"tunnel\"", "]", ":", "port", "=", "self", ".", "_setup_tunnel", "(", "tunnelParameters", "=", "dbSettings", "[", "\"tunnel\"", "]", ")", "# SETUP A DATABASE CONNECTION", "host", "=", "dbSettings", "[", "\"host\"", "]", "user", "=", "dbSettings", "[", "\"user\"", "]", "passwd", "=", "dbSettings", "[", "\"password\"", "]", "dbName", "=", "dbSettings", "[", "\"db\"", "]", "dbConn", "=", "ms", ".", "connect", "(", "host", "=", "host", ",", "user", "=", "user", ",", "passwd", "=", "passwd", ",", "db", "=", "dbName", ",", "port", "=", "port", ",", "use_unicode", "=", "True", ",", "charset", "=", "'utf8'", ",", "local_infile", "=", "1", ",", "client_flag", "=", "ms", ".", "constants", ".", "CLIENT", ".", "MULTI_STATEMENTS", ",", "connect_timeout", "=", "36000", ",", "max_allowed_packet", "=", "51200000", ")", "if", "self", ".", "autocommit", ":", "dbConn", ".", "autocommit", "(", "True", ")", "self", ".", "log", ".", "debug", "(", "'completed the ``get`` method'", ")", "return", "dbConn" ]
connect to the database **Return:** - ``dbConn`` -- the database connection See the class docstring for usage
[ "connect", "to", "the", "database" ]
python
train
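A minimal connection sketch for the database helper above. The import path and the constructor signature are assumptions based on this module's path and docstring; the settings keys mirror the ones read inside connect():

import logging
from fundamentals.mysql import database  # assumed import path

log = logging.getLogger(__name__)
dbSettings = {
    "host": "127.0.0.1",
    "user": "reader",
    "password": "secret",
    "db": "catalogues",
}
dbConn = database(log=log, dbSettings=dbSettings).connect()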
chaoss/grimoirelab-kingarthur
arthur/scheduler.py
https://github.com/chaoss/grimoirelab-kingarthur/blob/9d6a638bee68d5e5c511f045eeebf06340fd3252/arthur/scheduler.py#L361-L366
def _handle_failed_job(self, job): """Handle failed jobs""" task_id = job.kwargs['task_id'] logger.error("Job #%s (task: %s) failed; cancelled", job.id, task_id)
[ "def", "_handle_failed_job", "(", "self", ",", "job", ")", ":", "task_id", "=", "job", ".", "kwargs", "[", "'task_id'", "]", "logger", ".", "error", "(", "\"Job #%s (task: %s) failed; cancelled\"", ",", "job", ".", "id", ",", "task_id", ")" ]
Handle failed jobs
[ "Handle", "failed", "jobs" ]
python
test
openstax/cnx-archive
cnxarchive/search.py
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/search.py#L195-L208
def highlighted_fulltext(self): """Highlight the found terms in the fulltext.""" terms = self.fields.get('fulltext', []) if not terms: return None arguments = {'id': self['id'], 'query': ' & '.join(terms), } with db_connect() as db_connection: with db_connection.cursor() as cursor: cursor.execute(_read_sql_file('highlighted-fulltext'), arguments) hl_fulltext = cursor.fetchone()[0] return hl_fulltext
[ "def", "highlighted_fulltext", "(", "self", ")", ":", "terms", "=", "self", ".", "fields", ".", "get", "(", "'fulltext'", ",", "[", "]", ")", "if", "not", "terms", ":", "return", "None", "arguments", "=", "{", "'id'", ":", "self", "[", "'id'", "]", ",", "'query'", ":", "' & '", ".", "join", "(", "terms", ")", ",", "}", "with", "db_connect", "(", ")", "as", "db_connection", ":", "with", "db_connection", ".", "cursor", "(", ")", "as", "cursor", ":", "cursor", ".", "execute", "(", "_read_sql_file", "(", "'highlighted-fulltext'", ")", ",", "arguments", ")", "hl_fulltext", "=", "cursor", ".", "fetchone", "(", ")", "[", "0", "]", "return", "hl_fulltext" ]
Highlight the found terms in the fulltext.
[ "Highlight", "the", "found", "terms", "in", "the", "fulltext", "." ]
python
train
daler/trackhub
trackhub/track.py
https://github.com/daler/trackhub/blob/e4655f79177822529f80b923df117e38e28df702/trackhub/track.py#L454-L472
def add_subgroups(self, subgroups): """ Add a list of SubGroupDefinition objects to this composite. Note that in contrast to :meth:`BaseTrack`, which takes a single dictionary indicating the particular subgroups for the track, this method takes a list of :class:`SubGroupDefinition` objects representing the allowed subgroups for the composite. :param subgroups: List of SubGroupDefinition objects. """ if subgroups is None: subgroups = {} _subgroups = {} for sg in subgroups: assert isinstance(sg, SubGroupDefinition) _subgroups[sg.name] = sg self.subgroups = _subgroups
[ "def", "add_subgroups", "(", "self", ",", "subgroups", ")", ":", "if", "subgroups", "is", "None", ":", "subgroups", "=", "{", "}", "_subgroups", "=", "{", "}", "for", "sg", "in", "subgroups", ":", "assert", "isinstance", "(", "sg", ",", "SubGroupDefinition", ")", "_subgroups", "[", "sg", ".", "name", "]", "=", "sg", "self", ".", "subgroups", "=", "_subgroups" ]
Add a list of SubGroupDefinition objects to this composite. Note that in contrast to :meth:`BaseTrack`, which takes a single dictionary indicating the particular subgroups for the track, this method takes a list of :class:`SubGroupDefinition` objects representing the allowed subgroups for the composite. :param subgroups: List of SubGroupDefinition objects.
[ "Add", "a", "list", "of", "SubGroupDefinition", "objects", "to", "this", "composite", "." ]
python
train
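A hedged sketch of passing SubGroupDefinition objects to the add_subgroups method above; the composite arguments and the subgroup names/mappings are invented:

from trackhub.track import CompositeTrack, SubGroupDefinition  # names assumed from this module

composite = CompositeTrack(name="myComposite",
                           short_label="demo",
                           long_label="demo composite",
                           tracktype="bigBed")
composite.add_subgroups([
    SubGroupDefinition(name="celltype", label="Cell_Type",
                       mapping={"k562": "K562", "hela": "HeLa"}),
    SubGroupDefinition(name="rep", label="Replicate",
                       mapping={"rep1": "1", "rep2": "2"}),
])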
QInfer/python-qinfer
src/qinfer/utils.py
https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/utils.py#L163-L202
def sample_multinomial(N, p, size=None): r""" Draws fixed number of samples N from different multinomial distributions (with the same number dice sides). :param int N: How many samples to draw from each distribution. :param np.ndarray p: Probabilities specifying each distribution. Sum along axis 0 should be 1. :param size: Output shape. ``int`` or tuple of ``int``s. If the given shape is, e.g., ``(m, n, k)``, then m * n * k samples are drawn for each distribution. Default is None, in which case a single value is returned for each distribution. :rtype: np.ndarray :return: Array of shape ``(p.shape, size)`` or p.shape if size is ``None``. """ # ensure s is array s = np.array([1]) if size is None else np.array([size]).flatten() def take_samples(ps): # we have to flatten to make apply_along_axis work. return np.random.multinomial(N, ps, np.prod(s)).flatten() # should have shape (prod(size)*ps.shape[0], ps.shape[1:]) samples = np.apply_along_axis(take_samples, 0, p) # should have shape (size, p.shape) samples = samples.reshape(np.concatenate([s, p.shape])) # should have shape (p.shape, size) samples = samples.transpose(np.concatenate( [np.arange(s.ndim, p.ndim+s.ndim), np.arange(s.ndim)] )) if size is None: # get rid of trailing singleton dimension. samples = samples[...,0] return samples
[ "def", "sample_multinomial", "(", "N", ",", "p", ",", "size", "=", "None", ")", ":", "# ensure s is array", "s", "=", "np", ".", "array", "(", "[", "1", "]", ")", "if", "size", "is", "None", "else", "np", ".", "array", "(", "[", "size", "]", ")", ".", "flatten", "(", ")", "def", "take_samples", "(", "ps", ")", ":", "# we have to flatten to make apply_along_axis work.", "return", "np", ".", "random", ".", "multinomial", "(", "N", ",", "ps", ",", "np", ".", "prod", "(", "s", ")", ")", ".", "flatten", "(", ")", "# should have shape (prod(size)*ps.shape[0], ps.shape[1:])", "samples", "=", "np", ".", "apply_along_axis", "(", "take_samples", ",", "0", ",", "p", ")", "# should have shape (size, p.shape)", "samples", "=", "samples", ".", "reshape", "(", "np", ".", "concatenate", "(", "[", "s", ",", "p", ".", "shape", "]", ")", ")", "# should have shape (p.shape, size)", "samples", "=", "samples", ".", "transpose", "(", "np", ".", "concatenate", "(", "[", "np", ".", "arange", "(", "s", ".", "ndim", ",", "p", ".", "ndim", "+", "s", ".", "ndim", ")", ",", "np", ".", "arange", "(", "s", ".", "ndim", ")", "]", ")", ")", "if", "size", "is", "None", ":", "# get rid of trailing singleton dimension.", "samples", "=", "samples", "[", "...", ",", "0", "]", "return", "samples" ]
r""" Draws fixed number of samples N from different multinomial distributions (with the same number dice sides). :param int N: How many samples to draw from each distribution. :param np.ndarray p: Probabilities specifying each distribution. Sum along axis 0 should be 1. :param size: Output shape. ``int`` or tuple of ``int``s. If the given shape is, e.g., ``(m, n, k)``, then m * n * k samples are drawn for each distribution. Default is None, in which case a single value is returned for each distribution. :rtype: np.ndarray :return: Array of shape ``(p.shape, size)`` or p.shape if size is ``None``.
[ "r", "Draws", "fixed", "number", "of", "samples", "N", "from", "different", "multinomial", "distributions", "(", "with", "the", "same", "number", "dice", "sides", ")", "." ]
python
train
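A small worked example for sample_multinomial above; the probability array is made up, with each column summing to 1 along axis 0 as the docstring requires:

import numpy as np

# two three-sided "dice": each column is one distribution over the three outcomes
p = np.array([[0.2, 0.5],
              [0.3, 0.25],
              [0.5, 0.25]])
counts = sample_multinomial(100, p)           # shape (3, 2): outcome counts per distribution
repeats = sample_multinomial(100, p, size=4)  # shape (3, 2, 4): four independent draws each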
theislab/scanpy
scanpy/preprocessing/_bbknn.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/preprocessing/_bbknn.py#L1-L84
def bbknn(adata, batch_key='batch', save_knn=False, copy=False, **kwargs): """\ Batch balanced kNN [Park18]_. Batch balanced kNN alters the kNN procedure to identify each cell's top neighbours in each batch separately instead of the entire cell pool with no accounting for batch. Aligns batches in a quick and lightweight manner. For use in the scanpy workflow as an alternative to :func:`scanpi.pp.neighbors`. .. note:: This is just a wrapper of :func:`bbknn.bbknn`: more information and bug reports `here <https://github.com/Teichlab/bbknn>`__. Params ------ adata : ``AnnData`` Needs the PCA computed and stored in ``adata.obsm["X_pca"]``. batch_key : ``str``, optional (default: "batch") ``adata.obs`` column name discriminating between your batches. neighbors_within_batch : ``int``, optional (default: 3) How many top neighbours to report for each batch; total number of neighbours will be this number times the number of batches. n_pcs : ``int``, optional (default: 50) How many principal components to use in the analysis. trim : ``int`` or ``None``, optional (default: ``None``) If not ``None``, trim the neighbours of each cell to these many top connectivities. May help with population independence and improve the tidiness of clustering. approx : ``bool``, optional (default: ``True``) If ``True``, use annoy's approximate neighbour finding. This results in a quicker run time for large datasets while also potentially increasing the degree of batch correction. n_trees : ``int``, optional (default: 10) Only used when ``approx=True``. The number of trees to construct in the annoy forest. More trees give higher precision when querying, at the cost of increased run time and resource intensity. use_faiss : ``bool``, optional (default: ``True``) If ``approx=False`` and the metric is "euclidean", use the faiss package to compute nearest neighbours if installed. This improves performance at a minor cost to numerical precision as faiss operates on float32. metric : ``str`` or ``sklearn.neighbors.DistanceMetric``, optional (default: "angular") What distance metric to use. If using ``approx=True``, the options are "angular", "euclidean", "manhattan" and "hamming". Otherwise, the options are "euclidean", a member of the ``sklearn.neighbors.KDTree.valid_metrics`` list, or parameterised ``sklearn.neighbors.DistanceMetric`` `objects <https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html>`_:: >>> from sklearn import neighbors >>> neighbors.KDTree.valid_metrics ['p', 'chebyshev', 'cityblock', 'minkowski', 'infinity', 'l2', 'euclidean', 'manhattan', 'l1'] >>> pass_as_metric = neighbors.DistanceMetric.get_metric('minkowski', p=3) bandwidth : ``float``, optional (default: 1) ``scanpy.neighbors.compute_connectivities_umap`` parameter, higher values result in a gentler slope of the connectivities exponentials (i.e. larger connectivity values being returned) local_connectivity : ``int``, optional (default: 1) ``scanpy.neighbors.compute_connectivities_umap`` parameter, how many nearest neighbors of each cell are assumed to be fully connected (and given a connectivity value of 1) save_knn : ``bool``, optional (default: ``False``) If ``True``, save the indices of the nearest neighbours for each cell in ``adata.uns['bbknn']``. copy : ``bool``, optional (default: ``False``) If ``True``, return a copy instead of writing to the supplied adata. Returns ------- The `adata` with the batch-corrected graph. 
""" params = locals() # Has to be first kwargs = params.pop('kwargs') try: from bbknn import bbknn except ImportError: raise ImportError('Please install bbknn: `pip install bbknn`.') return bbknn(**params, **kwargs)
[ "def", "bbknn", "(", "adata", ",", "batch_key", "=", "'batch'", ",", "save_knn", "=", "False", ",", "copy", "=", "False", ",", "*", "*", "kwargs", ")", ":", "params", "=", "locals", "(", ")", "# Has to be first", "kwargs", "=", "params", ".", "pop", "(", "'kwargs'", ")", "try", ":", "from", "bbknn", "import", "bbknn", "except", "ImportError", ":", "raise", "ImportError", "(", "'Please install bbknn: `pip install bbknn`.'", ")", "return", "bbknn", "(", "*", "*", "params", ",", "*", "*", "kwargs", ")" ]
\ Batch balanced kNN [Park18]_. Batch balanced kNN alters the kNN procedure to identify each cell's top neighbours in each batch separately instead of the entire cell pool with no accounting for batch. Aligns batches in a quick and lightweight manner. For use in the scanpy workflow as an alternative to :func:`scanpi.pp.neighbors`. .. note:: This is just a wrapper of :func:`bbknn.bbknn`: more information and bug reports `here <https://github.com/Teichlab/bbknn>`__. Params ------ adata : ``AnnData`` Needs the PCA computed and stored in ``adata.obsm["X_pca"]``. batch_key : ``str``, optional (default: "batch") ``adata.obs`` column name discriminating between your batches. neighbors_within_batch : ``int``, optional (default: 3) How many top neighbours to report for each batch; total number of neighbours will be this number times the number of batches. n_pcs : ``int``, optional (default: 50) How many principal components to use in the analysis. trim : ``int`` or ``None``, optional (default: ``None``) If not ``None``, trim the neighbours of each cell to these many top connectivities. May help with population independence and improve the tidiness of clustering. approx : ``bool``, optional (default: ``True``) If ``True``, use annoy's approximate neighbour finding. This results in a quicker run time for large datasets while also potentially increasing the degree of batch correction. n_trees : ``int``, optional (default: 10) Only used when ``approx=True``. The number of trees to construct in the annoy forest. More trees give higher precision when querying, at the cost of increased run time and resource intensity. use_faiss : ``bool``, optional (default: ``True``) If ``approx=False`` and the metric is "euclidean", use the faiss package to compute nearest neighbours if installed. This improves performance at a minor cost to numerical precision as faiss operates on float32. metric : ``str`` or ``sklearn.neighbors.DistanceMetric``, optional (default: "angular") What distance metric to use. If using ``approx=True``, the options are "angular", "euclidean", "manhattan" and "hamming". Otherwise, the options are "euclidean", a member of the ``sklearn.neighbors.KDTree.valid_metrics`` list, or parameterised ``sklearn.neighbors.DistanceMetric`` `objects <https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html>`_:: >>> from sklearn import neighbors >>> neighbors.KDTree.valid_metrics ['p', 'chebyshev', 'cityblock', 'minkowski', 'infinity', 'l2', 'euclidean', 'manhattan', 'l1'] >>> pass_as_metric = neighbors.DistanceMetric.get_metric('minkowski', p=3) bandwidth : ``float``, optional (default: 1) ``scanpy.neighbors.compute_connectivities_umap`` parameter, higher values result in a gentler slope of the connectivities exponentials (i.e. larger connectivity values being returned) local_connectivity : ``int``, optional (default: 1) ``scanpy.neighbors.compute_connectivities_umap`` parameter, how many nearest neighbors of each cell are assumed to be fully connected (and given a connectivity value of 1) save_knn : ``bool``, optional (default: ``False``) If ``True``, save the indices of the nearest neighbours for each cell in ``adata.uns['bbknn']``. copy : ``bool``, optional (default: ``False``) If ``True``, return a copy instead of writing to the supplied adata. Returns ------- The `adata` with the batch-corrected graph.
[ "\\", "Batch", "balanced", "kNN", "[", "Park18", "]", "_", "." ]
python
train
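A hedged usage sketch for the bbknn wrapper above. It assumes an AnnData object with a 'batch' column in adata.obs and reaches the wrapper through the external namespace, which may differ between scanpy releases; the input file name is a placeholder:

import scanpy as sc

adata = sc.read_h5ad("pbmc_batches.h5ad")   # placeholder input
sc.pp.pca(adata, n_comps=50)                # bbknn reads adata.obsm["X_pca"]
sc.external.pp.bbknn(adata, batch_key="batch", neighbors_within_batch=3)
sc.tl.umap(adata)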
apache/incubator-mxnet
python/mxnet/module/module.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/module.py#L722-L743
def get_states(self, merge_multi_context=True): """Gets states from all devices. If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output elements are `NDArray`. Parameters ---------- merge_multi_context : bool Default is ``True``. In the case when data-parallelism is used, the states will be collected from multiple devices. A ``True`` value indicate that we should merge the collected results so that they look like from a single executor. Returns ------- list of NDArray or list of list of NDArray States """ assert self.binded and self.params_initialized return self._exec_group.get_states(merge_multi_context=merge_multi_context)
[ "def", "get_states", "(", "self", ",", "merge_multi_context", "=", "True", ")", ":", "assert", "self", ".", "binded", "and", "self", ".", "params_initialized", "return", "self", ".", "_exec_group", ".", "get_states", "(", "merge_multi_context", "=", "merge_multi_context", ")" ]
Gets states from all devices. If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output elements are `NDArray`. Parameters ---------- merge_multi_context : bool Default is ``True``. In the case when data-parallelism is used, the states will be collected from multiple devices. A ``True`` value indicate that we should merge the collected results so that they look like from a single executor. Returns ------- list of NDArray or list of list of NDArray States
[ "Gets", "states", "from", "all", "devices", "." ]
python
train
JustinLovinger/optimal
optimal/algorithms/pbil.py
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/pbil.py#L126-L129
def _sample(probability_vec): """Return random binary string, with given probabilities.""" return map(int, numpy.random.random(probability_vec.size) <= probability_vec)
[ "def", "_sample", "(", "probability_vec", ")", ":", "return", "map", "(", "int", ",", "numpy", ".", "random", ".", "random", "(", "probability_vec", ".", "size", ")", "<=", "probability_vec", ")" ]
Return random binary string, with given probabilities.
[ "Return", "random", "binary", "string", "with", "given", "probabilities", "." ]
python
train
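A tiny illustration of the _sample helper above; the probability vector is arbitrary:

import numpy

probability_vec = numpy.array([0.1, 0.9, 0.5])
bits = list(_sample(probability_vec))
# e.g. [0, 1, 1]: each position is 1 with the corresponding probability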
B2W-BIT/aiologger
aiologger/handlers/files.py
https://github.com/B2W-BIT/aiologger/blob/0b366597a8305d5577a267305e81d5e4784cd398/aiologger/handlers/files.py#L276-L356
def compute_rollover(self, current_time: int) -> int: """ Work out the rollover time based on the specified time. If we are rolling over at midnight or weekly, then the interval is already known. need to figure out is WHEN the next interval is. In other words, if you are rolling over at midnight, then your base interval is 1 day, but you want to start that one day clock at midnight, not now. So, we have to fudge the `rollover_at` value in order to trigger the first rollover at the right time. After that, the regular interval will take care of the rest. Note that this code doesn't care about leap seconds. :) """ result = current_time + self.interval if ( self.when == RolloverInterval.MIDNIGHT or self.when in RolloverInterval.WEEK_DAYS ): if self.utc: t = time.gmtime(current_time) else: t = time.localtime(current_time) current_hour = t[3] current_minute = t[4] current_second = t[5] current_day = t[6] # r is the number of seconds left between now and the next rotation if self.at_time is None: rotate_ts = ONE_DAY_IN_SECONDS else: rotate_ts = ( self.at_time.hour * 60 + self.at_time.minute ) * 60 + self.at_time.second r = rotate_ts - ( (current_hour * 60 + current_minute) * 60 + current_second ) if r < 0: # Rotate time is before the current time (for example when # self.rotateAt is 13:45 and it now 14:15), rotation is # tomorrow. r += ONE_DAY_IN_SECONDS current_day = (current_day + 1) % 7 result = current_time + r # If we are rolling over on a certain day, add in the number of days until # the next rollover, but offset by 1 since we just calculated the time # until the next day starts. There are three cases: # Case 1) The day to rollover is today; in this case, do nothing # Case 2) The day to rollover is further in the interval (i.e., today is # day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to # next rollover is simply 6 - 2 - 1, or 3. # Case 3) The day to rollover is behind us in the interval (i.e., today # is day 5 (Saturday) and rollover is on day 3 (Thursday). # Days to rollover is 6 - 5 + 3, or 4. In this case, it's the # number of days left in the current week (1) plus the number # of days in the next week until the rollover day (3). # The calculations described in 2) and 3) above need to have a day added. # This is because the above time calculation takes us to midnight on this # day, i.e. the start of the next day. if self.when in RolloverInterval.WEEK_DAYS: day = current_day # 0 is Monday if day != self.day_of_week: if day < self.day_of_week: days_to_wait = self.day_of_week - day else: days_to_wait = 6 - day + self.day_of_week + 1 new_rollover_at = result + (days_to_wait * (60 * 60 * 24)) if not self.utc: dst_now = t[-1] dst_at_rollover = time.localtime(new_rollover_at)[-1] if dst_now != dst_at_rollover: if not dst_now: # DST kicks in before next rollover, so we need to deduct an hour addend = -ONE_HOUR_IN_SECONDS else: # DST bows out before next rollover, so we need to add an hour addend = ONE_HOUR_IN_SECONDS new_rollover_at += addend result = new_rollover_at return result
[ "def", "compute_rollover", "(", "self", ",", "current_time", ":", "int", ")", "->", "int", ":", "result", "=", "current_time", "+", "self", ".", "interval", "if", "(", "self", ".", "when", "==", "RolloverInterval", ".", "MIDNIGHT", "or", "self", ".", "when", "in", "RolloverInterval", ".", "WEEK_DAYS", ")", ":", "if", "self", ".", "utc", ":", "t", "=", "time", ".", "gmtime", "(", "current_time", ")", "else", ":", "t", "=", "time", ".", "localtime", "(", "current_time", ")", "current_hour", "=", "t", "[", "3", "]", "current_minute", "=", "t", "[", "4", "]", "current_second", "=", "t", "[", "5", "]", "current_day", "=", "t", "[", "6", "]", "# r is the number of seconds left between now and the next rotation", "if", "self", ".", "at_time", "is", "None", ":", "rotate_ts", "=", "ONE_DAY_IN_SECONDS", "else", ":", "rotate_ts", "=", "(", "self", ".", "at_time", ".", "hour", "*", "60", "+", "self", ".", "at_time", ".", "minute", ")", "*", "60", "+", "self", ".", "at_time", ".", "second", "r", "=", "rotate_ts", "-", "(", "(", "current_hour", "*", "60", "+", "current_minute", ")", "*", "60", "+", "current_second", ")", "if", "r", "<", "0", ":", "# Rotate time is before the current time (for example when", "# self.rotateAt is 13:45 and it now 14:15), rotation is", "# tomorrow.", "r", "+=", "ONE_DAY_IN_SECONDS", "current_day", "=", "(", "current_day", "+", "1", ")", "%", "7", "result", "=", "current_time", "+", "r", "# If we are rolling over on a certain day, add in the number of days until", "# the next rollover, but offset by 1 since we just calculated the time", "# until the next day starts. There are three cases:", "# Case 1) The day to rollover is today; in this case, do nothing", "# Case 2) The day to rollover is further in the interval (i.e., today is", "# day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to", "# next rollover is simply 6 - 2 - 1, or 3.", "# Case 3) The day to rollover is behind us in the interval (i.e., today", "# is day 5 (Saturday) and rollover is on day 3 (Thursday).", "# Days to rollover is 6 - 5 + 3, or 4. In this case, it's the", "# number of days left in the current week (1) plus the number", "# of days in the next week until the rollover day (3).", "# The calculations described in 2) and 3) above need to have a day added.", "# This is because the above time calculation takes us to midnight on this", "# day, i.e. the start of the next day.", "if", "self", ".", "when", "in", "RolloverInterval", ".", "WEEK_DAYS", ":", "day", "=", "current_day", "# 0 is Monday", "if", "day", "!=", "self", ".", "day_of_week", ":", "if", "day", "<", "self", ".", "day_of_week", ":", "days_to_wait", "=", "self", ".", "day_of_week", "-", "day", "else", ":", "days_to_wait", "=", "6", "-", "day", "+", "self", ".", "day_of_week", "+", "1", "new_rollover_at", "=", "result", "+", "(", "days_to_wait", "*", "(", "60", "*", "60", "*", "24", ")", ")", "if", "not", "self", ".", "utc", ":", "dst_now", "=", "t", "[", "-", "1", "]", "dst_at_rollover", "=", "time", ".", "localtime", "(", "new_rollover_at", ")", "[", "-", "1", "]", "if", "dst_now", "!=", "dst_at_rollover", ":", "if", "not", "dst_now", ":", "# DST kicks in before next rollover, so we need to deduct an hour", "addend", "=", "-", "ONE_HOUR_IN_SECONDS", "else", ":", "# DST bows out before next rollover, so we need to add an hour", "addend", "=", "ONE_HOUR_IN_SECONDS", "new_rollover_at", "+=", "addend", "result", "=", "new_rollover_at", "return", "result" ]
Work out the rollover time based on the specified time. If we are rolling over at midnight or weekly, then the interval is already known. What we need to figure out is WHEN the next interval is. In other words, if you are rolling over at midnight, then your base interval is 1 day, but you want to start that one day clock at midnight, not now. So, we have to fudge the `rollover_at` value in order to trigger the first rollover at the right time. After that, the regular interval will take care of the rest. Note that this code doesn't care about leap seconds. :)
[ "Work", "out", "the", "rollover", "time", "based", "on", "the", "specified", "time", "." ]
python
train
spacetelescope/drizzlepac
drizzlepac/wcs_functions.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/wcs_functions.py#L64-L70
def backward(self,pixx,pixy): """ Transform pixx,pixy positions from the output frame back onto their original positions in the input frame. """ skyx,skyy = self.output.wcs_pix2world(pixx,pixy,self.origin) result = self.input.all_world2pix(skyx,skyy,self.origin) return result
[ "def", "backward", "(", "self", ",", "pixx", ",", "pixy", ")", ":", "skyx", ",", "skyy", "=", "self", ".", "output", ".", "wcs_pix2world", "(", "pixx", ",", "pixy", ",", "self", ".", "origin", ")", "result", "=", "self", ".", "input", ".", "all_world2pix", "(", "skyx", ",", "skyy", ",", "self", ".", "origin", ")", "return", "result" ]
Transform pixx,pixy positions from the output frame back onto their original positions in the input frame.
[ "Transform", "pixx", "pixy", "positions", "from", "the", "output", "frame", "back", "onto", "their", "original", "positions", "in", "the", "input", "frame", "." ]
python
train
MartinThoma/hwrt
hwrt/utils.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/utils.py#L448-L482
def load_model(model_file): """Load a model by its file. This includes the model itself, but also the preprocessing queue, the feature list and the output semantics. """ # Extract tar with tarfile.open(model_file) as tar: tarfolder = tempfile.mkdtemp() tar.extractall(path=tarfolder) from . import features from . import preprocessing # Get the preprocessing with open(os.path.join(tarfolder, "preprocessing.yml"), 'r') as ymlfile: preprocessing_description = yaml.load(ymlfile) preprocessing_queue = preprocessing.get_preprocessing_queue( preprocessing_description['queue']) # Get the features with open(os.path.join(tarfolder, "features.yml"), 'r') as ymlfile: feature_description = yaml.load(ymlfile) feature_str_list = feature_description['features'] feature_list = features.get_features(feature_str_list) # Get the model import nntoolkit.utils model = nntoolkit.utils.get_model(model_file) output_semantics_file = os.path.join(tarfolder, 'output_semantics.csv') output_semantics = nntoolkit.utils.get_outputs(output_semantics_file) # Cleanup shutil.rmtree(tarfolder) return (preprocessing_queue, feature_list, model, output_semantics)
[ "def", "load_model", "(", "model_file", ")", ":", "# Extract tar", "with", "tarfile", ".", "open", "(", "model_file", ")", "as", "tar", ":", "tarfolder", "=", "tempfile", ".", "mkdtemp", "(", ")", "tar", ".", "extractall", "(", "path", "=", "tarfolder", ")", "from", ".", "import", "features", "from", ".", "import", "preprocessing", "# Get the preprocessing", "with", "open", "(", "os", ".", "path", ".", "join", "(", "tarfolder", ",", "\"preprocessing.yml\"", ")", ",", "'r'", ")", "as", "ymlfile", ":", "preprocessing_description", "=", "yaml", ".", "load", "(", "ymlfile", ")", "preprocessing_queue", "=", "preprocessing", ".", "get_preprocessing_queue", "(", "preprocessing_description", "[", "'queue'", "]", ")", "# Get the features", "with", "open", "(", "os", ".", "path", ".", "join", "(", "tarfolder", ",", "\"features.yml\"", ")", ",", "'r'", ")", "as", "ymlfile", ":", "feature_description", "=", "yaml", ".", "load", "(", "ymlfile", ")", "feature_str_list", "=", "feature_description", "[", "'features'", "]", "feature_list", "=", "features", ".", "get_features", "(", "feature_str_list", ")", "# Get the model", "import", "nntoolkit", ".", "utils", "model", "=", "nntoolkit", ".", "utils", ".", "get_model", "(", "model_file", ")", "output_semantics_file", "=", "os", ".", "path", ".", "join", "(", "tarfolder", ",", "'output_semantics.csv'", ")", "output_semantics", "=", "nntoolkit", ".", "utils", ".", "get_outputs", "(", "output_semantics_file", ")", "# Cleanup", "shutil", ".", "rmtree", "(", "tarfolder", ")", "return", "(", "preprocessing_queue", ",", "feature_list", ",", "model", ",", "output_semantics", ")" ]
Load a model by its file. This includes the model itself, but also the preprocessing queue, the feature list and the output semantics.
[ "Load", "a", "model", "by", "its", "file", ".", "This", "includes", "the", "model", "itself", "but", "also", "the", "preprocessing", "queue", "the", "feature", "list", "and", "the", "output", "semantics", "." ]
python
train
madprime/cgivar2gvcf
cgivar2gvcf/__init__.py
https://github.com/madprime/cgivar2gvcf/blob/13b4cd8da08669f7e4b0ceed77a7a17082f91037/cgivar2gvcf/__init__.py#L65-L117
def process_full_position(data, header, var_only=False): """ Return genetic data when all alleles called on same line. Returns an array containing one item, a tuple of five items: (string) chromosome (string) start position (1-based) (array of strings) matching dbSNP entries (string) reference allele sequence (array of strings) the genome's allele sequences """ feature_type = data[header['varType']] # Skip unmatchable, uncovered, or pseudoautosomal-in-X if (feature_type == 'no-ref' or feature_type.startswith('PAR-called-in-X')): return None if var_only and feature_type in ['no-call', 'ref']: return None filters = [] if feature_type == 'no-call': filters.append('NOCALL') if 'varQuality' in header: if 'VQLOW' in data[header['varQuality']]: filters.append('VQLOW') else: var_filter = data[header['varFilter']] if var_filter and not var_filter == "PASS": filters = filters + var_filter.split(';') chrom = data[header['chromosome']] start = data[header['begin']] ref_allele = data[header['reference']] alleles = [data[header['alleleSeq']]] dbsnp_data = [] dbsnp_data = data[header['xRef']].split(';') assert data[header['ploidy']] in ['1', '2'] if feature_type == 'ref' or feature_type == 'no-call': return [{'chrom': chrom, 'start': start, 'dbsnp_data': dbsnp_data, 'ref_seq': ref_allele, 'alleles': alleles, 'allele_count': data[header['ploidy']], 'filters': filters, 'end': data[header['end']]}] else: return [{'chrom': chrom, 'start': start, 'dbsnp_data': dbsnp_data, 'ref_seq': ref_allele, 'alleles': alleles, 'allele_count': data[header['ploidy']], 'filters': filters}]
[ "def", "process_full_position", "(", "data", ",", "header", ",", "var_only", "=", "False", ")", ":", "feature_type", "=", "data", "[", "header", "[", "'varType'", "]", "]", "# Skip unmatchable, uncovered, or pseudoautosomal-in-X", "if", "(", "feature_type", "==", "'no-ref'", "or", "feature_type", ".", "startswith", "(", "'PAR-called-in-X'", ")", ")", ":", "return", "None", "if", "var_only", "and", "feature_type", "in", "[", "'no-call'", ",", "'ref'", "]", ":", "return", "None", "filters", "=", "[", "]", "if", "feature_type", "==", "'no-call'", ":", "filters", ".", "append", "(", "'NOCALL'", ")", "if", "'varQuality'", "in", "header", ":", "if", "'VQLOW'", "in", "data", "[", "header", "[", "'varQuality'", "]", "]", ":", "filters", ".", "append", "(", "'VQLOW'", ")", "else", ":", "var_filter", "=", "data", "[", "header", "[", "'varFilter'", "]", "]", "if", "var_filter", "and", "not", "var_filter", "==", "\"PASS\"", ":", "filters", "=", "filters", "+", "var_filter", ".", "split", "(", "';'", ")", "chrom", "=", "data", "[", "header", "[", "'chromosome'", "]", "]", "start", "=", "data", "[", "header", "[", "'begin'", "]", "]", "ref_allele", "=", "data", "[", "header", "[", "'reference'", "]", "]", "alleles", "=", "[", "data", "[", "header", "[", "'alleleSeq'", "]", "]", "]", "dbsnp_data", "=", "[", "]", "dbsnp_data", "=", "data", "[", "header", "[", "'xRef'", "]", "]", ".", "split", "(", "';'", ")", "assert", "data", "[", "header", "[", "'ploidy'", "]", "]", "in", "[", "'1'", ",", "'2'", "]", "if", "feature_type", "==", "'ref'", "or", "feature_type", "==", "'no-call'", ":", "return", "[", "{", "'chrom'", ":", "chrom", ",", "'start'", ":", "start", ",", "'dbsnp_data'", ":", "dbsnp_data", ",", "'ref_seq'", ":", "ref_allele", ",", "'alleles'", ":", "alleles", ",", "'allele_count'", ":", "data", "[", "header", "[", "'ploidy'", "]", "]", ",", "'filters'", ":", "filters", ",", "'end'", ":", "data", "[", "header", "[", "'end'", "]", "]", "}", "]", "else", ":", "return", "[", "{", "'chrom'", ":", "chrom", ",", "'start'", ":", "start", ",", "'dbsnp_data'", ":", "dbsnp_data", ",", "'ref_seq'", ":", "ref_allele", ",", "'alleles'", ":", "alleles", ",", "'allele_count'", ":", "data", "[", "header", "[", "'ploidy'", "]", "]", ",", "'filters'", ":", "filters", "}", "]" ]
Return genetic data when all alleles called on same line. Returns an array containing one item, a tuple of five items: (string) chromosome (string) start position (1-based) (array of strings) matching dbSNP entries (string) reference allele sequence (array of strings) the genome's allele sequences
[ "Return", "genetic", "data", "when", "all", "alleles", "called", "on", "same", "line", "." ]
python
train
bukun/TorCMS
torcms/model/wiki_hist_model.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/wiki_hist_model.py#L11-L19
def get_last(postid): ''' Get the last wiki in history. ''' recs = TabWikiHist.select().where( TabWikiHist.wiki_id == postid ).order_by(TabWikiHist.time_update.desc()) return None if recs.count() == 0 else recs.get()
[ "def", "get_last", "(", "postid", ")", ":", "recs", "=", "TabWikiHist", ".", "select", "(", ")", ".", "where", "(", "TabWikiHist", ".", "wiki_id", "==", "postid", ")", ".", "order_by", "(", "TabWikiHist", ".", "time_update", ".", "desc", "(", ")", ")", "return", "None", "if", "recs", ".", "count", "(", ")", "==", "0", "else", "recs", ".", "get", "(", ")" ]
Get the last wiki in history.
[ "Get", "the", "last", "wiki", "in", "history", "." ]
python
train
xeroc/python-graphenelib
graphenecommon/transactionbuilder.py
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/graphenecommon/transactionbuilder.py#L329-L339
def set_fee_asset(self, fee_asset): """ Set asset to fee """ if isinstance(fee_asset, self.amount_class): self.fee_asset_id = fee_asset["id"] elif isinstance(fee_asset, self.asset_class): self.fee_asset_id = fee_asset["id"] elif fee_asset: self.fee_asset_id = fee_asset else: self.fee_asset_id = "1.3.0"
[ "def", "set_fee_asset", "(", "self", ",", "fee_asset", ")", ":", "if", "isinstance", "(", "fee_asset", ",", "self", ".", "amount_class", ")", ":", "self", ".", "fee_asset_id", "=", "fee_asset", "[", "\"id\"", "]", "elif", "isinstance", "(", "fee_asset", ",", "self", ".", "asset_class", ")", ":", "self", ".", "fee_asset_id", "=", "fee_asset", "[", "\"id\"", "]", "elif", "fee_asset", ":", "self", ".", "fee_asset_id", "=", "fee_asset", "else", ":", "self", ".", "fee_asset_id", "=", "\"1.3.0\"" ]
Set asset to fee
[ "Set", "asset", "to", "fee" ]
python
valid
ellmetha/django-machina
machina/core/loading.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/core/loading.py#L82-L98
def _pick_up_classes(modules, classnames): """ Given a list of class names to retrieve, try to fetch them from the specified list of modules and returns the list of the fetched classes. """ klasses = [] for classname in classnames: klass = None for module in modules: if hasattr(module, classname): klass = getattr(module, classname) break if not klass: raise ClassNotFoundError('Error fetching \'{}\' in {}'.format( classname, str([module.__name__ for module in modules])) ) klasses.append(klass) return klasses
[ "def", "_pick_up_classes", "(", "modules", ",", "classnames", ")", ":", "klasses", "=", "[", "]", "for", "classname", "in", "classnames", ":", "klass", "=", "None", "for", "module", "in", "modules", ":", "if", "hasattr", "(", "module", ",", "classname", ")", ":", "klass", "=", "getattr", "(", "module", ",", "classname", ")", "break", "if", "not", "klass", ":", "raise", "ClassNotFoundError", "(", "'Error fetching \\'{}\\' in {}'", ".", "format", "(", "classname", ",", "str", "(", "[", "module", ".", "__name__", "for", "module", "in", "modules", "]", ")", ")", ")", "klasses", ".", "append", "(", "klass", ")", "return", "klasses" ]
Given a list of class names to retrieve, tries to fetch them from the specified list of modules and returns the list of the fetched classes.
[ "Given", "a", "list", "of", "class", "names", "to", "retrieve", "try", "to", "fetch", "them", "from", "the", "specified", "list", "of", "modules", "and", "returns", "the", "list", "of", "the", "fetched", "classes", "." ]
python
train
scieloorg/accessstatsapi
accessstats/queries.py
https://github.com/scieloorg/accessstatsapi/blob/8092d76bedab9e82efce4005f9bcd21fb94e8e98/accessstats/queries.py#L38-L115
def downloads_per_year(collection, code, raw=False): """ This method retrieve the total of downloads per year. arguments collection: SciELO 3 letters Acronym code: (Journal ISSN, Issue PID, Article PID) return [ ("2017", "20101"), ("2016", "11201"), ("2015", "12311"), ... ] """ tc = ThriftClient() body = {"query": {"filtered": {}}} fltr = {} query = { "query": { "bool": { "must": [ { "match": { "collection": collection } } ] } } } aggs = { "aggs": { "access_year": { "terms": { "field": "access_year", "size": 0, "order": { "_term": "asc" } }, "aggs": { "access_total": { "sum": { "field": "access_total" } } } } } } body['query']['filtered'].update(fltr) body['query']['filtered'].update(query) body.update(aggs) code_type = _code_type(code) if code_type: query["query"]["bool"]["must"].append({ "match": { code_type: code } }) query_parameters = [ ('size', '0') ] query_result = tc.search(json.dumps(body), query_parameters) return query_result if raw is True else _compute_downloads_per_year(query_result)
[ "def", "downloads_per_year", "(", "collection", ",", "code", ",", "raw", "=", "False", ")", ":", "tc", "=", "ThriftClient", "(", ")", "body", "=", "{", "\"query\"", ":", "{", "\"filtered\"", ":", "{", "}", "}", "}", "fltr", "=", "{", "}", "query", "=", "{", "\"query\"", ":", "{", "\"bool\"", ":", "{", "\"must\"", ":", "[", "{", "\"match\"", ":", "{", "\"collection\"", ":", "collection", "}", "}", "]", "}", "}", "}", "aggs", "=", "{", "\"aggs\"", ":", "{", "\"access_year\"", ":", "{", "\"terms\"", ":", "{", "\"field\"", ":", "\"access_year\"", ",", "\"size\"", ":", "0", ",", "\"order\"", ":", "{", "\"_term\"", ":", "\"asc\"", "}", "}", ",", "\"aggs\"", ":", "{", "\"access_total\"", ":", "{", "\"sum\"", ":", "{", "\"field\"", ":", "\"access_total\"", "}", "}", "}", "}", "}", "}", "body", "[", "'query'", "]", "[", "'filtered'", "]", ".", "update", "(", "fltr", ")", "body", "[", "'query'", "]", "[", "'filtered'", "]", ".", "update", "(", "query", ")", "body", ".", "update", "(", "aggs", ")", "code_type", "=", "_code_type", "(", "code", ")", "if", "code_type", ":", "query", "[", "\"query\"", "]", "[", "\"bool\"", "]", "[", "\"must\"", "]", ".", "append", "(", "{", "\"match\"", ":", "{", "code_type", ":", "code", "}", "}", ")", "query_parameters", "=", "[", "(", "'size'", ",", "'0'", ")", "]", "query_result", "=", "tc", ".", "search", "(", "json", ".", "dumps", "(", "body", ")", ",", "query_parameters", ")", "return", "query_result", "if", "raw", "is", "True", "else", "_compute_downloads_per_year", "(", "query_result", ")" ]
This method retrieves the total of downloads per year. arguments collection: SciELO 3 letters Acronym code: (Journal ISSN, Issue PID, Article PID) return [ ("2017", "20101"), ("2016", "11201"), ("2015", "12311"), ... ]
[ "This", "method", "retrieve", "the", "total", "of", "downloads", "per", "year", "." ]
python
train
pyviz/holoviews
holoviews/core/dimension.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/dimension.py#L97-L129
def process_dimensions(kdims, vdims): """Converts kdims and vdims to Dimension objects. Args: kdims: List or single key dimension(s) specified as strings, tuples dicts or Dimension objects. vdims: List or single value dimension(s) specified as strings, tuples dicts or Dimension objects. Returns: Dictionary containing kdims and vdims converted to Dimension objects: {'kdims': [Dimension('x')], 'vdims': [Dimension('y')] """ dimensions = {} for group, dims in [('kdims', kdims), ('vdims', vdims)]: if dims is None: continue elif isinstance(dims, (tuple, basestring, Dimension, dict)): dims = [dims] elif not isinstance(dims, list): raise ValueError("%s argument expects a Dimension or list of dimensions, " "specified as tuples, strings, dictionaries or Dimension " "instances, not a %s type. Ensure you passed the data as the " "first argument." % (group, type(dims).__name__)) for dim in dims: if not isinstance(dim, (tuple, basestring, Dimension, dict)): raise ValueError('Dimensions must be defined as a tuple, ' 'string, dictionary or Dimension instance, ' 'found a %s type.' % type(dim).__name__) dimensions[group] = [asdim(d) for d in dims] return dimensions
[ "def", "process_dimensions", "(", "kdims", ",", "vdims", ")", ":", "dimensions", "=", "{", "}", "for", "group", ",", "dims", "in", "[", "(", "'kdims'", ",", "kdims", ")", ",", "(", "'vdims'", ",", "vdims", ")", "]", ":", "if", "dims", "is", "None", ":", "continue", "elif", "isinstance", "(", "dims", ",", "(", "tuple", ",", "basestring", ",", "Dimension", ",", "dict", ")", ")", ":", "dims", "=", "[", "dims", "]", "elif", "not", "isinstance", "(", "dims", ",", "list", ")", ":", "raise", "ValueError", "(", "\"%s argument expects a Dimension or list of dimensions, \"", "\"specified as tuples, strings, dictionaries or Dimension \"", "\"instances, not a %s type. Ensure you passed the data as the \"", "\"first argument.\"", "%", "(", "group", ",", "type", "(", "dims", ")", ".", "__name__", ")", ")", "for", "dim", "in", "dims", ":", "if", "not", "isinstance", "(", "dim", ",", "(", "tuple", ",", "basestring", ",", "Dimension", ",", "dict", ")", ")", ":", "raise", "ValueError", "(", "'Dimensions must be defined as a tuple, '", "'string, dictionary or Dimension instance, '", "'found a %s type.'", "%", "type", "(", "dim", ")", ".", "__name__", ")", "dimensions", "[", "group", "]", "=", "[", "asdim", "(", "d", ")", "for", "d", "in", "dims", "]", "return", "dimensions" ]
Converts kdims and vdims to Dimension objects. Args: kdims: List or single key dimension(s) specified as strings, tuples, dicts or Dimension objects. vdims: List or single value dimension(s) specified as strings, tuples, dicts or Dimension objects. Returns: Dictionary containing kdims and vdims converted to Dimension objects: {'kdims': [Dimension('x')], 'vdims': [Dimension('y')]}
[ "Converts", "kdims", "and", "vdims", "to", "Dimension", "objects", "." ]
python
train
manns/pyspread
pyspread/src/actions/_main_window_actions.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/actions/_main_window_actions.py#L803-L853
def launch_help(self, helpname, filename): """Generic help launcher Launches HTMLWindow that shows content of filename or the Internet page with the filename url Parameters ---------- filename: String \thtml file or url """ # Set up window position = config["help_window_position"] size = config["help_window_size"] self.help_window = wx.Frame(self.main_window, -1, helpname, position, size) self.help_htmlwindow = wx.html.HtmlWindow(self.help_window, -1, (0, 0), size) self.help_window.Bind(wx.EVT_MOVE, self.OnHelpMove) self.help_window.Bind(wx.EVT_SIZE, self.OnHelpSize) self.help_htmlwindow.Bind(wx.EVT_RIGHT_DOWN, self.OnHelpBack) self.help_htmlwindow.Bind(wx.html.EVT_HTML_LINK_CLICKED, lambda e: self.open_external_links(e)) self.help_htmlwindow.Bind(wx.EVT_MOUSEWHEEL, lambda e: self.zoom_html(e)) # Get help data current_path = os.getcwd() os.chdir(get_help_path()) try: if os.path.isfile(filename): self.help_htmlwindow.LoadFile(filename) else: self.help_htmlwindow.LoadPage(filename) except IOError: self.help_htmlwindow.LoadPage(filename) # Show tutorial window self.help_window.Show() os.chdir(current_path)
[ "def", "launch_help", "(", "self", ",", "helpname", ",", "filename", ")", ":", "# Set up window", "position", "=", "config", "[", "\"help_window_position\"", "]", "size", "=", "config", "[", "\"help_window_size\"", "]", "self", ".", "help_window", "=", "wx", ".", "Frame", "(", "self", ".", "main_window", ",", "-", "1", ",", "helpname", ",", "position", ",", "size", ")", "self", ".", "help_htmlwindow", "=", "wx", ".", "html", ".", "HtmlWindow", "(", "self", ".", "help_window", ",", "-", "1", ",", "(", "0", ",", "0", ")", ",", "size", ")", "self", ".", "help_window", ".", "Bind", "(", "wx", ".", "EVT_MOVE", ",", "self", ".", "OnHelpMove", ")", "self", ".", "help_window", ".", "Bind", "(", "wx", ".", "EVT_SIZE", ",", "self", ".", "OnHelpSize", ")", "self", ".", "help_htmlwindow", ".", "Bind", "(", "wx", ".", "EVT_RIGHT_DOWN", ",", "self", ".", "OnHelpBack", ")", "self", ".", "help_htmlwindow", ".", "Bind", "(", "wx", ".", "html", ".", "EVT_HTML_LINK_CLICKED", ",", "lambda", "e", ":", "self", ".", "open_external_links", "(", "e", ")", ")", "self", ".", "help_htmlwindow", ".", "Bind", "(", "wx", ".", "EVT_MOUSEWHEEL", ",", "lambda", "e", ":", "self", ".", "zoom_html", "(", "e", ")", ")", "# Get help data", "current_path", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "get_help_path", "(", ")", ")", "try", ":", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "self", ".", "help_htmlwindow", ".", "LoadFile", "(", "filename", ")", "else", ":", "self", ".", "help_htmlwindow", ".", "LoadPage", "(", "filename", ")", "except", "IOError", ":", "self", ".", "help_htmlwindow", ".", "LoadPage", "(", "filename", ")", "# Show tutorial window", "self", ".", "help_window", ".", "Show", "(", ")", "os", ".", "chdir", "(", "current_path", ")" ]
Generic help launcher Launches HTMLWindow that shows content of filename or the Internet page with the filename url Parameters ---------- filename: String \thtml file or url
[ "Generic", "help", "launcher" ]
python
train
log2timeline/plaso
plaso/storage/sqlite/sqlite_file.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/storage/sqlite/sqlite_file.py#L652-L667
def AddEventTags(self, event_tags): """Adds event tags. Args: event_tags (list[EventTag]): event tags. Raises: IOError: when the storage file is closed or read-only or if the event tags cannot be serialized. OSError: when the storage file is closed or read-only or if the event tags cannot be serialized. """ self._RaiseIfNotWritable() for event_tag in event_tags: self.AddEventTag(event_tag)
[ "def", "AddEventTags", "(", "self", ",", "event_tags", ")", ":", "self", ".", "_RaiseIfNotWritable", "(", ")", "for", "event_tag", "in", "event_tags", ":", "self", ".", "AddEventTag", "(", "event_tag", ")" ]
Adds event tags. Args: event_tags (list[EventTag]): event tags. Raises: IOError: when the storage file is closed or read-only or if the event tags cannot be serialized. OSError: when the storage file is closed or read-only or if the event tags cannot be serialized.
[ "Adds", "event", "tags", "." ]
python
train
django-danceschool/django-danceschool
danceschool/private_lessons/models.py
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/private_lessons/models.py#L48-L56
def getBasePrice(self,**kwargs): ''' This method overrides the method of the base Event class by checking the pricingTier associated with this PrivateLessonEvent and getting the appropriate price for it. ''' if not self.pricingTier: return None return self.pricingTier.getBasePrice(**kwargs) * max(self.numSlots,1)
[ "def", "getBasePrice", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "pricingTier", ":", "return", "None", "return", "self", ".", "pricingTier", ".", "getBasePrice", "(", "*", "*", "kwargs", ")", "*", "max", "(", "self", ".", "numSlots", ",", "1", ")" ]
This method overrides the method of the base Event class by checking the pricingTier associated with this PrivateLessonEvent and getting the appropriate price for it.
[ "This", "method", "overrides", "the", "method", "of", "the", "base", "Event", "class", "by", "checking", "the", "pricingTier", "associated", "with", "this", "PrivateLessonEvent", "and", "getting", "the", "appropriate", "price", "for", "it", "." ]
python
train
spookey/photon
photon/util/locations.py
https://github.com/spookey/photon/blob/57212a26ce713ab7723910ee49e3d0ba1697799f/photon/util/locations.py#L73-L102
def make_locations(locations=None, verbose=True): ''' Creates folders :param locations: A list of folders to create (can be a dictionary, see note below) :param verbose: Warn if any folders were created .. note:: * |params_locations_dict| * |param_locations_none| ''' from photon.util.structures import to_list from photon.util.system import shell_notify if not locations: locations = get_locations().values() locations = to_list(locations) r = list() for p in reversed(sorted(locations)): if not _path.exists(p): _makedirs(p) r.append(p) if verbose and r: shell_notify('path created', state=None, more=r) return r
[ "def", "make_locations", "(", "locations", "=", "None", ",", "verbose", "=", "True", ")", ":", "from", "photon", ".", "util", ".", "structures", "import", "to_list", "from", "photon", ".", "util", ".", "system", "import", "shell_notify", "if", "not", "locations", ":", "locations", "=", "get_locations", "(", ")", ".", "values", "(", ")", "locations", "=", "to_list", "(", "locations", ")", "r", "=", "list", "(", ")", "for", "p", "in", "reversed", "(", "sorted", "(", "locations", ")", ")", ":", "if", "not", "_path", ".", "exists", "(", "p", ")", ":", "_makedirs", "(", "p", ")", "r", ".", "append", "(", "p", ")", "if", "verbose", "and", "r", ":", "shell_notify", "(", "'path created'", ",", "state", "=", "None", ",", "more", "=", "r", ")", "return", "r" ]
Creates folders :param locations: A list of folders to create (can be a dictionary, see note below) :param verbose: Warn if any folders were created .. note:: * |params_locations_dict| * |param_locations_none|
[ "Creates", "folders" ]
python
train
MisterY/asset-allocation
asset_allocation/formatters.py
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/formatters.py#L129-L132
def append_text_column(self, text: str, index: int): """ Add value to the output row, width based on index """ width = self.columns[index]["width"] return f"{text:<{width}}"
[ "def", "append_text_column", "(", "self", ",", "text", ":", "str", ",", "index", ":", "int", ")", ":", "width", "=", "self", ".", "columns", "[", "index", "]", "[", "\"width\"", "]", "return", "f\"{text:<{width}}\"" ]
Add value to the output row, width based on index
[ "Add", "value", "to", "the", "output", "row", "width", "based", "on", "index" ]
python
train
f3at/feat
src/feat/models/call.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/models/call.py#L155-L167
def action_filter(method_name, *args, **kwargs): """ Creates an effect that will call the action's method with the current value and specified arguments and keywords. @param method_name: the name of method belonging to the action. @type method_name: str """ def action_filter(value, context, **_params): method = getattr(context["action"], method_name) return _filter(method, value, args, kwargs) return action_filter
[ "def", "action_filter", "(", "method_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "action_filter", "(", "value", ",", "context", ",", "*", "*", "_params", ")", ":", "method", "=", "getattr", "(", "context", "[", "\"action\"", "]", ",", "method_name", ")", "return", "_filter", "(", "method", ",", "value", ",", "args", ",", "kwargs", ")", "return", "action_filter" ]
Creates an effect that will call the action's method with the current value and specified arguments and keywords. @param method_name: the name of method belonging to the action. @type method_name: str
[ "Creates", "an", "effect", "that", "will", "call", "the", "action", "s", "method", "with", "the", "current", "value", "and", "specified", "arguments", "and", "keywords", "." ]
python
train
kennethreitz/maya
maya/core.py
https://github.com/kennethreitz/maya/blob/774b141d91a83a5d77cb5351db3d02bf50564b21/maya/core.py#L142-L146
def add(self, **kwargs): """Returns a new MayaDT object with the given offsets.""" return self.from_datetime( pendulum.instance(self.datetime()).add(**kwargs) )
[ "def", "add", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "from_datetime", "(", "pendulum", ".", "instance", "(", "self", ".", "datetime", "(", ")", ")", ".", "add", "(", "*", "*", "kwargs", ")", ")" ]
Returns a new MayaDT object with the given offsets.
[ "Returns", "a", "new", "MayaDT", "object", "with", "the", "given", "offsets", "." ]
python
train
ladybug-tools/uwg
uwg/uwg.py
https://github.com/ladybug-tools/uwg/blob/fb71f656b3cb69e7ccf1d851dff862e14fa210fc/uwg/uwg.py#L612-L727
def init_input_obj(self): """Section 4 - Create uwg objects from input parameters self.simTime # simulation time parameter obj self.weather # weather obj for simulation time period self.forcIP # Forcing obj self.forc # Empty forcing obj self.geoParam # geographic parameters obj self.RSM # Rural site & vertical diffusion model obj self.USM # Urban site & vertical diffusion model obj self.UCM # Urban canopy model obj self.UBL # Urban boundary layer model self.road # urban road element self.rural # rural road element self.soilindex1 # soil index for urban rsoad depth self.soilindex2 # soil index for rural road depth self.Sch # list of Schedule objects """ climate_file_path = os.path.join(self.epwDir, self.epwFileName) self.simTime = SimParam(self.dtSim, self.dtWeather, self.Month, self.Day, self.nDay) # simulation time parametrs # weather file data for simulation time period self.weather = Weather(climate_file_path, self.simTime.timeInitial, self.simTime.timeFinal) self.forcIP = Forcing(self.weather.staTemp, self.weather) # initialized Forcing class self.forc = Forcing() # empty forcing class # Initialize geographic Param and Urban Boundary Layer Objects nightStart = 18. # arbitrary values for begin/end hour for night setpoint nightEnd = 8. maxdx = 250. # max dx (m) self.geoParam = Param(self.h_ubl1, self.h_ubl2, self.h_ref, self.h_temp, self.h_wind, self.c_circ, self.maxDay, self.maxNight, self.latTree, self.latGrss, self.albVeg, self.vegStart, self.vegEnd, nightStart, nightEnd, self.windMin, self.WGMAX, self.c_exch, maxdx, self.G, self.CP, self.VK, self.R, self.RV, self.LV, math.pi, self.SIGMA, self.WATERDENS, self.LVTT, self.TT, self.ESTT, self.CL, self.CPV, self.B, self.CM, self.COLBURN) self.UBL = UBLDef( 'C', self.charLength, self.weather.staTemp[0], maxdx, self.geoParam.dayBLHeight, self.geoParam.nightBLHeight) # Defining road emis = 0.93 asphalt = Material(self.kRoad, self.cRoad, 'asphalt') road_T_init = 293. road_horizontal = 1 # fraction of surface vegetation coverage road_veg_coverage = min(self.vegCover/(1-self.bldDensity), 1.) 
# define road layers road_layer_num = int(math.ceil(self.d_road/0.05)) # 0.5/0.05 ~ 10 x 1 matrix of 0.05 thickness thickness_vector = [0.05 for r in range(road_layer_num)] material_vector = [asphalt for r in range(road_layer_num)] self.road = Element(self.alb_road, emis, thickness_vector, material_vector, road_veg_coverage, road_T_init, road_horizontal, name="urban_road") self.rural = copy.deepcopy(self.road) self.rural.vegCoverage = self.rurVegCover self.rural._name = "rural_road" # Reference site class (also include VDM) self.RSM = RSMDef(self.lat, self.lon, self.GMT, self.h_obs, self.weather.staTemp[0], self.weather.staPres[0], self.geoParam, self.z_meso_dir_path) self.USM = RSMDef(self.lat, self.lon, self.GMT, self.bldHeight/10., self.weather.staTemp[0], self.weather.staPres[0], self.geoParam, self.z_meso_dir_path) T_init = self.weather.staTemp[0] H_init = self.weather.staHum[0] self.UCM = UCMDef(self.bldHeight, self.bldDensity, self.verToHor, self.treeCoverage, self.sensAnth, self.latAnth, T_init, H_init, self.weather.staUmod[0], self.geoParam, self.r_glaze_total, self.SHGC_total, self.alb_wall_total, self.road) self.UCM.h_mix = self.h_mix # Define Road Element & buffer to match ground temperature depth roadMat, newthickness = procMat(self.road, self.MAXTHICKNESS, self.MINTHICKNESS) for i in range(self.nSoil): # if soil depth is greater then the thickness of the road # we add new slices of soil at max thickness until road is greater or equal is_soildepth_equal = self.is_near_zero(self.depth_soil[i][0] - sum(newthickness), 1e-15) if is_soildepth_equal or (self.depth_soil[i][0] > sum(newthickness)): while self.depth_soil[i][0] > sum(newthickness): newthickness.append(self.MAXTHICKNESS) roadMat.append(self.SOIL) self.soilindex1 = i break self.road = Element(self.road.albedo, self.road.emissivity, newthickness, roadMat, self.road.vegCoverage, self.road.layerTemp[0], self.road.horizontal, self.road._name) # Define Rural Element ruralMat, newthickness = procMat(self.rural, self.MAXTHICKNESS, self.MINTHICKNESS) for i in range(self.nSoil): # if soil depth is greater then the thickness of the road # we add new slices of soil at max thickness until road is greater or equal is_soildepth_equal = self.is_near_zero(self.depth_soil[i][0] - sum(newthickness), 1e-15) if is_soildepth_equal or (self.depth_soil[i][0] > sum(newthickness)): while self.depth_soil[i][0] > sum(newthickness): newthickness.append(self.MAXTHICKNESS) ruralMat.append(self.SOIL) self.soilindex2 = i break self.rural = Element(self.rural.albedo, self.rural.emissivity, newthickness, ruralMat, self.rural.vegCoverage, self.rural.layerTemp[0], self.rural.horizontal, self.rural._name)
[ "def", "init_input_obj", "(", "self", ")", ":", "climate_file_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "epwDir", ",", "self", ".", "epwFileName", ")", "self", ".", "simTime", "=", "SimParam", "(", "self", ".", "dtSim", ",", "self", ".", "dtWeather", ",", "self", ".", "Month", ",", "self", ".", "Day", ",", "self", ".", "nDay", ")", "# simulation time parametrs\r", "# weather file data for simulation time period\r", "self", ".", "weather", "=", "Weather", "(", "climate_file_path", ",", "self", ".", "simTime", ".", "timeInitial", ",", "self", ".", "simTime", ".", "timeFinal", ")", "self", ".", "forcIP", "=", "Forcing", "(", "self", ".", "weather", ".", "staTemp", ",", "self", ".", "weather", ")", "# initialized Forcing class\r", "self", ".", "forc", "=", "Forcing", "(", ")", "# empty forcing class\r", "# Initialize geographic Param and Urban Boundary Layer Objects\r", "nightStart", "=", "18.", "# arbitrary values for begin/end hour for night setpoint\r", "nightEnd", "=", "8.", "maxdx", "=", "250.", "# max dx (m)\r", "self", ".", "geoParam", "=", "Param", "(", "self", ".", "h_ubl1", ",", "self", ".", "h_ubl2", ",", "self", ".", "h_ref", ",", "self", ".", "h_temp", ",", "self", ".", "h_wind", ",", "self", ".", "c_circ", ",", "self", ".", "maxDay", ",", "self", ".", "maxNight", ",", "self", ".", "latTree", ",", "self", ".", "latGrss", ",", "self", ".", "albVeg", ",", "self", ".", "vegStart", ",", "self", ".", "vegEnd", ",", "nightStart", ",", "nightEnd", ",", "self", ".", "windMin", ",", "self", ".", "WGMAX", ",", "self", ".", "c_exch", ",", "maxdx", ",", "self", ".", "G", ",", "self", ".", "CP", ",", "self", ".", "VK", ",", "self", ".", "R", ",", "self", ".", "RV", ",", "self", ".", "LV", ",", "math", ".", "pi", ",", "self", ".", "SIGMA", ",", "self", ".", "WATERDENS", ",", "self", ".", "LVTT", ",", "self", ".", "TT", ",", "self", ".", "ESTT", ",", "self", ".", "CL", ",", "self", ".", "CPV", ",", "self", ".", "B", ",", "self", ".", "CM", ",", "self", ".", "COLBURN", ")", "self", ".", "UBL", "=", "UBLDef", "(", "'C'", ",", "self", ".", "charLength", ",", "self", ".", "weather", ".", "staTemp", "[", "0", "]", ",", "maxdx", ",", "self", ".", "geoParam", ".", "dayBLHeight", ",", "self", ".", "geoParam", ".", "nightBLHeight", ")", "# Defining road\r", "emis", "=", "0.93", "asphalt", "=", "Material", "(", "self", ".", "kRoad", ",", "self", ".", "cRoad", ",", "'asphalt'", ")", "road_T_init", "=", "293.", "road_horizontal", "=", "1", "# fraction of surface vegetation coverage\r", "road_veg_coverage", "=", "min", "(", "self", ".", "vegCover", "/", "(", "1", "-", "self", ".", "bldDensity", ")", ",", "1.", ")", "# define road layers\r", "road_layer_num", "=", "int", "(", "math", ".", "ceil", "(", "self", ".", "d_road", "/", "0.05", ")", ")", "# 0.5/0.05 ~ 10 x 1 matrix of 0.05 thickness\r", "thickness_vector", "=", "[", "0.05", "for", "r", "in", "range", "(", "road_layer_num", ")", "]", "material_vector", "=", "[", "asphalt", "for", "r", "in", "range", "(", "road_layer_num", ")", "]", "self", ".", "road", "=", "Element", "(", "self", ".", "alb_road", ",", "emis", ",", "thickness_vector", ",", "material_vector", ",", "road_veg_coverage", ",", "road_T_init", ",", "road_horizontal", ",", "name", "=", "\"urban_road\"", ")", "self", ".", "rural", "=", "copy", ".", "deepcopy", "(", "self", ".", "road", ")", "self", ".", "rural", ".", "vegCoverage", "=", "self", ".", "rurVegCover", "self", ".", "rural", ".", "_name", "=", "\"rural_road\"", "# Reference site class (also include VDM)\r", 
"self", ".", "RSM", "=", "RSMDef", "(", "self", ".", "lat", ",", "self", ".", "lon", ",", "self", ".", "GMT", ",", "self", ".", "h_obs", ",", "self", ".", "weather", ".", "staTemp", "[", "0", "]", ",", "self", ".", "weather", ".", "staPres", "[", "0", "]", ",", "self", ".", "geoParam", ",", "self", ".", "z_meso_dir_path", ")", "self", ".", "USM", "=", "RSMDef", "(", "self", ".", "lat", ",", "self", ".", "lon", ",", "self", ".", "GMT", ",", "self", ".", "bldHeight", "/", "10.", ",", "self", ".", "weather", ".", "staTemp", "[", "0", "]", ",", "self", ".", "weather", ".", "staPres", "[", "0", "]", ",", "self", ".", "geoParam", ",", "self", ".", "z_meso_dir_path", ")", "T_init", "=", "self", ".", "weather", ".", "staTemp", "[", "0", "]", "H_init", "=", "self", ".", "weather", ".", "staHum", "[", "0", "]", "self", ".", "UCM", "=", "UCMDef", "(", "self", ".", "bldHeight", ",", "self", ".", "bldDensity", ",", "self", ".", "verToHor", ",", "self", ".", "treeCoverage", ",", "self", ".", "sensAnth", ",", "self", ".", "latAnth", ",", "T_init", ",", "H_init", ",", "self", ".", "weather", ".", "staUmod", "[", "0", "]", ",", "self", ".", "geoParam", ",", "self", ".", "r_glaze_total", ",", "self", ".", "SHGC_total", ",", "self", ".", "alb_wall_total", ",", "self", ".", "road", ")", "self", ".", "UCM", ".", "h_mix", "=", "self", ".", "h_mix", "# Define Road Element & buffer to match ground temperature depth\r", "roadMat", ",", "newthickness", "=", "procMat", "(", "self", ".", "road", ",", "self", ".", "MAXTHICKNESS", ",", "self", ".", "MINTHICKNESS", ")", "for", "i", "in", "range", "(", "self", ".", "nSoil", ")", ":", "# if soil depth is greater then the thickness of the road\r", "# we add new slices of soil at max thickness until road is greater or equal\r", "is_soildepth_equal", "=", "self", ".", "is_near_zero", "(", "self", ".", "depth_soil", "[", "i", "]", "[", "0", "]", "-", "sum", "(", "newthickness", ")", ",", "1e-15", ")", "if", "is_soildepth_equal", "or", "(", "self", ".", "depth_soil", "[", "i", "]", "[", "0", "]", ">", "sum", "(", "newthickness", ")", ")", ":", "while", "self", ".", "depth_soil", "[", "i", "]", "[", "0", "]", ">", "sum", "(", "newthickness", ")", ":", "newthickness", ".", "append", "(", "self", ".", "MAXTHICKNESS", ")", "roadMat", ".", "append", "(", "self", ".", "SOIL", ")", "self", ".", "soilindex1", "=", "i", "break", "self", ".", "road", "=", "Element", "(", "self", ".", "road", ".", "albedo", ",", "self", ".", "road", ".", "emissivity", ",", "newthickness", ",", "roadMat", ",", "self", ".", "road", ".", "vegCoverage", ",", "self", ".", "road", ".", "layerTemp", "[", "0", "]", ",", "self", ".", "road", ".", "horizontal", ",", "self", ".", "road", ".", "_name", ")", "# Define Rural Element\r", "ruralMat", ",", "newthickness", "=", "procMat", "(", "self", ".", "rural", ",", "self", ".", "MAXTHICKNESS", ",", "self", ".", "MINTHICKNESS", ")", "for", "i", "in", "range", "(", "self", ".", "nSoil", ")", ":", "# if soil depth is greater then the thickness of the road\r", "# we add new slices of soil at max thickness until road is greater or equal\r", "is_soildepth_equal", "=", "self", ".", "is_near_zero", "(", "self", ".", "depth_soil", "[", "i", "]", "[", "0", "]", "-", "sum", "(", "newthickness", ")", ",", "1e-15", ")", "if", "is_soildepth_equal", "or", "(", "self", ".", "depth_soil", "[", "i", "]", "[", "0", "]", ">", "sum", "(", "newthickness", ")", ")", ":", "while", "self", ".", "depth_soil", "[", "i", "]", "[", "0", "]", ">", "sum", "(", "newthickness", ")", ":", 
"newthickness", ".", "append", "(", "self", ".", "MAXTHICKNESS", ")", "ruralMat", ".", "append", "(", "self", ".", "SOIL", ")", "self", ".", "soilindex2", "=", "i", "break", "self", ".", "rural", "=", "Element", "(", "self", ".", "rural", ".", "albedo", ",", "self", ".", "rural", ".", "emissivity", ",", "newthickness", ",", "ruralMat", ",", "self", ".", "rural", ".", "vegCoverage", ",", "self", ".", "rural", ".", "layerTemp", "[", "0", "]", ",", "self", ".", "rural", ".", "horizontal", ",", "self", ".", "rural", ".", "_name", ")" ]
Section 4 - Create uwg objects from input parameters self.simTime # simulation time parameter obj self.weather # weather obj for simulation time period self.forcIP # Forcing obj self.forc # Empty forcing obj self.geoParam # geographic parameters obj self.RSM # Rural site & vertical diffusion model obj self.USM # Urban site & vertical diffusion model obj self.UCM # Urban canopy model obj self.UBL # Urban boundary layer model self.road # urban road element self.rural # rural road element self.soilindex1 # soil index for urban road depth self.soilindex2 # soil index for rural road depth self.Sch # list of Schedule objects
[ "Section", "4", "-", "Create", "uwg", "objects", "from", "input", "parameters", "self", ".", "simTime", "#", "simulation", "time", "parameter", "obj", "self", ".", "weather", "#", "weather", "obj", "for", "simulation", "time", "period", "self", ".", "forcIP", "#", "Forcing", "obj", "self", ".", "forc", "#", "Empty", "forcing", "obj", "self", ".", "geoParam", "#", "geographic", "parameters", "obj", "self", ".", "RSM", "#", "Rural", "site", "&", "vertical", "diffusion", "model", "obj", "self", ".", "USM", "#", "Urban", "site", "&", "vertical", "diffusion", "model", "obj", "self", ".", "UCM", "#", "Urban", "canopy", "model", "obj", "self", ".", "UBL", "#", "Urban", "boundary", "layer", "model", "self", ".", "road", "#", "urban", "road", "element", "self", ".", "rural", "#", "rural", "road", "element", "self", ".", "soilindex1", "#", "soil", "index", "for", "urban", "rsoad", "depth", "self", ".", "soilindex2", "#", "soil", "index", "for", "rural", "road", "depth", "self", ".", "Sch", "#", "list", "of", "Schedule", "objects" ]
python
train
wummel/linkchecker
linkcheck/url.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/url.py#L431-L441
def match_host (host, domainlist): """Return True if host matches an entry in given domain list.""" if not host: return False for domain in domainlist: if domain.startswith('.'): if host.endswith(domain): return True elif host == domain: return True return False
[ "def", "match_host", "(", "host", ",", "domainlist", ")", ":", "if", "not", "host", ":", "return", "False", "for", "domain", "in", "domainlist", ":", "if", "domain", ".", "startswith", "(", "'.'", ")", ":", "if", "host", ".", "endswith", "(", "domain", ")", ":", "return", "True", "elif", "host", "==", "domain", ":", "return", "True", "return", "False" ]
Return True if host matches an entry in given domain list.
[ "Return", "True", "if", "host", "matches", "an", "entry", "in", "given", "domain", "list", "." ]
python
train
wummel/linkchecker
third_party/dnspython/dns/name.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/name.py#L534-L594
def from_unicode(text, origin = root): """Convert unicode text into a Name object. Lables are encoded in IDN ACE form. @rtype: dns.name.Name object """ if not isinstance(text, unicode): raise ValueError("input to from_unicode() must be a unicode string") if not (origin is None or isinstance(origin, Name)): raise ValueError("origin must be a Name or None") labels = [] label = u'' escaping = False edigits = 0 total = 0 if text == u'@': text = u'' if text: if text == u'.': return Name(['']) # no Unicode "u" on this constant! for c in text: if escaping: if edigits == 0: if c.isdigit(): total = int(c) edigits += 1 else: label += c escaping = False else: if not c.isdigit(): raise BadEscape total *= 10 total += int(c) edigits += 1 if edigits == 3: escaping = False label += chr(total) elif c == u'.' or c == u'\u3002' or \ c == u'\uff0e' or c == u'\uff61': if len(label) == 0: raise EmptyLabel labels.append(encodings.idna.ToASCII(label)) label = u'' elif c == u'\\': escaping = True edigits = 0 total = 0 else: label += c if escaping: raise BadEscape if len(label) > 0: labels.append(encodings.idna.ToASCII(label)) else: labels.append('') if (len(labels) == 0 or labels[-1] != '') and not origin is None: labels.extend(list(origin.labels)) return Name(labels)
[ "def", "from_unicode", "(", "text", ",", "origin", "=", "root", ")", ":", "if", "not", "isinstance", "(", "text", ",", "unicode", ")", ":", "raise", "ValueError", "(", "\"input to from_unicode() must be a unicode string\"", ")", "if", "not", "(", "origin", "is", "None", "or", "isinstance", "(", "origin", ",", "Name", ")", ")", ":", "raise", "ValueError", "(", "\"origin must be a Name or None\"", ")", "labels", "=", "[", "]", "label", "=", "u''", "escaping", "=", "False", "edigits", "=", "0", "total", "=", "0", "if", "text", "==", "u'@'", ":", "text", "=", "u''", "if", "text", ":", "if", "text", "==", "u'.'", ":", "return", "Name", "(", "[", "''", "]", ")", "# no Unicode \"u\" on this constant!", "for", "c", "in", "text", ":", "if", "escaping", ":", "if", "edigits", "==", "0", ":", "if", "c", ".", "isdigit", "(", ")", ":", "total", "=", "int", "(", "c", ")", "edigits", "+=", "1", "else", ":", "label", "+=", "c", "escaping", "=", "False", "else", ":", "if", "not", "c", ".", "isdigit", "(", ")", ":", "raise", "BadEscape", "total", "*=", "10", "total", "+=", "int", "(", "c", ")", "edigits", "+=", "1", "if", "edigits", "==", "3", ":", "escaping", "=", "False", "label", "+=", "chr", "(", "total", ")", "elif", "c", "==", "u'.'", "or", "c", "==", "u'\\u3002'", "or", "c", "==", "u'\\uff0e'", "or", "c", "==", "u'\\uff61'", ":", "if", "len", "(", "label", ")", "==", "0", ":", "raise", "EmptyLabel", "labels", ".", "append", "(", "encodings", ".", "idna", ".", "ToASCII", "(", "label", ")", ")", "label", "=", "u''", "elif", "c", "==", "u'\\\\'", ":", "escaping", "=", "True", "edigits", "=", "0", "total", "=", "0", "else", ":", "label", "+=", "c", "if", "escaping", ":", "raise", "BadEscape", "if", "len", "(", "label", ")", ">", "0", ":", "labels", ".", "append", "(", "encodings", ".", "idna", ".", "ToASCII", "(", "label", ")", ")", "else", ":", "labels", ".", "append", "(", "''", ")", "if", "(", "len", "(", "labels", ")", "==", "0", "or", "labels", "[", "-", "1", "]", "!=", "''", ")", "and", "not", "origin", "is", "None", ":", "labels", ".", "extend", "(", "list", "(", "origin", ".", "labels", ")", ")", "return", "Name", "(", "labels", ")" ]
Convert unicode text into a Name object. Labels are encoded in IDN ACE form. @rtype: dns.name.Name object
[ "Convert", "unicode", "text", "into", "a", "Name", "object", "." ]
python
train
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L184-L187
def any_channel_validate_token_create(self, data, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/channel_framework#validate-token" api_path = "/api/v2/any_channel/validate_token" return self.call(api_path, method="POST", data=data, **kwargs)
[ "def", "any_channel_validate_token_create", "(", "self", ",", "data", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/any_channel/validate_token\"", "return", "self", ".", "call", "(", "api_path", ",", "method", "=", "\"POST\"", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/core/channel_framework#validate-token
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "channel_framework#validate", "-", "token" ]
python
train
carlitux/turboengine
src/turboengine/decorators.py
https://github.com/carlitux/turboengine/blob/627b6dbc400d8c16e2ff7e17afd01915371ea287/src/turboengine/decorators.py#L29-L39
def login_required(method): """A decorator that control if a user is logged.""" def wrapper(self, *arg, **karg): if not self.user: if self.request.method == "GET": self.redirect(settings.LOGIN_PATH) else: self.error(403) else: method(self, *arg, **karg) return wrapper
[ "def", "login_required", "(", "method", ")", ":", "def", "wrapper", "(", "self", ",", "*", "arg", ",", "*", "*", "karg", ")", ":", "if", "not", "self", ".", "user", ":", "if", "self", ".", "request", ".", "method", "==", "\"GET\"", ":", "self", ".", "redirect", "(", "settings", ".", "LOGIN_PATH", ")", "else", ":", "self", ".", "error", "(", "403", ")", "else", ":", "method", "(", "self", ",", "*", "arg", ",", "*", "*", "karg", ")", "return", "wrapper" ]
A decorator that controls whether a user is logged in.
[ "A", "decorator", "that", "control", "if", "a", "user", "is", "logged", "." ]
python
train
vinci1it2000/schedula
schedula/utils/dsp.py
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L567-L603
def combine_nested_dicts(*nested_dicts, depth=-1, base=None): """ Merge nested-dictionaries. :param nested_dicts: Nested dictionaries. :type nested_dicts: dict :param depth: Maximum keys depth. :type depth: int, optional :param base: Base dict where combine multiple dicts in one. :type base: dict, optional :return: Combined nested-dictionary. :rtype: dict """ if base is None: base = {} for nested_dict in nested_dicts: for k, v in stack_nested_keys(nested_dict, depth=depth): while k: # noinspection PyBroadException try: get_nested_dicts(base, *k[:-1])[k[-1]] = v break except Exception: # A branch of the nested_dict is longer than the base. k = k[:-1] v = get_nested_dicts(nested_dict, *k) return base
[ "def", "combine_nested_dicts", "(", "*", "nested_dicts", ",", "depth", "=", "-", "1", ",", "base", "=", "None", ")", ":", "if", "base", "is", "None", ":", "base", "=", "{", "}", "for", "nested_dict", "in", "nested_dicts", ":", "for", "k", ",", "v", "in", "stack_nested_keys", "(", "nested_dict", ",", "depth", "=", "depth", ")", ":", "while", "k", ":", "# noinspection PyBroadException", "try", ":", "get_nested_dicts", "(", "base", ",", "*", "k", "[", ":", "-", "1", "]", ")", "[", "k", "[", "-", "1", "]", "]", "=", "v", "break", "except", "Exception", ":", "# A branch of the nested_dict is longer than the base.", "k", "=", "k", "[", ":", "-", "1", "]", "v", "=", "get_nested_dicts", "(", "nested_dict", ",", "*", "k", ")", "return", "base" ]
Merge nested-dictionaries. :param nested_dicts: Nested dictionaries. :type nested_dicts: dict :param depth: Maximum keys depth. :type depth: int, optional :param base: Base dict where combine multiple dicts in one. :type base: dict, optional :return: Combined nested-dictionary. :rtype: dict
[ "Merge", "nested", "-", "dictionaries", "." ]
python
train
aouyar/PyMunin
pysysinfo/ntp.py
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/ntp.py#L29-L49
def getPeerStats(self): """Get NTP Peer Stats for localhost by querying local NTP Server. @return: Dictionary of NTP stats converted to seconds. """ info_dict = {} output = util.exec_command([ntpqCmd, '-n', '-c', 'peers']) for line in output.splitlines(): mobj = re.match('\*(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+', line) if mobj: info_dict['ip'] = mobj.group(1) cols = line.split() info_dict['stratum'] = int(cols[2]) info_dict['delay'] = float(cols[7]) / 1000.0 info_dict['offset'] = float(cols[8]) / 1000.0 info_dict['jitter'] = float(cols[9]) / 1000.0 return info_dict else: raise Exception("Execution of command failed: %s" % ntpqCmd) return info_dict
[ "def", "getPeerStats", "(", "self", ")", ":", "info_dict", "=", "{", "}", "output", "=", "util", ".", "exec_command", "(", "[", "ntpqCmd", ",", "'-n'", ",", "'-c'", ",", "'peers'", "]", ")", "for", "line", "in", "output", ".", "splitlines", "(", ")", ":", "mobj", "=", "re", ".", "match", "(", "'\\*(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})\\s+'", ",", "line", ")", "if", "mobj", ":", "info_dict", "[", "'ip'", "]", "=", "mobj", ".", "group", "(", "1", ")", "cols", "=", "line", ".", "split", "(", ")", "info_dict", "[", "'stratum'", "]", "=", "int", "(", "cols", "[", "2", "]", ")", "info_dict", "[", "'delay'", "]", "=", "float", "(", "cols", "[", "7", "]", ")", "/", "1000.0", "info_dict", "[", "'offset'", "]", "=", "float", "(", "cols", "[", "8", "]", ")", "/", "1000.0", "info_dict", "[", "'jitter'", "]", "=", "float", "(", "cols", "[", "9", "]", ")", "/", "1000.0", "return", "info_dict", "else", ":", "raise", "Exception", "(", "\"Execution of command failed: %s\"", "%", "ntpqCmd", ")", "return", "info_dict" ]
Get NTP Peer Stats for localhost by querying local NTP Server. @return: Dictionary of NTP stats converted to seconds.
[ "Get", "NTP", "Peer", "Stats", "for", "localhost", "by", "querying", "local", "NTP", "Server", ".", "@return", ":", "Dictionary", "of", "NTP", "stats", "converted", "to", "seconds", "." ]
python
train
mushkevych/scheduler
synergy/scheduler/tree.py
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/scheduler/tree.py#L191-L197
def get_next_node(self, process_name): """ :return: <AbstractTreeNode> next node to process by a process with process_name """ if process_name not in self.process_hierarchy: raise ValueError('unable to compute the next_node due to unknown process: {0}'.format(process_name)) time_qualifier = self.process_hierarchy[process_name].process_entry.time_qualifier return self._get_next_node(time_qualifier)
[ "def", "get_next_node", "(", "self", ",", "process_name", ")", ":", "if", "process_name", "not", "in", "self", ".", "process_hierarchy", ":", "raise", "ValueError", "(", "'unable to compute the next_node due to unknown process: {0}'", ".", "format", "(", "process_name", ")", ")", "time_qualifier", "=", "self", ".", "process_hierarchy", "[", "process_name", "]", ".", "process_entry", ".", "time_qualifier", "return", "self", ".", "_get_next_node", "(", "time_qualifier", ")" ]
:return: <AbstractTreeNode> next node to process by a process with process_name
[ ":", "return", ":", "<AbstractTreeNode", ">", "next", "node", "to", "process", "by", "a", "process", "with", "process_name" ]
python
train
devassistant/devassistant
devassistant/yaml_checker.py
https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/yaml_checker.py#L242-L273
def _assert_struct_type(self, struct, name, types, path=None, extra_info=None): """Asserts that given structure is of any of given types. Args: struct: structure to check name: displayable name of the checked structure (e.g. "run_foo" for section run_foo) types: list/tuple of types that are allowed for given struct path: list with a source file as a first element and previous names (as in name argument to this method) as other elements extra_info: extra information to print if error is found (e.g. hint how to fix this) Raises: YamlTypeError: if given struct is not of any given type; error message contains source file and a "path" (e.g. args -> somearg -> flags) specifying where the problem is """ wanted_yaml_typenames = set() for t in types: wanted_yaml_typenames.add(self._get_yaml_typename(t)) wanted_yaml_typenames = ' or '.join(wanted_yaml_typenames) actual_yaml_typename = self._get_yaml_typename(type(struct)) if not isinstance(struct, types): err = [] if path: err.append(self._format_error_path(path + [name])) err.append(' Expected {w} value for "{n}", got value of type {a}: "{v}"'. format(w=wanted_yaml_typenames, n=name, a=actual_yaml_typename, v=struct)) if extra_info: err.append('Tip: ' + extra_info) raise exceptions.YamlTypeError('\n'.join(err))
[ "def", "_assert_struct_type", "(", "self", ",", "struct", ",", "name", ",", "types", ",", "path", "=", "None", ",", "extra_info", "=", "None", ")", ":", "wanted_yaml_typenames", "=", "set", "(", ")", "for", "t", "in", "types", ":", "wanted_yaml_typenames", ".", "add", "(", "self", ".", "_get_yaml_typename", "(", "t", ")", ")", "wanted_yaml_typenames", "=", "' or '", ".", "join", "(", "wanted_yaml_typenames", ")", "actual_yaml_typename", "=", "self", ".", "_get_yaml_typename", "(", "type", "(", "struct", ")", ")", "if", "not", "isinstance", "(", "struct", ",", "types", ")", ":", "err", "=", "[", "]", "if", "path", ":", "err", ".", "append", "(", "self", ".", "_format_error_path", "(", "path", "+", "[", "name", "]", ")", ")", "err", ".", "append", "(", "' Expected {w} value for \"{n}\", got value of type {a}: \"{v}\"'", ".", "format", "(", "w", "=", "wanted_yaml_typenames", ",", "n", "=", "name", ",", "a", "=", "actual_yaml_typename", ",", "v", "=", "struct", ")", ")", "if", "extra_info", ":", "err", ".", "append", "(", "'Tip: '", "+", "extra_info", ")", "raise", "exceptions", ".", "YamlTypeError", "(", "'\\n'", ".", "join", "(", "err", ")", ")" ]
Asserts that given structure is of any of given types.

        Args:
            struct: structure to check
            name: displayable name of the checked structure
                  (e.g. "run_foo" for section run_foo)
            types: list/tuple of types that are allowed for given struct
            path: list with a source file as a first element and previous names
                  (as in name argument to this method) as other elements
            extra_info: extra information to print if error is found
                        (e.g. hint how to fix this)
        Raises:
            YamlTypeError: if given struct is not of any given type; error message
                contains source file and a "path" (e.g. args -> somearg -> flags)
                specifying where the problem is
[ "Asserts", "that", "given", "structure", "is", "of", "any", "of", "given", "types", "." ]
python
train
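A minimal, self-contained sketch of the same type-checking idea; the yaml_typenames mapping and the error format below are illustrative stand-ins, not devassistant's real helpers.

yaml_typenames = {dict: 'mapping', list: 'list', str: 'string',
                  int: 'integer', bool: 'boolean', type(None): 'null'}

def assert_struct_type(struct, name, types, path=()):
    # map Python types to user-facing YAML names, mirroring the role of _get_yaml_typename
    wanted = ' or '.join(sorted(yaml_typenames.get(t, t.__name__) for t in types))
    actual = yaml_typenames.get(type(struct), type(struct).__name__)
    if not isinstance(struct, tuple(types)):
        location = ' -> '.join(list(path) + [name])
        raise TypeError('{0}: expected {1}, got {2}: {3!r}'.format(location, wanted, actual, struct))

assert_struct_type({'flags': ['-f']}, 'somearg', [dict], path=('args',))   # passes silently
# assert_struct_type('-f', 'flags', [list], path=('args', 'somearg'))      # would raise TypeError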
spyder-ide/spyder
spyder/plugins/editor/extensions/docstring.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/extensions/docstring.py#L779-L829
def parse_body(self, text):
        """Parse the function body text."""
        re_raise = re.findall(r'[ \t]raise ([a-zA-Z0-9_]*)', text)
        if len(re_raise) > 0:
            self.raise_list = [x.strip() for x in re_raise]
            # remove duplicates from list while keeping it in the order
            # in python 2.7
            # stackoverflow.com/questions/7961363/removing-duplicates-in-lists
            self.raise_list = list(OrderedDict.fromkeys(self.raise_list))

        re_yield = re.search(r'[ \t]yield ', text)
        if re_yield:
            self.has_yield = True

        # get return value
        pattern_return = r'return |yield '
        line_list = text.split('\n')
        is_found_return = False
        line_return_tmp = ''

        for line in line_list:
            line = line.strip()

            if is_found_return is False:
                if re.match(pattern_return, line):
                    is_found_return = True

            if is_found_return:
                line_return_tmp += line

                # check the integrity of line
                try:
                    pos_quote = self._find_quote_position(line_return_tmp)

                    if line_return_tmp[-1] == '\\':
                        line_return_tmp = line_return_tmp[:-1]
                        continue

                    self._find_bracket_position(line_return_tmp, '(', ')', pos_quote)
                    self._find_bracket_position(line_return_tmp, '{', '}', pos_quote)
                    self._find_bracket_position(line_return_tmp, '[', ']', pos_quote)
                except IndexError:
                    continue

                return_value = re.sub(pattern_return, '', line_return_tmp)
                self.return_value_in_body.append(return_value)

                is_found_return = False
                line_return_tmp = ''
[ "def", "parse_body", "(", "self", ",", "text", ")", ":", "re_raise", "=", "re", ".", "findall", "(", "r'[ \\t]raise ([a-zA-Z0-9_]*)'", ",", "text", ")", "if", "len", "(", "re_raise", ")", ">", "0", ":", "self", ".", "raise_list", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "re_raise", "]", "# remove duplicates from list while keeping it in the order\r", "# in python 2.7\r", "# stackoverflow.com/questions/7961363/removing-duplicates-in-lists\r", "self", ".", "raise_list", "=", "list", "(", "OrderedDict", ".", "fromkeys", "(", "self", ".", "raise_list", ")", ")", "re_yield", "=", "re", ".", "search", "(", "r'[ \\t]yield '", ",", "text", ")", "if", "re_yield", ":", "self", ".", "has_yield", "=", "True", "# get return value\r", "pattern_return", "=", "r'return |yield '", "line_list", "=", "text", ".", "split", "(", "'\\n'", ")", "is_found_return", "=", "False", "line_return_tmp", "=", "''", "for", "line", "in", "line_list", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "is_found_return", "is", "False", ":", "if", "re", ".", "match", "(", "pattern_return", ",", "line", ")", ":", "is_found_return", "=", "True", "if", "is_found_return", ":", "line_return_tmp", "+=", "line", "# check the integrity of line\r", "try", ":", "pos_quote", "=", "self", ".", "_find_quote_position", "(", "line_return_tmp", ")", "if", "line_return_tmp", "[", "-", "1", "]", "==", "'\\\\'", ":", "line_return_tmp", "=", "line_return_tmp", "[", ":", "-", "1", "]", "continue", "self", ".", "_find_bracket_position", "(", "line_return_tmp", ",", "'('", ",", "')'", ",", "pos_quote", ")", "self", ".", "_find_bracket_position", "(", "line_return_tmp", ",", "'{'", ",", "'}'", ",", "pos_quote", ")", "self", ".", "_find_bracket_position", "(", "line_return_tmp", ",", "'['", ",", "']'", ",", "pos_quote", ")", "except", "IndexError", ":", "continue", "return_value", "=", "re", ".", "sub", "(", "pattern_return", ",", "''", ",", "line_return_tmp", ")", "self", ".", "return_value_in_body", ".", "append", "(", "return_value", ")", "is_found_return", "=", "False", "line_return_tmp", "=", "''" ]
Parse the function body text.
[ "Parse", "the", "function", "body", "text", "." ]
python
train
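The raise/yield detection used by parse_body can be shown with a small stand-alone snippet; the function body below is made up for the example.

import re
from collections import OrderedDict

body = """
    if x < 0:
        raise ValueError
    yield x
    return x * 2
"""

re_raise = re.findall(r'[ \t]raise ([a-zA-Z0-9_]*)', body)
raise_list = list(OrderedDict.fromkeys(x.strip() for x in re_raise))
has_yield = re.search(r'[ \t]yield ', body) is not None
print(raise_list, has_yield)   # ['ValueError'] True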
consbio/parserutils
parserutils/numbers.py
https://github.com/consbio/parserutils/blob/f13f80db99ed43479336b116e38512e3566e4623/parserutils/numbers.py#L4-L15
def is_number(num, if_bool=False):
    """ :return: True if num is either an actual number, or an object that converts to one """

    if isinstance(num, bool):
        return if_bool
    elif isinstance(num, int):
        return True

    try:
        number = float(num)
        return not (isnan(number) or isinf(number))
    except (TypeError, ValueError):
        return False
[ "def", "is_number", "(", "num", ",", "if_bool", "=", "False", ")", ":", "if", "isinstance", "(", "num", ",", "bool", ")", ":", "return", "if_bool", "elif", "isinstance", "(", "num", ",", "int", ")", ":", "return", "True", "try", ":", "number", "=", "float", "(", "num", ")", "return", "not", "(", "isnan", "(", "number", ")", "or", "isinf", "(", "number", ")", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "return", "False" ]
:return: True if num is either an actual number, or an object that converts to one
[ ":", "return", ":", "True", "if", "num", "is", "either", "an", "actual", "number", "or", "an", "object", "that", "converts", "to", "one" ]
python
train
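A hedged usage sketch for is_number, assuming the parserutils package is importable; the expected results follow directly from the code above.

from parserutils.numbers import is_number

print(is_number(42))              # True
print(is_number('3.14'))          # True: the string converts cleanly to float
print(is_number(float('nan')))    # False: NaN and infinity are rejected
print(is_number(True))            # False unless if_bool=True is passed
print(is_number('abc'))           # False: float('abc') raises ValueError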
bcbio/bcbio-nextgen
bcbio/distributed/runfn.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L500-L510
def _combine_cwl_records(recs, record_name, parallel):
    """Provide a list of nested CWL records keyed by output key.

    Handles batches, where we return a list of records, and single items
    where we return one record.
    """
    if parallel not in ["multi-batch", "single-split", "multi-combined", "batch-single"]:
        assert len(recs) == 1, pprint.pformat(recs)
        return {record_name: recs[0]}
    else:
        return {record_name: recs}
[ "def", "_combine_cwl_records", "(", "recs", ",", "record_name", ",", "parallel", ")", ":", "if", "parallel", "not", "in", "[", "\"multi-batch\"", ",", "\"single-split\"", ",", "\"multi-combined\"", ",", "\"batch-single\"", "]", ":", "assert", "len", "(", "recs", ")", "==", "1", ",", "pprint", ".", "pformat", "(", "recs", ")", "return", "{", "record_name", ":", "recs", "[", "0", "]", "}", "else", ":", "return", "{", "record_name", ":", "recs", "}" ]
Provide a list of nested CWL records keyed by output key.

    Handles batches, where we return a list of records, and single items where we return one record.
[ "Provide", "a", "list", "of", "nexted", "CWL", "records", "keyed", "by", "output", "key", "." ]
python
train
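To illustrate what the function above returns, here is a fabricated example; the record contents and the parallel values are only stand-ins for real bcbio data.

recs = [{'description': 'sample1'}, {'description': 'sample2'}]

# A batched parallel mode keeps the whole list under the record name:
#   _combine_cwl_records(recs, 'samples', 'multi-batch')
#   -> {'samples': [{'description': 'sample1'}, {'description': 'sample2'}]}
#
# Any other parallel value expects exactly one record and unwraps it:
#   _combine_cwl_records(recs[:1], 'samples', 'other-mode')
#   -> {'samples': {'description': 'sample1'}}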
unitedstack/steth
stetho/agent/common/utils.py
https://github.com/unitedstack/steth/blob/955884ceebf3bdc474c93cc5cf555e67d16458f1/stetho/agent/common/utils.py#L62-L75
def create_deamon(cmd, shell=False, root=False):
    """Usage: Create service process.
    """
    try:
        if root:
            cmd.insert(0, 'sudo')
        LOG.info(cmd)
        subproc = subprocess.Popen(cmd, shell=shell,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        return subproc.pid
    except Exception as e:
        LOG.error(e)
        raise
[ "def", "create_deamon", "(", "cmd", ",", "shell", "=", "False", ",", "root", "=", "False", ")", ":", "try", ":", "if", "root", ":", "cmd", ".", "insert", "(", "0", ",", "'sudo'", ")", "LOG", ".", "info", "(", "cmd", ")", "subproc", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "shell", "=", "shell", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "return", "subproc", ".", "pid", "except", "Exception", "as", "e", ":", "LOG", ".", "error", "(", "e", ")", "raise" ]
Usage: Create service process.
[ "Usage", ":", "Create", "servcice", "process", "." ]
python
train
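A hedged usage sketch of the spawning pattern above; the command is illustrative and the helper is started without root.

import subprocess

# Same pattern as create_deamon: spawn a long-running helper and keep its pid.
proc = subprocess.Popen(['sleep', '30'],
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE)
print('spawned helper with pid', proc.pid)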
sio2project/filetracker
filetracker/servers/storage.py
https://github.com/sio2project/filetracker/blob/359b474850622e3d0c25ee2596d7242c02f84efb/filetracker/servers/storage.py#L289-L294
def stored_version(self, name):
        """Returns the version of file `name` or None if it doesn't exist."""
        link_path = self._link_path(name)
        if not _path_exists(link_path):
            return None
        return _file_version(link_path)
[ "def", "stored_version", "(", "self", ",", "name", ")", ":", "link_path", "=", "self", ".", "_link_path", "(", "name", ")", "if", "not", "_path_exists", "(", "link_path", ")", ":", "return", "None", "return", "_file_version", "(", "link_path", ")" ]
Returns the version of file `name` or None if it doesn't exist.
[ "Returns", "the", "version", "of", "file", "name", "or", "None", "if", "it", "doesn", "t", "exist", "." ]
python
train
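A hedged usage sketch; the storage object and the file name below are assumptions, since constructing a filetracker storage instance is outside this record.

# 'storage' is assumed to be an initialized filetracker storage instance.
# version = storage.stored_version('/exports/problem/input.txt')
# if version is None:
#     print('file is not in the store yet')
# else:
#     print('stored version:', version)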
saltstack/salt
salt/cloud/clouds/vultrpy.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/vultrpy.py#L175-L192
def list_nodes_full(**kwargs):
    '''
    Return all data on nodes
    '''
    nodes = _query('server/list')
    ret = {}

    for node in nodes:
        name = nodes[node]['label']
        ret[name] = nodes[node].copy()
        ret[name]['id'] = node
        ret[name]['image'] = nodes[node]['os']
        ret[name]['size'] = nodes[node]['VPSPLANID']
        ret[name]['state'] = nodes[node]['status']
        ret[name]['private_ips'] = nodes[node]['internal_ip']
        ret[name]['public_ips'] = nodes[node]['main_ip']

    return ret
[ "def", "list_nodes_full", "(", "*", "*", "kwargs", ")", ":", "nodes", "=", "_query", "(", "'server/list'", ")", "ret", "=", "{", "}", "for", "node", "in", "nodes", ":", "name", "=", "nodes", "[", "node", "]", "[", "'label'", "]", "ret", "[", "name", "]", "=", "nodes", "[", "node", "]", ".", "copy", "(", ")", "ret", "[", "name", "]", "[", "'id'", "]", "=", "node", "ret", "[", "name", "]", "[", "'image'", "]", "=", "nodes", "[", "node", "]", "[", "'os'", "]", "ret", "[", "name", "]", "[", "'size'", "]", "=", "nodes", "[", "node", "]", "[", "'VPSPLANID'", "]", "ret", "[", "name", "]", "[", "'state'", "]", "=", "nodes", "[", "node", "]", "[", "'status'", "]", "ret", "[", "name", "]", "[", "'private_ips'", "]", "=", "nodes", "[", "node", "]", "[", "'internal_ip'", "]", "ret", "[", "name", "]", "[", "'public_ips'", "]", "=", "nodes", "[", "node", "]", "[", "'main_ip'", "]", "return", "ret" ]
Return all data on nodes
[ "Return", "all", "data", "on", "nodes" ]
python
train
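The reshaping that list_nodes_full applies to the Vultr API response can be reproduced stand-alone; the response dict below is fabricated and only mirrors the keys the code reads.

nodes = {
    '576965': {'label': 'web01', 'os': 'Ubuntu 18.04 x64', 'VPSPLANID': '201',
               'status': 'active', 'internal_ip': '10.1.1.5', 'main_ip': '203.0.113.7'},
}

ret = {}
for node_id, node in nodes.items():
    name = node['label']
    ret[name] = dict(node, id=node_id, image=node['os'], size=node['VPSPLANID'],
                     state=node['status'], private_ips=node['internal_ip'],
                     public_ips=node['main_ip'])
print(ret['web01']['id'], ret['web01']['state'])   # 576965 active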
ucbvislab/radiotool
radiotool/composer/segment.py
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/segment.py#L59-L75
def get_frames(self, channels=2):
        """Get numpy array of frames corresponding to the segment.

        :param integer channels: Number of channels in output array
        :returns: Array of frames in the segment
        :rtype: numpy array

        """
        tmp_frame = self.track.current_frame
        self.track.current_frame = self.start
        frames = self.track.read_frames(self.duration, channels=channels)
        self.track.current_frame = tmp_frame

        for effect in self.effects:
            frames = effect.apply_to(frames, self.samplerate)

        return frames.copy()
[ "def", "get_frames", "(", "self", ",", "channels", "=", "2", ")", ":", "tmp_frame", "=", "self", ".", "track", ".", "current_frame", "self", ".", "track", ".", "current_frame", "=", "self", ".", "start", "frames", "=", "self", ".", "track", ".", "read_frames", "(", "self", ".", "duration", ",", "channels", "=", "channels", ")", "self", ".", "track", ".", "current_frame", "=", "tmp_frame", "for", "effect", "in", "self", ".", "effects", ":", "frames", "=", "effect", ".", "apply_to", "(", "frames", ",", "self", ".", "samplerate", ")", "return", "frames", ".", "copy", "(", ")" ]
Get numpy array of frames corresponding to the segment.

        :param integer channels: Number of channels in output array
        :returns: Array of frames in the segment
        :rtype: numpy array
[ "Get", "numpy", "array", "of", "frames", "corresponding", "to", "the", "segment", "." ]
python
train
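A hedged usage sketch; the file name and the Segment constructor arguments are assumptions about radiotool's API rather than facts taken from this record.

# from radiotool.composer import Track, Segment
# track = Track("song.wav")
# seg = Segment(track, 0.0, 10.0, 5.0)    # placement, start, duration (assumed argument order)
# frames = seg.get_frames(channels=2)     # numpy array with one column per channel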