column            type             lengths / values
------            ----             ----------------
repo              stringlengths    7 - 54
path              stringlengths    4 - 192
url               stringlengths    87 - 284
code              stringlengths    78 - 104k
code_tokens       list
docstring         stringlengths    1 - 46.9k
docstring_tokens  list
language          stringclasses    1 value
partition         stringclasses    3 values
couchbase/couchbase-python-client
couchbase/cluster.py
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/cluster.py#L395-L407
def get_auto_credentials(self, bucket):
    """
    :param bucket:
    :return: returns a dictionary of credentials for bucket/admin authentication
    """
    result = {k: v(self) for k, v in self.get_unique_creds_dict().items()}
    if bucket:
        result.update(self.get_cred_bucket(bucket))
    else:
        result.update(self.get_cred_not_bucket())
    return result
[ "def", "get_auto_credentials", "(", "self", ",", "bucket", ")", ":", "result", "=", "{", "k", ":", "v", "(", "self", ")", "for", "k", ",", "v", "in", "self", ".", "get_unique_creds_dict", "(", ")", ".", "items", "(", ")", "}", "if", "bucket", ":", "result", ".", "update", "(", "self", ".", "get_cred_bucket", "(", "bucket", ")", ")", "else", ":", "result", ".", "update", "(", "self", ".", "get_cred_not_bucket", "(", ")", ")", "return", "result" ]
:param bucket:
:return: returns a dictionary of credentials for bucket/admin authentication
[ ":", "param", "bucket", ":", ":", "return", ":", "returns", "a", "dictionary", "of", "credentials", "for", "bucket", "/", "admin", "authentication" ]
python
train
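The credentials dict above is built by resolving a map of callables against the instance, then overlaying bucket-specific or admin entries. A minimal self-contained sketch of that dict-of-callables pattern (all names here are hypothetical stand-ins, not the couchbase API):

class Authenticator:
    """Hypothetical stand-in showing the dict-of-callables pattern."""

    def __init__(self, user, password):
        self._user = user
        self._password = password

    def get_unique_creds_dict(self):
        # Each value is a callable that resolves against the instance later.
        return {'username': lambda s: s._user,
                'password': lambda s: s._password}

    def get_auto_credentials(self):
        # Resolve every callable against self to get concrete values.
        return {k: v(self) for k, v in self.get_unique_creds_dict().items()}

print(Authenticator('admin', 's3cret').get_auto_credentials())
# {'username': 'admin', 'password': 's3cret'}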
rosenbrockc/ci
pyci/server.py
https://github.com/rosenbrockc/ci/blob/4d5a60291424a83124d1d962d17fb4c7718cde2b/pyci/server.py#L216-L224
def _get_repos(self):
    """Gets the installed repositories in this server, keyed by
    lower-cased name."""
    result = {}
    for xmlpath in self.installed:
        repo = RepositorySettings(self, xmlpath)
        result[repo.name.lower()] = repo
    return result
[ "def", "_get_repos", "(", "self", ")", ":", "result", "=", "{", "}", "for", "xmlpath", "in", "self", ".", "installed", ":", "repo", "=", "RepositorySettings", "(", "self", ",", "xmlpath", ")", "result", "[", "repo", ".", "name", ".", "lower", "(", ")", "]", "=", "repo", "return", "result" ]
Gets the installed repositories in this server, keyed by lower-cased name.
[ "Gets", "a", "list", "of", "all", "the", "installed", "repositories", "in", "this", "server", "." ]
python
train
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L193-L207
def delete_volume(self, volume_name: str):
    """Removes/stops a docker volume.

    Only the manager nodes can delete a volume

    Args:
        volume_name (string): Name of the volume
    """
    # Raise an exception if we are not a manager
    if not self._manager:
        raise RuntimeError('Volumes can only be deleted '
                           'on swarm manager nodes')

    # Remove volume
    self._api_client.remove_volume(volume_name)
[ "def", "delete_volume", "(", "self", ",", "volume_name", ":", "str", ")", ":", "# Raise an exception if we are not a manager", "if", "not", "self", ".", "_manager", ":", "raise", "RuntimeError", "(", "'Volumes can only be deleted '", "'on swarm manager nodes'", ")", "# Remove volume", "self", ".", "_api_client", ".", "remove_volume", "(", "volume_name", ")" ]
Removes/stops a docker volume.

Only the manager nodes can delete a volume

Args:
    volume_name (string): Name of the volume
[ "Removes", "/", "stops", "a", "docker", "volume", "." ]
python
train
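For comparison, the same guard-then-remove flow written directly against the docker SDK's low-level client. This is a sketch, not the SIP wrapper: it assumes the docker package is installed, a daemon is reachable, and a volume named my_volume exists, and it uses the daemon's Swarm ControlAvailable flag as a stand-in for the wrapper's cached self._manager attribute:

import docker

client = docker.APIClient()  # same low-level client class the wrapper holds
swarm_info = client.info().get('Swarm', {})
if not swarm_info.get('ControlAvailable', False):
    # Only swarm manager nodes may delete volumes.
    raise RuntimeError('Volumes can only be deleted on swarm manager nodes')
client.remove_volume('my_volume')  # hypothetical volume name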
spyder-ide/spyder
spyder/plugins/projects/plugin.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/projects/plugin.py#L337-L363
def delete_project(self):
    """
    Delete the current project without deleting the files in the
    directory.
    """
    if self.current_active_project:
        self.switch_to_plugin()
        path = self.current_active_project.root_path
        buttons = QMessageBox.Yes | QMessageBox.No
        answer = QMessageBox.warning(
            self,
            _("Delete"),
            _("Do you really want to delete <b>{filename}</b>?<br><br>"
              "<b>Note:</b> This action will only delete the project. "
              "Its files are going to be preserved on disk."
              ).format(filename=osp.basename(path)),
            buttons)
        if answer == QMessageBox.Yes:
            try:
                self.close_project()
                shutil.rmtree(osp.join(path, '.spyproject'))
            except EnvironmentError as error:
                QMessageBox.critical(
                    self,
                    _("Project Explorer"),
                    _("<b>Unable to delete <i>{varpath}</i></b>"
                      "<br><br>The error message was:<br>{error}"
                      ).format(varpath=path, error=to_text_string(error)))
[ "def", "delete_project", "(", "self", ")", ":", "if", "self", ".", "current_active_project", ":", "self", ".", "switch_to_plugin", "(", ")", "path", "=", "self", ".", "current_active_project", ".", "root_path", "buttons", "=", "QMessageBox", ".", "Yes", "|", "QMessageBox", ".", "No", "answer", "=", "QMessageBox", ".", "warning", "(", "self", ",", "_", "(", "\"Delete\"", ")", ",", "_", "(", "\"Do you really want to delete <b>{filename}</b>?<br><br>\"", "\"<b>Note:</b> This action will only delete the project. \"", "\"Its files are going to be preserved on disk.\"", ")", ".", "format", "(", "filename", "=", "osp", ".", "basename", "(", "path", ")", ")", ",", "buttons", ")", "if", "answer", "==", "QMessageBox", ".", "Yes", ":", "try", ":", "self", ".", "close_project", "(", ")", "shutil", ".", "rmtree", "(", "osp", ".", "join", "(", "path", ",", "'.spyproject'", ")", ")", "except", "EnvironmentError", "as", "error", ":", "QMessageBox", ".", "critical", "(", "self", ",", "_", "(", "\"Project Explorer\"", ")", ",", "_", "(", "\"<b>Unable to delete <i>{varpath}</i></b>\"", "\"<br><br>The error message was:<br>{error}\"", ")", ".", "format", "(", "varpath", "=", "path", ",", "error", "=", "to_text_string", "(", "error", ")", ")", ")" ]
Delete the current project without deleting the files in the directory.
[ "Delete", "the", "current", "project", "without", "deleting", "the", "files", "in", "the", "directory", "." ]
python
train
Laufire/ec
tools/modules/helpers.py
https://github.com/Laufire/ec/blob/63e84a1daef9234487d7de538e5da233a7d13071/tools/modules/helpers.py#L24-L31
def run(command, **kwargs):
    """Executes the given command while transferring control, till the
    execution is complete.
    """
    print(command)
    p = Popen(shlex.split(command), **kwargs)
    p.wait()
    return p.returncode
[ "def", "run", "(", "command", ",", "*", "*", "kwargs", ")", ":", "print", "command", "p", "=", "Popen", "(", "shlex", ".", "split", "(", "command", ")", ",", "*", "*", "kwargs", ")", "p", ".", "wait", "(", ")", "return", "p", ".", "returncode" ]
Executes the given command while transferring control, till the execution is complete.
[ "Excecutes", "the", "given", "command", "while", "transfering", "control", "till", "the", "execution", "is", "complete", "." ]
python
train
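The same behaviour on Python 3 (echo the command, block until it finishes, hand back the exit code) fits the standard library's subprocess.run; a rough equivalent:

import shlex
import subprocess

def run(command, **kwargs):
    """Execute `command`, blocking until it completes; return its exit code."""
    print(command)
    completed = subprocess.run(shlex.split(command), **kwargs)
    return completed.returncode

print(run('echo hello'))  # prints the command, then hello, then 0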
dwavesystems/penaltymodel
penaltymodel_cache/penaltymodel/cache/database_manager.py
https://github.com/dwavesystems/penaltymodel/blob/b9d343233aea8df0f59cea45a07f12d0b3b8d9b3/penaltymodel_cache/penaltymodel/cache/database_manager.py#L215-L236
def iter_feasible_configurations(cur):
    """Iterate over all of the sets of feasible configurations in the cache.

    Args:
        cur (:class:`sqlite3.Cursor`):
            An sqlite3 cursor. This function is meant to be run within a
            :obj:`with` statement.

    Yields:
        dict[tuple(int): number]: The feasible_configurations.

    """
    select = \
        """
        SELECT num_variables, feasible_configurations, energies
        FROM feasible_configurations
        """

    for num_variables, feasible_configurations, energies in cur.execute(select):
        configs = json.loads(feasible_configurations)
        energies = json.loads(energies)

        yield {_decode_config(config, num_variables): energy
               for config, energy in zip(configs, energies)}
[ "def", "iter_feasible_configurations", "(", "cur", ")", ":", "select", "=", "\"\"\"\n SELECT num_variables, feasible_configurations, energies\n FROM feasible_configurations\n \"\"\"", "for", "num_variables", ",", "feasible_configurations", ",", "energies", "in", "cur", ".", "execute", "(", "select", ")", ":", "configs", "=", "json", ".", "loads", "(", "feasible_configurations", ")", "energies", "=", "json", ".", "loads", "(", "energies", ")", "yield", "{", "_decode_config", "(", "config", ",", "num_variables", ")", ":", "energy", "for", "config", ",", "energy", "in", "zip", "(", "configs", ",", "energies", ")", "}" ]
Iterate over all of the sets of feasible configurations in the cache.

Args:
    cur (:class:`sqlite3.Cursor`):
        An sqlite3 cursor. This function is meant to be run within a
        :obj:`with` statement.

Yields:
    dict[tuple(int): number]: The feasible_configurations.
[ "Iterate", "over", "all", "of", "the", "sets", "of", "feasible", "configurations", "in", "the", "cache", "." ]
python
train
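The cursor-driven generator is easy to exercise against an in-memory database. A runnable miniature (the table layout, JSON encoding, and plain-int keys are simplified stand-ins for the penaltymodel schema and its _decode_config helper):

import json
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE feasible_configurations '
             '(num_variables INTEGER, configs TEXT, energies TEXT)')
conn.execute('INSERT INTO feasible_configurations VALUES (?, ?, ?)',
             (2, json.dumps([0, 3]), json.dumps([0.0, 1.5])))

def iter_rows(cur):
    # Iterating the cursor streams rows without loading the whole table.
    for num_variables, configs, energies in cur.execute(
            'SELECT num_variables, configs, energies '
            'FROM feasible_configurations'):
        yield dict(zip(json.loads(configs), json.loads(energies)))

print(list(iter_rows(conn.cursor())))  # [{0: 0.0, 3: 1.5}]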
wonambi-python/wonambi
wonambi/ioeeg/bci2000.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/ioeeg/bci2000.py#L38-L99
def return_hdr(self):
    """Return the header for further use.

    Returns
    -------
    subj_id : str
        subject identification code
    start_time : datetime
        start time of the dataset
    s_freq : float
        sampling frequency
    chan_name : list of str
        list of all the channels
    n_samples : int
        number of samples in the dataset
    orig : dict
        additional information taken directly from the header

    Notes
    -----
    As far as I can tell, BCI2000 doesn't have channel labels, so we use
    dummies starting at chan001 (more consistent with Matlab 1-based
    indexing...)
    """
    orig = {}
    orig = _read_header(self.filename)

    nchan = int(orig['SourceCh'])
    chan_name = ['ch{:03d}'.format(i + 1) for i in range(nchan)]
    chan_dtype = dtype(orig['DataFormat'])
    self.statevector_len = int(orig['StatevectorLen'])

    s_freq = orig['Parameter']['SamplingRate']
    if s_freq.endswith('Hz'):
        s_freq = s_freq.replace('Hz', '')
    s_freq = int(s_freq.strip())
    self.s_freq = s_freq

    storagetime = orig['Parameter']['StorageTime'].replace('%20', ' ')
    try:  # newer version
        start_time = datetime.strptime(storagetime, '%a %b %d %H:%M:%S %Y')
    except ValueError:
        start_time = datetime.strptime(storagetime, '%Y-%m-%dT%H:%M:%S')

    subj_id = orig['Parameter']['SubjectName']

    self.dtype = dtype([(chan, chan_dtype) for chan in chan_name] +
                       [('statevector', 'S', self.statevector_len)])

    # compute n_samples based on file size - header
    with open(self.filename, 'rb') as f:
        f.seek(0, SEEK_END)
        EOData = f.tell()
    n_samples = int((EOData - int(orig['HeaderLen'])) / self.dtype.itemsize)

    self.s_freq = s_freq
    self.header_len = int(orig['HeaderLen'])
    self.n_samples = n_samples
    self.statevectors = _prepare_statevectors(orig['StateVector'])

    # TODO: a better way to parse header
    self.gain = array([float(x) for x in
                       orig['Parameter']['SourceChGain'].split(' ')[1:]])

    return subj_id, start_time, s_freq, chan_name, n_samples, orig
[ "def", "return_hdr", "(", "self", ")", ":", "orig", "=", "{", "}", "orig", "=", "_read_header", "(", "self", ".", "filename", ")", "nchan", "=", "int", "(", "orig", "[", "'SourceCh'", "]", ")", "chan_name", "=", "[", "'ch{:03d}'", ".", "format", "(", "i", "+", "1", ")", "for", "i", "in", "range", "(", "nchan", ")", "]", "chan_dtype", "=", "dtype", "(", "orig", "[", "'DataFormat'", "]", ")", "self", ".", "statevector_len", "=", "int", "(", "orig", "[", "'StatevectorLen'", "]", ")", "s_freq", "=", "orig", "[", "'Parameter'", "]", "[", "'SamplingRate'", "]", "if", "s_freq", ".", "endswith", "(", "'Hz'", ")", ":", "s_freq", "=", "s_freq", ".", "replace", "(", "'Hz'", ",", "''", ")", "s_freq", "=", "int", "(", "s_freq", ".", "strip", "(", ")", ")", "self", ".", "s_freq", "=", "s_freq", "storagetime", "=", "orig", "[", "'Parameter'", "]", "[", "'StorageTime'", "]", ".", "replace", "(", "'%20'", ",", "' '", ")", "try", ":", "# newer version", "start_time", "=", "datetime", ".", "strptime", "(", "storagetime", ",", "'%a %b %d %H:%M:%S %Y'", ")", "except", ":", "start_time", "=", "datetime", ".", "strptime", "(", "storagetime", ",", "'%Y-%m-%dT%H:%M:%S'", ")", "subj_id", "=", "orig", "[", "'Parameter'", "]", "[", "'SubjectName'", "]", "self", ".", "dtype", "=", "dtype", "(", "[", "(", "chan", ",", "chan_dtype", ")", "for", "chan", "in", "chan_name", "]", "+", "[", "(", "'statevector'", ",", "'S'", ",", "self", ".", "statevector_len", ")", "]", ")", "# compute n_samples based on file size - header", "with", "open", "(", "self", ".", "filename", ",", "'rb'", ")", "as", "f", ":", "f", ".", "seek", "(", "0", ",", "SEEK_END", ")", "EOData", "=", "f", ".", "tell", "(", ")", "n_samples", "=", "int", "(", "(", "EOData", "-", "int", "(", "orig", "[", "'HeaderLen'", "]", ")", ")", "/", "self", ".", "dtype", ".", "itemsize", ")", "self", ".", "s_freq", "=", "s_freq", "self", ".", "header_len", "=", "int", "(", "orig", "[", "'HeaderLen'", "]", ")", "self", ".", "n_samples", "=", "n_samples", "self", ".", "statevectors", "=", "_prepare_statevectors", "(", "orig", "[", "'StateVector'", "]", ")", "# TODO: a better way to parse header", "self", ".", "gain", "=", "array", "(", "[", "float", "(", "x", ")", "for", "x", "in", "orig", "[", "'Parameter'", "]", "[", "'SourceChGain'", "]", ".", "split", "(", "' '", ")", "[", "1", ":", "]", "]", ")", "return", "subj_id", ",", "start_time", ",", "s_freq", ",", "chan_name", ",", "n_samples", ",", "orig" ]
Return the header for further use.

Returns
-------
subj_id : str
    subject identification code
start_time : datetime
    start time of the dataset
s_freq : float
    sampling frequency
chan_name : list of str
    list of all the channels
n_samples : int
    number of samples in the dataset
orig : dict
    additional information taken directly from the header

Notes
-----
As far as I can tell, BCI2000 doesn't have channel labels, so we use
dummies starting at chan001 (more consistent with Matlab 1-based indexing...)
[ "Return", "the", "header", "for", "further", "use", "." ]
python
train
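The n_samples computation is pure arithmetic on the file size: seek to the end, subtract the header length, divide by the per-record itemsize. A standalone sketch with a synthetic file and a hypothetical 512-byte header:

import os
from numpy import dtype

HEADER_LEN = 512  # hypothetical header size
rec = dtype([('ch{:03d}'.format(i + 1), 'int16') for i in range(4)])

with open('demo.dat', 'wb') as f:   # synthetic file: header + 100 records
    f.write(b'\x00' * (HEADER_LEN + 100 * rec.itemsize))

with open('demo.dat', 'rb') as f:
    f.seek(0, os.SEEK_END)          # jump to the end of the file
    eodata = f.tell()               # total size in bytes

n_samples = (eodata - HEADER_LEN) // rec.itemsize
print(n_samples)                    # 100
os.remove('demo.dat')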
jaraco/irc
irc/features.py
https://github.com/jaraco/irc/blob/571c1f448d5d5bb92bbe2605c33148bf6e698413/irc/features.py#L44-L47
def load(self, arguments):
    "Load the values from the ServerConnection arguments"
    features = arguments[1:-1]
    list(map(self.load_feature, features))
[ "def", "load", "(", "self", ",", "arguments", ")", ":", "features", "=", "arguments", "[", "1", ":", "-", "1", "]", "list", "(", "map", "(", "self", ".", "load_feature", ",", "features", ")", ")" ]
Load the values from the ServerConnection arguments
[ "Load", "the", "values", "from", "the", "a", "ServerConnection", "arguments" ]
python
train
houluy/chessboard
chessboard/__init__.py
https://github.com/houluy/chessboard/blob/b834819d93d71b492f27780a58dfbb3a107d7e85/chessboard/__init__.py#L423-L441
def rotate_board(self, angle, unit='radian'):
    '''Rotate the chessboard by a specific angle; the angle must be an
    integral multiple of pi/2 (90 degrees)'''
    if unit == 'angle':
        angle = angle*math.pi/180
    angle %= 2*math.pi
    if angle not in [0, math.pi/2, math.pi, math.pi*3/2]:
        raise ValueError('Angle must be integral multiple of pi/2(90 degree)')
    new_pos = [[0 for _ in self.pos_range] for _ in self.pos_range]
    cos_ang = math.cos(angle)
    sin_ang = math.sin(angle)
    center = (self.board_size - 1)/2
    for x, y in comb(self.pos_range, 2):
        xt = int((x - center)*cos_ang - (y - center)*sin_ang + center)
        yt = int((x - center)*sin_ang + (y - center)*cos_ang + center)
        new_pos[xt][yt] = self.pos[x][y]
    return new_pos
[ "def", "rotate_board", "(", "self", ",", "angle", ",", "unit", "=", "'radian'", ")", ":", "if", "unit", "==", "'angle'", ":", "angle", "=", "angle", "*", "math", ".", "pi", "/", "180", "angle", "%=", "2", "*", "math", ".", "pi", "if", "angle", "not", "in", "[", "0", ",", "math", ".", "pi", "/", "2", ",", "math", ".", "pi", ",", "math", ".", "pi", "*", "3", "/", "2", "]", ":", "raise", "ValueError", "(", "'Angle must be integral multiple of pi/2(90 degree)'", ")", "new_pos", "=", "[", "[", "0", "for", "_", "in", "self", ".", "pos_range", "]", "for", "_", "in", "self", ".", "pos_range", "]", "cos_ang", "=", "math", ".", "cos", "(", "angle", ")", "sin_ang", "=", "math", ".", "sin", "(", "angle", ")", "center", "=", "(", "self", ".", "board_size", "-", "1", ")", "/", "2", "for", "x", ",", "y", "in", "comb", "(", "self", ".", "pos_range", ",", "2", ")", ":", "xt", "=", "int", "(", "(", "x", "-", "center", ")", "*", "cos_ang", "-", "(", "y", "-", "center", ")", "*", "sin_ang", "+", "center", ")", "yt", "=", "int", "(", "(", "x", "-", "center", ")", "*", "sin_ang", "+", "(", "y", "-", "center", ")", "*", "cos_ang", "+", "center", ")", "new_pos", "[", "xt", "]", "[", "yt", "]", "=", "self", ".", "pos", "[", "x", "]", "[", "y", "]", "return", "new_pos" ]
Rotate the chessboard by a specific angle; the angle must be an integral multiple of pi/2 (90 degrees)
[ "Rotate", "the", "chessboard", "for", "a", "specific", "angle", "angle", "must", "be", "integral", "multiple", "of", "pi", "/", "2", "(", "90", "degree", ")" ]
python
train
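The two assignments inside the loop are the standard rotation of a point about the board centre. One caveat worth seeing in numbers: cos(pi/2) is not exactly zero in floats, so truncating with int() can land one cell short of where round() would put it:

import math

angle = math.pi / 2
center = 1.0                    # 3x3 board: (board_size - 1) / 2
x, y = 0, 0                     # top-left cell
xt = (x - center) * math.cos(angle) - (y - center) * math.sin(angle) + center
yt = (x - center) * math.sin(angle) + (y - center) * math.cos(angle) + center
print(xt)                       # 1.9999999999999998, not 2.0
print(int(xt), round(xt))       # 1 2 -- int() truncates the float artifact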
saltstack/salt
salt/states/service.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/service.py#L858-L1002
def mod_watch(name,
              sfun=None,
              sig=None,
              full_restart=False,
              init_delay=None,
              force=False,
              **kwargs):
    '''
    The service watcher, called to invoke the watch command.
    When called, it will restart or reload the named service.

    .. note::
        This state exists to support special handling of the ``watch``
        :ref:`requisite <requisites>`. It should not be called directly.

        Parameters for this function should be set by the watching service.
        (i.e. ``service.running``)

    name
        The name of the init or rc script used to manage the service

    sfun
        The original function which triggered the mod_watch call
        (`service.running`, for example).

    sig
        The string to search for when looking for the service process with ps

    reload
        If True use reload instead of the default restart. If value is a list
        of requisites; reload only if all watched changes are contained in the
        reload list. Otherwise watch will restart.

    full_restart
        Perform a full stop/start of a service by passing ``--full-restart``.
        This option is ignored if ``reload`` is set and is supported by only a
        few :py:func:`service modules <salt.modules.service>`.

    force
        Use service.force_reload instead of reload
        (needs reload to be set to True)

    init_delay
        Add a sleep command (in seconds) before the service is
        restarted/reloaded
    '''
    reload_ = kwargs.pop('reload', False)
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    past_participle = None

    if sfun == 'dead':
        verb = 'stop'
        past_participle = verb + 'ped'
        if __salt__['service.status'](name, sig):
            func = __salt__['service.stop']
        else:
            ret['result'] = True
            ret['comment'] = 'Service is already {0}'.format(past_participle)
            return ret
    elif sfun == 'running':
        if __salt__['service.status'](name, sig):
            if 'service.reload' in __salt__ and reload_:
                if isinstance(reload_, list):
                    only_reload_needed = True
                    for watch_item in kwargs['__reqs__']['watch']:
                        if __running__[_gen_tag(watch_item)]['changes']:
                            match_found = False
                            for this_reload in reload_:
                                for state, id_ in six.iteritems(this_reload):
                                    if state == watch_item['state'] \
                                            and id_ == watch_item['__id__']:
                                        match_found = True
                            if not match_found:
                                only_reload_needed = False
                    if only_reload_needed:
                        if 'service.force_reload' in __salt__ and force:
                            func = __salt__['service.force_reload']
                            verb = 'forcefully reload'
                        else:
                            func = __salt__['service.reload']
                            verb = 'reload'
                    else:
                        if 'service.full_restart' in __salt__ and full_restart:
                            func = __salt__['service.full_restart']
                            verb = 'fully restart'
                        else:
                            func = __salt__['service.restart']
                            verb = 'restart'
                else:
                    if 'service.force_reload' in __salt__ and force:
                        func = __salt__['service.force_reload']
                        verb = 'forcefully reload'
                    else:
                        func = __salt__['service.reload']
                        verb = 'reload'
            elif 'service.full_restart' in __salt__ and full_restart:
                func = __salt__['service.full_restart']
                verb = 'fully restart'
            else:
                func = __salt__['service.restart']
                verb = 'restart'
        else:
            func = __salt__['service.start']
            verb = 'start'

        if not past_participle:
            past_participle = verb + 'ed'
    else:
        ret['comment'] = 'Unable to trigger watch for service.{0}'.format(sfun)
        ret['result'] = False
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Service is set to be {0}'.format(past_participle)
        return ret

    if verb == 'start' and 'service.stop' in __salt__:
        # stop service before start
        __salt__['service.stop'](name)

    func_kwargs, warnings = _get_systemd_only(func, kwargs)
    if warnings:
        ret.setdefault('warnings', []).extend(warnings)

    try:
        result = func(name, **func_kwargs)
    except CommandExecutionError as exc:
        ret['result'] = False
        ret['comment'] = exc.strerror
        return ret

    if init_delay:
        time.sleep(init_delay)

    ret['changes'] = {name: result}
    ret['result'] = result
    ret['comment'] = 'Service {0}'.format(past_participle) if result else \
        'Failed to {0} the service'.format(verb)

    return ret
[ "def", "mod_watch", "(", "name", ",", "sfun", "=", "None", ",", "sig", "=", "None", ",", "full_restart", "=", "False", ",", "init_delay", "=", "None", ",", "force", "=", "False", ",", "*", "*", "kwargs", ")", ":", "reload_", "=", "kwargs", ".", "pop", "(", "'reload'", ",", "False", ")", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", "}", "past_participle", "=", "None", "if", "sfun", "==", "'dead'", ":", "verb", "=", "'stop'", "past_participle", "=", "verb", "+", "'ped'", "if", "__salt__", "[", "'service.status'", "]", "(", "name", ",", "sig", ")", ":", "func", "=", "__salt__", "[", "'service.stop'", "]", "else", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Service is already {0}'", ".", "format", "(", "past_participle", ")", "return", "ret", "elif", "sfun", "==", "'running'", ":", "if", "__salt__", "[", "'service.status'", "]", "(", "name", ",", "sig", ")", ":", "if", "'service.reload'", "in", "__salt__", "and", "reload_", ":", "if", "isinstance", "(", "reload_", ",", "list", ")", ":", "only_reload_needed", "=", "True", "for", "watch_item", "in", "kwargs", "[", "'__reqs__'", "]", "[", "'watch'", "]", ":", "if", "__running__", "[", "_gen_tag", "(", "watch_item", ")", "]", "[", "'changes'", "]", ":", "match_found", "=", "False", "for", "this_reload", "in", "reload_", ":", "for", "state", ",", "id_", "in", "six", ".", "iteritems", "(", "this_reload", ")", ":", "if", "state", "==", "watch_item", "[", "'state'", "]", "and", "id_", "==", "watch_item", "[", "'__id__'", "]", ":", "match_found", "=", "True", "if", "not", "match_found", ":", "only_reload_needed", "=", "False", "if", "only_reload_needed", ":", "if", "'service.force_reload'", "in", "__salt__", "and", "force", ":", "func", "=", "__salt__", "[", "'service.force_reload'", "]", "verb", "=", "'forcefully reload'", "else", ":", "func", "=", "__salt__", "[", "'service.reload'", "]", "verb", "=", "'reload'", "else", ":", "if", "'service.full_restart'", "in", "__salt__", "and", "full_restart", ":", "func", "=", "__salt__", "[", "'service.full_restart'", "]", "verb", "=", "'fully restart'", "else", ":", "func", "=", "__salt__", "[", "'service.restart'", "]", "verb", "=", "'restart'", "else", ":", "if", "'service.force_reload'", "in", "__salt__", "and", "force", ":", "func", "=", "__salt__", "[", "'service.force_reload'", "]", "verb", "=", "'forcefully reload'", "else", ":", "func", "=", "__salt__", "[", "'service.reload'", "]", "verb", "=", "'reload'", "elif", "'service.full_restart'", "in", "__salt__", "and", "full_restart", ":", "func", "=", "__salt__", "[", "'service.full_restart'", "]", "verb", "=", "'fully restart'", "else", ":", "func", "=", "__salt__", "[", "'service.restart'", "]", "verb", "=", "'restart'", "else", ":", "func", "=", "__salt__", "[", "'service.start'", "]", "verb", "=", "'start'", "if", "not", "past_participle", ":", "past_participle", "=", "verb", "+", "'ed'", "else", ":", "ret", "[", "'comment'", "]", "=", "'Unable to trigger watch for service.{0}'", ".", "format", "(", "sfun", ")", "ret", "[", "'result'", "]", "=", "False", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'Service is set to be {0}'", ".", "format", "(", "past_participle", ")", "return", "ret", "if", "verb", "==", "'start'", "and", "'service.stop'", "in", "__salt__", ":", "# stop service before start", "__salt__", "[", "'service.stop'", "]", 
"(", "name", ")", "func_kwargs", ",", "warnings", "=", "_get_systemd_only", "(", "func", ",", "kwargs", ")", "if", "warnings", ":", "ret", ".", "setdefault", "(", "'warnings'", ",", "[", "]", ")", ".", "extend", "(", "warnings", ")", "try", ":", "result", "=", "func", "(", "name", ",", "*", "*", "func_kwargs", ")", "except", "CommandExecutionError", "as", "exc", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "exc", ".", "strerror", "return", "ret", "if", "init_delay", ":", "time", ".", "sleep", "(", "init_delay", ")", "ret", "[", "'changes'", "]", "=", "{", "name", ":", "result", "}", "ret", "[", "'result'", "]", "=", "result", "ret", "[", "'comment'", "]", "=", "'Service {0}'", ".", "format", "(", "past_participle", ")", "if", "result", "else", "'Failed to {0} the service'", ".", "format", "(", "verb", ")", "return", "ret" ]
The service watcher, called to invoke the watch command. When called, it will
restart or reload the named service.

.. note::
    This state exists to support special handling of the ``watch``
    :ref:`requisite <requisites>`. It should not be called directly.

    Parameters for this function should be set by the watching service.
    (i.e. ``service.running``)

name
    The name of the init or rc script used to manage the service

sfun
    The original function which triggered the mod_watch call
    (`service.running`, for example).

sig
    The string to search for when looking for the service process with ps

reload
    If True use reload instead of the default restart. If value is a list of
    requisites; reload only if all watched changes are contained in the
    reload list. Otherwise watch will restart.

full_restart
    Perform a full stop/start of a service by passing ``--full-restart``.
    This option is ignored if ``reload`` is set and is supported by only a
    few :py:func:`service modules <salt.modules.service>`.

force
    Use service.force_reload instead of reload
    (needs reload to be set to True)

init_delay
    Add a sleep command (in seconds) before the service is
    restarted/reloaded
[ "The", "service", "watcher", "called", "to", "invoke", "the", "watch", "command", ".", "When", "called", "it", "will", "restart", "or", "reload", "the", "named", "service", "." ]
python
train
roboogle/gtkmvc3
gtkmvco/gtkmvc3/adapters/default.py
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/gtkmvc3/adapters/default.py#L131-L146
def search_adapter_info(wid, flavour=None):
    """Given a widget returns the default tuple found in __def_adapter.

    @param flavour can be used to specialize the search for a
    particular tuple.
    """
    t = (type(wid), flavour)
    if t in __memoize__:
        return __memoize__[t]

    for w in __def_adapter:
        if (isinstance(wid, w[WIDGET]) and flavour == w[FLAVOUR]):
            __memoize__[t] = w
            return w

    raise TypeError("Adapter type " + str(t) +
                    " not found among supported adapters")
[ "def", "search_adapter_info", "(", "wid", ",", "flavour", "=", "None", ")", ":", "t", "=", "(", "type", "(", "wid", ")", ",", "flavour", ")", "if", "t", "in", "__memoize__", ":", "return", "__memoize__", "[", "t", "]", "for", "w", "in", "__def_adapter", ":", "if", "(", "isinstance", "(", "wid", ",", "w", "[", "WIDGET", "]", ")", "and", "flavour", "==", "w", "[", "FLAVOUR", "]", ")", ":", "__memoize__", "[", "t", "]", "=", "w", "return", "w", "raise", "TypeError", "(", "\"Adapter type \"", "+", "str", "(", "t", ")", "+", "\" not found among supported adapters\"", ")" ]
Given a widget returns the default tuple found in __def_adapter. @param flavour can be used to specialize the search for a particular tuple.
[ "Given", "a", "widget", "returns", "the", "default", "tuple", "found", "in", "__def_adapter", "." ]
python
train
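The (type, flavour) memoisation in front of a linear registry scan is a self-contained pattern; a minimal sketch with a hypothetical two-entry registry:

_registry = [(int, None, 'int-adapter'), (str, 'fancy', 'fancy-str-adapter')]
_memo = {}

def search(value, flavour=None):
    key = (type(value), flavour)
    if key in _memo:               # hit: skip the linear scan entirely
        return _memo[key]
    for cls, flav, adapter in _registry:
        if isinstance(value, cls) and flavour == flav:
            _memo[key] = adapter   # cache for the next lookup
            return adapter
    raise TypeError('Adapter type %s not found' % (key,))

print(search(3))             # int-adapter
print(search('x', 'fancy'))  # fancy-str-adapter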
20c/xbahn
xbahn/connection/__init__.py
https://github.com/20c/xbahn/blob/afb27b0576841338a366d7cac0200a782bd84be6/xbahn/connection/__init__.py#L320-L327
def listen(url, prefix=None, **kwargs):
    """
    bind and return a connection instance from url

    arguments:
        - url (str): xbahn connection url
    """
    return listener(url, prefix=get_prefix(prefix), **kwargs)
[ "def", "listen", "(", "url", ",", "prefix", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "listener", "(", "url", ",", "prefix", "=", "get_prefix", "(", "prefix", ")", ",", "*", "*", "kwargs", ")" ]
bind and return a connection instance from url

arguments:
    - url (str): xbahn connection url
[ "bind", "and", "return", "a", "connection", "instance", "from", "url" ]
python
train
mitsei/dlkit
dlkit/json_/authorization/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/authorization/sessions.py#L613-L654
def get_authorizations_by_ids(self, authorization_ids):
    """Gets an ``AuthorizationList`` corresponding to the given ``IdList``.

    In plenary mode, the returned list contains all of the authorizations
    specified in the ``Id`` list, in the order of the list, including
    duplicates, or an error results if an ``Id`` in the supplied list is
    not found or inaccessible. Otherwise, inaccessible ``Authorizations``
    may be omitted from the list and may present the elements in any order
    including returning a unique set.

    arg:    authorization_ids (osid.id.IdList): the list of ``Ids`` to
            retrieve
    return: (osid.authorization.AuthorizationList) - the returned
            ``Authorization list``
    raise:  NotFound - an ``Id was`` not found
    raise:  NullArgument - ``authorization_ids`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resources_by_ids
    # NOTE: This implementation currently ignores plenary view
    collection = JSONClientValidated('authorization',
                                     collection='Authorization',
                                     runtime=self._runtime)
    object_id_list = []
    for i in authorization_ids:
        object_id_list.append(ObjectId(self._get_id(i, 'authorization').get_identifier()))
    result = collection.find(
        dict({'_id': {'$in': object_id_list}},
             **self._view_filter()))
    result = list(result)
    sorted_result = []
    for object_id in object_id_list:
        for object_map in result:
            if object_map['_id'] == object_id:
                sorted_result.append(object_map)
                break
    return objects.AuthorizationList(sorted_result, runtime=self._runtime, proxy=self._proxy)
[ "def", "get_authorizations_by_ids", "(", "self", ",", "authorization_ids", ")", ":", "# Implemented from template for", "# osid.resource.ResourceLookupSession.get_resources_by_ids", "# NOTE: This implementation currently ignores plenary view", "collection", "=", "JSONClientValidated", "(", "'authorization'", ",", "collection", "=", "'Authorization'", ",", "runtime", "=", "self", ".", "_runtime", ")", "object_id_list", "=", "[", "]", "for", "i", "in", "authorization_ids", ":", "object_id_list", ".", "append", "(", "ObjectId", "(", "self", ".", "_get_id", "(", "i", ",", "'authorization'", ")", ".", "get_identifier", "(", ")", ")", ")", "result", "=", "collection", ".", "find", "(", "dict", "(", "{", "'_id'", ":", "{", "'$in'", ":", "object_id_list", "}", "}", ",", "*", "*", "self", ".", "_view_filter", "(", ")", ")", ")", "result", "=", "list", "(", "result", ")", "sorted_result", "=", "[", "]", "for", "object_id", "in", "object_id_list", ":", "for", "object_map", "in", "result", ":", "if", "object_map", "[", "'_id'", "]", "==", "object_id", ":", "sorted_result", ".", "append", "(", "object_map", ")", "break", "return", "objects", ".", "AuthorizationList", "(", "sorted_result", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")" ]
Gets an ``AuthorizationList`` corresponding to the given ``IdList``.

In plenary mode, the returned list contains all of the authorizations
specified in the ``Id`` list, in the order of the list, including
duplicates, or an error results if an ``Id`` in the supplied list is not
found or inaccessible. Otherwise, inaccessible ``Authorizations`` may be
omitted from the list and may present the elements in any order including
returning a unique set.

arg:    authorization_ids (osid.id.IdList): the list of ``Ids`` to retrieve
return: (osid.authorization.AuthorizationList) - the returned
        ``Authorization list``
raise:  NotFound - an ``Id was`` not found
raise:  NullArgument - ``authorization_ids`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
[ "Gets", "an", "AuthorizationList", "corresponding", "to", "the", "given", "IdList", "." ]
python
train
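The nested loop at the end exists because a $in query returns documents in arbitrary order, so results must be re-sorted to match the requested ids. The same idea in plain Python, using a dict index instead of the quadratic scan (stand-in dicts in place of Mongo documents):

requested = [3, 1, 2]                                  # ids in caller order
unordered = [{'_id': 1}, {'_id': 2}, {'_id': 3}]       # what $in gives back
by_id = {doc['_id']: doc for doc in unordered}         # index once...
ordered = [by_id[i] for i in requested if i in by_id]  # ...then re-sort
print([doc['_id'] for doc in ordered])                 # [3, 1, 2]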
linkedin/luminol
src/luminol/anomaly_detector.py
https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/anomaly_detector.py#L79-L89
def _get_algorithm(self, algorithm_name):
    """
    Get the specific algorithm.

    :param str algorithm_name: name of the algorithm to use (file name).
    :return: algorithm object.
    """
    try:
        algorithm = anomaly_detector_algorithms[algorithm_name]
        return algorithm
    except KeyError:
        raise exceptions.AlgorithmNotFound('luminol.AnomalyDetector: ' + str(algorithm_name) + ' not found.')
[ "def", "_get_algorithm", "(", "self", ",", "algorithm_name", ")", ":", "try", ":", "algorithm", "=", "anomaly_detector_algorithms", "[", "algorithm_name", "]", "return", "algorithm", "except", "KeyError", ":", "raise", "exceptions", ".", "AlgorithmNotFound", "(", "'luminol.AnomalyDetector: '", "+", "str", "(", "algorithm_name", ")", "+", "' not found.'", ")" ]
Get the specific algorithm.

:param str algorithm_name: name of the algorithm to use (file name).
:return: algorithm object.
[ "Get", "the", "specific", "algorithm", ".", ":", "param", "str", "algorithm_name", ":", "name", "of", "the", "algorithm", "to", "use", "(", "file", "name", ")", ".", ":", "return", ":", "algorithm", "object", "." ]
python
train
log2timeline/dfvfs
dfvfs/volume/volume_system.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/volume/volume_system.py#L195-L201
def sections(self):
    """list[VolumeExtent]: sections."""
    if not self._is_parsed:
        self._Parse()
        self._is_parsed = True

    return self._sections
[ "def", "sections", "(", "self", ")", ":", "if", "not", "self", ".", "_is_parsed", ":", "self", ".", "_Parse", "(", ")", "self", ".", "_is_parsed", "=", "True", "return", "self", ".", "_sections" ]
list[VolumeExtent]: sections.
[ "list", "[", "VolumeExtent", "]", ":", "sections", "." ]
python
train
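The parse-on-first-access property generalises well; a compact runnable sketch of the same lazy pattern:

class VolumeSystem:
    """Toy version of the parse-on-first-access property."""

    def __init__(self):
        self._is_parsed = False
        self._sections = None

    def _Parse(self):
        print('parsing once')
        self._sections = ['section0', 'section1']

    @property
    def sections(self):
        # Parse lazily, the first time the attribute is read.
        if not self._is_parsed:
            self._Parse()
            self._is_parsed = True
        return self._sections

vs = VolumeSystem()
print(vs.sections)  # triggers the parse
print(vs.sections)  # returns the cached result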
MartinThoma/hwrt
hwrt/partitions.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/partitions.py#L160-L177
def get_top_segmentations(table, n):
    """
    Parameters
    ----------
    table : matrix of probabilities
        Each cell (i, j) of `table` gives the probability that i and j
        are in the same symbol.
    n : int
        Number of best segmentations which get returned
    """
    stroke_count = list(range(len(table)))
    topf = TopFinder(n)
    for curr_segmentation in all_segmentations(stroke_count):
        curr_seg_score = score_segmentation(curr_segmentation, table)
        topf.push(curr_segmentation, curr_seg_score)
    for el, score in topf:
        yield [normalize_segmentation(el), score]
[ "def", "get_top_segmentations", "(", "table", ",", "n", ")", ":", "stroke_count", "=", "list", "(", "range", "(", "len", "(", "table", ")", ")", ")", "topf", "=", "TopFinder", "(", "n", ")", "for", "curr_segmentation", "in", "all_segmentations", "(", "stroke_count", ")", ":", "curr_seg_score", "=", "score_segmentation", "(", "curr_segmentation", ",", "table", ")", "topf", ".", "push", "(", "curr_segmentation", ",", "curr_seg_score", ")", "for", "el", ",", "score", "in", "topf", ":", "yield", "[", "normalize_segmentation", "(", "el", ")", ",", "score", "]" ]
Parameters
----------
table : matrix of probabilities
    Each cell (i, j) of `table` gives the probability that i and j
    are in the same symbol.
n : int
    Number of best segmentations which get returned
[ "Parameters", "----------", "table", ":", "matrix", "of", "probabilities", "Each", "cell", "(", "i", "j", ")", "of", "table", "gives", "the", "probability", "that", "i", "and", "j", "are", "in", "the", "same", "symbol", ".", "n", ":", "int", "Number", "of", "best", "segmentations", "which", "get", "returned" ]
python
train
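TopFinder is repo-specific, but the scan-score-keep-the-n-best loop maps directly onto the standard library's heapq.nlargest; a stand-in with made-up segmentations and scores:

import heapq

# Hypothetical segmentations and scores, standing in for
# all_segmentations() and score_segmentation().
candidates = ['ab', 'a|b', 'a||b']
scores = {'ab': 0.2, 'a|b': 0.9, 'a||b': 0.5}

best = heapq.nlargest(2, candidates, key=scores.get)
print(best)  # ['a|b', 'a||b']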
wonambi-python/wonambi
wonambi/widgets/modal_widgets.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/modal_widgets.py#L102-L121
def update_cycles(self):
    """Enable cycles checkbox only if there are cycles marked, with no
    errors."""
    self.idx_cycle.clear()

    try:
        self.cycles = self.parent.notes.annot.get_cycles()
    except ValueError as err:
        self.idx_cycle.setEnabled(False)
        msg = 'There is a problem with the cycle markers: ' + str(err)
        self.parent.statusBar().showMessage(msg)
    else:
        if self.cycles is None:
            self.idx_cycle.setEnabled(False)
        else:
            self.idx_cycle.setEnabled(True)
            for i in range(len(self.cycles)):
                self.idx_cycle.addItem(str(i + 1))
[ "def", "update_cycles", "(", "self", ")", ":", "self", ".", "idx_cycle", ".", "clear", "(", ")", "try", ":", "self", ".", "cycles", "=", "self", ".", "parent", ".", "notes", ".", "annot", ".", "get_cycles", "(", ")", "except", "ValueError", "as", "err", ":", "self", ".", "idx_cycle", ".", "setEnabled", "(", "False", ")", "msg", "=", "'There is a problem with the cycle markers: '", "+", "str", "(", "err", ")", "self", ".", "parent", ".", "statusBar", "(", ")", ".", "showMessage", "(", "msg", ")", "else", ":", "if", "self", ".", "cycles", "is", "None", ":", "self", ".", "idx_cycle", ".", "setEnabled", "(", "False", ")", "else", ":", "self", ".", "idx_cycle", ".", "setEnabled", "(", "True", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "cycles", ")", ")", ":", "self", ".", "idx_cycle", ".", "addItem", "(", "str", "(", "i", "+", "1", ")", ")" ]
Enable cycles checkbox only if there are cycles marked, with no errors.
[ "Enable", "cycles", "checkbox", "only", "if", "there", "are", "cycles", "marked", "with", "no", "errors", "." ]
python
train
pypa/pipenv
pipenv/vendor/distlib/database.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/database.py#L57-L63
def clear(self):
    """
    Clear the cache, setting it to its initial state.
    """
    self.name.clear()
    self.path.clear()
    self.generated = False
[ "def", "clear", "(", "self", ")", ":", "self", ".", "name", ".", "clear", "(", ")", "self", ".", "path", ".", "clear", "(", ")", "self", ".", "generated", "=", "False" ]
Clear the cache, setting it to its initial state.
[ "Clear", "the", "cache", "setting", "it", "to", "its", "initial", "state", "." ]
python
train
google/grr
grr/client_builder/grr_response_client_builder/builders/signing.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client_builder/grr_response_client_builder/builders/signing.py#L85-L103
def SignBuffer(self, in_buffer):
    """Sign a buffer via temp files.

    Our signing tool can't sign a buffer, so we work around it using
    temporary files.

    Args:
      in_buffer: data to sign

    Returns:
      signed data
    """
    precondition.AssertType(in_buffer, bytes)

    with tempfile.NamedTemporaryFile() as temp_in:
        temp_in.write(in_buffer)
        temp_in.seek(0)
        outfile = self.SignFile(temp_in.name)
        with io.open(outfile, "rb") as filedesc:
            return filedesc.read()
[ "def", "SignBuffer", "(", "self", ",", "in_buffer", ")", ":", "precondition", ".", "AssertType", "(", "in_buffer", ",", "bytes", ")", "with", "tempfile", ".", "NamedTemporaryFile", "(", ")", "as", "temp_in", ":", "temp_in", ".", "write", "(", "in_buffer", ")", "temp_in", ".", "seek", "(", "0", ")", "outfile", "=", "self", ".", "SignFile", "(", "temp_in", ".", "name", ")", "with", "io", ".", "open", "(", "outfile", ",", "\"rb\"", ")", "as", "filedesc", ":", "return", "filedesc", ".", "read", "(", ")" ]
Sign a buffer via temp files.

Our signing tool can't sign a buffer, so we work around it using
temporary files.

Args:
    in_buffer: data to sign

Returns:
    signed data
[ "Sign", "a", "buffer", "via", "temp", "files", "." ]
python
train
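The write-seek-hand-off-read dance around NamedTemporaryFile can be demoed on its own (POSIX only, since Windows will not let a second open reach an already-open temp file; upper-casing stands in for the external signing tool):

import io
import os
import tempfile

def sign_file(path):
    # Stand-in for the external signing tool: writes an uppercased copy.
    out = path + '.signed'
    with io.open(path, 'rb') as src, io.open(out, 'wb') as dst:
        dst.write(src.read().upper())
    return out

with tempfile.NamedTemporaryFile() as temp_in:
    temp_in.write(b'payload')
    temp_in.seek(0)             # flushes the buffer so the tool sees the bytes
    outfile = sign_file(temp_in.name)
    with io.open(outfile, 'rb') as filedesc:
        print(filedesc.read())  # b'PAYLOAD'
os.remove(outfile)              # clean up the signed copy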
Jajcus/pyxmpp2
pyxmpp2/ext/muc/muccore.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/muc/muccore.py#L157-L186
def set_history(self, parameters):
    """
    Set history parameters.

    Types:
        - `parameters`: `HistoryParameters`
    """
    for child in xml_element_iter(self.xmlnode.children):
        if get_node_ns_uri(child) == MUC_NS and child.name == "history":
            child.unlinkNode()
            child.freeNode()
            break

    if parameters.maxchars and parameters.maxchars < 0:
        raise ValueError("History parameter maxchars must be positive")
    if parameters.maxstanzas and parameters.maxstanzas < 0:
        raise ValueError("History parameter maxstanzas must be positive")
    if parameters.maxseconds and parameters.maxseconds < 0:
        raise ValueError("History parameter maxseconds must be positive")

    hnode = self.xmlnode.newChild(self.xmlnode.ns(), "history", None)

    if parameters.maxchars is not None:
        hnode.setProp("maxchars", str(parameters.maxchars))
    if parameters.maxstanzas is not None:
        hnode.setProp("maxstanzas", str(parameters.maxstanzas))
    if parameters.maxseconds is not None:
        hnode.setProp("maxseconds", str(parameters.maxseconds))
    if parameters.since is not None:
        hnode.setProp("since", parameters.since.strftime("%Y-%m-%dT%H:%M:%SZ"))
[ "def", "set_history", "(", "self", ",", "parameters", ")", ":", "for", "child", "in", "xml_element_iter", "(", "self", ".", "xmlnode", ".", "children", ")", ":", "if", "get_node_ns_uri", "(", "child", ")", "==", "MUC_NS", "and", "child", ".", "name", "==", "\"history\"", ":", "child", ".", "unlinkNode", "(", ")", "child", ".", "freeNode", "(", ")", "break", "if", "parameters", ".", "maxchars", "and", "parameters", ".", "maxchars", "<", "0", ":", "raise", "ValueError", "(", "\"History parameter maxchars must be positive\"", ")", "if", "parameters", ".", "maxstanzas", "and", "parameters", ".", "maxstanzas", "<", "0", ":", "raise", "ValueError", "(", "\"History parameter maxstanzas must be positive\"", ")", "if", "parameters", ".", "maxseconds", "and", "parameters", ".", "maxseconds", "<", "0", ":", "raise", "ValueError", "(", "\"History parameter maxseconds must be positive\"", ")", "hnode", "=", "self", ".", "xmlnode", ".", "newChild", "(", "self", ".", "xmlnode", ".", "ns", "(", ")", ",", "\"history\"", ",", "None", ")", "if", "parameters", ".", "maxchars", "is", "not", "None", ":", "hnode", ".", "setProp", "(", "\"maxchars\"", ",", "str", "(", "parameters", ".", "maxchars", ")", ")", "if", "parameters", ".", "maxstanzas", "is", "not", "None", ":", "hnode", ".", "setProp", "(", "\"maxstanzas\"", ",", "str", "(", "parameters", ".", "maxstanzas", ")", ")", "if", "parameters", ".", "maxseconds", "is", "not", "None", ":", "hnode", ".", "setProp", "(", "\"maxseconds\"", ",", "str", "(", "parameters", ".", "maxseconds", ")", ")", "if", "parameters", ".", "since", "is", "not", "None", ":", "hnode", ".", "setProp", "(", "\"since\"", ",", "parameters", ".", "since", ".", "strftime", "(", "\"%Y-%m-%dT%H:%M:%SZ\"", ")", ")" ]
Set history parameters. Types: - `parameters`: `HistoryParameters`
[ "Set", "history", "parameters", "." ]
python
valid
saltstack/salt
salt/modules/zpool.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zpool.py#L1384-L1435
def offline(zpool, *vdevs, **kwargs):
    '''
    .. versionadded:: 2015.5.0

    Ensure that the specified devices are offline

    .. warning::

        By default, the ``OFFLINE`` state is persistent. The device remains
        offline when the system is rebooted. To temporarily take a device
        offline, use ``temporary=True``.

    zpool : string
        name of storage pool
    vdevs : string
        One or more devices
    temporary : boolean
        Enable temporarily offline

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.offline myzpool /path/to/vdev1 [...] [temporary=True|False]
    '''
    ## Configure pool
    # NOTE: initialize the defaults
    flags = []
    target = []

    # NOTE: set extra config based on kwargs
    if kwargs.get('temporary', False):
        flags.append('-t')

    # NOTE: append the pool name and specifications
    target.append(zpool)
    target.extend(vdevs)

    ## Take a device offline
    res = __salt__['cmd.run_all'](
        __utils__['zfs.zpool_command'](
            command='offline',
            flags=flags,
            target=target,
        ),
        python_shell=False,
    )

    return __utils__['zfs.parse_command_result'](res, 'offlined')
[ "def", "offline", "(", "zpool", ",", "*", "vdevs", ",", "*", "*", "kwargs", ")", ":", "## Configure pool", "# NOTE: initialize the defaults", "flags", "=", "[", "]", "target", "=", "[", "]", "# NOTE: set extra config based on kwargs", "if", "kwargs", ".", "get", "(", "'temporary'", ",", "False", ")", ":", "flags", ".", "append", "(", "'-t'", ")", "# NOTE: append the pool name and specifications", "target", ".", "append", "(", "zpool", ")", "target", ".", "extend", "(", "vdevs", ")", "## Take a device offline", "res", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "__utils__", "[", "'zfs.zpool_command'", "]", "(", "command", "=", "'offline'", ",", "flags", "=", "flags", ",", "target", "=", "target", ",", ")", ",", "python_shell", "=", "False", ",", ")", "return", "__utils__", "[", "'zfs.parse_command_result'", "]", "(", "res", ",", "'offlined'", ")" ]
.. versionadded:: 2015.5.0

Ensure that the specified devices are offline

.. warning::

    By default, the ``OFFLINE`` state is persistent. The device remains
    offline when the system is rebooted. To temporarily take a device
    offline, use ``temporary=True``.

zpool : string
    name of storage pool
vdevs : string
    One or more devices
temporary : boolean
    Enable temporarily offline

CLI Example:

.. code-block:: bash

    salt '*' zpool.offline myzpool /path/to/vdev1 [...] [temporary=True|False]
[ "..", "versionadded", "::", "2015", ".", "5", ".", "0" ]
python
train
pyviz/holoviews
holoviews/streams.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/streams.py#L528-L548
def verify(self, x):
    """
    Verify consistency of dataframes that pass through this stream
    """
    if type(x) != type(self.data):
        raise TypeError("Input expected to be of type %s, got %s." %
                        (type(self.data).__name__, type(x).__name__))
    elif isinstance(x, np.ndarray):
        if x.ndim != 2:
            raise ValueError('Streamed array data must be two-dimensional')
        elif x.shape[1] != self.data.shape[1]:
            raise ValueError("Streamed array data expected to have %d columns, "
                             "got %d." % (self.data.shape[1], x.shape[1]))
    elif util.pd and isinstance(x, util.pd.DataFrame) and list(x.columns) != list(self.data.columns):
        raise IndexError("Input expected to have columns %s, got %s" %
                         (list(self.data.columns), list(x.columns)))
    elif isinstance(x, dict):
        if any(c not in x for c in self.data):
            raise IndexError("Input expected to have columns %s, got %s" %
                             (sorted(self.data.keys()), sorted(x.keys())))
        elif len(set(len(v) for v in x.values())) > 1:
            raise ValueError("Input columns expected to have the "
                             "same number of rows.")
[ "def", "verify", "(", "self", ",", "x", ")", ":", "if", "type", "(", "x", ")", "!=", "type", "(", "self", ".", "data", ")", ":", "raise", "TypeError", "(", "\"Input expected to be of type %s, got %s.\"", "%", "(", "type", "(", "self", ".", "data", ")", ".", "__name__", ",", "type", "(", "x", ")", ".", "__name__", ")", ")", "elif", "isinstance", "(", "x", ",", "np", ".", "ndarray", ")", ":", "if", "x", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "'Streamed array data must be two-dimensional'", ")", "elif", "x", ".", "shape", "[", "1", "]", "!=", "self", ".", "data", ".", "shape", "[", "1", "]", ":", "raise", "ValueError", "(", "\"Streamed array data expeced to have %d columns, \"", "\"got %d.\"", "%", "(", "self", ".", "data", ".", "shape", "[", "1", "]", ",", "x", ".", "shape", "[", "1", "]", ")", ")", "elif", "util", ".", "pd", "and", "isinstance", "(", "x", ",", "util", ".", "pd", ".", "DataFrame", ")", "and", "list", "(", "x", ".", "columns", ")", "!=", "list", "(", "self", ".", "data", ".", "columns", ")", ":", "raise", "IndexError", "(", "\"Input expected to have columns %s, got %s\"", "%", "(", "list", "(", "self", ".", "data", ".", "columns", ")", ",", "list", "(", "x", ".", "columns", ")", ")", ")", "elif", "isinstance", "(", "x", ",", "dict", ")", ":", "if", "any", "(", "c", "not", "in", "x", "for", "c", "in", "self", ".", "data", ")", ":", "raise", "IndexError", "(", "\"Input expected to have columns %s, got %s\"", "%", "(", "sorted", "(", "self", ".", "data", ".", "keys", "(", ")", ")", ",", "sorted", "(", "x", ".", "keys", "(", ")", ")", ")", ")", "elif", "len", "(", "set", "(", "len", "(", "v", ")", "for", "v", "in", "x", ".", "values", "(", ")", ")", ")", ">", "1", ":", "raise", "ValueError", "(", "\"Input columns expected to have the \"", "\"same number of rows.\"", ")" ]
Verify consistency of dataframes that pass through this stream
[ "Verify", "consistency", "of", "dataframes", "that", "pass", "through", "this", "stream" ]
python
train
datastax/python-driver
cassandra/metadata.py
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/metadata.py#L264-L291
def rebuild_token_map(self, partitioner, token_map):
    """
    Rebuild our view of the topology from fresh rows from the
    system topology tables.
    For internal use only.
    """
    self.partitioner = partitioner
    if partitioner.endswith('RandomPartitioner'):
        token_class = MD5Token
    elif partitioner.endswith('Murmur3Partitioner'):
        token_class = Murmur3Token
    elif partitioner.endswith('ByteOrderedPartitioner'):
        token_class = BytesToken
    else:
        self.token_map = None
        return

    token_to_host_owner = {}
    ring = []
    for host, token_strings in six.iteritems(token_map):
        for token_string in token_strings:
            token = token_class.from_string(token_string)
            ring.append(token)
            token_to_host_owner[token] = host

    all_tokens = sorted(ring)
    self.token_map = TokenMap(
        token_class, token_to_host_owner, all_tokens, self)
[ "def", "rebuild_token_map", "(", "self", ",", "partitioner", ",", "token_map", ")", ":", "self", ".", "partitioner", "=", "partitioner", "if", "partitioner", ".", "endswith", "(", "'RandomPartitioner'", ")", ":", "token_class", "=", "MD5Token", "elif", "partitioner", ".", "endswith", "(", "'Murmur3Partitioner'", ")", ":", "token_class", "=", "Murmur3Token", "elif", "partitioner", ".", "endswith", "(", "'ByteOrderedPartitioner'", ")", ":", "token_class", "=", "BytesToken", "else", ":", "self", ".", "token_map", "=", "None", "return", "token_to_host_owner", "=", "{", "}", "ring", "=", "[", "]", "for", "host", ",", "token_strings", "in", "six", ".", "iteritems", "(", "token_map", ")", ":", "for", "token_string", "in", "token_strings", ":", "token", "=", "token_class", ".", "from_string", "(", "token_string", ")", "ring", ".", "append", "(", "token", ")", "token_to_host_owner", "[", "token", "]", "=", "host", "all_tokens", "=", "sorted", "(", "ring", ")", "self", ".", "token_map", "=", "TokenMap", "(", "token_class", ",", "token_to_host_owner", ",", "all_tokens", ",", "self", ")" ]
Rebuild our view of the topology from fresh rows from the system topology tables. For internal use only.
[ "Rebuild", "our", "view", "of", "the", "topology", "from", "fresh", "rows", "from", "the", "system", "topology", "tables", ".", "For", "internal", "use", "only", "." ]
python
train
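The reason the ring is sorted is that token ownership lookups can then bisect: the first ring token at or after a key's token owns it, wrapping past the end. A toy version of that lookup, with hypothetical token values and the driver's TokenMap internals simplified away:

from bisect import bisect_left

ring = sorted([5, 20, 60])                      # hypothetical token values
owner = {5: 'host-a', 20: 'host-b', 60: 'host-c'}

def find_owner(token):
    # First ring token >= the key owns it; wrap past the last token.
    idx = bisect_left(ring, token) % len(ring)
    return owner[ring[idx]]

print(find_owner(13))  # host-b
print(find_owner(99))  # host-a (wraps around)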
nion-software/nionswift
nion/swift/LineGraphCanvasItem.py
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/LineGraphCanvasItem.py#L936-L946
def size_to_content(self):
    """ Size the canvas item to the proper width. """
    new_sizing = self.copy_sizing()
    new_sizing.minimum_width = 0
    new_sizing.maximum_width = 0
    axes = self.__axes
    if axes and axes.is_valid:
        if axes.y_calibration and axes.y_calibration.units:
            new_sizing.minimum_width = self.font_size + 4
            new_sizing.maximum_width = self.font_size + 4
    self.update_sizing(new_sizing)
[ "def", "size_to_content", "(", "self", ")", ":", "new_sizing", "=", "self", ".", "copy_sizing", "(", ")", "new_sizing", ".", "minimum_width", "=", "0", "new_sizing", ".", "maximum_width", "=", "0", "axes", "=", "self", ".", "__axes", "if", "axes", "and", "axes", ".", "is_valid", ":", "if", "axes", ".", "y_calibration", "and", "axes", ".", "y_calibration", ".", "units", ":", "new_sizing", ".", "minimum_width", "=", "self", ".", "font_size", "+", "4", "new_sizing", ".", "maximum_width", "=", "self", ".", "font_size", "+", "4", "self", ".", "update_sizing", "(", "new_sizing", ")" ]
Size the canvas item to the proper width.
[ "Size", "the", "canvas", "item", "to", "the", "proper", "width", "." ]
python
train
pudo-attic/scrapekit
scrapekit/logs.py
https://github.com/pudo-attic/scrapekit/blob/cfd258120922fcd571430cdf00ba50f3cf18dc15/scrapekit/logs.py#L42-L45
def log_path(scraper):
    """ Determine the file name for the JSON log. """
    return os.path.join(scraper.config.data_path,
                        '%s.jsonlog' % scraper.name)
[ "def", "log_path", "(", "scraper", ")", ":", "return", "os", ".", "path", ".", "join", "(", "scraper", ".", "config", ".", "data_path", ",", "'%s.jsonlog'", "%", "scraper", ".", "name", ")" ]
Determine the file name for the JSON log.
[ "Determine", "the", "file", "name", "for", "the", "JSON", "log", "." ]
python
train
ray-project/ray
python/ray/tune/schedulers/hyperband.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/schedulers/hyperband.py#L199-L207
def on_trial_remove(self, trial_runner, trial):
    """Notification when trial terminates.

    Trial info is removed from bracket. Triggers halving if bracket is
    not finished."""
    bracket, _ = self._trial_info[trial]
    bracket.cleanup_trial(trial)
    if not bracket.finished():
        self._process_bracket(trial_runner, bracket, trial)
[ "def", "on_trial_remove", "(", "self", ",", "trial_runner", ",", "trial", ")", ":", "bracket", ",", "_", "=", "self", ".", "_trial_info", "[", "trial", "]", "bracket", ".", "cleanup_trial", "(", "trial", ")", "if", "not", "bracket", ".", "finished", "(", ")", ":", "self", ".", "_process_bracket", "(", "trial_runner", ",", "bracket", ",", "trial", ")" ]
Notification when trial terminates. Trial info is removed from bracket. Triggers halving if bracket is not finished.
[ "Notification", "when", "trial", "terminates", "." ]
python
train
kensho-technologies/graphql-compiler
graphql_compiler/compiler/filters.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/filters.py#L546-L561
def _get_filter_op_name_and_values(directive):
    """Extract the (op_name, operator_params) tuple from a directive object."""
    args = get_uniquely_named_objects_by_name(directive.arguments)
    if 'op_name' not in args:
        raise AssertionError(u'op_name not found in filter directive arguments!'
                             u'Validation should have caught this: {}'.format(directive))

    # HACK(predrag): Workaround for graphql-core validation issue
    #                https://github.com/graphql-python/graphql-core/issues/97
    if not isinstance(args['value'].value, ListValue):
        raise GraphQLValidationError(u'Filter directive value was not a list: {}'.format(directive))

    op_name = args['op_name'].value.value
    operator_params = [x.value for x in args['value'].value.values]

    return (op_name, operator_params)
[ "def", "_get_filter_op_name_and_values", "(", "directive", ")", ":", "args", "=", "get_uniquely_named_objects_by_name", "(", "directive", ".", "arguments", ")", "if", "'op_name'", "not", "in", "args", ":", "raise", "AssertionError", "(", "u'op_name not found in filter directive arguments!'", "u'Validation should have caught this: {}'", ".", "format", "(", "directive", ")", ")", "# HACK(predrag): Workaround for graphql-core validation issue", "# https://github.com/graphql-python/graphql-core/issues/97", "if", "not", "isinstance", "(", "args", "[", "'value'", "]", ".", "value", ",", "ListValue", ")", ":", "raise", "GraphQLValidationError", "(", "u'Filter directive value was not a list: {}'", ".", "format", "(", "directive", ")", ")", "op_name", "=", "args", "[", "'op_name'", "]", ".", "value", ".", "value", "operator_params", "=", "[", "x", ".", "value", "for", "x", "in", "args", "[", "'value'", "]", ".", "value", ".", "values", "]", "return", "(", "op_name", ",", "operator_params", ")" ]
Extract the (op_name, operator_params) tuple from a directive object.
[ "Extract", "the", "(", "op_name", "operator_params", ")", "tuple", "from", "a", "directive", "object", "." ]
python
train
Alignak-monitoring/alignak
alignak/objects/host.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/host.py#L304-L317
def fill_predictive_missing_parameters(self): """Fill address with host_name if not already set and define state with initial_state :return: None """ if hasattr(self, 'host_name') and not hasattr(self, 'address'): self.address = self.host_name if hasattr(self, 'host_name') and not hasattr(self, 'alias'): self.alias = self.host_name if self.initial_state == 'd': self.state = 'DOWN' elif self.initial_state == 'x': self.state = 'UNREACHABLE'
[ "def", "fill_predictive_missing_parameters", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'host_name'", ")", "and", "not", "hasattr", "(", "self", ",", "'address'", ")", ":", "self", ".", "address", "=", "self", ".", "host_name", "if", "hasattr", "(", "self", ",", "'host_name'", ")", "and", "not", "hasattr", "(", "self", ",", "'alias'", ")", ":", "self", ".", "alias", "=", "self", ".", "host_name", "if", "self", ".", "initial_state", "==", "'d'", ":", "self", ".", "state", "=", "'DOWN'", "elif", "self", ".", "initial_state", "==", "'x'", ":", "self", ".", "state", "=", "'UNREACHABLE'" ]
Fill address with host_name if not already set and define state with initial_state :return: None
[ "Fill", "address", "with", "host_name", "if", "not", "already", "set", "and", "define", "state", "with", "initial_state" ]
python
train
jazzband/django-axes
axes/helpers.py
https://github.com/jazzband/django-axes/blob/3e215a174030e43e7ab8c2a79c395eb0eeddc667/axes/helpers.py#L106-L134
def get_client_username(request: AxesHttpRequest, credentials: dict = None) -> str: """ Resolve client username from the given request or credentials if supplied. The order of preference for fetching the username is as follows: 1. If configured, use ``AXES_USERNAME_CALLABLE``, and supply ``request, credentials`` as arguments 2. If given, use ``credentials`` and fetch username from ``AXES_USERNAME_FORM_FIELD`` (defaults to ``username``) 3. Use request.POST and fetch username from ``AXES_USERNAME_FORM_FIELD`` (defaults to ``username``) :param request: incoming Django ``HttpRequest`` or similar object from authentication backend or other source :param credentials: incoming credentials ``dict`` or similar object from authentication backend or other source """ if settings.AXES_USERNAME_CALLABLE: log.debug('Using settings.AXES_USERNAME_CALLABLE to get username') if callable(settings.AXES_USERNAME_CALLABLE): return settings.AXES_USERNAME_CALLABLE(request, credentials) if isinstance(settings.AXES_USERNAME_CALLABLE, str): return import_string(settings.AXES_USERNAME_CALLABLE)(request, credentials) raise TypeError('settings.AXES_USERNAME_CALLABLE needs to be a string, callable, or None.') if credentials: log.debug('Using parameter credentials to get username with key settings.AXES_USERNAME_FORM_FIELD') return credentials.get(settings.AXES_USERNAME_FORM_FIELD, None) log.debug('Using parameter request.POST to get username with key settings.AXES_USERNAME_FORM_FIELD') return request.POST.get(settings.AXES_USERNAME_FORM_FIELD, None)
[ "def", "get_client_username", "(", "request", ":", "AxesHttpRequest", ",", "credentials", ":", "dict", "=", "None", ")", "->", "str", ":", "if", "settings", ".", "AXES_USERNAME_CALLABLE", ":", "log", ".", "debug", "(", "'Using settings.AXES_USERNAME_CALLABLE to get username'", ")", "if", "callable", "(", "settings", ".", "AXES_USERNAME_CALLABLE", ")", ":", "return", "settings", ".", "AXES_USERNAME_CALLABLE", "(", "request", ",", "credentials", ")", "if", "isinstance", "(", "settings", ".", "AXES_USERNAME_CALLABLE", ",", "str", ")", ":", "return", "import_string", "(", "settings", ".", "AXES_USERNAME_CALLABLE", ")", "(", "request", ",", "credentials", ")", "raise", "TypeError", "(", "'settings.AXES_USERNAME_CALLABLE needs to be a string, callable, or None.'", ")", "if", "credentials", ":", "log", ".", "debug", "(", "'Using parameter credentials to get username with key settings.AXES_USERNAME_FORM_FIELD'", ")", "return", "credentials", ".", "get", "(", "settings", ".", "AXES_USERNAME_FORM_FIELD", ",", "None", ")", "log", ".", "debug", "(", "'Using parameter request.POST to get username with key settings.AXES_USERNAME_FORM_FIELD'", ")", "return", "request", ".", "POST", ".", "get", "(", "settings", ".", "AXES_USERNAME_FORM_FIELD", ",", "None", ")" ]
Resolve client username from the given request or credentials if supplied. The order of preference for fetching the username is as follows: 1. If configured, use ``AXES_USERNAME_CALLABLE``, and supply ``request, credentials`` as arguments 2. If given, use ``credentials`` and fetch username from ``AXES_USERNAME_FORM_FIELD`` (defaults to ``username``) 3. Use request.POST and fetch username from ``AXES_USERNAME_FORM_FIELD`` (defaults to ``username``) :param request: incoming Django ``HttpRequest`` or similar object from authentication backend or other source :param credentials: incoming credentials ``dict`` or similar object from authentication backend or other source
[ "Resolve", "client", "username", "from", "the", "given", "request", "or", "credentials", "if", "supplied", "." ]
python
train
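A hedged usage sketch of the resolution order above. It assumes Django and django-axes are installed, settings are already configured, AXES_USERNAME_CALLABLE is unset, and the form field is the default 'username'; the request stand-in only provides the .POST attribute this code path reads.

```python
# Assumes a configured Django project with django-axes installed,
# AXES_USERNAME_CALLABLE unset and AXES_USERNAME_FORM_FIELD = 'username'.
from types import SimpleNamespace
from axes.helpers import get_client_username

request = SimpleNamespace(POST={'username': 'bob'})  # stand-in for HttpRequest

print(get_client_username(request, {'username': 'alice'}))  # 'alice': credentials win
print(get_client_username(request))                         # 'bob': falls back to POST
```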
alorence/pysvg-py3
pysvg/turtle.py
https://github.com/alorence/pysvg-py3/blob/ce217a4da3ada44a71d3e2f391d37c67d95c724e/pysvg/turtle.py#L187-L193
def getXML(self): """Retrieves the pysvg elements that make up the turtle's path and returns them as a string in an XML representation. """ s = '' for element in self._svgElements: s += element.getXML() return s
[ "def", "getXML", "(", "self", ")", ":", "s", "=", "''", "for", "element", "in", "self", ".", "_svgElements", ":", "s", "+=", "element", ".", "getXML", "(", ")", "return", "s" ]
Retrieves the pysvg elements that make up the turtle's path and returns them as a string in an XML representation.
[ "Retrieves", "the", "pysvg", "elements", "that", "make", "up", "the", "turtles", "path", "and", "returns", "them", "as", "String", "in", "an", "xml", "representation", "." ]
python
train
willprice/python-omxplayer-wrapper
omxplayer/player.py
https://github.com/willprice/python-omxplayer-wrapper/blob/f242cb391f0fd07be2d9211c13ebe72fbc628fa3/omxplayer/player.py#L618-L629
def set_video_pos(self, x1, y1, x2, y2): """ Set the video position on the screen Args: x1 (int): Top left x coordinate (px) y1 (int): Top left y coordinate (px) x2 (int): Bottom right x coordinate (px) y2 (int): Bottom right y coordinate (px) """ position = "%s %s %s %s" % (str(x1),str(y1),str(x2),str(y2)) self._player_interface.VideoPos(ObjectPath('/not/used'), String(position))
[ "def", "set_video_pos", "(", "self", ",", "x1", ",", "y1", ",", "x2", ",", "y2", ")", ":", "position", "=", "\"%s %s %s %s\"", "%", "(", "str", "(", "x1", ")", ",", "str", "(", "y1", ")", ",", "str", "(", "x2", ")", ",", "str", "(", "y2", ")", ")", "self", ".", "_player_interface", ".", "VideoPos", "(", "ObjectPath", "(", "'/not/used'", ")", ",", "String", "(", "position", ")", ")" ]
Set the video position on the screen Args: x1 (int): Top left x coordinate (px) y1 (int): Top left y coordinate (px) x2 (int): Bottom right x coordinate (px) y2 (int): Bottom right y coordinate (px)
[ "Set", "the", "video", "position", "on", "the", "screen" ]
python
valid
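A short usage sketch for set_video_pos, assuming a Raspberry Pi with omxplayer and this wrapper installed; the media path and coordinates are illustrative.

```python
# Assumes omxplayer and python-omxplayer-wrapper are installed (Raspberry Pi).
from omxplayer.player import OMXPlayer

player = OMXPlayer('/home/pi/video.mp4')  # media path is illustrative
player.set_video_pos(0, 0, 640, 360)      # pin video to a 640x360 top-left region
```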
saltstack/salt
salt/master.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1130-L1141
def run(self): ''' Start a Master Worker ''' salt.utils.process.appendproctitle(self.name) self.clear_funcs = ClearFuncs( self.opts, self.key, ) self.aes_funcs = AESFuncs(self.opts) salt.utils.crypt.reinit_crypto() self.__bind()
[ "def", "run", "(", "self", ")", ":", "salt", ".", "utils", ".", "process", ".", "appendproctitle", "(", "self", ".", "name", ")", "self", ".", "clear_funcs", "=", "ClearFuncs", "(", "self", ".", "opts", ",", "self", ".", "key", ",", ")", "self", ".", "aes_funcs", "=", "AESFuncs", "(", "self", ".", "opts", ")", "salt", ".", "utils", ".", "crypt", ".", "reinit_crypto", "(", ")", "self", ".", "__bind", "(", ")" ]
Start a Master Worker
[ "Start", "a", "Master", "Worker" ]
python
train
Arubacloud/pyArubaCloud
ArubaCloud/base/__init__.py
https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/base/__init__.py#L95-L114
def _commit(self): """ :return: (dict) Response object content """ assert self.uri is not None, Exception("BadArgument: uri property cannot be None") url = '{}/{}'.format(self.uri, self.__class__.__name__) serialized_json = jsonpickle.encode(self, unpicklable=False, ) headers = {'Content-Type': 'application/json', 'Content-Length': str(len(serialized_json))} response = Http.post(url=url, data=serialized_json, headers=headers) if response.status_code != 200: from ArubaCloud.base.Errors import MalformedJsonRequest raise MalformedJsonRequest("Request: {}, Status Code: {}".format(serialized_json, response.status_code)) content = jsonpickle.decode(response.content.decode("utf-8")) if content['ResultCode'] == 17: from ArubaCloud.base.Errors import OperationAlreadyEnqueued raise OperationAlreadyEnqueued("{} already enqueued".format(self.__class__.__name__)) if content['Success'] is False: from ArubaCloud.base.Errors import RequestFailed raise RequestFailed("Request: {}, Response: {}".format(serialized_json, response.content)) return content
[ "def", "_commit", "(", "self", ")", ":", "assert", "self", ".", "uri", "is", "not", "None", ",", "Exception", "(", "\"BadArgument: uri property cannot be None\"", ")", "url", "=", "'{}/{}'", ".", "format", "(", "self", ".", "uri", ",", "self", ".", "__class__", ".", "__name__", ")", "serialized_json", "=", "jsonpickle", ".", "encode", "(", "self", ",", "unpicklable", "=", "False", ",", ")", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", ",", "'Content-Length'", ":", "str", "(", "len", "(", "serialized_json", ")", ")", "}", "response", "=", "Http", ".", "post", "(", "url", "=", "url", ",", "data", "=", "serialized_json", ",", "headers", "=", "headers", ")", "if", "response", ".", "status_code", "!=", "200", ":", "from", "ArubaCloud", ".", "base", ".", "Errors", "import", "MalformedJsonRequest", "raise", "MalformedJsonRequest", "(", "\"Request: {}, Status Code: {}\"", ".", "format", "(", "serialized_json", ",", "response", ".", "status_code", ")", ")", "content", "=", "jsonpickle", ".", "decode", "(", "response", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", "if", "content", "[", "'ResultCode'", "]", "==", "17", ":", "from", "ArubaCloud", ".", "base", ".", "Errors", "import", "OperationAlreadyEnqueued", "raise", "OperationAlreadyEnqueued", "(", "\"{} already enqueued\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ")", ")", "if", "content", "[", "'Success'", "]", "is", "False", ":", "from", "ArubaCloud", ".", "base", ".", "Errors", "import", "RequestFailed", "raise", "RequestFailed", "(", "\"Request: {}, Response: {}\"", ".", "format", "(", "serialized_json", ",", "response", ".", "content", ")", ")", "return", "content" ]
:return: (dict) Response object content
[ ":", "return", ":", "(", "dict", ")", "Response", "object", "content" ]
python
train
TrafficSenseMSD/SumoTools
traci/_vehicle.py
https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/traci/_vehicle.py#L1185-L1201
def add(self, vehID, routeID, depart=tc.DEPARTFLAG_NOW, pos=0, speed=0, lane=tc.DEPARTFLAG_LANE_FIRST_ALLOWED, typeID="DEFAULT_VEHTYPE"): """ Add a new vehicle (old style) """ self._connection._beginMessage(tc.CMD_SET_VEHICLE_VARIABLE, tc.ADD, vehID, 1 + 4 + 1 + 4 + len(typeID) + 1 + 4 + len(routeID) + 1 + 4 + 1 + 8 + 1 + 8 + 1 + 1) if depart > 0: depart *= 1000 self._connection._string += struct.pack("!Bi", tc.TYPE_COMPOUND, 6) self._connection._packString(typeID) self._connection._packString(routeID) self._connection._string += struct.pack("!Bi", tc.TYPE_INTEGER, depart) self._connection._string += struct.pack("!BdBd", tc.TYPE_DOUBLE, pos, tc.TYPE_DOUBLE, speed) self._connection._string += struct.pack("!Bb", tc.TYPE_BYTE, lane) self._connection._sendExact()
[ "def", "add", "(", "self", ",", "vehID", ",", "routeID", ",", "depart", "=", "tc", ".", "DEPARTFLAG_NOW", ",", "pos", "=", "0", ",", "speed", "=", "0", ",", "lane", "=", "tc", ".", "DEPARTFLAG_LANE_FIRST_ALLOWED", ",", "typeID", "=", "\"DEFAULT_VEHTYPE\"", ")", ":", "self", ".", "_connection", ".", "_beginMessage", "(", "tc", ".", "CMD_SET_VEHICLE_VARIABLE", ",", "tc", ".", "ADD", ",", "vehID", ",", "1", "+", "4", "+", "1", "+", "4", "+", "len", "(", "typeID", ")", "+", "1", "+", "4", "+", "len", "(", "routeID", ")", "+", "1", "+", "4", "+", "1", "+", "8", "+", "1", "+", "8", "+", "1", "+", "1", ")", "if", "depart", ">", "0", ":", "depart", "*=", "1000", "self", ".", "_connection", ".", "_string", "+=", "struct", ".", "pack", "(", "\"!Bi\"", ",", "tc", ".", "TYPE_COMPOUND", ",", "6", ")", "self", ".", "_connection", ".", "_packString", "(", "typeID", ")", "self", ".", "_connection", ".", "_packString", "(", "routeID", ")", "self", ".", "_connection", ".", "_string", "+=", "struct", ".", "pack", "(", "\"!Bi\"", ",", "tc", ".", "TYPE_INTEGER", ",", "depart", ")", "self", ".", "_connection", ".", "_string", "+=", "struct", ".", "pack", "(", "\"!BdBd\"", ",", "tc", ".", "TYPE_DOUBLE", ",", "pos", ",", "tc", ".", "TYPE_DOUBLE", ",", "speed", ")", "self", ".", "_connection", ".", "_string", "+=", "struct", ".", "pack", "(", "\"!Bb\"", ",", "tc", ".", "TYPE_BYTE", ",", "lane", ")", "self", ".", "_connection", ".", "_sendExact", "(", ")" ]
Add a new vehicle (old style)
[ "Add", "a", "new", "vehicle", "(", "old", "style", ")" ]
python
train
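A hedged end-to-end sketch of the old-style add call above, assuming SUMO is installed and a scenario config defining a 'route0' route exists; both names are illustrative.

```python
# Assumes SUMO is installed and scenario.sumocfg defines a route named 'route0'.
import traci

traci.start(['sumo', '-c', 'scenario.sumocfg'])      # paths/names are illustrative
traci.vehicle.add('veh0', 'route0', pos=0, speed=0)  # old-style add shown above
traci.simulationStep()
traci.close()
```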
drhagen/parsita
parsita/state.py
https://github.com/drhagen/parsita/blob/d97414a05541f48231381f607d1d2e6b50781d39/parsita/state.py#L298-L330
def merge(self, status: 'Status[Input, Output]') -> 'Status[Input, Output]': """Merge the failure message from another status into this one. Whichever status represents parsing that has gone the farthest is retained. If both statuses have gone the same distance, then the expected values from both are retained. Args: status: The status to merge into this one. Returns: This ``Status`` which may have ``farthest`` and ``expected`` updated accordingly. """ if status is None or status.farthest is None: # No new message; simply return unchanged pass elif self.farthest is None: # No current message to compare to; use the message from status self.farthest = status.farthest self.expected = status.expected elif status.farthest.position < self.farthest.position: # New message is not farther; keep current message pass elif status.farthest.position > self.farthest.position: # New message is farther than current message; replace with new message self.farthest = status.farthest self.expected = status.expected else: # New message and current message are equally far; merge messages self.expected = status.expected + self.expected return self
[ "def", "merge", "(", "self", ",", "status", ":", "'Status[Input, Output]'", ")", "->", "'Status[Input, Output]'", ":", "if", "status", "is", "None", "or", "status", ".", "farthest", "is", "None", ":", "# No new message; simply return unchanged", "pass", "elif", "self", ".", "farthest", "is", "None", ":", "# No current message to compare to; use the message from status", "self", ".", "farthest", "=", "status", ".", "farthest", "self", ".", "expected", "=", "status", ".", "expected", "elif", "status", ".", "farthest", ".", "position", "<", "self", ".", "farthest", ".", "position", ":", "# New message is not farther; keep current message", "pass", "elif", "status", ".", "farthest", ".", "position", ">", "self", ".", "farthest", ".", "position", ":", "# New message is farther than current message; replace with new message", "self", ".", "farthest", "=", "status", ".", "farthest", "self", ".", "expected", "=", "status", ".", "expected", "else", ":", "# New message and current message are equally far; merge messages", "self", ".", "expected", "=", "status", ".", "expected", "+", "self", ".", "expected", "return", "self" ]
Merge the failure message from another status into this one. Whichever status represents parsing that has gone the farthest is retained. If both statuses have gone the same distance, then the expected values from both are retained. Args: status: The status to merge into this one. Returns: This ``Status`` which may have ``farthest`` and ``expected`` updated accordingly.
[ "Merge", "the", "failure", "message", "from", "another", "status", "into", "this", "one", "." ]
python
test
rosenbrockc/fortpy
fortpy/serialize.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/serialize.py#L147-L153
def _cache_directory(self): """Returns the full path to the cache directory as specified in settings. """ if settings.unit_testing_mode or settings.use_test_cache: return os.path.join(settings.cache_directory.replace("Fortpy", "Fortpy_Testing"), self.py_tag) else: return os.path.join(settings.cache_directory, self.py_tag)
[ "def", "_cache_directory", "(", "self", ")", ":", "if", "settings", ".", "unit_testing_mode", "or", "settings", ".", "use_test_cache", ":", "return", "os", ".", "path", ".", "join", "(", "settings", ".", "cache_directory", ".", "replace", "(", "\"Fortpy\"", ",", "\"Fortpy_Testing\"", ")", ",", "self", ".", "py_tag", ")", "else", ":", "return", "os", ".", "path", ".", "join", "(", "settings", ".", "cache_directory", ",", "self", ".", "py_tag", ")" ]
Returns the full path to the cache directory as specified in settings.
[ "Returns", "the", "full", "path", "to", "the", "cache", "directory", "as", "specified", "in", "settings", "." ]
python
train
santosjorge/cufflinks
cufflinks/pandastools.py
https://github.com/santosjorge/cufflinks/blob/ca1cbf93998dc793d0b1f8ac30fe1f2bd105f63a/cufflinks/pandastools.py#L6-L29
def _screen(self,include=True,**kwargs): """ Filters a DataFrame for columns that contain the given strings. Parameters: ----------- include : bool If False then it will exclude items that match the given filters. This is the same as passing a regex ^keyword kwargs : Key value pairs that indicate the column and value to screen for Example: df.screen(col1='string_to_match',col2=['string1','string2']) """ df=self.copy() for k,v in list(kwargs.items()): v=[v] if type(v)!=list else v if include: df=df[df[k].str.contains('|'.join(v),flags=re.IGNORECASE).fillna(False)] else: df=df[df[k].str.contains('|'.join(v),flags=re.IGNORECASE).fillna(False)==False] return df
[ "def", "_screen", "(", "self", ",", "include", "=", "True", ",", "*", "*", "kwargs", ")", ":", "df", "=", "self", ".", "copy", "(", ")", "for", "k", ",", "v", "in", "list", "(", "kwargs", ".", "items", "(", ")", ")", ":", "v", "=", "[", "v", "]", "if", "type", "(", "v", ")", "!=", "list", "else", "v", "if", "include", ":", "df", "=", "df", "[", "df", "[", "k", "]", ".", "str", ".", "contains", "(", "'|'", ".", "join", "(", "v", ")", ",", "flags", "=", "re", ".", "IGNORECASE", ")", ".", "fillna", "(", "False", ")", "]", "else", ":", "df", "=", "df", "[", "df", "[", "k", "]", ".", "str", ".", "contains", "(", "'|'", ".", "join", "(", "v", ")", ",", "flags", "=", "re", ".", "IGNORECASE", ")", ".", "fillna", "(", "False", ")", "==", "False", "]", "return", "df" ]
Filters a DataFrame for rows in which the given columns contain the given strings. Parameters: ----------- include : bool If False then it will exclude rows that match the given filters. This is the same as passing a regex ^keyword kwargs : Key value pairs that indicate the column and value to screen for Example: df.screen(col1='string_to_match',col2=['string1','string2'])
[ "Filters", "a", "DataFrame", "for", "columns", "that", "contain", "the", "given", "strings", ".", "Parameters", ":", "-----------", "include", ":", "bool", "If", "False", "then", "it", "will", "exclude", "items", "that", "match", "the", "given", "filters", ".", "This", "is", "the", "same", "as", "passing", "a", "regex", "^keyword", "kwargs", ":", "Key", "value", "pairs", "that", "indicate", "the", "column", "and", "value", "to", "screen", "for" ]
python
train
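A hedged sketch of the screen helper above; it assumes that importing cufflinks registers _screen on pandas DataFrames as df.screen, which is how this module is normally used.

```python
# Assumes importing cufflinks patches DataFrame with the screen() helper above.
import pandas as pd
import cufflinks  # noqa: F401  (import is needed only for its side effect)

df = pd.DataFrame({'col1': ['apple pie', 'banana', 'cherry']})
print(df.screen(col1='apple'))                             # keeps the 'apple pie' row
print(df.screen(include=False, col1=['apple', 'cherry']))  # keeps only 'banana'
```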
jupyterhub/nullauthenticator
examples/token-only/login-service.py
https://github.com/jupyterhub/nullauthenticator/blob/155a44f85cf4474a5ecf7fa74bc6834f1f04393d/examples/token-only/login-service.py#L38-L42
async def delete_user(name): """Stop a user's server and delete the user""" app_log.info("Deleting user %s", name) await api_request('users/{}/server'.format(name), method='DELETE') await api_request('users/{}'.format(name), method='DELETE')
[ "async", "def", "delete_user", "(", "name", ")", ":", "app_log", ".", "info", "(", "\"Deleting user %s\"", ",", "name", ")", "await", "api_request", "(", "'users/{}/server'", ".", "format", "(", "name", ")", ",", "method", "=", "'DELETE'", ")", "await", "api_request", "(", "'users/{}'", ".", "format", "(", "name", ")", ",", "method", "=", "'DELETE'", ")" ]
Stop a user's server and delete the user
[ "Stop", "a", "user", "s", "server", "and", "delete", "the", "user" ]
python
train
SatelliteQE/nailgun
nailgun/entity_mixins.py
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entity_mixins.py#L898-L919
def create_raw(self, create_missing=None): """Create an entity. Possibly call :meth:`create_missing`. Then make an HTTP POST call to ``self.path('base')``. The request payload consists of whatever is returned by :meth:`create_payload`. Return the response. :param create_missing: Should :meth:`create_missing` be called? In other words, should values be generated for required, empty fields? Defaults to :data:`nailgun.entity_mixins.CREATE_MISSING`. :return: A ``requests.response`` object. """ if create_missing is None: create_missing = CREATE_MISSING if create_missing is True: self.create_missing() return client.post( self.path('base'), self.create_payload(), **self._server_config.get_client_kwargs() )
[ "def", "create_raw", "(", "self", ",", "create_missing", "=", "None", ")", ":", "if", "create_missing", "is", "None", ":", "create_missing", "=", "CREATE_MISSING", "if", "create_missing", "is", "True", ":", "self", ".", "create_missing", "(", ")", "return", "client", ".", "post", "(", "self", ".", "path", "(", "'base'", ")", ",", "self", ".", "create_payload", "(", ")", ",", "*", "*", "self", ".", "_server_config", ".", "get_client_kwargs", "(", ")", ")" ]
Create an entity. Possibly call :meth:`create_missing`. Then make an HTTP POST call to ``self.path('base')``. The request payload consists of whatever is returned by :meth:`create_payload`. Return the response. :param create_missing: Should :meth:`create_missing` be called? In other words, should values be generated for required, empty fields? Defaults to :data:`nailgun.entity_mixins.CREATE_MISSING`. :return: A ``requests.response`` object.
[ "Create", "an", "entity", "." ]
python
train
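A hedged sketch of create_raw on a concrete entity, assuming a reachable Satellite server; the URL and credentials are illustrative.

```python
# Assumes a reachable Satellite server; URL and credentials are illustrative.
from nailgun.config import ServerConfig
from nailgun.entities import Organization

cfg = ServerConfig(
    url='https://satellite.example.com',
    auth=('admin', 'changeme'),
    verify=False,
)
response = Organization(cfg, name='Engineering').create_raw()
print(response.status_code)  # raw requests.Response; no raise_for_status() applied
```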
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L4438-L4442
def visitor_update(self, visitor_id, data, **kwargs): "https://developer.zendesk.com/rest_api/docs/chat/visitors#update-visitor" api_path = "/api/v2/visitors/{visitor_id}" api_path = api_path.format(visitor_id=visitor_id) return self.call(api_path, method="PUT", data=data, **kwargs)
[ "def", "visitor_update", "(", "self", ",", "visitor_id", ",", "data", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/visitors/{visitor_id}\"", "api_path", "=", "api_path", ".", "format", "(", "visitor_id", "=", "visitor_id", ")", "return", "self", ".", "call", "(", "api_path", ",", "method", "=", "\"PUT\"", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/chat/visitors#update-visitor
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "chat", "/", "visitors#update", "-", "visitor" ]
python
train
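A hedged usage sketch for visitor_update, assuming valid Zendesk chat credentials; the subdomain, token, visitor id, and payload shape are all illustrative assumptions.

```python
# Assumes valid Zendesk credentials; all values below are illustrative.
from zdesk import Zendesk

zendesk = Zendesk('https://example.zendesk.com', 'agent@example.com',
                  'api_token_here', True)  # True: third argument is an API token
zendesk.visitor_update(visitor_id='10001',
                       data={'visitor': {'display_name': 'Jane'}})
```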
klahnakoski/pyLibrary
mo_dots/__init__.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_dots/__init__.py#L121-L126
def join_field(path): """ RETURN field SEQUENCE AS STRING """ output = ".".join([f.replace(".", "\\.") for f in path if f != None]) return output if output else "."
[ "def", "join_field", "(", "path", ")", ":", "output", "=", "\".\"", ".", "join", "(", "[", "f", ".", "replace", "(", "\".\"", ",", "\"\\\\.\"", ")", "for", "f", "in", "path", "if", "f", "!=", "None", "]", ")", "return", "output", "if", "output", "else", "\".\"" ]
RETURN field SEQUENCE AS STRING
[ "RETURN", "field", "SEQUENCE", "AS", "STRING" ]
python
train
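The behaviour below follows directly from the implementation above: literal dots inside a field name are escaped, None entries are skipped, and an empty sequence collapses to a bare dot.

```python
# Restated from the implementation above so the examples run standalone.
def join_field(path):
    output = ".".join([f.replace(".", "\\.") for f in path if f != None])
    return output if output else "."

print(join_field(["a", "b.c"]))  # a.b\.c   (inner dot escaped)
print(join_field([None, "x"]))   # x        (None entries skipped)
print(join_field([]))            # .        (empty path)
```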
Azure/azure-kusto-python
azure-kusto-ingest/azure/kusto/ingest/_ingest_client.py
https://github.com/Azure/azure-kusto-python/blob/92466a2ae175d6353d1dee3496a02517b2a71a86/azure-kusto-ingest/azure/kusto/ingest/_ingest_client.py#L32-L69
def ingest_from_dataframe(self, df, ingestion_properties): """Enqueue an ingest command from a local pandas DataFrame. :param pandas.DataFrame df: input dataframe to ingest. :param azure.kusto.ingest.IngestionProperties ingestion_properties: Ingestion properties. """ from pandas import DataFrame if not isinstance(df, DataFrame): raise ValueError("Expected DataFrame instance, found {}".format(type(df))) file_name = "df_{timestamp}_{pid}.csv.gz".format(timestamp=int(time.time()), pid=os.getpid()) temp_file_path = os.path.join(tempfile.gettempdir(), file_name) df.to_csv(temp_file_path, index=False, encoding="utf-8", header=False, compression="gzip") fd = FileDescriptor(temp_file_path) blob_name = "{db}__{table}__{guid}__{file}".format( db=ingestion_properties.database, table=ingestion_properties.table, guid=uuid.uuid4(), file=file_name ) containers = self._resource_manager.get_containers() container_details = random.choice(containers) storage_client = CloudStorageAccount(container_details.storage_account_name, sas_token=container_details.sas) blob_service = storage_client.create_block_blob_service() blob_service.create_blob_from_path( container_name=container_details.object_name, blob_name=blob_name, file_path=temp_file_path ) url = blob_service.make_blob_url(container_details.object_name, blob_name, sas_token=container_details.sas) self.ingest_from_blob(BlobDescriptor(url, fd.size), ingestion_properties=ingestion_properties) fd.delete_files() os.unlink(temp_file_path)
[ "def", "ingest_from_dataframe", "(", "self", ",", "df", ",", "ingestion_properties", ")", ":", "from", "pandas", "import", "DataFrame", "if", "not", "isinstance", "(", "df", ",", "DataFrame", ")", ":", "raise", "ValueError", "(", "\"Expected DataFrame instance, found {}\"", ".", "format", "(", "type", "(", "df", ")", ")", ")", "file_name", "=", "\"df_{timestamp}_{pid}.csv.gz\"", ".", "format", "(", "timestamp", "=", "int", "(", "time", ".", "time", "(", ")", ")", ",", "pid", "=", "os", ".", "getpid", "(", ")", ")", "temp_file_path", "=", "os", ".", "path", ".", "join", "(", "tempfile", ".", "gettempdir", "(", ")", ",", "file_name", ")", "df", ".", "to_csv", "(", "temp_file_path", ",", "index", "=", "False", ",", "encoding", "=", "\"utf-8\"", ",", "header", "=", "False", ",", "compression", "=", "\"gzip\"", ")", "fd", "=", "FileDescriptor", "(", "temp_file_path", ")", "blob_name", "=", "\"{db}__{table}__{guid}__{file}\"", ".", "format", "(", "db", "=", "ingestion_properties", ".", "database", ",", "table", "=", "ingestion_properties", ".", "table", ",", "guid", "=", "uuid", ".", "uuid4", "(", ")", ",", "file", "=", "file_name", ")", "containers", "=", "self", ".", "_resource_manager", ".", "get_containers", "(", ")", "container_details", "=", "random", ".", "choice", "(", "containers", ")", "storage_client", "=", "CloudStorageAccount", "(", "container_details", ".", "storage_account_name", ",", "sas_token", "=", "container_details", ".", "sas", ")", "blob_service", "=", "storage_client", ".", "create_block_blob_service", "(", ")", "blob_service", ".", "create_blob_from_path", "(", "container_name", "=", "container_details", ".", "object_name", ",", "blob_name", "=", "blob_name", ",", "file_path", "=", "temp_file_path", ")", "url", "=", "blob_service", ".", "make_blob_url", "(", "container_details", ".", "object_name", ",", "blob_name", ",", "sas_token", "=", "container_details", ".", "sas", ")", "self", ".", "ingest_from_blob", "(", "BlobDescriptor", "(", "url", ",", "fd", ".", "size", ")", ",", "ingestion_properties", "=", "ingestion_properties", ")", "fd", ".", "delete_files", "(", ")", "os", ".", "unlink", "(", "temp_file_path", ")" ]
Enqueue an ingest command from a local pandas DataFrame. :param pandas.DataFrame df: input dataframe to ingest. :param azure.kusto.ingest.IngestionProperties ingestion_properties: Ingestion properties.
[ "Enqueuing", "an", "ingest", "command", "from", "local", "files", ".", ":", "param", "pandas", ".", "DataFrame", "df", ":", "input", "dataframe", "to", "ingest", ".", ":", "param", "azure", ".", "kusto", ".", "ingest", ".", "IngestionProperties", "ingestion_properties", ":", "Ingestion", "properties", "." ]
python
train
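A hedged usage sketch for ingest_from_dataframe against the legacy azure-kusto-ingest API of this era; the cluster URI, database, and table are illustrative, and the exact import paths and builder names are assumptions that may differ between package versions.

```python
# Legacy azure-kusto-ingest sketch; import paths and builder names are
# assumptions that may differ between package versions.
import pandas as pd
from azure.kusto.data.request import KustoConnectionStringBuilder
from azure.kusto.ingest import KustoIngestClient, IngestionProperties

kcsb = KustoConnectionStringBuilder.with_aad_device_authentication(
    'https://ingest-mycluster.kusto.windows.net')  # cluster URI is illustrative
client = KustoIngestClient(kcsb)
props = IngestionProperties(database='mydb', table='mytable')
client.ingest_from_dataframe(pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']}), props)
```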
sjwood/pydvdid
pydvdid/functions.py
https://github.com/sjwood/pydvdid/blob/03914fb7e24283c445e5af724f9d919b23caaf95/pydvdid/functions.py#L161-L180
def _get_first_64k_content(file_path): """Returns the first 65536 (or the file size, whichever is smaller) bytes of the file at the specified file path, as a bytearray. """ if not isfile(file_path): raise PathDoesNotExistException(file_path) file_size = getsize(file_path) content_size = min(file_size, 0x10000) content = bytearray(content_size) with open(file_path, "rb") as file_object: content_read = file_object.readinto(content) if content_read is None or content_read < content_size: raise FileContentReadException(content_size, content_read) return content
[ "def", "_get_first_64k_content", "(", "file_path", ")", ":", "if", "not", "isfile", "(", "file_path", ")", ":", "raise", "PathDoesNotExistException", "(", "file_path", ")", "file_size", "=", "getsize", "(", "file_path", ")", "content_size", "=", "min", "(", "file_size", ",", "0x10000", ")", "content", "=", "bytearray", "(", "content_size", ")", "with", "open", "(", "file_path", ",", "\"rb\"", ")", "as", "file_object", ":", "content_read", "=", "file_object", ".", "readinto", "(", "content", ")", "if", "content_read", "is", "None", "or", "content_read", "<", "content_size", ":", "raise", "FileContentReadException", "(", "content_size", ",", "content_read", ")", "return", "content" ]
Returns the first 65536 (or the file size, whichever is smaller) bytes of the file at the specified file path, as a bytearray.
[ "Returns", "the", "first", "65536", "(", "or", "the", "file", "size", "whichever", "is", "smaller", ")", "bytes", "of", "the", "file", "at", "the", "specified", "file", "path", "as", "a", "bytearray", "." ]
python
train
acoomans/flask-autodoc
flask_autodoc/autodoc.py
https://github.com/acoomans/flask-autodoc/blob/6c77c8935b71fbf3243b5e589c5c255d0299d853/flask_autodoc/autodoc.py#L168-L194
def html(self, groups='all', template=None, **context): """Return an html string of the routes specified by the doc() method A template can be specified. A list of routes is available under the 'autodoc' value (refer to the documentation for the generate() for a description of available values). If no template is specified, a default template is used. By specifying the group or groups arguments, only routes belonging to those groups will be returned. """ context['autodoc'] = context['autodoc'] if 'autodoc' in context \ else self.generate(groups=groups) context['defaults'] = context['defaults'] if 'defaults' in context \ else self.default_props if template: return render_template(template, **context) else: filename = os.path.join( os.path.dirname(__file__), 'templates', 'autodoc_default.html' ) with open(filename) as file: content = file.read() with current_app.app_context(): return render_template_string(content, **context)
[ "def", "html", "(", "self", ",", "groups", "=", "'all'", ",", "template", "=", "None", ",", "*", "*", "context", ")", ":", "context", "[", "'autodoc'", "]", "=", "context", "[", "'autodoc'", "]", "if", "'autodoc'", "in", "context", "else", "self", ".", "generate", "(", "groups", "=", "groups", ")", "context", "[", "'defaults'", "]", "=", "context", "[", "'defaults'", "]", "if", "'defaults'", "in", "context", "else", "self", ".", "default_props", "if", "template", ":", "return", "render_template", "(", "template", ",", "*", "*", "context", ")", "else", ":", "filename", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'templates'", ",", "'autodoc_default.html'", ")", "with", "open", "(", "filename", ")", "as", "file", ":", "content", "=", "file", ".", "read", "(", ")", "with", "current_app", ".", "app_context", "(", ")", ":", "return", "render_template_string", "(", "content", ",", "*", "*", "context", ")" ]
Return an HTML string of the routes specified by the doc() method A template can be specified. A list of routes is available under the 'autodoc' value (refer to the documentation for generate() for a description of available values). If no template is specified, a default template is used. By specifying the group or groups arguments, only routes belonging to those groups will be returned.
[ "Return", "an", "html", "string", "of", "the", "routes", "specified", "by", "the", "doc", "()", "method" ]
python
train
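A minimal sketch of how html() is typically wired up, following this extension's usual pattern; the route names are illustrative.

```python
# Minimal flask-autodoc wiring; route names are illustrative.
from flask import Flask
from flask_autodoc import Autodoc

app = Flask(__name__)
auto = Autodoc(app)

@app.route('/')
@auto.doc()
def index():
    """Landing page."""
    return 'hello'

@app.route('/documentation')
def documentation():
    # Extra keyword arguments become template context values.
    return auto.html(title='API documentation')
```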
chimera0/accel-brain-code
Reinforcement-Learning/demo/demo_maze_multi_agent_deep_q_network.py
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Reinforcement-Learning/demo/demo_maze_multi_agent_deep_q_network.py#L73-L128
def inference(self, state_arr, limit=1000): ''' Inference. Args: state_arr: `np.ndarray` of state. limit: The maximum number of inference steps. Returns: `list` of `np.ndarray` of an optimal route. ''' self.__inferencing_flag = True agent_x, agent_y = np.where(state_arr[0] == 1) agent_x, agent_y = agent_x[0], agent_y[0] self.__create_enemy(self.__map_arr) result_list = [(agent_x, agent_y, 0.0)] result_val_list = [agent_x, agent_y] for e in range(self.__enemy_num): result_val_list.append(self.__enemy_pos_list[e][0]) result_val_list.append(self.__enemy_pos_list[e][1]) result_val_list.append(0.0) result_list.append(tuple(result_val_list)) self.t = 0 while self.t < limit: next_action_arr = self.extract_possible_actions(state_arr) next_q_arr = self.function_approximator.inference_q(next_action_arr) action_arr, q = self.select_action(next_action_arr, next_q_arr) self.__move_enemy(action_arr) agent_x, agent_y = np.where(action_arr[0] == 1) agent_x, agent_y = agent_x[0], agent_y[0] result_val_list = [agent_x, agent_y] for e in range(self.__enemy_num): result_val_list.append(self.__enemy_pos_list[e][0]) result_val_list.append(self.__enemy_pos_list[e][1]) try: result_val_list.append(q[0]) except IndexError: result_val_list.append(q) result_list.append(tuple(result_val_list)) # Update State. state_arr = self.update_state(state_arr, action_arr) # Epsode. self.t += 1 # Check. end_flag = self.check_the_end_flag(state_arr) if end_flag is True: break return result_list
[ "def", "inference", "(", "self", ",", "state_arr", ",", "limit", "=", "1000", ")", ":", "self", ".", "__inferencing_flag", "=", "True", "agent_x", ",", "agent_y", "=", "np", ".", "where", "(", "state_arr", "[", "0", "]", "==", "1", ")", "agent_x", ",", "agent_y", "=", "agent_x", "[", "0", "]", ",", "agent_y", "[", "0", "]", "self", ".", "__create_enemy", "(", "self", ".", "__map_arr", ")", "result_list", "=", "[", "(", "agent_x", ",", "agent_y", ",", "0.0", ")", "]", "result_val_list", "=", "[", "agent_x", ",", "agent_y", "]", "for", "e", "in", "range", "(", "self", ".", "__enemy_num", ")", ":", "result_val_list", ".", "append", "(", "self", ".", "__enemy_pos_list", "[", "e", "]", "[", "0", "]", ")", "result_val_list", ".", "append", "(", "self", ".", "__enemy_pos_list", "[", "e", "]", "[", "1", "]", ")", "result_val_list", ".", "append", "(", "0.0", ")", "result_list", ".", "append", "(", "tuple", "(", "result_val_list", ")", ")", "self", ".", "t", "=", "0", "while", "self", ".", "t", "<", "limit", ":", "next_action_arr", "=", "self", ".", "extract_possible_actions", "(", "state_arr", ")", "next_q_arr", "=", "self", ".", "function_approximator", ".", "inference_q", "(", "next_action_arr", ")", "action_arr", ",", "q", "=", "self", ".", "select_action", "(", "next_action_arr", ",", "next_q_arr", ")", "self", ".", "__move_enemy", "(", "action_arr", ")", "agent_x", ",", "agent_y", "=", "np", ".", "where", "(", "action_arr", "[", "0", "]", "==", "1", ")", "agent_x", ",", "agent_y", "=", "agent_x", "[", "0", "]", ",", "agent_y", "[", "0", "]", "result_val_list", "=", "[", "agent_x", ",", "agent_y", "]", "for", "e", "in", "range", "(", "self", ".", "__enemy_num", ")", ":", "result_val_list", ".", "append", "(", "self", ".", "__enemy_pos_list", "[", "e", "]", "[", "0", "]", ")", "result_val_list", ".", "append", "(", "self", ".", "__enemy_pos_list", "[", "e", "]", "[", "1", "]", ")", "try", ":", "result_val_list", ".", "append", "(", "q", "[", "0", "]", ")", "except", "IndexError", ":", "result_val_list", ".", "append", "(", "q", ")", "result_list", ".", "append", "(", "tuple", "(", "result_val_list", ")", ")", "# Update State.", "state_arr", "=", "self", ".", "update_state", "(", "state_arr", ",", "action_arr", ")", "# Epsode.", "self", ".", "t", "+=", "1", "# Check.", "end_flag", "=", "self", ".", "check_the_end_flag", "(", "state_arr", ")", "if", "end_flag", "is", "True", ":", "break", "return", "result_list" ]
Inference. Args: state_arr: `np.ndarray` of state. limit: The maximum number of inference steps. Returns: `list` of `np.ndarray` of an optimal route.
[ "Infernce", ".", "Args", ":", "state_arr", ":", "np", ".", "ndarray", "of", "state", ".", "limit", ":", "The", "number", "of", "inferencing", ".", "Returns", ":", "list", "of", "np", ".", "ndarray", "of", "an", "optimal", "route", "." ]
python
train
tonyfischetti/sake
sakelib/acts.py
https://github.com/tonyfischetti/sake/blob/b7ad20fe8e7137db99a20ac06b8da26492601b00/sakelib/acts.py#L626-L675
def visualize(G, settings, filename="dependencies", no_graphviz=False): """ Uses networkX to draw a graphviz dot file either (a) calls the graphviz command "dot" to turn it into a SVG and remove the dotfile (default), or (b) if no_graphviz is True, just output the graphviz dot file Args: a NetworkX DiGraph the settings dictionary a filename (a default is provided a flag indicating whether graphviz should *not* be called Returns: 0 if everything worked will cause fatal error on failure """ error = settings["error"] if no_graphviz: write_dot_file(G, filename) return 0 write_dot_file(G, "tempdot") renderer = "svg" if re.search("\.jpg$", filename, re.IGNORECASE): renderer = "jpg" elif re.search("\.jpeg$", filename, re.IGNORECASE): renderer = "jpg" elif re.search("\.svg$", filename, re.IGNORECASE): renderer = "svg" elif re.search("\.png$", filename, re.IGNORECASE): renderer = "png" elif re.search("\.gif$", filename, re.IGNORECASE): renderer = "gif" elif re.search("\.ps$", filename, re.IGNORECASE): renderer = "ps" elif re.search("\.pdf$", filename, re.IGNORECASE): renderer = "pdf" else: renderer = "svg" filename += ".svg" command = "dot -T{} tempdot -o {}".format(renderer, filename) p = Popen(command, shell=True) p.communicate() if p.returncode: errmes = "Either graphviz is not installed, or its not on PATH" os.remove("tempdot") error(errmes) sys.exit(1) os.remove("tempdot") return 0
[ "def", "visualize", "(", "G", ",", "settings", ",", "filename", "=", "\"dependencies\"", ",", "no_graphviz", "=", "False", ")", ":", "error", "=", "settings", "[", "\"error\"", "]", "if", "no_graphviz", ":", "write_dot_file", "(", "G", ",", "filename", ")", "return", "0", "write_dot_file", "(", "G", ",", "\"tempdot\"", ")", "renderer", "=", "\"svg\"", "if", "re", ".", "search", "(", "\"\\.jpg$\"", ",", "filename", ",", "re", ".", "IGNORECASE", ")", ":", "renderer", "=", "\"jpg\"", "elif", "re", ".", "search", "(", "\"\\.jpeg$\"", ",", "filename", ",", "re", ".", "IGNORECASE", ")", ":", "renderer", "=", "\"jpg\"", "elif", "re", ".", "search", "(", "\"\\.svg$\"", ",", "filename", ",", "re", ".", "IGNORECASE", ")", ":", "renderer", "=", "\"svg\"", "elif", "re", ".", "search", "(", "\"\\.png$\"", ",", "filename", ",", "re", ".", "IGNORECASE", ")", ":", "renderer", "=", "\"png\"", "elif", "re", ".", "search", "(", "\"\\.gif$\"", ",", "filename", ",", "re", ".", "IGNORECASE", ")", ":", "renderer", "=", "\"gif\"", "elif", "re", ".", "search", "(", "\"\\.ps$\"", ",", "filename", ",", "re", ".", "IGNORECASE", ")", ":", "renderer", "=", "\"ps\"", "elif", "re", ".", "search", "(", "\"\\.pdf$\"", ",", "filename", ",", "re", ".", "IGNORECASE", ")", ":", "renderer", "=", "\"pdf\"", "else", ":", "renderer", "=", "\"svg\"", "filename", "+=", "\".svg\"", "command", "=", "\"dot -T{} tempdot -o {}\"", ".", "format", "(", "renderer", ",", "filename", ")", "p", "=", "Popen", "(", "command", ",", "shell", "=", "True", ")", "p", ".", "communicate", "(", ")", "if", "p", ".", "returncode", ":", "errmes", "=", "\"Either graphviz is not installed, or its not on PATH\"", "os", ".", "remove", "(", "\"tempdot\"", ")", "error", "(", "errmes", ")", "sys", ".", "exit", "(", "1", ")", "os", ".", "remove", "(", "\"tempdot\"", ")", "return", "0" ]
Uses networkX to write a graphviz dot file, then either (a) calls the graphviz command "dot" to turn it into an SVG and removes the dot file (the default), or (b) if no_graphviz is True, just outputs the graphviz dot file Args: a NetworkX DiGraph the settings dictionary a filename (a default is provided) a flag indicating whether graphviz should *not* be called Returns: 0 if everything worked will cause a fatal error on failure
[ "Uses", "networkX", "to", "draw", "a", "graphviz", "dot", "file", "either", "(", "a", ")", "calls", "the", "graphviz", "command", "dot", "to", "turn", "it", "into", "a", "SVG", "and", "remove", "the", "dotfile", "(", "default", ")", "or", "(", "b", ")", "if", "no_graphviz", "is", "True", "just", "output", "the", "graphviz", "dot", "file" ]
python
valid
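A hedged sketch of the no_graphviz branch above, which only writes the dot file and so avoids the external dot dependency; the settings dict is reduced to the single 'error' key this function reads.

```python
# Exercises only the no_graphviz branch, so the external 'dot' binary is not needed.
import networkx as nx
from sakelib.acts import visualize

G = nx.DiGraph([('build', 'test'), ('test', 'deploy')])
settings = {'error': print}  # visualize() only reads settings['error'] here
visualize(G, settings, filename='dependencies.dot', no_graphviz=True)
```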
biocore/burrito-fillings
bfillings/bwa.py
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/bwa.py#L243-L273
def _get_result_paths(self, data): """Gets the results for a run of bwa index. bwa index outputs 5 files when the index is created. The filename prefix will be the same as the input fasta, unless overridden with the -p option, and the 5 extensions are listed below: .amb .ann .bwt .pac .sa and these extensions (including the period) are the keys to the dictionary that is returned. """ # determine the names of the files. The name will be the same as the # input fasta file unless overridden with the -p option if self.Parameters['-p'].isOn(): prefix = self.Parameters['-p'].Value else: prefix = data['fasta_in'] # the 5 output file suffixes suffixes = ['.amb', '.ann', '.bwt', '.pac', '.sa'] out_files = {} for suffix in suffixes: out_files[suffix] = ResultPath(prefix + suffix, IsWritten=True) return out_files
[ "def", "_get_result_paths", "(", "self", ",", "data", ")", ":", "# determine the names of the files. The name will be the same as the", "# input fasta file unless overridden with the -p option", "if", "self", ".", "Parameters", "[", "'-p'", "]", ".", "isOn", "(", ")", ":", "prefix", "=", "self", ".", "Parameters", "[", "'-p'", "]", ".", "Value", "else", ":", "prefix", "=", "data", "[", "'fasta_in'", "]", "# the 5 output file suffixes", "suffixes", "=", "[", "'.amb'", ",", "'.ann'", ",", "'.bwt'", ",", "'.pac'", ",", "'.sa'", "]", "out_files", "=", "{", "}", "for", "suffix", "in", "suffixes", ":", "out_files", "[", "suffix", "]", "=", "ResultPath", "(", "prefix", "+", "suffix", ",", "IsWritten", "=", "True", ")", "return", "out_files" ]
Gets the results for a run of bwa index. bwa index outputs 5 files when the index is created. The filename prefix will be the same as the input fasta, unless overridden with the -p option, and the 5 extensions are listed below: .amb .ann .bwt .pac .sa and these extensions (including the period) are the keys to the dictionary that is returned.
[ "Gets", "the", "results", "for", "a", "run", "of", "bwa", "index", "." ]
python
train
saltstack/salt
salt/states/kubernetes.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/kubernetes.py#L595-L634
def configmap_absent(name, namespace='default', **kwargs): ''' Ensures that the named configmap is absent from the given namespace. name The name of the configmap namespace The namespace holding the configmap. The 'default' one is going to be used unless a different one is specified. ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} configmap = __salt__['kubernetes.show_configmap'](name, namespace, **kwargs) if configmap is None: ret['result'] = True if not __opts__['test'] else None ret['comment'] = 'The configmap does not exist' return ret if __opts__['test']: ret['comment'] = 'The configmap is going to be deleted' ret['result'] = None return ret __salt__['kubernetes.delete_configmap'](name, namespace, **kwargs) # As for kubernetes 1.6.4 doesn't set a code when deleting a configmap # The kubernetes module will raise an exception if the kubernetes # server will return an error ret['result'] = True ret['changes'] = { 'kubernetes.configmap': { 'new': 'absent', 'old': 'present'}} ret['comment'] = 'ConfigMap deleted' return ret
[ "def", "configmap_absent", "(", "name", ",", "namespace", "=", "'default'", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "False", ",", "'comment'", ":", "''", "}", "configmap", "=", "__salt__", "[", "'kubernetes.show_configmap'", "]", "(", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", "if", "configmap", "is", "None", ":", "ret", "[", "'result'", "]", "=", "True", "if", "not", "__opts__", "[", "'test'", "]", "else", "None", "ret", "[", "'comment'", "]", "=", "'The configmap does not exist'", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'The configmap is going to be deleted'", "ret", "[", "'result'", "]", "=", "None", "return", "ret", "__salt__", "[", "'kubernetes.delete_configmap'", "]", "(", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", "# As for kubernetes 1.6.4 doesn't set a code when deleting a configmap", "# The kubernetes module will raise an exception if the kubernetes", "# server will return an error", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'changes'", "]", "=", "{", "'kubernetes.configmap'", ":", "{", "'new'", ":", "'absent'", ",", "'old'", ":", "'present'", "}", "}", "ret", "[", "'comment'", "]", "=", "'ConfigMap deleted'", "return", "ret" ]
Ensures that the named configmap is absent from the given namespace. name The name of the configmap namespace The namespace holding the configmap. The 'default' one is going to be used unless a different one is specified.
[ "Ensures", "that", "the", "named", "configmap", "is", "absent", "from", "the", "given", "namespace", "." ]
python
train
DerMitch/fritzbox-smarthome
fritzhome/fritz.py
https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/fritz.py#L213-L268
def get_consumption(self, deviceid, timerange="10"): """ Return all available energy consumption data for the device. You need to divice watt_values by 100 and volt_values by 1000 to get the "real" values. :return: dict """ tranges = ("10", "24h", "month", "year") if timerange not in tranges: raise ValueError( "Unknown timerange. Possible values are: {0}".format(tranges) ) url = self.base_url + "/net/home_auto_query.lua" response = self.session.get(url, params={ 'sid': self.sid, 'command': 'EnergyStats_{0}'.format(timerange), 'id': deviceid, 'xhr': 0, }, timeout=15) response.raise_for_status() data = response.json() result = {} # Single result values values_map = { 'MM_Value_Amp': 'mm_value_amp', 'MM_Value_Power': 'mm_value_power', 'MM_Value_Volt': 'mm_value_volt', 'EnStats_average_value': 'enstats_average_value', 'EnStats_max_value': 'enstats_max_value', 'EnStats_min_value': 'enstats_min_value', 'EnStats_timer_type': 'enstats_timer_type', 'sum_Day': 'sum_day', 'sum_Month': 'sum_month', 'sum_Year': 'sum_year', } for avm_key, py_key in values_map.items(): result[py_key] = int(data[avm_key]) # Stats counts count = int(data["EnStats_count"]) watt_values = [None for i in range(count)] volt_values = [None for i in range(count)] for i in range(1, count + 1): watt_values[i - 1] = int(data["EnStats_watt_value_{}".format(i)]) volt_values[i - 1] = int(data["EnStats_volt_value_{}".format(i)]) result['watt_values'] = watt_values result['volt_values'] = volt_values return result
[ "def", "get_consumption", "(", "self", ",", "deviceid", ",", "timerange", "=", "\"10\"", ")", ":", "tranges", "=", "(", "\"10\"", ",", "\"24h\"", ",", "\"month\"", ",", "\"year\"", ")", "if", "timerange", "not", "in", "tranges", ":", "raise", "ValueError", "(", "\"Unknown timerange. Possible values are: {0}\"", ".", "format", "(", "tranges", ")", ")", "url", "=", "self", ".", "base_url", "+", "\"/net/home_auto_query.lua\"", "response", "=", "self", ".", "session", ".", "get", "(", "url", ",", "params", "=", "{", "'sid'", ":", "self", ".", "sid", ",", "'command'", ":", "'EnergyStats_{0}'", ".", "format", "(", "timerange", ")", ",", "'id'", ":", "deviceid", ",", "'xhr'", ":", "0", ",", "}", ",", "timeout", "=", "15", ")", "response", ".", "raise_for_status", "(", ")", "data", "=", "response", ".", "json", "(", ")", "result", "=", "{", "}", "# Single result values", "values_map", "=", "{", "'MM_Value_Amp'", ":", "'mm_value_amp'", ",", "'MM_Value_Power'", ":", "'mm_value_power'", ",", "'MM_Value_Volt'", ":", "'mm_value_volt'", ",", "'EnStats_average_value'", ":", "'enstats_average_value'", ",", "'EnStats_max_value'", ":", "'enstats_max_value'", ",", "'EnStats_min_value'", ":", "'enstats_min_value'", ",", "'EnStats_timer_type'", ":", "'enstats_timer_type'", ",", "'sum_Day'", ":", "'sum_day'", ",", "'sum_Month'", ":", "'sum_month'", ",", "'sum_Year'", ":", "'sum_year'", ",", "}", "for", "avm_key", ",", "py_key", "in", "values_map", ".", "items", "(", ")", ":", "result", "[", "py_key", "]", "=", "int", "(", "data", "[", "avm_key", "]", ")", "# Stats counts", "count", "=", "int", "(", "data", "[", "\"EnStats_count\"", "]", ")", "watt_values", "=", "[", "None", "for", "i", "in", "range", "(", "count", ")", "]", "volt_values", "=", "[", "None", "for", "i", "in", "range", "(", "count", ")", "]", "for", "i", "in", "range", "(", "1", ",", "count", "+", "1", ")", ":", "watt_values", "[", "i", "-", "1", "]", "=", "int", "(", "data", "[", "\"EnStats_watt_value_{}\"", ".", "format", "(", "i", ")", "]", ")", "volt_values", "[", "i", "-", "1", "]", "=", "int", "(", "data", "[", "\"EnStats_volt_value_{}\"", ".", "format", "(", "i", ")", "]", ")", "result", "[", "'watt_values'", "]", "=", "watt_values", "result", "[", "'volt_values'", "]", "=", "volt_values", "return", "result" ]
Return all available energy consumption data for the device. You need to divide watt_values by 100 and volt_values by 1000 to get the "real" values. :return: dict
[ "Return", "all", "available", "energy", "consumption", "data", "for", "the", "device", ".", "You", "need", "to", "divice", "watt_values", "by", "100", "and", "volt_values", "by", "1000", "to", "get", "the", "real", "values", "." ]
python
train
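A hedged sketch for get_consumption, assuming a FRITZ!Box on the LAN with a smart-home actor; the FritzBox constructor arguments, the login step, and the device id are illustrative assumptions about this library's surrounding API.

```python
# Assumptions: FritzBox(ip, username, password) and .login() exist in this
# library; the IP, credentials, and actor id below are illustrative.
from fritzhome.fritz import FritzBox

box = FritzBox('192.168.178.1', 'admin', 'password')
box.login()
stats = box.get_consumption('16', timerange='24h')
print(stats['watt_values'][:5])  # raw values: divide by 100 for watts
print(stats['volt_values'][:5])  # raw values: divide by 1000 for volts
```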
hanguokai/youku
youku/youku_videos.py
https://github.com/hanguokai/youku/blob/b2df060c7dccfad990bcfa289fff68bb77d1e69b/youku/youku_videos.py#L71-L83
def find_video_details_by_ids(self, video_ids, ext=None): """doc: http://open.youku.com/docs/doc?id=47 """ url = 'https://openapi.youku.com/v2/videos/show_batch.json' params = { 'client_id': self.client_id, 'video_ids': video_ids } if ext: params['ext'] = ext r = requests.get(url, params=params) check_error(r) return r.json()
[ "def", "find_video_details_by_ids", "(", "self", ",", "video_ids", ",", "ext", "=", "None", ")", ":", "url", "=", "'https://openapi.youku.com/v2/videos/show_batch.json'", "params", "=", "{", "'client_id'", ":", "self", ".", "client_id", ",", "'video_ids'", ":", "video_ids", "}", "if", "ext", ":", "params", "[", "'ext'", "]", "=", "ext", "r", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ")", "check_error", "(", "r", ")", "return", "r", ".", "json", "(", ")" ]
doc: http://open.youku.com/docs/doc?id=47
[ "doc", ":", "http", ":", "//", "open", ".", "youku", ".", "com", "/", "docs", "/", "doc?id", "=", "47" ]
python
train
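A hedged usage sketch, assuming the module at youku/youku_videos.py exposes a YoukuVideos class that takes a client_id; the client id and the comma-separated video ids are illustrative.

```python
# Assumes youku/youku_videos.py exposes YoukuVideos(client_id); ids illustrative.
from youku.youku_videos import YoukuVideos

videos = YoukuVideos('your-client-id')
details = videos.find_video_details_by_ids('XNzQwNjcxNDQ0,XNzQwNjcxNDQ5')
print(details)
```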
vanheeringen-lab/gimmemotifs
gimmemotifs/comparison.py
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/comparison.py#L662-L711
def get_closest_match(self, motifs, dbmotifs=None, match="partial", metric="wic",combine="mean", parallel=True, ncpus=None): """Return best match in database for motifs. Parameters ---------- motifs : list or str Filename of motifs or list of motifs. dbmotifs : list or str, optional Database motifs, default will be used if not specified. match : str, optional metric : str, optional combine : str, optional ncpus : int, optional Number of threads to use. Returns ------- closest_match : dict """ if dbmotifs is None: pwm = self.config.get_default_params()["motif_db"] pwmdir = self.config.get_motif_dir() dbmotifs = os.path.join(pwmdir, pwm) motifs = parse_motifs(motifs) dbmotifs = parse_motifs(dbmotifs) dbmotif_lookup = dict([(m.id, m) for m in dbmotifs]) scores = self.get_all_scores(motifs, dbmotifs, match, metric, combine, parallel=parallel, ncpus=ncpus) for motif in scores: scores[motif] = sorted( scores[motif].items(), key=lambda x:x[1][0] )[-1] for motif in motifs: dbmotif, score = scores[motif.id] pval, pos, orient = self.compare_motifs( motif, dbmotif_lookup[dbmotif], match, metric, combine, True) scores[motif.id] = [dbmotif, (list(score) + [pval])] return scores
[ "def", "get_closest_match", "(", "self", ",", "motifs", ",", "dbmotifs", "=", "None", ",", "match", "=", "\"partial\"", ",", "metric", "=", "\"wic\"", ",", "combine", "=", "\"mean\"", ",", "parallel", "=", "True", ",", "ncpus", "=", "None", ")", ":", "if", "dbmotifs", "is", "None", ":", "pwm", "=", "self", ".", "config", ".", "get_default_params", "(", ")", "[", "\"motif_db\"", "]", "pwmdir", "=", "self", ".", "config", ".", "get_motif_dir", "(", ")", "dbmotifs", "=", "os", ".", "path", ".", "join", "(", "pwmdir", ",", "pwm", ")", "motifs", "=", "parse_motifs", "(", "motifs", ")", "dbmotifs", "=", "parse_motifs", "(", "dbmotifs", ")", "dbmotif_lookup", "=", "dict", "(", "[", "(", "m", ".", "id", ",", "m", ")", "for", "m", "in", "dbmotifs", "]", ")", "scores", "=", "self", ".", "get_all_scores", "(", "motifs", ",", "dbmotifs", ",", "match", ",", "metric", ",", "combine", ",", "parallel", "=", "parallel", ",", "ncpus", "=", "ncpus", ")", "for", "motif", "in", "scores", ":", "scores", "[", "motif", "]", "=", "sorted", "(", "scores", "[", "motif", "]", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", "[", "0", "]", ")", "[", "-", "1", "]", "for", "motif", "in", "motifs", ":", "dbmotif", ",", "score", "=", "scores", "[", "motif", ".", "id", "]", "pval", ",", "pos", ",", "orient", "=", "self", ".", "compare_motifs", "(", "motif", ",", "dbmotif_lookup", "[", "dbmotif", "]", ",", "match", ",", "metric", ",", "combine", ",", "True", ")", "scores", "[", "motif", ".", "id", "]", "=", "[", "dbmotif", ",", "(", "list", "(", "score", ")", "+", "[", "pval", "]", ")", "]", "return", "scores" ]
Return best match in database for motifs. Parameters ---------- motifs : list or str Filename of motifs or list of motifs. dbmotifs : list or str, optional Database motifs, default will be used if not specified. match : str, optional metric : str, optional combine : str, optional ncpus : int, optional Number of threads to use. Returns ------- closest_match : dict
[ "Return", "best", "match", "in", "database", "for", "motifs", "." ]
python
train
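A hedged sketch of get_closest_match with dbmotifs left unset, so the default motif database resolved from the gimmemotifs config is used, as in the code above; motifs.pwm is an illustrative input file.

```python
# motifs.pwm is an illustrative input; leaving dbmotifs unset falls back to
# the configured default motif database, per the code above.
from gimmemotifs.comparison import MotifComparer

mc = MotifComparer()
matches = mc.get_closest_match('motifs.pwm', ncpus=1)
for motif_id, (db_match, scores) in matches.items():
    print(motif_id, '->', db_match, scores)  # scores end with the p-value
```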
BernardFW/bernard
src/bernard/i18n/translator.py
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/i18n/translator.py#L607-L626
async def render( text: TransText, request: Optional['Request'], multi_line=False) -> Union[Text, List[Text]]: """ Render either a normal string either a string to translate into an actual string for the specified request. """ if isinstance(text, str): out = [text] elif isinstance(text, StringToTranslate): out = await text.render_list(request) else: raise TypeError('Provided text cannot be rendered') if multi_line: return out else: return ' '.join(out)
[ "async", "def", "render", "(", "text", ":", "TransText", ",", "request", ":", "Optional", "[", "'Request'", "]", ",", "multi_line", "=", "False", ")", "->", "Union", "[", "Text", ",", "List", "[", "Text", "]", "]", ":", "if", "isinstance", "(", "text", ",", "str", ")", ":", "out", "=", "[", "text", "]", "elif", "isinstance", "(", "text", ",", "StringToTranslate", ")", ":", "out", "=", "await", "text", ".", "render_list", "(", "request", ")", "else", ":", "raise", "TypeError", "(", "'Provided text cannot be rendered'", ")", "if", "multi_line", ":", "return", "out", "else", ":", "return", "' '", ".", "join", "(", "out", ")" ]
Render either a normal string or a string to translate into an actual string for the specified request.
[ "Render", "either", "a", "normal", "string", "either", "a", "string", "to", "translate", "into", "an", "actual", "string", "for", "the", "specified", "request", "." ]
python
train
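Because render is a coroutine, it has to be awaited. A minimal sketch for the plain-string branch (no Request needed); the import path is assumed from the file location above.

# Minimal sketch for the plain-string branch; the StringToTranslate branch
# would additionally need a real bernard Request.
import asyncio

from bernard.i18n.translator import render

async def main():
    text = await render('Hello world', request=None)
    print(text)  # -> 'Hello world'

asyncio.get_event_loop().run_until_complete(main())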
cloudera/cm_api
python/src/cm_api/endpoints/role_config_groups.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/role_config_groups.py#L229-L241
def move_roles(self, roles): """ Moves roles to this role config group. The roles can be moved from any role config group belonging to the same service. The role type of the destination group must match the role type of the roles. @param roles: The names of the roles to move. @return: List of roles which have been moved successfully. """ return move_roles(self._get_resource_root(), self.serviceRef.serviceName, self.name, roles, self.serviceRef.clusterName)
[ "def", "move_roles", "(", "self", ",", "roles", ")", ":", "return", "move_roles", "(", "self", ".", "_get_resource_root", "(", ")", ",", "self", ".", "serviceRef", ".", "serviceName", ",", "self", ".", "name", ",", "roles", ",", "self", ".", "serviceRef", ".", "clusterName", ")" ]
Moves roles to this role config group. The roles can be moved from any role config group belonging to the same service. The role type of the destination group must match the role type of the roles. @param roles: The names of the roles to move. @return: List of roles which have been moved successfully.
[ "Moves", "roles", "to", "this", "role", "config", "group", "." ]
python
train
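A hedged usage sketch; the cluster, service, group and role names below are placeholders, and a reachable Cloudera Manager server is assumed.

# Hypothetical sketch; all names are placeholders. get_role_config_group()
# is assumed to return an ApiRoleConfigGroup exposing move_roles() above.
from cm_api.api_client import ApiResource

api = ApiResource('cm-host.example.com', username='admin', password='admin')
service = api.get_cluster('cluster1').get_service('HDFS-1')
group = service.get_role_config_group('HDFS-1-DATANODE-custom')
moved = group.move_roles(['HDFS-1-DATANODE-abc123'])
print([role.name for role in moved])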
pypa/pipenv
pipenv/vendor/dotenv/__init__.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/dotenv/__init__.py#L9-L30
def get_cli_string(path=None, action=None, key=None, value=None, quote=None):
    """Returns a string suitable for running as a shell script.

    Useful for converting arguments passed to a Fabric task
    to be passed to a `local` or `run` command.
    """
    command = ['dotenv']
    if quote:
        command.append('-q %s' % quote)
    if path:
        command.append('-f %s' % path)
    if action:
        command.append(action)
        if key:
            command.append(key)
            if value:
                if ' ' in value:
                    command.append('"%s"' % value)
                else:
                    command.append(value)

    return ' '.join(command).strip()
[ "def", "get_cli_string", "(", "path", "=", "None", ",", "action", "=", "None", ",", "key", "=", "None", ",", "value", "=", "None", ",", "quote", "=", "None", ")", ":", "command", "=", "[", "'dotenv'", "]", "if", "quote", ":", "command", ".", "append", "(", "'-q %s'", "%", "quote", ")", "if", "path", ":", "command", ".", "append", "(", "'-f %s'", "%", "path", ")", "if", "action", ":", "command", ".", "append", "(", "action", ")", "if", "key", ":", "command", ".", "append", "(", "key", ")", "if", "value", ":", "if", "' '", "in", "value", ":", "command", ".", "append", "(", "'\"%s\"'", "%", "value", ")", "else", ":", "command", ".", "append", "(", "value", ")", "return", "' '", ".", "join", "(", "command", ")", ".", "strip", "(", ")" ]
Returns a string suitable for running as a shell script. Useful for converting a arguments passed to a fabric task to be passed to a `local` or `run` command.
[ "Returns", "a", "string", "suitable", "for", "running", "as", "a", "shell", "script", "." ]
python
train
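The function is pure string assembly, so its behavior is easy to pin down with a quick self-contained check (the values below are made up, not from the source document).

# Self-contained check of the assembled command line; all values are made up.
from dotenv import get_cli_string

cli = get_cli_string(path='.env', action='set', key='GREETING',
                     value='hello world', quote='always')
print(cli)  # -> dotenv -q always -f .env set GREETING "hello world"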
Gandi/gandi.cli
gandi/cli/modules/cert.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/cert.py#L236-L245
def advice_dcv_method(cls, csr, package, altnames, dcv_method, cert_id=None): """ Display dcv_method information. """ params = {'csr': csr, 'package': package, 'dcv_method': dcv_method} if cert_id: params['cert_id'] = cert_id result = cls.call('cert.get_dcv_params', params) if dcv_method == 'dns': cls.echo('You have to add these records in your domain zone :') cls.echo('\n'.join(result['message']))
[ "def", "advice_dcv_method", "(", "cls", ",", "csr", ",", "package", ",", "altnames", ",", "dcv_method", ",", "cert_id", "=", "None", ")", ":", "params", "=", "{", "'csr'", ":", "csr", ",", "'package'", ":", "package", ",", "'dcv_method'", ":", "dcv_method", "}", "if", "cert_id", ":", "params", "[", "'cert_id'", "]", "=", "cert_id", "result", "=", "cls", ".", "call", "(", "'cert.get_dcv_params'", ",", "params", ")", "if", "dcv_method", "==", "'dns'", ":", "cls", ".", "echo", "(", "'You have to add these records in your domain zone :'", ")", "cls", ".", "echo", "(", "'\\n'", ".", "join", "(", "result", "[", "'message'", "]", ")", ")" ]
Display dcv_method information.
[ "Display", "dcv_method", "information", "." ]
python
train
mitsei/dlkit
dlkit/json_/assessment/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/sessions.py#L9199-L9218
def get_child_banks(self, bank_id): """Gets the children of the given bank. arg: bank_id (osid.id.Id): the ``Id`` to query return: (osid.assessment.BankList) - the children of the bank raise: NotFound - ``bank_id`` is not found raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchySession.get_child_bins if self._catalog_session is not None: return self._catalog_session.get_child_catalogs(catalog_id=bank_id) return BankLookupSession( self._proxy, self._runtime).get_banks_by_ids( list(self.get_child_bank_ids(bank_id)))
[ "def", "get_child_banks", "(", "self", ",", "bank_id", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchySession.get_child_bins", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "get_child_catalogs", "(", "catalog_id", "=", "bank_id", ")", "return", "BankLookupSession", "(", "self", ".", "_proxy", ",", "self", ".", "_runtime", ")", ".", "get_banks_by_ids", "(", "list", "(", "self", ".", "get_child_bank_ids", "(", "bank_id", ")", ")", ")" ]
Gets the children of the given bank. arg: bank_id (osid.id.Id): the ``Id`` to query return: (osid.assessment.BankList) - the children of the bank raise: NotFound - ``bank_id`` is not found raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "children", "of", "the", "given", "bank", "." ]
python
train
gem/oq-engine
openquake/hazardlib/gsim/lin_lee_2008.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/lin_lee_2008.py#L102-L107
def _compute_mean(self, C, mag, rhypo, hypo_depth, mean, idx): """ Compute mean value according to equations 10 and 11 page 226. """ mean[idx] = (C['C1'] + C['C2'] * mag + C['C3'] * np.log(rhypo[idx] + C['C4'] * np.exp(C['C5'] * mag)) + C['C6'] * hypo_depth)
[ "def", "_compute_mean", "(", "self", ",", "C", ",", "mag", ",", "rhypo", ",", "hypo_depth", ",", "mean", ",", "idx", ")", ":", "mean", "[", "idx", "]", "=", "(", "C", "[", "'C1'", "]", "+", "C", "[", "'C2'", "]", "*", "mag", "+", "C", "[", "'C3'", "]", "*", "np", ".", "log", "(", "rhypo", "[", "idx", "]", "+", "C", "[", "'C4'", "]", "*", "np", ".", "exp", "(", "C", "[", "'C5'", "]", "*", "mag", ")", ")", "+", "C", "[", "'C6'", "]", "*", "hypo_depth", ")" ]
Compute mean value according to equations 10 and 11 page 226.
[ "Compute", "mean", "value", "according", "to", "equations", "10", "and", "11", "page", "226", "." ]
python
train
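Written out (equations 10 and 11 of the source paper, per the docstring), the value the code assigns at the selected indices is:

    \mathrm{mean}_i = C_1 + C_2\,M + C_3 \ln\!\left(R_{\mathrm{hyp},i} + C_4\,e^{C_5 M}\right) + C_6\,H

with M the magnitude, R_hyp the hypocentral distance and H the hypocentral depth.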
horazont/aioxmpp
aioxmpp/carbons/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/carbons/service.py#L93-L109
def disable(self): """ Disable message carbons. :raises RuntimeError: if the server does not support message carbons. :raises aioxmpp.XMPPError: if the server responded with an error to the request. :raises: as specified in :meth:`aioxmpp.Client.send` """ yield from self._check_for_feature() iq = aioxmpp.IQ( type_=aioxmpp.IQType.SET, payload=carbons_xso.Disable() ) yield from self.client.send(iq)
[ "def", "disable", "(", "self", ")", ":", "yield", "from", "self", ".", "_check_for_feature", "(", ")", "iq", "=", "aioxmpp", ".", "IQ", "(", "type_", "=", "aioxmpp", ".", "IQType", ".", "SET", ",", "payload", "=", "carbons_xso", ".", "Disable", "(", ")", ")", "yield", "from", "self", ".", "client", ".", "send", "(", "iq", ")" ]
Disable message carbons. :raises RuntimeError: if the server does not support message carbons. :raises aioxmpp.XMPPError: if the server responded with an error to the request. :raises: as specified in :meth:`aioxmpp.Client.send`
[ "Disable", "message", "carbons", "." ]
python
train
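A hedged calling sketch in the module's own yield-from style; `client` stands for an already-connected aioxmpp.Client, and CarbonsClient is the assumed name of the service class exposing disable() (not confirmed by the source).

# Hedged sketch: `client` and the CarbonsClient class name are assumptions.
import asyncio

import aioxmpp.carbons

@asyncio.coroutine
def stop_carbons(client):
    carbons = client.summon(aioxmpp.carbons.CarbonsClient)
    yield from carbons.disable()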
onicagroup/runway
runway/commands/runway/gen_sample.py
https://github.com/onicagroup/runway/blob/3f3549ec3bf6e39b9f27d9738a1847f3a4369e7f/runway/commands/runway/gen_sample.py#L107-L151
def generate_sample_cdk_cs_module(env_root, module_dir=None): """Generate skeleton CDK C# sample module.""" if module_dir is None: module_dir = os.path.join(env_root, 'sampleapp.cdk') generate_sample_module(module_dir) for i in ['add-project.hook.d.ts', 'cdk.json', 'package.json', 'runway.module.yml', 'README.md']: shutil.copyfile( os.path.join(ROOT, 'templates', 'cdk-csharp', i), os.path.join(module_dir, i), ) shutil.copyfile( os.path.join(ROOT, 'templates', 'cdk-csharp', 'dot_gitignore'), os.path.join(module_dir, '.gitignore'), ) os.mkdir(os.path.join(module_dir, 'src')) shutil.copyfile( os.path.join(ROOT, 'templates', 'cdk-csharp', 'src', 'HelloCdk.sln'), os.path.join(module_dir, 'src', 'HelloCdk.sln'), ) os.mkdir(os.path.join(module_dir, 'src', 'HelloCdk')) for i in ['HelloCdk.csproj', 'HelloConstruct.cs', 'HelloStack.cs', 'Program.cs']: shutil.copyfile( os.path.join(ROOT, 'templates', 'cdk-csharp', 'src', 'HelloCdk', i), os.path.join(module_dir, 'src', 'HelloCdk', i), ) LOGGER.info("Sample C# CDK module created at %s", module_dir) LOGGER.info('To finish its setup, change to the %s directory and execute ' '"npm install" to generate its lockfile.', module_dir)
[ "def", "generate_sample_cdk_cs_module", "(", "env_root", ",", "module_dir", "=", "None", ")", ":", "if", "module_dir", "is", "None", ":", "module_dir", "=", "os", ".", "path", ".", "join", "(", "env_root", ",", "'sampleapp.cdk'", ")", "generate_sample_module", "(", "module_dir", ")", "for", "i", "in", "[", "'add-project.hook.d.ts'", ",", "'cdk.json'", ",", "'package.json'", ",", "'runway.module.yml'", ",", "'README.md'", "]", ":", "shutil", ".", "copyfile", "(", "os", ".", "path", ".", "join", "(", "ROOT", ",", "'templates'", ",", "'cdk-csharp'", ",", "i", ")", ",", "os", ".", "path", ".", "join", "(", "module_dir", ",", "i", ")", ",", ")", "shutil", ".", "copyfile", "(", "os", ".", "path", ".", "join", "(", "ROOT", ",", "'templates'", ",", "'cdk-csharp'", ",", "'dot_gitignore'", ")", ",", "os", ".", "path", ".", "join", "(", "module_dir", ",", "'.gitignore'", ")", ",", ")", "os", ".", "mkdir", "(", "os", ".", "path", ".", "join", "(", "module_dir", ",", "'src'", ")", ")", "shutil", ".", "copyfile", "(", "os", ".", "path", ".", "join", "(", "ROOT", ",", "'templates'", ",", "'cdk-csharp'", ",", "'src'", ",", "'HelloCdk.sln'", ")", ",", "os", ".", "path", ".", "join", "(", "module_dir", ",", "'src'", ",", "'HelloCdk.sln'", ")", ",", ")", "os", ".", "mkdir", "(", "os", ".", "path", ".", "join", "(", "module_dir", ",", "'src'", ",", "'HelloCdk'", ")", ")", "for", "i", "in", "[", "'HelloCdk.csproj'", ",", "'HelloConstruct.cs'", ",", "'HelloStack.cs'", ",", "'Program.cs'", "]", ":", "shutil", ".", "copyfile", "(", "os", ".", "path", ".", "join", "(", "ROOT", ",", "'templates'", ",", "'cdk-csharp'", ",", "'src'", ",", "'HelloCdk'", ",", "i", ")", ",", "os", ".", "path", ".", "join", "(", "module_dir", ",", "'src'", ",", "'HelloCdk'", ",", "i", ")", ",", ")", "LOGGER", ".", "info", "(", "\"Sample C# CDK module created at %s\"", ",", "module_dir", ")", "LOGGER", ".", "info", "(", "'To finish its setup, change to the %s directory and execute '", "'\"npm install\" to generate its lockfile.'", ",", "module_dir", ")" ]
Generate skeleton CDK C# sample module.
[ "Generate", "skeleton", "CDK", "C#", "sample", "module", "." ]
python
train
adamcharnock/swiftwind
swiftwind/costs/models.py
https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L146-L150
def get_amount_arrears_balance(self, billing_cycle): """Get the balance of to_account at the end of billing_cycle""" return self.to_account.balance( transaction__date__lt=billing_cycle.date_range.lower, )
[ "def", "get_amount_arrears_balance", "(", "self", ",", "billing_cycle", ")", ":", "return", "self", ".", "to_account", ".", "balance", "(", "transaction__date__lt", "=", "billing_cycle", ".", "date_range", ".", "lower", ",", ")" ]
Get the balance of to_account at the end of billing_cycle
[ "Get", "the", "balance", "of", "to_account", "at", "the", "end", "of", "billing_cycle" ]
python
train
materialsproject/pymatgen
pymatgen/io/abinit/pseudos.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/pseudos.py#L1439-L1462
def plot_projectors(self, ax=None, fontsize=12, **kwargs): """ Plot the PAW projectors. Args: ax: matplotlib :class:`Axes` or None if a new figure should be created. Returns: `matplotlib` figure """ ax, fig, plt = get_ax_fig_plt(ax) title = kwargs.pop("title", "Projectors") ax.grid(True) ax.set_xlabel('r [Bohr]') ax.set_ylabel(r"$r\tilde p\, [Bohr]^{-\frac{1}{2}}$") #ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--") #ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1)) for state, rfunc in self.projector_functions.items(): ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, label="TPROJ: " + state) ax.legend(loc="best", shadow=True, fontsize=fontsize) return fig
[ "def", "plot_projectors", "(", "self", ",", "ax", "=", "None", ",", "fontsize", "=", "12", ",", "*", "*", "kwargs", ")", ":", "ax", ",", "fig", ",", "plt", "=", "get_ax_fig_plt", "(", "ax", ")", "title", "=", "kwargs", ".", "pop", "(", "\"title\"", ",", "\"Projectors\"", ")", "ax", ".", "grid", "(", "True", ")", "ax", ".", "set_xlabel", "(", "'r [Bohr]'", ")", "ax", ".", "set_ylabel", "(", "r\"$r\\tilde p\\, [Bohr]^{-\\frac{1}{2}}$\"", ")", "#ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle=\"--\")", "#ax.annotate(\"$r_c$\", xy=(self.paw_radius + 0.1, 0.1))", "for", "state", ",", "rfunc", "in", "self", ".", "projector_functions", ".", "items", "(", ")", ":", "ax", ".", "plot", "(", "rfunc", ".", "mesh", ",", "rfunc", ".", "mesh", "*", "rfunc", ".", "values", ",", "label", "=", "\"TPROJ: \"", "+", "state", ")", "ax", ".", "legend", "(", "loc", "=", "\"best\"", ",", "shadow", "=", "True", ",", "fontsize", "=", "fontsize", ")", "return", "fig" ]
Plot the PAW projectors. Args: ax: matplotlib :class:`Axes` or None if a new figure should be created. Returns: `matplotlib` figure
[ "Plot", "the", "PAW", "projectors", "." ]
python
train
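A short plotting sketch under stated assumptions: "Si.xml" is a placeholder for a PAW dataset file, and Pseudo.from_file() is assumed to return a PAW pseudopotential object exposing the method above.

# Hypothetical sketch; the filename is a placeholder and the returned object
# is assumed to be a PAW pseudo with plot_projectors().
from pymatgen.io.abinit.pseudos import Pseudo

pseudo = Pseudo.from_file("Si.xml")
fig = pseudo.plot_projectors(fontsize=10)
fig.savefig("projectors.png")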
ntoll/microfs
microfs.py
https://github.com/ntoll/microfs/blob/11387109cfc36aaddceb018596ea75d55417ca0c/microfs.py#L111-L119
def get_serial(): """ Detect if a micro:bit is connected and return a serial object to talk to it. """ port, serial_number = find_microbit() if port is None: raise IOError('Could not find micro:bit.') return Serial(port, SERIAL_BAUD_RATE, timeout=1, parity='N')
[ "def", "get_serial", "(", ")", ":", "port", ",", "serial_number", "=", "find_microbit", "(", ")", "if", "port", "is", "None", ":", "raise", "IOError", "(", "'Could not find micro:bit.'", ")", "return", "Serial", "(", "port", ",", "SERIAL_BAUD_RATE", ",", "timeout", "=", "1", ",", "parity", "=", "'N'", ")" ]
Detect if a micro:bit is connected and return a serial object to talk to it.
[ "Detect", "if", "a", "micro", ":", "bit", "is", "connected", "and", "return", "a", "serial", "object", "to", "talk", "to", "it", "." ]
python
train
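A minimal sketch; it requires a micro:bit actually plugged in over USB, otherwise the call raises IOError as documented.

# Minimal sketch; needs a micro:bit on USB, otherwise IOError is raised.
from microfs import get_serial

serial = get_serial()
try:
    serial.write(b'\x03')  # Ctrl-C interrupts any running MicroPython program
    print(serial.read(64))
finally:
    serial.close()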
HPENetworking/PYHPEIMC
build/lib/pyhpimc/plat/operator.py
https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/build/lib/pyhpimc/plat/operator.py#L115-L129
def get_plat_operator(auth, url, headers=HEADERS):
    '''
    Function takes no inputs and returns a list of dictionaries of all of the operators currently configured on the HPE IMC system
    :return: list of dictionaries
    '''
    get_operator_url = '/imcrs/plat/operator?start=0&size=1000&orderBy=id&desc=false&total=false'
    f_url = url + get_operator_url
    try:
        r = requests.get(f_url, auth=auth, headers=headers)
        plat_oper_list = json.loads(r.text)
        return plat_oper_list['operator']
    except requests.exceptions.RequestException as e:
        print("Error:\n" + str(e) + ' get_plat_operator: An Error has occured')
        return "Error:\n" + str(e) + ' get_plat_operator: An Error has occured'
[ "def", "get_plat_operator", "(", "auth", ",", "url", ",", "headers", "=", "HEADERS", ")", ":", "get_operator_url", "=", "'/imcrs/plat/operator?start=0&size=1000&orderBy=id&desc=false&total=false'", "f_url", "=", "url", "+", "get_operator_url", "try", ":", "r", "=", "requests", ".", "get", "(", "f_url", ",", "auth", "=", "auth", ",", "headers", "=", "headers", ")", "plat_oper_list", "=", "json", ".", "loads", "(", "r", ".", "text", ")", "return", "plat_oper_list", "[", "'operator'", "]", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "print", "(", "\"Error:\\n\"", "+", "str", "(", "e", ")", "+", "' get_plat_operator: An Error has occured'", ")", "return", "\"Error:\\n\"", "+", "str", "(", "e", ")", "+", "' get_plat_operator: An Error has occured'" ]
Function takes no inputs and returns a list of dictionaries of all of the operators currently configured on the HPE IMC system

:return: list of dictionaries
[ "Funtion", "takes", "no", "inputs", "and", "returns", "a", "list", "of", "dictionaties", "of", "all", "of", "the", "operators", "currently", "configured", "on", "the", "HPE", "IMC", "system", ":", "return", ":", "list", "of", "dictionaries" ]
python
train
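A hedged call sketch; the IP, port and credentials are placeholders, and the import path mirrors the file location above (it may differ in packaged releases).

# Hedged sketch; host and credentials are placeholders.
import requests
from pyhpimc.plat.operator import get_plat_operator

auth = requests.auth.HTTPBasicAuth('admin', 'admin')
url = 'http://10.101.0.203:8080'
for operator in get_plat_operator(auth, url):
    print(operator.get('name'))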
JasonKessler/scattertext
scattertext/ScatterChart.py
https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/ScatterChart.py#L106-L134
def inject_metadata_descriptions(self, term_dict): ''' Inserts a set of descriptions of meta data terms. These will be displayed below the scatter plot when a meta data term is clicked. All keys in the term dict must occur as meta data. Parameters ---------- term_dict: dict {metadataname: str: 'explanation to insert', ...} Returns ------- self: ScatterChart ''' assert type(term_dict) == dict if not self.term_doc_matrix.metadata_in_use(): raise TermDocMatrixHasNoMetadataException("No metadata is present in the term document matrix") # This doesn't seem necessary. If a definition's not in the corpus, it just won't be shown. # if set(term_dict.keys()) - set(self.term_doc_matrix.get_metadata()) != set(): # raise Exception('The following meta data terms are not present: ' # + ', '.join(list(set(term_dict.keys()) - set(self.term_doc_matrix.get_metadata())))) if sys.version_info[0] == 2: assert set([type(v) for v in term_dict.values()]) - set([str, unicode]) == set() else: assert set([type(v) for v in term_dict.values()]) - set([str]) == set() self.metadata_descriptions = term_dict return self
[ "def", "inject_metadata_descriptions", "(", "self", ",", "term_dict", ")", ":", "assert", "type", "(", "term_dict", ")", "==", "dict", "if", "not", "self", ".", "term_doc_matrix", ".", "metadata_in_use", "(", ")", ":", "raise", "TermDocMatrixHasNoMetadataException", "(", "\"No metadata is present in the term document matrix\"", ")", "# This doesn't seem necessary. If a definition's not in the corpus, it just won't be shown.", "# if set(term_dict.keys()) - set(self.term_doc_matrix.get_metadata()) != set():", "# raise Exception('The following meta data terms are not present: '", "# + ', '.join(list(set(term_dict.keys()) - set(self.term_doc_matrix.get_metadata()))))", "if", "sys", ".", "version_info", "[", "0", "]", "==", "2", ":", "assert", "set", "(", "[", "type", "(", "v", ")", "for", "v", "in", "term_dict", ".", "values", "(", ")", "]", ")", "-", "set", "(", "[", "str", ",", "unicode", "]", ")", "==", "set", "(", ")", "else", ":", "assert", "set", "(", "[", "type", "(", "v", ")", "for", "v", "in", "term_dict", ".", "values", "(", ")", "]", ")", "-", "set", "(", "[", "str", "]", ")", "==", "set", "(", ")", "self", ".", "metadata_descriptions", "=", "term_dict", "return", "self" ]
Inserts a set of descriptions of meta data terms. These will be displayed below the scatter plot when a meta data term is clicked. All keys in the term dict must occur as meta data. Parameters ---------- term_dict: dict {metadataname: str: 'explanation to insert', ...} Returns ------- self: ScatterChart
[ "Inserts", "a", "set", "of", "descriptions", "of", "meta", "data", "terms", ".", "These", "will", "be", "displayed", "below", "the", "scatter", "plot", "when", "a", "meta", "data", "term", "is", "clicked", ".", "All", "keys", "in", "the", "term", "dict", "must", "occur", "as", "meta", "data", "." ]
python
train
ihucos/plash
opt/plash/lib/py/plash/macros/common.py
https://github.com/ihucos/plash/blob/2ab2bc956e309d5aa6414c80983bfbf29b0ce572/opt/plash/lib/py/plash/macros/common.py#L78-L92
def eval_file(file):
    'evaluate file content as expressions'
    fname = os.path.realpath(os.path.expanduser(file))
    with open(fname) as f:
        inscript = f.read()
    sh = run_write_read(['plash', 'eval'], inscript.encode()).decode()

    # we remove a possibly existing newline
    # because otherwise this macro would add one
    if sh.endswith('\n'):
        return sh[:-1]

    return sh
[ "def", "eval_file", "(", "file", ")", ":", "fname", "=", "os", ".", "path", ".", "realpath", "(", "os", ".", "path", ".", "expanduser", "(", "file", ")", ")", "with", "open", "(", "fname", ")", "as", "f", ":", "inscript", "=", "f", ".", "read", "(", ")", "sh", "=", "run_write_read", "(", "[", "'plash'", ",", "'eval'", "]", ",", "inscript", ".", "encode", "(", ")", ")", ".", "decode", "(", ")", "# we remove an possibly existing newline", "# because else this macros would add one", "if", "sh", ".", "endswith", "(", "'\\n'", ")", ":", "return", "sh", "[", ":", "-", "1", "]", "return", "sh" ]
evaluate file content as expressions
[ "evaluate", "file", "content", "as", "expressions" ]
python
train
shidenggui/easyquotation
easyquotation/jsl.py
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/jsl.py#L209-L221
def fundm(self):
    """Return mother fund data of structured funds as a dictionary
    """
    # append the current ctime
    self.__fundm_url = self.__fundm_url.format(ctime=int(time.time()))
    # request the data
    rep = requests.get(self.__fundm_url)
    # get the returned JSON string
    fundmjson = json.loads(rep.text)
    # format the returned JSON string
    data = self.formatfundajson(fundmjson)
    self.__fundm = data
    return self.__fundm
[ "def", "fundm", "(", "self", ")", ":", "# 添加当前的ctime", "self", ".", "__fundm_url", "=", "self", ".", "__fundm_url", ".", "format", "(", "ctime", "=", "int", "(", "time", ".", "time", "(", ")", ")", ")", "# 请求数据", "rep", "=", "requests", ".", "get", "(", "self", ".", "__fundm_url", ")", "# 获取返回的json字符串", "fundmjson", "=", "json", ".", "loads", "(", "rep", ".", "text", ")", "# 格式化返回的json字符串", "data", "=", "self", ".", "formatfundajson", "(", "fundmjson", ")", "self", ".", "__fundm", "=", "data", "return", "self", ".", "__fundm" ]
Return mother fund data of structured funds as a dictionary
[ "以字典形式返回分级母基数据" ]
python
train
astropy/photutils
photutils/background/background_2d.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/background/background_2d.py#L501-L535
def _make_2d_array(self, data): """ Convert a 1D array of mesh values to a masked 2D mesh array given the 1D mesh indices ``mesh_idx``. Parameters ---------- data : 1D `~numpy.ndarray` A 1D array of mesh values. Returns ------- result : 2D `~numpy.ma.MaskedArray` A 2D masked array. Pixels not defined in ``mesh_idx`` are masked. """ if data.shape != self.mesh_idx.shape: raise ValueError('data and mesh_idx must have the same shape') if np.ma.is_masked(data): raise ValueError('data must not be a masked array') data2d = np.zeros(self._mesh_shape).astype(data.dtype) data2d[self.mesh_yidx, self.mesh_xidx] = data if len(self.mesh_idx) == self.nboxes: # no meshes were masked return data2d else: # some meshes were masked mask2d = np.ones(data2d.shape).astype(np.bool) mask2d[self.mesh_yidx, self.mesh_xidx] = False return np.ma.masked_array(data2d, mask=mask2d)
[ "def", "_make_2d_array", "(", "self", ",", "data", ")", ":", "if", "data", ".", "shape", "!=", "self", ".", "mesh_idx", ".", "shape", ":", "raise", "ValueError", "(", "'data and mesh_idx must have the same shape'", ")", "if", "np", ".", "ma", ".", "is_masked", "(", "data", ")", ":", "raise", "ValueError", "(", "'data must not be a masked array'", ")", "data2d", "=", "np", ".", "zeros", "(", "self", ".", "_mesh_shape", ")", ".", "astype", "(", "data", ".", "dtype", ")", "data2d", "[", "self", ".", "mesh_yidx", ",", "self", ".", "mesh_xidx", "]", "=", "data", "if", "len", "(", "self", ".", "mesh_idx", ")", "==", "self", ".", "nboxes", ":", "# no meshes were masked", "return", "data2d", "else", ":", "# some meshes were masked", "mask2d", "=", "np", ".", "ones", "(", "data2d", ".", "shape", ")", ".", "astype", "(", "np", ".", "bool", ")", "mask2d", "[", "self", ".", "mesh_yidx", ",", "self", ".", "mesh_xidx", "]", "=", "False", "return", "np", ".", "ma", ".", "masked_array", "(", "data2d", ",", "mask", "=", "mask2d", ")" ]
Convert a 1D array of mesh values to a masked 2D mesh array given the 1D mesh indices ``mesh_idx``. Parameters ---------- data : 1D `~numpy.ndarray` A 1D array of mesh values. Returns ------- result : 2D `~numpy.ma.MaskedArray` A 2D masked array. Pixels not defined in ``mesh_idx`` are masked.
[ "Convert", "a", "1D", "array", "of", "mesh", "values", "to", "a", "masked", "2D", "mesh", "array", "given", "the", "1D", "mesh", "indices", "mesh_idx", "." ]
python
train
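The scatter-and-mask idea stands on its own; a standalone illustration of the pattern (not photutils' private API), with made-up index arrays and values.

# Standalone illustration of the scatter-then-mask pattern used above.
import numpy as np

mesh_yidx = np.array([0, 0, 1])
mesh_xidx = np.array([0, 2, 1])
values = np.array([10.0, 20.0, 30.0])

data2d = np.zeros((2, 3))
data2d[mesh_yidx, mesh_xidx] = values

mask2d = np.ones((2, 3), dtype=bool)   # everything masked...
mask2d[mesh_yidx, mesh_xidx] = False   # ...except the defined meshes
result = np.ma.masked_array(data2d, mask=mask2d)
print(result)
# [[10.0 -- 20.0]
#  [-- 30.0 --]]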
zeaphoo/budoc
budoc/pydoc.py
https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L653-L661
def instance_variables(self): """ Returns all instance variables in the class, sorted alphabetically as a list of `pydoc.Variable`. Instance variables are attributes of `self` defined in a class's `__init__` method. """ p = lambda o: isinstance(o, Variable) and self.module._docfilter(o) return filter(p, self.doc_init.values())
[ "def", "instance_variables", "(", "self", ")", ":", "p", "=", "lambda", "o", ":", "isinstance", "(", "o", ",", "Variable", ")", "and", "self", ".", "module", ".", "_docfilter", "(", "o", ")", "return", "filter", "(", "p", ",", "self", ".", "doc_init", ".", "values", "(", ")", ")" ]
Returns all instance variables in the class, sorted alphabetically as a list of `pydoc.Variable`. Instance variables are attributes of `self` defined in a class's `__init__` method.
[ "Returns", "all", "instance", "variables", "in", "the", "class", "sorted", "alphabetically", "as", "a", "list", "of", "pydoc", ".", "Variable", ".", "Instance", "variables", "are", "attributes", "of", "self", "defined", "in", "a", "class", "s", "__init__", "method", "." ]
python
train
PMEAL/OpenPNM
openpnm/utils/Project.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/utils/Project.py#L76-L94
def extend(self, obj): r""" This function is used to add objects to the project. Arguments can be single OpenPNM objects, an OpenPNM project list, or a plain list of OpenPNM objects. """ if type(obj) is not list: obj = [obj] for item in obj: if hasattr(item, '_mro'): if 'GenericNetwork' in item._mro(): if self.network: raise Exception('Project already has a network') # Must use append since extend breaks the dicts up into # separate objects, while append keeps it as a single object. super().append(item) else: raise Exception('Only OpenPNM objects can be added')
[ "def", "extend", "(", "self", ",", "obj", ")", ":", "if", "type", "(", "obj", ")", "is", "not", "list", ":", "obj", "=", "[", "obj", "]", "for", "item", "in", "obj", ":", "if", "hasattr", "(", "item", ",", "'_mro'", ")", ":", "if", "'GenericNetwork'", "in", "item", ".", "_mro", "(", ")", ":", "if", "self", ".", "network", ":", "raise", "Exception", "(", "'Project already has a network'", ")", "# Must use append since extend breaks the dicts up into", "# separate objects, while append keeps it as a single object.", "super", "(", ")", ".", "append", "(", "item", ")", "else", ":", "raise", "Exception", "(", "'Only OpenPNM objects can be added'", ")" ]
r""" This function is used to add objects to the project. Arguments can be single OpenPNM objects, an OpenPNM project list, or a plain list of OpenPNM objects.
[ "r", "This", "function", "is", "used", "to", "add", "objects", "to", "the", "project", ".", "Arguments", "can", "be", "single", "OpenPNM", "objects", "an", "OpenPNM", "project", "list", "or", "a", "plain", "list", "of", "OpenPNM", "objects", "." ]
python
train
thomwiggers/httpserver
httpserver/httpserver.py
https://github.com/thomwiggers/httpserver/blob/88a3a35619ce5185347c6764f211878e898e6aad/httpserver/httpserver.py#L173-L184
def _get_request_uri(self, request):
    """Parse the request URI into something useful

    Server MUST accept full URIs (5.1.2)"""
    request_uri = request['target']
    if request_uri.startswith('/'):  # eg. GET /index.html
        return (request.get('Host', 'localhost').split(':')[0],
                request_uri[1:])
    elif '://' in request_uri:  # eg. GET http://rded.nl
        locator = request_uri.split('://', 1)[1]
        # partition() keeps this working when the URI has no path component
        host, _, path = locator.partition('/')
        return (host.split(':')[0], path)
[ "def", "_get_request_uri", "(", "self", ",", "request", ")", ":", "request_uri", "=", "request", "[", "'target'", "]", "if", "request_uri", ".", "startswith", "(", "'/'", ")", ":", "# eg. GET /index.html", "return", "(", "request", ".", "get", "(", "'Host'", ",", "'localhost'", ")", ".", "split", "(", "':'", ")", "[", "0", "]", ",", "request_uri", "[", "1", ":", "]", ")", "elif", "'://'", "in", "request_uri", ":", "# eg. GET http://rded.nl", "locator", "=", "request_uri", ".", "split", "(", "'://'", ",", "1", ")", "[", "1", "]", "host", ",", "path", "=", "locator", ".", "split", "(", "'/'", ",", "1", ")", "return", "(", "host", ".", "split", "(", "':'", ")", "[", "0", "]", ",", "path", ")" ]
Parse the request URI into something useful Server MUST accept full URIs (5.1.2)
[ "Parse", "the", "request", "URI", "into", "something", "useful" ]
python
train
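The two branches are easy to exercise standalone; this is a re-implementation for illustration only, not the module's public API.

# Standalone re-implementation of the two branches above.
def split_request_uri(target, host_header='localhost'):
    if target.startswith('/'):                    # eg. GET /index.html
        return host_header.split(':')[0], target[1:]
    if '://' in target:                           # eg. GET http://rded.nl
        locator = target.split('://', 1)[1]
        host, _, path = locator.partition('/')
        return host.split(':')[0], path

print(split_request_uri('/index.html', 'example.com:8080'))
# -> ('example.com', 'index.html')
print(split_request_uri('http://rded.nl'))
# -> ('rded.nl', '')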
google/grr
grr/server/grr_response_server/artifact.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/artifact.py#L61-L111
def GetArtifactKnowledgeBase(client_obj, allow_uninitialized=False): """This generates an artifact knowledge base from a GRR client. Args: client_obj: A GRRClient object which is opened for reading. allow_uninitialized: If True we accept an uninitialized knowledge_base. Returns: A KnowledgeBase semantic value. Raises: ArtifactProcessingError: If called when the knowledge base has not been initialized. KnowledgeBaseUninitializedError: If we failed to initialize the knowledge base. This is needed so that the artifact library has a standardized interface to the data that is actually stored in the GRRClient object in the GRR datastore. We expect that the client KNOWLEDGE_BASE is already filled out through the, KnowledgeBaseInitialization flow, but attempt to make some intelligent guesses if things failed. """ client_schema = client_obj.Schema kb = client_obj.Get(client_schema.KNOWLEDGE_BASE) if not allow_uninitialized: if not kb: raise artifact_utils.KnowledgeBaseUninitializedError( "KnowledgeBase empty for %s." % client_obj.urn) if not kb.os: raise artifact_utils.KnowledgeBaseAttributesMissingError( "KnowledgeBase missing OS for %s. Knowledgebase content: %s" % (client_obj.urn, kb)) if not kb: kb = client_schema.KNOWLEDGE_BASE() SetCoreGRRKnowledgeBaseValues(kb, client_obj) if kb.os == "Windows": # Add fallback values. if not kb.environ_allusersappdata and kb.environ_allusersprofile: # Guess if we don't have it already. if kb.os_major_version >= 6: kb.environ_allusersappdata = u"c:\\programdata" kb.environ_allusersprofile = u"c:\\programdata" else: kb.environ_allusersappdata = (u"c:\\documents and settings\\All Users\\" "Application Data") kb.environ_allusersprofile = u"c:\\documents and settings\\All Users" return kb
[ "def", "GetArtifactKnowledgeBase", "(", "client_obj", ",", "allow_uninitialized", "=", "False", ")", ":", "client_schema", "=", "client_obj", ".", "Schema", "kb", "=", "client_obj", ".", "Get", "(", "client_schema", ".", "KNOWLEDGE_BASE", ")", "if", "not", "allow_uninitialized", ":", "if", "not", "kb", ":", "raise", "artifact_utils", ".", "KnowledgeBaseUninitializedError", "(", "\"KnowledgeBase empty for %s.\"", "%", "client_obj", ".", "urn", ")", "if", "not", "kb", ".", "os", ":", "raise", "artifact_utils", ".", "KnowledgeBaseAttributesMissingError", "(", "\"KnowledgeBase missing OS for %s. Knowledgebase content: %s\"", "%", "(", "client_obj", ".", "urn", ",", "kb", ")", ")", "if", "not", "kb", ":", "kb", "=", "client_schema", ".", "KNOWLEDGE_BASE", "(", ")", "SetCoreGRRKnowledgeBaseValues", "(", "kb", ",", "client_obj", ")", "if", "kb", ".", "os", "==", "\"Windows\"", ":", "# Add fallback values.", "if", "not", "kb", ".", "environ_allusersappdata", "and", "kb", ".", "environ_allusersprofile", ":", "# Guess if we don't have it already.", "if", "kb", ".", "os_major_version", ">=", "6", ":", "kb", ".", "environ_allusersappdata", "=", "u\"c:\\\\programdata\"", "kb", ".", "environ_allusersprofile", "=", "u\"c:\\\\programdata\"", "else", ":", "kb", ".", "environ_allusersappdata", "=", "(", "u\"c:\\\\documents and settings\\\\All Users\\\\\"", "\"Application Data\"", ")", "kb", ".", "environ_allusersprofile", "=", "u\"c:\\\\documents and settings\\\\All Users\"", "return", "kb" ]
This generates an artifact knowledge base from a GRR client. Args: client_obj: A GRRClient object which is opened for reading. allow_uninitialized: If True we accept an uninitialized knowledge_base. Returns: A KnowledgeBase semantic value. Raises: ArtifactProcessingError: If called when the knowledge base has not been initialized. KnowledgeBaseUninitializedError: If we failed to initialize the knowledge base. This is needed so that the artifact library has a standardized interface to the data that is actually stored in the GRRClient object in the GRR datastore. We expect that the client KNOWLEDGE_BASE is already filled out through the, KnowledgeBaseInitialization flow, but attempt to make some intelligent guesses if things failed.
[ "This", "generates", "an", "artifact", "knowledge", "base", "from", "a", "GRR", "client", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/variation/vfilter.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vfilter.py#L78-L112
def _freebayes_cutoff(in_file, data): """Perform filtering of FreeBayes results, flagging low confidence calls. Filters using cutoffs on low depth based on Meynert et al's work modeling sensitivity of homozygote and heterozygote calling on depth: http://www.ncbi.nlm.nih.gov/pubmed/23773188 and high depth heterozygote SNP filtering based on Heng Li's work evaluating variant calling artifacts: http://arxiv.org/abs/1404.0929 Tuned based on NA12878 call comparisons to Genome in a Bottle reference genome. """ if not vcfutils.vcf_has_variants(in_file): base, ext = utils.splitext_plus(in_file) out_file = "{base}-filter{ext}".format(**locals()) if not utils.file_exists(out_file): shutil.copy(in_file, out_file) if out_file.endswith(".vcf.gz"): out_file = vcfutils.bgzip_and_index(out_file, data["config"]) return out_file depth_thresh, qual_thresh = None, None if _do_high_depth_filter(data): stats = _calc_vcf_stats(in_file) if stats["avg_depth"] > 0: depth_thresh = int(math.ceil(stats["avg_depth"] + 3 * math.pow(stats["avg_depth"], 0.5))) qual_thresh = depth_thresh * 2.0 # Multiplier from default GATK QD cutoff filter filters = ('(AF[0] <= 0.5 && (max(FORMAT/DP) < 4 || (max(FORMAT/DP) < 13 && %QUAL < 10))) || ' '(AF[0] > 0.5 && (max(FORMAT/DP) < 4 && %QUAL < 50))') if depth_thresh: filters += ' || (%QUAL < {qual_thresh} && max(FORMAT/DP) > {depth_thresh} && AF[0] <= 0.5)'.format(**locals()) return cutoff_w_expression(in_file, filters, data, name="FBQualDepth")
[ "def", "_freebayes_cutoff", "(", "in_file", ",", "data", ")", ":", "if", "not", "vcfutils", ".", "vcf_has_variants", "(", "in_file", ")", ":", "base", ",", "ext", "=", "utils", ".", "splitext_plus", "(", "in_file", ")", "out_file", "=", "\"{base}-filter{ext}\"", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "shutil", ".", "copy", "(", "in_file", ",", "out_file", ")", "if", "out_file", ".", "endswith", "(", "\".vcf.gz\"", ")", ":", "out_file", "=", "vcfutils", ".", "bgzip_and_index", "(", "out_file", ",", "data", "[", "\"config\"", "]", ")", "return", "out_file", "depth_thresh", ",", "qual_thresh", "=", "None", ",", "None", "if", "_do_high_depth_filter", "(", "data", ")", ":", "stats", "=", "_calc_vcf_stats", "(", "in_file", ")", "if", "stats", "[", "\"avg_depth\"", "]", ">", "0", ":", "depth_thresh", "=", "int", "(", "math", ".", "ceil", "(", "stats", "[", "\"avg_depth\"", "]", "+", "3", "*", "math", ".", "pow", "(", "stats", "[", "\"avg_depth\"", "]", ",", "0.5", ")", ")", ")", "qual_thresh", "=", "depth_thresh", "*", "2.0", "# Multiplier from default GATK QD cutoff filter", "filters", "=", "(", "'(AF[0] <= 0.5 && (max(FORMAT/DP) < 4 || (max(FORMAT/DP) < 13 && %QUAL < 10))) || '", "'(AF[0] > 0.5 && (max(FORMAT/DP) < 4 && %QUAL < 50))'", ")", "if", "depth_thresh", ":", "filters", "+=", "' || (%QUAL < {qual_thresh} && max(FORMAT/DP) > {depth_thresh} && AF[0] <= 0.5)'", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "return", "cutoff_w_expression", "(", "in_file", ",", "filters", ",", "data", ",", "name", "=", "\"FBQualDepth\"", ")" ]
Perform filtering of FreeBayes results, flagging low confidence calls. Filters using cutoffs on low depth based on Meynert et al's work modeling sensitivity of homozygote and heterozygote calling on depth: http://www.ncbi.nlm.nih.gov/pubmed/23773188 and high depth heterozygote SNP filtering based on Heng Li's work evaluating variant calling artifacts: http://arxiv.org/abs/1404.0929 Tuned based on NA12878 call comparisons to Genome in a Bottle reference genome.
[ "Perform", "filtering", "of", "FreeBayes", "results", "flagging", "low", "confidence", "calls", "." ]
python
train
townsenddw/jhubctl
jhubctl/hubs/single.py
https://github.com/townsenddw/jhubctl/blob/c8c20f86a16e9d01dd90e4607d81423417cc773b/jhubctl/hubs/single.py#L52-L59
def get(self): """Get specific information about this hub.""" output = helm("get", self.release) if output.returncode != 0: print("Something went wrong!") print(output.stderr) else: print(output.stdout)
[ "def", "get", "(", "self", ")", ":", "output", "=", "helm", "(", "\"get\"", ",", "self", ".", "release", ")", "if", "output", ".", "returncode", "!=", "0", ":", "print", "(", "\"Something went wrong!\"", ")", "print", "(", "output", ".", "stderr", ")", "else", ":", "print", "(", "output", ".", "stdout", ")" ]
Get specific information about this hub.
[ "Get", "specific", "information", "about", "this", "hub", "." ]
python
train
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_map/__init__.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_map/__init__.py#L136-L173
def cmd_map(self, args): '''map commands''' from MAVProxy.modules.mavproxy_map import mp_slipmap if len(args) < 1: print("usage: map <icon|set>") elif args[0] == "icon": if len(args) < 3: print("Usage: map icon <lat> <lon> <icon>") else: lat = args[1] lon = args[2] flag = 'flag.png' if len(args) > 3: flag = args[3] + '.png' icon = self.map.icon(flag) self.map.add_object(mp_slipmap.SlipIcon('icon - %s [%u]' % (str(flag),self.icon_counter), (float(lat),float(lon)), icon, layer=3, rotation=0, follow=False)) self.icon_counter += 1 elif args[0] == "set": self.map_settings.command(args[1:]) self.map.add_object(mp_slipmap.SlipBrightness(self.map_settings.brightness)) elif args[0] == "sethome": self.cmd_set_home(args) elif args[0] == "sethomepos": self.cmd_set_homepos(args) elif args[0] == "setorigin": self.cmd_set_origin(args) elif args[0] == "setoriginpos": self.cmd_set_originpos(args) elif args[0] == "zoom": self.cmd_zoom(args) elif args[0] == "center": self.cmd_center(args) elif args[0] == "follow": self.cmd_follow(args) else: print("usage: map <icon|set>")
[ "def", "cmd_map", "(", "self", ",", "args", ")", ":", "from", "MAVProxy", ".", "modules", ".", "mavproxy_map", "import", "mp_slipmap", "if", "len", "(", "args", ")", "<", "1", ":", "print", "(", "\"usage: map <icon|set>\"", ")", "elif", "args", "[", "0", "]", "==", "\"icon\"", ":", "if", "len", "(", "args", ")", "<", "3", ":", "print", "(", "\"Usage: map icon <lat> <lon> <icon>\"", ")", "else", ":", "lat", "=", "args", "[", "1", "]", "lon", "=", "args", "[", "2", "]", "flag", "=", "'flag.png'", "if", "len", "(", "args", ")", ">", "3", ":", "flag", "=", "args", "[", "3", "]", "+", "'.png'", "icon", "=", "self", ".", "map", ".", "icon", "(", "flag", ")", "self", ".", "map", ".", "add_object", "(", "mp_slipmap", ".", "SlipIcon", "(", "'icon - %s [%u]'", "%", "(", "str", "(", "flag", ")", ",", "self", ".", "icon_counter", ")", ",", "(", "float", "(", "lat", ")", ",", "float", "(", "lon", ")", ")", ",", "icon", ",", "layer", "=", "3", ",", "rotation", "=", "0", ",", "follow", "=", "False", ")", ")", "self", ".", "icon_counter", "+=", "1", "elif", "args", "[", "0", "]", "==", "\"set\"", ":", "self", ".", "map_settings", ".", "command", "(", "args", "[", "1", ":", "]", ")", "self", ".", "map", ".", "add_object", "(", "mp_slipmap", ".", "SlipBrightness", "(", "self", ".", "map_settings", ".", "brightness", ")", ")", "elif", "args", "[", "0", "]", "==", "\"sethome\"", ":", "self", ".", "cmd_set_home", "(", "args", ")", "elif", "args", "[", "0", "]", "==", "\"sethomepos\"", ":", "self", ".", "cmd_set_homepos", "(", "args", ")", "elif", "args", "[", "0", "]", "==", "\"setorigin\"", ":", "self", ".", "cmd_set_origin", "(", "args", ")", "elif", "args", "[", "0", "]", "==", "\"setoriginpos\"", ":", "self", ".", "cmd_set_originpos", "(", "args", ")", "elif", "args", "[", "0", "]", "==", "\"zoom\"", ":", "self", ".", "cmd_zoom", "(", "args", ")", "elif", "args", "[", "0", "]", "==", "\"center\"", ":", "self", ".", "cmd_center", "(", "args", ")", "elif", "args", "[", "0", "]", "==", "\"follow\"", ":", "self", ".", "cmd_follow", "(", "args", ")", "else", ":", "print", "(", "\"usage: map <icon|set>\"", ")" ]
map commands
[ "map", "commands" ]
python
train
callowayproject/Calloway
calloway/apps/django_ext/templatetags/fb.py
https://github.com/callowayproject/Calloway/blob/d22e98d41fbd298ab6393ba7bd84a75528be9f81/calloway/apps/django_ext/templatetags/fb.py#L43-L97
def fburl(parser, token):
    """
    Returns an absolute URL matching given view with its parameters.

    This is a way to define links that aren't tied to a particular URL
    configuration::

        {% fburl path.to.some_view arg1,arg2,name1=value1 %}

    The first argument is a path to a view. It can be an absolute python path
    or just ``app_name.view_name`` without the project name if the view is
    located inside the project. Other arguments are comma-separated values
    that will be filled in place of positional and keyword arguments in the
    URL. All arguments for the URL should be present.

    For example if you have a view ``app_name.client`` taking client's id and
    the corresponding line in a URLconf looks like this::

        ('^client/(\d+)/$', 'app_name.client')

    and this app's URLconf is included into the project's URLconf under some
    path::

        ('^clients/', include('project_name.app_name.urls'))

    then in a template you can create a link for a certain client like this::

        {% fburl app_name.client client.id %}

    The URL will look like ``/clients/client/123/``.
    """
    bits = token.contents.split(' ')
    if len(bits) < 2:
        raise template.TemplateSyntaxError("'%s' takes at least one argument"
                                           " (path to a view)" % bits[0])
    viewname = bits[1]
    args = []
    kwargs = {}
    asvar = None

    if len(bits) > 2:
        bits = iter(bits[2:])
        for bit in bits:
            if bit == 'as':
                asvar = bits.next()
                break
            else:
                for arg in bit.split(","):
                    if '=' in arg:
                        k, v = arg.split('=', 1)
                        k = k.strip()
                        kwargs[k] = parser.compile_filter(v)
                    elif arg:
                        args.append(parser.compile_filter(arg))
    return URLNode(viewname, args, kwargs, asvar)
[ "def", "fburl", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "contents", ".", "split", "(", "' '", ")", "if", "len", "(", "bits", ")", "<", "2", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"'%s' takes at least one argument\"", "\" (path to a view)\"", "%", "bits", "[", "0", "]", ")", "viewname", "=", "bits", "[", "1", "]", "args", "=", "[", "]", "kwargs", "=", "{", "}", "asvar", "=", "None", "if", "len", "(", "bits", ")", ">", "2", ":", "bits", "=", "iter", "(", "bits", "[", "2", ":", "]", ")", "for", "bit", "in", "bits", ":", "if", "bit", "==", "'as'", ":", "asvar", "=", "bits", ".", "next", "(", ")", "break", "else", ":", "for", "arg", "in", "bit", ".", "split", "(", "\",\"", ")", ":", "if", "'='", "in", "arg", ":", "k", ",", "v", "=", "arg", ".", "split", "(", "'='", ",", "1", ")", "k", "=", "k", ".", "strip", "(", ")", "kwargs", "[", "k", "]", "=", "parser", ".", "compile_filter", "(", "v", ")", "elif", "arg", ":", "args", ".", "append", "(", "parser", ".", "compile_filter", "(", "arg", ")", ")", "return", "URLNode", "(", "viewname", ",", "args", ",", "kwargs", ",", "asvar", ")" ]
Returns an absolute URL matching given view with its parameters.

This is a way to define links that aren't tied to a particular URL configuration::

    {% fburl path.to.some_view arg1,arg2,name1=value1 %}

The first argument is a path to a view. It can be an absolute python path or just ``app_name.view_name`` without the project name if the view is located inside the project. Other arguments are comma-separated values that will be filled in place of positional and keyword arguments in the URL. All arguments for the URL should be present.

For example if you have a view ``app_name.client`` taking client's id and the corresponding line in a URLconf looks like this::

    ('^client/(\d+)/$', 'app_name.client')

and this app's URLconf is included into the project's URLconf under some path::

    ('^clients/', include('project_name.app_name.urls'))

then in a template you can create a link for a certain client like this::

    {% fburl app_name.client client.id %}

The URL will look like ``/clients/client/123/``.
[ "Returns", "an", "absolute", "URL", "matching", "given", "view", "with", "its", "parameters", "." ]
python
train
deepmind/sonnet
sonnet/python/modules/spatial_transformer.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/spatial_transformer.py#L538-L546
def combine_with(self, additional_constraints): """Combines two sets of constraints into a coherent single set.""" x = additional_constraints if not isinstance(additional_constraints, AffineWarpConstraints): x = AffineWarpConstraints(additional_constraints) new_constraints = [] for left, right in zip(self._constraints, x.constraints): new_constraints.append([self._combine(x, y) for x, y in zip(left, right)]) return AffineWarpConstraints(new_constraints)
[ "def", "combine_with", "(", "self", ",", "additional_constraints", ")", ":", "x", "=", "additional_constraints", "if", "not", "isinstance", "(", "additional_constraints", ",", "AffineWarpConstraints", ")", ":", "x", "=", "AffineWarpConstraints", "(", "additional_constraints", ")", "new_constraints", "=", "[", "]", "for", "left", ",", "right", "in", "zip", "(", "self", ".", "_constraints", ",", "x", ".", "constraints", ")", ":", "new_constraints", ".", "append", "(", "[", "self", ".", "_combine", "(", "x", ",", "y", ")", "for", "x", ",", "y", "in", "zip", "(", "left", ",", "right", ")", "]", ")", "return", "AffineWarpConstraints", "(", "new_constraints", ")" ]
Combines two sets of constraints into a coherent single set.
[ "Combines", "two", "sets", "of", "constraints", "into", "a", "coherent", "single", "set", "." ]
python
train
saltstack/salt
salt/state.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/state.py#L2211-L2246
def verify_retry_data(self, retry_data): ''' verifies the specified retry data ''' retry_defaults = { 'until': True, 'attempts': 2, 'splay': 0, 'interval': 30, } expected_data = { 'until': bool, 'attempts': int, 'interval': int, 'splay': int, } validated_retry_data = {} if isinstance(retry_data, dict): for expected_key, value_type in six.iteritems(expected_data): if expected_key in retry_data: if isinstance(retry_data[expected_key], value_type): validated_retry_data[expected_key] = retry_data[expected_key] else: log.warning( 'An invalid value was passed for the retry %s, ' 'using default value \'%s\'', expected_key, retry_defaults[expected_key] ) validated_retry_data[expected_key] = retry_defaults[expected_key] else: validated_retry_data[expected_key] = retry_defaults[expected_key] else: log.warning(('State is set to retry, but a valid dict for retry ' 'configuration was not found. Using retry defaults')) validated_retry_data = retry_defaults return validated_retry_data
[ "def", "verify_retry_data", "(", "self", ",", "retry_data", ")", ":", "retry_defaults", "=", "{", "'until'", ":", "True", ",", "'attempts'", ":", "2", ",", "'splay'", ":", "0", ",", "'interval'", ":", "30", ",", "}", "expected_data", "=", "{", "'until'", ":", "bool", ",", "'attempts'", ":", "int", ",", "'interval'", ":", "int", ",", "'splay'", ":", "int", ",", "}", "validated_retry_data", "=", "{", "}", "if", "isinstance", "(", "retry_data", ",", "dict", ")", ":", "for", "expected_key", ",", "value_type", "in", "six", ".", "iteritems", "(", "expected_data", ")", ":", "if", "expected_key", "in", "retry_data", ":", "if", "isinstance", "(", "retry_data", "[", "expected_key", "]", ",", "value_type", ")", ":", "validated_retry_data", "[", "expected_key", "]", "=", "retry_data", "[", "expected_key", "]", "else", ":", "log", ".", "warning", "(", "'An invalid value was passed for the retry %s, '", "'using default value \\'%s\\''", ",", "expected_key", ",", "retry_defaults", "[", "expected_key", "]", ")", "validated_retry_data", "[", "expected_key", "]", "=", "retry_defaults", "[", "expected_key", "]", "else", ":", "validated_retry_data", "[", "expected_key", "]", "=", "retry_defaults", "[", "expected_key", "]", "else", ":", "log", ".", "warning", "(", "(", "'State is set to retry, but a valid dict for retry '", "'configuration was not found. Using retry defaults'", ")", ")", "validated_retry_data", "=", "retry_defaults", "return", "validated_retry_data" ]
verifies the specified retry data
[ "verifies", "the", "specified", "retry", "data" ]
python
train
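The defaults-plus-type-check pattern stands on its own; a standalone sketch of the same validation, independent of Salt's State class.

# Standalone sketch of the validation pattern above; not Salt's API.
RETRY_DEFAULTS = {'until': True, 'attempts': 2, 'splay': 0, 'interval': 30}
EXPECTED_TYPES = {'until': bool, 'attempts': int, 'splay': int, 'interval': int}

def validate_retry(retry_data):
    if not isinstance(retry_data, dict):
        return dict(RETRY_DEFAULTS)
    return {
        key: retry_data[key]
        if isinstance(retry_data.get(key), expected)
        else RETRY_DEFAULTS[key]
        for key, expected in EXPECTED_TYPES.items()
    }

print(validate_retry({'attempts': 5, 'interval': 'soon'}))
# -> {'until': True, 'attempts': 5, 'splay': 0, 'interval': 30}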
rootpy/rootpy
rootpy/tree/tree.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/tree/tree.py#L85-L101
def branch_type(cls, branch): """ Return the string representation for the type of a branch """ typename = branch.GetClassName() if not typename: leaf = branch.GetListOfLeaves()[0] typename = leaf.GetTypeName() # check if leaf has multiple elements leaf_count = leaf.GetLeafCount() if leaf_count: length = leaf_count.GetMaximum() else: length = leaf.GetLen() if length > 1: typename = '{0}[{1:d}]'.format(typename, length) return typename
[ "def", "branch_type", "(", "cls", ",", "branch", ")", ":", "typename", "=", "branch", ".", "GetClassName", "(", ")", "if", "not", "typename", ":", "leaf", "=", "branch", ".", "GetListOfLeaves", "(", ")", "[", "0", "]", "typename", "=", "leaf", ".", "GetTypeName", "(", ")", "# check if leaf has multiple elements", "leaf_count", "=", "leaf", ".", "GetLeafCount", "(", ")", "if", "leaf_count", ":", "length", "=", "leaf_count", ".", "GetMaximum", "(", ")", "else", ":", "length", "=", "leaf", ".", "GetLen", "(", ")", "if", "length", ">", "1", ":", "typename", "=", "'{0}[{1:d}]'", ".", "format", "(", "typename", ",", "length", ")", "return", "typename" ]
Return the string representation for the type of a branch
[ "Return", "the", "string", "representation", "for", "the", "type", "of", "a", "branch" ]
python
train
Azure/azure-sdk-for-python
azure-mgmt-web/azure/mgmt/web/operations/web_apps_operations.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-mgmt-web/azure/mgmt/web/operations/web_apps_operations.py#L7932-L7989
def start_web_site_network_trace_operation( self, resource_group_name, name, duration_in_seconds=None, max_frame_length=None, sas_url=None, custom_headers=None, raw=False, polling=True, **operation_config): """Start capturing network packets for the site. Start capturing network packets for the site. :param resource_group_name: Name of the resource group to which the resource belongs. :type resource_group_name: str :param name: The name of the web app. :type name: str :param duration_in_seconds: The duration to keep capturing in seconds. :type duration_in_seconds: int :param max_frame_length: The maximum frame length in bytes (Optional). :type max_frame_length: int :param sas_url: The Blob URL to store capture file. :type sas_url: str :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns list or ClientRawResponse<list> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[list[~azure.mgmt.web.models.NetworkTrace]] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[list[~azure.mgmt.web.models.NetworkTrace]]] :raises: :class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>` """ raw_result = self._start_web_site_network_trace_operation_initial( resource_group_name=resource_group_name, name=name, duration_in_seconds=duration_in_seconds, max_frame_length=max_frame_length, sas_url=sas_url, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): deserialized = self._deserialize('[NetworkTrace]', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
[ "def", "start_web_site_network_trace_operation", "(", "self", ",", "resource_group_name", ",", "name", ",", "duration_in_seconds", "=", "None", ",", "max_frame_length", "=", "None", ",", "sas_url", "=", "None", ",", "custom_headers", "=", "None", ",", "raw", "=", "False", ",", "polling", "=", "True", ",", "*", "*", "operation_config", ")", ":", "raw_result", "=", "self", ".", "_start_web_site_network_trace_operation_initial", "(", "resource_group_name", "=", "resource_group_name", ",", "name", "=", "name", ",", "duration_in_seconds", "=", "duration_in_seconds", ",", "max_frame_length", "=", "max_frame_length", ",", "sas_url", "=", "sas_url", ",", "custom_headers", "=", "custom_headers", ",", "raw", "=", "True", ",", "*", "*", "operation_config", ")", "def", "get_long_running_output", "(", "response", ")", ":", "deserialized", "=", "self", ".", "_deserialize", "(", "'[NetworkTrace]'", ",", "response", ")", "if", "raw", ":", "client_raw_response", "=", "ClientRawResponse", "(", "deserialized", ",", "response", ")", "return", "client_raw_response", "return", "deserialized", "lro_delay", "=", "operation_config", ".", "get", "(", "'long_running_operation_timeout'", ",", "self", ".", "config", ".", "long_running_operation_timeout", ")", "if", "polling", "is", "True", ":", "polling_method", "=", "ARMPolling", "(", "lro_delay", ",", "*", "*", "operation_config", ")", "elif", "polling", "is", "False", ":", "polling_method", "=", "NoPolling", "(", ")", "else", ":", "polling_method", "=", "polling", "return", "LROPoller", "(", "self", ".", "_client", ",", "raw_result", ",", "get_long_running_output", ",", "polling_method", ")" ]
Start capturing network packets for the site. Start capturing network packets for the site. :param resource_group_name: Name of the resource group to which the resource belongs. :type resource_group_name: str :param name: The name of the web app. :type name: str :param duration_in_seconds: The duration to keep capturing in seconds. :type duration_in_seconds: int :param max_frame_length: The maximum frame length in bytes (Optional). :type max_frame_length: int :param sas_url: The Blob URL to store capture file. :type sas_url: str :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns list or ClientRawResponse<list> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[list[~azure.mgmt.web.models.NetworkTrace]] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[list[~azure.mgmt.web.models.NetworkTrace]]] :raises: :class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
[ "Start", "capturing", "network", "packets", "for", "the", "site", "." ]
python
test
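A minimal caller sketch for the operation above, assuming an already-authenticated WebSiteManagementClient whose web apps operations group exposes this method; the client variable, resource group, and app names are illustrative placeholders, not taken from the source:

from azure.mgmt.web import WebSiteManagementClient

# `credentials` and `subscription_id` are assumed to exist already.
client = WebSiteManagementClient(credentials, subscription_id)
poller = client.web_apps.start_web_site_network_trace_operation(
    resource_group_name='my-rg',   # placeholder
    name='my-webapp',              # placeholder
    duration_in_seconds=60,
)
traces = poller.result()           # LROPoller blocks until the operation completes
for trace in traces:               # deserialized list of NetworkTrace models
    print(trace)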
malinoff/structures
structures/core.py
https://github.com/malinoff/structures/blob/36b1d641d399cd0b2a824704da53d8b5c8bd4f10/structures/core.py#L62-L79
def build_stream(self, obj, stream: BytesIO, context=None) -> None: """ Build bytes from the python object into the stream. :param obj: Python object to build bytes from. :param stream: A ``io.BytesIO`` instance to write bytes into. :param context: Optional context dictionary. """ if context is None: context = Context() if not isinstance(context, Context): context = Context(context) try: self._build_stream(obj, stream, context) except Error: raise except Exception as exc: raise BuildingError(str(exc))
[ "def", "build_stream", "(", "self", ",", "obj", ",", "stream", ":", "BytesIO", ",", "context", "=", "None", ")", "->", "None", ":", "if", "context", "is", "None", ":", "context", "=", "Context", "(", ")", "if", "not", "isinstance", "(", "context", ",", "Context", ")", ":", "context", "=", "Context", "(", "context", ")", "try", ":", "self", ".", "_build_stream", "(", "obj", ",", "stream", ",", "context", ")", "except", "Error", ":", "raise", "except", "Exception", "as", "exc", ":", "raise", "BuildingError", "(", "str", "(", "exc", ")", ")" ]
Build bytes from the python object into the stream. :param obj: Python object to build bytes from. :param stream: A ``io.BytesIO`` instance to write bytes into. :param context: Optional context dictionary.
[ "Build", "bytes", "from", "the", "python", "object", "into", "the", "stream", "." ]
python
train
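A usage sketch for build_stream, assuming `header` is some concrete Construct instance defined with this library (the name and the dict payload are invented for illustration):

from io import BytesIO

stream = BytesIO()
header.build_stream({'version': 1}, stream)   # raises BuildingError on bad input
raw = stream.getvalue()                       # the serialized bytes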
senaite/senaite.core
bika/lims/exportimport/instruments/horiba/jobinyvon/parser.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/exportimport/instruments/horiba/jobinyvon/parser.py#L66-L77
def parse_line_headers(self, line): """We must build headers carefully: there are multiple blank values in the header row, and the instrument may just add more for all we know. """ headers = line.split(",") for i, v in enumerate(headers): if v: headers[i] = v else: headers[i] = str(i) self.headers = headers
[ "def", "parse_line_headers", "(", "self", ",", "line", ")", ":", "headers", "=", "line", ".", "split", "(", "\",\"", ")", "for", "i", ",", "v", "in", "enumerate", "(", "headers", ")", ":", "if", "v", ":", "headers", "[", "i", "]", "=", "v", "else", ":", "headers", "[", "i", "]", "=", "str", "(", "i", ")", "self", ".", "headers", "=", "headers" ]
We must build headers carefully: there are multiple blank values in the header row, and the instrument may just add more for all we know.
[ "We", "must", "build", "headers", "carefully", ":", "there", "are", "multiple", "blank", "values", "in", "the", "header", "row", "and", "the", "instrument", "may", "just", "add", "more", "for", "all", "we", "know", "." ]
python
train
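The blank-column handling above is easiest to see on a toy header line; this standalone sketch re-implements the same loop rather than importing the parser:

line = "SampleID,,Result,,Units"
headers = line.split(",")
for i, v in enumerate(headers):
    headers[i] = v if v else str(i)   # blank headers become positional names
print(headers)   # ['SampleID', '1', 'Result', '3', 'Units']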
GPflow/GPflow
gpflow/models/gplvm.py
https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/models/gplvm.py#L123-L166
def _build_likelihood(self): """ Construct a tensorflow function to compute the bound on the marginal likelihood. """ pX = DiagonalGaussian(self.X_mean, self.X_var) num_inducing = len(self.feature) psi0 = tf.reduce_sum(expectation(pX, self.kern)) psi1 = expectation(pX, (self.kern, self.feature)) psi2 = tf.reduce_sum(expectation(pX, (self.kern, self.feature), (self.kern, self.feature)), axis=0) Kuu = features.Kuu(self.feature, self.kern, jitter=settings.jitter) L = tf.cholesky(Kuu) sigma2 = self.likelihood.variance sigma = tf.sqrt(sigma2) # Compute intermediate matrices A = tf.matrix_triangular_solve(L, tf.transpose(psi1), lower=True) / sigma tmp = tf.matrix_triangular_solve(L, psi2, lower=True) AAT = tf.matrix_triangular_solve(L, tf.transpose(tmp), lower=True) / sigma2 B = AAT + tf.eye(num_inducing, dtype=settings.float_type) LB = tf.cholesky(B) log_det_B = 2. * tf.reduce_sum(tf.log(tf.matrix_diag_part(LB))) c = tf.matrix_triangular_solve(LB, tf.matmul(A, self.Y), lower=True) / sigma # KL[q(x) || p(x)] dX_var = self.X_var if len(self.X_var.get_shape()) == 2 else tf.matrix_diag_part(self.X_var) NQ = tf.cast(tf.size(self.X_mean), settings.float_type) D = tf.cast(tf.shape(self.Y)[1], settings.float_type) KL = -0.5 * tf.reduce_sum(tf.log(dX_var)) \ + 0.5 * tf.reduce_sum(tf.log(self.X_prior_var)) \ - 0.5 * NQ \ + 0.5 * tf.reduce_sum((tf.square(self.X_mean - self.X_prior_mean) + dX_var) / self.X_prior_var) # compute log marginal bound ND = tf.cast(tf.size(self.Y), settings.float_type) bound = -0.5 * ND * tf.log(2 * np.pi * sigma2) bound += -0.5 * D * log_det_B bound += -0.5 * tf.reduce_sum(tf.square(self.Y)) / sigma2 bound += 0.5 * tf.reduce_sum(tf.square(c)) bound += -0.5 * D * (tf.reduce_sum(psi0) / sigma2 - tf.reduce_sum(tf.matrix_diag_part(AAT))) bound -= KL return bound
[ "def", "_build_likelihood", "(", "self", ")", ":", "pX", "=", "DiagonalGaussian", "(", "self", ".", "X_mean", ",", "self", ".", "X_var", ")", "num_inducing", "=", "len", "(", "self", ".", "feature", ")", "psi0", "=", "tf", ".", "reduce_sum", "(", "expectation", "(", "pX", ",", "self", ".", "kern", ")", ")", "psi1", "=", "expectation", "(", "pX", ",", "(", "self", ".", "kern", ",", "self", ".", "feature", ")", ")", "psi2", "=", "tf", ".", "reduce_sum", "(", "expectation", "(", "pX", ",", "(", "self", ".", "kern", ",", "self", ".", "feature", ")", ",", "(", "self", ".", "kern", ",", "self", ".", "feature", ")", ")", ",", "axis", "=", "0", ")", "Kuu", "=", "features", ".", "Kuu", "(", "self", ".", "feature", ",", "self", ".", "kern", ",", "jitter", "=", "settings", ".", "jitter", ")", "L", "=", "tf", ".", "cholesky", "(", "Kuu", ")", "sigma2", "=", "self", ".", "likelihood", ".", "variance", "sigma", "=", "tf", ".", "sqrt", "(", "sigma2", ")", "# Compute intermediate matrices", "A", "=", "tf", ".", "matrix_triangular_solve", "(", "L", ",", "tf", ".", "transpose", "(", "psi1", ")", ",", "lower", "=", "True", ")", "/", "sigma", "tmp", "=", "tf", ".", "matrix_triangular_solve", "(", "L", ",", "psi2", ",", "lower", "=", "True", ")", "AAT", "=", "tf", ".", "matrix_triangular_solve", "(", "L", ",", "tf", ".", "transpose", "(", "tmp", ")", ",", "lower", "=", "True", ")", "/", "sigma2", "B", "=", "AAT", "+", "tf", ".", "eye", "(", "num_inducing", ",", "dtype", "=", "settings", ".", "float_type", ")", "LB", "=", "tf", ".", "cholesky", "(", "B", ")", "log_det_B", "=", "2.", "*", "tf", ".", "reduce_sum", "(", "tf", ".", "log", "(", "tf", ".", "matrix_diag_part", "(", "LB", ")", ")", ")", "c", "=", "tf", ".", "matrix_triangular_solve", "(", "LB", ",", "tf", ".", "matmul", "(", "A", ",", "self", ".", "Y", ")", ",", "lower", "=", "True", ")", "/", "sigma", "# KL[q(x) || p(x)]", "dX_var", "=", "self", ".", "X_var", "if", "len", "(", "self", ".", "X_var", ".", "get_shape", "(", ")", ")", "==", "2", "else", "tf", ".", "matrix_diag_part", "(", "self", ".", "X_var", ")", "NQ", "=", "tf", ".", "cast", "(", "tf", ".", "size", "(", "self", ".", "X_mean", ")", ",", "settings", ".", "float_type", ")", "D", "=", "tf", ".", "cast", "(", "tf", ".", "shape", "(", "self", ".", "Y", ")", "[", "1", "]", ",", "settings", ".", "float_type", ")", "KL", "=", "-", "0.5", "*", "tf", ".", "reduce_sum", "(", "tf", ".", "log", "(", "dX_var", ")", ")", "+", "0.5", "*", "tf", ".", "reduce_sum", "(", "tf", ".", "log", "(", "self", ".", "X_prior_var", ")", ")", "-", "0.5", "*", "NQ", "+", "0.5", "*", "tf", ".", "reduce_sum", "(", "(", "tf", ".", "square", "(", "self", ".", "X_mean", "-", "self", ".", "X_prior_mean", ")", "+", "dX_var", ")", "/", "self", ".", "X_prior_var", ")", "# compute log marginal bound", "ND", "=", "tf", ".", "cast", "(", "tf", ".", "size", "(", "self", ".", "Y", ")", ",", "settings", ".", "float_type", ")", "bound", "=", "-", "0.5", "*", "ND", "*", "tf", ".", "log", "(", "2", "*", "np", ".", "pi", "*", "sigma2", ")", "bound", "+=", "-", "0.5", "*", "D", "*", "log_det_B", "bound", "+=", "-", "0.5", "*", "tf", ".", "reduce_sum", "(", "tf", ".", "square", "(", "self", ".", "Y", ")", ")", "/", "sigma2", "bound", "+=", "0.5", "*", "tf", ".", "reduce_sum", "(", "tf", ".", "square", "(", "c", ")", ")", "bound", "+=", "-", "0.5", "*", "D", "*", "(", "tf", ".", "reduce_sum", "(", "psi0", ")", "/", "sigma2", "-", "tf", ".", "reduce_sum", "(", "tf", ".", "matrix_diag_part", "(", "AAT", ")", ")", ")", "bound", "-=", "KL", "return", "bound" ]
Construct a tensorflow function to compute the bound on the marginal likelihood.
[ "Construct", "a", "tensorflow", "function", "to", "compute", "the", "bound", "on", "the", "marginal", "likelihood", "." ]
python
train
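For orientation, the `bound` assembled above has the familiar collapsed variational form for the Bayesian GP-LVM (Titsias & Lawrence, 2010); reading it off the code, with B = AA^T + I and c as computed there (notation mine, not GPflow's):

\mathcal{L} = -\tfrac{ND}{2}\log(2\pi\sigma^2) - \tfrac{D}{2}\log\lvert B\rvert - \frac{\lVert Y\rVert_F^2}{2\sigma^2} + \tfrac{1}{2}\lVert c\rVert^2 - \tfrac{D}{2}\Big(\frac{\psi_0}{\sigma^2} - \operatorname{tr}(AA^\top)\Big) - \mathrm{KL}\big[q(X)\,\Vert\,p(X)\big]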
mitsei/dlkit
dlkit/json_/commenting/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/commenting/sessions.py#L2465-L2486
def get_parent_books(self, book_id):
        """Gets the parent books of the given ``id``.

        arg:    book_id (osid.id.Id): the ``Id`` of the ``Book`` to
                query
        return: (osid.commenting.BookList) - the parent books of the
                ``id``
        raise:  NotFound - a ``Book`` identified by ``Id`` is not found
        raise:  NullArgument - ``book_id`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  PermissionDenied - authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for
        # osid.resource.BinHierarchySession.get_parent_bins
        if self._catalog_session is not None:
            return self._catalog_session.get_parent_catalogs(catalog_id=book_id)
        return BookLookupSession(
            self._proxy,
            self._runtime).get_books_by_ids(
                list(self.get_parent_book_ids(book_id)))
[ "def", "get_parent_books", "(", "self", ",", "book_id", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchySession.get_parent_bins", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "get_parent_catalogs", "(", "catalog_id", "=", "book_id", ")", "return", "BookLookupSession", "(", "self", ".", "_proxy", ",", "self", ".", "_runtime", ")", ".", "get_books_by_ids", "(", "list", "(", "self", ".", "get_parent_book_ids", "(", "book_id", ")", ")", ")" ]
Gets the parent books of the given ``id``.

arg:    book_id (osid.id.Id): the ``Id`` of the ``Book`` to
        query
return: (osid.commenting.BookList) - the parent books of the
        ``id``
raise:  NotFound - a ``Book`` identified by ``Id`` is not found
raise:  NullArgument - ``book_id`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "parent", "books", "of", "the", "given", "id", "." ]
python
train
openid/python-openid
openid/consumer/discover.py
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/consumer/discover.py#L303-L311
def normalizeURL(url): """Normalize a URL, converting normalization failures to DiscoveryFailure""" try: normalized = urinorm.urinorm(url) except ValueError, why: raise DiscoveryFailure('Normalizing identifier: %s' % (why[0],), None) else: return urlparse.urldefrag(normalized)[0]
[ "def", "normalizeURL", "(", "url", ")", ":", "try", ":", "normalized", "=", "urinorm", ".", "urinorm", "(", "url", ")", "except", "ValueError", ",", "why", ":", "raise", "DiscoveryFailure", "(", "'Normalizing identifier: %s'", "%", "(", "why", "[", "0", "]", ",", ")", ",", "None", ")", "else", ":", "return", "urlparse", ".", "urldefrag", "(", "normalized", ")", "[", "0", "]" ]
Normalize a URL, converting normalization failures to DiscoveryFailure
[ "Normalize", "a", "URL", "converting", "normalization", "failures", "to", "DiscoveryFailure" ]
python
train
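Usage sketch for normalizeURL (Python 2, to match the source file):

try:
    url = normalizeURL('HTTP://Example.COM/a/../b#frag')
except DiscoveryFailure, why:
    print 'not a usable identifier:', why
else:
    print url   # canonical form, fragment removed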
clinicedc/edc-notification
edc_notification/site_notifications.py
https://github.com/clinicedc/edc-notification/blob/79e43a44261e37566c63a8780d80b0d8ece89cc9/edc_notification/site_notifications.py#L86-L98
def notify(self, instance=None, **kwargs): """A wrapper to call notification.notify for each notification class associated with the given model instance. Returns a dictionary of {notification.name: model, ...} including only notifications sent. """ notified = {} for notification_cls in self.registry.values(): notification = notification_cls() if notification.notify(instance=instance, **kwargs): notified.update({notification_cls.name: instance._meta.label_lower}) return notified
[ "def", "notify", "(", "self", ",", "instance", "=", "None", ",", "*", "*", "kwargs", ")", ":", "notified", "=", "{", "}", "for", "notification_cls", "in", "self", ".", "registry", ".", "values", "(", ")", ":", "notification", "=", "notification_cls", "(", ")", "if", "notification", ".", "notify", "(", "instance", "=", "instance", ",", "*", "*", "kwargs", ")", ":", "notified", ".", "update", "(", "{", "notification_cls", ".", "name", ":", "instance", ".", "_meta", ".", "label_lower", "}", ")", "return", "notified" ]
A wrapper to call notification.notify for each notification class associated with the given model instance. Returns a dictionary of {notification.name: model, ...} including only notifications sent.
[ "A", "wrapper", "to", "call", "notification", ".", "notify", "for", "each", "notification", "class", "associated", "with", "the", "given", "model", "instance", "." ]
python
train
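A sketch of a typical call site for the registry method above, e.g. a Django post_save handler; the signal wiring and the `site_notifications` import path are assumptions for illustration:

from edc_notification.site_notifications import site_notifications

def on_post_save(sender, instance, **kwargs):
    # returns {notification.name: model_label} for notifications actually sent
    notified = site_notifications.notify(instance=instance, **kwargs)
    for name, model in notified.items():
        print('sent %s for %s' % (name, model))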
shidenggui/easyquotation
easyquotation/basequotation.py
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/basequotation.py#L89-L94
def market_snapshot(self, prefix=False):
        """return all market quotation snapshot

        :param prefix: if prefix is True, the returned quotation dict's
            stock_code keys start with the sh/sz market flag
        """
        return self.get_stock_data(self.stock_list, prefix=prefix)
[ "def", "market_snapshot", "(", "self", ",", "prefix", "=", "False", ")", ":", "return", "self", ".", "get_stock_data", "(", "self", ".", "stock_list", ",", "prefix", "=", "prefix", ")" ]
return all market quotation snapshot

:param prefix: if prefix is True, the returned quotation dict's stock_code keys start with the sh/sz market flag
[ "return", "all", "market", "quotation", "snapshot", ":", "param", "prefix", ":", "if", "prefix", "is", "True", "the", "returned", "quotation", "dict", "s", "stock_code", "keys", "start", "with", "the", "sh", "/", "sz", "market", "flag" ]
python
train
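Typical use goes through the package's quotation factory; a short sketch following the project's README conventions:

import easyquotation

quotation = easyquotation.use('sina')              # pick a data source
snapshot = quotation.market_snapshot(prefix=True)
# keys look like 'sh600000' with prefix=True, '600000' without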
mitsei/dlkit
dlkit/runtime/impls/proxy/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/runtime/impls/proxy/sessions.py#L34-L69
def get_proxy(self, input_):
        """Gets a proxy.

        :param input: a proxy condition
        :type input: ``osid.proxy.ProxyCondition``
        :return: a proxy
        :rtype: ``osid.proxy.Proxy``
        :raise: ``NullArgument`` -- ``input`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        :raise: ``Unsupported`` -- ``input`` is not of this service
        *compliance: mandatory -- This method must be implemented.*

        """
        if input_._http_request is not None:
            authentication = DjangoAuthentication()
            authentication.set_django_user(input_._http_request.user, input_._use_user_id)
        elif input_._xblock_user is not None:
            authentication = XBlockAuthentication()
            authentication.set_xblock_user(input_._xblock_user)
        else:
            authentication = None
        if authentication is not None:
            effective_agent_id = authentication.get_agent_id()
        else:
            effective_agent_id = input_._effective_agent_id
        if input_._locale is not None:
            locale = input_._locale
        else:
            locale = None
        return rules.Proxy(authentication=authentication,
                           effective_agent_id=effective_agent_id,
                           locale=locale)
[ "def", "get_proxy", "(", "self", ",", "input_", ")", ":", "if", "input_", ".", "_http_request", "is", "not", "None", ":", "authentication", "=", "DjangoAuthentication", "(", ")", "authentication", ".", "set_django_user", "(", "input_", ".", "_http_request", ".", "user", ",", "input_", ".", "_use_user_id", ")", "elif", "input_", ".", "_xblock_user", "is", "not", "None", ":", "authentication", "=", "XBlockAuthentication", "(", ")", "authentication", ".", "set_xblock_user", "(", "input_", ".", "_xblock_user", ")", "else", ":", "authentication", "=", "None", "if", "authentication", "is", "not", "None", ":", "effective_agent_id", "=", "authentication", ".", "get_agent_id", "(", ")", "else", ":", "effective_agent_id", "=", "input_", ".", "_effective_agent_id", "if", "input_", ".", "_locale", "is", "not", "None", ":", "locale", "=", "input_", ".", "_locale", "else", ":", "locale", "=", "None", "return", "rules", ".", "Proxy", "(", "authentication", "=", "authentication", ",", "effective_agent_id", "=", "effective_agent_id", ",", "locale", "=", "locale", ")" ]
Gets a proxy.

:param input: a proxy condition
:type input: ``osid.proxy.ProxyCondition``
:return: a proxy
:rtype: ``osid.proxy.Proxy``
:raise: ``NullArgument`` -- ``input`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
:raise: ``Unsupported`` -- ``input`` is not of this service
*compliance: mandatory -- This method must be implemented.*
[ "Gets", "a", "proxy", "." ]
python
train
tensorlayer/tensorlayer
tensorlayer/prepro.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L881-L915
def flip_axis(x, axis=1, is_random=False):
    """Flip the axis of an image, such as flip left and right, up and down, randomly or non-randomly.

    Parameters
    ----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    axis : int
        Which axis to flip.
            - 0, flip up and down
            - 1, flip left and right
            - 2, flip channel
    is_random : boolean
        If True, randomly flip. Default is False.

    Returns
    -------
    numpy.array
        A processed image.

    """
    if is_random:
        factor = np.random.uniform(-1, 1)
        if factor > 0:
            x = np.asarray(x).swapaxes(axis, 0)
            x = x[::-1, ...]
            x = x.swapaxes(0, axis)
            return x
        else:
            return x
    else:
        x = np.asarray(x).swapaxes(axis, 0)
        x = x[::-1, ...]
        x = x.swapaxes(0, axis)
        return x
[ "def", "flip_axis", "(", "x", ",", "axis", "=", "1", ",", "is_random", "=", "False", ")", ":", "if", "is_random", ":", "factor", "=", "np", ".", "random", ".", "uniform", "(", "-", "1", ",", "1", ")", "if", "factor", ">", "0", ":", "x", "=", "np", ".", "asarray", "(", "x", ")", ".", "swapaxes", "(", "axis", ",", "0", ")", "x", "=", "x", "[", ":", ":", "-", "1", ",", "...", "]", "x", "=", "x", ".", "swapaxes", "(", "0", ",", "axis", ")", "return", "x", "else", ":", "return", "x", "else", ":", "x", "=", "np", ".", "asarray", "(", "x", ")", ".", "swapaxes", "(", "axis", ",", "0", ")", "x", "=", "x", "[", ":", ":", "-", "1", ",", "...", "]", "x", "=", "x", ".", "swapaxes", "(", "0", ",", "axis", ")", "return", "x" ]
Flip the axis of an image, such as flip left and right, up and down, randomly or non-randomly.

Parameters
----------
x : numpy.array
    An image with dimension of [row, col, channel] (default).
axis : int
    Which axis to flip.
        - 0, flip up and down
        - 1, flip left and right
        - 2, flip channel
is_random : boolean
    If True, randomly flip. Default is False.

Returns
-------
numpy.array
    A processed image.
[ "Flip", "the", "axis", "of", "an", "image", "such", "as", "flip", "left", "and", "right", "up", "and", "down", "randomly", "or", "non", "-", "randomly" ]
python
valid
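A quick check of flip_axis on a dummy array; minimal sketch:

import numpy as np

x = np.arange(12).reshape(2, 3, 2)            # [row, col, channel]
lr = flip_axis(x, axis=1)                     # deterministic left-right flip
assert np.array_equal(lr, x[:, ::-1, :])
maybe = flip_axis(x, axis=0, is_random=True)  # flipped up-down roughly half the time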
lorien/grab
grab/deprecated.py
https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/deprecated.py#L76-L93
def find_link(self, href_pattern, make_absolute=True):
        """
        Find link in response body whose href value matches ``href_pattern``.

        Returns found url or None.
        """

        if make_absolute:
            self.tree.make_links_absolute(self.doc.url)
        if isinstance(href_pattern, six.text_type):
            raise GrabMisuseError('Method `find_link` accepts only '
                                  'byte-string argument')
        href_pattern = make_unicode(href_pattern)
        for elem, _, link, _ in self.tree.iterlinks():
            if elem.tag == 'a' and href_pattern in link:
                return link
        return None
[ "def", "find_link", "(", "self", ",", "href_pattern", ",", "make_absolute", "=", "True", ")", ":", "if", "make_absolute", ":", "self", ".", "tree", ".", "make_links_absolute", "(", "self", ".", "doc", ".", "url", ")", "if", "isinstance", "(", "href_pattern", ",", "six", ".", "text_type", ")", ":", "raise", "GrabMisuseError", "(", "'Method `find_link` accepts only '", "'byte-string argument'", ")", "href_pattern", "=", "make_unicode", "(", "href_pattern", ")", "for", "elem", ",", "_", ",", "link", ",", "_", "in", "self", ".", "tree", ".", "iterlinks", "(", ")", ":", "if", "elem", ".", "tag", "==", "'a'", "and", "href_pattern", "in", "link", ":", "return", "link", "return", "None" ]
Find link in response body whose href value matches ``href_pattern``.

Returns found url or None.
[ "Find", "link", "in", "response", "body", "whose", "href", "value", "matches", "href_pattern", "." ]
python
train
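Usage sketch, assuming the deprecated mixin methods are exposed on the Grab instance (as grab/deprecated.py suggests); note the quirk kept above: despite the error message's wording, the method rejects unicode and expects a byte string, which it then decodes itself:

g = Grab()
g.go('https://example.com/')
url = g.find_link(b'download')   # bytes, per the isinstance check
if url is not None:
    g.go(url)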
kata198/AdvancedHTMLParser
AdvancedHTMLParser/Tags.py
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Tags.py#L751-L785
def insertAfter(self, child, afterChild):
        '''
            insertAfter - Inserts a child after #afterChild

            @param child <AdvancedTag/str> - Child block to insert

            @param afterChild <AdvancedTag/str> - Child block to insert after. If None, will be appended

            @return - The added child. Note, if it is a text block (str), the return will NOT be linked by reference.
        '''
        # If after child is null/None, just append
        if afterChild is None:
            return self.appendBlock(child)

        isChildTag = isTagNode(child)

        myBlocks = self.blocks
        myChildren = self.children

        # Determine where we need to insert this both in "blocks" and, if a tag, "children"
        try:
            blocksIdx = myBlocks.index(afterChild)
            if isChildTag:
                childrenIdx = myChildren.index(afterChild)
        except ValueError:
            raise ValueError('Provided "afterChild" is not a child of element, cannot insert.')

        # Append child to requested spot
        self.blocks = myBlocks[:blocksIdx+1] + [child] + myBlocks[blocksIdx+1:]
        if isChildTag:
            self.children = myChildren[:childrenIdx+1] + [child] + myChildren[childrenIdx+1:]

        return child
[ "def", "insertAfter", "(", "self", ",", "child", ",", "afterChild", ")", ":", "# If after child is null/None, just append", "if", "afterChild", "is", "None", ":", "return", "self", ".", "appendBlock", "(", "child", ")", "isChildTag", "=", "isTagNode", "(", "child", ")", "myBlocks", "=", "self", ".", "blocks", "myChildren", "=", "self", ".", "children", "# Determine where we need to insert this both in \"blocks\" and, if a tag, \"children\"", "try", ":", "blocksIdx", "=", "myBlocks", ".", "index", "(", "afterChild", ")", "if", "isChildTag", ":", "childrenIdx", "=", "myChildren", ".", "index", "(", "afterChild", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'Provided \"afterChild\" is not a child of element, cannot insert.'", ")", "# Append child to requested spot", "self", ".", "blocks", "=", "myBlocks", "[", ":", "blocksIdx", "+", "1", "]", "+", "[", "child", "]", "+", "myBlocks", "[", "blocksIdx", "+", "1", ":", "]", "if", "isChildTag", ":", "self", ".", "children", "=", "myChildren", "[", ":", "childrenIdx", "+", "1", "]", "+", "[", "child", "]", "+", "myChildren", "[", "childrenIdx", "+", "1", ":", "]", "return", "child" ]
insertAfter - Inserts a child after #afterChild

@param child <AdvancedTag/str> - Child block to insert

@param afterChild <AdvancedTag/str> - Child block to insert after. If None, will be appended

@return - The added child. Note, if it is a text block (str), the return will NOT be linked by reference.
[ "insertAfter", "-", "Inserts", "a", "child", "after", "#afterChild" ]
python
train
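Call-pattern sketch with AdvancedHTMLParser; constructing an AdvancedTag directly from a tag name follows the library's usual style, but treat the details as illustrative:

from AdvancedHTMLParser import AdvancedHTMLParser
from AdvancedHTMLParser.Tags import AdvancedTag

parser = AdvancedHTMLParser()
parser.parseStr('<div id="main"><span id="a">first</span></div>')
main = parser.getElementById('main')
a = parser.getElementById('a')
newTag = AdvancedTag('span')
main.insertAfter(newTag, a)   # newTag now directly follows #a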
user-cont/conu
conu/backend/nspawn/image.py
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/nspawn/image.py#L256-L271
def create_snapshot(self, name, tag):
        """
        Create new instance of image with snapshot image (it is copied inside class constructor)

        :param name: str - name of image - not used now
        :param tag: str - tag for image
        :return: NspawnImage instance
        """
        source = self.local_location
        logger.debug("Create Snapshot: %s -> %s" % (source, name))
        # FIXME: actually create the snapshot via clone command
        if name and tag:
            output_tag = "{}:{}".format(name, tag)
        else:
            output_tag = name or tag

        return self.__class__(repository=source, tag=output_tag)
[ "def", "create_snapshot", "(", "self", ",", "name", ",", "tag", ")", ":", "source", "=", "self", ".", "local_location", "logger", ".", "debug", "(", "\"Create Snapshot: %s -> %s\"", "%", "(", "source", ",", "name", ")", ")", "# FIXME: actually create the snapshot via clone command", "if", "name", "and", "tag", ":", "output_tag", "=", "\"{}:{}\"", ".", "format", "(", "name", ",", "tag", ")", "else", ":", "output_tag", "=", "name", "or", "tag", "return", "self", ".", "__class__", "(", "repository", "=", "source", ",", "tag", "=", "output_tag", ")" ]
Create new instance of image with snapshot image (it is copied inside class constructor)

:param name: str - name of image - not used now
:param tag: str - tag for image
:return: NspawnImage instance
[ "Create", "new", "instance", "of", "image", "with", "snapshot", "image", "(", "it", "is", "copied", "inside", "class", "constructor", ")" ]
python
train
osrg/ryu
ryu/lib/ovs/vsctl.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/ovs/vsctl.py#L2142-L2158
def _check_value(self, ovsrec_row, column_value): """ :type column_value: tuple of column and value_json """ column, value_json = column_value column_schema = ovsrec_row._table.columns[column] value = ovs.db.data.Datum.from_json( column_schema.type, value_json).to_python(ovs.db.idl._uuid_to_row) datum = getattr(ovsrec_row, column) if column_schema.type.is_map(): for k, v in value.items(): if k in datum and datum[k] == v: return True elif datum == value: return True return False
[ "def", "_check_value", "(", "self", ",", "ovsrec_row", ",", "column_value", ")", ":", "column", ",", "value_json", "=", "column_value", "column_schema", "=", "ovsrec_row", ".", "_table", ".", "columns", "[", "column", "]", "value", "=", "ovs", ".", "db", ".", "data", ".", "Datum", ".", "from_json", "(", "column_schema", ".", "type", ",", "value_json", ")", ".", "to_python", "(", "ovs", ".", "db", ".", "idl", ".", "_uuid_to_row", ")", "datum", "=", "getattr", "(", "ovsrec_row", ",", "column", ")", "if", "column_schema", ".", "type", ".", "is_map", "(", ")", ":", "for", "k", ",", "v", "in", "value", ".", "items", "(", ")", ":", "if", "k", "in", "datum", "and", "datum", "[", "k", "]", "==", "v", ":", "return", "True", "elif", "datum", "==", "value", ":", "return", "True", "return", "False" ]
:type column_value: tuple of column and value_json
[ ":", "type", "column_value", ":", "tuple", "of", "column", "and", "value_json" ]
python
train
jplusplus/statscraper
statscraper/base_scraper.py
https://github.com/jplusplus/statscraper/blob/932ec048b23d15b3dbdaf829facc55fd78ec0109/statscraper/base_scraper.py#L560-L565
def move_to_top(self): """Move to root item.""" self.current_item = self.root for f in self._hooks["top"]: f(self) return self
[ "def", "move_to_top", "(", "self", ")", ":", "self", ".", "current_item", "=", "self", ".", "root", "for", "f", "in", "self", ".", "_hooks", "[", "\"top\"", "]", ":", "f", "(", "self", ")", "return", "self" ]
Move to root item.
[ "Move", "to", "root", "item", "." ]
python
train
JoelBender/bacpypes
py25/bacpypes/primitivedata.py
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/primitivedata.py#L137-L173
def decode(self, pdu): """Decode a tag from the PDU.""" try: tag = pdu.get() # extract the type self.tagClass = (tag >> 3) & 0x01 # extract the tag number self.tagNumber = (tag >> 4) if (self.tagNumber == 0x0F): self.tagNumber = pdu.get() # extract the length self.tagLVT = tag & 0x07 if (self.tagLVT == 5): self.tagLVT = pdu.get() if (self.tagLVT == 254): self.tagLVT = pdu.get_short() elif (self.tagLVT == 255): self.tagLVT = pdu.get_long() elif (self.tagLVT == 6): self.tagClass = Tag.openingTagClass self.tagLVT = 0 elif (self.tagLVT == 7): self.tagClass = Tag.closingTagClass self.tagLVT = 0 # application tagged boolean has no more data if (self.tagClass == Tag.applicationTagClass) and (self.tagNumber == Tag.booleanAppTag): # tagLVT contains value self.tagData = '' else: # tagLVT contains length self.tagData = pdu.get_data(self.tagLVT) except DecodingError: raise InvalidTag("invalid tag encoding")
[ "def", "decode", "(", "self", ",", "pdu", ")", ":", "try", ":", "tag", "=", "pdu", ".", "get", "(", ")", "# extract the type", "self", ".", "tagClass", "=", "(", "tag", ">>", "3", ")", "&", "0x01", "# extract the tag number", "self", ".", "tagNumber", "=", "(", "tag", ">>", "4", ")", "if", "(", "self", ".", "tagNumber", "==", "0x0F", ")", ":", "self", ".", "tagNumber", "=", "pdu", ".", "get", "(", ")", "# extract the length", "self", ".", "tagLVT", "=", "tag", "&", "0x07", "if", "(", "self", ".", "tagLVT", "==", "5", ")", ":", "self", ".", "tagLVT", "=", "pdu", ".", "get", "(", ")", "if", "(", "self", ".", "tagLVT", "==", "254", ")", ":", "self", ".", "tagLVT", "=", "pdu", ".", "get_short", "(", ")", "elif", "(", "self", ".", "tagLVT", "==", "255", ")", ":", "self", ".", "tagLVT", "=", "pdu", ".", "get_long", "(", ")", "elif", "(", "self", ".", "tagLVT", "==", "6", ")", ":", "self", ".", "tagClass", "=", "Tag", ".", "openingTagClass", "self", ".", "tagLVT", "=", "0", "elif", "(", "self", ".", "tagLVT", "==", "7", ")", ":", "self", ".", "tagClass", "=", "Tag", ".", "closingTagClass", "self", ".", "tagLVT", "=", "0", "# application tagged boolean has no more data", "if", "(", "self", ".", "tagClass", "==", "Tag", ".", "applicationTagClass", ")", "and", "(", "self", ".", "tagNumber", "==", "Tag", ".", "booleanAppTag", ")", ":", "# tagLVT contains value", "self", ".", "tagData", "=", "''", "else", ":", "# tagLVT contains length", "self", ".", "tagData", "=", "pdu", ".", "get_data", "(", "self", ".", "tagLVT", ")", "except", "DecodingError", ":", "raise", "InvalidTag", "(", "\"invalid tag encoding\"", ")" ]
Decode a tag from the PDU.
[ "Decode", "a", "tag", "from", "the", "PDU", "." ]
python
train
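The bit unpacking at the top of decode() is easier to follow with a concrete first octet; a standalone sketch of the same arithmetic (plain Python, no bacpypes):

tag = 0x3E                       # first octet of a BACnet opening tag
tag_class  = (tag >> 3) & 0x01   # 1 -> context-specific class
tag_number = tag >> 4            # 3 -> context tag number 3
lvt        = tag & 0x07          # 6 -> rewritten to openingTagClass above
print(tag_class, tag_number, lvt)   # 1 3 6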
BreakingBytes/simkit
simkit/core/calculators.py
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/calculators.py#L106-L163
def get_covariance(datargs, outargs, vargs, datvar, outvar): """ Get covariance matrix. :param datargs: data arguments :param outargs: output arguments :param vargs: variable arguments :param datvar: variance of data arguments :param outvar: variance of output arguments :return: covariance """ # number of formula arguments that are not constant argn = len(vargs) # number of observations must be the same for all vargs nobs = 1 for m in xrange(argn): a = vargs[m] try: a = datargs[a] except (KeyError, TypeError): a = outargs[a] avar = outvar[a] else: avar = datvar[a] for n in xrange(argn): b = vargs[n] try: b = datargs[b] except (KeyError, TypeError): b = outargs[b] c = avar.get(b, 0.0) try: nobs = max(nobs, len(c)) except (TypeError, ValueError): LOGGER.debug('c of %s vs %s = %g', a, b, c) # covariance matrix is initially zeros cov = np.zeros((nobs, argn, argn)) # loop over arguments in both directions, fill in covariance for m in xrange(argn): a = vargs[m] try: a = datargs[a] except (KeyError, TypeError): a = outargs[a] avar = outvar[a] else: avar = datvar[a] for n in xrange(argn): b = vargs[n] try: b = datargs[b] except (KeyError, TypeError): b = outargs[b] cov[:, m, n] = avar.get(b, 0.0) if nobs == 1: cov = cov.squeeze() # squeeze out any extra dimensions LOGGER.debug('covariance:\n%r', cov) return cov
[ "def", "get_covariance", "(", "datargs", ",", "outargs", ",", "vargs", ",", "datvar", ",", "outvar", ")", ":", "# number of formula arguments that are not constant", "argn", "=", "len", "(", "vargs", ")", "# number of observations must be the same for all vargs", "nobs", "=", "1", "for", "m", "in", "xrange", "(", "argn", ")", ":", "a", "=", "vargs", "[", "m", "]", "try", ":", "a", "=", "datargs", "[", "a", "]", "except", "(", "KeyError", ",", "TypeError", ")", ":", "a", "=", "outargs", "[", "a", "]", "avar", "=", "outvar", "[", "a", "]", "else", ":", "avar", "=", "datvar", "[", "a", "]", "for", "n", "in", "xrange", "(", "argn", ")", ":", "b", "=", "vargs", "[", "n", "]", "try", ":", "b", "=", "datargs", "[", "b", "]", "except", "(", "KeyError", ",", "TypeError", ")", ":", "b", "=", "outargs", "[", "b", "]", "c", "=", "avar", ".", "get", "(", "b", ",", "0.0", ")", "try", ":", "nobs", "=", "max", "(", "nobs", ",", "len", "(", "c", ")", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "LOGGER", ".", "debug", "(", "'c of %s vs %s = %g'", ",", "a", ",", "b", ",", "c", ")", "# covariance matrix is initially zeros", "cov", "=", "np", ".", "zeros", "(", "(", "nobs", ",", "argn", ",", "argn", ")", ")", "# loop over arguments in both directions, fill in covariance", "for", "m", "in", "xrange", "(", "argn", ")", ":", "a", "=", "vargs", "[", "m", "]", "try", ":", "a", "=", "datargs", "[", "a", "]", "except", "(", "KeyError", ",", "TypeError", ")", ":", "a", "=", "outargs", "[", "a", "]", "avar", "=", "outvar", "[", "a", "]", "else", ":", "avar", "=", "datvar", "[", "a", "]", "for", "n", "in", "xrange", "(", "argn", ")", ":", "b", "=", "vargs", "[", "n", "]", "try", ":", "b", "=", "datargs", "[", "b", "]", "except", "(", "KeyError", ",", "TypeError", ")", ":", "b", "=", "outargs", "[", "b", "]", "cov", "[", ":", ",", "m", ",", "n", "]", "=", "avar", ".", "get", "(", "b", ",", "0.0", ")", "if", "nobs", "==", "1", ":", "cov", "=", "cov", ".", "squeeze", "(", ")", "# squeeze out any extra dimensions", "LOGGER", ".", "debug", "(", "'covariance:\\n%r'", ",", "cov", ")", "return", "cov" ]
Get covariance matrix. :param datargs: data arguments :param outargs: output arguments :param vargs: variable arguments :param datvar: variance of data arguments :param outvar: variance of output arguments :return: covariance
[ "Get", "covariance", "matrix", "." ]
python
train
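A toy invocation of get_covariance to make the dict-of-dicts layout concrete; all names here are invented for illustration:

datvar = {'x': {'x': 0.04, 'y': 0.01},
          'y': {'x': 0.01, 'y': 0.09}}
cov = get_covariance(datargs={'x': 'x', 'y': 'y'}, outargs={},
                     vargs=['x', 'y'], datvar=datvar, outvar={})
# -> array([[0.04, 0.01],
#           [0.01, 0.09]])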
esterhui/pypu
pypu/service_facebook.py
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_facebook.py#L147-L165
def _load_megapixels(self,directory):
        """Opens megapixel file, if contains '3.5' for instance,
        will scale all uploaded photos in directory to this size,
        the original photo is untouched.

        Returns None if file not found
        """
        #FIXME: should check if DB tracking file before using it
        fullfile=os.path.join(directory,MEGAPIXEL_FILE)

        try:
            mp=float(open(fullfile).readline())
            logger.debug("_load_megapixel: MP from file is %f",mp)
        except:
            logger.warning("Couldn't open image size file in %s, not scaling images"\
                %(directory))
            return None

        return mp
[ "def", "_load_megapixels", "(", "self", ",", "directory", ")", ":", "#FIXME: should check if DB tracking file before using it", "fullfile", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "MEGAPIXEL_FILE", ")", "try", ":", "mp", "=", "float", "(", "open", "(", "fullfile", ")", ".", "readline", "(", ")", ")", "logger", ".", "debug", "(", "\"_load_megapixel: MP from file is %f\"", ",", "mp", ")", "except", ":", "logger", ".", "warning", "(", "\"Couldn't open image size file in %s, not scaling images\"", "%", "(", "directory", ")", ")", "return", "None", "return", "mp" ]
Opens megapixel file, if contains '3.5' for instance,
will scale all uploaded photos in directory to this size,
the original photo is untouched.

Returns None if file not found
[ "Opens", "megapixel", "file", "if", "contains", "3", ".", "5", "for", "instance", "will", "scale", "all", "uploaded", "photos", "in", "directory", "to", "this", "size", "the", "original", "photo", "is", "untouched", ".", "Returns", "None", "if", "file", "not", "found" ]
python
train
ozak/georasters
georasters/georasters.py
https://github.com/ozak/georasters/blob/0612bd91bb2a2cb2f1d59ba89c1ff131dae27d70/georasters/georasters.py#L477-L480
def copy(self): """Returns copy of itself""" return GeoRaster(self.raster.copy(), self.geot, nodata_value=self.nodata_value, projection=self.projection, datatype=self.datatype)
[ "def", "copy", "(", "self", ")", ":", "return", "GeoRaster", "(", "self", ".", "raster", ".", "copy", "(", ")", ",", "self", ".", "geot", ",", "nodata_value", "=", "self", ".", "nodata_value", ",", "projection", "=", "self", ".", "projection", ",", "datatype", "=", "self", ".", "datatype", ")" ]
Returns copy of itself
[ "Returns", "copy", "of", "itself" ]
python
train
jmurty/xml4h
xml4h/impls/lxml_etree.py
https://github.com/jmurty/xml4h/blob/adbb45e27a01a869a505aee7bc16bad2f517b511/xml4h/impls/lxml_etree.py#L440-L455
def _is_ns_in_ancestor(self, node, name, value): """ Return True if the given namespace name/value is defined in an ancestor of the given node, meaning that the given node need not have its own attributes to apply that namespacing. """ curr_node = self.get_node_parent(node) while curr_node.__class__ == etree._Element: if (hasattr(curr_node, 'nsmap') and curr_node.nsmap.get(name) == value): return True for n, v in curr_node.attrib.items(): if v == value and '{%s}' % nodes.Node.XMLNS_URI in n: return True curr_node = self.get_node_parent(curr_node) return False
[ "def", "_is_ns_in_ancestor", "(", "self", ",", "node", ",", "name", ",", "value", ")", ":", "curr_node", "=", "self", ".", "get_node_parent", "(", "node", ")", "while", "curr_node", ".", "__class__", "==", "etree", ".", "_Element", ":", "if", "(", "hasattr", "(", "curr_node", ",", "'nsmap'", ")", "and", "curr_node", ".", "nsmap", ".", "get", "(", "name", ")", "==", "value", ")", ":", "return", "True", "for", "n", ",", "v", "in", "curr_node", ".", "attrib", ".", "items", "(", ")", ":", "if", "v", "==", "value", "and", "'{%s}'", "%", "nodes", ".", "Node", ".", "XMLNS_URI", "in", "n", ":", "return", "True", "curr_node", "=", "self", ".", "get_node_parent", "(", "curr_node", ")", "return", "False" ]
Return True if the given namespace name/value is defined in an ancestor of the given node, meaning that the given node need not have its own attributes to apply that namespacing.
[ "Return", "True", "if", "the", "given", "namespace", "name", "/", "value", "is", "defined", "in", "an", "ancestor", "of", "the", "given", "node", "meaning", "that", "the", "given", "node", "need", "not", "have", "its", "own", "attributes", "to", "apply", "that", "namespacing", "." ]
python
train
nicktgr15/sac
sac/methods/sm_analysis/kernel.py
https://github.com/nicktgr15/sac/blob/4b1d5d8e6ca2c437972db34ddc72990860865159/sac/methods/sm_analysis/kernel.py#L66-L112
def get_checkerboard_matrix(kernel_width, kernel_type="default", gaussian_param=0.1):
    """
    example matrix for width = 2

    -1  -1   1   1
    -1  -1   1   1
     1   1  -1  -1
     1   1  -1  -1

    :param kernel_width: half-width of each checkerboard quadrant
    :param kernel_type: one of "default", "bottom_right", "top_left" or "gaussian"
    :param gaussian_param: bandwidth used when kernel_type is "gaussian"
    :return: a (2 * kernel_width) x (2 * kernel_width) numpy array
    """

    if kernel_type == "gaussian":
        return get_gaussian_kernel(kernel_width, gaussian_param)

    if kernel_type == "default":
        return np.vstack((
            np.hstack((
                -1 * np.ones((kernel_width, kernel_width)), np.ones((kernel_width, kernel_width))
            )),
            np.hstack((
                np.ones((kernel_width, kernel_width)), -1 * np.ones((kernel_width, kernel_width))
            ))
        ))
    elif kernel_type == "bottom_right":
        return np.vstack((
            np.hstack((
                np.ones((kernel_width, kernel_width)), np.ones((kernel_width, kernel_width))
            )),
            np.hstack((
                np.ones((kernel_width, kernel_width)), -1 * np.ones((kernel_width, kernel_width))
            ))
        ))
    elif kernel_type == "top_left":
        return np.vstack((
            np.hstack((
                -1 * np.ones((kernel_width, kernel_width)), np.ones((kernel_width, kernel_width))
            )),
            np.hstack((
                np.ones((kernel_width, kernel_width)), np.ones((kernel_width, kernel_width))
            ))
        ))
[ "def", "get_checkerboard_matrix", "(", "kernel_width", ",", "kernel_type", "=", "\"default\"", ",", "gaussian_param", "=", "0.1", ")", ":", "if", "kernel_type", "==", "\"gaussian\"", ":", "return", "get_gaussian_kernel", "(", "kernel_width", ",", "gaussian_param", ")", "if", "kernel_type", "==", "\"default\"", ":", "return", "np", ".", "vstack", "(", "(", "np", ".", "hstack", "(", "(", "-", "1", "*", "np", ".", "ones", "(", "(", "kernel_width", ",", "kernel_width", ")", ")", ",", "np", ".", "ones", "(", "(", "kernel_width", ",", "kernel_width", ")", ")", ")", ")", ",", "np", ".", "hstack", "(", "(", "np", ".", "ones", "(", "(", "kernel_width", ",", "kernel_width", ")", ")", ",", "-", "1", "*", "np", ".", "ones", "(", "(", "kernel_width", ",", "kernel_width", ")", ")", ")", ")", ")", ")", "elif", "kernel_type", "==", "\"bottom_right\"", ":", "return", "np", ".", "vstack", "(", "(", "np", ".", "hstack", "(", "(", "np", ".", "ones", "(", "(", "kernel_width", ",", "kernel_width", ")", ")", ",", "np", ".", "ones", "(", "(", "kernel_width", ",", "kernel_width", ")", ")", ")", ")", ",", "np", ".", "hstack", "(", "(", "np", ".", "ones", "(", "(", "kernel_width", ",", "kernel_width", ")", ")", ",", "-", "1", "*", "np", ".", "ones", "(", "(", "kernel_width", ",", "kernel_width", ")", ")", ")", ")", ")", ")", "elif", "kernel_type", "==", "\"top_left\"", ":", "return", "np", ".", "vstack", "(", "(", "np", ".", "hstack", "(", "(", "-", "1", "*", "np", ".", "ones", "(", "(", "kernel_width", ",", "kernel_width", ")", ")", ",", "np", ".", "ones", "(", "(", "kernel_width", ",", "kernel_width", ")", ")", ")", ")", ",", "np", ".", "hstack", "(", "(", "np", ".", "ones", "(", "(", "kernel_width", ",", "kernel_width", ")", ")", ",", "np", ".", "ones", "(", "(", "kernel_width", ",", "kernel_width", ")", ")", ")", ")", ")", ")" ]
example matrix for width = 2

-1  -1   1   1
-1  -1   1   1
 1   1  -1  -1
 1   1  -1  -1

:param kernel_width: half-width of each checkerboard quadrant
:param kernel_type: one of "default", "bottom_right", "top_left" or "gaussian"
:param gaussian_param: bandwidth used when kernel_type is "gaussian"
:return: a (2 * kernel_width) x (2 * kernel_width) numpy array
[ "example", "matrix", "for", "width", "=", "2" ]
python
train
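For width 1 the default kernel degenerates to the 2x2 sign checkerboard; a quick check:

import numpy as np

k = get_checkerboard_matrix(1)
print(k)
# [[-1.  1.]
#  [ 1. -1.]]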
rackerlabs/timid
timid/utils.py
https://github.com/rackerlabs/timid/blob/b1c6aa159ab380a033740f4aa392cf0d125e0ac6/timid/utils.py#L28-L43
def canonicalize_path(cwd, path): """ Canonicalizes a path relative to a given working directory. That is, the path, if not absolute, is interpreted relative to the working directory, then converted to absolute form. :param cwd: The working directory. :param path: The path to canonicalize. :returns: The absolute path. """ if not os.path.isabs(path): path = os.path.join(cwd, path) return os.path.abspath(path)
[ "def", "canonicalize_path", "(", "cwd", ",", "path", ")", ":", "if", "not", "os", ".", "path", ".", "isabs", "(", "path", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "cwd", ",", "path", ")", "return", "os", ".", "path", ".", "abspath", "(", "path", ")" ]
Canonicalizes a path relative to a given working directory. That is, the path, if not absolute, is interpreted relative to the working directory, then converted to absolute form. :param cwd: The working directory. :param path: The path to canonicalize. :returns: The absolute path.
[ "Canonicalizes", "a", "path", "relative", "to", "a", "given", "working", "directory", ".", "That", "is", "the", "path", "if", "not", "absolute", "is", "interpreted", "relative", "to", "the", "working", "directory", "then", "converted", "to", "absolute", "form", "." ]
python
test
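Behavior sketch for canonicalize_path (paths invented for illustration):

print(canonicalize_path('/home/user/project', 'conf/timid.yml'))
# /home/user/project/conf/timid.yml
print(canonicalize_path('/home/user/project', '/etc/timid.yml'))
# /etc/timid.yml  -- absolute input passes through, normalized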