Dataset schema (column name, type, and observed value lengths):

    repo              string (lengths 7-54)
    path              string (lengths 4-192)
    url               string (lengths 87-284)
    code              string (lengths 78-104k)
    code_tokens       sequence
    docstring         string (lengths 1-46.9k)
    docstring_tokens  sequence
    language          string (1 distinct value)
    partition         string (3 distinct values)
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/tailf_webui.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/tailf_webui.py#L94-L110
def webui_schematics_panels_panel_components_component_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") webui = ET.SubElement(config, "webui", xmlns="http://tail-f.com/ns/webui") schematics = ET.SubElement(webui, "schematics") panels = ET.SubElement(schematics, "panels") panel = ET.SubElement(panels, "panel") name_key = ET.SubElement(panel, "name") name_key.text = kwargs.pop('name') components = ET.SubElement(panel, "components") component = ET.SubElement(components, "component") id = ET.SubElement(component, "id") id.text = kwargs.pop('id') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "webui_schematics_panels_panel_components_component_id", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "webui", "=", "ET", ".", "SubElement", "(", "config", ",", "\"webui\"", ",", "xmlns", "=", "\"http://tail-f.com/ns/webui\"", ")", "schematics", "=", "ET", ".", "SubElement", "(", "webui", ",", "\"schematics\"", ")", "panels", "=", "ET", ".", "SubElement", "(", "schematics", ",", "\"panels\"", ")", "panel", "=", "ET", ".", "SubElement", "(", "panels", ",", "\"panel\"", ")", "name_key", "=", "ET", ".", "SubElement", "(", "panel", ",", "\"name\"", ")", "name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'name'", ")", "components", "=", "ET", ".", "SubElement", "(", "panel", ",", "\"components\"", ")", "component", "=", "ET", ".", "SubElement", "(", "components", ",", "\"component\"", ")", "id", "=", "ET", ".", "SubElement", "(", "component", ",", "\"id\"", ")", "id", ".", "text", "=", "kwargs", ".", "pop", "(", "'id'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
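Since the generated method above only pops 'name', 'id', and an optional 'callback' from kwargs, a call looks like the sketch below. The device object `dev` is a hypothetical, already-connected pynos switch handle; the attribute path used to reach this binding varies by pynos version, so only the call itself is shown.

    # Hypothetical usage; `dev` is an assumed, already-connected pynos device object.
    # 'name' fills the <name> key of <panel>, 'id' fills the <id> leaf of <component>;
    # the built <config> element is then passed to the device callback.
    dev.webui_schematics_panels_panel_components_component_id(name='panel-1', id='component-7')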
taspinar/twitterscraper
twitterscraper/query.py
https://github.com/taspinar/twitterscraper/blob/b3596b5c5e22287ec3fbe84937f49e7e2ed728e8/twitterscraper/query.py#L120-L168
def query_tweets_once_generator(query, limit=None, lang='', pos=None): """ Queries twitter for all the tweets you want! It will load all pages it gets from twitter. However, twitter might out of a sudden stop serving new pages, in that case, use the `query_tweets` method. Note that this function catches the KeyboardInterrupt so it can return tweets on incomplete queries if the user decides to abort. :param query: Any advanced query you want to do! Compile it at https://twitter.com/search-advanced and just copy the query! :param limit: Scraping will be stopped when at least ``limit`` number of items are fetched. :param pos: Field used as a "checkpoint" to continue where you left off in iteration :return: A list of twitterscraper.Tweet objects. You will get at least ``limit`` number of items. """ logger.info('Querying {}'.format(query)) query = query.replace(' ', '%20').replace('#', '%23').replace(':', '%3A') num_tweets = 0 try: while True: new_tweets, new_pos = query_single_page(query, lang, pos) if len(new_tweets) == 0: logger.info('Got {} tweets for {}.'.format( num_tweets, query)) return for t in new_tweets: yield t, pos # use new_pos only once you have iterated through all old tweets pos = new_pos num_tweets += len(new_tweets) if limit and num_tweets >= limit: logger.info('Got {} tweets for {}.'.format( num_tweets, query)) return except KeyboardInterrupt: logger.info('Program interrupted by user. Returning tweets gathered ' 'so far...') except BaseException: logger.exception('An unknown error occurred! Returning tweets ' 'gathered so far.') logger.info('Got {} tweets for {}.'.format( num_tweets, query))
[ "def", "query_tweets_once_generator", "(", "query", ",", "limit", "=", "None", ",", "lang", "=", "''", ",", "pos", "=", "None", ")", ":", "logger", ".", "info", "(", "'Querying {}'", ".", "format", "(", "query", ")", ")", "query", "=", "query", ".", "replace", "(", "' '", ",", "'%20'", ")", ".", "replace", "(", "'#'", ",", "'%23'", ")", ".", "replace", "(", "':'", ",", "'%3A'", ")", "num_tweets", "=", "0", "try", ":", "while", "True", ":", "new_tweets", ",", "new_pos", "=", "query_single_page", "(", "query", ",", "lang", ",", "pos", ")", "if", "len", "(", "new_tweets", ")", "==", "0", ":", "logger", ".", "info", "(", "'Got {} tweets for {}.'", ".", "format", "(", "num_tweets", ",", "query", ")", ")", "return", "for", "t", "in", "new_tweets", ":", "yield", "t", ",", "pos", "# use new_pos only once you have iterated through all old tweets", "pos", "=", "new_pos", "num_tweets", "+=", "len", "(", "new_tweets", ")", "if", "limit", "and", "num_tweets", ">=", "limit", ":", "logger", ".", "info", "(", "'Got {} tweets for {}.'", ".", "format", "(", "num_tweets", ",", "query", ")", ")", "return", "except", "KeyboardInterrupt", ":", "logger", ".", "info", "(", "'Program interrupted by user. Returning tweets gathered '", "'so far...'", ")", "except", "BaseException", ":", "logger", ".", "exception", "(", "'An unknown error occurred! Returning tweets '", "'gathered so far.'", ")", "logger", ".", "info", "(", "'Got {} tweets for {}.'", ".", "format", "(", "num_tweets", ",", "query", ")", ")" ]
Queries twitter for all the tweets you want! It will load all pages it gets from twitter. However, twitter might out of a sudden stop serving new pages, in that case, use the `query_tweets` method. Note that this function catches the KeyboardInterrupt so it can return tweets on incomplete queries if the user decides to abort. :param query: Any advanced query you want to do! Compile it at https://twitter.com/search-advanced and just copy the query! :param limit: Scraping will be stopped when at least ``limit`` number of items are fetched. :param pos: Field used as a "checkpoint" to continue where you left off in iteration :return: A list of twitterscraper.Tweet objects. You will get at least ``limit`` number of items.
[ "Queries", "twitter", "for", "all", "the", "tweets", "you", "want!", "It", "will", "load", "all", "pages", "it", "gets", "from", "twitter", ".", "However", "twitter", "might", "out", "of", "a", "sudden", "stop", "serving", "new", "pages", "in", "that", "case", "use", "the", "query_tweets", "method", "." ]
python
train
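A minimal usage sketch (not from the repository) for the generator above: it yields (tweet, pos) pairs, so the last pos can be kept as a checkpoint and passed back in to resume a later run. The import path follows the file path shown above.

    # Illustrative only; assumes twitterscraper is installed and importable.
    from twitterscraper.query import query_tweets_once_generator

    tweets = []
    last_pos = None
    for tweet, pos in query_tweets_once_generator('#python', limit=100, lang='en'):
        tweets.append(tweet)
        last_pos = pos   # checkpoint: pass pos=last_pos to a later call to continue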
DataDog/integrations-core
mapreduce/datadog_checks/mapreduce/mapreduce.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/mapreduce/datadog_checks/mapreduce/mapreduce.py#L413-L444
def _mapreduce_task_metrics(self, running_jobs, auth, ssl_verify, addl_tags): """ Get metrics for each MapReduce task Return a dictionary of {task_id: 'tracking_url'} for each MapReduce task """ for job_stats in itervalues(running_jobs): metrics_json = self._rest_request_to_json( job_stats['tracking_url'], auth, ssl_verify, 'tasks', self.MAPREDUCE_SERVICE_CHECK, tags=addl_tags ) if metrics_json.get('tasks'): if metrics_json['tasks'].get('task'): for task in metrics_json['tasks']['task']: task_type = task.get('type') if task_type: tags = [ 'app_name:' + job_stats['app_name'], 'user_name:' + job_stats['user_name'], 'job_name:' + job_stats['job_name'], 'task_type:' + str(task_type).lower(), ] tags.extend(addl_tags) if task_type == 'MAP': self._set_metrics_from_json(task, self.MAPREDUCE_MAP_TASK_METRICS, tags) elif task_type == 'REDUCE': self._set_metrics_from_json(task, self.MAPREDUCE_REDUCE_TASK_METRICS, tags)
[ "def", "_mapreduce_task_metrics", "(", "self", ",", "running_jobs", ",", "auth", ",", "ssl_verify", ",", "addl_tags", ")", ":", "for", "job_stats", "in", "itervalues", "(", "running_jobs", ")", ":", "metrics_json", "=", "self", ".", "_rest_request_to_json", "(", "job_stats", "[", "'tracking_url'", "]", ",", "auth", ",", "ssl_verify", ",", "'tasks'", ",", "self", ".", "MAPREDUCE_SERVICE_CHECK", ",", "tags", "=", "addl_tags", ")", "if", "metrics_json", ".", "get", "(", "'tasks'", ")", ":", "if", "metrics_json", "[", "'tasks'", "]", ".", "get", "(", "'task'", ")", ":", "for", "task", "in", "metrics_json", "[", "'tasks'", "]", "[", "'task'", "]", ":", "task_type", "=", "task", ".", "get", "(", "'type'", ")", "if", "task_type", ":", "tags", "=", "[", "'app_name:'", "+", "job_stats", "[", "'app_name'", "]", ",", "'user_name:'", "+", "job_stats", "[", "'user_name'", "]", ",", "'job_name:'", "+", "job_stats", "[", "'job_name'", "]", ",", "'task_type:'", "+", "str", "(", "task_type", ")", ".", "lower", "(", ")", ",", "]", "tags", ".", "extend", "(", "addl_tags", ")", "if", "task_type", "==", "'MAP'", ":", "self", ".", "_set_metrics_from_json", "(", "task", ",", "self", ".", "MAPREDUCE_MAP_TASK_METRICS", ",", "tags", ")", "elif", "task_type", "==", "'REDUCE'", ":", "self", ".", "_set_metrics_from_json", "(", "task", ",", "self", ".", "MAPREDUCE_REDUCE_TASK_METRICS", ",", "tags", ")" ]
Get metrics for each MapReduce task Return a dictionary of {task_id: 'tracking_url'} for each MapReduce task
[ "Get", "metrics", "for", "each", "MapReduce", "task", "Return", "a", "dictionary", "of", "{", "task_id", ":", "tracking_url", "}", "for", "each", "MapReduce", "task" ]
python
train
glormph/msstitch
src/app/actions/mslookup/proteinquant.py
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mslookup/proteinquant.py#L60-L66
def create_tablefn_map(fns, pqdb, poolnames): """Stores protein/peptide table names in DB, returns a map with their respective DB IDs""" poolmap = {name: pid for (name, pid) in pqdb.get_all_poolnames()} pqdb.store_table_files([(poolmap[pool], os.path.basename(fn)) for fn, pool in zip(fns, poolnames)]) return pqdb.get_tablefn_map()
[ "def", "create_tablefn_map", "(", "fns", ",", "pqdb", ",", "poolnames", ")", ":", "poolmap", "=", "{", "name", ":", "pid", "for", "(", "name", ",", "pid", ")", "in", "pqdb", ".", "get_all_poolnames", "(", ")", "}", "pqdb", ".", "store_table_files", "(", "[", "(", "poolmap", "[", "pool", "]", ",", "os", ".", "path", ".", "basename", "(", "fn", ")", ")", "for", "fn", ",", "pool", "in", "zip", "(", "fns", ",", "poolnames", ")", "]", ")", "return", "pqdb", ".", "get_tablefn_map", "(", ")" ]
Stores protein/peptide table names in DB, returns a map with their respective DB IDs
[ "Stores", "protein", "/", "peptide", "table", "names", "in", "DB", "returns", "a", "map", "with", "their", "respective", "DB", "IDs" ]
python
train
ambitioninc/python-logentries-api
logentries_api/special_alerts.py
https://github.com/ambitioninc/python-logentries-api/blob/77ff1a7a2995d7ea2725b74e34c0f880f4ee23bc/logentries_api/special_alerts.py#L274-L289
def _api_post(self, url, **kwargs): """ Convenience method for posting """ response = self.session.post( url=url, headers=self._get_api_headers(), **kwargs ) if not response.ok: raise ServerException( '{0}: {1}'.format( response.status_code, response.text or response.reason )) return response.json()
[ "def", "_api_post", "(", "self", ",", "url", ",", "*", "*", "kwargs", ")", ":", "response", "=", "self", ".", "session", ".", "post", "(", "url", "=", "url", ",", "headers", "=", "self", ".", "_get_api_headers", "(", ")", ",", "*", "*", "kwargs", ")", "if", "not", "response", ".", "ok", ":", "raise", "ServerException", "(", "'{0}: {1}'", ".", "format", "(", "response", ".", "status_code", ",", "response", ".", "text", "or", "response", ".", "reason", ")", ")", "return", "response", ".", "json", "(", ")" ]
Convenience method for posting
[ "Convenience", "method", "for", "posting" ]
python
test
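The method above is a thin wrapper around requests: POST with API headers, raise on any non-2xx status, return the parsed JSON. A standalone sketch of the same pattern (plain requests, not the library's own classes or endpoints; the URL and header below are placeholders):

    # Standalone sketch of the same post-and-raise pattern.
    import requests

    def api_post(session, url, headers, **kwargs):
        response = session.post(url=url, headers=headers, **kwargs)
        if not response.ok:
            # mirror the snippet above: status code plus body (or reason if the body is empty)
            raise RuntimeError('{0}: {1}'.format(response.status_code,
                                                 response.text or response.reason))
        return response.json()

    # api_post(requests.Session(), 'https://example.invalid/alerts', {'x-api-key': '...'}, json={'name': 'demo'})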
postlund/pyatv
pyatv/mrp/__init__.py
https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/mrp/__init__.py#L145-L154
def play_state(self): """Play state, e.g. playing or paused.""" # TODO: extract to a convert module state = self._setstate.playbackState if state == 1: return const.PLAY_STATE_PLAYING if state == 2: return const.PLAY_STATE_PAUSED return const.PLAY_STATE_PAUSED
[ "def", "play_state", "(", "self", ")", ":", "# TODO: extract to a convert module", "state", "=", "self", ".", "_setstate", ".", "playbackState", "if", "state", "==", "1", ":", "return", "const", ".", "PLAY_STATE_PLAYING", "if", "state", "==", "2", ":", "return", "const", ".", "PLAY_STATE_PAUSED", "return", "const", ".", "PLAY_STATE_PAUSED" ]
Play state, e.g. playing or paused.
[ "Play", "state", "e", ".", "g", ".", "playing", "or", "paused", "." ]
python
train
saltstack/salt
salt/proxy/marathon.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/marathon.py#L43-L51
def init(opts): ''' Perform any needed setup. ''' if CONFIG_BASE_URL in opts['proxy']: CONFIG[CONFIG_BASE_URL] = opts['proxy'][CONFIG_BASE_URL] else: log.error('missing proxy property %s', CONFIG_BASE_URL) log.debug('CONFIG: %s', CONFIG)
[ "def", "init", "(", "opts", ")", ":", "if", "CONFIG_BASE_URL", "in", "opts", "[", "'proxy'", "]", ":", "CONFIG", "[", "CONFIG_BASE_URL", "]", "=", "opts", "[", "'proxy'", "]", "[", "CONFIG_BASE_URL", "]", "else", ":", "log", ".", "error", "(", "'missing proxy property %s'", ",", "CONFIG_BASE_URL", ")", "log", ".", "debug", "(", "'CONFIG: %s'", ",", "CONFIG", ")" ]
Perform any needed setup.
[ "Perform", "any", "needed", "setup", "." ]
python
train
gwastro/pycbc
pycbc/transforms.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/transforms.py#L407-L413
def inverse_jacobian(self, maps): """Returns the Jacobian for transforming mass1 and mass2 to mchirp and q. """ m1 = maps[parameters.mass1] m2 = maps[parameters.mass2] return conversions.mchirp_from_mass1_mass2(m1, m2)/m2**2.
[ "def", "inverse_jacobian", "(", "self", ",", "maps", ")", ":", "m1", "=", "maps", "[", "parameters", ".", "mass1", "]", "m2", "=", "maps", "[", "parameters", ".", "mass2", "]", "return", "conversions", ".", "mchirp_from_mass1_mass2", "(", "m1", ",", "m2", ")", "/", "m2", "**", "2." ]
Returns the Jacobian for transforming mass1 and mass2 to mchirp and q.
[ "Returns", "the", "Jacobian", "for", "transforming", "mass1", "and", "mass2", "to", "mchirp", "and", "q", "." ]
python
train
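For readers wanting the missing step: with the usual chirp-mass definition and the mass ratio taken as q = m1/m2 (the convention consistent with the expression returned above), the determinant works out to exactly the value the method returns. Added as a reading aid, not part of the source:

    \mathcal{M} = \frac{(m_1 m_2)^{3/5}}{(m_1 + m_2)^{1/5}}, \qquad q = \frac{m_1}{m_2},
    \qquad
    \left|\frac{\partial(\mathcal{M}, q)}{\partial(m_1, m_2)}\right|
    = \left|\frac{\partial\mathcal{M}}{\partial m_1}\frac{\partial q}{\partial m_2}
          - \frac{\partial\mathcal{M}}{\partial m_2}\frac{\partial q}{\partial m_1}\right|
    = \frac{\mathcal{M}}{m_2^{2}}.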
zarr-developers/zarr
zarr/convenience.py
https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/convenience.py#L21-L102
def open(store=None, mode='a', **kwargs): """Convenience function to open a group or array using file-mode-like semantics. Parameters ---------- store : MutableMapping or string, optional Store or path to directory in file system or name of zip file. mode : {'r', 'r+', 'a', 'w', 'w-'}, optional Persistence mode: 'r' means read only (must exist); 'r+' means read/write (must exist); 'a' means read/write (create if doesn't exist); 'w' means create (overwrite if exists); 'w-' means create (fail if exists). **kwargs Additional parameters are passed through to :func:`zarr.creation.open_array` or :func:`zarr.hierarchy.open_group`. Returns ------- z : :class:`zarr.core.Array` or :class:`zarr.hierarchy.Group` Array or group, depending on what exists in the given store. See Also -------- zarr.creation.open_array, zarr.hierarchy.open_group Examples -------- Storing data in a directory 'data/example.zarr' on the local file system:: >>> import zarr >>> store = 'data/example.zarr' >>> zw = zarr.open(store, mode='w', shape=100, dtype='i4') # open new array >>> zw <zarr.core.Array (100,) int32> >>> za = zarr.open(store, mode='a') # open existing array for reading and writing >>> za <zarr.core.Array (100,) int32> >>> zr = zarr.open(store, mode='r') # open existing array read-only >>> zr <zarr.core.Array (100,) int32 read-only> >>> gw = zarr.open(store, mode='w') # open new group, overwriting previous data >>> gw <zarr.hierarchy.Group '/'> >>> ga = zarr.open(store, mode='a') # open existing group for reading and writing >>> ga <zarr.hierarchy.Group '/'> >>> gr = zarr.open(store, mode='r') # open existing group read-only >>> gr <zarr.hierarchy.Group '/' read-only> """ path = kwargs.get('path', None) # handle polymorphic store arg clobber = mode == 'w' store = normalize_store_arg(store, clobber=clobber) path = normalize_storage_path(path) if mode in {'w', 'w-', 'x'}: if 'shape' in kwargs: return open_array(store, mode=mode, **kwargs) else: return open_group(store, mode=mode, **kwargs) elif mode == 'a': if contains_array(store, path): return open_array(store, mode=mode, **kwargs) elif contains_group(store, path): return open_group(store, mode=mode, **kwargs) elif 'shape' in kwargs: return open_array(store, mode=mode, **kwargs) else: return open_group(store, mode=mode, **kwargs) else: if contains_array(store, path): return open_array(store, mode=mode, **kwargs) elif contains_group(store, path): return open_group(store, mode=mode, **kwargs) else: err_path_not_found(path)
[ "def", "open", "(", "store", "=", "None", ",", "mode", "=", "'a'", ",", "*", "*", "kwargs", ")", ":", "path", "=", "kwargs", ".", "get", "(", "'path'", ",", "None", ")", "# handle polymorphic store arg", "clobber", "=", "mode", "==", "'w'", "store", "=", "normalize_store_arg", "(", "store", ",", "clobber", "=", "clobber", ")", "path", "=", "normalize_storage_path", "(", "path", ")", "if", "mode", "in", "{", "'w'", ",", "'w-'", ",", "'x'", "}", ":", "if", "'shape'", "in", "kwargs", ":", "return", "open_array", "(", "store", ",", "mode", "=", "mode", ",", "*", "*", "kwargs", ")", "else", ":", "return", "open_group", "(", "store", ",", "mode", "=", "mode", ",", "*", "*", "kwargs", ")", "elif", "mode", "==", "'a'", ":", "if", "contains_array", "(", "store", ",", "path", ")", ":", "return", "open_array", "(", "store", ",", "mode", "=", "mode", ",", "*", "*", "kwargs", ")", "elif", "contains_group", "(", "store", ",", "path", ")", ":", "return", "open_group", "(", "store", ",", "mode", "=", "mode", ",", "*", "*", "kwargs", ")", "elif", "'shape'", "in", "kwargs", ":", "return", "open_array", "(", "store", ",", "mode", "=", "mode", ",", "*", "*", "kwargs", ")", "else", ":", "return", "open_group", "(", "store", ",", "mode", "=", "mode", ",", "*", "*", "kwargs", ")", "else", ":", "if", "contains_array", "(", "store", ",", "path", ")", ":", "return", "open_array", "(", "store", ",", "mode", "=", "mode", ",", "*", "*", "kwargs", ")", "elif", "contains_group", "(", "store", ",", "path", ")", ":", "return", "open_group", "(", "store", ",", "mode", "=", "mode", ",", "*", "*", "kwargs", ")", "else", ":", "err_path_not_found", "(", "path", ")" ]
Convenience function to open a group or array using file-mode-like semantics. Parameters ---------- store : MutableMapping or string, optional Store or path to directory in file system or name of zip file. mode : {'r', 'r+', 'a', 'w', 'w-'}, optional Persistence mode: 'r' means read only (must exist); 'r+' means read/write (must exist); 'a' means read/write (create if doesn't exist); 'w' means create (overwrite if exists); 'w-' means create (fail if exists). **kwargs Additional parameters are passed through to :func:`zarr.creation.open_array` or :func:`zarr.hierarchy.open_group`. Returns ------- z : :class:`zarr.core.Array` or :class:`zarr.hierarchy.Group` Array or group, depending on what exists in the given store. See Also -------- zarr.creation.open_array, zarr.hierarchy.open_group Examples -------- Storing data in a directory 'data/example.zarr' on the local file system:: >>> import zarr >>> store = 'data/example.zarr' >>> zw = zarr.open(store, mode='w', shape=100, dtype='i4') # open new array >>> zw <zarr.core.Array (100,) int32> >>> za = zarr.open(store, mode='a') # open existing array for reading and writing >>> za <zarr.core.Array (100,) int32> >>> zr = zarr.open(store, mode='r') # open existing array read-only >>> zr <zarr.core.Array (100,) int32 read-only> >>> gw = zarr.open(store, mode='w') # open new group, overwriting previous data >>> gw <zarr.hierarchy.Group '/'> >>> ga = zarr.open(store, mode='a') # open existing group for reading and writing >>> ga <zarr.hierarchy.Group '/'> >>> gr = zarr.open(store, mode='r') # open existing group read-only >>> gr <zarr.hierarchy.Group '/' read-only>
[ "Convenience", "function", "to", "open", "a", "group", "or", "array", "using", "file", "-", "mode", "-", "like", "semantics", "." ]
python
train
etalab/cada
cada/commands.py
https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/commands.py#L179-L193
def reindex(): '''Reindex all advices''' header('Reindexing all advices') echo('Deleting index {0}', white(es.index_name)) if es.indices.exists(es.index_name): es.indices.delete(index=es.index_name) es.initialize() idx = 0 for idx, advice in enumerate(Advice.objects, 1): index(advice) echo('.' if idx % 50 else white(idx), nl=False) echo(white(idx) if idx % 50 else '') es.indices.refresh(index=es.index_name) success('Indexed {0} advices', idx)
[ "def", "reindex", "(", ")", ":", "header", "(", "'Reindexing all advices'", ")", "echo", "(", "'Deleting index {0}'", ",", "white", "(", "es", ".", "index_name", ")", ")", "if", "es", ".", "indices", ".", "exists", "(", "es", ".", "index_name", ")", ":", "es", ".", "indices", ".", "delete", "(", "index", "=", "es", ".", "index_name", ")", "es", ".", "initialize", "(", ")", "idx", "=", "0", "for", "idx", ",", "advice", "in", "enumerate", "(", "Advice", ".", "objects", ",", "1", ")", ":", "index", "(", "advice", ")", "echo", "(", "'.'", "if", "idx", "%", "50", "else", "white", "(", "idx", ")", ",", "nl", "=", "False", ")", "echo", "(", "white", "(", "idx", ")", "if", "idx", "%", "50", "else", "''", ")", "es", ".", "indices", ".", "refresh", "(", "index", "=", "es", ".", "index_name", ")", "success", "(", "'Indexed {0} advices'", ",", "idx", ")" ]
Reindex all advices
[ "Reindex", "all", "advices" ]
python
train
Jaza/s3-saver
s3_saver.py
https://github.com/Jaza/s3-saver/blob/81dc4447d76c2fc0b0238fb96fa70e879612e355/s3_saver.py#L128-L153
def save(self, temp_file, filename, obj): """Saves the specified file to either S3 or the local filesystem, depending on the currently enabled storage type.""" if not (self.storage_type and self.bucket_name): ret = self._save_local(temp_file, filename, obj) else: if self.storage_type != 's3': raise ValueError('Storage type "%s" is invalid, the only supported storage type (apart from default local storage) is s3.' % self.storage_type) ret = self._save_s3(temp_file, filename, obj) if self.field_name: setattr(obj, self.field_name, ret) if self.storage_type == 's3': if self.storage_type_field: setattr(obj, self.storage_type_field, self.storage_type) if self.bucket_name_field: setattr(obj, self.bucket_name_field, self.bucket_name) else: if self.storage_type_field: setattr(obj, self.storage_type_field, '') if self.bucket_name_field: setattr(obj, self.bucket_name_field, '') return ret
[ "def", "save", "(", "self", ",", "temp_file", ",", "filename", ",", "obj", ")", ":", "if", "not", "(", "self", ".", "storage_type", "and", "self", ".", "bucket_name", ")", ":", "ret", "=", "self", ".", "_save_local", "(", "temp_file", ",", "filename", ",", "obj", ")", "else", ":", "if", "self", ".", "storage_type", "!=", "'s3'", ":", "raise", "ValueError", "(", "'Storage type \"%s\" is invalid, the only supported storage type (apart from default local storage) is s3.'", "%", "self", ".", "storage_type", ")", "ret", "=", "self", ".", "_save_s3", "(", "temp_file", ",", "filename", ",", "obj", ")", "if", "self", ".", "field_name", ":", "setattr", "(", "obj", ",", "self", ".", "field_name", ",", "ret", ")", "if", "self", ".", "storage_type", "==", "'s3'", ":", "if", "self", ".", "storage_type_field", ":", "setattr", "(", "obj", ",", "self", ".", "storage_type_field", ",", "self", ".", "storage_type", ")", "if", "self", ".", "bucket_name_field", ":", "setattr", "(", "obj", ",", "self", ".", "bucket_name_field", ",", "self", ".", "bucket_name", ")", "else", ":", "if", "self", ".", "storage_type_field", ":", "setattr", "(", "obj", ",", "self", ".", "storage_type_field", ",", "''", ")", "if", "self", ".", "bucket_name_field", ":", "setattr", "(", "obj", ",", "self", ".", "bucket_name_field", ",", "''", ")", "return", "ret" ]
Saves the specified file to either S3 or the local filesystem, depending on the currently enabled storage type.
[ "Saves", "the", "specified", "file", "to", "either", "S3", "or", "the", "local", "filesystem", "depending", "on", "the", "currently", "enabled", "storage", "type", "." ]
python
test
minio/minio-py
minio/parsers.py
https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/parsers.py#L186-L200
def parse_list_buckets(data): """ Parser for list buckets response. :param data: Response data for list buckets. :return: List of :class:`Bucket <Bucket>`. """ root = S3Element.fromstring('ListBucketsResult', data) return [ Bucket(bucket.get_child_text('Name'), bucket.get_localized_time_elem('CreationDate')) for buckets in root.findall('Buckets') for bucket in buckets.findall('Bucket') ]
[ "def", "parse_list_buckets", "(", "data", ")", ":", "root", "=", "S3Element", ".", "fromstring", "(", "'ListBucketsResult'", ",", "data", ")", "return", "[", "Bucket", "(", "bucket", ".", "get_child_text", "(", "'Name'", ")", ",", "bucket", ".", "get_localized_time_elem", "(", "'CreationDate'", ")", ")", "for", "buckets", "in", "root", ".", "findall", "(", "'Buckets'", ")", "for", "bucket", "in", "buckets", ".", "findall", "(", "'Bucket'", ")", "]" ]
Parser for list buckets response. :param data: Response data for list buckets. :return: List of :class:`Bucket <Bucket>`.
[ "Parser", "for", "list", "buckets", "response", "." ]
python
train
SetBased/py-etlt
etlt/helper/Type2JoinHelper.py
https://github.com/SetBased/py-etlt/blob/1c5b8ea60293c14f54d7845a9fe5c595021f66f2/etlt/helper/Type2JoinHelper.py#L55-L90
def _intersection(self, keys, rows): """ Computes the intersection of the date intervals of two or more reference data sets. If the intersection is empty the row is removed from the group. :param list[tuple[str,str]] keys: The other keys with start and end date. :param list[dict[str,T]] rows: The list of rows. :rtype: list[dict[str,T]] """ # If there are no other keys with start and end date (i.e. nothing to merge) return immediately. if not keys: return rows ret = list() for row in rows: start_date = row[self._key_start_date] end_date = row[self._key_end_date] for key_start_date, key_end_date in keys: start_date, end_date = Type2JoinHelper._intersect(start_date, end_date, row[key_start_date], row[key_end_date]) if not start_date: break if key_start_date not in [self._key_start_date, self._key_end_date]: del row[key_start_date] if key_end_date not in [self._key_start_date, self._key_end_date]: del row[key_end_date] if start_date: row[self._key_start_date] = start_date row[self._key_end_date] = end_date ret.append(row) return ret
[ "def", "_intersection", "(", "self", ",", "keys", ",", "rows", ")", ":", "# If there are no other keys with start and end date (i.e. nothing to merge) return immediately.", "if", "not", "keys", ":", "return", "rows", "ret", "=", "list", "(", ")", "for", "row", "in", "rows", ":", "start_date", "=", "row", "[", "self", ".", "_key_start_date", "]", "end_date", "=", "row", "[", "self", ".", "_key_end_date", "]", "for", "key_start_date", ",", "key_end_date", "in", "keys", ":", "start_date", ",", "end_date", "=", "Type2JoinHelper", ".", "_intersect", "(", "start_date", ",", "end_date", ",", "row", "[", "key_start_date", "]", ",", "row", "[", "key_end_date", "]", ")", "if", "not", "start_date", ":", "break", "if", "key_start_date", "not", "in", "[", "self", ".", "_key_start_date", ",", "self", ".", "_key_end_date", "]", ":", "del", "row", "[", "key_start_date", "]", "if", "key_end_date", "not", "in", "[", "self", ".", "_key_start_date", ",", "self", ".", "_key_end_date", "]", ":", "del", "row", "[", "key_end_date", "]", "if", "start_date", ":", "row", "[", "self", ".", "_key_start_date", "]", "=", "start_date", "row", "[", "self", ".", "_key_end_date", "]", "=", "end_date", "ret", ".", "append", "(", "row", ")", "return", "ret" ]
Computes the intersection of the date intervals of two or more reference data sets. If the intersection is empty the row is removed from the group. :param list[tuple[str,str]] keys: The other keys with start and end date. :param list[dict[str,T]] rows: The list of rows. :rtype: list[dict[str,T]]
[ "Computes", "the", "intersection", "of", "the", "date", "intervals", "of", "two", "or", "more", "reference", "data", "sets", ".", "If", "the", "intersection", "is", "empty", "the", "row", "is", "removed", "from", "the", "group", "." ]
python
train
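The snippet relies on Type2JoinHelper._intersect, which is not included above. A plausible sketch of such an interval intersection is given below purely for orientation; the real helper may differ (this is an assumption, not the library's code):

    # Assumed behaviour of the (not shown) _intersect helper: overlap of two
    # half-open [start, end) date intervals, or (None, None) when they are disjoint,
    # which is what makes the `if not start_date: break` check above work.
    def intersect(start1, end1, start2, end2):
        start = max(start1, start2)
        end = min(end1, end2)
        if start >= end:
            return None, None
        return start, end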
hyperledger/indy-plenum
stp_zmq/zstack.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/stp_zmq/zstack.py#L478-L500
def _receiveFromListener(self, quota: Quota) -> int: """ Receives messages from listener :param quota: number of messages to receive :return: number of received messages """ i = 0 incoming_size = 0 while i < quota.count and incoming_size < quota.size: try: ident, msg = self.listener.recv_multipart(flags=zmq.NOBLOCK) if not msg: # Router probing sends empty message on connection continue incoming_size += len(msg) i += 1 self._verifyAndAppend(msg, ident) except zmq.Again: break if i > 0: logger.trace('{} got {} messages through listener'. format(self, i)) return i
[ "def", "_receiveFromListener", "(", "self", ",", "quota", ":", "Quota", ")", "->", "int", ":", "i", "=", "0", "incoming_size", "=", "0", "while", "i", "<", "quota", ".", "count", "and", "incoming_size", "<", "quota", ".", "size", ":", "try", ":", "ident", ",", "msg", "=", "self", ".", "listener", ".", "recv_multipart", "(", "flags", "=", "zmq", ".", "NOBLOCK", ")", "if", "not", "msg", ":", "# Router probing sends empty message on connection", "continue", "incoming_size", "+=", "len", "(", "msg", ")", "i", "+=", "1", "self", ".", "_verifyAndAppend", "(", "msg", ",", "ident", ")", "except", "zmq", ".", "Again", ":", "break", "if", "i", ">", "0", ":", "logger", ".", "trace", "(", "'{} got {} messages through listener'", ".", "format", "(", "self", ",", "i", ")", ")", "return", "i" ]
Receives messages from listener :param quota: number of messages to receive :return: number of received messages
[ "Receives", "messages", "from", "listener", ":", "param", "quota", ":", "number", "of", "messages", "to", "receive", ":", "return", ":", "number", "of", "received", "messages" ]
python
train
hubo1016/vlcp
vlcp/utils/vxlandiscover.py
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/vxlandiscover.py#L157-L178
def get_broadcast_ips(vxlan_endpointset, local_ip, ovsdb_vhost, system_id, bridge): ''' Get all IP addresses that are not local :param vxlan_endpointset: a VXLANEndpointSet object :param local_ips: list of local IP address to exclude with :param ovsdb_vhost: identifier, vhost :param system_id: identifier, system-id :param bridge: identifier, bridge name :return: `[(ip, ipnum)]` list where IPs are the original string of the IP address, and ipnum are 32-bit numeric IPv4 address. ''' localip_addr = _get_ip(local_ip) allips = [(ip, ipnum) for ip, ipnum in ((ep[0], _get_ip(ep[0])) for ep in vxlan_endpointset.endpointlist if (ep[1], ep[2], ep[3]) != (ovsdb_vhost, system_id, bridge)) if ipnum is not None and ipnum != localip_addr] return allips
[ "def", "get_broadcast_ips", "(", "vxlan_endpointset", ",", "local_ip", ",", "ovsdb_vhost", ",", "system_id", ",", "bridge", ")", ":", "localip_addr", "=", "_get_ip", "(", "local_ip", ")", "allips", "=", "[", "(", "ip", ",", "ipnum", ")", "for", "ip", ",", "ipnum", "in", "(", "(", "ep", "[", "0", "]", ",", "_get_ip", "(", "ep", "[", "0", "]", ")", ")", "for", "ep", "in", "vxlan_endpointset", ".", "endpointlist", "if", "(", "ep", "[", "1", "]", ",", "ep", "[", "2", "]", ",", "ep", "[", "3", "]", ")", "!=", "(", "ovsdb_vhost", ",", "system_id", ",", "bridge", ")", ")", "if", "ipnum", "is", "not", "None", "and", "ipnum", "!=", "localip_addr", "]", "return", "allips" ]
Get all IP addresses that are not local :param vxlan_endpointset: a VXLANEndpointSet object :param local_ips: list of local IP address to exclude with :param ovsdb_vhost: identifier, vhost :param system_id: identifier, system-id :param bridge: identifier, bridge name :return: `[(ip, ipnum)]` list where IPs are the original string of the IP address, and ipnum are 32-bit numeric IPv4 address.
[ "Get", "all", "IP", "addresses", "that", "are", "not", "local", ":", "param", "vxlan_endpointset", ":", "a", "VXLANEndpointSet", "object", ":", "param", "local_ips", ":", "list", "of", "local", "IP", "address", "to", "exclude", "with", ":", "param", "ovsdb_vhost", ":", "identifier", "vhost", ":", "param", "system_id", ":", "identifier", "system", "-", "id", ":", "param", "bridge", ":", "identifier", "bridge", "name", ":", "return", ":", "[", "(", "ip", "ipnum", ")", "]", "list", "where", "IPs", "are", "the", "original", "string", "of", "the", "IP", "address", "and", "ipnum", "are", "32", "-", "bit", "numeric", "IPv4", "address", "." ]
python
train
Nachtfeuer/pipeline
spline/tools/condition.py
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/condition.py#L129-L141
def get_tokens(condition): """ Get AST tokens for Python condition. Returns: list: list of AST tokens """ try: ast_tokens = list(ast.walk(ast.parse(condition.strip()))) except SyntaxError as exception: Logger.get_logger(__name__).error("Syntax error: %s", exception) ast_tokens = [] return ast_tokens
[ "def", "get_tokens", "(", "condition", ")", ":", "try", ":", "ast_tokens", "=", "list", "(", "ast", ".", "walk", "(", "ast", ".", "parse", "(", "condition", ".", "strip", "(", ")", ")", ")", ")", "except", "SyntaxError", "as", "exception", ":", "Logger", ".", "get_logger", "(", "__name__", ")", ".", "error", "(", "\"Syntax error: %s\"", ",", "exception", ")", "ast_tokens", "=", "[", "]", "return", "ast_tokens" ]
Get AST tokens for Python condition. Returns: list: list of AST tokens
[ "Get", "AST", "tokens", "for", "Python", "condition", "." ]
python
train
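Because ast.parse and ast.walk come from the standard library, the behaviour of get_tokens is easy to reproduce directly; a small usage sketch (not from the repository):

    # Illustrative only: same AST walk as get_tokens, on a sample condition string.
    import ast

    tokens = list(ast.walk(ast.parse('x > 3 and y < 5'.strip())))
    names = [node.id for node in tokens if isinstance(node, ast.Name)]
    # names == ['x', 'y']; a malformed condition raises SyntaxError,
    # which get_tokens above logs and turns into an empty list.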
reingart/gui2py
gui/tools/propeditor.py
https://github.com/reingart/gui2py/blob/aca0a05f6fcde55c94ad7cc058671a06608b01a4/gui/tools/propeditor.py#L183-L195
def edit(self, name=""): "Programatically select a (default) property to start editing it" # for more info see DoSelectAndEdit in propgrid.cpp for name in (name, "label", "value", "text", "title", "filename", "name"): prop = self.pg.GetPropertyByName(name) if prop is not None: break self.Parent.SetFocus() self.Parent.Raise() self.pg.SetFocus() # give time to the ui to show the prop grid and set focus: wx.CallLater(250, self.select, prop.GetName())
[ "def", "edit", "(", "self", ",", "name", "=", "\"\"", ")", ":", "# for more info see DoSelectAndEdit in propgrid.cpp", "for", "name", "in", "(", "name", ",", "\"label\"", ",", "\"value\"", ",", "\"text\"", ",", "\"title\"", ",", "\"filename\"", ",", "\"name\"", ")", ":", "prop", "=", "self", ".", "pg", ".", "GetPropertyByName", "(", "name", ")", "if", "prop", "is", "not", "None", ":", "break", "self", ".", "Parent", ".", "SetFocus", "(", ")", "self", ".", "Parent", ".", "Raise", "(", ")", "self", ".", "pg", ".", "SetFocus", "(", ")", "# give time to the ui to show the prop grid and set focus:", "wx", ".", "CallLater", "(", "250", ",", "self", ".", "select", ",", "prop", ".", "GetName", "(", ")", ")" ]
Programatically select a (default) property to start editing it
[ "Programatically", "select", "a", "(", "default", ")", "property", "to", "start", "editing", "it" ]
python
test
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8907-L8956
def imagej_description(shape, rgb=None, colormaped=False, version=None, hyperstack=None, mode=None, loop=None, **kwargs): """Return ImageJ image description from data shape. ImageJ can handle up to 6 dimensions in order TZCYXS. >>> imagej_description((51, 5, 2, 196, 171)) # doctest: +SKIP ImageJ=1.11a images=510 channels=2 slices=5 frames=51 hyperstack=true mode=grayscale loop=false """ if colormaped: raise NotImplementedError('ImageJ colormapping not supported') if version is None: version = '1.11a' shape = imagej_shape(shape, rgb=rgb) rgb = shape[-1] in (3, 4) result = ['ImageJ=%s' % version] append = [] result.append('images=%i' % product(shape[:-3])) if hyperstack is None: hyperstack = True append.append('hyperstack=true') else: append.append('hyperstack=%s' % bool(hyperstack)) if shape[2] > 1: result.append('channels=%i' % shape[2]) if mode is None and not rgb: mode = 'grayscale' if hyperstack and mode: append.append('mode=%s' % mode) if shape[1] > 1: result.append('slices=%i' % shape[1]) if shape[0] > 1: result.append('frames=%i' % shape[0]) if loop is None: append.append('loop=false') if loop is not None: append.append('loop=%s' % bool(loop)) for key, value in kwargs.items(): append.append('%s=%s' % (key.lower(), value)) return '\n'.join(result + append + [''])
[ "def", "imagej_description", "(", "shape", ",", "rgb", "=", "None", ",", "colormaped", "=", "False", ",", "version", "=", "None", ",", "hyperstack", "=", "None", ",", "mode", "=", "None", ",", "loop", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "colormaped", ":", "raise", "NotImplementedError", "(", "'ImageJ colormapping not supported'", ")", "if", "version", "is", "None", ":", "version", "=", "'1.11a'", "shape", "=", "imagej_shape", "(", "shape", ",", "rgb", "=", "rgb", ")", "rgb", "=", "shape", "[", "-", "1", "]", "in", "(", "3", ",", "4", ")", "result", "=", "[", "'ImageJ=%s'", "%", "version", "]", "append", "=", "[", "]", "result", ".", "append", "(", "'images=%i'", "%", "product", "(", "shape", "[", ":", "-", "3", "]", ")", ")", "if", "hyperstack", "is", "None", ":", "hyperstack", "=", "True", "append", ".", "append", "(", "'hyperstack=true'", ")", "else", ":", "append", ".", "append", "(", "'hyperstack=%s'", "%", "bool", "(", "hyperstack", ")", ")", "if", "shape", "[", "2", "]", ">", "1", ":", "result", ".", "append", "(", "'channels=%i'", "%", "shape", "[", "2", "]", ")", "if", "mode", "is", "None", "and", "not", "rgb", ":", "mode", "=", "'grayscale'", "if", "hyperstack", "and", "mode", ":", "append", ".", "append", "(", "'mode=%s'", "%", "mode", ")", "if", "shape", "[", "1", "]", ">", "1", ":", "result", ".", "append", "(", "'slices=%i'", "%", "shape", "[", "1", "]", ")", "if", "shape", "[", "0", "]", ">", "1", ":", "result", ".", "append", "(", "'frames=%i'", "%", "shape", "[", "0", "]", ")", "if", "loop", "is", "None", ":", "append", ".", "append", "(", "'loop=false'", ")", "if", "loop", "is", "not", "None", ":", "append", ".", "append", "(", "'loop=%s'", "%", "bool", "(", "loop", ")", ")", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "append", ".", "append", "(", "'%s=%s'", "%", "(", "key", ".", "lower", "(", ")", ",", "value", ")", ")", "return", "'\\n'", ".", "join", "(", "result", "+", "append", "+", "[", "''", "]", ")" ]
Return ImageJ image description from data shape. ImageJ can handle up to 6 dimensions in order TZCYXS. >>> imagej_description((51, 5, 2, 196, 171)) # doctest: +SKIP ImageJ=1.11a images=510 channels=2 slices=5 frames=51 hyperstack=true mode=grayscale loop=false
[ "Return", "ImageJ", "image", "description", "from", "data", "shape", "." ]
python
train
bgyori/pykqml
kqml/kqml_list.py
https://github.com/bgyori/pykqml/blob/c18b39868626215deb634567c6bd7c0838e443c0/kqml/kqml_list.py#L149-L182
def set(self, keyword, value): """Set the element of the list after the given keyword. Parameters ---------- keyword : str The keyword parameter to find in the list. Putting a colon before the keyword is optional, if no colon is given, it is added automatically (e.g. "keyword" will be found as ":keyword" in the list). value : KQMLObject or str If the value is given as str, it is instantiated as a KQMLToken Example: kl = KQMLList.from_string('(FAILURE)') kl.set('reason', 'INVALID_PARAMETER') """ if not keyword.startswith(':'): keyword = ':' + keyword if isinstance(value, str): value = KQMLToken(value) if isinstance(keyword, str): keyword = KQMLToken(keyword) found = False for i, key in enumerate(self.data): if key.to_string().lower() == keyword.lower(): found = True if i < len(self.data)-1: self.data[i+1] = value break if not found: self.data.append(keyword) self.data.append(value)
[ "def", "set", "(", "self", ",", "keyword", ",", "value", ")", ":", "if", "not", "keyword", ".", "startswith", "(", "':'", ")", ":", "keyword", "=", "':'", "+", "keyword", "if", "isinstance", "(", "value", ",", "str", ")", ":", "value", "=", "KQMLToken", "(", "value", ")", "if", "isinstance", "(", "keyword", ",", "str", ")", ":", "keyword", "=", "KQMLToken", "(", "keyword", ")", "found", "=", "False", "for", "i", ",", "key", "in", "enumerate", "(", "self", ".", "data", ")", ":", "if", "key", ".", "to_string", "(", ")", ".", "lower", "(", ")", "==", "keyword", ".", "lower", "(", ")", ":", "found", "=", "True", "if", "i", "<", "len", "(", "self", ".", "data", ")", "-", "1", ":", "self", ".", "data", "[", "i", "+", "1", "]", "=", "value", "break", "if", "not", "found", ":", "self", ".", "data", ".", "append", "(", "keyword", ")", "self", ".", "data", ".", "append", "(", "value", ")" ]
Set the element of the list after the given keyword. Parameters ---------- keyword : str The keyword parameter to find in the list. Putting a colon before the keyword is optional, if no colon is given, it is added automatically (e.g. "keyword" will be found as ":keyword" in the list). value : KQMLObject or str If the value is given as str, it is instantiated as a KQMLToken Example: kl = KQMLList.from_string('(FAILURE)') kl.set('reason', 'INVALID_PARAMETER')
[ "Set", "the", "element", "of", "the", "list", "after", "the", "given", "keyword", "." ]
python
train
jobovy/galpy
galpy/potential/SphericalShellPotential.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/SphericalShellPotential.py#L61-L81
def _evaluate(self,R,z,phi=0.,t=0.): """ NAME: _evaluate PURPOSE: evaluate the potential at R,z INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: Phi(R,z) HISTORY: 2018-08-04 - Written - Bovy (UofT) """ r2= R**2+z**2 if r2 <= self.a2: return -1./self.a else: return -1./nu.sqrt(r2)
[ "def", "_evaluate", "(", "self", ",", "R", ",", "z", ",", "phi", "=", "0.", ",", "t", "=", "0.", ")", ":", "r2", "=", "R", "**", "2", "+", "z", "**", "2", "if", "r2", "<=", "self", ".", "a2", ":", "return", "-", "1.", "/", "self", ".", "a", "else", ":", "return", "-", "1.", "/", "nu", ".", "sqrt", "(", "r2", ")" ]
NAME: _evaluate PURPOSE: evaluate the potential at R,z INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: Phi(R,z) HISTORY: 2018-08-04 - Written - Bovy (UofT)
[ "NAME", ":", "_evaluate", "PURPOSE", ":", "evaluate", "the", "potential", "at", "R", "z", "INPUT", ":", "R", "-", "Galactocentric", "cylindrical", "radius", "z", "-", "vertical", "height", "phi", "-", "azimuth", "t", "-", "time", "OUTPUT", ":", "Phi", "(", "R", "z", ")", "HISTORY", ":", "2018", "-", "08", "-", "04", "-", "Written", "-", "Bovy", "(", "UofT", ")" ]
python
train
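Written out as a formula (a reading aid, not part of the source), and with the amplitude normalized to one as in the snippet, this is the textbook potential of a thin spherical shell of radius a:

    \Phi(R, z) =
    \begin{cases}
      -\dfrac{1}{a}, & r \leq a,\\[4pt]
      -\dfrac{1}{r}, & r > a,
    \end{cases}
    \qquad r^{2} = R^{2} + z^{2}.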
klahnakoski/pyLibrary
mo_math/vendor/strangman/stats.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_math/vendor/strangman/stats.py#L632-L644
def var(inlist): """ Returns the variance of the values in the passed list using N-1 for the denominator (i.e., for estimating population variance). Usage: lvar(inlist) """ n = len(inlist) mn = mean(inlist) deviations = [0] * len(inlist) for i in range(len(inlist)): deviations[i] = inlist[i] - mn return ss(deviations) / float(n - 1)
[ "def", "var", "(", "inlist", ")", ":", "n", "=", "len", "(", "inlist", ")", "mn", "=", "mean", "(", "inlist", ")", "deviations", "=", "[", "0", "]", "*", "len", "(", "inlist", ")", "for", "i", "in", "range", "(", "len", "(", "inlist", ")", ")", ":", "deviations", "[", "i", "]", "=", "inlist", "[", "i", "]", "-", "mn", "return", "ss", "(", "deviations", ")", "/", "float", "(", "n", "-", "1", ")" ]
Returns the variance of the values in the passed list using N-1 for the denominator (i.e., for estimating population variance). Usage: lvar(inlist)
[ "Returns", "the", "variance", "of", "the", "values", "in", "the", "passed", "list", "using", "N", "-", "1", "for", "the", "denominator", "(", "i", ".", "e", ".", "for", "estimating", "population", "variance", ")", "." ]
python
train
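Spelled out (added for clarity, not from the source), the N-1 denominator makes this the unbiased sample-variance estimator, with ss(deviations) supplying the sum of squared deviations:

    s^{2} = \frac{1}{n-1}\sum_{i=1}^{n}\left(x_i - \bar{x}\right)^{2},
    \qquad \bar{x} = \frac{1}{n}\sum_{i=1}^{n} x_i.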
frasertweedale/ledgertools
ltlib/ui.py
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/ui.py#L206-L215
def yn(self, prompt, default=None): """Prompts the user for yes/no confirmation, with optional default""" if default is True: opts = " [Y/n]: " elif default is False: opts = " [y/N]: " else: opts = " [y/n]: " prompt += opts return self.input(curry(filter_yn, default=default), prompt)
[ "def", "yn", "(", "self", ",", "prompt", ",", "default", "=", "None", ")", ":", "if", "default", "is", "True", ":", "opts", "=", "\" [Y/n]: \"", "elif", "default", "is", "False", ":", "opts", "=", "\" [y/N]: \"", "else", ":", "opts", "=", "\" [y/n]: \"", "prompt", "+=", "opts", "return", "self", ".", "input", "(", "curry", "(", "filter_yn", ",", "default", "=", "default", ")", ",", "prompt", ")" ]
Prompts the user for yes/no confirmation, with optional default
[ "Prompts", "the", "user", "for", "yes", "/", "no", "confirmation", "with", "optional", "default" ]
python
train
playpauseandstop/bootstrapper
bootstrapper.py
https://github.com/playpauseandstop/bootstrapper/blob/b216a05f2acb0b9f4919c4e010ff7b0f63fc1393/bootstrapper.py#L276-L331
def main(*args): r"""Bootstrap Python projects and libraries with virtualenv and pip. Also check system requirements before bootstrap and run post bootstrap hook if any. :param \*args: Command line arguments list. """ # Create parser, read arguments from direct input or command line with disable_error_handler(): args = parse_args(args or sys.argv[1:]) # Read current config from file and command line arguments config = read_config(args.config, args) if config is None: return True bootstrap = config[__script__] # Check pre-requirements if not check_pre_requirements(bootstrap['pre_requirements']): return True # Create virtual environment env_args = prepare_args(config['virtualenv'], bootstrap) if not create_env( bootstrap['env'], env_args, bootstrap['recreate'], bootstrap['ignore_activated'], bootstrap['quiet'] ): # Exit if couldn't create virtual environment return True # And install library or project here pip_args = prepare_args(config['pip'], bootstrap) if not install( bootstrap['env'], bootstrap['requirements'], pip_args, bootstrap['ignore_activated'], bootstrap['install_dev_requirements'], bootstrap['quiet'] ): # Exist if couldn't install requirements into venv return True # Run post-bootstrap hook run_hook(bootstrap['hook'], bootstrap, bootstrap['quiet']) # All OK! if not bootstrap['quiet']: print_message('All OK!') # False means everything went alright, exit code: 0 return False
[ "def", "main", "(", "*", "args", ")", ":", "# Create parser, read arguments from direct input or command line", "with", "disable_error_handler", "(", ")", ":", "args", "=", "parse_args", "(", "args", "or", "sys", ".", "argv", "[", "1", ":", "]", ")", "# Read current config from file and command line arguments", "config", "=", "read_config", "(", "args", ".", "config", ",", "args", ")", "if", "config", "is", "None", ":", "return", "True", "bootstrap", "=", "config", "[", "__script__", "]", "# Check pre-requirements", "if", "not", "check_pre_requirements", "(", "bootstrap", "[", "'pre_requirements'", "]", ")", ":", "return", "True", "# Create virtual environment", "env_args", "=", "prepare_args", "(", "config", "[", "'virtualenv'", "]", ",", "bootstrap", ")", "if", "not", "create_env", "(", "bootstrap", "[", "'env'", "]", ",", "env_args", ",", "bootstrap", "[", "'recreate'", "]", ",", "bootstrap", "[", "'ignore_activated'", "]", ",", "bootstrap", "[", "'quiet'", "]", ")", ":", "# Exit if couldn't create virtual environment", "return", "True", "# And install library or project here", "pip_args", "=", "prepare_args", "(", "config", "[", "'pip'", "]", ",", "bootstrap", ")", "if", "not", "install", "(", "bootstrap", "[", "'env'", "]", ",", "bootstrap", "[", "'requirements'", "]", ",", "pip_args", ",", "bootstrap", "[", "'ignore_activated'", "]", ",", "bootstrap", "[", "'install_dev_requirements'", "]", ",", "bootstrap", "[", "'quiet'", "]", ")", ":", "# Exist if couldn't install requirements into venv", "return", "True", "# Run post-bootstrap hook", "run_hook", "(", "bootstrap", "[", "'hook'", "]", ",", "bootstrap", ",", "bootstrap", "[", "'quiet'", "]", ")", "# All OK!", "if", "not", "bootstrap", "[", "'quiet'", "]", ":", "print_message", "(", "'All OK!'", ")", "# False means everything went alright, exit code: 0", "return", "False" ]
r"""Bootstrap Python projects and libraries with virtualenv and pip. Also check system requirements before bootstrap and run post bootstrap hook if any. :param \*args: Command line arguments list.
[ "r", "Bootstrap", "Python", "projects", "and", "libraries", "with", "virtualenv", "and", "pip", "." ]
python
valid
saltstack/salt
salt/modules/tls.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/tls.py#L164-L171
def _microtime(): ''' Return a Unix timestamp as a string of digits :return: ''' val1, val2 = math.modf(time.time()) val2 = int(val2) return '{0:f}{1}'.format(val1, val2)
[ "def", "_microtime", "(", ")", ":", "val1", ",", "val2", "=", "math", ".", "modf", "(", "time", ".", "time", "(", ")", ")", "val2", "=", "int", "(", "val2", ")", "return", "'{0:f}{1}'", ".", "format", "(", "val1", ",", "val2", ")" ]
Return a Unix timestamp as a string of digits :return:
[ "Return", "a", "Unix", "timestamp", "as", "a", "string", "of", "digits", ":", "return", ":" ]
python
train
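A quick illustration of the string the helper produces (the timestamp value is made up): math.modf splits the epoch time into fractional and integer parts, and the format string concatenates the fractional part (six decimals) with the integer seconds.

    # Illustrative values only.
    import math

    val1, val2 = math.modf(1617181920.25)    # (0.25, 1617181920.0)
    '{0:f}{1}'.format(val1, int(val2))       # '0.2500001617181920'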
quantumlib/Cirq
cirq/linalg/transformations.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/linalg/transformations.py#L274-L306
def partial_trace(tensor: np.ndarray, keep_indices: List[int]) -> np.ndarray: """Takes the partial trace of a given tensor. The input tensor must have shape `(d_0, ..., d_{k-1}, d_0, ..., d_{k-1})`. The trace is done over all indices that are not in keep_indices. The resulting tensor has shape `(d_{i_0}, ..., d_{i_r}, d_{i_0}, ..., d_{i_r})` where `i_j` is the `j`th element of `keep_indices`. Args: tensor: The tensor to sum over. This tensor must have a shape `(d_0, ..., d_{k-1}, d_0, ..., d_{k-1})`. keep_indices: Which indices to not sum over. These are only the indices of the first half of the tensors indices (i.e. all elements must be between `0` and `tensor.ndims / 2 - 1` inclusive). Raises: ValueError: if the tensor is not of the correct shape or the indices are not from the first half of valid indices for the tensor. """ ndim = tensor.ndim // 2 if not all(tensor.shape[i] == tensor.shape[i + ndim] for i in range(ndim)): raise ValueError('Tensors must have shape (d_0,...,d_{{k-1}},d_0,...,' 'd_{{k-1}}) but had shape ({}).'.format(tensor.shape)) if not all(i < ndim for i in keep_indices): raise ValueError('keep_indices were {} but must be in first half, ' 'i.e. have index less that {}.'.format(keep_indices, ndim)) keep_set = set(keep_indices) keep_map = dict(zip(keep_indices, sorted(keep_indices))) left_indices = [keep_map[i] if i in keep_set else i for i in range(ndim)] right_indices = [ndim + i if i in keep_set else i for i in left_indices] return np.einsum(tensor, left_indices + right_indices)
[ "def", "partial_trace", "(", "tensor", ":", "np", ".", "ndarray", ",", "keep_indices", ":", "List", "[", "int", "]", ")", "->", "np", ".", "ndarray", ":", "ndim", "=", "tensor", ".", "ndim", "//", "2", "if", "not", "all", "(", "tensor", ".", "shape", "[", "i", "]", "==", "tensor", ".", "shape", "[", "i", "+", "ndim", "]", "for", "i", "in", "range", "(", "ndim", ")", ")", ":", "raise", "ValueError", "(", "'Tensors must have shape (d_0,...,d_{{k-1}},d_0,...,'", "'d_{{k-1}}) but had shape ({}).'", ".", "format", "(", "tensor", ".", "shape", ")", ")", "if", "not", "all", "(", "i", "<", "ndim", "for", "i", "in", "keep_indices", ")", ":", "raise", "ValueError", "(", "'keep_indices were {} but must be in first half, '", "'i.e. have index less that {}.'", ".", "format", "(", "keep_indices", ",", "ndim", ")", ")", "keep_set", "=", "set", "(", "keep_indices", ")", "keep_map", "=", "dict", "(", "zip", "(", "keep_indices", ",", "sorted", "(", "keep_indices", ")", ")", ")", "left_indices", "=", "[", "keep_map", "[", "i", "]", "if", "i", "in", "keep_set", "else", "i", "for", "i", "in", "range", "(", "ndim", ")", "]", "right_indices", "=", "[", "ndim", "+", "i", "if", "i", "in", "keep_set", "else", "i", "for", "i", "in", "left_indices", "]", "return", "np", ".", "einsum", "(", "tensor", ",", "left_indices", "+", "right_indices", ")" ]
Takes the partial trace of a given tensor. The input tensor must have shape `(d_0, ..., d_{k-1}, d_0, ..., d_{k-1})`. The trace is done over all indices that are not in keep_indices. The resulting tensor has shape `(d_{i_0}, ..., d_{i_r}, d_{i_0}, ..., d_{i_r})` where `i_j` is the `j`th element of `keep_indices`. Args: tensor: The tensor to sum over. This tensor must have a shape `(d_0, ..., d_{k-1}, d_0, ..., d_{k-1})`. keep_indices: Which indices to not sum over. These are only the indices of the first half of the tensors indices (i.e. all elements must be between `0` and `tensor.ndims / 2 - 1` inclusive). Raises: ValueError: if the tensor is not of the correct shape or the indices are not from the first half of valid indices for the tensor.
[ "Takes", "the", "partial", "trace", "of", "a", "given", "tensor", "." ]
python
train
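A short usage sketch (assuming the function is importable as cirq.linalg.partial_trace, as the file path suggests): trace out the second qubit of a two-qubit density matrix reshaped to (2, 2, 2, 2).

    # Illustrative only.
    import numpy as np
    from cirq.linalg import partial_trace

    # Density matrix of the two-qubit state |00>, reshaped to (d_0, d_1, d_0, d_1).
    psi = np.array([1, 0, 0, 0], dtype=complex)
    rho = np.outer(psi, psi.conj()).reshape((2, 2, 2, 2))

    reduced = partial_trace(rho, keep_indices=[0])   # keep qubit 0, trace out qubit 1
    # reduced has shape (2, 2) and equals |0><0|.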
mikeywaites/flask-arrested
arrested/contrib/kim_arrested.py
https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/contrib/kim_arrested.py#L187-L208
def get_response_handler_params(self, **params): """Return a config object that will be used to configure the KimResponseHandler :returns: a dictionary of config options :rtype: dict """ params = super(KimEndpoint, self).get_response_handler_params(**params) params['mapper_class'] = self.mapper_class params['role'] = self.serialize_role # After a successfull attempt to marshal an object has been made, a response # is generated using the RepsonseHandler. Rather than taking the class level # setting for many by default, pull it from the request handler params config to # ensure Marshaling and Serializing are run the same way. if self._is_marshal_request(): req_params = self.get_request_handler_params() params['many'] = req_params.get('many', self.many) else: params['many'] = self.many return params
[ "def", "get_response_handler_params", "(", "self", ",", "*", "*", "params", ")", ":", "params", "=", "super", "(", "KimEndpoint", ",", "self", ")", ".", "get_response_handler_params", "(", "*", "*", "params", ")", "params", "[", "'mapper_class'", "]", "=", "self", ".", "mapper_class", "params", "[", "'role'", "]", "=", "self", ".", "serialize_role", "# After a successfull attempt to marshal an object has been made, a response", "# is generated using the RepsonseHandler. Rather than taking the class level", "# setting for many by default, pull it from the request handler params config to", "# ensure Marshaling and Serializing are run the same way.", "if", "self", ".", "_is_marshal_request", "(", ")", ":", "req_params", "=", "self", ".", "get_request_handler_params", "(", ")", "params", "[", "'many'", "]", "=", "req_params", ".", "get", "(", "'many'", ",", "self", ".", "many", ")", "else", ":", "params", "[", "'many'", "]", "=", "self", ".", "many", "return", "params" ]
Return a config object that will be used to configure the KimResponseHandler :returns: a dictionary of config options :rtype: dict
[ "Return", "a", "config", "object", "that", "will", "be", "used", "to", "configure", "the", "KimResponseHandler" ]
python
train
tensorflow/tensorboard
tensorboard/notebook.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/notebook.py#L238-L289
def _display(port=None, height=None, print_message=False, display_handle=None): """Internal version of `display`. Args: port: As with `display`. height: As with `display`. print_message: True to print which TensorBoard instance was selected for display (if applicable), or False otherwise. display_handle: If not None, an IPython display handle into which to render TensorBoard. """ if height is None: height = 800 if port is None: infos = manager.get_all() if not infos: raise ValueError("Can't display TensorBoard: no known instances running.") else: info = max(manager.get_all(), key=lambda x: x.start_time) port = info.port else: infos = [i for i in manager.get_all() if i.port == port] info = ( max(infos, key=lambda x: x.start_time) if infos else None ) if print_message: if info is not None: message = ( "Selecting TensorBoard with {data_source} " "(started {delta} ago; port {port}, pid {pid})." ).format( data_source=manager.data_source_from_info(info), delta=_time_delta_from_info(info), port=info.port, pid=info.pid, ) print(message) else: # The user explicitly provided a port, and we don't have any # additional information. There's nothing useful to say. pass fn = { _CONTEXT_COLAB: _display_colab, _CONTEXT_IPYTHON: _display_ipython, _CONTEXT_NONE: _display_cli, }[_get_context()] return fn(port=port, height=height, display_handle=display_handle)
[ "def", "_display", "(", "port", "=", "None", ",", "height", "=", "None", ",", "print_message", "=", "False", ",", "display_handle", "=", "None", ")", ":", "if", "height", "is", "None", ":", "height", "=", "800", "if", "port", "is", "None", ":", "infos", "=", "manager", ".", "get_all", "(", ")", "if", "not", "infos", ":", "raise", "ValueError", "(", "\"Can't display TensorBoard: no known instances running.\"", ")", "else", ":", "info", "=", "max", "(", "manager", ".", "get_all", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", ".", "start_time", ")", "port", "=", "info", ".", "port", "else", ":", "infos", "=", "[", "i", "for", "i", "in", "manager", ".", "get_all", "(", ")", "if", "i", ".", "port", "==", "port", "]", "info", "=", "(", "max", "(", "infos", ",", "key", "=", "lambda", "x", ":", "x", ".", "start_time", ")", "if", "infos", "else", "None", ")", "if", "print_message", ":", "if", "info", "is", "not", "None", ":", "message", "=", "(", "\"Selecting TensorBoard with {data_source} \"", "\"(started {delta} ago; port {port}, pid {pid}).\"", ")", ".", "format", "(", "data_source", "=", "manager", ".", "data_source_from_info", "(", "info", ")", ",", "delta", "=", "_time_delta_from_info", "(", "info", ")", ",", "port", "=", "info", ".", "port", ",", "pid", "=", "info", ".", "pid", ",", ")", "print", "(", "message", ")", "else", ":", "# The user explicitly provided a port, and we don't have any", "# additional information. There's nothing useful to say.", "pass", "fn", "=", "{", "_CONTEXT_COLAB", ":", "_display_colab", ",", "_CONTEXT_IPYTHON", ":", "_display_ipython", ",", "_CONTEXT_NONE", ":", "_display_cli", ",", "}", "[", "_get_context", "(", ")", "]", "return", "fn", "(", "port", "=", "port", ",", "height", "=", "height", ",", "display_handle", "=", "display_handle", ")" ]
Internal version of `display`. Args: port: As with `display`. height: As with `display`. print_message: True to print which TensorBoard instance was selected for display (if applicable), or False otherwise. display_handle: If not None, an IPython display handle into which to render TensorBoard.
[ "Internal", "version", "of", "display", "." ]
python
train
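A brief usage sketch of the notebook helper above: it assumes a TensorBoard instance is already running and that the public wrappers in tensorboard.notebook (list/display) are available; the port value is illustrative, not taken from the record.

# Hedged sketch: render a running TensorBoard inside a notebook cell.
from tensorboard import notebook

notebook.list()                            # print the known running instances
notebook.display(port=6006, height=800)    # embed the instance bound to port 6006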
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L1835-L1842
def help_center_section_articles(self, id, locale=None, **kwargs): "https://developer.zendesk.com/rest_api/docs/help_center/articles#list-articles" api_path = "/api/v2/help_center/sections/{id}/articles.json" api_path = api_path.format(id=id) if locale: api_opt_path = "/api/v2/help_center/{locale}/sections/{id}/articles.json" api_path = api_opt_path.format(id=id, locale=locale) return self.call(api_path, **kwargs)
[ "def", "help_center_section_articles", "(", "self", ",", "id", ",", "locale", "=", "None", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/help_center/sections/{id}/articles.json\"", "api_path", "=", "api_path", ".", "format", "(", "id", "=", "id", ")", "if", "locale", ":", "api_opt_path", "=", "\"/api/v2/help_center/{locale}/sections/{id}/articles.json\"", "api_path", "=", "api_opt_path", ".", "format", "(", "id", "=", "id", ",", "locale", "=", "locale", ")", "return", "self", ".", "call", "(", "api_path", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/help_center/articles#list-articles
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "help_center", "/", "articles#list", "-", "articles" ]
python
train
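A hedged usage sketch for help_center_section_articles above; the Zendesk URL, credentials, section id and the get_all_pages flag are assumptions for illustration, not values from the record.

# Illustrative only: list the articles of one Help Center section, per locale.
from zdesk import Zendesk

zd = Zendesk('https://example.zendesk.com', 'agent@example.com', 'api_token', True)
articles = zd.help_center_section_articles(id=123, locale='en-us', get_all_pages=True)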
openego/eDisGo
edisgo/tools/pypsa_io.py
https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/tools/pypsa_io.py#L1619-L1737
def update_pypsa_storage(pypsa, storages, storages_lines): """ Adds storages and their lines to pypsa representation of the edisgo graph. This function effects the following attributes of the pypsa network: components ('StorageUnit'), storage_units, storage_units_t (p_set, q_set), buses, lines Parameters ----------- pypsa : :pypsa:`pypsa.Network<network>` storages : :obj:`list` List with storages of type :class:`~.grid.components.Storage` to add to pypsa network. storages_lines : :obj:`list` List with lines of type :class:`~.grid.components.Line` that connect storages to the grid. """ bus = {'name': [], 'v_nom': [], 'x': [], 'y': []} line = {'name': [], 'bus0': [], 'bus1': [], 'type': [], 'x': [], 'r': [], 's_nom': [], 'length': []} storage = { 'name': [], 'bus': [], 'p_nom': [], 'state_of_charge_initial': [], 'efficiency_store': [], 'efficiency_dispatch': [], 'standing_loss': []} for s in storages: bus_name = '_'.join(['Bus', repr(s)]) storage['name'].append(repr(s)) storage['bus'].append(bus_name) storage['p_nom'].append(s.nominal_power / 1e3) storage['state_of_charge_initial'].append(s.soc_initial) storage['efficiency_store'].append(s.efficiency_in) storage['efficiency_dispatch'].append(s.efficiency_out) storage['standing_loss'].append(s.standing_loss) bus['name'].append(bus_name) bus['v_nom'].append(s.grid.voltage_nom) bus['x'].append(s.geom.x) bus['y'].append(s.geom.y) omega = 2 * pi * 50 for l in storages_lines: line['name'].append(repr(l)) adj_nodes = l.grid.graph.nodes_from_line(l) if isinstance(l.grid, LVGrid): if isinstance(adj_nodes[0], LVStation): line['bus0'].append( '_'.join(['Bus', adj_nodes[0].__repr__(side='lv')])) else: line['bus0'].append('_'.join(['Bus', repr(adj_nodes[0])])) if isinstance(adj_nodes[1], LVStation): line['bus1'].append( '_'.join(['Bus', adj_nodes[1].__repr__(side='lv')])) else: line['bus1'].append('_'.join(['Bus', repr(adj_nodes[1])])) else: if isinstance(adj_nodes[0], LVStation): line['bus0'].append( '_'.join(['Bus', adj_nodes[0].__repr__(side='mv')])) elif isinstance(adj_nodes[0], MVStation): line['bus0'].append( '_'.join(['Bus', adj_nodes[0].__repr__(side='lv')])) else: line['bus0'].append('_'.join(['Bus', repr(adj_nodes[0])])) if isinstance(adj_nodes[1], LVStation): line['bus1'].append( '_'.join(['Bus', adj_nodes[1].__repr__(side='mv')])) elif isinstance(adj_nodes[1], MVStation): line['bus1'].append( '_'.join(['Bus', adj_nodes[1].__repr__(side='lv')])) else: line['bus1'].append('_'.join(['Bus', repr(adj_nodes[1])])) line['type'].append("") line['x'].append(l.type['L'] * omega / 1e3 * l.length) line['r'].append(l.type['R'] * l.length) line['s_nom'].append( sqrt(3) * l.type['I_max_th'] * l.type['U_n'] / 1e3) line['length'].append(l.length) # import new components to pypsa pypsa.import_components_from_dataframe( pd.DataFrame(bus).set_index('name'), 'Bus') pypsa.import_components_from_dataframe( pd.DataFrame(storage).set_index('name'), 'StorageUnit') pypsa.import_components_from_dataframe( pd.DataFrame(line).set_index('name'), 'Line') # import time series of storages and buses to pypsa timeseries_storage_p = pd.DataFrame() timeseries_storage_q = pd.DataFrame() for s in storages: timeseries_storage_p[repr(s)] = s.pypsa_timeseries('p').loc[ pypsa.storage_units_t.p_set.index] timeseries_storage_q[repr(s)] = s.pypsa_timeseries('q').loc[ pypsa.storage_units_t.q_set.index] import_series_from_dataframe(pypsa, timeseries_storage_p, 'StorageUnit', 'p_set') import_series_from_dataframe(pypsa, timeseries_storage_q, 'StorageUnit', 'q_set')
[ "def", "update_pypsa_storage", "(", "pypsa", ",", "storages", ",", "storages_lines", ")", ":", "bus", "=", "{", "'name'", ":", "[", "]", ",", "'v_nom'", ":", "[", "]", ",", "'x'", ":", "[", "]", ",", "'y'", ":", "[", "]", "}", "line", "=", "{", "'name'", ":", "[", "]", ",", "'bus0'", ":", "[", "]", ",", "'bus1'", ":", "[", "]", ",", "'type'", ":", "[", "]", ",", "'x'", ":", "[", "]", ",", "'r'", ":", "[", "]", ",", "'s_nom'", ":", "[", "]", ",", "'length'", ":", "[", "]", "}", "storage", "=", "{", "'name'", ":", "[", "]", ",", "'bus'", ":", "[", "]", ",", "'p_nom'", ":", "[", "]", ",", "'state_of_charge_initial'", ":", "[", "]", ",", "'efficiency_store'", ":", "[", "]", ",", "'efficiency_dispatch'", ":", "[", "]", ",", "'standing_loss'", ":", "[", "]", "}", "for", "s", "in", "storages", ":", "bus_name", "=", "'_'", ".", "join", "(", "[", "'Bus'", ",", "repr", "(", "s", ")", "]", ")", "storage", "[", "'name'", "]", ".", "append", "(", "repr", "(", "s", ")", ")", "storage", "[", "'bus'", "]", ".", "append", "(", "bus_name", ")", "storage", "[", "'p_nom'", "]", ".", "append", "(", "s", ".", "nominal_power", "/", "1e3", ")", "storage", "[", "'state_of_charge_initial'", "]", ".", "append", "(", "s", ".", "soc_initial", ")", "storage", "[", "'efficiency_store'", "]", ".", "append", "(", "s", ".", "efficiency_in", ")", "storage", "[", "'efficiency_dispatch'", "]", ".", "append", "(", "s", ".", "efficiency_out", ")", "storage", "[", "'standing_loss'", "]", ".", "append", "(", "s", ".", "standing_loss", ")", "bus", "[", "'name'", "]", ".", "append", "(", "bus_name", ")", "bus", "[", "'v_nom'", "]", ".", "append", "(", "s", ".", "grid", ".", "voltage_nom", ")", "bus", "[", "'x'", "]", ".", "append", "(", "s", ".", "geom", ".", "x", ")", "bus", "[", "'y'", "]", ".", "append", "(", "s", ".", "geom", ".", "y", ")", "omega", "=", "2", "*", "pi", "*", "50", "for", "l", "in", "storages_lines", ":", "line", "[", "'name'", "]", ".", "append", "(", "repr", "(", "l", ")", ")", "adj_nodes", "=", "l", ".", "grid", ".", "graph", ".", "nodes_from_line", "(", "l", ")", "if", "isinstance", "(", "l", ".", "grid", ",", "LVGrid", ")", ":", "if", "isinstance", "(", "adj_nodes", "[", "0", "]", ",", "LVStation", ")", ":", "line", "[", "'bus0'", "]", ".", "append", "(", "'_'", ".", "join", "(", "[", "'Bus'", ",", "adj_nodes", "[", "0", "]", ".", "__repr__", "(", "side", "=", "'lv'", ")", "]", ")", ")", "else", ":", "line", "[", "'bus0'", "]", ".", "append", "(", "'_'", ".", "join", "(", "[", "'Bus'", ",", "repr", "(", "adj_nodes", "[", "0", "]", ")", "]", ")", ")", "if", "isinstance", "(", "adj_nodes", "[", "1", "]", ",", "LVStation", ")", ":", "line", "[", "'bus1'", "]", ".", "append", "(", "'_'", ".", "join", "(", "[", "'Bus'", ",", "adj_nodes", "[", "1", "]", ".", "__repr__", "(", "side", "=", "'lv'", ")", "]", ")", ")", "else", ":", "line", "[", "'bus1'", "]", ".", "append", "(", "'_'", ".", "join", "(", "[", "'Bus'", ",", "repr", "(", "adj_nodes", "[", "1", "]", ")", "]", ")", ")", "else", ":", "if", "isinstance", "(", "adj_nodes", "[", "0", "]", ",", "LVStation", ")", ":", "line", "[", "'bus0'", "]", ".", "append", "(", "'_'", ".", "join", "(", "[", "'Bus'", ",", "adj_nodes", "[", "0", "]", ".", "__repr__", "(", "side", "=", "'mv'", ")", "]", ")", ")", "elif", "isinstance", "(", "adj_nodes", "[", "0", "]", ",", "MVStation", ")", ":", "line", "[", "'bus0'", "]", ".", "append", "(", "'_'", ".", "join", "(", "[", "'Bus'", ",", "adj_nodes", "[", "0", "]", ".", "__repr__", "(", "side", "=", "'lv'", ")", "]", ")", ")", 
"else", ":", "line", "[", "'bus0'", "]", ".", "append", "(", "'_'", ".", "join", "(", "[", "'Bus'", ",", "repr", "(", "adj_nodes", "[", "0", "]", ")", "]", ")", ")", "if", "isinstance", "(", "adj_nodes", "[", "1", "]", ",", "LVStation", ")", ":", "line", "[", "'bus1'", "]", ".", "append", "(", "'_'", ".", "join", "(", "[", "'Bus'", ",", "adj_nodes", "[", "1", "]", ".", "__repr__", "(", "side", "=", "'mv'", ")", "]", ")", ")", "elif", "isinstance", "(", "adj_nodes", "[", "1", "]", ",", "MVStation", ")", ":", "line", "[", "'bus1'", "]", ".", "append", "(", "'_'", ".", "join", "(", "[", "'Bus'", ",", "adj_nodes", "[", "1", "]", ".", "__repr__", "(", "side", "=", "'lv'", ")", "]", ")", ")", "else", ":", "line", "[", "'bus1'", "]", ".", "append", "(", "'_'", ".", "join", "(", "[", "'Bus'", ",", "repr", "(", "adj_nodes", "[", "1", "]", ")", "]", ")", ")", "line", "[", "'type'", "]", ".", "append", "(", "\"\"", ")", "line", "[", "'x'", "]", ".", "append", "(", "l", ".", "type", "[", "'L'", "]", "*", "omega", "/", "1e3", "*", "l", ".", "length", ")", "line", "[", "'r'", "]", ".", "append", "(", "l", ".", "type", "[", "'R'", "]", "*", "l", ".", "length", ")", "line", "[", "'s_nom'", "]", ".", "append", "(", "sqrt", "(", "3", ")", "*", "l", ".", "type", "[", "'I_max_th'", "]", "*", "l", ".", "type", "[", "'U_n'", "]", "/", "1e3", ")", "line", "[", "'length'", "]", ".", "append", "(", "l", ".", "length", ")", "# import new components to pypsa", "pypsa", ".", "import_components_from_dataframe", "(", "pd", ".", "DataFrame", "(", "bus", ")", ".", "set_index", "(", "'name'", ")", ",", "'Bus'", ")", "pypsa", ".", "import_components_from_dataframe", "(", "pd", ".", "DataFrame", "(", "storage", ")", ".", "set_index", "(", "'name'", ")", ",", "'StorageUnit'", ")", "pypsa", ".", "import_components_from_dataframe", "(", "pd", ".", "DataFrame", "(", "line", ")", ".", "set_index", "(", "'name'", ")", ",", "'Line'", ")", "# import time series of storages and buses to pypsa", "timeseries_storage_p", "=", "pd", ".", "DataFrame", "(", ")", "timeseries_storage_q", "=", "pd", ".", "DataFrame", "(", ")", "for", "s", "in", "storages", ":", "timeseries_storage_p", "[", "repr", "(", "s", ")", "]", "=", "s", ".", "pypsa_timeseries", "(", "'p'", ")", ".", "loc", "[", "pypsa", ".", "storage_units_t", ".", "p_set", ".", "index", "]", "timeseries_storage_q", "[", "repr", "(", "s", ")", "]", "=", "s", ".", "pypsa_timeseries", "(", "'q'", ")", ".", "loc", "[", "pypsa", ".", "storage_units_t", ".", "q_set", ".", "index", "]", "import_series_from_dataframe", "(", "pypsa", ",", "timeseries_storage_p", ",", "'StorageUnit'", ",", "'p_set'", ")", "import_series_from_dataframe", "(", "pypsa", ",", "timeseries_storage_q", ",", "'StorageUnit'", ",", "'q_set'", ")" ]
Adds storages and their lines to pypsa representation of the edisgo graph. This function effects the following attributes of the pypsa network: components ('StorageUnit'), storage_units, storage_units_t (p_set, q_set), buses, lines Parameters ----------- pypsa : :pypsa:`pypsa.Network<network>` storages : :obj:`list` List with storages of type :class:`~.grid.components.Storage` to add to pypsa network. storages_lines : :obj:`list` List with lines of type :class:`~.grid.components.Line` that connect storages to the grid.
[ "Adds", "storages", "and", "their", "lines", "to", "pypsa", "representation", "of", "the", "edisgo", "graph", "." ]
python
train
xtuml/pyxtuml
bridgepoint/ooaofooa.py
https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/bridgepoint/ooaofooa.py#L433-L445
def mk_association(m, r_rel): ''' Create a pyxtuml association from a R_REL in ooaofooa. ''' handler = { 'R_SIMP': mk_simple_association, 'R_ASSOC': mk_linked_association, 'R_SUBSUP': mk_subsuper_association, 'R_COMP': mk_derived_association, } inst = subtype(r_rel, 206) fn = handler.get(type(inst).__name__) return fn(m, inst)
[ "def", "mk_association", "(", "m", ",", "r_rel", ")", ":", "handler", "=", "{", "'R_SIMP'", ":", "mk_simple_association", ",", "'R_ASSOC'", ":", "mk_linked_association", ",", "'R_SUBSUP'", ":", "mk_subsuper_association", ",", "'R_COMP'", ":", "mk_derived_association", ",", "}", "inst", "=", "subtype", "(", "r_rel", ",", "206", ")", "fn", "=", "handler", ".", "get", "(", "type", "(", "inst", ")", ".", "__name__", ")", "return", "fn", "(", "m", ",", "inst", ")" ]
Create a pyxtuml association from a R_REL in ooaofooa.
[ "Create", "a", "pyxtuml", "association", "from", "a", "R_REL", "in", "ooaofooa", "." ]
python
test
couchbase/couchbase-python-client
couchbase/__init__.py
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/__init__.py#L71-L95
def set_pickle_converters(encode, decode): """ Modify the default Pickle conversion functions. This affects all :class:`~couchbase.bucket.Bucket` instances. These functions will be called instead of the default ones (``pickle.dumps`` and ``pickle.loads``) to encode and decode values to and from the Pickle format (when :const:`FMT_PICKLE` is used). :param callable encode: Callable to invoke when encoding an object to Pickle. This should have the same prototype as ``pickle.dumps`` with the exception that it is only ever called with a single argument :param callable decode: Callable to invoke when decoding a Pickle encoded object to a Python object. Should have the same prototype as ``pickle.loads`` with the exception that it is only ever passed a single argument :return: A tuple of ``(old encoder, old decoder)`` No exceptions are raised and it is the responsibility of the caller to ensure that the provided functions operate correctly. """ ret = _LCB._modify_helpers(pickle_encode=encode, pickle_decode=decode) return (ret['pickle_encode'], ret['pickle_decode'])
[ "def", "set_pickle_converters", "(", "encode", ",", "decode", ")", ":", "ret", "=", "_LCB", ".", "_modify_helpers", "(", "pickle_encode", "=", "encode", ",", "pickle_decode", "=", "decode", ")", "return", "(", "ret", "[", "'pickle_encode'", "]", ",", "ret", "[", "'pickle_decode'", "]", ")" ]
Modify the default Pickle conversion functions. This affects all :class:`~couchbase.bucket.Bucket` instances. These functions will be called instead of the default ones (``pickle.dumps`` and ``pickle.loads``) to encode and decode values to and from the Pickle format (when :const:`FMT_PICKLE` is used). :param callable encode: Callable to invoke when encoding an object to Pickle. This should have the same prototype as ``pickle.dumps`` with the exception that it is only ever called with a single argument :param callable decode: Callable to invoke when decoding a Pickle encoded object to a Python object. Should have the same prototype as ``pickle.loads`` with the exception that it is only ever passed a single argument :return: A tuple of ``(old encoder, old decoder)`` No exceptions are raised and it is the responsibility of the caller to ensure that the provided functions operate correctly.
[ "Modify", "the", "default", "Pickle", "conversion", "functions", ".", "This", "affects", "all", ":", "class", ":", "~couchbase", ".", "bucket", ".", "Bucket", "instances", "." ]
python
train
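A short, hedged example of swapping the Pickle codec as described above; choosing protocol 2 is purely illustrative.

# Sketch: encode FMT_PICKLE values with pickle protocol 2, keep the default decoder.
import pickle
from functools import partial

import couchbase

old_encode, old_decode = couchbase.set_pickle_converters(
    partial(pickle.dumps, protocol=2),  # called instead of pickle.dumps
    pickle.loads,                       # decoder keeps the stock behaviour
)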
learningequality/iceqube
src/iceqube/common/utils.py
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/common/utils.py#L18-L33
def import_stringified_func(funcstring): """ Import a string that represents a module and function, e.g. {module}.{funcname}. Given a function f, import_stringified_func(stringify_func(f)) will return the same function. :param funcstring: String to try to import :return: callable """ assert isinstance(funcstring, str) modulestring, funcname = funcstring.rsplit('.', 1) mod = importlib.import_module(modulestring) func = getattr(mod, funcname) return func
[ "def", "import_stringified_func", "(", "funcstring", ")", ":", "assert", "isinstance", "(", "funcstring", ",", "str", ")", "modulestring", ",", "funcname", "=", "funcstring", ".", "rsplit", "(", "'.'", ",", "1", ")", "mod", "=", "importlib", ".", "import_module", "(", "modulestring", ")", "func", "=", "getattr", "(", "mod", ",", "funcname", ")", "return", "func" ]
Import a string that represents a module and function, e.g. {module}.{funcname}. Given a function f, import_stringified_func(stringify_func(f)) will return the same function. :param funcstring: String to try to import :return: callable
[ "Import", "a", "string", "that", "represents", "a", "module", "and", "function", "e", ".", "g", ".", "{", "module", "}", ".", "{", "funcname", "}", "." ]
python
train
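A minimal, runnable illustration of the helper above using a stdlib dotted path; it assumes the module is importable as iceqube.common.utils.

# 'os.path.join' is split into module 'os.path' and attribute 'join',
# so the call returns the os.path.join function itself.
import os
from iceqube.common.utils import import_stringified_func

join = import_stringified_func('os.path.join')
assert join('a', 'b') == os.path.join('a', 'b')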
scot-dev/scot
scot/var.py
https://github.com/scot-dev/scot/blob/48598b79d4400dad893b134cd2194715511facda/scot/var.py#L290-L304
def _get_msge_with_gradient(data, delta, xvschema, skipstep, p): """Calculate mean squared generalization error and its gradient, automatically selecting the best function. """ t, m, l = data.shape n = (l - p) * t underdetermined = n < m * p if underdetermined: return _msge_with_gradient_underdetermined(data, delta, xvschema, skipstep, p) else: return _msge_with_gradient_overdetermined(data, delta, xvschema, skipstep, p)
[ "def", "_get_msge_with_gradient", "(", "data", ",", "delta", ",", "xvschema", ",", "skipstep", ",", "p", ")", ":", "t", ",", "m", ",", "l", "=", "data", ".", "shape", "n", "=", "(", "l", "-", "p", ")", "*", "t", "underdetermined", "=", "n", "<", "m", "*", "p", "if", "underdetermined", ":", "return", "_msge_with_gradient_underdetermined", "(", "data", ",", "delta", ",", "xvschema", ",", "skipstep", ",", "p", ")", "else", ":", "return", "_msge_with_gradient_overdetermined", "(", "data", ",", "delta", ",", "xvschema", ",", "skipstep", ",", "p", ")" ]
Calculate mean squared generalization error and its gradient, automatically selecting the best function.
[ "Calculate", "mean", "squared", "generalization", "error", "and", "its", "gradient", "automatically", "selecting", "the", "best", "function", "." ]
python
train
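A small worked example of the under-determined check in the helper above; the trial, channel and sample counts are made up for illustration.

# With t=4 trials, m=16 channels, l=100 samples and model order p=10:
#   n = (l - p) * t = (100 - 10) * 4 = 360 equations
#   m * p           = 16 * 10        = 160 unknowns per channel
# 360 >= 160, so the over-determined branch is chosen; with a single trial
# (t=1), n = 90 < 160 and the under-determined variant is used instead.
t, m, l, p = 4, 16, 100, 10
underdetermined = (l - p) * t < m * p   # False for these values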
gbiggs/rtctree
rtctree/component.py
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/component.py#L1056-L1065
def activate_conf_set(self, set_name): '''Activate a configuration set by name. @raises NoSuchConfSetError ''' with self._mutex: if not set_name in self.conf_sets: raise exceptions.NoSuchConfSetError(set_name) self._conf.activate_configuration_set(set_name)
[ "def", "activate_conf_set", "(", "self", ",", "set_name", ")", ":", "with", "self", ".", "_mutex", ":", "if", "not", "set_name", "in", "self", ".", "conf_sets", ":", "raise", "exceptions", ".", "NoSuchConfSetError", "(", "set_name", ")", "self", ".", "_conf", ".", "activate_configuration_set", "(", "set_name", ")" ]
Activate a configuration set by name. @raises NoSuchConfSetError
[ "Activate", "a", "configuration", "set", "by", "name", "." ]
python
train
LIVVkit/LIVVkit
livvkit/util/TexHelper.py
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/util/TexHelper.py#L65-L90
def translate_page(data): """ Translates data elements with data['Type'] = 'page'. This is the top level of translation that occurs, and delegates the translation of other element types contained on a page to their proper functions. """ if "Page" != data["Type"]: return "" tex_str = ('\\documentclass{article}\\n' + '\\usepackage{placeins}\\n' + '\\title{LIVVkit}\\n' + '\\author{$USER}\\n' + '\\usepackage[parfill]{parskip}\\n' + '\\begin{document}\\n' + '\\maketitle\\n' ).replace('$USER', livvkit.user) content = data["Data"] for tag_name in ["Elements", "Tabs"]: for tag in content.get(tag_name, []): print("Translating " + tag["Type"]) tex_str += translate_map[tag["Type"]](tag) tex_str += '\n\\end{document}' return tex_str
[ "def", "translate_page", "(", "data", ")", ":", "if", "\"Page\"", "!=", "data", "[", "\"Type\"", "]", ":", "return", "\"\"", "tex_str", "=", "(", "'\\\\documentclass{article}\\\\n'", "+", "'\\\\usepackage{placeins}\\\\n'", "+", "'\\\\title{LIVVkit}\\\\n'", "+", "'\\\\author{$USER}\\\\n'", "+", "'\\\\usepackage[parfill]{parskip}\\\\n'", "+", "'\\\\begin{document}\\\\n'", "+", "'\\\\maketitle\\\\n'", ")", ".", "replace", "(", "'$USER'", ",", "livvkit", ".", "user", ")", "content", "=", "data", "[", "\"Data\"", "]", "for", "tag_name", "in", "[", "\"Elements\"", ",", "\"Tabs\"", "]", ":", "for", "tag", "in", "content", ".", "get", "(", "tag_name", ",", "[", "]", ")", ":", "print", "(", "\"Translating \"", "+", "tag", "[", "\"Type\"", "]", ")", "tex_str", "+=", "translate_map", "[", "tag", "[", "\"Type\"", "]", "]", "(", "tag", ")", "tex_str", "+=", "'\\n\\\\end{document}'", "return", "tex_str" ]
Translates data elements with data['Type'] = 'page'. This is the top level of translation that occurs, and delegates the translation of other element types contained on a page to their proper functions.
[ "Translates", "data", "elements", "with", "data", "[", "Type", "]", "=", "page", ".", "This", "is", "the", "top", "level", "of", "translation", "that", "occurs", "and", "delegates", "the", "translation", "of", "other", "element", "types", "contained", "on", "a", "page", "to", "their", "proper", "functions", "." ]
python
train
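A hedged, minimal call of the translator above; passing an empty 'Data' mapping keeps the example independent of the other translate_* handlers, and it assumes livvkit is importable with livvkit.user set.

# Only the LaTeX preamble and \end{document} are produced for an empty page.
from livvkit.util.TexHelper import translate_page

tex = translate_page({'Type': 'Page', 'Data': {}})
print(tex.startswith('\\documentclass{article}'))   # True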
timothycrosley/jiphy
jiphy/parser.py
https://github.com/timothycrosley/jiphy/blob/6e09be9c3496ca40901df70fc9b14d2ca3ec2e04/jiphy/parser.py#L35-L66
def text_till(self, strings, keep_index=False): """Returns all text till it encounters the given string (or one of the given strings)""" if isinstance(strings, str): strings = [strings] original_index = self.index text = "" matched_string = "" while self.more: test_against = self.characters(len(max(strings, key=len))) for string in strings: if string.startswith("^"): if test_against[0] in (" ", "\t", "\n", ")", "(") and test_against[1:].startswith(string[1:]): matched_string = string break if test_against.startswith(string): matched_string = string break if matched_string: break text += self.pop() self += 1 if keep_index: self.index = original_index return (text, matched_string)
[ "def", "text_till", "(", "self", ",", "strings", ",", "keep_index", "=", "False", ")", ":", "if", "isinstance", "(", "strings", ",", "str", ")", ":", "strings", "=", "[", "strings", "]", "original_index", "=", "self", ".", "index", "text", "=", "\"\"", "matched_string", "=", "\"\"", "while", "self", ".", "more", ":", "test_against", "=", "self", ".", "characters", "(", "len", "(", "max", "(", "strings", ",", "key", "=", "len", ")", ")", ")", "for", "string", "in", "strings", ":", "if", "string", ".", "startswith", "(", "\"^\"", ")", ":", "if", "test_against", "[", "0", "]", "in", "(", "\" \"", ",", "\"\\t\"", ",", "\"\\n\"", ",", "\")\"", ",", "\"(\"", ")", "and", "test_against", "[", "1", ":", "]", ".", "startswith", "(", "string", "[", "1", ":", "]", ")", ":", "matched_string", "=", "string", "break", "if", "test_against", ".", "startswith", "(", "string", ")", ":", "matched_string", "=", "string", "break", "if", "matched_string", ":", "break", "text", "+=", "self", ".", "pop", "(", ")", "self", "+=", "1", "if", "keep_index", ":", "self", ".", "index", "=", "original_index", "return", "(", "text", ",", "matched_string", ")" ]
Returns all text till it encounters the given string (or one of the given strings)
[ "Returns", "all", "text", "till", "it", "encounters", "the", "given", "string", "(", "or", "one", "of", "the", "given", "strings", ")" ]
python
train
ThreatConnect-Inc/tcex
tcex/tcex_bin_run.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_run.py#L110-L152
def _logger(self): """Create logger instance. Returns: logger: An instance of logging """ log_level = { 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, } level = log_level.get(self.args.logging_level.lower()) # Formatter tx_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s ' tx_format += '(%(funcName)s:%(lineno)d)' formatter = logging.Formatter(tx_format) # Logger log = logging.getLogger('tcrun') # # Stream Handler # sh = logging.StreamHandler() # sh.set_name('sh') # sh.setLevel(level) # sh.setFormatter(formatter) # log.addHandler(sh) # File Handler if not os.access('log', os.W_OK): os.makedirs('log') logfile = os.path.join('log', 'run.log') fh = logging.FileHandler(logfile) fh.set_name('fh') fh.setLevel(logging.DEBUG) fh.setFormatter(formatter) log.addHandler(fh) log.setLevel(level) log.info('Logging Level: {}'.format(logging.getLevelName(level))) return log
[ "def", "_logger", "(", "self", ")", ":", "log_level", "=", "{", "'debug'", ":", "logging", ".", "DEBUG", ",", "'info'", ":", "logging", ".", "INFO", ",", "'warning'", ":", "logging", ".", "WARNING", ",", "'error'", ":", "logging", ".", "ERROR", ",", "'critical'", ":", "logging", ".", "CRITICAL", ",", "}", "level", "=", "log_level", ".", "get", "(", "self", ".", "args", ".", "logging_level", ".", "lower", "(", ")", ")", "# Formatter", "tx_format", "=", "'%(asctime)s - %(name)s - %(levelname)s - %(message)s '", "tx_format", "+=", "'(%(funcName)s:%(lineno)d)'", "formatter", "=", "logging", ".", "Formatter", "(", "tx_format", ")", "# Logger", "log", "=", "logging", ".", "getLogger", "(", "'tcrun'", ")", "# # Stream Handler", "# sh = logging.StreamHandler()", "# sh.set_name('sh')", "# sh.setLevel(level)", "# sh.setFormatter(formatter)", "# log.addHandler(sh)", "# File Handler", "if", "not", "os", ".", "access", "(", "'log'", ",", "os", ".", "W_OK", ")", ":", "os", ".", "makedirs", "(", "'log'", ")", "logfile", "=", "os", ".", "path", ".", "join", "(", "'log'", ",", "'run.log'", ")", "fh", "=", "logging", ".", "FileHandler", "(", "logfile", ")", "fh", ".", "set_name", "(", "'fh'", ")", "fh", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "fh", ".", "setFormatter", "(", "formatter", ")", "log", ".", "addHandler", "(", "fh", ")", "log", ".", "setLevel", "(", "level", ")", "log", ".", "info", "(", "'Logging Level: {}'", ".", "format", "(", "logging", ".", "getLevelName", "(", "level", ")", ")", ")", "return", "log" ]
Create logger instance. Returns: logger: An instance of logging
[ "Create", "logger", "instance", "." ]
python
train
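The record above wires a named logger to a DEBUG-level file handler under ./log; the stand-alone sketch below re-creates that pattern with only the stdlib and is not part of the tcex API.

# Minimal re-creation of the same pattern (Python 3, stdlib only).
import logging
import os

def make_run_logger(level_name='info'):
    level = getattr(logging, level_name.upper(), logging.INFO)
    fmt = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(funcName)s:%(lineno)d)')
    log = logging.getLogger('tcrun')
    os.makedirs('log', exist_ok=True)            # ensure the log directory exists
    fh = logging.FileHandler(os.path.join('log', 'run.log'))
    fh.setLevel(logging.DEBUG)                   # file handler captures everything
    fh.setFormatter(fmt)
    log.addHandler(fh)
    log.setLevel(level)                          # effective level stays configurable
    return log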
whyscream/dspam-milter
dspam/utils.py
https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/utils.py#L87-L108
def config_str2dict(option_value):
    """
    Parse the value of a config option and convert it to a dictionary.

    The configuration allows lines formatted like:
      foo = Bar:1,Baz,Flub:0.75
    This gets converted to a dictionary:
      foo = { 'Bar': 1, 'Baz': 0, 'Flub': 0.75 }

    Args:
    option_value -- The config string to parse.

    """
    dict = {}
    for key in option_value.split(','):
        if ':' in key:
            key, value = key.split(':')
            value = float(value)
        else:
            value = 0
        dict[key] = value
    return dict
[ "def", "config_str2dict", "(", "option_value", ")", ":", "dict", "=", "{", "}", "for", "key", "in", "option_value", ".", "split", "(", "','", ")", ":", "if", "':'", "in", "key", ":", "key", ",", "value", "=", "pair", ".", "split", "(", "':'", ")", "value", "=", "float", "(", "value", ")", "else", ":", "value", "=", "0", "dict", "[", "key", "]", "=", "value", "return", "dict" ]
Parse the value of a config option and convert it to a dictionary. The configuration allows lines formatted like: foo = Bar:1,Baz,Flub:0.75 This gets converted to a dictionary: foo = { 'Bar': 1, 'Baz': 0, 'Flub': 0.75 } Args: option_value -- The config string to parse.
[ "Parse", "the", "value", "of", "a", "config", "option", "and", "convert", "it", "to", "a", "dictionary", "." ]
python
train
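A quick check of the parser above using the example string from its own docstring; keys without an explicit value default to 0, and explicit values come back as floats.

from dspam.utils import config_str2dict

parsed = config_str2dict('Bar:1,Baz,Flub:0.75')
assert parsed == {'Bar': 1.0, 'Baz': 0, 'Flub': 0.75}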
saltstack/salt
salt/states/azurearm_network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/azurearm_network.py#L261-L321
def virtual_network_absent(name, resource_group, connection_auth=None): ''' .. versionadded:: 2019.2.0 Ensure a virtual network does not exist in the resource group. :param name: Name of the virtual network. :param resource_group: The resource group assigned to the virtual network. :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the Azure Resource Manager API. ''' ret = { 'name': name, 'result': False, 'comment': '', 'changes': {} } if not isinstance(connection_auth, dict): ret['comment'] = 'Connection information must be specified via connection_auth dictionary!' return ret vnet = __salt__['azurearm_network.virtual_network_get']( name, resource_group, azurearm_log_level='info', **connection_auth ) if 'error' in vnet: ret['result'] = True ret['comment'] = 'Virtual network {0} was not found.'.format(name) return ret elif __opts__['test']: ret['comment'] = 'Virtual network {0} would be deleted.'.format(name) ret['result'] = None ret['changes'] = { 'old': vnet, 'new': {}, } return ret deleted = __salt__['azurearm_network.virtual_network_delete'](name, resource_group, **connection_auth) if deleted: ret['result'] = True ret['comment'] = 'Virtual network {0} has been deleted.'.format(name) ret['changes'] = { 'old': vnet, 'new': {} } return ret ret['comment'] = 'Failed to delete virtual network {0}!'.format(name) return ret
[ "def", "virtual_network_absent", "(", "name", ",", "resource_group", ",", "connection_auth", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "False", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "if", "not", "isinstance", "(", "connection_auth", ",", "dict", ")", ":", "ret", "[", "'comment'", "]", "=", "'Connection information must be specified via connection_auth dictionary!'", "return", "ret", "vnet", "=", "__salt__", "[", "'azurearm_network.virtual_network_get'", "]", "(", "name", ",", "resource_group", ",", "azurearm_log_level", "=", "'info'", ",", "*", "*", "connection_auth", ")", "if", "'error'", "in", "vnet", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Virtual network {0} was not found.'", ".", "format", "(", "name", ")", "return", "ret", "elif", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'Virtual network {0} would be deleted.'", ".", "format", "(", "name", ")", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'changes'", "]", "=", "{", "'old'", ":", "vnet", ",", "'new'", ":", "{", "}", ",", "}", "return", "ret", "deleted", "=", "__salt__", "[", "'azurearm_network.virtual_network_delete'", "]", "(", "name", ",", "resource_group", ",", "*", "*", "connection_auth", ")", "if", "deleted", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Virtual network {0} has been deleted.'", ".", "format", "(", "name", ")", "ret", "[", "'changes'", "]", "=", "{", "'old'", ":", "vnet", ",", "'new'", ":", "{", "}", "}", "return", "ret", "ret", "[", "'comment'", "]", "=", "'Failed to delete virtual network {0}!'", ".", "format", "(", "name", ")", "return", "ret" ]
.. versionadded:: 2019.2.0 Ensure a virtual network does not exist in the resource group. :param name: Name of the virtual network. :param resource_group: The resource group assigned to the virtual network. :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the Azure Resource Manager API.
[ "..", "versionadded", "::", "2019", ".", "2", ".", "0" ]
python
train
mrcagney/gtfstk
gtfstk/routes.py
https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/routes.py#L453-L487
def get_routes( feed: "Feed", date: Optional[str] = None, time: Optional[str] = None ) -> DataFrame: """ Return a subset of ``feed.routes`` Parameters ----------- feed : Feed date : string YYYYMMDD date string restricting routes to only those active on the date time : string HH:MM:SS time string, possibly with HH > 23, restricting routes to only those active during the time Returns ------- DataFrame A subset of ``feed.routes`` Notes ----- Assume the following feed attributes are not ``None``: - ``feed.routes`` - Those used in :func:`.trips.get_trips`. """ if date is None: return feed.routes.copy() trips = feed.get_trips(date, time) R = trips["route_id"].unique() return feed.routes[feed.routes["route_id"].isin(R)]
[ "def", "get_routes", "(", "feed", ":", "\"Feed\"", ",", "date", ":", "Optional", "[", "str", "]", "=", "None", ",", "time", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "DataFrame", ":", "if", "date", "is", "None", ":", "return", "feed", ".", "routes", ".", "copy", "(", ")", "trips", "=", "feed", ".", "get_trips", "(", "date", ",", "time", ")", "R", "=", "trips", "[", "\"route_id\"", "]", ".", "unique", "(", ")", "return", "feed", ".", "routes", "[", "feed", ".", "routes", "[", "\"route_id\"", "]", ".", "isin", "(", "R", ")", "]" ]
Return a subset of ``feed.routes`` Parameters ----------- feed : Feed date : string YYYYMMDD date string restricting routes to only those active on the date time : string HH:MM:SS time string, possibly with HH > 23, restricting routes to only those active during the time Returns ------- DataFrame A subset of ``feed.routes`` Notes ----- Assume the following feed attributes are not ``None``: - ``feed.routes`` - Those used in :func:`.trips.get_trips`.
[ "Return", "a", "subset", "of", "feed", ".", "routes" ]
python
train
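A short usage sketch for the function above; the feed archive path, distance units and date are placeholders, and read_gtfs is assumed to be the usual gtfstk entry point for loading a feed.

# Illustrative only: keep the routes active on one service date.
import gtfstk as gt
from gtfstk.routes import get_routes

feed = gt.read_gtfs('gtfs.zip', dist_units='km')     # hypothetical feed archive
routes_today = get_routes(feed, date='20190601')     # subset of feed.routes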
ericmjl/nxviz
nxviz/utils.py
https://github.com/ericmjl/nxviz/blob/6ea5823a8030a686f165fbe37d7a04d0f037ecc9/nxviz/utils.py#L22-L76
def infer_data_type(data_container): """ For a given container of data, infer the type of data as one of continuous, categorical, or ordinal. For now, it is a one-to-one mapping as such: - str: categorical - int: ordinal - float: continuous There may be better ways that are not currently implemented below. For example, with a list of numbers, we can check whether the number of unique entries is less than or equal to 12, but has over 10000+ entries. This would be a good candidate for floats being categorical. :param data_container: A generic container of data points. :type data_container: `iterable` """ # Defensive programming checks. # 0. Ensure that we are dealing with lists or tuples, and nothing else. assert isinstance(data_container, list) or isinstance( data_container, tuple ), "data_container should be a list or tuple." # 1. Don't want to deal with only single values. assert ( len(set(data_container)) > 1 ), "There should be more than one value in the data container." # 2. Don't want to deal with mixed data. assert is_data_homogenous( data_container ), "Data are not of a homogenous type!" # Once we check that the data type of the container is homogenous, we only # need to check the first element in the data container for its type. datum = data_container[0] # Return statements below # treat binomial data as categorical # TODO: make tests for this. if len(set(data_container)) == 2: return "categorical" elif isinstance(datum, str): return "categorical" elif isinstance(datum, int): return "ordinal" elif isinstance(datum, float): return "continuous" else: raise ValueError("Not possible to tell what the data type is.")
[ "def", "infer_data_type", "(", "data_container", ")", ":", "# Defensive programming checks.", "# 0. Ensure that we are dealing with lists or tuples, and nothing else.", "assert", "isinstance", "(", "data_container", ",", "list", ")", "or", "isinstance", "(", "data_container", ",", "tuple", ")", ",", "\"data_container should be a list or tuple.\"", "# 1. Don't want to deal with only single values.", "assert", "(", "len", "(", "set", "(", "data_container", ")", ")", ">", "1", ")", ",", "\"There should be more than one value in the data container.\"", "# 2. Don't want to deal with mixed data.", "assert", "is_data_homogenous", "(", "data_container", ")", ",", "\"Data are not of a homogenous type!\"", "# Once we check that the data type of the container is homogenous, we only", "# need to check the first element in the data container for its type.", "datum", "=", "data_container", "[", "0", "]", "# Return statements below", "# treat binomial data as categorical", "# TODO: make tests for this.", "if", "len", "(", "set", "(", "data_container", ")", ")", "==", "2", ":", "return", "\"categorical\"", "elif", "isinstance", "(", "datum", ",", "str", ")", ":", "return", "\"categorical\"", "elif", "isinstance", "(", "datum", ",", "int", ")", ":", "return", "\"ordinal\"", "elif", "isinstance", "(", "datum", ",", "float", ")", ":", "return", "\"continuous\"", "else", ":", "raise", "ValueError", "(", "\"Not possible to tell what the data type is.\"", ")" ]
For a given container of data, infer the type of data as one of continuous, categorical, or ordinal. For now, it is a one-to-one mapping as such: - str: categorical - int: ordinal - float: continuous There may be better ways that are not currently implemented below. For example, with a list of numbers, we can check whether the number of unique entries is less than or equal to 12, but has over 10000+ entries. This would be a good candidate for floats being categorical. :param data_container: A generic container of data points. :type data_container: `iterable`
[ "For", "a", "given", "container", "of", "data", "infer", "the", "type", "of", "data", "as", "one", "of", "continuous", "categorical", "or", "ordinal", "." ]
python
train
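A few direct checks of the inference rules above; note that exactly two distinct values are always reported as categorical, regardless of their type.

from nxviz.utils import infer_data_type

assert infer_data_type([0.1, 0.2, 0.3]) == 'continuous'
assert infer_data_type([1, 2, 3]) == 'ordinal'
assert infer_data_type(['a', 'b', 'c']) == 'categorical'
assert infer_data_type([0.0, 1.0]) == 'categorical'   # binomial special case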
koodaamo/iconframer
iconframer/iconframer.py
https://github.com/koodaamo/iconframer/blob/58d71fd78bfe3893a7f20384f429592d033d802a/iconframer/iconframer.py#L23-L39
def process_path(label, pth): "check and expand paths" if pth is None: sys.exit("no %s path given" % label) if pth.startswith("/"): pass elif pth[0] in (".", "~"): pth = os.path.realpath(pth) else: pth = os.getcwd() + os.sep + pth if not os.path.exists(pth): sys.exit("%s path %s does not exist" % (label, pth)) return pth
[ "def", "process_path", "(", "label", ",", "pth", ")", ":", "if", "pth", "is", "None", ":", "sys", ".", "exit", "(", "\"no %s path given\"", "%", "label", ")", "if", "pth", ".", "startswith", "(", "\"/\"", ")", ":", "pass", "elif", "pth", "[", "0", "]", "in", "(", "\".\"", ",", "\"~\"", ")", ":", "pth", "=", "os", ".", "path", ".", "realpath", "(", "pth", ")", "else", ":", "pth", "=", "os", ".", "getcwd", "(", ")", "+", "os", ".", "sep", "+", "pth", "if", "not", "os", ".", "path", ".", "exists", "(", "pth", ")", ":", "sys", ".", "exit", "(", "\"%s path %s does not exist\"", "%", "(", "label", ",", "pth", ")", ")", "return", "pth" ]
check and expand paths
[ "check", "and", "expand", "paths" ]
python
train
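A hedged example of the path expansion above; the label and path are made up, and a missing path ends the process via sys.exit rather than raising an exception.

# Relative inputs are resolved against the current working directory;
# '.' and '~' prefixes go through os.path.realpath.
from iconframer.iconframer import process_path

template_dir = process_path('template', './templates')   # absolute path, or exit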
bhmm/bhmm
bhmm/hmm/generic_sampled_hmm.py
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/hmm/generic_sampled_hmm.py#L70-L75
def initial_distribution_samples(self): r""" Samples of the initial distribution """ res = np.empty((self.nsamples, self.nstates), dtype=config.dtype) for i in range(self.nsamples): res[i, :] = self._sampled_hmms[i].stationary_distribution return res
[ "def", "initial_distribution_samples", "(", "self", ")", ":", "res", "=", "np", ".", "empty", "(", "(", "self", ".", "nsamples", ",", "self", ".", "nstates", ")", ",", "dtype", "=", "config", ".", "dtype", ")", "for", "i", "in", "range", "(", "self", ".", "nsamples", ")", ":", "res", "[", "i", ",", ":", "]", "=", "self", ".", "_sampled_hmms", "[", "i", "]", ".", "stationary_distribution", "return", "res" ]
r""" Samples of the initial distribution
[ "r", "Samples", "of", "the", "initial", "distribution" ]
python
train
Unidata/siphon
siphon/ncss.py
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/ncss.py#L408-L425
def parse_csv_header(line): """Parse the CSV header returned by TDS.""" units = {} names = [] for var in line.split(','): start = var.find('[') if start < 0: names.append(str(var)) continue else: names.append(str(var[:start])) end = var.find(']', start) unitstr = var[start + 1:end] eq = unitstr.find('=') if eq >= 0: # go past = and ", skip final " units[names[-1]] = unitstr[eq + 2:-1] return names, units
[ "def", "parse_csv_header", "(", "line", ")", ":", "units", "=", "{", "}", "names", "=", "[", "]", "for", "var", "in", "line", ".", "split", "(", "','", ")", ":", "start", "=", "var", ".", "find", "(", "'['", ")", "if", "start", "<", "0", ":", "names", ".", "append", "(", "str", "(", "var", ")", ")", "continue", "else", ":", "names", ".", "append", "(", "str", "(", "var", "[", ":", "start", "]", ")", ")", "end", "=", "var", ".", "find", "(", "']'", ",", "start", ")", "unitstr", "=", "var", "[", "start", "+", "1", ":", "end", "]", "eq", "=", "unitstr", ".", "find", "(", "'='", ")", "if", "eq", ">=", "0", ":", "# go past = and \", skip final \"", "units", "[", "names", "[", "-", "1", "]", "]", "=", "unitstr", "[", "eq", "+", "2", ":", "-", "1", "]", "return", "names", ",", "units" ]
Parse the CSV header returned by TDS.
[ "Parse", "the", "CSV", "header", "returned", "by", "TDS", "." ]
python
train
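A concrete header line in the TDS CSV style run through the parser above; the variable names and unit are invented for the example.

from siphon.ncss import parse_csv_header

names, units = parse_csv_header('date,temperature[unit="K"],station_id')
assert names == ['date', 'temperature', 'station_id']
assert units == {'temperature': 'K'}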
bykof/billomapy
billomapy/billomapy.py
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L1880-L1895
def get_all_incoming_properties(self, params=None): """ Get all incoming properties This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param params: search params :return: list """ if not params: params = {} return self._iterate_through_pages( self.get_incoming_properties_per_page, resource=INCOMING_PROPERTIES, **{'params': params} )
[ "def", "get_all_incoming_properties", "(", "self", ",", "params", "=", "None", ")", ":", "if", "not", "params", ":", "params", "=", "{", "}", "return", "self", ".", "_iterate_through_pages", "(", "self", ".", "get_incoming_properties_per_page", ",", "resource", "=", "INCOMING_PROPERTIES", ",", "*", "*", "{", "'params'", ":", "params", "}", ")" ]
Get all incoming properties This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param params: search params :return: list
[ "Get", "all", "incoming", "properties", "This", "will", "iterate", "over", "all", "pages", "until", "it", "gets", "all", "elements", ".", "So", "if", "the", "rate", "limit", "exceeded", "it", "will", "throw", "an", "Exception", "and", "you", "will", "get", "nothing" ]
python
train
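A hedged usage sketch for the pager above; the Billomapy constructor arguments and the incoming_id filter are assumptions for illustration only.

# Illustrative only: page through every incoming property matching a filter.
from billomapy import Billomapy

client = Billomapy('my-billomat-id', 'api-key', 'app-id', 'app-secret')
props = client.get_all_incoming_properties(params={'incoming_id': 1234})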
klahnakoski/pyLibrary
mo_threads/queues.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_threads/queues.py#L98-L109
def push(self, value): """ SNEAK value TO FRONT OF THE QUEUE """ if self.closed and not self.allow_add_after_close: Log.error("Do not push to closed queue") with self.lock: self._wait_for_queue_space() if not self.closed: self.queue.appendleft(value) return self
[ "def", "push", "(", "self", ",", "value", ")", ":", "if", "self", ".", "closed", "and", "not", "self", ".", "allow_add_after_close", ":", "Log", ".", "error", "(", "\"Do not push to closed queue\"", ")", "with", "self", ".", "lock", ":", "self", ".", "_wait_for_queue_space", "(", ")", "if", "not", "self", ".", "closed", ":", "self", ".", "queue", ".", "appendleft", "(", "value", ")", "return", "self" ]
SNEAK value TO FRONT OF THE QUEUE
[ "SNEAK", "value", "TO", "FRONT", "OF", "THE", "QUEUE" ]
python
train
Clinical-Genomics/scout
scout/parse/variant/models.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/parse/variant/models.py#L2-L20
def parse_genetic_models(models_info, case_id): """Parse the genetic models entry of a vcf Args: models_info(str): The raw vcf information case_id(str) Returns: genetic_models(list) """ genetic_models = [] if models_info: for family_info in models_info.split(','): splitted_info = family_info.split(':') if splitted_info[0] == case_id: genetic_models = splitted_info[1].split('|') return genetic_models
[ "def", "parse_genetic_models", "(", "models_info", ",", "case_id", ")", ":", "genetic_models", "=", "[", "]", "if", "models_info", ":", "for", "family_info", "in", "models_info", ".", "split", "(", "','", ")", ":", "splitted_info", "=", "family_info", ".", "split", "(", "':'", ")", "if", "splitted_info", "[", "0", "]", "==", "case_id", ":", "genetic_models", "=", "splitted_info", "[", "1", "]", ".", "split", "(", "'|'", ")", "return", "genetic_models" ]
Parse the genetic models entry of a vcf Args: models_info(str): The raw vcf information case_id(str) Returns: genetic_models(list)
[ "Parse", "the", "genetic", "models", "entry", "of", "a", "vcf" ]
python
test
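A direct illustration of the VCF annotation format the parser above expects; the family ids and model names are made up.

from scout.parse.variant.models import parse_genetic_models

models_info = 'fam1:AR_hom|AD,fam2:AR_comp'
assert parse_genetic_models(models_info, 'fam1') == ['AR_hom', 'AD']
assert parse_genetic_models(models_info, 'fam3') == []   # unknown case id
assert parse_genetic_models(None, 'fam1') == []          # empty annotation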
Azure/azure-python-devtools
src/azure_devtools/ci_tools/github_tools.py
https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/github_tools.py#L336-L340
def create_comment(self, text): """Mimic issue API, so we can use it everywhere. Return dashboard comment. """ return DashboardComment.get_or_create(self._issue_or_pr, self._header, text)
[ "def", "create_comment", "(", "self", ",", "text", ")", ":", "return", "DashboardComment", ".", "get_or_create", "(", "self", ".", "_issue_or_pr", ",", "self", ".", "_header", ",", "text", ")" ]
Mimic issue API, so we can use it everywhere. Return dashboard comment.
[ "Mimic", "issue", "API", "so", "we", "can", "use", "it", "everywhere", ".", "Return", "dashboard", "comment", "." ]
python
train
molmod/molmod
molmod/graphs.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1310-L1334
def iter_final_matches(self, canonical_match, subject_graph, one_match): """Given a match, iterate over all related equivalent matches When criteria sets are defined, the iterator runs over all symmetric equivalent matches that fulfill one of the criteria sets. When not criteria sets are defined, the iterator only yields the input match. """ if self.criteria_sets is None or one_match: yield canonical_match else: for criteria_set in self.criteria_sets: satisfied_match_tags = set([]) for symmetry in self.pattern_graph.symmetries: final_match = canonical_match * symmetry #print final_match if criteria_set.test_match(final_match, self.pattern_graph, subject_graph): match_tags = tuple( self.vertex_tags.get(symmetry.reverse[vertex0]) for vertex0 in range(self.pattern_graph.num_vertices) ) if match_tags not in satisfied_match_tags: final_match.__dict__.update(criteria_set.info) yield final_match satisfied_match_tags.add(match_tags)
[ "def", "iter_final_matches", "(", "self", ",", "canonical_match", ",", "subject_graph", ",", "one_match", ")", ":", "if", "self", ".", "criteria_sets", "is", "None", "or", "one_match", ":", "yield", "canonical_match", "else", ":", "for", "criteria_set", "in", "self", ".", "criteria_sets", ":", "satisfied_match_tags", "=", "set", "(", "[", "]", ")", "for", "symmetry", "in", "self", ".", "pattern_graph", ".", "symmetries", ":", "final_match", "=", "canonical_match", "*", "symmetry", "#print final_match", "if", "criteria_set", ".", "test_match", "(", "final_match", ",", "self", ".", "pattern_graph", ",", "subject_graph", ")", ":", "match_tags", "=", "tuple", "(", "self", ".", "vertex_tags", ".", "get", "(", "symmetry", ".", "reverse", "[", "vertex0", "]", ")", "for", "vertex0", "in", "range", "(", "self", ".", "pattern_graph", ".", "num_vertices", ")", ")", "if", "match_tags", "not", "in", "satisfied_match_tags", ":", "final_match", ".", "__dict__", ".", "update", "(", "criteria_set", ".", "info", ")", "yield", "final_match", "satisfied_match_tags", ".", "add", "(", "match_tags", ")" ]
Given a match, iterate over all related equivalent matches When criteria sets are defined, the iterator runs over all symmetric equivalent matches that fulfill one of the criteria sets. When not criteria sets are defined, the iterator only yields the input match.
[ "Given", "a", "match", "iterate", "over", "all", "related", "equivalent", "matches" ]
python
train
projectshift/shift-boiler
boiler/user/util/oauth_providers.py
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/util/oauth_providers.py#L87-L102
def vkontakte_config(self, id, secret, scope=None, offline=False, **_): """ Get config dictionary for vkontakte oauth """ if scope is None: scope = 'email,offline' if offline: scope += ',offline' token_params = dict(scope=scope) config = dict( request_token_url=None, access_token_url='https://oauth.vk.com/access_token', authorize_url='https://oauth.vk.com/authorize', base_url='https://api.vk.com/method/', consumer_key=id, consumer_secret=secret, request_token_params=token_params ) return config
[ "def", "vkontakte_config", "(", "self", ",", "id", ",", "secret", ",", "scope", "=", "None", ",", "offline", "=", "False", ",", "*", "*", "_", ")", ":", "if", "scope", "is", "None", ":", "scope", "=", "'email,offline'", "if", "offline", ":", "scope", "+=", "',offline'", "token_params", "=", "dict", "(", "scope", "=", "scope", ")", "config", "=", "dict", "(", "request_token_url", "=", "None", ",", "access_token_url", "=", "'https://oauth.vk.com/access_token'", ",", "authorize_url", "=", "'https://oauth.vk.com/authorize'", ",", "base_url", "=", "'https://api.vk.com/method/'", ",", "consumer_key", "=", "id", ",", "consumer_secret", "=", "secret", ",", "request_token_params", "=", "token_params", ")", "return", "config" ]
Get config dictionary for vkontakte oauth
[ "Get", "config", "dictionary", "for", "vkontakte", "oauth" ]
python
train
shmir/PyIxExplorer
ixexplorer/ixe_hw.py
https://github.com/shmir/PyIxExplorer/blob/d6946b9ce0e8961507cc912062e10c365d4beee2/ixexplorer/ixe_hw.py#L272-L282
def enable_capture_state(self, state, writeToHw=False): """ Enable/Disable capture on resource group """ if state: activePorts = self.rePortInList.findall(self.activePortList) self.activeCapturePortList = "{{" + activePorts[0] + "}}" else: self.activeCapturePortList = "{{""}}" if (writeToHw): self.ix_command('write')
[ "def", "enable_capture_state", "(", "self", ",", "state", ",", "writeToHw", "=", "False", ")", ":", "if", "state", ":", "activePorts", "=", "self", ".", "rePortInList", ".", "findall", "(", "self", ".", "activePortList", ")", "self", ".", "activeCapturePortList", "=", "\"{{\"", "+", "activePorts", "[", "0", "]", "+", "\"}}\"", "else", ":", "self", ".", "activeCapturePortList", "=", "\"{{\"", "\"}}\"", "if", "(", "writeToHw", ")", ":", "self", ".", "ix_command", "(", "'write'", ")" ]
Enable/Disable capture on resource group
[ "Enable", "/", "Disable", "capture", "on", "resource", "group" ]
python
train
kensho-technologies/graphql-compiler
graphql_compiler/query_formatting/match_formatting.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/query_formatting/match_formatting.py#L78-L113
def _safe_match_argument(expected_type, argument_value): """Return a MATCH (SQL) string representing the given argument value.""" if GraphQLString.is_same_type(expected_type): return _safe_match_string(argument_value) elif GraphQLID.is_same_type(expected_type): # IDs can be strings or numbers, but the GraphQL library coerces them to strings. # We will follow suit and treat them as strings. if not isinstance(argument_value, six.string_types): if isinstance(argument_value, bytes): # should only happen in py3 argument_value = argument_value.decode('utf-8') else: argument_value = six.text_type(argument_value) return _safe_match_string(argument_value) elif GraphQLFloat.is_same_type(expected_type): return represent_float_as_str(argument_value) elif GraphQLInt.is_same_type(expected_type): # Special case: in Python, isinstance(True, int) returns True. # Safeguard against this with an explicit check against bool type. if isinstance(argument_value, bool): raise GraphQLInvalidArgumentError(u'Attempting to represent a non-int as an int: ' u'{}'.format(argument_value)) return type_check_and_str(int, argument_value) elif GraphQLBoolean.is_same_type(expected_type): return type_check_and_str(bool, argument_value) elif GraphQLDecimal.is_same_type(expected_type): return _safe_match_decimal(argument_value) elif GraphQLDate.is_same_type(expected_type): return _safe_match_date_and_datetime(expected_type, (datetime.date,), argument_value) elif GraphQLDateTime.is_same_type(expected_type): return _safe_match_date_and_datetime(expected_type, (datetime.datetime, arrow.Arrow), argument_value) elif isinstance(expected_type, GraphQLList): return _safe_match_list(expected_type.of_type, argument_value) else: raise AssertionError(u'Could not safely represent the requested GraphQL type: ' u'{} {}'.format(expected_type, argument_value))
[ "def", "_safe_match_argument", "(", "expected_type", ",", "argument_value", ")", ":", "if", "GraphQLString", ".", "is_same_type", "(", "expected_type", ")", ":", "return", "_safe_match_string", "(", "argument_value", ")", "elif", "GraphQLID", ".", "is_same_type", "(", "expected_type", ")", ":", "# IDs can be strings or numbers, but the GraphQL library coerces them to strings.", "# We will follow suit and treat them as strings.", "if", "not", "isinstance", "(", "argument_value", ",", "six", ".", "string_types", ")", ":", "if", "isinstance", "(", "argument_value", ",", "bytes", ")", ":", "# should only happen in py3", "argument_value", "=", "argument_value", ".", "decode", "(", "'utf-8'", ")", "else", ":", "argument_value", "=", "six", ".", "text_type", "(", "argument_value", ")", "return", "_safe_match_string", "(", "argument_value", ")", "elif", "GraphQLFloat", ".", "is_same_type", "(", "expected_type", ")", ":", "return", "represent_float_as_str", "(", "argument_value", ")", "elif", "GraphQLInt", ".", "is_same_type", "(", "expected_type", ")", ":", "# Special case: in Python, isinstance(True, int) returns True.", "# Safeguard against this with an explicit check against bool type.", "if", "isinstance", "(", "argument_value", ",", "bool", ")", ":", "raise", "GraphQLInvalidArgumentError", "(", "u'Attempting to represent a non-int as an int: '", "u'{}'", ".", "format", "(", "argument_value", ")", ")", "return", "type_check_and_str", "(", "int", ",", "argument_value", ")", "elif", "GraphQLBoolean", ".", "is_same_type", "(", "expected_type", ")", ":", "return", "type_check_and_str", "(", "bool", ",", "argument_value", ")", "elif", "GraphQLDecimal", ".", "is_same_type", "(", "expected_type", ")", ":", "return", "_safe_match_decimal", "(", "argument_value", ")", "elif", "GraphQLDate", ".", "is_same_type", "(", "expected_type", ")", ":", "return", "_safe_match_date_and_datetime", "(", "expected_type", ",", "(", "datetime", ".", "date", ",", ")", ",", "argument_value", ")", "elif", "GraphQLDateTime", ".", "is_same_type", "(", "expected_type", ")", ":", "return", "_safe_match_date_and_datetime", "(", "expected_type", ",", "(", "datetime", ".", "datetime", ",", "arrow", ".", "Arrow", ")", ",", "argument_value", ")", "elif", "isinstance", "(", "expected_type", ",", "GraphQLList", ")", ":", "return", "_safe_match_list", "(", "expected_type", ".", "of_type", ",", "argument_value", ")", "else", ":", "raise", "AssertionError", "(", "u'Could not safely represent the requested GraphQL type: '", "u'{} {}'", ".", "format", "(", "expected_type", ",", "argument_value", ")", ")" ]
Return a MATCH (SQL) string representing the given argument value.
[ "Return", "a", "MATCH", "(", "SQL", ")", "string", "representing", "the", "given", "argument", "value", "." ]
python
train
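The GraphQLInt branch in the record above depends on the fact that isinstance(True, int) is True in Python, so booleans must be rejected before the int check. A minimal, self-contained sketch of that pitfall (names here are illustrative, not graphql-compiler's actual helpers):

def represent_int(value):
    # bool is a subclass of int, so isinstance(True, int) is True;
    # reject bools explicitly before accepting "real" ints.
    if isinstance(value, bool):
        raise ValueError("expected an int, got a bool: {!r}".format(value))
    if not isinstance(value, int):
        raise ValueError("expected an int, got: {!r}".format(value))
    return str(value)

print(represent_int(42))       # "42"
try:
    represent_int(True)        # rejected despite being an int subclass
except ValueError as exc:
    print(exc)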
JukeboxPipeline/jukebox-core
src/jukeboxcore/iniconf.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/iniconf.py#L45-L75
def check_default_values(section, key, validator=None): """Raise an MissingDefaultError if a value in section does not have a default values :param section: the section of a configspec :type section: section :param key: a key of the section :type key: str :param validator: a Validator object to get the default values :type validator: Validator :returns: None :raises: MissingDefaultError Use this in conjunction with the walk method of a ConfigObj. The ConfigObj should be the configspec! When you want to use a custom validator, try:: configinstance.walk(check_default_values, validator=validatorinstance) """ if validator is None: validator = Validator() try: validator.get_default_value(section[key]) except KeyError: #dv = set(section.default_values.keys()) # set of all defined default values #scalars = set(section.scalars) # set of all keys #if dv != scalars: parents = get_section_path(section) msg = 'The Key %s in the section %s is missing a default: %s' % (key, parents, section[key]) log.debug(msg) raise ConfigError(msg)
[ "def", "check_default_values", "(", "section", ",", "key", ",", "validator", "=", "None", ")", ":", "if", "validator", "is", "None", ":", "validator", "=", "Validator", "(", ")", "try", ":", "validator", ".", "get_default_value", "(", "section", "[", "key", "]", ")", "except", "KeyError", ":", "#dv = set(section.default_values.keys()) # set of all defined default values", "#scalars = set(section.scalars) # set of all keys", "#if dv != scalars:", "parents", "=", "get_section_path", "(", "section", ")", "msg", "=", "'The Key %s in the section %s is missing a default: %s'", "%", "(", "key", ",", "parents", ",", "section", "[", "key", "]", ")", "log", ".", "debug", "(", "msg", ")", "raise", "ConfigError", "(", "msg", ")" ]
Raise a MissingDefaultError if a value in section does not have a default value :param section: the section of a configspec :type section: section :param key: a key of the section :type key: str :param validator: a Validator object to get the default values :type validator: Validator :returns: None :raises: MissingDefaultError Use this in conjunction with the walk method of a ConfigObj. The ConfigObj should be the configspec! When you want to use a custom validator, try:: configinstance.walk(check_default_values, validator=validatorinstance)
[ "Raise", "an", "MissingDefaultError", "if", "a", "value", "in", "section", "does", "not", "have", "a", "default", "values" ]
python
train
hollenstein/maspy
maspy/core.py
https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L701-L712
def _reprJSON(self): """Returns a JSON serializable represenation of a ``Ci`` class instance. Use :func:`maspy.core.Ci._fromJSON()` to generate a new ``Ci`` instance from the return value. :returns: a JSON serializable python object """ return {'__Ci__': (self.id, self.specfile, self.dataProcessingRef, self.precursor, self.product, self.params, self.attrib, self.arrayInfo ) }
[ "def", "_reprJSON", "(", "self", ")", ":", "return", "{", "'__Ci__'", ":", "(", "self", ".", "id", ",", "self", ".", "specfile", ",", "self", ".", "dataProcessingRef", ",", "self", ".", "precursor", ",", "self", ".", "product", ",", "self", ".", "params", ",", "self", ".", "attrib", ",", "self", ".", "arrayInfo", ")", "}" ]
Returns a JSON serializable representation of a ``Ci`` class instance. Use :func:`maspy.core.Ci._fromJSON()` to generate a new ``Ci`` instance from the return value. :returns: a JSON serializable python object
[ "Returns", "a", "JSON", "serializable", "represenation", "of", "a", "Ci", "class", "instance", ".", "Use", ":", "func", ":", "maspy", ".", "core", ".", "Ci", ".", "_fromJSON", "()", "to", "generate", "a", "new", "Ci", "instance", "from", "the", "return", "value", "." ]
python
train
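The '__Ci__' marker key in the record above is a common tag-and-rebuild JSON pattern. A stdlib-only sketch of the round trip, using a toy stand-in class rather than maspy's real Ci:

import json

class Ci(object):
    def __init__(self, id, specfile):
        self.id = id
        self.specfile = specfile

    def _reprJSON(self):
        # Box the state under a marker key so a decoder can recognise it later.
        return {'__Ci__': (self.id, self.specfile)}

    @classmethod
    def _fromJSON(cls, jsonobject):
        return cls(*jsonobject)

def decode_hook(obj):
    # json.loads calls this for every decoded dict; rebuild the tagged ones.
    if '__Ci__' in obj:
        return Ci._fromJSON(obj['__Ci__'])
    return obj

text = json.dumps(Ci('CI_1', 'run01.mzML')._reprJSON())
restored = json.loads(text, object_hook=decode_hook)
print(restored.id, restored.specfile)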
pycontribs/pyrax
pyrax/cloudmonitoring.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/cloudmonitoring.py#L798-L809
def update(self, label=None, name=None, disabled=None, metadata=None, monitoring_zones_poll=None, timeout=None, period=None, target_alias=None, target_hostname=None, target_receiver=None): """ Updates an existing check with any of the parameters. """ self.manager.update(self, label=label, name=name, disabled=disabled, metadata=metadata, monitoring_zones_poll=monitoring_zones_poll, timeout=timeout, period=period, target_alias=target_alias, target_hostname=target_hostname, target_receiver=target_receiver)
[ "def", "update", "(", "self", ",", "label", "=", "None", ",", "name", "=", "None", ",", "disabled", "=", "None", ",", "metadata", "=", "None", ",", "monitoring_zones_poll", "=", "None", ",", "timeout", "=", "None", ",", "period", "=", "None", ",", "target_alias", "=", "None", ",", "target_hostname", "=", "None", ",", "target_receiver", "=", "None", ")", ":", "self", ".", "manager", ".", "update", "(", "self", ",", "label", "=", "label", ",", "name", "=", "name", ",", "disabled", "=", "disabled", ",", "metadata", "=", "metadata", ",", "monitoring_zones_poll", "=", "monitoring_zones_poll", ",", "timeout", "=", "timeout", ",", "period", "=", "period", ",", "target_alias", "=", "target_alias", ",", "target_hostname", "=", "target_hostname", ",", "target_receiver", "=", "target_receiver", ")" ]
Updates an existing check with any of the parameters.
[ "Updates", "an", "existing", "check", "with", "any", "of", "the", "parameters", "." ]
python
train
codelv/enaml-native-cli
enamlnativecli/main.py
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1102-L1249
def unlink_android(self, path, pkg): """ Unlink's the android project to this library. 1. In the app's android/settings.gradle, it removes the following lines (if they exist): include ':<project-name>' project(':<project-name>').projectDir = new File( rootProject.projectDir, '../venv/packages/<project-name>/android') 2. In the app's android/app/build.gradle, it removes the following line (if present) compile project(':<project-name>') 3. In the app's android/app/src/main/java/<bundle/id>/MainApplication.java, it removes: import <package>.<Name>Package; new <Name>Package(), If no comma exists it will remove the comma from the previous line. """ bundle_id = self.ctx['bundle_id'] #: Check if it's already linked with open(join('android', 'settings.gradle')) as f: settings_gradle = f.read() with open(join('android', 'app', 'build.gradle')) as f: build_gradle = f.read() #: Find the MainApplication.java main_app_java_path = join('android', 'app', 'src', 'main', 'java', join(*bundle_id.split(".")), 'MainApplication.java') with open(main_app_java_path) as f: main_application_java = f.read() try: #: Now link all the EnamlPackages we can find in the new "package" new_packages = Link.find_packages(join(path, 'android', pkg)) if not new_packages: print(Colors.RED+"\t[Android] {} No EnamlPackages found to " "unlink!".format(pkg)+Colors.RESET) return #: Unlink settings.gradle if Link.is_settings_linked(settings_gradle, pkg): #: Remove the two statements new_settings = [ line for line in settings_gradle.split("\n") if line.strip() not in [ "include ':{name}'".format(name=pkg), "project(':{name}').projectDir = " "new File(rootProject.projectDir, " "'../{path}/android/{name}')".format(path=path, name=pkg) ] ] with open(join('android', 'settings.gradle'), 'w') as f: f.write("\n".join(new_settings)) print("\t[Android] {} unlinked settings.gradle!".format(pkg)) else: print("\t[Android] {} was not linked in " "settings.gradle!".format(pkg)) #: Unlink app/build.gradle if Link.is_build_linked(build_gradle, pkg): #: Add two statements new_build = [ line for line in build_gradle.split("\n") if line.strip() not in [ "compile project(':{name}')".format(name=pkg), "api project(':{name}')".format(name=pkg), ] ] with open(join('android', 'app', 'build.gradle'), 'w') as f: f.write("\n".join(new_build)) print("\t[Android] {} unlinked in " "app/build.gradle!".format(pkg)) else: print("\t[Android] {} was not linked in " "app/build.gradle!".format(pkg)) new_app_java = [] for package in new_packages: #: Add our import statement javacls = os.path.splitext(package)[0].replace("/", ".") if Link.is_app_linked(main_application_java, pkg, javacls): #: Reuse previous if avialable new_app_java = (new_app_java or main_application_java.split("\n")) new_app_java = [ line for line in new_app_java if line.strip() not in [ "import {};".format(javacls), "new {}()".format(javacls.split(".")[-1]), "new {}(),".format(javacls.split(".")[-1]), ] ] #: Now find the last package and remove the comma if it #: exists found = False j = 0 for i, line in enumerate(new_app_java): if fnmatch.fnmatch(line.strip(), "new *Package()"): found = True elif fnmatch.fnmatch(line.strip(), "new *Package(),"): j = i #: We removed the last package so add a comma if not found: #: This kills any whitespace... 
new_app_java[j] = new_app_java[j][ :new_app_java[j].rfind(',')] else: print("\t[Android] {} was not linked in {}!".format( pkg, main_app_java_path)) if new_app_java: with open(main_app_java_path, 'w') as f: f.write("\n".join(new_app_java)) print(Colors.GREEN+"\t[Android] {} unlinked successfully!".format( pkg)+Colors.RESET) except Exception as e: print(Colors.RED+"\t[Android] {} Failed to unlink. " "Reverting due to error: {}".format(pkg, e)+Colors.RESET) #: Undo any changes with open(join('android', 'settings.gradle'), 'w') as f: f.write(settings_gradle) with open(join('android', 'app', 'build.gradle'), 'w') as f: f.write(build_gradle) with open(main_app_java_path, 'w') as f: f.write(main_application_java) #: Now blow up raise
[ "def", "unlink_android", "(", "self", ",", "path", ",", "pkg", ")", ":", "bundle_id", "=", "self", ".", "ctx", "[", "'bundle_id'", "]", "#: Check if it's already linked", "with", "open", "(", "join", "(", "'android'", ",", "'settings.gradle'", ")", ")", "as", "f", ":", "settings_gradle", "=", "f", ".", "read", "(", ")", "with", "open", "(", "join", "(", "'android'", ",", "'app'", ",", "'build.gradle'", ")", ")", "as", "f", ":", "build_gradle", "=", "f", ".", "read", "(", ")", "#: Find the MainApplication.java", "main_app_java_path", "=", "join", "(", "'android'", ",", "'app'", ",", "'src'", ",", "'main'", ",", "'java'", ",", "join", "(", "*", "bundle_id", ".", "split", "(", "\".\"", ")", ")", ",", "'MainApplication.java'", ")", "with", "open", "(", "main_app_java_path", ")", "as", "f", ":", "main_application_java", "=", "f", ".", "read", "(", ")", "try", ":", "#: Now link all the EnamlPackages we can find in the new \"package\"", "new_packages", "=", "Link", ".", "find_packages", "(", "join", "(", "path", ",", "'android'", ",", "pkg", ")", ")", "if", "not", "new_packages", ":", "print", "(", "Colors", ".", "RED", "+", "\"\\t[Android] {} No EnamlPackages found to \"", "\"unlink!\"", ".", "format", "(", "pkg", ")", "+", "Colors", ".", "RESET", ")", "return", "#: Unlink settings.gradle", "if", "Link", ".", "is_settings_linked", "(", "settings_gradle", ",", "pkg", ")", ":", "#: Remove the two statements", "new_settings", "=", "[", "line", "for", "line", "in", "settings_gradle", ".", "split", "(", "\"\\n\"", ")", "if", "line", ".", "strip", "(", ")", "not", "in", "[", "\"include ':{name}'\"", ".", "format", "(", "name", "=", "pkg", ")", ",", "\"project(':{name}').projectDir = \"", "\"new File(rootProject.projectDir, \"", "\"'../{path}/android/{name}')\"", ".", "format", "(", "path", "=", "path", ",", "name", "=", "pkg", ")", "]", "]", "with", "open", "(", "join", "(", "'android'", ",", "'settings.gradle'", ")", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "\"\\n\"", ".", "join", "(", "new_settings", ")", ")", "print", "(", "\"\\t[Android] {} unlinked settings.gradle!\"", ".", "format", "(", "pkg", ")", ")", "else", ":", "print", "(", "\"\\t[Android] {} was not linked in \"", "\"settings.gradle!\"", ".", "format", "(", "pkg", ")", ")", "#: Unlink app/build.gradle", "if", "Link", ".", "is_build_linked", "(", "build_gradle", ",", "pkg", ")", ":", "#: Add two statements", "new_build", "=", "[", "line", "for", "line", "in", "build_gradle", ".", "split", "(", "\"\\n\"", ")", "if", "line", ".", "strip", "(", ")", "not", "in", "[", "\"compile project(':{name}')\"", ".", "format", "(", "name", "=", "pkg", ")", ",", "\"api project(':{name}')\"", ".", "format", "(", "name", "=", "pkg", ")", ",", "]", "]", "with", "open", "(", "join", "(", "'android'", ",", "'app'", ",", "'build.gradle'", ")", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "\"\\n\"", ".", "join", "(", "new_build", ")", ")", "print", "(", "\"\\t[Android] {} unlinked in \"", "\"app/build.gradle!\"", ".", "format", "(", "pkg", ")", ")", "else", ":", "print", "(", "\"\\t[Android] {} was not linked in \"", "\"app/build.gradle!\"", ".", "format", "(", "pkg", ")", ")", "new_app_java", "=", "[", "]", "for", "package", "in", "new_packages", ":", "#: Add our import statement", "javacls", "=", "os", ".", "path", ".", "splitext", "(", "package", ")", "[", "0", "]", ".", "replace", "(", "\"/\"", ",", "\".\"", ")", "if", "Link", ".", "is_app_linked", "(", "main_application_java", ",", "pkg", ",", "javacls", ")", ":", "#: Reuse 
previous if avialable", "new_app_java", "=", "(", "new_app_java", "or", "main_application_java", ".", "split", "(", "\"\\n\"", ")", ")", "new_app_java", "=", "[", "line", "for", "line", "in", "new_app_java", "if", "line", ".", "strip", "(", ")", "not", "in", "[", "\"import {};\"", ".", "format", "(", "javacls", ")", ",", "\"new {}()\"", ".", "format", "(", "javacls", ".", "split", "(", "\".\"", ")", "[", "-", "1", "]", ")", ",", "\"new {}(),\"", ".", "format", "(", "javacls", ".", "split", "(", "\".\"", ")", "[", "-", "1", "]", ")", ",", "]", "]", "#: Now find the last package and remove the comma if it", "#: exists", "found", "=", "False", "j", "=", "0", "for", "i", ",", "line", "in", "enumerate", "(", "new_app_java", ")", ":", "if", "fnmatch", ".", "fnmatch", "(", "line", ".", "strip", "(", ")", ",", "\"new *Package()\"", ")", ":", "found", "=", "True", "elif", "fnmatch", ".", "fnmatch", "(", "line", ".", "strip", "(", ")", ",", "\"new *Package(),\"", ")", ":", "j", "=", "i", "#: We removed the last package so add a comma", "if", "not", "found", ":", "#: This kills any whitespace...", "new_app_java", "[", "j", "]", "=", "new_app_java", "[", "j", "]", "[", ":", "new_app_java", "[", "j", "]", ".", "rfind", "(", "','", ")", "]", "else", ":", "print", "(", "\"\\t[Android] {} was not linked in {}!\"", ".", "format", "(", "pkg", ",", "main_app_java_path", ")", ")", "if", "new_app_java", ":", "with", "open", "(", "main_app_java_path", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "\"\\n\"", ".", "join", "(", "new_app_java", ")", ")", "print", "(", "Colors", ".", "GREEN", "+", "\"\\t[Android] {} unlinked successfully!\"", ".", "format", "(", "pkg", ")", "+", "Colors", ".", "RESET", ")", "except", "Exception", "as", "e", ":", "print", "(", "Colors", ".", "RED", "+", "\"\\t[Android] {} Failed to unlink. \"", "\"Reverting due to error: {}\"", ".", "format", "(", "pkg", ",", "e", ")", "+", "Colors", ".", "RESET", ")", "#: Undo any changes", "with", "open", "(", "join", "(", "'android'", ",", "'settings.gradle'", ")", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "settings_gradle", ")", "with", "open", "(", "join", "(", "'android'", ",", "'app'", ",", "'build.gradle'", ")", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "build_gradle", ")", "with", "open", "(", "main_app_java_path", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "main_application_java", ")", "#: Now blow up", "raise" ]
Unlinks the android project from this library. 1. In the app's android/settings.gradle, it removes the following lines (if they exist): include ':<project-name>' project(':<project-name>').projectDir = new File( rootProject.projectDir, '../venv/packages/<project-name>/android') 2. In the app's android/app/build.gradle, it removes the following line (if present): compile project(':<project-name>') 3. In the app's android/app/src/main/java/<bundle/id>/MainApplication.java, it removes: import <package>.<Name>Package; new <Name>Package(), If the removed entry was the last one, the trailing comma is removed from the previous line.
[ "Unlink", "s", "the", "android", "project", "to", "this", "library", "." ]
python
train
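The docstring above comes down to one mechanic: filter known statements out of settings.gradle line by line. A stdlib sketch of just that step, with a hypothetical package name and path:

def unlink_settings(settings_text, name, path):
    # The two statements that link the package, in the form described above.
    unwanted = {
        "include ':{name}'".format(name=name),
        "project(':{name}').projectDir = new File(rootProject.projectDir, "
        "'../{path}/android/{name}')".format(path=path, name=name),
    }
    return "\n".join(line for line in settings_text.split("\n")
                     if line.strip() not in unwanted)

example = ("include ':app'\n"
           "include ':maps'\n"
           "project(':maps').projectDir = new File(rootProject.projectDir, "
           "'../venv/packages/maps/android/maps')")
print(unlink_settings(example, "maps", "venv/packages/maps"))   # only ':app' remains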
bram85/topydo
topydo/commands/AddCommand.py
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/commands/AddCommand.py#L80-L100
def execute(self): """ Adds a todo item to the list. """ if not super().execute(): return False self.printer.add_filter(PrettyPrinterNumbers(self.todolist)) self._process_flags() if self.from_file: try: new_todos = self.get_todos_from_file() for todo in new_todos: self._add_todo(todo) except (IOError, OSError): self.error('File not found: ' + self.from_file) else: if self.text: self._add_todo(self.text) else: self.error(self.usage())
[ "def", "execute", "(", "self", ")", ":", "if", "not", "super", "(", ")", ".", "execute", "(", ")", ":", "return", "False", "self", ".", "printer", ".", "add_filter", "(", "PrettyPrinterNumbers", "(", "self", ".", "todolist", ")", ")", "self", ".", "_process_flags", "(", ")", "if", "self", ".", "from_file", ":", "try", ":", "new_todos", "=", "self", ".", "get_todos_from_file", "(", ")", "for", "todo", "in", "new_todos", ":", "self", ".", "_add_todo", "(", "todo", ")", "except", "(", "IOError", ",", "OSError", ")", ":", "self", ".", "error", "(", "'File not found: '", "+", "self", ".", "from_file", ")", "else", ":", "if", "self", ".", "text", ":", "self", ".", "_add_todo", "(", "self", ".", "text", ")", "else", ":", "self", ".", "error", "(", "self", ".", "usage", "(", ")", ")" ]
Adds a todo item to the list.
[ "Adds", "a", "todo", "item", "to", "the", "list", "." ]
python
train
RedHatInsights/insights-core
insights/core/dr.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/dr.py#L322-L349
def get_dependency_graph(component): """ Generate a component's graph of dependencies, which can be passed to :func:`run` or :func:`run_incremental`. """ if component not in DEPENDENCIES: raise Exception("%s is not a registered component." % get_name(component)) if not DEPENDENCIES[component]: return {component: set()} graph = defaultdict(set) def visitor(c, parent): if parent is not None: graph[parent].add(c) walk_dependencies(component, visitor) graph = dict(graph) # Find all items that don't depend on anything. extra_items_in_deps = _reduce(set.union, graph.values(), set()) - set(graph.keys()) # Add empty dependencies where needed. graph.update(dict((item, set()) for item in extra_items_in_deps)) return graph
[ "def", "get_dependency_graph", "(", "component", ")", ":", "if", "component", "not", "in", "DEPENDENCIES", ":", "raise", "Exception", "(", "\"%s is not a registered component.\"", "%", "get_name", "(", "component", ")", ")", "if", "not", "DEPENDENCIES", "[", "component", "]", ":", "return", "{", "component", ":", "set", "(", ")", "}", "graph", "=", "defaultdict", "(", "set", ")", "def", "visitor", "(", "c", ",", "parent", ")", ":", "if", "parent", "is", "not", "None", ":", "graph", "[", "parent", "]", ".", "add", "(", "c", ")", "walk_dependencies", "(", "component", ",", "visitor", ")", "graph", "=", "dict", "(", "graph", ")", "# Find all items that don't depend on anything.", "extra_items_in_deps", "=", "_reduce", "(", "set", ".", "union", ",", "graph", ".", "values", "(", ")", ",", "set", "(", ")", ")", "-", "set", "(", "graph", ".", "keys", "(", ")", ")", "# Add empty dependencies where needed.", "graph", ".", "update", "(", "dict", "(", "(", "item", ",", "set", "(", ")", ")", "for", "item", "in", "extra_items_in_deps", ")", ")", "return", "graph" ]
Generate a component's graph of dependencies, which can be passed to :func:`run` or :func:`run_incremental`.
[ "Generate", "a", "component", "s", "graph", "of", "dependencies", "which", "can", "be", "passed", "to", ":", "func", ":", "run", "or", ":", "func", ":", "run_incremental", "." ]
python
train
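The last two statements of the function above ensure that components nothing depends on still appear as keys with empty dependency sets. The same completion step on a plain dict:

from functools import reduce

graph = {"report": {"parser", "loader"}, "parser": {"loader"}}

# Everything that appears only on the right-hand side gets an empty entry.
leaves = reduce(set.union, graph.values(), set()) - set(graph)
graph.update({item: set() for item in leaves})
print(graph)   # 'loader' now maps to an empty set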
poppy-project/pypot
pypot/dynamixel/controller.py
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/controller.py#L62-L83
def get_register(self, motors, disable_sync_read=False): """ Gets the value from the specified register and sets it to the :class:`~pypot.dynamixel.motor.DxlMotor`. """ if not motors: return False ids = [m.id for m in motors] getter = getattr(self.io, 'get_{}'.format(self.regname)) values = (sum([list(getter([id])) for id in ids], []) if disable_sync_read else getter(ids)) if not values: return False for m, val in zip(motors, values): m.__dict__[self.varname] = val for m in motors: m._read_synced[self.varname].done() return True
[ "def", "get_register", "(", "self", ",", "motors", ",", "disable_sync_read", "=", "False", ")", ":", "if", "not", "motors", ":", "return", "False", "ids", "=", "[", "m", ".", "id", "for", "m", "in", "motors", "]", "getter", "=", "getattr", "(", "self", ".", "io", ",", "'get_{}'", ".", "format", "(", "self", ".", "regname", ")", ")", "values", "=", "(", "sum", "(", "[", "list", "(", "getter", "(", "[", "id", "]", ")", ")", "for", "id", "in", "ids", "]", ",", "[", "]", ")", "if", "disable_sync_read", "else", "getter", "(", "ids", ")", ")", "if", "not", "values", ":", "return", "False", "for", "m", ",", "val", "in", "zip", "(", "motors", ",", "values", ")", ":", "m", ".", "__dict__", "[", "self", ".", "varname", "]", "=", "val", "for", "m", "in", "motors", ":", "m", ".", "_read_synced", "[", "self", ".", "varname", "]", ".", "done", "(", ")", "return", "True" ]
Gets the value from the specified register and sets it to the :class:`~pypot.dynamixel.motor.DxlMotor`.
[ "Gets", "the", "value", "from", "the", "specified", "register", "and", "sets", "it", "to", "the", ":", "class", ":", "~pypot", ".", "dynamixel", ".", "motor", ".", "DxlMotor", "." ]
python
train
hit9/rux
rux/renderer.py
https://github.com/hit9/rux/blob/d7f60722658a3b83ac6d7bb3ca2790ac9c926b59/rux/renderer.py#L28-L42
def render(self, template, **data): """Render data with template, return html unicodes. parameters template str the template's filename data dict the data to render """ # make a copy and update the copy dct = self.global_data.copy() dct.update(data) try: html = self.env.get_template(template).render(**dct) except TemplateNotFound: raise JinjaTemplateNotFound return html
[ "def", "render", "(", "self", ",", "template", ",", "*", "*", "data", ")", ":", "# make a copy and update the copy", "dct", "=", "self", ".", "global_data", ".", "copy", "(", ")", "dct", ".", "update", "(", "data", ")", "try", ":", "html", "=", "self", ".", "env", ".", "get_template", "(", "template", ")", ".", "render", "(", "*", "*", "dct", ")", "except", "TemplateNotFound", ":", "raise", "JinjaTemplateNotFound", "return", "html" ]
Render data with template, return html unicodes. parameters template str the template's filename data dict the data to render
[ "Render", "data", "with", "template", "return", "html", "unicodes", ".", "parameters", "template", "str", "the", "template", "s", "filename", "data", "dict", "the", "data", "to", "render" ]
python
valid
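A hedged jinja2 sketch of the copy-globals-then-overlay rendering shown above; it builds a throwaway in-memory DictLoader environment instead of rux's real loader, and raises a plain RuntimeError where rux raises JinjaTemplateNotFound:

from jinja2 import DictLoader, Environment, TemplateNotFound

env = Environment(loader=DictLoader({"post.html": "<h1>{{ site }} - {{ title }}</h1>"}))
global_data = {"site": "rux"}

def render(template, **data):
    dct = dict(global_data)      # copy the shared globals, then overlay call data
    dct.update(data)
    try:
        return env.get_template(template).render(**dct)
    except TemplateNotFound:
        raise RuntimeError("template not found: %s" % template)

print(render("post.html", title="hello"))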
quantopian/zipline
zipline/utils/pool.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/pool.py#L84-L116
def apply_async(f, args=(), kwargs=None, callback=None): """Apply a function but emulate the API of an asynchronous call. Parameters ---------- f : callable The function to call. args : tuple, optional The positional arguments. kwargs : dict, optional The keyword arguments. Returns ------- future : ApplyAsyncResult The result of calling the function boxed in a future-like api. Notes ----- This calls the function eagerly but wraps it so that ``SequentialPool`` can be used where a :class:`multiprocessing.Pool` or :class:`gevent.pool.Pool` would be used. """ try: value = (identity if callback is None else callback)( f(*args, **kwargs or {}), ) successful = True except Exception as e: value = e successful = False return ApplyAsyncResult(value, successful)
[ "def", "apply_async", "(", "f", ",", "args", "=", "(", ")", ",", "kwargs", "=", "None", ",", "callback", "=", "None", ")", ":", "try", ":", "value", "=", "(", "identity", "if", "callback", "is", "None", "else", "callback", ")", "(", "f", "(", "*", "args", ",", "*", "*", "kwargs", "or", "{", "}", ")", ",", ")", "successful", "=", "True", "except", "Exception", "as", "e", ":", "value", "=", "e", "successful", "=", "False", "return", "ApplyAsyncResult", "(", "value", ",", "successful", ")" ]
Apply a function but emulate the API of an asynchronous call. Parameters ---------- f : callable The function to call. args : tuple, optional The positional arguments. kwargs : dict, optional The keyword arguments. Returns ------- future : ApplyAsyncResult The result of calling the function boxed in a future-like api. Notes ----- This calls the function eagerly but wraps it so that ``SequentialPool`` can be used where a :class:`multiprocessing.Pool` or :class:`gevent.pool.Pool` would be used.
[ "Apply", "a", "function", "but", "emulate", "the", "API", "of", "an", "asynchronous", "call", "." ]
python
train
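The docstring above promises a future-like API around an eager call. A self-contained re-sketch of the idea (the ApplyAsyncResult below is written here for illustration and is not imported from zipline):

class ApplyAsyncResult(object):
    def __init__(self, value, successful):
        self._value = value
        self._successful = successful

    def successful(self):
        return self._successful

    def get(self):
        # Mirror multiprocessing's AsyncResult: re-raise a captured exception.
        if not self._successful:
            raise self._value
        return self._value

def apply_async(f, args=(), kwargs=None, callback=None):
    # Call eagerly but hand back a future-like object.
    try:
        value = f(*args, **(kwargs or {}))
        if callback is not None:
            value = callback(value)
        return ApplyAsyncResult(value, True)
    except Exception as e:
        return ApplyAsyncResult(e, False)

print(apply_async(sum, ([1, 2, 3],)).get())   # 6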
rwl/pylon
pylon/util.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/util.py#L159-L168
def format_from_extension(fname): """ Tries to infer a protocol from the file extension.""" _base, ext = os.path.splitext(fname) if not ext: return None try: format = known_extensions[ext.replace('.', '')] except KeyError: format = None return format
[ "def", "format_from_extension", "(", "fname", ")", ":", "_base", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "fname", ")", "if", "not", "ext", ":", "return", "None", "try", ":", "format", "=", "known_extensions", "[", "ext", ".", "replace", "(", "'.'", ",", "''", ")", "]", "except", "KeyError", ":", "format", "=", "None", "return", "format" ]
Tries to infer a protocol from the file extension.
[ "Tries", "to", "infer", "a", "protocol", "from", "the", "file", "extension", "." ]
python
train
spotify/luigi
luigi/contrib/hdfs/target.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hdfs/target.py#L134-L138
def move(self, path, raise_if_exists=False): """ Alias for ``rename()`` """ self.rename(path, raise_if_exists=raise_if_exists)
[ "def", "move", "(", "self", ",", "path", ",", "raise_if_exists", "=", "False", ")", ":", "self", ".", "rename", "(", "path", ",", "raise_if_exists", "=", "raise_if_exists", ")" ]
Alias for ``rename()``
[ "Alias", "for", "rename", "()" ]
python
train
mongolab/mongoctl
mongoctl/objects/replicaset_cluster.py
https://github.com/mongolab/mongoctl/blob/fab15216127ad4bf8ea9aa8a95d75504c0ef01a2/mongoctl/objects/replicaset_cluster.py#L377-L422
def get_dump_best_secondary(self, max_repl_lag=None): """ Returns the best secondary member to be used for dumping best = passives with least lags, if no passives then least lag """ secondary_lag_tuples = [] primary_member = self.get_primary_member() if not primary_member: raise MongoctlException("Unable to determine primary member for" " cluster '%s'" % self.id) master_status = primary_member.get_server().get_member_rs_status() if not master_status: raise MongoctlException("Unable to determine replicaset status for" " primary member '%s'" % primary_member.get_server().id) for member in self.get_members(): if member.get_server().is_secondary(): repl_lag = member.get_server().get_repl_lag(master_status) if max_repl_lag and repl_lag > max_repl_lag: log_info("Excluding member '%s' because it's repl lag " "(in seconds)%s is more than max %s. " % (member.get_server().id, repl_lag, max_repl_lag)) continue secondary_lag_tuples.append((member,repl_lag)) def best_secondary_comp(x, y): x_mem, x_lag = x y_mem, y_lag = y if x_mem.is_passive(): if y_mem.is_passive(): return x_lag - y_lag else: return -1 elif y_mem.is_passive(): return 1 else: return x_lag - y_lag if secondary_lag_tuples: secondary_lag_tuples.sort(best_secondary_comp) return secondary_lag_tuples[0][0]
[ "def", "get_dump_best_secondary", "(", "self", ",", "max_repl_lag", "=", "None", ")", ":", "secondary_lag_tuples", "=", "[", "]", "primary_member", "=", "self", ".", "get_primary_member", "(", ")", "if", "not", "primary_member", ":", "raise", "MongoctlException", "(", "\"Unable to determine primary member for\"", "\" cluster '%s'\"", "%", "self", ".", "id", ")", "master_status", "=", "primary_member", ".", "get_server", "(", ")", ".", "get_member_rs_status", "(", ")", "if", "not", "master_status", ":", "raise", "MongoctlException", "(", "\"Unable to determine replicaset status for\"", "\" primary member '%s'\"", "%", "primary_member", ".", "get_server", "(", ")", ".", "id", ")", "for", "member", "in", "self", ".", "get_members", "(", ")", ":", "if", "member", ".", "get_server", "(", ")", ".", "is_secondary", "(", ")", ":", "repl_lag", "=", "member", ".", "get_server", "(", ")", ".", "get_repl_lag", "(", "master_status", ")", "if", "max_repl_lag", "and", "repl_lag", ">", "max_repl_lag", ":", "log_info", "(", "\"Excluding member '%s' because it's repl lag \"", "\"(in seconds)%s is more than max %s. \"", "%", "(", "member", ".", "get_server", "(", ")", ".", "id", ",", "repl_lag", ",", "max_repl_lag", ")", ")", "continue", "secondary_lag_tuples", ".", "append", "(", "(", "member", ",", "repl_lag", ")", ")", "def", "best_secondary_comp", "(", "x", ",", "y", ")", ":", "x_mem", ",", "x_lag", "=", "x", "y_mem", ",", "y_lag", "=", "y", "if", "x_mem", ".", "is_passive", "(", ")", ":", "if", "y_mem", ".", "is_passive", "(", ")", ":", "return", "x_lag", "-", "y_lag", "else", ":", "return", "-", "1", "elif", "y_mem", ".", "is_passive", "(", ")", ":", "return", "1", "else", ":", "return", "x_lag", "-", "y_lag", "if", "secondary_lag_tuples", ":", "secondary_lag_tuples", ".", "sort", "(", "best_secondary_comp", ")", "return", "secondary_lag_tuples", "[", "0", "]", "[", "0", "]" ]
Returns the best secondary member to be used for dumping. Best = passives with the least lag; if there are no passives, then the member with the least lag.
[ "Returns", "the", "best", "secondary", "member", "to", "be", "used", "for", "dumping", "best", "=", "passives", "with", "least", "lags", "if", "no", "passives", "then", "least", "lag" ]
python
train
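Note that secondary_lag_tuples.sort(best_secondary_comp) above passes a cmp-style comparator, which only works on Python 2. An equivalent key-based ordering (passives first, ties broken by lag), sketched with stand-in members:

from collections import namedtuple

Member = namedtuple("Member", "name passive")

candidates = [(Member("a", False), 12.0),
              (Member("b", True), 30.0),
              (Member("c", True), 5.0)]

# Passive members sort first (not True -> False), then smallest replication lag.
best = sorted(candidates, key=lambda pair: (not pair[0].passive, pair[1]))[0][0]
print(best.name)   # 'c'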
bitprophet/botox
botox/utils.py
https://github.com/bitprophet/botox/blob/02c887a28bd2638273548cc7d1e6d6f1d4d38bf9/botox/utils.py#L5-L19
def puts(text, end="\n", flush=True, stream=sys.stdout): """ Print ``text`` to ``stream`` (default: ``sys.stdout``) and auto-flush. This is useful for fast loops where Python's default IO buffering would prevent "realtime" updating. Newlines may be disabled by setting ``end`` to the empty string (``''``). (This intentionally mirrors Python 3's ``print`` syntax.) You may disable output flushing by setting ``flush=False``. """ stream.write(str(text) + end) if flush: stream.flush()
[ "def", "puts", "(", "text", ",", "end", "=", "\"\\n\"", ",", "flush", "=", "True", ",", "stream", "=", "sys", ".", "stdout", ")", ":", "stream", ".", "write", "(", "str", "(", "text", ")", "+", "end", ")", "if", "flush", ":", "stream", ".", "flush", "(", ")" ]
Print ``text`` to ``stream`` (default: ``sys.stdout``) and auto-flush. This is useful for fast loops where Python's default IO buffering would prevent "realtime" updating. Newlines may be disabled by setting ``end`` to the empty string (``''``). (This intentionally mirrors Python 3's ``print`` syntax.) You may disable output flushing by setting ``flush=False``.
[ "Print", "text", "to", "stream", "(", "default", ":", "sys", ".", "stdout", ")", "and", "auto", "-", "flush", "." ]
python
train
orbingol/NURBS-Python
geomdl/exchange.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/exchange.py#L127-L155
def import_csv(file_name, **kwargs): """ Reads control points from a CSV file and generates a 1-dimensional list of control points. It is possible to use a different value separator via ``separator`` keyword argument. The following code segment illustrates the usage of ``separator`` keyword argument. .. code-block:: python :linenos: # By default, import_csv uses 'comma' as the value separator ctrlpts = exchange.import_csv("control_points.csv") # Alternatively, it is possible to import a file containing tab-separated values ctrlpts = exchange.import_csv("control_points.csv", separator="\\t") The only difference of this function from :py:func:`.exchange.import_txt()` is skipping the first line of the input file which generally contains the column headings. :param file_name: file name of the text file :type file_name: str :return: list of control points :rtype: list :raises GeomdlException: an error occurred reading the file """ # File delimiters sep = kwargs.get('separator', ",") content = exch.read_file(file_name, skip_lines=1) return exch.import_text_data(content, sep)
[ "def", "import_csv", "(", "file_name", ",", "*", "*", "kwargs", ")", ":", "# File delimiters", "sep", "=", "kwargs", ".", "get", "(", "'separator'", ",", "\",\"", ")", "content", "=", "exch", ".", "read_file", "(", "file_name", ",", "skip_lines", "=", "1", ")", "return", "exch", ".", "import_text_data", "(", "content", ",", "sep", ")" ]
Reads control points from a CSV file and generates a 1-dimensional list of control points. It is possible to use a different value separator via ``separator`` keyword argument. The following code segment illustrates the usage of ``separator`` keyword argument. .. code-block:: python :linenos: # By default, import_csv uses 'comma' as the value separator ctrlpts = exchange.import_csv("control_points.csv") # Alternatively, it is possible to import a file containing tab-separated values ctrlpts = exchange.import_csv("control_points.csv", separator="\\t") The only difference of this function from :py:func:`.exchange.import_txt()` is skipping the first line of the input file which generally contains the column headings. :param file_name: file name of the text file :type file_name: str :return: list of control points :rtype: list :raises GeomdlException: an error occurred reading the file
[ "Reads", "control", "points", "from", "a", "CSV", "file", "and", "generates", "a", "1", "-", "dimensional", "list", "of", "control", "points", "." ]
python
train
BlueBrain/NeuroM
neurom/fst/_neuritefunc.py
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuritefunc.py#L322-L333
def section_radial_distances(neurites, neurite_type=NeuriteType.all, origin=None, iterator_type=Tree.ipreorder): '''Section radial distances in a collection of neurites. The iterator_type can be used to select only terminal sections (ileaf) or only bifurcations (ibifurcation_point).''' dist = [] for n in iter_neurites(neurites, filt=is_type(neurite_type)): pos = n.root_node.points[0] if origin is None else origin dist.extend(sectionfunc.section_radial_distance(s, pos) for s in iter_sections(n, iterator_type=iterator_type)) return dist
[ "def", "section_radial_distances", "(", "neurites", ",", "neurite_type", "=", "NeuriteType", ".", "all", ",", "origin", "=", "None", ",", "iterator_type", "=", "Tree", ".", "ipreorder", ")", ":", "dist", "=", "[", "]", "for", "n", "in", "iter_neurites", "(", "neurites", ",", "filt", "=", "is_type", "(", "neurite_type", ")", ")", ":", "pos", "=", "n", ".", "root_node", ".", "points", "[", "0", "]", "if", "origin", "is", "None", "else", "origin", "dist", ".", "extend", "(", "sectionfunc", ".", "section_radial_distance", "(", "s", ",", "pos", ")", "for", "s", "in", "iter_sections", "(", "n", ",", "iterator_type", "=", "iterator_type", ")", ")", "return", "dist" ]
Section radial distances in a collection of neurites. The iterator_type can be used to select only terminal sections (ileaf) or only bifurcations (ibifurcation_point).
[ "Section", "radial", "distances", "in", "a", "collection", "of", "neurites", ".", "The", "iterator_type", "can", "be", "used", "to", "select", "only", "terminal", "sections", "(", "ileaf", ")", "or", "only", "bifurcations", "(", "ibifurcation_point", ")", "." ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L12933-L12952
def adsb_vehicle_encode(self, ICAO_address, lat, lon, altitude_type, altitude, heading, hor_velocity, ver_velocity, callsign, emitter_type, tslc, flags, squawk): ''' The location and information of an ADSB vehicle ICAO_address : ICAO address (uint32_t) lat : Latitude, expressed as degrees * 1E7 (int32_t) lon : Longitude, expressed as degrees * 1E7 (int32_t) altitude_type : Type from ADSB_ALTITUDE_TYPE enum (uint8_t) altitude : Altitude(ASL) in millimeters (int32_t) heading : Course over ground in centidegrees (uint16_t) hor_velocity : The horizontal velocity in centimeters/second (uint16_t) ver_velocity : The vertical velocity in centimeters/second, positive is up (int16_t) callsign : The callsign, 8+null (char) emitter_type : Type from ADSB_EMITTER_TYPE enum (uint8_t) tslc : Time since last communication in seconds (uint8_t) flags : Flags to indicate various statuses including valid data fields (uint16_t) squawk : Squawk code (uint16_t) ''' return MAVLink_adsb_vehicle_message(ICAO_address, lat, lon, altitude_type, altitude, heading, hor_velocity, ver_velocity, callsign, emitter_type, tslc, flags, squawk)
[ "def", "adsb_vehicle_encode", "(", "self", ",", "ICAO_address", ",", "lat", ",", "lon", ",", "altitude_type", ",", "altitude", ",", "heading", ",", "hor_velocity", ",", "ver_velocity", ",", "callsign", ",", "emitter_type", ",", "tslc", ",", "flags", ",", "squawk", ")", ":", "return", "MAVLink_adsb_vehicle_message", "(", "ICAO_address", ",", "lat", ",", "lon", ",", "altitude_type", ",", "altitude", ",", "heading", ",", "hor_velocity", ",", "ver_velocity", ",", "callsign", ",", "emitter_type", ",", "tslc", ",", "flags", ",", "squawk", ")" ]
The location and information of an ADSB vehicle ICAO_address : ICAO address (uint32_t) lat : Latitude, expressed as degrees * 1E7 (int32_t) lon : Longitude, expressed as degrees * 1E7 (int32_t) altitude_type : Type from ADSB_ALTITUDE_TYPE enum (uint8_t) altitude : Altitude(ASL) in millimeters (int32_t) heading : Course over ground in centidegrees (uint16_t) hor_velocity : The horizontal velocity in centimeters/second (uint16_t) ver_velocity : The vertical velocity in centimeters/second, positive is up (int16_t) callsign : The callsign, 8+null (char) emitter_type : Type from ADSB_EMITTER_TYPE enum (uint8_t) tslc : Time since last communication in seconds (uint8_t) flags : Flags to indicate various statuses including valid data fields (uint16_t) squawk : Squawk code (uint16_t)
[ "The", "location", "and", "information", "of", "an", "ADSB", "vehicle" ]
python
train
idank/bashlex
bashlex/parser.py
https://github.com/idank/bashlex/blob/800cb7e3c634eaa3c81f8a8648fd7fd4e27050ac/bashlex/parser.py#L288-L297
def p_function_def(p): '''function_def : WORD LEFT_PAREN RIGHT_PAREN newline_list function_body | FUNCTION WORD LEFT_PAREN RIGHT_PAREN newline_list function_body | FUNCTION WORD newline_list function_body''' parts = _makeparts(p) body = parts[-1] name = parts[ast.findfirstkind(parts, 'word')] p[0] = ast.node(kind='function', name=name, body=body, parts=parts, pos=_partsspan(parts))
[ "def", "p_function_def", "(", "p", ")", ":", "parts", "=", "_makeparts", "(", "p", ")", "body", "=", "parts", "[", "-", "1", "]", "name", "=", "parts", "[", "ast", ".", "findfirstkind", "(", "parts", ",", "'word'", ")", "]", "p", "[", "0", "]", "=", "ast", ".", "node", "(", "kind", "=", "'function'", ",", "name", "=", "name", ",", "body", "=", "body", ",", "parts", "=", "parts", ",", "pos", "=", "_partsspan", "(", "parts", ")", ")" ]
function_def : WORD LEFT_PAREN RIGHT_PAREN newline_list function_body | FUNCTION WORD LEFT_PAREN RIGHT_PAREN newline_list function_body | FUNCTION WORD newline_list function_body
[ "function_def", ":", "WORD", "LEFT_PAREN", "RIGHT_PAREN", "newline_list", "function_body", "|", "FUNCTION", "WORD", "LEFT_PAREN", "RIGHT_PAREN", "newline_list", "function_body", "|", "FUNCTION", "WORD", "newline_list", "function_body" ]
python
train
xolox/python-coloredlogs
coloredlogs/syslog.py
https://github.com/xolox/python-coloredlogs/blob/1cbf0c6bbee400c6ddbc43008143809934ec3e79/coloredlogs/syslog.py#L207-L223
def find_syslog_address(): """ Find the most suitable destination for system log messages. :returns: The pathname of a log device (a string) or an address/port tuple as supported by :class:`~logging.handlers.SysLogHandler`. On Mac OS X this prefers :data:`LOG_DEVICE_MACOSX`, after that :data:`LOG_DEVICE_UNIX` is checked for existence. If both of these device files don't exist the default used by :class:`~logging.handlers.SysLogHandler` is returned. """ if sys.platform == 'darwin' and os.path.exists(LOG_DEVICE_MACOSX): return LOG_DEVICE_MACOSX elif os.path.exists(LOG_DEVICE_UNIX): return LOG_DEVICE_UNIX else: return 'localhost', logging.handlers.SYSLOG_UDP_PORT
[ "def", "find_syslog_address", "(", ")", ":", "if", "sys", ".", "platform", "==", "'darwin'", "and", "os", ".", "path", ".", "exists", "(", "LOG_DEVICE_MACOSX", ")", ":", "return", "LOG_DEVICE_MACOSX", "elif", "os", ".", "path", ".", "exists", "(", "LOG_DEVICE_UNIX", ")", ":", "return", "LOG_DEVICE_UNIX", "else", ":", "return", "'localhost'", ",", "logging", ".", "handlers", ".", "SYSLOG_UDP_PORT" ]
Find the most suitable destination for system log messages. :returns: The pathname of a log device (a string) or an address/port tuple as supported by :class:`~logging.handlers.SysLogHandler`. On Mac OS X this prefers :data:`LOG_DEVICE_MACOSX`, after that :data:`LOG_DEVICE_UNIX` is checked for existence. If both of these device files don't exist the default used by :class:`~logging.handlers.SysLogHandler` is returned.
[ "Find", "the", "most", "suitable", "destination", "for", "system", "log", "messages", "." ]
python
train
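A stdlib-only illustration of the same fallback chain; the two device paths below are the usual macOS and Linux defaults and stand in for LOG_DEVICE_MACOSX / LOG_DEVICE_UNIX:

import logging.handlers
import os
import sys

def pick_syslog_address(macosx_dev="/var/run/syslog", unix_dev="/dev/log"):
    # Prefer the platform-specific log device, fall back to UDP localhost:514.
    if sys.platform == "darwin" and os.path.exists(macosx_dev):
        return macosx_dev
    if os.path.exists(unix_dev):
        return unix_dev
    return ("localhost", logging.handlers.SYSLOG_UDP_PORT)

print(pick_syslog_address())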
istresearch/scrapy-cluster
crawler/crawling/distributed_scheduler.py
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/distributed_scheduler.py#L386-L436
def enqueue_request(self, request): ''' Pushes a request from the spider into the proper throttled queue ''' if not request.dont_filter and self.dupefilter.request_seen(request): self.logger.debug("Request not added back to redis") return req_dict = self.request_to_dict(request) if not self.is_blacklisted(req_dict['meta']['appid'], req_dict['meta']['crawlid']): # grab the tld of the request ex_res = self.extract(req_dict['url']) key = "{sid}:{dom}.{suf}:queue".format( sid=req_dict['meta']['spiderid'], dom=ex_res.domain, suf=ex_res.suffix) curr_time = time.time() domain = "{d}.{s}".format(d=ex_res.domain, s=ex_res.suffix) # allow only if we want all requests or we want # everything but blacklisted domains # insert if crawl never expires (0) or time < expires if (self.backlog_blacklist or (not self.backlog_blacklist and domain not in self.black_domains)) and \ (req_dict['meta']['expires'] == 0 or curr_time < req_dict['meta']['expires']): # we may already have the queue in memory if key in self.queue_keys: self.queue_dict[key][0].push(req_dict, req_dict['meta']['priority']) else: # shoving into a new redis queue, negative b/c of sorted sets # this will populate ourself and other schedulers when # they call create_queues self.redis_conn.zadd(key, ujson.dumps(req_dict), -req_dict['meta']['priority']) self.logger.debug("Crawlid: '{id}' Appid: '{appid}' added to queue" .format(appid=req_dict['meta']['appid'], id=req_dict['meta']['crawlid'])) else: self.logger.debug("Crawlid: '{id}' Appid: '{appid}' expired" .format(appid=req_dict['meta']['appid'], id=req_dict['meta']['crawlid'])) else: self.logger.debug("Crawlid: '{id}' Appid: '{appid}' blacklisted" .format(appid=req_dict['meta']['appid'], id=req_dict['meta']['crawlid']))
[ "def", "enqueue_request", "(", "self", ",", "request", ")", ":", "if", "not", "request", ".", "dont_filter", "and", "self", ".", "dupefilter", ".", "request_seen", "(", "request", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Request not added back to redis\"", ")", "return", "req_dict", "=", "self", ".", "request_to_dict", "(", "request", ")", "if", "not", "self", ".", "is_blacklisted", "(", "req_dict", "[", "'meta'", "]", "[", "'appid'", "]", ",", "req_dict", "[", "'meta'", "]", "[", "'crawlid'", "]", ")", ":", "# grab the tld of the request", "ex_res", "=", "self", ".", "extract", "(", "req_dict", "[", "'url'", "]", ")", "key", "=", "\"{sid}:{dom}.{suf}:queue\"", ".", "format", "(", "sid", "=", "req_dict", "[", "'meta'", "]", "[", "'spiderid'", "]", ",", "dom", "=", "ex_res", ".", "domain", ",", "suf", "=", "ex_res", ".", "suffix", ")", "curr_time", "=", "time", ".", "time", "(", ")", "domain", "=", "\"{d}.{s}\"", ".", "format", "(", "d", "=", "ex_res", ".", "domain", ",", "s", "=", "ex_res", ".", "suffix", ")", "# allow only if we want all requests or we want", "# everything but blacklisted domains", "# insert if crawl never expires (0) or time < expires", "if", "(", "self", ".", "backlog_blacklist", "or", "(", "not", "self", ".", "backlog_blacklist", "and", "domain", "not", "in", "self", ".", "black_domains", ")", ")", "and", "(", "req_dict", "[", "'meta'", "]", "[", "'expires'", "]", "==", "0", "or", "curr_time", "<", "req_dict", "[", "'meta'", "]", "[", "'expires'", "]", ")", ":", "# we may already have the queue in memory", "if", "key", "in", "self", ".", "queue_keys", ":", "self", ".", "queue_dict", "[", "key", "]", "[", "0", "]", ".", "push", "(", "req_dict", ",", "req_dict", "[", "'meta'", "]", "[", "'priority'", "]", ")", "else", ":", "# shoving into a new redis queue, negative b/c of sorted sets", "# this will populate ourself and other schedulers when", "# they call create_queues", "self", ".", "redis_conn", ".", "zadd", "(", "key", ",", "ujson", ".", "dumps", "(", "req_dict", ")", ",", "-", "req_dict", "[", "'meta'", "]", "[", "'priority'", "]", ")", "self", ".", "logger", ".", "debug", "(", "\"Crawlid: '{id}' Appid: '{appid}' added to queue\"", ".", "format", "(", "appid", "=", "req_dict", "[", "'meta'", "]", "[", "'appid'", "]", ",", "id", "=", "req_dict", "[", "'meta'", "]", "[", "'crawlid'", "]", ")", ")", "else", ":", "self", ".", "logger", ".", "debug", "(", "\"Crawlid: '{id}' Appid: '{appid}' expired\"", ".", "format", "(", "appid", "=", "req_dict", "[", "'meta'", "]", "[", "'appid'", "]", ",", "id", "=", "req_dict", "[", "'meta'", "]", "[", "'crawlid'", "]", ")", ")", "else", ":", "self", ".", "logger", ".", "debug", "(", "\"Crawlid: '{id}' Appid: '{appid}' blacklisted\"", ".", "format", "(", "appid", "=", "req_dict", "[", "'meta'", "]", "[", "'appid'", "]", ",", "id", "=", "req_dict", "[", "'meta'", "]", "[", "'crawlid'", "]", ")", ")" ]
Pushes a request from the spider into the proper throttled queue
[ "Pushes", "a", "request", "from", "the", "spider", "into", "the", "proper", "throttled", "queue" ]
python
train
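The throttled-queue key above combines the spider id with the request's registered domain and suffix; self.extract behaves like a tldextract-style extractor. A small sketch of the key construction, assuming the third-party tldextract package (its first call may refresh the public suffix list):

import tldextract

def queue_key(spiderid, url):
    # Split the URL into registered domain + public suffix, then build the key.
    parts = tldextract.extract(url)
    return "{sid}:{dom}.{suf}:queue".format(sid=spiderid,
                                            dom=parts.domain,
                                            suf=parts.suffix)

print(queue_key("link", "http://news.bbc.co.uk/sport"))   # link:bbc.co.uk:queue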
mixmastamyk/fr
fr/linux.py
https://github.com/mixmastamyk/fr/blob/f96df8ed7210a033b9e711bbed768d4116213bfb/fr/linux.py#L219-L252
def get_meminfo(opts): ''' Returns a dictionary holding the current memory info, divided by the ouptut unit. If mem info can't be read, returns None. ''' meminfo = MemInfo() outunit = opts.outunit try: with open(memfname) as infile: lines = infile.readlines() except IOError: return None for line in lines: # format: 'MemTotal: 511456 kB\n' tokens = line.split() if tokens: name, value = tokens[0][:-1].lower(), tokens[1] # rm : if len(tokens) == 2: continue unit = tokens[2].lower() # parse_result to bytes TODO value = int(value) if unit == 'kb': value = value * 1024 # most likely elif unit == 'b': value = value elif unit == 'mb': value = value * 1024 * 1024 elif unit == 'gb': value = value * 1024 * 1024 * 1024 setattr(meminfo, name, value / outunit) cache = meminfo.cached + meminfo.buffers meminfo.used = meminfo.memtotal - meminfo.memfree - cache meminfo.swapused = (meminfo.swaptotal - meminfo.swapcached - meminfo.swapfree) return meminfo
[ "def", "get_meminfo", "(", "opts", ")", ":", "meminfo", "=", "MemInfo", "(", ")", "outunit", "=", "opts", ".", "outunit", "try", ":", "with", "open", "(", "memfname", ")", "as", "infile", ":", "lines", "=", "infile", ".", "readlines", "(", ")", "except", "IOError", ":", "return", "None", "for", "line", "in", "lines", ":", "# format: 'MemTotal: 511456 kB\\n'", "tokens", "=", "line", ".", "split", "(", ")", "if", "tokens", ":", "name", ",", "value", "=", "tokens", "[", "0", "]", "[", ":", "-", "1", "]", ".", "lower", "(", ")", ",", "tokens", "[", "1", "]", "# rm :", "if", "len", "(", "tokens", ")", "==", "2", ":", "continue", "unit", "=", "tokens", "[", "2", "]", ".", "lower", "(", ")", "# parse_result to bytes TODO", "value", "=", "int", "(", "value", ")", "if", "unit", "==", "'kb'", ":", "value", "=", "value", "*", "1024", "# most likely", "elif", "unit", "==", "'b'", ":", "value", "=", "value", "elif", "unit", "==", "'mb'", ":", "value", "=", "value", "*", "1024", "*", "1024", "elif", "unit", "==", "'gb'", ":", "value", "=", "value", "*", "1024", "*", "1024", "*", "1024", "setattr", "(", "meminfo", ",", "name", ",", "value", "/", "outunit", ")", "cache", "=", "meminfo", ".", "cached", "+", "meminfo", ".", "buffers", "meminfo", ".", "used", "=", "meminfo", ".", "memtotal", "-", "meminfo", ".", "memfree", "-", "cache", "meminfo", ".", "swapused", "=", "(", "meminfo", ".", "swaptotal", "-", "meminfo", ".", "swapcached", "-", "meminfo", ".", "swapfree", ")", "return", "meminfo" ]
Returns a dictionary holding the current memory info, divided by the output unit. If mem info can't be read, returns None.
[ "Returns", "a", "dictionary", "holding", "the", "current", "memory", "info", "divided", "by", "the", "ouptut", "unit", ".", "If", "mem", "info", "can", "t", "be", "read", "returns", "None", "." ]
python
train
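A self-contained sketch of the /proc/meminfo parsing above, fed from a literal sample so it runs anywhere; keys are lower-cased the same way and values converted to bytes:

SAMPLE = """MemTotal:        511456 kB
MemFree:         123456 kB
Buffers:           8192 kB
"""

UNIT = {"b": 1, "kb": 1024, "mb": 1024 ** 2, "gb": 1024 ** 3}

def parse_meminfo(text):
    info = {}
    for line in text.splitlines():
        tokens = line.split()
        if len(tokens) < 3:            # skip entries that carry no unit
            continue
        name = tokens[0].rstrip(":").lower()
        info[name] = int(tokens[1]) * UNIT[tokens[2].lower()]
    return info

print(parse_meminfo(SAMPLE)["memtotal"])   # 523730944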
django-parler/django-parler
parler/models.py
https://github.com/django-parler/django-parler/blob/11ae4af5e8faddb74c69c848870122df4006a54e/parler/models.py#L608-L621
def _get_prefetched_translations(self, meta=None): """ Return the queryset with prefetch results. """ if meta is None: meta = self._parler_meta.root related_name = meta.rel_name try: # Read the list directly, avoid QuerySet construction. # Accessing self._get_translated_queryset(parler_meta)._prefetch_done is more expensive. return self._prefetched_objects_cache[related_name] except (AttributeError, KeyError): return None
[ "def", "_get_prefetched_translations", "(", "self", ",", "meta", "=", "None", ")", ":", "if", "meta", "is", "None", ":", "meta", "=", "self", ".", "_parler_meta", ".", "root", "related_name", "=", "meta", ".", "rel_name", "try", ":", "# Read the list directly, avoid QuerySet construction.", "# Accessing self._get_translated_queryset(parler_meta)._prefetch_done is more expensive.", "return", "self", ".", "_prefetched_objects_cache", "[", "related_name", "]", "except", "(", "AttributeError", ",", "KeyError", ")", ":", "return", "None" ]
Return the queryset with prefetch results.
[ "Return", "the", "queryset", "with", "prefetch", "results", "." ]
python
train
django-treebeard/django-treebeard
treebeard/mp_tree.py
https://github.com/django-treebeard/django-treebeard/blob/8042ee939cb45394909237da447f8925e3cc6aa3/treebeard/mp_tree.py#L958-L963
def get_descendants(self): """ :returns: A queryset of all the node's descendants as DFS, doesn't include the node itself """ return self.__class__.get_tree(self).exclude(pk=self.pk)
[ "def", "get_descendants", "(", "self", ")", ":", "return", "self", ".", "__class__", ".", "get_tree", "(", "self", ")", ".", "exclude", "(", "pk", "=", "self", ".", "pk", ")" ]
:returns: A queryset of all the node's descendants as DFS, doesn't include the node itself
[ ":", "returns", ":", "A", "queryset", "of", "all", "the", "node", "s", "descendants", "as", "DFS", "doesn", "t", "include", "the", "node", "itself" ]
python
train
QuantEcon/QuantEcon.py
quantecon/game_theory/random.py
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/game_theory/random.py#L127-L153
def random_mixed_actions(nums_actions, random_state=None): """ Return a tuple of random mixed actions (vectors of floats). Parameters ---------- nums_actions : tuple(int) Tuple of the numbers of actions, one for each player. random_state : int or np.random.RandomState, optional Random seed (integer) or np.random.RandomState instance to set the initial state of the random number generator for reproducibility. If None, a randomly initialized RandomState is used. Returns ------- action_profile : tuple(ndarray(float, ndim=1)) Tuple of mixed_actions, one for each player. """ random_state = check_random_state(random_state) action_profile = tuple( [probvec(1, num_actions, random_state).ravel() for num_actions in nums_actions] ) return action_profile
[ "def", "random_mixed_actions", "(", "nums_actions", ",", "random_state", "=", "None", ")", ":", "random_state", "=", "check_random_state", "(", "random_state", ")", "action_profile", "=", "tuple", "(", "[", "probvec", "(", "1", ",", "num_actions", ",", "random_state", ")", ".", "ravel", "(", ")", "for", "num_actions", "in", "nums_actions", "]", ")", "return", "action_profile" ]
Return a tuple of random mixed actions (vectors of floats). Parameters ---------- nums_actions : tuple(int) Tuple of the numbers of actions, one for each player. random_state : int or np.random.RandomState, optional Random seed (integer) or np.random.RandomState instance to set the initial state of the random number generator for reproducibility. If None, a randomly initialized RandomState is used. Returns ------- action_profile : tuple(ndarray(float, ndim=1)) Tuple of mixed_actions, one for each player.
[ "Return", "a", "tuple", "of", "random", "mixed", "actions", "(", "vectors", "of", "floats", ")", "." ]
python
train
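An illustrative numpy sketch of the same idea, one point on the probability simplex per player; a Dirichlet(1, ..., 1) draw is uniform over the simplex and stands in for quantecon's probvec helper:

import numpy as np

def random_mixed_actions(nums_actions, seed=None):
    rng = np.random.RandomState(seed)
    # Dirichlet(1, ..., 1) is the uniform distribution over the simplex.
    return tuple(rng.dirichlet(np.ones(n)) for n in nums_actions)

profile = random_mixed_actions((2, 3), seed=1234)
print([a.round(3) for a in profile])   # each vector sums to 1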
vtkiorg/vtki
vtki/common.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/common.py#L261-L270
def set_active_vectors(self, name, preference='cell'): """Finds the vectors by name and appropriately sets it as active""" _, field = get_scalar(self, name, preference=preference, info=True) if field == POINT_DATA_FIELD: self.GetPointData().SetActiveVectors(name) elif field == CELL_DATA_FIELD: self.GetCellData().SetActiveVectors(name) else: raise RuntimeError('Data field ({}) not useable'.format(field)) self._active_vectors_info = [field, name]
[ "def", "set_active_vectors", "(", "self", ",", "name", ",", "preference", "=", "'cell'", ")", ":", "_", ",", "field", "=", "get_scalar", "(", "self", ",", "name", ",", "preference", "=", "preference", ",", "info", "=", "True", ")", "if", "field", "==", "POINT_DATA_FIELD", ":", "self", ".", "GetPointData", "(", ")", ".", "SetActiveVectors", "(", "name", ")", "elif", "field", "==", "CELL_DATA_FIELD", ":", "self", ".", "GetCellData", "(", ")", ".", "SetActiveVectors", "(", "name", ")", "else", ":", "raise", "RuntimeError", "(", "'Data field ({}) not useable'", ".", "format", "(", "field", ")", ")", "self", ".", "_active_vectors_info", "=", "[", "field", ",", "name", "]" ]
Finds the vectors array by name and appropriately sets it as active
[ "Finds", "the", "vectors", "by", "name", "and", "appropriately", "sets", "it", "as", "active" ]
python
train
sdispater/orator
orator/dbal/platforms/sqlite_platform.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/dbal/platforms/sqlite_platform.py#L517-L530
def _get_primary_index_in_altered_table(self, diff): """ :param diff: The table diff :type diff: orator.dbal.table_diff.TableDiff :rtype: dict """ primary_index = {} for index in self._get_indexes_in_altered_table(diff).values(): if index.is_primary(): primary_index = {index.get_name(): index} return primary_index
[ "def", "_get_primary_index_in_altered_table", "(", "self", ",", "diff", ")", ":", "primary_index", "=", "{", "}", "for", "index", "in", "self", ".", "_get_indexes_in_altered_table", "(", "diff", ")", ".", "values", "(", ")", ":", "if", "index", ".", "is_primary", "(", ")", ":", "primary_index", "=", "{", "index", ".", "get_name", "(", ")", ":", "index", "}", "return", "primary_index" ]
:param diff: The table diff :type diff: orator.dbal.table_diff.TableDiff :rtype: dict
[ ":", "param", "diff", ":", "The", "table", "diff", ":", "type", "diff", ":", "orator", ".", "dbal", ".", "table_diff", ".", "TableDiff" ]
python
train
dereneaton/ipyrad
ipyrad/assemble/rawedit.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/rawedit.py#L171-L273
def cutadaptit_single(data, sample): """ Applies quality and adapter filters to reads using cutadapt. If the ipyrad filter param is set to 0 then it only filters to hard trim edges and uses mintrimlen. If filter=1, we add quality filters. If filter=2 we add adapter filters. """ sname = sample.name ## if (GBS, ddRAD) we look for the second cut site + adapter. For single-end ## data we don't bother trying to remove the second barcode since it's not ## as critical as with PE data. if data.paramsdict["datatype"] == "rad": adapter = data._hackersonly["p3_adapter"] else: ## if GBS then the barcode can also be on the other side. if data.paramsdict["datatype"] == "gbs": ## make full adapter (-revcompcut-revcompbarcode-adapter) ## and add adapter without revcompbarcode if data.barcodes: adapter = \ fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] \ + fullcomp(data.barcodes[sample.name])[::-1] \ + data._hackersonly["p3_adapter"] ## add incomplete adapter to extras (-recompcut-adapter) data._hackersonly["p3_adapters_extra"].append( fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] \ + data._hackersonly["p3_adapter"]) else: LOGGER.warning("No barcode information present, and is therefore not "+\ "being used for adapter trimming of SE gbs data.") ## else no search for barcodes on 3' adapter = \ fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] \ + data._hackersonly["p3_adapter"] else: adapter = \ fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] \ + data._hackersonly["p3_adapter"] ## get length trim parameter from new or older version of ipyrad params trim5r1 = trim3r1 = [] if data.paramsdict.get("trim_reads"): trimlen = data.paramsdict.get("trim_reads") ## trim 5' end if trimlen[0]: trim5r1 = ["-u", str(trimlen[0])] if trimlen[1] < 0: trim3r1 = ["-u", str(trimlen[1])] if trimlen[1] > 0: trim3r1 = ["--length", str(trimlen[1])] else: trimlen = data.paramsdict.get("edit_cutsites") trim5r1 = ["--cut", str(trimlen[0])] ## testing new 'trim_reads' setting cmdf1 = ["cutadapt"] if trim5r1: cmdf1 += trim5r1 if trim3r1: cmdf1 += trim3r1 cmdf1 += ["--minimum-length", str(data.paramsdict["filter_min_trim_len"]), "--max-n", str(data.paramsdict["max_low_qual_bases"]), "--trim-n", "--output", OPJ(data.dirs.edits, sname+".trimmed_R1_.fastq.gz"), sample.files.concat[0][0]] if int(data.paramsdict["filter_adapters"]): ## NEW: only quality trim the 3' end for SE data. cmdf1.insert(1, "20") cmdf1.insert(1, "-q") cmdf1.insert(1, str(data.paramsdict["phred_Qscore_offset"])) cmdf1.insert(1, "--quality-base") ## if filter_adapters==3 then p3_adapters_extra will already have extra ## poly adapters added to its list. if int(data.paramsdict["filter_adapters"]) > 1: ## first enter extra cuts (order of input is reversed) for extracut in list(set(data._hackersonly["p3_adapters_extra"]))[::-1]: cmdf1.insert(1, extracut) cmdf1.insert(1, "-a") ## then put the main cut so it appears first in command cmdf1.insert(1, adapter) cmdf1.insert(1, "-a") ## do modifications to read1 and write to tmp file LOGGER.info(cmdf1) proc1 = sps.Popen(cmdf1, stderr=sps.STDOUT, stdout=sps.PIPE, close_fds=True) try: res1 = proc1.communicate()[0] except KeyboardInterrupt: proc1.kill() raise KeyboardInterrupt ## raise errors if found if proc1.returncode: raise IPyradWarningExit(" error in {}\n {}".format(" ".join(cmdf1), res1)) ## return result string to be parsed outside of engine return res1
[ "def", "cutadaptit_single", "(", "data", ",", "sample", ")", ":", "sname", "=", "sample", ".", "name", "## if (GBS, ddRAD) we look for the second cut site + adapter. For single-end", "## data we don't bother trying to remove the second barcode since it's not", "## as critical as with PE data.", "if", "data", ".", "paramsdict", "[", "\"datatype\"", "]", "==", "\"rad\"", ":", "adapter", "=", "data", ".", "_hackersonly", "[", "\"p3_adapter\"", "]", "else", ":", "## if GBS then the barcode can also be on the other side. ", "if", "data", ".", "paramsdict", "[", "\"datatype\"", "]", "==", "\"gbs\"", ":", "## make full adapter (-revcompcut-revcompbarcode-adapter)", "## and add adapter without revcompbarcode", "if", "data", ".", "barcodes", ":", "adapter", "=", "fullcomp", "(", "data", ".", "paramsdict", "[", "\"restriction_overhang\"", "]", "[", "1", "]", ")", "[", ":", ":", "-", "1", "]", "+", "fullcomp", "(", "data", ".", "barcodes", "[", "sample", ".", "name", "]", ")", "[", ":", ":", "-", "1", "]", "+", "data", ".", "_hackersonly", "[", "\"p3_adapter\"", "]", "## add incomplete adapter to extras (-recompcut-adapter)", "data", ".", "_hackersonly", "[", "\"p3_adapters_extra\"", "]", ".", "append", "(", "fullcomp", "(", "data", ".", "paramsdict", "[", "\"restriction_overhang\"", "]", "[", "1", "]", ")", "[", ":", ":", "-", "1", "]", "+", "data", ".", "_hackersonly", "[", "\"p3_adapter\"", "]", ")", "else", ":", "LOGGER", ".", "warning", "(", "\"No barcode information present, and is therefore not \"", "+", "\"being used for adapter trimming of SE gbs data.\"", ")", "## else no search for barcodes on 3'", "adapter", "=", "fullcomp", "(", "data", ".", "paramsdict", "[", "\"restriction_overhang\"", "]", "[", "1", "]", ")", "[", ":", ":", "-", "1", "]", "+", "data", ".", "_hackersonly", "[", "\"p3_adapter\"", "]", "else", ":", "adapter", "=", "fullcomp", "(", "data", ".", "paramsdict", "[", "\"restriction_overhang\"", "]", "[", "1", "]", ")", "[", ":", ":", "-", "1", "]", "+", "data", ".", "_hackersonly", "[", "\"p3_adapter\"", "]", "## get length trim parameter from new or older version of ipyrad params", "trim5r1", "=", "trim3r1", "=", "[", "]", "if", "data", ".", "paramsdict", ".", "get", "(", "\"trim_reads\"", ")", ":", "trimlen", "=", "data", ".", "paramsdict", ".", "get", "(", "\"trim_reads\"", ")", "## trim 5' end", "if", "trimlen", "[", "0", "]", ":", "trim5r1", "=", "[", "\"-u\"", ",", "str", "(", "trimlen", "[", "0", "]", ")", "]", "if", "trimlen", "[", "1", "]", "<", "0", ":", "trim3r1", "=", "[", "\"-u\"", ",", "str", "(", "trimlen", "[", "1", "]", ")", "]", "if", "trimlen", "[", "1", "]", ">", "0", ":", "trim3r1", "=", "[", "\"--length\"", ",", "str", "(", "trimlen", "[", "1", "]", ")", "]", "else", ":", "trimlen", "=", "data", ".", "paramsdict", ".", "get", "(", "\"edit_cutsites\"", ")", "trim5r1", "=", "[", "\"--cut\"", ",", "str", "(", "trimlen", "[", "0", "]", ")", "]", "## testing new 'trim_reads' setting", "cmdf1", "=", "[", "\"cutadapt\"", "]", "if", "trim5r1", ":", "cmdf1", "+=", "trim5r1", "if", "trim3r1", ":", "cmdf1", "+=", "trim3r1", "cmdf1", "+=", "[", "\"--minimum-length\"", ",", "str", "(", "data", ".", "paramsdict", "[", "\"filter_min_trim_len\"", "]", ")", ",", "\"--max-n\"", ",", "str", "(", "data", ".", "paramsdict", "[", "\"max_low_qual_bases\"", "]", ")", ",", "\"--trim-n\"", ",", "\"--output\"", ",", "OPJ", "(", "data", ".", "dirs", ".", "edits", ",", "sname", "+", "\".trimmed_R1_.fastq.gz\"", ")", ",", "sample", ".", "files", ".", "concat", "[", "0", "]", "[", 
"0", "]", "]", "if", "int", "(", "data", ".", "paramsdict", "[", "\"filter_adapters\"", "]", ")", ":", "## NEW: only quality trim the 3' end for SE data.", "cmdf1", ".", "insert", "(", "1", ",", "\"20\"", ")", "cmdf1", ".", "insert", "(", "1", ",", "\"-q\"", ")", "cmdf1", ".", "insert", "(", "1", ",", "str", "(", "data", ".", "paramsdict", "[", "\"phred_Qscore_offset\"", "]", ")", ")", "cmdf1", ".", "insert", "(", "1", ",", "\"--quality-base\"", ")", "## if filter_adapters==3 then p3_adapters_extra will already have extra", "## poly adapters added to its list. ", "if", "int", "(", "data", ".", "paramsdict", "[", "\"filter_adapters\"", "]", ")", ">", "1", ":", "## first enter extra cuts (order of input is reversed)", "for", "extracut", "in", "list", "(", "set", "(", "data", ".", "_hackersonly", "[", "\"p3_adapters_extra\"", "]", ")", ")", "[", ":", ":", "-", "1", "]", ":", "cmdf1", ".", "insert", "(", "1", ",", "extracut", ")", "cmdf1", ".", "insert", "(", "1", ",", "\"-a\"", ")", "## then put the main cut so it appears first in command", "cmdf1", ".", "insert", "(", "1", ",", "adapter", ")", "cmdf1", ".", "insert", "(", "1", ",", "\"-a\"", ")", "## do modifications to read1 and write to tmp file", "LOGGER", ".", "info", "(", "cmdf1", ")", "proc1", "=", "sps", ".", "Popen", "(", "cmdf1", ",", "stderr", "=", "sps", ".", "STDOUT", ",", "stdout", "=", "sps", ".", "PIPE", ",", "close_fds", "=", "True", ")", "try", ":", "res1", "=", "proc1", ".", "communicate", "(", ")", "[", "0", "]", "except", "KeyboardInterrupt", ":", "proc1", ".", "kill", "(", ")", "raise", "KeyboardInterrupt", "## raise errors if found", "if", "proc1", ".", "returncode", ":", "raise", "IPyradWarningExit", "(", "\" error in {}\\n {}\"", ".", "format", "(", "\" \"", ".", "join", "(", "cmdf1", ")", ",", "res1", ")", ")", "## return result string to be parsed outside of engine", "return", "res1" ]
Applies quality and adapter filters to reads using cutadapt. If the ipyrad filter param is set to 0 then it only filters to hard trim edges and uses mintrimlen. If filter=1, we add quality filters. If filter=2 we add adapter filters.
[ "Applies", "quality", "and", "adapter", "filters", "to", "reads", "using", "cutadapt", ".", "If", "the", "ipyrad", "filter", "param", "is", "set", "to", "0", "then", "it", "only", "filters", "to", "hard", "trim", "edges", "and", "uses", "mintrimlen", ".", "If", "filter", "=", "1", "we", "add", "quality", "filters", ".", "If", "filter", "=", "2", "we", "add", "adapter", "filters", "." ]
python
valid
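One subtle point in cutadaptit_single is that the repeated cmdf1.insert(1, ...) calls push options in just after the program name, so the item inserted last ends up first in the final command; a tiny standalone illustration of that ordering, with made-up adapter strings:

cmd = ["cutadapt", "--output", "out.fastq.gz", "in.fastq.gz"]
for extracut in ["AAAA", "CCCC"][::-1]:    # extras are fed in reversed, as in the code above
    cmd.insert(1, extracut)
    cmd.insert(1, "-a")
cmd.insert(1, "GATCGGAAGAGC")              # main adapter inserted last, so it appears first
cmd.insert(1, "-a")
print(" ".join(cmd))
# cutadapt -a GATCGGAAGAGC -a AAAA -a CCCC --output out.fastq.gz in.fastq.gz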
moreati/subresource-integrity
subresource_integrity.py
https://github.com/moreati/subresource-integrity/blob/c9f6cecddea85f1c7bb5562551a41b9678fbda21/subresource_integrity.py#L102-L115
def generate(data, algorithms=(DEFAULT_ALOGRITHM,)): """Yields subresource integrity Hash objects for the given data & algorithms >>> for ihash in generate(b"alert('Hello, world.');"): ... print ('%s %s' % (ihash.algorithm, ihash.b58digest)) sha384 H8BRh8j48O9oYatfu5AZzq6A9RINhZO5H16dQZngK7T62em8MUt1FLm52t+eX6xO >>> list(generate(b"alert('Hello, world.');", ['sha256', 'sha384'])) ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE [subresource_integrity.Hash('sha256', 'qz.../Tng=', ''), subresource_integrity.Hash('sha384', 'H8BR...+eX6xO', '')] """ return (Hash.fromresource(data, algorithm) for algorithm in algorithms)
[ "def", "generate", "(", "data", ",", "algorithms", "=", "(", "DEFAULT_ALOGRITHM", ",", ")", ")", ":", "return", "(", "Hash", ".", "fromresource", "(", "data", ",", "algorithm", ")", "for", "algorithm", "in", "algorithms", ")" ]
Yields subresource integrity Hash objects for the given data & algorithms >>> for ihash in generate(b"alert('Hello, world.');"): ... print ('%s %s' % (ihash.algorithm, ihash.b58digest)) sha384 H8BRh8j48O9oYatfu5AZzq6A9RINhZO5H16dQZngK7T62em8MUt1FLm52t+eX6xO >>> list(generate(b"alert('Hello, world.');", ['sha256', 'sha384'])) ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE [subresource_integrity.Hash('sha256', 'qz.../Tng=', ''), subresource_integrity.Hash('sha384', 'H8BR...+eX6xO', '')]
[ "Yields", "subresource", "integrity", "Hash", "objects", "for", "the", "given", "data", "&", "algorithms" ]
python
train
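The sha384 value in the doctest can be reproduced with nothing but the standard library; this sketch assumes only the usual subresource-integrity format of "<algorithm>-<base64 digest>" and does not touch the package's Hash class:

import base64
import hashlib

data = b"alert('Hello, world.');"
digest = hashlib.sha384(data).digest()
print("sha384-" + base64.b64encode(digest).decode("ascii"))
# sha384-H8BRh8j48O9oYatfu5AZzq6A9RINhZO5H16dQZngK7T62em8MUt1FLm52t+eX6xO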
OpenHydrology/floodestimation
floodestimation/analysis.py
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L418-L425
def _pruaf(self): """ Return percentage runoff urban adjustment factor. Methodology source: eqn. 6, Kjeldsen 2010 """ return 1 + 0.47 * self.catchment.descriptors.urbext(self.year) \ * self.catchment.descriptors.bfihost / (1 - self.catchment.descriptors.bfihost)
[ "def", "_pruaf", "(", "self", ")", ":", "return", "1", "+", "0.47", "*", "self", ".", "catchment", ".", "descriptors", ".", "urbext", "(", "self", ".", "year", ")", "*", "self", ".", "catchment", ".", "descriptors", ".", "bfihost", "/", "(", "1", "-", "self", ".", "catchment", ".", "descriptors", ".", "bfihost", ")" ]
Return percentage runoff urban adjustment factor. Methodology source: eqn. 6, Kjeldsen 2010
[ "Return", "percentage", "runoff", "urban", "adjustment", "factor", "." ]
python
train
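The cited equation is easy to sanity-check by hand; a standalone sketch with purely illustrative descriptor values (the urbext and bfihost numbers below are made up, not taken from any real catchment):

def pruaf(urbext, bfihost):
    # eqn. 6, Kjeldsen 2010: 1 + 0.47 * URBEXT * BFIHOST / (1 - BFIHOST)
    return 1 + 0.47 * urbext * bfihost / (1 - bfihost)

print(round(pruaf(urbext=0.3, bfihost=0.5), 4))   # 1.141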
pgmpy/pgmpy
pgmpy/models/ClusterGraph.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/models/ClusterGraph.py#L257-L285
def get_partition_function(self): r""" Returns the partition function for a given undirected graph. A partition function is defined as .. math:: \sum_{X}(\prod_{i=1}^{m} \phi_i) where m is the number of factors present in the graph and X are all the random variables present. Examples -------- >>> from pgmpy.models import ClusterGraph >>> from pgmpy.factors.discrete import DiscreteFactor >>> G = ClusterGraph() >>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')]) >>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')), ... (('a', 'b', 'c'), ('a', 'c'))]) >>> phi1 = DiscreteFactor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8)) >>> phi2 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4)) >>> phi3 = DiscreteFactor(['a', 'c'], [2, 2], np.random.rand(4)) >>> G.add_factors(phi1, phi2, phi3) >>> G.get_partition_function() """ if self.check_model(): factor = self.factors[0] factor = factor_product(factor, *[self.factors[i] for i in range(1, len(self.factors))]) return np.sum(factor.values)
[ "def", "get_partition_function", "(", "self", ")", ":", "if", "self", ".", "check_model", "(", ")", ":", "factor", "=", "self", ".", "factors", "[", "0", "]", "factor", "=", "factor_product", "(", "factor", ",", "*", "[", "self", ".", "factors", "[", "i", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "self", ".", "factors", ")", ")", "]", ")", "return", "np", ".", "sum", "(", "factor", ".", "values", ")" ]
r""" Returns the partition function for a given undirected graph. A partition function is defined as .. math:: \sum_{X}(\prod_{i=1}^{m} \phi_i) where m is the number of factors present in the graph and X are all the random variables present. Examples -------- >>> from pgmpy.models import ClusterGraph >>> from pgmpy.factors.discrete import DiscreteFactor >>> G = ClusterGraph() >>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')]) >>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')), ... (('a', 'b', 'c'), ('a', 'c'))]) >>> phi1 = DiscreteFactor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8)) >>> phi2 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4)) >>> phi3 = DiscreteFactor(['a', 'c'], [2, 2], np.random.rand(4)) >>> G.add_factors(phi1, phi2, phi3) >>> G.get_partition_function()
[ "r", "Returns", "the", "partition", "function", "for", "a", "given", "undirected", "graph", "." ]
python
train
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L568-L584
def diff(self, cursor='null', **kwargs): """文件增量更新操作查询接口. 本接口有数秒延迟,但保证返回结果为最终一致. :param cursor: 用于标记更新断点。 * 首次调用cursor=null; * 非首次调用,使用最后一次调用diff接口的返回结果 中的cursor。 :type cursor: str :return: Response 对象 """ params = { 'cursor': cursor, } return self._request('file', 'diff', extra_params=params, **kwargs)
[ "def", "diff", "(", "self", ",", "cursor", "=", "'null'", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'cursor'", ":", "cursor", ",", "}", "return", "self", ".", "_request", "(", "'file'", ",", "'diff'", ",", "extra_params", "=", "params", ",", "*", "*", "kwargs", ")" ]
Query interface for incremental file update operations. This interface has a delay of a few seconds, but the returned result is guaranteed to be eventually consistent. :param cursor: Marks the point from which to resume updates. * For the first call, use cursor=null; * For subsequent calls, use the cursor from the result returned by the last call to the diff interface. :type cursor: str :return: Response object
[ "Query", "interface", "for", "incremental", "file", "update", "operations", ".", "This", "interface", "has", "a", "delay", "of", "a", "few", "seconds", "but", "the", "returned", "result", "is", "guaranteed", "to", "be", "eventually", "consistent", "." ]
python
train
LCAV/pylocus
pylocus/basics_angles.py
https://github.com/LCAV/pylocus/blob/c56a38c251d8a435caf4641a8ae6027ecba2c8c6/pylocus/basics_angles.py#L7-L26
def change_angles(method, theta, tol=1e-10): """ Function used by all angle conversion functions (from_x_to_x_pi(...))""" try: theta_new = np.zeros(theta.shape) for i, thet in enumerate(theta): try: # theta is vector theta_new[i] = method(thet, tol) except: # theta is matrix for j, th in enumerate(thet): try: theta_new[i, j] = method(th, tol) except: # theta is tensor for k, t in enumerate(th): theta_new[i, j, k] = method(t, tol) return theta_new except: return method(theta, tol)
[ "def", "change_angles", "(", "method", ",", "theta", ",", "tol", "=", "1e-10", ")", ":", "try", ":", "theta_new", "=", "np", ".", "zeros", "(", "theta", ".", "shape", ")", "for", "i", ",", "thet", "in", "enumerate", "(", "theta", ")", ":", "try", ":", "# theta is vector", "theta_new", "[", "i", "]", "=", "method", "(", "thet", ",", "tol", ")", "except", ":", "# theta is matrix", "for", "j", ",", "th", "in", "enumerate", "(", "thet", ")", ":", "try", ":", "theta_new", "[", "i", ",", "j", "]", "=", "method", "(", "th", ",", "tol", ")", "except", ":", "# theta is tensor", "for", "k", ",", "t", "in", "enumerate", "(", "th", ")", ":", "theta_new", "[", "i", ",", "j", ",", "k", "]", "=", "method", "(", "t", ",", "tol", ")", "return", "theta_new", "except", ":", "return", "method", "(", "theta", ",", "tol", ")" ]
Function used by all angle conversion functions (from_x_to_x_pi(...))
[ "Function", "used", "by", "all", "angle", "conversion", "functions", "(", "from_x_to_x_pi", "(", "...", "))" ]
python
train
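The nested try/except above walks vectors, matrices and 3-d tensors to apply a scalar method elementwise; a sketch of an alternative that gets the same elementwise behaviour from numpy.vectorize, shown with a made-up wrap-to-[0, 2*pi) method rather than any of the library's own conversion methods:

import numpy as np

def to_0_2pi(theta, tol=1e-10):
    # Example scalar method: wrap an angle into [0, 2*pi).
    return theta % (2 * np.pi)

def change_angles_vectorized(method, theta, tol=1e-10):
    return np.vectorize(lambda t: method(t, tol))(theta)

print(change_angles_vectorized(to_0_2pi, np.array([[7.0, -1.0], [0.5, 3.0]])))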
smitchell556/cuttlepool
cuttlepool.py
https://github.com/smitchell556/cuttlepool/blob/21b74bc61d11c80d3b40da0321485ec6c2f9db31/cuttlepool.py#L465-L472
def close(self): """ Returns the resource to the resource pool. """ if self._resource is not None: self._pool.put_resource(self._resource) self._resource = None self._pool = None
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_resource", "is", "not", "None", ":", "self", ".", "_pool", ".", "put_resource", "(", "self", ".", "_resource", ")", "self", ".", "_resource", "=", "None", "self", ".", "_pool", "=", "None" ]
Returns the resource to the resource pool.
[ "Returns", "the", "resource", "to", "the", "resource", "pool", "." ]
python
train
treycucco/pyebnf
pyebnf/operator.py
https://github.com/treycucco/pyebnf/blob/3634ddabbe5d73508bcc20f4a591f86a46634e1d/pyebnf/operator.py#L22-L48
def infix_to_postfix(nodes, *, recurse_types=None): """Convert a list of nodes in infix order to a list of nodes in postfix order. E.G. with normal algebraic precedence, 3 + 4 * 5 -> 3 4 5 * + """ output = [] operators = [] for node in nodes: if isinstance(node, OperatorNode): # Drain out all operators whose precedence is gte the node's... cmp_operator = node.operator while operators: current_operator = operators[-1].operator if current_operator.precedence > cmp_operator.precedence or \ current_operator.precedence == cmp_operator.precedence and current_operator.association == Association.left: output.append(operators.pop()) else: break operators.append(node) else: if recurse_types is not None and node.node_type in recurse_types: output.extend(infix_to_postfix(node.children, recurse_types=recurse_types)) else: output.append(node) return output + list(reversed(operators))
[ "def", "infix_to_postfix", "(", "nodes", ",", "*", ",", "recurse_types", "=", "None", ")", ":", "output", "=", "[", "]", "operators", "=", "[", "]", "for", "node", "in", "nodes", ":", "if", "isinstance", "(", "node", ",", "OperatorNode", ")", ":", "# Drain out all operators whose precedence is gte the node's...", "cmp_operator", "=", "node", ".", "operator", "while", "operators", ":", "current_operator", "=", "operators", "[", "-", "1", "]", ".", "operator", "if", "current_operator", ".", "precedence", ">", "cmp_operator", ".", "precedence", "or", "current_operator", ".", "precedence", "==", "cmp_operator", ".", "precedence", "and", "current_operator", ".", "association", "==", "Association", ".", "left", ":", "output", ".", "append", "(", "operators", ".", "pop", "(", ")", ")", "else", ":", "break", "operators", ".", "append", "(", "node", ")", "else", ":", "if", "recurse_types", "is", "not", "None", "and", "node", ".", "node_type", "in", "recurse_types", ":", "output", ".", "extend", "(", "infix_to_postfix", "(", "node", ".", "children", ",", "recurse_types", "=", "recurse_types", ")", ")", "else", ":", "output", ".", "append", "(", "node", ")", "return", "output", "+", "list", "(", "reversed", "(", "operators", ")", ")" ]
Convert a list of nodes in infix order to a list of nodes in postfix order. E.G. with normal algebraic precedence, 3 + 4 * 5 -> 3 4 5 * +
[ "Convert", "a", "list", "of", "nodes", "in", "infix", "order", "to", "a", "list", "of", "nodes", "in", "postfix", "order", "." ]
python
test
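The same shunting-yard idea on plain arithmetic tokens, as a standalone sketch with left-associative operators only and no OperatorNode objects, reproduces the docstring's 3 + 4 * 5 example:

PRECEDENCE = {"+": 1, "-": 1, "*": 2, "/": 2}

def to_postfix(tokens):
    output, operators = [], []
    for tok in tokens:
        if tok in PRECEDENCE:
            # Drain operators of greater or equal precedence (left association).
            while operators and PRECEDENCE[operators[-1]] >= PRECEDENCE[tok]:
                output.append(operators.pop())
            operators.append(tok)
        else:
            output.append(tok)
    return output + operators[::-1]

print(to_postfix(["3", "+", "4", "*", "5"]))   # ['3', '4', '5', '*', '+']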
facelessuser/wcmatch
wcmatch/glob.py
https://github.com/facelessuser/wcmatch/blob/d153e7007cc73b994ae1ba553dc4584039f5c212/wcmatch/glob.py#L317-L337
def _get_starting_paths(self, curdir): """ Get the starting location. For case sensitive paths, we have to "glob" for it first as Python doesn't like for its users to think about case. By scanning for it, we can get the actual casing and then compare. """ results = [curdir] if not self._is_parent(curdir) and not self._is_this(curdir): fullpath = os.path.abspath(curdir) basename = os.path.basename(fullpath) dirname = os.path.dirname(fullpath) if basename: matcher = self._get_matcher(basename) results = [os.path.basename(name) for name in self._glob_dir(dirname, matcher, self)] return results
[ "def", "_get_starting_paths", "(", "self", ",", "curdir", ")", ":", "results", "=", "[", "curdir", "]", "if", "not", "self", ".", "_is_parent", "(", "curdir", ")", "and", "not", "self", ".", "_is_this", "(", "curdir", ")", ":", "fullpath", "=", "os", ".", "path", ".", "abspath", "(", "curdir", ")", "basename", "=", "os", ".", "path", ".", "basename", "(", "fullpath", ")", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "fullpath", ")", "if", "basename", ":", "matcher", "=", "self", ".", "_get_matcher", "(", "basename", ")", "results", "=", "[", "os", ".", "path", ".", "basename", "(", "name", ")", "for", "name", "in", "self", ".", "_glob_dir", "(", "dirname", ",", "matcher", ",", "self", ")", "]", "return", "results" ]
Get the starting location. For case sensitive paths, we have to "glob" for it first as Python doesn't like for its users to think about case. By scanning for it, we can get the actual casing and then compare.
[ "Get", "the", "starting", "location", "." ]
python
train
tjcsl/ion
intranet/apps/signage/templatetags/signage.py
https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/apps/signage/templatetags/signage.py#L10-L22
def render_page(page, page_args): """ Renders the template at page.template """ print(page_args) template_name = page.template if page.template else page.name template = "signage/pages/{}.html".format(template_name) if page.function: context_method = getattr(pages, page.function) else: context_method = getattr(pages, page.name) sign, request = page_args context = context_method(page, sign, request) return render_to_string(template, context)
[ "def", "render_page", "(", "page", ",", "page_args", ")", ":", "print", "(", "page_args", ")", "template_name", "=", "page", ".", "template", "if", "page", ".", "template", "else", "page", ".", "name", "template", "=", "\"signage/pages/{}.html\"", ".", "format", "(", "template_name", ")", "if", "page", ".", "function", ":", "context_method", "=", "getattr", "(", "pages", ",", "page", ".", "function", ")", "else", ":", "context_method", "=", "getattr", "(", "pages", ",", "page", ".", "name", ")", "sign", ",", "request", "=", "page_args", "context", "=", "context_method", "(", "page", ",", "sign", ",", "request", ")", "return", "render_to_string", "(", "template", ",", "context", ")" ]
Renders the template at page.template
[ "Renders", "the", "template", "at", "page", ".", "template" ]
python
train
programa-stic/barf-project
barf/analysis/graphs/basicblock.py
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/analysis/graphs/basicblock.py#L30-L39
def bb_get_instr_max_width(basic_block): """Get maximum instruction mnemonic width """ asm_mnemonic_max_width = 0 for instr in basic_block: if len(instr.mnemonic) > asm_mnemonic_max_width: asm_mnemonic_max_width = len(instr.mnemonic) return asm_mnemonic_max_width
[ "def", "bb_get_instr_max_width", "(", "basic_block", ")", ":", "asm_mnemonic_max_width", "=", "0", "for", "instr", "in", "basic_block", ":", "if", "len", "(", "instr", ".", "mnemonic", ")", ">", "asm_mnemonic_max_width", ":", "asm_mnemonic_max_width", "=", "len", "(", "instr", ".", "mnemonic", ")", "return", "asm_mnemonic_max_width" ]
Get maximum instruction mnemonic width
[ "Get", "maximum", "instruction", "mnemonic", "width" ]
python
train
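The same width computation fits in one expression with the built-in max; the list of mnemonics below is a stand-in for iterating over instr.mnemonic in a real basic block:

basic_block_mnemonics = ["mov", "add", "jmp", "cmove"]
asm_mnemonic_max_width = max((len(m) for m in basic_block_mnemonics), default=0)
print(asm_mnemonic_max_width)   # 5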
timknip/pycsg
csg/geom.py
https://github.com/timknip/pycsg/blob/b8f9710fd15c38dcc275d56a2108f604af38dcc8/csg/geom.py#L192-L260
def splitPolygon(self, polygon, coplanarFront, coplanarBack, front, back): """ Split `polygon` by this plane if needed, then put the polygon or polygon fragments in the appropriate lists. Coplanar polygons go into either `coplanarFront` or `coplanarBack` depending on their orientation with respect to this plane. Polygons in front or in back of this plane go into either `front` or `back` """ COPLANAR = 0 # all the vertices are within EPSILON distance from plane FRONT = 1 # all the vertices are in front of the plane BACK = 2 # all the vertices are at the back of the plane SPANNING = 3 # some vertices are in front, some in the back # Classify each point as well as the entire polygon into one of the above # four classes. polygonType = 0 vertexLocs = [] numVertices = len(polygon.vertices) for i in range(numVertices): t = self.normal.dot(polygon.vertices[i].pos) - self.w loc = -1 if t < -Plane.EPSILON: loc = BACK elif t > Plane.EPSILON: loc = FRONT else: loc = COPLANAR polygonType |= loc vertexLocs.append(loc) # Put the polygon in the correct list, splitting it when necessary. if polygonType == COPLANAR: normalDotPlaneNormal = self.normal.dot(polygon.plane.normal) if normalDotPlaneNormal > 0: coplanarFront.append(polygon) else: coplanarBack.append(polygon) elif polygonType == FRONT: front.append(polygon) elif polygonType == BACK: back.append(polygon) elif polygonType == SPANNING: f = [] b = [] for i in range(numVertices): j = (i+1) % numVertices ti = vertexLocs[i] tj = vertexLocs[j] vi = polygon.vertices[i] vj = polygon.vertices[j] if ti != BACK: f.append(vi) if ti != FRONT: if ti != BACK: b.append(vi.clone()) else: b.append(vi) if (ti | tj) == SPANNING: # interpolation weight at the intersection point t = (self.w - self.normal.dot(vi.pos)) / self.normal.dot(vj.pos.minus(vi.pos)) # intersection point on the plane v = vi.interpolate(vj, t) f.append(v) b.append(v.clone()) if len(f) >= 3: front.append(Polygon(f, polygon.shared)) if len(b) >= 3: back.append(Polygon(b, polygon.shared))
[ "def", "splitPolygon", "(", "self", ",", "polygon", ",", "coplanarFront", ",", "coplanarBack", ",", "front", ",", "back", ")", ":", "COPLANAR", "=", "0", "# all the vertices are within EPSILON distance from plane", "FRONT", "=", "1", "# all the vertices are in front of the plane", "BACK", "=", "2", "# all the vertices are at the back of the plane", "SPANNING", "=", "3", "# some vertices are in front, some in the back", "# Classify each point as well as the entire polygon into one of the above", "# four classes.", "polygonType", "=", "0", "vertexLocs", "=", "[", "]", "numVertices", "=", "len", "(", "polygon", ".", "vertices", ")", "for", "i", "in", "range", "(", "numVertices", ")", ":", "t", "=", "self", ".", "normal", ".", "dot", "(", "polygon", ".", "vertices", "[", "i", "]", ".", "pos", ")", "-", "self", ".", "w", "loc", "=", "-", "1", "if", "t", "<", "-", "Plane", ".", "EPSILON", ":", "loc", "=", "BACK", "elif", "t", ">", "Plane", ".", "EPSILON", ":", "loc", "=", "FRONT", "else", ":", "loc", "=", "COPLANAR", "polygonType", "|=", "loc", "vertexLocs", ".", "append", "(", "loc", ")", "# Put the polygon in the correct list, splitting it when necessary.", "if", "polygonType", "==", "COPLANAR", ":", "normalDotPlaneNormal", "=", "self", ".", "normal", ".", "dot", "(", "polygon", ".", "plane", ".", "normal", ")", "if", "normalDotPlaneNormal", ">", "0", ":", "coplanarFront", ".", "append", "(", "polygon", ")", "else", ":", "coplanarBack", ".", "append", "(", "polygon", ")", "elif", "polygonType", "==", "FRONT", ":", "front", ".", "append", "(", "polygon", ")", "elif", "polygonType", "==", "BACK", ":", "back", ".", "append", "(", "polygon", ")", "elif", "polygonType", "==", "SPANNING", ":", "f", "=", "[", "]", "b", "=", "[", "]", "for", "i", "in", "range", "(", "numVertices", ")", ":", "j", "=", "(", "i", "+", "1", ")", "%", "numVertices", "ti", "=", "vertexLocs", "[", "i", "]", "tj", "=", "vertexLocs", "[", "j", "]", "vi", "=", "polygon", ".", "vertices", "[", "i", "]", "vj", "=", "polygon", ".", "vertices", "[", "j", "]", "if", "ti", "!=", "BACK", ":", "f", ".", "append", "(", "vi", ")", "if", "ti", "!=", "FRONT", ":", "if", "ti", "!=", "BACK", ":", "b", ".", "append", "(", "vi", ".", "clone", "(", ")", ")", "else", ":", "b", ".", "append", "(", "vi", ")", "if", "(", "ti", "|", "tj", ")", "==", "SPANNING", ":", "# interpolation weight at the intersection point", "t", "=", "(", "self", ".", "w", "-", "self", ".", "normal", ".", "dot", "(", "vi", ".", "pos", ")", ")", "/", "self", ".", "normal", ".", "dot", "(", "vj", ".", "pos", ".", "minus", "(", "vi", ".", "pos", ")", ")", "# intersection point on the plane", "v", "=", "vi", ".", "interpolate", "(", "vj", ",", "t", ")", "f", ".", "append", "(", "v", ")", "b", ".", "append", "(", "v", ".", "clone", "(", ")", ")", "if", "len", "(", "f", ")", ">=", "3", ":", "front", ".", "append", "(", "Polygon", "(", "f", ",", "polygon", ".", "shared", ")", ")", "if", "len", "(", "b", ")", ">=", "3", ":", "back", ".", "append", "(", "Polygon", "(", "b", ",", "polygon", ".", "shared", ")", ")" ]
Split `polygon` by this plane if needed, then put the polygon or polygon fragments in the appropriate lists. Coplanar polygons go into either `coplanarFront` or `coplanarBack` depending on their orientation with respect to this plane. Polygons in front or in back of this plane go into either `front` or `back`
[ "Split", "polygon", "by", "this", "plane", "if", "needed", "then", "put", "the", "polygon", "or", "polygon", "fragments", "in", "the", "appropriate", "lists", ".", "Coplanar", "polygons", "go", "into", "either", "coplanarFront", "or", "coplanarBack", "depending", "on", "their", "orientation", "with", "respect", "to", "this", "plane", ".", "Polygons", "in", "front", "or", "in", "back", "of", "this", "plane", "go", "into", "either", "front", "or", "back" ]
python
train
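The per-vertex classification at the top of splitPolygon is a signed-distance test against the plane; a standalone sketch of just that step, with a made-up plane and points instead of the library's Vector/Vertex classes:

EPSILON = 1e-5
COPLANAR, FRONT, BACK = 0, 1, 2

def classify(point, normal, w):
    # Signed distance of the point from the plane dot(normal, x) == w.
    t = sum(n * p for n, p in zip(normal, point)) - w
    if t < -EPSILON:
        return BACK
    if t > EPSILON:
        return FRONT
    return COPLANAR

plane_normal, plane_w = (0.0, 0.0, 1.0), 0.0            # the z == 0 plane
for pt in [(1.0, 2.0, 0.5), (1.0, 2.0, -0.5), (3.0, 4.0, 0.0)]:
    print(pt, classify(pt, plane_normal, plane_w))       # 1 (FRONT), 2 (BACK), 0 (COPLANAR)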
RedHatInsights/insights-core
insights/formats/text.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/formats/text.py#L118-L131
def show_dropped(self): """ Show dropped files """ ctx = _find_context(self.broker) if ctx and ctx.all_files: ds = self.broker.get_by_type(datasource) vals = [] for v in ds.values(): if isinstance(v, list): vals.extend(d.path for d in v) else: vals.append(v.path) dropped = set(ctx.all_files) - set(vals) pprint("Dropped Files:", stream=self.stream) pprint(dropped, indent=4, stream=self.stream)
[ "def", "show_dropped", "(", "self", ")", ":", "ctx", "=", "_find_context", "(", "self", ".", "broker", ")", "if", "ctx", "and", "ctx", ".", "all_files", ":", "ds", "=", "self", ".", "broker", ".", "get_by_type", "(", "datasource", ")", "vals", "=", "[", "]", "for", "v", "in", "ds", ".", "values", "(", ")", ":", "if", "isinstance", "(", "v", ",", "list", ")", ":", "vals", ".", "extend", "(", "d", ".", "path", "for", "d", "in", "v", ")", "else", ":", "vals", ".", "append", "(", "v", ".", "path", ")", "dropped", "=", "set", "(", "ctx", ".", "all_files", ")", "-", "set", "(", "vals", ")", "pprint", "(", "\"Dropped Files:\"", ",", "stream", "=", "self", ".", "stream", ")", "pprint", "(", "dropped", ",", "indent", "=", "4", ",", "stream", "=", "self", ".", "stream", ")" ]
Show dropped files
[ "Show", "dropped", "files" ]
python
train
saltstack/salt
salt/utils/minions.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/minions.py#L234-L264
def _pki_minions(self): ''' Retreive complete minion list from PKI dir. Respects cache if configured ''' minions = [] pki_cache_fn = os.path.join(self.opts['pki_dir'], self.acc, '.key_cache') try: os.makedirs(os.path.dirname(pki_cache_fn)) except OSError: pass try: if self.opts['key_cache'] and os.path.exists(pki_cache_fn): log.debug('Returning cached minion list') if six.PY2: with salt.utils.files.fopen(pki_cache_fn) as fn_: return self.serial.load(fn_) else: with salt.utils.files.fopen(pki_cache_fn, mode='rb') as fn_: return self.serial.load(fn_) else: for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(os.path.join(self.opts['pki_dir'], self.acc))): if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], self.acc, fn_)): minions.append(fn_) return minions except OSError as exc: log.error( 'Encountered OSError while evaluating minions in PKI dir: %s', exc ) return minions
[ "def", "_pki_minions", "(", "self", ")", ":", "minions", "=", "[", "]", "pki_cache_fn", "=", "os", ".", "path", ".", "join", "(", "self", ".", "opts", "[", "'pki_dir'", "]", ",", "self", ".", "acc", ",", "'.key_cache'", ")", "try", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "pki_cache_fn", ")", ")", "except", "OSError", ":", "pass", "try", ":", "if", "self", ".", "opts", "[", "'key_cache'", "]", "and", "os", ".", "path", ".", "exists", "(", "pki_cache_fn", ")", ":", "log", ".", "debug", "(", "'Returning cached minion list'", ")", "if", "six", ".", "PY2", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "pki_cache_fn", ")", "as", "fn_", ":", "return", "self", ".", "serial", ".", "load", "(", "fn_", ")", "else", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "pki_cache_fn", ",", "mode", "=", "'rb'", ")", "as", "fn_", ":", "return", "self", ".", "serial", ".", "load", "(", "fn_", ")", "else", ":", "for", "fn_", "in", "salt", ".", "utils", ".", "data", ".", "sorted_ignorecase", "(", "os", ".", "listdir", "(", "os", ".", "path", ".", "join", "(", "self", ".", "opts", "[", "'pki_dir'", "]", ",", "self", ".", "acc", ")", ")", ")", ":", "if", "not", "fn_", ".", "startswith", "(", "'.'", ")", "and", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "self", ".", "opts", "[", "'pki_dir'", "]", ",", "self", ".", "acc", ",", "fn_", ")", ")", ":", "minions", ".", "append", "(", "fn_", ")", "return", "minions", "except", "OSError", "as", "exc", ":", "log", ".", "error", "(", "'Encountered OSError while evaluating minions in PKI dir: %s'", ",", "exc", ")", "return", "minions" ]
Retrieve complete minion list from PKI dir. Respects cache if configured
[ "Retrieve", "complete", "minion", "list", "from", "PKI", "dir", ".", "Respects", "cache", "if", "configured" ]
python
train
wagtail/django-modelcluster
modelcluster/models.py
https://github.com/wagtail/django-modelcluster/blob/bfc8bd755af0ddd49e2aee2f2ca126921573d38b/modelcluster/models.py#L227-L268
def from_serializable_data(cls, data, check_fks=True, strict_fks=False): """ Build an instance of this model from the JSON-like structure passed in, recursing into related objects as required. If check_fks is true, it will check whether referenced foreign keys still exist in the database. - dangling foreign keys on related objects are dealt with by either nullifying the key or dropping the related object, according to the 'on_delete' setting. - dangling foreign keys on the base object will be nullified, unless strict_fks is true, in which case any dangling foreign keys with on_delete=CASCADE will cause None to be returned for the entire object. """ obj = model_from_serializable_data(cls, data, check_fks=check_fks, strict_fks=strict_fks) if obj is None: return None child_relations = get_all_child_relations(cls) for rel in child_relations: rel_name = rel.get_accessor_name() try: child_data_list = data[rel_name] except KeyError: continue related_model = rel.related_model if hasattr(related_model, 'from_serializable_data'): children = [ related_model.from_serializable_data(child_data, check_fks=check_fks, strict_fks=True) for child_data in child_data_list ] else: children = [ model_from_serializable_data(related_model, child_data, check_fks=check_fks, strict_fks=True) for child_data in child_data_list ] children = filter(lambda child: child is not None, children) setattr(obj, rel_name, children) return obj
[ "def", "from_serializable_data", "(", "cls", ",", "data", ",", "check_fks", "=", "True", ",", "strict_fks", "=", "False", ")", ":", "obj", "=", "model_from_serializable_data", "(", "cls", ",", "data", ",", "check_fks", "=", "check_fks", ",", "strict_fks", "=", "strict_fks", ")", "if", "obj", "is", "None", ":", "return", "None", "child_relations", "=", "get_all_child_relations", "(", "cls", ")", "for", "rel", "in", "child_relations", ":", "rel_name", "=", "rel", ".", "get_accessor_name", "(", ")", "try", ":", "child_data_list", "=", "data", "[", "rel_name", "]", "except", "KeyError", ":", "continue", "related_model", "=", "rel", ".", "related_model", "if", "hasattr", "(", "related_model", ",", "'from_serializable_data'", ")", ":", "children", "=", "[", "related_model", ".", "from_serializable_data", "(", "child_data", ",", "check_fks", "=", "check_fks", ",", "strict_fks", "=", "True", ")", "for", "child_data", "in", "child_data_list", "]", "else", ":", "children", "=", "[", "model_from_serializable_data", "(", "related_model", ",", "child_data", ",", "check_fks", "=", "check_fks", ",", "strict_fks", "=", "True", ")", "for", "child_data", "in", "child_data_list", "]", "children", "=", "filter", "(", "lambda", "child", ":", "child", "is", "not", "None", ",", "children", ")", "setattr", "(", "obj", ",", "rel_name", ",", "children", ")", "return", "obj" ]
Build an instance of this model from the JSON-like structure passed in, recursing into related objects as required. If check_fks is true, it will check whether referenced foreign keys still exist in the database. - dangling foreign keys on related objects are dealt with by either nullifying the key or dropping the related object, according to the 'on_delete' setting. - dangling foreign keys on the base object will be nullified, unless strict_fks is true, in which case any dangling foreign keys with on_delete=CASCADE will cause None to be returned for the entire object.
[ "Build", "an", "instance", "of", "this", "model", "from", "the", "JSON", "-", "like", "structure", "passed", "in", "recursing", "into", "related", "objects", "as", "required", ".", "If", "check_fks", "is", "true", "it", "will", "check", "whether", "referenced", "foreign", "keys", "still", "exist", "in", "the", "database", ".", "-", "dangling", "foreign", "keys", "on", "related", "objects", "are", "dealt", "with", "by", "either", "nullifying", "the", "key", "or", "dropping", "the", "related", "object", "according", "to", "the", "on_delete", "setting", ".", "-", "dangling", "foreign", "keys", "on", "the", "base", "object", "will", "be", "nullified", "unless", "strict_fks", "is", "true", "in", "which", "case", "any", "dangling", "foreign", "keys", "with", "on_delete", "=", "CASCADE", "will", "cause", "None", "to", "be", "returned", "for", "the", "entire", "object", "." ]
python
test
chr-1x/ananas
ananas/default/roll.py
https://github.com/chr-1x/ananas/blob/e4625a3da193fa1c77119edb68d4ee18dcbc56ca/ananas/default/roll.py#L197-L206
def sum_dice(spec): """ Replace the dice roll arrays from roll_dice in place with summations of the rolls. """ if spec[0] == 'c': return spec[1] elif spec[0] == 'r': return sum(spec[1]) elif spec[0] == 'x': return [sum_dice(r) for r in spec[1]] elif spec[0] in ops: return (spec[0], sum_dice(spec[1]), sum_dice(spec[2])) else: raise ValueError("Invalid dice specification")
[ "def", "sum_dice", "(", "spec", ")", ":", "if", "spec", "[", "0", "]", "==", "'c'", ":", "return", "spec", "[", "1", "]", "elif", "spec", "[", "0", "]", "==", "'r'", ":", "return", "sum", "(", "spec", "[", "1", "]", ")", "elif", "spec", "[", "0", "]", "==", "'x'", ":", "return", "[", "sum_dice", "(", "r", ")", "for", "r", "in", "spec", "[", "1", "]", "]", "elif", "spec", "[", "0", "]", "in", "ops", ":", "return", "(", "spec", "[", "0", "]", ",", "sum_dice", "(", "spec", "[", "1", "]", ")", ",", "sum_dice", "(", "spec", "[", "2", "]", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid dice specification\"", ")" ]
Replace the dice roll arrays from roll_dice in place with summations of the rolls.
[ "Replace", "the", "dice", "roll", "arrays", "from", "roll_dice", "in", "place", "with", "summations", "of", "the", "rolls", "." ]
python
train
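A small worked example of the spec shape that sum_dice expects, sketched for a hypothetical 2d6+3 roll and assuming '+' is among the recognised ops ('r' holds the individual rolls, 'c' a constant, and an operator tuple combines the two):

spec = ("+", ("r", [4, 2]), ("c", 3))    # a 2d6 roll of 4 and 2, plus a constant 3

# What sum_dice would produce: the roll list collapses to its total,
# the constant keeps its value, and the operator node keeps its shape.
summed = ("+", sum(spec[1][1]), spec[2][1])
print(summed)    # ('+', 6, 3)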
scdoshi/django-bits
bits/gis.py
https://github.com/scdoshi/django-bits/blob/0a2f4fd9374d2a8acb8df9a7b83eebcf2782256f/bits/gis.py#L15-L25
def gprmc_to_degdec(lat, latDirn, lng, lngDirn): """Converts GPRMC formats (Decimal Minutes) to Degrees Decimal.""" x = float(lat[0:2]) + float(lat[2:]) / 60 y = float(lng[0:3]) + float(lng[3:]) / 60 if latDirn == 'S': x = -x if lngDirn == 'W': y = -y return x, y
[ "def", "gprmc_to_degdec", "(", "lat", ",", "latDirn", ",", "lng", ",", "lngDirn", ")", ":", "x", "=", "float", "(", "lat", "[", "0", ":", "2", "]", ")", "+", "float", "(", "lat", "[", "2", ":", "]", ")", "/", "60", "y", "=", "float", "(", "lng", "[", "0", ":", "3", "]", ")", "+", "float", "(", "lng", "[", "3", ":", "]", ")", "/", "60", "if", "latDirn", "==", "'S'", ":", "x", "=", "-", "x", "if", "lngDirn", "==", "'W'", ":", "y", "=", "-", "y", "return", "x", ",", "y" ]
Converts GPRMC formats (Decimal Minutes) to Degrees Decimal.
[ "Converts", "GPRMC", "formats", "(", "Decimal", "Minutes", ")", "to", "Degrees", "Decimal", "." ]
python
train
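A worked example of the same arithmetic on the conventional NMEA sample coordinates (48°07.038' N, 011°31.000' E), kept standalone so it runs without importing the package:

lat, lat_dirn = "4807.038", "N"
lng, lng_dirn = "01131.000", "E"

x = float(lat[0:2]) + float(lat[2:]) / 60     # 48 + 7.038 / 60
y = float(lng[0:3]) + float(lng[3:]) / 60     # 11 + 31.000 / 60
if lat_dirn == "S":
    x = -x
if lng_dirn == "W":
    y = -y
print(round(x, 5), round(y, 5))               # 48.1173 11.51667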
Yubico/python-u2flib-host
u2flib_host/utils.py
https://github.com/Yubico/python-u2flib-host/blob/eadc4dbf3bf516e74ea00d2e5690742a535834cb/u2flib_host/utils.py#L40-L49
def u2str(data): """Recursively converts unicode objects to UTF-8 encoded byte strings.""" if isinstance(data, dict): return {u2str(k): u2str(v) for k, v in data.items()} elif isinstance(data, list): return [u2str(x) for x in data] elif isinstance(data, text_type): return data.encode('utf-8') else: return data
[ "def", "u2str", "(", "data", ")", ":", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "return", "{", "u2str", "(", "k", ")", ":", "u2str", "(", "v", ")", "for", "k", ",", "v", "in", "data", ".", "items", "(", ")", "}", "elif", "isinstance", "(", "data", ",", "list", ")", ":", "return", "[", "u2str", "(", "x", ")", "for", "x", "in", "data", "]", "elif", "isinstance", "(", "data", ",", "text_type", ")", ":", "return", "data", ".", "encode", "(", "'utf-8'", ")", "else", ":", "return", "data" ]
Recursively converts unicode objects to UTF-8 encoded byte strings.
[ "Recursively", "converts", "unicode", "objects", "to", "UTF", "-", "8", "encoded", "byte", "strings", "." ]
python
test
pywbem/pywbem
pywbem/cim_obj.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/cim_obj.py#L3332-L3348
def copy(self): """ Return a new :class:`~pywbem.CIMClassName` object that is a copy of this CIM class path. Objects of this class have no mutable types in any attributes, so modifications of the original object will not affect the returned copy, and vice versa. Note that the Python functions :func:`py:copy.copy` and :func:`py:copy.deepcopy` can be used to create completely shallow or completely deep copies of objects of this class. """ return CIMClassName( self.classname, host=self.host, namespace=self.namespace)
[ "def", "copy", "(", "self", ")", ":", "return", "CIMClassName", "(", "self", ".", "classname", ",", "host", "=", "self", ".", "host", ",", "namespace", "=", "self", ".", "namespace", ")" ]
Return a new :class:`~pywbem.CIMClassName` object that is a copy of this CIM class path. Objects of this class have no mutable types in any attributes, so modifications of the original object will not affect the returned copy, and vice versa. Note that the Python functions :func:`py:copy.copy` and :func:`py:copy.deepcopy` can be used to create completely shallow or completely deep copies of objects of this class.
[ "Return", "a", "new", ":", "class", ":", "~pywbem", ".", "CIMClassName", "object", "that", "is", "a", "copy", "of", "this", "CIM", "class", "path", "." ]
python
train
biesnecker/cleveland
cleveland/actor.py
https://github.com/biesnecker/cleveland/blob/7c899fa7d3fc8cf3736e3c5b7d2ff5ae26b6c3a7/cleveland/actor.py#L38-L45
def _run(self): '''The actor's main work loop''' while self._is_running: yield from self._task() # Signal that the loop has finished. self._run_complete.set_result(True)
[ "def", "_run", "(", "self", ")", ":", "while", "self", ".", "_is_running", ":", "yield", "from", "self", ".", "_task", "(", ")", "# Signal that the loop has finished.", "self", ".", "_run_complete", ".", "set_result", "(", "True", ")" ]
The actor's main work loop
[ "The", "actor", "s", "main", "work", "loop" ]
python
train
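The same loop shape in modern async/await syntax, as a minimal standalone sketch; the fixed message count stands in for the actor's _is_running flag, and handle_message for its _task coroutine:

import asyncio

async def run_actor(task, n_messages):
    run_complete = asyncio.get_running_loop().create_future()
    for _ in range(n_messages):       # stands in for "while self._is_running"
        await task()                  # one unit of work per iteration
    run_complete.set_result(True)     # signal that the loop has finished
    await run_complete

async def handle_message():
    await asyncio.sleep(0)            # pretend to do some asynchronous work
    print("handled one message")

asyncio.run(run_actor(handle_message, 3))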
odlgroup/odl
odl/space/weighting.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/weighting.py#L555-L560
def repr_part(self): """String usable in a space's ``__repr__`` method.""" optargs = [('weighting', array_str(self.array, nprint=10), ''), ('exponent', self.exponent, 2.0)] return signature_string([], optargs, sep=',\n', mod=[[], ['!s', ':.4']])
[ "def", "repr_part", "(", "self", ")", ":", "optargs", "=", "[", "(", "'weighting'", ",", "array_str", "(", "self", ".", "array", ",", "nprint", "=", "10", ")", ",", "''", ")", ",", "(", "'exponent'", ",", "self", ".", "exponent", ",", "2.0", ")", "]", "return", "signature_string", "(", "[", "]", ",", "optargs", ",", "sep", "=", "',\\n'", ",", "mod", "=", "[", "[", "]", ",", "[", "'!s'", ",", "':.4'", "]", "]", ")" ]
String usable in a space's ``__repr__`` method.
[ "String", "usable", "in", "a", "space", "s", "__repr__", "method", "." ]
python
train
cloudera/cm_api
python/src/cm_api/endpoints/parcels.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/parcels.py#L38-L47
def get_all_parcels(resource_root, cluster_name="default", view=None): """ Get all parcels @param resource_root: The root Resource object. @param cluster_name: Cluster name @return: A list of ApiParcel objects. @since: API v3 """ return call(resource_root.get, PARCELS_PATH % (cluster_name,), ApiParcel, True, params=view and dict(view=view) or None, api_version=3)
[ "def", "get_all_parcels", "(", "resource_root", ",", "cluster_name", "=", "\"default\"", ",", "view", "=", "None", ")", ":", "return", "call", "(", "resource_root", ".", "get", ",", "PARCELS_PATH", "%", "(", "cluster_name", ",", ")", ",", "ApiParcel", ",", "True", ",", "params", "=", "view", "and", "dict", "(", "view", "=", "view", ")", "or", "None", ",", "api_version", "=", "3", ")" ]
Get all parcels @param resource_root: The root Resource object. @param cluster_name: Cluster name @return: A list of ApiParcel objects. @since: API v3
[ "Get", "all", "parcels" ]
python
train
pyviz/holoviews
holoviews/plotting/plot.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/plot.py#L69-L83
def cleanup(self): """ Cleans up references to the plot on the attached Stream subscribers. """ plots = self.traverse(lambda x: x, [Plot]) for plot in plots: if not isinstance(plot, (GenericCompositePlot, GenericElementPlot, GenericOverlayPlot)): continue for stream in set(plot.streams): stream._subscribers = [ (p, subscriber) for p, subscriber in stream._subscribers if util.get_method_owner(subscriber) not in plots] if self.comm: self.comm.close()
[ "def", "cleanup", "(", "self", ")", ":", "plots", "=", "self", ".", "traverse", "(", "lambda", "x", ":", "x", ",", "[", "Plot", "]", ")", "for", "plot", "in", "plots", ":", "if", "not", "isinstance", "(", "plot", ",", "(", "GenericCompositePlot", ",", "GenericElementPlot", ",", "GenericOverlayPlot", ")", ")", ":", "continue", "for", "stream", "in", "set", "(", "plot", ".", "streams", ")", ":", "stream", ".", "_subscribers", "=", "[", "(", "p", ",", "subscriber", ")", "for", "p", ",", "subscriber", "in", "stream", ".", "_subscribers", "if", "util", ".", "get_method_owner", "(", "subscriber", ")", "not", "in", "plots", "]", "if", "self", ".", "comm", ":", "self", ".", "comm", ".", "close", "(", ")" ]
Cleans up references to the plot on the attached Stream subscribers.
[ "Cleans", "up", "references", "to", "the", "plot", "on", "the", "attached", "Stream", "subscribers", "." ]
python
train