Dataset columns (type and reported value or length range):

Column                       Type     Range
Unnamed: 0                   int64    0 to 10k
repository_name              string   lengths 7 to 54
func_path_in_repository      string   lengths 5 to 223
func_name                    string   lengths 1 to 134
whole_func_string            string   lengths 100 to 30.3k
language                     string   1 distinct value ("python")
func_code_string             string   lengths 100 to 30.3k
func_code_tokens             string   lengths 138 to 33.2k
func_documentation_string    string   lengths 1 to 15k
func_documentation_tokens    string   lengths 5 to 5.14k
split_name                   string   1 distinct value ("train")
func_code_url                string   lengths 91 to 315

Example 8,400
repository_name: ericjang/tdb
func_path_in_repository: tdb/debug_session.py
func_name: DebugSession.get_values
language: python
whole_func_string:

```python
def get_values(self):
    """ returns final values (same result as tf.Session.run()) """
    return [self._cache.get(i.name,None) for i in self._original_evals]
```
func_documentation_string: returns final values (same result as tf.Session.run())
split_name: train
func_code_url: https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/debug_session.py#L95-L99

Example 8,401
repository_name: kennethreitz/clint
func_path_in_repository: clint/textui/formatters.py
func_name: _max_width_formatter
language: python
whole_func_string:

```python
def _max_width_formatter(string, cols, separator='\n'):
    """Returns a freshly formatted
    :param string: string to be formatted
    :type string: basestring or clint.textui.colored.ColoredString
    :param cols: max width the text to be formatted
    :type cols: int
    :param separator: separator to break rows
    :type separator: basestring
    """
    is_color = isinstance(string, ColoredString)
    if is_color:
        string_copy = string._new('')
        string = string.s

    stack = tsplit(string, NEWLINES)
    for i, substring in enumerate(stack):
        stack[i] = substring.split()

    _stack = []

    for row in stack:
        _row = ['',]
        _row_i = 0

        for word in row:
            if (len(_row[_row_i]) + len(word)) <= cols:
                _row[_row_i] += word
                _row[_row_i] += ' '

            elif len(word) > cols:
                # ensure empty row
                if len(_row[_row_i]):
                    _row[_row_i] = _row[_row_i].rstrip()
                    _row.append('')
                    _row_i += 1

                chunks = schunk(word, cols)
                for i, chunk in enumerate(chunks):
                    if not (i + 1) == len(chunks):
                        _row[_row_i] += chunk
                        _row[_row_i] = _row[_row_i].rstrip()
                        _row.append('')
                        _row_i += 1
                    else:
                        _row[_row_i] += chunk
                        _row[_row_i] += ' '
            else:
                _row[_row_i] = _row[_row_i].rstrip()
                _row.append('')
                _row_i += 1
                _row[_row_i] += word
                _row[_row_i] += ' '
        else:
            _row[_row_i] = _row[_row_i].rstrip()

        _row = map(str, _row)
        _stack.append(separator.join(_row))

    _s = '\n'.join(_stack)
    if is_color:
        _s = string_copy._new(_s)
    return _s
```
func_documentation_string: Returns a freshly formatted :param string: string to be formatted :type string: basestring or clint.textui.colored.ColoredString :param cols: max width the text to be formatted :type cols: int :param separator: separator to break rows :type separator: basestring
split_name: train
func_code_url: https://github.com/kennethreitz/clint/blob/9d3693d644b8587d985972b6075d970096f6439e/clint/textui/formatters.py#L88-L153

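For comparison (not part of clint), the standard library's textwrap covers the core wrap-to-width behavior, including breaking words longer than the target width, which _max_width_formatter implements by hand via schunk; a minimal sketch:

```python
import textwrap

# Wrap to 20 columns; long words are broken, mirroring the schunk step.
text = "a paragraph with one extraordinarilylongword in it"
print(textwrap.fill(text, width=20, break_long_words=True))
```
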
Example 8,402
repository_name: ruipgil/TrackToTrip
func_path_in_repository: tracktotrip/track.py
func_name: Track.simplify
language: python
whole_func_string:

```python
def simplify(self, eps, max_dist_error, max_speed_error, topology_only=False):
    """ In-place simplification of segments

    Args:
        max_dist_error (float): Min distance error, in meters
        max_speed_error (float): Min speed error, in km/h
        topology_only: Boolean, optional. True to keep the topology,
            neglecting velocity and time accuracy (use common
            Douglas-Ramen-Peucker). False (default) to simplify segments
            keeping the velocity between points.

    Returns:
        This track
    """
    for segment in self.segments:
        segment.simplify(eps, max_dist_error, max_speed_error, topology_only)
    return self
```
func_documentation_string: In-place simplification of segments Args: max_dist_error (float): Min distance error, in meters max_speed_error (float): Min speed error, in km/h topology_only: Boolean, optional. True to keep the topology, neglecting velocity and time accuracy (use common Douglas-Ramen-Peucker). False (default) to simplify segments keeping the velocity between points. Returns: This track
split_name: train
func_code_url: https://github.com/ruipgil/TrackToTrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/track.py#L99-L115

Example 8,403
repository_name: disqus/nydus
func_path_in_repository: nydus/db/routers/keyvalue.py
func_name: ConsistentHashingRouter._route
language: python
whole_func_string:

```python
def _route(self, attr, args, kwargs, **fkwargs):
    """
    The first argument is assumed to be the ``key`` for routing.
    """
    key = get_key(args, kwargs)

    found = self._hash.get_node(key)

    if not found and len(self._down_connections) > 0:
        raise self.HostListExhausted()

    return [i for i, h in self.cluster.hosts.iteritems()
            if h.identifier == found]
```
func_documentation_string: The first argument is assumed to be the ``key`` for routing.
split_name: train
func_code_url: https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/routers/keyvalue.py#L66-L79

Example 8,404
repository_name: google/grr
func_path_in_repository: grr/server/grr_response_server/databases/mysql_clients.py
func_name: MySQLDBClientMixin.WriteClientSnapshotHistory
language: python
whole_func_string:

```python
def WriteClientSnapshotHistory(self, clients, cursor=None):
    """Writes the full history for a particular client."""
    client_id = clients[0].client_id
    latest_timestamp = max(client.timestamp for client in clients)

    query = ""
    params = {
        "client_id": db_utils.ClientIDToInt(client_id),
        "latest_timestamp": mysql_utils.RDFDatetimeToTimestamp(latest_timestamp)
    }

    for idx, client in enumerate(clients):
        startup_info = client.startup_info
        client.startup_info = None

        query += """
            INSERT INTO client_snapshot_history (client_id, timestamp,
                                                 client_snapshot)
            VALUES (%(client_id)s, FROM_UNIXTIME(%(timestamp_{idx})s),
                    %(client_snapshot_{idx})s);

            INSERT INTO client_startup_history (client_id, timestamp,
                                                startup_info)
            VALUES (%(client_id)s, FROM_UNIXTIME(%(timestamp_{idx})s),
                    %(startup_info_{idx})s);
        """.format(idx=idx)

        params.update({
            "timestamp_{idx}".format(idx=idx):
                mysql_utils.RDFDatetimeToTimestamp(client.timestamp),
            "client_snapshot_{idx}".format(idx=idx):
                client.SerializeToString(),
            "startup_info_{idx}".format(idx=idx):
                startup_info.SerializeToString(),
        })

        client.startup_info = startup_info

    query += """
        UPDATE clients
        SET last_snapshot_timestamp = FROM_UNIXTIME(%(latest_timestamp)s)
        WHERE client_id = %(client_id)s
          AND (last_snapshot_timestamp IS NULL OR
               last_snapshot_timestamp < FROM_UNIXTIME(%(latest_timestamp)s));

        UPDATE clients
        SET last_startup_timestamp = FROM_UNIXTIME(%(latest_timestamp)s)
        WHERE client_id = %(client_id)s
          AND (last_startup_timestamp IS NULL OR
               last_startup_timestamp < FROM_UNIXTIME(%(latest_timestamp)s));
    """

    try:
        cursor.execute(query, params)
    except MySQLdb.IntegrityError as error:
        raise db.UnknownClientError(client_id, cause=error)
```
func_documentation_string: Writes the full history for a particular client.
split_name: train
func_code_url: https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_clients.py#L244-L299

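The statements use MySQLdb's %(name)s placeholder style bound from a single params dict, which is why per-row keys such as timestamp_0 and client_snapshot_0 are generated; a minimal sketch of that binding (table and values hypothetical):

```python
# %(name)s placeholders are resolved from a dict passed to execute().
query = "INSERT INTO t (id, ts) VALUES (%(client_id)s, FROM_UNIXTIME(%(timestamp_0)s));"
params = {"client_id": 42, "timestamp_0": 1234567890.0}
# cursor.execute(query, params)  # with a MySQLdb cursor in scope
```
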
Example 8,405
repository_name: ottogroup/palladium
func_path_in_repository: palladium/server.py
func_name: PredictService.sample_from_data
language: python
whole_func_string:

```python
def sample_from_data(self, model, data):
    """Convert incoming sample *data* into a numpy array.

    :param model:
        The :class:`~Model` instance to use for making predictions.
    :param data:
        A dict-like with the sample's data, typically retrieved from
        ``request.args`` or similar.
    """
    values = []
    for key, type_name in self.mapping:
        value_type = self.types[type_name]
        values.append(value_type(data[key]))
    if self.unwrap_sample:
        assert len(values) == 1
        return np.array(values[0])
    else:
        return np.array(values, dtype=object)
```
func_documentation_string: Convert incoming sample *data* into a numpy array. :param model: The :class:`~Model` instance to use for making predictions. :param data: A dict-like with the sample's data, typically retrieved from ``request.args`` or similar.
split_name: train
func_code_url: https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/server.py#L135-L152

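A standalone sketch of the same key/type mapping logic, with an illustrative mapping and request data (the names here are hypothetical, not palladium configuration):

```python
import numpy as np

mapping = [("sepal_length", "float"), ("species", "str")]  # (key, type name)
types = {"float": float, "str": str}
data = {"sepal_length": "5.1", "species": "setosa"}

# Cast each request value to its configured type, as sample_from_data does.
values = [types[type_name](data[key]) for key, type_name in mapping]
print(np.array(values, dtype=object))  # [5.1 'setosa']
```
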
Example 8,406
repository_name: glitchassassin/lackey
func_path_in_repository: lackey/RegionMatching.py
func_name: Region.findBest
language: python
whole_func_string:

```python
def findBest(self, pattern):
    """ Returns the *best* match in the region (instead of the first match) """
    findFailedRetry = True
    while findFailedRetry:
        best_match = None
        all_matches = self.findAll(pattern)
        for match in all_matches:
            if best_match is None or best_match.getScore() < match.getScore():
                best_match = match
        self._lastMatch = best_match
        if best_match is not None:
            break
        path = pattern.path if isinstance(pattern, Pattern) else pattern
        findFailedRetry = self._raiseFindFailed("Could not find pattern '{}'".format(path))
        if findFailedRetry:
            time.sleep(self._repeatWaitTime)
    return best_match
```
func_documentation_string: Returns the *best* match in the region (instead of the first match)
split_name: train
func_code_url: https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1352-L1368

Example 8,407
repository_name: LuminosoInsight/ordered-set
func_path_in_repository: ordered_set.py
func_name: is_iterable
language: python
whole_func_string:

```python
def is_iterable(obj):
    """
    Are we being asked to look up a list of things, instead of a single thing?
    We check for the `__iter__` attribute so that this can cover types that
    don't have to be known by this module, such as NumPy arrays.

    Strings, however, should be considered as atomic values to look up, not
    iterables. The same goes for tuples, since they are immutable and therefore
    valid entries.

    We don't need to check for the Python 2 `unicode` type, because it doesn't
    have an `__iter__` attribute anyway.
    """
    return (
        hasattr(obj, "__iter__")
        and not isinstance(obj, str)
        and not isinstance(obj, tuple)
    )
```
func_documentation_string: Are we being asked to look up a list of things, instead of a single thing? We check for the `__iter__` attribute so that this can cover types that don't have to be known by this module, such as NumPy arrays. Strings, however, should be considered as atomic values to look up, not iterables. The same goes for tuples, since they are immutable and therefore valid entries. We don't need to check for the Python 2 `unicode` type, because it doesn't have an `__iter__` attribute anyway.
split_name: train
func_code_url: https://github.com/LuminosoInsight/ordered-set/blob/a29eaedcedfe5072bcee11bdef61dea321d5e9f9/ordered_set.py#L22-L39

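A quick behavioral check of the contract described in the docstring: generic iterables pass, while str and tuple are deliberately treated as atomic values:

```python
def is_iterable(obj):
    # Condensed copy of the function above, for a self-contained demo.
    return (hasattr(obj, "__iter__")
            and not isinstance(obj, str)
            and not isinstance(obj, tuple))

print(is_iterable([1, 2]))  # True
print(is_iterable({1, 2}))  # True
print(is_iterable("abc"))   # False: strings are atomic lookups
print(is_iterable((1, 2)))  # False: tuples are valid entries
print(is_iterable(42))      # False: no __iter__
```
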
Example 8,408
repository_name: zhanglab/psamm
func_path_in_repository: psamm/gapfilling.py
func_name: add_all_database_reactions
language: python
whole_func_string:

```python
def add_all_database_reactions(model, compartments):
    """Add all reactions from database that occur in given compartments.

    Args:
        model: :class:`psamm.metabolicmodel.MetabolicModel`.
    """
    added = set()
    for rxnid in model.database.reactions:
        reaction = model.database.get_reaction(rxnid)
        if all(compound.compartment in compartments
               for compound, _ in reaction.compounds):
            if not model.has_reaction(rxnid):
                added.add(rxnid)
                model.add_reaction(rxnid)

    return added
```
func_documentation_string: Add all reactions from database that occur in given compartments. Args: model: :class:`psamm.metabolicmodel.MetabolicModel`.
split_name: train
func_code_url: https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/gapfilling.py#L38-L54

Example 8,409
repository_name: jaysonsantos/python-binary-memcached
func_path_in_repository: bmemcached/protocol.py
func_name: Protocol.authenticate
language: python
whole_func_string:

```python
def authenticate(self, username, password):
    """
    Authenticate user on server.

    :param username: Username used to be authenticated.
    :type username: six.string_types
    :param password: Password used to be authenticated.
    :type password: six.string_types
    :return: True if successful.
    :raises: InvalidCredentials, AuthenticationNotSupported, MemcachedException
    :rtype: bool
    """
    self._username = username
    self._password = password

    # Reopen the connection with the new credentials.
    self.disconnect()
    self._open_connection()
    return self.authenticated
```
func_documentation_string: Authenticate user on server. :param username: Username used to be authenticated. :type username: six.string_types :param password: Password used to be authenticated. :type password: six.string_types :return: True if successful. :raises: InvalidCredentials, AuthenticationNotSupported, MemcachedException :rtype: bool
split_name: train
func_code_url: https://github.com/jaysonsantos/python-binary-memcached/blob/6a792829349c69204d9c5045e5c34b4231216dd6/bmemcached/protocol.py#L245-L263

Example 8,410
repository_name: spyder-ide/spyder
func_path_in_repository: spyder/plugins/console/widgets/console.py
func_name: insert_text_to
language: python
whole_func_string:

```python
def insert_text_to(cursor, text, fmt):
    """Helper to print text, taking into account backspaces"""
    while True:
        index = text.find(chr(8))  # backspace
        if index == -1:
            break
        cursor.insertText(text[:index], fmt)
        if cursor.positionInBlock() > 0:
            cursor.deletePreviousChar()
        text = text[index+1:]
    cursor.insertText(text, fmt)
```
func_documentation_string: Helper to print text, taking into account backspaces
split_name: train
func_code_url: https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/console/widgets/console.py#L35-L45

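The cursor here is a Qt QTextCursor; a Qt-free sketch of the same backspace semantics over a plain string buffer (the positionInBlock check is only approximated by a non-empty-output check):

```python
BS = chr(8)  # ASCII backspace

def apply_backspaces(text):
    out = ""
    while True:
        index = text.find(BS)
        if index == -1:
            break
        out += text[:index]
        if out:              # roughly cursor.positionInBlock() > 0
            out = out[:-1]   # roughly cursor.deletePreviousChar()
        text = text[index + 1:]
    return out + text

print(apply_backspaces("abcd" + BS + "e"))  # abce
```
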
Example 8,411
repository_name: klen/muffin-admin
func_path_in_repository: muffin_admin/peewee.py
func_name: pw_converter
language: python
whole_func_string:

```python
def pw_converter(handler, flt):
    """Convert column name to filter."""
    import peewee as pw

    if isinstance(flt, Filter):
        return flt

    model = handler.model
    field = getattr(model, flt)

    if isinstance(field, pw.BooleanField):
        return PWBoolFilter(flt)

    if field.choices:
        choices = [(Filter.default, '---')] + list(field.choices)
        return PWChoiceFilter(flt, choices=choices)

    return PWFilter(flt)
```
func_documentation_string: Convert column name to filter.
split_name: train
func_code_url: https://github.com/klen/muffin-admin/blob/404dc8e5107e943b7c42fa21c679c34ddb4de1d5/muffin_admin/peewee.py#L24-L41

Example 8,412
repository_name: joerick/pyinstrument
func_path_in_repository: pyinstrument/profiler.py
func_name: Profiler.first_interesting_frame
language: python
whole_func_string:

```python
def first_interesting_frame(self):
    """
    Traverse down the frame hierarchy until a frame is found with more than
    one child
    """
    root_frame = self.root_frame()
    frame = root_frame

    while len(frame.children) <= 1:
        if frame.children:
            frame = frame.children[0]
        else:
            # there are no branches
            return root_frame

    return frame
```
func_documentation_string: Traverse down the frame hierarchy until a frame is found with more than one child
split_name: train
func_code_url: https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/profiler.py#L119-L133

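The traversal is easy to replicate on a toy frame tree; here the chain a -> b -> c fans out at c, so the loop stops there (Frame is a stand-in for pyinstrument's frame objects, not its API):

```python
from collections import namedtuple

Frame = namedtuple("Frame", ["name", "children"])

c = Frame("c", [Frame("d", []), Frame("e", [])])
root = Frame("a", [Frame("b", [c])])

frame = root
while len(frame.children) <= 1:
    if frame.children:
        frame = frame.children[0]
    else:
        frame = root  # no branches anywhere: fall back to the root
        break
print(frame.name)  # c
```
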
Example 8,413
repository_name: keleshev/mini
func_path_in_repository: mini.py
func_name: Mini.infix
language: python
whole_func_string:

```python
def infix(self, node, children):
    'infix = "(" expr operator expr ")"'
    _, expr1, operator, expr2, _ = children
    return operator(expr1, expr2)
```
infix = "(" expr operator expr ")"
['infix', '=', '(', 'expr', 'operator', 'expr', ')']
train
https://github.com/keleshev/mini/blob/da7893a1ee72aca315d6921f25604316462ec019/mini.py#L59-L62
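In this PEG visitor, children arrives as the parsed pieces of '(' expr operator expr ')'; a toy stand-in for what the unpacking does when parsing "(1 + 2)", with operator.add playing the role of the resolved operator:

```python
import operator

children = ["(", 1.0, operator.add, 2.0, ")"]
_, expr1, op, expr2, _ = children
print(op(expr1, expr2))  # 3.0
```
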
Example 8,414
repository_name: archman/beamline
func_path_in_repository: beamline/element.py
func_name: MagBlock.copy_patches
language: python
whole_func_string:

```python
def copy_patches(ptches0):
    """ return a list of copied input matplotlib patches

    :param ptches0: list of matploblib.patches objects
    :return: copyed patches object
    """
    if not isinstance(ptches0, list):
        ptches0 = list(ptches0)
    copyed_ptches = []
    for pt in ptches0:
        pth = pt.get_path().deepcopy()
        ptch = patches.PathPatch(pth,
                                 lw=pt.get_lw(),
                                 fc=pt.get_fc(),
                                 ec=pt.get_ec(),
                                 alpha=pt.get_alpha())
        copyed_ptches.append(ptch)
    return copyed_ptches
```
func_documentation_string: return a list of copied input matplotlib patches :param ptches0: list of matploblib.patches objects :return: copyed patches object
split_name: train
func_code_url: https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/element.py#L132-L149

Example 8,415
repository_name: seung-lab/cloud-volume
func_path_in_repository: cloudvolume/storage.py
func_name: SimpleStorage.put_file
language: python
whole_func_string:

```python
def put_file(self, file_path, content, content_type=None, compress=None, cache_control=None):
    """
    Args:
        filename (string): it can contains folders
        content (string): binary data to save
    """
    return self.put_files([ (file_path, content) ],
        content_type=content_type,
        compress=compress,
        cache_control=cache_control,
        block=False
    )
```
func_documentation_string: Args: filename (string): it can contains folders content (string): binary data to save
split_name: train
func_code_url: https://github.com/seung-lab/cloud-volume/blob/d2fd4500333f1bc3cd3e3919a8b649cec5d8e214/cloudvolume/storage.py#L101-L112

Example 8,416
repository_name: Miserlou/Zappa
func_path_in_repository: zappa/core.py
func_name: Zappa.get_patch_op
language: python
whole_func_string:

```python
def get_patch_op(self, keypath, value, op='replace'):
    """
    Return an object that describes a change of configuration on the given staging.
    Setting will be applied on all available HTTP methods.
    """
    if isinstance(value, bool):
        value = str(value).lower()
    return {'op': op, 'path': '/*/*/{}'.format(keypath), 'value': value}
```
func_documentation_string: Return an object that describes a change of configuration on the given staging. Setting will be applied on all available HTTP methods.
split_name: train
func_code_url: https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L1963-L1970

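A standalone copy of the method body makes the output shape concrete; note that booleans are lowercased into strings before being sent as patch-operation values:

```python
def get_patch_op(keypath, value, op='replace'):
    if isinstance(value, bool):
        value = str(value).lower()
    return {'op': op, 'path': '/*/*/{}'.format(keypath), 'value': value}

print(get_patch_op('logging/loglevel', 'INFO'))
# {'op': 'replace', 'path': '/*/*/logging/loglevel', 'value': 'INFO'}
print(get_patch_op('metrics/enabled', True))
# {'op': 'replace', 'path': '/*/*/metrics/enabled', 'value': 'true'}
```
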
Example 8,417
repository_name: NikolayDachev/jadm
func_path_in_repository: lib/paramiko-1.14.1/paramiko/sftp_client.py
func_name: _to_unicode
language: python
whole_func_string:

```python
def _to_unicode(s):
    """
    decode a string as ascii or utf8 if possible (as required by the sftp
    protocol).  if neither works, just return a byte string because the server
    probably doesn't know the filename's encoding.
    """
    try:
        return s.encode('ascii')
    except (UnicodeError, AttributeError):
        try:
            return s.decode('utf-8')
        except UnicodeError:
            return s
```
func_documentation_string: decode a string as ascii or utf8 if possible (as required by the sftp protocol). if neither works, just return a byte string because the server probably doesn't know the filename's encoding.
split_name: train
func_code_url: https://github.com/NikolayDachev/jadm/blob/12bb550445edfcd87506f7cba7a6a35d413c5511/lib/paramiko-1.14.1/paramiko/sftp_client.py#L44-L56

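This is Python 2-era code (on Python 2, s.encode('ascii') round-trips a byte string through ASCII and raises if it is not ASCII-safe); a modernized Python 3 sketch of the same decode-with-fallback idea, not paramiko's API:

```python
def to_unicode_py3(s):
    if isinstance(s, bytes):
        try:
            return s.decode('utf-8')
        except UnicodeDecodeError:
            return s  # unknown encoding: hand back the raw bytes
    return s

print(to_unicode_py3(b'caf\xc3\xa9'))  # café
print(to_unicode_py3(b'\xff\xfe'))     # b'\xff\xfe' (left as bytes)
```
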
Example 8,418
repository_name: ricequant/rqalpha
func_path_in_repository: rqalpha/model/portfolio.py
func_name: Portfolio.unit_net_value
language: python
whole_func_string:

```python
def unit_net_value(self):
    """
    [float] 实时净值
    """
    if self._units == 0:
        return np.nan
    return self.total_value / self._units
```
func_documentation_string: [float] 实时净值 (real-time net value per unit)
split_name: train
func_code_url: https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/portfolio.py#L119-L125

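A one-line worked example of the property's arithmetic: unit net value is total portfolio value over units outstanding, with NaN guarding the zero-unit case:

```python
import numpy as np

total_value, units = 1_050_000.0, 1_000_000.0
print(total_value / units if units != 0 else np.nan)  # 1.05
```
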
Example 8,419
repository_name: aliyun/aliyun-odps-python-sdk
func_path_in_repository: odps/df/expr/strings.py
func_name: _rsplit
language: python
whole_func_string:

```python
def _rsplit(expr, pat=None, n=-1):
    """
    Split each string in the Series/Index by the given delimiter string,
    starting at the end of the string and working to the front.
    Equivalent to str.rsplit().

    :param expr:
    :param pat: Separator to split on. If None, splits on whitespace
    :param n: None, 0 and -1 will be interpreted as return all splits
    :return: sequence or scalar
    """
    return _string_op(expr, RSplit, output_type=types.List(types.string),
                      _pat=pat, _n=n)
```
func_documentation_string: Split each string in the Series/Index by the given delimiter string, starting at the end of the string and working to the front. Equivalent to str.rsplit(). :param expr: :param pat: Separator to split on. If None, splits on whitespace :param n: None, 0 and -1 will be interpreted as return all splits :return: sequence or scalar
split_name: train
func_code_url: https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/df/expr/strings.py#L547-L560

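The column-wise operation mirrors plain str.rsplit, which splits from the right and keeps at most n splits; the string behavior it is documented as equivalent to:

```python
print("a,b,c,d".rsplit(",", 1))   # ['a,b,c', 'd']
print("a,b,c,d".rsplit(","))      # ['a', 'b', 'c', 'd']
print("one  two three".rsplit())  # ['one', 'two', 'three'] (pat=None: whitespace)
```
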
Example 8,420
repository_name: oscarlazoarjona/fast
func_path_in_repository: build/lib/fast/electric_field.py
func_name: electric_field_amplitude_intensity
language: python
whole_func_string:

```python
def electric_field_amplitude_intensity(s0, Omega=1.0e6):
    '''This function returns the value of E0 (the amplitude of the electric field)
    at a given saturation parameter s0=I/I0, where I0=2.50399 mW/cm^2 is the
    saturation intensity of the D2 line of Rubidium for linearly polarized light.'''
    e0 = hbar*Omega/(e*a0)  # This is the electric field scale.

    I0 = 2.50399            # mW/cm^2
    I0 = 1.66889451102868   # mW/cm^2
    I0 = I0/1000*(100**2)   # W/m^2

    r_ciclic = 4.226983616875483   # a0
    gamma_D2 = 2*Pi*6.065e6/Omega  # The decay frequency of the D2 line.
    E0_sat = gamma_D2/r_ciclic/sqrt(2.0)
    E0_sat = E0_sat*e0
    I0 = E0_sat**2/2/c/mu0

    # return sqrt(c*mu0*s0*I0/2)/e0
    # return sqrt(c*mu0*s0*I0)/e0
    return sqrt(2*c*mu0*s0*I0)/e0
```
func_documentation_string: This function returns the value of E0 (the amplitude of the electric field) at a given saturation parameter s0=I/I0, where I0=2.50399 mW/cm^2 is the saturation intensity of the D2 line of Rubidium for linearly polarized light.
split_name: train
func_code_url: https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/build/lib/fast/electric_field.py#L374-L393

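Reading off the return expression, the relation in use is the standard intensity-amplitude link for a plane wave, evaluated at I = s0*I0 and divided by the field scale e0:

```latex
I = \frac{E_0^{2}}{2\,c\,\mu_0}
\quad\Longrightarrow\quad
E_0 = \sqrt{2\,c\,\mu_0\,s_0 I_0},
\qquad
e_0 = \frac{\hbar\,\Omega}{e\,a_0},
```

so the function returns E0/e0 in these scaled units.
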
Example 8,421
repository_name: ultrabug/py3status
func_path_in_repository: py3status/util.py
func_name: Gradients.generate_gradient
language: python
whole_func_string:

```python
def generate_gradient(self, color_list, size=101):
    """
    Create a gradient of size colors that passes through the colors
    give in the list (the resultant list may not be exactly size long).
    The gradient will be evenly distributed.

    colors should be in hex format eg '#FF00FF'
    """
    list_length = len(color_list)
    gradient_step = size / (list_length - 1)
    gradient_data = []
    for x in range(list_length):
        gradient_data.append((int(gradient_step * x), color_list[x]))

    data = []
    for i in range(len(gradient_data) - 1):
        start, color1 = gradient_data[i]
        end, color2 = gradient_data[i + 1]
        color1 = self.hex_2_hsv(color1)
        color2 = self.hex_2_hsv(color2)
        steps = end - start
        for j in range(steps):
            data.append(
                self.hsv_2_hex(*self.make_mid_color(color1, color2, j / (steps)))
            )
    data.append(self.hsv_2_hex(*color2))
    return data
```
func_documentation_string: Create a gradient of size colors that passes through the colors give in the list (the resultant list may not be exactly size long). The gradient will be evenly distributed. colors should be in hex format eg '#FF00FF'
split_name: train
func_code_url: https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/util.py#L79-L107

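The anchor positions come from int(gradient_step * x), so with the default size=101 and three colors the anchors land at 0, 50 and 101, and the output has 102 colors; this illustrates the docstring's caveat that the result "may not be exactly size long":

```python
color_list = ["#FF0000", "#00FF00", "#0000FF"]
size = 101
step = size / (len(color_list) - 1)  # 50.5
print([(int(step * x), c) for x, c in enumerate(color_list)])
# [(0, '#FF0000'), (50, '#00FF00'), (101, '#0000FF')]
# segments of 50 and 51 interpolated colors, plus the final color -> 102
```
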
Example 8,422
repository_name: baliame/http-hmac-python
func_path_in_repository: httphmac/v2.py
func_name: V2Signer.check
language: python
whole_func_string:

```python
def check(self, request, secret):
    """Verifies whether or not the request bears an authorization appropriate
    and valid for this version of the signature. This verifies every element
    of the signature, including the timestamp's value. Does not alter the
    request.

    Keyword arguments:
    request -- A request object which can be consumed by this API.
    secret -- The base64-encoded secret key for the HMAC authorization.
    """
    if request.get_header("Authorization") == "":
        return False
    ah = self.parse_auth_headers(request.get_header("Authorization"))
    if "signature" not in ah:
        return False
    if request.get_header('x-authorization-timestamp') == '':
        raise KeyError("X-Authorization-Timestamp is required.")
    timestamp = int(float(request.get_header('x-authorization-timestamp')))
    if timestamp == 0:
        raise ValueError("X-Authorization-Timestamp must be a valid, non-zero timestamp.")
    if self.preset_time is None:
        curr_time = time.time()
    else:
        curr_time = self.preset_time
    if timestamp > curr_time + 900:
        raise ValueError("X-Authorization-Timestamp is too far in the future.")
    if timestamp < curr_time - 900:
        raise ValueError("X-Authorization-Timestamp is too far in the past.")
    if request.body is not None and request.body != b'':
        content_hash = request.get_header("x-authorization-content-sha256")
        if content_hash == '':
            raise KeyError("X-Authorization-Content-SHA256 is required for requests with a request body.")
        sha256 = hashlib.sha256()
        sha256.update(request.body)
        if content_hash != base64.b64encode(sha256.digest()).decode('utf-8'):
            raise ValueError("X-Authorization-Content-SHA256 must match the SHA-256 hash of the request body.")
    return ah["signature"] == self.sign(request, ah, secret)
```
func_documentation_string: Verifies whether or not the request bears an authorization appropriate and valid for this version of the signature. This verifies every element of the signature, including the timestamp's value. Does not alter the request. Keyword arguments: request -- A request object which can be consumed by this API. secret -- The base64-encoded secret key for the HMAC authorization.
split_name: train
func_code_url: https://github.com/baliame/http-hmac-python/blob/9884c0cbfdb712f9f37080a8efbfdce82850785f/httphmac/v2.py#L128-L163

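The body-integrity comparison is just the base64-encoded SHA-256 digest of the raw request bytes; how a client would compute the header value:

```python
import base64
import hashlib

body = b'{"hello": "world"}'
digest = base64.b64encode(hashlib.sha256(body).digest()).decode('utf-8')
print(digest)  # value for X-Authorization-Content-SHA256
```
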
Example 8,423
repository_name: uber/tchannel-python
func_path_in_repository: tchannel/tornado/response.py
func_name: Response.write_header
language: python
whole_func_string:

```python
def write_header(self, chunk):
    """Write to header.

    Note: the header stream is only available to write before write body.

    :param chunk: content to write to header
    :except TChannelError:
        Raise TChannelError if the response's flush() has been called
    """
    if self.serializer:
        header = self.serializer.serialize_header(chunk)
    else:
        header = chunk

    if self.flushed:
        raise TChannelError("write operation invalid after flush call")

    if (self.argstreams[0].state != StreamState.completed and
            self.argstreams[0].auto_close):
        self.argstreams[0].close()

    return self.argstreams[1].write(header)
```
Write to header. Note: the header stream is only available to write before write body. :param chunk: content to write to header :except TChannelError: Raise TChannelError if the response's flush() has been called
['Write', 'to', 'header', '.']
train
https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/response.py#L172-L195
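A hedged sketch of the write-before-flush contract this method enforces; resp stands for an already-wired Response instance and is an assumption, but the flush() behavior follows the docstring above:

    # resp is assumed to be a tchannel.tornado.response.Response
    resp.write_header({'trace': 'abc'})   # serialized via resp.serializer if one is set
    resp.flush()
    resp.write_header({'late': True})     # raises TChannelError: writes are invalid after flush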
8,424
astropy/photutils
photutils/background/background_2d.py
Background2D._calc_bkg_bkgrms
def _calc_bkg_bkgrms(self): """ Calculate the background and background RMS estimate in each of the meshes. Both meshes are computed at the same time here method because the filtering of both depends on the background mesh. The ``background_mesh`` and ``background_rms_mesh`` images are equivalent to the low-resolution "MINIBACKGROUND" and "MINIBACK_RMS" background maps in SExtractor, respectively. """ if self.sigma_clip is not None: data_sigclip = self.sigma_clip(self._mesh_data, axis=1) else: data_sigclip = self._mesh_data del self._mesh_data # preform mesh rejection on sigma-clipped data (i.e. for any # newly-masked pixels) idx = self._select_meshes(data_sigclip) self.mesh_idx = self.mesh_idx[idx] # indices for the output mesh self._data_sigclip = data_sigclip[idx] # always a 2D masked array self._mesh_shape = (self.nyboxes, self.nxboxes) self.mesh_yidx, self.mesh_xidx = np.unravel_index(self.mesh_idx, self._mesh_shape) # These properties are needed later to calculate # background_mesh_ma and background_rms_mesh_ma. Note that _bkg1d # and _bkgrms1d are masked arrays, but the mask should always be # False. self._bkg1d = self.bkg_estimator(self._data_sigclip, axis=1) self._bkgrms1d = self.bkgrms_estimator(self._data_sigclip, axis=1) # make the unfiltered 2D mesh arrays (these are not masked) if len(self._bkg1d) == self.nboxes: bkg = self._make_2d_array(self._bkg1d) bkgrms = self._make_2d_array(self._bkgrms1d) else: bkg = self._interpolate_meshes(self._bkg1d) bkgrms = self._interpolate_meshes(self._bkgrms1d) self._background_mesh_unfiltered = bkg self._background_rms_mesh_unfiltered = bkgrms self.background_mesh = bkg self.background_rms_mesh = bkgrms # filter the 2D mesh arrays if not np.array_equal(self.filter_size, [1, 1]): self._filter_meshes() return
python
def _calc_bkg_bkgrms(self): """ Calculate the background and background RMS estimate in each of the meshes. Both meshes are computed at the same time here method because the filtering of both depends on the background mesh. The ``background_mesh`` and ``background_rms_mesh`` images are equivalent to the low-resolution "MINIBACKGROUND" and "MINIBACK_RMS" background maps in SExtractor, respectively. """ if self.sigma_clip is not None: data_sigclip = self.sigma_clip(self._mesh_data, axis=1) else: data_sigclip = self._mesh_data del self._mesh_data # preform mesh rejection on sigma-clipped data (i.e. for any # newly-masked pixels) idx = self._select_meshes(data_sigclip) self.mesh_idx = self.mesh_idx[idx] # indices for the output mesh self._data_sigclip = data_sigclip[idx] # always a 2D masked array self._mesh_shape = (self.nyboxes, self.nxboxes) self.mesh_yidx, self.mesh_xidx = np.unravel_index(self.mesh_idx, self._mesh_shape) # These properties are needed later to calculate # background_mesh_ma and background_rms_mesh_ma. Note that _bkg1d # and _bkgrms1d are masked arrays, but the mask should always be # False. self._bkg1d = self.bkg_estimator(self._data_sigclip, axis=1) self._bkgrms1d = self.bkgrms_estimator(self._data_sigclip, axis=1) # make the unfiltered 2D mesh arrays (these are not masked) if len(self._bkg1d) == self.nboxes: bkg = self._make_2d_array(self._bkg1d) bkgrms = self._make_2d_array(self._bkgrms1d) else: bkg = self._interpolate_meshes(self._bkg1d) bkgrms = self._interpolate_meshes(self._bkgrms1d) self._background_mesh_unfiltered = bkg self._background_rms_mesh_unfiltered = bkgrms self.background_mesh = bkg self.background_rms_mesh = bkgrms # filter the 2D mesh arrays if not np.array_equal(self.filter_size, [1, 1]): self._filter_meshes() return
['def', '_calc_bkg_bkgrms', '(', 'self', ')', ':', 'if', 'self', '.', 'sigma_clip', 'is', 'not', 'None', ':', 'data_sigclip', '=', 'self', '.', 'sigma_clip', '(', 'self', '.', '_mesh_data', ',', 'axis', '=', '1', ')', 'else', ':', 'data_sigclip', '=', 'self', '.', '_mesh_data', 'del', 'self', '.', '_mesh_data', '# preform mesh rejection on sigma-clipped data (i.e. for any', '# newly-masked pixels)', 'idx', '=', 'self', '.', '_select_meshes', '(', 'data_sigclip', ')', 'self', '.', 'mesh_idx', '=', 'self', '.', 'mesh_idx', '[', 'idx', ']', '# indices for the output mesh', 'self', '.', '_data_sigclip', '=', 'data_sigclip', '[', 'idx', ']', '# always a 2D masked array', 'self', '.', '_mesh_shape', '=', '(', 'self', '.', 'nyboxes', ',', 'self', '.', 'nxboxes', ')', 'self', '.', 'mesh_yidx', ',', 'self', '.', 'mesh_xidx', '=', 'np', '.', 'unravel_index', '(', 'self', '.', 'mesh_idx', ',', 'self', '.', '_mesh_shape', ')', '# These properties are needed later to calculate', '# background_mesh_ma and background_rms_mesh_ma. Note that _bkg1d', '# and _bkgrms1d are masked arrays, but the mask should always be', '# False.', 'self', '.', '_bkg1d', '=', 'self', '.', 'bkg_estimator', '(', 'self', '.', '_data_sigclip', ',', 'axis', '=', '1', ')', 'self', '.', '_bkgrms1d', '=', 'self', '.', 'bkgrms_estimator', '(', 'self', '.', '_data_sigclip', ',', 'axis', '=', '1', ')', '# make the unfiltered 2D mesh arrays (these are not masked)', 'if', 'len', '(', 'self', '.', '_bkg1d', ')', '==', 'self', '.', 'nboxes', ':', 'bkg', '=', 'self', '.', '_make_2d_array', '(', 'self', '.', '_bkg1d', ')', 'bkgrms', '=', 'self', '.', '_make_2d_array', '(', 'self', '.', '_bkgrms1d', ')', 'else', ':', 'bkg', '=', 'self', '.', '_interpolate_meshes', '(', 'self', '.', '_bkg1d', ')', 'bkgrms', '=', 'self', '.', '_interpolate_meshes', '(', 'self', '.', '_bkgrms1d', ')', 'self', '.', '_background_mesh_unfiltered', '=', 'bkg', 'self', '.', '_background_rms_mesh_unfiltered', '=', 'bkgrms', 'self', '.', 'background_mesh', '=', 'bkg', 'self', '.', 'background_rms_mesh', '=', 'bkgrms', '# filter the 2D mesh arrays', 'if', 'not', 'np', '.', 'array_equal', '(', 'self', '.', 'filter_size', ',', '[', '1', ',', '1', ']', ')', ':', 'self', '.', '_filter_meshes', '(', ')', 'return']
Calculate the background and background RMS estimate in each of the meshes. Both meshes are computed at the same time in this method because the filtering of both depends on the background mesh. The ``background_mesh`` and ``background_rms_mesh`` images are equivalent to the low-resolution "MINIBACKGROUND" and "MINIBACK_RMS" background maps in SExtractor, respectively.
['Calculate', 'the', 'background', 'and', 'background', 'RMS', 'estimate', 'in', 'each', 'of', 'the', 'meshes', '.']
train
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/background/background_2d.py#L650-L703
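The per-mesh statistics this private method computes can be approximated with plain astropy/numpy; the toy mesh data and the median/std stand-ins for bkg_estimator and bkgrms_estimator are assumptions:

    import numpy as np
    from astropy.stats import SigmaClip
    mesh_data = np.random.normal(10., 2., (16, 100))   # 16 meshes of 100 pixels each (toy data)
    clipped = SigmaClip(sigma=3.)(mesh_data, axis=1)   # plays the role of self.sigma_clip
    bkg1d = np.ma.median(clipped, axis=1)              # stand-in for bkg_estimator
    bkgrms1d = np.ma.std(clipped, axis=1)              # stand-in for bkgrms_estimator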
8,425
nickjj/ansigenome
ansigenome/utils.py
keys_in_dict
def keys_in_dict(d, parent_key, keys): """ Create a list of keys from a dict recursively. """ for key, value in d.iteritems(): if isinstance(value, dict): keys_in_dict(value, key, keys) else: if parent_key: prefix = parent_key + "." else: prefix = "" keys.append(prefix + key) return keys
python
def keys_in_dict(d, parent_key, keys): """ Create a list of keys from a dict recursively. """ for key, value in d.iteritems(): if isinstance(value, dict): keys_in_dict(value, key, keys) else: if parent_key: prefix = parent_key + "." else: prefix = "" keys.append(prefix + key) return keys
['def', 'keys_in_dict', '(', 'd', ',', 'parent_key', ',', 'keys', ')', ':', 'for', 'key', ',', 'value', 'in', 'd', '.', 'iteritems', '(', ')', ':', 'if', 'isinstance', '(', 'value', ',', 'dict', ')', ':', 'keys_in_dict', '(', 'value', ',', 'key', ',', 'keys', ')', 'else', ':', 'if', 'parent_key', ':', 'prefix', '=', 'parent_key', '+', '"."', 'else', ':', 'prefix', '=', '""', 'keys', '.', 'append', '(', 'prefix', '+', 'key', ')', 'return', 'keys']
Create a list of keys from a dict recursively.
['Create', 'a', 'list', 'of', 'keys', 'from', 'a', 'dict', 'recursively', '.']
train
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L258-L273
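A small usage sketch; note the function calls dict.iteritems(), so as written it targets Python 2 (a Python 3 port would read d.items()):

    d = {'a': 1, 'b': {'c': 2, 'd': 3}}
    keys_in_dict(d, '', [])   # -> ['a', 'b.c', 'b.d'] (order follows dict iteration)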
8,426
python-visualization/branca
branca/element.py
Element.add_to
def add_to(self, parent, name=None, index=None): """Add element to a parent.""" parent.add_child(self, name=name, index=index) return self
python
def add_to(self, parent, name=None, index=None): """Add element to a parent.""" parent.add_child(self, name=name, index=index) return self
['def', 'add_to', '(', 'self', ',', 'parent', ',', 'name', '=', 'None', ',', 'index', '=', 'None', ')', ':', 'parent', '.', 'add_child', '(', 'self', ',', 'name', '=', 'name', ',', 'index', '=', 'index', ')', 'return', 'self']
Add element to a parent.
['Add', 'element', 'to', 'a', 'parent', '.']
train
https://github.com/python-visualization/branca/blob/4e89e88a5a7ff3586f0852249c2c125f72316da8/branca/element.py#L119-L122
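A quick usage sketch; because add_to returns self, the call chains naturally:

    from branca.element import Element, Figure
    fig = Figure()
    el = Element('<p>hello</p>').add_to(fig)   # el is the Element itself, now a child of fig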
8,427
sassoo/goldman
goldman/stores/postgres/store.py
Store.field_cols
def field_cols(model): """ Get the models columns in a friendly SQL format This will be a string of comma separated field names prefixed by the models resource type. TIP: to_manys are not located on the table in Postgres & are instead application references, so any reference to their column names should be pruned! :return: str """ to_many = model.to_many cols = [f for f in model.all_fields if f not in to_many] cols = ', '.join(cols) return cols or None
python
def field_cols(model): """ Get the models columns in a friendly SQL format This will be a string of comma separated field names prefixed by the models resource type. TIP: to_manys are not located on the table in Postgres & are instead application references, so any reference to their column names should be pruned! :return: str """ to_many = model.to_many cols = [f for f in model.all_fields if f not in to_many] cols = ', '.join(cols) return cols or None
['def', 'field_cols', '(', 'model', ')', ':', 'to_many', '=', 'model', '.', 'to_many', 'cols', '=', '[', 'f', 'for', 'f', 'in', 'model', '.', 'all_fields', 'if', 'f', 'not', 'in', 'to_many', ']', 'cols', '=', "', '", '.', 'join', '(', 'cols', ')', 'return', 'cols', 'or', 'None']
Get the models columns in a friendly SQL format This will be a string of comma separated field names prefixed by the models resource type. TIP: to_manys are not located on the table in Postgres & are instead application references, so any reference to their column names should be pruned! :return: str
['Get', 'the', 'models', 'columns', 'in', 'a', 'friendly', 'SQL', 'format']
train
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/stores/postgres/store.py#L126-L143
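A sketch with a stand-in model object; FakeModel and its attributes are assumptions, and field_cols appears to act as a staticmethod since its only parameter is model:

    class FakeModel(object):                 # hypothetical stand-in for a goldman model
        to_many = ['tags']
        all_fields = ['id', 'title', 'tags']
    field_cols(FakeModel)                    # -> 'id, title' (to_many fields pruned)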
8,428
watson-developer-cloud/python-sdk
ibm_watson/speech_to_text_v1.py
SpeakerLabelsResult._to_dict
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'from_') and self.from_ is not None: _dict['from'] = self.from_ if hasattr(self, 'to') and self.to is not None: _dict['to'] = self.to if hasattr(self, 'speaker') and self.speaker is not None: _dict['speaker'] = self.speaker if hasattr(self, 'confidence') and self.confidence is not None: _dict['confidence'] = self.confidence if hasattr(self, 'final_results') and self.final_results is not None: _dict['final'] = self.final_results return _dict
python
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'from_') and self.from_ is not None: _dict['from'] = self.from_ if hasattr(self, 'to') and self.to is not None: _dict['to'] = self.to if hasattr(self, 'speaker') and self.speaker is not None: _dict['speaker'] = self.speaker if hasattr(self, 'confidence') and self.confidence is not None: _dict['confidence'] = self.confidence if hasattr(self, 'final_results') and self.final_results is not None: _dict['final'] = self.final_results return _dict
['def', '_to_dict', '(', 'self', ')', ':', '_dict', '=', '{', '}', 'if', 'hasattr', '(', 'self', ',', "'from_'", ')', 'and', 'self', '.', 'from_', 'is', 'not', 'None', ':', '_dict', '[', "'from'", ']', '=', 'self', '.', 'from_', 'if', 'hasattr', '(', 'self', ',', "'to'", ')', 'and', 'self', '.', 'to', 'is', 'not', 'None', ':', '_dict', '[', "'to'", ']', '=', 'self', '.', 'to', 'if', 'hasattr', '(', 'self', ',', "'speaker'", ')', 'and', 'self', '.', 'speaker', 'is', 'not', 'None', ':', '_dict', '[', "'speaker'", ']', '=', 'self', '.', 'speaker', 'if', 'hasattr', '(', 'self', ',', "'confidence'", ')', 'and', 'self', '.', 'confidence', 'is', 'not', 'None', ':', '_dict', '[', "'confidence'", ']', '=', 'self', '.', 'confidence', 'if', 'hasattr', '(', 'self', ',', "'final_results'", ')', 'and', 'self', '.', 'final_results', 'is', 'not', 'None', ':', '_dict', '[', "'final'", ']', '=', 'self', '.', 'final_results', 'return', '_dict']
Return a json dictionary representing this model.
['Return', 'a', 'json', 'dictionary', 'representing', 'this', 'model', '.']
train
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/speech_to_text_v1.py#L4599-L4612
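The method's main job is renaming Python-safe attributes back to wire keys ('from_' -> 'from', 'final_results' -> 'final'); a hedged round-trip sketch, assuming the SDK's usual _from_dict counterpart exists on this model:

    label = SpeakerLabelsResult._from_dict(      # _from_dict is assumed here
        {'from': 0.68, 'to': 1.19, 'speaker': 2, 'confidence': 0.45, 'final': False})
    label._to_dict()                             # returns the same JSON dictionary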
8,429
cds-astro/mocpy
mocpy/moc/moc.py
MOC.from_url
def from_url(cls, url): """ Creates a `~mocpy.moc.MOC` object from a given url. Parameters ---------- url : str The url of a FITS file storing a MOC. Returns ------- result : `~mocpy.moc.MOC` The resulting MOC. """ path = download_file(url, show_progress=False, timeout=60) return cls.from_fits(path)
python
def from_url(cls, url): """ Creates a `~mocpy.moc.MOC` object from a given url. Parameters ---------- url : str The url of a FITS file storing a MOC. Returns ------- result : `~mocpy.moc.MOC` The resulting MOC. """ path = download_file(url, show_progress=False, timeout=60) return cls.from_fits(path)
['def', 'from_url', '(', 'cls', ',', 'url', ')', ':', 'path', '=', 'download_file', '(', 'url', ',', 'show_progress', '=', 'False', ',', 'timeout', '=', '60', ')', 'return', 'cls', '.', 'from_fits', '(', 'path', ')']
Creates a `~mocpy.moc.MOC` object from a given url. Parameters ---------- url : str The url of a FITS file storing a MOC. Returns ------- result : `~mocpy.moc.MOC` The resulting MOC.
['Creates', 'a', '~mocpy', '.', 'moc', '.', 'MOC', 'object', 'from', 'a', 'given', 'url', '.']
train
https://github.com/cds-astro/mocpy/blob/09472cabe537f6bfdb049eeea64d3ea57b391c21/mocpy/moc/moc.py#L439-L454
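A one-line usage sketch; the URL is hypothetical and must point at a FITS file storing a MOC:

    from mocpy import MOC
    moc = MOC.from_url('http://example.com/some_moc.fits')   # illustrative URL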
8,430
gregreen/dustmaps
dustmaps/bh.py
ascii2h5
def ascii2h5(bh_dir=None): """ Convert the Burstein & Heiles (1982) dust map from ASCII to HDF5. """ if bh_dir is None: bh_dir = os.path.join(data_dir_default, 'bh') fname = os.path.join(bh_dir, '{}.ascii') f = h5py.File('bh.h5', 'w') for region in ('hinorth', 'hisouth'): data = np.loadtxt(fname.format(region), dtype='f4') # Reshape and clip data.shape = (210, 201) # (R, N) data = data[:201] # Last 9 records are empty # Use NaNs where no data data[data < -9000] = np.nan dset = f.create_dataset( region, data=data, chunks=True, compression='gzip', compression_opts=3 ) dset.attrs['axes'] = ('R', 'N') dset.attrs['description'] = ( 'HI 21cm column densities, in units of 10*NHYD. ' 'R = 100 + [(90^o-|b|) sin(l)]/[0.3 degrees]. ' 'N = 100 + [(90^o-|b|) cos (l)]/[0.3 degrees].' ) for region in ('rednorth', 'redsouth'): data = np.loadtxt(fname.format(region), dtype='f4') # Reshape and clip data.shape = (94, 1200) # (R, N) data = data[:93] # Last record is empty # Use NaNs where no data data[data < -9000] = np.nan dset = f.create_dataset( region, data=data, chunks=True, compression='gzip', compression_opts=3 ) dset.attrs['axes'] = ('R', 'N') dset.attrs['description'] = ( 'E(B-V), in units of 0.001 mag. ' 'R = (|b| - 10) / (0.6 degrees). ' 'N = (l + 0.15) / 0.3 - 1.' ) f.attrs['description'] = ( 'The Burstein & Heiles (1982) dust map.' ) f.close()
python
def ascii2h5(bh_dir=None): """ Convert the Burstein & Heiles (1982) dust map from ASCII to HDF5. """ if bh_dir is None: bh_dir = os.path.join(data_dir_default, 'bh') fname = os.path.join(bh_dir, '{}.ascii') f = h5py.File('bh.h5', 'w') for region in ('hinorth', 'hisouth'): data = np.loadtxt(fname.format(region), dtype='f4') # Reshape and clip data.shape = (210, 201) # (R, N) data = data[:201] # Last 9 records are empty # Use NaNs where no data data[data < -9000] = np.nan dset = f.create_dataset( region, data=data, chunks=True, compression='gzip', compression_opts=3 ) dset.attrs['axes'] = ('R', 'N') dset.attrs['description'] = ( 'HI 21cm column densities, in units of 10*NHYD. ' 'R = 100 + [(90^o-|b|) sin(l)]/[0.3 degrees]. ' 'N = 100 + [(90^o-|b|) cos (l)]/[0.3 degrees].' ) for region in ('rednorth', 'redsouth'): data = np.loadtxt(fname.format(region), dtype='f4') # Reshape and clip data.shape = (94, 1200) # (R, N) data = data[:93] # Last record is empty # Use NaNs where no data data[data < -9000] = np.nan dset = f.create_dataset( region, data=data, chunks=True, compression='gzip', compression_opts=3 ) dset.attrs['axes'] = ('R', 'N') dset.attrs['description'] = ( 'E(B-V), in units of 0.001 mag. ' 'R = (|b| - 10) / (0.6 degrees). ' 'N = (l + 0.15) / 0.3 - 1.' ) f.attrs['description'] = ( 'The Burstein & Heiles (1982) dust map.' ) f.close()
['def', 'ascii2h5', '(', 'bh_dir', '=', 'None', ')', ':', 'if', 'bh_dir', 'is', 'None', ':', 'bh_dir', '=', 'os', '.', 'path', '.', 'join', '(', 'data_dir_default', ',', "'bh'", ')', 'fname', '=', 'os', '.', 'path', '.', 'join', '(', 'bh_dir', ',', "'{}.ascii'", ')', 'f', '=', 'h5py', '.', 'File', '(', "'bh.h5'", ',', "'w'", ')', 'for', 'region', 'in', '(', "'hinorth'", ',', "'hisouth'", ')', ':', 'data', '=', 'np', '.', 'loadtxt', '(', 'fname', '.', 'format', '(', 'region', ')', ',', 'dtype', '=', "'f4'", ')', '# Reshape and clip', 'data', '.', 'shape', '=', '(', '210', ',', '201', ')', '# (R, N)', 'data', '=', 'data', '[', ':', '201', ']', '# Last 9 records are empty', '# Use NaNs where no data', 'data', '[', 'data', '<', '-', '9000', ']', '=', 'np', '.', 'nan', 'dset', '=', 'f', '.', 'create_dataset', '(', 'region', ',', 'data', '=', 'data', ',', 'chunks', '=', 'True', ',', 'compression', '=', "'gzip'", ',', 'compression_opts', '=', '3', ')', 'dset', '.', 'attrs', '[', "'axes'", ']', '=', '(', "'R'", ',', "'N'", ')', 'dset', '.', 'attrs', '[', "'description'", ']', '=', '(', "'HI 21cm column densities, in units of 10*NHYD. '", "'R = 100 + [(90^o-|b|) sin(l)]/[0.3 degrees]. '", "'N = 100 + [(90^o-|b|) cos (l)]/[0.3 degrees].'", ')', 'for', 'region', 'in', '(', "'rednorth'", ',', "'redsouth'", ')', ':', 'data', '=', 'np', '.', 'loadtxt', '(', 'fname', '.', 'format', '(', 'region', ')', ',', 'dtype', '=', "'f4'", ')', '# Reshape and clip', 'data', '.', 'shape', '=', '(', '94', ',', '1200', ')', '# (R, N)', 'data', '=', 'data', '[', ':', '93', ']', '# Last record is empty', '# Use NaNs where no data', 'data', '[', 'data', '<', '-', '9000', ']', '=', 'np', '.', 'nan', 'dset', '=', 'f', '.', 'create_dataset', '(', 'region', ',', 'data', '=', 'data', ',', 'chunks', '=', 'True', ',', 'compression', '=', "'gzip'", ',', 'compression_opts', '=', '3', ')', 'dset', '.', 'attrs', '[', "'axes'", ']', '=', '(', "'R'", ',', "'N'", ')', 'dset', '.', 'attrs', '[', "'description'", ']', '=', '(', "'E(B-V), in units of 0.001 mag. '", "'R = (|b| - 10) / (0.6 degrees). '", "'N = (l + 0.15) / 0.3 - 1.'", ')', 'f', '.', 'attrs', '[', "'description'", ']', '=', '(', "'The Burstein & Heiles (1982) dust map.'", ')', 'f', '.', 'close', '(', ')']
Convert the Burstein & Heiles (1982) dust map from ASCII to HDF5.
['Convert', 'the', 'Burstein', '&', 'Heiles', '(', '1982', ')', 'dust', 'map', 'from', 'ASCII', 'to', 'HDF5', '.']
train
https://github.com/gregreen/dustmaps/blob/c8f571a71da0d951bf8ea865621bee14492bdfd9/dustmaps/bh.py#L33-L99
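Once converted, the file can be read back with h5py; the dataset names, shapes, and attributes follow the code above (note the output lands in the current directory as bh.h5):

    import h5py
    with h5py.File('bh.h5', 'r') as f:
        hinorth = f['hinorth'][:]                  # (201, 201) array, NaN where the map has no data
        print(f['hinorth'].attrs['description'])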
8,431
minhhoit/yacms
yacms/pages/page_processors.py
processor_for
def processor_for(content_model_or_slug, exact_page=False): """ Decorator that registers the decorated function as a page processor for the given content model or slug. When a page exists that forms the prefix of custom urlpatterns in a project (eg: the blog page and app), the page will be added to the template context. Passing in ``True`` for the ``exact_page`` arg, will ensure that the page processor is not run in this situation, requiring that the loaded page object is for the exact URL currently being viewed. """ content_model = None slug = "" if isinstance(content_model_or_slug, (str, _str)): try: parts = content_model_or_slug.split(".", 1) content_model = apps.get_model(*parts) except (TypeError, ValueError, LookupError): slug = content_model_or_slug elif issubclass(content_model_or_slug, Page): content_model = content_model_or_slug else: raise TypeError("%s is not a valid argument for page_processor, " "which should be a model subclass of Page in class " "or string form (app.model), or a valid slug" % content_model_or_slug) def decorator(func): parts = (func, exact_page) if content_model: model_name = content_model._meta.object_name.lower() processors[model_name].insert(0, parts) else: processors["slug:%s" % slug].insert(0, parts) return func return decorator
python
def processor_for(content_model_or_slug, exact_page=False): """ Decorator that registers the decorated function as a page processor for the given content model or slug. When a page exists that forms the prefix of custom urlpatterns in a project (eg: the blog page and app), the page will be added to the template context. Passing in ``True`` for the ``exact_page`` arg, will ensure that the page processor is not run in this situation, requiring that the loaded page object is for the exact URL currently being viewed. """ content_model = None slug = "" if isinstance(content_model_or_slug, (str, _str)): try: parts = content_model_or_slug.split(".", 1) content_model = apps.get_model(*parts) except (TypeError, ValueError, LookupError): slug = content_model_or_slug elif issubclass(content_model_or_slug, Page): content_model = content_model_or_slug else: raise TypeError("%s is not a valid argument for page_processor, " "which should be a model subclass of Page in class " "or string form (app.model), or a valid slug" % content_model_or_slug) def decorator(func): parts = (func, exact_page) if content_model: model_name = content_model._meta.object_name.lower() processors[model_name].insert(0, parts) else: processors["slug:%s" % slug].insert(0, parts) return func return decorator
['def', 'processor_for', '(', 'content_model_or_slug', ',', 'exact_page', '=', 'False', ')', ':', 'content_model', '=', 'None', 'slug', '=', '""', 'if', 'isinstance', '(', 'content_model_or_slug', ',', '(', 'str', ',', '_str', ')', ')', ':', 'try', ':', 'parts', '=', 'content_model_or_slug', '.', 'split', '(', '"."', ',', '1', ')', 'content_model', '=', 'apps', '.', 'get_model', '(', '*', 'parts', ')', 'except', '(', 'TypeError', ',', 'ValueError', ',', 'LookupError', ')', ':', 'slug', '=', 'content_model_or_slug', 'elif', 'issubclass', '(', 'content_model_or_slug', ',', 'Page', ')', ':', 'content_model', '=', 'content_model_or_slug', 'else', ':', 'raise', 'TypeError', '(', '"%s is not a valid argument for page_processor, "', '"which should be a model subclass of Page in class "', '"or string form (app.model), or a valid slug"', '%', 'content_model_or_slug', ')', 'def', 'decorator', '(', 'func', ')', ':', 'parts', '=', '(', 'func', ',', 'exact_page', ')', 'if', 'content_model', ':', 'model_name', '=', 'content_model', '.', '_meta', '.', 'object_name', '.', 'lower', '(', ')', 'processors', '[', 'model_name', ']', '.', 'insert', '(', '0', ',', 'parts', ')', 'else', ':', 'processors', '[', '"slug:%s"', '%', 'slug', ']', '.', 'insert', '(', '0', ',', 'parts', ')', 'return', 'func', 'return', 'decorator']
Decorator that registers the decorated function as a page processor for the given content model or slug. When a page exists that forms the prefix of custom urlpatterns in a project (eg: the blog page and app), the page will be added to the template context. Passing in ``True`` for the ``exact_page`` arg will ensure that the page processor is not run in this situation, requiring that the loaded page object is for the exact URL currently being viewed.
['Decorator', 'that', 'registers', 'the', 'decorated', 'function', 'as', 'a', 'page', 'processor', 'for', 'the', 'given', 'content', 'model', 'or', 'slug', '.']
train
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/pages/page_processors.py#L17-L53
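A hedged registration sketch using the slug form; the slug, function name, and returned context are illustrative, assuming page processors here take the request and the page as in Mezzanine:

    from yacms.pages.page_processors import processor_for

    @processor_for('about-us')           # could also be a Page subclass or an "app.model" string
    def about_context(request, page):
        return {'extra': 'context'}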
8,432
wmayner/pyphi
pyphi/examples.py
residue_network
def residue_network(): """The network for the residue example. Current and previous state are all nodes OFF. Diagram:: +~~~~~~~+ +~~~~~~~+ | A | | B | +~~>| (AND) | | (AND) |<~~+ | +~~~~~~~+ +~~~~~~~+ | | ^ ^ | | | | | | +~~~~~+ +~~~~~+ | | | | | +~~~+~~~+ +~+~~~+~+ +~~~+~~~+ | C | | D | | E | | | | | | | +~~~~~~~+ +~~~~~~~+ +~~~~~~~+ Connectivity matrix: +---+---+---+---+---+---+ | . | A | B | C | D | E | +---+---+---+---+---+---+ | A | 0 | 0 | 0 | 0 | 0 | +---+---+---+---+---+---+ | B | 0 | 0 | 0 | 0 | 0 | +---+---+---+---+---+---+ | C | 1 | 0 | 0 | 0 | 0 | +---+---+---+---+---+---+ | D | 1 | 1 | 0 | 0 | 0 | +---+---+---+---+---+---+ | E | 0 | 1 | 0 | 0 | 0 | +---+---+---+---+---+---+ """ tpm = np.array([ [int(s) for s in bin(x)[2:].zfill(5)[::-1]] for x in range(32) ]) tpm[np.where(np.sum(tpm[0:, 2:4], 1) == 2), 0] = 1 tpm[np.where(np.sum(tpm[0:, 3:5], 1) == 2), 1] = 1 tpm[np.where(np.sum(tpm[0:, 2:4], 1) < 2), 0] = 0 tpm[np.where(np.sum(tpm[0:, 3:5], 1) < 2), 1] = 0 cm = np.zeros((5, 5)) cm[2:4, 0] = 1 cm[3:, 1] = 1 return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])
python
def residue_network(): """The network for the residue example. Current and previous state are all nodes OFF. Diagram:: +~~~~~~~+ +~~~~~~~+ | A | | B | +~~>| (AND) | | (AND) |<~~+ | +~~~~~~~+ +~~~~~~~+ | | ^ ^ | | | | | | +~~~~~+ +~~~~~+ | | | | | +~~~+~~~+ +~+~~~+~+ +~~~+~~~+ | C | | D | | E | | | | | | | +~~~~~~~+ +~~~~~~~+ +~~~~~~~+ Connectivity matrix: +---+---+---+---+---+---+ | . | A | B | C | D | E | +---+---+---+---+---+---+ | A | 0 | 0 | 0 | 0 | 0 | +---+---+---+---+---+---+ | B | 0 | 0 | 0 | 0 | 0 | +---+---+---+---+---+---+ | C | 1 | 0 | 0 | 0 | 0 | +---+---+---+---+---+---+ | D | 1 | 1 | 0 | 0 | 0 | +---+---+---+---+---+---+ | E | 0 | 1 | 0 | 0 | 0 | +---+---+---+---+---+---+ """ tpm = np.array([ [int(s) for s in bin(x)[2:].zfill(5)[::-1]] for x in range(32) ]) tpm[np.where(np.sum(tpm[0:, 2:4], 1) == 2), 0] = 1 tpm[np.where(np.sum(tpm[0:, 3:5], 1) == 2), 1] = 1 tpm[np.where(np.sum(tpm[0:, 2:4], 1) < 2), 0] = 0 tpm[np.where(np.sum(tpm[0:, 3:5], 1) < 2), 1] = 0 cm = np.zeros((5, 5)) cm[2:4, 0] = 1 cm[3:, 1] = 1 return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])
['def', 'residue_network', '(', ')', ':', 'tpm', '=', 'np', '.', 'array', '(', '[', '[', 'int', '(', 's', ')', 'for', 's', 'in', 'bin', '(', 'x', ')', '[', '2', ':', ']', '.', 'zfill', '(', '5', ')', '[', ':', ':', '-', '1', ']', ']', 'for', 'x', 'in', 'range', '(', '32', ')', ']', ')', 'tpm', '[', 'np', '.', 'where', '(', 'np', '.', 'sum', '(', 'tpm', '[', '0', ':', ',', '2', ':', '4', ']', ',', '1', ')', '==', '2', ')', ',', '0', ']', '=', '1', 'tpm', '[', 'np', '.', 'where', '(', 'np', '.', 'sum', '(', 'tpm', '[', '0', ':', ',', '3', ':', '5', ']', ',', '1', ')', '==', '2', ')', ',', '1', ']', '=', '1', 'tpm', '[', 'np', '.', 'where', '(', 'np', '.', 'sum', '(', 'tpm', '[', '0', ':', ',', '2', ':', '4', ']', ',', '1', ')', '<', '2', ')', ',', '0', ']', '=', '0', 'tpm', '[', 'np', '.', 'where', '(', 'np', '.', 'sum', '(', 'tpm', '[', '0', ':', ',', '3', ':', '5', ']', ',', '1', ')', '<', '2', ')', ',', '1', ']', '=', '0', 'cm', '=', 'np', '.', 'zeros', '(', '(', '5', ',', '5', ')', ')', 'cm', '[', '2', ':', '4', ',', '0', ']', '=', '1', 'cm', '[', '3', ':', ',', '1', ']', '=', '1', 'return', 'Network', '(', 'tpm', ',', 'cm', '=', 'cm', ',', 'node_labels', '=', 'LABELS', '[', ':', 'tpm', '.', 'shape', '[', '1', ']', ']', ')']
The network for the residue example. Current and previous state are all nodes OFF. Diagram:: +~~~~~~~+ +~~~~~~~+ | A | | B | +~~>| (AND) | | (AND) |<~~+ | +~~~~~~~+ +~~~~~~~+ | | ^ ^ | | | | | | +~~~~~+ +~~~~~+ | | | | | +~~~+~~~+ +~+~~~+~+ +~~~+~~~+ | C | | D | | E | | | | | | | +~~~~~~~+ +~~~~~~~+ +~~~~~~~+ Connectivity matrix: +---+---+---+---+---+---+ | . | A | B | C | D | E | +---+---+---+---+---+---+ | A | 0 | 0 | 0 | 0 | 0 | +---+---+---+---+---+---+ | B | 0 | 0 | 0 | 0 | 0 | +---+---+---+---+---+---+ | C | 1 | 0 | 0 | 0 | 0 | +---+---+---+---+---+---+ | D | 1 | 1 | 0 | 0 | 0 | +---+---+---+---+---+---+ | E | 0 | 1 | 0 | 0 | 0 | +---+---+---+---+---+---+
['The', 'network', 'for', 'the', 'residue', 'example', '.']
train
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/examples.py#L170-L218
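A short usage sketch matching the docstring's all-OFF state:

    import pyphi
    network = pyphi.examples.residue_network()
    state = (0, 0, 0, 0, 0)              # all five nodes OFF, as the docstring assumes
    subsystem = pyphi.Subsystem(network, state)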
8,433
materialsproject/pymatgen
pymatgen/analysis/defects/corrections.py
FreysoldtCorrection.perform_es_corr
def perform_es_corr(self, lattice, q, step=1e-4): """ Perform Electrostatic Freysoldt Correction """ logger.info("Running Freysoldt 2011 PC calculation (should be " "equivalent to sxdefectalign)") logger.debug("defect lattice constants are (in angstroms)" + str(lattice.abc)) [a1, a2, a3] = ang_to_bohr * np.array(lattice.get_cartesian_coords(1)) logging.debug("In atomic units, lat consts are (in bohr):" + str([a1, a2, a3])) vol = np.dot(a1, np.cross(a2, a3)) # vol in bohr^3 def e_iso(encut): gcut = eV_to_k(encut) # gcut is in units of 1/A return scipy.integrate.quad(lambda g: self.q_model.rho_rec(g * g)**2, step, gcut)[0] * (q**2) / np.pi def e_per(encut): eper = 0 for g2 in generate_reciprocal_vectors_squared(a1, a2, a3, encut): eper += (self.q_model.rho_rec(g2)**2) / g2 eper *= (q**2) * 2 * round(np.pi, 6) / vol eper += (q**2) * 4 * round(np.pi, 6) \ * self.q_model.rho_rec_limit0 / vol return eper eiso = converge(e_iso, 5, self.madetol, self.energy_cutoff) logger.debug("Eisolated : %f", round(eiso, 5)) eper = converge(e_per, 5, self.madetol, self.energy_cutoff) logger.info("Eperiodic : %f hartree", round(eper, 5)) logger.info("difference (periodic-iso) is %f hartree", round(eper - eiso, 6)) logger.info("difference in (eV) is %f", round((eper - eiso) * hart_to_ev, 4)) es_corr = round((eiso - eper) / self.dielectric * hart_to_ev, 6) logger.info("Defect Correction without alignment %f (eV): ", es_corr) return es_corr
python
def perform_es_corr(self, lattice, q, step=1e-4): """ Perform Electrostatic Freysoldt Correction """ logger.info("Running Freysoldt 2011 PC calculation (should be " "equivalent to sxdefectalign)") logger.debug("defect lattice constants are (in angstroms)" + str(lattice.abc)) [a1, a2, a3] = ang_to_bohr * np.array(lattice.get_cartesian_coords(1)) logging.debug("In atomic units, lat consts are (in bohr):" + str([a1, a2, a3])) vol = np.dot(a1, np.cross(a2, a3)) # vol in bohr^3 def e_iso(encut): gcut = eV_to_k(encut) # gcut is in units of 1/A return scipy.integrate.quad(lambda g: self.q_model.rho_rec(g * g)**2, step, gcut)[0] * (q**2) / np.pi def e_per(encut): eper = 0 for g2 in generate_reciprocal_vectors_squared(a1, a2, a3, encut): eper += (self.q_model.rho_rec(g2)**2) / g2 eper *= (q**2) * 2 * round(np.pi, 6) / vol eper += (q**2) * 4 * round(np.pi, 6) \ * self.q_model.rho_rec_limit0 / vol return eper eiso = converge(e_iso, 5, self.madetol, self.energy_cutoff) logger.debug("Eisolated : %f", round(eiso, 5)) eper = converge(e_per, 5, self.madetol, self.energy_cutoff) logger.info("Eperiodic : %f hartree", round(eper, 5)) logger.info("difference (periodic-iso) is %f hartree", round(eper - eiso, 6)) logger.info("difference in (eV) is %f", round((eper - eiso) * hart_to_ev, 4)) es_corr = round((eiso - eper) / self.dielectric * hart_to_ev, 6) logger.info("Defect Correction without alignment %f (eV): ", es_corr) return es_corr
['def', 'perform_es_corr', '(', 'self', ',', 'lattice', ',', 'q', ',', 'step', '=', '1e-4', ')', ':', 'logger', '.', 'info', '(', '"Running Freysoldt 2011 PC calculation (should be "', '"equivalent to sxdefectalign)"', ')', 'logger', '.', 'debug', '(', '"defect lattice constants are (in angstroms)"', '+', 'str', '(', 'lattice', '.', 'abc', ')', ')', '[', 'a1', ',', 'a2', ',', 'a3', ']', '=', 'ang_to_bohr', '*', 'np', '.', 'array', '(', 'lattice', '.', 'get_cartesian_coords', '(', '1', ')', ')', 'logging', '.', 'debug', '(', '"In atomic units, lat consts are (in bohr):"', '+', 'str', '(', '[', 'a1', ',', 'a2', ',', 'a3', ']', ')', ')', 'vol', '=', 'np', '.', 'dot', '(', 'a1', ',', 'np', '.', 'cross', '(', 'a2', ',', 'a3', ')', ')', '# vol in bohr^3', 'def', 'e_iso', '(', 'encut', ')', ':', 'gcut', '=', 'eV_to_k', '(', 'encut', ')', '# gcut is in units of 1/A', 'return', 'scipy', '.', 'integrate', '.', 'quad', '(', 'lambda', 'g', ':', 'self', '.', 'q_model', '.', 'rho_rec', '(', 'g', '*', 'g', ')', '**', '2', ',', 'step', ',', 'gcut', ')', '[', '0', ']', '*', '(', 'q', '**', '2', ')', '/', 'np', '.', 'pi', 'def', 'e_per', '(', 'encut', ')', ':', 'eper', '=', '0', 'for', 'g2', 'in', 'generate_reciprocal_vectors_squared', '(', 'a1', ',', 'a2', ',', 'a3', ',', 'encut', ')', ':', 'eper', '+=', '(', 'self', '.', 'q_model', '.', 'rho_rec', '(', 'g2', ')', '**', '2', ')', '/', 'g2', 'eper', '*=', '(', 'q', '**', '2', ')', '*', '2', '*', 'round', '(', 'np', '.', 'pi', ',', '6', ')', '/', 'vol', 'eper', '+=', '(', 'q', '**', '2', ')', '*', '4', '*', 'round', '(', 'np', '.', 'pi', ',', '6', ')', '*', 'self', '.', 'q_model', '.', 'rho_rec_limit0', '/', 'vol', 'return', 'eper', 'eiso', '=', 'converge', '(', 'e_iso', ',', '5', ',', 'self', '.', 'madetol', ',', 'self', '.', 'energy_cutoff', ')', 'logger', '.', 'debug', '(', '"Eisolated : %f"', ',', 'round', '(', 'eiso', ',', '5', ')', ')', 'eper', '=', 'converge', '(', 'e_per', ',', '5', ',', 'self', '.', 'madetol', ',', 'self', '.', 'energy_cutoff', ')', 'logger', '.', 'info', '(', '"Eperiodic : %f hartree"', ',', 'round', '(', 'eper', ',', '5', ')', ')', 'logger', '.', 'info', '(', '"difference (periodic-iso) is %f hartree"', ',', 'round', '(', 'eper', '-', 'eiso', ',', '6', ')', ')', 'logger', '.', 'info', '(', '"difference in (eV) is %f"', ',', 'round', '(', '(', 'eper', '-', 'eiso', ')', '*', 'hart_to_ev', ',', '4', ')', ')', 'es_corr', '=', 'round', '(', '(', 'eiso', '-', 'eper', ')', '/', 'self', '.', 'dielectric', '*', 'hart_to_ev', ',', '6', ')', 'logger', '.', 'info', '(', '"Defect Correction without alignment %f (eV): "', ',', 'es_corr', ')', 'return', 'es_corr']
Perform Electrostatic Freysoldt Correction
['Perform', 'Electrostatic', 'Freysoldt', 'Correction']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/defects/corrections.py#L123-L158
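A hedged call sketch; fc and defect_structure are assumptions standing for a configured FreysoldtCorrection and a pymatgen structure, and q is the defect charge:

    # fc is assumed to have dielectric, energy_cutoff, madetol and q_model already set
    es_corr_ev = fc.perform_es_corr(defect_structure.lattice, q=-1)   # point-charge term in eV, no alignment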
8,434
mrstephenneal/mysql-toolkit
mysql/toolkit/components/structure/keys.py
ForeignKey.set_foreign_key
def set_foreign_key(self, parent_table, parent_column, child_table, child_column): """Create a Foreign Key constraint on a column from a table.""" self.execute('ALTER TABLE {0} ADD FOREIGN KEY ({1}) REFERENCES {2}({3})'.format(parent_table, parent_column, child_table, child_column))
python
def set_foreign_key(self, parent_table, parent_column, child_table, child_column): """Create a Foreign Key constraint on a column from a table.""" self.execute('ALTER TABLE {0} ADD FOREIGN KEY ({1}) REFERENCES {2}({3})'.format(parent_table, parent_column, child_table, child_column))
['def', 'set_foreign_key', '(', 'self', ',', 'parent_table', ',', 'parent_column', ',', 'child_table', ',', 'child_column', ')', ':', 'self', '.', 'execute', '(', "'ALTER TABLE {0} ADD FOREIGN KEY ({1}) REFERENCES {2}({3})'", '.', 'format', '(', 'parent_table', ',', 'parent_column', ',', 'child_table', ',', 'child_column', ')', ')']
Create a Foreign Key constraint on a column from a table.
['Create', 'a', 'Foreign', 'Key', 'constraint', 'on', 'a', 'column', 'from', 'a', 'table', '.']
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/structure/keys.py#L69-L72
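A usage sketch; toolkit stands for a connected instance exposing this method, and the table and column names are illustrative:

    toolkit.set_foreign_key('orders', 'customer_id', 'customers', 'id')
    # executes: ALTER TABLE orders ADD FOREIGN KEY (customer_id) REFERENCES customers(id)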
8,435
paramiko/paramiko
paramiko/sftp_file.py
SFTPFile.check
def check(self, hash_algorithm, offset=0, length=0, block_size=0): """ Ask the server for a hash of a section of this file. This can be used to verify a successful upload or download, or for various rsync-like operations. The file is hashed from ``offset``, for ``length`` bytes. If ``length`` is 0, the remainder of the file is hashed. Thus, if both ``offset`` and ``length`` are zero, the entire file is hashed. Normally, ``block_size`` will be 0 (the default), and this method will return a byte string representing the requested hash (for example, a string of length 16 for MD5, or 20 for SHA-1). If a non-zero ``block_size`` is given, each chunk of the file (from ``offset`` to ``offset + length``) of ``block_size`` bytes is computed as a separate hash. The hash results are all concatenated and returned as a single string. For example, ``check('sha1', 0, 1024, 512)`` will return a string of length 40. The first 20 bytes will be the SHA-1 of the first 512 bytes of the file, and the last 20 bytes will be the SHA-1 of the next 512 bytes. :param str hash_algorithm: the name of the hash algorithm to use (normally ``"sha1"`` or ``"md5"``) :param offset: offset into the file to begin hashing (0 means to start from the beginning) :param length: number of bytes to hash (0 means continue to the end of the file) :param int block_size: number of bytes to hash per result (must not be less than 256; 0 means to compute only one hash of the entire segment) :return: `str` of bytes representing the hash of each block, concatenated together :raises: ``IOError`` -- if the server doesn't support the "check-file" extension, or possibly doesn't support the hash algorithm requested .. note:: Many (most?) servers don't support this extension yet. .. versionadded:: 1.4 """ t, msg = self.sftp._request( CMD_EXTENDED, "check-file", self.handle, hash_algorithm, long(offset), long(length), block_size, ) msg.get_text() # ext msg.get_text() # alg data = msg.get_remainder() return data
python
def check(self, hash_algorithm, offset=0, length=0, block_size=0): """ Ask the server for a hash of a section of this file. This can be used to verify a successful upload or download, or for various rsync-like operations. The file is hashed from ``offset``, for ``length`` bytes. If ``length`` is 0, the remainder of the file is hashed. Thus, if both ``offset`` and ``length`` are zero, the entire file is hashed. Normally, ``block_size`` will be 0 (the default), and this method will return a byte string representing the requested hash (for example, a string of length 16 for MD5, or 20 for SHA-1). If a non-zero ``block_size`` is given, each chunk of the file (from ``offset`` to ``offset + length``) of ``block_size`` bytes is computed as a separate hash. The hash results are all concatenated and returned as a single string. For example, ``check('sha1', 0, 1024, 512)`` will return a string of length 40. The first 20 bytes will be the SHA-1 of the first 512 bytes of the file, and the last 20 bytes will be the SHA-1 of the next 512 bytes. :param str hash_algorithm: the name of the hash algorithm to use (normally ``"sha1"`` or ``"md5"``) :param offset: offset into the file to begin hashing (0 means to start from the beginning) :param length: number of bytes to hash (0 means continue to the end of the file) :param int block_size: number of bytes to hash per result (must not be less than 256; 0 means to compute only one hash of the entire segment) :return: `str` of bytes representing the hash of each block, concatenated together :raises: ``IOError`` -- if the server doesn't support the "check-file" extension, or possibly doesn't support the hash algorithm requested .. note:: Many (most?) servers don't support this extension yet. .. versionadded:: 1.4 """ t, msg = self.sftp._request( CMD_EXTENDED, "check-file", self.handle, hash_algorithm, long(offset), long(length), block_size, ) msg.get_text() # ext msg.get_text() # alg data = msg.get_remainder() return data
['def', 'check', '(', 'self', ',', 'hash_algorithm', ',', 'offset', '=', '0', ',', 'length', '=', '0', ',', 'block_size', '=', '0', ')', ':', 't', ',', 'msg', '=', 'self', '.', 'sftp', '.', '_request', '(', 'CMD_EXTENDED', ',', '"check-file"', ',', 'self', '.', 'handle', ',', 'hash_algorithm', ',', 'long', '(', 'offset', ')', ',', 'long', '(', 'length', ')', ',', 'block_size', ',', ')', 'msg', '.', 'get_text', '(', ')', '# ext', 'msg', '.', 'get_text', '(', ')', '# alg', 'data', '=', 'msg', '.', 'get_remainder', '(', ')', 'return', 'data']
Ask the server for a hash of a section of this file. This can be used to verify a successful upload or download, or for various rsync-like operations. The file is hashed from ``offset``, for ``length`` bytes. If ``length`` is 0, the remainder of the file is hashed. Thus, if both ``offset`` and ``length`` are zero, the entire file is hashed. Normally, ``block_size`` will be 0 (the default), and this method will return a byte string representing the requested hash (for example, a string of length 16 for MD5, or 20 for SHA-1). If a non-zero ``block_size`` is given, each chunk of the file (from ``offset`` to ``offset + length``) of ``block_size`` bytes is computed as a separate hash. The hash results are all concatenated and returned as a single string. For example, ``check('sha1', 0, 1024, 512)`` will return a string of length 40. The first 20 bytes will be the SHA-1 of the first 512 bytes of the file, and the last 20 bytes will be the SHA-1 of the next 512 bytes. :param str hash_algorithm: the name of the hash algorithm to use (normally ``"sha1"`` or ``"md5"``) :param offset: offset into the file to begin hashing (0 means to start from the beginning) :param length: number of bytes to hash (0 means continue to the end of the file) :param int block_size: number of bytes to hash per result (must not be less than 256; 0 means to compute only one hash of the entire segment) :return: `str` of bytes representing the hash of each block, concatenated together :raises: ``IOError`` -- if the server doesn't support the "check-file" extension, or possibly doesn't support the hash algorithm requested .. note:: Many (most?) servers don't support this extension yet. .. versionadded:: 1.4
['Ask', 'the', 'server', 'for', 'a', 'hash', 'of', 'a', 'section', 'of', 'this', 'file', '.', 'This', 'can', 'be', 'used', 'to', 'verify', 'a', 'successful', 'upload', 'or', 'download', 'or', 'for', 'various', 'rsync', '-', 'like', 'operations', '.']
train
https://github.com/paramiko/paramiko/blob/cf7d49d66f3b1fbc8b0853518a54050182b3b5eb/paramiko/sftp_file.py#L358-L416
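A hedged usage sketch; sftp stands for an open paramiko SFTPClient, and the server must support the check-file extension:

    with sftp.open('remote.bin', 'rb') as f:
        md5_whole = f.check('md5')                     # 16 bytes: MD5 of the entire file
        sha1_blocks = f.check('sha1', 0, 4096, 512)    # 8 concatenated 20-byte SHA-1 block hashes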
8,436
Duke-GCB/lando-messaging
lando_messaging/clients.py
LandoWorkerClient.run_job
def run_job(self, job_details, workflow, vm_instance_name): """ Execute a workflow on a worker. :param job_details: object: details about job(id, name, created date, workflow version) :param workflow: jobapi.Workflow: url to workflow and parameters to use :param vm_instance_name: name of the instance lando_worker is running on (this is passed back in the response) """ self._send(JobCommands.RUN_JOB, RunJobPayload(job_details, workflow, vm_instance_name))
python
def run_job(self, job_details, workflow, vm_instance_name): """ Execute a workflow on a worker. :param job_details: object: details about job(id, name, created date, workflow version) :param workflow: jobapi.Workflow: url to workflow and parameters to use :param vm_instance_name: name of the instance lando_worker is running on (this is passed back in the response) """ self._send(JobCommands.RUN_JOB, RunJobPayload(job_details, workflow, vm_instance_name))
['def', 'run_job', '(', 'self', ',', 'job_details', ',', 'workflow', ',', 'vm_instance_name', ')', ':', 'self', '.', '_send', '(', 'JobCommands', '.', 'RUN_JOB', ',', 'RunJobPayload', '(', 'job_details', ',', 'workflow', ',', 'vm_instance_name', ')', ')']
Execute a workflow on a worker. :param job_details: object: details about job(id, name, created date, workflow version) :param workflow: jobapi.Workflow: url to workflow and parameters to use :param vm_instance_name: name of the instance lando_worker is running on (this is passed back in the response)
['Execute', 'a', 'workflow', 'on', 'a', 'worker', '.', ':', 'param', 'job_details', ':', 'object', ':', 'details', 'about', 'job', '(', 'id', 'name', 'created', 'date', 'workflow', 'version', ')', ':', 'param', 'workflow', ':', 'jobapi', '.', 'Workflow', ':', 'url', 'to', 'workflow', 'and', 'parameters', 'to', 'use', ':', 'param', 'vm_instance_name', ':', 'name', 'of', 'the', 'instance', 'lando_worker', 'is', 'running', 'on', '(', 'this', 'passed', 'back', 'in', 'the', 'response', ')']
train
https://github.com/Duke-GCB/lando-messaging/blob/b90ccc79a874714e0776af8badf505bb2b56c0ec/lando_messaging/clients.py#L123-L130
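A hedged call sketch; client construction is omitted because its signature is not shown in this record:

    # client is assumed to be an already-constructed LandoWorkerClient
    client.run_job(job_details, workflow, vm_instance_name='vm_worker_1')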
8,437
cdgriffith/Reusables
reusables/log.py
remove_stream_handlers
def remove_stream_handlers(logger=None): """ Remove only stream handlers from the specified logger :param logger: logging name or object to modify, defaults to root logger """ if not isinstance(logger, logging.Logger): logger = logging.getLogger(logger) new_handlers = [] for handler in logger.handlers: # FileHandler is a subclass of StreamHandler so # 'if not a StreamHandler' does not work if (isinstance(handler, logging.FileHandler) or isinstance(handler, logging.NullHandler) or (isinstance(handler, logging.Handler) and not isinstance(handler, logging.StreamHandler))): new_handlers.append(handler) logger.handlers = new_handlers
python
def remove_stream_handlers(logger=None): """ Remove only stream handlers from the specified logger :param logger: logging name or object to modify, defaults to root logger """ if not isinstance(logger, logging.Logger): logger = logging.getLogger(logger) new_handlers = [] for handler in logger.handlers: # FileHandler is a subclass of StreamHandler so # 'if not a StreamHandler' does not work if (isinstance(handler, logging.FileHandler) or isinstance(handler, logging.NullHandler) or (isinstance(handler, logging.Handler) and not isinstance(handler, logging.StreamHandler))): new_handlers.append(handler) logger.handlers = new_handlers
['def', 'remove_stream_handlers', '(', 'logger', '=', 'None', ')', ':', 'if', 'not', 'isinstance', '(', 'logger', ',', 'logging', '.', 'Logger', ')', ':', 'logger', '=', 'logging', '.', 'getLogger', '(', 'logger', ')', 'new_handlers', '=', '[', ']', 'for', 'handler', 'in', 'logger', '.', 'handlers', ':', '# FileHandler is a subclass of StreamHandler so', "# 'if not a StreamHandler' does not work", 'if', '(', 'isinstance', '(', 'handler', ',', 'logging', '.', 'FileHandler', ')', 'or', 'isinstance', '(', 'handler', ',', 'logging', '.', 'NullHandler', ')', 'or', '(', 'isinstance', '(', 'handler', ',', 'logging', '.', 'Handler', ')', 'and', 'not', 'isinstance', '(', 'handler', ',', 'logging', '.', 'StreamHandler', ')', ')', ')', ':', 'new_handlers', '.', 'append', '(', 'handler', ')', 'logger', '.', 'handlers', '=', 'new_handlers']
Remove only stream handlers from the specified logger :param logger: logging name or object to modify, defaults to root logger
['Remove', 'only', 'stream', 'handlers', 'from', 'the', 'specified', 'logger']
train
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/log.py#L203-L221
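A usage sketch showing that file handlers survive while plain stream handlers are dropped:

    import logging
    log = logging.getLogger('app')
    log.addHandler(logging.StreamHandler())
    log.addHandler(logging.FileHandler('app.log'))
    remove_stream_handlers(log)
    log.handlers                         # only the FileHandler remains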
8,438
mdsol/rwslib
rwslib/extras/audit_event/parser.py
make_int
def make_int(value, missing=-1): """Convert string value to long, '' to missing""" if isinstance(value, six.string_types): if not value.strip(): return missing elif value is None: return missing return int(value)
python
def make_int(value, missing=-1): """Convert string value to long, '' to missing""" if isinstance(value, six.string_types): if not value.strip(): return missing elif value is None: return missing return int(value)
['def', 'make_int', '(', 'value', ',', 'missing', '=', '-', '1', ')', ':', 'if', 'isinstance', '(', 'value', ',', 'six', '.', 'string_types', ')', ':', 'if', 'not', 'value', '.', 'strip', '(', ')', ':', 'return', 'missing', 'elif', 'value', 'is', 'None', ':', 'return', 'missing', 'return', 'int', '(', 'value', ')']
Convert string value to long, '' to missing
['Convert', 'string', 'value', 'to', 'long', 'to', 'missing']
train
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/extras/audit_event/parser.py#L38-L45
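A quick behavior sketch:

    make_int('42')       # -> 42
    make_int('  ')       # -> -1 (blank strings map to the missing value)
    make_int(None, 0)    # -> 0
    make_int(7.9)        # -> 7 (other values go straight through int())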
8,439
gwastro/pycbc
pycbc/population/scale_injections.py
estimate_vt
def estimate_vt(injections, mchirp_sampler, model_pdf, **kwargs): #Try including ifar threshold '''Based on injection strategy and the desired astro model estimate the injected volume. Scale injections and estimate sensitive volume. Parameters ---------- injections: dictionary Dictionary obtained after reading injections from read_injections mchirp_sampler: function Sampler for producing chirp mass samples for the astro model. model_pdf: function The PDF for astro model in mass1-mass2-spin1z-spin2z space. This is easily extendible to include precession kwargs: key words Inputs for thresholds and astrophysical models Returns ------- injection_chunks: dictionary The input dictionary with VT and VT error included with the injections ''' thr_var = kwargs.get('thr_var') thr_val = kwargs.get('thr_val') nsamples = 1000000 #Used to calculate injected astro volume injections = copy.deepcopy(injections) min_z, max_z = injections['z_range'] V = quad(contracted_dVdc, 0., max_z)[0] z_astro = astro_redshifts(min_z, max_z, nsamples) astro_lum_dist = cosmo.luminosity_distance(z_astro).value mch_astro = np.array(mchirp_sampler(nsamples = nsamples, **kwargs)) mch_astro_det = mch_astro * (1. + z_astro) idx_within = np.zeros(nsamples) for key in injections.keys(): if key == 'z_range': # This is repeated down again and is so continue mchirp = injections[key]['chirp_mass'] min_mchirp, max_mchirp = min(mchirp), max(mchirp) distance = injections[key]['distance'] if injections[key]['d_dist'] == 'uniform': d_min, d_max = min(distance), max(distance) elif injections[key]['d_dist'] == 'dchirp': d_fid_min = min(distance / (mchirp/_mch_BNS)**(5/6.)) d_fid_max = max(distance / (mchirp/_mch_BNS)**(5/6.)) d_min = d_fid_min * (mch_astro_det/_mch_BNS)**(5/6.) d_max = d_fid_max * (mch_astro_det/_mch_BNS)**(5/6.) bound = np.sign((max_mchirp-mch_astro_det)*(mch_astro_det-min_mchirp)) bound += np.sign((d_max - astro_lum_dist)*(astro_lum_dist - d_min)) idx = np.where(bound == 2) idx_within[idx] = 1 inj_V0 = 4*np.pi*V*len(idx_within[idx_within == 1])/float(nsamples) injections['inj_astro_vol'] = inj_V0 # Estimate the sensitive volume z_range = injections['z_range'] V_min = quad(contracted_dVdc, 0., z_range[0])[0] V_max = quad(contracted_dVdc, 0., z_range[1])[0] thr_falloff, i_inj, i_det, i_det_sq = [], 0, 0, 0 gps_min, gps_max = 1e15, 0 keys = injections.keys() for key in keys: if key == 'z_range' or key == 'inj_astro_vol': continue data = injections[key] distance = data['distance'] mass1, mass2 = data['mass1'], data['mass2'] spin1z, spin2z = data['spin1z'], data['spin2z'] mchirp = data['chirp_mass'] gps_min = min(gps_min, min(data['end_time'])) gps_max = max(gps_max, max(data['end_time'])) z_inj = dlum_to_z(distance) m1_sc, m2_sc = mass1/(1 + z_inj), mass2/(1 + z_inj) p_out = model_pdf(m1_sc, m2_sc, spin1z, spin2z) p_out *= pdf_z_astro(z_inj, V_min, V_max) p_in = 0 J = cosmo.luminosity_distance(z_inj + 0.0005).value J -= cosmo.luminosity_distance(z_inj - 0.0005).value J = abs(J)/0.001 # A quick way to get dD_l/dz # Sum probability of injections from j-th set for all the strategies for key2 in keys: if key2 == 'z_range' or key2 == 'inj_astro_vol': continue dt_j = injections[key2] dist_j = dt_j['distance'] m1_j, m2_j = dt_j['mass1'], dt_j['mass2'] s1x_2, s2x_2 = dt_j['spin1x'], dt_j['spin2x'] s1y_2, s2y_2 = dt_j['spin1y'], dt_j['spin2y'] s1z_2, s2z_2 = dt_j['spin1z'], dt_j['spin2z'] s1 = np.sqrt(s1x_2**2 + s1y_2**2 + s1z_2**2) s2 = np.sqrt(s2x_2**2 + s2y_2**2 + s2z_2**2) mch_j = dt_j['chirp_mass'] #Get probability density for injections in mass-distance space if dt_j['m_dist'] == 'totalMass': lomass, himass = min(min(m1_j), min(m2_j)), max(max(m1_j), max(m2_j)) lomass_2, himass_2 = lomass, himass elif dt_j['m_dist'] == 'componentMass' or dt_j['m_dist'] == 'log': lomass, himass = min(m1_j), max(m1_j) lomass_2, himass_2 = min(m2_j), max(m2_j) if dt_j['d_dist'] == 'dchirp': l_dist = min(dist_j / (mch_j/_mch_BNS)**(5/6.)) h_dist = max(dist_j / (mch_j/_mch_BNS)**(5/6.)) elif dt_j['d_dist'] == 'uniform': l_dist, h_dist = min(dist_j), max(dist_j) mdist = dt_j['m_dist'] prob_mass = inj_mass_pdf(mdist, mass1, mass2, lomass, himass, lomass_2, himass_2) ddist = dt_j['d_dist'] prob_dist = inj_distance_pdf(ddist, distance, l_dist, h_dist, mchirp) hspin1, hspin2 = max(s1), max(s2) prob_spin = inj_spin_pdf(dt_j['s_dist'], hspin1, spin1z) prob_spin *= inj_spin_pdf(dt_j['s_dist'], hspin2, spin2z) p_in += prob_mass * prob_dist * prob_spin * J * (1 + z_inj)**2 p_in[p_in == 0] = 1e12 p_out_in = p_out/p_in i_inj += np.sum(p_out_in) i_det += np.sum((p_out_in)[data[thr_var] > thr_val]) i_det_sq += np.sum((p_out_in)[data[thr_var] > thr_val]**2) idx_thr = np.where(data[thr_var] > thr_val) thrs = data[thr_var][idx_thr] ratios = p_out_in[idx_thr]/max(p_out_in[idx_thr]) rndn = np.random.uniform(0, 1, len(ratios)) idx_ratio = np.where(ratios > rndn) thr_falloff.append(thrs[idx_ratio]) inj_V0 = injections['inj_astro_vol'] injections['ninj'] = i_inj injections['ndet'] = i_det injections['ndetsq'] = i_det_sq injections['VT'] = ((inj_V0*i_det/i_inj) * (gps_max - gps_min)/31557600) injections['VT_err'] = injections['VT'] * np.sqrt(i_det_sq)/i_det injections['thr_falloff'] = np.hstack(np.array(thr_falloff).flat) return injections
python
def estimate_vt(injections, mchirp_sampler, model_pdf, **kwargs): #Try including ifar threshold '''Based on injection strategy and the desired astro model estimate the injected volume. Scale injections and estimate sensitive volume. Parameters ---------- injections: dictionary Dictionary obtained after reading injections from read_injections mchirp_sampler: function Sampler for producing chirp mass samples for the astro model. model_pdf: function The PDF for astro model in mass1-mass2-spin1z-spin2z space. This is easily extendible to include precession kwargs: key words Inputs for thresholds and astrophysical models Returns ------- injection_chunks: dictionary The input dictionary with VT and VT error included with the injections ''' thr_var = kwargs.get('thr_var') thr_val = kwargs.get('thr_val') nsamples = 1000000 #Used to calculate injected astro volume injections = copy.deepcopy(injections) min_z, max_z = injections['z_range'] V = quad(contracted_dVdc, 0., max_z)[0] z_astro = astro_redshifts(min_z, max_z, nsamples) astro_lum_dist = cosmo.luminosity_distance(z_astro).value mch_astro = np.array(mchirp_sampler(nsamples = nsamples, **kwargs)) mch_astro_det = mch_astro * (1. + z_astro) idx_within = np.zeros(nsamples) for key in injections.keys(): if key == 'z_range': # This is repeated down again and is so continue mchirp = injections[key]['chirp_mass'] min_mchirp, max_mchirp = min(mchirp), max(mchirp) distance = injections[key]['distance'] if injections[key]['d_dist'] == 'uniform': d_min, d_max = min(distance), max(distance) elif injections[key]['d_dist'] == 'dchirp': d_fid_min = min(distance / (mchirp/_mch_BNS)**(5/6.)) d_fid_max = max(distance / (mchirp/_mch_BNS)**(5/6.)) d_min = d_fid_min * (mch_astro_det/_mch_BNS)**(5/6.) d_max = d_fid_max * (mch_astro_det/_mch_BNS)**(5/6.) bound = np.sign((max_mchirp-mch_astro_det)*(mch_astro_det-min_mchirp)) bound += np.sign((d_max - astro_lum_dist)*(astro_lum_dist - d_min)) idx = np.where(bound == 2) idx_within[idx] = 1 inj_V0 = 4*np.pi*V*len(idx_within[idx_within == 1])/float(nsamples) injections['inj_astro_vol'] = inj_V0 # Estimate the sensitive volume z_range = injections['z_range'] V_min = quad(contracted_dVdc, 0., z_range[0])[0] V_max = quad(contracted_dVdc, 0., z_range[1])[0] thr_falloff, i_inj, i_det, i_det_sq = [], 0, 0, 0 gps_min, gps_max = 1e15, 0 keys = injections.keys() for key in keys: if key == 'z_range' or key == 'inj_astro_vol': continue data = injections[key] distance = data['distance'] mass1, mass2 = data['mass1'], data['mass2'] spin1z, spin2z = data['spin1z'], data['spin2z'] mchirp = data['chirp_mass'] gps_min = min(gps_min, min(data['end_time'])) gps_max = max(gps_max, max(data['end_time'])) z_inj = dlum_to_z(distance) m1_sc, m2_sc = mass1/(1 + z_inj), mass2/(1 + z_inj) p_out = model_pdf(m1_sc, m2_sc, spin1z, spin2z) p_out *= pdf_z_astro(z_inj, V_min, V_max) p_in = 0 J = cosmo.luminosity_distance(z_inj + 0.0005).value J -= cosmo.luminosity_distance(z_inj - 0.0005).value J = abs(J)/0.001 # A quick way to get dD_l/dz # Sum probability of injections from j-th set for all the strategies for key2 in keys: if key2 == 'z_range' or key2 == 'inj_astro_vol': continue dt_j = injections[key2] dist_j = dt_j['distance'] m1_j, m2_j = dt_j['mass1'], dt_j['mass2'] s1x_2, s2x_2 = dt_j['spin1x'], dt_j['spin2x'] s1y_2, s2y_2 = dt_j['spin1y'], dt_j['spin2y'] s1z_2, s2z_2 = dt_j['spin1z'], dt_j['spin2z'] s1 = np.sqrt(s1x_2**2 + s1y_2**2 + s1z_2**2) s2 = np.sqrt(s2x_2**2 + s2y_2**2 + s2z_2**2) mch_j = dt_j['chirp_mass'] #Get probability density for injections in mass-distance space if dt_j['m_dist'] == 'totalMass': lomass, himass = min(min(m1_j), min(m2_j)), max(max(m1_j), max(m2_j)) lomass_2, himass_2 = lomass, himass elif dt_j['m_dist'] == 'componentMass' or dt_j['m_dist'] == 'log': lomass, himass = min(m1_j), max(m1_j) lomass_2, himass_2 = min(m2_j), max(m2_j) if dt_j['d_dist'] == 'dchirp': l_dist = min(dist_j / (mch_j/_mch_BNS)**(5/6.)) h_dist = max(dist_j / (mch_j/_mch_BNS)**(5/6.)) elif dt_j['d_dist'] == 'uniform': l_dist, h_dist = min(dist_j), max(dist_j) mdist = dt_j['m_dist'] prob_mass = inj_mass_pdf(mdist, mass1, mass2, lomass, himass, lomass_2, himass_2) ddist = dt_j['d_dist'] prob_dist = inj_distance_pdf(ddist, distance, l_dist, h_dist, mchirp) hspin1, hspin2 = max(s1), max(s2) prob_spin = inj_spin_pdf(dt_j['s_dist'], hspin1, spin1z) prob_spin *= inj_spin_pdf(dt_j['s_dist'], hspin2, spin2z) p_in += prob_mass * prob_dist * prob_spin * J * (1 + z_inj)**2 p_in[p_in == 0] = 1e12 p_out_in = p_out/p_in i_inj += np.sum(p_out_in) i_det += np.sum((p_out_in)[data[thr_var] > thr_val]) i_det_sq += np.sum((p_out_in)[data[thr_var] > thr_val]**2) idx_thr = np.where(data[thr_var] > thr_val) thrs = data[thr_var][idx_thr] ratios = p_out_in[idx_thr]/max(p_out_in[idx_thr]) rndn = np.random.uniform(0, 1, len(ratios)) idx_ratio = np.where(ratios > rndn) thr_falloff.append(thrs[idx_ratio]) inj_V0 = injections['inj_astro_vol'] injections['ninj'] = i_inj injections['ndet'] = i_det injections['ndetsq'] = i_det_sq injections['VT'] = ((inj_V0*i_det/i_inj) * (gps_max - gps_min)/31557600) injections['VT_err'] = injections['VT'] * np.sqrt(i_det_sq)/i_det injections['thr_falloff'] = np.hstack(np.array(thr_falloff).flat) return injections
['def', 'estimate_vt', '(', 'injections', ',', 'mchirp_sampler', ',', 'model_pdf', ',', '*', '*', 'kwargs', ')', ':', '#Try including ifar threshold', 'thr_var', '=', 'kwargs', '.', 'get', '(', "'thr_var'", ')', 'thr_val', '=', 'kwargs', '.', 'get', '(', "'thr_val'", ')', 'nsamples', '=', '1000000', '#Used to calculate injected astro volume', 'injections', '=', 'copy', '.', 'deepcopy', '(', 'injections', ')', 'min_z', ',', 'max_z', '=', 'injections', '[', "'z_range'", ']', 'V', '=', 'quad', '(', 'contracted_dVdc', ',', '0.', ',', 'max_z', ')', '[', '0', ']', 'z_astro', '=', 'astro_redshifts', '(', 'min_z', ',', 'max_z', ',', 'nsamples', ')', 'astro_lum_dist', '=', 'cosmo', '.', 'luminosity_distance', '(', 'z_astro', ')', '.', 'value', 'mch_astro', '=', 'np', '.', 'array', '(', 'mchirp_sampler', '(', 'nsamples', '=', 'nsamples', ',', '*', '*', 'kwargs', ')', ')', 'mch_astro_det', '=', 'mch_astro', '*', '(', '1.', '+', 'z_astro', ')', 'idx_within', '=', 'np', '.', 'zeros', '(', 'nsamples', ')', 'for', 'key', 'in', 'injections', '.', 'keys', '(', ')', ':', 'if', 'key', '==', "'z_range'", ':', '# This is repeated down again and is so', 'continue', 'mchirp', '=', 'injections', '[', 'key', ']', '[', "'chirp_mass'", ']', 'min_mchirp', ',', 'max_mchirp', '=', 'min', '(', 'mchirp', ')', ',', 'max', '(', 'mchirp', ')', 'distance', '=', 'injections', '[', 'key', ']', '[', "'distance'", ']', 'if', 'injections', '[', 'key', ']', '[', "'d_dist'", ']', '==', "'uniform'", ':', 'd_min', ',', 'd_max', '=', 'min', '(', 'distance', ')', ',', 'max', '(', 'distance', ')', 'elif', 'injections', '[', 'key', ']', '[', "'d_dist'", ']', '==', "'dchirp'", ':', 'd_fid_min', '=', 'min', '(', 'distance', '/', '(', 'mchirp', '/', '_mch_BNS', ')', '**', '(', '5', '/', '6.', ')', ')', 'd_fid_max', '=', 'max', '(', 'distance', '/', '(', 'mchirp', '/', '_mch_BNS', ')', '**', '(', '5', '/', '6.', ')', ')', 'd_min', '=', 'd_fid_min', '*', '(', 'mch_astro_det', '/', '_mch_BNS', ')', '**', '(', '5', '/', '6.', ')', 'd_max', '=', 'd_fid_max', '*', '(', 'mch_astro_det', '/', '_mch_BNS', ')', '**', '(', '5', '/', '6.', ')', 'bound', '=', 'np', '.', 'sign', '(', '(', 'max_mchirp', '-', 'mch_astro_det', ')', '*', '(', 'mch_astro_det', '-', 'min_mchirp', ')', ')', 'bound', '+=', 'np', '.', 'sign', '(', '(', 'd_max', '-', 'astro_lum_dist', ')', '*', '(', 'astro_lum_dist', '-', 'd_min', ')', ')', 'idx', '=', 'np', '.', 'where', '(', 'bound', '==', '2', ')', 'idx_within', '[', 'idx', ']', '=', '1', 'inj_V0', '=', '4', '*', 'np', '.', 'pi', '*', 'V', '*', 'len', '(', 'idx_within', '[', 'idx_within', '==', '1', ']', ')', '/', 'float', '(', 'nsamples', ')', 'injections', '[', "'inj_astro_vol'", ']', '=', 'inj_V0', '# Estimate the sensitive volume', 'z_range', '=', 'injections', '[', "'z_range'", ']', 'V_min', '=', 'quad', '(', 'contracted_dVdc', ',', '0.', ',', 'z_range', '[', '0', ']', ')', '[', '0', ']', 'V_max', '=', 'quad', '(', 'contracted_dVdc', ',', '0.', ',', 'z_range', '[', '1', ']', ')', '[', '0', ']', 'thr_falloff', ',', 'i_inj', ',', 'i_det', ',', 'i_det_sq', '=', '[', ']', ',', '0', ',', '0', ',', '0', 'gps_min', ',', 'gps_max', '=', '1e15', ',', '0', 'keys', '=', 'injections', '.', 'keys', '(', ')', 'for', 'key', 'in', 'keys', ':', 'if', 'key', '==', "'z_range'", 'or', 'key', '==', "'inj_astro_vol'", ':', 'continue', 'data', '=', 'injections', '[', 'key', ']', 'distance', '=', 'data', '[', "'distance'", ']', 'mass1', ',', 'mass2', '=', 'data', '[', "'mass1'", ']', ',', 'data', '[', "'mass2'", ']', 'spin1z', ',', 'spin2z', '=', 
'data', '[', "'spin1z'", ']', ',', 'data', '[', "'spin2z'", ']', 'mchirp', '=', 'data', '[', "'chirp_mass'", ']', 'gps_min', '=', 'min', '(', 'gps_min', ',', 'min', '(', 'data', '[', "'end_time'", ']', ')', ')', 'gps_max', '=', 'max', '(', 'gps_max', ',', 'max', '(', 'data', '[', "'end_time'", ']', ')', ')', 'z_inj', '=', 'dlum_to_z', '(', 'distance', ')', 'm1_sc', ',', 'm2_sc', '=', 'mass1', '/', '(', '1', '+', 'z_inj', ')', ',', 'mass2', '/', '(', '1', '+', 'z_inj', ')', 'p_out', '=', 'model_pdf', '(', 'm1_sc', ',', 'm2_sc', ',', 'spin1z', ',', 'spin2z', ')', 'p_out', '*=', 'pdf_z_astro', '(', 'z_inj', ',', 'V_min', ',', 'V_max', ')', 'p_in', '=', '0', 'J', '=', 'cosmo', '.', 'luminosity_distance', '(', 'z_inj', '+', '0.0005', ')', '.', 'value', 'J', '-=', 'cosmo', '.', 'luminosity_distance', '(', 'z_inj', '-', '0.0005', ')', '.', 'value', 'J', '=', 'abs', '(', 'J', ')', '/', '0.001', '# A quick way to get dD_l/dz', '# Sum probability of injections from j-th set for all the strategies', 'for', 'key2', 'in', 'keys', ':', 'if', 'key2', '==', "'z_range'", 'or', 'key2', '==', "'inj_astro_vol'", ':', 'continue', 'dt_j', '=', 'injections', '[', 'key2', ']', 'dist_j', '=', 'dt_j', '[', "'distance'", ']', 'm1_j', ',', 'm2_j', '=', 'dt_j', '[', "'mass1'", ']', ',', 'dt_j', '[', "'mass2'", ']', 's1x_2', ',', 's2x_2', '=', 'dt_j', '[', "'spin1x'", ']', ',', 'dt_j', '[', "'spin2x'", ']', 's1y_2', ',', 's2y_2', '=', 'dt_j', '[', "'spin1y'", ']', ',', 'dt_j', '[', "'spin2y'", ']', 's1z_2', ',', 's2z_2', '=', 'dt_j', '[', "'spin1z'", ']', ',', 'dt_j', '[', "'spin2z'", ']', 's1', '=', 'np', '.', 'sqrt', '(', 's1x_2', '**', '2', '+', 's1y_2', '**', '2', '+', 's1z_2', '**', '2', ')', 's2', '=', 'np', '.', 'sqrt', '(', 's2x_2', '**', '2', '+', 's2y_2', '**', '2', '+', 's2z_2', '**', '2', ')', 'mch_j', '=', 'dt_j', '[', "'chirp_mass'", ']', '#Get probability density for injections in mass-distance space', 'if', 'dt_j', '[', "'m_dist'", ']', '==', "'totalMass'", ':', 'lomass', ',', 'himass', '=', 'min', '(', 'min', '(', 'm1_j', ')', ',', 'min', '(', 'm2_j', ')', ',', 'max', '(', 'max', '(', 'm1_j', ')', ',', 'max', '(', 'm2_j', ')', ')', ')', 'lomass_2', ',', 'himass_2', '=', 'lomass', ',', 'himass', 'elif', 'dt_j', '[', "'m_dist'", ']', '==', "'componentMass'", 'or', 'dt_j', '[', "'m_dist'", ']', '==', "'log'", ':', 'lomass', ',', 'himass', '=', 'min', '(', 'm1_j', ')', ',', 'max', '(', 'm1_j', ')', 'lomass_2', ',', 'himass_2', '=', 'min', '(', 'm2_j', ')', ',', 'max', '(', 'm2_j', ')', 'if', 'dt_j', '[', "'d_dist'", ']', '==', "'dchirp'", ':', 'l_dist', '=', 'min', '(', 'dist_j', '/', '(', 'mch_j', '/', '_mch_BNS', ')', '**', '(', '5', '/', '6.', ')', ')', 'h_dist', '=', 'max', '(', 'dist_j', '/', '(', 'mch_j', '/', '_mch_BNS', ')', '**', '(', '5', '/', '6.', ')', ')', 'elif', 'dt_j', '[', "'d_dist'", ']', '==', "'uniform'", ':', 'l_dist', ',', 'h_dist', '=', 'min', '(', 'dist_j', ')', ',', 'max', '(', 'dist_j', ')', 'mdist', '=', 'dt_j', '[', "'m_dist'", ']', 'prob_mass', '=', 'inj_mass_pdf', '(', 'mdist', ',', 'mass1', ',', 'mass2', ',', 'lomass', ',', 'himass', ',', 'lomass_2', ',', 'himass_2', ')', 'ddist', '=', 'dt_j', '[', "'d_dist'", ']', 'prob_dist', '=', 'inj_distance_pdf', '(', 'ddist', ',', 'distance', ',', 'l_dist', ',', 'h_dist', ',', 'mchirp', ')', 'hspin1', ',', 'hspin2', '=', 'max', '(', 's1', ')', ',', 'max', '(', 's2', ')', 'prob_spin', '=', 'inj_spin_pdf', '(', 'dt_j', '[', "'s_dist'", ']', ',', 'hspin1', ',', 'spin1z', ')', 'prob_spin', '*=', 'inj_spin_pdf', '(', 'dt_j', '[', 
"'s_dist'", ']', ',', 'hspin2', ',', 'spin2z', ')', 'p_in', '+=', 'prob_mass', '*', 'prob_dist', '*', 'prob_spin', '*', 'J', '*', '(', '1', '+', 'z_inj', ')', '**', '2', 'p_in', '[', 'p_in', '==', '0', ']', '=', '1e12', 'p_out_in', '=', 'p_out', '/', 'p_in', 'i_inj', '+=', 'np', '.', 'sum', '(', 'p_out_in', ')', 'i_det', '+=', 'np', '.', 'sum', '(', '(', 'p_out_in', ')', '[', 'data', '[', 'thr_var', ']', '>', 'thr_val', ']', ')', 'i_det_sq', '+=', 'np', '.', 'sum', '(', '(', 'p_out_in', ')', '[', 'data', '[', 'thr_var', ']', '>', 'thr_val', ']', '**', '2', ')', 'idx_thr', '=', 'np', '.', 'where', '(', 'data', '[', 'thr_var', ']', '>', 'thr_val', ')', 'thrs', '=', 'data', '[', 'thr_var', ']', '[', 'idx_thr', ']', 'ratios', '=', 'p_out_in', '[', 'idx_thr', ']', '/', 'max', '(', 'p_out_in', '[', 'idx_thr', ']', ')', 'rndn', '=', 'np', '.', 'random', '.', 'uniform', '(', '0', ',', '1', ',', 'len', '(', 'ratios', ')', ')', 'idx_ratio', '=', 'np', '.', 'where', '(', 'ratios', '>', 'rndn', ')', 'thr_falloff', '.', 'append', '(', 'thrs', '[', 'idx_ratio', ']', ')', 'inj_V0', '=', 'injections', '[', "'inj_astro_vol'", ']', 'injections', '[', "'ninj'", ']', '=', 'i_inj', 'injections', '[', "'ndet'", ']', '=', 'i_det', 'injections', '[', "'ndetsq'", ']', '=', 'i_det_sq', 'injections', '[', "'VT'", ']', '=', '(', '(', 'inj_V0', '*', 'i_det', '/', 'i_inj', ')', '*', '(', 'gps_max', '-', 'gps_min', ')', '/', '31557600', ')', 'injections', '[', "'VT_err'", ']', '=', 'injections', '[', "'VT'", ']', '*', 'np', '.', 'sqrt', '(', 'i_det_sq', ')', '/', 'i_det', 'injections', '[', "'thr_falloff'", ']', '=', 'np', '.', 'hstack', '(', 'np', '.', 'array', '(', 'thr_falloff', ')', '.', 'flat', ')', 'return', 'injections']
Based on injection strategy and the desired astro model estimate the injected volume. Scale injections and estimate sensitive volume. Parameters ---------- injections: dictionary Dictionary obtained after reading injections from read_injections mchirp_sampler: function Sampler for producing chirp mass samples for the astro model. model_pdf: function The PDF for astro model in mass1-mass2-spin1z-spin2z space. This is easily extendible to include precession kwargs: key words Inputs for thresholds and astrophysical models Returns ------- injection_chunks: dictionary The input dictionary with VT and VT error included with the injections
['Based', 'on', 'injection', 'strategy', 'and', 'the', 'desired', 'astro', 'model', 'estimate', 'the', 'injected', 'volume', '.', 'Scale', 'injections', 'and', 'estimate', 'sensitive', 'volume', '.']
train
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/population/scale_injections.py#L73-L236
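One detail worth flagging in the record above: in the 'totalMass' branch, `lomass, himass = min(min(m1_j), min(m2_j), max(max(m1_j), max(m2_j)))` unpacks the result of a single min(...) call, which is a scalar. A minimal reproduction follows, together with the bounds the surrounding logic appears to intend (an inference, not a confirmed upstream fix):

import numpy as np

# Minimal reproduction of the 'totalMass' branch: min(a, b, c) returns a
# single scalar, so unpacking it into two names raises at runtime.
m1_j = np.array([5.0, 10.0])
m2_j = np.array([3.0, 8.0])
try:
    lomass, himass = min(min(m1_j), min(m2_j), max(max(m1_j), max(m2_j)))
except TypeError as err:
    print("original line raises:", err)

# Assumed intent: lower bound from the component minima, upper bound from
# the component maxima (inferred from the surrounding code).
lomass, himass = min(min(m1_j), min(m2_j)), max(max(m1_j), max(m2_j))
print(lomass, himass)  # 3.0 10.0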
8,440
learningequality/iceqube
src/iceqube/storage/backends/inmem.py
StorageBackend.set_sqlite_pragmas
def set_sqlite_pragmas(self): """ Sets the connection PRAGMAs for the sqlalchemy engine stored in self.engine. It currently sets: - journal_mode to WAL :return: None """ def _pragmas_on_connect(dbapi_con, con_record): dbapi_con.execute("PRAGMA journal_mode = WAL;") event.listen(self.engine, "connect", _pragmas_on_connect)
python
def set_sqlite_pragmas(self): """ Sets the connection PRAGMAs for the sqlalchemy engine stored in self.engine. It currently sets: - journal_mode to WAL :return: None """ def _pragmas_on_connect(dbapi_con, con_record): dbapi_con.execute("PRAGMA journal_mode = WAL;") event.listen(self.engine, "connect", _pragmas_on_connect)
['def', 'set_sqlite_pragmas', '(', 'self', ')', ':', 'def', '_pragmas_on_connect', '(', 'dbapi_con', ',', 'con_record', ')', ':', 'dbapi_con', '.', 'execute', '(', '"PRAGMA journal_mode = WAL;"', ')', 'event', '.', 'listen', '(', 'self', '.', 'engine', ',', '"connect"', ',', '_pragmas_on_connect', ')']
Sets the connection PRAGMAs for the sqlalchemy engine stored in self.engine. It currently sets: - journal_mode to WAL :return: None
['Sets', 'the', 'connection', 'PRAGMAs', 'for', 'the', 'sqlalchemy', 'engine', 'stored', 'in', 'self', '.', 'engine', '.']
train
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/storage/backends/inmem.py#L78-L91
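A self-contained sketch of the same pattern outside the class, assuming a throwaway SQLite file path; wrapping the query in `text()` keeps it portable across SQLAlchemy 1.x and 2.0:

from sqlalchemy import create_engine, event, text

# Illustrative on-disk path; any SQLite URL behaves the same way.
engine = create_engine("sqlite:////tmp/jobs.db")

def _pragmas_on_connect(dbapi_con, con_record):
    # Runs once per new DBAPI connection, before it is handed to callers.
    dbapi_con.execute("PRAGMA journal_mode = WAL;")

event.listen(engine, "connect", _pragmas_on_connect)

with engine.connect() as conn:
    # The PRAGMA has already been applied to this connection.
    print(conn.execute(text("SELECT 1")).scalar())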
8,441
jpablo128/simplystatic
simplystatic/s2site.py
Site._set_directories
def _set_directories(self): '''Initialize variables based on evidence about the directories.''' if self._dirs['initial'] == None: self._dirs['base'] = discover_base_dir(self._dirs['run']) else: self._dirs['base'] = discover_base_dir(self._dirs['initial']) # now, if 'base' is None (no base directory was found) then the only # allowed operation is init self._update_dirs_on_base() # we might have set the directory variables fine, but the tree # might not exist yet. _tree_ready is a flag for that. self._tree_ready = verify_dir_structure(self._dirs['base']) if self._tree_ready: self._read_site_config()
python
def _set_directories(self): '''Initialize variables based on evidence about the directories.''' if self._dirs['initial'] == None: self._dirs['base'] = discover_base_dir(self._dirs['run']) else: self._dirs['base'] = discover_base_dir(self._dirs['initial']) # now, if 'base' is None (no base directory was found) then the only # allowed operation is init self._update_dirs_on_base() # we might have set the directory variables fine, but the tree # might not exist yet. _tree_ready is a flag for that. self._tree_ready = verify_dir_structure(self._dirs['base']) if self._tree_ready: self._read_site_config()
['def', '_set_directories', '(', 'self', ')', ':', 'if', 'self', '.', '_dirs', '[', "'initial'", ']', '==', 'None', ':', 'self', '.', '_dirs', '[', "'base'", ']', '=', 'discover_base_dir', '(', 'self', '.', '_dirs', '[', "'run'", ']', ')', 'else', ':', 'self', '.', '_dirs', '[', "'base'", ']', '=', 'discover_base_dir', '(', 'self', '.', '_dirs', '[', "'initial'", ']', ')', "# now, if 'base' is None (no base directory was found) then the only", '# allowed operation is init ', 'self', '.', '_update_dirs_on_base', '(', ')', '# we might have set the directory variables fine, but the tree', '# might not exist yet. _tree_ready is a flag for that.', 'self', '.', '_tree_ready', '=', 'verify_dir_structure', '(', 'self', '.', '_dirs', '[', "'base'", ']', ')', 'if', 'self', '.', '_tree_ready', ':', 'self', '.', '_read_site_config', '(', ')']
Initialize variables based on evidence about the directories.
['Initialize', 'variables', 'based', 'on', 'evidence', 'about', 'the', 'directories', '.']
train
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/simplystatic/s2site.py#L207-L220
8,442
6809/MC6809
MC6809/components/mc6809_ops_logic.py
OpsLogicalMixin.LSL
def LSL(self, a): """ Shifts all bits of accumulator A or B or memory location M one place to the left. Bit zero is loaded with a zero. Bit seven of accumulator A or B or memory location M is shifted into the C (carry) bit. This is a duplicate assembly-language mnemonic for the single machine instruction ASL. source code forms: LSL Q; LSLA; LSLB CC bits "HNZVC": naaas """ r = a << 1 self.clear_NZVC() self.update_NZVC_8(a, a, r) return r
python
def LSL(self, a): """ Shifts all bits of accumulator A or B or memory location M one place to the left. Bit zero is loaded with a zero. Bit seven of accumulator A or B or memory location M is shifted into the C (carry) bit. This is a duplicate assembly-language mnemonic for the single machine instruction ASL. source code forms: LSL Q; LSLA; LSLB CC bits "HNZVC": naaas """ r = a << 1 self.clear_NZVC() self.update_NZVC_8(a, a, r) return r
['def', 'LSL', '(', 'self', ',', 'a', ')', ':', 'r', '=', 'a', '<<', '1', 'self', '.', 'clear_NZVC', '(', ')', 'self', '.', 'update_NZVC_8', '(', 'a', ',', 'a', ',', 'r', ')', 'return', 'r']
Shifts all bits of accumulator A or B or memory location M one place to the left. Bit zero is loaded with a zero. Bit seven of accumulator A or B or memory location M is shifted into the C (carry) bit. This is a duplicate assembly-language mnemonic for the single machine instruction ASL. source code forms: LSL Q; LSLA; LSLB CC bits "HNZVC": naaas
['Shifts', 'all', 'bits', 'of', 'accumulator', 'A', 'or', 'B', 'or', 'memory', 'location', 'M', 'one', 'place', 'to', 'the', 'left', '.', 'Bit', 'zero', 'is', 'loaded', 'with', 'a', 'zero', '.', 'Bit', 'seven', 'of', 'accumulator', 'A', 'or', 'B', 'or', 'memory', 'location', 'M', 'is', 'shifted', 'into', 'the', 'C', '(', 'carry', ')', 'bit', '.']
train
https://github.com/6809/MC6809/blob/6ba2f5106df46689017b5d0b6d84d43b7ee6a240/MC6809/components/mc6809_ops_logic.py#L153-L169
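A standalone sketch of the 8-bit shift semantics the docstring describes, with the condition-code updates written out explicitly (the helper name is illustrative; the flag rules follow the usual 6809 ASL conventions):

def lsl8(a):
    # Shift left within 8 bits; bit 7 of the operand becomes the carry,
    # bit 0 is filled with zero.
    r = (a << 1) & 0xFF
    carry = (a >> 7) & 1
    negative = (r >> 7) & 1
    zero = int(r == 0)
    overflow = carry ^ negative  # V = C xor N for a one-bit left shift
    return r, {'N': negative, 'Z': zero, 'V': overflow, 'C': carry}

print(lsl8(0b11000001))  # (130, {'N': 1, 'Z': 0, 'V': 0, 'C': 1})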
8,443
NoneGG/aredis
aredis/commands/sorted_set.py
SortedSetCommandMixin.zrange
async def zrange(self, name, start, end, desc=False, withscores=False,
                 score_cast_func=float):
    """
    Return a range of values from sorted set ``name`` between
    ``start`` and ``end`` sorted in ascending order.

    ``start`` and ``end`` can be negative, indicating the end of the range.

    ``desc`` a boolean indicating whether to sort the results in descending
    order

    ``withscores`` indicates to return the scores along with the values.
    The return type is a list of (value, score) pairs

    ``score_cast_func`` a callable used to cast the score return value
    """
    if desc:
        return await self.zrevrange(name, start, end, withscores,
                                    score_cast_func)
    pieces = ['ZRANGE', name, start, end]
    if withscores:
        pieces.append(b('WITHSCORES'))
    options = {
        'withscores': withscores,
        'score_cast_func': score_cast_func
    }
    return await self.execute_command(*pieces, **options)
python
async def zrange(self, name, start, end, desc=False, withscores=False,
                 score_cast_func=float):
    """
    Return a range of values from sorted set ``name`` between
    ``start`` and ``end`` sorted in ascending order.

    ``start`` and ``end`` can be negative, indicating the end of the range.

    ``desc`` a boolean indicating whether to sort the results in descending
    order

    ``withscores`` indicates to return the scores along with the values.
    The return type is a list of (value, score) pairs

    ``score_cast_func`` a callable used to cast the score return value
    """
    if desc:
        return await self.zrevrange(name, start, end, withscores,
                                    score_cast_func)
    pieces = ['ZRANGE', name, start, end]
    if withscores:
        pieces.append(b('WITHSCORES'))
    options = {
        'withscores': withscores,
        'score_cast_func': score_cast_func
    }
    return await self.execute_command(*pieces, **options)
['async', 'def', 'zrange', '(', 'self', ',', 'name', ',', 'start', ',', 'end', ',', 'desc', '=', 'False', ',', 'withscores', '=', 'False', ',', 'score_cast_func', '=', 'float', ')', ':', 'if', 'desc', ':', 'return', 'await', 'self', '.', 'zrevrange', '(', 'name', ',', 'start', ',', 'end', ',', 'withscores', ',', 'score_cast_func', ')', 'pieces', '=', '[', "'ZRANGE'", ',', 'name', ',', 'start', ',', 'end', ']', 'if', 'withscores', ':', 'pieces', '.', 'append', '(', 'b', '(', "'WITHSCORES'", ')', ')', 'options', '=', '{', "'withscores'", ':', 'withscores', ',', "'score_cast_func'", ':', 'score_cast_func', '}', 'return', 'await', 'self', '.', 'execute_command', '(', '*', 'pieces', ',', '*', '*', 'options', ')']
Return a range of values from sorted set ``name`` between
``start`` and ``end`` sorted in ascending order.

``start`` and ``end`` can be negative, indicating the end of the range.

``desc`` a boolean indicating whether to sort the results in descending
order

``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs

``score_cast_func`` a callable used to cast the score return value
['Return', 'a', 'range', 'of', 'values', 'from', 'sorted', 'set', 'name', 'between', 'start', 'and', 'end', 'sorted', 'in', 'ascending', 'order', '.']
train
https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/commands/sorted_set.py#L142-L167
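A hedged usage sketch for the coroutine above; it assumes a reachable Redis on the default port and an existing sorted set named "scores" (both illustrative):

import asyncio
import aredis

async def main():
    client = aredis.StrictRedis(host="127.0.0.1", port=6379)
    # Fetch the whole set with scores; score_cast_func defaults to float.
    top = await client.zrange("scores", 0, -1, withscores=True)
    print(top)  # list of (member, score) pairs

asyncio.get_event_loop().run_until_complete(main())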
8,444
aarongarrett/inspyred
inspyred/ec/replacers.py
plus_replacement
def plus_replacement(random, population, parents, offspring, args): """Performs "plus" replacement. This function performs "plus" replacement, which means that the entire existing population is replaced by the best population-many elements from the combined set of parents and offspring. .. Arguments: random -- the random number generator object population -- the population of individuals parents -- the list of parent individuals offspring -- the list of offspring individuals args -- a dictionary of keyword arguments """ pool = list(offspring) pool.extend(parents) pool.sort(reverse=True) survivors = pool[:len(population)] return survivors
python
def plus_replacement(random, population, parents, offspring, args): """Performs "plus" replacement. This function performs "plus" replacement, which means that the entire existing population is replaced by the best population-many elements from the combined set of parents and offspring. .. Arguments: random -- the random number generator object population -- the population of individuals parents -- the list of parent individuals offspring -- the list of offspring individuals args -- a dictionary of keyword arguments """ pool = list(offspring) pool.extend(parents) pool.sort(reverse=True) survivors = pool[:len(population)] return survivors
['def', 'plus_replacement', '(', 'random', ',', 'population', ',', 'parents', ',', 'offspring', ',', 'args', ')', ':', 'pool', '=', 'list', '(', 'offspring', ')', 'pool', '.', 'extend', '(', 'parents', ')', 'pool', '.', 'sort', '(', 'reverse', '=', 'True', ')', 'survivors', '=', 'pool', '[', ':', 'len', '(', 'population', ')', ']', 'return', 'survivors']
Performs "plus" replacement. This function performs "plus" replacement, which means that the entire existing population is replaced by the best population-many elements from the combined set of parents and offspring. .. Arguments: random -- the random number generator object population -- the population of individuals parents -- the list of parent individuals offspring -- the list of offspring individuals args -- a dictionary of keyword arguments
['Performs', 'plus', 'replacement', '.', 'This', 'function', 'performs', 'plus', 'replacement', 'which', 'means', 'that', 'the', 'entire', 'existing', 'population', 'is', 'replaced', 'by', 'the', 'best', 'population', '-', 'many', 'elements', 'from', 'the', 'combined', 'set', 'of', 'parents', 'and', 'offspring', '.', '..', 'Arguments', ':', 'random', '--', 'the', 'random', 'number', 'generator', 'object', 'population', '--', 'the', 'population', 'of', 'individuals', 'parents', '--', 'the', 'list', 'of', 'parent', 'individuals', 'offspring', '--', 'the', 'list', 'of', 'offspring', 'individuals', 'args', '--', 'a', 'dictionary', 'of', 'keyword', 'arguments']
train
https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/replacers.py#L171-L191
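Because the function only sorts and slices, any comparable individuals will do for a quick demonstration; plain floats stand in for fitness-bearing individuals here:

import random
from inspyred.ec.replacers import plus_replacement

population = [0.1, 0.4, 0.2]   # current individuals (anything sortable)
parents = [0.4, 0.2]
offspring = [0.9, 0.05, 0.3]

# The best len(population) individuals out of parents + offspring survive.
survivors = plus_replacement(random.Random(), population, parents, offspring, {})
print(survivors)  # [0.9, 0.4, 0.3]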
8,445
mitsei/dlkit
dlkit/aws_adapter/repository/sessions.py
AssetCompositionDesignSession.remove_asset
def remove_asset(self, asset_id, composition_id):
    """Removes an ``Asset`` from a ``Composition``.

    arg:    asset_id (osid.id.Id): ``Id`` of the ``Asset``
    arg:    composition_id (osid.id.Id): ``Id`` of the ``Composition``
    raise:  NotFound - ``asset_id`` ``not found in composition_id``
    raise:  NullArgument - ``asset_id`` or ``composition_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    self._provider_session.remove_asset(self, asset_id, composition_id)
python
def remove_asset(self, asset_id, composition_id):
    """Removes an ``Asset`` from a ``Composition``.

    arg:    asset_id (osid.id.Id): ``Id`` of the ``Asset``
    arg:    composition_id (osid.id.Id): ``Id`` of the ``Composition``
    raise:  NotFound - ``asset_id`` ``not found in composition_id``
    raise:  NullArgument - ``asset_id`` or ``composition_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    self._provider_session.remove_asset(self, asset_id, composition_id)
['def', 'remove_asset', '(', 'self', ',', 'asset_id', ',', 'composition_id', ')', ':', 'self', '.', '_provider_session', '.', 'remove_asset', '(', 'self', ',', 'asset_id', ',', 'composition_id', ')']
Removes an ``Asset`` from a ``Composition``.

arg:    asset_id (osid.id.Id): ``Id`` of the ``Asset``
arg:    composition_id (osid.id.Id): ``Id`` of the ``Composition``
raise:  NotFound - ``asset_id`` ``not found in composition_id``
raise:  NullArgument - ``asset_id`` or ``composition_id`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
['Removes', 'an', 'Asset', 'from', 'a', 'Composition', '.']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/aws_adapter/repository/sessions.py#L2116-L2130
8,446
JarryShaw/PyPCAPKit
src/protocols/internet/internet.py
Internet._read_protos
def _read_protos(self, size): """Read next layer protocol type. Positional arguments: * size -- int, buffer size Returns: * str -- next layer's protocol name """ _byte = self._read_unpack(size) _prot = TP_PROTO.get(_byte) return _prot
python
def _read_protos(self, size): """Read next layer protocol type. Positional arguments: * size -- int, buffer size Returns: * str -- next layer's protocol name """ _byte = self._read_unpack(size) _prot = TP_PROTO.get(_byte) return _prot
['def', '_read_protos', '(', 'self', ',', 'size', ')', ':', '_byte', '=', 'self', '.', '_read_unpack', '(', 'size', ')', '_prot', '=', 'TP_PROTO', '.', 'get', '(', '_byte', ')', 'return', '_prot']
Read next layer protocol type. Positional arguments: * size -- int, buffer size Returns: * str -- next layer's protocol name
['Read', 'next', 'layer', 'protocol', 'type', '.']
train
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/internet/internet.py#L63-L75
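The method is a table lookup over one unpacked byte. A standalone sketch with a hypothetical, heavily abbreviated protocol table (the real TP_PROTO registry in PyPCAPKit is far larger):

import struct

# Hypothetical, abbreviated protocol table.
TP_PROTO = {1: 'ICMP', 6: 'TCP', 17: 'UDP'}

def read_proto(buf):
    # Unpack one unsigned byte and map it to a protocol name, if known.
    (byte,) = struct.unpack('!B', buf[:1])
    return TP_PROTO.get(byte)

print(read_proto(b'\x06'))  # 'TCP'
print(read_proto(b'\x2a'))  # None for unregistered protocol numbers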
8,447
AustralianSynchrotron/lightflow
lightflow/models/parameters.py
Parameters.consolidate
def consolidate(self, args):
    """ Consolidate the provided arguments.

    If the provided arguments have matching options, this performs
    a type conversion. For any option that has a default value and
    is not present in the provided arguments, the default value is
    added.

    Args:
        args (dict): A dictionary of the provided arguments.

    Returns:
        dict: A dictionary of the arguments with values type-converted
              and missing options filled in with their defaults.
    """
    result = dict(args)
    for opt in self:
        if opt.name in result:
            result[opt.name] = opt.convert(result[opt.name])
        else:
            if opt.default is not None:
                result[opt.name] = opt.convert(opt.default)

    return result
python
def consolidate(self, args):
    """ Consolidate the provided arguments.

    If the provided arguments have matching options, this performs
    a type conversion. For any option that has a default value and
    is not present in the provided arguments, the default value is
    added.

    Args:
        args (dict): A dictionary of the provided arguments.

    Returns:
        dict: A dictionary of the arguments with values type-converted
              and missing options filled in with their defaults.
    """
    result = dict(args)
    for opt in self:
        if opt.name in result:
            result[opt.name] = opt.convert(result[opt.name])
        else:
            if opt.default is not None:
                result[opt.name] = opt.convert(opt.default)

    return result
['def', 'consolidate', '(', 'self', ',', 'args', ')', ':', 'result', '=', 'dict', '(', 'args', ')', 'for', 'opt', 'in', 'self', ':', 'if', 'opt', '.', 'name', 'in', 'result', ':', 'result', '[', 'opt', '.', 'name', ']', '=', 'opt', '.', 'convert', '(', 'result', '[', 'opt', '.', 'name', ']', ')', 'else', ':', 'if', 'opt', '.', 'default', 'is', 'not', 'None', ':', 'result', '[', 'opt', '.', 'name', ']', '=', 'opt', '.', 'convert', '(', 'opt', '.', 'default', ')', 'return', 'result']
Consolidate the provided arguments.

If the provided arguments have matching options, this performs
a type conversion. For any option that has a default value and
is not present in the provided arguments, the default value is
added.

Args:
    args (dict): A dictionary of the provided arguments.

Returns:
    dict: A dictionary of the arguments with values type-converted
          and missing options filled in with their defaults.
['Consolidate', 'the', 'provided', 'arguments', '.']
train
https://github.com/AustralianSynchrotron/lightflow/blob/dc53dbc1d961e20fb144273baca258060705c03e/lightflow/models/parameters.py#L116-L139
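A self-contained sketch of the consolidation logic, with a namedtuple standing in for lightflow's real option objects (only the three attributes consolidate() touches are modeled):

from collections import namedtuple

# Stand-in for the real option class: only the attributes used here.
Option = namedtuple('Option', ['name', 'default', 'convert'])

def consolidate(options, args):
    result = dict(args)
    for opt in options:
        if opt.name in result:
            result[opt.name] = opt.convert(result[opt.name])
        elif opt.default is not None:
            result[opt.name] = opt.convert(opt.default)
    return result

opts = [Option('retries', 3, int), Option('label', None, str)]
print(consolidate(opts, {'retries': '5'}))  # {'retries': 5}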
8,448
IBMStreams/pypi.streamsx
streamsx/scripts/extract.py
_Extractor._copy_globalization_resources
def _copy_globalization_resources(self): '''Copy the language resource files for python api functions This function copies the TopologySplpy Resource files from Topology toolkit directory into the impl/nl folder of the project. Returns: the list with the copied locale strings''' rootDir = os.path.join(_topology_tk_dir(), "impl", "nl") languageList = [] for dirName in os.listdir(rootDir): srcDir = os.path.join(_topology_tk_dir(), "impl", "nl", dirName) if (os.path.isdir(srcDir)) and (dirName != "include"): dstDir = os.path.join(self._tk_dir, "impl", "nl", dirName) try: print("Copy globalization resources " + dirName) os.makedirs(dstDir) except OSError as e: if (e.errno == 17) and (os.path.isdir(dstDir)): if self._cmd_args.verbose: print("Directory", dstDir, "exists") else: raise srcFile = os.path.join(srcDir, "TopologySplpyResource.xlf") if os.path.isfile(srcFile): res = shutil.copy2(srcFile, dstDir) languageList.append(dirName) if self._cmd_args.verbose: print("Written: " + res) return languageList
python
def _copy_globalization_resources(self): '''Copy the language resource files for python api functions This function copies the TopologySplpy Resource files from Topology toolkit directory into the impl/nl folder of the project. Returns: the list with the copied locale strings''' rootDir = os.path.join(_topology_tk_dir(), "impl", "nl") languageList = [] for dirName in os.listdir(rootDir): srcDir = os.path.join(_topology_tk_dir(), "impl", "nl", dirName) if (os.path.isdir(srcDir)) and (dirName != "include"): dstDir = os.path.join(self._tk_dir, "impl", "nl", dirName) try: print("Copy globalization resources " + dirName) os.makedirs(dstDir) except OSError as e: if (e.errno == 17) and (os.path.isdir(dstDir)): if self._cmd_args.verbose: print("Directory", dstDir, "exists") else: raise srcFile = os.path.join(srcDir, "TopologySplpyResource.xlf") if os.path.isfile(srcFile): res = shutil.copy2(srcFile, dstDir) languageList.append(dirName) if self._cmd_args.verbose: print("Written: " + res) return languageList
['def', '_copy_globalization_resources', '(', 'self', ')', ':', 'rootDir', '=', 'os', '.', 'path', '.', 'join', '(', '_topology_tk_dir', '(', ')', ',', '"impl"', ',', '"nl"', ')', 'languageList', '=', '[', ']', 'for', 'dirName', 'in', 'os', '.', 'listdir', '(', 'rootDir', ')', ':', 'srcDir', '=', 'os', '.', 'path', '.', 'join', '(', '_topology_tk_dir', '(', ')', ',', '"impl"', ',', '"nl"', ',', 'dirName', ')', 'if', '(', 'os', '.', 'path', '.', 'isdir', '(', 'srcDir', ')', ')', 'and', '(', 'dirName', '!=', '"include"', ')', ':', 'dstDir', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', '_tk_dir', ',', '"impl"', ',', '"nl"', ',', 'dirName', ')', 'try', ':', 'print', '(', '"Copy globalization resources "', '+', 'dirName', ')', 'os', '.', 'makedirs', '(', 'dstDir', ')', 'except', 'OSError', 'as', 'e', ':', 'if', '(', 'e', '.', 'errno', '==', '17', ')', 'and', '(', 'os', '.', 'path', '.', 'isdir', '(', 'dstDir', ')', ')', ':', 'if', 'self', '.', '_cmd_args', '.', 'verbose', ':', 'print', '(', '"Directory"', ',', 'dstDir', ',', '"exists"', ')', 'else', ':', 'raise', 'srcFile', '=', 'os', '.', 'path', '.', 'join', '(', 'srcDir', ',', '"TopologySplpyResource.xlf"', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'srcFile', ')', ':', 'res', '=', 'shutil', '.', 'copy2', '(', 'srcFile', ',', 'dstDir', ')', 'languageList', '.', 'append', '(', 'dirName', ')', 'if', 'self', '.', '_cmd_args', '.', 'verbose', ':', 'print', '(', '"Written: "', '+', 'res', ')', 'return', 'languageList']
Copy the language resource files for python api functions This function copies the TopologySplpy Resource files from Topology toolkit directory into the impl/nl folder of the project. Returns: the list with the copied locale strings
['Copy', 'the', 'language', 'resource', 'files', 'for', 'python', 'api', 'functions', 'This', 'function', 'copies', 'the', 'TopologySplpy', 'Resource', 'files', 'from', 'Topology', 'toolkit', 'directory', 'into', 'the', 'impl', '/', 'nl', 'folder', 'of', 'the', 'project', '.', 'Returns', ':', 'the', 'list', 'with', 'the', 'copied', 'locale', 'strings']
train
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/scripts/extract.py#L219-L246
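The try/except around errno 17 predates `exist_ok`; on Python 3.2+ the same create-if-missing behavior is a single call, as this small sketch shows (the locale directory name is illustrative):

import os
import tempfile

dst = os.path.join(tempfile.gettempdir(), "nl", "de_DE")  # illustrative path
os.makedirs(dst, exist_ok=True)
os.makedirs(dst, exist_ok=True)  # second call is a no-op instead of OSError 17
print(os.path.isdir(dst))        # True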
8,449
coghost/izen
izen/chaos.py
Chaos.encrypt
def encrypt(self, plain, algorithm='md5'):
    """
    Simple wrapper around the standard hashing functions

    :param plain: content to be hashed
    :type plain: ``str/bytes``
    :param algorithm: hash algorithm
    :type algorithm: str
    :return:
    :rtype:
    """
    plain = helper.to_bytes(plain)
    return getattr(hashlib, algorithm)(plain).hexdigest()
python
def encrypt(self, plain, algorithm='md5'):
    """
    Simple wrapper around the standard hashing functions

    :param plain: content to be hashed
    :type plain: ``str/bytes``
    :param algorithm: hash algorithm
    :type algorithm: str
    :return:
    :rtype:
    """
    plain = helper.to_bytes(plain)
    return getattr(hashlib, algorithm)(plain).hexdigest()
['def', 'encrypt', '(', 'self', ',', 'plain', ',', 'algorithm', '=', "'md5'", ')', ':', 'plain', '=', 'helper', '.', 'to_bytes', '(', 'plain', ')', 'return', 'getattr', '(', 'hashlib', ',', 'algorithm', ')', '(', 'plain', ')', '.', 'hexdigest', '(', ')']
Simple wrapper around the standard hashing functions

:param plain: content to be hashed
:type plain: ``str/bytes``
:param algorithm: hash algorithm
:type algorithm: str
:return:
:rtype:
['Simple', 'wrapper', 'around', 'the', 'standard', 'hashing', 'functions']
train
https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/chaos.py#L48-L59
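The wrapper is getattr-dispatch over hashlib; the same pattern works standalone for any constructor name hashlib exposes (the str-to-bytes line stands in for helper.to_bytes):

import hashlib

def digest(plain, algorithm='md5'):
    if isinstance(plain, str):
        plain = plain.encode('utf-8')  # stand-in for helper.to_bytes
    return getattr(hashlib, algorithm)(plain).hexdigest()

print(digest('hello'))                 # 5d41402abc4b2a76b9719d911017c592
print(digest('hello', 'sha256')[:16])  # 2cf24dba5fb0a30e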
8,450
twilio/twilio-python
twilio/rest/autopilot/v1/assistant/task/field.py
FieldInstance._proxy
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: FieldContext for this FieldInstance :rtype: twilio.rest.autopilot.v1.assistant.task.field.FieldContext """ if self._context is None: self._context = FieldContext( self._version, assistant_sid=self._solution['assistant_sid'], task_sid=self._solution['task_sid'], sid=self._solution['sid'], ) return self._context
python
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: FieldContext for this FieldInstance :rtype: twilio.rest.autopilot.v1.assistant.task.field.FieldContext """ if self._context is None: self._context = FieldContext( self._version, assistant_sid=self._solution['assistant_sid'], task_sid=self._solution['task_sid'], sid=self._solution['sid'], ) return self._context
['def', '_proxy', '(', 'self', ')', ':', 'if', 'self', '.', '_context', 'is', 'None', ':', 'self', '.', '_context', '=', 'FieldContext', '(', 'self', '.', '_version', ',', 'assistant_sid', '=', 'self', '.', '_solution', '[', "'assistant_sid'", ']', ',', 'task_sid', '=', 'self', '.', '_solution', '[', "'task_sid'", ']', ',', 'sid', '=', 'self', '.', '_solution', '[', "'sid'", ']', ',', ')', 'return', 'self', '.', '_context']
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: FieldContext for this FieldInstance :rtype: twilio.rest.autopilot.v1.assistant.task.field.FieldContext
['Generate', 'an', 'instance', 'context', 'for', 'the', 'instance', 'the', 'context', 'is', 'capable', 'of', 'performing', 'various', 'actions', '.', 'All', 'instance', 'actions', 'are', 'proxied', 'to', 'the', 'context']
train
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/autopilot/v1/assistant/task/field.py#L337-L352
8,451
bokeh/bokeh
bokeh/layouts.py
_chunks
def _chunks(l, ncols): """Yield successive n-sized chunks from list, l.""" assert isinstance(ncols, int), "ncols must be an integer" for i in range(0, len(l), ncols): yield l[i: i+ncols]
python
def _chunks(l, ncols): """Yield successive n-sized chunks from list, l.""" assert isinstance(ncols, int), "ncols must be an integer" for i in range(0, len(l), ncols): yield l[i: i+ncols]
['def', '_chunks', '(', 'l', ',', 'ncols', ')', ':', 'assert', 'isinstance', '(', 'ncols', ',', 'int', ')', ',', '"ncols must be an integer"', 'for', 'i', 'in', 'range', '(', '0', ',', 'len', '(', 'l', ')', ',', 'ncols', ')', ':', 'yield', 'l', '[', 'i', ':', 'i', '+', 'ncols', ']']
Yield successive n-sized chunks from list, l.
['Yield', 'successive', 'n', '-', 'sized', 'chunks', 'from', 'list', 'l', '.']
train
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/layouts.py#L613-L617
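A quick usage sketch, repeating the generator so the snippet runs on its own; note the short final chunk when the length is not a multiple of ncols:

def _chunks(l, ncols):
    """Yield successive n-sized chunks from list, l."""
    assert isinstance(ncols, int), "ncols must be an integer"
    for i in range(0, len(l), ncols):
        yield l[i: i + ncols]

print(list(_chunks([1, 2, 3, 4, 5], 2)))  # [[1, 2], [3, 4], [5]]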
8,452
mojaie/chorus
chorus/v2000reader.py
mols_from_text
def mols_from_text(text, no_halt=True, assign_descriptors=True):
    """Returns molecules generated from sdfile text

    Throws:
        StopIteration: if the text does not contain a molecule
        ValueError: if an unsupported symbol is found
    """
    if isinstance(text, bytes):
        t = tx.decode(text)
    else:
        t = text
    # Lazy line splitter. More efficient memory usage than str.split.
    exp = re.compile(r"[^\n]*\n|.")
    sp = (x.group(0) for x in re.finditer(exp, t))
    for c in mol_supplier(sp, no_halt, assign_descriptors):
        yield c
python
def mols_from_text(text, no_halt=True, assign_descriptors=True):
    """Returns molecules generated from sdfile text

    Throws:
        StopIteration: if the text does not contain a molecule
        ValueError: if an unsupported symbol is found
    """
    if isinstance(text, bytes):
        t = tx.decode(text)
    else:
        t = text
    # Lazy line splitter. More efficient memory usage than str.split.
    exp = re.compile(r"[^\n]*\n|.")
    sp = (x.group(0) for x in re.finditer(exp, t))
    for c in mol_supplier(sp, no_halt, assign_descriptors):
        yield c
['def', 'mols_from_text', '(', 'text', ',', 'no_halt', '=', 'True', ',', 'assign_descriptors', '=', 'True', ')', ':', 'if', 'isinstance', '(', 'text', ',', 'bytes', ')', ':', 't', '=', 'tx', '.', 'decode', '(', 'text', ')', 'else', ':', 't', '=', 'text', '# Lazy line splitter. More efficient memory usage than str.split.', 'exp', '=', 're', '.', 'compile', '(', 'r"[^\\n]*\\n|."', ')', 'sp', '=', '(', 'x', '.', 'group', '(', '0', ')', 'for', 'x', 'in', 're', '.', 'finditer', '(', 'exp', ',', 't', ')', ')', 'for', 'c', 'in', 'mol_supplier', '(', 'sp', ',', 'no_halt', ',', 'assign_descriptors', ')', ':', 'yield', 'c']
Returns molecules generated from sdfile text

Throws:
    StopIteration: if the text does not contain a molecule
    ValueError: if an unsupported symbol is found
['Returns', 'molecules', 'generated', 'from', 'sdfile', 'text']
train
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/v2000reader.py#L256-L271
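The "lazy line splitter" comment is worth unpacking: each match is one line including its trailing newline, and a final line with no newline falls through to the `.` alternative. A standalone sketch:

import re

exp = re.compile(r"[^\n]*\n|.")
text = "line1\nline2\nlast"
for match in re.finditer(exp, text):
    print(repr(match.group(0)))
# 'line1\n' and 'line2\n' arrive whole; the unterminated "last" is
# yielded one character at a time by the '.' branch.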
8,453
suds-community/suds
suds/sax/date.py
_date_from_match
def _date_from_match(match_object): """ Create a date object from a regular expression match. The regular expression match is expected to be from _RE_DATE or _RE_DATETIME. @param match_object: The regular expression match. @type match_object: B{re}.I{MatchObject} @return: A date object. @rtype: B{datetime}.I{date} """ year = int(match_object.group("year")) month = int(match_object.group("month")) day = int(match_object.group("day")) return datetime.date(year, month, day)
python
def _date_from_match(match_object): """ Create a date object from a regular expression match. The regular expression match is expected to be from _RE_DATE or _RE_DATETIME. @param match_object: The regular expression match. @type match_object: B{re}.I{MatchObject} @return: A date object. @rtype: B{datetime}.I{date} """ year = int(match_object.group("year")) month = int(match_object.group("month")) day = int(match_object.group("day")) return datetime.date(year, month, day)
['def', '_date_from_match', '(', 'match_object', ')', ':', 'year', '=', 'int', '(', 'match_object', '.', 'group', '(', '"year"', ')', ')', 'month', '=', 'int', '(', 'match_object', '.', 'group', '(', '"month"', ')', ')', 'day', '=', 'int', '(', 'match_object', '.', 'group', '(', '"day"', ')', ')', 'return', 'datetime', '.', 'date', '(', 'year', ',', 'month', ',', 'day', ')']
Create a date object from a regular expression match. The regular expression match is expected to be from _RE_DATE or _RE_DATETIME. @param match_object: The regular expression match. @type match_object: B{re}.I{MatchObject} @return: A date object. @rtype: B{datetime}.I{date}
['Create', 'a', 'date', 'object', 'from', 'a', 'regular', 'expression', 'match', '.']
train
https://github.com/suds-community/suds/blob/6fb0a829337b5037a66c20aae6f89b41acd77e40/suds/sax/date.py#L373-L389
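_RE_DATE itself is defined elsewhere in suds; this sketch uses a hypothetical pattern with the same three named groups the helper relies on:

import datetime
import re

# Hypothetical stand-in for suds' _RE_DATE, with the same named groups.
RE_DATE = re.compile(r"(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})")

match = RE_DATE.match("2024-03-09")
date = datetime.date(int(match.group("year")),
                     int(match.group("month")),
                     int(match.group("day")))
print(date)  # 2024-03-09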
8,454
O365/python-o365
O365/calendar.py
Schedule.new_calendar
def new_calendar(self, calendar_name): """ Creates a new calendar :param str calendar_name: name of the new calendar :return: a new Calendar instance :rtype: Calendar """ if not calendar_name: return None url = self.build_url(self._endpoints.get('root_calendars')) response = self.con.post(url, data={self._cc('name'): calendar_name}) if not response: return None data = response.json() # Everything received from cloud must be passed as self._cloud_data_key return self.calendar_constructor(parent=self, **{self._cloud_data_key: data})
python
def new_calendar(self, calendar_name): """ Creates a new calendar :param str calendar_name: name of the new calendar :return: a new Calendar instance :rtype: Calendar """ if not calendar_name: return None url = self.build_url(self._endpoints.get('root_calendars')) response = self.con.post(url, data={self._cc('name'): calendar_name}) if not response: return None data = response.json() # Everything received from cloud must be passed as self._cloud_data_key return self.calendar_constructor(parent=self, **{self._cloud_data_key: data})
['def', 'new_calendar', '(', 'self', ',', 'calendar_name', ')', ':', 'if', 'not', 'calendar_name', ':', 'return', 'None', 'url', '=', 'self', '.', 'build_url', '(', 'self', '.', '_endpoints', '.', 'get', '(', "'root_calendars'", ')', ')', 'response', '=', 'self', '.', 'con', '.', 'post', '(', 'url', ',', 'data', '=', '{', 'self', '.', '_cc', '(', "'name'", ')', ':', 'calendar_name', '}', ')', 'if', 'not', 'response', ':', 'return', 'None', 'data', '=', 'response', '.', 'json', '(', ')', '# Everything received from cloud must be passed as self._cloud_data_key', 'return', 'self', '.', 'calendar_constructor', '(', 'parent', '=', 'self', ',', '*', '*', '{', 'self', '.', '_cloud_data_key', ':', 'data', '}', ')']
Creates a new calendar :param str calendar_name: name of the new calendar :return: a new Calendar instance :rtype: Calendar
['Creates', 'a', 'new', 'calendar']
train
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/calendar.py#L1791-L1811
8,455
llazzaro/analyzerstrategies
analyzerstrategies/zscorePortfolioStrategy.py
ZscorePortfolioStrategy.__setUpTrakers
def __setUpTrakers(self): ''' set symbols ''' for symbol in self.symbols: self.__trakers[symbol]=OneTraker(symbol, self, self.buyingRatio)
python
def __setUpTrakers(self): ''' set symbols ''' for symbol in self.symbols: self.__trakers[symbol]=OneTraker(symbol, self, self.buyingRatio)
['def', '__setUpTrakers', '(', 'self', ')', ':', 'for', 'symbol', 'in', 'self', '.', 'symbols', ':', 'self', '.', '__trakers', '[', 'symbol', ']', '=', 'OneTraker', '(', 'symbol', ',', 'self', ',', 'self', '.', 'buyingRatio', ')']
set symbols
['set', 'symbols']
train
https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/zscorePortfolioStrategy.py#L35-L38
8,456
synw/chartjspy
chartjspy/__init__.py
Chart._format_list
def _format_list(self, data): """ Format a list to use in javascript """ dataset = "[" i = 0 for el in data: if pd.isnull(el): dataset += "null" else: dtype = type(data[i]) if dtype == int or dtype == float: dataset += str(el) else: dataset += '"' + el + '"' if i < len(data) - 1: dataset += ', ' dataset += "]" return dataset
python
def _format_list(self, data): """ Format a list to use in javascript """ dataset = "[" i = 0 for el in data: if pd.isnull(el): dataset += "null" else: dtype = type(data[i]) if dtype == int or dtype == float: dataset += str(el) else: dataset += '"' + el + '"' if i < len(data) - 1: dataset += ', ' dataset += "]" return dataset
['def', '_format_list', '(', 'self', ',', 'data', ')', ':', 'dataset', '=', '"["', 'i', '=', '0', 'for', 'el', 'in', 'data', ':', 'if', 'pd', '.', 'isnull', '(', 'el', ')', ':', 'dataset', '+=', '"null"', 'else', ':', 'dtype', '=', 'type', '(', 'data', '[', 'i', ']', ')', 'if', 'dtype', '==', 'int', 'or', 'dtype', '==', 'float', ':', 'dataset', '+=', 'str', '(', 'el', ')', 'else', ':', 'dataset', '+=', '\'"\'', '+', 'el', '+', '\'"\'', 'if', 'i', '<', 'len', '(', 'data', ')', '-', '1', ':', 'dataset', '+=', "', '", 'dataset', '+=', '"]"', 'return', 'dataset']
Format a list to use in javascript
['Format', 'a', 'list', 'to', 'use', 'in', 'javascript']
train
https://github.com/synw/chartjspy/blob/f215e36142d47b044fb59a07f95a4ff996d2b158/chartjspy/__init__.py#L92-L110
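Two quirks in the record above: the counter `i` is initialized but never incremented, so `type(data[i])` always inspects the first element and the `i < len(data) - 1` separator check never changes; and `pd.isnull` pulls in pandas for a plain null test. A hedged standalone rewrite that joins the parts instead:

import json
import math

def format_list(data):
    parts = []
    for el in data:
        if el is None or (isinstance(el, float) and math.isnan(el)):
            parts.append("null")
        elif isinstance(el, (int, float)):
            parts.append(str(el))
        else:
            parts.append(json.dumps(el))  # quotes and escapes strings
    return "[" + ", ".join(parts) + "]"

print(format_list([1, float('nan'), "ab"]))  # [1, null, "ab"]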
8,457
TrafficSenseMSD/SumoTools
traci/__init__.py
removeStepListener
def removeStepListener(listener): """removeStepListener(traci.StepListener) -> bool Remove the step listener from traci's step listener container. Returns True if the listener was removed successfully, False if it wasn't registered. """ if listener in _stepListeners: _stepListeners.remove(listener) return True warnings.warn( "removeStepListener(listener): listener %s not registered as step listener" % str(listener)) return False
python
def removeStepListener(listener): """removeStepListener(traci.StepListener) -> bool Remove the step listener from traci's step listener container. Returns True if the listener was removed successfully, False if it wasn't registered. """ if listener in _stepListeners: _stepListeners.remove(listener) return True warnings.warn( "removeStepListener(listener): listener %s not registered as step listener" % str(listener)) return False
['def', 'removeStepListener', '(', 'listener', ')', ':', 'if', 'listener', 'in', '_stepListeners', ':', '_stepListeners', '.', 'remove', '(', 'listener', ')', 'return', 'True', 'warnings', '.', 'warn', '(', '"removeStepListener(listener): listener %s not registered as step listener"', '%', 'str', '(', 'listener', ')', ')', 'return', 'False']
removeStepListener(traci.StepListener) -> bool Remove the step listener from traci's step listener container. Returns True if the listener was removed successfully, False if it wasn't registered.
['removeStepListener', '(', 'traci', '.', 'StepListener', ')', '-', '>', 'bool']
train
https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/traci/__init__.py#L142-L153
8,458
VisualOps/cli
visualops/utils/db.py
app_update_state
def app_update_state(app_id,state): """ update app state """ try: create_at = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') conn = get_conn() c = conn.cursor() c.execute("UPDATE app SET state='{0}',change_at='{1}' WHERE id='{2}'".format(state, create_at, app_id)) conn.commit() conn.close() print 'UPDATE app %s state to %s succeed!' % (app_id,state) except Exception, e: raise RuntimeError( 'update app %s state to %s failed! %s' % (app_id,state,e) )
python
def app_update_state(app_id,state): """ update app state """ try: create_at = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') conn = get_conn() c = conn.cursor() c.execute("UPDATE app SET state='{0}',change_at='{1}' WHERE id='{2}'".format(state, create_at, app_id)) conn.commit() conn.close() print 'UPDATE app %s state to %s succeed!' % (app_id,state) except Exception, e: raise RuntimeError( 'update app %s state to %s failed! %s' % (app_id,state,e) )
['def', 'app_update_state', '(', 'app_id', ',', 'state', ')', ':', 'try', ':', 'create_at', '=', 'datetime', '.', 'datetime', '.', 'now', '(', ')', '.', 'strftime', '(', "'%Y-%m-%d %H:%M:%S'", ')', 'conn', '=', 'get_conn', '(', ')', 'c', '=', 'conn', '.', 'cursor', '(', ')', 'c', '.', 'execute', '(', '"UPDATE app SET state=\'{0}\',change_at=\'{1}\' WHERE id=\'{2}\'"', '.', 'format', '(', 'state', ',', 'create_at', ',', 'app_id', ')', ')', 'conn', '.', 'commit', '(', ')', 'conn', '.', 'close', '(', ')', 'print', "'UPDATE app %s state to %s succeed!'", '%', '(', 'app_id', ',', 'state', ')', 'except', 'Exception', ',', 'e', ':', 'raise', 'RuntimeError', '(', "'update app %s state to %s failed! %s'", '%', '(', 'app_id', ',', 'state', ',', 'e', ')', ')']
update app state
['update', 'app', 'state']
train
https://github.com/VisualOps/cli/blob/e9ee9a804df0de3cce54be4c623528fd658838dc/visualops/utils/db.py#L58-L71
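The UPDATE above interpolates values straight into the SQL string. A sketch of the same statement with DB-API placeholders, shown against an in-memory SQLite table since get_conn lives elsewhere in the module:

import datetime
import sqlite3

def app_update_state(conn, app_id, state):
    change_at = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    # Placeholders avoid quoting issues and SQL injection.
    conn.execute("UPDATE app SET state=?, change_at=? WHERE id=?",
                 (state, change_at, app_id))
    conn.commit()

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE app (id TEXT, state TEXT, change_at TEXT)")
conn.execute("INSERT INTO app VALUES ('a1', 'pending', '')")
app_update_state(conn, 'a1', 'running')
print(conn.execute("SELECT state FROM app WHERE id='a1'").fetchone())  # ('running',)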
8,459
openego/eTraGo
etrago/cluster/snapshot.py
update_data_frames
def update_data_frames(network, cluster_weights, dates, hours): """ Updates the snapshots, snapshots weights and the dataframes based on the original data in the network and the medoids created by clustering these original data. Parameters ----------- network : pyPSA network object cluster_weights: dictionary dates: Datetimeindex Returns ------- network """ network.snapshot_weightings = network.snapshot_weightings.loc[dates] network.snapshots = network.snapshot_weightings.index # set new snapshot weights from cluster_weights snapshot_weightings = [] for i in cluster_weights.values(): x = 0 while x < hours: snapshot_weightings.append(i) x += 1 for i in range(len(network.snapshot_weightings)): network.snapshot_weightings[i] = snapshot_weightings[i] # put the snapshot in the right order network.snapshots.sort_values() network.snapshot_weightings.sort_index() return network
python
def update_data_frames(network, cluster_weights, dates, hours): """ Updates the snapshots, snapshots weights and the dataframes based on the original data in the network and the medoids created by clustering these original data. Parameters ----------- network : pyPSA network object cluster_weights: dictionary dates: Datetimeindex Returns ------- network """ network.snapshot_weightings = network.snapshot_weightings.loc[dates] network.snapshots = network.snapshot_weightings.index # set new snapshot weights from cluster_weights snapshot_weightings = [] for i in cluster_weights.values(): x = 0 while x < hours: snapshot_weightings.append(i) x += 1 for i in range(len(network.snapshot_weightings)): network.snapshot_weightings[i] = snapshot_weightings[i] # put the snapshot in the right order network.snapshots.sort_values() network.snapshot_weightings.sort_index() return network
['def', 'update_data_frames', '(', 'network', ',', 'cluster_weights', ',', 'dates', ',', 'hours', ')', ':', 'network', '.', 'snapshot_weightings', '=', 'network', '.', 'snapshot_weightings', '.', 'loc', '[', 'dates', ']', 'network', '.', 'snapshots', '=', 'network', '.', 'snapshot_weightings', '.', 'index', '# set new snapshot weights from cluster_weights', 'snapshot_weightings', '=', '[', ']', 'for', 'i', 'in', 'cluster_weights', '.', 'values', '(', ')', ':', 'x', '=', '0', 'while', 'x', '<', 'hours', ':', 'snapshot_weightings', '.', 'append', '(', 'i', ')', 'x', '+=', '1', 'for', 'i', 'in', 'range', '(', 'len', '(', 'network', '.', 'snapshot_weightings', ')', ')', ':', 'network', '.', 'snapshot_weightings', '[', 'i', ']', '=', 'snapshot_weightings', '[', 'i', ']', '# put the snapshot in the right order', 'network', '.', 'snapshots', '.', 'sort_values', '(', ')', 'network', '.', 'snapshot_weightings', '.', 'sort_index', '(', ')', 'return', 'network']
Updates the snapshots, snapshots weights and the dataframes based on the original data in the network and the medoids created by clustering these original data. Parameters ----------- network : pyPSA network object cluster_weights: dictionary dates: Datetimeindex Returns ------- network
['Updates', 'the', 'snapshots', 'snapshots', 'weights', 'and', 'the', 'dataframes', 'based', 'on', 'the', 'original', 'data', 'in', 'the', 'network', 'and', 'the', 'medoids', 'created', 'by', 'clustering', 'these', 'original', 'data', '.']
train
https://github.com/openego/eTraGo/blob/2a8fc6d4368d0e9abe6fe0d0c39baf66ea0126b9/etrago/cluster/snapshot.py#L145-L179
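One caveat for the last two calls above: `sort_values()` and `sort_index()` return new objects rather than sorting in place, so their results are discarded. A small pandas demonstration of the difference:

import pandas as pd

idx = pd.DatetimeIndex(["2011-01-02", "2011-01-01"])
idx.sort_values()        # sorted copy is built, then discarded
print(idx[0])            # 2011-01-02 00:00:00 -- still unsorted
idx = idx.sort_values()  # reassignment is what the comment appears to intend
print(idx[0])            # 2011-01-01 00:00:00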
8,460
dwkim78/upsilon
upsilon/predict/predict.py
predict
def predict(rf_model, features):
    """
    Return label and probability estimated.

    Parameters
    ----------
    rf_model : sklearn.ensemble.RandomForestClassifier
        The UPSILoN random forests model.
    features : array_like
        A list of features estimated by UPSILoN.

    Returns
    -------
    label : str
        A predicted label (i.e. class).
    probability : float
        Class probability.
    flag : int
        Classification flag.
    """

    import numpy as np
    from upsilon.extract_features.feature_set import get_feature_set
    feature_set = get_feature_set()

    # Grab only necessary features.
    cols = [feature for feature in features if feature in feature_set]
    cols = sorted(cols)
    filtered_features = []
    for i in range(len(cols)):
        filtered_features.append(features[cols[i]])
    filtered_features = np.array(filtered_features).reshape(1, -1)

    # Classify.
    classes = rf_model.classes_
    # Note that we're classifying a single source, so [0] needs to be added.
    probabilities = rf_model.predict_proba(filtered_features)[0]

    # Classification flag.
    flag = 0
    if features['period_SNR'] < 20. or is_period_alias(features['period']):
        flag = 1

    # Return class, probability, and flag.
    max_index = np.where(probabilities == np.max(probabilities))
    return classes[max_index][0], probabilities[max_index][0], flag
python
def predict(rf_model, features):
    """
    Return label and probability estimated.

    Parameters
    ----------
    rf_model : sklearn.ensemble.RandomForestClassifier
        The UPSILoN random forests model.
    features : array_like
        A list of features estimated by UPSILoN.

    Returns
    -------
    label : str
        A predicted label (i.e. class).
    probability : float
        Class probability.
    flag : int
        Classification flag.
    """

    import numpy as np
    from upsilon.extract_features.feature_set import get_feature_set
    feature_set = get_feature_set()

    # Grab only necessary features.
    cols = [feature for feature in features if feature in feature_set]
    cols = sorted(cols)
    filtered_features = []
    for i in range(len(cols)):
        filtered_features.append(features[cols[i]])
    filtered_features = np.array(filtered_features).reshape(1, -1)

    # Classify.
    classes = rf_model.classes_
    # Note that we're classifying a single source, so [0] needs to be added.
    probabilities = rf_model.predict_proba(filtered_features)[0]

    # Classification flag.
    flag = 0
    if features['period_SNR'] < 20. or is_period_alias(features['period']):
        flag = 1

    # Return class, probability, and flag.
    max_index = np.where(probabilities == np.max(probabilities))
    return classes[max_index][0], probabilities[max_index][0], flag
['def', 'predict', '(', 'rf_model', ',', 'features', ')', ':', 'import', 'numpy', 'as', 'np', 'from', 'upsilon', '.', 'extract_features', '.', 'feature_set', 'import', 'get_feature_set', 'feature_set', '=', 'get_feature_set', '(', ')', '# Grab only necessary features.', 'cols', '=', '[', 'feature', 'for', 'feature', 'in', 'features', 'if', 'feature', 'in', 'feature_set', ']', 'cols', '=', 'sorted', '(', 'cols', ')', 'filtered_features', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'len', '(', 'cols', ')', ')', ':', 'filtered_features', '.', 'append', '(', 'features', '[', 'cols', '[', 'i', ']', ']', ')', 'filtered_features', '=', 'np', '.', 'array', '(', 'filtered_features', ')', '.', 'reshape', '(', '1', ',', '-', '1', ')', '# Classify.', 'classes', '=', 'rf_model', '.', 'classes_', "# Note that we're classifying a single source, so [0] needs to be added.", 'probabilities', '=', 'rf_model', '.', 'predict_proba', '(', 'filtered_features', ')', '[', '0', ']', '# Classification flag.', 'flag', '=', '0', 'if', 'features', '[', "'period_SNR'", ']', '<', '20.', 'or', 'is_period_alias', '(', 'features', '[', "'period'", ']', ')', ':', 'flag', '=', '1', '# Return class, probability, and flag.', 'max_index', '=', 'np', '.', 'where', '(', 'probabilities', '==', 'np', '.', 'max', '(', 'probabilities', ')', ')', 'return', 'classes', '[', 'max_index', ']', '[', '0', ']', ',', 'probabilities', '[', 'max_index', ']', '[', '0', ']', ',', 'flag']
Return label and probability estimated. Parameters ---------- rf_model : sklearn.ensemble.RandomForestClassifier The UPSILoN random forests model. features : array_like A list of features estimated by UPSILoN. Returns ------- label : str A predicted label (i.e. class). probability : float Class probability. flag : int Classification flag.
['Return', 'label', 'and', 'probability', 'estimated', '.']
train
https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/predict/predict.py#L4-L50
8,461
Chilipp/psyplot
psyplot/project.py
DataArrayPlotter._add_data
def _add_data(self, plotter_cls, *args, **kwargs): """ Visualize this data array Parameters ---------- %(Plotter.parameters.no_data)s Returns ------- psyplot.plotter.Plotter The plotter that visualizes the data """ # this method is just a shortcut to the :meth:`Project._add_data` # method but is reimplemented by subclasses as the # :class:`DatasetPlotter` or the :class:`DataArrayPlotter` return plotter_cls(self._da, *args, **kwargs)
python
def _add_data(self, plotter_cls, *args, **kwargs): """ Visualize this data array Parameters ---------- %(Plotter.parameters.no_data)s Returns ------- psyplot.plotter.Plotter The plotter that visualizes the data """ # this method is just a shortcut to the :meth:`Project._add_data` # method but is reimplemented by subclasses as the # :class:`DatasetPlotter` or the :class:`DataArrayPlotter` return plotter_cls(self._da, *args, **kwargs)
['def', '_add_data', '(', 'self', ',', 'plotter_cls', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '# this method is just a shortcut to the :meth:`Project._add_data`', '# method but is reimplemented by subclasses as the', '# :class:`DatasetPlotter` or the :class:`DataArrayPlotter`', 'return', 'plotter_cls', '(', 'self', '.', '_da', ',', '*', 'args', ',', '*', '*', 'kwargs', ')']
Visualize this data array Parameters ---------- %(Plotter.parameters.no_data)s Returns ------- psyplot.plotter.Plotter The plotter that visualizes the data
['Visualize', 'this', 'data', 'array']
train
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/project.py#L2074-L2090
8,462
sibirrer/lenstronomy
lenstronomy/ImSim/MultiBand/multi_exposures.py
MultiExposures._array2image_list
def _array2image_list(self, array): """ maps a 1d vector of joint exposures into a list of 2d images of single exposures :param array: 1d numpy array :return: list of 2d numpy arrays of size of exposures """ image_list = [] k = 0 for i in range(self._num_bands): if self._compute_bool[i] is True: num_data = self.num_response_list[i] array_i = array[k:k + num_data] image_i = self._imageModel_list[i].ImageNumerics.array2image(array_i) image_list.append(image_i) k += num_data return image_list
python
def _array2image_list(self, array): """ maps a 1d vector of joint exposures into a list of 2d images of single exposures :param array: 1d numpy array :return: list of 2d numpy arrays of size of exposures """ image_list = [] k = 0 for i in range(self._num_bands): if self._compute_bool[i] is True: num_data = self.num_response_list[i] array_i = array[k:k + num_data] image_i = self._imageModel_list[i].ImageNumerics.array2image(array_i) image_list.append(image_i) k += num_data return image_list
['def', '_array2image_list', '(', 'self', ',', 'array', ')', ':', 'image_list', '=', '[', ']', 'k', '=', '0', 'for', 'i', 'in', 'range', '(', 'self', '.', '_num_bands', ')', ':', 'if', 'self', '.', '_compute_bool', '[', 'i', ']', 'is', 'True', ':', 'num_data', '=', 'self', '.', 'num_response_list', '[', 'i', ']', 'array_i', '=', 'array', '[', 'k', ':', 'k', '+', 'num_data', ']', 'image_i', '=', 'self', '.', '_imageModel_list', '[', 'i', ']', '.', 'ImageNumerics', '.', 'array2image', '(', 'array_i', ')', 'image_list', '.', 'append', '(', 'image_i', ')', 'k', '+=', 'num_data', 'return', 'image_list']
maps a 1d vector of joint exposures into a list of 2d images of single exposures :param array: 1d numpy array :return: list of 2d numpy arrays of size of exposures
['maps', 'a', '1d', 'vector', 'of', 'joint', 'exposures', 'into', 'a', 'list', 'of', '2d', 'images', 'of', 'single', 'exposures']
train
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/ImSim/MultiBand/multi_exposures.py#L75-L91
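A self-contained sketch of the slice-and-advance pattern _array2image_list uses to unpack a joint 1d response into per-exposure pieces; the array contents and lengths below are invented, and the 2d reshaping step is omitted:

import numpy as np

array = np.arange(10)          # joint 1d data of two exposures
num_response_list = [4, 6]     # per-exposure response lengths (made up)
chunks, k = [], 0
for num_data in num_response_list:
    chunks.append(array[k:k + num_data])
    k += num_data
# chunks[0] -> [0 1 2 3], chunks[1] -> [4 5 6 7 8 9]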
8,463
opinkerfi/nago
nago/extensions/nodes.py
ping
def ping(token_or_hostname=None): """ Send an echo request to a nago host. Arguments: token_or_hostname -- The remote node to ping If node is not provided, simply return pong You can use the special node names "server" or "master" """ if not token_or_hostname: return "Pong!" node = nago.core.get_node(token_or_hostname) if not node and token_or_hostname in ('master', 'server'): token_or_hostname = nago.settings.get_option('server') node = nago.core.get_node(token_or_hostname) if not node: try: address = socket.gethostbyname(token_or_hostname) node = nago.core.Node() node['host_name'] = token_or_hostname node['address'] = address node['access'] = 'node' if token_or_hostname == nago.settings.get_option('server'): node['access'] = 'master' node.save() except Exception: raise Exception("'%s' was not found in list of known hosts, and does not resolve to a valid address" % token_or_hostname) return node.send_command('nodes', 'ping')
python
def ping(token_or_hostname=None): """ Send an echo request to a nago host. Arguments: token_or_hostname -- The remote node to ping If node is not provided, simply return pong You can use the special node names "server" or "master" """ if not token_or_hostname: return "Pong!" node = nago.core.get_node(token_or_hostname) if not node and token_or_hostname in ('master', 'server'): token_or_hostname = nago.settings.get_option('server') node = nago.core.get_node(token_or_hostname) if not node: try: address = socket.gethostbyname(token_or_hostname) node = nago.core.Node() node['host_name'] = token_or_hostname node['address'] = address node['access'] = 'node' if token_or_hostname == nago.settings.get_option('server'): node['access'] = 'master' node.save() except Exception: raise Exception("'%s' was not found in list of known hosts, and does not resolve to a valid address" % token_or_hostname) return node.send_command('nodes', 'ping')
['def', 'ping', '(', 'token_or_hostname', '=', 'None', ')', ':', 'if', 'not', 'token_or_hostname', ':', 'return', '"Pong!"', 'node', '=', 'nago', '.', 'core', '.', 'get_node', '(', 'token_or_hostname', ')', 'if', 'not', 'node', 'and', 'token_or_hostname', 'in', '(', "'master'", ',', "'server'", ')', ':', 'token_or_hostname', '=', 'nago', '.', 'settings', '.', 'get_option', '(', "'server'", ')', 'node', '=', 'nago', '.', 'core', '.', 'get_node', '(', 'token_or_hostname', ')', 'if', 'not', 'node', ':', 'try', ':', 'address', '=', 'socket', '.', 'gethostbyname', '(', 'token_or_hostname', ')', 'node', '=', 'nago', '.', 'core', '.', 'Node', '(', ')', 'node', '[', "'host_name'", ']', '=', 'token_or_hostname', 'node', '[', "'address'", ']', '=', 'address', 'node', '[', "'access'", ']', '=', "'node'", 'if', 'token_or_hostname', '==', 'nago', '.', 'settings', '.', 'get_option', '(', "'server'", ')', ':', 'node', '[', "'access'", ']', '=', "'master'", 'node', '.', 'save', '(', ')', 'except', 'Exception', ':', 'raise', 'Exception', '(', '"\'%s\' was not found in list of known hosts, and does not resolve to a valid address"', '%', 'token_or_hostname', ')', 'return', 'node', '.', 'send_command', '(', "'nodes'", ',', "'ping'", ')']
Send an echo request to a nago host. Arguments: token_or_hostname -- The remote node to ping If node is not provided, simply return pong You can use the special node names "server" or "master"
['Send', 'an', 'echo', 'request', 'to', 'a', 'nago', 'host', '.']
train
https://github.com/opinkerfi/nago/blob/85e1bdd1de0122f56868a483e7599e1b36a439b0/nago/extensions/nodes.py#L101-L127
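The fallback branch of ping relies on socket.gethostbyname to turn a hostname into an address; a minimal sketch of that resolution step with explicit error handling instead of the broad except used above (the hostname is arbitrary):

import socket

try:
    address = socket.gethostbyname("example.com")  # returns an IPv4 string
except socket.gaierror:
    address = None  # the name did not resolve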
8,464
gmr/tinman
tinman/process.py
Process.ssl_options
def ssl_options(self): """Check the config to see if SSL configuration options have been passed and replace none, optional, and required with the correct values in the certreqs attribute if it is specified. :rtype: dict """ opts = self.namespace.server.get(config.SSL_OPTIONS) or dict() if config.CERT_REQS in opts: opts[config.CERT_REQS] = \ self.CERT_REQUIREMENTS[opts[config.CERT_REQS]] return opts or None
python
def ssl_options(self): """Check the config to see if SSL configuration options have been passed and replace none, optional, and required with the correct values in the certreqs attribute if it is specified. :rtype: dict """ opts = self.namespace.server.get(config.SSL_OPTIONS) or dict() if config.CERT_REQS in opts: opts[config.CERT_REQS] = \ self.CERT_REQUIREMENTS[opts[config.CERT_REQS]] return opts or None
['def', 'ssl_options', '(', 'self', ')', ':', 'opts', '=', 'self', '.', 'namespace', '.', 'server', '.', 'get', '(', 'config', '.', 'SSL_OPTIONS', ')', 'or', 'dict', '(', ')', 'if', 'config', '.', 'CERT_REQS', 'in', 'opts', ':', 'opts', '[', 'config', '.', 'CERT_REQS', ']', '=', 'self', '.', 'CERT_REQUIREMENTS', '[', 'opts', '[', 'config', '.', 'CERT_REQS', ']', ']', 'return', 'opts', 'or', 'None']
Check the config to see if SSL configuration options have been passed and replace none, optional, and required with the correct values in the certreqs attribute if it is specified. :rtype: dict
['Check', 'the', 'config', 'to', 'see', 'if', 'SSL', 'configuration', 'options', 'have', 'been', 'passed', 'and', 'replace', 'none', 'optional', 'and', 'required', 'with', 'the', 'correct', 'values', 'in', 'the', 'certreqs', 'attribute', 'if', 'it', 'is', 'specified', '.']
train
https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/process.py#L179-L191
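The ssl_options property maps a textual certificate requirement onto an ssl constant through the class's CERT_REQUIREMENTS table. The real table lives in tinman's source; the dict below is an assumption inferred from the docstring's none/optional/required wording:

import ssl

CERT_REQUIREMENTS = {  # assumed shape of the lookup table
    'none': ssl.CERT_NONE,
    'optional': ssl.CERT_OPTIONAL,
    'required': ssl.CERT_REQUIRED,
}
opts = {'certfile': 'server.crt', 'cert_reqs': 'required'}  # toy config
opts['cert_reqs'] = CERT_REQUIREMENTS[opts['cert_reqs']]    # -> ssl.CERT_REQUIRED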
8,465
saltstack/salt
salt/fileclient.py
RemoteClient.hash_and_stat_file
def hash_and_stat_file(self, path, saltenv='base'): ''' The same as hash_file, but also return the file's mode, or None if no mode data is present. ''' hash_result = self.hash_file(path, saltenv) try: path = self._check_proto(path) except MinionError as err: if not os.path.isfile(path): return hash_result, None else: try: return hash_result, list(os.stat(path)) except Exception: return hash_result, None load = {'path': path, 'saltenv': saltenv, 'cmd': '_file_find'} fnd = self.channel.send(load) try: stat_result = fnd.get('stat') except AttributeError: stat_result = None return hash_result, stat_result
python
def hash_and_stat_file(self, path, saltenv='base'): ''' The same as hash_file, but also return the file's mode, or None if no mode data is present. ''' hash_result = self.hash_file(path, saltenv) try: path = self._check_proto(path) except MinionError as err: if not os.path.isfile(path): return hash_result, None else: try: return hash_result, list(os.stat(path)) except Exception: return hash_result, None load = {'path': path, 'saltenv': saltenv, 'cmd': '_file_find'} fnd = self.channel.send(load) try: stat_result = fnd.get('stat') except AttributeError: stat_result = None return hash_result, stat_result
['def', 'hash_and_stat_file', '(', 'self', ',', 'path', ',', 'saltenv', '=', "'base'", ')', ':', 'hash_result', '=', 'self', '.', 'hash_file', '(', 'path', ',', 'saltenv', ')', 'try', ':', 'path', '=', 'self', '.', '_check_proto', '(', 'path', ')', 'except', 'MinionError', 'as', 'err', ':', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'path', ')', ':', 'return', 'hash_result', ',', 'None', 'else', ':', 'try', ':', 'return', 'hash_result', ',', 'list', '(', 'os', '.', 'stat', '(', 'path', ')', ')', 'except', 'Exception', ':', 'return', 'hash_result', ',', 'None', 'load', '=', '{', "'path'", ':', 'path', ',', "'saltenv'", ':', 'saltenv', ',', "'cmd'", ':', "'_file_find'", '}', 'fnd', '=', 'self', '.', 'channel', '.', 'send', '(', 'load', ')', 'try', ':', 'stat_result', '=', 'fnd', '.', 'get', '(', "'stat'", ')', 'except', 'AttributeError', ':', 'stat_result', '=', 'None', 'return', 'hash_result', ',', 'stat_result']
The same as hash_file, but also return the file's mode, or None if no mode data is present.
['The', 'same', 'as', 'hash_file', 'but', 'also', 'return', 'the', 'file', 's', 'mode', 'or', 'None', 'if', 'no', 'mode', 'data', 'is', 'present', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileclient.py#L1336-L1360
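The local-file branch of hash_and_stat_file converts an os.stat_result into a plain list so it can be returned alongside the hash; a standalone illustration of that conversion:

import os

st = os.stat(__file__)
stat_list = list(st)  # the 10-element tuple view: mode, inode, device, ...
mode = stat_list[0]   # same value as st.st_mode
print(oct(mode))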
8,466
MostAwesomeDude/blackjack
blackjack.py
Node.find_prekeyed
def find_prekeyed(self, value, key): """ Find a value in a node, using a key function. The value is already a key. """ while self is not NULL: direction = cmp(value, key(self.value)) if direction < 0: self = self.left elif direction > 0: self = self.right elif direction == 0: return self.value
python
def find_prekeyed(self, value, key): """ Find a value in a node, using a key function. The value is already a key. """ while self is not NULL: direction = cmp(value, key(self.value)) if direction < 0: self = self.left elif direction > 0: self = self.right elif direction == 0: return self.value
['def', 'find_prekeyed', '(', 'self', ',', 'value', ',', 'key', ')', ':', 'while', 'self', 'is', 'not', 'NULL', ':', 'direction', '=', 'cmp', '(', 'value', ',', 'key', '(', 'self', '.', 'value', ')', ')', 'if', 'direction', '<', '0', ':', 'self', '=', 'self', '.', 'left', 'elif', 'direction', '>', '0', ':', 'self', '=', 'self', '.', 'right', 'elif', 'direction', '==', '0', ':', 'return', 'self', '.', 'value']
Find a value in a node, using a key function. The value is already a key.
['Find', 'a', 'value', 'in', 'a', 'node', 'using', 'a', 'key', 'function', '.', 'The', 'value', 'is', 'already', 'a', 'key', '.']
train
https://github.com/MostAwesomeDude/blackjack/blob/1346642e353719ab68c0dc3573aa33b688431bf8/blackjack.py#L30-L43
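find_prekeyed leans on the Python 2 built-in cmp(), which was removed in Python 3; a drop-in shim if this pattern is ported:

def cmp(a, b):
    """Python 3 replacement for the removed built-in cmp()."""
    return (a > b) - (a < b)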
8,467
blockstack/virtualchain
virtualchain/lib/config.py
get_lockfile_filename
def get_lockfile_filename(impl, working_dir): """ Get the absolute path to the chain's indexing lockfile """ lockfile_name = impl.get_virtual_chain_name() + ".lock" return os.path.join(working_dir, lockfile_name)
python
def get_lockfile_filename(impl, working_dir): """ Get the absolute path to the chain's indexing lockfile """ lockfile_name = impl.get_virtual_chain_name() + ".lock" return os.path.join(working_dir, lockfile_name)
['def', 'get_lockfile_filename', '(', 'impl', ',', 'working_dir', ')', ':', 'lockfile_name', '=', 'impl', '.', 'get_virtual_chain_name', '(', ')', '+', '".lock"', 'return', 'os', '.', 'path', '.', 'join', '(', 'working_dir', ',', 'lockfile_name', ')']
Get the absolute path to the chain's indexing lockfile
['Get', 'the', 'absolute', 'path', 'to', 'the', 'chain', 's', 'indexing', 'lockfile']
train
https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/config.py#L139-L144
8,468
proycon/clam
clam/common/data.py
CLAMData.parameter
def parameter(self, parameter_id): """Return the specified global parameter (the entire object, not just the value)""" for parametergroup, parameters in self.parameters: #pylint: disable=unused-variable for parameter in parameters: if parameter.id == parameter_id: return parameter raise KeyError("No such parameter exists: " + parameter_id )
python
def parameter(self, parameter_id): """Return the specified global parameter (the entire object, not just the value)""" for parametergroup, parameters in self.parameters: #pylint: disable=unused-variable for parameter in parameters: if parameter.id == parameter_id: return parameter raise KeyError("No such parameter exists: " + parameter_id )
['def', 'parameter', '(', 'self', ',', 'parameter_id', ')', ':', 'for', 'parametergroup', ',', 'parameters', 'in', 'self', '.', 'parameters', ':', '#pylint: disable=unused-variable', 'for', 'parameter', 'in', 'parameters', ':', 'if', 'parameter', '.', 'id', '==', 'parameter_id', ':', 'return', 'parameter', 'raise', 'KeyError', '(', '"No such parameter exists: "', '+', 'parameter_id', ')']
Return the specified global parameter (the entire object, not just the value)
['Return', 'the', 'specified', 'global', 'parameter', '(', 'the', 'entire', 'object', 'not', 'just', 'the', 'value', ')']
train
https://github.com/proycon/clam/blob/09d15cfc26d7cbe0f5976cdd5424dc446d10dbf3/clam/common/data.py#L676-L682
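A self-contained stand-in mirroring the group/parameter nesting that CLAMData.parameter walks; the Parameter class and ids below are invented for illustration:

class Parameter:  # minimal stand-in for clam's Parameter class
    def __init__(self, id):
        self.id = id

parameters = [('main', [Parameter('language'), Parameter('model')])]

def find(parameter_id):
    for parametergroup, params in parameters:
        for p in params:
            if p.id == parameter_id:
                return p
    raise KeyError("No such parameter exists: " + parameter_id)

find('language')  # returns the Parameter object itself, not its value
# find('missing') would raise KeyError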
8,469
icgood/pymap
pymap/interfaces/message.py
MessageInterface.get_message_headers
def get_message_headers(self, section: Sequence[int] = None, subset: Collection[bytes] = None, inverse: bool = False) -> Writeable: """Get the headers from the message or a ``message/rfc822`` sub-part of the message. The ``section`` argument can index a nested sub-part of the message. For example, ``[2, 3]`` would get the 2nd sub-part of the message and then index it for its 3rd sub-part. Args: section: Optional nested list of sub-part indexes. subset: Subset of headers to get. inverse: If ``subset`` is given, this flag will invert it so that the headers *not* in ``subset`` are returned. """ ...
python
def get_message_headers(self, section: Sequence[int] = None, subset: Collection[bytes] = None, inverse: bool = False) -> Writeable: """Get the headers from the message or a ``message/rfc822`` sub-part of the message. The ``section`` argument can index a nested sub-part of the message. For example, ``[2, 3]`` would get the 2nd sub-part of the message and then index it for its 3rd sub-part. Args: section: Optional nested list of sub-part indexes. subset: Subset of headers to get. inverse: If ``subset`` is given, this flag will invert it so that the headers *not* in ``subset`` are returned. """ ...
['def', 'get_message_headers', '(', 'self', ',', 'section', ':', 'Sequence', '[', 'int', ']', '=', 'None', ',', 'subset', ':', 'Collection', '[', 'bytes', ']', '=', 'None', ',', 'inverse', ':', 'bool', '=', 'False', ')', '->', 'Writeable', ':', '...']
Get the headers from the message or a ``message/rfc822`` sub-part of the message. The ``section`` argument can index a nested sub-part of the message. For example, ``[2, 3]`` would get the 2nd sub-part of the message and then index it for its 3rd sub-part. Args: section: Optional nested list of sub-part indexes. subset: Subset of headers to get. inverse: If ``subset`` is given, this flag will invert it so that the headers *not* in ``subset`` are returned.
['Get', 'the', 'headers', 'from', 'the', 'message', 'or', 'a', 'message', '/', 'rfc822', 'sub', '-', 'part', 'of', 'the', 'message', '.']
train
https://github.com/icgood/pymap/blob/e77d9a54d760e3cbe044a548883bb4299ed61dc2/pymap/interfaces/message.py#L170-L187
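The section argument of get_message_headers walks nested MIME sub-parts; a toy illustration of how an index like [2, 3] would be resolved against a part tree. The dict structure is invented (pymap's real message objects differ); the 1-based indexing follows IMAP section numbering:

message = {"parts": [
    {"parts": []},                              # part 1
    {"parts": [{}, {}, {"name": "part 2.3"}]},  # part 2
]}
node = message
for idx in [2, 3]:
    node = node["parts"][idx - 1]  # section numbers are 1-based
print(node["name"])                # part 2.3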
8,470
dmwm/DBS
Client/src/python/dbs/apis/dbsClient.py
DbsApi.listRuns
def listRuns(self, **kwargs): """ API to list all run dictionary, for example: [{'run_num': [160578, 160498, 160447, 160379]}]. At least one parameter is mandatory. :param logical_file_name: List all runs in the file :type logical_file_name: str :param block_name: List all runs in the block :type block_name: str :param dataset: List all runs in that dataset :type dataset: str :param run_num: List all runs :type run_num: int, string or list """ validParameters = ['run_num', 'logical_file_name', 'block_name', 'dataset'] requiredParameters = {'multiple': validParameters} checkInputParameter(method="listRuns", parameters=kwargs.keys(), validParameters=validParameters, requiredParameters=requiredParameters) return self.__callServer("runs", params=kwargs)
python
def listRuns(self, **kwargs): """ API to list all run dictionary, for example: [{'run_num': [160578, 160498, 160447, 160379]}]. At least one parameter is mandatory. :param logical_file_name: List all runs in the file :type logical_file_name: str :param block_name: List all runs in the block :type block_name: str :param dataset: List all runs in that dataset :type dataset: str :param run_num: List all runs :type run_num: int, string or list """ validParameters = ['run_num', 'logical_file_name', 'block_name', 'dataset'] requiredParameters = {'multiple': validParameters} checkInputParameter(method="listRuns", parameters=kwargs.keys(), validParameters=validParameters, requiredParameters=requiredParameters) return self.__callServer("runs", params=kwargs)
['def', 'listRuns', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'validParameters', '=', '[', "'run_num'", ',', "'logical_file_name'", ',', "'block_name'", ',', "'dataset'", ']', 'requiredParameters', '=', '{', "'multiple'", ':', 'validParameters', '}', 'checkInputParameter', '(', 'method', '=', '"listRuns"', ',', 'parameters', '=', 'kwargs', '.', 'keys', '(', ')', ',', 'validParameters', '=', 'validParameters', ',', 'requiredParameters', '=', 'requiredParameters', ')', 'return', 'self', '.', '__callServer', '(', '"runs"', ',', 'params', '=', 'kwargs', ')']
API to list all run dictionary, for example: [{'run_num': [160578, 160498, 160447, 160379]}]. At least one parameter is mandatory. :param logical_file_name: List all runs in the file :type logical_file_name: str :param block_name: List all runs in the block :type block_name: str :param dataset: List all runs in that dataset :type dataset: str :param run_num: List all runs :type run_num: int, string or list
['API', 'to', 'list', 'all', 'run', 'dictionary', 'for', 'example', ':', '[', '{', 'run_num', ':', '[', '160578', '160498', '160447', '160379', ']', '}', ']', '.', 'At', 'least', 'one', 'parameter', 'is', 'mandatory', '.']
train
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Client/src/python/dbs/apis/dbsClient.py#L1432-L1454
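Hypothetical calls, assuming api is a configured DbsApi instance; the dataset and file names are placeholders rather than real DBS entries:

# runs = api.listRuns(dataset='/MyPrimary/MyProcessed/GEN-SIM')
# runs = api.listRuns(logical_file_name='/store/mc/.../file.root')
# Calling api.listRuns() with no argument fails input validation,
# since at least one parameter is mandatory.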
8,471
m-weigand/sip_models
lib/sip_models/cond/cc.py
cc.dre_dc
def dre_dc(self, pars): r""" :math:Add formula """ self._set_parameters(pars) # term 1 num1a = np.log(self.w * self.tau) * self.otc * np.sin(self.ang) num1b = self.otc * np.cos(self.ang) * np.pi / 2.0 term1 = (num1a + num1b) / self.denom # term 2 num2 = self.otc * np.sin(self.c / np.pi) * 2 denom2 = self.denom ** 2 term2 = num2 / denom2 # term 3 num3a = 2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) num3b = 2 * ((self.w * self.tau) ** 2) * np.pi / 2.0 * np.sin(self.ang) num3c = 2 * np.log(self.w * self.tau) * self.otc2 term3 = num3a - num3b + num3c result = self.sigmai * self.m * (term1 + term2 * term3) return result
python
def dre_dc(self, pars): r""" :math:Add formula """ self._set_parameters(pars) # term 1 num1a = np.log(self.w * self.tau) * self.otc * np.sin(self.ang) num1b = self.otc * np.cos(self.ang) * np.pi / 2.0 term1 = (num1a + num1b) / self.denom # term 2 num2 = self.otc * np.sin(self.c / np.pi) * 2 denom2 = self.denom ** 2 term2 = num2 / denom2 # term 3 num3a = 2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) num3b = 2 * ((self.w * self.tau) ** 2) * np.pi / 2.0 * np.sin(self.ang) num3c = 2 * np.log(self.w * self.tau) * self.otc2 term3 = num3a - num3b + num3c result = self.sigmai * self.m * (term1 + term2 * term3) return result
['def', 'dre_dc', '(', 'self', ',', 'pars', ')', ':', 'self', '.', '_set_parameters', '(', 'pars', ')', '# term 1', 'num1a', '=', 'np', '.', 'log', '(', 'self', '.', 'w', '*', 'self', '.', 'tau', ')', '*', 'self', '.', 'otc', '*', 'np', '.', 'sin', '(', 'self', '.', 'ang', ')', 'num1b', '=', 'self', '.', 'otc', '*', 'np', '.', 'cos', '(', 'self', '.', 'ang', ')', '*', 'np', '.', 'pi', '/', '2.0', 'term1', '=', '(', 'num1a', '+', 'num1b', ')', '/', 'self', '.', 'denom', '# term 2', 'num2', '=', 'self', '.', 'otc', '*', 'np', '.', 'sin', '(', 'self', '.', 'c', '/', 'np', '.', 'pi', ')', '*', '2', 'denom2', '=', 'self', '.', 'denom', '**', '2', 'term2', '=', 'num2', '/', 'denom2', '# term 3', 'num3a', '=', '2', '*', 'np', '.', 'log', '(', 'self', '.', 'w', '*', 'self', '.', 'tau', ')', '*', 'self', '.', 'otc', '*', 'np', '.', 'cos', '(', 'self', '.', 'ang', ')', 'num3b', '=', '2', '*', '(', '(', 'self', '.', 'w', '*', 'self', '.', 'tau', ')', '**', '2', ')', '*', 'np', '.', 'pi', '/', '2.0', '*', 'np', '.', 'sin', '(', 'self', '.', 'ang', ')', 'num3c', '=', '2', '*', 'np', '.', 'log', '(', 'self', '.', 'w', '*', 'self', '.', 'tau', ')', '*', 'self', '.', 'otc2', 'term3', '=', 'num3a', '-', 'num3b', '+', 'num3c', 'result', '=', 'self', '.', 'sigmai', '*', 'self', '.', 'm', '*', '(', 'term1', '+', 'term2', '*', 'term3', ')', 'return', 'result']
r""" :math:Add formula
['r', ':', 'math', ':', 'Add', 'formula']
train
https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/cond/cc.py#L185-L208
8,472
SatelliteQE/nailgun
nailgun/entities.py
ForemanTask.poll
def poll(self, poll_rate=None, timeout=None): """Return the status of a task or timeout. There are several API calls that trigger asynchronous tasks, such as synchronizing a repository, or publishing or promoting a content view. It is possible to check on the status of a task if you know its UUID. This method polls a task once every ``poll_rate`` seconds and, upon task completion, returns information about that task. :param poll_rate: Delay between the end of one task check-up and the start of the next check-up. Defaults to ``nailgun.entity_mixins.TASK_POLL_RATE``. :param timeout: Maximum number of seconds to wait until timing out. Defaults to ``nailgun.entity_mixins.TASK_TIMEOUT``. :returns: Information about the asynchronous task. :raises: ``nailgun.entity_mixins.TaskTimedOutError`` if the task does not finish before ``timeout`` seconds elapse. :raises: ``nailgun.entity_mixins.TaskFailedError`` if the task finishes with any result other than "success". :raises: ``requests.exceptions.HTTPError`` If the API returns a message with an HTTP 4XX or 5XX status code. """ # See nailgun.entity_mixins._poll_task for an explanation of why a # private method is called. return _poll_task( self.id, # pylint:disable=no-member self._server_config, poll_rate, timeout )
python
def poll(self, poll_rate=None, timeout=None): """Return the status of a task or timeout. There are several API calls that trigger asynchronous tasks, such as synchronizing a repository, or publishing or promoting a content view. It is possible to check on the status of a task if you know its UUID. This method polls a task once every ``poll_rate`` seconds and, upon task completion, returns information about that task. :param poll_rate: Delay between the end of one task check-up and the start of the next check-up. Defaults to ``nailgun.entity_mixins.TASK_POLL_RATE``. :param timeout: Maximum number of seconds to wait until timing out. Defaults to ``nailgun.entity_mixins.TASK_TIMEOUT``. :returns: Information about the asynchronous task. :raises: ``nailgun.entity_mixins.TaskTimedOutError`` if the task does not finish before ``timeout`` seconds elapse. :raises: ``nailgun.entity_mixins.TaskFailedError`` if the task finishes with any result other than "success". :raises: ``requests.exceptions.HTTPError`` If the API returns a message with an HTTP 4XX or 5XX status code. """ # See nailgun.entity_mixins._poll_task for an explanation of why a # private method is called. return _poll_task( self.id, # pylint:disable=no-member self._server_config, poll_rate, timeout )
['def', 'poll', '(', 'self', ',', 'poll_rate', '=', 'None', ',', 'timeout', '=', 'None', ')', ':', '# See nailgun.entity_mixins._poll_task for an explanation of why a', '# private method is called.', 'return', '_poll_task', '(', 'self', '.', 'id', ',', '# pylint:disable=no-member', 'self', '.', '_server_config', ',', 'poll_rate', ',', 'timeout', ')']
Return the status of a task or timeout. There are several API calls that trigger asynchronous tasks, such as synchronizing a repository, or publishing or promoting a content view. It is possible to check on the status of a task if you know its UUID. This method polls a task once every ``poll_rate`` seconds and, upon task completion, returns information about that task. :param poll_rate: Delay between the end of one task check-up and the start of the next check-up. Defaults to ``nailgun.entity_mixins.TASK_POLL_RATE``. :param timeout: Maximum number of seconds to wait until timing out. Defaults to ``nailgun.entity_mixins.TASK_TIMEOUT``. :returns: Information about the asynchronous task. :raises: ``nailgun.entity_mixins.TaskTimedOutError`` if the task does not finish before ``timeout`` seconds elapse. :raises: ``nailgun.entity_mixins.TaskFailedError`` if the task finishes with any result other than "success". :raises: ``requests.exceptions.HTTPError`` If the API returns a message with an HTTP 4XX or 5XX status code.
['Return', 'the', 'status', 'of', 'a', 'task', 'or', 'timeout', '.']
train
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entities.py#L3185-L3215
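A hypothetical polling call, assuming task is a ForemanTask entity for an already-triggered asynchronous job and that the returned task info carries a 'result' field, as the raises clauses suggest:

# task_info = task.poll(poll_rate=5, timeout=300)  # check every 5 s, give up after 5 min
# task_info['result']                              # 'success' if nothing raised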
8,473
HPENetworking/PYHPEIMC
build/lib/pyhpeimc/plat/device.py
get_dev_mac_learn
def get_dev_mac_learn(devid, auth, url): ''' function takes devid of specific device and issues a RESTFUL call to gather the current IP-MAC learning entries on the target device. :param devid: int value of the target device :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: list of dict objects which contain the mac learn table of target device id :rtype: list >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.device import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> dev_mac_learn = get_dev_mac_learn('10', auth.creds, auth.url) >>> assert type(dev_mac_learn) is list >>> assert 'deviceId' in dev_mac_learn[0] ''' get_dev_mac_learn_url='/imcrs/res/access/ipMacLearn/'+str(devid) f_url = url+get_dev_mac_learn_url try: r = requests.get(f_url, auth=auth, headers=HEADERS) if r.status_code == 200: if len(r.text) < 1: mac_learn_query = {} return mac_learn_query else: mac_learn_query = (json.loads(r.text))['ipMacLearnResult'] return mac_learn_query except requests.exceptions.RequestException as e: return "Error:\n" + str(e) + " get_dev_mac_learn: An Error has occurred"
python
def get_dev_mac_learn(devid, auth, url): ''' function takes devid of specific device and issues a RESTFUL call to gather the current IP-MAC learning entries on the target device. :param devid: int value of the target device :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: list of dict objects which contain the mac learn table of target device id :rtype: list >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.device import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> dev_mac_learn = get_dev_mac_learn('10', auth.creds, auth.url) >>> assert type(dev_mac_learn) is list >>> assert 'deviceId' in dev_mac_learn[0] ''' get_dev_mac_learn_url='/imcrs/res/access/ipMacLearn/'+str(devid) f_url = url+get_dev_mac_learn_url try: r = requests.get(f_url, auth=auth, headers=HEADERS) if r.status_code == 200: if len(r.text) < 1: mac_learn_query = {} return mac_learn_query else: mac_learn_query = (json.loads(r.text))['ipMacLearnResult'] return mac_learn_query except requests.exceptions.RequestException as e: return "Error:\n" + str(e) + " get_dev_mac_learn: An Error has occurred"
['def', 'get_dev_mac_learn', '(', 'devid', ',', 'auth', ',', 'url', ')', ':', 'get_dev_mac_learn_url', '=', "'/imcrs/res/access/ipMacLearn/'", '+', 'str', '(', 'devid', ')', 'f_url', '=', 'url', '+', 'get_dev_mac_learn_url', 'try', ':', 'r', '=', 'requests', '.', 'get', '(', 'f_url', ',', 'auth', '=', 'auth', ',', 'headers', '=', 'HEADERS', ')', 'if', 'r', '.', 'status_code', '==', '200', ':', 'if', 'len', '(', 'r', '.', 'text', ')', '<', '1', ':', 'mac_learn_query', '=', '{', '}', 'return', 'mac_learn_query', 'else', ':', 'mac_learn_query', '=', '(', 'json', '.', 'loads', '(', 'r', '.', 'text', ')', ')', '[', "'ipMacLearnResult'", ']', 'return', 'mac_learn_query', 'except', 'requests', '.', 'exceptions', '.', 'RequestException', 'as', 'e', ':', 'return', '"Error:\\n"', '+', 'str', '(', 'e', ')', '+', '" get_dev_mac_learn: An Error has occurred"']
function takes devid of specific device and issues a RESTFUL call to gather the current IP-MAC learning entries on the target device. :param devid: int value of the target device :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: list of dict objects which contain the mac learn table of target device id :rtype: list >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.device import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> dev_mac_learn = get_dev_mac_learn('10', auth.creds, auth.url) >>> assert type(dev_mac_learn) is list >>> assert 'deviceId' in dev_mac_learn[0]
['function', 'takes', 'devid', 'of', 'specific', 'device', 'and', 'issues', 'a', 'RESTFUL', 'call', 'to', 'gather', 'the', 'current', 'IP', '-', 'MAC', 'learning', 'entries', 'on', 'the', 'target', 'device', '.']
train
https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/build/lib/pyhpeimc/plat/device.py#L416-L456
8,474
odlgroup/odl
odl/contrib/datasets/images/cambridge.py
convert
def convert(image, shape, gray=False, dtype='float64', normalize='max'): """Convert image to standardized format. Several properties of the input image may be changed including the shape, data type and maximal value of the image. In addition, this function may convert the image into an ODL object and/or a gray scale image. """ image = image.astype(dtype) if gray: image[..., 0] *= 0.2126 image[..., 1] *= 0.7152 image[..., 2] *= 0.0722 image = np.sum(image, axis=2) if shape is not None: image = skimage.transform.resize(image, shape, mode='constant') image = image.astype(dtype) if normalize == 'max': image /= image.max() elif normalize == 'sum': image /= image.sum() else: assert False return image
python
def convert(image, shape, gray=False, dtype='float64', normalize='max'): """Convert image to standardized format. Several properties of the input image may be changed including the shape, data type and maximal value of the image. In addition, this function may convert the image into an ODL object and/or a gray scale image. """ image = image.astype(dtype) if gray: image[..., 0] *= 0.2126 image[..., 1] *= 0.7152 image[..., 2] *= 0.0722 image = np.sum(image, axis=2) if shape is not None: image = skimage.transform.resize(image, shape, mode='constant') image = image.astype(dtype) if normalize == 'max': image /= image.max() elif normalize == 'sum': image /= image.sum() else: assert False return image
['def', 'convert', '(', 'image', ',', 'shape', ',', 'gray', '=', 'False', ',', 'dtype', '=', "'float64'", ',', 'normalize', '=', "'max'", ')', ':', 'image', '=', 'image', '.', 'astype', '(', 'dtype', ')', 'if', 'gray', ':', 'image', '[', '...', ',', '0', ']', '*=', '0.2126', 'image', '[', '...', ',', '1', ']', '*=', '0.7152', 'image', '[', '...', ',', '2', ']', '*=', '0.0722', 'image', '=', 'np', '.', 'sum', '(', 'image', ',', 'axis', '=', '2', ')', 'if', 'shape', 'is', 'not', 'None', ':', 'image', '=', 'skimage', '.', 'transform', '.', 'resize', '(', 'image', ',', 'shape', ',', 'mode', '=', "'constant'", ')', 'image', '=', 'image', '.', 'astype', '(', 'dtype', ')', 'if', 'normalize', '==', "'max'", ':', 'image', '/=', 'image', '.', 'max', '(', ')', 'elif', 'normalize', '==', "'sum'", ':', 'image', '/=', 'image', '.', 'sum', '(', ')', 'else', ':', 'assert', 'False', 'return', 'image']
Convert image to standardized format. Several properties of the input image may be changed including the shape, data type and maximal value of the image. In addition, this function may convert the image into an ODL object and/or a gray scale image.
['Convert', 'image', 'to', 'standardized', 'format', '.']
train
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/datasets/images/cambridge.py#L32-L59
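The gray-scale branch of convert applies the standard Rec. 709 luma weights channel by channel; the same conversion as a one-line dot product, on random data for illustration:

import numpy as np

rgb = np.random.rand(8, 8, 3)
gray = rgb @ np.array([0.2126, 0.7152, 0.0722])  # equals the weighted channel sum above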
8,475
hubo1016/vlcp
vlcp/event/runnable.py
RoutineContainer.syscall
async def syscall(self, func, ignoreException = False): """ Call a syscall method and retrieve its return value """ ev = await self.syscall_noreturn(func) if hasattr(ev, 'exception'): if ignoreException: return else: raise ev.exception[1] else: return ev.retvalue
python
async def syscall(self, func, ignoreException = False): """ Call a syscall method and retrieve its return value """ ev = await self.syscall_noreturn(func) if hasattr(ev, 'exception'): if ignoreException: return else: raise ev.exception[1] else: return ev.retvalue
['async', 'def', 'syscall', '(', 'self', ',', 'func', ',', 'ignoreException', '=', 'False', ')', ':', 'ev', '=', 'await', 'self', '.', 'syscall_noreturn', '(', 'func', ')', 'if', 'hasattr', '(', 'ev', ',', "'exception'", ')', ':', 'if', 'ignoreException', ':', 'return', 'else', ':', 'raise', 'ev', '.', 'exception', '[', '1', ']', 'else', ':', 'return', 'ev', '.', 'retvalue']
Call a syscall method and retrieve its return value
['Call', 'a', 'syscall', 'method', 'and', 'retrieve', 'its', 'return', 'value']
train
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/runnable.py#L663-L674
8,476
caesar0301/relogger
relogger/syslog.py
HEADER.hostname
def hostname(self, value): """ The hostname where the log message was created. Should be the first part of the hostname, or an IP address. Should NOT be set to a fully qualified domain name. """ if value is None: value = socket.gethostname() self._hostname = value
python
def hostname(self, value): """ The hostname where the log message was created. Should be the first part of the hostname, or an IP address. Should NOT be set to a fully qualified domain name. """ if value is None: value = socket.gethostname() self._hostname = value
['def', 'hostname', '(', 'self', ',', 'value', ')', ':', 'if', 'value', 'is', 'None', ':', 'value', '=', 'socket', '.', 'gethostname', '(', ')', 'self', '.', '_hostname', '=', 'value']
The hostname where the log message was created. Should be the first part of the hostname, or an IP address. Should NOT be set to a fully qualified domain name.
['The', 'hostname', 'where', 'the', 'log', 'message', 'was', 'created', '.']
train
https://github.com/caesar0301/relogger/blob/40b722ad2115ac6a179e2cc4eb0c88333f5114de/relogger/syslog.py#L153-L164
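The None fallback uses socket.gethostname, which typically returns the short host name the docstring asks for, though some systems are configured to return a fully qualified name:

import socket

print(socket.gethostname())  # e.g. 'myhost' rather than 'myhost.example.com'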
8,477
uralbash/pyramid_pages
pyramid_pages/resources.py
resource_of_node
def resource_of_node(resources, node): """ Returns resource of node. """ for resource in resources: model = getattr(resource, 'model', None) if type(node) == model: return resource return BasePageResource
python
def resource_of_node(resources, node): """ Returns resource of node. """ for resource in resources: model = getattr(resource, 'model', None) if type(node) == model: return resource return BasePageResource
['def', 'resource_of_node', '(', 'resources', ',', 'node', ')', ':', 'for', 'resource', 'in', 'resources', ':', 'model', '=', 'getattr', '(', 'resource', ',', "'model'", ',', 'None', ')', 'if', 'type', '(', 'node', ')', '==', 'model', ':', 'return', 'resource', 'return', 'BasePageResource']
Returns resource of node.
['Returns', 'resource', 'of', 'node', '.']
train
https://github.com/uralbash/pyramid_pages/blob/545b1ecb2e5dee5742135ba2a689b9635dd4efa1/pyramid_pages/resources.py#L133-L140
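Note the exact type(node) == model comparison in resource_of_node: subclasses of a registered model will not match and fall through to BasePageResource. A quick illustration of that design choice:

class Page: pass
class NewsPage(Page): pass

print(type(NewsPage()) == Page)      # False -- exact type match only
print(isinstance(NewsPage(), Page))  # True -- what isinstance would allow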
8,478
santosjorge/cufflinks
cufflinks/tools.py
_set_axis
def _set_axis(self,traces,on=None,side='right',title=''): """ Sets the axis in which each trace should appear If the axis doesn't exist then a new axis is created Parameters: ----------- traces : list(str) List of trace names on : string The axis in which the traces should be placed. If this is not indicated then a new axis will be created side : string Side where the axis will be placed 'left' 'right' title : string Sets the title of the axis Applies only to new axis """ fig={} fig_cpy=fig_to_dict(self).copy() fig['data']=fig_cpy['data'] fig['layout']=fig_cpy['layout'] fig=Figure(fig) traces=make_list(traces) def update_data(trace,y): anchor=fig.axis['def'][y]['anchor'] if 'anchor' in fig.axis['def'][y] else 'x1' idx=fig.trace_dict[trace] if isinstance(trace,str) else trace fig['data'][idx]['xaxis']=anchor fig['data'][idx]['yaxis']=y for trace in traces: if on: if on not in fig.axis['def']: raise Exception('"on" axis does not exists: {0}'.format(on)) update_data(trace,y=on) else: curr_x,curr_y=fig.axis['ref'][trace] domain='[0.0, 1.0]' if 'domain' not in fig.axis['def'][curr_y] else str(fig.axis['def'][curr_y]['domain']) try: new_axis=fig.axis['dom']['y'][domain][side] except KeyError: axis=fig.axis['def'][curr_y].copy() ### check overlaying values axis.update(title=title,overlaying=curr_y,side=side,anchor=curr_x) axis_idx=str(fig.axis['len']['y']+1) fig['layout']['yaxis{0}'.format(axis_idx)]=axis new_axis='y{0}'.format(axis_idx) update_data(trace,y=new_axis) for k in list(fig.axis['def'].keys()): id='{0}axis{1}'.format(k[0],k[-1:]) if k not in fig.axis['ref_axis']: try: del fig['layout'][id] except KeyError: pass return fig
python
def _set_axis(self,traces,on=None,side='right',title=''): """ Sets the axis in which each trace should appear If the axis doesn't exist then a new axis is created Parameters: ----------- traces : list(str) List of trace names on : string The axis in which the traces should be placed. If this is not indicated then a new axis will be created side : string Side where the axis will be placed 'left' 'right' title : string Sets the title of the axis Applies only to new axis """ fig={} fig_cpy=fig_to_dict(self).copy() fig['data']=fig_cpy['data'] fig['layout']=fig_cpy['layout'] fig=Figure(fig) traces=make_list(traces) def update_data(trace,y): anchor=fig.axis['def'][y]['anchor'] if 'anchor' in fig.axis['def'][y] else 'x1' idx=fig.trace_dict[trace] if isinstance(trace,str) else trace fig['data'][idx]['xaxis']=anchor fig['data'][idx]['yaxis']=y for trace in traces: if on: if on not in fig.axis['def']: raise Exception('"on" axis does not exists: {0}'.format(on)) update_data(trace,y=on) else: curr_x,curr_y=fig.axis['ref'][trace] domain='[0.0, 1.0]' if 'domain' not in fig.axis['def'][curr_y] else str(fig.axis['def'][curr_y]['domain']) try: new_axis=fig.axis['dom']['y'][domain][side] except KeyError: axis=fig.axis['def'][curr_y].copy() ### check overlaying values axis.update(title=title,overlaying=curr_y,side=side,anchor=curr_x) axis_idx=str(fig.axis['len']['y']+1) fig['layout']['yaxis{0}'.format(axis_idx)]=axis new_axis='y{0}'.format(axis_idx) update_data(trace,y=new_axis) for k in list(fig.axis['def'].keys()): id='{0}axis{1}'.format(k[0],k[-1:]) if k not in fig.axis['ref_axis']: try: del fig['layout'][id] except KeyError: pass return fig
['def', '_set_axis', '(', 'self', ',', 'traces', ',', 'on', '=', 'None', ',', 'side', '=', "'right'", ',', 'title', '=', "''", ')', ':', 'fig', '=', '{', '}', 'fig_cpy', '=', 'fig_to_dict', '(', 'self', ')', '.', 'copy', '(', ')', 'fig', '[', "'data'", ']', '=', 'fig_cpy', '[', "'data'", ']', 'fig', '[', "'layout'", ']', '=', 'fig_cpy', '[', "'layout'", ']', 'fig', '=', 'Figure', '(', 'fig', ')', 'traces', '=', 'make_list', '(', 'traces', ')', 'def', 'update_data', '(', 'trace', ',', 'y', ')', ':', 'anchor', '=', 'fig', '.', 'axis', '[', "'def'", ']', '[', 'y', ']', '[', "'anchor'", ']', 'if', "'anchor'", 'in', 'fig', '.', 'axis', '[', "'def'", ']', '[', 'y', ']', 'else', "'x1'", 'idx', '=', 'fig', '.', 'trace_dict', '[', 'trace', ']', 'if', 'isinstance', '(', 'trace', ',', 'str', ')', 'else', 'trace', 'fig', '[', "'data'", ']', '[', 'idx', ']', '[', "'xaxis'", ']', '=', 'anchor', 'fig', '[', "'data'", ']', '[', 'idx', ']', '[', "'yaxis'", ']', '=', 'y', 'for', 'trace', 'in', 'traces', ':', 'if', 'on', ':', 'if', 'on', 'not', 'in', 'fig', '.', 'axis', '[', "'def'", ']', ':', 'raise', 'Exception', '(', '\'"on" axis does not exists: {0}\'', '.', 'format', '(', 'on', ')', ')', 'update_data', '(', 'trace', ',', 'y', '=', 'on', ')', 'else', ':', 'curr_x', ',', 'curr_y', '=', 'fig', '.', 'axis', '[', "'ref'", ']', '[', 'trace', ']', 'domain', '=', "'[0.0, 1.0]'", 'if', "'domain'", 'not', 'in', 'fig', '.', 'axis', '[', "'def'", ']', '[', 'curr_y', ']', 'else', 'str', '(', 'fig', '.', 'axis', '[', "'def'", ']', '[', 'curr_y', ']', '[', "'domain'", ']', ')', 'try', ':', 'new_axis', '=', 'fig', '.', 'axis', '[', "'dom'", ']', '[', "'y'", ']', '[', 'domain', ']', '[', 'side', ']', 'except', 'KeyError', ':', 'axis', '=', 'fig', '.', 'axis', '[', "'def'", ']', '[', 'curr_y', ']', '.', 'copy', '(', ')', '### check overlaying values', 'axis', '.', 'update', '(', 'title', '=', 'title', ',', 'overlaying', '=', 'curr_y', ',', 'side', '=', 'side', ',', 'anchor', '=', 'curr_x', ')', 'axis_idx', '=', 'str', '(', 'fig', '.', 'axis', '[', "'len'", ']', '[', "'y'", ']', '+', '1', ')', 'fig', '[', "'layout'", ']', '[', "'yaxis{0}'", '.', 'format', '(', 'axis_idx', ')', ']', '=', 'axis', 'new_axis', '=', "'y{0}'", '.', 'format', '(', 'axis_idx', ')', 'update_data', '(', 'trace', ',', 'y', '=', 'new_axis', ')', 'for', 'k', 'in', 'list', '(', 'fig', '.', 'axis', '[', "'def'", ']', '.', 'keys', '(', ')', ')', ':', 'id', '=', "'{0}axis{1}'", '.', 'format', '(', 'k', '[', '0', ']', ',', 'k', '[', '-', '1', ':', ']', ')', 'if', 'k', 'not', 'in', 'fig', '.', 'axis', '[', "'ref_axis'", ']', ':', 'try', ':', 'del', 'fig', '[', "'layout'", ']', '[', 'id', ']', 'except', 'KeyError', ':', 'pass', 'return', 'fig']
Sets the axis in which each trace should appear If the axis doesn't exist then a new axis is created Parameters: ----------- traces : list(str) List of trace names on : string The axis in which the traces should be placed. If this is not indicated then a new axis will be created side : string Side where the axis will be placed 'left' 'right' title : string Sets the title of the axis Applies only to new axis
['Sets', 'the', 'axis', 'in', 'which', 'each', 'trace', 'should', 'appear', 'If', 'the', 'axis', 'doesn', 't', 'exist', 'then', 'a', 'new', 'axis', 'is', 'created']
train
https://github.com/santosjorge/cufflinks/blob/ca1cbf93998dc793d0b1f8ac30fe1f2bd105f63a/cufflinks/tools.py#L1117-L1179
8,479
hvac/hvac
hvac/api/system_backend/key.py
Key.cancel_root_generation
def cancel_root_generation(self): """Cancel any in-progress root generation attempt. This clears any progress made. This must be called to change the OTP or PGP key being used. Supported methods: DELETE: /sys/generate-root/attempt. Produces: 204 (empty body) :return: The response of the request. :rtype: request.Response """ api_path = '/v1/sys/generate-root/attempt' response = self._adapter.delete( url=api_path, ) return response
python
def cancel_root_generation(self): """Cancel any in-progress root generation attempt. This clears any progress made. This must be called to change the OTP or PGP key being used. Supported methods: DELETE: /sys/generate-root/attempt. Produces: 204 (empty body) :return: The response of the request. :rtype: request.Response """ api_path = '/v1/sys/generate-root/attempt' response = self._adapter.delete( url=api_path, ) return response
['def', 'cancel_root_generation', '(', 'self', ')', ':', 'api_path', '=', "'/v1/sys/generate-root/attempt'", 'response', '=', 'self', '.', '_adapter', '.', 'delete', '(', 'url', '=', 'api_path', ',', ')', 'return', 'response']
Cancel any in-progress root generation attempt. This clears any progress made. This must be called to change the OTP or PGP key being used. Supported methods: DELETE: /sys/generate-root/attempt. Produces: 204 (empty body) :return: The response of the request. :rtype: request.Response
['Cancel', 'any', 'in', '-', 'progress', 'root', 'generation', 'attempt', '.']
train
https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/system_backend/key.py#L83-L98
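A hypothetical call, assuming an authenticated hvac Client; hvac exposes the system-backend methods under client.sys:

# import hvac
# client = hvac.Client(url='http://localhost:8200', token='...')
# client.sys.cancel_root_generation()  # 204, empty body on success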
8,480
tcalmant/ipopo
pelix/ipopo/instance.py
StoredInstance.__callback
def __callback(self, event, *args, **kwargs): # type: (str, *Any, **Any) -> Any """ Calls the registered method in the component for the given event :param event: An event (IPOPO_CALLBACK_VALIDATE, ...) :return: The callback result, or None :raise Exception: Something went wrong """ comp_callback = self.context.get_callback(event) if not comp_callback: # No registered callback return True # Call it result = comp_callback(self.instance, *args, **kwargs) if result is None: # Special case, if the call back returns nothing return True return result
python
def __callback(self, event, *args, **kwargs): # type: (str, *Any, **Any) -> Any """ Calls the registered method in the component for the given event :param event: An event (IPOPO_CALLBACK_VALIDATE, ...) :return: The callback result, or None :raise Exception: Something went wrong """ comp_callback = self.context.get_callback(event) if not comp_callback: # No registered callback return True # Call it result = comp_callback(self.instance, *args, **kwargs) if result is None: # Special case, if the call back returns nothing return True return result
['def', '__callback', '(', 'self', ',', 'event', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '# type: (str, *Any, **Any) -> Any', 'comp_callback', '=', 'self', '.', 'context', '.', 'get_callback', '(', 'event', ')', 'if', 'not', 'comp_callback', ':', '# No registered callback', 'return', 'True', '# Call it', 'result', '=', 'comp_callback', '(', 'self', '.', 'instance', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'if', 'result', 'is', 'None', ':', '# Special case, if the call back returns nothing', 'return', 'True', 'return', 'result']
Calls the registered method in the component for the given event :param event: An event (IPOPO_CALLBACK_VALIDATE, ...) :return: The callback result, or None :raise Exception: Something went wrong
['Calls', 'the', 'registered', 'method', 'in', 'the', 'component', 'for', 'the', 'given', 'event']
train
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/instance.py#L535-L555
8,481
IvanMalison/okcupyd
okcupyd/profile_copy.py
Copy.essays
def essays(self): """Copy essays from the source profile to the destination profile.""" for essay_name in self.dest_user.profile.essays.essay_names: setattr(self.dest_user.profile.essays, essay_name, getattr(self.source_profile.essays, essay_name))
python
def essays(self): """Copy essays from the source profile to the destination profile.""" for essay_name in self.dest_user.profile.essays.essay_names: setattr(self.dest_user.profile.essays, essay_name, getattr(self.source_profile.essays, essay_name))
['def', 'essays', '(', 'self', ')', ':', 'for', 'essay_name', 'in', 'self', '.', 'dest_user', '.', 'profile', '.', 'essays', '.', 'essay_names', ':', 'setattr', '(', 'self', '.', 'dest_user', '.', 'profile', '.', 'essays', ',', 'essay_name', ',', 'getattr', '(', 'self', '.', 'source_profile', '.', 'essays', ',', 'essay_name', ')', ')']
Copy essays from the source profile to the destination profile.
['Copy', 'essays', 'from', 'the', 'source', 'profile', 'to', 'the', 'destination', 'profile', '.']
train
https://github.com/IvanMalison/okcupyd/blob/46f4eaa9419098f6c299738ce148af55c64deb64/okcupyd/profile_copy.py#L115-L119
8,482
bosth/plpygis
plpygis/geometry.py
Geometry.from_shapely
def from_shapely(sgeom, srid=None): """ Create a Geometry from a Shapely geometry and the specified SRID. The Shapely geometry will not be modified. """ if SHAPELY: WKBWriter.defaults["include_srid"] = True if srid: lgeos.GEOSSetSRID(sgeom._geom, srid) return Geometry(sgeom.wkb_hex) else: raise DependencyError("Shapely")
python
def from_shapely(sgeom, srid=None): """ Create a Geometry from a Shapely geometry and the specified SRID. The Shapely geometry will not be modified. """ if SHAPELY: WKBWriter.defaults["include_srid"] = True if srid: lgeos.GEOSSetSRID(sgeom._geom, srid) return Geometry(sgeom.wkb_hex) else: raise DependencyError("Shapely")
['def', 'from_shapely', '(', 'sgeom', ',', 'srid', '=', 'None', ')', ':', 'if', 'SHAPELY', ':', 'WKBWriter', '.', 'defaults', '[', '"include_srid"', ']', '=', 'True', 'if', 'srid', ':', 'lgeos', '.', 'GEOSSetSRID', '(', 'sgeom', '.', '_geom', ',', 'srid', ')', 'return', 'Geometry', '(', 'sgeom', '.', 'wkb_hex', ')', 'else', ':', 'raise', 'DependencyError', '(', '"Shapely"', ')']
Create a Geometry from a Shapely geometry and the specified SRID. The Shapely geometry will not be modified.
['Create', 'a', 'Geometry', 'from', 'a', 'Shapely', 'geometry', 'and', 'the', 'specified', 'SRID', '.']
train
https://github.com/bosth/plpygis/blob/9469cc469df4c8cd407de158903d5465cda804ea/plpygis/geometry.py#L105-L117
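A hypothetical round-trip, assuming Shapely is installed; the point coordinates and SRID 4326 are chosen arbitrarily:

# from shapely.geometry import Point
# geom = Geometry.from_shapely(Point(1.0, 2.0), srid=4326)
# The SRID travels inside the WKB hex that the Geometry is built from.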
8,483
OnroerendErfgoed/pyramid_urireferencer
pyramid_urireferencer/models.py
Item.load_from_json
def load_from_json(data): """ Load a :class:`Item` from a dictionary or string (that will be parsed as json) """ if isinstance(data, str): data = json.loads(data) return Item(data['title'], data['uri'])
python
def load_from_json(data): """ Load a :class:`Item` from a dictionary or string (that will be parsed as json) """ if isinstance(data, str): data = json.loads(data) return Item(data['title'], data['uri'])
['def', 'load_from_json', '(', 'data', ')', ':', 'if', 'isinstance', '(', 'data', ',', 'str', ')', ':', 'data', '=', 'json', '.', 'loads', '(', 'data', ')', 'return', 'Item', '(', 'data', '[', "'title'", ']', ',', 'data', '[', "'uri'", ']', ')']
Load a :class:`Item` from a dictionary or string (that will be parsed as json)
['Load', 'a', ':', 'class', ':', 'Item', 'from', 'a', 'dictionary', 'or', 'string', '(', 'that', 'will', 'be', 'parsed', 'as', 'json', ')']
train
https://github.com/OnroerendErfgoed/pyramid_urireferencer/blob/c6ee4ba863e32ced304b9cf00f3f5b450757a29a/pyramid_urireferencer/models.py#L114-L121
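load_from_json accepts either an already-parsed dict or a JSON string; both hypothetical calls below are equivalent (the title and uri are invented):

# item = Item.load_from_json({'title': 'Church tower', 'uri': 'https://id.example.org/1'})
# item = Item.load_from_json('{"title": "Church tower", "uri": "https://id.example.org/1"}')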
8,484
andreikop/qutepart
qutepart/rectangularselection.py
RectangularSelection._visibleToRealColumn
def _visibleToRealColumn(self, text, visiblePos): """If \t is used, the real position of a symbol in the block and its visible position differ. This function converts visible to real. A bigger value is returned if visiblePos is in the middle of a \t; None if the text is too short """ if visiblePos == 0: return 0 elif not '\t' in text: return visiblePos else: currentIndex = 1 for currentVisiblePos in self._visibleCharPositionGenerator(text): if currentVisiblePos >= visiblePos: return currentIndex - 1 currentIndex += 1 return None
python
def _visibleToRealColumn(self, text, visiblePos): """If \t is used, the real position of a symbol in the block and its visible position differ. This function converts visible to real. A bigger value is returned if visiblePos is in the middle of a \t; None if the text is too short """ if visiblePos == 0: return 0 elif not '\t' in text: return visiblePos else: currentIndex = 1 for currentVisiblePos in self._visibleCharPositionGenerator(text): if currentVisiblePos >= visiblePos: return currentIndex - 1 currentIndex += 1 return None
['def', '_visibleToRealColumn', '(', 'self', ',', 'text', ',', 'visiblePos', ')', ':', 'if', 'visiblePos', '==', '0', ':', 'return', '0', 'elif', 'not', "'\\t'", 'in', 'text', ':', 'return', 'visiblePos', 'else', ':', 'currentIndex', '=', '1', 'for', 'currentVisiblePos', 'in', 'self', '.', '_visibleCharPositionGenerator', '(', 'text', ')', ':', 'if', 'currentVisiblePos', '>=', 'visiblePos', ':', 'return', 'currentIndex', '-', '1', 'currentIndex', '+=', '1', 'return', 'None']
If \t is used, the real position of a symbol in the block and its visible position differ. This function converts visible to real. A bigger value is returned if visiblePos is in the middle of a \t; None if the text is too short
['If', '\\', 't', 'is', 'used', 'the', 'real', 'position', 'of', 'a', 'symbol', 'in', 'the', 'block', 'and', 'its', 'visible', 'position', 'differ', '.', 'This', 'function', 'converts', 'visible', 'to', 'real', '.', 'A', 'bigger', 'value', 'is', 'returned', 'if', 'visiblePos', 'is', 'in', 'the', 'middle', 'of', 'a', '\\', 't', ';', 'None', 'if', 'the', 'text', 'is', 'too', 'short']
train
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/rectangularselection.py#L100-L116
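The visible/real distinction exists because a tab occupies one real character but several visible columns; str.expandtabs shows the effect, with a tab width of 4 assumed for illustration:

text = 'a\tb'
print(len(text))                 # 3 real character positions
print(len(text.expandtabs(4)))   # 5 visible positions: the tab expands to the next stop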
8,485
lingthio/Flask-User
flask_user/user_mixin.py
UserMixin.get_id
def get_id(self): """Converts a User ID and parts of a User password hash to a token.""" # This function is used by Flask-Login to store a User ID securely as a browser cookie. # The last part of the password is included to invalidate tokens when the password changes. # user_id and password_ends_with are encrypted, timestamped and signed. # This function works in tandem with UserMixin.get_user_by_token() user_manager = current_app.user_manager user_id = self.id password_ends_with = '' if user_manager.USER_ENABLE_AUTH0 else self.password[-8:] user_token = user_manager.generate_token( user_id, # User ID password_ends_with, # Last 8 characters of user password ) # print("UserMixin.get_id: ID:", self.id, "token:", user_token) return user_token
python
def get_id(self): """Converts a User ID and parts of a User password hash to a token.""" # This function is used by Flask-Login to store a User ID securely as a browser cookie. # The last part of the password is included to invalidate tokens when the password changes. # user_id and password_ends_with are encrypted, timestamped and signed. # This function works in tandem with UserMixin.get_user_by_token() user_manager = current_app.user_manager user_id = self.id password_ends_with = '' if user_manager.USER_ENABLE_AUTH0 else self.password[-8:] user_token = user_manager.generate_token( user_id, # User ID password_ends_with, # Last 8 characters of user password ) # print("UserMixin.get_id: ID:", self.id, "token:", user_token) return user_token
['def', 'get_id', '(', 'self', ')', ':', '# This function is used by Flask-Login to store a User ID securely as a browser cookie.', '# The last part of the password is included to invalidate tokens when the password changes.', '# user_id and password_ends_with are encrypted, timestamped and signed.', '# This function works in tandem with UserMixin.get_user_by_token()', 'user_manager', '=', 'current_app', '.', 'user_manager', 'user_id', '=', 'self', '.', 'id', 'password_ends_with', '=', "''", 'if', 'user_manager', '.', 'USER_ENABLE_AUTH0', 'else', 'self', '.', 'password', '[', '-', '8', ':', ']', 'user_token', '=', 'user_manager', '.', 'generate_token', '(', 'user_id', ',', '# User ID', 'password_ends_with', ',', '# Last 8 characters of user password', ')', '# print("UserMixin.get_id: ID:", self.id, "token:", user_token)', 'return', 'user_token']
Converts a User ID and parts of a User password hash to a token.
['Converts', 'a', 'User', 'ID', 'and', 'parts', 'of', 'a', 'User', 'password', 'hash', 'to', 'a', 'token', '.']
train
https://github.com/lingthio/Flask-User/blob/a379fa0a281789618c484b459cb41236779b95b1/flask_user/user_mixin.py#L16-L32
8,486
jpscaletti/authcode
authcode/wsgi/werkzeug.py
get_from_headers
def get_from_headers(request, key):
    """Try to read a value named ``key`` from the headers.
    """
    value = request.headers.get(key)
    return to_native(value)
python
def get_from_headers(request, key):
    """Try to read a value named ``key`` from the headers.
    """
    value = request.headers.get(key)
    return to_native(value)
['def', 'get_from_headers', '(', 'request', ',', 'key', ')', ':', 'value', '=', 'request', '.', 'headers', '.', 'get', '(', 'key', ')', 'return', 'to_native', '(', 'value', ')']
Try to read a value named ``key`` from the headers.
['Try', 'to', 'read', 'a', 'value', 'named', 'key', 'from', 'the', 'headers', '.']
train
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/wsgi/werkzeug.py#L63-L67
8,487
cytoscape/py2cytoscape
py2cytoscape/cyrest/diffusion.py
diffusion.diffuse_advanced
def diffuse_advanced(self, heatColumnName=None, time=None, verbose=False):
    """
    Diffusion will send the selected network view and its selected nodes to
    a web-based REST service to calculate network propagation. Results are
    returned and represented by columns in the node table. Columns are
    created for each execution of Diffusion and their names are returned in
    the response.

    :param heatColumnName (string, optional): A node column name intended to
        override the default table column 'diffusion_input'. This represents
        the query vector and corresponds to h in the diffusion equation. =
        ['HEKScore', 'JurkatScore', '(Use selected nodes)']
    :param time (string, optional): The extent of spread over the network.
        This corresponds to t in the diffusion equation.
    :param verbose: print more
    """
    PARAMS = set_param(["heatColumnName", "time"], [heatColumnName, time])
    response = api(url=self.__url + "/diffuse_advanced", PARAMS=PARAMS,
                   method="POST", verbose=verbose)
    return response
python
def diffuse_advanced(self, heatColumnName=None, time=None, verbose=False):
    """
    Diffusion will send the selected network view and its selected nodes to
    a web-based REST service to calculate network propagation. Results are
    returned and represented by columns in the node table. Columns are
    created for each execution of Diffusion and their names are returned in
    the response.

    :param heatColumnName (string, optional): A node column name intended to
        override the default table column 'diffusion_input'. This represents
        the query vector and corresponds to h in the diffusion equation. =
        ['HEKScore', 'JurkatScore', '(Use selected nodes)']
    :param time (string, optional): The extent of spread over the network.
        This corresponds to t in the diffusion equation.
    :param verbose: print more
    """
    PARAMS = set_param(["heatColumnName", "time"], [heatColumnName, time])
    response = api(url=self.__url + "/diffuse_advanced", PARAMS=PARAMS,
                   method="POST", verbose=verbose)
    return response
['def', 'diffuse_advanced', '(', 'self', ',', 'heatColumnName', '=', 'None', ',', 'time', '=', 'None', ',', 'verbose', '=', 'False', ')', ':', 'PARAMS', '=', 'set_param', '(', '[', '"heatColumnName"', ',', '"time"', ']', ',', '[', 'heatColumnName', ',', 'time', ']', ')', 'response', '=', 'api', '(', 'url', '=', 'self', '.', '__url', '+', '"/diffuse_advanced"', ',', 'PARAMS', '=', 'PARAMS', ',', 'method', '=', '"POST"', ',', 'verbose', '=', 'verbose', ')', 'return', 'response']
Diffusion will send the selected network view and its selected nodes to
a web-based REST service to calculate network propagation. Results are
returned and represented by columns in the node table. Columns are
created for each execution of Diffusion and their names are returned in
the response.

:param heatColumnName (string, optional): A node column name intended to
    override the default table column 'diffusion_input'. This represents
    the query vector and corresponds to h in the diffusion equation. =
    ['HEKScore', 'JurkatScore', '(Use selected nodes)']
:param time (string, optional): The extent of spread over the network.
    This corresponds to t in the diffusion equation.
:param verbose: print more
['Diffusion', 'will', 'send', 'the', 'selected', 'network', 'view', 'and', 'its', 'selected', 'nodes', 'to', 'a', 'web', '-', 'based', 'REST', 'service', 'to', 'calculate', 'network', 'propagation', '.', 'Results', 'are', 'returned', 'and', 'represented', 'by', 'columns', 'in', 'the', 'node', 'table', '.', 'Columns', 'are', 'created', 'for', 'each', 'execution', 'of', 'Diffusion', 'and', 'their', 'names', 'are', 'returned', 'in', 'the', 'response', '.']
train
https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/diffusion.py#L30-L48
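
A usage sketch, assuming a Cytoscape instance is running locally with the cyREST and Diffusion apps installed, and that the cyclient helper is used at its default endpoint; the column name and time value are placeholders.

from py2cytoscape import cyrest

cytoscape = cyrest.cyclient()  # assumed default: http://localhost:1234/v1
result = cytoscape.diffusion.diffuse_advanced(
    heatColumnName="HEKScore",  # placeholder node column
    time="0.1",                 # extent of spread, passed as a string
)
print(result)  # reports the node-table columns created by this run
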
8,488
saltstack/salt
salt/states/powerpath.py
license_present
def license_present(name):
    '''
    Ensures that the specified PowerPath license key is present
    on the host.

    name
        The license key to ensure is present
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}

    if not __salt__['powerpath.has_powerpath']():
        ret['result'] = False
        ret['comment'] = 'PowerPath is not installed.'
        return ret

    licenses = [l['key'] for l in __salt__['powerpath.list_licenses']()]

    if name in licenses:
        ret['result'] = True
        ret['comment'] = 'License key {0} already present'.format(name)
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'License key {0} is set to be added'.format(name)
        return ret

    data = __salt__['powerpath.add_license'](name)
    if data['result']:
        ret['changes'] = {name: 'added'}
        ret['result'] = True
        ret['comment'] = data['output']
        return ret
    else:
        ret['result'] = False
        ret['comment'] = data['output']
        return ret
python
def license_present(name):
    '''
    Ensures that the specified PowerPath license key is present
    on the host.

    name
        The license key to ensure is present
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}

    if not __salt__['powerpath.has_powerpath']():
        ret['result'] = False
        ret['comment'] = 'PowerPath is not installed.'
        return ret

    licenses = [l['key'] for l in __salt__['powerpath.list_licenses']()]

    if name in licenses:
        ret['result'] = True
        ret['comment'] = 'License key {0} already present'.format(name)
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'License key {0} is set to be added'.format(name)
        return ret

    data = __salt__['powerpath.add_license'](name)
    if data['result']:
        ret['changes'] = {name: 'added'}
        ret['result'] = True
        ret['comment'] = data['output']
        return ret
    else:
        ret['result'] = False
        ret['comment'] = data['output']
        return ret
['def', 'license_present', '(', 'name', ')', ':', 'ret', '=', '{', "'name'", ':', 'name', ',', "'changes'", ':', '{', '}', ',', "'result'", ':', 'False', ',', "'comment'", ':', "''", '}', 'if', 'not', '__salt__', '[', "'powerpath.has_powerpath'", ']', '(', ')', ':', 'ret', '[', "'result'", ']', '=', 'False', 'ret', '[', "'comment'", ']', '=', "'PowerPath is not installed.'", 'return', 'ret', 'licenses', '=', '[', 'l', '[', "'key'", ']', 'for', 'l', 'in', '__salt__', '[', "'powerpath.list_licenses'", ']', '(', ')', ']', 'if', 'name', 'in', 'licenses', ':', 'ret', '[', "'result'", ']', '=', 'True', 'ret', '[', "'comment'", ']', '=', "'License key {0} already present'", '.', 'format', '(', 'name', ')', 'return', 'ret', 'if', '__opts__', '[', "'test'", ']', ':', 'ret', '[', "'result'", ']', '=', 'None', 'ret', '[', "'comment'", ']', '=', "'License key {0} is set to be added'", '.', 'format', '(', 'name', ')', 'return', 'ret', 'data', '=', '__salt__', '[', "'powerpath.add_license'", ']', '(', 'name', ')', 'if', 'data', '[', "'result'", ']', ':', 'ret', '[', "'changes'", ']', '=', '{', 'name', ':', "'added'", '}', 'ret', '[', "'result'", ']', '=', 'True', 'ret', '[', "'comment'", ']', '=', 'data', '[', "'output'", ']', 'return', 'ret', 'else', ':', 'ret', '[', "'result'", ']', '=', 'False', 'ret', '[', "'comment'", ']', '=', 'data', '[', "'output'", ']', 'return', 'ret']
Ensures that the specified PowerPath license key is present
on the host.

name
    The license key to ensure is present
['Ensures', 'that', 'the', 'specified', 'PowerPath', 'license', 'key', 'is', 'present', 'on', 'the', 'host', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/powerpath.py#L19-L58
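
In production this state runs from SLS files, but as a hedged Python harness one can stub the __salt__ and __opts__ mappings the module expects and observe the test-mode branch; the license keys below are placeholders.

# Hypothetical harness: exercise license_present() in test mode.
import salt.states.powerpath as powerpath

powerpath.__salt__ = {
    'powerpath.has_powerpath': lambda: True,
    'powerpath.list_licenses': lambda: [{'key': 'AAAA-1111'}],
}
powerpath.__opts__ = {'test': True}

print(powerpath.license_present('BBBB-2222'))
# -> result is None with comment 'License key BBBB-2222 is set to be added'
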
8,489
cloudmesh/cloudmesh-common
cloudmesh/common/util.py
writefile
def writefile(filename, content):
    """
    writes the content into the file

    :param filename: the filename
    :param content: the content
    :return:
    """
    with open(path_expand(filename), 'w') as outfile:
        outfile.write(content)
python
def writefile(filename, content):
    """
    writes the content into the file

    :param filename: the filename
    :param content: the content
    :return:
    """
    with open(path_expand(filename), 'w') as outfile:
        outfile.write(content)
['def', 'writefile', '(', 'filename', ',', 'content', ')', ':', 'with', 'open', '(', 'path_expand', '(', 'filename', ')', ',', "'w'", ')', 'as', 'outfile', ':', 'outfile', '.', 'write', '(', 'content', ')']
writes the content into the file

:param filename: the filename
:param content: the content
:return:
['writes', 'the', 'content', 'into', 'the', 'file', ':', 'param', 'filename', ':', 'the', 'filename', ':', 'param', 'content', ':', 'teh', 'content', ':', 'return', ':']
train
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/util.py#L330-L338
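
A short usage sketch. path_expand() resolves "~"-style paths, but open(..., 'w') does not create missing parent directories, so the target directory must already exist.

from cloudmesh.common.util import writefile

writefile("~/.cloudmesh/example.txt", "hello\n")  # directory assumed to exist
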
8,490
AliLozano/django-messages-extends
messages_extends/storages.py
PersistentStorage.add
def add(self, level, message, extra_tags='', *args, **kwargs):
    """
    Queues a message to be stored.

    The message is only queued if it contained something and its level is
    not less than the recording level (``self.level``).
    """
    if not message:
        return
    # Check that the message level is not less than the recording level.
    level = int(level)
    if level < self.level:
        return
    # Add the message.
    self.added_new = True
    message = Message(level, message, extra_tags=extra_tags)
    message = self.process_message(message, *args, **kwargs)
    if message:
        self._queued_messages.append(message)
python
def add(self, level, message, extra_tags='', *args, **kwargs):
    """
    Queues a message to be stored.

    The message is only queued if it contained something and its level is
    not less than the recording level (``self.level``).
    """
    if not message:
        return
    # Check that the message level is not less than the recording level.
    level = int(level)
    if level < self.level:
        return
    # Add the message.
    self.added_new = True
    message = Message(level, message, extra_tags=extra_tags)
    message = self.process_message(message, *args, **kwargs)
    if message:
        self._queued_messages.append(message)
['def', 'add', '(', 'self', ',', 'level', ',', 'message', ',', 'extra_tags', '=', "''", ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', 'not', 'message', ':', 'return', '# Check that the message level is not less than the recording level.', 'level', '=', 'int', '(', 'level', ')', 'if', 'level', '<', 'self', '.', 'level', ':', 'return', '# Add the message.', 'self', '.', 'added_new', '=', 'True', 'message', '=', 'Message', '(', 'level', ',', 'message', ',', 'extra_tags', '=', 'extra_tags', ')', 'message', '=', 'self', '.', 'process_message', '(', 'message', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'if', 'message', ':', 'self', '.', '_queued_messages', '.', 'append', '(', 'message', ')']
Queues a message to be stored. The message is only queued if it contained something and its level is not less than the recording level (``self.level``).
['Queues', 'a', 'message', 'to', 'be', 'stored', '.']
train
https://github.com/AliLozano/django-messages-extends/blob/141011981d44a6f28c6e82f9832815423b3b205f/messages_extends/storages.py#L193-L211
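
A usage sketch through Django's standard messages API, assuming the *_PERSISTENT level constants exposed by messages_extends (verify the exact names against messages_extends.constants).

from django.contrib import messages
from messages_extends import constants  # assumed constants module

def my_view(request):
    # Routed to PersistentStorage; kept until explicitly marked as read.
    messages.add_message(request, constants.WARNING_PERSISTENT,
                         "Your trial expires soon")
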
8,491
rosenbrockc/fortpy
fortpy/stats/calltree.py
_exec_callers
def _exec_callers(xinst, result):
    """Adds the dependency calls from the specified executable instance
    to the results dictionary.
    """
    for depkey, depval in xinst.dependencies.items():
        if depval.target is not None:
            if depval.target.name in result:
                if xinst not in result[depval.target.name]:
                    result[depval.target.name].append(xinst)
            else:
                result[depval.target.name] = [xinst]

    for xname, xvalue in xinst.executables:
        _exec_callers(xvalue, result)
python
def _exec_callers(xinst, result):
    """Adds the dependency calls from the specified executable instance
    to the results dictionary.
    """
    for depkey, depval in xinst.dependencies.items():
        if depval.target is not None:
            if depval.target.name in result:
                if xinst not in result[depval.target.name]:
                    result[depval.target.name].append(xinst)
            else:
                result[depval.target.name] = [xinst]

    for xname, xvalue in xinst.executables:
        _exec_callers(xvalue, result)
['def', '_exec_callers', '(', 'xinst', ',', 'result', ')', ':', 'for', 'depkey', ',', 'depval', 'in', 'xinst', '.', 'dependencies', '.', 'items', '(', ')', ':', 'if', 'depval', '.', 'target', 'is', 'not', 'None', ':', 'if', 'depval', '.', 'target', '.', 'name', 'in', 'result', ':', 'if', 'xinst', 'not', 'in', 'result', '[', 'depval', '.', 'target', '.', 'name', ']', ':', 'result', '[', 'depval', '.', 'target', '.', 'name', ']', '.', 'append', '(', 'xinst', ')', 'else', ':', 'result', '[', 'depval', '.', 'target', '.', 'name', ']', '=', '[', 'xinst', ']', 'for', 'xname', ',', 'xvalue', 'in', 'xinst', '.', 'executables', ':', '_exec_callers', '(', 'xvalue', ',', 'result', ')']
Adds the dependency calls from the specified executable instance to the results dictionary.
['Adds', 'the', 'dependency', 'calls', 'from', 'the', 'specified', 'executable', 'instance', 'to', 'the', 'results', 'dictionary', '.']
train
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/stats/calltree.py#L8-L21
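
The same reverse-call-map idea in a self-contained sketch over plain dicts: invert "who do I call?" into "who calls me?", recursing into nested units.

def reverse_calls(units, result=None):
    # Map each call target to the list of unit names that call it.
    result = {} if result is None else result
    for unit in units:
        for target in unit["calls"]:
            callers = result.setdefault(target, [])
            if unit["name"] not in callers:
                callers.append(unit["name"])
        reverse_calls(unit.get("children", []), result)
    return result

units = [{"name": "main", "calls": ["solve"], "children": [
    {"name": "solve", "calls": ["norm"], "children": []}]}]
print(reverse_calls(units))  # {'solve': ['main'], 'norm': ['solve']}
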
8,492
sassoftware/sas_kernel
sas_kernel/kernel.py
SASKernel.do_execute_direct
def do_execute_direct(self, code: str, silent: bool = False) -> [str, dict]:
    """
    This is the main method that takes code from the Jupyter cell and submits it to the SAS server.

    :param code: code from the cell
    :param silent:
    :return: str with either the log or the listing (LST) output
    """
    if not code.strip():
        return {'status': 'ok', 'execution_count': self.execution_count,
                'payload': [], 'user_expressions': {}}
    if self.mva is None:
        self._allow_stdin = True
        self._start_sas()
    if self.lst_len < 0:
        self._get_lst_len()

    if code.startswith('Obfuscated SAS Code'):
        logger.debug("decoding string")
        tmp1 = code.split()
        decode = base64.b64decode(tmp1[-1])
        code = decode.decode('utf-8')

    if code.startswith('showSASLog_11092015') == False and code.startswith("CompleteshowSASLog_11092015") == False:
        logger.debug("code type: " + str(type(code)))
        logger.debug("code length: " + str(len(code)))
        logger.debug("code string: " + code)
        if code.startswith("/*SASKernelTest*/"):
            res = self.mva.submit(code, "text")
        else:
            res = self.mva.submit(code, prompt=self.promptDict)
            self.promptDict = {}
        if res['LOG'].find("SAS process has terminated unexpectedly") > -1:
            print(res['LOG'], '\n' "Restarting SAS session on your behalf")
            self.do_shutdown(True)
            return res['LOG']
        output = res['LST']
        log = res['LOG']
        return self._which_display(log, output)
    elif code.startswith("CompleteshowSASLog_11092015") == True and code.startswith('showSASLog_11092015') == False:
        full_log = highlight(self.mva.saslog(), SASLogLexer(),
                             HtmlFormatter(full=True, style=SASLogStyle,
                                           lineseparator="<br>",
                                           title="Full SAS Log"))
        return full_log.replace('\n', ' ')
    else:
        return self.cachedlog.replace('\n', ' ')
python
def do_execute_direct(self, code: str, silent: bool = False) -> [str, dict]:
    """
    This is the main method that takes code from the Jupyter cell and submits it to the SAS server.

    :param code: code from the cell
    :param silent:
    :return: str with either the log or the listing (LST) output
    """
    if not code.strip():
        return {'status': 'ok', 'execution_count': self.execution_count,
                'payload': [], 'user_expressions': {}}
    if self.mva is None:
        self._allow_stdin = True
        self._start_sas()
    if self.lst_len < 0:
        self._get_lst_len()

    if code.startswith('Obfuscated SAS Code'):
        logger.debug("decoding string")
        tmp1 = code.split()
        decode = base64.b64decode(tmp1[-1])
        code = decode.decode('utf-8')

    if code.startswith('showSASLog_11092015') == False and code.startswith("CompleteshowSASLog_11092015") == False:
        logger.debug("code type: " + str(type(code)))
        logger.debug("code length: " + str(len(code)))
        logger.debug("code string: " + code)
        if code.startswith("/*SASKernelTest*/"):
            res = self.mva.submit(code, "text")
        else:
            res = self.mva.submit(code, prompt=self.promptDict)
            self.promptDict = {}
        if res['LOG'].find("SAS process has terminated unexpectedly") > -1:
            print(res['LOG'], '\n' "Restarting SAS session on your behalf")
            self.do_shutdown(True)
            return res['LOG']
        output = res['LST']
        log = res['LOG']
        return self._which_display(log, output)
    elif code.startswith("CompleteshowSASLog_11092015") == True and code.startswith('showSASLog_11092015') == False:
        full_log = highlight(self.mva.saslog(), SASLogLexer(),
                             HtmlFormatter(full=True, style=SASLogStyle,
                                           lineseparator="<br>",
                                           title="Full SAS Log"))
        return full_log.replace('\n', ' ')
    else:
        return self.cachedlog.replace('\n', ' ')
['def', 'do_execute_direct', '(', 'self', ',', 'code', ':', 'str', ',', 'silent', ':', 'bool', '=', 'False', ')', '->', '[', 'str', ',', 'dict', ']', ':', 'if', 'not', 'code', '.', 'strip', '(', ')', ':', 'return', '{', "'status'", ':', "'ok'", ',', "'execution_count'", ':', 'self', '.', 'execution_count', ',', "'payload'", ':', '[', ']', ',', "'user_expressions'", ':', '{', '}', '}', 'if', 'self', '.', 'mva', 'is', 'None', ':', 'self', '.', '_allow_stdin', '=', 'True', 'self', '.', '_start_sas', '(', ')', 'if', 'self', '.', 'lst_len', '<', '0', ':', 'self', '.', '_get_lst_len', '(', ')', 'if', 'code', '.', 'startswith', '(', "'Obfuscated SAS Code'", ')', ':', 'logger', '.', 'debug', '(', '"decoding string"', ')', 'tmp1', '=', 'code', '.', 'split', '(', ')', 'decode', '=', 'base64', '.', 'b64decode', '(', 'tmp1', '[', '-', '1', ']', ')', 'code', '=', 'decode', '.', 'decode', '(', "'utf-8'", ')', 'if', 'code', '.', 'startswith', '(', "'showSASLog_11092015'", ')', '==', 'False', 'and', 'code', '.', 'startswith', '(', '"CompleteshowSASLog_11092015"', ')', '==', 'False', ':', 'logger', '.', 'debug', '(', '"code type: "', '+', 'str', '(', 'type', '(', 'code', ')', ')', ')', 'logger', '.', 'debug', '(', '"code length: "', '+', 'str', '(', 'len', '(', 'code', ')', ')', ')', 'logger', '.', 'debug', '(', '"code string: "', '+', 'code', ')', 'if', 'code', '.', 'startswith', '(', '"/*SASKernelTest*/"', ')', ':', 'res', '=', 'self', '.', 'mva', '.', 'submit', '(', 'code', ',', '"text"', ')', 'else', ':', 'res', '=', 'self', '.', 'mva', '.', 'submit', '(', 'code', ',', 'prompt', '=', 'self', '.', 'promptDict', ')', 'self', '.', 'promptDict', '=', '{', '}', 'if', 'res', '[', "'LOG'", ']', '.', 'find', '(', '"SAS process has terminated unexpectedly"', ')', '>', '-', '1', ':', 'print', '(', 'res', '[', "'LOG'", ']', ',', "'\\n'", '"Restarting SAS session on your behalf"', ')', 'self', '.', 'do_shutdown', '(', 'True', ')', 'return', 'res', '[', "'LOG'", ']', 'output', '=', 'res', '[', "'LST'", ']', 'log', '=', 'res', '[', "'LOG'", ']', 'return', 'self', '.', '_which_display', '(', 'log', ',', 'output', ')', 'elif', 'code', '.', 'startswith', '(', '"CompleteshowSASLog_11092015"', ')', '==', 'True', 'and', 'code', '.', 'startswith', '(', "'showSASLog_11092015'", ')', '==', 'False', ':', 'full_log', '=', 'highlight', '(', 'self', '.', 'mva', '.', 'saslog', '(', ')', ',', 'SASLogLexer', '(', ')', ',', 'HtmlFormatter', '(', 'full', '=', 'True', ',', 'style', '=', 'SASLogStyle', ',', 'lineseparator', '=', '"<br>"', ',', 'title', '=', '"Full SAS Log"', ')', ')', 'return', 'full_log', '.', 'replace', '(', "'\\n'", ',', "' '", ')', 'else', ':', 'return', 'self', '.', 'cachedlog', '.', 'replace', '(', "'\\n'", ',', "' '", ')']
This is the main method that takes code from the Jupyter cell and submits it to the SAS server.

:param code: code from the cell
:param silent:
:return: str with either the log or the listing (LST) output
['This', 'is', 'the', 'main', 'method', 'that', 'takes', 'code', 'from', 'the', 'Jupyter', 'cell', 'and', 'submits', 'it', 'to', 'the', 'SAS', 'server']
train
https://github.com/sassoftware/sas_kernel/blob/ed63dceb9d1d51157b465f4892ffb793c1c32307/sas_kernel/kernel.py#L131-L179
8,493
secdev/scapy
scapy/layers/radius.py
RadiusAttr_Message_Authenticator.compute_message_authenticator
def compute_message_authenticator(radius_packet, packed_req_authenticator,
                                  shared_secret):
    """
    Computes the "Message-Authenticator" of a given RADIUS packet.
    """
    data = prepare_packed_data(radius_packet, packed_req_authenticator)
    radius_hmac = hmac.new(shared_secret, data, hashlib.md5)
    return radius_hmac.digest()
python
def compute_message_authenticator(radius_packet, packed_req_authenticator,
                                  shared_secret):
    """
    Computes the "Message-Authenticator" of a given RADIUS packet.
    """
    data = prepare_packed_data(radius_packet, packed_req_authenticator)
    radius_hmac = hmac.new(shared_secret, data, hashlib.md5)
    return radius_hmac.digest()
['def', 'compute_message_authenticator', '(', 'radius_packet', ',', 'packed_req_authenticator', ',', 'shared_secret', ')', ':', 'data', '=', 'prepare_packed_data', '(', 'radius_packet', ',', 'packed_req_authenticator', ')', 'radius_hmac', '=', 'hmac', '.', 'new', '(', 'shared_secret', ',', 'data', ',', 'hashlib', '.', 'md5', ')', 'return', 'radius_hmac', '.', 'digest', '(', ')']
Computes the "Message-Authenticator" of a given RADIUS packet.
['Computes', 'the', 'Message', '-', 'Authenticator', 'of', 'a', 'given', 'RADIUS', 'packet', '.']
train
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/radius.py#L566-L575
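
A standard-library sketch of the underlying computation. Per RFC 2869 the Message-Authenticator value inside the packed packet is zeroed before hashing; the packet bytes and shared secret below are placeholders.

import hashlib
import hmac

shared_secret = b"testing123"  # placeholder secret
# Placeholder Access-Request: code/id/length, request authenticator, and a
# Message-Authenticator attribute (type 80, length 18) zeroed for hashing.
packed_packet = (b"\x01\x2a\x00\x26" + b"\x00" * 16 +
                 b"\x50\x12" + b"\x00" * 16)

digest = hmac.new(shared_secret, packed_packet, hashlib.md5).digest()
print(digest.hex())  # the 16-byte value placed into the attribute
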
8,494
Cadene/pretrained-models.pytorch
pretrainedmodels/models/fbresnet.py
fbresnet152
def fbresnet152(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (str or None): If 'imagenet', returns a model pre-trained on ImageNet
    """
    model = FBResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes)
    if pretrained is not None:
        settings = pretrained_settings['fbresnet152'][pretrained]
        assert num_classes == settings['num_classes'], \
            "num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
        model.load_state_dict(model_zoo.load_url(settings['url']))
        model.input_space = settings['input_space']
        model.input_size = settings['input_size']
        model.input_range = settings['input_range']
        model.mean = settings['mean']
        model.std = settings['std']
    return model
python
def fbresnet152(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (str or None): If 'imagenet', returns a model pre-trained on ImageNet
    """
    model = FBResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes)
    if pretrained is not None:
        settings = pretrained_settings['fbresnet152'][pretrained]
        assert num_classes == settings['num_classes'], \
            "num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
        model.load_state_dict(model_zoo.load_url(settings['url']))
        model.input_space = settings['input_space']
        model.input_size = settings['input_size']
        model.input_range = settings['input_range']
        model.mean = settings['mean']
        model.std = settings['std']
    return model
['def', 'fbresnet152', '(', 'num_classes', '=', '1000', ',', 'pretrained', '=', "'imagenet'", ')', ':', 'model', '=', 'FBResNet', '(', 'Bottleneck', ',', '[', '3', ',', '8', ',', '36', ',', '3', ']', ',', 'num_classes', '=', 'num_classes', ')', 'if', 'pretrained', 'is', 'not', 'None', ':', 'settings', '=', 'pretrained_settings', '[', "'fbresnet152'", ']', '[', 'pretrained', ']', 'assert', 'num_classes', '==', 'settings', '[', "'num_classes'", ']', ',', '"num_classes should be {}, but is {}"', '.', 'format', '(', 'settings', '[', "'num_classes'", ']', ',', 'num_classes', ')', 'model', '.', 'load_state_dict', '(', 'model_zoo', '.', 'load_url', '(', 'settings', '[', "'url'", ']', ')', ')', 'model', '.', 'input_space', '=', 'settings', '[', "'input_space'", ']', 'model', '.', 'input_size', '=', 'settings', '[', "'input_size'", ']', 'model', '.', 'input_range', '=', 'settings', '[', "'input_range'", ']', 'model', '.', 'mean', '=', 'settings', '[', "'mean'", ']', 'model', '.', 'std', '=', 'settings', '[', "'std'", ']', 'return', 'model']
Constructs a ResNet-152 model.

Args:
    pretrained (str or None): If 'imagenet', returns a model pre-trained on ImageNet
['Constructs', 'a', 'ResNet', '-', '152', 'model', '.']
train
https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/fbresnet.py#L216-L233
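
A usage sketch via the package's name-based model registry; weights are downloaded on first use.

import torch
import pretrainedmodels

model = pretrainedmodels.__dict__['fbresnet152'](num_classes=1000,
                                                 pretrained='imagenet')
model.eval()

x = torch.randn(1, 3, 224, 224)  # matches model.input_size for this model
with torch.no_grad():
    logits = model(x)
print(logits.shape)  # torch.Size([1, 1000])
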
8,495
pantsbuild/pants
src/python/pants/goal/task_registrar.py
TaskRegistrar.install
def install(self, goal=None, first=False, replace=False, before=None, after=None):
    """Install the task in the specified goal (or a new goal with the same name as the task).

    The placement of the task in the execution list of the goal defaults to the end but can be
    influenced by specifying exactly one of the following arguments:

    :API: public

    :param first: Places this task 1st in the goal's execution list.
    :param replace: Replaces any existing tasks in the goal with this goal.
    :param before: Places this task before the named task in the goal's execution list.
    :param after: Places this task after the named task in the goal's execution list.
    :returns: The goal with task installed.
    """
    goal = Goal.by_name(goal or self.name)
    goal.install(self, first, replace, before, after)
    return goal
python
def install(self, goal=None, first=False, replace=False, before=None, after=None):
    """Install the task in the specified goal (or a new goal with the same name as the task).

    The placement of the task in the execution list of the goal defaults to the end but can be
    influenced by specifying exactly one of the following arguments:

    :API: public

    :param first: Places this task 1st in the goal's execution list.
    :param replace: Replaces any existing tasks in the goal with this goal.
    :param before: Places this task before the named task in the goal's execution list.
    :param after: Places this task after the named task in the goal's execution list.
    :returns: The goal with task installed.
    """
    goal = Goal.by_name(goal or self.name)
    goal.install(self, first, replace, before, after)
    return goal
['def', 'install', '(', 'self', ',', 'goal', '=', 'None', ',', 'first', '=', 'False', ',', 'replace', '=', 'False', ',', 'before', '=', 'None', ',', 'after', '=', 'None', ')', ':', 'goal', '=', 'Goal', '.', 'by_name', '(', 'goal', 'or', 'self', '.', 'name', ')', 'goal', '.', 'install', '(', 'self', ',', 'first', ',', 'replace', ',', 'before', ',', 'after', ')', 'return', 'goal']
Install the task in the specified goal (or a new goal with the same name as the task).

The placement of the task in the execution list of the goal defaults to the end but can be
influenced by specifying exactly one of the following arguments:

:API: public

:param first: Places this task 1st in the goal's execution list.
:param replace: Replaces any existing tasks in the goal with this goal.
:param before: Places this task before the named task in the goal's execution list.
:param after: Places this task after the named task in the goal's execution list.
:returns: The goal with task installed.
['Install', 'the', 'task', 'in', 'the', 'specified', 'goal', '(', 'or', 'a', 'new', 'goal', 'with', 'the', 'same', 'name', 'as', 'the', 'task', ')', '.']
train
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/goal/task_registrar.py#L59-L76
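
A sketch of how a plugin's register.py typically drives install(); the task names and Task subclasses below are hypothetical placeholders.

from pants.goal.task_registrar import TaskRegistrar as task

def register_goals():
    # MyCheckerTask / MySetupTask stand in for real Task subclasses.
    task(name='my-checker', action=MyCheckerTask).install('compile')
    # Pin placement with exactly one of first/replace/before/after:
    task(name='my-setup', action=MySetupTask).install('compile', first=True)
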
8,496
revarbat/pymuv
setup.py
find_data_files
def find_data_files(source, target, patterns):
    """
    Locates the specified data-files and returns the matches
    in a data_files compatible format.

    source is the root of the source data tree.
        Use '' or '.' for current directory.
    target is the root of the target data tree.
        Use '' or '.' for the distribution directory.
    patterns is a sequence of glob-patterns for the
        files you want to copy.
    """
    if glob.has_magic(source) or glob.has_magic(target):
        raise ValueError("Magic not allowed in src, target")
    ret = {}
    for pattern in patterns:
        pattern = os.path.join(source, pattern)
        for filename in glob.glob(pattern):
            if os.path.isfile(filename):
                targetpath = os.path.join(
                    target, os.path.relpath(filename, source)
                )
                path = os.path.dirname(targetpath)
                ret.setdefault(path, []).append(filename)
    return sorted(ret.items())
python
def find_data_files(source, target, patterns):
    """
    Locates the specified data-files and returns the matches
    in a data_files compatible format.

    source is the root of the source data tree.
        Use '' or '.' for current directory.
    target is the root of the target data tree.
        Use '' or '.' for the distribution directory.
    patterns is a sequence of glob-patterns for the
        files you want to copy.
    """
    if glob.has_magic(source) or glob.has_magic(target):
        raise ValueError("Magic not allowed in src, target")
    ret = {}
    for pattern in patterns:
        pattern = os.path.join(source, pattern)
        for filename in glob.glob(pattern):
            if os.path.isfile(filename):
                targetpath = os.path.join(
                    target, os.path.relpath(filename, source)
                )
                path = os.path.dirname(targetpath)
                ret.setdefault(path, []).append(filename)
    return sorted(ret.items())
['def', 'find_data_files', '(', 'source', ',', 'target', ',', 'patterns', ')', ':', 'if', 'glob', '.', 'has_magic', '(', 'source', ')', 'or', 'glob', '.', 'has_magic', '(', 'target', ')', ':', 'raise', 'ValueError', '(', '"Magic not allowed in src, target"', ')', 'ret', '=', '{', '}', 'for', 'pattern', 'in', 'patterns', ':', 'pattern', '=', 'os', '.', 'path', '.', 'join', '(', 'source', ',', 'pattern', ')', 'for', 'filename', 'in', 'glob', '.', 'glob', '(', 'pattern', ')', ':', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'filename', ')', ':', 'targetpath', '=', 'os', '.', 'path', '.', 'join', '(', 'target', ',', 'os', '.', 'path', '.', 'relpath', '(', 'filename', ',', 'source', ')', ')', 'path', '=', 'os', '.', 'path', '.', 'dirname', '(', 'targetpath', ')', 'ret', '.', 'setdefault', '(', 'path', ',', '[', ']', ')', '.', 'append', '(', 'filename', ')', 'return', 'sorted', '(', 'ret', '.', 'items', '(', ')', ')']
Locates the specified data-files and returns the matches
in a data_files compatible format.

source is the root of the source data tree.
    Use '' or '.' for current directory.
target is the root of the target data tree.
    Use '' or '.' for the distribution directory.
patterns is a sequence of glob-patterns for the
    files you want to copy.
['Locates', 'the', 'specified', 'data', '-', 'files', 'and', 'returns', 'the', 'matches', 'in', 'a', 'data_files', 'compatible', 'format', '.']
train
https://github.com/revarbat/pymuv/blob/cefa2f2d35fc32054b9595da5f3393f6cceee5e0/setup.py#L10-L34
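
A hypothetical setup.py usage. Because glob.glob() is called without recursive=True, '**' does not recurse here -- list each directory level as its own pattern.

data_files = find_data_files('assets', 'share/pymuv',
                             ['*.txt', 'examples/*.muv', 'icons/*.png'])
# -> e.g. [('share/pymuv', ['assets/README.txt']),
#          ('share/pymuv/examples', ['assets/examples/demo.muv']), ...]
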
8,497
gmr/tredis
tredis/keys.py
KeysMixin.expire
def expire(self, key, timeout):
    """Set a timeout on key. After the timeout has expired, the key will
    automatically be deleted. A key with an associated timeout is often
    said to be volatile in Redis terminology.

    The timeout is cleared only when the key is removed using the
    :meth:`~tredis.RedisClient.delete` method or overwritten using the
    :meth:`~tredis.RedisClient.set` or :meth:`~tredis.RedisClient.getset`
    methods. This means that all the operations that conceptually alter the
    value stored at the key without replacing it with a new one will leave
    the timeout untouched. For instance, incrementing the value of a key
    with :meth:`~tredis.RedisClient.incr`, pushing a new value into a list
    with :meth:`~tredis.RedisClient.lpush`, or altering the field value of
    a hash with :meth:`~tredis.RedisClient.hset` are all operations that
    will leave the timeout untouched.

    The timeout can also be cleared, turning the key back into a persistent
    key, using the :meth:`~tredis.RedisClient.persist` method.

    If a key is renamed with :meth:`~tredis.RedisClient.rename`, the
    associated time to live is transferred to the new key name.

    If a key is overwritten by :meth:`~tredis.RedisClient.rename`, like in
    the case of an existing key ``Key_A`` that is overwritten by a call
    like ``client.rename(Key_B, Key_A)`` it does not matter if the original
    ``Key_A`` had a timeout associated or not, the new key ``Key_A`` will
    inherit all the characteristics of ``Key_B``.

    .. note:: **Time complexity**: ``O(1)``

    :param key: The key to set an expiration for
    :type key: :class:`str`, :class:`bytes`
    :param int timeout: The number of seconds to set the timeout to
    :rtype: bool
    :raises: :exc:`~tredis.exceptions.RedisError`

    """
    return self._execute(
        [b'EXPIRE', key, ascii(timeout).encode('ascii')], 1)
python
def expire(self, key, timeout):
    """Set a timeout on key. After the timeout has expired, the key will
    automatically be deleted. A key with an associated timeout is often
    said to be volatile in Redis terminology.

    The timeout is cleared only when the key is removed using the
    :meth:`~tredis.RedisClient.delete` method or overwritten using the
    :meth:`~tredis.RedisClient.set` or :meth:`~tredis.RedisClient.getset`
    methods. This means that all the operations that conceptually alter the
    value stored at the key without replacing it with a new one will leave
    the timeout untouched. For instance, incrementing the value of a key
    with :meth:`~tredis.RedisClient.incr`, pushing a new value into a list
    with :meth:`~tredis.RedisClient.lpush`, or altering the field value of
    a hash with :meth:`~tredis.RedisClient.hset` are all operations that
    will leave the timeout untouched.

    The timeout can also be cleared, turning the key back into a persistent
    key, using the :meth:`~tredis.RedisClient.persist` method.

    If a key is renamed with :meth:`~tredis.RedisClient.rename`, the
    associated time to live is transferred to the new key name.

    If a key is overwritten by :meth:`~tredis.RedisClient.rename`, like in
    the case of an existing key ``Key_A`` that is overwritten by a call
    like ``client.rename(Key_B, Key_A)`` it does not matter if the original
    ``Key_A`` had a timeout associated or not, the new key ``Key_A`` will
    inherit all the characteristics of ``Key_B``.

    .. note:: **Time complexity**: ``O(1)``

    :param key: The key to set an expiration for
    :type key: :class:`str`, :class:`bytes`
    :param int timeout: The number of seconds to set the timeout to
    :rtype: bool
    :raises: :exc:`~tredis.exceptions.RedisError`

    """
    return self._execute(
        [b'EXPIRE', key, ascii(timeout).encode('ascii')], 1)
['def', 'expire', '(', 'self', ',', 'key', ',', 'timeout', ')', ':', 'return', 'self', '.', '_execute', '(', '[', "b'EXPIRE'", ',', 'key', ',', 'ascii', '(', 'timeout', ')', '.', 'encode', '(', "'ascii'", ')', ']', ',', '1', ')']
Set a timeout on key. After the timeout has expired, the key will
automatically be deleted. A key with an associated timeout is often
said to be volatile in Redis terminology.

The timeout is cleared only when the key is removed using the
:meth:`~tredis.RedisClient.delete` method or overwritten using the
:meth:`~tredis.RedisClient.set` or :meth:`~tredis.RedisClient.getset`
methods. This means that all the operations that conceptually alter the
value stored at the key without replacing it with a new one will leave
the timeout untouched. For instance, incrementing the value of a key
with :meth:`~tredis.RedisClient.incr`, pushing a new value into a list
with :meth:`~tredis.RedisClient.lpush`, or altering the field value of
a hash with :meth:`~tredis.RedisClient.hset` are all operations that
will leave the timeout untouched.

The timeout can also be cleared, turning the key back into a persistent
key, using the :meth:`~tredis.RedisClient.persist` method.

If a key is renamed with :meth:`~tredis.RedisClient.rename`, the
associated time to live is transferred to the new key name.

If a key is overwritten by :meth:`~tredis.RedisClient.rename`, like in
the case of an existing key ``Key_A`` that is overwritten by a call
like ``client.rename(Key_B, Key_A)`` it does not matter if the original
``Key_A`` had a timeout associated or not, the new key ``Key_A`` will
inherit all the characteristics of ``Key_B``.

.. note:: **Time complexity**: ``O(1)``

:param key: The key to set an expiration for
:type key: :class:`str`, :class:`bytes`
:param int timeout: The number of seconds to set the timeout to
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
['Set', 'a', 'timeout', 'on', 'key', '.', 'After', 'the', 'timeout', 'has', 'expired', 'the', 'key', 'will', 'automatically', 'be', 'deleted', '.', 'A', 'key', 'with', 'an', 'associated', 'timeout', 'is', 'often', 'said', 'to', 'be', 'volatile', 'in', 'Redis', 'terminology', '.']
train
https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/keys.py#L86-L126
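
A usage sketch under Tornado's coroutine model. The RedisClient constructor defaults are an assumption -- point the client at your Redis host and port as needed.

from tornado import gen, ioloop
import tredis

@gen.coroutine
def demo():
    client = tredis.RedisClient()  # assumed default host/port
    yield client.set(b'session:abc', b'user-42')
    was_set = yield client.expire(b'session:abc', 30)  # True on success
    print(was_set)

ioloop.IOLoop.current().run_sync(demo)
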
8,498
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.join
def join(self, other, **kwargs):
    """Joins this object with another object or a list of objects.

    Args:
        other: The other object(s) to join on.

    Returns:
        Joined objects.
    """
    if not isinstance(other, list):
        other = [other]
    return self._join_list_of_managers(other, **kwargs)
python
def join(self, other, **kwargs):
    """Joins this object with another object or a list of objects.

    Args:
        other: The other object(s) to join on.

    Returns:
        Joined objects.
    """
    if not isinstance(other, list):
        other = [other]
    return self._join_list_of_managers(other, **kwargs)
['def', 'join', '(', 'self', ',', 'other', ',', '*', '*', 'kwargs', ')', ':', 'if', 'not', 'isinstance', '(', 'other', ',', 'list', ')', ':', 'other', '=', '[', 'other', ']', 'return', 'self', '.', '_join_list_of_managers', '(', 'other', ',', '*', '*', 'kwargs', ')']
Joins this object with another object or a list of objects.

Args:
    other: The other object(s) to join on.

Returns:
    Joined objects.
['Joins', 'a', 'list', 'or', 'two', 'objects', 'together', '.']
train
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L291-L302
8,499
h2oai/datatable
ci/make_fast.py
build_sourcemap
def build_sourcemap(sources):
    """
    Similar to build_headermap(), but builds a dictionary of includes
    from the "source" files (i.e. ".c/.cc" files).
    """
    sourcemap = {}
    for sfile in sources:
        inc = find_includes(sfile)
        sourcemap[sfile] = set(inc)
    return sourcemap
python
def build_sourcemap(sources):
    """
    Similar to build_headermap(), but builds a dictionary of includes
    from the "source" files (i.e. ".c/.cc" files).
    """
    sourcemap = {}
    for sfile in sources:
        inc = find_includes(sfile)
        sourcemap[sfile] = set(inc)
    return sourcemap
['def', 'build_sourcemap', '(', 'sources', ')', ':', 'sourcemap', '=', '{', '}', 'for', 'sfile', 'in', 'sources', ':', 'inc', '=', 'find_includes', '(', 'sfile', ')', 'sourcemap', '[', 'sfile', ']', '=', 'set', '(', 'inc', ')', 'return', 'sourcemap']
Similar to build_headermap(), but builds a dictionary of includes from the "source" files (i.e. ".c/.cc" files).
['Similar', 'to', 'build_headermap', '()', 'but', 'builds', 'a', 'dictionary', 'of', 'includes', 'from', 'the', 'source', 'files', '(', 'i', '.', 'e', '.', '.', 'c', '/', '.', 'cc', 'files', ')', '.']
train
https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/ci/make_fast.py#L81-L90
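
A hypothetical sketch of the find_includes() helper this builder relies on: collect quoted #include targets from a single C/C++ source file (the real helper may differ).

import re

_INCLUDE_RE = re.compile(r'^\s*#\s*include\s*"([^"]+)"', re.MULTILINE)

def find_includes(filename):
    # Return the quoted include targets found in one source file.
    with open(filename, "r", encoding="utf-8") as src:
        return _INCLUDE_RE.findall(src.read())
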