repo: string (lengths 7 to 54)
path: string (lengths 4 to 192)
url: string (lengths 87 to 284)
code: string (lengths 78 to 104k)
code_tokens: list
docstring: string (lengths 1 to 46.9k)
docstring_tokens: list
language: string (1 distinct value)
partition: string (3 distinct values)
dropbox/stone
stone/backend.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/backend.py#L381-L451
def generate_multiline_list(
        self,
        items,                # type: typing.List[typing.Text]
        before='',            # type: typing.Text
        after='',             # type: typing.Text
        delim=('(', ')'),     # type: DelimTuple
        compact=True,         # type: bool
        sep=',',              # type: typing.Text
        skip_last_sep=False   # type: bool
        ):
    # type: (...) -> None
    """
    Given a list of items, emits one item per line.

    This is convenient for function prototypes and invocations, as well as
    for instantiating arrays, sets, and maps in some languages.

    TODO(kelkabany): A backend that uses tabs cannot be used with this if
        compact is false.

    Args:
        items (list[str]): Should contain the items to generate a list of.
        before (str): The string to come before the list of items.
        after (str): The string to follow the list of items.
        delim (str, str): The first element is added immediately following
            `before`. The second element is added prior to `after`.
        compact (bool): In compact mode, the enclosing parentheses are on the
            same lines as the first and last list item.
        sep (str): The string that follows each list item when compact is
            true. If compact is false, the separator is omitted for the last
            item.
        skip_last_sep (bool): When compact is false, whether the last line
            should have a trailing separator. Ignored when compact is true.
    """
    assert len(delim) == 2 and isinstance(delim[0], six.text_type) and \
        isinstance(delim[1], six.text_type), 'delim must be a tuple of two unicode strings.'

    if len(items) == 0:
        self.emit(before + delim[0] + delim[1] + after)
        return
    if len(items) == 1:
        self.emit(before + delim[0] + items[0] + delim[1] + after)
        return

    if compact:
        self.emit(before + delim[0] + items[0] + sep)

        def emit_list(items):
            items = items[1:]
            for (i, item) in enumerate(items):
                if i == len(items) - 1:
                    self.emit(item + delim[1] + after)
                else:
                    self.emit(item + sep)

        if before or delim[0]:
            with self.indent(len(before) + len(delim[0])):
                emit_list(items)
        else:
            emit_list(items)
    else:
        if before or delim[0]:
            self.emit(before + delim[0])
        with self.indent():
            for (i, item) in enumerate(items):
                if i == len(items) - 1 and skip_last_sep:
                    self.emit(item)
                else:
                    self.emit(item + sep)
        if delim[1] or after:
            self.emit(delim[1] + after)
        elif delim[1]:
            self.emit(delim[1])
[ "def", "generate_multiline_list", "(", "self", ",", "items", ",", "# type: typing.List[typing.Text]", "before", "=", "''", ",", "# type: typing.Text", "after", "=", "''", ",", "# type: typing.Text", "delim", "=", "(", "'('", ",", "')'", ")", ",", "# type: DelimTuple", "compact", "=", "True", ",", "# type: bool", "sep", "=", "','", ",", "# type: typing.Text", "skip_last_sep", "=", "False", "# type: bool", ")", ":", "# type: (...) -> None", "assert", "len", "(", "delim", ")", "==", "2", "and", "isinstance", "(", "delim", "[", "0", "]", ",", "six", ".", "text_type", ")", "and", "isinstance", "(", "delim", "[", "1", "]", ",", "six", ".", "text_type", ")", ",", "'delim must be a tuple of two unicode strings.'", "if", "len", "(", "items", ")", "==", "0", ":", "self", ".", "emit", "(", "before", "+", "delim", "[", "0", "]", "+", "delim", "[", "1", "]", "+", "after", ")", "return", "if", "len", "(", "items", ")", "==", "1", ":", "self", ".", "emit", "(", "before", "+", "delim", "[", "0", "]", "+", "items", "[", "0", "]", "+", "delim", "[", "1", "]", "+", "after", ")", "return", "if", "compact", ":", "self", ".", "emit", "(", "before", "+", "delim", "[", "0", "]", "+", "items", "[", "0", "]", "+", "sep", ")", "def", "emit_list", "(", "items", ")", ":", "items", "=", "items", "[", "1", ":", "]", "for", "(", "i", ",", "item", ")", "in", "enumerate", "(", "items", ")", ":", "if", "i", "==", "len", "(", "items", ")", "-", "1", ":", "self", ".", "emit", "(", "item", "+", "delim", "[", "1", "]", "+", "after", ")", "else", ":", "self", ".", "emit", "(", "item", "+", "sep", ")", "if", "before", "or", "delim", "[", "0", "]", ":", "with", "self", ".", "indent", "(", "len", "(", "before", ")", "+", "len", "(", "delim", "[", "0", "]", ")", ")", ":", "emit_list", "(", "items", ")", "else", ":", "emit_list", "(", "items", ")", "else", ":", "if", "before", "or", "delim", "[", "0", "]", ":", "self", ".", "emit", "(", "before", "+", "delim", "[", "0", "]", ")", "with", "self", ".", "indent", "(", ")", ":", "for", "(", "i", ",", "item", ")", "in", "enumerate", "(", "items", ")", ":", "if", "i", "==", "len", "(", "items", ")", "-", "1", "and", "skip_last_sep", ":", "self", ".", "emit", "(", "item", ")", "else", ":", "self", ".", "emit", "(", "item", "+", "sep", ")", "if", "delim", "[", "1", "]", "or", "after", ":", "self", ".", "emit", "(", "delim", "[", "1", "]", "+", "after", ")", "elif", "delim", "[", "1", "]", ":", "self", ".", "emit", "(", "delim", "[", "1", "]", ")" ]
Given a list of items, emits one item per line. This is convenient for function prototypes and invocations, as well as for instantiating arrays, sets, and maps in some languages. TODO(kelkabany): A backend that uses tabs cannot be used with this if compact is false. Args: items (list[str]): Should contain the items to generate a list of. before (str): The string to come before the list of items. after (str): The string to follow the list of items. delim (str, str): The first element is added immediately following `before`. The second element is added prior to `after`. compact (bool): In compact mode, the enclosing parentheses are on the same lines as the first and last list item. sep (str): The string that follows each list item when compact is true. If compact is false, the separator is omitted for the last item. skip_last_sep (bool): When compact is false, whether the last line should have a trailing separator. Ignored when compact is true.
[ "Given", "a", "list", "of", "items", "emits", "one", "item", "per", "line", "." ]
python
train
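The compact layout described in the docstring above is easiest to see with a concrete rendering. Below is a minimal standalone sketch (not the stone backend API; the function name and sample items are invented, and `after` handling is omitted) that reproduces the compact-mode alignment: the first item shares a line with `before` plus the opening delimiter, and later items are indented to line up under it.

def compact_list(items, before='', delim=('(', ')'), sep=','):
    # Render the compact layout as a list of output lines.
    if not items:
        return [before + delim[0] + delim[1]]
    if len(items) == 1:
        return [before + delim[0] + items[0] + delim[1]]
    pad = ' ' * (len(before) + len(delim[0]))
    lines = [before + delim[0] + items[0] + sep]
    lines += [pad + item + sep for item in items[1:-1]]
    lines.append(pad + items[-1] + delim[1])
    return lines

print('\n'.join(compact_list(['a', 'b', 'c'], before='func')))
# func(a,
#      b,
#      c)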
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L115-L130
def dump_stats(self, fdump, close=True):
    """
    Dump the logged data to a file.
    The argument `fdump` can be either a filename or an open file object
    that requires write access. `close` controls if the file is closed
    before leaving this method (the default behaviour).
    """
    if self.tracker:
        self.tracker.stop_periodic_snapshots()

    if isinstance(fdump, type('')):
        fdump = open(fdump, 'wb')
    pickle.dump(self.index, fdump, protocol=pickle.HIGHEST_PROTOCOL)
    pickle.dump(self.snapshots, fdump, protocol=pickle.HIGHEST_PROTOCOL)
    if close:
        fdump.close()
[ "def", "dump_stats", "(", "self", ",", "fdump", ",", "close", "=", "True", ")", ":", "if", "self", ".", "tracker", ":", "self", ".", "tracker", ".", "stop_periodic_snapshots", "(", ")", "if", "isinstance", "(", "fdump", ",", "type", "(", "''", ")", ")", ":", "fdump", "=", "open", "(", "fdump", ",", "'wb'", ")", "pickle", ".", "dump", "(", "self", ".", "index", ",", "fdump", ",", "protocol", "=", "pickle", ".", "HIGHEST_PROTOCOL", ")", "pickle", ".", "dump", "(", "self", ".", "snapshots", ",", "fdump", ",", "protocol", "=", "pickle", ".", "HIGHEST_PROTOCOL", ")", "if", "close", ":", "fdump", ".", "close", "(", ")" ]
Dump the logged data to a file. The argument `file` can be either a filename or an open file object that requires write access. `close` controls if the file is closed before leaving this method (the default behaviour).
[ "Dump", "the", "logged", "data", "to", "a", "file", ".", "The", "argument", "file", "can", "be", "either", "a", "filename", "or", "an", "open", "file", "object", "that", "requires", "write", "access", ".", "close", "controls", "if", "the", "file", "is", "closed", "before", "leaving", "this", "method", "(", "the", "default", "behaviour", ")", "." ]
python
train
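The dump pattern above (accept a filename or an open handle, pickle two objects back to back, optionally close) is generic enough to check in isolation. A self-contained sketch with made-up data and file name, assuming nothing from pympler:

import pickle

def dump_two(index, snapshots, fdump, close=True):
    # Accept either a filename or an already-open binary file object.
    if isinstance(fdump, str):
        fdump = open(fdump, 'wb')
    pickle.dump(index, fdump, protocol=pickle.HIGHEST_PROTOCOL)
    pickle.dump(snapshots, fdump, protocol=pickle.HIGHEST_PROTOCOL)
    if close:
        fdump.close()

dump_two({'MyClass': 0}, [('snapshot-0', 123)], 'stats.pdat')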
SheffieldML/GPy
GPy/util/netpbmfile.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/util/netpbmfile.py#L156-L160
def close(self):
    """Close open file. Future asarray calls might fail."""
    if self._filename and self._fh:
        self._fh.close()
        self._fh = None
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_filename", "and", "self", ".", "_fh", ":", "self", ".", "_fh", ".", "close", "(", ")", "self", ".", "_fh", "=", "None" ]
Close open file. Future asarray calls might fail.
[ "Close", "open", "file", ".", "Future", "asarray", "calls", "might", "fail", "." ]
python
train
msiemens/tinydb
tinydb/middlewares.py
https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/middlewares.py#L106-L112
def flush(self):
    """
    Flush all unwritten data to disk.
    """
    if self._cache_modified_count > 0:
        self.storage.write(self.cache)
        self._cache_modified_count = 0
[ "def", "flush", "(", "self", ")", ":", "if", "self", ".", "_cache_modified_count", ">", "0", ":", "self", ".", "storage", ".", "write", "(", "self", ".", "cache", ")", "self", ".", "_cache_modified_count", "=", "0" ]
Flush all unwritten data to disk.
[ "Flush", "all", "unwritten", "data", "to", "disk", "." ]
python
train
JoelBender/bacpypes
py25/bacpypes/appservice.py
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/appservice.py#L212-L222
def append_segment(self, apdu):
    """This function appends the apdu content to the end of the current
    APDU being built. The segmentAPDU is the context."""
    if _debug: SSM._debug("append_segment %r", apdu)

    # check for no context
    if not self.segmentAPDU:
        raise RuntimeError("no segmentation context established")

    # append the data
    self.segmentAPDU.put_data(apdu.pduData)
[ "def", "append_segment", "(", "self", ",", "apdu", ")", ":", "if", "_debug", ":", "SSM", ".", "_debug", "(", "\"append_segment %r\"", ",", "apdu", ")", "# check for no context", "if", "not", "self", ".", "segmentAPDU", ":", "raise", "RuntimeError", "(", "\"no segmentation context established\"", ")", "# append the data", "self", ".", "segmentAPDU", ".", "put_data", "(", "apdu", ".", "pduData", ")" ]
This function appends the apdu content to the end of the current APDU being built. The segmentAPDU is the context.
[ "This", "function", "appends", "the", "apdu", "content", "to", "the", "end", "of", "the", "current", "APDU", "being", "built", ".", "The", "segmentAPDU", "is", "the", "context", "." ]
python
train
LogicalDash/LiSE
LiSE/LiSE/query.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/LiSE/LiSE/query.py#L312-L340
def slow_iter_turns_eval_cmp(qry, oper, start_branch=None, engine=None):
    """Iterate over all turns on which a comparison holds.

    This is expensive. It evaluates the query for every turn in history.

    """
    def mungeside(side):
        if isinstance(side, Query):
            return side.iter_turns
        elif isinstance(side, StatusAlias):
            return EntityStatAccessor(
                side.entity, side.stat, side.engine,
                side.branch, side.turn, side.tick, side.current, side.mungers
            )
        elif isinstance(side, EntityStatAccessor):
            return side
        else:
            return lambda: side
    leftside = mungeside(qry.leftside)
    rightside = mungeside(qry.rightside)
    engine = engine or leftside.engine or rightside.engine

    for (branch, _, _) in engine._iter_parent_btt(start_branch or engine.branch):
        if branch is None:
            return
        parent, turn_start, tick_start, turn_end, tick_end = engine._branches[branch]
        for turn in range(turn_start, engine.turn + 1):
            if oper(leftside(branch, turn), rightside(branch, turn)):
                yield branch, turn
[ "def", "slow_iter_turns_eval_cmp", "(", "qry", ",", "oper", ",", "start_branch", "=", "None", ",", "engine", "=", "None", ")", ":", "def", "mungeside", "(", "side", ")", ":", "if", "isinstance", "(", "side", ",", "Query", ")", ":", "return", "side", ".", "iter_turns", "elif", "isinstance", "(", "side", ",", "StatusAlias", ")", ":", "return", "EntityStatAccessor", "(", "side", ".", "entity", ",", "side", ".", "stat", ",", "side", ".", "engine", ",", "side", ".", "branch", ",", "side", ".", "turn", ",", "side", ".", "tick", ",", "side", ".", "current", ",", "side", ".", "mungers", ")", "elif", "isinstance", "(", "side", ",", "EntityStatAccessor", ")", ":", "return", "side", "else", ":", "return", "lambda", ":", "side", "leftside", "=", "mungeside", "(", "qry", ".", "leftside", ")", "rightside", "=", "mungeside", "(", "qry", ".", "rightside", ")", "engine", "=", "engine", "or", "leftside", ".", "engine", "or", "rightside", ".", "engine", "for", "(", "branch", ",", "_", ",", "_", ")", "in", "engine", ".", "_iter_parent_btt", "(", "start_branch", "or", "engine", ".", "branch", ")", ":", "if", "branch", "is", "None", ":", "return", "parent", ",", "turn_start", ",", "tick_start", ",", "turn_end", ",", "tick_end", "=", "engine", ".", "_branches", "[", "branch", "]", "for", "turn", "in", "range", "(", "turn_start", ",", "engine", ".", "turn", "+", "1", ")", ":", "if", "oper", "(", "leftside", "(", "branch", ",", "turn", ")", ",", "rightside", "(", "branch", ",", "turn", ")", ")", ":", "yield", "branch", ",", "turn" ]
Iterate over all turns on which a comparison holds. This is expensive. It evaluates the query for every turn in history.
[ "Iterate", "over", "all", "turns", "on", "which", "a", "comparison", "holds", "." ]
python
train
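The "evaluate every turn" loop above is what makes this query path expensive. A stripped-down, self-contained illustration of the same idea (the branch list, stat values, and threshold below are invented, not LiSE data):

import operator

def slow_iter(branches, leftside, rightside, oper):
    # Walk every turn of every branch and yield (branch, turn) when the
    # comparison holds.
    for branch, turn_start, turn_end in branches:
        for turn in range(turn_start, turn_end + 1):
            if oper(leftside(branch, turn), rightside(branch, turn)):
                yield branch, turn

hp = {('trunk', turn): turn * 10 for turn in range(5)}
hits = list(slow_iter([('trunk', 0, 4)],
                      lambda b, t: hp[(b, t)],
                      lambda b, t: 25,
                      operator.gt))
print(hits)  # [('trunk', 3), ('trunk', 4)]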
palantir/python-jsonrpc-server
pyls_jsonrpc/streams.py
https://github.com/palantir/python-jsonrpc-server/blob/7021d849901705ab53c141e483a71d0779aff3d2/pyls_jsonrpc/streams.py#L59-L69
def _content_length(line):
    """Extract the content length from an input line."""
    if line.startswith(b'Content-Length: '):
        _, value = line.split(b'Content-Length: ')
        value = value.strip()
        try:
            return int(value)
        except ValueError:
            raise ValueError("Invalid Content-Length header: {}".format(value))

    return None
[ "def", "_content_length", "(", "line", ")", ":", "if", "line", ".", "startswith", "(", "b'Content-Length: '", ")", ":", "_", ",", "value", "=", "line", ".", "split", "(", "b'Content-Length: '", ")", "value", "=", "value", ".", "strip", "(", ")", "try", ":", "return", "int", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Invalid Content-Length header: {}\"", ".", "format", "(", "value", ")", ")", "return", "None" ]
Extract the content length from an input line.
[ "Extract", "the", "content", "length", "from", "an", "input", "line", "." ]
python
train
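A quick self-contained check of the header parsing described above; the sample lines are made up but follow the same byte-string prefix convention:

def content_length(line):
    # Return the parsed Content-Length, or None for any other header line.
    if line.startswith(b'Content-Length: '):
        _, value = line.split(b'Content-Length: ')
        value = value.strip()
        try:
            return int(value)
        except ValueError:
            raise ValueError("Invalid Content-Length header: {}".format(value))
    return None

assert content_length(b'Content-Length: 42\r\n') == 42
assert content_length(b'Content-Type: application/vscode-jsonrpc\r\n') is None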
numenta/nupic
src/nupic/algorithms/temporal_memory.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/temporal_memory.py#L912-L924
def getPredictiveCells(self):
    """ Returns the indices of the predictive cells.

    :returns: (list) Indices of predictive cells.
    """
    previousCell = None
    predictiveCells = []
    for segment in self.activeSegments:
        if segment.cell != previousCell:
            predictiveCells.append(segment.cell)
            previousCell = segment.cell

    return predictiveCells
[ "def", "getPredictiveCells", "(", "self", ")", ":", "previousCell", "=", "None", "predictiveCells", "=", "[", "]", "for", "segment", "in", "self", ".", "activeSegments", ":", "if", "segment", ".", "cell", "!=", "previousCell", ":", "predictiveCells", ".", "append", "(", "segment", ".", "cell", ")", "previousCell", "=", "segment", ".", "cell", "return", "predictiveCells" ]
Returns the indices of the predictive cells. :returns: (list) Indices of predictive cells.
[ "Returns", "the", "indices", "of", "the", "predictive", "cells", "." ]
python
valid
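The loop above only collapses consecutive repeats of the same cell, which yields unique cells when the active segments are grouped by cell. The same idea on plain integers (the cell indices below are invented):

def unique_consecutive(cells):
    # Keep a cell only when it differs from the one just seen.
    previous, out = None, []
    for cell in cells:
        if cell != previous:
            out.append(cell)
            previous = cell
    return out

assert unique_consecutive([3, 3, 5, 7, 7, 7]) == [3, 5, 7]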
elastic/elasticsearch-py
elasticsearch/helpers/actions.py
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/helpers/actions.py#L56-L92
def _chunk_actions(actions, chunk_size, max_chunk_bytes, serializer):
    """
    Split actions into chunks by number or size, serialize them into strings in
    the process.
    """
    bulk_actions, bulk_data = [], []
    size, action_count = 0, 0
    for action, data in actions:
        raw_data, raw_action = data, action
        action = serializer.dumps(action)
        # +1 to account for the trailing new line character
        cur_size = len(action.encode("utf-8")) + 1

        if data is not None:
            data = serializer.dumps(data)
            cur_size += len(data.encode("utf-8")) + 1

        # full chunk, send it and start a new one
        if bulk_actions and (
            size + cur_size > max_chunk_bytes or action_count == chunk_size
        ):
            yield bulk_data, bulk_actions
            bulk_actions, bulk_data = [], []
            size, action_count = 0, 0

        bulk_actions.append(action)
        if data is not None:
            bulk_actions.append(data)
            bulk_data.append((raw_action, raw_data))
        else:
            bulk_data.append((raw_action,))

        size += cur_size
        action_count += 1

    if bulk_actions:
        yield bulk_data, bulk_actions
[ "def", "_chunk_actions", "(", "actions", ",", "chunk_size", ",", "max_chunk_bytes", ",", "serializer", ")", ":", "bulk_actions", ",", "bulk_data", "=", "[", "]", ",", "[", "]", "size", ",", "action_count", "=", "0", ",", "0", "for", "action", ",", "data", "in", "actions", ":", "raw_data", ",", "raw_action", "=", "data", ",", "action", "action", "=", "serializer", ".", "dumps", "(", "action", ")", "# +1 to account for the trailing new line character", "cur_size", "=", "len", "(", "action", ".", "encode", "(", "\"utf-8\"", ")", ")", "+", "1", "if", "data", "is", "not", "None", ":", "data", "=", "serializer", ".", "dumps", "(", "data", ")", "cur_size", "+=", "len", "(", "data", ".", "encode", "(", "\"utf-8\"", ")", ")", "+", "1", "# full chunk, send it and start a new one", "if", "bulk_actions", "and", "(", "size", "+", "cur_size", ">", "max_chunk_bytes", "or", "action_count", "==", "chunk_size", ")", ":", "yield", "bulk_data", ",", "bulk_actions", "bulk_actions", ",", "bulk_data", "=", "[", "]", ",", "[", "]", "size", ",", "action_count", "=", "0", ",", "0", "bulk_actions", ".", "append", "(", "action", ")", "if", "data", "is", "not", "None", ":", "bulk_actions", ".", "append", "(", "data", ")", "bulk_data", ".", "append", "(", "(", "raw_action", ",", "raw_data", ")", ")", "else", ":", "bulk_data", ".", "append", "(", "(", "raw_action", ",", ")", ")", "size", "+=", "cur_size", "action_count", "+=", "1", "if", "bulk_actions", ":", "yield", "bulk_data", ",", "bulk_actions" ]
Split actions into chunks by number or size, serialize them into strings in the process.
[ "Split", "actions", "into", "chunks", "by", "number", "or", "size", "serialize", "them", "into", "strings", "in", "the", "process", "." ]
python
train
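A toy version of the chunking rule above: start a new chunk whenever adding the next serialized document would exceed the byte budget, or the chunk already holds chunk_size items. The documents and limits below are made up and json stands in for the serializer:

import json

def chunk_by_size(docs, chunk_size, max_chunk_bytes):
    chunk, size = [], 0
    for doc in docs:
        cur = len(json.dumps(doc).encode("utf-8")) + 1  # +1 for the newline
        if chunk and (size + cur > max_chunk_bytes or len(chunk) == chunk_size):
            yield chunk
            chunk, size = [], 0
        chunk.append(doc)
        size += cur
    if chunk:
        yield chunk

print(list(chunk_by_size([{"n": i} for i in range(5)], chunk_size=2, max_chunk_bytes=1024)))
# [[{'n': 0}, {'n': 1}], [{'n': 2}, {'n': 3}], [{'n': 4}]]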
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/thread.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/thread.py#L1496-L1511
def read_code_bytes(self, size = 128, offset = 0):
    """
    Tries to read some bytes of the code currently being executed.

    @type  size: int
    @param size: Number of bytes to read.

    @type  offset: int
    @param offset: Offset from the program counter to begin reading.

    @rtype:  str
    @return: Bytes read from the process memory.

    @raise WindowsError: Could not read the requested data.
    """
    return self.get_process().read(self.get_pc() + offset, size)
[ "def", "read_code_bytes", "(", "self", ",", "size", "=", "128", ",", "offset", "=", "0", ")", ":", "return", "self", ".", "get_process", "(", ")", ".", "read", "(", "self", ".", "get_pc", "(", ")", "+", "offset", ",", "size", ")" ]
Tries to read some bytes of the code currently being executed. @type size: int @param size: Number of bytes to read. @type offset: int @param offset: Offset from the program counter to begin reading. @rtype: str @return: Bytes read from the process memory. @raise WindowsError: Could not read the requested data.
[ "Tries", "to", "read", "some", "bytes", "of", "the", "code", "currently", "being", "executed", "." ]
python
train
astropy/photutils
photutils/psf/epsf_stars.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf_stars.py#L687-L777
def _extract_stars(data, catalog, size=(11, 11), use_xy=True): """ Extract cutout images from a single image centered on stars defined in the single input catalog. Parameters ---------- data : `~astropy.nddata.NDData` A `~astropy.nddata.NDData` object containing the 2D image from which to extract the stars. If the input ``catalog`` contains only the sky coordinates (i.e. not the pixel coordinates) of the stars then the `~astropy.nddata.NDData` object must have a valid ``wcs`` attribute. catalogs : `~astropy.table.Table` A single catalog of sources to be extracted from the input ``data``. The center of each source can be defined either in pixel coordinates (in ``x`` and ``y`` columns) or sky coordinates (in a ``skycoord`` column containing a `~astropy.coordinates.SkyCoord` object). If both are specified, then the value of the ``use_xy`` keyword determines which coordinates will be used. size : int or array_like (int), optional The extraction box size along each axis. If ``size`` is a scalar then a square box of size ``size`` will be used. If ``size`` has two elements, they should be in ``(ny, nx)`` order. The size must be greater than or equal to 3 pixel for both axes. use_xy : bool, optional Whether to use the ``x`` and ``y`` pixel positions when both pixel and sky coordinates are present in the input catalog table. If `False` then sky coordinates are used instead of pixel coordinates (e.g. for linked stars). The default is `True`. Returns ------- stars : list of `EPSFStar` objects A list of `EPSFStar` instances containing the extracted stars. """ colnames = catalog.colnames if ('x' not in colnames or 'y' not in colnames) or not use_xy: xcenters, ycenters = skycoord_to_pixel(catalog['skycoord'], data.wcs, origin=0, mode='all') else: xcenters = catalog['x'].data.astype(np.float) ycenters = catalog['y'].data.astype(np.float) if 'id' in colnames: ids = catalog['id'] else: ids = np.arange(len(catalog), dtype=np.int) + 1 if data.uncertainty is None: weights = np.ones_like(data.data) else: if data.uncertainty.uncertainty_type == 'weights': weights = np.asanyarray(data.uncertainty.array, dtype=np.float) else: warnings.warn('The data uncertainty attribute has an unsupported ' 'type. Only uncertainty_type="weights" can be ' 'used to set weights. Weights will be set to 1.', AstropyUserWarning) weights = np.ones_like(data.data) if data.mask is not None: weights[data.mask] = 0. stars = [] for xcenter, ycenter, obj_id in zip(xcenters, ycenters, ids): try: large_slc, small_slc = overlap_slices(data.data.shape, size, (ycenter, xcenter), mode='strict') data_cutout = data.data[large_slc] weights_cutout = weights[large_slc] except (PartialOverlapError, NoOverlapError): stars.append(None) continue origin = (large_slc[1].start, large_slc[0].start) cutout_center = (xcenter - origin[0], ycenter - origin[1]) star = EPSFStar(data_cutout, weights_cutout, cutout_center=cutout_center, origin=origin, wcs_large=data.wcs, id_label=obj_id) stars.append(star) return stars
[ "def", "_extract_stars", "(", "data", ",", "catalog", ",", "size", "=", "(", "11", ",", "11", ")", ",", "use_xy", "=", "True", ")", ":", "colnames", "=", "catalog", ".", "colnames", "if", "(", "'x'", "not", "in", "colnames", "or", "'y'", "not", "in", "colnames", ")", "or", "not", "use_xy", ":", "xcenters", ",", "ycenters", "=", "skycoord_to_pixel", "(", "catalog", "[", "'skycoord'", "]", ",", "data", ".", "wcs", ",", "origin", "=", "0", ",", "mode", "=", "'all'", ")", "else", ":", "xcenters", "=", "catalog", "[", "'x'", "]", ".", "data", ".", "astype", "(", "np", ".", "float", ")", "ycenters", "=", "catalog", "[", "'y'", "]", ".", "data", ".", "astype", "(", "np", ".", "float", ")", "if", "'id'", "in", "colnames", ":", "ids", "=", "catalog", "[", "'id'", "]", "else", ":", "ids", "=", "np", ".", "arange", "(", "len", "(", "catalog", ")", ",", "dtype", "=", "np", ".", "int", ")", "+", "1", "if", "data", ".", "uncertainty", "is", "None", ":", "weights", "=", "np", ".", "ones_like", "(", "data", ".", "data", ")", "else", ":", "if", "data", ".", "uncertainty", ".", "uncertainty_type", "==", "'weights'", ":", "weights", "=", "np", ".", "asanyarray", "(", "data", ".", "uncertainty", ".", "array", ",", "dtype", "=", "np", ".", "float", ")", "else", ":", "warnings", ".", "warn", "(", "'The data uncertainty attribute has an unsupported '", "'type. Only uncertainty_type=\"weights\" can be '", "'used to set weights. Weights will be set to 1.'", ",", "AstropyUserWarning", ")", "weights", "=", "np", ".", "ones_like", "(", "data", ".", "data", ")", "if", "data", ".", "mask", "is", "not", "None", ":", "weights", "[", "data", ".", "mask", "]", "=", "0.", "stars", "=", "[", "]", "for", "xcenter", ",", "ycenter", ",", "obj_id", "in", "zip", "(", "xcenters", ",", "ycenters", ",", "ids", ")", ":", "try", ":", "large_slc", ",", "small_slc", "=", "overlap_slices", "(", "data", ".", "data", ".", "shape", ",", "size", ",", "(", "ycenter", ",", "xcenter", ")", ",", "mode", "=", "'strict'", ")", "data_cutout", "=", "data", ".", "data", "[", "large_slc", "]", "weights_cutout", "=", "weights", "[", "large_slc", "]", "except", "(", "PartialOverlapError", ",", "NoOverlapError", ")", ":", "stars", ".", "append", "(", "None", ")", "continue", "origin", "=", "(", "large_slc", "[", "1", "]", ".", "start", ",", "large_slc", "[", "0", "]", ".", "start", ")", "cutout_center", "=", "(", "xcenter", "-", "origin", "[", "0", "]", ",", "ycenter", "-", "origin", "[", "1", "]", ")", "star", "=", "EPSFStar", "(", "data_cutout", ",", "weights_cutout", ",", "cutout_center", "=", "cutout_center", ",", "origin", "=", "origin", ",", "wcs_large", "=", "data", ".", "wcs", ",", "id_label", "=", "obj_id", ")", "stars", ".", "append", "(", "star", ")", "return", "stars" ]
Extract cutout images from a single image centered on stars defined in the single input catalog. Parameters ---------- data : `~astropy.nddata.NDData` A `~astropy.nddata.NDData` object containing the 2D image from which to extract the stars. If the input ``catalog`` contains only the sky coordinates (i.e. not the pixel coordinates) of the stars then the `~astropy.nddata.NDData` object must have a valid ``wcs`` attribute. catalogs : `~astropy.table.Table` A single catalog of sources to be extracted from the input ``data``. The center of each source can be defined either in pixel coordinates (in ``x`` and ``y`` columns) or sky coordinates (in a ``skycoord`` column containing a `~astropy.coordinates.SkyCoord` object). If both are specified, then the value of the ``use_xy`` keyword determines which coordinates will be used. size : int or array_like (int), optional The extraction box size along each axis. If ``size`` is a scalar then a square box of size ``size`` will be used. If ``size`` has two elements, they should be in ``(ny, nx)`` order. The size must be greater than or equal to 3 pixel for both axes. use_xy : bool, optional Whether to use the ``x`` and ``y`` pixel positions when both pixel and sky coordinates are present in the input catalog table. If `False` then sky coordinates are used instead of pixel coordinates (e.g. for linked stars). The default is `True`. Returns ------- stars : list of `EPSFStar` objects A list of `EPSFStar` instances containing the extracted stars.
[ "Extract", "cutout", "images", "from", "a", "single", "image", "centered", "on", "stars", "defined", "in", "the", "single", "input", "catalog", "." ]
python
train
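The cutout step above relies on astropy's overlap_slices with mode='strict', appending None for any star whose box would not fit entirely inside the image. A rough numpy-only sketch of that behaviour (the box placement here is simplified and not identical to overlap_slices; image and positions are invented):

import numpy as np

def cutout(data, xcenter, ycenter, size=(11, 11)):
    # Return (cutout, origin) for a box fully inside the image, else None.
    ny, nx = size
    y0 = int(round(ycenter)) - ny // 2
    x0 = int(round(xcenter)) - nx // 2
    if y0 < 0 or x0 < 0 or y0 + ny > data.shape[0] or x0 + nx > data.shape[1]:
        return None  # analogous to appending None on PartialOverlapError/NoOverlapError
    return data[y0:y0 + ny, x0:x0 + nx], (x0, y0)

image = np.arange(100 * 100, dtype=float).reshape(100, 100)
print(cutout(image, 50.0, 40.0)[1])  # origin (45, 35)
print(cutout(image, 2.0, 2.0))       # None: an 11x11 box would leave the image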
hydpy-dev/hydpy
hydpy/models/arma/arma_derived.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/arma/arma_derived.py#L94-L106
def update(self):
    """Determine the total number of AR coefficients.

    >>> from hydpy.models.arma import *
    >>> parameterstep('1d')
    >>> responses(((1., 2.), (1.,)), th_3=((1.,), (1., 2., 3.)))
    >>> derived.ar_order.update()
    >>> derived.ar_order
    ar_order(2, 1)
    """
    responses = self.subpars.pars.control.responses
    self.shape = len(responses)
    self(responses.ar_orders)
[ "def", "update", "(", "self", ")", ":", "responses", "=", "self", ".", "subpars", ".", "pars", ".", "control", ".", "responses", "self", ".", "shape", "=", "len", "(", "responses", ")", "self", "(", "responses", ".", "ar_orders", ")" ]
Determine the total number of AR coefficients. >>> from hydpy.models.arma import * >>> parameterstep('1d') >>> responses(((1., 2.), (1.,)), th_3=((1.,), (1., 2., 3.))) >>> derived.ar_order.update() >>> derived.ar_order ar_order(2, 1)
[ "Determine", "the", "total", "number", "of", "AR", "coefficients", "." ]
python
train
databuild/databuild
databuild/adapters/locmem/models.py
https://github.com/databuild/databuild/blob/4c8ee04fad1748f5b966753057ac05efbc289b10/databuild/adapters/locmem/models.py#L24-L31
def _match(self, doc, where):
    """Return True if 'doc' matches the 'where' condition."""
    assert isinstance(where, dict), "where is not a dictionary"
    assert isinstance(doc, dict), "doc is not a dictionary"
    try:
        return all([doc[k] == v for k, v in where.items()])
    except KeyError:
        return False
[ "def", "_match", "(", "self", ",", "doc", ",", "where", ")", ":", "assert", "isinstance", "(", "where", ",", "dict", ")", ",", "\"where is not a dictionary\"", "assert", "isinstance", "(", "doc", ",", "dict", ")", ",", "\"doc is not a dictionary\"", "try", ":", "return", "all", "(", "[", "doc", "[", "k", "]", "==", "v", "for", "k", ",", "v", "in", "where", ".", "items", "(", ")", "]", ")", "except", "KeyError", ":", "return", "False" ]
Return True if 'doc' matches the 'where' condition.
[ "Return", "True", "if", "doc", "matches", "the", "where", "condition", "." ]
python
train
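A self-contained check of the matching rule above: every key in `where` must be present in `doc` with an equal value, and a missing key means no match (the documents below are made up):

def match(doc, where):
    try:
        return all(doc[k] == v for k, v in where.items())
    except KeyError:
        return False

assert match({'color': 'red', 'size': 3}, {'color': 'red'})
assert not match({'color': 'red', 'size': 3}, {'size': 4})
assert not match({'color': 'red'}, {'size': 3})  # missing key -> no match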
kislyuk/ensure
ensure/main.py
https://github.com/kislyuk/ensure/blob/0a562a4b469ffbaf71c75dc4d394e94334c831f0/ensure/main.py#L289-L295
def contains_no(self, prototype):
    """
    Ensures no item of :attr:`subject` is of class *prototype*.
    """
    for element in self._subject:
        self._run(unittest_case.assertNotIsInstance, (element, prototype))
    return ChainInspector(self._subject)
[ "def", "contains_no", "(", "self", ",", "prototype", ")", ":", "for", "element", "in", "self", ".", "_subject", ":", "self", ".", "_run", "(", "unittest_case", ".", "assertNotIsInstance", ",", "(", "element", ",", "prototype", ")", ")", "return", "ChainInspector", "(", "self", ".", "_subject", ")" ]
Ensures no item of :attr:`subject` is of class *prototype*.
[ "Ensures", "no", "item", "of", ":", "attr", ":", "subject", "is", "of", "class", "*", "prototype", "*", "." ]
python
train
tensorflow/datasets
tensorflow_datasets/image/svhn.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/svhn.py#L92-L113
def _generate_examples(self, filepath):
  """Generate examples as dicts.

  Args:
    filepath: `str` path of the file to process.

  Yields:
    Generator yielding the next samples
  """
  with tf.io.gfile.GFile(filepath, "rb") as f:
    data = tfds.core.lazy_imports.scipy.io.loadmat(f)

  # Maybe should shuffle ?

  assert np.max(data["y"]) <= 10  # Sanity check
  assert np.min(data["y"]) > 0

  for image, label in zip(np.rollaxis(data["X"], -1), data["y"]):
    yield {
        "image": image,
        "label": label % 10,  # digit 0 is saved as 0 (instead of 10)
    }
[ "def", "_generate_examples", "(", "self", ",", "filepath", ")", ":", "with", "tf", ".", "io", ".", "gfile", ".", "GFile", "(", "filepath", ",", "\"rb\"", ")", "as", "f", ":", "data", "=", "tfds", ".", "core", ".", "lazy_imports", ".", "scipy", ".", "io", ".", "loadmat", "(", "f", ")", "# Maybe should shuffle ?", "assert", "np", ".", "max", "(", "data", "[", "\"y\"", "]", ")", "<=", "10", "# Sanity check", "assert", "np", ".", "min", "(", "data", "[", "\"y\"", "]", ")", ">", "0", "for", "image", ",", "label", "in", "zip", "(", "np", ".", "rollaxis", "(", "data", "[", "\"X\"", "]", ",", "-", "1", ")", ",", "data", "[", "\"y\"", "]", ")", ":", "yield", "{", "\"image\"", ":", "image", ",", "\"label\"", ":", "label", "%", "10", ",", "# digit 0 is saved as 0 (instead of 10)", "}" ]
Generate examples as dicts. Args: filepath: `str` path of the file to process. Yields: Generator yielding the next samples
[ "Generate", "examples", "as", "dicts", "." ]
python
train
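The modulo on the yielded label above reflects the convention, noted in the code comment and the sanity checks, that the raw .mat files store digit 0 as class 10; a one-line check of the remapping (sample labels invented):

assert [y % 10 for y in [1, 2, 9, 10]] == [1, 2, 9, 0]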
reillysiemens/layabout
layabout.py
https://github.com/reillysiemens/layabout/blob/a146c47f2558e66bb51cf708d39909b93eaea7f4/layabout.py#L291-L296
def _create_slack_with_env_var(env_var: EnvVar) -> SlackClient:
    """ Create a :obj:`SlackClient` with a token from an env var. """
    token = os.getenv(env_var)
    if token:
        return SlackClient(token=token)
    raise MissingToken(f"Could not acquire token from {env_var}")
[ "def", "_create_slack_with_env_var", "(", "env_var", ":", "EnvVar", ")", "->", "SlackClient", ":", "token", "=", "os", ".", "getenv", "(", "env_var", ")", "if", "token", ":", "return", "SlackClient", "(", "token", "=", "token", ")", "raise", "MissingToken", "(", "f\"Could not acquire token from {env_var}\"", ")" ]
Create a :obj:`SlackClient` with a token from an env var.
[ "Create", "a", ":", "obj", ":", "SlackClient", "with", "a", "token", "from", "an", "env", "var", "." ]
python
train
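The same lookup-or-fail pattern, written with stand-in names so it runs on its own; "SLACK_API_TOKEN", the return type, and RuntimeError are placeholders here, not layabout's EnvVar, SlackClient, or MissingToken:

import os

def token_from_env(env_var: str) -> str:
    # Return the token from the environment or fail loudly.
    token = os.getenv(env_var)
    if token:
        return token
    raise RuntimeError(f"Could not acquire token from {env_var}")

os.environ["SLACK_API_TOKEN"] = "xoxb-example"
print(token_from_env("SLACK_API_TOKEN"))  # xoxb-example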
nabetama/slacky
slacky/rest/rest.py
https://github.com/nabetama/slacky/blob/dde62ce49af9b8f581729c36d2ac790310b570e4/slacky/rest/rest.py#L819-L823
def messages(self, query, **kwargs):
    """ https://api.slack.com/methods/search.messages
    """
    self.url = 'https://slack.com/api/search.messages'
    return super(Search, self).search_from_url(query, **kwargs)
[ "def", "messages", "(", "self", ",", "query", ",", "*", "*", "kwargs", ")", ":", "self", ".", "url", "=", "'https://slack.com/api/search.messages'", "return", "super", "(", "Search", ",", "self", ")", ".", "search_from_url", "(", "query", ",", "*", "*", "kwargs", ")" ]
https://api.slack.com/methods/search.messages
[ "https", ":", "//", "api", ".", "slack", ".", "com", "/", "methods", "/", "search", ".", "messages" ]
python
train
CloverHealth/temple
temple/utils.py
https://github.com/CloverHealth/temple/blob/d7b75da2459f72ba74d6f3b6e1ab95c3d1b92ccd/temple/utils.py#L85-L102
def set_cmd_env_var(value):
    """Decorator that sets the temple command env var to value"""
    def func_decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            previous_cmd_env_var = os.getenv(temple.constants.TEMPLE_ENV_VAR)
            os.environ[temple.constants.TEMPLE_ENV_VAR] = value

            try:
                ret_val = function(*args, **kwargs)
            finally:
                if previous_cmd_env_var is None:
                    del os.environ[temple.constants.TEMPLE_ENV_VAR]
                else:
                    os.environ[temple.constants.TEMPLE_ENV_VAR] = previous_cmd_env_var

            return ret_val

        return wrapper

    return func_decorator
[ "def", "set_cmd_env_var", "(", "value", ")", ":", "def", "func_decorator", "(", "function", ")", ":", "@", "functools", ".", "wraps", "(", "function", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "previous_cmd_env_var", "=", "os", ".", "getenv", "(", "temple", ".", "constants", ".", "TEMPLE_ENV_VAR", ")", "os", ".", "environ", "[", "temple", ".", "constants", ".", "TEMPLE_ENV_VAR", "]", "=", "value", "try", ":", "ret_val", "=", "function", "(", "*", "args", ",", "*", "*", "kwargs", ")", "finally", ":", "if", "previous_cmd_env_var", "is", "None", ":", "del", "os", ".", "environ", "[", "temple", ".", "constants", ".", "TEMPLE_ENV_VAR", "]", "else", ":", "os", ".", "environ", "[", "temple", ".", "constants", ".", "TEMPLE_ENV_VAR", "]", "=", "previous_cmd_env_var", "return", "ret_val", "return", "wrapper", "return", "func_decorator" ]
Decorator that sets the temple command env var to value
[ "Decorator", "that", "sets", "the", "temple", "command", "env", "var", "to", "value" ]
python
valid
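A generic rewrite of the save/set/restore decorator above, self-contained and using a made-up variable name instead of temple.constants.TEMPLE_ENV_VAR:

import functools
import os

def set_env_var(name, value):
    def func_decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            previous = os.getenv(name)
            os.environ[name] = value
            try:
                return function(*args, **kwargs)
            finally:
                # Restore the variable exactly as it was before the call.
                if previous is None:
                    del os.environ[name]
                else:
                    os.environ[name] = previous
        return wrapper
    return func_decorator

@set_env_var("DEMO_CMD", "update")
def current_cmd():
    return os.environ["DEMO_CMD"]

print(current_cmd())          # update
print(os.getenv("DEMO_CMD"))  # None: restored after the call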
jobovy/galpy
galpy/util/leung_dop853.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/util/leung_dop853.py#L211-L238
def hinit(func, x, t, pos_neg, f0, iord, hmax, rtol, atol, args):
    """
    Estimate initial step size
    """
    sk = atol + rtol * np.fabs(x)
    dnf = np.sum(np.square(f0 / sk), axis=0)
    dny = np.sum(np.square(x / sk), axis=0)
    h = np.sqrt(dny / dnf) * 0.01
    h = np.min([h, np.fabs(hmax)])
    h = custom_sign(h, pos_neg)

    # perform an explicit Euler step
    xx1 = x + h * f0
    f1 = np.array(func(xx1, t[0] + h, *args))

    # estimate the second derivative of the solution
    der2 = np.sum(np.square((f1 - f0) / sk), axis=0)
    der2 = np.sqrt(der2) / h

    # step size is computed such that h ** iord * max_d(norm(f0), norm(der2)) = 0.01
    der12 = np.max([np.fabs(der2), np.sqrt(dnf)])
    h1 = np.power(0.01 / der12, 1.0 / iord)
    h = np.min([100.0 * np.fabs(h), np.min([np.fabs(h1), np.fabs(hmax)])])

    return custom_sign(h, pos_neg), f0, f1, xx1
[ "def", "hinit", "(", "func", ",", "x", ",", "t", ",", "pos_neg", ",", "f0", ",", "iord", ",", "hmax", ",", "rtol", ",", "atol", ",", "args", ")", ":", "sk", "=", "atol", "+", "rtol", "*", "np", ".", "fabs", "(", "x", ")", "dnf", "=", "np", ".", "sum", "(", "np", ".", "square", "(", "f0", "/", "sk", ")", ",", "axis", "=", "0", ")", "dny", "=", "np", ".", "sum", "(", "np", ".", "square", "(", "x", "/", "sk", ")", ",", "axis", "=", "0", ")", "h", "=", "np", ".", "sqrt", "(", "dny", "/", "dnf", ")", "*", "0.01", "h", "=", "np", ".", "min", "(", "[", "h", ",", "np", ".", "fabs", "(", "hmax", ")", "]", ")", "h", "=", "custom_sign", "(", "h", ",", "pos_neg", ")", "# perform an explicit Euler step", "xx1", "=", "x", "+", "h", "*", "f0", "f1", "=", "np", ".", "array", "(", "func", "(", "xx1", ",", "t", "[", "0", "]", "+", "h", ",", "*", "args", ")", ")", "# estimate the second derivative of the solution", "der2", "=", "np", ".", "sum", "(", "np", ".", "square", "(", "(", "f1", "-", "f0", ")", "/", "sk", ")", ",", "axis", "=", "0", ")", "der2", "=", "np", ".", "sqrt", "(", "der2", ")", "/", "h", "# step size is computed such that h ** iord * max_d(norm(f0), norm(der2)) = 0.01", "der12", "=", "np", ".", "max", "(", "[", "np", ".", "fabs", "(", "der2", ")", ",", "np", ".", "sqrt", "(", "dnf", ")", "]", ")", "h1", "=", "np", ".", "power", "(", "0.01", "/", "der12", ",", "1.0", "/", "iord", ")", "h", "=", "np", ".", "min", "(", "[", "100.0", "*", "np", ".", "fabs", "(", "h", ")", ",", "np", ".", "min", "(", "[", "np", ".", "fabs", "(", "h1", ")", ",", "np", ".", "fabs", "(", "hmax", ")", "]", ")", "]", ")", "return", "custom_sign", "(", "h", ",", "pos_neg", ")", ",", "f0", ",", "f1", ",", "xx1" ]
Estimate initial step size
[ "Estimate", "initial", "step", "size" ]
python
train
numberoverzero/bloop
bloop/session.py
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/session.py#L271-L286
def enable_ttl(self, table_name, model):
    """Calls UpdateTimeToLive on the table according to model.Meta["ttl"]

    :param table_name: The name of the table to enable the TTL setting on
    :param model: The model to get TTL settings from
    """
    self._tables.pop(table_name, None)
    ttl_name = model.Meta.ttl["column"].dynamo_name
    request = {
        "TableName": table_name,
        "TimeToLiveSpecification": {"AttributeName": ttl_name, "Enabled": True}
    }
    try:
        self.dynamodb_client.update_time_to_live(**request)
    except botocore.exceptions.ClientError as error:
        raise BloopException("Unexpected error while setting TTL.") from error
[ "def", "enable_ttl", "(", "self", ",", "table_name", ",", "model", ")", ":", "self", ".", "_tables", ".", "pop", "(", "table_name", ",", "None", ")", "ttl_name", "=", "model", ".", "Meta", ".", "ttl", "[", "\"column\"", "]", ".", "dynamo_name", "request", "=", "{", "\"TableName\"", ":", "table_name", ",", "\"TimeToLiveSpecification\"", ":", "{", "\"AttributeName\"", ":", "ttl_name", ",", "\"Enabled\"", ":", "True", "}", "}", "try", ":", "self", ".", "dynamodb_client", ".", "update_time_to_live", "(", "*", "*", "request", ")", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "error", ":", "raise", "BloopException", "(", "\"Unexpected error while setting TTL.\"", ")", "from", "error" ]
Calls UpdateTimeToLive on the table according to model.Meta["ttl"] :param table_name: The name of the table to enable the TTL setting on :param model: The model to get TTL settings from
[ "Calls", "UpdateTimeToLive", "on", "the", "table", "according", "to", "model", ".", "Meta", "[", "ttl", "]" ]
python
train
caseyjlaw/rtpipe
rtpipe/RT.py
https://github.com/caseyjlaw/rtpipe/blob/ac33e4332cf215091a63afbb3137850876d73ec0/rtpipe/RT.py#L732-L930
def set_pipeline(filename, scan, fileroot='', paramfile='', **kwargs): """ Function defines pipeline state for search. Takes data/scan as input. fileroot is base name for associated products (cal files, noise, cands). if blank, it is set to filename. paramfile is name of file that defines all pipeline parameters (python-like syntax). kwargs used to overload paramfile definitions. Many parameters take 0 as default, which auto-defines ideal parameters. This definition does not yet consider memory/cpu/time limitations. nsegments defines how to break jobs in time. nchunk defines how many jobs are sent to nthreads. """ workdir = os.path.dirname(os.path.abspath(filename)) filename = filename.rstrip('/') assert os.path.exists(filename) # then get all metadata if os.path.exists(os.path.join(filename, 'Main.xml')): d = ps.get_metadata(filename, scan, paramfile=paramfile, **kwargs) # can take file name or Params instance d['dataformat'] = 'sdm' else: d = pm.get_metadata(filename, scan, paramfile=paramfile, **kwargs) d['dataformat'] = 'ms' # set version d['rtpipe_version'] = __version__ # define rootname for in/out cal/products if fileroot: d['fileroot'] = fileroot else: d['fileroot'] = os.path.basename(os.path.abspath(filename)) # autodetect calibration products locally if not d['gainfile'] or not os.path.exists(d['gainfile']): # first try to get CASA gain file gainfilelist = glob.glob(os.path.join(d['workdir'], d['fileroot'] + '.g?')) bpfilelist = glob.glob(os.path.join(d['workdir'], d['fileroot'] + '.b?')) # if not in workdir, look locally if not gainfilelist or not bpfilelist: gainfilelist = glob.glob(d['fileroot'] + '.g?') bpfilelist = glob.glob(d['fileroot'] + '.b?') if gainfilelist and bpfilelist: gainfilelist.sort() d['gainfile'] = gainfilelist[-1] logger.info('Autodetected CASA gainfile %s' % d['gainfile']) bpfilelist.sort() d['bpfile'] = bpfilelist[-1] logger.info('Autodetected CASA bpfile %s' % d['bpfile']) # if that fails, look for telcal file filelist = glob.glob(os.path.join(d['workdir'], filename + '.GN')) if not filelist: filelist = glob.glob(filename + '.GN') if filelist: d['gainfile'] = filelist[0] logger.info('Autodetected telcal file %s' % d['gainfile']) if not os.path.exists(d['gainfile']): logger.warn('Calibration file autodetection failed for gainfile {0}'.format(d['gainfile'])) # define features d['featureind'] = ['segment', 'int', 'dmind', 'dtind', 'beamnum'] # feature index. should be stable. if 'features' not in d: if d['searchtype'] == 'image1': d['features'] = ['snr1', 'immax1', 'l1', 'm1'] # features returned by image1 elif d['searchtype'] == 'image1snip': d['features'] = ['snr1', 'immax1', 'l1', 'm1', 'im40', 'spec20'] elif d['searchtype'] == 'image1stats': d['features'] = ['snr1', 'immax1', 'l1', 'm1', 'specstd', 'specskew', 'speckurtosis', 'imskew', 'imkurtosis'] # note: spec statistics are all or nothing. 
elif 'image2' in d['searchtype']: d['features'] = ['snr1', 'immax1', 'l1', 'm1', 'snr2', 'immax2', 'l2', 'm2'] # features returned by image1 # set imaging parameters to use if d['uvres'] == 0: d['uvres'] = d['uvres_full'] else: urange = d['urange'][scan]*(d['freq'].max()/d['freq_orig'][0]) # uvw from get_uvw already in lambda at ch0 vrange = d['vrange'][scan]*(d['freq'].max()/d['freq_orig'][0]) powers = n.fromfunction(lambda i,j: 2**i*3**j, (14,10), dtype='int') # power array for 2**i * 3**j rangex = n.round(d['uvoversample']*urange).astype('int') rangey = n.round(d['uvoversample']*vrange).astype('int') largerx = n.where(powers-rangex/d['uvres'] > 0, powers, powers[-1,-1]) p2x, p3x = n.where(largerx == largerx.min()) largery = n.where(powers-rangey/d['uvres'] > 0, powers, powers[-1,-1]) p2y, p3y = n.where(largery == largery.min()) d['npixx_full'] = (2**p2x * 3**p3x)[0] d['npixy_full'] = (2**p2y * 3**p3y)[0] # set number of pixels to image d['npixx'] = d['npixx_full'] d['npixy'] = d['npixy_full'] if 'npix_max' in d: if d['npix_max']: d['npixx'] = min(d['npix_max'], d['npixx_full']) d['npixy'] = min(d['npix_max'], d['npixy_full']) if d['npix']: d['npixx'] = d['npix'] d['npixy'] = d['npix'] else: d['npix'] = max(d['npixx'], d['npixy']) # this used to define fringe time # define dmarr, if not already if len(d['dmarr']) == 0: if d.has_key('dm_maxloss') and d.has_key('maxdm') and d.has_key('dm_pulsewidth'): d['dmarr'] = calc_dmgrid(d, maxloss=d['dm_maxloss'], maxdm=d['maxdm'], dt=d['dm_pulsewidth']) if d['maxdm'] > 0: logger.info('Calculated %d dms for max sensitivity loss %.2f, maxdm %d pc/cm3, and pulse width %d ms' % (len(d['dmarr']), d['dm_maxloss'], d['maxdm'], d['dm_pulsewidth']/1000)) else: d['dmarr'] = [0] logger.info('Can\'t calculate dm grid without dm_maxloss, maxdm, and dm_pulsewidth defined. Setting to [0].') # define times for data to read d['t_overlap'] = rtlib.calc_delay(d['freq'], d['inttime'], max(d['dmarr'])).max()*d['inttime'] # time of overlap for total dm coverage at segment boundaries d['datadelay'] = [rtlib.calc_delay(d['freq'], d['inttime'],dm).max() for dm in d['dmarr']] d['nints'] = d['nints'] - d['nskip'] # pols if d.has_key('selectpol'): d['pols'] = [pol for pol in d['pols_orig'] if pol in d['selectpol']] else: d['pols'] = d['pols_orig'] d['npol'] = len(d['pols']) # split imaging into chunks. ideally one per thread, but can modify to fit available memory if d['nchunk'] == 0: d['nchunk'] = d['nthread'] # if nsegments is 0, then auto-define within memory limit if not d['nsegments']: fringetime = calc_fringetime(d) d['nsegments'] = max(1, min(d['nints'], int(d['scale_nsegments']*d['inttime']*d['nints']/(fringetime-d['t_overlap'])))) # at least 1, at most nints calc_segment_times(d) # if auto nsegment definition makes segment too large, try to scale it down to fit in memory_limit (if provided) # limit defined for dm sweep time and max nchunk/nthread ratio if d.has_key('memory_limit'): (vismem0, immem0) = calc_memory_footprint(d, limit=True) assert vismem0+immem0 < d['memory_limit'], 'memory_limit of {0} is smaller than best solution of {1}. Try forcing nsegments/nchunk larger than {2}/{3} or reducing maxdm/npix'.format(d['memory_limit'], vismem0+immem0, d['nsegments'], max(d['dtarr'])/min(d['dtarr'])) (vismem, immem) = calc_memory_footprint(d) if vismem+immem > d['memory_limit']: logger.info('Over memory limit of {4} when reading {0} segments with {1} chunks ({2}/{3} GB for visibilities/imaging). 
Searching for solution down to {5}/{6} GB...'.format(d['nsegments'], d['nchunk'], vismem, immem, d['memory_limit'], vismem0, immem0)) while vismem+immem > d['memory_limit']: (vismem, immem) = calc_memory_footprint(d) logger.debug('Using {0} segments with {1} chunks ({2}/{3} GB for visibilities/imaging). Searching for better solution...'.format(d['nchunk'], vismem, immem, d['memory_limit'])) d['scale_nsegments'] = d['scale_nsegments'] * (vismem+immem)/float(d['memory_limit']) d['nsegments'] = max(1, min(d['nints'], int(d['scale_nsegments']*d['inttime']*d['nints']/(fringetime-d['t_overlap'])))) # at least 1, at most nints calc_segment_times(d) (vismem, immem) = calc_memory_footprint(d) while vismem+immem > d['memory_limit']: logger.debug('Doubling nchunk from %d to fit in %d GB memory limit.' % (d['nchunk'], d['memory_limit'])) d['nchunk'] = 2*d['nchunk'] (vismem, immem) = calc_memory_footprint(d) if d['nchunk'] >= max(d['dtarr'])/min(d['dtarr'])*d['nthread']: # limit nchunk/nthread to at most the range in dt d['nchunk'] = d['nthread'] break (vismem, immem) = calc_memory_footprint(d) # final set up of memory calc_segment_times(d) (vismem, immem) = calc_memory_footprint(d) # scaling of number of integrations beyond dt=1 assert all(d['dtarr']) and (d['dtarr'] == sorted(d['dtarr'])), 'dtarr must be larger than 0 and in increasing order' # calculate number of thermal noise candidates per segment nfalse = calc_nfalse(d) logger.info('') logger.info('Pipeline summary:') if '.GN' in d['gainfile']: logger.info('\t Products saved with %s. telcal calibration with %s' % (d['fileroot'], os.path.basename(d['gainfile']))) else: logger.info('\t Products saved with %s. CASA calibration files (%s, %s)' % (d['fileroot'], os.path.basename(d['gainfile']), os.path.basename(d['bpfile']))) logger.info('\t Using %d segment%s of %d ints (%.1f s) with overlap of %.1f s' % (d['nsegments'], "s"[not d['nsegments']-1:], d['readints'], d['t_segment'], d['t_overlap'])) if d['t_overlap'] > d['t_segment']/3.: logger.info('\t\t Lots of segments needed, since Max DM sweep (%.1f s) close to segment size (%.2f s)' % (d['t_overlap'], d['t_segment'])) logger.info('\t Downsampling in time/freq by %d/%d and skipping %d ints from start of scan.' % (d['read_tdownsample'], d['read_fdownsample'], d['nskip'])) logger.info('\t Excluding ants %s' % (d['excludeants'])) logger.info('\t Using pols %s' % (d['pols'])) logger.info('') logger.info('\t Search with %s and threshold %.1f.' % (d['searchtype'], d['sigma_image1'])) logger.info('\t Using %d DMs from %.1f to %.1f and dts %s.' % (len(d['dmarr']), min(d['dmarr']), max(d['dmarr']), d['dtarr'])) logger.info('\t Using uvgrid npix=(%d,%d) and res=%d.' % (d['npixx'], d['npixy'], d['uvres'])) logger.info('\t Expect %d thermal false positives per segment.' % nfalse) logger.info('') logger.info('\t Visibility memory usage is %.1f GB/segment' % vismem) logger.info('\t Imaging in %d chunk%s using max of %.1f GB/segment' % (d['nchunk'], "s"[not d['nsegments']-1:], immem)) logger.info('\t Grand total memory usage: %.1f GB/segment' % (vismem + immem)) return d
[ "def", "set_pipeline", "(", "filename", ",", "scan", ",", "fileroot", "=", "''", ",", "paramfile", "=", "''", ",", "*", "*", "kwargs", ")", ":", "workdir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "filename", ")", ")", "filename", "=", "filename", ".", "rstrip", "(", "'/'", ")", "assert", "os", ".", "path", ".", "exists", "(", "filename", ")", "# then get all metadata", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "filename", ",", "'Main.xml'", ")", ")", ":", "d", "=", "ps", ".", "get_metadata", "(", "filename", ",", "scan", ",", "paramfile", "=", "paramfile", ",", "*", "*", "kwargs", ")", "# can take file name or Params instance", "d", "[", "'dataformat'", "]", "=", "'sdm'", "else", ":", "d", "=", "pm", ".", "get_metadata", "(", "filename", ",", "scan", ",", "paramfile", "=", "paramfile", ",", "*", "*", "kwargs", ")", "d", "[", "'dataformat'", "]", "=", "'ms'", "# set version", "d", "[", "'rtpipe_version'", "]", "=", "__version__", "# define rootname for in/out cal/products", "if", "fileroot", ":", "d", "[", "'fileroot'", "]", "=", "fileroot", "else", ":", "d", "[", "'fileroot'", "]", "=", "os", ".", "path", ".", "basename", "(", "os", ".", "path", ".", "abspath", "(", "filename", ")", ")", "# autodetect calibration products locally", "if", "not", "d", "[", "'gainfile'", "]", "or", "not", "os", ".", "path", ".", "exists", "(", "d", "[", "'gainfile'", "]", ")", ":", "# first try to get CASA gain file", "gainfilelist", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "d", "[", "'workdir'", "]", ",", "d", "[", "'fileroot'", "]", "+", "'.g?'", ")", ")", "bpfilelist", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "d", "[", "'workdir'", "]", ",", "d", "[", "'fileroot'", "]", "+", "'.b?'", ")", ")", "# if not in workdir, look locally", "if", "not", "gainfilelist", "or", "not", "bpfilelist", ":", "gainfilelist", "=", "glob", ".", "glob", "(", "d", "[", "'fileroot'", "]", "+", "'.g?'", ")", "bpfilelist", "=", "glob", ".", "glob", "(", "d", "[", "'fileroot'", "]", "+", "'.b?'", ")", "if", "gainfilelist", "and", "bpfilelist", ":", "gainfilelist", ".", "sort", "(", ")", "d", "[", "'gainfile'", "]", "=", "gainfilelist", "[", "-", "1", "]", "logger", ".", "info", "(", "'Autodetected CASA gainfile %s'", "%", "d", "[", "'gainfile'", "]", ")", "bpfilelist", ".", "sort", "(", ")", "d", "[", "'bpfile'", "]", "=", "bpfilelist", "[", "-", "1", "]", "logger", ".", "info", "(", "'Autodetected CASA bpfile %s'", "%", "d", "[", "'bpfile'", "]", ")", "# if that fails, look for telcal file", "filelist", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "d", "[", "'workdir'", "]", ",", "filename", "+", "'.GN'", ")", ")", "if", "not", "filelist", ":", "filelist", "=", "glob", ".", "glob", "(", "filename", "+", "'.GN'", ")", "if", "filelist", ":", "d", "[", "'gainfile'", "]", "=", "filelist", "[", "0", "]", "logger", ".", "info", "(", "'Autodetected telcal file %s'", "%", "d", "[", "'gainfile'", "]", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "d", "[", "'gainfile'", "]", ")", ":", "logger", ".", "warn", "(", "'Calibration file autodetection failed for gainfile {0}'", ".", "format", "(", "d", "[", "'gainfile'", "]", ")", ")", "# define features", "d", "[", "'featureind'", "]", "=", "[", "'segment'", ",", "'int'", ",", "'dmind'", ",", "'dtind'", ",", "'beamnum'", "]", "# feature index. 
should be stable.", "if", "'features'", "not", "in", "d", ":", "if", "d", "[", "'searchtype'", "]", "==", "'image1'", ":", "d", "[", "'features'", "]", "=", "[", "'snr1'", ",", "'immax1'", ",", "'l1'", ",", "'m1'", "]", "# features returned by image1", "elif", "d", "[", "'searchtype'", "]", "==", "'image1snip'", ":", "d", "[", "'features'", "]", "=", "[", "'snr1'", ",", "'immax1'", ",", "'l1'", ",", "'m1'", ",", "'im40'", ",", "'spec20'", "]", "elif", "d", "[", "'searchtype'", "]", "==", "'image1stats'", ":", "d", "[", "'features'", "]", "=", "[", "'snr1'", ",", "'immax1'", ",", "'l1'", ",", "'m1'", ",", "'specstd'", ",", "'specskew'", ",", "'speckurtosis'", ",", "'imskew'", ",", "'imkurtosis'", "]", "# note: spec statistics are all or nothing.", "elif", "'image2'", "in", "d", "[", "'searchtype'", "]", ":", "d", "[", "'features'", "]", "=", "[", "'snr1'", ",", "'immax1'", ",", "'l1'", ",", "'m1'", ",", "'snr2'", ",", "'immax2'", ",", "'l2'", ",", "'m2'", "]", "# features returned by image1", "# set imaging parameters to use", "if", "d", "[", "'uvres'", "]", "==", "0", ":", "d", "[", "'uvres'", "]", "=", "d", "[", "'uvres_full'", "]", "else", ":", "urange", "=", "d", "[", "'urange'", "]", "[", "scan", "]", "*", "(", "d", "[", "'freq'", "]", ".", "max", "(", ")", "/", "d", "[", "'freq_orig'", "]", "[", "0", "]", ")", "# uvw from get_uvw already in lambda at ch0", "vrange", "=", "d", "[", "'vrange'", "]", "[", "scan", "]", "*", "(", "d", "[", "'freq'", "]", ".", "max", "(", ")", "/", "d", "[", "'freq_orig'", "]", "[", "0", "]", ")", "powers", "=", "n", ".", "fromfunction", "(", "lambda", "i", ",", "j", ":", "2", "**", "i", "*", "3", "**", "j", ",", "(", "14", ",", "10", ")", ",", "dtype", "=", "'int'", ")", "# power array for 2**i * 3**j", "rangex", "=", "n", ".", "round", "(", "d", "[", "'uvoversample'", "]", "*", "urange", ")", ".", "astype", "(", "'int'", ")", "rangey", "=", "n", ".", "round", "(", "d", "[", "'uvoversample'", "]", "*", "vrange", ")", ".", "astype", "(", "'int'", ")", "largerx", "=", "n", ".", "where", "(", "powers", "-", "rangex", "/", "d", "[", "'uvres'", "]", ">", "0", ",", "powers", ",", "powers", "[", "-", "1", ",", "-", "1", "]", ")", "p2x", ",", "p3x", "=", "n", ".", "where", "(", "largerx", "==", "largerx", ".", "min", "(", ")", ")", "largery", "=", "n", ".", "where", "(", "powers", "-", "rangey", "/", "d", "[", "'uvres'", "]", ">", "0", ",", "powers", ",", "powers", "[", "-", "1", ",", "-", "1", "]", ")", "p2y", ",", "p3y", "=", "n", ".", "where", "(", "largery", "==", "largery", ".", "min", "(", ")", ")", "d", "[", "'npixx_full'", "]", "=", "(", "2", "**", "p2x", "*", "3", "**", "p3x", ")", "[", "0", "]", "d", "[", "'npixy_full'", "]", "=", "(", "2", "**", "p2y", "*", "3", "**", "p3y", ")", "[", "0", "]", "# set number of pixels to image", "d", "[", "'npixx'", "]", "=", "d", "[", "'npixx_full'", "]", "d", "[", "'npixy'", "]", "=", "d", "[", "'npixy_full'", "]", "if", "'npix_max'", "in", "d", ":", "if", "d", "[", "'npix_max'", "]", ":", "d", "[", "'npixx'", "]", "=", "min", "(", "d", "[", "'npix_max'", "]", ",", "d", "[", "'npixx_full'", "]", ")", "d", "[", "'npixy'", "]", "=", "min", "(", "d", "[", "'npix_max'", "]", ",", "d", "[", "'npixy_full'", "]", ")", "if", "d", "[", "'npix'", "]", ":", "d", "[", "'npixx'", "]", "=", "d", "[", "'npix'", "]", "d", "[", "'npixy'", "]", "=", "d", "[", "'npix'", "]", "else", ":", "d", "[", "'npix'", "]", "=", "max", "(", "d", "[", "'npixx'", "]", ",", "d", "[", "'npixy'", "]", ")", "# this used to define fringe time", "# define 
dmarr, if not already", "if", "len", "(", "d", "[", "'dmarr'", "]", ")", "==", "0", ":", "if", "d", ".", "has_key", "(", "'dm_maxloss'", ")", "and", "d", ".", "has_key", "(", "'maxdm'", ")", "and", "d", ".", "has_key", "(", "'dm_pulsewidth'", ")", ":", "d", "[", "'dmarr'", "]", "=", "calc_dmgrid", "(", "d", ",", "maxloss", "=", "d", "[", "'dm_maxloss'", "]", ",", "maxdm", "=", "d", "[", "'maxdm'", "]", ",", "dt", "=", "d", "[", "'dm_pulsewidth'", "]", ")", "if", "d", "[", "'maxdm'", "]", ">", "0", ":", "logger", ".", "info", "(", "'Calculated %d dms for max sensitivity loss %.2f, maxdm %d pc/cm3, and pulse width %d ms'", "%", "(", "len", "(", "d", "[", "'dmarr'", "]", ")", ",", "d", "[", "'dm_maxloss'", "]", ",", "d", "[", "'maxdm'", "]", ",", "d", "[", "'dm_pulsewidth'", "]", "/", "1000", ")", ")", "else", ":", "d", "[", "'dmarr'", "]", "=", "[", "0", "]", "logger", ".", "info", "(", "'Can\\'t calculate dm grid without dm_maxloss, maxdm, and dm_pulsewidth defined. Setting to [0].'", ")", "# define times for data to read", "d", "[", "'t_overlap'", "]", "=", "rtlib", ".", "calc_delay", "(", "d", "[", "'freq'", "]", ",", "d", "[", "'inttime'", "]", ",", "max", "(", "d", "[", "'dmarr'", "]", ")", ")", ".", "max", "(", ")", "*", "d", "[", "'inttime'", "]", "# time of overlap for total dm coverage at segment boundaries", "d", "[", "'datadelay'", "]", "=", "[", "rtlib", ".", "calc_delay", "(", "d", "[", "'freq'", "]", ",", "d", "[", "'inttime'", "]", ",", "dm", ")", ".", "max", "(", ")", "for", "dm", "in", "d", "[", "'dmarr'", "]", "]", "d", "[", "'nints'", "]", "=", "d", "[", "'nints'", "]", "-", "d", "[", "'nskip'", "]", "# pols", "if", "d", ".", "has_key", "(", "'selectpol'", ")", ":", "d", "[", "'pols'", "]", "=", "[", "pol", "for", "pol", "in", "d", "[", "'pols_orig'", "]", "if", "pol", "in", "d", "[", "'selectpol'", "]", "]", "else", ":", "d", "[", "'pols'", "]", "=", "d", "[", "'pols_orig'", "]", "d", "[", "'npol'", "]", "=", "len", "(", "d", "[", "'pols'", "]", ")", "# split imaging into chunks. ideally one per thread, but can modify to fit available memory", "if", "d", "[", "'nchunk'", "]", "==", "0", ":", "d", "[", "'nchunk'", "]", "=", "d", "[", "'nthread'", "]", "# if nsegments is 0, then auto-define within memory limit", "if", "not", "d", "[", "'nsegments'", "]", ":", "fringetime", "=", "calc_fringetime", "(", "d", ")", "d", "[", "'nsegments'", "]", "=", "max", "(", "1", ",", "min", "(", "d", "[", "'nints'", "]", ",", "int", "(", "d", "[", "'scale_nsegments'", "]", "*", "d", "[", "'inttime'", "]", "*", "d", "[", "'nints'", "]", "/", "(", "fringetime", "-", "d", "[", "'t_overlap'", "]", ")", ")", ")", ")", "# at least 1, at most nints", "calc_segment_times", "(", "d", ")", "# if auto nsegment definition makes segment too large, try to scale it down to fit in memory_limit (if provided)", "# limit defined for dm sweep time and max nchunk/nthread ratio", "if", "d", ".", "has_key", "(", "'memory_limit'", ")", ":", "(", "vismem0", ",", "immem0", ")", "=", "calc_memory_footprint", "(", "d", ",", "limit", "=", "True", ")", "assert", "vismem0", "+", "immem0", "<", "d", "[", "'memory_limit'", "]", ",", "'memory_limit of {0} is smaller than best solution of {1}. 
Try forcing nsegments/nchunk larger than {2}/{3} or reducing maxdm/npix'", ".", "format", "(", "d", "[", "'memory_limit'", "]", ",", "vismem0", "+", "immem0", ",", "d", "[", "'nsegments'", "]", ",", "max", "(", "d", "[", "'dtarr'", "]", ")", "/", "min", "(", "d", "[", "'dtarr'", "]", ")", ")", "(", "vismem", ",", "immem", ")", "=", "calc_memory_footprint", "(", "d", ")", "if", "vismem", "+", "immem", ">", "d", "[", "'memory_limit'", "]", ":", "logger", ".", "info", "(", "'Over memory limit of {4} when reading {0} segments with {1} chunks ({2}/{3} GB for visibilities/imaging). Searching for solution down to {5}/{6} GB...'", ".", "format", "(", "d", "[", "'nsegments'", "]", ",", "d", "[", "'nchunk'", "]", ",", "vismem", ",", "immem", ",", "d", "[", "'memory_limit'", "]", ",", "vismem0", ",", "immem0", ")", ")", "while", "vismem", "+", "immem", ">", "d", "[", "'memory_limit'", "]", ":", "(", "vismem", ",", "immem", ")", "=", "calc_memory_footprint", "(", "d", ")", "logger", ".", "debug", "(", "'Using {0} segments with {1} chunks ({2}/{3} GB for visibilities/imaging). Searching for better solution...'", ".", "format", "(", "d", "[", "'nchunk'", "]", ",", "vismem", ",", "immem", ",", "d", "[", "'memory_limit'", "]", ")", ")", "d", "[", "'scale_nsegments'", "]", "=", "d", "[", "'scale_nsegments'", "]", "*", "(", "vismem", "+", "immem", ")", "/", "float", "(", "d", "[", "'memory_limit'", "]", ")", "d", "[", "'nsegments'", "]", "=", "max", "(", "1", ",", "min", "(", "d", "[", "'nints'", "]", ",", "int", "(", "d", "[", "'scale_nsegments'", "]", "*", "d", "[", "'inttime'", "]", "*", "d", "[", "'nints'", "]", "/", "(", "fringetime", "-", "d", "[", "'t_overlap'", "]", ")", ")", ")", ")", "# at least 1, at most nints", "calc_segment_times", "(", "d", ")", "(", "vismem", ",", "immem", ")", "=", "calc_memory_footprint", "(", "d", ")", "while", "vismem", "+", "immem", ">", "d", "[", "'memory_limit'", "]", ":", "logger", ".", "debug", "(", "'Doubling nchunk from %d to fit in %d GB memory limit.'", "%", "(", "d", "[", "'nchunk'", "]", ",", "d", "[", "'memory_limit'", "]", ")", ")", "d", "[", "'nchunk'", "]", "=", "2", "*", "d", "[", "'nchunk'", "]", "(", "vismem", ",", "immem", ")", "=", "calc_memory_footprint", "(", "d", ")", "if", "d", "[", "'nchunk'", "]", ">=", "max", "(", "d", "[", "'dtarr'", "]", ")", "/", "min", "(", "d", "[", "'dtarr'", "]", ")", "*", "d", "[", "'nthread'", "]", ":", "# limit nchunk/nthread to at most the range in dt", "d", "[", "'nchunk'", "]", "=", "d", "[", "'nthread'", "]", "break", "(", "vismem", ",", "immem", ")", "=", "calc_memory_footprint", "(", "d", ")", "# final set up of memory", "calc_segment_times", "(", "d", ")", "(", "vismem", ",", "immem", ")", "=", "calc_memory_footprint", "(", "d", ")", "# scaling of number of integrations beyond dt=1", "assert", "all", "(", "d", "[", "'dtarr'", "]", ")", "and", "(", "d", "[", "'dtarr'", "]", "==", "sorted", "(", "d", "[", "'dtarr'", "]", ")", ")", ",", "'dtarr must be larger than 0 and in increasing order'", "# calculate number of thermal noise candidates per segment", "nfalse", "=", "calc_nfalse", "(", "d", ")", "logger", ".", "info", "(", "''", ")", "logger", ".", "info", "(", "'Pipeline summary:'", ")", "if", "'.GN'", "in", "d", "[", "'gainfile'", "]", ":", "logger", ".", "info", "(", "'\\t Products saved with %s. 
telcal calibration with %s'", "%", "(", "d", "[", "'fileroot'", "]", ",", "os", ".", "path", ".", "basename", "(", "d", "[", "'gainfile'", "]", ")", ")", ")", "else", ":", "logger", ".", "info", "(", "'\\t Products saved with %s. CASA calibration files (%s, %s)'", "%", "(", "d", "[", "'fileroot'", "]", ",", "os", ".", "path", ".", "basename", "(", "d", "[", "'gainfile'", "]", ")", ",", "os", ".", "path", ".", "basename", "(", "d", "[", "'bpfile'", "]", ")", ")", ")", "logger", ".", "info", "(", "'\\t Using %d segment%s of %d ints (%.1f s) with overlap of %.1f s'", "%", "(", "d", "[", "'nsegments'", "]", ",", "\"s\"", "[", "not", "d", "[", "'nsegments'", "]", "-", "1", ":", "]", ",", "d", "[", "'readints'", "]", ",", "d", "[", "'t_segment'", "]", ",", "d", "[", "'t_overlap'", "]", ")", ")", "if", "d", "[", "'t_overlap'", "]", ">", "d", "[", "'t_segment'", "]", "/", "3.", ":", "logger", ".", "info", "(", "'\\t\\t Lots of segments needed, since Max DM sweep (%.1f s) close to segment size (%.2f s)'", "%", "(", "d", "[", "'t_overlap'", "]", ",", "d", "[", "'t_segment'", "]", ")", ")", "logger", ".", "info", "(", "'\\t Downsampling in time/freq by %d/%d and skipping %d ints from start of scan.'", "%", "(", "d", "[", "'read_tdownsample'", "]", ",", "d", "[", "'read_fdownsample'", "]", ",", "d", "[", "'nskip'", "]", ")", ")", "logger", ".", "info", "(", "'\\t Excluding ants %s'", "%", "(", "d", "[", "'excludeants'", "]", ")", ")", "logger", ".", "info", "(", "'\\t Using pols %s'", "%", "(", "d", "[", "'pols'", "]", ")", ")", "logger", ".", "info", "(", "''", ")", "logger", ".", "info", "(", "'\\t Search with %s and threshold %.1f.'", "%", "(", "d", "[", "'searchtype'", "]", ",", "d", "[", "'sigma_image1'", "]", ")", ")", "logger", ".", "info", "(", "'\\t Using %d DMs from %.1f to %.1f and dts %s.'", "%", "(", "len", "(", "d", "[", "'dmarr'", "]", ")", ",", "min", "(", "d", "[", "'dmarr'", "]", ")", ",", "max", "(", "d", "[", "'dmarr'", "]", ")", ",", "d", "[", "'dtarr'", "]", ")", ")", "logger", ".", "info", "(", "'\\t Using uvgrid npix=(%d,%d) and res=%d.'", "%", "(", "d", "[", "'npixx'", "]", ",", "d", "[", "'npixy'", "]", ",", "d", "[", "'uvres'", "]", ")", ")", "logger", ".", "info", "(", "'\\t Expect %d thermal false positives per segment.'", "%", "nfalse", ")", "logger", ".", "info", "(", "''", ")", "logger", ".", "info", "(", "'\\t Visibility memory usage is %.1f GB/segment'", "%", "vismem", ")", "logger", ".", "info", "(", "'\\t Imaging in %d chunk%s using max of %.1f GB/segment'", "%", "(", "d", "[", "'nchunk'", "]", ",", "\"s\"", "[", "not", "d", "[", "'nsegments'", "]", "-", "1", ":", "]", ",", "immem", ")", ")", "logger", ".", "info", "(", "'\\t Grand total memory usage: %.1f GB/segment'", "%", "(", "vismem", "+", "immem", ")", ")", "return", "d" ]
Function defines pipeline state for search. Takes data/scan as input. fileroot is the base name for associated products (cal files, noise, cands); if blank, it is set to the filename. paramfile is the name of the file that defines all pipeline parameters (python-like syntax). kwargs are used to override paramfile definitions. Many parameters take 0 as default, which auto-defines ideal parameters. This definition does not yet consider memory/cpu/time limitations. nsegments defines how to break jobs in time. nchunk defines how many jobs are sent to nthreads.
[ "Function", "defines", "pipeline", "state", "for", "search", ".", "Takes", "data", "/", "scan", "as", "input", ".", "fileroot", "is", "base", "name", "for", "associated", "products", "(", "cal", "files", "noise", "cands", ")", ".", "if", "blank", "it", "is", "set", "to", "filename", ".", "paramfile", "is", "name", "of", "file", "that", "defines", "all", "pipeline", "parameters", "(", "python", "-", "like", "syntax", ")", ".", "kwargs", "used", "to", "overload", "paramfile", "definitions", ".", "Many", "parameters", "take", "0", "as", "default", "which", "auto", "-", "defines", "ideal", "parameters", ".", "This", "definition", "does", "not", "yet", "consider", "memory", "/", "cpu", "/", "time", "limitations", ".", "nsegments", "defines", "how", "to", "break", "jobs", "in", "time", ".", "nchunk", "defines", "how", "many", "jobs", "are", "sent", "to", "nthreads", "." ]
python
train
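The image-size logic in the record above searches a table of 2**i * 3**j values for the smallest entry exceeding the oversampled uv range divided by uvres, so the FFT grid stays factorable into small primes. A minimal stand-alone sketch of that idea (hypothetical helper and numbers, not rtpipe code):

# Hypothetical helper mirroring the npixx/npixy selection above: pick the smallest
# FFT-friendly size of the form 2**i * 3**j that exceeds uvrange/uvres.
def good_image_size(uvrange, uvres, oversample=1.0, imax=14, jmax=10):
    target = round(oversample * uvrange) / uvres
    sizes = sorted(2 ** i * 3 ** j for i in range(imax) for j in range(jmax))
    return next(s for s in sizes if s > target)

print(good_image_size(uvrange=12000, uvres=100))  # 128, the smallest 2**i * 3**j above 120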
xolox/python-coloredlogs
coloredlogs/__init__.py
https://github.com/xolox/python-coloredlogs/blob/1cbf0c6bbee400c6ddbc43008143809934ec3e79/coloredlogs/__init__.py#L888-L911
def walk_propagation_tree(logger): """ Walk through the propagation hierarchy of the given logger. :param logger: The logger whose hierarchy to walk (a :class:`~logging.Logger` object). :returns: A generator of :class:`~logging.Logger` objects. .. note:: This uses the undocumented :class:`logging.Logger.parent` attribute to find higher level loggers, however it won't raise an exception if the attribute isn't available. """ while isinstance(logger, logging.Logger): # Yield the logger to our caller. yield logger # Check if the logger has propagation enabled. if logger.propagate: # Continue with the parent logger. We use getattr() because the # `parent' attribute isn't documented so properly speaking we # shouldn't break if it's not available. logger = getattr(logger, 'parent', None) else: # The propagation chain stops here. logger = None
[ "def", "walk_propagation_tree", "(", "logger", ")", ":", "while", "isinstance", "(", "logger", ",", "logging", ".", "Logger", ")", ":", "# Yield the logger to our caller.", "yield", "logger", "# Check if the logger has propagation enabled.", "if", "logger", ".", "propagate", ":", "# Continue with the parent logger. We use getattr() because the", "# `parent' attribute isn't documented so properly speaking we", "# shouldn't break if it's not available.", "logger", "=", "getattr", "(", "logger", ",", "'parent'", ",", "None", ")", "else", ":", "# The propagation chain stops here.", "logger", "=", "None" ]
Walk through the propagation hierarchy of the given logger. :param logger: The logger whose hierarchy to walk (a :class:`~logging.Logger` object). :returns: A generator of :class:`~logging.Logger` objects. .. note:: This uses the undocumented :class:`logging.Logger.parent` attribute to find higher level loggers, however it won't raise an exception if the attribute isn't available.
[ "Walk", "through", "the", "propagation", "hierarchy", "of", "the", "given", "logger", "." ]
python
train
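A short usage sketch for walk_propagation_tree above; it assumes the function is importable from the coloredlogs package, as the path field suggests, and uses only stdlib loggers:

# Walk from a child logger up through its parents to the root logger.
import logging
from coloredlogs import walk_propagation_tree  # assumed importable, per the path above

logging.getLogger('app')            # create the parent first so the hierarchy links up
child = logging.getLogger('app.db')
print([lg.name for lg in walk_propagation_tree(child)])  # ['app.db', 'app', 'root']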
raiden-network/raiden
raiden/tasks.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/tasks.py#L122-L133
def check_network_id(network_id, web3: Web3): """ Check periodically if the underlying ethereum client's network id has changed""" while True: current_id = int(web3.version.network) if network_id != current_id: raise RuntimeError( f'Raiden was running on network with id {network_id} and it detected ' f'that the underlying ethereum client network id changed to {current_id}.' f' Changing the underlying blockchain while the Raiden node is running ' f'is not supported.', ) gevent.sleep(CHECK_NETWORK_ID_INTERVAL)
[ "def", "check_network_id", "(", "network_id", ",", "web3", ":", "Web3", ")", ":", "while", "True", ":", "current_id", "=", "int", "(", "web3", ".", "version", ".", "network", ")", "if", "network_id", "!=", "current_id", ":", "raise", "RuntimeError", "(", "f'Raiden was running on network with id {network_id} and it detected '", "f'that the underlying ethereum client network id changed to {current_id}.'", "f' Changing the underlying blockchain while the Raiden node is running '", "f'is not supported.'", ",", ")", "gevent", ".", "sleep", "(", "CHECK_NETWORK_ID_INTERVAL", ")" ]
Check periodically if the underlying ethereum client's network id has changed
[ "Check", "periodically", "if", "the", "underlying", "ethereum", "client", "s", "network", "id", "has", "changed" ]
python
train
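A wiring sketch for check_network_id above (hypothetical setup): since the function loops forever, it would normally be spawned as a background greenlet next to the node's main loop, and the greenlet re-raises the RuntimeError if the chain id changes.

# Hypothetical wiring: run the watchdog in a background greenlet.
import gevent
from web3 import Web3, HTTPProvider

web3 = Web3(HTTPProvider('http://localhost:8545'))    # assumed local ethereum client
expected_id = int(web3.version.network)               # same API the check itself uses
watchdog = gevent.spawn(check_network_id, expected_id, web3)
# ... run the node; watchdog.get() re-raises if the underlying network id changes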
moderngl/moderngl
examples/window/sdl2/window.py
https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/examples/window/sdl2/window.py#L124-L130
def destroy(self): """ Gracefully close the window """ sdl2.SDL_GL_DeleteContext(self.context) sdl2.SDL_DestroyWindow(self.window) sdl2.SDL_Quit()
[ "def", "destroy", "(", "self", ")", ":", "sdl2", ".", "SDL_GL_DeleteContext", "(", "self", ".", "context", ")", "sdl2", ".", "SDL_DestroyWindow", "(", "self", ".", "window", ")", "sdl2", ".", "SDL_Quit", "(", ")" ]
Gracefully close the window
[ "Gracefully", "close", "the", "window" ]
python
train
dswah/pyGAM
pygam/terms.py
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/terms.py#L1632-L1655
def pop(self, i=None): """remove the ith term from the term list Parameters --------- i : int, optional term to remove from term list by default the last term is popped. Returns ------- term : Term """ if i == None: i = len(self) - 1 if i >= len(self._terms) or i < 0: raise ValueError('requested pop {}th term, but found only {} terms'\ .format(i, len(self._terms))) term = self._terms[i] self._terms = self._terms[:i] + self._terms[i+1:] return term
[ "def", "pop", "(", "self", ",", "i", "=", "None", ")", ":", "if", "i", "==", "None", ":", "i", "=", "len", "(", "self", ")", "-", "1", "if", "i", ">=", "len", "(", "self", ".", "_terms", ")", "or", "i", "<", "0", ":", "raise", "ValueError", "(", "'requested pop {}th term, but found only {} terms'", ".", "format", "(", "i", ",", "len", "(", "self", ".", "_terms", ")", ")", ")", "term", "=", "self", ".", "_terms", "[", "i", "]", "self", ".", "_terms", "=", "self", ".", "_terms", "[", ":", "i", "]", "+", "self", ".", "_terms", "[", "i", "+", "1", ":", "]", "return", "term" ]
remove the ith term from the term list Parameters --------- i : int, optional term to remove from term list by default the last term is popped. Returns ------- term : Term
[ "remove", "the", "ith", "term", "from", "the", "term", "list" ]
python
train
thespacedoctor/sloancone
build/lib/sloancone/image.py
https://github.com/thespacedoctor/sloancone/blob/106ea6533ad57f5f0ca82bf6db3053132bdb42e1/build/lib/sloancone/image.py#L136-L194
def _download_sdss_image( self): """*download sdss image* """ self.log.info('starting the ``_download_sdss_image`` method') opt = "" if self.grid: opt += "G" if self.label: opt += "L" if self.photocat: opt += "P" if self.speccat: opt += "S" if self.invertColors: opt += "I" if len(opt): opt = "opt=%(opt)s&" % locals() width = self.pixelWidth scale = (self.arcminWidth * 60.) / width converter = unit_conversion( log=self.log ) ra = converter.ra_sexegesimal_to_decimal( ra=self.ra ) dec = converter.dec_sexegesimal_to_decimal( dec=self.dec ) url = """http://skyservice.pha.jhu.edu/DR12/ImgCutout/getjpeg.aspx?ra=%(ra)s&dec=%(dec)s&scale=%(scale)s&%(opt)sPhotoObjs=on&width=%(width)s&height=%(width)s""" % locals( ) from fundamentals.download import multiobject_download localUrls = multiobject_download( urlList=[url], downloadDirectory=self.downloadDirectory, log=self.log, timeStamp=False, timeout=180, concurrentDownloads=10, resetFilename=[self.filename], credentials=False, # { 'username' : "...", "password", "..." } longTime=True, indexFilenames=False ) print url self.log.info('completed the ``_download_sdss_image`` method') return None
[ "def", "_download_sdss_image", "(", "self", ")", ":", "self", ".", "log", ".", "info", "(", "'starting the ``_download_sdss_image`` method'", ")", "opt", "=", "\"\"", "if", "self", ".", "grid", ":", "opt", "+=", "\"G\"", "if", "self", ".", "label", ":", "opt", "+=", "\"L\"", "if", "self", ".", "photocat", ":", "opt", "+=", "\"P\"", "if", "self", ".", "speccat", ":", "opt", "+=", "\"S\"", "if", "self", ".", "invertColors", ":", "opt", "+=", "\"I\"", "if", "len", "(", "opt", ")", ":", "opt", "=", "\"opt=%(opt)s&\"", "%", "locals", "(", ")", "width", "=", "self", ".", "pixelWidth", "scale", "=", "(", "self", ".", "arcminWidth", "*", "60.", ")", "/", "width", "converter", "=", "unit_conversion", "(", "log", "=", "self", ".", "log", ")", "ra", "=", "converter", ".", "ra_sexegesimal_to_decimal", "(", "ra", "=", "self", ".", "ra", ")", "dec", "=", "converter", ".", "dec_sexegesimal_to_decimal", "(", "dec", "=", "self", ".", "dec", ")", "url", "=", "\"\"\"http://skyservice.pha.jhu.edu/DR12/ImgCutout/getjpeg.aspx?ra=%(ra)s&dec=%(dec)s&scale=%(scale)s&%(opt)sPhotoObjs=on&width=%(width)s&height=%(width)s\"\"\"", "%", "locals", "(", ")", "from", "fundamentals", ".", "download", "import", "multiobject_download", "localUrls", "=", "multiobject_download", "(", "urlList", "=", "[", "url", "]", ",", "downloadDirectory", "=", "self", ".", "downloadDirectory", ",", "log", "=", "self", ".", "log", ",", "timeStamp", "=", "False", ",", "timeout", "=", "180", ",", "concurrentDownloads", "=", "10", ",", "resetFilename", "=", "[", "self", ".", "filename", "]", ",", "credentials", "=", "False", ",", "# { 'username' : \"...\", \"password\", \"...\" }", "longTime", "=", "True", ",", "indexFilenames", "=", "False", ")", "print", "url", "self", ".", "log", ".", "info", "(", "'completed the ``_download_sdss_image`` method'", ")", "return", "None" ]
*download sdss image*
[ "*", "download", "sdss", "image", "*" ]
python
train
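Most of _download_sdss_image above is assembling the SkyServer ImgCutout URL from the option flags and the arcsec-per-pixel scale; a stand-alone sketch of just that construction (hypothetical helper, no download performed):

# Hypothetical sketch of the cutout-URL construction used above.
def sdss_cutout_url(ra, dec, arcmin_width, pixel_width, grid=False, label=False):
    opt = ''.join(flag for flag, on in (('G', grid), ('L', label)) if on)
    opt = 'opt=%s&' % opt if opt else ''
    scale = (arcmin_width * 60.0) / pixel_width   # arcsec per pixel
    return ('http://skyservice.pha.jhu.edu/DR12/ImgCutout/getjpeg.aspx?'
            'ra=%s&dec=%s&scale=%s&%sPhotoObjs=on&width=%s&height=%s'
            % (ra, dec, scale, opt, pixel_width, pixel_width))

print(sdss_cutout_url(ra=10.5, dec=41.2, arcmin_width=5, pixel_width=500, grid=True))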
SCIP-Interfaces/PySCIPOpt
examples/tutorial/logical.py
https://github.com/SCIP-Interfaces/PySCIPOpt/blob/9c960b40d94a48b0304d73dbe28b467b9c065abe/examples/tutorial/logical.py#L50-L58
def or_constraint(v=0, sense="maximize"): """ OR constraint""" assert v in [0,1], "v must be 0 or 1 instead of %s" % v.__repr__() model, x, y, z = _init() r = model.addVar("r", "B") model.addConsOr([x,y,z], r) model.addCons(x==v) model.setObjective(r, sense=sense) _optimize("OR", model)
[ "def", "or_constraint", "(", "v", "=", "0", ",", "sense", "=", "\"maximize\"", ")", ":", "assert", "v", "in", "[", "0", ",", "1", "]", ",", "\"v must be 0 or 1 instead of %s\"", "%", "v", ".", "__repr__", "(", ")", "model", ",", "x", ",", "y", ",", "z", "=", "_init", "(", ")", "r", "=", "model", ".", "addVar", "(", "\"r\"", ",", "\"B\"", ")", "model", ".", "addConsOr", "(", "[", "x", ",", "y", ",", "z", "]", ",", "r", ")", "model", ".", "addCons", "(", "x", "==", "v", ")", "model", ".", "setObjective", "(", "r", ",", "sense", "=", "sense", ")", "_optimize", "(", "\"OR\"", ",", "model", ")" ]
OR constraint
[ "OR", "constraint" ]
python
train
hubo1016/vlcp
vlcp/protocol/http.py
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/protocol/http.py#L165-L181
def date_time_string(timestamp=None): """Return the current date and time formatted for a message header.""" global _last_date_time_string _last_timestamp, _last_str = _last_date_time_string if timestamp is None: timestamp = time.time() _curr_timestamp = int(timestamp) if _curr_timestamp == _last_timestamp: return _last_str else: year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp) s = b"%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( weekdayname[wd], day, monthname[month], year, hh, mm, ss) _last_date_time_string = (_curr_timestamp, s) return s
[ "def", "date_time_string", "(", "timestamp", "=", "None", ")", ":", "global", "_last_date_time_string", "_last_timestamp", ",", "_last_str", "=", "_last_date_time_string", "if", "timestamp", "is", "None", ":", "timestamp", "=", "time", ".", "time", "(", ")", "_curr_timestamp", "=", "int", "(", "timestamp", ")", "if", "_curr_timestamp", "==", "_last_timestamp", ":", "return", "_last_str", "else", ":", "year", ",", "month", ",", "day", ",", "hh", ",", "mm", ",", "ss", ",", "wd", ",", "y", ",", "z", "=", "time", ".", "gmtime", "(", "timestamp", ")", "s", "=", "b\"%s, %02d %3s %4d %02d:%02d:%02d GMT\"", "%", "(", "weekdayname", "[", "wd", "]", ",", "day", ",", "monthname", "[", "month", "]", ",", "year", ",", "hh", ",", "mm", ",", "ss", ")", "_last_date_time_string", "=", "(", "_curr_timestamp", ",", "s", ")", "return", "s" ]
Return the current date and time formatted for a message header.
[ "Return", "the", "current", "date", "and", "time", "formatted", "for", "a", "message", "header", "." ]
python
train
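Usage sketch for date_time_string above: because the helper caches the last formatted value keyed on the whole-second timestamp, repeated calls within the same second return the identical bytes object rather than reformatting.

# Assuming the helper above is importable from its module.
import time

now = time.time()
header = date_time_string(now)           # e.g. b'Mon, 06 May 2019 12:00:00 GMT'
assert date_time_string(now) is header   # cached bytes reused for the same second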
rameshg87/pyremotevbox
pyremotevbox/ZSI/wstools/c14n.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/c14n.py#L238-L254
def _do_pi(self, node): '''_do_pi(self, node) -> None Process a PI node. Render a leading or trailing #xA if the document order of the PI is greater or lesser (respectively) than the document element. ''' if not _in_subset(self.subset, node): return W = self.write if self.documentOrder == _GreaterElement: W('\n') W('<?') W(node.nodeName) s = node.data if s: W(' ') W(s) W('?>') if self.documentOrder == _LesserElement: W('\n')
[ "def", "_do_pi", "(", "self", ",", "node", ")", ":", "if", "not", "_in_subset", "(", "self", ".", "subset", ",", "node", ")", ":", "return", "W", "=", "self", ".", "write", "if", "self", ".", "documentOrder", "==", "_GreaterElement", ":", "W", "(", "'\\n'", ")", "W", "(", "'<?'", ")", "W", "(", "node", ".", "nodeName", ")", "s", "=", "node", ".", "data", "if", "s", ":", "W", "(", "' '", ")", "W", "(", "s", ")", "W", "(", "'?>'", ")", "if", "self", ".", "documentOrder", "==", "_LesserElement", ":", "W", "(", "'\\n'", ")" ]
_do_pi(self, node) -> None Process a PI node. Render a leading or trailing #xA if the document order of the PI is greater or lesser (respectively) than the document element.
[ "_do_pi", "(", "self", "node", ")", "-", ">", "None", "Process", "a", "PI", "node", ".", "Render", "a", "leading", "or", "trailing", "#xA", "if", "the", "document", "order", "of", "the", "PI", "is", "greater", "or", "lesser", "(", "respectively", ")", "than", "the", "document", "element", "." ]
python
train
click-contrib/click-configfile
click_configfile.py
https://github.com/click-contrib/click-configfile/blob/a616204cb9944125fd5051556f27a7ccef611e22/click_configfile.py#L203-L222
def select_params_from_section_schema(section_schema, param_class=Param, deep=False): """Selects the parameters of a config section schema. :param section_schema: Configuration file section schema to use. :return: Generator of params """ # pylint: disable=invalid-name for name, value in inspect.getmembers(section_schema): if name.startswith("__") or value is None: continue # pragma: no cover elif inspect.isclass(value) and deep: # -- CASE: class => SELF-CALL (recursively). # pylint: disable= bad-continuation cls = value for name, value in select_params_from_section_schema(cls, param_class=param_class, deep=True): yield (name, value) elif isinstance(value, param_class): yield (name, value)
[ "def", "select_params_from_section_schema", "(", "section_schema", ",", "param_class", "=", "Param", ",", "deep", "=", "False", ")", ":", "# pylint: disable=invalid-name", "for", "name", ",", "value", "in", "inspect", ".", "getmembers", "(", "section_schema", ")", ":", "if", "name", ".", "startswith", "(", "\"__\"", ")", "or", "value", "is", "None", ":", "continue", "# pragma: no cover", "elif", "inspect", ".", "isclass", "(", "value", ")", "and", "deep", ":", "# -- CASE: class => SELF-CALL (recursively).", "# pylint: disable= bad-continuation", "cls", "=", "value", "for", "name", ",", "value", "in", "select_params_from_section_schema", "(", "cls", ",", "param_class", "=", "param_class", ",", "deep", "=", "True", ")", ":", "yield", "(", "name", ",", "value", ")", "elif", "isinstance", "(", "value", ",", "param_class", ")", ":", "yield", "(", "name", ",", "value", ")" ]
Selects the parameters of a config section schema. :param section_schema: Configuration file section schema to use. :return: Generator of params
[ "Selects", "the", "parameters", "of", "a", "config", "section", "schema", "." ]
python
train
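A usage sketch for select_params_from_section_schema above with a hypothetical two-level schema; Param and SectionSchema are assumed to be the click_configfile classes, with Param taking click-style keyword arguments:

# Hypothetical schema: deep=True also walks the nested section classes.
from click_configfile import Param, SectionSchema, select_params_from_section_schema

class ConfigSectionSchema(object):
    class Hello(SectionSchema):
        name = Param(type=str)
        number = Param(type=int)

for name, param in select_params_from_section_schema(ConfigSectionSchema, deep=True):
    print(name)   # 'name', then 'number'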
kdeldycke/maildir-deduplicate
maildir_deduplicate/cli.py
https://github.com/kdeldycke/maildir-deduplicate/blob/f1c6ff25b80c6c1a4dc2dc7a65b34d808b0b7733/maildir_deduplicate/cli.py#L224-L235
def hash(ctx, message_id, message): """ Take a single mail message and show its canonicalised form and hash. Mainly used to debug message hashing. """ conf = Config(message_id=message_id) mail = Mail(message, conf) logger.info(mail.header_text) logger.info('-' * 70) logger.info('Hash: {}'.format(mail.hash_key))
[ "def", "hash", "(", "ctx", ",", "message_id", ",", "message", ")", ":", "conf", "=", "Config", "(", "message_id", "=", "message_id", ")", "mail", "=", "Mail", "(", "message", ",", "conf", ")", "logger", ".", "info", "(", "mail", ".", "header_text", ")", "logger", ".", "info", "(", "'-'", "*", "70", ")", "logger", ".", "info", "(", "'Hash: {}'", ".", "format", "(", "mail", ".", "hash_key", ")", ")" ]
Take a single mail message and show its canonicalised form and hash. Mainly used to debug message hashing.
[ "Take", "a", "single", "mail", "message", "and", "show", "its", "canonicalised", "form", "and", "hash", "." ]
python
train
miso-belica/sumy
sumy/summarizers/lex_rank.py
https://github.com/miso-belica/sumy/blob/099ab4938e2c1b6a011297375586bac2953641b9/sumy/summarizers/lex_rank.py#L120-L157
def cosine_similarity(sentence1, sentence2, tf1, tf2, idf_metrics): """ We compute idf-modified-cosine(sentence1, sentence2) here. It's cosine similarity of these two sentences (vectors) A, B computed as cos(x, y) = A . B / (|A| . |B|) Sentences are represented as vector TF*IDF metrics. :param sentence1: Iterable object where every item represents word of 1st sentence. :param sentence2: Iterable object where every item represents word of 2nd sentence. :type tf1: dict :param tf1: Term frequencies of words from 1st sentence. :type tf2: dict :param tf2: Term frequencies of words from 2nd sentence :type idf_metrics: dict :param idf_metrics: Inverted document metrics of the sentences. Every sentence is treated as document for this algorithm. :rtype: float :return: Returns -1.0 for opposite similarity, 1.0 for the same sentence and zero for no similarity between sentences. """ unique_words1 = frozenset(sentence1) unique_words2 = frozenset(sentence2) common_words = unique_words1 & unique_words2 numerator = 0.0 for term in common_words: numerator += tf1[term]*tf2[term] * idf_metrics[term]**2 denominator1 = sum((tf1[t]*idf_metrics[t])**2 for t in unique_words1) denominator2 = sum((tf2[t]*idf_metrics[t])**2 for t in unique_words2) if denominator1 > 0 and denominator2 > 0: return numerator / (math.sqrt(denominator1) * math.sqrt(denominator2)) else: return 0.0
[ "def", "cosine_similarity", "(", "sentence1", ",", "sentence2", ",", "tf1", ",", "tf2", ",", "idf_metrics", ")", ":", "unique_words1", "=", "frozenset", "(", "sentence1", ")", "unique_words2", "=", "frozenset", "(", "sentence2", ")", "common_words", "=", "unique_words1", "&", "unique_words2", "numerator", "=", "0.0", "for", "term", "in", "common_words", ":", "numerator", "+=", "tf1", "[", "term", "]", "*", "tf2", "[", "term", "]", "*", "idf_metrics", "[", "term", "]", "**", "2", "denominator1", "=", "sum", "(", "(", "tf1", "[", "t", "]", "*", "idf_metrics", "[", "t", "]", ")", "**", "2", "for", "t", "in", "unique_words1", ")", "denominator2", "=", "sum", "(", "(", "tf2", "[", "t", "]", "*", "idf_metrics", "[", "t", "]", ")", "**", "2", "for", "t", "in", "unique_words2", ")", "if", "denominator1", ">", "0", "and", "denominator2", ">", "0", ":", "return", "numerator", "/", "(", "math", ".", "sqrt", "(", "denominator1", ")", "*", "math", ".", "sqrt", "(", "denominator2", ")", ")", "else", ":", "return", "0.0" ]
We compute idf-modified-cosine(sentence1, sentence2) here. It's cosine similarity of these two sentences (vectors) A, B computed as cos(x, y) = A . B / (|A| . |B|) Sentences are represented as vector TF*IDF metrics. :param sentence1: Iterable object where every item represents word of 1st sentence. :param sentence2: Iterable object where every item represents word of 2nd sentence. :type tf1: dict :param tf1: Term frequencies of words from 1st sentence. :type tf2: dict :param tf2: Term frequencies of words from 2nd sentence :type idf_metrics: dict :param idf_metrics: Inverted document metrics of the sentences. Every sentence is treated as document for this algorithm. :rtype: float :return: Returns -1.0 for opposite similarity, 1.0 for the same sentence and zero for no similarity between sentences.
[ "We", "compute", "idf", "-", "modified", "-", "cosine", "(", "sentence1", "sentence2", ")", "here", ".", "It", "s", "cosine", "similarity", "of", "these", "two", "sentences", "(", "vectors", ")", "A", "B", "computed", "as", "cos", "(", "x", "y", ")", "=", "A", ".", "B", "/", "(", "|A|", ".", "|B|", ")", "Sentences", "are", "represented", "as", "vector", "TF", "*", "IDF", "metrics", "." ]
python
train
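A worked micro-example for cosine_similarity above with made-up term frequencies and idf values (in sumy the function is a static method of LexRankSummarizer; it is called directly here for readability):

# Hypothetical numbers: two two-word sentences sharing only the word 'cat'.
sentence1, sentence2 = ['cat', 'sat'], ['cat', 'ran']
tf1, tf2 = {'cat': 1, 'sat': 1}, {'cat': 1, 'ran': 1}
idf = {'cat': 1.0, 'sat': 2.0, 'ran': 2.0}

# numerator    = 1 * 1 * 1.0**2                = 1.0
# denominator1 = (1*1.0)**2 + (1*2.0)**2       = 5.0  (denominator2 is the same)
# similarity   = 1.0 / (sqrt(5.0) * sqrt(5.0)) = 0.2
print(cosine_similarity(sentence1, sentence2, tf1, tf2, idf))  # ~0.2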
pybel/pybel
src/pybel/struct/filters/edge_predicate_builders.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/filters/edge_predicate_builders.py#L101-L109
def build_upstream_edge_predicate(nodes: Iterable[BaseEntity]) -> EdgePredicate: """Build an edge predicate that pass for relations for which one of the given nodes is the object.""" nodes = set(nodes) def upstream_filter(graph: BELGraph, u: BaseEntity, v: BaseEntity, k: str) -> bool: """Pass for relations for which one of the given nodes is the object.""" return v in nodes and graph[u][v][k][RELATION] in CAUSAL_RELATIONS return upstream_filter
[ "def", "build_upstream_edge_predicate", "(", "nodes", ":", "Iterable", "[", "BaseEntity", "]", ")", "->", "EdgePredicate", ":", "nodes", "=", "set", "(", "nodes", ")", "def", "upstream_filter", "(", "graph", ":", "BELGraph", ",", "u", ":", "BaseEntity", ",", "v", ":", "BaseEntity", ",", "k", ":", "str", ")", "->", "bool", ":", "\"\"\"Pass for relations for which one of the given nodes is the object.\"\"\"", "return", "v", "in", "nodes", "and", "graph", "[", "u", "]", "[", "v", "]", "[", "k", "]", "[", "RELATION", "]", "in", "CAUSAL_RELATIONS", "return", "upstream_filter" ]
Build an edge predicate that pass for relations for which one of the given nodes is the object.
[ "Build", "an", "edge", "predicate", "that", "pass", "for", "relations", "for", "which", "one", "of", "the", "given", "nodes", "is", "the", "object", "." ]
python
train
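A usage sketch for build_upstream_edge_predicate above with hypothetical PyBEL nodes; the returned closure is applied per edge of an existing BELGraph:

# Hypothetical usage: keep only causal edges whose object is one of the target nodes.
from pybel.dsl import Protein

targets = [Protein(namespace='HGNC', name='MAPK1')]
is_upstream = build_upstream_edge_predicate(targets)

upstream_edges = [
    (u, v, k)
    for u, v, k in graph.edges(keys=True)   # graph: an existing pybel.BELGraph
    if is_upstream(graph, u, v, k)
]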
Genida/django-meerkat
src/meerkat/utils/time.py
https://github.com/Genida/django-meerkat/blob/486502a75bb0800266db785fd32717d8c0eb8deb/src/meerkat/utils/time.py#L21-L33
def daterange(start_date, end_date): """ Yield one date per day from starting date to ending date. Args: start_date (date): starting date. end_date (date): ending date. Yields: date: a date for each day within the range. """ for n in range(int((end_date - start_date).days)): yield start_date + timedelta(n)
[ "def", "daterange", "(", "start_date", ",", "end_date", ")", ":", "for", "n", "in", "range", "(", "int", "(", "(", "end_date", "-", "start_date", ")", ".", "days", ")", ")", ":", "yield", "start_date", "+", "timedelta", "(", "n", ")" ]
Yield one date per day from starting date to ending date. Args: start_date (date): starting date. end_date (date): ending date. Yields: date: a date for each day within the range.
[ "Yield", "one", "date", "per", "day", "from", "starting", "date", "to", "ending", "date", "." ]
python
train
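Usage note for daterange above: the range is end-exclusive, so the ending date itself is not yielded.

# Three days are yielded; end_date is excluded.
from datetime import date

for day in daterange(date(2019, 1, 1), date(2019, 1, 4)):
    print(day)
# 2019-01-01
# 2019-01-02
# 2019-01-03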
razor-x/scipy-data_fitting
scipy_data_fitting/figure/plot.py
https://github.com/razor-x/scipy-data_fitting/blob/c756a645da8629699b3f22244bfb7d5d4d88b179/scipy_data_fitting/figure/plot.py#L88-L92
def plot_fit(self): """ Add the fit to the plot. """ self.plt.plot(*self.fit.fit, **self.options['fit'])
[ "def", "plot_fit", "(", "self", ")", ":", "self", ".", "plt", ".", "plot", "(", "*", "self", ".", "fit", ".", "fit", ",", "*", "*", "self", ".", "options", "[", "'fit'", "]", ")" ]
Add the fit to the plot.
[ "Add", "the", "fit", "to", "the", "plot", "." ]
python
train
akissa/clamavmirror
setup.py
https://github.com/akissa/clamavmirror/blob/6ef1cfa9fb4fa4a7b8439004f1cd8775f51d77f6/setup.py#L39-L72
def main(): """Main""" opts = dict( name="clamavmirror", version='0.0.4', description="ClamAV Signature Mirroring Tool", long_description=get_readme(), keywords="clamav mirror mirroring mirror-tool signatures", author="Andrew Colin Kissa", author_email="[email protected]", url="https://github.com/akissa/clamavmirror", license="MPL 2.0", packages=[], entry_points={ 'console_scripts': [ 'clamavmirror=clamavmirror:main' ], }, include_package_data=True, zip_safe=False, install_requires=['urllib3', 'dnspython', 'certifi'], classifiers=[ 'Development Status :: 4 - Beta', 'Programming Language :: Python', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Topic :: Software Development :: Libraries :: Python Modules', 'Intended Audience :: System Administrators', 'Environment :: Console', 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)', 'Natural Language :: English', 'Operating System :: OS Independent'],) setup(**opts)
[ "def", "main", "(", ")", ":", "opts", "=", "dict", "(", "name", "=", "\"clamavmirror\"", ",", "version", "=", "'0.0.4'", ",", "description", "=", "\"ClamAV Signature Mirroring Tool\"", ",", "long_description", "=", "get_readme", "(", ")", ",", "keywords", "=", "\"clamav mirror mirroring mirror-tool signatures\"", ",", "author", "=", "\"Andrew Colin Kissa\"", ",", "author_email", "=", "\"[email protected]\"", ",", "url", "=", "\"https://github.com/akissa/clamavmirror\"", ",", "license", "=", "\"MPL 2.0\"", ",", "packages", "=", "[", "]", ",", "entry_points", "=", "{", "'console_scripts'", ":", "[", "'clamavmirror=clamavmirror:main'", "]", ",", "}", ",", "include_package_data", "=", "True", ",", "zip_safe", "=", "False", ",", "install_requires", "=", "[", "'urllib3'", ",", "'dnspython'", ",", "'certifi'", "]", ",", "classifiers", "=", "[", "'Development Status :: 4 - Beta'", ",", "'Programming Language :: Python'", ",", "'Programming Language :: Python :: 2.6'", ",", "'Programming Language :: Python :: 2.7'", ",", "'Topic :: Software Development :: Libraries :: Python Modules'", ",", "'Intended Audience :: System Administrators'", ",", "'Environment :: Console'", ",", "'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)'", ",", "'Natural Language :: English'", ",", "'Operating System :: OS Independent'", "]", ",", ")", "setup", "(", "*", "*", "opts", ")" ]
Main
[ "Main" ]
python
train
aio-libs/aiohttp
aiohttp/multipart.py
https://github.com/aio-libs/aiohttp/blob/9504fe2affaaff673fa4f3754c1c44221f8ba47d/aiohttp/multipart.py#L399-L405
async def text(self, *, encoding: Optional[str]=None) -> str: """Like read(), but assumes that body part contains text data.""" data = await self.read(decode=True) # see https://www.w3.org/TR/html5/forms.html#multipart/form-data-encoding-algorithm # NOQA # and https://dvcs.w3.org/hg/xhr/raw-file/tip/Overview.html#dom-xmlhttprequest-send # NOQA encoding = encoding or self.get_charset(default='utf-8') return data.decode(encoding)
[ "async", "def", "text", "(", "self", ",", "*", ",", "encoding", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "str", ":", "data", "=", "await", "self", ".", "read", "(", "decode", "=", "True", ")", "# see https://www.w3.org/TR/html5/forms.html#multipart/form-data-encoding-algorithm # NOQA", "# and https://dvcs.w3.org/hg/xhr/raw-file/tip/Overview.html#dom-xmlhttprequest-send # NOQA", "encoding", "=", "encoding", "or", "self", ".", "get_charset", "(", "default", "=", "'utf-8'", ")", "return", "data", ".", "decode", "(", "encoding", ")" ]
Like read(), but assumes that body part contains text data.
[ "Like", "read", "()", "but", "assumes", "that", "body", "part", "contains", "text", "data", "." ]
python
train
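A usage sketch for BodyPartReader.text above inside a hypothetical aiohttp server handler: each multipart body part can be read as text, with the charset taken from the part headers and utf-8 as the fallback.

# Hypothetical handler: read the first multipart field of a POST as text.
from aiohttp import web

async def handle_upload(request):
    reader = await request.multipart()
    part = await reader.next()             # first body part
    content = await part.text()            # decodes with the part's charset, utf-8 fallback
    return web.Response(text='received %d characters' % len(content))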
jaraco/jaraco.windows
jaraco/windows/filesystem/__init__.py
https://github.com/jaraco/jaraco.windows/blob/51811efed50b46ad08daa25408a1cc806bc8d519/jaraco/windows/filesystem/__init__.py#L48-L54
def _is_target_a_directory(link, rel_target): """ If creating a symlink from link to a target, determine if target is a directory (relative to dirname(link)). """ target = os.path.join(os.path.dirname(link), rel_target) return os.path.isdir(target)
[ "def", "_is_target_a_directory", "(", "link", ",", "rel_target", ")", ":", "target", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "link", ")", ",", "rel_target", ")", "return", "os", ".", "path", ".", "isdir", "(", "target", ")" ]
If creating a symlink from link to a target, determine if target is a directory (relative to dirname(link)).
[ "If", "creating", "a", "symlink", "from", "link", "to", "a", "target", "determine", "if", "target", "is", "a", "directory", "(", "relative", "to", "dirname", "(", "link", "))", "." ]
python
train
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/useragent.py
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/useragent.py#L146-L160
def set_global(cls, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs): # type: (Optional[str], Optional[str], Optional[str], Any) -> None """ Set global user agent string Args: user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: None """ cls.user_agent = cls._create(user_agent, user_agent_config_yaml, user_agent_lookup, **kwargs)
[ "def", "set_global", "(", "cls", ",", "user_agent", "=", "None", ",", "user_agent_config_yaml", "=", "None", ",", "user_agent_lookup", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# type: (Optional[str], Optional[str], Optional[str], Any) -> None", "cls", ".", "user_agent", "=", "cls", ".", "_create", "(", "user_agent", ",", "user_agent_config_yaml", ",", "user_agent_lookup", ",", "*", "*", "kwargs", ")" ]
Set global user agent string Args: user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: None
[ "Set", "global", "user", "agent", "string" ]
python
train
saltstack/salt
salt/utils/master.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/master.py#L648-L767
def run(self): ''' Main loop of the ConCache, starts updates in intervals and answers requests from the MWorkers ''' context = zmq.Context() # the socket for incoming cache requests creq_in = context.socket(zmq.REP) creq_in.setsockopt(zmq.LINGER, 100) creq_in.bind('ipc://' + self.cache_sock) # the socket for incoming cache-updates from workers cupd_in = context.socket(zmq.SUB) cupd_in.setsockopt(zmq.SUBSCRIBE, b'') cupd_in.setsockopt(zmq.LINGER, 100) cupd_in.bind('ipc://' + self.update_sock) # the socket for the timer-event timer_in = context.socket(zmq.SUB) timer_in.setsockopt(zmq.SUBSCRIBE, b'') timer_in.setsockopt(zmq.LINGER, 100) timer_in.connect('ipc://' + self.upd_t_sock) poller = zmq.Poller() poller.register(creq_in, zmq.POLLIN) poller.register(cupd_in, zmq.POLLIN) poller.register(timer_in, zmq.POLLIN) # our serializer serial = salt.payload.Serial(self.opts.get('serial', '')) # register a signal handler signal.signal(signal.SIGINT, self.signal_handler) # secure the sockets from the world self.secure() log.info('ConCache started') while self.running: # we check for new events with the poller try: socks = dict(poller.poll(1)) except KeyboardInterrupt: self.stop() except zmq.ZMQError as zmq_err: log.error('ConCache ZeroMQ-Error occurred') log.exception(zmq_err) self.stop() # check for next cache-request if socks.get(creq_in) == zmq.POLLIN: msg = serial.loads(creq_in.recv()) log.debug('ConCache Received request: %s', msg) # requests to the minion list are send as str's if isinstance(msg, six.string_types): if msg == 'minions': # Send reply back to client reply = serial.dumps(self.minions) creq_in.send(reply) # check for next cache-update from workers if socks.get(cupd_in) == zmq.POLLIN: new_c_data = serial.loads(cupd_in.recv()) # tell the worker to exit #cupd_in.send(serial.dumps('ACK')) # check if the returned data is usable if not isinstance(new_c_data, list): log.error('ConCache Worker returned unusable result') del new_c_data continue # the cache will receive lists of minions # 1. if the list only has 1 item, its from an MWorker, we append it # 2. if the list contains another list, its from a CacheWorker and # the currently cached minions are replaced with that list # 3. anything else is considered malformed try: if not new_c_data: log.debug('ConCache Got empty update from worker') continue data = new_c_data[0] if isinstance(data, six.string_types): if data not in self.minions: log.debug('ConCache Adding minion %s to cache', new_c_data[0]) self.minions.append(data) elif isinstance(data, list): log.debug('ConCache Replacing minion list from worker') self.minions = data except IndexError: log.debug('ConCache Got malformed result dict from worker') del new_c_data log.info('ConCache %s entries in cache', len(self.minions)) # check for next timer-event to start new jobs if socks.get(timer_in) == zmq.POLLIN: sec_event = serial.loads(timer_in.recv()) # update the list every 30 seconds if int(sec_event % 30) == 0: cw = CacheWorker(self.opts) cw.start() self.stop() creq_in.close() cupd_in.close() timer_in.close() context.term() log.debug('ConCache Shutting down')
[ "def", "run", "(", "self", ")", ":", "context", "=", "zmq", ".", "Context", "(", ")", "# the socket for incoming cache requests", "creq_in", "=", "context", ".", "socket", "(", "zmq", ".", "REP", ")", "creq_in", ".", "setsockopt", "(", "zmq", ".", "LINGER", ",", "100", ")", "creq_in", ".", "bind", "(", "'ipc://'", "+", "self", ".", "cache_sock", ")", "# the socket for incoming cache-updates from workers", "cupd_in", "=", "context", ".", "socket", "(", "zmq", ".", "SUB", ")", "cupd_in", ".", "setsockopt", "(", "zmq", ".", "SUBSCRIBE", ",", "b''", ")", "cupd_in", ".", "setsockopt", "(", "zmq", ".", "LINGER", ",", "100", ")", "cupd_in", ".", "bind", "(", "'ipc://'", "+", "self", ".", "update_sock", ")", "# the socket for the timer-event", "timer_in", "=", "context", ".", "socket", "(", "zmq", ".", "SUB", ")", "timer_in", ".", "setsockopt", "(", "zmq", ".", "SUBSCRIBE", ",", "b''", ")", "timer_in", ".", "setsockopt", "(", "zmq", ".", "LINGER", ",", "100", ")", "timer_in", ".", "connect", "(", "'ipc://'", "+", "self", ".", "upd_t_sock", ")", "poller", "=", "zmq", ".", "Poller", "(", ")", "poller", ".", "register", "(", "creq_in", ",", "zmq", ".", "POLLIN", ")", "poller", ".", "register", "(", "cupd_in", ",", "zmq", ".", "POLLIN", ")", "poller", ".", "register", "(", "timer_in", ",", "zmq", ".", "POLLIN", ")", "# our serializer", "serial", "=", "salt", ".", "payload", ".", "Serial", "(", "self", ".", "opts", ".", "get", "(", "'serial'", ",", "''", ")", ")", "# register a signal handler", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "self", ".", "signal_handler", ")", "# secure the sockets from the world", "self", ".", "secure", "(", ")", "log", ".", "info", "(", "'ConCache started'", ")", "while", "self", ".", "running", ":", "# we check for new events with the poller", "try", ":", "socks", "=", "dict", "(", "poller", ".", "poll", "(", "1", ")", ")", "except", "KeyboardInterrupt", ":", "self", ".", "stop", "(", ")", "except", "zmq", ".", "ZMQError", "as", "zmq_err", ":", "log", ".", "error", "(", "'ConCache ZeroMQ-Error occurred'", ")", "log", ".", "exception", "(", "zmq_err", ")", "self", ".", "stop", "(", ")", "# check for next cache-request", "if", "socks", ".", "get", "(", "creq_in", ")", "==", "zmq", ".", "POLLIN", ":", "msg", "=", "serial", ".", "loads", "(", "creq_in", ".", "recv", "(", ")", ")", "log", ".", "debug", "(", "'ConCache Received request: %s'", ",", "msg", ")", "# requests to the minion list are send as str's", "if", "isinstance", "(", "msg", ",", "six", ".", "string_types", ")", ":", "if", "msg", "==", "'minions'", ":", "# Send reply back to client", "reply", "=", "serial", ".", "dumps", "(", "self", ".", "minions", ")", "creq_in", ".", "send", "(", "reply", ")", "# check for next cache-update from workers", "if", "socks", ".", "get", "(", "cupd_in", ")", "==", "zmq", ".", "POLLIN", ":", "new_c_data", "=", "serial", ".", "loads", "(", "cupd_in", ".", "recv", "(", ")", ")", "# tell the worker to exit", "#cupd_in.send(serial.dumps('ACK'))", "# check if the returned data is usable", "if", "not", "isinstance", "(", "new_c_data", ",", "list", ")", ":", "log", ".", "error", "(", "'ConCache Worker returned unusable result'", ")", "del", "new_c_data", "continue", "# the cache will receive lists of minions", "# 1. if the list only has 1 item, its from an MWorker, we append it", "# 2. if the list contains another list, its from a CacheWorker and", "# the currently cached minions are replaced with that list", "# 3. 
anything else is considered malformed", "try", ":", "if", "not", "new_c_data", ":", "log", ".", "debug", "(", "'ConCache Got empty update from worker'", ")", "continue", "data", "=", "new_c_data", "[", "0", "]", "if", "isinstance", "(", "data", ",", "six", ".", "string_types", ")", ":", "if", "data", "not", "in", "self", ".", "minions", ":", "log", ".", "debug", "(", "'ConCache Adding minion %s to cache'", ",", "new_c_data", "[", "0", "]", ")", "self", ".", "minions", ".", "append", "(", "data", ")", "elif", "isinstance", "(", "data", ",", "list", ")", ":", "log", ".", "debug", "(", "'ConCache Replacing minion list from worker'", ")", "self", ".", "minions", "=", "data", "except", "IndexError", ":", "log", ".", "debug", "(", "'ConCache Got malformed result dict from worker'", ")", "del", "new_c_data", "log", ".", "info", "(", "'ConCache %s entries in cache'", ",", "len", "(", "self", ".", "minions", ")", ")", "# check for next timer-event to start new jobs", "if", "socks", ".", "get", "(", "timer_in", ")", "==", "zmq", ".", "POLLIN", ":", "sec_event", "=", "serial", ".", "loads", "(", "timer_in", ".", "recv", "(", ")", ")", "# update the list every 30 seconds", "if", "int", "(", "sec_event", "%", "30", ")", "==", "0", ":", "cw", "=", "CacheWorker", "(", "self", ".", "opts", ")", "cw", ".", "start", "(", ")", "self", ".", "stop", "(", ")", "creq_in", ".", "close", "(", ")", "cupd_in", ".", "close", "(", ")", "timer_in", ".", "close", "(", ")", "context", ".", "term", "(", ")", "log", ".", "debug", "(", "'ConCache Shutting down'", ")" ]
Main loop of the ConCache, starts updates in intervals and answers requests from the MWorkers
[ "Main", "loop", "of", "the", "ConCache", "starts", "updates", "in", "intervals", "and", "answers", "requests", "from", "the", "MWorkers" ]
python
train
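The ConCache loop above multiplexes a REP socket (cache queries), a SUB socket (cache updates from workers), and a timer SUB socket through one Poller. A minimal stand-alone sketch of that pattern (not Salt code; hypothetical IPC paths, pickle serialisation instead of Salt's payload module):

# Minimal REP + SUB poller loop: answer queries with the cache, fold in updates.
import zmq

ctx = zmq.Context()
rep = ctx.socket(zmq.REP)
rep.bind('ipc:///tmp/cache_req')
sub = ctx.socket(zmq.SUB)
sub.setsockopt(zmq.SUBSCRIBE, b'')
sub.bind('ipc:///tmp/cache_upd')

poller = zmq.Poller()
poller.register(rep, zmq.POLLIN)
poller.register(sub, zmq.POLLIN)

cache = []
while True:
    socks = dict(poller.poll(1000))        # wake up at least once a second
    if socks.get(rep) == zmq.POLLIN:
        rep.recv()                         # any request gets the current cache back
        rep.send_pyobj(cache)
    if socks.get(sub) == zmq.POLLIN:
        cache.append(sub.recv_pyobj())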
ckan/ckan-service-provider
ckanserviceprovider/web.py
https://github.com/ckan/ckan-service-provider/blob/83a42b027dba8a0b3ca7e5f689f990b7bc2cd7fa/ckanserviceprovider/web.py#L344-L397
def job_list(): '''List all jobs. :param _limit: maximum number of jobs to show (default 100) :type _limit: int :param _offset: how many jobs to skip before showin the first one (default 0) :type _offset: int :param _status: filter jobs by status (complete, error) :type _status: string Also, you can filter the jobs by their metadata. Use the metadata key as parameter key and the value as value. :rtype: A list of job ids ''' args = dict((key, value) for key, value in flask.request.args.items()) limit = args.pop('_limit', 100) offset = args.pop('_offset', 0) select = sql.select( [db.JOBS_TABLE.c.job_id], from_obj=[db.JOBS_TABLE.outerjoin( db.METADATA_TABLE, db.JOBS_TABLE.c.job_id == db.METADATA_TABLE.c.job_id) ]).\ group_by(db.JOBS_TABLE.c.job_id).\ order_by(db.JOBS_TABLE.c.requested_timestamp.desc()).\ limit(limit).offset(offset) status = args.pop('_status', None) if status: select = select.where(db.JOBS_TABLE.c.status == status) ors = [] for key, value in args.iteritems(): # Turn strings into unicode to stop SQLAlchemy # "Unicode type received non-unicode bind param value" warnings. key = unicode(key) ors.append(sql.and_(db.METADATA_TABLE.c.key == key, db.METADATA_TABLE.c.value == value)) if ors: select = select.where(sql.or_(*ors)) select = select.having( sql.func.count(db.JOBS_TABLE.c.job_id) == len(ors) ) result = db.ENGINE.execute(select) listing = [] for (job_id,) in result: listing.append(flask.url_for('job_status', job_id=job_id)) return flask.jsonify(list=listing)
[ "def", "job_list", "(", ")", ":", "args", "=", "dict", "(", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "flask", ".", "request", ".", "args", ".", "items", "(", ")", ")", "limit", "=", "args", ".", "pop", "(", "'_limit'", ",", "100", ")", "offset", "=", "args", ".", "pop", "(", "'_offset'", ",", "0", ")", "select", "=", "sql", ".", "select", "(", "[", "db", ".", "JOBS_TABLE", ".", "c", ".", "job_id", "]", ",", "from_obj", "=", "[", "db", ".", "JOBS_TABLE", ".", "outerjoin", "(", "db", ".", "METADATA_TABLE", ",", "db", ".", "JOBS_TABLE", ".", "c", ".", "job_id", "==", "db", ".", "METADATA_TABLE", ".", "c", ".", "job_id", ")", "]", ")", ".", "group_by", "(", "db", ".", "JOBS_TABLE", ".", "c", ".", "job_id", ")", ".", "order_by", "(", "db", ".", "JOBS_TABLE", ".", "c", ".", "requested_timestamp", ".", "desc", "(", ")", ")", ".", "limit", "(", "limit", ")", ".", "offset", "(", "offset", ")", "status", "=", "args", ".", "pop", "(", "'_status'", ",", "None", ")", "if", "status", ":", "select", "=", "select", ".", "where", "(", "db", ".", "JOBS_TABLE", ".", "c", ".", "status", "==", "status", ")", "ors", "=", "[", "]", "for", "key", ",", "value", "in", "args", ".", "iteritems", "(", ")", ":", "# Turn strings into unicode to stop SQLAlchemy", "# \"Unicode type received non-unicode bind param value\" warnings.", "key", "=", "unicode", "(", "key", ")", "ors", ".", "append", "(", "sql", ".", "and_", "(", "db", ".", "METADATA_TABLE", ".", "c", ".", "key", "==", "key", ",", "db", ".", "METADATA_TABLE", ".", "c", ".", "value", "==", "value", ")", ")", "if", "ors", ":", "select", "=", "select", ".", "where", "(", "sql", ".", "or_", "(", "*", "ors", ")", ")", "select", "=", "select", ".", "having", "(", "sql", ".", "func", ".", "count", "(", "db", ".", "JOBS_TABLE", ".", "c", ".", "job_id", ")", "==", "len", "(", "ors", ")", ")", "result", "=", "db", ".", "ENGINE", ".", "execute", "(", "select", ")", "listing", "=", "[", "]", "for", "(", "job_id", ",", ")", "in", "result", ":", "listing", ".", "append", "(", "flask", ".", "url_for", "(", "'job_status'", ",", "job_id", "=", "job_id", ")", ")", "return", "flask", ".", "jsonify", "(", "list", "=", "listing", ")" ]
List all jobs. :param _limit: maximum number of jobs to show (default 100) :type _limit: int :param _offset: how many jobs to skip before showing the first one (default 0) :type _offset: int :param _status: filter jobs by status (complete, error) :type _status: string Also, you can filter the jobs by their metadata. Use the metadata key as parameter key and the value as value. :rtype: A list of job ids
[ "List", "all", "jobs", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/models/selection.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/models/selection.py#L170-L177
def add(self, models): """ Adds the passed model(s) to the selection""" if models is None: return models = self._check_model_types(models) self._selected.update(models) self._selected = reduce_to_parent_states(self._selected)
[ "def", "add", "(", "self", ",", "models", ")", ":", "if", "models", "is", "None", ":", "return", "models", "=", "self", ".", "_check_model_types", "(", "models", ")", "self", ".", "_selected", ".", "update", "(", "models", ")", "self", ".", "_selected", "=", "reduce_to_parent_states", "(", "self", ".", "_selected", ")" ]
Adds the passed model(s) to the selection
[ "Adds", "the", "passed", "model", "(", "s", ")", "to", "the", "selection" ]
python
train
UniversalDevicesInc/polyglot-v2-python-interface
polyinterface/polyinterface.py
https://github.com/UniversalDevicesInc/polyglot-v2-python-interface/blob/fe613135b762731a41a081222e43d2a8ae4fc53f/polyinterface/polyinterface.py#L274-L292
def _disconnect(self, mqttc, userdata, rc): """ The callback for when a DISCONNECT occurs. :param mqttc: The client instance for this callback :param userdata: The private userdata for the mqtt client. Not used in Polyglot :param rc: Result code of connection, 0 = Graceful, anything else is unclean """ self.connected = False if rc != 0: LOGGER.info("MQTT Unexpected disconnection. Trying reconnect.") try: self._mqttc.reconnect() except Exception as ex: template = "An exception of type {0} occured. Arguments:\n{1!r}" message = template.format(type(ex).__name__, ex.args) LOGGER.error("MQTT Connection error: " + message) else: LOGGER.info("MQTT Graceful disconnection.")
[ "def", "_disconnect", "(", "self", ",", "mqttc", ",", "userdata", ",", "rc", ")", ":", "self", ".", "connected", "=", "False", "if", "rc", "!=", "0", ":", "LOGGER", ".", "info", "(", "\"MQTT Unexpected disconnection. Trying reconnect.\"", ")", "try", ":", "self", ".", "_mqttc", ".", "reconnect", "(", ")", "except", "Exception", "as", "ex", ":", "template", "=", "\"An exception of type {0} occured. Arguments:\\n{1!r}\"", "message", "=", "template", ".", "format", "(", "type", "(", "ex", ")", ".", "__name__", ",", "ex", ".", "args", ")", "LOGGER", ".", "error", "(", "\"MQTT Connection error: \"", "+", "message", ")", "else", ":", "LOGGER", ".", "info", "(", "\"MQTT Graceful disconnection.\"", ")" ]
The callback for when a DISCONNECT occurs. :param mqttc: The client instance for this callback :param userdata: The private userdata for the mqtt client. Not used in Polyglot :param rc: Result code of connection, 0 = Graceful, anything else is unclean
[ "The", "callback", "for", "when", "a", "DISCONNECT", "occurs", "." ]
python
train
vtemian/buffpy
buffpy/models/link.py
https://github.com/vtemian/buffpy/blob/6c9236fd3b6a8f9e2d70dbf1bc01529242b73075/buffpy/models/link.py#L18-L28
def get_shares(self): ''' Returns an object with a the numbers of shares a link has had using Buffer. www will be stripped, but other subdomains will not. ''' self.shares = self.api.get(url=PATHS['GET_SHARES'] % self.url)['shares'] return self.shares
[ "def", "get_shares", "(", "self", ")", ":", "self", ".", "shares", "=", "self", ".", "api", ".", "get", "(", "url", "=", "PATHS", "[", "'GET_SHARES'", "]", "%", "self", ".", "url", ")", "[", "'shares'", "]", "return", "self", ".", "shares" ]
Returns an object with the number of shares a link has had using Buffer. www will be stripped, but other subdomains will not.
[ "Returns", "an", "object", "with", "a", "the", "numbers", "of", "shares", "a", "link", "has", "had", "using", "Buffer", "." ]
python
valid
darkfeline/animanager
animanager/animecmd.py
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/animecmd.py#L86-L109
def cmdloop(self): """Start CLI REPL.""" while True: cmdline = input(self.prompt) tokens = shlex.split(cmdline) if not tokens: if self.last_cmd: tokens = self.last_cmd else: print('No previous command.') continue if tokens[0] not in self.commands: print('Invalid command') continue command = self.commands[tokens[0]] self.last_cmd = tokens try: if command(self.state, tokens): break except CmdExit: continue except Exception as e: if e not in self.safe_exceptions: logger.exception('Error!')
[ "def", "cmdloop", "(", "self", ")", ":", "while", "True", ":", "cmdline", "=", "input", "(", "self", ".", "prompt", ")", "tokens", "=", "shlex", ".", "split", "(", "cmdline", ")", "if", "not", "tokens", ":", "if", "self", ".", "last_cmd", ":", "tokens", "=", "self", ".", "last_cmd", "else", ":", "print", "(", "'No previous command.'", ")", "continue", "if", "tokens", "[", "0", "]", "not", "in", "self", ".", "commands", ":", "print", "(", "'Invalid command'", ")", "continue", "command", "=", "self", ".", "commands", "[", "tokens", "[", "0", "]", "]", "self", ".", "last_cmd", "=", "tokens", "try", ":", "if", "command", "(", "self", ".", "state", ",", "tokens", ")", ":", "break", "except", "CmdExit", ":", "continue", "except", "Exception", "as", "e", ":", "if", "e", "not", "in", "self", ".", "safe_exceptions", ":", "logger", ".", "exception", "(", "'Error!'", ")" ]
Start CLI REPL.
[ "Start", "CLI", "REPL", "." ]
python
train
suds-community/suds
suds/umx/core.py
https://github.com/suds-community/suds/blob/6fb0a829337b5037a66c20aae6f89b41acd77e40/suds/umx/core.py#L66-L101
def postprocess(self, content): """ Perform final processing of the resulting data structure as follows: - Mixed values (children and text) will have a result of the I{content.node}. - Simi-simple values (attributes, no-children and text) will have a result of a property object. - Simple values (no-attributes, no-children with text nodes) will have a string result equal to the value of the content.node.getText(). @param content: The current content being unmarshalled. @type content: L{Content} @return: The post-processed result. @rtype: I{any} """ node = content.node if len(node.children) and node.hasText(): return node attributes = AttrList(node.attributes) if attributes.rlen() and \ not len(node.children) and \ node.hasText(): p = Factory.property(node.name, node.getText()) return merge(content.data, p) if len(content.data): return content.data lang = attributes.lang() if content.node.isnil(): return None if not len(node.children) and content.text is None: if self.nillable(content): return None else: return Text('', lang=lang) if isinstance(content.text, basestring): return Text(content.text, lang=lang) else: return content.text
[ "def", "postprocess", "(", "self", ",", "content", ")", ":", "node", "=", "content", ".", "node", "if", "len", "(", "node", ".", "children", ")", "and", "node", ".", "hasText", "(", ")", ":", "return", "node", "attributes", "=", "AttrList", "(", "node", ".", "attributes", ")", "if", "attributes", ".", "rlen", "(", ")", "and", "not", "len", "(", "node", ".", "children", ")", "and", "node", ".", "hasText", "(", ")", ":", "p", "=", "Factory", ".", "property", "(", "node", ".", "name", ",", "node", ".", "getText", "(", ")", ")", "return", "merge", "(", "content", ".", "data", ",", "p", ")", "if", "len", "(", "content", ".", "data", ")", ":", "return", "content", ".", "data", "lang", "=", "attributes", ".", "lang", "(", ")", "if", "content", ".", "node", ".", "isnil", "(", ")", ":", "return", "None", "if", "not", "len", "(", "node", ".", "children", ")", "and", "content", ".", "text", "is", "None", ":", "if", "self", ".", "nillable", "(", "content", ")", ":", "return", "None", "else", ":", "return", "Text", "(", "''", ",", "lang", "=", "lang", ")", "if", "isinstance", "(", "content", ".", "text", ",", "basestring", ")", ":", "return", "Text", "(", "content", ".", "text", ",", "lang", "=", "lang", ")", "else", ":", "return", "content", ".", "text" ]
Perform final processing of the resulting data structure as follows: - Mixed values (children and text) will have a result of the I{content.node}. - Simi-simple values (attributes, no-children and text) will have a result of a property object. - Simple values (no-attributes, no-children with text nodes) will have a string result equal to the value of the content.node.getText(). @param content: The current content being unmarshalled. @type content: L{Content} @return: The post-processed result. @rtype: I{any}
[ "Perform", "final", "processing", "of", "the", "resulting", "data", "structure", "as", "follows", ":", "-", "Mixed", "values", "(", "children", "and", "text", ")", "will", "have", "a", "result", "of", "the", "I", "{", "content", ".", "node", "}", ".", "-", "Simi", "-", "simple", "values", "(", "attributes", "no", "-", "children", "and", "text", ")", "will", "have", "a", "result", "of", "a", "property", "object", ".", "-", "Simple", "values", "(", "no", "-", "attributes", "no", "-", "children", "with", "text", "nodes", ")", "will", "have", "a", "string", "result", "equal", "to", "the", "value", "of", "the", "content", ".", "node", ".", "getText", "()", "." ]
python
train
thombashi/SimpleSQLite
simplesqlite/core.py
https://github.com/thombashi/SimpleSQLite/blob/b16f212132b9b98773e68bf7395abc2f60f56fe5/simplesqlite/core.py#L1546-L1560
def __verify_db_file_existence(self, database_path): """ :raises SimpleSQLite.OperationalError: If unable to open database file. """ self.__validate_db_path(database_path) if not os.path.isfile(os.path.realpath(database_path)): raise IOError("file not found: " + database_path) try: connection = sqlite3.connect(database_path) except sqlite3.OperationalError as e: raise OperationalError(e) connection.close()
[ "def", "__verify_db_file_existence", "(", "self", ",", "database_path", ")", ":", "self", ".", "__validate_db_path", "(", "database_path", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "realpath", "(", "database_path", ")", ")", ":", "raise", "IOError", "(", "\"file not found: \"", "+", "database_path", ")", "try", ":", "connection", "=", "sqlite3", ".", "connect", "(", "database_path", ")", "except", "sqlite3", ".", "OperationalError", "as", "e", ":", "raise", "OperationalError", "(", "e", ")", "connection", ".", "close", "(", ")" ]
:raises SimpleSQLite.OperationalError: If unable to open database file.
[ ":", "raises", "SimpleSQLite", ".", "OperationalError", ":", "If", "unable", "to", "open", "database", "file", "." ]
python
train
emory-libraries/eulcommon
eulcommon/djangoextras/taskresult/models.py
https://github.com/emory-libraries/eulcommon/blob/dc63a9b3b5e38205178235e0d716d1b28158d3a9/eulcommon/djangoextras/taskresult/models.py#L76-L83
def status_icon(self): 'glyphicon for task status; requires bootstrap' icon = self.status_icon_map.get(self.status.lower(), self.unknown_icon) style = self.status_style.get(self.status.lower(), '') return mark_safe( '<span class="glyphicon %s %s" aria-hidden="true"></span>' % (icon, style))
[ "def", "status_icon", "(", "self", ")", ":", "icon", "=", "self", ".", "status_icon_map", ".", "get", "(", "self", ".", "status", ".", "lower", "(", ")", ",", "self", ".", "unknown_icon", ")", "style", "=", "self", ".", "status_style", ".", "get", "(", "self", ".", "status", ".", "lower", "(", ")", ",", "''", ")", "return", "mark_safe", "(", "'<span class=\"glyphicon %s %s\" aria-hidden=\"true\"></span>'", "%", "(", "icon", ",", "style", ")", ")" ]
glyphicon for task status; requires bootstrap
[ "glyphicon", "for", "task", "status", ";", "requires", "bootstrap" ]
python
train
ellmetha/django-machina
machina/apps/forum_tracking/managers.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_tracking/managers.py#L20-L48
def get_unread_forums_from_list(self, forums, user): """ Filter a list of forums and return only those which are unread. Given a list of forums find and returns the list of forums that are unread for the passed user. If a forum is unread all of its ancestors are also unread and will be included in the final list. """ unread_forums = [] visibility_contents = ForumVisibilityContentTree.from_forums(forums) forum_ids_to_visibility_nodes = visibility_contents.as_dict tracks = super().get_queryset().select_related('forum').filter( user=user, forum__in=forums) tracked_forums = [] for track in tracks: forum_last_post_on = forum_ids_to_visibility_nodes[track.forum_id].last_post_on if (forum_last_post_on and track.mark_time < forum_last_post_on) \ and track.forum not in unread_forums: unread_forums.extend(track.forum.get_ancestors(include_self=True)) tracked_forums.append(track.forum) for forum in forums: if forum not in tracked_forums and forum not in unread_forums \ and forum.direct_topics_count > 0: unread_forums.extend(forum.get_ancestors(include_self=True)) return list(set(unread_forums))
[ "def", "get_unread_forums_from_list", "(", "self", ",", "forums", ",", "user", ")", ":", "unread_forums", "=", "[", "]", "visibility_contents", "=", "ForumVisibilityContentTree", ".", "from_forums", "(", "forums", ")", "forum_ids_to_visibility_nodes", "=", "visibility_contents", ".", "as_dict", "tracks", "=", "super", "(", ")", ".", "get_queryset", "(", ")", ".", "select_related", "(", "'forum'", ")", ".", "filter", "(", "user", "=", "user", ",", "forum__in", "=", "forums", ")", "tracked_forums", "=", "[", "]", "for", "track", "in", "tracks", ":", "forum_last_post_on", "=", "forum_ids_to_visibility_nodes", "[", "track", ".", "forum_id", "]", ".", "last_post_on", "if", "(", "forum_last_post_on", "and", "track", ".", "mark_time", "<", "forum_last_post_on", ")", "and", "track", ".", "forum", "not", "in", "unread_forums", ":", "unread_forums", ".", "extend", "(", "track", ".", "forum", ".", "get_ancestors", "(", "include_self", "=", "True", ")", ")", "tracked_forums", ".", "append", "(", "track", ".", "forum", ")", "for", "forum", "in", "forums", ":", "if", "forum", "not", "in", "tracked_forums", "and", "forum", "not", "in", "unread_forums", "and", "forum", ".", "direct_topics_count", ">", "0", ":", "unread_forums", ".", "extend", "(", "forum", ".", "get_ancestors", "(", "include_self", "=", "True", ")", ")", "return", "list", "(", "set", "(", "unread_forums", ")", ")" ]
Filter a list of forums and return only those which are unread. Given a list of forums find and returns the list of forums that are unread for the passed user. If a forum is unread all of its ancestors are also unread and will be included in the final list.
[ "Filter", "a", "list", "of", "forums", "and", "return", "only", "those", "which", "are", "unread", "." ]
python
train
pycampers/ampy
ampy/cli.py
https://github.com/pycampers/ampy/blob/6851f8b177c334f5ff7bd43bf07307a437433ba2/ampy/cli.py#L290-L304
def rmdir(remote_folder, missing_okay): """Forcefully remove a folder and all its children from the board. Remove the specified folder from the board's filesystem. Must specify one argument which is the path to the folder to delete. This will delete the directory and ALL of its children recursively, use with caution! For example to delete everything under /adafruit_library from the root of a board run: ampy --port /board/serial/port rmdir adafruit_library """ # Delete the provided file/directory on the board. board_files = files.Files(_board) board_files.rmdir(remote_folder, missing_okay=missing_okay)
[ "def", "rmdir", "(", "remote_folder", ",", "missing_okay", ")", ":", "# Delete the provided file/directory on the board.", "board_files", "=", "files", ".", "Files", "(", "_board", ")", "board_files", ".", "rmdir", "(", "remote_folder", ",", "missing_okay", "=", "missing_okay", ")" ]
Forcefully remove a folder and all its children from the board. Remove the specified folder from the board's filesystem. Must specify one argument which is the path to the folder to delete. This will delete the directory and ALL of its children recursively, use with caution! For example to delete everything under /adafruit_library from the root of a board run: ampy --port /board/serial/port rmdir adafruit_library
[ "Forcefully", "remove", "a", "folder", "and", "all", "its", "children", "from", "the", "board", "." ]
python
train
VasilyStepanov/pywidl
pywidl/grammar.py
https://github.com/VasilyStepanov/pywidl/blob/8d84b2e53157bfe276bf16301c19e8b6b32e861e/pywidl/grammar.py#L971-L974
def p_ExtendedAttributeNoArgs(p): """ExtendedAttributeNoArgs : IDENTIFIER""" p[0] = model.ExtendedAttribute( value=model.ExtendedAttributeValue(name=p[1]))
[ "def", "p_ExtendedAttributeNoArgs", "(", "p", ")", ":", "p", "[", "0", "]", "=", "model", ".", "ExtendedAttribute", "(", "value", "=", "model", ".", "ExtendedAttributeValue", "(", "name", "=", "p", "[", "1", "]", ")", ")" ]
ExtendedAttributeNoArgs : IDENTIFIER
[ "ExtendedAttributeNoArgs", ":", "IDENTIFIER" ]
python
train
ricardosasilva/pagseguro-python
pagseguro/api/v2/payment.py
https://github.com/ricardosasilva/pagseguro-python/blob/8e39d1b0585684c460b86073d1fb3f33112b5b3d/pagseguro/api/v2/payment.py#L152-L177
def request(self): ''' Faz a requisição de pagamento ao servidor do PagSeguro. ''' # try: payment_v2_schema(self) # except MultipleInvalid as e: # raise PagSeguroPaymentValidationException(u'Erro na validação dos dados: %s' % e.msg) params = self._build_params() # logger.debug(u'Parametros da requisicao ao PagSeguro: %s' % params) req = requests.post( self.PAGSEGURO_API_URL, params=params, headers={ 'Content-Type': 'application/x-www-form-urlencoded; charset=ISO-8859-1' } ) if req.status_code == 200: self.params = params self.response = self._process_response_xml(req.text) else: raise PagSeguroApiException( u'Erro ao fazer request para a API:' + ' HTTP Status=%s - Response: %s' % (req.status_code, req.text)) return
[ "def", "request", "(", "self", ")", ":", "# try:", "payment_v2_schema", "(", "self", ")", "# except MultipleInvalid as e:", "# raise PagSeguroPaymentValidationException(u'Erro na validação dos dados: %s' % e.msg)", "params", "=", "self", ".", "_build_params", "(", ")", "# logger.debug(u'Parametros da requisicao ao PagSeguro: %s' % params)", "req", "=", "requests", ".", "post", "(", "self", ".", "PAGSEGURO_API_URL", ",", "params", "=", "params", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/x-www-form-urlencoded; charset=ISO-8859-1'", "}", ")", "if", "req", ".", "status_code", "==", "200", ":", "self", ".", "params", "=", "params", "self", ".", "response", "=", "self", ".", "_process_response_xml", "(", "req", ".", "text", ")", "else", ":", "raise", "PagSeguroApiException", "(", "u'Erro ao fazer request para a API:'", "+", "' HTTP Status=%s - Response: %s'", "%", "(", "req", ".", "status_code", ",", "req", ".", "text", ")", ")", "return" ]
Faz a requisição de pagamento ao servidor do PagSeguro.
[ "Faz", "a", "requisição", "de", "pagamento", "ao", "servidor", "do", "PagSeguro", "." ]
python
train
twisted/axiom
axiom/item.py
https://github.com/twisted/axiom/blob/7de70bc8fe1bb81f9c2339fba8daec9eb2e92b68/axiom/item.py#L744-L817
def checkpoint(self): """ Update the database to reflect in-memory changes made to this item; for example, to make it show up in store.query() calls where it is now valid, but was not the last time it was persisted to the database. This is called automatically when in 'autocommit mode' (i.e. not in a transaction) and at the end of each transaction for every object that has been changed. """ if self.store is None: raise NotInStore("You can't checkpoint %r: not in a store" % (self,)) if self.__deleting: if not self.__everInserted: # don't issue duplicate SQL and crap; we were created, then # destroyed immediately. return self.store.executeSQL(self._baseDeleteSQL(self.store), [self.storeID]) # re-using OIDs plays havoc with the cache, and with other things # as well. We need to make sure that we leave a placeholder row at # the end of the table. if self.__deletingObject: # Mark this object as dead. self.store.executeSchemaSQL(_schema.CHANGE_TYPE, [-1, self.storeID]) # Can't do this any more: # self.store.executeSchemaSQL(_schema.DELETE_OBJECT, [self.storeID]) # TODO: need to measure the performance impact of this, then do # it to make sure things are in fact deleted: # self.store.executeSchemaSQL(_schema.APP_VACUUM) else: assert self.__legacy__ # we're done... if self.store.autocommit: self.committed() return if self.__everInserted: # case 1: we've been inserted before, either previously in this # transaction or we were loaded from the db if not self.__dirty__: # we might have been checkpointed twice within the same # transaction; just don't do anything. return self.store.executeSQL(*self._updateSQL()) else: # case 2: we are in the middle of creating the object, we've never # been inserted into the db before schemaAttrs = self.getSchema() insertArgs = [self.storeID] for (ignoredName, attrObj) in schemaAttrs: attrObjDuplicate, attributeValue = self.__dirty__[attrObj.attrname] # assert attrObjDuplicate is attrObj insertArgs.append(attributeValue) # XXX this isn't atomic, gross. self.store.executeSQL(self._baseInsertSQL(self.store), insertArgs) self.__everInserted = True # In case 1, we're dirty but we did an update, synchronizing the # database, in case 2, we haven't been created but we issue an insert. # In either case, the code in attributes.py sets the attribute *as well # as* populating __dirty__, so we clear out dirty and we keep the same # value, knowing it's the same as what's in the db. self.__dirty__.clear() if self.store.autocommit: self.committed()
[ "def", "checkpoint", "(", "self", ")", ":", "if", "self", ".", "store", "is", "None", ":", "raise", "NotInStore", "(", "\"You can't checkpoint %r: not in a store\"", "%", "(", "self", ",", ")", ")", "if", "self", ".", "__deleting", ":", "if", "not", "self", ".", "__everInserted", ":", "# don't issue duplicate SQL and crap; we were created, then", "# destroyed immediately.", "return", "self", ".", "store", ".", "executeSQL", "(", "self", ".", "_baseDeleteSQL", "(", "self", ".", "store", ")", ",", "[", "self", ".", "storeID", "]", ")", "# re-using OIDs plays havoc with the cache, and with other things", "# as well. We need to make sure that we leave a placeholder row at", "# the end of the table.", "if", "self", ".", "__deletingObject", ":", "# Mark this object as dead.", "self", ".", "store", ".", "executeSchemaSQL", "(", "_schema", ".", "CHANGE_TYPE", ",", "[", "-", "1", ",", "self", ".", "storeID", "]", ")", "# Can't do this any more:", "# self.store.executeSchemaSQL(_schema.DELETE_OBJECT, [self.storeID])", "# TODO: need to measure the performance impact of this, then do", "# it to make sure things are in fact deleted:", "# self.store.executeSchemaSQL(_schema.APP_VACUUM)", "else", ":", "assert", "self", ".", "__legacy__", "# we're done...", "if", "self", ".", "store", ".", "autocommit", ":", "self", ".", "committed", "(", ")", "return", "if", "self", ".", "__everInserted", ":", "# case 1: we've been inserted before, either previously in this", "# transaction or we were loaded from the db", "if", "not", "self", ".", "__dirty__", ":", "# we might have been checkpointed twice within the same", "# transaction; just don't do anything.", "return", "self", ".", "store", ".", "executeSQL", "(", "*", "self", ".", "_updateSQL", "(", ")", ")", "else", ":", "# case 2: we are in the middle of creating the object, we've never", "# been inserted into the db before", "schemaAttrs", "=", "self", ".", "getSchema", "(", ")", "insertArgs", "=", "[", "self", ".", "storeID", "]", "for", "(", "ignoredName", ",", "attrObj", ")", "in", "schemaAttrs", ":", "attrObjDuplicate", ",", "attributeValue", "=", "self", ".", "__dirty__", "[", "attrObj", ".", "attrname", "]", "# assert attrObjDuplicate is attrObj", "insertArgs", ".", "append", "(", "attributeValue", ")", "# XXX this isn't atomic, gross.", "self", ".", "store", ".", "executeSQL", "(", "self", ".", "_baseInsertSQL", "(", "self", ".", "store", ")", ",", "insertArgs", ")", "self", ".", "__everInserted", "=", "True", "# In case 1, we're dirty but we did an update, synchronizing the", "# database, in case 2, we haven't been created but we issue an insert.", "# In either case, the code in attributes.py sets the attribute *as well", "# as* populating __dirty__, so we clear out dirty and we keep the same", "# value, knowing it's the same as what's in the db.", "self", ".", "__dirty__", ".", "clear", "(", ")", "if", "self", ".", "store", ".", "autocommit", ":", "self", ".", "committed", "(", ")" ]
Update the database to reflect in-memory changes made to this item; for example, to make it show up in store.query() calls where it is now valid, but was not the last time it was persisted to the database. This is called automatically when in 'autocommit mode' (i.e. not in a transaction) and at the end of each transaction for every object that has been changed.
[ "Update", "the", "database", "to", "reflect", "in", "-", "memory", "changes", "made", "to", "this", "item", ";", "for", "example", "to", "make", "it", "show", "up", "in", "store", ".", "query", "()", "calls", "where", "it", "is", "now", "valid", "but", "was", "not", "the", "last", "time", "it", "was", "persisted", "to", "the", "database", "." ]
python
train
pymoca/pymoca
src/pymoca/backends/xml/model.py
https://github.com/pymoca/pymoca/blob/14b5eb7425e96689de6cc5c10f400895d586a978/src/pymoca/backends/xml/model.py#L70-L76
def create_function_f_c(self): """condition function""" return ca.Function( 'f_c', [self.t, self.x, self.y, self.m, self.p, self.ng, self.nu], [self.f_c], ['t', 'x', 'y', 'm', 'p', 'ng', 'nu'], ['c'], self.func_opt)
[ "def", "create_function_f_c", "(", "self", ")", ":", "return", "ca", ".", "Function", "(", "'f_c'", ",", "[", "self", ".", "t", ",", "self", ".", "x", ",", "self", ".", "y", ",", "self", ".", "m", ",", "self", ".", "p", ",", "self", ".", "ng", ",", "self", ".", "nu", "]", ",", "[", "self", ".", "f_c", "]", ",", "[", "'t'", ",", "'x'", ",", "'y'", ",", "'m'", ",", "'p'", ",", "'ng'", ",", "'nu'", "]", ",", "[", "'c'", "]", ",", "self", ".", "func_opt", ")" ]
condition function
[ "condition", "function" ]
python
train
Kozea/pygal
pygal/graph/public.py
https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/graph/public.py#L49-L56
def render(self, is_unicode=False, **kwargs): """Render the graph, and return the svg string""" self.setup(**kwargs) svg = self.svg.render( is_unicode=is_unicode, pretty_print=self.pretty_print ) self.teardown() return svg
[ "def", "render", "(", "self", ",", "is_unicode", "=", "False", ",", "*", "*", "kwargs", ")", ":", "self", ".", "setup", "(", "*", "*", "kwargs", ")", "svg", "=", "self", ".", "svg", ".", "render", "(", "is_unicode", "=", "is_unicode", ",", "pretty_print", "=", "self", ".", "pretty_print", ")", "self", ".", "teardown", "(", ")", "return", "svg" ]
Render the graph, and return the svg string
[ "Render", "the", "graph", "and", "return", "the", "svg", "string" ]
python
train
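A minimal, self-contained usage sketch for the render() method shown above; the chart type, series name and data are illustrative assumptions, not taken from this record:

import pygal

chart = pygal.Bar()                        # any pygal chart type exposes render()
chart.add('Series A', [1, 3, 2])           # hypothetical data
svg_bytes = chart.render()                 # bytes by default
svg_text = chart.render(is_unicode=True)   # unicode string when is_unicode=True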
globocom/GloboNetworkAPI-client-python
networkapiclient/TipoRede.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/TipoRede.py#L57-L76
def inserir(self, name): """Insert new network type and return its identifier. :param name: Network type name. :return: Following dictionary: {'net_type': {'id': < id >}} :raise InvalidParameterError: Network type is none or invalid. :raise NomeTipoRedeDuplicadoError: A network type with this name already exists. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response. """ net_type_map = dict() net_type_map['name'] = name code, xml = self.submit( {'net_type': net_type_map}, 'POST', 'net_type/') return self.response(code, xml)
[ "def", "inserir", "(", "self", ",", "name", ")", ":", "net_type_map", "=", "dict", "(", ")", "net_type_map", "[", "'name'", "]", "=", "name", "code", ",", "xml", "=", "self", ".", "submit", "(", "{", "'net_type'", ":", "net_type_map", "}", ",", "'POST'", ",", "'net_type/'", ")", "return", "self", ".", "response", "(", "code", ",", "xml", ")" ]
Insert new network type and return its identifier. :param name: Network type name. :return: Following dictionary: {'net_type': {'id': < id >}} :raise InvalidParameterError: Network type is none or invalid. :raise NomeTipoRedeDuplicadoError: A network type with this name already exists. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
[ "Insert", "new", "network", "type", "and", "return", "its", "identifier", "." ]
python
train
manns/pyspread
pyspread/src/gui/_widgets.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_widgets.py#L1088-L1093
def Reposition(self): """Reposition the checkbox""" rect = self.GetFieldRect(1) self.safemode_staticbmp.SetPosition((rect.x, rect.y)) self.size_changed = False
[ "def", "Reposition", "(", "self", ")", ":", "rect", "=", "self", ".", "GetFieldRect", "(", "1", ")", "self", ".", "safemode_staticbmp", ".", "SetPosition", "(", "(", "rect", ".", "x", ",", "rect", ".", "y", ")", ")", "self", ".", "size_changed", "=", "False" ]
Reposition the checkbox
[ "Reposition", "the", "checkbox" ]
python
train
bukun/TorCMS
torcms/model/post_model.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/post_model.py#L26-L37
def query_recent_most(num=8, recent=30): ''' Query the records from the database that were recently updated. :param num: the number that will be returned. :param recent: the number of recent days. ''' time_that = int(time.time()) - recent * 24 * 3600 return TabPost.select().where( TabPost.time_update > time_that ).order_by( TabPost.view_count.desc() ).limit(num)
[ "def", "query_recent_most", "(", "num", "=", "8", ",", "recent", "=", "30", ")", ":", "time_that", "=", "int", "(", "time", ".", "time", "(", ")", ")", "-", "recent", "*", "24", "*", "3600", "return", "TabPost", ".", "select", "(", ")", ".", "where", "(", "TabPost", ".", "time_update", ">", "time_that", ")", ".", "order_by", "(", "TabPost", ".", "view_count", ".", "desc", "(", ")", ")", ".", "limit", "(", "num", ")" ]
Query the records from the database that were recently updated. :param num: the number that will be returned. :param recent: the number of recent days.
[ "Query", "the", "records", "from", "database", "that", "recently", "updated", ".", ":", "param", "num", ":", "the", "number", "that", "will", "returned", ".", ":", "param", "recent", ":", "the", "number", "of", "days", "recent", "." ]
python
train
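A short illustration of the query helper above, traced from the displayed code; the argument values are arbitrary and the way the function is bound (module-level vs. classmethod) is assumed:

# Up to 5 TabPost rows whose time_update lies within the last 7 days,
# ordered by view_count in descending order:
recent_posts = query_recent_most(num=5, recent=7)
for post in recent_posts:
    print(post.view_count)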
ozak/georasters
georasters/georasters.py
https://github.com/ozak/georasters/blob/0612bd91bb2a2cb2f1d59ba89c1ff131dae27d70/georasters/georasters.py#L1041-L1060
def pysal_Moran_Local(self, **kwargs): """ Compute Local Moran's I measure of local spatial autocorrelation for GeoRaster Usage: geo.pysal_Moran_Local(permutations = 1000, rook=True) arguments passed to raster_weights() and pysal.Moran_Local See help(gr.raster_weights), help(pysal.Moran_Local) for options """ if self.weights is None: self.raster_weights(**kwargs) rasterf = self.raster.flatten() rasterf = rasterf[rasterf.mask==False] self.Moran_Local = pysal.Moran_Local(rasterf, self.weights, **kwargs) for i in self.Moran_Local.__dict__.keys(): if (isinstance(getattr(self.Moran_Local, i), np.ma.masked_array) or (isinstance(getattr(self.Moran_Local, i), np.ndarray)) and len(getattr(self.Moran_Local, i).shape) == 1): setattr(self.Moran_Local, i, self.map_vector(getattr(self.Moran_Local, i)))
[ "def", "pysal_Moran_Local", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "weights", "is", "None", ":", "self", ".", "raster_weights", "(", "*", "*", "kwargs", ")", "rasterf", "=", "self", ".", "raster", ".", "flatten", "(", ")", "rasterf", "=", "rasterf", "[", "rasterf", ".", "mask", "==", "False", "]", "self", ".", "Moran_Local", "=", "pysal", ".", "Moran_Local", "(", "rasterf", ",", "self", ".", "weights", ",", "*", "*", "kwargs", ")", "for", "i", "in", "self", ".", "Moran_Local", ".", "__dict__", ".", "keys", "(", ")", ":", "if", "(", "isinstance", "(", "getattr", "(", "self", ".", "Moran_Local", ",", "i", ")", ",", "np", ".", "ma", ".", "masked_array", ")", "or", "(", "isinstance", "(", "getattr", "(", "self", ".", "Moran_Local", ",", "i", ")", ",", "np", ".", "ndarray", ")", ")", "and", "len", "(", "getattr", "(", "self", ".", "Moran_Local", ",", "i", ")", ".", "shape", ")", "==", "1", ")", ":", "setattr", "(", "self", ".", "Moran_Local", ",", "i", ",", "self", ".", "map_vector", "(", "getattr", "(", "self", ".", "Moran_Local", ",", "i", ")", ")", ")" ]
Compute Local Moran's I measure of local spatial autocorrelation for GeoRaster Usage: geo.pysal_Moran_Local(permutations = 1000, rook=True) arguments passed to raster_weights() and pysal.Moran_Local See help(gr.raster_weights), help(pysal.Moran_Local) for options
[ "Compute", "Local", "Moran", "s", "I", "measure", "of", "local", "spatial", "autocorrelation", "for", "GeoRaster" ]
python
train
pgmpy/pgmpy
pgmpy/readwrite/ProbModelXML.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/readwrite/ProbModelXML.py#L723-L766
def create_probnet(self): """ Returns a BayesianModel or MarkovModel object depending on the type of ProbModelXML passed to ProbModelXMLReader class. """ self.probnet = {} # Add general properties probnet_elem = self.xml.find('ProbNet') self.probnet['type'] = probnet_elem.attrib['type'] if probnet_elem.find('Comment') is not None: self.add_comment(probnet_elem.find('Comment').text) if probnet_elem.find('Language') is not None: self.add_language(probnet_elem.find('Language').text) if probnet_elem.find('AdditionalProperties') is not None: self.probnet['AdditionalProperties'] = {} for prop in probnet_elem.find('AdditionalProperties'): self.add_additional_property(self.probnet['AdditionalProperties'], prop) # Add additional Constraints self.probnet['AdditionalConstraints'] = {} for constraint in probnet_elem.findall('AdditionalConstraints/Constraint'): self.add_probnet_additionalconstraints(constraint) # Add Decision Criterion self.probnet['DecisionCriteria'] = {} for criterion in probnet_elem.findall('DecisionCriteria/Criterion'): self.add_criterion(criterion) # Add nodes self.probnet['Variables'] = {} for variable in probnet_elem.find('Variables'): self.add_node(variable) # Add edges self.probnet['edges'] = {} for edge in probnet_elem.findall('Links/Link'): self.add_edge(edge) # Add CPD self.probnet['Potentials'] = [] for potential in probnet_elem.findall('Potentials/Potential'): probnet_dict = {} self.add_potential(potential, probnet_dict) self.probnet['Potentials'].append(probnet_dict)
[ "def", "create_probnet", "(", "self", ")", ":", "self", ".", "probnet", "=", "{", "}", "# Add general properties", "probnet_elem", "=", "self", ".", "xml", ".", "find", "(", "'ProbNet'", ")", "self", ".", "probnet", "[", "'type'", "]", "=", "probnet_elem", ".", "attrib", "[", "'type'", "]", "if", "probnet_elem", ".", "find", "(", "'Comment'", ")", "is", "not", "None", ":", "self", ".", "add_comment", "(", "probnet_elem", ".", "find", "(", "'Comment'", ")", ".", "text", ")", "if", "probnet_elem", ".", "find", "(", "'Language'", ")", "is", "not", "None", ":", "self", ".", "add_language", "(", "probnet_elem", ".", "find", "(", "'Language'", ")", ".", "text", ")", "if", "probnet_elem", ".", "find", "(", "'AdditionalProperties'", ")", "is", "not", "None", ":", "self", ".", "probnet", "[", "'AdditionalProperties'", "]", "=", "{", "}", "for", "prop", "in", "probnet_elem", ".", "find", "(", "'AdditionalProperties'", ")", ":", "self", ".", "add_additional_property", "(", "self", ".", "probnet", "[", "'AdditionalProperties'", "]", ",", "prop", ")", "# Add additional Constraints", "self", ".", "probnet", "[", "'AdditionalConstraints'", "]", "=", "{", "}", "for", "constraint", "in", "probnet_elem", ".", "findall", "(", "'AdditionalConstraints/Constraint'", ")", ":", "self", ".", "add_probnet_additionalconstraints", "(", "constraint", ")", "# Add Decision Criterion", "self", ".", "probnet", "[", "'DecisionCriteria'", "]", "=", "{", "}", "for", "criterion", "in", "probnet_elem", ".", "findall", "(", "'DecisionCriteria/Criterion'", ")", ":", "self", ".", "add_criterion", "(", "criterion", ")", "# Add nodes", "self", ".", "probnet", "[", "'Variables'", "]", "=", "{", "}", "for", "variable", "in", "probnet_elem", ".", "find", "(", "'Variables'", ")", ":", "self", ".", "add_node", "(", "variable", ")", "# Add edges", "self", ".", "probnet", "[", "'edges'", "]", "=", "{", "}", "for", "edge", "in", "probnet_elem", ".", "findall", "(", "'Links/Link'", ")", ":", "self", ".", "add_edge", "(", "edge", ")", "# Add CPD", "self", ".", "probnet", "[", "'Potentials'", "]", "=", "[", "]", "for", "potential", "in", "probnet_elem", ".", "findall", "(", "'Potentials/Potential'", ")", ":", "probnet_dict", "=", "{", "}", "self", ".", "add_potential", "(", "potential", ",", "probnet_dict", ")", "self", ".", "probnet", "[", "'Potentials'", "]", ".", "append", "(", "probnet_dict", ")" ]
Returns a BayesianModel or MarkovModel object depending on the type of ProbModelXML passed to ProbModelXMLReader class.
[ "Returns", "a", "BayesianModel", "or", "MarkovModel", "object", "depending", "on", "the", "type", "of", "ProbModelXML", "passed", "to", "ProbModelXMLReader", "class", "." ]
python
train
matthewdeanmartin/jiggle_version
extra/vendorized/pep440.py
https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/extra/vendorized/pep440.py#L50-L56
def is_canonical(version, loosedev=False): # type: (str, bool) -> bool """ Return whether or not the version string is canonical according to PEP 440 """ if loosedev: return loose440re.match(version) is not None return pep440re.match(version) is not None
[ "def", "is_canonical", "(", "version", ",", "loosedev", "=", "False", ")", ":", "# type: (str, bool) -> bool", "if", "loosedev", ":", "return", "loose440re", ".", "match", "(", "version", ")", "is", "not", "None", "return", "pep440re", ".", "match", "(", "version", ")", "is", "not", "None" ]
Return whether or not the version string is canonical according to PEP 440
[ "Return", "whether", "or", "not", "the", "version", "string", "is", "canonical", "according", "to", "Pep", "440" ]
python
train
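Hypothetical checks for the function above; the expected results follow PEP 440's canonical form and ultimately depend on the module's pep440re / loose440re patterns, which this record does not show:

is_canonical("1.0.3")                # expected True  (canonical release segment)
is_canonical("1.0.3.dev2")           # expected True  (canonical dev release)
is_canonical("v1.0")                 # expected False (the leading "v" is a normalisation, not canonical)
is_canonical("1.0", loosedev=True)   # matched against the looser pattern instead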
CybOXProject/mixbox
mixbox/fields.py
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/fields.py#L50-L62
def iterfields(klass): """Iterate over the input class members and yield its TypedFields. Args: klass: A class (usually an Entity subclass). Yields: (class attribute name, TypedField instance) tuples. """ is_field = lambda x: isinstance(x, TypedField) for name, field in inspect.getmembers(klass, predicate=is_field): yield name, field
[ "def", "iterfields", "(", "klass", ")", ":", "is_field", "=", "lambda", "x", ":", "isinstance", "(", "x", ",", "TypedField", ")", "for", "name", ",", "field", "in", "inspect", ".", "getmembers", "(", "klass", ",", "predicate", "=", "is_field", ")", ":", "yield", "name", ",", "field" ]
Iterate over the input class members and yield its TypedFields. Args: klass: A class (usually an Entity subclass). Yields: (class attribute name, TypedField instance) tuples.
[ "Iterate", "over", "the", "input", "class", "members", "and", "yield", "its", "TypedFields", "." ]
python
train
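A minimal sketch of iterating a class's TypedFields with the helper above; the Entity subclass and the TypedField constructor argument are invented for illustration and are not taken from this record:

class Address(Entity):                 # hypothetical Entity subclass
    street = TypedField("Street")      # assumed TypedField(name) signature
    city = TypedField("City")

for name, field in iterfields(Address):
    print(name, field)                 # yields ("city", ...), ("street", ...) in attribute-name order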
Qiskit/qiskit-terra
qiskit/extensions/standard/s.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/extensions/standard/s.py#L60-L71
def _define(self): """ gate sdg a { u1(-pi/2) a; } """ definition = [] q = QuantumRegister(1, "q") rule = [ (U1Gate(-pi/2), [q[0]], []) ] for inst in rule: definition.append(inst) self.definition = definition
[ "def", "_define", "(", "self", ")", ":", "definition", "=", "[", "]", "q", "=", "QuantumRegister", "(", "1", ",", "\"q\"", ")", "rule", "=", "[", "(", "U1Gate", "(", "-", "pi", "/", "2", ")", ",", "[", "q", "[", "0", "]", "]", ",", "[", "]", ")", "]", "for", "inst", "in", "rule", ":", "definition", ".", "append", "(", "inst", ")", "self", ".", "definition", "=", "definition" ]
gate sdg a { u1(-pi/2) a; }
[ "gate", "sdg", "a", "{", "u1", "(", "-", "pi", "/", "2", ")", "a", ";", "}" ]
python
test
chrislit/abydos
abydos/phonetic/_onca.py
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/phonetic/_onca.py#L93-L124
def onca(word, max_length=4, zero_pad=True): """Return the Oxford Name Compression Algorithm (ONCA) code for a word. This is a wrapper for :py:meth:`ONCA.encode`. Parameters ---------- word : str The word to transform max_length : int The maximum length (default 5) of the code to return zero_pad : bool Pad the end of the return value with 0s to achieve a max_length string Returns ------- str The ONCA code Examples -------- >>> onca('Christopher') 'C623' >>> onca('Niall') 'N400' >>> onca('Smith') 'S530' >>> onca('Schmidt') 'S530' """ return ONCA().encode(word, max_length, zero_pad)
[ "def", "onca", "(", "word", ",", "max_length", "=", "4", ",", "zero_pad", "=", "True", ")", ":", "return", "ONCA", "(", ")", ".", "encode", "(", "word", ",", "max_length", ",", "zero_pad", ")" ]
Return the Oxford Name Compression Algorithm (ONCA) code for a word. This is a wrapper for :py:meth:`ONCA.encode`. Parameters ---------- word : str The word to transform max_length : int The maximum length (default 5) of the code to return zero_pad : bool Pad the end of the return value with 0s to achieve a max_length string Returns ------- str The ONCA code Examples -------- >>> onca('Christopher') 'C623' >>> onca('Niall') 'N400' >>> onca('Smith') 'S530' >>> onca('Schmidt') 'S530'
[ "Return", "the", "Oxford", "Name", "Compression", "Algorithm", "(", "ONCA", ")", "code", "for", "a", "word", "." ]
python
valid
globality-corp/microcosm-postgres
microcosm_postgres/store.py
https://github.com/globality-corp/microcosm-postgres/blob/43dd793b1fc9b84e4056700f350e79e0df5ff501/microcosm_postgres/store.py#L124-L137
def update_with_diff(self, identifier, new_instance): """ Update an existing model with a new one. :raises `ModelNotFoundError` if there is no existing model """ with self.flushing(): instance = self.retrieve(identifier) before = Version(instance) self.merge(instance, new_instance) instance.updated_at = instance.new_timestamp() after = Version(instance) return instance, before - after
[ "def", "update_with_diff", "(", "self", ",", "identifier", ",", "new_instance", ")", ":", "with", "self", ".", "flushing", "(", ")", ":", "instance", "=", "self", ".", "retrieve", "(", "identifier", ")", "before", "=", "Version", "(", "instance", ")", "self", ".", "merge", "(", "instance", ",", "new_instance", ")", "instance", ".", "updated_at", "=", "instance", ".", "new_timestamp", "(", ")", "after", "=", "Version", "(", "instance", ")", "return", "instance", ",", "before", "-", "after" ]
Update an existing model with a new one. :raises `ModelNotFoundError` if there is no existing model
[ "Update", "an", "existing", "model", "with", "a", "new", "one", "." ]
python
train
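A hedged usage sketch for the store method above; store, some_id and candidate are hypothetical names standing in for a concrete Store instance, an identifier and a new model instance:

updated, diff = store.update_with_diff(some_id, candidate)
# `updated` is the merged instance with a refreshed updated_at timestamp;
# `diff` is the Version delta computed as `before - after` in the method body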
trehn/termdown
termdown.py
https://github.com/trehn/termdown/blob/aa0c4e39d9864fd1466ef9d76947fb93d0cf5be2/termdown.py#L98-L118
def format_seconds(seconds, hide_seconds=False): """ Returns a human-readable string representation of the given amount of seconds. """ if seconds <= 60: return str(seconds) output = "" for period, period_seconds in ( ('y', 31557600), ('d', 86400), ('h', 3600), ('m', 60), ('s', 1), ): if seconds >= period_seconds and not (hide_seconds and period == 's'): output += str(int(seconds / period_seconds)) output += period output += " " seconds = seconds % period_seconds return output.strip()
[ "def", "format_seconds", "(", "seconds", ",", "hide_seconds", "=", "False", ")", ":", "if", "seconds", "<=", "60", ":", "return", "str", "(", "seconds", ")", "output", "=", "\"\"", "for", "period", ",", "period_seconds", "in", "(", "(", "'y'", ",", "31557600", ")", ",", "(", "'d'", ",", "86400", ")", ",", "(", "'h'", ",", "3600", ")", ",", "(", "'m'", ",", "60", ")", ",", "(", "'s'", ",", "1", ")", ",", ")", ":", "if", "seconds", ">=", "period_seconds", "and", "not", "(", "hide_seconds", "and", "period", "==", "'s'", ")", ":", "output", "+=", "str", "(", "int", "(", "seconds", "/", "period_seconds", ")", ")", "output", "+=", "period", "output", "+=", "\" \"", "seconds", "=", "seconds", "%", "period_seconds", "return", "output", ".", "strip", "(", ")" ]
Returns a human-readable string representation of the given amount of seconds.
[ "Returns", "a", "human", "-", "readable", "string", "representation", "of", "the", "given", "amount", "of", "seconds", "." ]
python
train
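Example outputs for the helper above, traced directly from the displayed code (the input values are arbitrary):

format_seconds(45)                         # -> "45"        (values <= 60 are returned verbatim)
format_seconds(3725)                       # -> "1h 2m 5s"
format_seconds(3725, hide_seconds=True)    # -> "1h 2m"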
bambinos/bambi
bambi/models.py
https://github.com/bambinos/bambi/blob/b4a0ced917968bb99ca20915317417d708387946/bambi/models.py#L548-L574
def _match_derived_terms(self, name): ''' Returns all (random) terms whose named are derived from the specified string. For example, 'condition|subject' should match the terms with names '1|subject', 'condition[T.1]|subject', and so on. Only works for strings with grouping operator ('|'). ''' if '|' not in name: return None patt = r'^([01]+)*[\s\+]*([^\|]+)*\|(.*)' intcpt, pred, grpr = re.search(patt, name).groups() intcpt = '1|%s' % grpr if not pred: return [self.terms[intcpt]] if intcpt in self.terms else None source = '%s|%s' % (pred, grpr) found = [t for (n, t) in self.terms.items() if n == intcpt or re.sub('(\[.*?\])', '', n) == source] # If only the intercept matches, return None, because we want to err # on the side of caution and not consider '1|subject' to be a match for # 'condition|subject' if no slopes are found (e.g., the intercept could # have been set by some other specification like 'gender|subject'). return found if found and (len(found) > 1 or found[0].name != intcpt) \ else None
[ "def", "_match_derived_terms", "(", "self", ",", "name", ")", ":", "if", "'|'", "not", "in", "name", ":", "return", "None", "patt", "=", "r'^([01]+)*[\\s\\+]*([^\\|]+)*\\|(.*)'", "intcpt", ",", "pred", ",", "grpr", "=", "re", ".", "search", "(", "patt", ",", "name", ")", ".", "groups", "(", ")", "intcpt", "=", "'1|%s'", "%", "grpr", "if", "not", "pred", ":", "return", "[", "self", ".", "terms", "[", "intcpt", "]", "]", "if", "intcpt", "in", "self", ".", "terms", "else", "None", "source", "=", "'%s|%s'", "%", "(", "pred", ",", "grpr", ")", "found", "=", "[", "t", "for", "(", "n", ",", "t", ")", "in", "self", ".", "terms", ".", "items", "(", ")", "if", "n", "==", "intcpt", "or", "re", ".", "sub", "(", "'(\\[.*?\\])'", ",", "''", ",", "n", ")", "==", "source", "]", "# If only the intercept matches, return None, because we want to err", "# on the side of caution and not consider '1|subject' to be a match for", "# 'condition|subject' if no slopes are found (e.g., the intercept could", "# have been set by some other specification like 'gender|subject').", "return", "found", "if", "found", "and", "(", "len", "(", "found", ")", ">", "1", "or", "found", "[", "0", "]", ".", "name", "!=", "intcpt", ")", "else", "None" ]
Returns all (random) terms whose named are derived from the specified string. For example, 'condition|subject' should match the terms with names '1|subject', 'condition[T.1]|subject', and so on. Only works for strings with grouping operator ('|').
[ "Returns", "all", "(", "random", ")", "terms", "whose", "named", "are", "derived", "from", "the", "specified", "string", ".", "For", "example", "condition|subject", "should", "match", "the", "terms", "with", "names", "1|subject", "condition", "[", "T", ".", "1", "]", "|subject", "and", "so", "on", ".", "Only", "works", "for", "strings", "with", "grouping", "operator", "(", "|", ")", "." ]
python
train
mitsei/dlkit
dlkit/handcar/repository/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/repository/managers.py#L1222-L1244
def get_asset_composition_session(self): """Gets the session for retrieving asset compositions. return: (osid.repository.AssetCompositionSession) - an AssetCompositionSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_asset_composition() is false compliance: optional - This method must be implemented if supports_asset_composition() is true. """ if not self.supports_asset_composition(): raise Unimplemented() try: from . import sessions except ImportError: raise # OperationFailed() try: session = sessions.AssetCompositionSession(proxy=self._proxy, runtime=self._runtime) except AttributeError: raise # OperationFailed() return session
[ "def", "get_asset_composition_session", "(", "self", ")", ":", "if", "not", "self", ".", "supports_asset_composition", "(", ")", ":", "raise", "Unimplemented", "(", ")", "try", ":", "from", ".", "import", "sessions", "except", "ImportError", ":", "raise", "# OperationFailed()", "try", ":", "session", "=", "sessions", ".", "AssetCompositionSession", "(", "proxy", "=", "self", ".", "_proxy", ",", "runtime", "=", "self", ".", "_runtime", ")", "except", "AttributeError", ":", "raise", "# OperationFailed()", "return", "session" ]
Gets the session for retrieving asset compositions. return: (osid.repository.AssetCompositionSession) - an AssetCompositionSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_asset_composition() is false compliance: optional - This method must be implemented if supports_asset_composition() is true.
[ "Gets", "the", "session", "for", "retrieving", "asset", "compositions", "." ]
python
train
rlabbe/filterpy
filterpy/kalman/information_filter.py
https://github.com/rlabbe/filterpy/blob/8123214de798ffb63db968bb0b9492ee74e77950/filterpy/kalman/information_filter.py#L291-L363
def batch_filter(self, zs, Rs=None, update_first=False, saver=None): """ Batch processes a sequences of measurements. Parameters ---------- zs : list-like list of measurements at each time step `self.dt` Missing measurements must be represented by 'None'. Rs : list-like, optional optional list of values to use for the measurement error covariance; a value of None in any position will cause the filter to use `self.R` for that time step. update_first : bool, optional, controls whether the order of operations is update followed by predict, or predict followed by update. Default is predict->update. saver : filterpy.common.Saver, optional filterpy.common.Saver object. If provided, saver.save() will be called after every epoch Returns ------- means: np.array((n,dim_x,1)) array of the state for each time step. Each entry is an np.array. In other words `means[k,:]` is the state at step `k`. covariance: np.array((n,dim_x,dim_x)) array of the covariances for each time step. In other words `covariance[k,:,:]` is the covariance at step `k`. """ raise NotImplementedError("this is not implemented yet") #pylint: disable=unreachable, no-member # this is a copy of the code from kalman_filter, it has not been # turned into the information filter yet. DO NOT USE. n = np.size(zs, 0) if Rs is None: Rs = [None] * n # mean estimates from Kalman Filter means = zeros((n, self.dim_x, 1)) # state covariances from Kalman Filter covariances = zeros((n, self.dim_x, self.dim_x)) if update_first: for i, (z, r) in enumerate(zip(zs, Rs)): self.update(z, r) means[i, :] = self.x covariances[i, :, :] = self._P self.predict() if saver is not None: saver.save() else: for i, (z, r) in enumerate(zip(zs, Rs)): self.predict() self.update(z, r) means[i, :] = self.x covariances[i, :, :] = self._P if saver is not None: saver.save() return (means, covariances)
[ "def", "batch_filter", "(", "self", ",", "zs", ",", "Rs", "=", "None", ",", "update_first", "=", "False", ",", "saver", "=", "None", ")", ":", "raise", "NotImplementedError", "(", "\"this is not implemented yet\"", ")", "#pylint: disable=unreachable, no-member", "# this is a copy of the code from kalman_filter, it has not been", "# turned into the information filter yet. DO NOT USE.", "n", "=", "np", ".", "size", "(", "zs", ",", "0", ")", "if", "Rs", "is", "None", ":", "Rs", "=", "[", "None", "]", "*", "n", "# mean estimates from Kalman Filter", "means", "=", "zeros", "(", "(", "n", ",", "self", ".", "dim_x", ",", "1", ")", ")", "# state covariances from Kalman Filter", "covariances", "=", "zeros", "(", "(", "n", ",", "self", ".", "dim_x", ",", "self", ".", "dim_x", ")", ")", "if", "update_first", ":", "for", "i", ",", "(", "z", ",", "r", ")", "in", "enumerate", "(", "zip", "(", "zs", ",", "Rs", ")", ")", ":", "self", ".", "update", "(", "z", ",", "r", ")", "means", "[", "i", ",", ":", "]", "=", "self", ".", "x", "covariances", "[", "i", ",", ":", ",", ":", "]", "=", "self", ".", "_P", "self", ".", "predict", "(", ")", "if", "saver", "is", "not", "None", ":", "saver", ".", "save", "(", ")", "else", ":", "for", "i", ",", "(", "z", ",", "r", ")", "in", "enumerate", "(", "zip", "(", "zs", ",", "Rs", ")", ")", ":", "self", ".", "predict", "(", ")", "self", ".", "update", "(", "z", ",", "r", ")", "means", "[", "i", ",", ":", "]", "=", "self", ".", "x", "covariances", "[", "i", ",", ":", ",", ":", "]", "=", "self", ".", "_P", "if", "saver", "is", "not", "None", ":", "saver", ".", "save", "(", ")", "return", "(", "means", ",", "covariances", ")" ]
Batch processes a sequences of measurements. Parameters ---------- zs : list-like list of measurements at each time step `self.dt` Missing measurements must be represented by 'None'. Rs : list-like, optional optional list of values to use for the measurement error covariance; a value of None in any position will cause the filter to use `self.R` for that time step. update_first : bool, optional, controls whether the order of operations is update followed by predict, or predict followed by update. Default is predict->update. saver : filterpy.common.Saver, optional filterpy.common.Saver object. If provided, saver.save() will be called after every epoch Returns ------- means: np.array((n,dim_x,1)) array of the state for each time step. Each entry is an np.array. In other words `means[k,:]` is the state at step `k`. covariance: np.array((n,dim_x,dim_x)) array of the covariances for each time step. In other words `covariance[k,:,:]` is the covariance at step `k`.
[ "Batch", "processes", "a", "sequences", "of", "measurements", "." ]
python
train
gpoulter/fablib
fablib.py
https://github.com/gpoulter/fablib/blob/5d14c4d998f79dd1aa3207063c3d06e30e3e2bf9/fablib.py#L126-L140
def rsync(local_path, remote_path, exclude=None, extra_opts=None): """Helper to rsync submodules across""" if not local_path.endswith('/'): local_path += '/' exclude = exclude or [] exclude.extend(['*.egg-info', '*.pyc', '.git', '.gitignore', '.gitmodules', '/build/', '/dist/']) with hide('running'): run("mkdir -p '{}'".format(remote_path)) return rsync_project( remote_path, local_path, delete=True, extra_opts='-i --omit-dir-times -FF ' + (extra_opts if extra_opts else ''), ssh_opts='-o StrictHostKeyChecking=no', exclude=exclude)
[ "def", "rsync", "(", "local_path", ",", "remote_path", ",", "exclude", "=", "None", ",", "extra_opts", "=", "None", ")", ":", "if", "not", "local_path", ".", "endswith", "(", "'/'", ")", ":", "local_path", "+=", "'/'", "exclude", "=", "exclude", "or", "[", "]", "exclude", ".", "extend", "(", "[", "'*.egg-info'", ",", "'*.pyc'", ",", "'.git'", ",", "'.gitignore'", ",", "'.gitmodules'", ",", "'/build/'", ",", "'/dist/'", "]", ")", "with", "hide", "(", "'running'", ")", ":", "run", "(", "\"mkdir -p '{}'\"", ".", "format", "(", "remote_path", ")", ")", "return", "rsync_project", "(", "remote_path", ",", "local_path", ",", "delete", "=", "True", ",", "extra_opts", "=", "'-i --omit-dir-times -FF '", "+", "(", "extra_opts", "if", "extra_opts", "else", "''", ")", ",", "ssh_opts", "=", "'-o StrictHostKeyChecking=no'", ",", "exclude", "=", "exclude", ")" ]
Helper to rsync submodules across
[ "Helper", "to", "rsync", "submodules", "across" ]
python
train
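An illustrative call of the helper above, assuming it runs inside a Fabric task with a host connection configured; the paths and options are made up:

rsync('build/docs', '/srv/www/docs',
      exclude=['*.tmp'],
      extra_opts='--dry-run')   # a trailing '/' is appended to the local path automatically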
textmagic/textmagic-rest-python
textmagic/rest/models/contacts.py
https://github.com/textmagic/textmagic-rest-python/blob/15d679cb985b88b1cb2153ef2ba80d9749f9e281/textmagic/rest/models/contacts.py#L247-L262
def delete_contacts(self, uid, **kwargs): """ Unassign contacts from the specified list. If contacts are assigned only to the specified list, they are deleted permanently. Returns True on success. :Example: client.lists.delete_contacts(uid=1901010, contacts="1723812,1239912") :param int uid: The unique id of the List. Required. :param str contacts: Contact ID(s), separated by comma. Required. """ uri = "%s/%s/contacts" % (self.uri, uid) response, instance = self.request("DELETE", uri, data=kwargs) return response.status == 204
[ "def", "delete_contacts", "(", "self", ",", "uid", ",", "*", "*", "kwargs", ")", ":", "uri", "=", "\"%s/%s/contacts\"", "%", "(", "self", ".", "uri", ",", "uid", ")", "response", ",", "instance", "=", "self", ".", "request", "(", "\"DELETE\"", ",", "uri", ",", "data", "=", "kwargs", ")", "return", "response", ".", "status", "==", "204" ]
Unassign contacts from the specified list. If contacts are assigned only to the specified list, they are deleted permanently. Returns True on success. :Example: client.lists.delete_contacts(uid=1901010, contacts="1723812,1239912") :param int uid: The unique id of the List. Required. :param str contacts: Contact ID(s), separated by comma. Required.
[ "Unassign", "contacts", "from", "the", "specified", "list", ".", "If", "contacts", "assign", "only", "to", "the", "specified", "list", "then", "delete", "permanently", ".", "Returns", "True", "if", "success", "." ]
python
train
google/python-gflags
gflags/flagvalues.py
https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/flagvalues.py#L245-L263
def _CleanupUnregisteredFlagFromModuleDicts(self, flag_obj): """Cleanup unregistered flags from all module -> [flags] dictionaries. If flag_obj is registered under either its long name or short name, it won't be removed from the dictionaries. Args: flag_obj: A flag object. """ if self._FlagIsRegistered(flag_obj): return for flags_by_module_dict in (self.FlagsByModuleDict(), self.FlagsByModuleIdDict(), self.KeyFlagsByModuleDict()): for flags_in_module in six.itervalues(flags_by_module_dict): # While (as opposed to if) takes care of multiple occurrences of a # flag in the list for the same module. while flag_obj in flags_in_module: flags_in_module.remove(flag_obj)
[ "def", "_CleanupUnregisteredFlagFromModuleDicts", "(", "self", ",", "flag_obj", ")", ":", "if", "self", ".", "_FlagIsRegistered", "(", "flag_obj", ")", ":", "return", "for", "flags_by_module_dict", "in", "(", "self", ".", "FlagsByModuleDict", "(", ")", ",", "self", ".", "FlagsByModuleIdDict", "(", ")", ",", "self", ".", "KeyFlagsByModuleDict", "(", ")", ")", ":", "for", "flags_in_module", "in", "six", ".", "itervalues", "(", "flags_by_module_dict", ")", ":", "# While (as opposed to if) takes care of multiple occurrences of a", "# flag in the list for the same module.", "while", "flag_obj", "in", "flags_in_module", ":", "flags_in_module", ".", "remove", "(", "flag_obj", ")" ]
Cleanup unregistered flags from all module -> [flags] dictionaries. If flag_obj is registered under either its long name or short name, it won't be removed from the dictionaries. Args: flag_obj: A flag object.
[ "Cleanup", "unregistered", "flags", "from", "all", "module", "-", ">", "[", "flags", "]", "dictionaries", "." ]
python
train
santoshphilip/eppy
eppy/bunch_subclass.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/bunch_subclass.py#L459-L475
def isequal(bch, fieldname, value, places=7): """return True if the field is equal to value""" def equalalphanumeric(bch, fieldname, value): if bch.get_retaincase(fieldname): return bch[fieldname] == value else: return bch[fieldname].upper() == value.upper() fieldidd = bch.getfieldidd(fieldname) try: ftype = fieldidd['type'][0] if ftype in ['real', 'integer']: return almostequal(bch[fieldname], float(value), places=places) else: return equalalphanumeric(bch, fieldname, value) except KeyError as e: return equalalphanumeric(bch, fieldname, value)
[ "def", "isequal", "(", "bch", ",", "fieldname", ",", "value", ",", "places", "=", "7", ")", ":", "def", "equalalphanumeric", "(", "bch", ",", "fieldname", ",", "value", ")", ":", "if", "bch", ".", "get_retaincase", "(", "fieldname", ")", ":", "return", "bch", "[", "fieldname", "]", "==", "value", "else", ":", "return", "bch", "[", "fieldname", "]", ".", "upper", "(", ")", "==", "value", ".", "upper", "(", ")", "fieldidd", "=", "bch", ".", "getfieldidd", "(", "fieldname", ")", "try", ":", "ftype", "=", "fieldidd", "[", "'type'", "]", "[", "0", "]", "if", "ftype", "in", "[", "'real'", ",", "'integer'", "]", ":", "return", "almostequal", "(", "bch", "[", "fieldname", "]", ",", "float", "(", "value", ")", ",", "places", "=", "places", ")", "else", ":", "return", "equalalphanumeric", "(", "bch", ",", "fieldname", ",", "value", ")", "except", "KeyError", "as", "e", ":", "return", "equalalphanumeric", "(", "bch", ",", "fieldname", ",", "value", ")" ]
return True if the field is equal to value
[ "return", "True", "if", "the", "field", "is", "equal", "to", "value" ]
python
train
symphonyoss/python-symphony
symphony/RESTful/nopkcs.py
https://github.com/symphonyoss/python-symphony/blob/b939f35fbda461183ec0c01790c754f89a295be0/symphony/RESTful/nopkcs.py#L43-L65
def POST_query(self, req_hook, req_args): ''' Generic POST query method ''' # HTTP POST queries require keyManagerTokens and sessionTokens headers = {'Content-Type': 'application/json', 'sessionToken': self.__session__, 'keyManagerToken': self.__keymngr__} # HTTP POST query to keymanager authenticate API try: if req_args is None: response = requests.post(self.__url__ + req_hook, headers=headers, verify=True) else: response = requests.post(self.__url__ + req_hook, headers=headers, data=req_args, verify=True) except requests.exceptions.RequestException as err: self.logger.error(err) return '500', 'Internal Error in RESTful.POST_query()' # return the token return response.status_code, response.text
[ "def", "POST_query", "(", "self", ",", "req_hook", ",", "req_args", ")", ":", "# HTTP POST queries require keyManagerTokens and sessionTokens", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", ",", "'sessionToken'", ":", "self", ".", "__session__", ",", "'keyManagerToken'", ":", "self", ".", "__keymngr__", "}", "# HTTP POST query to keymanager authenticate API", "try", ":", "if", "req_args", "is", "None", ":", "response", "=", "requests", ".", "post", "(", "self", ".", "__url__", "+", "req_hook", ",", "headers", "=", "headers", ",", "verify", "=", "True", ")", "else", ":", "response", "=", "requests", ".", "post", "(", "self", ".", "__url__", "+", "req_hook", ",", "headers", "=", "headers", ",", "data", "=", "req_args", ",", "verify", "=", "True", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "err", ":", "self", ".", "logger", ".", "error", "(", "err", ")", "return", "'500'", ",", "'Internal Error in RESTful.POST_query()'", "# return the token", "return", "response", ".", "status_code", ",", "response", ".", "text" ]
Generic POST query method
[ "Generic", "POST", "query", "method" ]
python
train
Neurita/boyle
boyle/utils/strings.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/utils/strings.py#L167-L181
def is_fnmatch_regex(string): """ Returns True if the given string is considered a fnmatch regular expression, False otherwise. It will look for :param string: str """ is_regex = False regex_chars = ['!', '*', '$'] for c in regex_chars: if string.find(c) > -1: return True return is_regex
[ "def", "is_fnmatch_regex", "(", "string", ")", ":", "is_regex", "=", "False", "regex_chars", "=", "[", "'!'", ",", "'*'", ",", "'$'", "]", "for", "c", "in", "regex_chars", ":", "if", "string", ".", "find", "(", "c", ")", ">", "-", "1", ":", "return", "True", "return", "is_regex" ]
Returns True if the given string is considered a fnmatch regular expression, False otherwise. It will look for :param string: str
[ "Returns", "True", "if", "the", "given", "string", "is", "considered", "a", "fnmatch", "regular", "expression", "False", "otherwise", ".", "It", "will", "look", "for" ]
python
valid
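A minimal usage sketch for is_fnmatch_regex above, assuming the boyle package is importable (the import path is taken from the record's path field); the expected results follow directly from the character check in the function body:

from boyle.utils.strings import is_fnmatch_regex

# Strings containing '!', '*' or '$' are treated as fnmatch patterns.
print(is_fnmatch_regex("*.txt"))       # True  ('*' present)
print(is_fnmatch_regex("plain_name"))  # False (no wildcard characters)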
HydraChain/hydrachain
hydrachain/native_contracts.py
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/native_contracts.py#L587-L606
def chain_nac_proxy(chain, sender, contract_address, value=0): "create an object which acts as a proxy for the contract on the chain" klass = registry[contract_address].im_self assert issubclass(klass, NativeABIContract) def mk_method(method): def m(s, *args): data = abi_encode_args(method, args) block = chain.head_candidate output = test_call(block, sender, contract_address, data) if output is not None: return abi_decode_return_vals(method, output) return m class cproxy(object): pass for m in klass._abi_methods(): setattr(cproxy, m.__func__.func_name, mk_method(m)) return cproxy()
[ "def", "chain_nac_proxy", "(", "chain", ",", "sender", ",", "contract_address", ",", "value", "=", "0", ")", ":", "klass", "=", "registry", "[", "contract_address", "]", ".", "im_self", "assert", "issubclass", "(", "klass", ",", "NativeABIContract", ")", "def", "mk_method", "(", "method", ")", ":", "def", "m", "(", "s", ",", "*", "args", ")", ":", "data", "=", "abi_encode_args", "(", "method", ",", "args", ")", "block", "=", "chain", ".", "head_candidate", "output", "=", "test_call", "(", "block", ",", "sender", ",", "contract_address", ",", "data", ")", "if", "output", "is", "not", "None", ":", "return", "abi_decode_return_vals", "(", "method", ",", "output", ")", "return", "m", "class", "cproxy", "(", "object", ")", ":", "pass", "for", "m", "in", "klass", ".", "_abi_methods", "(", ")", ":", "setattr", "(", "cproxy", ",", "m", ".", "__func__", ".", "func_name", ",", "mk_method", "(", "m", ")", ")", "return", "cproxy", "(", ")" ]
create an object which acts as a proxy for the contract on the chain
[ "create", "an", "object", "which", "acts", "as", "a", "proxy", "for", "the", "contract", "on", "the", "chain" ]
python
test
ssato/python-anyconfig
src/anyconfig/backend/xml.py
https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/backend/xml.py#L160-L174
def _merge_dicts(dics, container=dict): """ :param dics: [<dict/-like object must not have same keys each other>] :param container: callable to make a container object :return: <container> object >>> _merge_dicts(({}, )) {} >>> _merge_dicts(({'a': 1}, )) {'a': 1} >>> sorted(kv for kv in _merge_dicts(({'a': 1}, {'b': 2})).items()) [('a', 1), ('b', 2)] """ dic_itr = anyconfig.compat.from_iterable(d.items() for d in dics) return container(anyconfig.compat.OrderedDict(dic_itr))
[ "def", "_merge_dicts", "(", "dics", ",", "container", "=", "dict", ")", ":", "dic_itr", "=", "anyconfig", ".", "compat", ".", "from_iterable", "(", "d", ".", "items", "(", ")", "for", "d", "in", "dics", ")", "return", "container", "(", "anyconfig", ".", "compat", ".", "OrderedDict", "(", "dic_itr", ")", ")" ]
:param dics: [<dict/-like object must not have same keys each other>] :param container: callable to make a container object :return: <container> object >>> _merge_dicts(({}, )) {} >>> _merge_dicts(({'a': 1}, )) {'a': 1} >>> sorted(kv for kv in _merge_dicts(({'a': 1}, {'b': 2})).items()) [('a', 1), ('b', 2)]
[ ":", "param", "dics", ":", "[", "<dict", "/", "-", "like", "object", "must", "not", "have", "same", "keys", "each", "other", ">", "]", ":", "param", "container", ":", "callable", "to", "make", "a", "container", "object", ":", "return", ":", "<container", ">", "object" ]
python
train
matiasb/demiurge
demiurge/demiurge.py
https://github.com/matiasb/demiurge/blob/4cfbb24f0519ab99b9bf36fa4c20283ae6e7b9fe/demiurge/demiurge.py#L250-L254
def all(cls, path=''): """Return all occurrences of the item.""" url = urljoin(cls._meta.base_url, path) pq_items = cls._get_items(url=url, **cls._meta._pyquery_kwargs) return [cls(item=i) for i in pq_items.items()]
[ "def", "all", "(", "cls", ",", "path", "=", "''", ")", ":", "url", "=", "urljoin", "(", "cls", ".", "_meta", ".", "base_url", ",", "path", ")", "pq_items", "=", "cls", ".", "_get_items", "(", "url", "=", "url", ",", "*", "*", "cls", ".", "_meta", ".", "_pyquery_kwargs", ")", "return", "[", "cls", "(", "item", "=", "i", ")", "for", "i", "in", "pq_items", ".", "items", "(", ")", "]" ]
Return all occurrences of the item.
[ "Return", "all", "occurrences", "of", "the", "item", "." ]
python
train
crdoconnor/strictyaml
strictyaml/representation.py
https://github.com/crdoconnor/strictyaml/blob/efdac7f89e81679fc95686288cd32b9563fde609/strictyaml/representation.py#L240-L245
def as_yaml(self): """ Render the YAML node and subnodes as string. """ dumped = dump(self.as_marked_up(), Dumper=StrictYAMLDumper, allow_unicode=True) return dumped if sys.version_info[0] == 3 else dumped.decode("utf8")
[ "def", "as_yaml", "(", "self", ")", ":", "dumped", "=", "dump", "(", "self", ".", "as_marked_up", "(", ")", ",", "Dumper", "=", "StrictYAMLDumper", ",", "allow_unicode", "=", "True", ")", "return", "dumped", "if", "sys", ".", "version_info", "[", "0", "]", "==", "3", "else", "dumped", ".", "decode", "(", "\"utf8\"", ")" ]
Render the YAML node and subnodes as string.
[ "Render", "the", "YAML", "node", "and", "subnodes", "as", "string", "." ]
python
train
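A short usage sketch for as_yaml above; it assumes the strictyaml package is installed and uses its public load() helper to build a document object before rendering it back to text:

from strictyaml import load

doc = load("a: 1\nb: hello")
print(doc.as_yaml())  # round-trips the parsed document back to YAML text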
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/classification_results.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/classification_results.py#L192-L215
def save_to_file(self, filename, remap_dim0=None, remap_dim1=None): """Saves matrix to the file. Args: filename: name of the file where to save matrix remap_dim0: dictionary with mapping row indices to row names which should be saved to file. If none then indices will be used as names. remap_dim1: dictionary with mapping column indices to column names which should be saved to file. If none then indices will be used as names. """ # rows - first index # columns - second index with open(filename, 'w') as fobj: columns = list(sorted(self._dim1)) for col in columns: fobj.write(',') fobj.write(str(remap_dim1[col] if remap_dim1 else col)) fobj.write('\n') for row in sorted(self._dim0): fobj.write(str(remap_dim0[row] if remap_dim0 else row)) for col in columns: fobj.write(',') fobj.write(str(self[row, col])) fobj.write('\n')
[ "def", "save_to_file", "(", "self", ",", "filename", ",", "remap_dim0", "=", "None", ",", "remap_dim1", "=", "None", ")", ":", "# rows - first index", "# columns - second index", "with", "open", "(", "filename", ",", "'w'", ")", "as", "fobj", ":", "columns", "=", "list", "(", "sorted", "(", "self", ".", "_dim1", ")", ")", "for", "col", "in", "columns", ":", "fobj", ".", "write", "(", "','", ")", "fobj", ".", "write", "(", "str", "(", "remap_dim1", "[", "col", "]", "if", "remap_dim1", "else", "col", ")", ")", "fobj", ".", "write", "(", "'\\n'", ")", "for", "row", "in", "sorted", "(", "self", ".", "_dim0", ")", ":", "fobj", ".", "write", "(", "str", "(", "remap_dim0", "[", "row", "]", "if", "remap_dim0", "else", "row", ")", ")", "for", "col", "in", "columns", ":", "fobj", ".", "write", "(", "','", ")", "fobj", ".", "write", "(", "str", "(", "self", "[", "row", ",", "col", "]", ")", ")", "fobj", ".", "write", "(", "'\\n'", ")" ]
Saves matrix to the file. Args: filename: name of the file where to save matrix remap_dim0: dictionary with mapping row indices to row names which should be saved to file. If none then indices will be used as names. remap_dim1: dictionary with mapping column indices to column names which should be saved to file. If none then indices will be used as names.
[ "Saves", "matrix", "to", "the", "file", "." ]
python
train
anrosent/LT-code
lt/sampler.py
https://github.com/anrosent/LT-code/blob/e13a4c927effc90f9d41ab3884f9fcbd95b9450d/lt/sampler.py#L25-L32
def gen_tau(S, K, delta): """The Robust part of the RSD, we precompute an array for speed """ pivot = floor(K/S) return [S/K * 1/d for d in range(1, pivot)] \ + [S/K * log(S/delta)] \ + [0 for d in range(pivot, K)]
[ "def", "gen_tau", "(", "S", ",", "K", ",", "delta", ")", ":", "pivot", "=", "floor", "(", "K", "/", "S", ")", "return", "[", "S", "/", "K", "*", "1", "/", "d", "for", "d", "in", "range", "(", "1", ",", "pivot", ")", "]", "+", "[", "S", "/", "K", "*", "log", "(", "S", "/", "delta", ")", "]", "+", "[", "0", "for", "d", "in", "range", "(", "pivot", ",", "K", ")", "]" ]
The Robust part of the RSD, we precompute an array for speed
[ "The", "Robust", "part", "of", "the", "RSD", "we", "precompute", "an", "array", "for", "speed" ]
python
train
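An illustrative call for gen_tau above (the parameter values are invented; the import path follows the record's path field). The comment restates what the list comprehensions in the function body produce:

from lt.sampler import gen_tau

S, K, delta = 10, 100, 0.5   # illustrative robust-soliton parameters
tau = gen_tau(S, K, delta)
# K entries in total: S/K * 1/d for degrees d below floor(K/S), a single spike
# of S/K * log(S/delta), then zeros for the remaining degrees.
print(len(tau))              # 100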
dswah/pyGAM
pygam/pygam.py
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L1572-L1661
def summary(self): """produce a summary of the model statistics Parameters ---------- None Returns ------- None """ if not self._is_fitted: raise AttributeError('GAM has not been fitted. Call fit first.') # high-level model summary width_details = 47 width_results = 58 model_fmt = [ (self.__class__.__name__, 'model_details', width_details), ('', 'model_results', width_results) ] model_details = [] objective = 'UBRE' if self.distribution._known_scale else 'GCV' model_details.append({'model_details': space_row('Distribution:', self.distribution.__class__.__name__, total_width=width_details), 'model_results': space_row('Effective DoF:', str(np.round(self.statistics_['edof'], 4)), total_width=width_results)}) model_details.append({'model_details': space_row('Link Function:', self.link.__class__.__name__, total_width=width_details), 'model_results': space_row('Log Likelihood:', str(np.round(self.statistics_['loglikelihood'], 4)), total_width=width_results)}) model_details.append({'model_details': space_row('Number of Samples:', str(self.statistics_['n_samples']), total_width=width_details), 'model_results': space_row('AIC: ', str(np.round(self.statistics_['AIC'], 4)), total_width=width_results)}) model_details.append({'model_results': space_row('AICc: ', str(np.round(self.statistics_['AICc'], 4)), total_width=width_results)}) model_details.append({'model_results': space_row(objective + ':', str(np.round(self.statistics_[objective], 4)), total_width=width_results)}) model_details.append({'model_results': space_row('Scale:', str(np.round(self.statistics_['scale'], 4)), total_width=width_results)}) model_details.append({'model_results': space_row('Pseudo R-Squared:', str(np.round(self.statistics_['pseudo_r2']['explained_deviance'], 4)), total_width=width_results)}) # term summary data = [] for i, term in enumerate(self.terms): # TODO bug: if the number of samples is less than the number of coefficients # we cant get the edof per term if len(self.statistics_['edof_per_coef']) == len(self.coef_): idx = self.terms.get_coef_indices(i) edof = np.round(self.statistics_['edof_per_coef'][idx].sum(), 1) else: edof = '' term_data = { 'feature_func': repr(term), 'lam': '' if term.isintercept else np.round(flatten(term.lam), 4), 'rank': '{}'.format(term.n_coefs), 'edof': '{}'.format(edof), 'p_value': '%.2e'%(self.statistics_['p_values'][i]), 'sig_code': sig_code(self.statistics_['p_values'][i]) } data.append(term_data) fmt = [ ('Feature Function', 'feature_func', 33), ('Lambda', 'lam', 20), ('Rank', 'rank', 12), ('EDoF', 'edof', 12), ('P > x', 'p_value', 12), ('Sig. Code', 'sig_code', 12) ] print( TablePrinter(model_fmt, ul='=', sep=' ')(model_details) ) print("="*106) print( TablePrinter(fmt, ul='=')(data) ) print("="*106) print("Significance codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1") print() print("WARNING: Fitting splines and a linear function to a feature introduces a model identifiability problem\n" \ " which can cause p-values to appear significant when they are not.") print() print("WARNING: p-values calculated in this manner behave correctly for un-penalized models or models with\n" \ " known smoothing parameters, but when smoothing parameters have been estimated, the p-values\n" \ " are typically lower than they should be, meaning that the tests reject the null too readily.") # P-VALUE BUG warnings.warn("KNOWN BUG: p-values computed in this summary are likely "\ "much smaller than they should be. \n \n"\ "Please do not make inferences based on these values! \n\n"\ "Collaborate on a solution, and stay up to date at: \n"\ "github.com/dswah/pyGAM/issues/163 \n", stacklevel=2)
[ "def", "summary", "(", "self", ")", ":", "if", "not", "self", ".", "_is_fitted", ":", "raise", "AttributeError", "(", "'GAM has not been fitted. Call fit first.'", ")", "# high-level model summary", "width_details", "=", "47", "width_results", "=", "58", "model_fmt", "=", "[", "(", "self", ".", "__class__", ".", "__name__", ",", "'model_details'", ",", "width_details", ")", ",", "(", "''", ",", "'model_results'", ",", "width_results", ")", "]", "model_details", "=", "[", "]", "objective", "=", "'UBRE'", "if", "self", ".", "distribution", ".", "_known_scale", "else", "'GCV'", "model_details", ".", "append", "(", "{", "'model_details'", ":", "space_row", "(", "'Distribution:'", ",", "self", ".", "distribution", ".", "__class__", ".", "__name__", ",", "total_width", "=", "width_details", ")", ",", "'model_results'", ":", "space_row", "(", "'Effective DoF:'", ",", "str", "(", "np", ".", "round", "(", "self", ".", "statistics_", "[", "'edof'", "]", ",", "4", ")", ")", ",", "total_width", "=", "width_results", ")", "}", ")", "model_details", ".", "append", "(", "{", "'model_details'", ":", "space_row", "(", "'Link Function:'", ",", "self", ".", "link", ".", "__class__", ".", "__name__", ",", "total_width", "=", "width_details", ")", ",", "'model_results'", ":", "space_row", "(", "'Log Likelihood:'", ",", "str", "(", "np", ".", "round", "(", "self", ".", "statistics_", "[", "'loglikelihood'", "]", ",", "4", ")", ")", ",", "total_width", "=", "width_results", ")", "}", ")", "model_details", ".", "append", "(", "{", "'model_details'", ":", "space_row", "(", "'Number of Samples:'", ",", "str", "(", "self", ".", "statistics_", "[", "'n_samples'", "]", ")", ",", "total_width", "=", "width_details", ")", ",", "'model_results'", ":", "space_row", "(", "'AIC: '", ",", "str", "(", "np", ".", "round", "(", "self", ".", "statistics_", "[", "'AIC'", "]", ",", "4", ")", ")", ",", "total_width", "=", "width_results", ")", "}", ")", "model_details", ".", "append", "(", "{", "'model_results'", ":", "space_row", "(", "'AICc: '", ",", "str", "(", "np", ".", "round", "(", "self", ".", "statistics_", "[", "'AICc'", "]", ",", "4", ")", ")", ",", "total_width", "=", "width_results", ")", "}", ")", "model_details", ".", "append", "(", "{", "'model_results'", ":", "space_row", "(", "objective", "+", "':'", ",", "str", "(", "np", ".", "round", "(", "self", ".", "statistics_", "[", "objective", "]", ",", "4", ")", ")", ",", "total_width", "=", "width_results", ")", "}", ")", "model_details", ".", "append", "(", "{", "'model_results'", ":", "space_row", "(", "'Scale:'", ",", "str", "(", "np", ".", "round", "(", "self", ".", "statistics_", "[", "'scale'", "]", ",", "4", ")", ")", ",", "total_width", "=", "width_results", ")", "}", ")", "model_details", ".", "append", "(", "{", "'model_results'", ":", "space_row", "(", "'Pseudo R-Squared:'", ",", "str", "(", "np", ".", "round", "(", "self", ".", "statistics_", "[", "'pseudo_r2'", "]", "[", "'explained_deviance'", "]", ",", "4", ")", ")", ",", "total_width", "=", "width_results", ")", "}", ")", "# term summary", "data", "=", "[", "]", "for", "i", ",", "term", "in", "enumerate", "(", "self", ".", "terms", ")", ":", "# TODO bug: if the number of samples is less than the number of coefficients", "# we cant get the edof per term", "if", "len", "(", "self", ".", "statistics_", "[", "'edof_per_coef'", "]", ")", "==", "len", "(", "self", ".", "coef_", ")", ":", "idx", "=", "self", ".", "terms", ".", "get_coef_indices", "(", "i", ")", "edof", "=", "np", ".", "round", "(", "self", 
".", "statistics_", "[", "'edof_per_coef'", "]", "[", "idx", "]", ".", "sum", "(", ")", ",", "1", ")", "else", ":", "edof", "=", "''", "term_data", "=", "{", "'feature_func'", ":", "repr", "(", "term", ")", ",", "'lam'", ":", "''", "if", "term", ".", "isintercept", "else", "np", ".", "round", "(", "flatten", "(", "term", ".", "lam", ")", ",", "4", ")", ",", "'rank'", ":", "'{}'", ".", "format", "(", "term", ".", "n_coefs", ")", ",", "'edof'", ":", "'{}'", ".", "format", "(", "edof", ")", ",", "'p_value'", ":", "'%.2e'", "%", "(", "self", ".", "statistics_", "[", "'p_values'", "]", "[", "i", "]", ")", ",", "'sig_code'", ":", "sig_code", "(", "self", ".", "statistics_", "[", "'p_values'", "]", "[", "i", "]", ")", "}", "data", ".", "append", "(", "term_data", ")", "fmt", "=", "[", "(", "'Feature Function'", ",", "'feature_func'", ",", "33", ")", ",", "(", "'Lambda'", ",", "'lam'", ",", "20", ")", ",", "(", "'Rank'", ",", "'rank'", ",", "12", ")", ",", "(", "'EDoF'", ",", "'edof'", ",", "12", ")", ",", "(", "'P > x'", ",", "'p_value'", ",", "12", ")", ",", "(", "'Sig. Code'", ",", "'sig_code'", ",", "12", ")", "]", "print", "(", "TablePrinter", "(", "model_fmt", ",", "ul", "=", "'='", ",", "sep", "=", "' '", ")", "(", "model_details", ")", ")", "print", "(", "\"=\"", "*", "106", ")", "print", "(", "TablePrinter", "(", "fmt", ",", "ul", "=", "'='", ")", "(", "data", ")", ")", "print", "(", "\"=\"", "*", "106", ")", "print", "(", "\"Significance codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\"", ")", "print", "(", ")", "print", "(", "\"WARNING: Fitting splines and a linear function to a feature introduces a model identifiability problem\\n\"", "\" which can cause p-values to appear significant when they are not.\"", ")", "print", "(", ")", "print", "(", "\"WARNING: p-values calculated in this manner behave correctly for un-penalized models or models with\\n\"", "\" known smoothing parameters, but when smoothing parameters have been estimated, the p-values\\n\"", "\" are typically lower than they should be, meaning that the tests reject the null too readily.\"", ")", "# P-VALUE BUG", "warnings", ".", "warn", "(", "\"KNOWN BUG: p-values computed in this summary are likely \"", "\"much smaller than they should be. \\n \\n\"", "\"Please do not make inferences based on these values! \\n\\n\"", "\"Collaborate on a solution, and stay up to date at: \\n\"", "\"github.com/dswah/pyGAM/issues/163 \\n\"", ",", "stacklevel", "=", "2", ")" ]
produce a summary of the model statistics Parameters ---------- None Returns ------- None
[ "produce", "a", "summary", "of", "the", "model", "statistics" ]
python
train
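A hedged sketch of how summary() is normally reached from user code, assuming pygam is installed; LinearGAM is one of the GAM subclasses that inherits this method, and the toy data below is purely illustrative:

import numpy as np
from pygam import LinearGAM

X = np.random.rand(200, 1)
y = 2.0 * X[:, 0] + np.random.normal(scale=0.1, size=200)

gam = LinearGAM().fit(X, y)
gam.summary()  # prints the model and per-term tables assembled by the method above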
thunder-project/thunder
thunder/readers.py
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L233-L278
def parse_query(query, delim='/'): """ Parse a boto query """ key = '' prefix = '' postfix = '' parsed = urlparse(query) query = parsed.path.lstrip(delim) bucket = parsed.netloc if not parsed.scheme.lower() in ('', "gs", "s3", "s3n"): raise ValueError("Query scheme must be one of '', 'gs', 's3', or 's3n'; " "got: '%s'" % parsed.scheme) storage = parsed.scheme.lower() if not bucket.strip() and query: toks = query.split(delim, 1) bucket = toks[0] if len(toks) == 2: key = toks[1] else: key = '' if not bucket.strip(): raise ValueError("Could not parse bucket name from query string '%s'" % query) tokens = query.split("*") n = len(tokens) if n == 0: pass elif n == 1: key = tokens[0] elif n == 2: index = tokens[0].rfind(delim) if index >= 0: key = tokens[0][:(index + 1)] prefix = tokens[0][(index + 1):] if len(tokens[0]) > (index + 1) else '' else: prefix = tokens[0] postfix = tokens[1] else: raise ValueError("Only one wildcard ('*') allowed in query string, got: '%s'" % query) return storage, bucket, key, prefix, postfix
[ "def", "parse_query", "(", "query", ",", "delim", "=", "'/'", ")", ":", "key", "=", "''", "prefix", "=", "''", "postfix", "=", "''", "parsed", "=", "urlparse", "(", "query", ")", "query", "=", "parsed", ".", "path", ".", "lstrip", "(", "delim", ")", "bucket", "=", "parsed", ".", "netloc", "if", "not", "parsed", ".", "scheme", ".", "lower", "(", ")", "in", "(", "''", ",", "\"gs\"", ",", "\"s3\"", ",", "\"s3n\"", ")", ":", "raise", "ValueError", "(", "\"Query scheme must be one of '', 'gs', 's3', or 's3n'; \"", "\"got: '%s'\"", "%", "parsed", ".", "scheme", ")", "storage", "=", "parsed", ".", "scheme", ".", "lower", "(", ")", "if", "not", "bucket", ".", "strip", "(", ")", "and", "query", ":", "toks", "=", "query", ".", "split", "(", "delim", ",", "1", ")", "bucket", "=", "toks", "[", "0", "]", "if", "len", "(", "toks", ")", "==", "2", ":", "key", "=", "toks", "[", "1", "]", "else", ":", "key", "=", "''", "if", "not", "bucket", ".", "strip", "(", ")", ":", "raise", "ValueError", "(", "\"Could not parse bucket name from query string '%s'\"", "%", "query", ")", "tokens", "=", "query", ".", "split", "(", "\"*\"", ")", "n", "=", "len", "(", "tokens", ")", "if", "n", "==", "0", ":", "pass", "elif", "n", "==", "1", ":", "key", "=", "tokens", "[", "0", "]", "elif", "n", "==", "2", ":", "index", "=", "tokens", "[", "0", "]", ".", "rfind", "(", "delim", ")", "if", "index", ">=", "0", ":", "key", "=", "tokens", "[", "0", "]", "[", ":", "(", "index", "+", "1", ")", "]", "prefix", "=", "tokens", "[", "0", "]", "[", "(", "index", "+", "1", ")", ":", "]", "if", "len", "(", "tokens", "[", "0", "]", ")", ">", "(", "index", "+", "1", ")", "else", "''", "else", ":", "prefix", "=", "tokens", "[", "0", "]", "postfix", "=", "tokens", "[", "1", "]", "else", ":", "raise", "ValueError", "(", "\"Only one wildcard ('*') allowed in query string, got: '%s'\"", "%", "query", ")", "return", "storage", ",", "bucket", ",", "key", ",", "prefix", ",", "postfix" ]
Parse a boto query
[ "Parse", "a", "boto", "query" ]
python
train
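A usage sketch for parse_query above; the bucket and key names are invented, and the expected tuple is obtained by tracing the wildcard-splitting logic in the function body:

from thunder.readers import parse_query

storage, bucket, key, prefix, postfix = parse_query("s3://my-bucket/path/to/file*.bin")
print(storage)  # 's3'
print(bucket)   # 'my-bucket'
print(key)      # 'path/to/'
print(prefix)   # 'file'
print(postfix)  # '.bin'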
inonit/drf-haystack
drf_haystack/serializers.py
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L156-L163
def _get_index_class_name(self, index_cls): """ Converts an index model class to a name suitable for use as a field name prefix. A user may optionally specify custom aliases via an 'index_aliases' attribute on the Meta class """ cls_name = index_cls.__name__ aliases = self.Meta.index_aliases return aliases.get(cls_name, cls_name.split('.')[-1])
[ "def", "_get_index_class_name", "(", "self", ",", "index_cls", ")", ":", "cls_name", "=", "index_cls", ".", "__name__", "aliases", "=", "self", ".", "Meta", ".", "index_aliases", "return", "aliases", ".", "get", "(", "cls_name", ",", "cls_name", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", ")" ]
Converts an index model class to a name suitable for use as a field name prefix. A user may optionally specify custom aliases via an 'index_aliases' attribute on the Meta class
[ "Converts", "an", "index", "model", "class", "to", "a", "name", "suitable", "for", "use", "as", "a", "field", "name", "prefix", ".", "A", "user", "may", "optionally", "specify", "custom", "aliases", "via", "an", "index_aliases", "attribute", "on", "the", "Meta", "class" ]
python
train
ssato/python-anyconfig
src/anyconfig/processors.py
https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/processors.py#L142-L157
def find_by_fileext(fileext, prs): """ :param fileext: File extension :param prs: A list of :class:`anyconfig.models.processor.Processor` classes :return: A list of processor class to processor files with given extension :raises: UnknownFileTypeError """ def pred(pcls): """Predicate""" return fileext in pcls.extensions() pclss = findall_with_pred(pred, prs) if not pclss: raise UnknownFileTypeError("file extension={}".format(fileext)) return pclss
[ "def", "find_by_fileext", "(", "fileext", ",", "prs", ")", ":", "def", "pred", "(", "pcls", ")", ":", "\"\"\"Predicate\"\"\"", "return", "fileext", "in", "pcls", ".", "extensions", "(", ")", "pclss", "=", "findall_with_pred", "(", "pred", ",", "prs", ")", "if", "not", "pclss", ":", "raise", "UnknownFileTypeError", "(", "\"file extension={}\"", ".", "format", "(", "fileext", ")", ")", "return", "pclss" ]
:param fileext: File extension :param prs: A list of :class:`anyconfig.models.processor.Processor` classes :return: A list of processor class to processor files with given extension :raises: UnknownFileTypeError
[ ":", "param", "fileext", ":", "File", "extension", ":", "param", "prs", ":", "A", "list", "of", ":", "class", ":", "anyconfig", ".", "models", ".", "processor", ".", "Processor", "classes", ":", "return", ":", "A", "list", "of", "processor", "class", "to", "processor", "files", "with", "given", "extension", ":", "raises", ":", "UnknownFileTypeError" ]
python
train
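A self-contained sketch for find_by_fileext above using stand-in processor classes (they are not real anyconfig backends, only objects exposing the extensions() classmethod the predicate relies on):

from anyconfig.processors import find_by_fileext

class JsonLike:
    @classmethod
    def extensions(cls):
        return ["json", "jsn"]

class YamlLike:
    @classmethod
    def extensions(cls):
        return ["yml", "yaml"]

print(find_by_fileext("yaml", [JsonLike, YamlLike]))  # -> [YamlLike]
# An unknown extension raises UnknownFileTypeError, as in the function body.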
MoseleyBioinformaticsLab/ctfile
ctfile/ctfile.py
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L82-L93
def write(self, filehandle, file_format): """Write :class:`~ctfile.ctfile.CTfile` data into file. :param filehandle: File-like object. :param str file_format: Format to use to write data: ``ctfile`` or ``json``. :return: None. :rtype: :py:obj:`None`. """ try: filehandle.write(self.writestr(file_format=file_format)) except IOError: raise IOError('"filehandle" parameter must be writable.')
[ "def", "write", "(", "self", ",", "filehandle", ",", "file_format", ")", ":", "try", ":", "filehandle", ".", "write", "(", "self", ".", "writestr", "(", "file_format", "=", "file_format", ")", ")", "except", "IOError", ":", "raise", "IOError", "(", "'\"filehandle\" parameter must be writable.'", ")" ]
Write :class:`~ctfile.ctfile.CTfile` data into file. :param filehandle: File-like object. :param str file_format: Format to use to write data: ``ctfile`` or ``json``. :return: None. :rtype: :py:obj:`None`.
[ "Write", ":", "class", ":", "~ctfile", ".", "ctfile", ".", "CTfile", "data", "into", "file", "." ]
python
train
rasbt/biopandas
biopandas/mol2/pandas_mol2.py
https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/biopandas/mol2/pandas_mol2.py#L67-L86
def _load_mol2(self, mol2_lines, mol2_code, columns): """Load mol2 contents into this instance""" if columns is None: col_names = COLUMN_NAMES col_types = COLUMN_TYPES else: col_names, col_types = [], [] for i in range(len(columns)): col_names.append(columns[i][0]) col_types.append(columns[i][1]) try: self.mol2_text = ''.join(mol2_lines) self.code = mol2_code except TypeError: mol2_lines = [m.decode() for m in mol2_lines] self.mol2_text = ''.join(mol2_lines) self.code = mol2_code.decode() self._df = self._construct_df(mol2_lines, col_names, col_types)
[ "def", "_load_mol2", "(", "self", ",", "mol2_lines", ",", "mol2_code", ",", "columns", ")", ":", "if", "columns", "is", "None", ":", "col_names", "=", "COLUMN_NAMES", "col_types", "=", "COLUMN_TYPES", "else", ":", "col_names", ",", "col_types", "=", "[", "]", ",", "[", "]", "for", "i", "in", "range", "(", "len", "(", "columns", ")", ")", ":", "col_names", ".", "append", "(", "columns", "[", "i", "]", "[", "0", "]", ")", "col_types", ".", "append", "(", "columns", "[", "i", "]", "[", "1", "]", ")", "try", ":", "self", ".", "mol2_text", "=", "''", ".", "join", "(", "mol2_lines", ")", "self", ".", "code", "=", "mol2_code", "except", "TypeError", ":", "mol2_lines", "=", "[", "m", ".", "decode", "(", ")", "for", "m", "in", "mol2_lines", "]", "self", ".", "mol2_text", "=", "''", ".", "join", "(", "mol2_lines", ")", "self", ".", "code", "=", "mol2_code", ".", "decode", "(", ")", "self", ".", "_df", "=", "self", ".", "_construct_df", "(", "mol2_lines", ",", "col_names", ",", "col_types", ")" ]
Load mol2 contents into this instance
[ "Load", "mol2", "contents", "into", "this", "instance" ]
python
train
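The private _load_mol2 above is normally reached through the public reader; a hedged sketch, assuming biopandas is installed and using a hypothetical file name:

from biopandas.mol2 import PandasMol2

pmol = PandasMol2().read_mol2("example.mol2")  # hypothetical input file
print(pmol.df.head())                          # parsed ATOM records as a DataFrame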
night-crawler/django-docker-helpers
django_docker_helpers/config/backends/base.py
https://github.com/night-crawler/django-docker-helpers/blob/b64f8009101a8eb61d3841124ba19e3ab881aa2f/django_docker_helpers/config/backends/base.py#L66-L92
def coerce(val: t.Any, coerce_type: t.Optional[t.Type] = None, coercer: t.Optional[t.Callable] = None) -> t.Any: """ Casts a type of ``val`` to ``coerce_type`` with ``coercer``. If ``coerce_type`` is bool and no ``coercer`` specified it uses :func:`~django_docker_helpers.utils.coerce_str_to_bool` by default. :param val: a value of any type :param coerce_type: any type :param coercer: provide a callback that takes ``val`` and returns a value with desired type :return: type casted value """ if not coerce_type and not coercer: return val if coerce_type and type(val) is coerce_type: return val if coerce_type and coerce_type is bool and not coercer: coercer = coerce_str_to_bool if coercer is None: coercer = coerce_type return coercer(val)
[ "def", "coerce", "(", "val", ":", "t", ".", "Any", ",", "coerce_type", ":", "t", ".", "Optional", "[", "t", ".", "Type", "]", "=", "None", ",", "coercer", ":", "t", ".", "Optional", "[", "t", ".", "Callable", "]", "=", "None", ")", "->", "t", ".", "Any", ":", "if", "not", "coerce_type", "and", "not", "coercer", ":", "return", "val", "if", "coerce_type", "and", "type", "(", "val", ")", "is", "coerce_type", ":", "return", "val", "if", "coerce_type", "and", "coerce_type", "is", "bool", "and", "not", "coercer", ":", "coercer", "=", "coerce_str_to_bool", "if", "coercer", "is", "None", ":", "coercer", "=", "coerce_type", "return", "coercer", "(", "val", ")" ]
Casts a type of ``val`` to ``coerce_type`` with ``coercer``. If ``coerce_type`` is bool and no ``coercer`` specified it uses :func:`~django_docker_helpers.utils.coerce_str_to_bool` by default. :param val: a value of any type :param coerce_type: any type :param coercer: provide a callback that takes ``val`` and returns a value with desired type :return: type casted value
[ "Casts", "a", "type", "of", "val", "to", "coerce_type", "with", "coercer", "." ]
python
train
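A few worked calls for coerce above; the results follow from tracing the branches in the function body, and the import path mirrors the record's path field:

from django_docker_helpers.config.backends.base import coerce

print(coerce('42', coerce_type=int))              # 42  (coerce_type used as the coercer)
print(coerce(7))                                  # 7   (no coercion requested)
print(coerce('8', coercer=lambda v: int(v) * 2))  # 16  (an explicit coercer wins)
# With coerce_type=bool and no coercer, coerce_str_to_bool is applied, so strings
# like 'true'/'false' are read as booleans (behaviour assumed from that helper's name).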
neighbordog/deviantart
deviantart/api.py
https://github.com/neighbordog/deviantart/blob/5612f1d5e2139a48c9d793d7fd19cde7e162d7b1/deviantart/api.py#L1590-L1620
def send_note(self, to, subject="", body="", noetid=""): """Send a note :param to: The username(s) that this note is to :param subject: The subject of the note :param body: The body of the note :param noetid: The UUID of the note that is being responded to """ if self.standard_grant_type is not "authorization_code": raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.") response = self._req('/notes/send', post_data={ 'to[]' : to, 'subject' : subject, 'body' : body, 'noetid' : noetid }) sent_notes = [] for item in response['results']: n = {} n['success'] = item['success'] n['user'] = User() n['user'].from_dict(item['user']) sent_notes.append(n) return sent_notes
[ "def", "send_note", "(", "self", ",", "to", ",", "subject", "=", "\"\"", ",", "body", "=", "\"\"", ",", "noetid", "=", "\"\"", ")", ":", "if", "self", ".", "standard_grant_type", "is", "not", "\"authorization_code\"", ":", "raise", "DeviantartError", "(", "\"Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.\"", ")", "response", "=", "self", ".", "_req", "(", "'/notes/send'", ",", "post_data", "=", "{", "'to[]'", ":", "to", ",", "'subject'", ":", "subject", ",", "'body'", ":", "body", ",", "'noetid'", ":", "noetid", "}", ")", "sent_notes", "=", "[", "]", "for", "item", "in", "response", "[", "'results'", "]", ":", "n", "=", "{", "}", "n", "[", "'success'", "]", "=", "item", "[", "'success'", "]", "n", "[", "'user'", "]", "=", "User", "(", ")", "n", "[", "'user'", "]", ".", "from_dict", "(", "item", "[", "'user'", "]", ")", "sent_notes", ".", "append", "(", "n", ")", "return", "sent_notes" ]
Send a note :param to: The username(s) that this note is to :param subject: The subject of the note :param body: The body of the note :param noetid: The UUID of the note that is being responded to
[ "Send", "a", "note" ]
python
train
lcharleux/argiope
argiope/utils.py
https://github.com/lcharleux/argiope/blob/8170e431362dc760589f7d141090fd133dece259/argiope/utils.py#L52-L66
def list_to_string(l = range(200), width = 40, indent = " "): """ Converts a list-like to string with given line width. """ l = [str(v) + "," for v in l] counter = 0 out = "" + indent for w in l: s = len(w) if counter + s > width: out += "\n" + indent counter = 0 out += w counter += s return out.strip(",")
[ "def", "list_to_string", "(", "l", "=", "range", "(", "200", ")", ",", "width", "=", "40", ",", "indent", "=", "\" \"", ")", ":", "l", "=", "[", "str", "(", "v", ")", "+", "\",\"", "for", "v", "in", "l", "]", "counter", "=", "0", "out", "=", "\"\"", "+", "indent", "for", "w", "in", "l", ":", "s", "=", "len", "(", "w", ")", "if", "counter", "+", "s", ">", "width", ":", "out", "+=", "\"\\n\"", "+", "indent", "counter", "=", "0", "out", "+=", "w", "counter", "+=", "s", "return", "out", ".", "strip", "(", "\",\"", ")" ]
Converts a list-like to string with given line width.
[ "Converts", "a", "list", "-", "like", "to", "string", "with", "given", "line", "width", "." ]
python
test
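A small illustrative call for list_to_string above (argument values invented); it shows the comma-joined, width-wrapped output without asserting the exact line breaks:

from argiope.utils import list_to_string

s = list_to_string(range(30), width=40, indent="  ")
print(s)  # comma-separated values, wrapped to roughly 40 characters per line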
FNNDSC/med2image
med2image/systemMisc.py
https://github.com/FNNDSC/med2image/blob/638d5d230de47608af20f9764acf8e382c2bf2ff/med2image/systemMisc.py#L1199-L1214
def str2lst(astr_input, astr_separator=" "): """ Breaks a string at <astr_separator> and joins into a list. Steps along all list elements and strips white space. The list elements are explicitly ascii encoded. """ alistI = astr_input.split(astr_separator) alistJ = [] for i in range(0, len(alistI)): alistI[i] = alistI[i].strip() alistI[i] = alistI[i].encode('ascii') if len(alistI[i]): alistJ.append(alistI[i]) return alistJ
[ "def", "str2lst", "(", "astr_input", ",", "astr_separator", "=", "\" \"", ")", ":", "alistI", "=", "astr_input", ".", "split", "(", "astr_separator", ")", "alistJ", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "alistI", ")", ")", ":", "alistI", "[", "i", "]", "=", "alistI", "[", "i", "]", ".", "strip", "(", ")", "alistI", "[", "i", "]", "=", "alistI", "[", "i", "]", ".", "encode", "(", "'ascii'", ")", "if", "len", "(", "alistI", "[", "i", "]", ")", ":", "alistJ", ".", "append", "(", "alistI", "[", "i", "]", ")", "return", "alistJ" ]
Breaks a string at <astr_separator> and joins into a list. Steps along all list elements and strips white space. The list elements are explicitly ascii encoded.
[ "Breaks", "a", "string", "at", "<astr_separator", ">", "and", "joins", "into", "a", "list", ".", "Steps", "along", "all", "list", "elements", "and", "strips", "white", "space", "." ]
python
train
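A usage sketch for str2lst above; the input string is invented, and the comment notes that on Python 3 the explicit ascii encoding yields bytes elements:

from med2image.systemMisc import str2lst

parts = str2lst("  alpha beta  gamma ")
print(parts)  # [b'alpha', b'beta', b'gamma'] on Python 3 (elements are ASCII-encoded)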
secdev/scapy
scapy/automaton.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/automaton.py#L96-L102
def wait_return(self, callback): """Entry point of SelectableObject: register the callback""" if self.check_recv(): return callback(self) _t = threading.Thread(target=self._wait_non_ressources, args=(callback,)) # noqa: E501 _t.setDaemon(True) _t.start()
[ "def", "wait_return", "(", "self", ",", "callback", ")", ":", "if", "self", ".", "check_recv", "(", ")", ":", "return", "callback", "(", "self", ")", "_t", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_wait_non_ressources", ",", "args", "=", "(", "callback", ",", ")", ")", "# noqa: E501", "_t", ".", "setDaemon", "(", "True", ")", "_t", ".", "start", "(", ")" ]
Entry point of SelectableObject: register the callback
[ "Entry", "point", "of", "SelectableObject", ":", "register", "the", "callback" ]
python
train
chrisspen/weka
weka/arff.py
https://github.com/chrisspen/weka/blob/c86fc4b8eef1afd56f89ec28283bdf9e2fdc453b/weka/arff.py#L404-L422
def open_stream(self, class_attr_name=None, fn=None): """ Save an arff structure to a file, leaving the file object open for writing of new data samples. This prevents you from directly accessing the data via Python, but when generating a huge file, this prevents all your data from being stored in memory. """ if fn: self.fout_fn = fn else: fd, self.fout_fn = tempfile.mkstemp() os.close(fd) self.fout = open(self.fout_fn, 'w') if class_attr_name: self.class_attr_name = class_attr_name self.write(fout=self.fout, schema_only=True) self.write(fout=self.fout, data_only=True) self.fout.flush()
[ "def", "open_stream", "(", "self", ",", "class_attr_name", "=", "None", ",", "fn", "=", "None", ")", ":", "if", "fn", ":", "self", ".", "fout_fn", "=", "fn", "else", ":", "fd", ",", "self", ".", "fout_fn", "=", "tempfile", ".", "mkstemp", "(", ")", "os", ".", "close", "(", "fd", ")", "self", ".", "fout", "=", "open", "(", "self", ".", "fout_fn", ",", "'w'", ")", "if", "class_attr_name", ":", "self", ".", "class_attr_name", "=", "class_attr_name", "self", ".", "write", "(", "fout", "=", "self", ".", "fout", ",", "schema_only", "=", "True", ")", "self", ".", "write", "(", "fout", "=", "self", ".", "fout", ",", "data_only", "=", "True", ")", "self", ".", "fout", ".", "flush", "(", ")" ]
Save an arff structure to a file, leaving the file object open for writing of new data samples. This prevents you from directly accessing the data via Python, but when generating a huge file, this prevents all your data from being stored in memory.
[ "Save", "an", "arff", "structure", "to", "a", "file", "leaving", "the", "file", "object", "open", "for", "writing", "of", "new", "data", "samples", ".", "This", "prevents", "you", "from", "directly", "accessing", "the", "data", "via", "Python", "but", "when", "generating", "a", "huge", "file", "this", "prevents", "all", "your", "data", "from", "being", "stored", "in", "memory", "." ]
python
train
wohlgejm/accountable
accountable/cli.py
https://github.com/wohlgejm/accountable/blob/20586365ccd319061e5548ce14fb0b8f449580fa/accountable/cli.py#L181-L188
def checkout(accountable, issue_key): """ Checkout a new branch or checkout to a branch for a given issue. """ issue = accountable.checkout(issue_key) headers = issue.keys() rows = [headers, [v for k, v in issue.items()]] print_table(SingleTable(rows))
[ "def", "checkout", "(", "accountable", ",", "issue_key", ")", ":", "issue", "=", "accountable", ".", "checkout", "(", "issue_key", ")", "headers", "=", "issue", ".", "keys", "(", ")", "rows", "=", "[", "headers", ",", "[", "v", "for", "k", ",", "v", "in", "issue", ".", "items", "(", ")", "]", "]", "print_table", "(", "SingleTable", "(", "rows", ")", ")" ]
Checkout a new branch or checkout to a branch for a given issue.
[ "Checkout", "a", "new", "branch", "or", "checkout", "to", "a", "branch", "for", "a", "given", "issue", "." ]
python
train
nefarioustim/parker
parker/redisset.py
https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/redisset.py#L51-L61
def add(self, value): """Add value to set.""" added = self.redis.sadd( self.key, value ) if self.redis.scard(self.key) < 2: self.redis.expire(self.key, self.expire) return added
[ "def", "add", "(", "self", ",", "value", ")", ":", "added", "=", "self", ".", "redis", ".", "sadd", "(", "self", ".", "key", ",", "value", ")", "if", "self", ".", "redis", ".", "scard", "(", "self", ".", "key", ")", "<", "2", ":", "self", ".", "redis", ".", "expire", "(", "self", ".", "key", ",", "self", ".", "expire", ")", "return", "added" ]
Add value to set.
[ "Add", "value", "to", "set", "." ]
python
train
ageitgey/face_recognition
examples/face_recognition_knn.py
https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/examples/face_recognition_knn.py#L111-L150
def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.6): """ Recognizes faces in given image using a trained KNN classifier :param X_img_path: path to image to be recognized :param knn_clf: (optional) a knn classifier object. if not specified, model_save_path must be specified. :param model_path: (optional) path to a pickled knn classifier. if not specified, model_save_path must be knn_clf. :param distance_threshold: (optional) distance threshold for face classification. the larger it is, the more chance of mis-classifying an unknown person as a known one. :return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...]. For faces of unrecognized persons, the name 'unknown' will be returned. """ if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS: raise Exception("Invalid image path: {}".format(X_img_path)) if knn_clf is None and model_path is None: raise Exception("Must supply knn classifier either thourgh knn_clf or model_path") # Load a trained KNN model (if one was passed in) if knn_clf is None: with open(model_path, 'rb') as f: knn_clf = pickle.load(f) # Load image file and find face locations X_img = face_recognition.load_image_file(X_img_path) X_face_locations = face_recognition.face_locations(X_img) # If no faces are found in the image, return an empty result. if len(X_face_locations) == 0: return [] # Find encodings for faces in the test iamge faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations) # Use the KNN model to find the best matches for the test face closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1) are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))] # Predict classes and remove classifications that aren't within the threshold return [(pred, loc) if rec else ("unknown", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]
[ "def", "predict", "(", "X_img_path", ",", "knn_clf", "=", "None", ",", "model_path", "=", "None", ",", "distance_threshold", "=", "0.6", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "X_img_path", ")", "or", "os", ".", "path", ".", "splitext", "(", "X_img_path", ")", "[", "1", "]", "[", "1", ":", "]", "not", "in", "ALLOWED_EXTENSIONS", ":", "raise", "Exception", "(", "\"Invalid image path: {}\"", ".", "format", "(", "X_img_path", ")", ")", "if", "knn_clf", "is", "None", "and", "model_path", "is", "None", ":", "raise", "Exception", "(", "\"Must supply knn classifier either thourgh knn_clf or model_path\"", ")", "# Load a trained KNN model (if one was passed in)", "if", "knn_clf", "is", "None", ":", "with", "open", "(", "model_path", ",", "'rb'", ")", "as", "f", ":", "knn_clf", "=", "pickle", ".", "load", "(", "f", ")", "# Load image file and find face locations", "X_img", "=", "face_recognition", ".", "load_image_file", "(", "X_img_path", ")", "X_face_locations", "=", "face_recognition", ".", "face_locations", "(", "X_img", ")", "# If no faces are found in the image, return an empty result.", "if", "len", "(", "X_face_locations", ")", "==", "0", ":", "return", "[", "]", "# Find encodings for faces in the test iamge", "faces_encodings", "=", "face_recognition", ".", "face_encodings", "(", "X_img", ",", "known_face_locations", "=", "X_face_locations", ")", "# Use the KNN model to find the best matches for the test face", "closest_distances", "=", "knn_clf", ".", "kneighbors", "(", "faces_encodings", ",", "n_neighbors", "=", "1", ")", "are_matches", "=", "[", "closest_distances", "[", "0", "]", "[", "i", "]", "[", "0", "]", "<=", "distance_threshold", "for", "i", "in", "range", "(", "len", "(", "X_face_locations", ")", ")", "]", "# Predict classes and remove classifications that aren't within the threshold", "return", "[", "(", "pred", ",", "loc", ")", "if", "rec", "else", "(", "\"unknown\"", ",", "loc", ")", "for", "pred", ",", "loc", ",", "rec", "in", "zip", "(", "knn_clf", ".", "predict", "(", "faces_encodings", ")", ",", "X_face_locations", ",", "are_matches", ")", "]" ]
Recognizes faces in given image using a trained KNN classifier :param X_img_path: path to image to be recognized :param knn_clf: (optional) a knn classifier object. if not specified, model_save_path must be specified. :param model_path: (optional) path to a pickled knn classifier. if not specified, model_save_path must be knn_clf. :param distance_threshold: (optional) distance threshold for face classification. the larger it is, the more chance of mis-classifying an unknown person as a known one. :return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...]. For faces of unrecognized persons, the name 'unknown' will be returned.
[ "Recognizes", "faces", "in", "given", "image", "using", "a", "trained", "KNN", "classifier" ]
python
train
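A hedged sketch of calling predict above, mirroring the example script it lives in; the image path and the pickled classifier name are hypothetical, with the classifier assumed to come from the companion training step:

predictions = predict("unknown_visitor.jpg", model_path="trained_knn_model.clf")
for name, (top, right, bottom, left) in predictions:
    print("Found {} at ({}, {})".format(name, left, top))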
tanghaibao/jcvi
jcvi/utils/orderedcollections.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/orderedcollections.py#L184-L197
def parse_qs(qs, keep_blank_values=0, strict_parsing=0, keep_attr_order=True): """ Kind of like urlparse.parse_qs, except returns an ordered dict. Also avoids replicating that function's bad habit of overriding the built-in 'dict' type. Taken from below with modification: <https://bitbucket.org/btubbs/thumpy/raw/8cdece404f15/thumpy.py> """ od = DefaultOrderedDict(list) if keep_attr_order else defaultdict(list) for name, value in parse_qsl(qs, keep_blank_values, strict_parsing): od[name].append(value) return od
[ "def", "parse_qs", "(", "qs", ",", "keep_blank_values", "=", "0", ",", "strict_parsing", "=", "0", ",", "keep_attr_order", "=", "True", ")", ":", "od", "=", "DefaultOrderedDict", "(", "list", ")", "if", "keep_attr_order", "else", "defaultdict", "(", "list", ")", "for", "name", ",", "value", "in", "parse_qsl", "(", "qs", ",", "keep_blank_values", ",", "strict_parsing", ")", ":", "od", "[", "name", "]", ".", "append", "(", "value", ")", "return", "od" ]
Kind of like urlparse.parse_qs, except returns an ordered dict. Also avoids replicating that function's bad habit of overriding the built-in 'dict' type. Taken from below with modification: <https://bitbucket.org/btubbs/thumpy/raw/8cdece404f15/thumpy.py>
[ "Kind", "of", "like", "urlparse", ".", "parse_qs", "except", "returns", "an", "ordered", "dict", ".", "Also", "avoids", "replicating", "that", "function", "s", "bad", "habit", "of", "overriding", "the", "built", "-", "in", "dict", "type", "." ]
python
train
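A worked call for parse_qs above; the query string is invented and the printed result follows from the ordered, multi-value accumulation in the function body:

from jcvi.utils.orderedcollections import parse_qs

od = parse_qs("name=gene1&score=0.9&name=gene2")
print(list(od.items()))  # [('name', ['gene1', 'gene2']), ('score', ['0.9'])]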
openclimatedata/pyhector
pyhector/__init__.py
https://github.com/openclimatedata/pyhector/blob/1649efcc0ed19dc66d8de8dec7a5105fa08df01a/pyhector/__init__.py#L47-L78
def set_value(self, section, variable, value): """ Set config input value directly, see :mod:`pyhector.emissions` for possible values. Parameters ---------- section : str Component in Hector config. variable : str Name of emissions variable. value : pandas.Series, list, tuple, float, or str Pandas Series, list of tuple values with time, list of tuple values with time and unit, single tuple, float or string as in ini config file. """ if isinstance(value, pd.Series): # values with time as Series values = list(zip(value.index, value)) for v in values: self._set_timed_double(section, variable, v[0], v[1]) elif isinstance(value, list): # values with time for v in value: if len(v) == 3: # timed value with unit self._set_timed_double_unit(section, variable, v[0], v[1], v[2]) else: # timed value without unit self._set_timed_double(section, variable, v[0], v[1]) elif isinstance(value, tuple): # value with unit self._set_double_unit(section, variable, value[0], value[1]) elif isinstance(value, str): # value is string self._set_string(section, variable, value) else: # value is only double self._set_double(section, variable, value)
[ "def", "set_value", "(", "self", ",", "section", ",", "variable", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "pd", ".", "Series", ")", ":", "# values with time as Series", "values", "=", "list", "(", "zip", "(", "value", ".", "index", ",", "value", ")", ")", "for", "v", "in", "values", ":", "self", ".", "_set_timed_double", "(", "section", ",", "variable", ",", "v", "[", "0", "]", ",", "v", "[", "1", "]", ")", "elif", "isinstance", "(", "value", ",", "list", ")", ":", "# values with time", "for", "v", "in", "value", ":", "if", "len", "(", "v", ")", "==", "3", ":", "# timed value with unit", "self", ".", "_set_timed_double_unit", "(", "section", ",", "variable", ",", "v", "[", "0", "]", ",", "v", "[", "1", "]", ",", "v", "[", "2", "]", ")", "else", ":", "# timed value without unit", "self", ".", "_set_timed_double", "(", "section", ",", "variable", ",", "v", "[", "0", "]", ",", "v", "[", "1", "]", ")", "elif", "isinstance", "(", "value", ",", "tuple", ")", ":", "# value with unit", "self", ".", "_set_double_unit", "(", "section", ",", "variable", ",", "value", "[", "0", "]", ",", "value", "[", "1", "]", ")", "elif", "isinstance", "(", "value", ",", "str", ")", ":", "# value is string", "self", ".", "_set_string", "(", "section", ",", "variable", ",", "value", ")", "else", ":", "# value is only double", "self", ".", "_set_double", "(", "section", ",", "variable", ",", "value", ")" ]
Set config input value directly, see :mod:`pyhector.emissions` for possible values. Parameters ---------- section : str Component in Hector config. variable : str Name of emissions variable. value : pandas.Series, list, tuple, float, or str Pandas Series, list of tuple values with time, list of tuple values with time and unit, single tuple, float or string as in ini config file.
[ "Set", "config", "input", "value", "directly", "see", ":", "mod", ":", "pyhector", ".", "emissions", "for", "possible", "values", "." ]
python
train
pyrogram/pyrogram
pyrogram/client/methods/messages/send_message.py
https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/client/methods/messages/send_message.py#L27-L123
def send_message( self, chat_id: Union[int, str], text: str, parse_mode: str = "", disable_web_page_preview: bool = None, disable_notification: bool = None, reply_to_message_id: int = None, reply_markup: Union[ "pyrogram.InlineKeyboardMarkup", "pyrogram.ReplyKeyboardMarkup", "pyrogram.ReplyKeyboardRemove", "pyrogram.ForceReply" ] = None ) -> "pyrogram.Message": """Use this method to send text messages. Args: chat_id (``int`` | ``str``): Unique identifier (int) or username (str) of the target chat. For your personal cloud (Saved Messages) you can simply use "me" or "self". For a contact that exists in your Telegram address book you can use his phone number (str). text (``str``): Text of the message to be sent. parse_mode (``str``, *optional*): Use :obj:`MARKDOWN <pyrogram.ParseMode.MARKDOWN>` or :obj:`HTML <pyrogram.ParseMode.HTML>` if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your message. Defaults to Markdown. disable_web_page_preview (``bool``, *optional*): Disables link previews for links in this message. disable_notification (``bool``, *optional*): Sends the message silently. Users will receive a notification with no sound. reply_to_message_id (``int``, *optional*): If the message is a reply, ID of the original message. reply_markup (:obj:`InlineKeyboardMarkup` | :obj:`ReplyKeyboardMarkup` | :obj:`ReplyKeyboardRemove` | :obj:`ForceReply`, *optional*): Additional interface options. An object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. Returns: On success, the sent :obj:`Message` is returned. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. """ style = self.html if parse_mode.lower() == "html" else self.markdown message, entities = style.parse(text).values() r = self.send( functions.messages.SendMessage( peer=self.resolve_peer(chat_id), no_webpage=disable_web_page_preview or None, silent=disable_notification or None, reply_to_msg_id=reply_to_message_id, random_id=self.rnd_id(), reply_markup=reply_markup.write() if reply_markup else None, message=message, entities=entities ) ) if isinstance(r, types.UpdateShortSentMessage): peer = self.resolve_peer(chat_id) peer_id = ( peer.user_id if isinstance(peer, types.InputPeerUser) else -peer.chat_id ) return pyrogram.Message( message_id=r.id, chat=pyrogram.Chat( id=peer_id, type="private", client=self ), text=message, date=r.date, outgoing=r.out, entities=entities, client=self ) for i in r.updates: if isinstance(i, (types.UpdateNewMessage, types.UpdateNewChannelMessage)): return pyrogram.Message._parse( self, i.message, {i.id: i for i in r.users}, {i.id: i for i in r.chats} )
[ "def", "send_message", "(", "self", ",", "chat_id", ":", "Union", "[", "int", ",", "str", "]", ",", "text", ":", "str", ",", "parse_mode", ":", "str", "=", "\"\"", ",", "disable_web_page_preview", ":", "bool", "=", "None", ",", "disable_notification", ":", "bool", "=", "None", ",", "reply_to_message_id", ":", "int", "=", "None", ",", "reply_markup", ":", "Union", "[", "\"pyrogram.InlineKeyboardMarkup\"", ",", "\"pyrogram.ReplyKeyboardMarkup\"", ",", "\"pyrogram.ReplyKeyboardRemove\"", ",", "\"pyrogram.ForceReply\"", "]", "=", "None", ")", "->", "\"pyrogram.Message\"", ":", "style", "=", "self", ".", "html", "if", "parse_mode", ".", "lower", "(", ")", "==", "\"html\"", "else", "self", ".", "markdown", "message", ",", "entities", "=", "style", ".", "parse", "(", "text", ")", ".", "values", "(", ")", "r", "=", "self", ".", "send", "(", "functions", ".", "messages", ".", "SendMessage", "(", "peer", "=", "self", ".", "resolve_peer", "(", "chat_id", ")", ",", "no_webpage", "=", "disable_web_page_preview", "or", "None", ",", "silent", "=", "disable_notification", "or", "None", ",", "reply_to_msg_id", "=", "reply_to_message_id", ",", "random_id", "=", "self", ".", "rnd_id", "(", ")", ",", "reply_markup", "=", "reply_markup", ".", "write", "(", ")", "if", "reply_markup", "else", "None", ",", "message", "=", "message", ",", "entities", "=", "entities", ")", ")", "if", "isinstance", "(", "r", ",", "types", ".", "UpdateShortSentMessage", ")", ":", "peer", "=", "self", ".", "resolve_peer", "(", "chat_id", ")", "peer_id", "=", "(", "peer", ".", "user_id", "if", "isinstance", "(", "peer", ",", "types", ".", "InputPeerUser", ")", "else", "-", "peer", ".", "chat_id", ")", "return", "pyrogram", ".", "Message", "(", "message_id", "=", "r", ".", "id", ",", "chat", "=", "pyrogram", ".", "Chat", "(", "id", "=", "peer_id", ",", "type", "=", "\"private\"", ",", "client", "=", "self", ")", ",", "text", "=", "message", ",", "date", "=", "r", ".", "date", ",", "outgoing", "=", "r", ".", "out", ",", "entities", "=", "entities", ",", "client", "=", "self", ")", "for", "i", "in", "r", ".", "updates", ":", "if", "isinstance", "(", "i", ",", "(", "types", ".", "UpdateNewMessage", ",", "types", ".", "UpdateNewChannelMessage", ")", ")", ":", "return", "pyrogram", ".", "Message", ".", "_parse", "(", "self", ",", "i", ".", "message", ",", "{", "i", ".", "id", ":", "i", "for", "i", "in", "r", ".", "users", "}", ",", "{", "i", ".", "id", ":", "i", "for", "i", "in", "r", ".", "chats", "}", ")" ]
Use this method to send text messages. Args: chat_id (``int`` | ``str``): Unique identifier (int) or username (str) of the target chat. For your personal cloud (Saved Messages) you can simply use "me" or "self". For a contact that exists in your Telegram address book you can use his phone number (str). text (``str``): Text of the message to be sent. parse_mode (``str``, *optional*): Use :obj:`MARKDOWN <pyrogram.ParseMode.MARKDOWN>` or :obj:`HTML <pyrogram.ParseMode.HTML>` if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your message. Defaults to Markdown. disable_web_page_preview (``bool``, *optional*): Disables link previews for links in this message. disable_notification (``bool``, *optional*): Sends the message silently. Users will receive a notification with no sound. reply_to_message_id (``int``, *optional*): If the message is a reply, ID of the original message. reply_markup (:obj:`InlineKeyboardMarkup` | :obj:`ReplyKeyboardMarkup` | :obj:`ReplyKeyboardRemove` | :obj:`ForceReply`, *optional*): Additional interface options. An object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. Returns: On success, the sent :obj:`Message` is returned. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
[ "Use", "this", "method", "to", "send", "text", "messages", "." ]
python
train
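A hedged usage sketch for send_message above; the session name is hypothetical and valid API credentials are assumed to be configured for the client:

from pyrogram import Client

app = Client("my_account")  # hypothetical session name
app.start()
app.send_message("me", "**Hello**, world!")  # Markdown is the default parse mode
app.send_message("me", "plain text, no preview", disable_web_page_preview=True)
app.stop()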
Fizzadar/pyinfra
pyinfra/modules/apt.py
https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/apt.py#L230-L279
def packages(
    state, host,
    packages=None, present=True, latest=False,
    update=False, cache_time=None, upgrade=False,
    force=False, no_recommends=False,
    allow_downgrades=False,
):
    '''
    Install/remove/update packages & update apt.

    + packages: list of packages to ensure
    + present: whether the packages should be installed
    + latest: whether to upgrade packages without a specified version
    + update: run apt update
    + cache_time: when used with update, cache for this many seconds
    + upgrade: run apt upgrade
    + force: whether to force package installs by passing `--force-yes` to apt
    + no_recommends: don't install recommended packages
    + allow_downgrades: allow downgrading packages with version (--allow-downgrades)

    Versions:
        Package versions can be pinned like apt: ``<pkg>=<version>``

    Cache time:
        When ``cache_time`` is set the ``/var/lib/apt/periodic/update-success-stamp`` file
        is touched upon successful update. Some distros already do this (Ubuntu), but others
        simply leave the periodic directory empty (Debian).
    '''

    if update:
        yield _update(state, host, cache_time=cache_time)

    if upgrade:
        yield _upgrade(state, host)

    install_command = 'install'

    if no_recommends is True:
        install_command += ' --no-install-recommends'

    if allow_downgrades:
        install_command += ' --allow-downgrades'

    # Compare/ensure packages are present/not
    yield ensure_packages(
        packages, host.fact.deb_packages, present,
        install_command=noninteractive_apt(install_command, force=force),
        uninstall_command=noninteractive_apt('remove', force=force),
        upgrade_command=noninteractive_apt(install_command, force=force),
        version_join='=',
        latest=latest,
    )
[ "def", "packages", "(", "state", ",", "host", ",", "packages", "=", "None", ",", "present", "=", "True", ",", "latest", "=", "False", ",", "update", "=", "False", ",", "cache_time", "=", "None", ",", "upgrade", "=", "False", ",", "force", "=", "False", ",", "no_recommends", "=", "False", ",", "allow_downgrades", "=", "False", ",", ")", ":", "if", "update", ":", "yield", "_update", "(", "state", ",", "host", ",", "cache_time", "=", "cache_time", ")", "if", "upgrade", ":", "yield", "_upgrade", "(", "state", ",", "host", ")", "install_command", "=", "'install'", "if", "no_recommends", "is", "True", ":", "install_command", "+=", "' --no-install-recommends'", "if", "allow_downgrades", ":", "install_command", "+=", "' --allow-downgrades'", "# Compare/ensure packages are present/not", "yield", "ensure_packages", "(", "packages", ",", "host", ".", "fact", ".", "deb_packages", ",", "present", ",", "install_command", "=", "noninteractive_apt", "(", "install_command", ",", "force", "=", "force", ")", ",", "uninstall_command", "=", "noninteractive_apt", "(", "'remove'", ",", "force", "=", "force", ")", ",", "upgrade_command", "=", "noninteractive_apt", "(", "install_command", ",", "force", "=", "force", ")", ",", "version_join", "=", "'='", ",", "latest", "=", "latest", ",", ")" ]
Install/remove/update packages & update apt.

    + packages: list of packages to ensure
    + present: whether the packages should be installed
    + latest: whether to upgrade packages without a specified version
    + update: run apt update
    + cache_time: when used with update, cache for this many seconds
    + upgrade: run apt upgrade
    + force: whether to force package installs by passing `--force-yes` to apt
    + no_recommends: don't install recommended packages
    + allow_downgrades: allow downgrading packages with version (--allow-downgrades)

    Versions:
        Package versions can be pinned like apt: ``<pkg>=<version>``

    Cache time:
        When ``cache_time`` is set the ``/var/lib/apt/periodic/update-success-stamp`` file
        is touched upon successful update. Some distros already do this (Ubuntu), but others
        simply leave the periodic directory empty (Debian).
[ "Install", "/", "remove", "/", "update", "packages", "&", "update", "apt", "." ]
python
train
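The record above is a pyinfra operation, so it is normally invoked from a deploy file and run through the pyinfra CLI rather than called with ``state`` and ``host`` directly; the decorator machinery injects those arguments. A minimal deploy sketch follows; the package names, the pinned version string, and the file name deploy.py are illustrative assumptions:

# deploy.py -- a sketch, assuming the pyinfra 0.x module layout shown in the record
from pyinfra.modules import apt

apt.packages(
    packages=['nginx', 'git=2.20.1'],  # hypothetical pin, apt-style <pkg>=<version>
    update=True,                       # run apt update first
    cache_time=3600,                   # skip the update if one succeeded within the last hour
    no_recommends=True,                # pass --no-install-recommends to apt
)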