Dataset schema (one record per example; string lengths are min-max):

  repo              string, 7-54 chars
  path              string, 4-192 chars
  url               string, 87-284 chars
  code              string, 78-104k chars
  code_tokens       list of strings
  docstring         string, 1-46.9k chars
  docstring_tokens  list of strings
  language          categorical string, 1 distinct value (python)
  partition         categorical string, 3 distinct values (train / valid / test)
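This layout matches a CodeSearchNet-style corpus of Python functions paired with their docstrings. As a minimal sketch, assuming the dump is exported as JSON Lines with exactly these field names (the file name below is hypothetical), the records can be iterated with the standard library alone:

import json

# Hypothetical export path; one JSON object per line, fields as in the schema.
with open("python_corpus.jsonl", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        # Every record pairs a function's source with its docstring.
        print(record["repo"], record["path"], record["partition"])
        assert record["language"] == "python"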
Jammy2211/PyAutoLens
autolens/data/ccd.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/ccd.py#L656-L677
def convolve(self, array):
    """
    Convolve an array with this PSF

    Parameters
    ----------
    array : ndarray
        An array representing the image the PSF is convolved with.

    Returns
    -------
    convolved_image : ndarray
        An array representing the image after convolution.

    Raises
    ------
    KernelException if either PSF dimension is even
    """
    if self.shape[0] % 2 == 0 or self.shape[1] % 2 == 0:
        raise exc.KernelException("PSF Kernel must be odd")
    return scipy.signal.convolve2d(array, self, mode='same')
[ "def", "convolve", "(", "self", ",", "array", ")", ":", "if", "self", ".", "shape", "[", "0", "]", "%", "2", "==", "0", "or", "self", ".", "shape", "[", "1", "]", "%", "2", "==", "0", ":", "raise", "exc", ".", "KernelException", "(", "\"PSF Kernel must be odd\"", ")", "return", "scipy", ".", "signal", ".", "convolve2d", "(", "array", ",", "self", ",", "mode", "=", "'same'", ")" ]
Convolve an array with this PSF Parameters ---------- array : ndarray An array representing the image the PSF is convolved with. Returns ------- convolved_image : ndarray An array representing the image after convolution. Raises ------ KernelException if either PSF dimension is even
[ "Convolve", "an", "array", "with", "this", "PSF" ]
python
valid
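The convolve method above is a thin wrapper around scipy.signal.convolve2d. A standalone sketch of the same call, using a 3x3 averaging kernel (odd-sized, as the record's guard requires):

import numpy as np
import scipy.signal

image = np.arange(25, dtype=float).reshape(5, 5)
kernel = np.full((3, 3), 1.0 / 9.0)  # odd dimensions pass the record's check

blurred = scipy.signal.convolve2d(image, kernel, mode='same')
print(blurred.shape)  # (5, 5); mode='same' preserves the image shape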
dswah/pyGAM
pygam/utils.py
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/utils.py#L402-L417
def get_link_domain(link, dist):
    """
    tool to identify the domain of a given monotonic link function

    Parameters
    ----------
    link : Link object
    dist : Distribution object

    Returns
    -------
    domain : list of length 2, representing the interval of the domain.
    """
    domain = np.array([-np.inf, -1, 0, 1, np.inf])
    domain = domain[~np.isnan(link.link(domain, dist))]
    return [domain[0], domain[-1]]
[ "def", "get_link_domain", "(", "link", ",", "dist", ")", ":", "domain", "=", "np", ".", "array", "(", "[", "-", "np", ".", "inf", ",", "-", "1", ",", "0", ",", "1", ",", "np", ".", "inf", "]", ")", "domain", "=", "domain", "[", "~", "np", ".", "isnan", "(", "link", ".", "link", "(", "domain", ",", "dist", ")", ")", "]", "return", "[", "domain", "[", "0", "]", ",", "domain", "[", "-", "1", "]", "]" ]
tool to identify the domain of a given monotonic link function Parameters ---------- link : Link object dist : Distribution object Returns ------- domain : list of length 2, representing the interval of the domain.
[ "tool", "to", "identify", "the", "domain", "of", "a", "given", "monotonic", "link", "function" ]
python
train
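get_link_domain works by probing a handful of sentinel points and keeping the extremes where the link is defined. The same idea can be shown without pyGAM, substituting numpy's log for the Link object (an assumption for illustration only):

import numpy as np

probes = np.array([-np.inf, -1.0, 0.0, 1.0, np.inf])
with np.errstate(divide='ignore', invalid='ignore'):
    values = np.log(probes)  # undefined (NaN) for negative inputs
defined = probes[~np.isnan(values)]  # [0., 1., inf]
print([defined[0], defined[-1]])  # domain endpoints: [0.0, inf]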
sci-bots/pygtkhelpers
pygtkhelpers/schema.py
https://github.com/sci-bots/pygtkhelpers/blob/3a6e6d6340221c686229cd1c951d7537dae81b07/pygtkhelpers/schema.py#L57-L87
def flatten_dict(root, parents=None, sep='.'):
    '''
    Args:

        root (dict) : Nested dictionary (e.g., JSON object).
        parents (list) : List of ancestor keys.

    Returns
    -------
    list
        List of ``(key, value)`` tuples, where ``key`` corresponds to the
        ancestor keys of the respective value joined by ``'.'``.

        For example, for the item in the dictionary
        ``{'a': {'b': {'c': 'foo'}}}``, the joined key would be ``'a.b.c'``.

    See also :func:`expand_items`.
    '''
    if parents is None:
        parents = []
    result = []
    for i, (k, v) in enumerate(root.iteritems()):
        parents_i = parents + [k]
        key_i = sep.join(parents_i)
        if isinstance(v, dict):
            value_i = flatten_dict(v, parents=parents_i, sep=sep)
            result.extend(value_i)
        else:
            value_i = v
            result.append((key_i, value_i))
    return result
[ "def", "flatten_dict", "(", "root", ",", "parents", "=", "None", ",", "sep", "=", "'.'", ")", ":", "if", "parents", "is", "None", ":", "parents", "=", "[", "]", "result", "=", "[", "]", "for", "i", ",", "(", "k", ",", "v", ")", "in", "enumerate", "(", "root", ".", "iteritems", "(", ")", ")", ":", "parents_i", "=", "parents", "+", "[", "k", "]", "key_i", "=", "sep", ".", "join", "(", "parents_i", ")", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "value_i", "=", "flatten_dict", "(", "v", ",", "parents", "=", "parents_i", ",", "sep", "=", "sep", ")", "result", ".", "extend", "(", "value_i", ")", "else", ":", "value_i", "=", "v", "result", ".", "append", "(", "(", "key_i", ",", "value_i", ")", ")", "return", "result" ]
Args: root (dict) : Nested dictionary (e.g., JSON object). parents (list) : List of ancestor keys. Returns ------- list List of ``(key, value)`` tuples, where ``key`` corresponds to the ancestor keys of the respective value joined by ``'.'``. For example, for the item in the dictionary ``{'a': {'b': {'c': 'foo'}}}``, the joined key would be ``'a.b.c'``. See also :func:`expand_items`.
[ "Args", ":" ]
python
train
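The record targets Python 2 (dict.iteritems, plus an enumerate whose index is never used). A minimal Python 3 sketch of the same recursion, under the hypothetical name flatten_dict_py3:

def flatten_dict_py3(root, parents=None, sep='.'):
    # Flatten {'a': {'b': {'c': 'foo'}}} into [('a.b.c', 'foo')].
    parents = parents or []
    result = []
    for k, v in root.items():
        parents_k = parents + [k]
        if isinstance(v, dict):
            result.extend(flatten_dict_py3(v, parents=parents_k, sep=sep))
        else:
            result.append((sep.join(parents_k), v))
    return result

print(flatten_dict_py3({'a': {'b': {'c': 'foo'}}}))  # [('a.b.c', 'foo')]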
Cognexa/cxflow
cxflow/hooks/show_progress.py
https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/hooks/show_progress.py#L134-L144
def after_epoch(self, **_) -> None:
    """
    Reset progress counters.
    Save ``total_batch_count`` after the 1st epoch.
    """
    if not self._total_batch_count_saved:
        self._total_batch_count = self._current_batch_count.copy()
        self._total_batch_count_saved = True
    self._current_batch_count.clear()
    self._current_stream_start = None
    self._current_stream_name = None
    erase_line()
[ "def", "after_epoch", "(", "self", ",", "*", "*", "_", ")", "->", "None", ":", "if", "not", "self", ".", "_total_batch_count_saved", ":", "self", ".", "_total_batch_count", "=", "self", ".", "_current_batch_count", ".", "copy", "(", ")", "self", ".", "_total_batch_count_saved", "=", "True", "self", ".", "_current_batch_count", ".", "clear", "(", ")", "self", ".", "_current_stream_start", "=", "None", "self", ".", "_current_stream_name", "=", "None", "erase_line", "(", ")" ]
Reset progress counters. Save ``total_batch_count`` after the 1st epoch.
[ "Reset", "progress", "counters", ".", "Save", "total_batch_count", "after", "the", "1st", "epoch", "." ]
python
train
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/records.py
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/records.py#L239-L278
def __try_read_record(self):
    """Try reading a record.

    Returns:
      (data, record_type) tuple.

    Raises:
      EOFError: when end of file was reached.
      InvalidRecordError: when valid record could not be read.
    """
    block_remaining = _BLOCK_SIZE - self.__reader.tell() % _BLOCK_SIZE
    if block_remaining < _HEADER_LENGTH:
        return ('', _RECORD_TYPE_NONE)

    header = self.__reader.read(_HEADER_LENGTH)
    if len(header) != _HEADER_LENGTH:
        raise EOFError('Read %s bytes instead of %s' %
                       (len(header), _HEADER_LENGTH))

    (masked_crc, length, record_type) = struct.unpack(_HEADER_FORMAT, header)
    crc = _unmask_crc(masked_crc)

    if length + _HEADER_LENGTH > block_remaining:
        # A record can't be bigger than one block.
        raise errors.InvalidRecordError('Length is too big')

    data = self.__reader.read(length)
    if len(data) != length:
        raise EOFError('Not enough data read. Expected: %s but got %s' %
                       (length, len(data)))

    if record_type == _RECORD_TYPE_NONE:
        return ('', record_type)

    actual_crc = crc32c.crc_update(crc32c.CRC_INIT, [record_type])
    actual_crc = crc32c.crc_update(actual_crc, data)
    actual_crc = crc32c.crc_finalize(actual_crc)

    if actual_crc != crc:
        raise errors.InvalidRecordError('Data crc does not match')

    return (data, record_type)
[ "def", "__try_read_record", "(", "self", ")", ":", "block_remaining", "=", "_BLOCK_SIZE", "-", "self", ".", "__reader", ".", "tell", "(", ")", "%", "_BLOCK_SIZE", "if", "block_remaining", "<", "_HEADER_LENGTH", ":", "return", "(", "''", ",", "_RECORD_TYPE_NONE", ")", "header", "=", "self", ".", "__reader", ".", "read", "(", "_HEADER_LENGTH", ")", "if", "len", "(", "header", ")", "!=", "_HEADER_LENGTH", ":", "raise", "EOFError", "(", "'Read %s bytes instead of %s'", "%", "(", "len", "(", "header", ")", ",", "_HEADER_LENGTH", ")", ")", "(", "masked_crc", ",", "length", ",", "record_type", ")", "=", "struct", ".", "unpack", "(", "_HEADER_FORMAT", ",", "header", ")", "crc", "=", "_unmask_crc", "(", "masked_crc", ")", "if", "length", "+", "_HEADER_LENGTH", ">", "block_remaining", ":", "# A record can't be bigger than one block.", "raise", "errors", ".", "InvalidRecordError", "(", "'Length is too big'", ")", "data", "=", "self", ".", "__reader", ".", "read", "(", "length", ")", "if", "len", "(", "data", ")", "!=", "length", ":", "raise", "EOFError", "(", "'Not enough data read. Expected: %s but got %s'", "%", "(", "length", ",", "len", "(", "data", ")", ")", ")", "if", "record_type", "==", "_RECORD_TYPE_NONE", ":", "return", "(", "''", ",", "record_type", ")", "actual_crc", "=", "crc32c", ".", "crc_update", "(", "crc32c", ".", "CRC_INIT", ",", "[", "record_type", "]", ")", "actual_crc", "=", "crc32c", ".", "crc_update", "(", "actual_crc", ",", "data", ")", "actual_crc", "=", "crc32c", ".", "crc_finalize", "(", "actual_crc", ")", "if", "actual_crc", "!=", "crc", ":", "raise", "errors", ".", "InvalidRecordError", "(", "'Data crc does not match'", ")", "return", "(", "data", ",", "record_type", ")" ]
Try reading a record. Returns: (data, record_type) tuple. Raises: EOFError: when end of file was reached. InvalidRecordError: when valid record could not be read.
[ "Try", "reading", "a", "record", "." ]
python
train
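_BLOCK_SIZE, _HEADER_FORMAT and _HEADER_LENGTH are defined elsewhere in records.py. Assuming the leveldb-style log layout this module implements (a little-endian header of 4-byte masked CRC, 2-byte length and 1-byte record type in 32 KiB blocks; that layout is an assumption here, not shown in the record), a standalone sketch of packing and parsing one header:

import struct

_HEADER_FORMAT = '<IHB'  # assumed: uint32 masked crc, uint16 length, uint8 type
_HEADER_LENGTH = struct.calcsize(_HEADER_FORMAT)  # 7 bytes

header = struct.pack(_HEADER_FORMAT, 0xDEADBEEF, 5, 1)
masked_crc, length, record_type = struct.unpack(_HEADER_FORMAT, header)
print(masked_crc == 0xDEADBEEF, length, record_type)  # True 5 1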
Jajcus/pyxmpp2
pyxmpp2/mainloop/glib.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/mainloop/glib.py#L206-L214
def _add_timeout_handler(self, handler):
    """Add a `TimeoutHandler` to the main loop."""
    # pylint: disable=W0212
    for dummy, method in inspect.getmembers(handler, callable):
        if not hasattr(method, "_pyxmpp_timeout"):
            continue
        tag = glib.timeout_add(int(method._pyxmpp_timeout * 1000),
                               self._timeout_cb, method)
        self._timer_sources[method] = tag
[ "def", "_add_timeout_handler", "(", "self", ",", "handler", ")", ":", "# pylint: disable=W0212", "for", "dummy", ",", "method", "in", "inspect", ".", "getmembers", "(", "handler", ",", "callable", ")", ":", "if", "not", "hasattr", "(", "method", ",", "\"_pyxmpp_timeout\"", ")", ":", "continue", "tag", "=", "glib", ".", "timeout_add", "(", "int", "(", "method", ".", "_pyxmpp_timeout", "*", "1000", ")", ",", "self", ".", "_timeout_cb", ",", "method", ")", "self", ".", "_timer_sources", "[", "method", "]", "=", "tag" ]
Add a `TimeoutHandler` to the main loop.
[ "Add", "a", "TimeoutHandler", "to", "the", "main", "loop", "." ]
python
valid
intel-analytics/BigDL
pyspark/bigdl/nn/layer.py
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L177-L185
def from_jvalue(jvalue, bigdl_type="float"):
    """
    Create a Python Model based on the given Java value
    :param jvalue: Java object created by Py4j
    :return: A Python Model
    """
    model = Layer(jvalue=jvalue, bigdl_type=bigdl_type)
    model.value = jvalue
    return model
[ "def", "from_jvalue", "(", "jvalue", ",", "bigdl_type", "=", "\"float\"", ")", ":", "model", "=", "Layer", "(", "jvalue", "=", "jvalue", ",", "bigdl_type", "=", "bigdl_type", ")", "model", ".", "value", "=", "jvalue", "return", "model" ]
Create a Python Model based on the given Java value :param jvalue: Java object created by Py4j :return: A Python Model
[ "Create", "a", "Python", "Model", "base", "on", "the", "given", "java", "value", ":", "param", "jvalue", ":", "Java", "object", "create", "by", "Py4j", ":", "return", ":", "A", "Python", "Model" ]
python
test
thumbor/thumbor
thumbor/transformer.py
https://github.com/thumbor/thumbor/blob/558ccdd6e3bc29e1c9ee3687372c4b3eb05ac607/thumbor/transformer.py#L219-L233
def do_image_operations(self):
    """
    If ENGINE_THREADPOOL_SIZE > 0, this will schedule the image operations
    into a threadpool. If not, it just executes them synchronously, and
    calls self.done_callback when it's finished.

    The actual work happens in self.img_operation_worker
    """
    def inner(future):
        self.done_callback()

    self.context.thread_pool.queue(
        operation=self.img_operation_worker,
        callback=inner
    )
[ "def", "do_image_operations", "(", "self", ")", ":", "def", "inner", "(", "future", ")", ":", "self", ".", "done_callback", "(", ")", "self", ".", "context", ".", "thread_pool", ".", "queue", "(", "operation", "=", "self", ".", "img_operation_worker", ",", "callback", "=", "inner", ")" ]
If ENGINE_THREADPOOL_SIZE > 0, this will schedule the image operations into a threadpool. If not, it just executes them synchronously, and calls self.done_callback when it's finished. The actual work happens in self.img_operation_worker
[ "If", "ENGINE_THREADPOOL_SIZE", ">", "0", "this", "will", "schedule", "the", "image", "operations", "into", "a", "threadpool", ".", "If", "not", "it", "just", "executes", "them", "synchronously", "and", "calls", "self", ".", "done_callback", "when", "it", "s", "finished", "." ]
python
train
dhermes/bezier
src/bezier/surface.py
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/surface.py#L600-L658
def evaluate_cartesian_multi(self, param_vals, _verify=True):
    r"""Compute multiple points on the surface.

    Assumes ``param_vals`` has two columns of Cartesian coordinates.
    See :meth:`evaluate_cartesian` for more details on how each row of
    parameter values is evaluated.

    .. image:: ../../images/surface_evaluate_cartesian_multi.png
       :align: center

    .. doctest:: surface-eval-multi1
       :options: +NORMALIZE_WHITESPACE

       >>> nodes = np.asfortranarray([
       ...     [0.0, 2.0, -3.0],
       ...     [0.0, 1.0, 2.0],
       ... ])
       >>> surface = bezier.Surface(nodes, degree=1)
       >>> surface
       <Surface (degree=1, dimension=2)>
       >>> param_vals = np.asfortranarray([
       ...     [0.0  , 0.0  ],
       ...     [0.125, 0.625],
       ...     [0.5  , 0.5  ],
       ... ])
       >>> points = surface.evaluate_cartesian_multi(param_vals)
       >>> points
       array([[ 0.   , -1.625, -0.5  ],
              [ 0.   ,  1.375,  1.5  ]])

    .. testcleanup:: surface-eval-multi1

       import make_images
       make_images.surface_evaluate_cartesian_multi(surface, points)

    Args:
        param_vals (numpy.ndarray): Array of parameter values (as a
            ``N x 2`` array).
        _verify (Optional[bool]): Indicates if the coordinates should be
            verified. See :meth:`evaluate_cartesian`. Defaults to
            :data:`True`. Will also double check that ``param_vals`` is
            the right shape.

    Returns:
        numpy.ndarray: The points on the surface.

    Raises:
        ValueError: If ``param_vals`` is not a 2D array and
            ``_verify=True``.
    """
    if _verify:
        if param_vals.ndim != 2:
            raise ValueError("Parameter values must be 2D array")
        for s, t in param_vals:
            self._verify_cartesian(s, t)

    return _surface_helpers.evaluate_cartesian_multi(
        self._nodes, self._degree, param_vals, self._dimension
    )
[ "def", "evaluate_cartesian_multi", "(", "self", ",", "param_vals", ",", "_verify", "=", "True", ")", ":", "if", "_verify", ":", "if", "param_vals", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "\"Parameter values must be 2D array\"", ")", "for", "s", ",", "t", "in", "param_vals", ":", "self", ".", "_verify_cartesian", "(", "s", ",", "t", ")", "return", "_surface_helpers", ".", "evaluate_cartesian_multi", "(", "self", ".", "_nodes", ",", "self", ".", "_degree", ",", "param_vals", ",", "self", ".", "_dimension", ")" ]
r"""Compute multiple points on the surface. Assumes ``param_vals`` has two columns of Cartesian coordinates. See :meth:`evaluate_cartesian` for more details on how each row of parameter values is evaluated. .. image:: ../../images/surface_evaluate_cartesian_multi.png :align: center .. doctest:: surface-eval-multi1 :options: +NORMALIZE_WHITESPACE >>> nodes = np.asfortranarray([ ... [0.0, 2.0, -3.0], ... [0.0, 1.0, 2.0], ... ]) >>> surface = bezier.Surface(nodes, degree=1) >>> surface <Surface (degree=1, dimension=2)> >>> param_vals = np.asfortranarray([ ... [0.0 , 0.0 ], ... [0.125, 0.625], ... [0.5 , 0.5 ], ... ]) >>> points = surface.evaluate_cartesian_multi(param_vals) >>> points array([[ 0. , -1.625, -0.5 ], [ 0. , 1.375, 1.5 ]]) .. testcleanup:: surface-eval-multi1 import make_images make_images.surface_evaluate_cartesian_multi(surface, points) Args: param_vals (numpy.ndarray): Array of parameter values (as a ``N x 2`` array). _verify (Optional[bool]): Indicates if the coordinates should be verified. See :meth:`evaluate_cartesian`. Defaults to :data:`True`. Will also double check that ``param_vals`` is the right shape. Returns: numpy.ndarray: The points on the surface. Raises: ValueError: If ``param_vals`` is not a 2D array and ``_verify=True``.
[ "r", "Compute", "multiple", "points", "on", "the", "surface", "." ]
python
train
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L500-L524
def _point_plot_defaults(self, args, kwargs):
    """To avoid confusion for new users, this ensures that "scattered"
    points are plotted by `plot` instead of points joined by a line.

    Parameters
    ----------
    args : tuple
        Arguments representing additional parameters to be passed to
        `self.plot`.
    kwargs : dict
        Keyword arguments representing additional parameters to be passed
        to `self.plot`.

    Returns
    -------
    Modified versions of `args` and `kwargs`.
    """
    if args:
        return args, kwargs

    if 'ls' not in kwargs and 'linestyle' not in kwargs:
        kwargs['linestyle'] = 'none'
    if 'marker' not in kwargs:
        kwargs['marker'] = 'o'
    return args, kwargs
[ "def", "_point_plot_defaults", "(", "self", ",", "args", ",", "kwargs", ")", ":", "if", "args", ":", "return", "args", ",", "kwargs", "if", "'ls'", "not", "in", "kwargs", "and", "'linestyle'", "not", "in", "kwargs", ":", "kwargs", "[", "'linestyle'", "]", "=", "'none'", "if", "'marker'", "not", "in", "kwargs", ":", "kwargs", "[", "'marker'", "]", "=", "'o'", "return", "args", ",", "kwargs" ]
To avoid confusion for new users, this ensures that "scattered" points are plotted by `plot` instead of points joined by a line. Parameters ---------- args : tuple Arguments representing additional parameters to be passed to `self.plot`. kwargs : dict Keyword arguments representing additional parameters to be passed to `self.plot`. Returns ------- Modified versions of `args` and `kwargs`.
[ "To", "avoid", "confusion", "for", "new", "users", "this", "ensures", "that", "scattered", "points", "are", "plotted", "by", "by", "plot", "instead", "of", "points", "joined", "by", "a", "line", "." ]
python
train
materialsproject/pymatgen
pymatgen/io/feff/inputs.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/feff/inputs.py#L558-L603
def from_file(filename="feff.inp"):
    """
    Creates a Feff_tag dictionary from a PARAMETER or feff.inp file.

    Args:
        filename: Filename for either PARAMETER or feff.inp file

    Returns:
        Feff_tag object
    """
    with zopen(filename, "rt") as f:
        lines = list(clean_lines(f.readlines()))
    params = {}
    eels_params = []
    ieels = -1
    ieels_max = -1
    for i, line in enumerate(lines):
        m = re.match(r"([A-Z]+\d*\d*)\s*(.*)", line)
        if m:
            key = m.group(1).strip()
            val = m.group(2).strip()
            val = Tags.proc_val(key, val)
            if key not in ("ATOMS", "POTENTIALS", "END", "TITLE"):
                if key in ["ELNES", "EXELFS"]:
                    ieels = i
                    ieels_max = ieels + 5
                else:
                    params[key] = val
        if ieels >= 0:
            if i >= ieels and i <= ieels_max:
                if i == ieels + 1:
                    if int(line.split()[1]) == 1:
                        ieels_max -= 1
                eels_params.append(line)

    if eels_params:
        if len(eels_params) == 6:
            eels_keys = ['BEAM_ENERGY', 'BEAM_DIRECTION', 'ANGLES', 'MESH', 'POSITION']
        else:
            eels_keys = ['BEAM_ENERGY', 'ANGLES', 'MESH', 'POSITION']
        eels_dict = {"ENERGY": Tags._stringify_val(eels_params[0].split()[1:])}
        for k, v in zip(eels_keys, eels_params[1:]):
            eels_dict[k] = str(v)
        params[str(eels_params[0].split()[0])] = eels_dict

    return Tags(params)
[ "def", "from_file", "(", "filename", "=", "\"feff.inp\"", ")", ":", "with", "zopen", "(", "filename", ",", "\"rt\"", ")", "as", "f", ":", "lines", "=", "list", "(", "clean_lines", "(", "f", ".", "readlines", "(", ")", ")", ")", "params", "=", "{", "}", "eels_params", "=", "[", "]", "ieels", "=", "-", "1", "ieels_max", "=", "-", "1", "for", "i", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "m", "=", "re", ".", "match", "(", "r\"([A-Z]+\\d*\\d*)\\s*(.*)\"", ",", "line", ")", "if", "m", ":", "key", "=", "m", ".", "group", "(", "1", ")", ".", "strip", "(", ")", "val", "=", "m", ".", "group", "(", "2", ")", ".", "strip", "(", ")", "val", "=", "Tags", ".", "proc_val", "(", "key", ",", "val", ")", "if", "key", "not", "in", "(", "\"ATOMS\"", ",", "\"POTENTIALS\"", ",", "\"END\"", ",", "\"TITLE\"", ")", ":", "if", "key", "in", "[", "\"ELNES\"", ",", "\"EXELFS\"", "]", ":", "ieels", "=", "i", "ieels_max", "=", "ieels", "+", "5", "else", ":", "params", "[", "key", "]", "=", "val", "if", "ieels", ">=", "0", ":", "if", "i", ">=", "ieels", "and", "i", "<=", "ieels_max", ":", "if", "i", "==", "ieels", "+", "1", ":", "if", "int", "(", "line", ".", "split", "(", ")", "[", "1", "]", ")", "==", "1", ":", "ieels_max", "-=", "1", "eels_params", ".", "append", "(", "line", ")", "if", "eels_params", ":", "if", "len", "(", "eels_params", ")", "==", "6", ":", "eels_keys", "=", "[", "'BEAM_ENERGY'", ",", "'BEAM_DIRECTION'", ",", "'ANGLES'", ",", "'MESH'", ",", "'POSITION'", "]", "else", ":", "eels_keys", "=", "[", "'BEAM_ENERGY'", ",", "'ANGLES'", ",", "'MESH'", ",", "'POSITION'", "]", "eels_dict", "=", "{", "\"ENERGY\"", ":", "Tags", ".", "_stringify_val", "(", "eels_params", "[", "0", "]", ".", "split", "(", ")", "[", "1", ":", "]", ")", "}", "for", "k", ",", "v", "in", "zip", "(", "eels_keys", ",", "eels_params", "[", "1", ":", "]", ")", ":", "eels_dict", "[", "k", "]", "=", "str", "(", "v", ")", "params", "[", "str", "(", "eels_params", "[", "0", "]", ".", "split", "(", ")", "[", "0", "]", ")", "]", "=", "eels_dict", "return", "Tags", "(", "params", ")" ]
Creates a Feff_tag dictionary from a PARAMETER or feff.inp file. Args: filename: Filename for either PARAMETER or feff.inp file Returns: Feff_tag object
[ "Creates", "a", "Feff_tag", "dictionary", "from", "a", "PARAMETER", "or", "feff", ".", "inp", "file", "." ]
python
train
fastai/fastai
fastai/vision/image.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/image.py#L299-L303
def pixel(self, func:PixelFunc, *args, **kwargs)->'ImagePoints':
    "Equivalent to `self = func_flow(self)`."
    self = func(self, *args, **kwargs)
    self.transformed = True
    return self
[ "def", "pixel", "(", "self", ",", "func", ":", "PixelFunc", ",", "*", "args", ",", "*", "*", "kwargs", ")", "->", "'ImagePoints'", ":", "self", "=", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "transformed", "=", "True", "return", "self" ]
Equivalent to `self = func_flow(self)`.
[ "Equivalent", "to", "self", "=", "func_flow", "(", "self", ")", "." ]
python
train
pkgw/pwkit
pwkit/lmmin.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/lmmin.py#L774-L815
def _qrd_solve_full(a, b, ddiag, dtype=np.float):
    """Solve the equation A^T x = B, D x = 0.

    Parameters:
    a     - an n-by-m array, m >= n
    b     - an m-vector
    ddiag - an n-vector giving the diagonal of D. (The rest of D is 0.)

    Returns:
    x    - n-vector solving the equation.
    s    - the n-by-n supplementary matrix s.
    pmut - n-element permutation vector defining the permutation matrix P.

    The equations are solved in a least-squares sense if the system is
    rank-deficient. D is a diagonal matrix and hence only its diagonal is
    in fact supplied as an argument. The matrix s is full lower triangular
    and solves the equation

    P^T (A A^T + D D) P = S^T S (needs transposition?)

    where P is the permutation matrix defined by the vector pmut; it puts
    the rows of 'a' in order of nonincreasing rank, so that a[pmut] has
    its rows sorted that way.
    """
    a = np.asarray(a, dtype)
    b = np.asarray(b, dtype)
    ddiag = np.asarray(ddiag, dtype)
    n, m = a.shape
    assert m >= n
    assert b.shape == (m, )
    assert ddiag.shape == (n, )

    # The computation is straightforward.
    q, r, pmut = _qr_factor_full(a)
    bqt = np.dot(b, q.T)
    x, s = _manual_qrd_solve(r[:,:n], pmut, ddiag, bqt,
                             dtype=dtype, build_s=True)
    return x, s, pmut
[ "def", "_qrd_solve_full", "(", "a", ",", "b", ",", "ddiag", ",", "dtype", "=", "np", ".", "float", ")", ":", "a", "=", "np", ".", "asarray", "(", "a", ",", "dtype", ")", "b", "=", "np", ".", "asarray", "(", "b", ",", "dtype", ")", "ddiag", "=", "np", ".", "asarray", "(", "ddiag", ",", "dtype", ")", "n", ",", "m", "=", "a", ".", "shape", "assert", "m", ">=", "n", "assert", "b", ".", "shape", "==", "(", "m", ",", ")", "assert", "ddiag", ".", "shape", "==", "(", "n", ",", ")", "# The computation is straightforward.", "q", ",", "r", ",", "pmut", "=", "_qr_factor_full", "(", "a", ")", "bqt", "=", "np", ".", "dot", "(", "b", ",", "q", ".", "T", ")", "x", ",", "s", "=", "_manual_qrd_solve", "(", "r", "[", ":", ",", ":", "n", "]", ",", "pmut", ",", "ddiag", ",", "bqt", ",", "dtype", "=", "dtype", ",", "build_s", "=", "True", ")", "return", "x", ",", "s", ",", "pmut" ]
Solve the equation A^T x = B, D x = 0. Parameters: a - an n-by-m array, m >= n b - an m-vector ddiag - an n-vector giving the diagonal of D. (The rest of D is 0.) Returns: x - n-vector solving the equation. s - the n-by-n supplementary matrix s. pmut - n-element permutation vector defining the permutation matrix P. The equations are solved in a least-squares sense if the system is rank-deficient. D is a diagonal matrix and hence only its diagonal is in fact supplied as an argument. The matrix s is full lower triangular and solves the equation P^T (A A^T + D D) P = S^T S (needs transposition?) where P is the permutation matrix defined by the vector pmut; it puts the rows of 'a' in order of nonincreasing rank, so that a[pmut] has its rows sorted that way.
[ "Solve", "the", "equation", "A^T", "x", "=", "B", "D", "x", "=", "0", "." ]
python
train
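_qr_factor_full and _manual_qrd_solve are lmmin internals not shown in this record, but the core move, solving an overdetermined system through a QR factorization, can be sketched with plain numpy. Note the record solves A^T x = b for an n-by-m a, so the coefficient matrix below is a.T:

import numpy as np

rng = np.random.default_rng(0)
n, m = 3, 5
a = rng.standard_normal((n, m))  # n-by-m with m >= n, as in the record
b = rng.standard_normal(m)

# Least-squares solution of a.T @ x = b via QR of a.T.
q, r = np.linalg.qr(a.T)  # q: m-by-n, r: n-by-n
x = np.linalg.solve(r, q.T @ b)

# Agrees with the general-purpose solver.
x_ref, *_ = np.linalg.lstsq(a.T, b, rcond=None)
print(np.allclose(x, x_ref))  # True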
edeposit/edeposit.amqp.aleph
src/edeposit/amqp/aleph/export.py
https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/export.py#L321-L343
def _sendPostDict(post_dict):
    """
    Send `post_dict` to the :attr:`.ALEPH_EXPORT_URL`.

    Args:
        post_dict (dict): dictionary from :class:`PostData.get_POST_data()`

    Returns:
        str: Response from webform.
    """
    downer = Downloader()
    downer.headers["Referer"] = settings.EDEPOSIT_EXPORT_REFERER
    data = downer.download(settings.ALEPH_EXPORT_URL, post=post_dict)

    rheaders = downer.response_headers
    error_msg = rheaders.get("aleph-info", "").lower().strip()

    if "aleph-info" in rheaders and error_msg.startswith("error"):
        raise ExportRejectedException(
            "Export request was rejected by import webform: %s" %
            rheaders["aleph-info"]
        )

    return data
[ "def", "_sendPostDict", "(", "post_dict", ")", ":", "downer", "=", "Downloader", "(", ")", "downer", ".", "headers", "[", "\"Referer\"", "]", "=", "settings", ".", "EDEPOSIT_EXPORT_REFERER", "data", "=", "downer", ".", "download", "(", "settings", ".", "ALEPH_EXPORT_URL", ",", "post", "=", "post_dict", ")", "rheaders", "=", "downer", ".", "response_headers", "error_msg", "=", "rheaders", ".", "get", "(", "\"aleph-info\"", ",", "\"\"", ")", ".", "lower", "(", ")", ".", "strip", "(", ")", "if", "\"aleph-info\"", "in", "rheaders", "and", "error_msg", ".", "startswith", "(", "\"error\"", ")", ":", "raise", "ExportRejectedException", "(", "\"Export request was rejected by import webform: %s\"", "%", "rheaders", "[", "\"aleph-info\"", "]", ")", "return", "data" ]
Send `post_dict` to the :attr:`.ALEPH_EXPORT_URL`. Args: post_dict (dict): dictionary from :class:`PostData.get_POST_data()` Returns: str: Response from webform.
[ "Send", "post_dict", "to", "the", ":", "attr", ":", ".", "ALEPH_EXPORT_URL", "." ]
python
train
saltstack/salt
salt/modules/redismod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/redismod.py#L574-L585
def save(host=None, port=None, db=None, password=None):
    '''
    Synchronously save the dataset to disk

    CLI Example:

    .. code-block:: bash

        salt '*' redis.save
    '''
    server = _connect(host, port, db, password)
    return server.save()
[ "def", "save", "(", "host", "=", "None", ",", "port", "=", "None", ",", "db", "=", "None", ",", "password", "=", "None", ")", ":", "server", "=", "_connect", "(", "host", ",", "port", ",", "db", ",", "password", ")", "return", "server", ".", "save", "(", ")" ]
Synchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.save
[ "Synchronously", "save", "the", "dataset", "to", "disk" ]
python
train
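_connect is a module helper not shown in the record. Assuming it wraps redis-py in the obvious way (the connection values below are hypothetical), the call chain reduces to:

import redis

server = redis.StrictRedis(host='127.0.0.1', port=6379, db=0, password=None)
server.save()  # blocks until the RDB snapshot has been written to disk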
ska-sa/katcp-python
katcp/resource_client.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/resource_client.py#L407-L426
def start(self):
    """Start the client and connect"""
    # TODO (NM 2015-03-12) Some checking to prevent multiple calls to start()
    host, port = self.address
    ic = self._inspecting_client = self.inspecting_client_factory(
        host, port, self._ioloop_set_to)
    self.ioloop = ic.ioloop
    if self._preset_protocol_flags:
        ic.preset_protocol_flags(self._preset_protocol_flags)
    ic.katcp_client.auto_reconnect_delay = self.auto_reconnect_delay
    ic.set_state_callback(self._inspecting_client_state_callback)
    ic.request_factory = self._request_factory
    self._sensor_manager = KATCPClientResourceSensorsManager(
        ic, self.name, logger=self._logger)
    ic.handle_sensor_value()
    ic.sensor_factory = self._sensor_manager.sensor_factory
    # Steal some methods from _sensor_manager
    self.reapply_sampling_strategies = self._sensor_manager.reapply_sampling_strategies
    log_future_exceptions(self._logger, ic.connect())
[ "def", "start", "(", "self", ")", ":", "# TODO (NM 2015-03-12) Some checking to prevent multiple calls to start()", "host", ",", "port", "=", "self", ".", "address", "ic", "=", "self", ".", "_inspecting_client", "=", "self", ".", "inspecting_client_factory", "(", "host", ",", "port", ",", "self", ".", "_ioloop_set_to", ")", "self", ".", "ioloop", "=", "ic", ".", "ioloop", "if", "self", ".", "_preset_protocol_flags", ":", "ic", ".", "preset_protocol_flags", "(", "self", ".", "_preset_protocol_flags", ")", "ic", ".", "katcp_client", ".", "auto_reconnect_delay", "=", "self", ".", "auto_reconnect_delay", "ic", ".", "set_state_callback", "(", "self", ".", "_inspecting_client_state_callback", ")", "ic", ".", "request_factory", "=", "self", ".", "_request_factory", "self", ".", "_sensor_manager", "=", "KATCPClientResourceSensorsManager", "(", "ic", ",", "self", ".", "name", ",", "logger", "=", "self", ".", "_logger", ")", "ic", ".", "handle_sensor_value", "(", ")", "ic", ".", "sensor_factory", "=", "self", ".", "_sensor_manager", ".", "sensor_factory", "# Steal some methods from _sensor_manager", "self", ".", "reapply_sampling_strategies", "=", "self", ".", "_sensor_manager", ".", "reapply_sampling_strategies", "log_future_exceptions", "(", "self", ".", "_logger", ",", "ic", ".", "connect", "(", ")", ")" ]
Start the client and connect
[ "Start", "the", "client", "and", "connect" ]
python
train
materialsproject/pymatgen
pymatgen/analysis/local_env.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/local_env.py#L778-L792
def _is_in_targets(self, site, targets):
    """
    Test whether a site contains elements in the target list

    Args:
        site (Site): Site to assess
        targets ([Element]) List of elements

    Returns:
        (boolean) Whether this site contains a certain list of elements
    """
    elems = self._get_elements(site)
    for elem in elems:
        if elem not in targets:
            return False
    return True
[ "def", "_is_in_targets", "(", "self", ",", "site", ",", "targets", ")", ":", "elems", "=", "self", ".", "_get_elements", "(", "site", ")", "for", "elem", "in", "elems", ":", "if", "elem", "not", "in", "targets", ":", "return", "False", "return", "True" ]
Test whether a site contains elements in the target list Args: site (Site): Site to assess targets ([Element]) List of elements Returns: (boolean) Whether this site contains a certain list of elements
[ "Test", "whether", "a", "site", "contains", "elements", "in", "the", "target", "list" ]
python
train
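The explicit loop in _is_in_targets is the long form of an all-quantifier; an equivalent one-liner, shown only as a sketch of the idiom:

def _is_in_targets(self, site, targets):
    # Same semantics as the record: every element of the site must be a target.
    return all(elem in targets for elem in self._get_elements(site))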
rfk/django-supervisor
djsupervisor/config.py
https://github.com/rfk/django-supervisor/blob/545a379d4a73ed2ae21c4aee6b8009ded8aeedc6/djsupervisor/config.py#L33-L138
def get_merged_config(**options):
    """Get the final merged configuration for supervisord, as a string.

    This is the top-level function exported by this module. It combines
    the config file from the main project with default settings and those
    specified in the command-line, processes various special section
    names, and returns the resulting configuration as a string.
    """
    # Find and load the containing project module.
    # This can be specified explicitly using the --project-dir option.
    # Otherwise, we attempt to guess by looking for the manage.py file.
    project_dir = options.get("project_dir")
    if project_dir is None:
        project_dir = guess_project_dir()
    # Find the config file to load.
    # Default to <project-dir>/supervisord.conf.
    config_file = options.get("config_file")
    if config_file is None:
        config_file = os.path.join(project_dir, CONFIG_FILE)
    # Build the default template context variables.
    # This is mostly useful information about the project and environment.
    ctx = {
        "PROJECT_DIR": project_dir,
        "PYTHON": os.path.realpath(os.path.abspath(sys.executable)),
        "SUPERVISOR_OPTIONS": rerender_options(options),
        "settings": settings,
        "environ": os.environ,
    }
    # Initialise the ConfigParser.
    # Fortunately for us, ConfigParser has merge-multiple-config-files
    # functionality built into it. You just read each file in turn, and
    # values from later files overwrite values from former.
    cfg = RawConfigParser()
    # Start from the default configuration options.
    data = render_config(DEFAULT_CONFIG, ctx)
    cfg.readfp(StringIO(data))
    # Add in the project-specific config file.
    with open(config_file, "r") as f:
        data = render_config(f.read(), ctx)
    cfg.readfp(StringIO(data))
    # Add in the options specified on the command-line.
    cfg.readfp(StringIO(get_config_from_options(**options)))
    # Add options from [program:__defaults__] to each program section
    # if it happens to be missing that option.
    PROG_DEFAULTS = "program:__defaults__"
    if cfg.has_section(PROG_DEFAULTS):
        for option in cfg.options(PROG_DEFAULTS):
            default = cfg.get(PROG_DEFAULTS, option)
            for section in cfg.sections():
                if section.startswith("program:"):
                    if not cfg.has_option(section, option):
                        cfg.set(section, option, default)
        cfg.remove_section(PROG_DEFAULTS)
    # Add options from [program:__overrides__] to each program section
    # regardless of whether they already have that option.
    PROG_OVERRIDES = "program:__overrides__"
    if cfg.has_section(PROG_OVERRIDES):
        for option in cfg.options(PROG_OVERRIDES):
            override = cfg.get(PROG_OVERRIDES, option)
            for section in cfg.sections():
                if section.startswith("program:"):
                    cfg.set(section, option, override)
        cfg.remove_section(PROG_OVERRIDES)
    # Make sure we've got a port configured for supervisorctl to
    # talk to supervisord. It's passworded based on secret key.
    # If they have configured a unix socket then use that, otherwise
    # use an inet server on localhost at fixed-but-randomish port.
    username = hashlib.md5(settings.SECRET_KEY).hexdigest()[:7]
    password = hashlib.md5(username).hexdigest()
    if cfg.has_section("unix_http_server"):
        set_if_missing(cfg, "unix_http_server", "username", username)
        set_if_missing(cfg, "unix_http_server", "password", password)
        serverurl = "unix://" + cfg.get("unix_http_server", "file")
    else:
        # This picks a "random" port in the 9000 range to listen on.
        # It's derived from the secret key, so it's stable for a given
        # project but multiple projects are unlikely to collide.
        port = int(hashlib.md5(password).hexdigest()[:3], 16) % 1000
        addr = "127.0.0.1:9%03d" % (port,)
        set_if_missing(cfg, "inet_http_server", "port", addr)
        set_if_missing(cfg, "inet_http_server", "username", username)
        set_if_missing(cfg, "inet_http_server", "password", password)
        serverurl = "http://" + cfg.get("inet_http_server", "port")
    set_if_missing(cfg, "supervisorctl", "serverurl", serverurl)
    set_if_missing(cfg, "supervisorctl", "username", username)
    set_if_missing(cfg, "supervisorctl", "password", password)
    set_if_missing(cfg, "rpcinterface:supervisor",
                   "supervisor.rpcinterface_factory",
                   "supervisor.rpcinterface:make_main_rpcinterface")
    # Remove any [program:] sections with exclude=true
    for section in cfg.sections():
        try:
            if cfg.getboolean(section, "exclude"):
                cfg.remove_section(section)
        except NoOptionError:
            pass
    # Sanity-check to give better error messages.
    for section in cfg.sections():
        if section.startswith("program:"):
            if not cfg.has_option(section, "command"):
                msg = "Process name '%s' has no command configured"
                raise ValueError(msg % (section.split(":", 1)[-1]))
    # Write it out to a StringIO and return the data
    s = StringIO()
    cfg.write(s)
    return s.getvalue()
[ "def", "get_merged_config", "(", "*", "*", "options", ")", ":", "# Find and load the containing project module.", "# This can be specified explicity using the --project-dir option.", "# Otherwise, we attempt to guess by looking for the manage.py file.", "project_dir", "=", "options", ".", "get", "(", "\"project_dir\"", ")", "if", "project_dir", "is", "None", ":", "project_dir", "=", "guess_project_dir", "(", ")", "# Find the config file to load.", "# Default to <project-dir>/supervisord.conf.", "config_file", "=", "options", ".", "get", "(", "\"config_file\"", ")", "if", "config_file", "is", "None", ":", "config_file", "=", "os", ".", "path", ".", "join", "(", "project_dir", ",", "CONFIG_FILE", ")", "# Build the default template context variables.", "# This is mostly useful information about the project and environment.", "ctx", "=", "{", "\"PROJECT_DIR\"", ":", "project_dir", ",", "\"PYTHON\"", ":", "os", ".", "path", ".", "realpath", "(", "os", ".", "path", ".", "abspath", "(", "sys", ".", "executable", ")", ")", ",", "\"SUPERVISOR_OPTIONS\"", ":", "rerender_options", "(", "options", ")", ",", "\"settings\"", ":", "settings", ",", "\"environ\"", ":", "os", ".", "environ", ",", "}", "# Initialise the ConfigParser.", "# Fortunately for us, ConfigParser has merge-multiple-config-files", "# functionality built into it. You just read each file in turn, and", "# values from later files overwrite values from former.", "cfg", "=", "RawConfigParser", "(", ")", "# Start from the default configuration options.", "data", "=", "render_config", "(", "DEFAULT_CONFIG", ",", "ctx", ")", "cfg", ".", "readfp", "(", "StringIO", "(", "data", ")", ")", "# Add in the project-specific config file.", "with", "open", "(", "config_file", ",", "\"r\"", ")", "as", "f", ":", "data", "=", "render_config", "(", "f", ".", "read", "(", ")", ",", "ctx", ")", "cfg", ".", "readfp", "(", "StringIO", "(", "data", ")", ")", "# Add in the options specified on the command-line.", "cfg", ".", "readfp", "(", "StringIO", "(", "get_config_from_options", "(", "*", "*", "options", ")", ")", ")", "# Add options from [program:__defaults__] to each program section", "# if it happens to be missing that option.", "PROG_DEFAULTS", "=", "\"program:__defaults__\"", "if", "cfg", ".", "has_section", "(", "PROG_DEFAULTS", ")", ":", "for", "option", "in", "cfg", ".", "options", "(", "PROG_DEFAULTS", ")", ":", "default", "=", "cfg", ".", "get", "(", "PROG_DEFAULTS", ",", "option", ")", "for", "section", "in", "cfg", ".", "sections", "(", ")", ":", "if", "section", ".", "startswith", "(", "\"program:\"", ")", ":", "if", "not", "cfg", ".", "has_option", "(", "section", ",", "option", ")", ":", "cfg", ".", "set", "(", "section", ",", "option", ",", "default", ")", "cfg", ".", "remove_section", "(", "PROG_DEFAULTS", ")", "# Add options from [program:__overrides__] to each program section", "# regardless of whether they already have that option.", "PROG_OVERRIDES", "=", "\"program:__overrides__\"", "if", "cfg", ".", "has_section", "(", "PROG_OVERRIDES", ")", ":", "for", "option", "in", "cfg", ".", "options", "(", "PROG_OVERRIDES", ")", ":", "override", "=", "cfg", ".", "get", "(", "PROG_OVERRIDES", ",", "option", ")", "for", "section", "in", "cfg", ".", "sections", "(", ")", ":", "if", "section", ".", "startswith", "(", "\"program:\"", ")", ":", "cfg", ".", "set", "(", "section", ",", "option", ",", "override", ")", "cfg", ".", "remove_section", "(", "PROG_OVERRIDES", ")", "# Make sure we've got a port configured for supervisorctl to", "# talk to 
supervisord. It's passworded based on secret key.", "# If they have configured a unix socket then use that, otherwise", "# use an inet server on localhost at fixed-but-randomish port.", "username", "=", "hashlib", ".", "md5", "(", "settings", ".", "SECRET_KEY", ")", ".", "hexdigest", "(", ")", "[", ":", "7", "]", "password", "=", "hashlib", ".", "md5", "(", "username", ")", ".", "hexdigest", "(", ")", "if", "cfg", ".", "has_section", "(", "\"unix_http_server\"", ")", ":", "set_if_missing", "(", "cfg", ",", "\"unix_http_server\"", ",", "\"username\"", ",", "username", ")", "set_if_missing", "(", "cfg", ",", "\"unix_http_server\"", ",", "\"password\"", ",", "password", ")", "serverurl", "=", "\"unix://\"", "+", "cfg", ".", "get", "(", "\"unix_http_server\"", ",", "\"file\"", ")", "else", ":", "# This picks a \"random\" port in the 9000 range to listen on.", "# It's derived from the secret key, so it's stable for a given", "# project but multiple projects are unlikely to collide.", "port", "=", "int", "(", "hashlib", ".", "md5", "(", "password", ")", ".", "hexdigest", "(", ")", "[", ":", "3", "]", ",", "16", ")", "%", "1000", "addr", "=", "\"127.0.0.1:9%03d\"", "%", "(", "port", ",", ")", "set_if_missing", "(", "cfg", ",", "\"inet_http_server\"", ",", "\"port\"", ",", "addr", ")", "set_if_missing", "(", "cfg", ",", "\"inet_http_server\"", ",", "\"username\"", ",", "username", ")", "set_if_missing", "(", "cfg", ",", "\"inet_http_server\"", ",", "\"password\"", ",", "password", ")", "serverurl", "=", "\"http://\"", "+", "cfg", ".", "get", "(", "\"inet_http_server\"", ",", "\"port\"", ")", "set_if_missing", "(", "cfg", ",", "\"supervisorctl\"", ",", "\"serverurl\"", ",", "serverurl", ")", "set_if_missing", "(", "cfg", ",", "\"supervisorctl\"", ",", "\"username\"", ",", "username", ")", "set_if_missing", "(", "cfg", ",", "\"supervisorctl\"", ",", "\"password\"", ",", "password", ")", "set_if_missing", "(", "cfg", ",", "\"rpcinterface:supervisor\"", ",", "\"supervisor.rpcinterface_factory\"", ",", "\"supervisor.rpcinterface:make_main_rpcinterface\"", ")", "# Remove any [program:] sections with exclude=true", "for", "section", "in", "cfg", ".", "sections", "(", ")", ":", "try", ":", "if", "cfg", ".", "getboolean", "(", "section", ",", "\"exclude\"", ")", ":", "cfg", ".", "remove_section", "(", "section", ")", "except", "NoOptionError", ":", "pass", "# Sanity-check to give better error messages.", "for", "section", "in", "cfg", ".", "sections", "(", ")", ":", "if", "section", ".", "startswith", "(", "\"program:\"", ")", ":", "if", "not", "cfg", ".", "has_option", "(", "section", ",", "\"command\"", ")", ":", "msg", "=", "\"Process name '%s' has no command configured\"", "raise", "ValueError", "(", "msg", "%", "(", "section", ".", "split", "(", "\":\"", ",", "1", ")", "[", "-", "1", "]", ")", ")", "# Write it out to a StringIO and return the data", "s", "=", "StringIO", "(", ")", "cfg", ".", "write", "(", "s", ")", "return", "s", ".", "getvalue", "(", ")" ]
Get the final merged configuration for supervisord, as a string. This is the top-level function exported by this module. It combines the config file from the main project with default settings and those specified in the command-line, processes various special section names, and returns the resulting configuration as a string.
[ "Get", "the", "final", "merged", "configuration", "for", "supvervisord", "as", "a", "string", "." ]
python
train
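The merge behaviour the comments rely on, later reads overwriting earlier values, is plain ConfigParser semantics. A minimal Python 3 sketch (the record's readfp was renamed read_file in Python 3):

from configparser import RawConfigParser
from io import StringIO

cfg = RawConfigParser()
cfg.read_file(StringIO('[program:web]\ncommand = runserver\nautostart = true\n'))
cfg.read_file(StringIO('[program:web]\ncommand = gunicorn app\n'))  # later wins

print(cfg.get('program:web', 'command'))    # gunicorn app
print(cfg.get('program:web', 'autostart'))  # true (untouched by the second read)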
noahbenson/pimms
pimms/calculation.py
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/calculation.py#L125-L136
def discard_defaults(self, *args):
    '''
    node.discard_defaults(a, b...) yields a new calculation node identical
    to the given node except that the default values for the given afferent
    parameters named by the arguments a, b, etc. have been removed. In the
    new node that is returned, these parameters will be required.
    '''
    rms = set(arg for aa in args
              for arg in ([aa] if isinstance(aa, six.string_types) else aa))
    new_defaults = ps.pmap({k: v for (k, v) in six.iteritems(self.defaults)
                            if k not in rms})
    new_cnode = copy.copy(self)
    object.__setattr__(new_cnode, 'defaults', new_defaults)
    return new_cnode
[ "def", "discard_defaults", "(", "self", ",", "*", "args", ")", ":", "rms", "=", "set", "(", "arg", "for", "aa", "in", "args", "for", "arg", "in", "(", "[", "aa", "]", "if", "isinstance", "(", "aa", ",", "six", ".", "string_types", ")", "else", "aa", ")", ")", "new_defaults", "=", "ps", ".", "pmap", "(", "{", "k", ":", "v", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "args", ")", "if", "k", "not", "in", "rms", "}", ")", "new_cnode", "=", "copy", ".", "copy", "(", "self", ")", "object", ".", "__setattr__", "(", "new_cnode", ",", "'defaults'", ",", "new_defaults", ")", "return", "new_cnode" ]
node.discard_defaults(a, b...) yields a new calculation node identical to the given node except that the default values for the given afferent parameters named by the arguments a, b, etc. have been removed. In the new node that is returned, these parameters will be required.
[ "node", ".", "discard_defaults", "(", "a", "b", "...", ")", "yields", "a", "new", "calculation", "node", "identical", "to", "the", "given", "node", "except", "that", "the", "default", "values", "for", "the", "given", "afferent", "parameters", "named", "by", "the", "arguments", "a", "b", "etc", ".", "have", "been", "removed", ".", "In", "the", "new", "node", "that", "is", "returned", "these", "parameters", "will", "be", "required", "." ]
python
train
acoomans/flask-autodoc
flask_autodoc/autodoc.py
https://github.com/acoomans/flask-autodoc/blob/6c77c8935b71fbf3243b5e589c5c255d0299d853/flask_autodoc/autodoc.py#L64-L111
def doc(self, groups=None, set_location=True, **properties):
    """Add flask route to autodoc for automatic documentation

    Any route decorated with this method will be added to the list of
    routes to be documented by the generate() or html() methods.

    By default, the route is added to the 'all' group.
    By specifying group or groups argument, the route can be added to one
    or multiple other groups as well, besides the 'all' group.

    If set_location is True, the location of the function will be stored.
    NOTE: this assumes that the decorator is placed just before the
    function (in the normal way).

    Custom parameters may also be passed in beyond groups, if they are
    named something not already in the dict described in the docstring
    for the generate() function, they will be added to the route's
    properties, which can be accessed from the template.

    If a parameter is passed in with a name that is already in the dict,
    but not of a reserved name, the passed parameter overrides that dict
    value.
    """
    def decorator(f):
        # Get previous group list (if any)
        if f in self.func_groups:
            groupset = self.func_groups[f]
        else:
            groupset = set()

        # Set group[s]
        if type(groups) is list:
            groupset.update(groups)
        elif type(groups) is str:
            groupset.add(groups)
        groupset.add('all')
        self.func_groups[f] = groupset
        self.func_props[f] = properties

        # Set location
        if set_location:
            caller_frame = inspect.stack()[1]
            self.func_locations[f] = {
                'filename': caller_frame[1],
                'line': caller_frame[2],
            }
        return f
    return decorator
[ "def", "doc", "(", "self", ",", "groups", "=", "None", ",", "set_location", "=", "True", ",", "*", "*", "properties", ")", ":", "def", "decorator", "(", "f", ")", ":", "# Get previous group list (if any)", "if", "f", "in", "self", ".", "func_groups", ":", "groupset", "=", "self", ".", "func_groups", "[", "f", "]", "else", ":", "groupset", "=", "set", "(", ")", "# Set group[s]", "if", "type", "(", "groups", ")", "is", "list", ":", "groupset", ".", "update", "(", "groups", ")", "elif", "type", "(", "groups", ")", "is", "str", ":", "groupset", ".", "add", "(", "groups", ")", "groupset", ".", "add", "(", "'all'", ")", "self", ".", "func_groups", "[", "f", "]", "=", "groupset", "self", ".", "func_props", "[", "f", "]", "=", "properties", "# Set location", "if", "set_location", ":", "caller_frame", "=", "inspect", ".", "stack", "(", ")", "[", "1", "]", "self", ".", "func_locations", "[", "f", "]", "=", "{", "'filename'", ":", "caller_frame", "[", "1", "]", ",", "'line'", ":", "caller_frame", "[", "2", "]", ",", "}", "return", "f", "return", "decorator" ]
Add flask route to autodoc for automatic documentation Any route decorated with this method will be added to the list of routes to be documented by the generate() or html() methods. By default, the route is added to the 'all' group. By specifying group or groups argument, the route can be added to one or multiple other groups as well, besides the 'all' group. If set_location is True, the location of the function will be stored. NOTE: this assumes that the decorator is placed just before the function (in the normal way). Custom parameters may also be passed in beyond groups, if they are named something not already in the dict described in the docstring for the generate() function, they will be added to the route's properties, which can be accessed from the template. If a parameter is passed in with a name that is already in the dict, but not of a reserved name, the passed parameter overrides that dict value.
[ "Add", "flask", "route", "to", "autodoc", "for", "automatic", "documentation" ]
python
train
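A hedged usage sketch of the decorator, following the pattern from the flask-autodoc README; the import path, route names and the 'public' group are illustrative and may vary by version:

from flask import Flask
from flask_autodoc import Autodoc

app = Flask(__name__)
auto = Autodoc(app)

@app.route('/users')
@auto.doc(groups=['public'])  # documented under 'public' and the implicit 'all'
def list_users():
    """Return all users."""
    return 'users'

@app.route('/doc')
def documentation():
    return auto.html(groups=['public'])  # render docs for the 'public' group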
saltstack/salt
salt/modules/glusterfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/glusterfs.py#L545-L589
def add_volume_bricks(name, bricks):
    '''
    Add brick(s) to an existing volume

    name
        Volume name

    bricks
        List of bricks to add to the volume

    CLI Example:

    .. code-block:: bash

        salt '*' glusterfs.add_volume_bricks <volume> <bricks>
    '''
    volinfo = info()
    if name not in volinfo:
        log.error('Volume %s does not exist, cannot add bricks', name)
        return False

    new_bricks = []

    cmd = 'volume add-brick {0}'.format(name)

    if isinstance(bricks, six.string_types):
        bricks = [bricks]

    volume_bricks = [x['path'] for x in volinfo[name]['bricks'].values()]

    for brick in bricks:
        if brick in volume_bricks:
            log.debug(
                'Brick %s already in volume %s...excluding from command',
                brick, name)
        else:
            new_bricks.append(brick)

    if new_bricks:
        for brick in new_bricks:
            cmd += ' {0}'.format(brick)
        return _gluster(cmd)
    return True
[ "def", "add_volume_bricks", "(", "name", ",", "bricks", ")", ":", "volinfo", "=", "info", "(", ")", "if", "name", "not", "in", "volinfo", ":", "log", ".", "error", "(", "'Volume %s does not exist, cannot add bricks'", ",", "name", ")", "return", "False", "new_bricks", "=", "[", "]", "cmd", "=", "'volume add-brick {0}'", ".", "format", "(", "name", ")", "if", "isinstance", "(", "bricks", ",", "six", ".", "string_types", ")", ":", "bricks", "=", "[", "bricks", "]", "volume_bricks", "=", "[", "x", "[", "'path'", "]", "for", "x", "in", "volinfo", "[", "name", "]", "[", "'bricks'", "]", ".", "values", "(", ")", "]", "for", "brick", "in", "bricks", ":", "if", "brick", "in", "volume_bricks", ":", "log", ".", "debug", "(", "'Brick %s already in volume %s...excluding from command'", ",", "brick", ",", "name", ")", "else", ":", "new_bricks", ".", "append", "(", "brick", ")", "if", "new_bricks", ":", "for", "brick", "in", "new_bricks", ":", "cmd", "+=", "' {0}'", ".", "format", "(", "brick", ")", "return", "_gluster", "(", "cmd", ")", "return", "True" ]
Add brick(s) to an existing volume name Volume name bricks List of bricks to add to the volume CLI Example: .. code-block:: bash salt '*' glusterfs.add_volume_bricks <volume> <bricks>
[ "Add", "brick", "(", "s", ")", "to", "an", "existing", "volume" ]
python
train
Sliim/soundcloud-syncer
ssyncer/strack.py
https://github.com/Sliim/soundcloud-syncer/blob/f15142677bf8e5fb54f40b0eb9a36f21ba940ab6/ssyncer/strack.py#L162-L217
def download(self, localdir, max_retry):
    """ Download a track in local directory. """
    local_file = self.gen_localdir(localdir) + self.gen_filename()

    if self.track_exists(localdir):
        print("Track {0} already downloaded, skipping!".format(
            self.get("id")))
        return False

    if local_file in self.get_ignored_tracks(localdir):
        print("\033[93mTrack {0} ignored, skipping!!\033[0m".format(
            self.get("id")))
        return False

    dlurl = self.get_download_link()
    if not dlurl:
        raise serror("Can't download track_id:%d|%s" % (
            self.get("id"), self.get("title")))

    retry = max_retry
    print("\nDownloading %s (%d).." % (self.get("title"), self.get("id")))
    while True:
        try:
            urllib.request.urlretrieve(dlurl, local_file,
                                       self._progress_hook)
            break
        except Exception as e:
            if os.path.isfile(local_file):
                os.unlink(local_file)
            retry -= 1
            if retry < 0:
                raise serror("Can't download track-id %s, max retry "
                             "reached (%d). Error occured: %s" % (
                                 self.get("id"), max_retry, type(e)))
            else:
                print("\033[93mError occured for track-id %s (%s). "
                      "Retrying.. (%d/%d) \033[0m" % (
                          self.get("id"), type(e),
                          max_retry - retry, max_retry))
        except KeyboardInterrupt:
            if os.path.isfile(local_file):
                os.unlink(local_file)
            raise serror("KeyBoard Interrupt: Incomplete file removed.")

    self.filepath = local_file + self.get_file_extension(local_file)
    os.rename(local_file, self.filepath)
    print("Downloaded => %s" % self.filepath)
    self.downloaded = True
    return True
[ "def", "download", "(", "self", ",", "localdir", ",", "max_retry", ")", ":", "local_file", "=", "self", ".", "gen_localdir", "(", "localdir", ")", "+", "self", ".", "gen_filename", "(", ")", "if", "self", ".", "track_exists", "(", "localdir", ")", ":", "print", "(", "\"Track {0} already downloaded, skipping!\"", ".", "format", "(", "self", ".", "get", "(", "\"id\"", ")", ")", ")", "return", "False", "if", "local_file", "in", "self", ".", "get_ignored_tracks", "(", "localdir", ")", ":", "print", "(", "\"\\033[93mTrack {0} ignored, skipping!!\\033[0m\"", ".", "format", "(", "self", ".", "get", "(", "\"id\"", ")", ")", ")", "return", "False", "dlurl", "=", "self", ".", "get_download_link", "(", ")", "if", "not", "dlurl", ":", "raise", "serror", "(", "\"Can't download track_id:%d|%s\"", "%", "(", "self", ".", "get", "(", "\"id\"", ")", ",", "self", ".", "get", "(", "\"title\"", ")", ")", ")", "retry", "=", "max_retry", "print", "(", "\"\\nDownloading %s (%d)..\"", "%", "(", "self", ".", "get", "(", "\"title\"", ")", ",", "self", ".", "get", "(", "\"id\"", ")", ")", ")", "while", "True", ":", "try", ":", "urllib", ".", "request", ".", "urlretrieve", "(", "dlurl", ",", "local_file", ",", "self", ".", "_progress_hook", ")", "break", "except", "Exception", "as", "e", ":", "if", "os", ".", "path", ".", "isfile", "(", "local_file", ")", ":", "os", ".", "unlink", "(", "local_file", ")", "retry", "-=", "1", "if", "retry", "<", "0", ":", "raise", "serror", "(", "\"Can't download track-id %s, max retry \"", "\"reached (%d). Error occured: %s\"", "%", "(", "self", ".", "get", "(", "\"id\"", ")", ",", "max_retry", ",", "type", "(", "e", ")", ")", ")", "else", ":", "print", "(", "\"\\033[93mError occured for track-id %s (%s). \"", "\"Retrying.. (%d/%d) \\033[0m\"", "%", "(", "self", ".", "get", "(", "\"id\"", ")", ",", "type", "(", "e", ")", ",", "max_retry", "-", "retry", ",", "max_retry", ")", ")", "except", "KeyboardInterrupt", ":", "if", "os", ".", "path", ".", "isfile", "(", "local_file", ")", ":", "os", ".", "unlink", "(", "local_file", ")", "raise", "serror", "(", "\"KeyBoard Interrupt: Incomplete file removed.\"", ")", "self", ".", "filepath", "=", "local_file", "+", "self", ".", "get_file_extension", "(", "local_file", ")", "os", ".", "rename", "(", "local_file", ",", "self", ".", "filepath", ")", "print", "(", "\"Downloaded => %s\"", "%", "self", ".", "filepath", ")", "self", ".", "downloaded", "=", "True", "return", "True" ]
Download a track in local directory.
[ "Download", "a", "track", "in", "local", "directory", "." ]
python
train
skymill/automated-ebs-snapshots
automated_ebs_snapshots/__init__.py
https://github.com/skymill/automated-ebs-snapshots/blob/9595bc49d458f6ffb93430722757d2284e878fab/automated_ebs_snapshots/__init__.py#L109-L171
def main():
    """ Main function """
    # Read configuration from the config file if present, else fall back to
    # command line options
    if args.config:
        config = config_file_parser.get_configuration(args.config)
        access_key_id = config['access-key-id']
        secret_access_key = config['secret-access-key']
        region = config['region']
    else:
        access_key_id = args.access_key_id
        secret_access_key = args.secret_access_key
        region = args.region

    if args.daemon:
        pid_file = '/tmp/automatic-ebs-snapshots.pid'
        daemon = AutoEBSDaemon(pid_file)

        if args.daemon == 'start':
            daemon.start()
        elif args.daemon == 'stop':
            daemon.stop()
            sys.exit(0)
        elif args.daemon == 'restart':
            daemon.restart()
        elif args.daemon in ['foreground', 'fg']:
            daemon.run()
        else:
            print 'Valid options for --daemon are start, stop and restart'
            sys.exit(1)

    # Connect to AWS
    connection = connection_manager.connect_to_ec2(
        region, access_key_id, secret_access_key)

    if args.watch:
        volume_manager.watch(
            connection, args.watch, args.interval, args.retention)

    if args.unwatch:
        volume_manager.unwatch(connection, args.unwatch)

    if args.watch_file:
        volume_manager.watch_from_file(connection, args.watch_file)

    if args.unwatch_file:
        volume_manager.unwatch_from_file(connection, args.unwatch_file)

    if args.snapshots:
        volume_manager.list_snapshots(connection, args.snapshots)

    if args.list:
        volume_manager.list(connection)

    if args.run:
        snapshot_manager.run(connection)
[ "def", "main", "(", ")", ":", "# Read configuration from the config file if present, else fall back to", "# command line options", "if", "args", ".", "config", ":", "config", "=", "config_file_parser", ".", "get_configuration", "(", "args", ".", "config", ")", "access_key_id", "=", "config", "[", "'access-key-id'", "]", "secret_access_key", "=", "config", "[", "'secret-access-key'", "]", "region", "=", "config", "[", "'region'", "]", "else", ":", "access_key_id", "=", "args", ".", "access_key_id", "secret_access_key", "=", "args", ".", "secret_access_key", "region", "=", "args", ".", "region", "if", "args", ".", "daemon", ":", "pid_file", "=", "'/tmp/automatic-ebs-snapshots.pid'", "daemon", "=", "AutoEBSDaemon", "(", "pid_file", ")", "if", "args", ".", "daemon", "==", "'start'", ":", "daemon", ".", "start", "(", ")", "elif", "args", ".", "daemon", "==", "'stop'", ":", "daemon", ".", "stop", "(", ")", "sys", ".", "exit", "(", "0", ")", "elif", "args", ".", "daemon", "==", "'restart'", ":", "daemon", ".", "restart", "(", ")", "elif", "args", ".", "daemon", "in", "[", "'foreground'", ",", "'fg'", "]", ":", "daemon", ".", "run", "(", ")", "else", ":", "print", "'Valid options for --daemon are start, stop and restart'", "sys", ".", "exit", "(", "1", ")", "# Connect to AWS", "connection", "=", "connection_manager", ".", "connect_to_ec2", "(", "region", ",", "access_key_id", ",", "secret_access_key", ")", "if", "args", ".", "watch", ":", "volume_manager", ".", "watch", "(", "connection", ",", "args", ".", "watch", ",", "args", ".", "interval", ",", "args", ".", "retention", ")", "if", "args", ".", "unwatch", ":", "volume_manager", ".", "unwatch", "(", "connection", ",", "args", ".", "unwatch", ")", "if", "args", ".", "watch_file", ":", "volume_manager", ".", "watch_from_file", "(", "connection", ",", "args", ".", "watch_file", ")", "if", "args", ".", "unwatch_file", ":", "volume_manager", ".", "unwatch_from_file", "(", "connection", ",", "args", ".", "unwatch_file", ")", "if", "args", ".", "snapshots", ":", "volume_manager", ".", "list_snapshots", "(", "connection", ",", "args", ".", "snapshots", ")", "if", "args", ".", "list", ":", "volume_manager", ".", "list", "(", "connection", ")", "if", "args", ".", "run", ":", "snapshot_manager", ".", "run", "(", "connection", ")" ]
Main function
[ "Main", "function" ]
python
train
gabstopper/smc-python
smc/examples/ip_lists.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/examples/ip_lists.py#L42-L54
def upload_as_zip(name, filename): """ Upload an IPList as a zip file. Useful when IPList is very large. This is the default upload format for IPLists. :param str name: name of IPList :param str filename: name of zip file to upload, full path :return: None """ location = list(IPList.objects.filter(name)) if location: iplist = location[0] return iplist.upload(filename=filename)
[ "def", "upload_as_zip", "(", "name", ",", "filename", ")", ":", "location", "=", "list", "(", "IPList", ".", "objects", ".", "filter", "(", "name", ")", ")", "if", "location", ":", "iplist", "=", "location", "[", "0", "]", "return", "iplist", ".", "upload", "(", "filename", "=", "filename", ")" ]
Upload an IPList as a zip file. Useful when IPList is very large. This is the default upload format for IPLists. :param str name: name of IPList :param str filename: name of zip file to upload, full path :return: None
[ "Upload", "an", "IPList", "as", "a", "zip", "file", ".", "Useful", "when", "IPList", "is", "very", "large", ".", "This", "is", "the", "default", "upload", "format", "for", "IPLists", "." ]
python
train
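A hedged usage sketch for upload_as_zip; it assumes an smc-python session is already established, which this example module does not show:

    from smc import session

    session.login(url="https://smc.example.com:8082", api_key="...")  # placeholders
    upload_as_zip("MyBlacklist", "/tmp/iplist.zip")  # zip pushed into the named IPList
    session.logout()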
thanethomson/statik
statik/utils.py
https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/utils.py#L102-L124
def deep_merge_dict(a, b): """Deep merges dictionary b into dictionary a.""" if not isinstance(a, dict): raise TypeError("a must be a dict, but found %s" % a.__class__.__name__) if not isinstance(b, dict): raise TypeError("b must be a dict, but found %s" % b.__class__.__name__) _a = copy(a) _b = copy(b) for key_b, val_b in iteritems(_b): # if it's a sub-dictionary if isinstance(val_b, dict): if key_b not in _a or not isinstance(_a[key_b], dict): _a[key_b] = {} # perform the deep merge recursively _a[key_b] = deep_merge_dict(_a[key_b], val_b) else: _a[key_b] = val_b # b should now be deep-merged into a return _a
[ "def", "deep_merge_dict", "(", "a", ",", "b", ")", ":", "if", "not", "isinstance", "(", "a", ",", "dict", ")", ":", "raise", "TypeError", "(", "\"a must be a dict, but found %s\"", "%", "a", ".", "__class__", ".", "__name__", ")", "if", "not", "isinstance", "(", "b", ",", "dict", ")", ":", "raise", "TypeError", "(", "\"b must be a dict, but found %s\"", "%", "b", ".", "__class__", ".", "__name__", ")", "_a", "=", "copy", "(", "a", ")", "_b", "=", "copy", "(", "b", ")", "for", "key_b", ",", "val_b", "in", "iteritems", "(", "_b", ")", ":", "# if it's a sub-dictionary", "if", "isinstance", "(", "val_b", ",", "dict", ")", ":", "if", "key_b", "not", "in", "_a", "or", "not", "isinstance", "(", "_a", "[", "key_b", "]", ",", "dict", ")", ":", "_a", "[", "key_b", "]", "=", "{", "}", "# perform the deep merge recursively", "_a", "[", "key_b", "]", "=", "deep_merge_dict", "(", "_a", "[", "key_b", "]", ",", "val_b", ")", "else", ":", "_a", "[", "key_b", "]", "=", "val_b", "# b should now be deep-merged into a", "return", "_a" ]
Deep merges dictionary b into dictionary a.
[ "Deep", "merges", "dictionary", "b", "into", "dictionary", "a", "." ]
python
train
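A small worked example of the merge semantics above: keys from b win on conflict, sibling keys in a survive, and both inputs are left unmodified because the function copies before merging:

    from statik.utils import deep_merge_dict

    a = {"db": {"host": "localhost", "port": 5432}, "debug": False}
    b = {"db": {"port": 6432}, "debug": True}
    merged = deep_merge_dict(a, b)
    # merged == {"db": {"host": "localhost", "port": 6432}, "debug": True}
    assert a["db"]["port"] == 5432  # originals untouched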
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1530-L1535
def EvalBinomialPmf(k, n, p): """Evaluates the binomial pmf. Returns the probability of k successes in n trials with probability p. """ return scipy.stats.binom.pmf(k, n, p)
[ "def", "EvalBinomialPmf", "(", "k", ",", "n", ",", "p", ")", ":", "return", "scipy", ".", "stats", ".", "binom", ".", "pmf", "(", "k", ",", "n", ",", "p", ")" ]
Evaluates the binomial pmf. Returns the probability of k successes in n trials with probability p.
[ "Evaluates", "the", "binomial", "pmf", "." ]
python
train
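A quick numeric check of the wrapper above, using the closed form C(10, 3) * 0.5**10:

    from seqcluster.libs.thinkbayes import EvalBinomialPmf

    prob = EvalBinomialPmf(3, 10, 0.5)
    # C(10, 3) * 0.5**3 * 0.5**7 = 120 / 1024 = 0.1171875
    assert abs(prob - 0.1171875) < 1e-12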
rajeevs1992/pyhealthvault
src/healthvaultlib/objects/vocabularykey.py
https://github.com/rajeevs1992/pyhealthvault/blob/2b6fa7c1687300bcc2e501368883fbb13dc80495/src/healthvaultlib/objects/vocabularykey.py#L34-L67
def write_xml(self): ''' Writes a VocabularyKey Xml as per Healthvault schema. :returns: lxml.etree.Element representing a single VocabularyKey ''' key = None if self.language is not None: lang = {} lang['{http://www.w3.org/XML/1998/namespace}lang'] = self.language key = etree.Element('vocabulary-key', attrib=lang) else: key = etree.Element('vocabulary-key') name = etree.Element('name') name.text = self.name key.append(name) if self.family is not None: family = etree.Element('family') family.text = self.family key.append(family) if self.version is not None: version = etree.Element('version') version.text = self.version key.append(version) if self.code_value is not None: code_value = etree.Element('code-value') code_value.text = self.code_value key.append(code_value) return key
[ "def", "write_xml", "(", "self", ")", ":", "key", "=", "None", "if", "self", ".", "language", "is", "not", "None", ":", "lang", "=", "{", "}", "lang", "[", "'{http://www.w3.org/XML/1998/namespace}lang'", "]", "=", "self", ".", "language", "key", "=", "etree", ".", "Element", "(", "'vocabulary-key'", ",", "attrib", "=", "lang", ")", "else", ":", "key", "=", "etree", ".", "Element", "(", "'vocabulary-key'", ")", "name", "=", "etree", ".", "Element", "(", "'name'", ")", "name", ".", "text", "=", "self", ".", "name", "key", ".", "append", "(", "name", ")", "if", "self", ".", "family", "is", "not", "None", ":", "family", "=", "etree", ".", "Element", "(", "'family'", ")", "family", ".", "text", "=", "self", ".", "family", "key", ".", "append", "(", "family", ")", "if", "self", ".", "version", "is", "not", "None", ":", "version", "=", "etree", ".", "Element", "(", "'version'", ")", "version", ".", "text", "=", "self", ".", "version", "key", ".", "append", "(", "version", ")", "if", "self", ".", "code_value", "is", "not", "None", ":", "code_value", "=", "etree", ".", "Element", "(", "'code-value'", ")", "code_value", ".", "text", "=", "self", ".", "code_value", "key", ".", "append", "(", "code_value", ")", "return", "key" ]
Writes a VocabularyKey Xml as per Healthvault schema. :returns: lxml.etree.Element representing a single VocabularyKey
[ "Writes", "a", "VocabularyKey", "Xml", "as", "per", "Healthvault", "schema", "." ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_system_monitor.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_system_monitor.py#L155-L166
def system_monitor_cid_card_threshold_down_threshold(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") system_monitor = ET.SubElement(config, "system-monitor", xmlns="urn:brocade.com:mgmt:brocade-system-monitor") cid_card = ET.SubElement(system_monitor, "cid-card") threshold = ET.SubElement(cid_card, "threshold") down_threshold = ET.SubElement(threshold, "down-threshold") down_threshold.text = kwargs.pop('down_threshold') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "system_monitor_cid_card_threshold_down_threshold", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "system_monitor", "=", "ET", ".", "SubElement", "(", "config", ",", "\"system-monitor\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-system-monitor\"", ")", "cid_card", "=", "ET", ".", "SubElement", "(", "system_monitor", ",", "\"cid-card\"", ")", "threshold", "=", "ET", ".", "SubElement", "(", "cid_card", ",", "\"threshold\"", ")", "down_threshold", "=", "ET", ".", "SubElement", "(", "threshold", ",", "\"down-threshold\"", ")", "down_threshold", ".", "text", "=", "kwargs", ".", "pop", "(", "'down_threshold'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
cloudera/cm_api
python/src/cm_api/endpoints/services.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/services.py#L1190-L1204
def failover_hdfs(self, active_name, standby_name, force=False): """ Initiate a failover of an HDFS NameNode HA pair. This will make the given stand-by NameNode active, and vice-versa. @param active_name: name of currently active NameNode. @param standby_name: name of NameNode currently in stand-by. @param force: whether to force failover. @return: Reference to the submitted command. """ # '"true" and force or "false"' yielded the boolean itself when force was truthy; use a conditional expression params = { "force" : "true" if force else "false" } return self._cmd('hdfsFailover', data=[ active_name, standby_name ], params=params)
[ "def", "failover_hdfs", "(", "self", ",", "active_name", ",", "standby_name", ",", "force", "=", "False", ")", ":", "# '\"true\" and force or \"false\"' yielded the boolean itself when force was truthy; use a conditional expression", "params", "=", "{", "\"force\"", ":", "\"true\"", "if", "force", "else", "\"false\"", "}", "return", "self", ".", "_cmd", "(", "'hdfsFailover'", ",", "data", "=", "[", "active_name", ",", "standby_name", "]", ",", "params", "=", "params", ")" ]
Initiate a failover of an HDFS NameNode HA pair. This will make the given stand-by NameNode active, and vice-versa. @param active_name: name of currently active NameNode. @param standby_name: name of NameNode currently in stand-by. @param force: whether to force failover. @return: Reference to the submitted command.
[ "Initiate", "a", "failover", "of", "an", "HDFS", "NameNode", "HA", "pair", "." ]
python
train
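A usage sketch for the failover command, following the cm_api client conventions; host, cluster, service, and role names are placeholders:

    from cm_api.api_client import ApiResource

    api = ApiResource("cm-host.example.com", username="admin", password="admin")
    hdfs = api.get_cluster("cluster1").get_service("hdfs1")
    cmd = hdfs.failover_hdfs("hdfs1-NAMENODE-1", "hdfs1-NAMENODE-2", force=False)
    cmd = cmd.wait()  # block until Cloudera Manager finishes the command
    print(cmd.success)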
raamana/mrivis
mrivis/workflow.py
https://github.com/raamana/mrivis/blob/199ad096b8a1d825f69109e7218a81b2f1cec756/mrivis/workflow.py#L699-L711
def check_images(img_spec1, img_spec2, bkground_thresh=0.05): """Reads the two images and asserts identical shape.""" img1 = read_image(img_spec1, bkground_thresh) img2 = read_image(img_spec2, bkground_thresh) if img1.shape != img2.shape: raise ValueError('size mismatch! First image: {} Second image: {}\n' 'Two images to be compared must be of the same size in all dimensions.'.format( img1.shape, img2.shape)) return img1, img2
[ "def", "check_images", "(", "img_spec1", ",", "img_spec2", ",", "bkground_thresh", "=", "0.05", ")", ":", "img1", "=", "read_image", "(", "img_spec1", ",", "bkground_thresh", ")", "img2", "=", "read_image", "(", "img_spec2", ",", "bkground_thresh", ")", "if", "img1", ".", "shape", "!=", "img2", ".", "shape", ":", "raise", "ValueError", "(", "'size mismatch! First image: {} Second image: {}\\n'", "'Two images to be compared must be of the same size in all dimensions.'", ".", "format", "(", "img1", ".", "shape", ",", "img2", ".", "shape", ")", ")", "return", "img1", ",", "img2" ]
Reads the two images and asserts identical shape.
[ "Reads", "the", "two", "images", "and", "asserts", "identical", "shape", "." ]
python
train
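A short sketch of calling check_images; the NIfTI file names are placeholders:

    from mrivis.workflow import check_images

    img1, img2 = check_images("subject_T1.nii", "subject_T2.nii", bkground_thresh=0.05)
    # reaching here means both arrays share a shape; otherwise ValueError was raised
    print(img1.shape)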
mikedh/trimesh
trimesh/exchange/load.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/exchange/load.py#L331-L361
def load_remote(url, **kwargs): """ Load a mesh at a remote URL into a local trimesh object. This must be called explicitly rather than automatically from trimesh.load to ensure users don't accidentally make network requests. Parameters ------------ url : string URL containing mesh file **kwargs : passed to `load` """ # import here to keep requirement soft import requests # download the mesh response = requests.get(url) # wrap as file object file_obj = util.wrap_as_stream(response.content) # so loaders can access textures/etc resolver = visual.resolvers.WebResolver(url) # actually load loaded = load(file_obj=file_obj, file_type=url, resolver=resolver, **kwargs) return loaded
[ "def", "load_remote", "(", "url", ",", "*", "*", "kwargs", ")", ":", "# import here to keep requirement soft", "import", "requests", "# download the mesh", "response", "=", "requests", ".", "get", "(", "url", ")", "# wrap as file object", "file_obj", "=", "util", ".", "wrap_as_stream", "(", "response", ".", "content", ")", "# so loaders can access textures/etc", "resolver", "=", "visual", ".", "resolvers", ".", "WebResolver", "(", "url", ")", "# actually load", "loaded", "=", "load", "(", "file_obj", "=", "file_obj", ",", "file_type", "=", "url", ",", "resolver", "=", "resolver", ",", "*", "*", "kwargs", ")", "return", "loaded" ]
Load a mesh at a remote URL into a local trimesh object. This must be called explicitly rather than automatically from trimesh.load to ensure users don't accidentally make network requests. Parameters ------------ url : string URL containing mesh file **kwargs : passed to `load`
[ "Load", "a", "mesh", "at", "a", "remote", "URL", "into", "a", "local", "trimesh", "object", "." ]
python
train
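A hedged usage sketch for load_remote; the URL is a placeholder, and a network request is made, as the docstring warns:

    from trimesh.exchange.load import load_remote

    mesh = load_remote("https://example.com/models/bunny.obj")
    print(len(mesh.vertices), mesh.is_watertight)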
ggaughan/pipe2py
pipe2py/modules/pipetruncate.py
https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipetruncate.py#L20-L49
def asyncPipeUniq(context=None, _INPUT=None, conf=None, **kwargs): """An operator that asynchronously returns a specified number of items from the top of a feed. Not loopable. Parameters ---------- context : pipe2py.Context object _INPUT : twisted Deferred iterable of items conf : { 'start': {'type': 'number', 'value': <starting location>} 'count': {'type': 'number', 'value': <desired feed length>} } returns ------- _OUTPUT : twisted.internet.defer.Deferred generator of truncated feed items """ _input = yield _INPUT asyncFuncs = yield asyncGetSplits(None, conf, **cdicts(opts, kwargs)) pieces = yield asyncFuncs[0]() _pass = yield asyncFuncs[2]() if _pass: _OUTPUT = _input else: start = int(pieces.start) stop = start + int(pieces.count) _OUTPUT = islice(_input, start, stop) returnValue(_OUTPUT)
[ "def", "asyncPipeUniq", "(", "context", "=", "None", ",", "_INPUT", "=", "None", ",", "conf", "=", "None", ",", "*", "*", "kwargs", ")", ":", "_input", "=", "yield", "_INPUT", "asyncFuncs", "=", "yield", "asyncGetSplits", "(", "None", ",", "conf", ",", "*", "*", "cdicts", "(", "opts", ",", "kwargs", ")", ")", "pieces", "=", "yield", "asyncFuncs", "[", "0", "]", "(", ")", "_pass", "=", "yield", "asyncFuncs", "[", "2", "]", "(", ")", "if", "_pass", ":", "_OUTPUT", "=", "_input", "else", ":", "start", "=", "int", "(", "pieces", ".", "start", ")", "stop", "=", "start", "+", "int", "(", "pieces", ".", "count", ")", "_OUTPUT", "=", "islice", "(", "_input", ",", "start", ",", "stop", ")", "returnValue", "(", "_OUTPUT", ")" ]
An operator that asynchronously returns a specified number of items from the top of a feed. Not loopable. Parameters ---------- context : pipe2py.Context object _INPUT : twisted Deferred iterable of items conf : { 'start': {'type': 'number', 'value': <starting location>} 'count': {'type': 'number', 'value': <desired feed length>} } returns ------- _OUTPUT : twisted.internet.defer.Deferred generator of truncated feed items
[ "An", "operator", "that", "asynchronously", "returns", "a", "specified", "number", "of", "items", "from", "the", "top", "of", "a", "feed", ".", "Not", "loopable", "." ]
python
train
ihmeuw/vivarium
src/vivarium/interface/interactive.py
https://github.com/ihmeuw/vivarium/blob/c5f5d50f775c8bf337d3aae1ff7c57c025a8e258/src/vivarium/interface/interactive.py#L113-L128
def step(self, step_size: Timedelta=None): """Advance the simulation one step. Parameters ---------- step_size An optional size of step to take. Must be the same type as the simulation clock's step size (usually a pandas.Timedelta). """ old_step_size = self.clock.step_size if step_size is not None: if not isinstance(step_size, type(self.clock.step_size)): raise ValueError(f"Provided time must be an instance of {type(self.clock.step_size)}") self.clock._step_size = step_size super().step() self.clock._step_size = old_step_size
[ "def", "step", "(", "self", ",", "step_size", ":", "Timedelta", "=", "None", ")", ":", "old_step_size", "=", "self", ".", "clock", ".", "step_size", "if", "step_size", "is", "not", "None", ":", "if", "not", "isinstance", "(", "step_size", ",", "type", "(", "self", ".", "clock", ".", "step_size", ")", ")", ":", "raise", "ValueError", "(", "f\"Provided time must be an instance of {type(self.clock.step_size)}\"", ")", "self", ".", "clock", ".", "_step_size", "=", "step_size", "super", "(", ")", ".", "step", "(", ")", "self", ".", "clock", ".", "_step_size", "=", "old_step_size" ]
Advance the simulation one step. Parameters ---------- step_size An optional size of step to take. Must be the same type as the simulation clock's step size (usually a pandas.Timedelta).
[ "Advance", "the", "simulation", "one", "step", "." ]
python
train
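A sketch of the temporary step-size override, assuming a simulation object sim with a pandas-based clock built elsewhere (construction is not part of this record):

    import pandas as pd

    sim.step()                                # one step at the configured size
    sim.step(step_size=pd.Timedelta(days=7))  # one week; the old size is restored after
    # sim.step(step_size=7) would raise ValueError: not a pd.Timedelta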
zomux/deepy
deepy/dataset/sequence.py
https://github.com/zomux/deepy/blob/090fbad22a08a809b12951cd0d4984f5bd432698/deepy/dataset/sequence.py#L15-L24
def _pad(self, side, length): """ Pad sequences to given length in the left or right side. """ if self._train_set: self._train_set = pad_dataset(self._train_set, side, length) if self._valid_set: self._valid_set = pad_dataset(self._valid_set, side, length) if self._test_set: self._test_set = pad_dataset(self._test_set, side, length)
[ "def", "_pad", "(", "self", ",", "side", ",", "length", ")", ":", "if", "self", ".", "_train_set", ":", "self", ".", "_train_set", "=", "pad_dataset", "(", "self", ".", "_train_set", ",", "side", ",", "length", ")", "if", "self", ".", "_valid_set", ":", "self", ".", "_valid_set", "=", "pad_dataset", "(", "self", ".", "_valid_set", ",", "side", ",", "length", ")", "if", "self", ".", "_test_set", ":", "self", ".", "_test_set", "=", "pad_dataset", "(", "self", ".", "_test_set", ",", "side", ",", "length", ")" ]
Pad sequences to given length in the left or right side.
[ "Pad", "sequences", "to", "given", "length", "in", "the", "left", "or", "right", "side", "." ]
python
test
trailofbits/manticore
manticore/native/manticore.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/manticore.py#L121-L136
def add_hook(self, pc, callback): """ Add a callback to be invoked on executing a program counter. Pass `None` for pc to invoke callback on every instruction. `callback` should be a callable that takes one :class:`~manticore.core.state.State` argument. :param pc: Address of instruction to hook :type pc: int or None :param callable callback: Hook function """ if not (isinstance(pc, int) or pc is None): raise TypeError(f"pc must be either an int or None, not {pc.__class__.__name__}") else: self._hooks.setdefault(pc, set()).add(callback) if self._hooks: self._executor.subscribe('will_execute_instruction', self._hook_callback)
[ "def", "add_hook", "(", "self", ",", "pc", ",", "callback", ")", ":", "if", "not", "(", "isinstance", "(", "pc", ",", "int", ")", "or", "pc", "is", "None", ")", ":", "raise", "TypeError", "(", "f\"pc must be either an int or None, not {pc.__class__.__name__}\"", ")", "else", ":", "self", ".", "_hooks", ".", "setdefault", "(", "pc", ",", "set", "(", ")", ")", ".", "add", "(", "callback", ")", "if", "self", ".", "_hooks", ":", "self", ".", "_executor", ".", "subscribe", "(", "'will_execute_instruction'", ",", "self", ".", "_hook_callback", ")" ]
Add a callback to be invoked on executing a program counter. Pass `None` for pc to invoke callback on every instruction. `callback` should be a callable that takes one :class:`~manticore.core.state.State` argument. :param pc: Address of instruction to hook :type pc: int or None :param callable callback: Hook function
[ "Add", "a", "callback", "to", "be", "invoked", "on", "executing", "a", "program", "counter", ".", "Pass", "None", "for", "pc", "to", "invoke", "callback", "on", "every", "instruction", ".", "callback", "should", "be", "a", "callable", "that", "takes", "one", ":", "class", ":", "~manticore", ".", "core", ".", "state", ".", "State", "argument", "." ]
python
valid
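A usage sketch for add_hook, modeled on typical Manticore scripts; the binary path and address are placeholders:

    from manticore.native import Manticore

    m = Manticore("./a.out")  # placeholder binary

    def on_pc(state):
        print("hit", hex(state.cpu.PC))

    m.add_hook(0x400ABC, on_pc)           # fire at one address
    m.add_hook(None, lambda state: None)  # or on every instruction
    m.run()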
hyperledger/indy-plenum
plenum/server/replica.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L2785-L2800
def revert_unordered_batches(self): """ Revert changes to ledger (uncommitted) and state made by any requests that have not been ordered. """ i = 0 for key in sorted(self.batches.keys(), reverse=True): if compare_3PC_keys(self.last_ordered_3pc, key) > 0: ledger_id, discarded, _, prevStateRoot, len_reqIdr = self.batches.pop(key) discarded = invalid_index_serializer.deserialize(discarded) self.logger.debug('{} reverting 3PC key {}'.format(self, key)) self.revert(ledger_id, prevStateRoot, len_reqIdr - len(discarded)) i += 1 else: break return i
[ "def", "revert_unordered_batches", "(", "self", ")", ":", "i", "=", "0", "for", "key", "in", "sorted", "(", "self", ".", "batches", ".", "keys", "(", ")", ",", "reverse", "=", "True", ")", ":", "if", "compare_3PC_keys", "(", "self", ".", "last_ordered_3pc", ",", "key", ")", ">", "0", ":", "ledger_id", ",", "discarded", ",", "_", ",", "prevStateRoot", ",", "len_reqIdr", "=", "self", ".", "batches", ".", "pop", "(", "key", ")", "discarded", "=", "invalid_index_serializer", ".", "deserialize", "(", "discarded", ")", "self", ".", "logger", ".", "debug", "(", "'{} reverting 3PC key {}'", ".", "format", "(", "self", ",", "key", ")", ")", "self", ".", "revert", "(", "ledger_id", ",", "prevStateRoot", ",", "len_reqIdr", "-", "len", "(", "discarded", ")", ")", "i", "+=", "1", "else", ":", "break", "return", "i" ]
Revert changes to ledger (uncommitted) and state made by any requests that have not been ordered.
[ "Revert", "changes", "to", "ledger", "(", "uncommitted", ")", "and", "state", "made", "by", "any", "requests", "that", "have", "not", "been", "ordered", "." ]
python
train
PonteIneptique/collatinus-python
pycollatinus/lemmatiseur.py
https://github.com/PonteIneptique/collatinus-python/blob/fca37b0b77bc60f47d3c24ab42f6d0bdca6ba0f5/pycollatinus/lemmatiseur.py#L176-L187
def _lemmatise_assims(self, f, *args, **kwargs): """ Lemmatise a word f using its assimilated form :param f: Word to lemmatise :param pos: Retrieve the POS :param get_lemma_object: Retrieve Lemma object instead of string representation of lemma :param results: Current results """ forme_assimilee = self.assims(f) if forme_assimilee != f: for proposal in self._lemmatise(forme_assimilee, *args, **kwargs): yield proposal
[ "def", "_lemmatise_assims", "(", "self", ",", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "forme_assimilee", "=", "self", ".", "assims", "(", "f", ")", "if", "forme_assimilee", "!=", "f", ":", "for", "proposal", "in", "self", ".", "_lemmatise", "(", "forme_assimilee", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "yield", "proposal" ]
Lemmatise a word f using its assimilated form :param f: Word to lemmatise :param pos: Retrieve the POS :param get_lemma_object: Retrieve Lemma object instead of string representation of lemma :param results: Current results
[ "Lemmatise", "a", "word", "f", "using", "its", "assimilated", "form" ]
python
train
pantsbuild/pex
pex/vendor/_vendored/wheel/wheel/tool/__init__.py
https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/vendor/_vendored/wheel/wheel/tool/__init__.py#L53-L77
def keygen(get_keyring=get_keyring): """Generate a public/private key pair.""" warn_signatures() WheelKeys, keyring = get_keyring() ed25519ll = signatures.get_ed25519ll() wk = WheelKeys().load() keypair = ed25519ll.crypto_sign_keypair() vk = native(urlsafe_b64encode(keypair.vk)) sk = native(urlsafe_b64encode(keypair.sk)) kr = keyring.get_keyring() kr.set_password("wheel", vk, sk) print("Created Ed25519 keypair with vk={}".format(vk)) print("in {!r}".format(kr)) sk2 = kr.get_password('wheel', vk) if sk2 != sk: raise WheelError("Keyring is broken. Could not retrieve secret key.") print("Trusting {} to sign and verify all packages.".format(vk)) wk.add_signer('+', vk) wk.trust('+', vk) wk.save()
[ "def", "keygen", "(", "get_keyring", "=", "get_keyring", ")", ":", "warn_signatures", "(", ")", "WheelKeys", ",", "keyring", "=", "get_keyring", "(", ")", "ed25519ll", "=", "signatures", ".", "get_ed25519ll", "(", ")", "wk", "=", "WheelKeys", "(", ")", ".", "load", "(", ")", "keypair", "=", "ed25519ll", ".", "crypto_sign_keypair", "(", ")", "vk", "=", "native", "(", "urlsafe_b64encode", "(", "keypair", ".", "vk", ")", ")", "sk", "=", "native", "(", "urlsafe_b64encode", "(", "keypair", ".", "sk", ")", ")", "kr", "=", "keyring", ".", "get_keyring", "(", ")", "kr", ".", "set_password", "(", "\"wheel\"", ",", "vk", ",", "sk", ")", "print", "(", "\"Created Ed25519 keypair with vk={}\"", ".", "format", "(", "vk", ")", ")", "print", "(", "\"in {!r}\"", ".", "format", "(", "kr", ")", ")", "sk2", "=", "kr", ".", "get_password", "(", "'wheel'", ",", "vk", ")", "if", "sk2", "!=", "sk", ":", "raise", "WheelError", "(", "\"Keyring is broken. Could not retrieve secret key.\"", ")", "print", "(", "\"Trusting {} to sign and verify all packages.\"", ".", "format", "(", "vk", ")", ")", "wk", ".", "add_signer", "(", "'+'", ",", "vk", ")", "wk", ".", "trust", "(", "'+'", ",", "vk", ")", "wk", ".", "save", "(", ")" ]
Generate a public/private key pair.
[ "Generate", "a", "public", "/", "private", "key", "pair", "." ]
python
train
blockstack/pybitcoin
pybitcoin/wallet.py
https://github.com/blockstack/pybitcoin/blob/92c8da63c40f7418594b1ce395990c3f5a4787cc/pybitcoin/wallet.py#L45-L59
def keypair(self, i, keypair_class): """ Return the keypair that corresponds to the provided sequence number and keypair class (BitcoinKeypair, etc.). """ # Make sure keypair_class is a valid cryptocurrency keypair if not is_cryptocurrency_keypair_class(keypair_class): raise Exception(_messages["INVALID_KEYPAIR_CLASS"]) currency_name = keypair_class.__name__.lower().replace('keypair', '') k = keypair_class.from_passphrase( self._passphrase + " " + currency_name + str(i)) return k
[ "def", "keypair", "(", "self", ",", "i", ",", "keypair_class", ")", ":", "# Make sure keypair_class is a valid cryptocurrency keypair", "if", "not", "is_cryptocurrency_keypair_class", "(", "keypair_class", ")", ":", "raise", "Exception", "(", "_messages", "[", "\"INVALID_KEYPAIR_CLASS\"", "]", ")", "currency_name", "=", "keypair_class", ".", "__name__", ".", "lower", "(", ")", ".", "replace", "(", "'keypair'", ",", "''", ")", "k", "=", "keypair_class", ".", "from_passphrase", "(", "self", ".", "_passphrase", "+", "\" \"", "+", "currency_name", "+", "str", "(", "i", ")", ")", "return", "k" ]
Return the keypair that corresponds to the provided sequence number and keypair class (BitcoinKeypair, etc.).
[ "Return", "the", "keypair", "that", "corresponds", "to", "the", "provided", "sequence", "number", "and", "keypair", "class", "(", "BitcoinKeypair", "etc", ".", ")", "." ]
python
train
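A sketch of deriving sequential keypairs from the wallet class above; the SDWallet name and import path are assumptions based on this module:

    from pybitcoin import SDWallet, BitcoinKeypair

    wallet = SDWallet("correct horse battery staple")  # placeholder passphrase
    k0 = wallet.keypair(0, BitcoinKeypair)
    k1 = wallet.keypair(1, BitcoinKeypair)
    print(k0.address(), k1.address())  # deterministic and distinct per index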
tcalmant/ipopo
pelix/internals/registry.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/internals/registry.py#L785-L801
def remove_service_listener(self, listener): """ Unregisters a service listener :param listener: The service listener :return: True if the listener has been unregistered """ with self.__svc_lock: try: data = self.__listeners_data.pop(listener) spec_listeners = self.__svc_listeners[data.specification] spec_listeners.remove(data) if not spec_listeners: del self.__svc_listeners[data.specification] return True except KeyError: return False
[ "def", "remove_service_listener", "(", "self", ",", "listener", ")", ":", "with", "self", ".", "__svc_lock", ":", "try", ":", "data", "=", "self", ".", "__listeners_data", ".", "pop", "(", "listener", ")", "spec_listeners", "=", "self", ".", "__svc_listeners", "[", "data", ".", "specification", "]", "spec_listeners", ".", "remove", "(", "data", ")", "if", "not", "spec_listeners", ":", "del", "self", ".", "__svc_listeners", "[", "data", ".", "specification", "]", "return", "True", "except", "KeyError", ":", "return", "False" ]
Unregisters a service listener :param listener: The service listener :return: True if the listener has been unregistered
[ "Unregisters", "a", "service", "listener" ]
python
train
inspirehep/inspire-dojson
inspire_dojson/utils/geo.py
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/utils/geo.py#L545-L573
def parse_institution_address(address, city, state_province, country, postal_code, country_code): """Parse an institution address.""" address_list = force_list(address) state_province = match_us_state(state_province) or state_province postal_code = force_list(postal_code) country = force_list(country) country_code = match_country_code(country_code) if isinstance(postal_code, (tuple, list)): postal_code = ', '.join(postal_code) if isinstance(country, (tuple, list)): country = ', '.join(set(country)) if not country_code and country: country_code = match_country_name_to_its_code(country) if not country_code and state_province and state_province in us_state_to_iso_code.values(): country_code = 'US' return { 'cities': force_list(city), 'country_code': country_code, 'postal_address': address_list, 'postal_code': postal_code, 'state': state_province, }
[ "def", "parse_institution_address", "(", "address", ",", "city", ",", "state_province", ",", "country", ",", "postal_code", ",", "country_code", ")", ":", "address_list", "=", "force_list", "(", "address", ")", "state_province", "=", "match_us_state", "(", "state_province", ")", "or", "state_province", "postal_code", "=", "force_list", "(", "postal_code", ")", "country", "=", "force_list", "(", "country", ")", "country_code", "=", "match_country_code", "(", "country_code", ")", "if", "isinstance", "(", "postal_code", ",", "(", "tuple", ",", "list", ")", ")", ":", "postal_code", "=", "', '", ".", "join", "(", "postal_code", ")", "if", "isinstance", "(", "country", ",", "(", "tuple", ",", "list", ")", ")", ":", "country", "=", "', '", ".", "join", "(", "set", "(", "country", ")", ")", "if", "not", "country_code", "and", "country", ":", "country_code", "=", "match_country_name_to_its_code", "(", "country", ")", "if", "not", "country_code", "and", "state_province", "and", "state_province", "in", "us_state_to_iso_code", ".", "values", "(", ")", ":", "country_code", "=", "'US'", "return", "{", "'cities'", ":", "force_list", "(", "city", ")", ",", "'country_code'", ":", "country_code", ",", "'postal_address'", ":", "address_list", ",", "'postal_code'", ":", "postal_code", ",", "'state'", ":", "state_province", ",", "}" ]
Parse an institution address.
[ "Parse", "an", "institution", "address", "." ]
python
train
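A worked call showing the normalized dict this parser returns; the address values are made up, and the 'IT' code assumes the country-name lookup resolves 'Italy':

    from inspire_dojson.utils.geo import parse_institution_address

    result = parse_institution_address(
        address="Via Enrico Fermi 40",
        city="Frascati",
        state_province=None,
        country="Italy",
        postal_code="00044",
        country_code=None,
    )
    # expected: {'cities': ['Frascati'], 'country_code': 'IT',
    #            'postal_address': ['Via Enrico Fermi 40'],
    #            'postal_code': '00044', 'state': None}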
bitesofcode/projexui
projexui/windows/xdkwindow/xdkitem.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/windows/xdkwindow/xdkitem.py#L204-L230
def titleForFilepath( url ): """ Returns a gui title for this url. :return <str> """ url = nativestring(url) if url in XdkEntryItem.TITLE_MAP: return XdkEntryItem.TITLE_MAP.get(url) url = nativestring(url).replace('\\', '/') basename = os.path.basename(url) title = os.path.splitext(basename)[0] if title == 'index': title = url.split('/')[-2] if title.endswith('-allmembers'): title = 'List of All Members for %s' % title.split('-')[-2] elif title.endswith('-source'): title = 'Source Code for %s' % title.split('-')[-2] elif len(nativestring(url).split('/')) <= 2 and title in TITLE_MAP: title = TITLE_MAP[title] elif not 'api/' in url: title = projex.text.pretty(title) return title
[ "def", "titleForFilepath", "(", "url", ")", ":", "url", "=", "nativestring", "(", "url", ")", "if", "url", "in", "XdkEntryItem", ".", "TITLE_MAP", ":", "return", "XdkEntryItem", ".", "TITLE_MAP", ".", "get", "(", "url", ")", "url", "=", "nativestring", "(", "url", ")", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "basename", "=", "os", ".", "path", ".", "basename", "(", "url", ")", "title", "=", "os", ".", "path", ".", "splitext", "(", "basename", ")", "[", "0", "]", "if", "title", "==", "'index'", ":", "title", "=", "url", ".", "split", "(", "'/'", ")", "[", "-", "2", "]", "if", "title", ".", "endswith", "(", "'-allmembers'", ")", ":", "title", "=", "'List of All Members for %s'", "%", "title", ".", "split", "(", "'-'", ")", "[", "-", "2", "]", "elif", "title", ".", "endswith", "(", "'-source'", ")", ":", "title", "=", "'Source Code for %s'", "%", "title", ".", "split", "(", "'-'", ")", "[", "-", "2", "]", "elif", "len", "(", "nativestring", "(", "url", ")", ".", "split", "(", "'/'", ")", ")", "<=", "2", "and", "title", "in", "TITLE_MAP", ":", "title", "=", "TITLE_MAP", "[", "title", "]", "elif", "not", "'api/'", "in", "url", ":", "title", "=", "projex", ".", "text", ".", "pretty", "(", "title", ")", "return", "title" ]
Returns a gui title for this url. :return <str>
[ "Returns", "a", "gui", "title", "for", "this", "url", ".", ":", "return", "<str", ">" ]
python
train
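A few illustrative inputs and the titles the rules above would produce, assuming titleForFilepath is exposed as a static method and none of the inputs hit TITLE_MAP:

    from projexui.windows.xdkwindow.xdkitem import XdkEntryItem

    XdkEntryItem.titleForFilepath("api/projexui/xdkitem-source.html")
    # -> 'Source Code for xdkitem'
    XdkEntryItem.titleForFilepath("api/projexui/xdkitem-allmembers.html")
    # -> 'List of All Members for xdkitem'
    XdkEntryItem.titleForFilepath("docs/getting_started.html")
    # -> prettified, roughly 'Getting Started'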
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_rmon.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_rmon.py#L38-L49
def rmon_event_entry_log(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") rmon = ET.SubElement(config, "rmon", xmlns="urn:brocade.com:mgmt:brocade-rmon") event_entry = ET.SubElement(rmon, "event-entry") event_index_key = ET.SubElement(event_entry, "event-index") event_index_key.text = kwargs.pop('event_index') log = ET.SubElement(event_entry, "log") callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "rmon_event_entry_log", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "rmon", "=", "ET", ".", "SubElement", "(", "config", ",", "\"rmon\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-rmon\"", ")", "event_entry", "=", "ET", ".", "SubElement", "(", "rmon", ",", "\"event-entry\"", ")", "event_index_key", "=", "ET", ".", "SubElement", "(", "event_entry", ",", "\"event-index\"", ")", "event_index_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'event_index'", ")", "log", "=", "ET", ".", "SubElement", "(", "event_entry", ",", "\"log\"", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
wummel/linkchecker
third_party/miniboa-r42/miniboa/telnet.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/miniboa-r42/miniboa/telnet.py#L696-L700
def _check_remote_option(self, option): """Test the status of remote negotiated Telnet options.""" if not self.telnet_opt_dict.has_key(option): self.telnet_opt_dict[option] = TelnetOption() return self.telnet_opt_dict[option].remote_option
[ "def", "_check_remote_option", "(", "self", ",", "option", ")", ":", "if", "not", "self", ".", "telnet_opt_dict", ".", "has_key", "(", "option", ")", ":", "self", ".", "telnet_opt_dict", "[", "option", "]", "=", "TelnetOption", "(", ")", "return", "self", ".", "telnet_opt_dict", "[", "option", "]", ".", "remote_option" ]
Test the status of remote negotiated Telnet options.
[ "Test", "the", "status", "of", "remote", "negotiated", "Telnet", "options", "." ]
python
train
aychedee/unchained
unchained/fields.py
https://github.com/aychedee/unchained/blob/11d03451ee5247e66b3d6a454e1bde71f81ae357/unchained/fields.py#L152-L162
def get_prep_value(self, value): '''The psycopg adaptor returns Python objects, but we also have to handle conversion ourselves ''' if isinstance(value, JSON.JsonDict): return json.dumps(value, cls=JSON.Encoder) if isinstance(value, JSON.JsonList): return value.json_string if isinstance(value, JSON.JsonString): return json.dumps(value) return value
[ "def", "get_prep_value", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "JSON", ".", "JsonDict", ")", ":", "return", "json", ".", "dumps", "(", "value", ",", "cls", "=", "JSON", ".", "Encoder", ")", "if", "isinstance", "(", "value", ",", "JSON", ".", "JsonList", ")", ":", "return", "value", ".", "json_string", "if", "isinstance", "(", "value", ",", "JSON", ".", "JsonString", ")", ":", "return", "json", ".", "dumps", "(", "value", ")", "return", "value" ]
The psycopg adaptor returns Python objects, but we also have to handle conversion ourselves
[ "The", "psycopg", "adaptor", "returns", "Python", "objects", "but", "we", "also", "have", "to", "handle", "conversion", "ourselves" ]
python
train
saltstack/salt
salt/cloud/clouds/xen.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L217-L260
def get_vm_ip(name=None, session=None, call=None): ''' Get the IP address of the VM .. code-block:: bash salt-cloud -a get_vm_ip xenvm01 .. note:: Requires xen guest tools to be installed in VM ''' if call == 'function': raise SaltCloudException( 'This function must be called with -a or --action.' ) if session is None: log.debug('New session being created') session = _get_session() vm = _get_vm(name, session=session) ret = None # -- try to get ip from vif vifs = session.xenapi.VM.get_VIFs(vm) if vifs is not None: for vif in vifs: if session.xenapi.VIF.get_ipv4_addresses(vif): cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop() ret, subnet = cidr.split('/') log.debug( 'VM vif returned for instance: %s ip: %s', name, ret) return ret # -- try to get ip from get tools metrics vgm = session.xenapi.VM.get_guest_metrics(vm) try: net = session.xenapi.VM_guest_metrics.get_networks(vgm) if "0/ip" in net.keys(): log.debug( 'VM guest metrics returned for instance: %s 0/ip: %s', name, net["0/ip"] ) ret = net["0/ip"] # except Exception as ex: except XenAPI.Failure: log.info('Could not get vm metrics at this time') return ret
[ "def", "get_vm_ip", "(", "name", "=", "None", ",", "session", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "==", "'function'", ":", "raise", "SaltCloudException", "(", "'This function must be called with -a or --action.'", ")", "if", "session", "is", "None", ":", "log", ".", "debug", "(", "'New session being created'", ")", "session", "=", "_get_session", "(", ")", "vm", "=", "_get_vm", "(", "name", ",", "session", "=", "session", ")", "ret", "=", "None", "# -- try to get ip from vif", "vifs", "=", "session", ".", "xenapi", ".", "VM", ".", "get_VIFs", "(", "vm", ")", "if", "vifs", "is", "not", "None", ":", "for", "vif", "in", "vifs", ":", "if", "session", ".", "xenapi", ".", "VIF", ".", "get_ipv4_addresses", "(", "vif", ")", ":", "cidr", "=", "session", ".", "xenapi", ".", "VIF", ".", "get_ipv4_addresses", "(", "vif", ")", ".", "pop", "(", ")", "ret", ",", "subnet", "=", "cidr", ".", "split", "(", "'/'", ")", "log", ".", "debug", "(", "'VM vif returned for instance: %s ip: %s'", ",", "name", ",", "ret", ")", "return", "ret", "# -- try to get ip from get tools metrics", "vgm", "=", "session", ".", "xenapi", ".", "VM", ".", "get_guest_metrics", "(", "vm", ")", "try", ":", "net", "=", "session", ".", "xenapi", ".", "VM_guest_metrics", ".", "get_networks", "(", "vgm", ")", "if", "\"0/ip\"", "in", "net", ".", "keys", "(", ")", ":", "log", ".", "debug", "(", "'VM guest metrics returned for instance: %s 0/ip: %s'", ",", "name", ",", "net", "[", "\"0/ip\"", "]", ")", "ret", "=", "net", "[", "\"0/ip\"", "]", "# except Exception as ex:", "except", "XenAPI", ".", "Failure", ":", "log", ".", "info", "(", "'Could not get vm metrics at this time'", ")", "return", "ret" ]
Get the IP address of the VM .. code-block:: bash salt-cloud -a get_vm_ip xenvm01 .. note:: Requires xen guest tools to be installed in VM
[ "Get", "the", "IP", "address", "of", "the", "VM" ]
python
train
ecederstrand/exchangelib
exchangelib/services.py
https://github.com/ecederstrand/exchangelib/blob/736347b337c239fcd6d592db5b29e819f753c1ba/exchangelib/services.py#L1478-L1510
def call(self, folder, additional_fields, restriction, order_fields, shape, query_string, depth, max_items, offset): """ Find items in an account. :param folder: the Folder object to query :param additional_fields: the extra fields that should be returned with the item, as FieldPath objects :param restriction: a Restriction object for :param order_fields: the fields to sort the results by :param shape: The set of attributes to return :param query_string: a QueryString object :param depth: How deep in the folder structure to search for items :param max_items: the max number of items to return :param offset: the offset relative to the first item in the item collection. Usually 0. :return: XML elements for the matching items """ from .items import Persona, ID_ONLY personas = self._paged_call(payload_func=self.get_payload, max_items=max_items, **dict( folder=folder, additional_fields=additional_fields, restriction=restriction, order_fields=order_fields, query_string=query_string, shape=shape, depth=depth, page_size=self.chunk_size, offset=offset, )) if shape == ID_ONLY and additional_fields is None: for p in personas: yield p if isinstance(p, Exception) else Persona.id_from_xml(p) else: for p in personas: yield p if isinstance(p, Exception) else Persona.from_xml(p, account=self.account)
[ "def", "call", "(", "self", ",", "folder", ",", "additional_fields", ",", "restriction", ",", "order_fields", ",", "shape", ",", "query_string", ",", "depth", ",", "max_items", ",", "offset", ")", ":", "from", ".", "items", "import", "Persona", ",", "ID_ONLY", "personas", "=", "self", ".", "_paged_call", "(", "payload_func", "=", "self", ".", "get_payload", ",", "max_items", "=", "max_items", ",", "*", "*", "dict", "(", "folder", "=", "folder", ",", "additional_fields", "=", "additional_fields", ",", "restriction", "=", "restriction", ",", "order_fields", "=", "order_fields", ",", "query_string", "=", "query_string", ",", "shape", "=", "shape", ",", "depth", "=", "depth", ",", "page_size", "=", "self", ".", "chunk_size", ",", "offset", "=", "offset", ",", ")", ")", "if", "shape", "==", "ID_ONLY", "and", "additional_fields", "is", "None", ":", "for", "p", "in", "personas", ":", "yield", "p", "if", "isinstance", "(", "p", ",", "Exception", ")", "else", "Persona", ".", "id_from_xml", "(", "p", ")", "else", ":", "for", "p", "in", "personas", ":", "yield", "p", "if", "isinstance", "(", "p", ",", "Exception", ")", "else", "Persona", ".", "from_xml", "(", "p", ",", "account", "=", "self", ".", "account", ")" ]
Find items in an account. :param folder: the Folder object to query :param additional_fields: the extra fields that should be returned with the item, as FieldPath objects :param restriction: a Restriction object for :param order_fields: the fields to sort the results by :param shape: The set of attributes to return :param query_string: a QueryString object :param depth: How deep in the folder structure to search for items :param max_items: the max number of items to return :param offset: the offset relative to the first item in the item collection. Usually 0. :return: XML elements for the matching items
[ "Find", "items", "in", "an", "account", "." ]
python
train
mbj4668/pyang
pyang/statements.py
https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L1852-L1862
def v_unique_name_leaf_list(ctx, stmt): """Make sure config true leaf-lists do not have duplicate defaults""" if not stmt.i_config: return seen = [] for defval in stmt.i_default: if defval in seen: err_add(ctx.errors, stmt.pos, 'DUPLICATE_DEFAULT', (defval)) else: seen.append(defval)
[ "def", "v_unique_name_leaf_list", "(", "ctx", ",", "stmt", ")", ":", "if", "not", "stmt", ".", "i_config", ":", "return", "seen", "=", "[", "]", "for", "defval", "in", "stmt", ".", "i_default", ":", "if", "defval", "in", "seen", ":", "err_add", "(", "ctx", ".", "errors", ",", "stmt", ".", "pos", ",", "'DUPLICATE_DEFAULT'", ",", "(", "defval", ")", ")", "else", ":", "seen", ".", "append", "(", "defval", ")" ]
Make sure config true leaf-lists do not have duplicate defaults
[ "Make", "sure", "config", "true", "leaf", "-", "lists", "do", "not", "have", "duplicate", "defaults" ]
python
train
O365/python-o365
O365/connection.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/connection.py#L380-L426
def get_authorization_url(self, requested_scopes=None, redirect_uri=OAUTH_REDIRECT_URL, **kwargs): """ Initializes the oauth authorization flow, getting the authorization url that the user must approve. :param list[str] requested_scopes: list of scopes to request access for :param str redirect_uri: redirect url configured in registered app :param kwargs: allow to pass unused params in conjunction with Connection :return: authorization url :rtype: str """ # TODO: remove this warning in future releases if redirect_uri == OAUTH_REDIRECT_URL: warnings.warn('The default redirect uri was changed in version 1.1.4. to' ' "https://login.microsoftonline.com/common/oauth2/nativeclient".' ' You may have to change the registered app "redirect uri" or pass here the old "redirect_uri"', DeprecationWarning) client_id, client_secret = self.auth if requested_scopes: scopes = requested_scopes elif self.scopes is not None: scopes = self.scopes else: raise ValueError('Must provide at least one scope') self.session = oauth = OAuth2Session(client_id=client_id, redirect_uri=redirect_uri, scope=scopes) self.session.proxies = self.proxy if self.request_retries: retry = Retry(total=self.request_retries, read=self.request_retries, connect=self.request_retries, backoff_factor=RETRIES_BACKOFF_FACTOR, status_forcelist=RETRIES_STATUS_LIST) adapter = HTTPAdapter(max_retries=retry) self.session.mount('http://', adapter) self.session.mount('https://', adapter) # TODO: access_type='offline' has no effect according to documentation # This is done through scope 'offline_access'. auth_url, state = oauth.authorization_url( url=self._oauth2_authorize_url, access_type='offline') return auth_url
[ "def", "get_authorization_url", "(", "self", ",", "requested_scopes", "=", "None", ",", "redirect_uri", "=", "OAUTH_REDIRECT_URL", ",", "*", "*", "kwargs", ")", ":", "# TODO: remove this warning in future releases", "if", "redirect_uri", "==", "OAUTH_REDIRECT_URL", ":", "warnings", ".", "warn", "(", "'The default redirect uri was changed in version 1.1.4. to'", "' \"https://login.microsoftonline.com/common/oauth2/nativeclient\".'", "' You may have to change the registered app \"redirect uri\" or pass here the old \"redirect_uri\"'", ",", "DeprecationWarning", ")", "client_id", ",", "client_secret", "=", "self", ".", "auth", "if", "requested_scopes", ":", "scopes", "=", "requested_scopes", "elif", "self", ".", "scopes", "is", "not", "None", ":", "scopes", "=", "self", ".", "scopes", "else", ":", "raise", "ValueError", "(", "'Must provide at least one scope'", ")", "self", ".", "session", "=", "oauth", "=", "OAuth2Session", "(", "client_id", "=", "client_id", ",", "redirect_uri", "=", "redirect_uri", ",", "scope", "=", "scopes", ")", "self", ".", "session", ".", "proxies", "=", "self", ".", "proxy", "if", "self", ".", "request_retries", ":", "retry", "=", "Retry", "(", "total", "=", "self", ".", "request_retries", ",", "read", "=", "self", ".", "request_retries", ",", "connect", "=", "self", ".", "request_retries", ",", "backoff_factor", "=", "RETRIES_BACKOFF_FACTOR", ",", "status_forcelist", "=", "RETRIES_STATUS_LIST", ")", "adapter", "=", "HTTPAdapter", "(", "max_retries", "=", "retry", ")", "self", ".", "session", ".", "mount", "(", "'http://'", ",", "adapter", ")", "self", ".", "session", ".", "mount", "(", "'https://'", ",", "adapter", ")", "# TODO: access_type='offline' has no effect according to documentation", "# This is done through scope 'offline_access'.", "auth_url", ",", "state", "=", "oauth", ".", "authorization_url", "(", "url", "=", "self", ".", "_oauth2_authorize_url", ",", "access_type", "=", "'offline'", ")", "return", "auth_url" ]
Initializes the oauth authorization flow, getting the authorization url that the user must approve. :param list[str] requested_scopes: list of scopes to request access for :param str redirect_uri: redirect url configured in registered app :param kwargs: allow to pass unused params in conjunction with Connection :return: authorization url :rtype: str
[ "Initializes", "the", "oauth", "authorization", "flow", "getting", "the", "authorization", "url", "that", "the", "user", "must", "approve", "." ]
python
train
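A sketch of the authorization leg this method starts; credentials and scopes are placeholders, and the import path follows this module:

    from O365.connection import Connection

    conn = Connection(("client-id", "client-secret"),
                      scopes=["offline_access", "Mail.Read"])
    auth_url = conn.get_authorization_url()
    print("Approve access at:", auth_url)
    # the redirect URL the user lands on is then fed back to request a token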
yjzhang/uncurl_python
uncurl/state_estimation.py
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/state_estimation.py#L353-L420
def update_m(data, old_M, old_W, selected_genes, disp=False, inner_max_iters=100, parallel=True, threads=4, write_progress_file=None, tol=0.0, regularization=0.0, **kwargs): """ This returns a new M matrix that contains all genes, given an M that was created from running state estimation with a subset of genes. Args: data (sparse matrix or dense array): data matrix of shape (genes, cells), containing all genes old_M (array): shape is (selected_genes, k) old_W (array): shape is (k, cells) selected_genes (list): list of selected gene indices Rest of the args are as in poisson_estimate_state Returns: new_M: array of shape (all_genes, k) """ genes, cells = data.shape k = old_M.shape[1] non_selected_genes = [x for x in range(genes) if x not in set(selected_genes)] # 1. initialize new M new_M = np.zeros((genes, k)) new_M[selected_genes, :] = old_M # TODO: how to initialize rest of genes? # data*w? if disp: print('computing initial guess for M by data*W.T') new_M_non_selected = data[non_selected_genes, :] * sparse.csc_matrix(old_W.T) new_M[non_selected_genes, :] = new_M_non_selected.toarray() X = data.astype(float) XT = X.T is_sparse = False if sparse.issparse(X): is_sparse = True update_fn = sparse_nolips_update_w # convert to csc X = sparse.csc_matrix(X) XT = sparse.csc_matrix(XT) if parallel: update_fn = parallel_sparse_nolips_update_w Xsum = np.asarray(X.sum(0)).flatten() Xsum_m = np.asarray(X.sum(1)).flatten() # L-BFGS-B won't work right now for sparse matrices method = 'NoLips' objective_fn = _call_sparse_obj else: objective_fn = objective update_fn = nolips_update_w Xsum = X.sum(0) Xsum_m = X.sum(1) # If method is NoLips, converting to a sparse matrix # will always improve the performance (?) and never lower accuracy... # will almost always improve performance? # if sparsity is below 40%? if method == 'NoLips': is_sparse = True X = sparse.csc_matrix(X) XT = sparse.csc_matrix(XT) update_fn = sparse_nolips_update_w if parallel: update_fn = parallel_sparse_nolips_update_w objective_fn = _call_sparse_obj if disp: print('starting estimating M') new_M = _estimate_w(XT, new_M.T, old_W.T, Xsum_m, update_fn, objective_fn, is_sparse, parallel, threads, method, tol, disp, inner_max_iters, 'M', regularization) if write_progress_file is not None: progress = open(write_progress_file, 'w') progress.write('0') progress.close() return new_M.T
[ "def", "update_m", "(", "data", ",", "old_M", ",", "old_W", ",", "selected_genes", ",", "disp", "=", "False", ",", "inner_max_iters", "=", "100", ",", "parallel", "=", "True", ",", "threads", "=", "4", ",", "write_progress_file", "=", "None", ",", "tol", "=", "0.0", ",", "regularization", "=", "0.0", ",", "*", "*", "kwargs", ")", ":", "genes", ",", "cells", "=", "data", ".", "shape", "k", "=", "old_M", ".", "shape", "[", "1", "]", "non_selected_genes", "=", "[", "x", "for", "x", "in", "range", "(", "genes", ")", "if", "x", "not", "in", "set", "(", "selected_genes", ")", "]", "# 1. initialize new M", "new_M", "=", "np", ".", "zeros", "(", "(", "genes", ",", "k", ")", ")", "new_M", "[", "selected_genes", ",", ":", "]", "=", "old_M", "# TODO: how to initialize rest of genes?", "# data*w?", "if", "disp", ":", "print", "(", "'computing initial guess for M by data*W.T'", ")", "new_M_non_selected", "=", "data", "[", "non_selected_genes", ",", ":", "]", "*", "sparse", ".", "csc_matrix", "(", "old_W", ".", "T", ")", "new_M", "[", "non_selected_genes", ",", ":", "]", "=", "new_M_non_selected", ".", "toarray", "(", ")", "X", "=", "data", ".", "astype", "(", "float", ")", "XT", "=", "X", ".", "T", "is_sparse", "=", "False", "if", "sparse", ".", "issparse", "(", "X", ")", ":", "is_sparse", "=", "True", "update_fn", "=", "sparse_nolips_update_w", "# convert to csc", "X", "=", "sparse", ".", "csc_matrix", "(", "X", ")", "XT", "=", "sparse", ".", "csc_matrix", "(", "XT", ")", "if", "parallel", ":", "update_fn", "=", "parallel_sparse_nolips_update_w", "Xsum", "=", "np", ".", "asarray", "(", "X", ".", "sum", "(", "0", ")", ")", ".", "flatten", "(", ")", "Xsum_m", "=", "np", ".", "asarray", "(", "X", ".", "sum", "(", "1", ")", ")", ".", "flatten", "(", ")", "# L-BFGS-B won't work right now for sparse matrices", "method", "=", "'NoLips'", "objective_fn", "=", "_call_sparse_obj", "else", ":", "objective_fn", "=", "objective", "update_fn", "=", "nolips_update_w", "Xsum", "=", "X", ".", "sum", "(", "0", ")", "Xsum_m", "=", "X", ".", "sum", "(", "1", ")", "# If method is NoLips, converting to a sparse matrix", "# will always improve the performance (?) and never lower accuracy...", "# will almost always improve performance?", "# if sparsity is below 40%?", "if", "method", "==", "'NoLips'", ":", "is_sparse", "=", "True", "X", "=", "sparse", ".", "csc_matrix", "(", "X", ")", "XT", "=", "sparse", ".", "csc_matrix", "(", "XT", ")", "update_fn", "=", "sparse_nolips_update_w", "if", "parallel", ":", "update_fn", "=", "parallel_sparse_nolips_update_w", "objective_fn", "=", "_call_sparse_obj", "if", "disp", ":", "print", "(", "'starting estimating M'", ")", "new_M", "=", "_estimate_w", "(", "XT", ",", "new_M", ".", "T", ",", "old_W", ".", "T", ",", "Xsum_m", ",", "update_fn", ",", "objective_fn", ",", "is_sparse", ",", "parallel", ",", "threads", ",", "method", ",", "tol", ",", "disp", ",", "inner_max_iters", ",", "'M'", ",", "regularization", ")", "if", "write_progress_file", "is", "not", "None", ":", "progress", "=", "open", "(", "write_progress_file", ",", "'w'", ")", "progress", ".", "write", "(", "'0'", ")", "progress", ".", "close", "(", ")", "return", "new_M", ".", "T" ]
This returns a new M matrix that contains all genes, given an M that was created from running state estimation with a subset of genes. Args: data (sparse matrix or dense array): data matrix of shape (genes, cells), containing all genes old_M (array): shape is (selected_genes, k) old_W (array): shape is (k, cells) selected_genes (list): list of selected gene indices Rest of the args are as in poisson_estimate_state Returns: new_M: array of shape (all_genes, k)
[ "This", "returns", "a", "new", "M", "matrix", "that", "contains", "all", "genes", "given", "an", "M", "that", "was", "created", "from", "running", "state", "estimation", "with", "a", "subset", "of", "genes", "." ]
python
train
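The record above documents a state-estimation helper; a minimal usage sketch follows, with invented shapes and an assumed import of `update_m`, since the record does not show the module entry point. None of the values below come from the source.

import numpy as np

# Hypothetical shapes; the docstring only fixes the relationships between them.
genes, cells, k = 500, 200, 8
selected_genes = list(range(100))                 # genes used in the original fit
data = np.random.poisson(1.0, size=(genes, cells)).astype(float)
old_M = np.random.rand(len(selected_genes), k)    # (selected_genes, k)
old_W = np.random.rand(k, cells)                  # (k, cells)

new_M = update_m(data, old_M, old_W, selected_genes, disp=False, parallel=False)
assert new_M.shape == (genes, k)                  # (all_genes, k), per the docstring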
ulule/django-linguist
linguist/utils.py
https://github.com/ulule/django-linguist/blob/d2b95a6ab921039d56d5eeb352badfe5be9e8f77/linguist/utils.py#L85-L93
def activate_language(instances, language): """ Activates the given language for the given instances. """ language = ( language if language in get_supported_languages() else get_fallback_language() ) for instance in instances: instance.activate_language(language)
[ "def", "activate_language", "(", "instances", ",", "language", ")", ":", "language", "=", "(", "language", "if", "language", "in", "get_supported_languages", "(", ")", "else", "get_fallback_language", "(", ")", ")", "for", "instance", "in", "instances", ":", "instance", ".", "activate_language", "(", "language", ")" ]
Activates the given language for the given instances.
[ "Activates", "the", "given", "language", "for", "the", "given", "instances", "." ]
python
train
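A minimal sketch of calling this helper, assuming a model registered with django-linguist; the model and language code are invented for illustration.

from linguist.utils import activate_language

posts = list(Post.objects.all())  # Post is a hypothetical translatable model
activate_language(posts, 'fr')    # falls back to the default if 'fr' is unsupported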
alexa/alexa-skills-kit-sdk-for-python
ask-sdk/ask_sdk/standard.py
https://github.com/alexa/alexa-skills-kit-sdk-for-python/blob/097b6406aa12d5ca0b825b00c936861b530cbf39/ask-sdk/ask_sdk/standard.py#L82-L102
def skill_configuration(self): # type: () -> SkillConfiguration """Create the skill configuration object using the registered components. """ skill_config = super(StandardSkillBuilder, self).skill_configuration skill_config.api_client = DefaultApiClient() if self.table_name is not None: kwargs = {"table_name": self.table_name} # type: Dict[str, Any] if self.auto_create_table: kwargs["create_table"] = self.auto_create_table if self.partition_keygen: kwargs["partition_keygen"] = self.partition_keygen if self.dynamodb_client: kwargs["dynamodb_resource"] = self.dynamodb_client skill_config.persistence_adapter = DynamoDbAdapter(**kwargs) return skill_config
[ "def", "skill_configuration", "(", "self", ")", ":", "# type: () -> SkillConfiguration", "skill_config", "=", "super", "(", "StandardSkillBuilder", ",", "self", ")", ".", "skill_configuration", "skill_config", ".", "api_client", "=", "DefaultApiClient", "(", ")", "if", "self", ".", "table_name", "is", "not", "None", ":", "kwargs", "=", "{", "\"table_name\"", ":", "self", ".", "table_name", "}", "# type: Dict[str, Any]", "if", "self", ".", "auto_create_table", ":", "kwargs", "[", "\"create_table\"", "]", "=", "self", ".", "auto_create_table", "if", "self", ".", "partition_keygen", ":", "kwargs", "[", "\"partition_keygen\"", "]", "=", "self", ".", "partition_keygen", "if", "self", ".", "dynamodb_client", ":", "kwargs", "[", "\"dynamodb_resource\"", "]", "=", "self", ".", "dynamodb_client", "skill_config", ".", "persistence_adapter", "=", "DynamoDbAdapter", "(", "*", "*", "kwargs", ")", "return", "skill_config" ]
Create the skill configuration object using the registered components.
[ "Create", "the", "skill", "configuration", "object", "using", "the", "registered", "components", "." ]
python
train
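For context, a sketch of how this property is typically reached: construct a StandardSkillBuilder with DynamoDB persistence options and read skill_configuration. The table name is illustrative.

from ask_sdk.standard import StandardSkillBuilder

sb = StandardSkillBuilder(table_name='MySkillTable', auto_create_table=True)
config = sb.skill_configuration  # wires DefaultApiClient and a DynamoDbAdapter
print(type(config.persistence_adapter).__name__)  # DynamoDbAdapter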
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_text.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_text.py#L515-L525
def dictlist_convert_to_datetime(dict_list: Iterable[Dict], key: str, datetime_format_string: str) -> None: """ Process an iterable of dictionaries. For each dictionary ``d``, convert (in place) ``d[key]`` to a ``datetime.datetime`` form, using ``datetime_format_string`` as the format parameter to :func:`datetime.datetime.strptime`. """ for d in dict_list: d[key] = datetime.datetime.strptime(d[key], datetime_format_string)
[ "def", "dictlist_convert_to_datetime", "(", "dict_list", ":", "Iterable", "[", "Dict", "]", ",", "key", ":", "str", ",", "datetime_format_string", ":", "str", ")", "->", "None", ":", "for", "d", "in", "dict_list", ":", "d", "[", "key", "]", "=", "datetime", ".", "datetime", ".", "strptime", "(", "d", "[", "key", "]", ",", "datetime_format_string", ")" ]
Process an iterable of dictionaries. For each dictionary ``d``, convert (in place) ``d[key]`` to a ``datetime.datetime`` form, using ``datetime_format_string`` as the format parameter to :func:`datetime.datetime.strptime`.
[ "Process", "an", "iterable", "of", "dictionaries", ".", "For", "each", "dictionary", "d", "convert", "(", "in", "place", ")", "d", "[", "key", "]", "to", "a", "datetime", ".", "datetime", "form", "using", "datetime_format_string", "as", "the", "format", "parameter", "to", ":", "func", ":", "datetime", ".", "datetime", ".", "strptime", "." ]
python
train
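A self-contained sketch of the in-place conversion this helper performs; the rows, key, and format string are invented for illustration.

import datetime

rows = [{'when': '2019-01-31 10:00'}, {'when': '2019-02-01 09:30'}]
# equivalent to dictlist_convert_to_datetime(rows, 'when', '%Y-%m-%d %H:%M')
for d in rows:
    d['when'] = datetime.datetime.strptime(d['when'], '%Y-%m-%d %H:%M')
print(rows[0]['when'].isoformat())  # 2019-01-31T10:00:00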
sernst/cauldron
cauldron/invoke/parser.py
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/invoke/parser.py#L11-L42
def add_shell_action(sub_parser: ArgumentParser) -> ArgumentParser: """Populates the sub parser with the shell arguments""" sub_parser.add_argument( '-p', '--project', dest='project_directory', type=str, default=None ) sub_parser.add_argument( '-l', '--log', dest='logging_path', type=str, default=None ) sub_parser.add_argument( '-o', '--output', dest='output_directory', type=str, default=None ) sub_parser.add_argument( '-s', '--shared', dest='shared_data_path', type=str, default=None ) return sub_parser
[ "def", "add_shell_action", "(", "sub_parser", ":", "ArgumentParser", ")", "->", "ArgumentParser", ":", "sub_parser", ".", "add_argument", "(", "'-p'", ",", "'--project'", ",", "dest", "=", "'project_directory'", ",", "type", "=", "str", ",", "default", "=", "None", ")", "sub_parser", ".", "add_argument", "(", "'-l'", ",", "'--log'", ",", "dest", "=", "'logging_path'", ",", "type", "=", "str", ",", "default", "=", "None", ")", "sub_parser", ".", "add_argument", "(", "'-o'", ",", "'--output'", ",", "dest", "=", "'output_directory'", ",", "type", "=", "str", ",", "default", "=", "None", ")", "sub_parser", ".", "add_argument", "(", "'-s'", ",", "'--shared'", ",", "dest", "=", "'shared_data_path'", ",", "type", "=", "str", ",", "default", "=", "None", ")", "return", "sub_parser" ]
Populates the sub parser with the shell arguments
[ "Populates", "the", "sub", "parser", "with", "the", "shell", "arguments" ]
python
train
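A runnable sketch wiring the function into a root parser; only the option names come from the record above, while the program name and values are invented.

from argparse import ArgumentParser

parser = ArgumentParser(prog='cauldron')
shell = add_shell_action(parser.add_subparsers().add_parser('shell'))
args = shell.parse_args(['-p', '/tmp/project', '-o', '/tmp/out'])
print(args.project_directory, args.output_directory)  # /tmp/project /tmp/out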
tanghaibao/jcvi
jcvi/utils/progressbar.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/progressbar.py#L293-L306
def update(self, pbar, width): 'Updates the progress bar and its subcomponents' left, marker, right = (format_updatable(i, pbar) for i in (self.left, self.marker, self.right)) width -= len(left) + len(right) # Marker must *always* have length of 1 marker *= int(pbar.currval / pbar.maxval * width) if self.fill_left: return '%s%s%s' % (left, marker.ljust(width, self.fill), right) else: return '%s%s%s' % (left, marker.rjust(width, self.fill), right)
[ "def", "update", "(", "self", ",", "pbar", ",", "width", ")", ":", "left", ",", "marker", ",", "right", "=", "(", "format_updatable", "(", "i", ",", "pbar", ")", "for", "i", "in", "(", "self", ".", "left", ",", "self", ".", "marker", ",", "self", ".", "right", ")", ")", "width", "-=", "len", "(", "left", ")", "+", "len", "(", "right", ")", "# Marker must *always* have length of 1", "marker", "*=", "int", "(", "pbar", ".", "currval", "/", "pbar", ".", "maxval", "*", "width", ")", "if", "self", ".", "fill_left", ":", "return", "'%s%s%s'", "%", "(", "left", ",", "marker", ".", "ljust", "(", "width", ",", "self", ".", "fill", ")", ",", "right", ")", "else", ":", "return", "'%s%s%s'", "%", "(", "left", ",", "marker", ".", "rjust", "(", "width", ",", "self", ".", "fill", ")", ",", "right", ")" ]
Updates the progress bar and its subcomponents
[ "Updates", "the", "progress", "bar", "and", "its", "subcomponents" ]
python
train
pymc-devs/pymc
pymc/Model.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L114-L125
def seed(self): """ Seed new initial values for the stochastics. """ for generation in self.generations: for s in generation: try: if s.rseed is not None: value = s.random(**s.parents.value) except: pass
[ "def", "seed", "(", "self", ")", ":", "for", "generation", "in", "self", ".", "generations", ":", "for", "s", "in", "generation", ":", "try", ":", "if", "s", ".", "rseed", "is", "not", "None", ":", "value", "=", "s", ".", "random", "(", "*", "*", "s", ".", "parents", ".", "value", ")", "except", ":", "pass" ]
Seed new initial values for the stochastics.
[ "Seed", "new", "initial", "values", "for", "the", "stochastics", "." ]
python
train
saimn/sigal
sigal/plugins/nomedia.py
https://github.com/saimn/sigal/blob/912ca39991355d358dc85fd55c7aeabdd7acc386/sigal/plugins/nomedia.py#L82-L120
def filter_nomedia(album, settings=None): """Removes all filtered Media and subdirs from an Album""" nomediapath = os.path.join(album.src_path, ".nomedia") if os.path.isfile(nomediapath): if os.path.getsize(nomediapath) == 0: logger.info("Ignoring album '%s' because of present 0-byte " ".nomedia file", album.name) # subdirs have been added to the gallery already, remove them # there, too _remove_albums_with_subdirs(album.gallery.albums, [album.path]) try: os.rmdir(album.dst_path) except OSError as e: # directory was created and populated with images in a # previous run => keep it pass # cannot set albums => empty subdirs so that no albums are # generated album.subdirs = [] album.medias = [] else: with open(nomediapath, "r") as nomediaFile: logger.info("Found a .nomedia file in %s, ignoring its " "entries", album.name) ignored = nomediaFile.read().split("\n") album.medias = [media for media in album.medias if media.src_filename not in ignored] album.subdirs = [dirname for dirname in album.subdirs if dirname not in ignored] # subdirs have been added to the gallery already, remove # them there, too _remove_albums_with_subdirs(album.gallery.albums, ignored, album.path + os.path.sep)
[ "def", "filter_nomedia", "(", "album", ",", "settings", "=", "None", ")", ":", "nomediapath", "=", "os", ".", "path", ".", "join", "(", "album", ".", "src_path", ",", "\".nomedia\"", ")", "if", "os", ".", "path", ".", "isfile", "(", "nomediapath", ")", ":", "if", "os", ".", "path", ".", "getsize", "(", "nomediapath", ")", "==", "0", ":", "logger", ".", "info", "(", "\"Ignoring album '%s' because of present 0-byte \"", "\".nomedia file\"", ",", "album", ".", "name", ")", "# subdirs have been added to the gallery already, remove them", "# there, too", "_remove_albums_with_subdirs", "(", "album", ".", "gallery", ".", "albums", ",", "[", "album", ".", "path", "]", ")", "try", ":", "os", ".", "rmdir", "(", "album", ".", "dst_path", ")", "except", "OSError", "as", "e", ":", "# directory was created and populated with images in a", "# previous run => keep it", "pass", "# cannot set albums => empty subdirs so that no albums are", "# generated", "album", ".", "subdirs", "=", "[", "]", "album", ".", "medias", "=", "[", "]", "else", ":", "with", "open", "(", "nomediapath", ",", "\"r\"", ")", "as", "nomediaFile", ":", "logger", ".", "info", "(", "\"Found a .nomedia file in %s, ignoring its \"", "\"entries\"", ",", "album", ".", "name", ")", "ignored", "=", "nomediaFile", ".", "read", "(", ")", ".", "split", "(", "\"\\n\"", ")", "album", ".", "medias", "=", "[", "media", "for", "media", "in", "album", ".", "medias", "if", "media", ".", "src_filename", "not", "in", "ignored", "]", "album", ".", "subdirs", "=", "[", "dirname", "for", "dirname", "in", "album", ".", "subdirs", "if", "dirname", "not", "in", "ignored", "]", "# subdirs have been added to the gallery already, remove", "# them there, too", "_remove_albums_with_subdirs", "(", "album", ".", "gallery", ".", "albums", ",", "ignored", ",", "album", ".", "path", "+", "os", ".", "path", ".", "sep", ")" ]
Removes all filtered Media and subdirs from an Album
[ "Removes", "all", "filtered", "Media", "and", "subdirs", "from", "an", "Album" ]
python
valid
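The two behaviors above are driven purely by a .nomedia file on disk; here is a sketch with invented album paths showing both cases.

import os

os.makedirs('photos/private', exist_ok=True)
os.makedirs('photos/trip', exist_ok=True)
# 0-byte file: the whole 'private' album is ignored
open('photos/private/.nomedia', 'w').close()
# non-empty file: only the listed media and subdirs are dropped from 'trip'
with open('photos/trip/.nomedia', 'w') as f:
    f.write('IMG_0001.jpg\ndrafts\n')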
Karaage-Cluster/python-tldap
tldap/modlist.py
https://github.com/Karaage-Cluster/python-tldap/blob/61f1af74a3648cb6491e7eeb1ee2eb395d67bf59/tldap/modlist.py#L52-L64
def addModlist(entry: dict, ignore_attr_types: Optional[List[str]] = None) -> Dict[str, List[bytes]]: """Build modify list for call of method LDAPObject.add()""" ignore_attr_types = _list_dict(map(str.lower, (ignore_attr_types or []))) modlist: Dict[str, List[bytes]] = {} for attrtype in entry.keys(): if attrtype.lower() in ignore_attr_types: # This attribute type is ignored continue for value in entry[attrtype]: assert value is not None if len(entry[attrtype]) > 0: modlist[attrtype] = escape_list(entry[attrtype]) return modlist
[ "def", "addModlist", "(", "entry", ":", "dict", ",", "ignore_attr_types", ":", "Optional", "[", "List", "[", "str", "]", "]", "=", "None", ")", "->", "Dict", "[", "str", ",", "List", "[", "bytes", "]", "]", ":", "ignore_attr_types", "=", "_list_dict", "(", "map", "(", "str", ".", "lower", ",", "(", "ignore_attr_types", "or", "[", "]", ")", ")", ")", "modlist", ":", "Dict", "[", "str", ",", "List", "[", "bytes", "]", "]", "=", "{", "}", "for", "attrtype", "in", "entry", ".", "keys", "(", ")", ":", "if", "attrtype", ".", "lower", "(", ")", "in", "ignore_attr_types", ":", "# This attribute type is ignored", "continue", "for", "value", "in", "entry", "[", "attrtype", "]", ":", "assert", "value", "is", "not", "None", "if", "len", "(", "entry", "[", "attrtype", "]", ")", ">", "0", ":", "modlist", "[", "attrtype", "]", "=", "escape_list", "(", "entry", "[", "attrtype", "]", ")", "return", "modlist" ]
Build modify list for call of method LDAPObject.add()
[ "Build", "modify", "list", "for", "call", "of", "method", "LDAPObject", ".", "add", "()" ]
python
train
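A hedged sketch of the call shape: build an add-modlist from an entry dict while skipping one attribute. The attribute values are invented; ignore matching is case-insensitive per the code above.

entry = {
    'objectClass': [b'person'],
    'cn': [b'Alice'],
    'userPassword': [b'secret'],
}
modlist = addModlist(entry, ignore_attr_types=['userPassword'])
# modlist keeps objectClass and cn (values escaped); userPassword is skipped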
eddieantonio/perfection
perfection/getty.py
https://github.com/eddieantonio/perfection/blob/69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43/perfection/getty.py#L220-L244
def check_columns_fit(unoccupied_columns, row, offset, row_length): """ Checks if all the occupied columns in the row fit in the indices given by free columns. >>> check_columns_fit({0,1,2,3}, [(0, True), (2, True)], 0, 4) True >>> check_columns_fit({0,2,3}, [(2, True), (3, True)], 0, 4) True >>> check_columns_fit({}, [(2, True), (3, True)], 0, 4) False >>> check_columns_fit({0}, [(2, True)], 2, 4) True >>> check_columns_fit({0}, [(3, True)], 2, 4) False """ for index, item in row: adjusted_index = (index + offset) % row_length # Check if the index is in the appropriate place. if adjusted_index not in unoccupied_columns: return False return True
[ "def", "check_columns_fit", "(", "unoccupied_columns", ",", "row", ",", "offset", ",", "row_length", ")", ":", "for", "index", ",", "item", "in", "row", ":", "adjusted_index", "=", "(", "index", "+", "offset", ")", "%", "row_length", "# Check if the index is in the appropriate place.", "if", "adjusted_index", "not", "in", "unoccupied_columns", ":", "return", "False", "return", "True" ]
Checks if all the occupied columns in the row fit in the indices given by free columns. >>> check_columns_fit({0,1,2,3}, [(0, True), (2, True)], 0, 4) True >>> check_columns_fit({0,2,3}, [(2, True), (3, True)], 0, 4) True >>> check_columns_fit({}, [(2, True), (3, True)], 0, 4) False >>> check_columns_fit({0}, [(2, True)], 2, 4) True >>> check_columns_fit({0}, [(3, True)], 2, 4) False
[ "Checks", "if", "all", "the", "occupied", "columns", "in", "the", "row", "fit", "in", "the", "indices", "given", "by", "free", "columns", "." ]
python
train
aio-libs/aioredis
aioredis/pool.py
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/pool.py#L184-L199
def execute(self, command, *args, **kw): """Executes redis command in a free connection and returns future waiting for result. Picks connection from free pool and send command through that connection. If no connection is found, returns coroutine waiting for free connection to execute command. """ conn, address = self.get_connection(command, args) if conn is not None: fut = conn.execute(command, *args, **kw) return self._check_result(fut, command, args, kw) else: coro = self._wait_execute(address, command, args, kw) return self._check_result(coro, command, args, kw)
[ "def", "execute", "(", "self", ",", "command", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "conn", ",", "address", "=", "self", ".", "get_connection", "(", "command", ",", "args", ")", "if", "conn", "is", "not", "None", ":", "fut", "=", "conn", ".", "execute", "(", "command", ",", "*", "args", ",", "*", "*", "kw", ")", "return", "self", ".", "_check_result", "(", "fut", ",", "command", ",", "args", ",", "kw", ")", "else", ":", "coro", "=", "self", ".", "_wait_execute", "(", "address", ",", "command", ",", "args", ",", "kw", ")", "return", "self", ".", "_check_result", "(", "coro", ",", "command", ",", "args", ",", "kw", ")" ]
Executes redis command in a free connection and returns future waiting for result. Picks connection from free pool and send command through that connection. If no connection is found, returns coroutine waiting for free connection to execute command.
[ "Executes", "redis", "command", "in", "a", "free", "connection", "and", "returns", "future", "waiting", "for", "result", "." ]
python
train
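A usage sketch against aioredis 1.x-style pools, with an illustrative address and key: execute() either sends on a free connection or awaits one.

import asyncio
import aioredis

async def main():
    pool = await aioredis.create_pool('redis://localhost')
    await pool.execute('set', 'greeting', 'hello')
    print(await pool.execute('get', 'greeting'))  # b'hello'
    pool.close()
    await pool.wait_closed()

asyncio.get_event_loop().run_until_complete(main())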
MuhammedHasan/sklearn_utils
sklearn_utils/preprocessing/dict_input.py
https://github.com/MuhammedHasan/sklearn_utils/blob/337c3b7a27f4921d12da496f66a2b83ef582b413/sklearn_utils/preprocessing/dict_input.py#L29-L39
def transform(self, X): ''' :param X: features. ''' inverser_tranformer = self.dict_vectorizer_ if self.feature_selection: inverser_tranformer = self.clone_dict_vectorizer_ return inverser_tranformer.inverse_transform( self.transformer.transform( self.dict_vectorizer_.transform(X)))
[ "def", "transform", "(", "self", ",", "X", ")", ":", "inverser_tranformer", "=", "self", ".", "dict_vectorizer_", "if", "self", ".", "feature_selection", ":", "inverser_tranformer", "=", "self", ".", "clone_dict_vectorizer_", "return", "inverser_tranformer", ".", "inverse_transform", "(", "self", ".", "transformer", ".", "transform", "(", "self", ".", "dict_vectorizer_", ".", "transform", "(", "X", ")", ")", ")" ]
:param X: features.
[ ":", "param", "X", ":", "features", "." ]
python
test
pgjones/quart
quart/wrappers/response.py
https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/wrappers/response.py#L341-L360
def set_cookie( # type: ignore self, key: str, value: AnyStr='', max_age: Optional[Union[int, timedelta]]=None, expires: Optional[datetime]=None, path: str='/', domain: Optional[str]=None, secure: bool=False, httponly: bool=False, ) -> None: """Set a cookie in the response headers. The arguments are the standard cookie morsels and this is a wrapper around the stdlib SimpleCookie code. """ if isinstance(value, bytes): value = value.decode() # type: ignore cookie = create_cookie(key, value, max_age, expires, path, domain, secure, httponly) # type: ignore # noqa: E501 self.headers.add('Set-Cookie', cookie.output(header=''))
[ "def", "set_cookie", "(", "# type: ignore", "self", ",", "key", ":", "str", ",", "value", ":", "AnyStr", "=", "''", ",", "max_age", ":", "Optional", "[", "Union", "[", "int", ",", "timedelta", "]", "]", "=", "None", ",", "expires", ":", "Optional", "[", "datetime", "]", "=", "None", ",", "path", ":", "str", "=", "'/'", ",", "domain", ":", "Optional", "[", "str", "]", "=", "None", ",", "secure", ":", "bool", "=", "False", ",", "httponly", ":", "bool", "=", "False", ",", ")", "->", "None", ":", "if", "isinstance", "(", "value", ",", "bytes", ")", ":", "value", "=", "value", ".", "decode", "(", ")", "# type: ignore", "cookie", "=", "create_cookie", "(", "key", ",", "value", ",", "max_age", ",", "expires", ",", "path", ",", "domain", ",", "secure", ",", "httponly", ")", "# type: ignore # noqa: E501", "self", ".", "headers", ".", "add", "(", "'Set-Cookie'", ",", "cookie", ".", "output", "(", "header", "=", "''", ")", ")" ]
Set a cookie in the response headers. The arguments are the standard cookie morsels and this is a wrapper around the stdlib SimpleCookie code.
[ "Set", "a", "cookie", "in", "the", "response", "headers", "." ]
python
train
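A sketch of calling set_cookie from inside a Quart route; the app, route, and cookie values are invented.

from quart import Quart, make_response

app = Quart(__name__)

@app.route('/login')
async def login():
    response = await make_response('ok')
    response.set_cookie('session', 'abc123', max_age=3600, httponly=True)
    return response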
jrief/django-websocket-redis
ws4redis/subscriber.py
https://github.com/jrief/django-websocket-redis/blob/abcddaad2f579d71dbf375e5e34bc35eef795a81/ws4redis/subscriber.py#L23-L49
def set_pubsub_channels(self, request, channels): """ Initialize the channels used for publishing and subscribing messages through the message queue. """ facility = request.path_info.replace(settings.WEBSOCKET_URL, '', 1) # initialize publishers audience = { 'users': 'publish-user' in channels and [SELF] or [], 'groups': 'publish-group' in channels and [SELF] or [], 'sessions': 'publish-session' in channels and [SELF] or [], 'broadcast': 'publish-broadcast' in channels, } self._publishers = set() for key in self._get_message_channels(request=request, facility=facility, **audience): self._publishers.add(key) # initialize subscribers audience = { 'users': 'subscribe-user' in channels and [SELF] or [], 'groups': 'subscribe-group' in channels and [SELF] or [], 'sessions': 'subscribe-session' in channels and [SELF] or [], 'broadcast': 'subscribe-broadcast' in channels, } self._subscription = self._connection.pubsub() for key in self._get_message_channels(request=request, facility=facility, **audience): self._subscription.subscribe(key)
[ "def", "set_pubsub_channels", "(", "self", ",", "request", ",", "channels", ")", ":", "facility", "=", "request", ".", "path_info", ".", "replace", "(", "settings", ".", "WEBSOCKET_URL", ",", "''", ",", "1", ")", "# initialize publishers", "audience", "=", "{", "'users'", ":", "'publish-user'", "in", "channels", "and", "[", "SELF", "]", "or", "[", "]", ",", "'groups'", ":", "'publish-group'", "in", "channels", "and", "[", "SELF", "]", "or", "[", "]", ",", "'sessions'", ":", "'publish-session'", "in", "channels", "and", "[", "SELF", "]", "or", "[", "]", ",", "'broadcast'", ":", "'publish-broadcast'", "in", "channels", ",", "}", "self", ".", "_publishers", "=", "set", "(", ")", "for", "key", "in", "self", ".", "_get_message_channels", "(", "request", "=", "request", ",", "facility", "=", "facility", ",", "*", "*", "audience", ")", ":", "self", ".", "_publishers", ".", "add", "(", "key", ")", "# initialize subscribers", "audience", "=", "{", "'users'", ":", "'subscribe-user'", "in", "channels", "and", "[", "SELF", "]", "or", "[", "]", ",", "'groups'", ":", "'subscribe-group'", "in", "channels", "and", "[", "SELF", "]", "or", "[", "]", ",", "'sessions'", ":", "'subscribe-session'", "in", "channels", "and", "[", "SELF", "]", "or", "[", "]", ",", "'broadcast'", ":", "'subscribe-broadcast'", "in", "channels", ",", "}", "self", ".", "_subscription", "=", "self", ".", "_connection", ".", "pubsub", "(", ")", "for", "key", "in", "self", ".", "_get_message_channels", "(", "request", "=", "request", ",", "facility", "=", "facility", ",", "*", "*", "audience", ")", ":", "self", ".", "_subscription", ".", "subscribe", "(", "key", ")" ]
Initialize the channels used for publishing and subscribing messages through the message queue.
[ "Initialize", "the", "channels", "used", "for", "publishing", "and", "subscribing", "messages", "through", "the", "message", "queue", "." ]
python
train
edx/bok-choy
bok_choy/query.py
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L148-L194
def filter(self, filter_fn=None, desc=None, **kwargs): """ Return a copy of this query, with some values removed. Example usages: .. code:: python # Returns a query that matches even numbers q.filter(filter_fn=lambda x: x % 2) # Returns a query that matches elements with el.description == "foo" q.filter(description="foo") Keyword Args: filter_fn (callable): If specified, a function that accepts one argument (the element) and returns a boolean indicating whether to include that element in the results. kwargs: Specify attribute values that an element must have to be included in the results. desc (str): A description of the filter, for use in log messages. Defaults to the name of the filter function or attribute. Raises: TypeError: neither or both of `filter_fn` and `kwargs` are provided. """ if filter_fn is not None and kwargs: raise TypeError('Must supply either a filter_fn or attribute filter parameters to filter(), but not both.') if filter_fn is None and not kwargs: raise TypeError('Must supply one of filter_fn or one or more attribute filter parameters to filter().') if desc is None: if filter_fn is not None: desc = getattr(filter_fn, '__name__', '') elif kwargs: desc = u", ".join([u"{}={!r}".format(key, value) for key, value in kwargs.items()]) desc = u"filter({})".format(desc) if kwargs: def filter_fn(elem): # pylint: disable=function-redefined, missing-docstring return all( getattr(elem, filter_key) == filter_value for filter_key, filter_value in kwargs.items() ) return self.transform(lambda xs: (x for x in xs if filter_fn(x)), desc=desc)
[ "def", "filter", "(", "self", ",", "filter_fn", "=", "None", ",", "desc", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "filter_fn", "is", "not", "None", "and", "kwargs", ":", "raise", "TypeError", "(", "'Must supply either a filter_fn or attribute filter parameters to filter(), but not both.'", ")", "if", "filter_fn", "is", "None", "and", "not", "kwargs", ":", "raise", "TypeError", "(", "'Must supply one of filter_fn or one or more attribute filter parameters to filter().'", ")", "if", "desc", "is", "None", ":", "if", "filter_fn", "is", "not", "None", ":", "desc", "=", "getattr", "(", "filter_fn", ",", "'__name__'", ",", "''", ")", "elif", "kwargs", ":", "desc", "=", "u\", \"", ".", "join", "(", "[", "u\"{}={!r}\"", ".", "format", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", "]", ")", "desc", "=", "u\"filter({})\"", ".", "format", "(", "desc", ")", "if", "kwargs", ":", "def", "filter_fn", "(", "elem", ")", ":", "# pylint: disable=function-redefined, missing-docstring", "return", "all", "(", "getattr", "(", "elem", ",", "filter_key", ")", "==", "filter_value", "for", "filter_key", ",", "filter_value", "in", "kwargs", ".", "items", "(", ")", ")", "return", "self", ".", "transform", "(", "lambda", "xs", ":", "(", "x", "for", "x", "in", "xs", "if", "filter_fn", "(", "x", ")", ")", ",", "desc", "=", "desc", ")" ]
Return a copy of this query, with some values removed. Example usages: .. code:: python # Returns a query that matches even numbers q.filter(filter_fn=lambda x: x % 2) # Returns a query that matches elements with el.description == "foo" q.filter(description="foo") Keyword Args: filter_fn (callable): If specified, a function that accepts one argument (the element) and returns a boolean indicating whether to include that element in the results. kwargs: Specify attribute values that an element must have to be included in the results. desc (str): A description of the filter, for use in log messages. Defaults to the name of the filter function or attribute. Raises: TypeError: neither or both of `filter_fn` and `kwargs` are provided.
[ "Return", "a", "copy", "of", "this", "query", "with", "some", "values", "removed", "." ]
python
train
anteater/anteater
anteater/src/project_scan.py
https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/project_scan.py#L41-L89
def prepare_project(project, project_dir, binaries, ips, urls): """ Generates blacklists / whitelists """ # Get Various Lists / Project Waivers lists = get_lists.GetLists() # Get file name black list and project waivers file_audit_list, file_audit_project_list = lists.file_audit_list(project) # Get file content black list and project waivers flag_list, ignore_list = lists.file_content_list(project) # Get File Ignore Lists file_ignore = lists.file_ignore() ignore_directories = lists.ignore_directories(project) # Get URL Ignore Lists url_ignore = lists.url_ignore(project) # Get IP Ignore Lists ip_ignore = lists.ip_ignore(project) # Get Binary Ignore Lists hashlist = get_lists.GetLists() if binaries or ips or urls: try: apikey = os.environ["VT_KEY"] except KeyError: logger.error("Please set your virustotal.com API key as an environment variable") sys.exit(1) try: vt_rate_type = config.get('config', 'vt_rate_type') except six.moves.configparser.NoSectionError: logger.error("A config section is required for vt_rate_type with a public | private option") sys.exit(1) patten = re.compile(r'\bpublic\b|\bprivate\b') if not patten.match(vt_rate_type): logger.error("Unrecognized %s option for vt_rate_type", vt_rate_type) sys.exit(1) # Perform rudimentary scans scan_file(project, project_dir, binaries, ips, urls, file_audit_list, file_audit_project_list, flag_list, ignore_list, hashlist, file_ignore, ignore_directories, url_ignore, ip_ignore, apikey)
[ "def", "prepare_project", "(", "project", ",", "project_dir", ",", "binaries", ",", "ips", ",", "urls", ")", ":", "# Get Various Lists / Project Waivers", "lists", "=", "get_lists", ".", "GetLists", "(", ")", "# Get file name black list and project waivers", "file_audit_list", ",", "file_audit_project_list", "=", "lists", ".", "file_audit_list", "(", "project", ")", "# Get file content black list and project waivers", "flag_list", ",", "ignore_list", "=", "lists", ".", "file_content_list", "(", "project", ")", "# Get File Ignore Lists", "file_ignore", "=", "lists", ".", "file_ignore", "(", ")", "ignore_directories", "=", "lists", ".", "ignore_directories", "(", "project", ")", "# Get URL Ignore Lists", "url_ignore", "=", "lists", ".", "url_ignore", "(", "project", ")", "# Get IP Ignore Lists", "ip_ignore", "=", "lists", ".", "ip_ignore", "(", "project", ")", "# Get Binary Ignore Lists", "hashlist", "=", "get_lists", ".", "GetLists", "(", ")", "if", "binaries", "or", "ips", "or", "urls", ":", "try", ":", "apikey", "=", "os", ".", "environ", "[", "\"VT_KEY\"", "]", "except", "KeyError", ":", "logger", ".", "error", "(", "\"Please set your virustotal.com API key as an environment variable\"", ")", "sys", ".", "exit", "(", "1", ")", "try", ":", "vt_rate_type", "=", "config", ".", "get", "(", "'config'", ",", "'vt_rate_type'", ")", "except", "six", ".", "moves", ".", "configparser", ".", "NoSectionError", ":", "logger", ".", "error", "(", "\"A config section is required for vt_rate_type with a public | private option\"", ")", "sys", ".", "exit", "(", "1", ")", "patten", "=", "re", ".", "compile", "(", "r'\\bpublic\\b|\\bprivate\\b'", ")", "if", "not", "patten", ".", "match", "(", "vt_rate_type", ")", ":", "logger", ".", "error", "(", "\"Unrecognized %s option for vt_rate_type\"", ",", "vt_rate_type", ")", "sys", ".", "exit", "(", "1", ")", "# Perform rudimentary scans", "scan_file", "(", "project", ",", "project_dir", ",", "binaries", ",", "ips", ",", "urls", ",", "file_audit_list", ",", "file_audit_project_list", ",", "flag_list", ",", "ignore_list", ",", "hashlist", ",", "file_ignore", ",", "ignore_directories", ",", "url_ignore", ",", "ip_ignore", ",", "apikey", ")" ]
Generates blacklists / whitelists
[ "Generates", "blacklists", "/", "whitelists" ]
python
train
NASA-AMMOS/AIT-Core
ait/core/bsc.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/bsc.py#L712-L731
def _route(self): ''' Handles server route instantiation. ''' self._app.route('/', method='GET', callback=self._get_logger_list) self._app.route('/stats', method='GET', callback=self._fetch_handler_stats) self._app.route('/<name>/start', method='POST', callback=self._add_logger_by_name) self._app.route('/<name>/stop', method='DELETE', callback=self._stop_logger_by_name) self._app.route('/<name>/config', method='GET', callback=self._get_logger_conf) self._app.route('/<name>/rotate', method='POST', callback=self._rotate_capturer_log)
[ "def", "_route", "(", "self", ")", ":", "self", ".", "_app", ".", "route", "(", "'/'", ",", "method", "=", "'GET'", ",", "callback", "=", "self", ".", "_get_logger_list", ")", "self", ".", "_app", ".", "route", "(", "'/stats'", ",", "method", "=", "'GET'", ",", "callback", "=", "self", ".", "_fetch_handler_stats", ")", "self", ".", "_app", ".", "route", "(", "'/<name>/start'", ",", "method", "=", "'POST'", ",", "callback", "=", "self", ".", "_add_logger_by_name", ")", "self", ".", "_app", ".", "route", "(", "'/<name>/stop'", ",", "method", "=", "'DELETE'", ",", "callback", "=", "self", ".", "_stop_logger_by_name", ")", "self", ".", "_app", ".", "route", "(", "'/<name>/config'", ",", "method", "=", "'GET'", ",", "callback", "=", "self", ".", "_get_logger_conf", ")", "self", ".", "_app", ".", "route", "(", "'/<name>/rotate'", ",", "method", "=", "'POST'", ",", "callback", "=", "self", ".", "_rotate_capturer_log", ")" ]
Handles server route instantiation.
[ "Handles", "server", "route", "instantiation", "." ]
python
train
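The routes above expose a small HTTP surface; a hedged sketch of exercising it with requests follows. The host, port, and handler name 'telemetry' are assumptions, not part of the record.

import requests

base = 'http://localhost:8080'               # assumed bind address
print(requests.get(base + '/').text)         # list available capture handlers
requests.post(base + '/telemetry/start')     # start a logger by name
requests.post(base + '/telemetry/rotate')    # rotate its capture log
requests.delete(base + '/telemetry/stop')    # stop it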
intake/intake
intake/source/utils.py
https://github.com/intake/intake/blob/277b96bfdee39d8a3048ea5408c6d6716d568336/intake/source/utils.py#L258-L285
def path_to_pattern(path, metadata=None): """ Remove source information from path when using caching Returns None if path is not str Parameters ---------- path : str Path to data optionally containing format_strings metadata : dict, optional Extra arguments to the class, contains any cache information Returns ------- pattern : str Pattern style path stripped of everything to the left of cache regex. """ if not isinstance(path, str): return pattern = path if metadata: cache = metadata.get('cache') if cache: regex = next(c.get('regex') for c in cache if c.get('argkey') == 'urlpath') pattern = pattern.split(regex)[-1] return pattern
[ "def", "path_to_pattern", "(", "path", ",", "metadata", "=", "None", ")", ":", "if", "not", "isinstance", "(", "path", ",", "str", ")", ":", "return", "pattern", "=", "path", "if", "metadata", ":", "cache", "=", "metadata", ".", "get", "(", "'cache'", ")", "if", "cache", ":", "regex", "=", "next", "(", "c", ".", "get", "(", "'regex'", ")", "for", "c", "in", "cache", "if", "c", ".", "get", "(", "'argkey'", ")", "==", "'urlpath'", ")", "pattern", "=", "pattern", ".", "split", "(", "regex", ")", "[", "-", "1", "]", "return", "pattern" ]
Remove source information from path when using caching Returns None if path is not str Parameters ---------- path : str Path to data optionally containing format_strings metadata : dict, optional Extra arguments to the class, contains any cache information Returns ------- pattern : str Pattern style path stripped of everything to the left of cache regex.
[ "Remove", "source", "information", "from", "path", "when", "using", "chaching" ]
python
train
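A worked example of the stripping behavior, with invented cache metadata: everything to the left of the cache regex is removed.

metadata = {'cache': [{'argkey': 'urlpath', 'regex': 'data/'}]}
print(path_to_pattern('s3://bucket/data/{year}/file_{n}.csv', metadata))
# -> '{year}/file_{n}.csv'
print(path_to_pattern(['not', 'a', 'string']))  # -> None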
Kortemme-Lab/klab
klab/bio/bonsai.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/bonsai.py#L550-L552
def prune_loop_for_kic(self, loops_segments, search_radius, expected_min_loop_length = None, expected_max_loop_length = None, generate_pymol_session = False): '''A wrapper for prune_structure_according_to_loop_definitions suitable for the Rosetta kinematic closure (KIC) loop modeling method.''' return self.prune_structure_according_to_loop_definitions(loops_segments, search_radius, expected_min_loop_length = expected_min_loop_length, expected_max_loop_length = expected_max_loop_length, generate_pymol_session = generate_pymol_session, check_sequence = True, keep_Ca_buttress_atoms = True)
[ "def", "prune_loop_for_kic", "(", "self", ",", "loops_segments", ",", "search_radius", ",", "expected_min_loop_length", "=", "None", ",", "expected_max_loop_length", "=", "None", ",", "generate_pymol_session", "=", "False", ")", ":", "return", "self", ".", "prune_structure_according_to_loop_definitions", "(", "loops_segments", ",", "search_radius", ",", "expected_min_loop_length", "=", "expected_min_loop_length", ",", "expected_max_loop_length", "=", "expected_max_loop_length", ",", "generate_pymol_session", "=", "generate_pymol_session", ",", "check_sequence", "=", "True", ",", "keep_Ca_buttress_atoms", "=", "True", ")" ]
A wrapper for prune_structure_according_to_loop_definitions suitable for the Rosetta kinematic closure (KIC) loop modeling method.
[ "A", "wrapper", "for", "prune_structure_according_to_loop_definitions", "suitable", "for", "the", "Rosetta", "kinematic", "closure", "(", "KIC", ")", "loop", "modeling", "method", "." ]
python
train
ga4gh/ga4gh-server
ga4gh/server/datamodel/obo_parser.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/obo_parser.py#L522-L556
def paths_to_top(self, term): """ Returns all possible paths to the root node Each path includes the term given. The order of the path is top -> bottom, i.e. it starts with the root and ends with the given term (inclusively). Parameters: ----------- - term: the id of the GO term, where the paths begin (i.e. the accession 'GO:0003682') Returns: -------- - a list of lists of GO Terms """ # error handling consistent with original authors if term not in self: print("Term %s not found!" % term, file=sys.stderr) return def _paths_to_top_recursive(rec): if rec.level == 0: return [[rec]] paths = [] for parent in rec.parents: top_paths = _paths_to_top_recursive(parent) for top_path in top_paths: top_path.append(rec) paths.append(top_path) return paths go_term = self[term] return _paths_to_top_recursive(go_term)
[ "def", "paths_to_top", "(", "self", ",", "term", ")", ":", "# error handling consistent with original authors", "if", "term", "not", "in", "self", ":", "print", "(", "\"Term %s not found!\"", "%", "term", ",", "file", "=", "sys", ".", "stderr", ")", "return", "def", "_paths_to_top_recursive", "(", "rec", ")", ":", "if", "rec", ".", "level", "==", "0", ":", "return", "[", "[", "rec", "]", "]", "paths", "=", "[", "]", "for", "parent", "in", "rec", ".", "parents", ":", "top_paths", "=", "_paths_to_top_recursive", "(", "parent", ")", "for", "top_path", "in", "top_paths", ":", "top_path", ".", "append", "(", "rec", ")", "paths", ".", "append", "(", "top_path", ")", "return", "paths", "go_term", "=", "self", "[", "term", "]", "return", "_paths_to_top_recursive", "(", "go_term", ")" ]
Returns all possible paths to the root node Each path includes the term given. The order of the path is top -> bottom, i.e. it starts with the root and ends with the given term (inclusively). Parameters: ----------- - term: the id of the GO term, where the paths begin (i.e. the accession 'GO:0003682') Returns: -------- - a list of lists of GO Terms
[ "Returns", "all", "possible", "paths", "to", "the", "root", "node" ]
python
train
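A hedged usage sketch: the ontology file, term accession, and the parser class name GODag are assumptions, since the record shows only this method.

dag = GODag('go-basic.obo')               # hypothetical parser entry point
for path in dag.paths_to_top('GO:0003682'):
    print(' -> '.join(term.id for term in path))  # root first, query term last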
Dallinger/Dallinger
demos/dlgr/demos/rogers/models.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/demos/dlgr/demos/rogers/models.py#L127-L138
def update(self, infos): """Process received infos.""" genes = [i for i in infos if isinstance(i, LearningGene)] for gene in genes: if ( self.network.role == "experiment" and self.generation > 0 and random.random() < 0.10 ): self.mutate(gene) else: self.replicate(gene)
[ "def", "update", "(", "self", ",", "infos", ")", ":", "genes", "=", "[", "i", "for", "i", "in", "infos", "if", "isinstance", "(", "i", ",", "LearningGene", ")", "]", "for", "gene", "in", "genes", ":", "if", "(", "self", ".", "network", ".", "role", "==", "\"experiment\"", "and", "self", ".", "generation", ">", "0", "and", "random", ".", "random", "(", ")", "<", "0.10", ")", ":", "self", ".", "mutate", "(", "gene", ")", "else", ":", "self", ".", "replicate", "(", "gene", ")" ]
Process received infos.
[ "Process", "received", "infos", "." ]
python
train
zalando/patroni
patroni/ctl.py
https://github.com/zalando/patroni/blob/f6d29081c90af52064b981cdd877a07338d86038/patroni/ctl.py#L970-L985
def temporary_file(contents, suffix='', prefix='tmp'): """Creates a temporary file with specified contents that persists for the context. :param contents: binary string that will be written to the file. :param prefix: will be prefixed to the filename. :param suffix: will be appended to the filename. :returns path of the created file. """ tmp = tempfile.NamedTemporaryFile(suffix=suffix, prefix=prefix, delete=False) with tmp: tmp.write(contents) try: yield tmp.name finally: os.unlink(tmp.name)
[ "def", "temporary_file", "(", "contents", ",", "suffix", "=", "''", ",", "prefix", "=", "'tmp'", ")", ":", "tmp", "=", "tempfile", ".", "NamedTemporaryFile", "(", "suffix", "=", "suffix", ",", "prefix", "=", "prefix", ",", "delete", "=", "False", ")", "with", "tmp", ":", "tmp", ".", "write", "(", "contents", ")", "try", ":", "yield", "tmp", ".", "name", "finally", ":", "os", ".", "unlink", "(", "tmp", ".", "name", ")" ]
Creates a temporary file with specified contents that persists for the context. :param contents: binary string that will be written to the file. :param prefix: will be prefixed to the filename. :param suffix: will be appended to the filename. :returns path of the created file.
[ "Creates", "a", "temporary", "file", "with", "specified", "contents", "that", "persists", "for", "the", "context", "." ]
python
train
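A minimal sketch of the context manager above: the file exists only inside the with-block and is unlinked afterwards. The contents and suffix are invented.

with temporary_file(b'listen_addresses = *\n', suffix='.conf') as path:
    with open(path, 'rb') as f:
        print(f.read())   # b'listen_addresses = *\n'
# the file at `path` has been removed here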
rytilahti/python-eq3bt
eq3bt/eq3btsmart.py
https://github.com/rytilahti/python-eq3bt/blob/595459d9885920cf13b7059a1edd2cf38cede1f0/eq3bt/eq3btsmart.py#L108-L113
def parse_schedule(self, data): """Parses the schedule sent by the device.""" sched = Schedule.parse(data) _LOGGER.debug("Got schedule data for day '%s'", sched.day) return sched
[ "def", "parse_schedule", "(", "self", ",", "data", ")", ":", "sched", "=", "Schedule", ".", "parse", "(", "data", ")", "_LOGGER", ".", "debug", "(", "\"Got schedule data for day '%s'\"", ",", "sched", ".", "day", ")", "return", "sched" ]
Parses the schedule sent by the device.
[ "Parses", "the", "device", "sent", "schedule", "." ]
python
train
softlayer/softlayer-python
SoftLayer/managers/dedicated_host.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/dedicated_host.py#L431-L438
def _get_item(self, package, flavor): """Returns the item for ordering a dedicated host.""" for item in package['items']: if item['keyName'] == flavor: return item raise SoftLayer.SoftLayerError("Could not find valid item for: '%s'" % flavor)
[ "def", "_get_item", "(", "self", ",", "package", ",", "flavor", ")", ":", "for", "item", "in", "package", "[", "'items'", "]", ":", "if", "item", "[", "'keyName'", "]", "==", "flavor", ":", "return", "item", "raise", "SoftLayer", ".", "SoftLayerError", "(", "\"Could not find valid item for: '%s'\"", "%", "flavor", ")" ]
Returns the item for ordering a dedicated host.
[ "Returns", "the", "item", "for", "ordering", "a", "dedicated", "host", "." ]
python
train
LLNL/scraper
scraper/github/queryManager.py
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/github/queryManager.py#L480-L504
def fileSave(self, filePath=None, updatePath=False): """Write the internal JSON data dictionary to a JSON data file. If no file path is provided, the stored data file path will be used. Args: filePath (Optional[str]): A relative or absolute path to a '.json' file. Defaults to None. updatePath (Optional[bool]): Specifies whether or not to update the stored data file path. Defaults to False. """ if not filePath: filePath = self.filePath if not os.path.isfile(filePath): print("Data file '%s' does not exist, will create new file." % (filePath)) if not os.path.exists(os.path.split(filePath)[0]): os.makedirs(os.path.split(filePath)[0]) dataJsonString = json.dumps(self.data, indent=4, sort_keys=True) print("Writing to file '%s' ... " % (filePath), end="", flush=True) with open(filePath, "w") as fileout: fileout.write(dataJsonString) print("Wrote file!") if updatePath: self.filePath = filePath
[ "def", "fileSave", "(", "self", ",", "filePath", "=", "None", ",", "updatePath", "=", "False", ")", ":", "if", "not", "filePath", ":", "filePath", "=", "self", ".", "filePath", "if", "not", "os", ".", "path", ".", "isfile", "(", "filePath", ")", ":", "print", "(", "\"Data file '%s' does not exist, will create new file.\"", "%", "(", "filePath", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "split", "(", "filePath", ")", "[", "0", "]", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "split", "(", "filePath", ")", "[", "0", "]", ")", "dataJsonString", "=", "json", ".", "dumps", "(", "self", ".", "data", ",", "indent", "=", "4", ",", "sort_keys", "=", "True", ")", "print", "(", "\"Writing to file '%s' ... \"", "%", "(", "filePath", ")", ",", "end", "=", "\"\"", ",", "flush", "=", "True", ")", "with", "open", "(", "filePath", ",", "\"w\"", ")", "as", "fileout", ":", "fileout", ".", "write", "(", "dataJsonString", ")", "print", "(", "\"Wrote file!\"", ")", "if", "updatePath", ":", "self", ".", "filePath", "=", "filePath" ]
Write the internal JSON data dictionary to a JSON data file. If no file path is provided, the stored data file path will be used. Args: filePath (Optional[str]): A relative or absolute path to a '.json' file. Defaults to None. updatePath (Optional[bool]): Specifies whether or not to update the stored data file path. Defaults to False.
[ "Write", "the", "internal", "JSON", "data", "dictionary", "to", "a", "JSON", "data", "file", "." ]
python
test
google/grr
grr/client/grr_response_client/fleetspeak_client.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/fleetspeak_client.py#L131-L149
def _SendMessages(self, grr_msgs, background=False): """Sends a block of messages through Fleetspeak.""" message_list = rdf_flows.PackedMessageList() communicator.Communicator.EncodeMessageList( rdf_flows.MessageList(job=grr_msgs), message_list) fs_msg = fs_common_pb2.Message( message_type="MessageList", destination=fs_common_pb2.Address(service_name="GRR"), background=background) fs_msg.data.Pack(message_list.AsPrimitiveProto()) try: sent_bytes = self._fs.Send(fs_msg) except (IOError, struct.error): logging.critical("Broken local Fleetspeak connection (write end).") raise stats_collector_instance.Get().IncrementCounter("grr_client_sent_bytes", sent_bytes)
[ "def", "_SendMessages", "(", "self", ",", "grr_msgs", ",", "background", "=", "False", ")", ":", "message_list", "=", "rdf_flows", ".", "PackedMessageList", "(", ")", "communicator", ".", "Communicator", ".", "EncodeMessageList", "(", "rdf_flows", ".", "MessageList", "(", "job", "=", "grr_msgs", ")", ",", "message_list", ")", "fs_msg", "=", "fs_common_pb2", ".", "Message", "(", "message_type", "=", "\"MessageList\"", ",", "destination", "=", "fs_common_pb2", ".", "Address", "(", "service_name", "=", "\"GRR\"", ")", ",", "background", "=", "background", ")", "fs_msg", ".", "data", ".", "Pack", "(", "message_list", ".", "AsPrimitiveProto", "(", ")", ")", "try", ":", "sent_bytes", "=", "self", ".", "_fs", ".", "Send", "(", "fs_msg", ")", "except", "(", "IOError", ",", "struct", ".", "error", ")", ":", "logging", ".", "critical", "(", "\"Broken local Fleetspeak connection (write end).\"", ")", "raise", "stats_collector_instance", ".", "Get", "(", ")", ".", "IncrementCounter", "(", "\"grr_client_sent_bytes\"", ",", "sent_bytes", ")" ]
Sends a block of messages through Fleetspeak.
[ "Sends", "a", "block", "of", "messages", "through", "Fleetspeak", "." ]
python
train
DataBiosphere/toil
src/toil/serviceManager.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/serviceManager.py#L138-L143
def isRunning(self, serviceJobNode): """ Returns true if the service job has started and is active :rtype: boolean """ return (not self.jobStore.fileExists(serviceJobNode.startJobStoreID)) and self.isActive(serviceJobNode)
[ "def", "isRunning", "(", "self", ",", "serviceJobNode", ")", ":", "return", "(", "not", "self", ".", "jobStore", ".", "fileExists", "(", "serviceJobNode", ".", "startJobStoreID", ")", ")", "and", "self", ".", "isActive", "(", "serviceJobNode", ")" ]
Returns true if the service job has started and is active :rtype: boolean
[ "Returns", "true", "if", "the", "service", "job", "has", "started", "and", "is", "active", ":", "rtype", ":", "boolean" ]
python
train
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/enrollment/models/enrollment_identity.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/enrollment/models/enrollment_identity.py#L160-L173
def enrolled_device_id(self, enrolled_device_id): """ Sets the enrolled_device_id of this EnrollmentIdentity. The ID of the device in the Device Directory once it has been registered. :param enrolled_device_id: The enrolled_device_id of this EnrollmentIdentity. :type: str """ if enrolled_device_id is None: raise ValueError("Invalid value for `enrolled_device_id`, must not be `None`") if enrolled_device_id is not None and not re.search('^[A-Za-z0-9]{32}', enrolled_device_id): raise ValueError("Invalid value for `enrolled_device_id`, must match the pattern `/^[A-Za-z0-9]{32}/`") self._enrolled_device_id = enrolled_device_id
[ "def", "enrolled_device_id", "(", "self", ",", "enrolled_device_id", ")", ":", "if", "enrolled_device_id", "is", "None", ":", "raise", "ValueError", "(", "\"Invalid value for `enrolled_device_id`, must not be `None`\"", ")", "if", "enrolled_device_id", "is", "not", "None", "and", "not", "re", ".", "search", "(", "'^[A-Za-z0-9]{32}'", ",", "enrolled_device_id", ")", ":", "raise", "ValueError", "(", "\"Invalid value for `enrolled_device_id`, must be a follow pattern or equal to `/^[A-Za-z0-9]{32}/`\"", ")", "self", ".", "_enrolled_device_id", "=", "enrolled_device_id" ]
Sets the enrolled_device_id of this EnrollmentIdentity. The ID of the device in the Device Directory once it has been registered. :param enrolled_device_id: The enrolled_device_id of this EnrollmentIdentity. :type: str
[ "Sets", "the", "enrolled_device_id", "of", "this", "EnrollmentIdentity", ".", "The", "ID", "of", "the", "device", "in", "the", "Device", "Directory", "once", "it", "has", "been", "registered", "." ]
python
train
RiotGames/cloud-inquisitor
backend/cloud_inquisitor/plugins/types/issues.py
https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/backend/cloud_inquisitor/plugins/types/issues.py#L570-L589
def update(self, data): """Updates the object information based on live data, if there were any changes made. Any changes will be automatically applied to the object, but will not be automatically persisted. You must manually call `db.session.add(instance)` on the object. Args: data (:obj:): AWS API Resource object fetched from AWS API Returns: `bool` """ # If the instance was terminated, remove it updated = self.set_property('state', data['state']) updated |= self.set_property('notes', sorted(data['notes'] or [])) updated |= self.set_property('last_notice', data['last_notice']) if updated: self.set_property('last_change', datetime.now()) return updated
[ "def", "update", "(", "self", ",", "data", ")", ":", "# If the instance was terminated, remove it", "updated", "=", "self", ".", "set_property", "(", "'state'", ",", "data", "[", "'state'", "]", ")", "updated", "|=", "self", ".", "set_property", "(", "'notes'", ",", "sorted", "(", "data", "[", "'notes'", "]", "or", "[", "]", ")", ")", "updated", "|=", "self", ".", "set_property", "(", "'last_notice'", ",", "data", "[", "'last_notice'", "]", ")", "if", "updated", ":", "self", ".", "set_property", "(", "'last_change'", ",", "datetime", ".", "now", "(", ")", ")", "return", "updated" ]
Updates the object information based on live data, if there were any changes made. Any changes will be automatically applied to the object, but will not be automatically persisted. You must manually call `db.session.add(instance)` on the object. Args: data (:obj:): AWS API Resource object fetched from AWS API Returns: `bool`
[ "Updates", "the", "object", "information", "based", "on", "live", "data", "if", "there", "were", "any", "changes", "made", ".", "Any", "changes", "will", "be", "automatically", "applied", "to", "the", "object", "but", "will", "not", "be", "automatically", "persisted", ".", "You", "must", "manually", "call", "db", ".", "session", ".", "add", "(", "instance", ")", "on", "the", "object", "." ]
python
train
Erotemic/utool
utool/util_iter.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_iter.py#L512-L549
def random_product(items, num=None, rng=None): """ Yields `num` items from the cartesian product of items in a random order. Args: items (list of sequences): items to get cartesian product of packed in a list or tuple. (note this deviates from api of it.product) Example: import utool as ut items = [(1, 2, 3), (4, 5, 6, 7)] rng = 0 list(ut.random_product(items, rng=0)) list(ut.random_product(items, num=3, rng=0)) """ import utool as ut rng = ut.ensure_rng(rng, 'python') seen = set() items = [list(g) for g in items] max_num = ut.prod(map(len, items)) if num is None: num = max_num if num > max_num: raise ValueError('num exceeds maximum number of products') # TODO: make this more efficient when num is large if num > max_num // 2: for prod in ut.shuffle(list(it.product(*items)), rng=rng): yield prod else: while len(seen) < num: # combo = tuple(sorted(rng.choice(items, size, replace=False))) idxs = tuple(rng.randint(0, len(g) - 1) for g in items) if idxs not in seen: seen.add(idxs) prod = tuple(g[x] for g, x in zip(items, idxs)) yield prod
[ "def", "random_product", "(", "items", ",", "num", "=", "None", ",", "rng", "=", "None", ")", ":", "import", "utool", "as", "ut", "rng", "=", "ut", ".", "ensure_rng", "(", "rng", ",", "'python'", ")", "seen", "=", "set", "(", ")", "items", "=", "[", "list", "(", "g", ")", "for", "g", "in", "items", "]", "max_num", "=", "ut", ".", "prod", "(", "map", "(", "len", ",", "items", ")", ")", "if", "num", "is", "None", ":", "num", "=", "max_num", "if", "num", ">", "max_num", ":", "raise", "ValueError", "(", "'num exceedes maximum number of products'", ")", "# TODO: make this more efficient when num is large", "if", "num", ">", "max_num", "//", "2", ":", "for", "prod", "in", "ut", ".", "shuffle", "(", "list", "(", "it", ".", "product", "(", "*", "items", ")", ")", ",", "rng", "=", "rng", ")", ":", "yield", "prod", "else", ":", "while", "len", "(", "seen", ")", "<", "num", ":", "# combo = tuple(sorted(rng.choice(items, size, replace=False)))", "idxs", "=", "tuple", "(", "rng", ".", "randint", "(", "0", ",", "len", "(", "g", ")", "-", "1", ")", "for", "g", "in", "items", ")", "if", "idxs", "not", "in", "seen", ":", "seen", ".", "add", "(", "idxs", ")", "prod", "=", "tuple", "(", "g", "[", "x", "]", "for", "g", ",", "x", "in", "zip", "(", "items", ",", "idxs", ")", ")", "yield", "prod" ]
Yields `num` items from the cartesian product of items in a random order. Args: items (list of sequences): items to get cartesian product of packed in a list or tuple. (note this deviates from api of it.product) Example: import utool as ut items = [(1, 2, 3), (4, 5, 6, 7)] rng = 0 list(ut.random_product(items, rng=0)) list(ut.random_product(items, num=3, rng=0))
[ "Yields", "num", "items", "from", "the", "cartesian", "product", "of", "items", "in", "a", "random", "order", "." ]
python
train
carpedm20/ndrive
ndrive/client.py
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/client.py#L694-L725
def getVersionList(self, full_path, startnum = 0, pagingrow = 50, dummy = 54213): """Get a version list of a file or directory. :param full_path: The full path to get the file or directory property. Path should start with '/' :param startnum: Start version index. :param pagingrow: Max # of version list in one page. :returns: ``metadata`` if success or ``False`` (failed to get history or there is no history) :metadata: - createuser - filesize - getlastmodified - href - versioninfo - versionkey """ data = {'orgresource': full_path, 'startnum': startnum, 'pagingrow': pagingrow, 'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, } s, metadata = self.POST('getVersionList', data) if s is True: return metadata else: print "Error getVersionList: Cannot get version list" return False
[ "def", "getVersionList", "(", "self", ",", "full_path", ",", "startnum", "=", "0", ",", "pagingrow", "=", "50", ",", "dummy", "=", "54213", ")", ":", "data", "=", "{", "'orgresource'", ":", "full_path", ",", "'startnum'", ":", "startnum", ",", "'pagingrow'", ":", "pagingrow", ",", "'userid'", ":", "self", ".", "user_id", ",", "'useridx'", ":", "self", ".", "useridx", ",", "'dummy'", ":", "dummy", ",", "}", "s", ",", "metadata", "=", "self", ".", "POST", "(", "'getVersionList'", ",", "data", ")", "if", "s", "is", "True", ":", "return", "metadata", "else", ":", "print", "\"Error getVersionList: Cannot get version list\"", "return", "False" ]
Get a version list of a file or directory.

        :param full_path: The full path to get the file or directory property.
                          Path should start with '/'
        :param startnum: Start version index.
        :param pagingrow: Max # of version list in one page.

        :returns: ``metadata`` if success or ``False`` (failed to get history or there is no history)

        :metadata:
          - createuser
          - filesize
          - getlastmodified
          - href
          - versioninfo
          - versionkey
[ "Get", "a", "version", "list", "of", "a", "file", "or", "dierectory", "." ]
python
train
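A hedged usage sketch for the method above — it assumes a valid Naver account and network access; the path is illustrative, and the exact shape of the returned metadata depends on the service response:

from ndrive import ndrive

nd = ndrive()
nd.login('YOUR_ID', 'YOUR_PASSWORD')   # hypothetical credentials
versions = nd.getVersionList('/Picture/flower.png', startnum=0, pagingrow=10)
if versions is not False:
    print(versions)                    # parsed version metadata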
DataONEorg/d1_python
gmn/src/d1_gmn/app/did.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/did.py#L231-L239
def _is_did(did): """Return True if ``did`` is recorded in a local context. ``did``=None is supported and returns False. A DID can be classified with classify_identifier(). """ return d1_gmn.app.models.IdNamespace.objects.filter(did=did).exists()
[ "def", "_is_did", "(", "did", ")", ":", "return", "d1_gmn", ".", "app", ".", "models", ".", "IdNamespace", ".", "objects", ".", "filter", "(", "did", "=", "did", ")", ".", "exists", "(", ")" ]
Return True if ``did`` is recorded in a local context. ``did``=None is supported and returns False. A DID can be classified with classify_identifier().
[ "Return", "True", "if", "did", "is", "recorded", "in", "a", "local", "context", "." ]
python
train
saltstack/salt
salt/modules/elasticsearch.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/elasticsearch.py#L375-L405
def document_create(index, doc_type, body=None, id=None, hosts=None, profile=None, source=None): ''' Create a document in a specified index index Index name where the document should reside doc_type Type of the document body Document to store source URL of file specifying document to store. Cannot be used in combination with ``body``. id Optional unique document identifier for specified doc_type (empty for random) CLI example:: salt myminion elasticsearch.document_create testindex doctype1 '{}' ''' es = _get_instance(hosts, profile) if source and body: message = 'Either body or source should be specified but not both.' raise SaltInvocationError(message) if source: body = __salt__['cp.get_file_str']( source, saltenv=__opts__.get('saltenv', 'base')) try: return es.index(index=index, doc_type=doc_type, body=body, id=id) except elasticsearch.TransportError as e: raise CommandExecutionError("Cannot create document in index {0}, server returned code {1} with message {2}".format(index, e.status_code, e.error))
[ "def", "document_create", "(", "index", ",", "doc_type", ",", "body", "=", "None", ",", "id", "=", "None", ",", "hosts", "=", "None", ",", "profile", "=", "None", ",", "source", "=", "None", ")", ":", "es", "=", "_get_instance", "(", "hosts", ",", "profile", ")", "if", "source", "and", "body", ":", "message", "=", "'Either body or source should be specified but not both.'", "raise", "SaltInvocationError", "(", "message", ")", "if", "source", ":", "body", "=", "__salt__", "[", "'cp.get_file_str'", "]", "(", "source", ",", "saltenv", "=", "__opts__", ".", "get", "(", "'saltenv'", ",", "'base'", ")", ")", "try", ":", "return", "es", ".", "index", "(", "index", "=", "index", ",", "doc_type", "=", "doc_type", ",", "body", "=", "body", ",", "id", "=", "id", ")", "except", "elasticsearch", ".", "TransportError", "as", "e", ":", "raise", "CommandExecutionError", "(", "\"Cannot create document in index {0}, server returned code {1} with message {2}\"", ".", "format", "(", "index", ",", "e", ".", "status_code", ",", "e", ".", "error", ")", ")" ]
Create a document in a specified index index Index name where the document should reside doc_type Type of the document body Document to store source URL of file specifying document to store. Cannot be used in combination with ``body``. id Optional unique document identifier for specified doc_type (empty for random) CLI example:: salt myminion elasticsearch.document_create testindex doctype1 '{}'
[ "Create", "a", "document", "in", "a", "specified", "index" ]
python
train
plivo/plivohelper-python
plivohelper.py
https://github.com/plivo/plivohelper-python/blob/a2f706d69e2138fbb973f792041341f662072d26/plivohelper.py#L258-L263
def sound_touch(self, call_params): """REST Add soundtouch audio effects to a Call """ path = '/' + self.api_version + '/SoundTouch/' method = 'POST' return self.request(path, method, call_params)
[ "def", "sound_touch", "(", "self", ",", "call_params", ")", ":", "path", "=", "'/'", "+", "self", ".", "api_version", "+", "'/SoundTouch/'", "method", "=", "'POST'", "return", "self", ".", "request", "(", "path", ",", "method", ",", "call_params", ")" ]
REST Add soundtouch audio effects to a Call
[ "REST", "Add", "soundtouch", "audio", "effects", "to", "a", "Call" ]
python
valid
junzis/pyModeS
pyModeS/decoder/bds/bds40.py
https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/decoder/bds/bds40.py#L104-L119
def p40baro(msg):
    """Barometric pressure setting

    Args:
        msg (String): 28-character hexadecimal message (BDS40) string

    Returns:
        float: pressure in millibar
    """
    d = hex2bin(data(msg))

    if d[26] == '0':
        return None

    p = bin2int(d[27:39]) * 0.1 + 800    # millibar

    return p
[ "def", "p40baro", "(", "msg", ")", ":", "d", "=", "hex2bin", "(", "data", "(", "msg", ")", ")", "if", "d", "[", "26", "]", "==", "'0'", ":", "return", "None", "p", "=", "bin2int", "(", "d", "[", "27", ":", "39", "]", ")", "*", "0.1", "+", "800", "# millibar", "return", "p" ]
Barometric pressure setting

    Args:
        msg (String): 28-character hexadecimal message (BDS40) string

    Returns:
        float: pressure in millibar
[ "Barometric", "pressure", "setting" ]
python
train
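The decode above is a status bit followed by a 12-bit value scaled by 0.1 mbar with an 800 mbar offset; a tiny worked example of just that arithmetic (the raw field value is made up):

raw = 0b001111101000            # hypothetical 12-bit field value (1000 decimal)
pressure = raw * 0.1 + 800      # -> 900.0 millibar
print(pressure)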
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1206-L1217
def cookie_get(self, name):
        """
        Check for a cookie value by name.

        :param str name: Name of the cookie value to retrieve.
        :return: Returns the cookie value if it's set or None if it's not found.
        """
        if not hasattr(self, 'cookies'):
            return None
        if self.cookies.get(name):
            return self.cookies.get(name).value
        return None
[ "def", "cookie_get", "(", "self", ",", "name", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'cookies'", ")", ":", "return", "None", "if", "self", ".", "cookies", ".", "get", "(", "name", ")", ":", "return", "self", ".", "cookies", ".", "get", "(", "name", ")", ".", "value", "return", "None" ]
Check for a cookie value by name.

        :param str name: Name of the cookie value to retrieve.
        :return: Returns the cookie value if it's set or None if it's not found.
[ "Check", "for", "a", "cookie", "value", "by", "name", "." ]
python
train
Erotemic/utool
utool/util_str.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L2563-L2586
def bubbletext(text, font='cybermedium'): r""" Uses pyfiglet to create bubble text. Args: font (str): default=cybermedium, other fonts include: cybersmall and cyberlarge. References: http://www.figlet.org/ Example: >>> # ENABLE_DOCTEST >>> import utool as ut >>> bubble_text = ut.bubbletext('TESTING BUBBLE TEXT', font='cybermedium') >>> print(bubble_text) """ import utool as ut pyfiglet = ut.tryimport('pyfiglet', 'git+https://github.com/pwaller/pyfiglet') if pyfiglet is None: return text else: bubble_text = pyfiglet.figlet_format(text, font=font) return bubble_text
[ "def", "bubbletext", "(", "text", ",", "font", "=", "'cybermedium'", ")", ":", "import", "utool", "as", "ut", "pyfiglet", "=", "ut", ".", "tryimport", "(", "'pyfiglet'", ",", "'git+https://github.com/pwaller/pyfiglet'", ")", "if", "pyfiglet", "is", "None", ":", "return", "text", "else", ":", "bubble_text", "=", "pyfiglet", ".", "figlet_format", "(", "text", ",", "font", "=", "font", ")", "return", "bubble_text" ]
r""" Uses pyfiglet to create bubble text. Args: font (str): default=cybermedium, other fonts include: cybersmall and cyberlarge. References: http://www.figlet.org/ Example: >>> # ENABLE_DOCTEST >>> import utool as ut >>> bubble_text = ut.bubbletext('TESTING BUBBLE TEXT', font='cybermedium') >>> print(bubble_text)
[ "r", "Uses", "pyfiglet", "to", "create", "bubble", "text", "." ]
python
train
pyca/pyopenssl
src/OpenSSL/crypto.py
https://github.com/pyca/pyopenssl/blob/1fbe064c50fd030948141d7d630673761525b0d0/src/OpenSSL/crypto.py#L1477-L1491
def add_extensions(self, extensions): """ Add extensions to the certificate. :param extensions: The extensions to add. :type extensions: An iterable of :py:class:`X509Extension` objects. :return: ``None`` """ for ext in extensions: if not isinstance(ext, X509Extension): raise ValueError("One of the elements is not an X509Extension") add_result = _lib.X509_add_ext(self._x509, ext._extension, -1) if not add_result: _raise_current_error()
[ "def", "add_extensions", "(", "self", ",", "extensions", ")", ":", "for", "ext", "in", "extensions", ":", "if", "not", "isinstance", "(", "ext", ",", "X509Extension", ")", ":", "raise", "ValueError", "(", "\"One of the elements is not an X509Extension\"", ")", "add_result", "=", "_lib", ".", "X509_add_ext", "(", "self", ".", "_x509", ",", "ext", ".", "_extension", ",", "-", "1", ")", "if", "not", "add_result", ":", "_raise_current_error", "(", ")" ]
Add extensions to the certificate. :param extensions: The extensions to add. :type extensions: An iterable of :py:class:`X509Extension` objects. :return: ``None``
[ "Add", "extensions", "to", "the", "certificate", "." ]
python
test
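A short usage sketch with the public pyOpenSSL API (the extension contents below are chosen arbitrarily for illustration):

from OpenSSL import crypto

cert = crypto.X509()
cert.add_extensions([
    crypto.X509Extension(b'basicConstraints', True, b'CA:FALSE'),
    crypto.X509Extension(b'keyUsage', True, b'digitalSignature'),
])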
s0md3v/Photon
core/utils.py
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L87-L96
def timer(diff, processed): """Return the passed time.""" # Changes seconds into minutes and seconds minutes, seconds = divmod(diff, 60) try: # Finds average time taken by requests time_per_request = diff / float(len(processed)) except ZeroDivisionError: time_per_request = 0 return minutes, seconds, time_per_request
[ "def", "timer", "(", "diff", ",", "processed", ")", ":", "# Changes seconds into minutes and seconds", "minutes", ",", "seconds", "=", "divmod", "(", "diff", ",", "60", ")", "try", ":", "# Finds average time taken by requests", "time_per_request", "=", "diff", "/", "float", "(", "len", "(", "processed", ")", ")", "except", "ZeroDivisionError", ":", "time_per_request", "=", "0", "return", "minutes", ",", "seconds", ",", "time_per_request" ]
Return the passed time.
[ "Return", "the", "passed", "time", "." ]
python
train
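A worked example of the arithmetic in ``timer`` (the numbers are invented): 125 seconds spent over 50 processed requests gives

minutes, seconds = divmod(125, 60)   # -> (2, 5)
time_per_request = 125 / float(50)   # -> 2.5 seconds per request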
sdispater/eloquent
eloquent/orm/builder.py
https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/builder.py#L758-L767
def _call_scope(self, scope, *args, **kwargs): """ Call the given model scope. :param scope: The scope to call :type scope: str """ result = getattr(self._model, scope)(self, *args, **kwargs) return result or self
[ "def", "_call_scope", "(", "self", ",", "scope", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "result", "=", "getattr", "(", "self", ".", "_model", ",", "scope", ")", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "result", "or", "self" ]
Call the given model scope. :param scope: The scope to call :type scope: str
[ "Call", "the", "given", "model", "scope", "." ]
python
train
reorx/torext
torext/handlers/base.py
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/handlers/base.py#L112-L139
def _handle_request_exception(self, e):
        """This method handles HTTPError exceptions the same way tornado does,
        leaving other exceptions to be handled by a user-defined handler
        function mapped in the class attribute `EXCEPTION_HANDLERS`

        Common HTTP status codes:
            200 OK
            301 Moved Permanently
            302 Found
            400 Bad Request
            401 Unauthorized
            403 Forbidden
            404 Not Found
            405 Method Not Allowed
            500 Internal Server Error

        It is suggested to use only the HTTP status codes above
        """
        handle_func = self._exception_default_handler
        if self.EXCEPTION_HANDLERS:
            for excs, func_name in self.EXCEPTION_HANDLERS.items():
                if isinstance(e, excs):
                    handle_func = getattr(self, func_name)
                    break

        handle_func(e)
        if not self._finished:
            self.finish()
[ "def", "_handle_request_exception", "(", "self", ",", "e", ")", ":", "handle_func", "=", "self", ".", "_exception_default_handler", "if", "self", ".", "EXCEPTION_HANDLERS", ":", "for", "excs", ",", "func_name", "in", "self", ".", "EXCEPTION_HANDLERS", ".", "items", "(", ")", ":", "if", "isinstance", "(", "e", ",", "excs", ")", ":", "handle_func", "=", "getattr", "(", "self", ",", "func_name", ")", "break", "handle_func", "(", "e", ")", "if", "not", "self", ".", "_finished", ":", "self", ".", "finish", "(", ")" ]
This method handles HTTPError exceptions the same way tornado does,
        leaving other exceptions to be handled by a user-defined handler
        function mapped in the class attribute `EXCEPTION_HANDLERS`

        Common HTTP status codes:
            200 OK
            301 Moved Permanently
            302 Found
            400 Bad Request
            401 Unauthorized
            403 Forbidden
            404 Not Found
            405 Method Not Allowed
            500 Internal Server Error

        It is suggested to use only the HTTP status codes above
[ "This", "method", "handle", "HTTPError", "exceptions", "the", "same", "as", "how", "tornado", "does", "leave", "other", "exceptions", "to", "be", "handled", "by", "user", "defined", "handler", "function", "maped", "in", "class", "attribute", "EXCEPTION_HANDLERS" ]
python
train
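A hedged sketch of how a subclass might populate ``EXCEPTION_HANDLERS`` — the base-class stub, exception choice, and method name are illustrative, not part of torext; note that each key may be a single exception class or a tuple, since ``isinstance`` accepts both:

class _BaseHandler(object):            # stub standing in for the torext base handler
    def set_status(self, code):
        pass
    def write(self, chunk):
        pass

class MyHandler(_BaseHandler):
    EXCEPTION_HANDLERS = {
        (ValueError, KeyError): '_handle_bad_input',
    }

    def _handle_bad_input(self, e):
        self.set_status(400)
        self.write('bad input: %s' % e)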
gwastro/pycbc
pycbc/cosmology.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/cosmology.py#L238-L252
def setup_interpolant(self): """Initializes the z(d) interpolation.""" # for computing nearby (z < 1) redshifts zs = numpy.linspace(0., 1., num=self.numpoints) ds = self.cosmology.luminosity_distance(zs).value self.nearby_d2z = interpolate.interp1d(ds, zs, kind='linear', bounds_error=False) # for computing far away (z > 1) redshifts zs = numpy.logspace(0, numpy.log10(self.default_maxz), num=self.numpoints) ds = self.cosmology.luminosity_distance(zs).value self.faraway_d2z = interpolate.interp1d(ds, zs, kind='linear', bounds_error=False) # store the default maximum distance self.default_maxdist = ds.max()
[ "def", "setup_interpolant", "(", "self", ")", ":", "# for computing nearby (z < 1) redshifts", "zs", "=", "numpy", ".", "linspace", "(", "0.", ",", "1.", ",", "num", "=", "self", ".", "numpoints", ")", "ds", "=", "self", ".", "cosmology", ".", "luminosity_distance", "(", "zs", ")", ".", "value", "self", ".", "nearby_d2z", "=", "interpolate", ".", "interp1d", "(", "ds", ",", "zs", ",", "kind", "=", "'linear'", ",", "bounds_error", "=", "False", ")", "# for computing far away (z > 1) redshifts", "zs", "=", "numpy", ".", "logspace", "(", "0", ",", "numpy", ".", "log10", "(", "self", ".", "default_maxz", ")", ",", "num", "=", "self", ".", "numpoints", ")", "ds", "=", "self", ".", "cosmology", ".", "luminosity_distance", "(", "zs", ")", ".", "value", "self", ".", "faraway_d2z", "=", "interpolate", ".", "interp1d", "(", "ds", ",", "zs", ",", "kind", "=", "'linear'", ",", "bounds_error", "=", "False", ")", "# store the default maximum distance", "self", ".", "default_maxdist", "=", "ds", ".", "max", "(", ")" ]
Initializes the z(d) interpolation.
[ "Initializes", "the", "z", "(", "d", ")", "interpolation", "." ]
python
train
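The same inversion trick in isolation — tabulate d(z) on a grid, then interpolate z as a function of d. This sketch assumes astropy is available and uses its Planck15 cosmology as a stand-in for ``self.cosmology``:

import numpy
from scipy import interpolate
from astropy.cosmology import Planck15

zs = numpy.linspace(0., 1., num=1000)
ds = Planck15.luminosity_distance(zs).value   # Mpc, increases monotonically with z
nearby_d2z = interpolate.interp1d(ds, zs, kind='linear', bounds_error=False)
print(nearby_d2z(1000.0))                     # approximate redshift at 1 Gpc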
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Builder.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Builder.py#L282-L329
def _node_errors(builder, env, tlist, slist): """Validate that the lists of target and source nodes are legal for this builder and environment. Raise errors or issue warnings as appropriate. """ # First, figure out if there are any errors in the way the targets # were specified. for t in tlist: if t.side_effect: raise UserError("Multiple ways to build the same target were specified for: %s" % t) if t.has_explicit_builder(): # Check for errors when the environments are different # No error if environments are the same Environment instance if (not t.env is None and not t.env is env and # Check OverrideEnvironment case - no error if wrapped Environments # are the same instance, and overrides lists match not (getattr(t.env, '__subject', 0) is getattr(env, '__subject', 1) and getattr(t.env, 'overrides', 0) == getattr(env, 'overrides', 1) and not builder.multi)): action = t.builder.action t_contents = t.builder.action.get_contents(tlist, slist, t.env) contents = builder.action.get_contents(tlist, slist, env) if t_contents == contents: msg = "Two different environments were specified for target %s,\n\tbut they appear to have the same action: %s" % (t, action.genstring(tlist, slist, t.env)) SCons.Warnings.warn(SCons.Warnings.DuplicateEnvironmentWarning, msg) else: try: msg = "Two environments with different actions were specified for the same target: %s\n(action 1: %s)\n(action 2: %s)" % (t,t_contents.decode('utf-8'),contents.decode('utf-8')) except UnicodeDecodeError as e: msg = "Two environments with different actions were specified for the same target: %s"%t raise UserError(msg) if builder.multi: if t.builder != builder: msg = "Two different builders (%s and %s) were specified for the same target: %s" % (t.builder.get_name(env), builder.get_name(env), t) raise UserError(msg) # TODO(batch): list constructed each time! if t.get_executor().get_all_targets() != tlist: msg = "Two different target lists have a target in common: %s (from %s and from %s)" % (t, list(map(str, t.get_executor().get_all_targets())), list(map(str, tlist))) raise UserError(msg) elif t.sources != slist: msg = "Multiple ways to build the same target were specified for: %s (from %s and from %s)" % (t, list(map(str, t.sources)), list(map(str, slist))) raise UserError(msg) if builder.single_source: if len(slist) > 1: raise UserError("More than one source given for single-source builder: targets=%s sources=%s" % (list(map(str,tlist)), list(map(str,slist))))
[ "def", "_node_errors", "(", "builder", ",", "env", ",", "tlist", ",", "slist", ")", ":", "# First, figure out if there are any errors in the way the targets", "# were specified.", "for", "t", "in", "tlist", ":", "if", "t", ".", "side_effect", ":", "raise", "UserError", "(", "\"Multiple ways to build the same target were specified for: %s\"", "%", "t", ")", "if", "t", ".", "has_explicit_builder", "(", ")", ":", "# Check for errors when the environments are different", "# No error if environments are the same Environment instance", "if", "(", "not", "t", ".", "env", "is", "None", "and", "not", "t", ".", "env", "is", "env", "and", "# Check OverrideEnvironment case - no error if wrapped Environments", "# are the same instance, and overrides lists match", "not", "(", "getattr", "(", "t", ".", "env", ",", "'__subject'", ",", "0", ")", "is", "getattr", "(", "env", ",", "'__subject'", ",", "1", ")", "and", "getattr", "(", "t", ".", "env", ",", "'overrides'", ",", "0", ")", "==", "getattr", "(", "env", ",", "'overrides'", ",", "1", ")", "and", "not", "builder", ".", "multi", ")", ")", ":", "action", "=", "t", ".", "builder", ".", "action", "t_contents", "=", "t", ".", "builder", ".", "action", ".", "get_contents", "(", "tlist", ",", "slist", ",", "t", ".", "env", ")", "contents", "=", "builder", ".", "action", ".", "get_contents", "(", "tlist", ",", "slist", ",", "env", ")", "if", "t_contents", "==", "contents", ":", "msg", "=", "\"Two different environments were specified for target %s,\\n\\tbut they appear to have the same action: %s\"", "%", "(", "t", ",", "action", ".", "genstring", "(", "tlist", ",", "slist", ",", "t", ".", "env", ")", ")", "SCons", ".", "Warnings", ".", "warn", "(", "SCons", ".", "Warnings", ".", "DuplicateEnvironmentWarning", ",", "msg", ")", "else", ":", "try", ":", "msg", "=", "\"Two environments with different actions were specified for the same target: %s\\n(action 1: %s)\\n(action 2: %s)\"", "%", "(", "t", ",", "t_contents", ".", "decode", "(", "'utf-8'", ")", ",", "contents", ".", "decode", "(", "'utf-8'", ")", ")", "except", "UnicodeDecodeError", "as", "e", ":", "msg", "=", "\"Two environments with different actions were specified for the same target: %s\"", "%", "t", "raise", "UserError", "(", "msg", ")", "if", "builder", ".", "multi", ":", "if", "t", ".", "builder", "!=", "builder", ":", "msg", "=", "\"Two different builders (%s and %s) were specified for the same target: %s\"", "%", "(", "t", ".", "builder", ".", "get_name", "(", "env", ")", ",", "builder", ".", "get_name", "(", "env", ")", ",", "t", ")", "raise", "UserError", "(", "msg", ")", "# TODO(batch): list constructed each time!", "if", "t", ".", "get_executor", "(", ")", ".", "get_all_targets", "(", ")", "!=", "tlist", ":", "msg", "=", "\"Two different target lists have a target in common: %s (from %s and from %s)\"", "%", "(", "t", ",", "list", "(", "map", "(", "str", ",", "t", ".", "get_executor", "(", ")", ".", "get_all_targets", "(", ")", ")", ")", ",", "list", "(", "map", "(", "str", ",", "tlist", ")", ")", ")", "raise", "UserError", "(", "msg", ")", "elif", "t", ".", "sources", "!=", "slist", ":", "msg", "=", "\"Multiple ways to build the same target were specified for: %s (from %s and from %s)\"", "%", "(", "t", ",", "list", "(", "map", "(", "str", ",", "t", ".", "sources", ")", ")", ",", "list", "(", "map", "(", "str", ",", "slist", ")", ")", ")", "raise", "UserError", "(", "msg", ")", "if", "builder", ".", "single_source", ":", "if", "len", "(", "slist", ")", ">", "1", ":", "raise", "UserError", 
"(", "\"More than one source given for single-source builder: targets=%s sources=%s\"", "%", "(", "list", "(", "map", "(", "str", ",", "tlist", ")", ")", ",", "list", "(", "map", "(", "str", ",", "slist", ")", ")", ")", ")" ]
Validate that the lists of target and source nodes are legal for this builder and environment. Raise errors or issue warnings as appropriate.
[ "Validate", "that", "the", "lists", "of", "target", "and", "source", "nodes", "are", "legal", "for", "this", "builder", "and", "environment", ".", "Raise", "errors", "or", "issue", "warnings", "as", "appropriate", "." ]
python
train
molmod/molmod
molmod/io/cml.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/cml.py#L149-L163
def load_cml(cml_filename): """Load the molecules from a CML file Argument: | ``cml_filename`` -- The filename of a CML file. Returns a list of molecule objects with optional molecular graph attribute and extra attributes. """ parser = make_parser() parser.setFeature(feature_namespaces, 0) dh = CMLMoleculeLoader() parser.setContentHandler(dh) parser.parse(cml_filename) return dh.molecules
[ "def", "load_cml", "(", "cml_filename", ")", ":", "parser", "=", "make_parser", "(", ")", "parser", ".", "setFeature", "(", "feature_namespaces", ",", "0", ")", "dh", "=", "CMLMoleculeLoader", "(", ")", "parser", ".", "setContentHandler", "(", "dh", ")", "parser", ".", "parse", "(", "cml_filename", ")", "return", "dh", ".", "molecules" ]
Load the molecules from a CML file Argument: | ``cml_filename`` -- The filename of a CML file. Returns a list of molecule objects with optional molecular graph attribute and extra attributes.
[ "Load", "the", "molecules", "from", "a", "CML", "file" ]
python
train
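The SAX wiring above is generic; a minimal self-contained sketch with a trivial content handler (the counting handler and file name are illustrative, not part of molmod):

from xml.sax import make_parser
from xml.sax.handler import ContentHandler, feature_namespaces

class CountingHandler(ContentHandler):
    def __init__(self):
        ContentHandler.__init__(self)
        self.count = 0

    def startElement(self, name, attrs):
        self.count += 1

parser = make_parser()
parser.setFeature(feature_namespaces, 0)
ch = CountingHandler()
parser.setContentHandler(ch)
# parser.parse('molecule.cml')  # illustrative path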
regardscitoyens/cpc-api
cpc_api/api.py
https://github.com/regardscitoyens/cpc-api/blob/4621dcbda3f3bb8fae1cc094fa58e054df24269d/cpc_api/api.py#L42-L55
def synthese(self, month=None): """ month format: YYYYMM """ if month is None and self.legislature == '2012-2017': raise AssertionError('Global Synthesis on legislature does not work, see https://github.com/regardscitoyens/nosdeputes.fr/issues/69') if month is None: month = 'data' url = '%s/synthese/%s/%s' % (self.base_url, month, self.format) data = requests.get(url).json() return [depute[self.ptype] for depute in data[self.ptype_plural]]
[ "def", "synthese", "(", "self", ",", "month", "=", "None", ")", ":", "if", "month", "is", "None", "and", "self", ".", "legislature", "==", "'2012-2017'", ":", "raise", "AssertionError", "(", "'Global Synthesis on legislature does not work, see https://github.com/regardscitoyens/nosdeputes.fr/issues/69'", ")", "if", "month", "is", "None", ":", "month", "=", "'data'", "url", "=", "'%s/synthese/%s/%s'", "%", "(", "self", ".", "base_url", ",", "month", ",", "self", ".", "format", ")", "data", "=", "requests", ".", "get", "(", "url", ")", ".", "json", "(", ")", "return", "[", "depute", "[", "self", ".", "ptype", "]", "for", "depute", "in", "data", "[", "self", ".", "ptype_plural", "]", "]" ]
month format: YYYYMM
[ "month", "format", ":", "YYYYMM" ]
python
test
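A small worked example of the URL the method builds (the base URL and month are illustrative values; check the API object for the real defaults):

base_url = 'https://www.nosdeputes.fr'   # assumed, not verified here
month, fmt = '201501', 'json'
url = '%s/synthese/%s/%s' % (base_url, month, fmt)
# -> https://www.nosdeputes.fr/synthese/201501/json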
apple/turicreate
src/unity/python/turicreate/toolkits/_decision_tree.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_decision_tree.py#L125-L138
def to_dict(self): """ Return the node as a dictionary. Returns ------- dict: All the attributes of this node as a dictionary (minus the left and right). """ out = {} for key in self.__dict__.keys(): if key not in ['left', 'right', 'missing', 'parent']: out[key] = self.__dict__[key] return out
[ "def", "to_dict", "(", "self", ")", ":", "out", "=", "{", "}", "for", "key", "in", "self", ".", "__dict__", ".", "keys", "(", ")", ":", "if", "key", "not", "in", "[", "'left'", ",", "'right'", ",", "'missing'", ",", "'parent'", "]", ":", "out", "[", "key", "]", "=", "self", ".", "__dict__", "[", "key", "]", "return", "out" ]
Return the node as a dictionary. Returns ------- dict: All the attributes of this node as a dictionary (minus the left and right).
[ "Return", "the", "node", "as", "a", "dictionary", "." ]
python
train
smarie/python-parsyfiles
parsyfiles/type_inspection_tools.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/type_inspection_tools.py#L573-L624
def get_constructor_attributes_types(item_type) -> Dict[str, Tuple[Type[Any], bool]]: """ Utility method to return a dictionary of attribute name > attribute type from the constructor of a given type It supports PEP484 and 'attrs' declaration, see https://github.com/python-attrs/attrs. :param item_type: :return: a dictionary containing for each attr name, a tuple (type, is_mandatory) """ res = dict() try: # -- Try to read an 'attr' declaration and to extract types and optionality from parsyfiles.plugins_optional.support_for_attrs import get_attrs_declarations decls = get_attrs_declarations(item_type) # check that types are correct for attr_name, v in decls.items(): typ, is_optional = v # -- Get and check that the attribute type is PEP484 compliant typ = get_validated_attribute_type_info(typ, item_type, attr_name) # -- optional = attrs'Optional validator was used, or a default value was set, or type is pep484 Optional is_optional = is_optional or is_pep484_nonable(typ) # -- store both info in result dict res[attr_name] = (typ, not is_optional) return res except: # ImportError or NotAnAttrsClassError but we obviously cant import the latter. pass # do not specify a type and use 'pass' so as to reset the exception context # -- Fallback to PEP484 # first get the signature of the class constructor s = _get_constructor_signature(item_type) # then extract the type and optionality of each attribute and raise errors if needed for attr_name in s.parameters.keys(): # skip the 'self' attribute if attr_name != 'self': # -- Get and check that the attribute type is PEP484 compliant typ = get_validated_attribute_type_info(s.parameters[attr_name].annotation, item_type, attr_name) # -- is the attribute mandatory ? is_mandatory = (s.parameters[attr_name].default is Parameter.empty) and not is_pep484_nonable(typ) # -- store both info in result dict res[attr_name] = (typ, is_mandatory) return res
[ "def", "get_constructor_attributes_types", "(", "item_type", ")", "->", "Dict", "[", "str", ",", "Tuple", "[", "Type", "[", "Any", "]", ",", "bool", "]", "]", ":", "res", "=", "dict", "(", ")", "try", ":", "# -- Try to read an 'attr' declaration and to extract types and optionality", "from", "parsyfiles", ".", "plugins_optional", ".", "support_for_attrs", "import", "get_attrs_declarations", "decls", "=", "get_attrs_declarations", "(", "item_type", ")", "# check that types are correct", "for", "attr_name", ",", "v", "in", "decls", ".", "items", "(", ")", ":", "typ", ",", "is_optional", "=", "v", "# -- Get and check that the attribute type is PEP484 compliant", "typ", "=", "get_validated_attribute_type_info", "(", "typ", ",", "item_type", ",", "attr_name", ")", "# -- optional = attrs'Optional validator was used, or a default value was set, or type is pep484 Optional", "is_optional", "=", "is_optional", "or", "is_pep484_nonable", "(", "typ", ")", "# -- store both info in result dict", "res", "[", "attr_name", "]", "=", "(", "typ", ",", "not", "is_optional", ")", "return", "res", "except", ":", "# ImportError or NotAnAttrsClassError but we obviously cant import the latter.", "pass", "# do not specify a type and use 'pass' so as to reset the exception context", "# -- Fallback to PEP484", "# first get the signature of the class constructor", "s", "=", "_get_constructor_signature", "(", "item_type", ")", "# then extract the type and optionality of each attribute and raise errors if needed", "for", "attr_name", "in", "s", ".", "parameters", ".", "keys", "(", ")", ":", "# skip the 'self' attribute", "if", "attr_name", "!=", "'self'", ":", "# -- Get and check that the attribute type is PEP484 compliant", "typ", "=", "get_validated_attribute_type_info", "(", "s", ".", "parameters", "[", "attr_name", "]", ".", "annotation", ",", "item_type", ",", "attr_name", ")", "# -- is the attribute mandatory ?", "is_mandatory", "=", "(", "s", ".", "parameters", "[", "attr_name", "]", ".", "default", "is", "Parameter", ".", "empty", ")", "and", "not", "is_pep484_nonable", "(", "typ", ")", "# -- store both info in result dict", "res", "[", "attr_name", "]", "=", "(", "typ", ",", "is_mandatory", ")", "return", "res" ]
Utility method to return a dictionary of attribute name > attribute type from the constructor of a given type It supports PEP484 and 'attrs' declaration, see https://github.com/python-attrs/attrs. :param item_type: :return: a dictionary containing for each attr name, a tuple (type, is_mandatory)
[ "Utility", "method", "to", "return", "a", "dictionary", "of", "attribute", "name", ">", "attribute", "type", "from", "the", "constructor", "of", "a", "given", "type", "It", "supports", "PEP484", "and", "attrs", "declaration", "see", "https", ":", "//", "github", ".", "com", "/", "python", "-", "attrs", "/", "attrs", "." ]
python
train
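The PEP 484 fallback path boils down to ``inspect.signature``; a self-contained sketch of just that part (the sample class is invented):

import inspect

class Point(object):
    def __init__(self, x: int, y: int = 0):
        self.x, self.y = x, y

sig = inspect.signature(Point.__init__)
for name, p in sig.parameters.items():
    if name != 'self':
        # mandatory = no default value (nonability check omitted here)
        mandatory = p.default is inspect.Parameter.empty
        print(name, p.annotation, mandatory)
# x <class 'int'> True
# y <class 'int'> False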
msoulier/tftpy
tftpy/TftpPacketTypes.py
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpPacketTypes.py#L292-L305
def encode(self): """Encode the DAT packet. This method populates self.buffer, and returns self for easy method chaining.""" if len(self.data) == 0: log.debug("Encoding an empty DAT packet") data = self.data if not isinstance(self.data, bytes): data = self.data.encode('ascii') fmt = b"!HH%ds" % len(data) self.buffer = struct.pack(fmt, self.opcode, self.blocknumber, data) return self
[ "def", "encode", "(", "self", ")", ":", "if", "len", "(", "self", ".", "data", ")", "==", "0", ":", "log", ".", "debug", "(", "\"Encoding an empty DAT packet\"", ")", "data", "=", "self", ".", "data", "if", "not", "isinstance", "(", "self", ".", "data", ",", "bytes", ")", ":", "data", "=", "self", ".", "data", ".", "encode", "(", "'ascii'", ")", "fmt", "=", "b\"!HH%ds\"", "%", "len", "(", "data", ")", "self", ".", "buffer", "=", "struct", ".", "pack", "(", "fmt", ",", "self", ".", "opcode", ",", "self", ".", "blocknumber", ",", "data", ")", "return", "self" ]
Encode the DAT packet. This method populates self.buffer, and returns self for easy method chaining.
[ "Encode", "the", "DAT", "packet", ".", "This", "method", "populates", "self", ".", "buffer", "and", "returns", "self", "for", "easy", "method", "chaining", "." ]
python
train
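A worked example of the pack format above: a DAT packet (TFTP opcode 3) with block number 1 and a three-byte payload:

import struct

data = b'abc'
fmt = b"!HH%ds" % len(data)            # network byte order: opcode, block, payload
buf = struct.pack(fmt, 3, 1, data)
print(buf)                             # b'\x00\x03\x00\x01abc'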
ga4gh/ga4gh-server
ga4gh/server/datamodel/variants.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/variants.py#L1297-L1326
def convertTranscriptEffect(self, annStr, hgvsG): """ Takes the ANN string of a SnpEff generated VCF, splits it and returns a populated GA4GH transcript effect object. :param annStr: String :param hgvsG: String :return: effect protocol.TranscriptEffect() """ effect = self._createGaTranscriptEffect() effect.hgvs_annotation.CopyFrom(protocol.HGVSAnnotation()) annDict = dict() if self._annotationType == ANNOTATIONS_SNPEFF: annDict = dict(zip(self. SNPEFF_FIELDS, annStr.split("|"))) elif self._annotationType == ANNOTATIONS_VEP_V82: annDict = dict(zip(self.VEP_FIELDS, annStr.split("|"))) else: annDict = dict(zip(self.CSQ_FIELDS, annStr.split("|"))) annDict["hgvs_annotation.genomic"] = hgvsG if hgvsG else u'' for key, val in annDict.items(): try: protocol.deepSetAttr(effect, key, val) except AttributeError: if val and key not in self.EXCLUDED_FIELDS: protocol.setAttribute( effect.attributes.attr[key].values, val) effect.effects.extend(self.convertSeqOntology(annDict.get('effects'))) self.addLocations( effect, annDict.get('protPos'), annDict.get('cdnaPos')) effect.id = self.getTranscriptEffectId(effect) return effect
[ "def", "convertTranscriptEffect", "(", "self", ",", "annStr", ",", "hgvsG", ")", ":", "effect", "=", "self", ".", "_createGaTranscriptEffect", "(", ")", "effect", ".", "hgvs_annotation", ".", "CopyFrom", "(", "protocol", ".", "HGVSAnnotation", "(", ")", ")", "annDict", "=", "dict", "(", ")", "if", "self", ".", "_annotationType", "==", "ANNOTATIONS_SNPEFF", ":", "annDict", "=", "dict", "(", "zip", "(", "self", ".", "SNPEFF_FIELDS", ",", "annStr", ".", "split", "(", "\"|\"", ")", ")", ")", "elif", "self", ".", "_annotationType", "==", "ANNOTATIONS_VEP_V82", ":", "annDict", "=", "dict", "(", "zip", "(", "self", ".", "VEP_FIELDS", ",", "annStr", ".", "split", "(", "\"|\"", ")", ")", ")", "else", ":", "annDict", "=", "dict", "(", "zip", "(", "self", ".", "CSQ_FIELDS", ",", "annStr", ".", "split", "(", "\"|\"", ")", ")", ")", "annDict", "[", "\"hgvs_annotation.genomic\"", "]", "=", "hgvsG", "if", "hgvsG", "else", "u''", "for", "key", ",", "val", "in", "annDict", ".", "items", "(", ")", ":", "try", ":", "protocol", ".", "deepSetAttr", "(", "effect", ",", "key", ",", "val", ")", "except", "AttributeError", ":", "if", "val", "and", "key", "not", "in", "self", ".", "EXCLUDED_FIELDS", ":", "protocol", ".", "setAttribute", "(", "effect", ".", "attributes", ".", "attr", "[", "key", "]", ".", "values", ",", "val", ")", "effect", ".", "effects", ".", "extend", "(", "self", ".", "convertSeqOntology", "(", "annDict", ".", "get", "(", "'effects'", ")", ")", ")", "self", ".", "addLocations", "(", "effect", ",", "annDict", ".", "get", "(", "'protPos'", ")", ",", "annDict", ".", "get", "(", "'cdnaPos'", ")", ")", "effect", ".", "id", "=", "self", ".", "getTranscriptEffectId", "(", "effect", ")", "return", "effect" ]
Takes the ANN string of a SnpEff generated VCF, splits it and returns a populated GA4GH transcript effect object. :param annStr: String :param hgvsG: String :return: effect protocol.TranscriptEffect()
[ "Takes", "the", "ANN", "string", "of", "a", "SnpEff", "generated", "VCF", "splits", "it", "and", "returns", "a", "populated", "GA4GH", "transcript", "effect", "object", ".", ":", "param", "annStr", ":", "String", ":", "param", "hgvsG", ":", "String", ":", "return", ":", "effect", "protocol", ".", "TranscriptEffect", "()" ]
python
train
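The core parsing step pairs a fixed field list with the pipe-separated ANN string; in isolation (the field names here are abbreviated and illustrative, not the real SNPEFF_FIELDS):

SNPEFF_FIELDS = ['alt', 'effects', 'impact', 'geneName']   # truncated for illustration
ann = 'A|missense_variant|MODERATE|BRCA1'
ann_dict = dict(zip(SNPEFF_FIELDS, ann.split('|')))
print(ann_dict['effects'])   # missense_variant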
pjuren/pyokit
src/pyokit/scripts/conservationProfile.py
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/conservationProfile.py#L177-L204
def conservtion_profile_pid(region, genome_alignment,
                            mi_seqs=MissingSequenceHandler.TREAT_AS_ALL_GAPS,
                            species=None):
  """
  build a conservation profile for the given region using the genome alignment.

  The scores in the profile will be the percent of bases identical to the
  reference sequence.

  :param mi_seqs: how to treat sequences with no actual sequence data for
                  the column.
  :return: a list of the same length as the region where each entry is the
           PID at the corresponding locus.
  """
  res = []
  s = region.start if region.isPositiveStrand() else region.end - 1
  e = region.end if region.isPositiveStrand() else region.start - 1
  step = 1 if region.isPositiveStrand() else -1
  for i in range(s, e, step):
    try:
      col = genome_alignment.get_column(region.chrom, i, mi_seqs, species)
      res.append(pid(col))
    except NoSuchAlignmentColumnError:
      res.append(None)
    except NoUniqueColumnError:
      res.append(None)
  return res
[ "def", "conservtion_profile_pid", "(", "region", ",", "genome_alignment", ",", "mi_seqs", "=", "MissingSequenceHandler", ".", "TREAT_AS_ALL_GAPS", ",", "species", "=", "None", ")", ":", "res", "=", "[", "]", "s", "=", "region", ".", "start", "if", "region", ".", "isPositiveStrand", "(", ")", "else", "region", ".", "end", "-", "1", "e", "=", "region", ".", "end", "if", "region", ".", "isPositiveStrand", "(", ")", "else", "region", ".", "start", "-", "1", "step", "=", "1", "if", "region", ".", "isPositiveStrand", "(", ")", "else", "-", "1", "for", "i", "in", "range", "(", "s", ",", "e", ",", "step", ")", ":", "try", ":", "col", "=", "genome_alignment", ".", "get_column", "(", "region", ".", "chrom", ",", "i", ",", "mi_seqs", ",", "species", ")", "res", ".", "append", "(", "pid", "(", "col", ")", ")", "except", "NoSuchAlignmentColumnError", ":", "res", ".", "append", "(", "None", ")", "except", "NoUniqueColumnError", ":", "res", ".", "append", "(", "None", ")", "return", "res" ]
build a conservation profile for the given region using the genome alignment.

The scores in the profile will be the percent of bases identical to the
reference sequence.

:param mi_seqs: how to treat sequences with no actual sequence data for
                the column.
:return: a list of the same length as the region where each entry is the
         PID at the corresponding locus.
[ "build", "a", "conservation", "profile", "for", "the", "given", "region", "using", "the", "genome", "alignment", "." ]
python
train
eyurtsev/fcsparser
fcsparser/api.py
https://github.com/eyurtsev/fcsparser/blob/710e8e31d4b09ff6e73d47d86770be6ca2f4282c/fcsparser/api.py#L226-L293
def read_text(self, file_handle): """Parse the TEXT segment of the FCS file. The TEXT segment contains meta data associated with the FCS file. Converting all meta keywords to lower case. """ header = self.annotation['__header__'] # For convenience ##### # Read in the TEXT segment of the FCS file # There are some differences in how the file_handle.seek(header['text start'], 0) raw_text = file_handle.read(header['text end'] - header['text start'] + 1) try: raw_text = raw_text.decode(self._encoding) except UnicodeDecodeError as e: # Catching the exception and logging it in this way kills the traceback, but # we can worry about this later. logger.warning(u'Encountered an illegal utf-8 byte in the header.\n Illegal utf-8 ' u'characters will be ignored.\n{}'.format(e)) raw_text = raw_text.decode(self._encoding, errors='ignore') text = self._extract_text_dict(raw_text) ## # Extract channel names and convert some of the channel properties # and other fields into numeric data types (from string) # Note: do not use regular expressions for manipulations here. # Regular expressions are too heavy in terms of computation time. pars = int(text['$PAR']) if '$P0B' in text.keys(): # Checking whether channel number count starts from 0 or from 1 self.channel_numbers = range(0, pars) # Channel number count starts from 0 else: self.channel_numbers = range(1, pars + 1) # Channel numbers start from 1 # Extract parameter names try: names_n = tuple([text['$P{0}N'.format(i)] for i in self.channel_numbers]) except KeyError: names_n = [] try: names_s = tuple([text['$P{0}S'.format(i)] for i in self.channel_numbers]) except KeyError: names_s = [] self.channel_names_s = names_s self.channel_names_n = names_n # Convert some of the fields into integer values keys_encoding_bits = ['$P{0}B'.format(i) for i in self.channel_numbers] add_keys_to_convert_to_int = ['$NEXTDATA', '$PAR', '$TOT'] keys_to_convert_to_int = keys_encoding_bits + add_keys_to_convert_to_int for key in keys_to_convert_to_int: value = text[key] text[key] = int(value) self.annotation.update(text) # Update data start segments if needed if self._data_start == 0: self._data_start = int(text['$BEGINDATA']) if self._data_end == 0: self._data_end = int(text['$ENDDATA'])
[ "def", "read_text", "(", "self", ",", "file_handle", ")", ":", "header", "=", "self", ".", "annotation", "[", "'__header__'", "]", "# For convenience", "#####", "# Read in the TEXT segment of the FCS file", "# There are some differences in how the", "file_handle", ".", "seek", "(", "header", "[", "'text start'", "]", ",", "0", ")", "raw_text", "=", "file_handle", ".", "read", "(", "header", "[", "'text end'", "]", "-", "header", "[", "'text start'", "]", "+", "1", ")", "try", ":", "raw_text", "=", "raw_text", ".", "decode", "(", "self", ".", "_encoding", ")", "except", "UnicodeDecodeError", "as", "e", ":", "# Catching the exception and logging it in this way kills the traceback, but", "# we can worry about this later.", "logger", ".", "warning", "(", "u'Encountered an illegal utf-8 byte in the header.\\n Illegal utf-8 '", "u'characters will be ignored.\\n{}'", ".", "format", "(", "e", ")", ")", "raw_text", "=", "raw_text", ".", "decode", "(", "self", ".", "_encoding", ",", "errors", "=", "'ignore'", ")", "text", "=", "self", ".", "_extract_text_dict", "(", "raw_text", ")", "##", "# Extract channel names and convert some of the channel properties", "# and other fields into numeric data types (from string)", "# Note: do not use regular expressions for manipulations here.", "# Regular expressions are too heavy in terms of computation time.", "pars", "=", "int", "(", "text", "[", "'$PAR'", "]", ")", "if", "'$P0B'", "in", "text", ".", "keys", "(", ")", ":", "# Checking whether channel number count starts from 0 or from 1", "self", ".", "channel_numbers", "=", "range", "(", "0", ",", "pars", ")", "# Channel number count starts from 0", "else", ":", "self", ".", "channel_numbers", "=", "range", "(", "1", ",", "pars", "+", "1", ")", "# Channel numbers start from 1", "# Extract parameter names", "try", ":", "names_n", "=", "tuple", "(", "[", "text", "[", "'$P{0}N'", ".", "format", "(", "i", ")", "]", "for", "i", "in", "self", ".", "channel_numbers", "]", ")", "except", "KeyError", ":", "names_n", "=", "[", "]", "try", ":", "names_s", "=", "tuple", "(", "[", "text", "[", "'$P{0}S'", ".", "format", "(", "i", ")", "]", "for", "i", "in", "self", ".", "channel_numbers", "]", ")", "except", "KeyError", ":", "names_s", "=", "[", "]", "self", ".", "channel_names_s", "=", "names_s", "self", ".", "channel_names_n", "=", "names_n", "# Convert some of the fields into integer values", "keys_encoding_bits", "=", "[", "'$P{0}B'", ".", "format", "(", "i", ")", "for", "i", "in", "self", ".", "channel_numbers", "]", "add_keys_to_convert_to_int", "=", "[", "'$NEXTDATA'", ",", "'$PAR'", ",", "'$TOT'", "]", "keys_to_convert_to_int", "=", "keys_encoding_bits", "+", "add_keys_to_convert_to_int", "for", "key", "in", "keys_to_convert_to_int", ":", "value", "=", "text", "[", "key", "]", "text", "[", "key", "]", "=", "int", "(", "value", ")", "self", ".", "annotation", ".", "update", "(", "text", ")", "# Update data start segments if needed", "if", "self", ".", "_data_start", "==", "0", ":", "self", ".", "_data_start", "=", "int", "(", "text", "[", "'$BEGINDATA'", "]", ")", "if", "self", ".", "_data_end", "==", "0", ":", "self", ".", "_data_end", "=", "int", "(", "text", "[", "'$ENDDATA'", "]", ")" ]
Parse the TEXT segment of the FCS file. The TEXT segment contains meta data associated with the FCS file. Converting all meta keywords to lower case.
[ "Parse", "the", "TEXT", "segment", "of", "the", "FCS", "file", "." ]
python
train
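A sketch of the channel-name lookup performed above, with a made-up TEXT dictionary:

text = {'$PAR': '2', '$P1N': 'FSC-A', '$P2N': 'SSC-A'}   # invented values
pars = int(text['$PAR'])
names_n = tuple(text['$P{0}N'.format(i)] for i in range(1, pars + 1))
print(names_n)   # ('FSC-A', 'SSC-A')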