Dataset schema (one record per function):

repo: string, length 7–54
path: string, length 4–192
url: string, length 87–284
code: string, length 78–104k
language: string, 1 distinct value (python)
partition: string, 3 values (train / valid / test)
cggh/scikit-allel
allel/stats/distance.py
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/distance.py#L151-L196
def pairwise_dxy(pos, gac, start=None, stop=None, is_accessible=None):
    """Convenience function to calculate a pairwise distance matrix using
    nucleotide divergence (a.k.a. Dxy) as the distance metric.

    Parameters
    ----------
    pos : array_like, int, shape (n_variants,)
        Variant positions.
    gac : array_like, int, shape (n_variants, n_samples, n_alleles)
        Per-genotype allele counts.
    start : int, optional
        Start position of region to use.
    stop : int, optional
        Stop position of region to use.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in the
        chromosome/contig.

    Returns
    -------
    dist : ndarray
        Distance matrix in condensed form.

    See Also
    --------
    allel.model.ndarray.GenotypeArray.to_allele_counts

    """
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    gac = asarray_ndim(gac, 3)
    # compute this once here, to avoid repeated evaluation within the loop
    gan = np.sum(gac, axis=2)
    m = gac.shape[1]
    dist = list()
    for i, j in itertools.combinations(range(m), 2):
        ac1 = gac[:, i, ...]
        an1 = gan[:, i]
        ac2 = gac[:, j, ...]
        an2 = gan[:, j]
        d = sequence_divergence(pos, ac1, ac2, an1=an1, an2=an2,
                                start=start, stop=stop,
                                is_accessible=is_accessible)
        dist.append(d)
    return np.array(dist)
[ "def", "pairwise_dxy", "(", "pos", ",", "gac", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "is_accessible", "=", "None", ")", ":", "if", "not", "isinstance", "(", "pos", ",", "SortedIndex", ")", ":", "pos", "=", "SortedIndex", "(", "pos", ",", "copy", "=", "False", ")", "gac", "=", "asarray_ndim", "(", "gac", ",", "3", ")", "# compute this once here, to avoid repeated evaluation within the loop", "gan", "=", "np", ".", "sum", "(", "gac", ",", "axis", "=", "2", ")", "m", "=", "gac", ".", "shape", "[", "1", "]", "dist", "=", "list", "(", ")", "for", "i", ",", "j", "in", "itertools", ".", "combinations", "(", "range", "(", "m", ")", ",", "2", ")", ":", "ac1", "=", "gac", "[", ":", ",", "i", ",", "...", "]", "an1", "=", "gan", "[", ":", ",", "i", "]", "ac2", "=", "gac", "[", ":", ",", "j", ",", "...", "]", "an2", "=", "gan", "[", ":", ",", "j", "]", "d", "=", "sequence_divergence", "(", "pos", ",", "ac1", ",", "ac2", ",", "an1", "=", "an1", ",", "an2", "=", "an2", ",", "start", "=", "start", ",", "stop", "=", "stop", ",", "is_accessible", "=", "is_accessible", ")", "dist", ".", "append", "(", "d", ")", "return", "np", ".", "array", "(", "dist", ")" ]
Convenience function to calculate a pairwise distance matrix using nucleotide divergence (a.k.a. Dxy) as the distance metric. Parameters ---------- pos : array_like, int, shape (n_variants,) Variant positions. gac : array_like, int, shape (n_variants, n_samples, n_alleles) Per-genotype allele counts. start : int, optional Start position of region to use. stop : int, optional Stop position of region to use. is_accessible : array_like, bool, shape (len(contig),), optional Boolean array indicating accessibility status for all positions in the chromosome/contig. Returns ------- dist : ndarray Distance matrix in condensed form. See Also -------- allel.model.ndarray.GenotypeArray.to_allele_counts
[ "Convenience", "function", "to", "calculate", "a", "pairwise", "distance", "matrix", "using", "nucleotide", "divergence", "(", "a", ".", "k", ".", "a", ".", "Dxy", ")", "as", "the", "distance", "metric", "." ]
python
train
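A minimal usage sketch for pairwise_dxy, with invented genotype data; it assumes scikit-allel exposes the function at package level, as its stats functions usually are:

import numpy as np
import allel

# two variants, three diploid samples (illustrative data)
g = allel.GenotypeArray([[[0, 0], [0, 1], [1, 1]],
                         [[0, 1], [1, 1], [0, 0]]])
gac = g.to_allele_counts()      # shape (n_variants, n_samples, n_alleles)
pos = np.array([10, 20])        # variant positions
dist = allel.pairwise_dxy(pos, gac)
print(dist)                     # condensed matrix: m*(m-1)/2 = 3 entries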
gwastro/pycbc
pycbc/pnutils.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/pnutils.py#L529-L580
def get_inspiral_tf(tc, mass1, mass2, spin1, spin2, f_low, n_points=100,
        pn_2order=7, approximant='TaylorF2'):
    """Compute the time-frequency evolution of an inspiral signal.

    Return a tuple of time and frequency vectors tracking the
    evolution of an inspiral signal in the time-frequency plane.
    """
    # handle param-dependent approximant specification
    class Params:
        pass
    params = Params()
    params.mass1 = mass1
    params.mass2 = mass2
    params.spin1z = spin1
    params.spin2z = spin2
    try:
        approximant = eval(approximant, {'__builtins__': None},
                           dict(params=params))
    except NameError:
        pass

    if approximant in ['TaylorF2', 'SPAtmplt']:
        from pycbc.waveform.spa_tmplt import findchirp_chirptime

        # FIXME spins are not taken into account
        f_high = f_SchwarzISCO(mass1 + mass2)
        track_f = numpy.logspace(numpy.log10(f_low), numpy.log10(f_high),
                                 n_points)
        track_t = numpy.array([findchirp_chirptime(float(mass1),
                float(mass2), float(f), pn_2order) for f in track_f])
    elif approximant in ['SEOBNRv2', 'SEOBNRv2_ROM_DoubleSpin',
                         'SEOBNRv2_ROM_DoubleSpin_HI']:
        f_high = get_final_freq('SEOBNRv2', mass1, mass2, spin1, spin2)
        track_f = numpy.logspace(numpy.log10(f_low), numpy.log10(f_high),
                                 n_points)
        # use HI function as it has wider freq range validity
        track_t = numpy.array([
                lalsimulation.SimIMRSEOBNRv2ROMDoubleSpinHITimeOfFrequency(f,
                    solar_mass_to_kg(mass1), solar_mass_to_kg(mass2),
                    float(spin1), float(spin2)) for f in track_f])
    elif approximant in ['SEOBNRv4', 'SEOBNRv4_ROM']:
        f_high = get_final_freq('SEOBNRv4', mass1, mass2, spin1, spin2)
        # use frequency below final freq in case of rounding error
        track_f = numpy.logspace(numpy.log10(f_low),
                                 numpy.log10(0.999*f_high), n_points)
        track_t = numpy.array([
                lalsimulation.SimIMRSEOBNRv4ROMTimeOfFrequency(f,
                    solar_mass_to_kg(mass1), solar_mass_to_kg(mass2),
                    float(spin1), float(spin2)) for f in track_f])
    else:
        raise ValueError('Approximant ' + approximant + ' not supported')
    return (tc - track_t, track_f)
[ "def", "get_inspiral_tf", "(", "tc", ",", "mass1", ",", "mass2", ",", "spin1", ",", "spin2", ",", "f_low", ",", "n_points", "=", "100", ",", "pn_2order", "=", "7", ",", "approximant", "=", "'TaylorF2'", ")", ":", "# handle param-dependent approximant specification", "class", "Params", ":", "pass", "params", "=", "Params", "(", ")", "params", ".", "mass1", "=", "mass1", "params", ".", "mass2", "=", "mass2", "params", ".", "spin1z", "=", "spin1", "params", ".", "spin2z", "=", "spin2", "try", ":", "approximant", "=", "eval", "(", "approximant", ",", "{", "'__builtins__'", ":", "None", "}", ",", "dict", "(", "params", "=", "params", ")", ")", "except", "NameError", ":", "pass", "if", "approximant", "in", "[", "'TaylorF2'", ",", "'SPAtmplt'", "]", ":", "from", "pycbc", ".", "waveform", ".", "spa_tmplt", "import", "findchirp_chirptime", "# FIXME spins are not taken into account", "f_high", "=", "f_SchwarzISCO", "(", "mass1", "+", "mass2", ")", "track_f", "=", "numpy", ".", "logspace", "(", "numpy", ".", "log10", "(", "f_low", ")", ",", "numpy", ".", "log10", "(", "f_high", ")", ",", "n_points", ")", "track_t", "=", "numpy", ".", "array", "(", "[", "findchirp_chirptime", "(", "float", "(", "mass1", ")", ",", "float", "(", "mass2", ")", ",", "float", "(", "f", ")", ",", "pn_2order", ")", "for", "f", "in", "track_f", "]", ")", "elif", "approximant", "in", "[", "'SEOBNRv2'", ",", "'SEOBNRv2_ROM_DoubleSpin'", ",", "'SEOBNRv2_ROM_DoubleSpin_HI'", "]", ":", "f_high", "=", "get_final_freq", "(", "'SEOBNRv2'", ",", "mass1", ",", "mass2", ",", "spin1", ",", "spin2", ")", "track_f", "=", "numpy", ".", "logspace", "(", "numpy", ".", "log10", "(", "f_low", ")", ",", "numpy", ".", "log10", "(", "f_high", ")", ",", "n_points", ")", "# use HI function as it has wider freq range validity", "track_t", "=", "numpy", ".", "array", "(", "[", "lalsimulation", ".", "SimIMRSEOBNRv2ROMDoubleSpinHITimeOfFrequency", "(", "f", ",", "solar_mass_to_kg", "(", "mass1", ")", ",", "solar_mass_to_kg", "(", "mass2", ")", ",", "float", "(", "spin1", ")", ",", "float", "(", "spin2", ")", ")", "for", "f", "in", "track_f", "]", ")", "elif", "approximant", "in", "[", "'SEOBNRv4'", ",", "'SEOBNRv4_ROM'", "]", ":", "f_high", "=", "get_final_freq", "(", "'SEOBNRv4'", ",", "mass1", ",", "mass2", ",", "spin1", ",", "spin2", ")", "# use frequency below final freq in case of rounding error", "track_f", "=", "numpy", ".", "logspace", "(", "numpy", ".", "log10", "(", "f_low", ")", ",", "numpy", ".", "log10", "(", "0.999", "*", "f_high", ")", ",", "n_points", ")", "track_t", "=", "numpy", ".", "array", "(", "[", "lalsimulation", ".", "SimIMRSEOBNRv4ROMTimeOfFrequency", "(", "f", ",", "solar_mass_to_kg", "(", "mass1", ")", ",", "solar_mass_to_kg", "(", "mass2", ")", ",", "float", "(", "spin1", ")", ",", "float", "(", "spin2", ")", ")", "for", "f", "in", "track_f", "]", ")", "else", ":", "raise", "ValueError", "(", "'Approximant '", "+", "approximant", "+", "' not supported'", ")", "return", "(", "tc", "-", "track_t", ",", "track_f", ")" ]
Compute the time-frequency evolution of an inspiral signal. Return a tuple of time and frequency vectors tracking the evolution of an inspiral signal in the time-frequency plane.
[ "Compute", "the", "time", "-", "frequency", "evolution", "of", "an", "inspiral", "signal", "." ]
python
train
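A short usage sketch for get_inspiral_tf, assuming PyCBC and its LALSuite dependency are installed; the masses and low frequency are arbitrary illustrative values:

from pycbc.pnutils import get_inspiral_tf

# 30+30 solar-mass, non-spinning TaylorF2 inspiral tracked from 20 Hz,
# with the coalescence time tc placed at t = 0
times, freqs = get_inspiral_tf(0.0, 30.0, 30.0, 0.0, 0.0, 20.0)
print(times[0], freqs[0])   # earliest sample: most negative time, lowest frequency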
pyusb/pyusb
usb/util.py
https://github.com/pyusb/pyusb/blob/ffe6faf42c6ad273880b0b464b9bbf44c1d4b2e9/usb/util.py#L151-L179
def find_descriptor(desc, find_all=False, custom_match=None, **args):
    r"""Find an inner descriptor.

    find_descriptor works in the same way as the core.find() function does,
    but it acts on general descriptor objects. For example, suppose you
    have a Device object called dev and want a Configuration of this
    object with its bConfigurationValue equal to 1, the code would
    be like so:

    >>> cfg = util.find_descriptor(dev, bConfigurationValue=1)

    You can use any field of the Descriptor as a match criterion, and you
    can supply a customized match just like core.find() does.

    The find_descriptor function also accepts the find_all parameter to
    get an iterator instead of just one descriptor.
    """
    def desc_iter(**kwargs):
        for d in desc:
            tests = (val == getattr(d, key) for key, val in kwargs.items())
            if _interop._all(tests) and (custom_match is None or custom_match(d)):
                yield d

    if find_all:
        return desc_iter(**args)
    else:
        try:
            return _interop._next(desc_iter(**args))
        except StopIteration:
            return None
[ "def", "find_descriptor", "(", "desc", ",", "find_all", "=", "False", ",", "custom_match", "=", "None", ",", "*", "*", "args", ")", ":", "def", "desc_iter", "(", "*", "*", "kwargs", ")", ":", "for", "d", "in", "desc", ":", "tests", "=", "(", "val", "==", "getattr", "(", "d", ",", "key", ")", "for", "key", ",", "val", "in", "kwargs", ".", "items", "(", ")", ")", "if", "_interop", ".", "_all", "(", "tests", ")", "and", "(", "custom_match", "is", "None", "or", "custom_match", "(", "d", ")", ")", ":", "yield", "d", "if", "find_all", ":", "return", "desc_iter", "(", "*", "*", "args", ")", "else", ":", "try", ":", "return", "_interop", ".", "_next", "(", "desc_iter", "(", "*", "*", "args", ")", ")", "except", "StopIteration", ":", "return", "None" ]
r"""Find an inner descriptor. find_descriptor works in the same way as the core.find() function does, but it acts on general descriptor objects. For example, suppose you have a Device object called dev and want a Configuration of this object with its bConfigurationValue equals to 1, the code would be like so: >>> cfg = util.find_descriptor(dev, bConfigurationValue=1) You can use any field of the Descriptor as a match criteria, and you can supply a customized match just like core.find() does. The find_descriptor function also accepts the find_all parameter to get an iterator instead of just one descriptor.
[ "r", "Find", "an", "inner", "descriptor", "." ]
python
train
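A usage sketch for find_descriptor; it assumes a USB device is attached, since usb.core.find() returns None otherwise:

import usb.core
import usb.util

dev = usb.core.find()
if dev is not None:
    cfg = usb.util.find_descriptor(dev, bConfigurationValue=1)
    # find_all=True yields every matching inner descriptor instead of one
    for intf in usb.util.find_descriptor(cfg, find_all=True):
        print(intf.bInterfaceNumber)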
RudolfCardinal/pythonlib
cardinal_pythonlib/stringfunc.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/stringfunc.py#L86-L103
def replace_in_list(stringlist: Iterable[str],
                    replacedict: Dict[str, str]) -> List[str]:
    """
    Returns a list produced by applying :func:`multiple_replace` to every
    string in ``stringlist``.

    Args:
        stringlist: list of source strings
        replacedict: dictionary mapping "original" to "replacement" strings

    Returns:
        list of final strings
    """
    newlist = []
    for fromstring in stringlist:
        newlist.append(multiple_replace(fromstring, replacedict))
    return newlist
[ "def", "replace_in_list", "(", "stringlist", ":", "Iterable", "[", "str", "]", ",", "replacedict", ":", "Dict", "[", "str", ",", "str", "]", ")", "->", "List", "[", "str", "]", ":", "newlist", "=", "[", "]", "for", "fromstring", "in", "stringlist", ":", "newlist", ".", "append", "(", "multiple_replace", "(", "fromstring", ",", "replacedict", ")", ")", "return", "newlist" ]
Returns a list produced by applying :func:`multiple_replace` to every string in ``stringlist``. Args: stringlist: list of source strings replacedict: dictionary mapping "original" to "replacement" strings Returns: list of final strings
[ "Returns", "a", "list", "produced", "by", "applying", ":", "func", ":", "multiple_replace", "to", "every", "string", "in", "stringlist", "." ]
python
train
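A tiny usage sketch for replace_in_list; it assumes multiple_replace applies each dictionary mapping to the string, as the docstring describes:

from cardinal_pythonlib.stringfunc import replace_in_list

print(replace_in_list(["ab", "bc"], {"a": "x", "b": "y"}))
# expected: ['xy', 'yc']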
saltstack/salt
salt/modules/etcd_mod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/etcd_mod.py#L120-L171
def update(fields, path='', profile=None, **kwargs):
    '''
    .. versionadded:: 2016.3.0

    Sets a dictionary of values in one call.  Useful for large updates
    in syndic environments.  The dictionary can contain a mix of formats
    such as:

    .. code-block:: python

        {
          '/some/example/key': 'bar',
          '/another/example/key': 'baz'
        }

    Or it may be a straight dictionary, which will be flattened to look
    like the above format:

    .. code-block:: python

        {
            'some': {
                'example': {
                    'key': 'bar'
                }
            },
            'another': {
                'example': {
                    'key': 'baz'
                }
            }
        }

    You can even mix the two formats and it will be flattened to the first
    format.  Leading and trailing '/' will be removed.

    Empty directories can be created by setting the value of the key to an
    empty dictionary.

    The 'path' parameter will optionally set the root of the path to use.

    CLI Example:

    .. code-block:: bash

        salt myminion etcd.update "{'/path/to/key': 'baz', '/another/key': 'bar'}"
        salt myminion etcd.update "{'/path/to/key': 'baz', '/another/key': 'bar'}" profile=my_etcd_config
        salt myminion etcd.update "{'/path/to/key': 'baz', '/another/key': 'bar'}" host=127.0.0.1 port=2379
        salt myminion etcd.update "{'/path/to/key': 'baz', '/another/key': 'bar'}" path='/some/root'
    '''
    client = __utils__['etcd_util.get_conn'](__opts__, profile, **kwargs)
    return client.update(fields, path)
[ "def", "update", "(", "fields", ",", "path", "=", "''", ",", "profile", "=", "None", ",", "*", "*", "kwargs", ")", ":", "client", "=", "__utils__", "[", "'etcd_util.get_conn'", "]", "(", "__opts__", ",", "profile", ",", "*", "*", "kwargs", ")", "return", "client", ".", "update", "(", "fields", ",", "path", ")" ]
.. versionadded:: 2016.3.0 Sets a dictionary of values in one call. Useful for large updates in syndic environments. The dictionary can contain a mix of formats such as: .. code-block:: python { '/some/example/key': 'bar', '/another/example/key': 'baz' } Or it may be a straight dictionary, which will be flattened to look like the above format: .. code-block:: python { 'some': { 'example': { 'key': 'bar' } }, 'another': { 'example': { 'key': 'baz' } } } You can even mix the two formats and it will be flattened to the first format. Leading and trailing '/' will be removed. Empty directories can be created by setting the value of the key to an empty dictionary. The 'path' parameter will optionally set the root of the path to use. CLI Example: .. code-block:: bash salt myminion etcd.update "{'/path/to/key': 'baz', '/another/key': 'bar'}" salt myminion etcd.update "{'/path/to/key': 'baz', '/another/key': 'bar'}" profile=my_etcd_config salt myminion etcd.update "{'/path/to/key': 'baz', '/another/key': 'bar'}" host=127.0.0.1 port=2379 salt myminion etcd.update "{'/path/to/key': 'baz', '/another/key': 'bar'}" path='/some/root'
[ "..", "versionadded", "::", "2016", ".", "3", ".", "0" ]
python
train
osfclient/osfclient
osfclient/cli.py
https://github.com/osfclient/osfclient/blob/44b9a87e8c1ae6b63cdecd27a924af3fc2bf94cf/osfclient/cli.py#L140-L175
def clone(args):
    """Copy all files from all storages of a project.

    The output directory defaults to the current directory.

    If the project is private you need to specify a username.

    If args.update is True, overwrite any existing local files only if
    local and remote files differ.
    """
    osf = _setup_osf(args)
    project = osf.project(args.project)
    output_dir = args.project
    if args.output is not None:
        output_dir = args.output

    with tqdm(unit='files') as pbar:
        for store in project.storages:
            prefix = os.path.join(output_dir, store.name)

            for file_ in store.files:
                path = file_.path
                if path.startswith('/'):
                    path = path[1:]

                path = os.path.join(prefix, path)
                if os.path.exists(path) and args.update:
                    if checksum(path) == file_.hashes.get('md5'):
                        continue
                directory, _ = os.path.split(path)
                makedirs(directory, exist_ok=True)

                with open(path, "wb") as f:
                    file_.write_to(f)

                pbar.update()
[ "def", "clone", "(", "args", ")", ":", "osf", "=", "_setup_osf", "(", "args", ")", "project", "=", "osf", ".", "project", "(", "args", ".", "project", ")", "output_dir", "=", "args", ".", "project", "if", "args", ".", "output", "is", "not", "None", ":", "output_dir", "=", "args", ".", "output", "with", "tqdm", "(", "unit", "=", "'files'", ")", "as", "pbar", ":", "for", "store", "in", "project", ".", "storages", ":", "prefix", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "store", ".", "name", ")", "for", "file_", "in", "store", ".", "files", ":", "path", "=", "file_", ".", "path", "if", "path", ".", "startswith", "(", "'/'", ")", ":", "path", "=", "path", "[", "1", ":", "]", "path", "=", "os", ".", "path", ".", "join", "(", "prefix", ",", "path", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", "and", "args", ".", "update", ":", "if", "checksum", "(", "path", ")", "==", "file_", ".", "hashes", ".", "get", "(", "'md5'", ")", ":", "continue", "directory", ",", "_", "=", "os", ".", "path", ".", "split", "(", "path", ")", "makedirs", "(", "directory", ",", "exist_ok", "=", "True", ")", "with", "open", "(", "path", ",", "\"wb\"", ")", "as", "f", ":", "file_", ".", "write_to", "(", "f", ")", "pbar", ".", "update", "(", ")" ]
Copy all files from all storages of a project. The output directory defaults to the current directory. If the project is private you need to specify a username. If args.update is True, overwrite any existing local files only if local and remote files differ.
[ "Copy", "all", "files", "from", "all", "storages", "of", "a", "project", "." ]
python
valid
codenerix/django-codenerix
codenerix/djng/angular_base.py
https://github.com/codenerix/django-codenerix/blob/1f5527b352141caaee902b37b2648791a06bd57d/codenerix/djng/angular_base.py#L166-L184
def as_widget(self, widget=None, attrs=None, only_initial=False):
    """
    Renders the field.
    """
    attrs = attrs or {}
    attrs.update(self.form.get_widget_attrs(self))
    if hasattr(self.field, 'widget_css_classes'):
        css_classes = self.field.widget_css_classes
    else:
        css_classes = getattr(self.form, 'widget_css_classes', None)
    if css_classes:
        attrs.update({'class': css_classes})
    widget_classes = self.form.fields[self.name].widget.attrs.get('class', None)
    if widget_classes:
        if attrs.get('class', None):
            attrs['class'] += ' ' + widget_classes
        else:
            attrs.update({'class': widget_classes})
    return super(NgBoundField, self).as_widget(widget, attrs, only_initial)
[ "def", "as_widget", "(", "self", ",", "widget", "=", "None", ",", "attrs", "=", "None", ",", "only_initial", "=", "False", ")", ":", "attrs", "=", "attrs", "or", "{", "}", "attrs", ".", "update", "(", "self", ".", "form", ".", "get_widget_attrs", "(", "self", ")", ")", "if", "hasattr", "(", "self", ".", "field", ",", "'widget_css_classes'", ")", ":", "css_classes", "=", "self", ".", "field", ".", "widget_css_classes", "else", ":", "css_classes", "=", "getattr", "(", "self", ".", "form", ",", "'widget_css_classes'", ",", "None", ")", "if", "css_classes", ":", "attrs", ".", "update", "(", "{", "'class'", ":", "css_classes", "}", ")", "widget_classes", "=", "self", ".", "form", ".", "fields", "[", "self", ".", "name", "]", ".", "widget", ".", "attrs", ".", "get", "(", "'class'", ",", "None", ")", "if", "widget_classes", ":", "if", "attrs", ".", "get", "(", "'class'", ",", "None", ")", ":", "attrs", "[", "'class'", "]", "+=", "' '", "+", "widget_classes", "else", ":", "attrs", ".", "update", "(", "{", "'class'", ":", "widget_classes", "}", ")", "return", "super", "(", "NgBoundField", ",", "self", ")", ".", "as_widget", "(", "widget", ",", "attrs", ",", "only_initial", ")" ]
Renders the field.
[ "Renders", "the", "field", "." ]
python
train
apache/incubator-mxnet
python/mxnet/gluon/block.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/block.py#L485-L502
def initialize(self, init=initializer.Uniform(), ctx=None, verbose=False,
               force_reinit=False):
    """Initializes :py:class:`Parameter` s of this :py:class:`Block` and its children.
    Equivalent to ``block.collect_params().initialize(...)``

    Parameters
    ----------
    init : Initializer
        Global default Initializer to be used when :py:meth:`Parameter.init` is ``None``.
        Otherwise, :py:meth:`Parameter.init` takes precedence.
    ctx : Context or list of Context
        Keeps a copy of Parameters on one or many context(s).
    verbose : bool, default False
        Whether to verbosely print out details on initialization.
    force_reinit : bool, default False
        Whether to force re-initialization if parameter is already initialized.
    """
    self.collect_params().initialize(init, ctx, verbose, force_reinit)
[ "def", "initialize", "(", "self", ",", "init", "=", "initializer", ".", "Uniform", "(", ")", ",", "ctx", "=", "None", ",", "verbose", "=", "False", ",", "force_reinit", "=", "False", ")", ":", "self", ".", "collect_params", "(", ")", ".", "initialize", "(", "init", ",", "ctx", ",", "verbose", ",", "force_reinit", ")" ]
Initializes :py:class:`Parameter` s of this :py:class:`Block` and its children. Equivalent to ``block.collect_params().initialize(...)`` Parameters ---------- init : Initializer Global default Initializer to be used when :py:meth:`Parameter.init` is ``None``. Otherwise, :py:meth:`Parameter.init` takes precedence. ctx : Context or list of Context Keeps a copy of Parameters on one or many context(s). verbose : bool, default False Whether to verbosely print out details on initialization. force_reinit : bool, default False Whether to force re-initialization if parameter is already initialized.
[ "Initializes", ":", "py", ":", "class", ":", "Parameter", "s", "of", "this", ":", "py", ":", "class", ":", "Block", "and", "its", "children", ".", "Equivalent", "to", "block", ".", "collect_params", "()", ".", "initialize", "(", "...", ")" ]
python
train
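A brief usage sketch of Block.initialize() with the Gluon API; the layer size and input shape are arbitrary:

import mxnet as mx
from mxnet import gluon

net = gluon.nn.Dense(10)
# Xavier overrides the Uniform default; parameters are kept on CPU
net.initialize(init=mx.init.Xavier(), ctx=mx.cpu())
out = net(mx.nd.ones((2, 5)))   # first forward pass completes deferred shape inference
print(out.shape)                # (2, 10)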
Workiva/furious
furious/extras/appengine/ndb_persistence.py
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/extras/appengine/ndb_persistence.py#L187-L210
def _check_markers(task_ids, offset=10):
    """Returns a flag for markers being found for the task_ids. If all
    task ids have markers True will be returned. Otherwise it will return
    False as soon as a None result is hit.
    """
    shuffle(task_ids)
    has_errors = False

    for index in xrange(0, len(task_ids), offset):
        keys = [ndb.Key(FuriousAsyncMarker, id)
                for id in task_ids[index:index + offset]]
        markers = ndb.get_multi(keys)

        if not all(markers):
            logging.debug("Not all Async's complete")
            return False, None

        # Did any of the async's fail? Check the success property on the
        # AsyncResult.
        has_errors = not all((marker.success for marker in markers))

    return True, has_errors
[ "def", "_check_markers", "(", "task_ids", ",", "offset", "=", "10", ")", ":", "shuffle", "(", "task_ids", ")", "has_errors", "=", "False", "for", "index", "in", "xrange", "(", "0", ",", "len", "(", "task_ids", ")", ",", "offset", ")", ":", "keys", "=", "[", "ndb", ".", "Key", "(", "FuriousAsyncMarker", ",", "id", ")", "for", "id", "in", "task_ids", "[", "index", ":", "index", "+", "offset", "]", "]", "markers", "=", "ndb", ".", "get_multi", "(", "keys", ")", "if", "not", "all", "(", "markers", ")", ":", "logging", ".", "debug", "(", "\"Not all Async's complete\"", ")", "return", "False", ",", "None", "# Did any of the aync's fail? Check the success property on the", "# AsyncResult.", "has_errors", "=", "not", "all", "(", "(", "marker", ".", "success", "for", "marker", "in", "markers", ")", ")", "return", "True", ",", "has_errors" ]
Returns a flag for markers being found for the task_ids. If all task ids have markers True will be returned. Otherwise it will return False as soon as a None result is hit.
[ "Returns", "a", "flag", "for", "markers", "being", "found", "for", "the", "task_ids", ".", "If", "all", "task", "ids", "have", "markers", "True", "will", "be", "returned", ".", "Otherwise", "it", "will", "return", "False", "as", "soon", "as", "a", "None", "result", "is", "hit", "." ]
python
train
hannes-brt/hebel
hebel/utils/serial.py
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/utils/serial.py#L248-L265
def get_pickle_protocol():
    """
    Allow configuration of the pickle protocol on a per-machine basis.
    This way, if you use multiple platforms with different versions of
    pickle, you can configure each of them to use the highest protocol
    supported by all of the machines that you want to be able to
    communicate.
    """
    try:
        protocol_str = os.environ['PYLEARN2_PICKLE_PROTOCOL']
    except KeyError:
        # If not defined, we default to 0 because this is the default
        # protocol used by cPickle.dump (and because it results in
        # maximum portability)
        protocol_str = '0'
    if protocol_str == 'pickle.HIGHEST_PROTOCOL':
        return pickle.HIGHEST_PROTOCOL
    return int(protocol_str)
[ "def", "get_pickle_protocol", "(", ")", ":", "try", ":", "protocol_str", "=", "os", ".", "environ", "[", "'PYLEARN2_PICKLE_PROTOCOL'", "]", "except", "KeyError", ":", "# If not defined, we default to 0 because this is the default", "# protocol used by cPickle.dump (and because it results in", "# maximum portability)", "protocol_str", "=", "'0'", "if", "protocol_str", "==", "'pickle.HIGHEST_PROTOCOL'", ":", "return", "pickle", ".", "HIGHEST_PROTOCOL", "return", "int", "(", "protocol_str", ")" ]
Allow configuration of the pickle protocol on a per-machine basis. This way, if you use multiple platforms with different versions of pickle, you can configure each of them to use the highest protocol supported by all of the machines that you want to be able to communicate.
[ "Allow", "configuration", "of", "the", "pickle", "protocol", "on", "a", "per", "-", "machine", "basis", ".", "This", "way", "if", "you", "use", "multiple", "platforms", "with", "different", "versions", "of", "pickle", "you", "can", "configure", "each", "of", "them", "to", "use", "the", "highest", "protocol", "supported", "by", "all", "of", "the", "machines", "that", "you", "want", "to", "be", "able", "to", "communicate", "." ]
python
train
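A sketch of how the environment variable above is consumed, assuming the module imports cleanly in your environment:

import os
from hebel.utils.serial import get_pickle_protocol

os.environ['PYLEARN2_PICKLE_PROTOCOL'] = 'pickle.HIGHEST_PROTOCOL'
print(get_pickle_protocol())   # highest protocol of the running interpreter

del os.environ['PYLEARN2_PICKLE_PROTOCOL']
print(get_pickle_protocol())   # falls back to 0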
mcs07/PubChemPy
pubchempy.py
https://github.com/mcs07/PubChemPy/blob/e3c4f4a9b6120433e5cc3383464c7a79e9b2b86e/pubchempy.py#L757-L767
def cid(self):
    """The PubChem Compound Identifier (CID).

    .. note::

        When searching using a SMILES or InChI query that is not present in
        the PubChem Compound database, an automatically generated record may
        be returned that contains properties that have been calculated on the
        fly. These records will not have a CID property.
    """
    if 'id' in self.record and 'id' in self.record['id'] and 'cid' in self.record['id']['id']:
        return self.record['id']['id']['cid']
[ "def", "cid", "(", "self", ")", ":", "if", "'id'", "in", "self", ".", "record", "and", "'id'", "in", "self", ".", "record", "[", "'id'", "]", "and", "'cid'", "in", "self", ".", "record", "[", "'id'", "]", "[", "'id'", "]", ":", "return", "self", ".", "record", "[", "'id'", "]", "[", "'id'", "]", "[", "'cid'", "]" ]
The PubChem Compound Identifier (CID). .. note:: When searching using a SMILES or InChI query that is not present in the PubChem Compound database, an automatically generated record may be returned that contains properties that have been calculated on the fly. These records will not have a CID property.
[ "The", "PubChem", "Compound", "Identifier", "(", "CID", ")", "." ]
python
train
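A minimal usage sketch for the cid property via PubChemPy's public API; it needs network access to PubChem:

import pubchempy as pcp

c = pcp.Compound.from_cid(2244)   # aspirin
print(c.cid)                      # 2244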
piotr-rusin/spam-lists
spam_lists/structures.py
https://github.com/piotr-rusin/spam-lists/blob/fd616e8761b28f3eaa503fee5e45f7748e8f88f2/spam_lists/structures.py#L84-L94
def is_subdomain(self, other):
    """Test if the object is a subdomain of the other.

    :param other: the object to which we compare this instance
    :returns: True if this instance is a subdomain of the other
    """
    compared = other.value if hasattr(other, 'value') else other
    try:
        return self.value.is_subdomain(compared)
    except AttributeError:
        return False
[ "def", "is_subdomain", "(", "self", ",", "other", ")", ":", "compared", "=", "other", ".", "value", "if", "hasattr", "(", "other", ",", "'value'", ")", "else", "other", "try", ":", "return", "self", ".", "value", ".", "is_subdomain", "(", "compared", ")", "except", "AttributeError", ":", "return", "False" ]
Test if the object is a subdomain of the other. :param other: the object to which we compare this instance :returns: True if this instance is a subdomain of the other
[ "Test", "if", "the", "object", "is", "a", "subdomain", "of", "the", "other", "." ]
python
train
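The wrapped .value is expected to implement is_subdomain() itself; dnspython's Name class, which spam_lists builds on, is one such implementation (an assumption worth checking against the installed version):

import dns.name

sub = dns.name.from_text('mail.example.com')
parent = dns.name.from_text('example.com')
print(sub.is_subdomain(parent))   # True
print(parent.is_subdomain(sub))   # False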
LogicalDash/LiSE
ELiDE/ELiDE/board/board.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/board/board.py#L223-L265
def portal_touch_up(self, touch):
    """Try to create a portal between the spots the user chose."""
    try:
        # If the touch ended upon a spot, and there isn't
        # already a portal between the origin and this
        # destination, create one.
        destspot = next(self.spots_at(*touch.pos))
        orig = self.origspot.proxy
        dest = destspot.proxy
        if not(
                orig.name in self.character.portal and
                dest.name in self.character.portal[orig.name]
        ):
            port = self.character.new_portal(
                orig.name,
                dest.name
            )
            self.arrowlayout.add_widget(
                self.make_arrow(port)
            )
        # And another in the opposite direction if needed
        if (
            hasattr(self, 'protoportal2') and not(
                orig.name in self.character.preportal and
                dest.name in self.character.preportal[orig.name]
            )
        ):
            deport = self.character.new_portal(
                dest.name,
                orig.name
            )
            self.arrowlayout.add_widget(
                self.make_arrow(deport)
            )
    except StopIteration:
        pass
    self.remove_widget(self.protoportal)
    if hasattr(self, 'protoportal2'):
        self.remove_widget(self.protoportal2)
        del self.protoportal2
    self.remove_widget(self.protodest)
    del self.protoportal
    del self.protodest
[ "def", "portal_touch_up", "(", "self", ",", "touch", ")", ":", "try", ":", "# If the touch ended upon a spot, and there isn't", "# already a portal between the origin and this", "# destination, create one.", "destspot", "=", "next", "(", "self", ".", "spots_at", "(", "*", "touch", ".", "pos", ")", ")", "orig", "=", "self", ".", "origspot", ".", "proxy", "dest", "=", "destspot", ".", "proxy", "if", "not", "(", "orig", ".", "name", "in", "self", ".", "character", ".", "portal", "and", "dest", ".", "name", "in", "self", ".", "character", ".", "portal", "[", "orig", ".", "name", "]", ")", ":", "port", "=", "self", ".", "character", ".", "new_portal", "(", "orig", ".", "name", ",", "dest", ".", "name", ")", "self", ".", "arrowlayout", ".", "add_widget", "(", "self", ".", "make_arrow", "(", "port", ")", ")", "# And another in the opposite direction if needed", "if", "(", "hasattr", "(", "self", ",", "'protoportal2'", ")", "and", "not", "(", "orig", ".", "name", "in", "self", ".", "character", ".", "preportal", "and", "dest", ".", "name", "in", "self", ".", "character", ".", "preportal", "[", "orig", ".", "name", "]", ")", ")", ":", "deport", "=", "self", ".", "character", ".", "new_portal", "(", "dest", ".", "name", ",", "orig", ".", "name", ")", "self", ".", "arrowlayout", ".", "add_widget", "(", "self", ".", "make_arrow", "(", "deport", ")", ")", "except", "StopIteration", ":", "pass", "self", ".", "remove_widget", "(", "self", ".", "protoportal", ")", "if", "hasattr", "(", "self", ",", "'protoportal2'", ")", ":", "self", ".", "remove_widget", "(", "self", ".", "protoportal2", ")", "del", "self", ".", "protoportal2", "self", ".", "remove_widget", "(", "self", ".", "protodest", ")", "del", "self", ".", "protoportal", "del", "self", ".", "protodest" ]
Try to create a portal between the spots the user chose.
[ "Try", "to", "create", "a", "portal", "between", "the", "spots", "the", "user", "chose", "." ]
python
train
ska-sa/katcp-python
katcp/resource_client.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/resource_client.py#L1717-L1740
def monitor_resource_sync_state(resource, callback, exit_event=None):
    """Coroutine that monitors a KATCPResource's sync state.

    Calls callback(True/False) whenever the resource becomes synced or
    unsynced. Will always do an initial callback(False) call.

    Exits without calling callback() if exit_event is set.
    """
    exit_event = exit_event or AsyncEvent()
    callback(False)   # Initial condition, assume resource is not connected
    while not exit_event.is_set():
        # Wait for resource to be synced
        yield until_any(resource.until_synced(), exit_event.until_set())
        if exit_event.is_set():
            break   # If exit event is set we stop without calling callback
        else:
            callback(True)
        # Wait for resource to be un-synced
        yield until_any(resource.until_not_synced(), exit_event.until_set())
        if exit_event.is_set():
            break   # If exit event is set we stop without calling callback
        else:
            callback(False)
[ "def", "monitor_resource_sync_state", "(", "resource", ",", "callback", ",", "exit_event", "=", "None", ")", ":", "exit_event", "=", "exit_event", "or", "AsyncEvent", "(", ")", "callback", "(", "False", ")", "# Initial condition, assume resource is not connected", "while", "not", "exit_event", ".", "is_set", "(", ")", ":", "# Wait for resource to be synced", "yield", "until_any", "(", "resource", ".", "until_synced", "(", ")", ",", "exit_event", ".", "until_set", "(", ")", ")", "if", "exit_event", ".", "is_set", "(", ")", ":", "break", "# If exit event is set we stop without calling callback", "else", ":", "callback", "(", "True", ")", "# Wait for resource to be un-synced", "yield", "until_any", "(", "resource", ".", "until_not_synced", "(", ")", ",", "exit_event", ".", "until_set", "(", ")", ")", "if", "exit_event", ".", "is_set", "(", ")", ":", "break", "# If exit event is set we stop without calling callback", "else", ":", "callback", "(", "False", ")" ]
Coroutine that monitors a KATCPResource's sync state. Calls callback(True/False) whenever the resource becomes synced or unsynced. Will always do an initial callback(False) call. Exits without calling callback() if exit_event is set
[ "Coroutine", "that", "monitors", "a", "KATCPResource", "s", "sync", "state", "." ]
python
train
ARMmbed/yotta
yotta/lib/component.py
https://github.com/ARMmbed/yotta/blob/56bc1e56c602fa20307b23fe27518e9cd6c11af1/yotta/lib/component.py#L691-L708
def getTarget(self, target_name_and_version, additional_config=None):
    ''' Return a derived target object representing the selected target: if
        the target is not installed, or is invalid then the returned object
        will test false in a boolean context.

        Returns derived_target

        Errors are not displayed.
    '''
    derived_target, errors = self.satisfyTarget(
        target_name_and_version,
        additional_config = additional_config,
        install_missing = False
    )
    if len(errors):
        return None
    else:
        return derived_target
[ "def", "getTarget", "(", "self", ",", "target_name_and_version", ",", "additional_config", "=", "None", ")", ":", "derived_target", ",", "errors", "=", "self", ".", "satisfyTarget", "(", "target_name_and_version", ",", "additional_config", "=", "additional_config", ",", "install_missing", "=", "False", ")", "if", "len", "(", "errors", ")", ":", "return", "None", "else", ":", "return", "derived_target" ]
Return a derived target object representing the selected target: if the target is not installed, or is invalid then the returned object will test false in a boolean context. Returns derived_target Errors are not displayed.
[ "Return", "a", "derived", "target", "object", "representing", "the", "selected", "target", ":", "if", "the", "target", "is", "not", "installed", "or", "is", "invalid", "then", "the", "returned", "object", "will", "test", "false", "in", "a", "boolean", "context", "." ]
python
valid
portfors-lab/sparkle
sparkle/acq/players.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/acq/players.py#L183-L187
def start_timer(self, reprate):
    """Start the digital output task that serves as the acquisition trigger"""
    print('starting digital output at rate {} Hz'.format(reprate))
    self.trigger_task = DigitalOutTask(self.trigger_src, reprate)
    self.trigger_task.start()
[ "def", "start_timer", "(", "self", ",", "reprate", ")", ":", "print", "'starting digital output at rate {} Hz'", ".", "format", "(", "reprate", ")", "self", ".", "trigger_task", "=", "DigitalOutTask", "(", "self", ".", "trigger_src", ",", "reprate", ")", "self", ".", "trigger_task", ".", "start", "(", ")" ]
Start the digital output task that serves as the acquistion trigger
[ "Start", "the", "digital", "output", "task", "that", "serves", "as", "the", "acquistion", "trigger" ]
python
train
halcy/Mastodon.py
mastodon/Mastodon.py
https://github.com/halcy/Mastodon.py/blob/35c43562dd3d34d6ebf7a0f757c09e8fcccc957c/mastodon/Mastodon.py#L2155-L2168
def media_update(self, id, description=None, focus=None):
    """
    Update the metadata of the media file with the given `id`. `description` and
    `focus` are as in `media_post()`_ .

    Returns the updated `media dict`_.
    """
    id = self.__unpack_id(id)

    if focus is not None:
        focus = str(focus[0]) + "," + str(focus[1])

    params = self.__generate_params(locals(), ['id'])
    return self.__api_request('PUT', '/api/v1/media/{0}'.format(str(id)), params)
[ "def", "media_update", "(", "self", ",", "id", ",", "description", "=", "None", ",", "focus", "=", "None", ")", ":", "id", "=", "self", ".", "__unpack_id", "(", "id", ")", "if", "focus", "!=", "None", ":", "focus", "=", "str", "(", "focus", "[", "0", "]", ")", "+", "\",\"", "+", "str", "(", "focus", "[", "1", "]", ")", "params", "=", "self", ".", "__generate_params", "(", "locals", "(", ")", ",", "[", "'id'", "]", ")", "return", "self", ".", "__api_request", "(", "'PUT'", ",", "'/api/v1/media/{0}'", ".", "format", "(", "str", "(", "id", ")", ")", ",", "params", ")" ]
Update the metadata of the media file with the given `id`. `description` and `focus` are as in `media_post()`_ . Returns the updated `media dict`_.
[ "Update", "the", "metadata", "of", "the", "media", "file", "with", "the", "given", "id", ".", "description", "and", "focus", "are", "as", "in", "media_post", "()", "_", ".", "Returns", "the", "updated", "media", "dict", "_", "." ]
python
train
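A usage sketch for media_update with an authenticated client; the token file, instance URL, and image path are placeholders:

from mastodon import Mastodon

mastodon = Mastodon(access_token='usercred.secret',
                    api_base_url='https://mastodon.social')
media = mastodon.media_post('cat.png', description='initial caption')
# rewrite the description and re-centre the preview focus afterwards
mastodon.media_update(media, description='A sleeping cat', focus=(0.0, 0.5))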
awslabs/aws-sam-cli
samcli/commands/local/lib/sam_api_provider.py
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/commands/local/lib/sam_api_provider.py#L273-L298
def _convert_event_api(lambda_logical_id, event_properties):
    """
    Converts an AWS::Serverless::Function's Event Property to an Api configuration usable by the provider.

    :param str lambda_logical_id: Logical Id of the AWS::Serverless::Function
    :param dict event_properties: Dictionary of the Event's Property
    :return tuple: tuple of API resource name and Api namedTuple
    """
    path = event_properties.get(SamApiProvider._EVENT_PATH)
    method = event_properties.get(SamApiProvider._EVENT_METHOD)

    # An API Event, can have RestApiId property which designates the resource that owns this API. If omitted,
    # the API is owned by Implicit API resource. This could either be a direct resource logical ID or a
    # "Ref" of the logicalID
    api_resource_id = event_properties.get("RestApiId", SamApiProvider._IMPLICIT_API_RESOURCE_ID)
    if isinstance(api_resource_id, dict) and "Ref" in api_resource_id:
        api_resource_id = api_resource_id["Ref"]

    # This is still a dictionary. Something wrong with the template
    if isinstance(api_resource_id, dict):
        LOG.debug("Invalid RestApiId property of event %s", event_properties)
        raise InvalidSamDocumentException("RestApiId property of resource with logicalId '{}' is invalid. "
                                          "It should either be a LogicalId string or a Ref of a Logical Id string"
                                          .format(lambda_logical_id))

    return api_resource_id, Api(path=path, method=method, function_name=lambda_logical_id)
[ "def", "_convert_event_api", "(", "lambda_logical_id", ",", "event_properties", ")", ":", "path", "=", "event_properties", ".", "get", "(", "SamApiProvider", ".", "_EVENT_PATH", ")", "method", "=", "event_properties", ".", "get", "(", "SamApiProvider", ".", "_EVENT_METHOD", ")", "# An API Event, can have RestApiId property which designates the resource that owns this API. If omitted,", "# the API is owned by Implicit API resource. This could either be a direct resource logical ID or a", "# \"Ref\" of the logicalID", "api_resource_id", "=", "event_properties", ".", "get", "(", "\"RestApiId\"", ",", "SamApiProvider", ".", "_IMPLICIT_API_RESOURCE_ID", ")", "if", "isinstance", "(", "api_resource_id", ",", "dict", ")", "and", "\"Ref\"", "in", "api_resource_id", ":", "api_resource_id", "=", "api_resource_id", "[", "\"Ref\"", "]", "# This is still a dictionary. Something wrong with the template", "if", "isinstance", "(", "api_resource_id", ",", "dict", ")", ":", "LOG", ".", "debug", "(", "\"Invalid RestApiId property of event %s\"", ",", "event_properties", ")", "raise", "InvalidSamDocumentException", "(", "\"RestApiId property of resource with logicalId '{}' is invalid. \"", "\"It should either be a LogicalId string or a Ref of a Logical Id string\"", ".", "format", "(", "lambda_logical_id", ")", ")", "return", "api_resource_id", ",", "Api", "(", "path", "=", "path", ",", "method", "=", "method", ",", "function_name", "=", "lambda_logical_id", ")" ]
Converts a AWS::Serverless::Function's Event Property to an Api configuration usable by the provider. :param str lambda_logical_id: Logical Id of the AWS::Serverless::Function :param dict event_properties: Dictionary of the Event's Property :return tuple: tuple of API resource name and Api namedTuple
[ "Converts", "a", "AWS", "::", "Serverless", "::", "Function", "s", "Event", "Property", "to", "an", "Api", "configuration", "usable", "by", "the", "provider", "." ]
python
train
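A sketch of the shapes this converter deals with, using an invented event properties dict; that _EVENT_PATH and _EVENT_METHOD name the 'Path' and 'Method' keys is an assumption based on the SAM event schema:

event_properties = {
    'Path': '/hello',
    'Method': 'get',
    'RestApiId': {'Ref': 'MyApi'},   # "Ref" form; a plain logical ID string also works
}
# _convert_event_api('HelloFunction', event_properties) would return:
#   ('MyApi', Api(path='/hello', method='get', function_name='HelloFunction'))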
christophertbrown/bioscripts
ctbBio/rax.py
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rax.py#L192-L203
def fix_tree(tree, a_id_lookup, out):
    """
    get the names for sequences in the raxml tree
    """
    if check(out) is False and check(tree) is True:
        tree = open(tree).read()
        for line in open(a_id_lookup):
            id, name, header = line.strip().split('\t')
            tree = tree.replace(id+':', name+':')
        out_f = open(out, 'w')
        print(tree.strip(), file=out_f)
    return out
[ "def", "fix_tree", "(", "tree", ",", "a_id_lookup", ",", "out", ")", ":", "if", "check", "(", "out", ")", "is", "False", "and", "check", "(", "tree", ")", "is", "True", ":", "tree", "=", "open", "(", "tree", ")", ".", "read", "(", ")", "for", "line", "in", "open", "(", "a_id_lookup", ")", ":", "id", ",", "name", ",", "header", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "tree", "=", "tree", ".", "replace", "(", "id", "+", "':'", ",", "name", "+", "':'", ")", "out_f", "=", "open", "(", "out", ",", "'w'", ")", "print", "(", "tree", ".", "strip", "(", ")", ",", "file", "=", "out_f", ")", "return", "out" ]
get the names for sequences in the raxml tree
[ "get", "the", "names", "for", "sequences", "in", "the", "raxml", "tree" ]
python
train
Scoppio/RagnarokEngine3
RagnarokEngine3/RE3.py
https://github.com/Scoppio/RagnarokEngine3/blob/4395d419ccd64fe9327c41f200b72ee0176ad896/RagnarokEngine3/RE3.py#L2434-L2447
def query_state(self, StateType):
    """
    Is a button depressed?
    True if a button is pressed, false otherwise.
    """
    if StateType == M_LEFT:
        # Checking left mouse button
        return self.left_pressed
    elif StateType == M_MIDDLE:
        # Checking middle mouse button
        return self.middle_pressed
    elif StateType == M_RIGHT:
        # Checking right mouse button
        return self.right_pressed
[ "def", "query_state", "(", "self", ",", "StateType", ")", ":", "if", "StateType", "==", "M_LEFT", ":", "# Checking left mouse button", "return", "self", ".", "left_pressed", "elif", "StateType", "==", "M_MIDDLE", ":", "# Checking middle mouse button", "return", "self", ".", "middle_pressed", "elif", "StateType", "==", "M_RIGHT", ":", "# Checking right mouse button", "return", "self", ".", "right_pressed" ]
Is a button depressed? True if a button is pressed, false otherwise.
[ "Is", "a", "button", "depressed?", "True", "if", "a", "button", "is", "pressed", "false", "otherwise", "." ]
python
train
Clinical-Genomics/scout
scout/server/blueprints/dashboard/controllers.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/dashboard/controllers.py#L243-L282
def get_case_groups(adapter, total_cases, institute_id=None, slice_query=None):
    """Return the information about case groups

    Args:
        store(adapter.MongoAdapter)
        total_cases(int): Total number of cases
        slice_query(str): Query to filter cases to obtain statistics for.

    Returns:
        cases(dict):
    """
    # Create a group with all cases in the database
    cases = [{'status': 'all', 'count': total_cases, 'percent': 1}]

    # Group the cases based on their status
    pipeline = []
    group = {'$group': {'_id': '$status', 'count': {'$sum': 1}}}

    subquery = {}
    if institute_id and slice_query:
        subquery = adapter.cases(owner=institute_id, name_query=slice_query,
                                 yield_query=True)
    elif institute_id:
        subquery = adapter.cases(owner=institute_id, yield_query=True)
    elif slice_query:
        subquery = adapter.cases(name_query=slice_query, yield_query=True)

    query = {'$match': subquery} if subquery else {}

    if query:
        pipeline.append(query)

    pipeline.append(group)
    res = adapter.case_collection.aggregate(pipeline)

    for status_group in res:
        cases.append({'status': status_group['_id'],
                      'count': status_group['count'],
                      'percent': status_group['count'] / total_cases})

    return cases
[ "def", "get_case_groups", "(", "adapter", ",", "total_cases", ",", "institute_id", "=", "None", ",", "slice_query", "=", "None", ")", ":", "# Create a group with all cases in the database", "cases", "=", "[", "{", "'status'", ":", "'all'", ",", "'count'", ":", "total_cases", ",", "'percent'", ":", "1", "}", "]", "# Group the cases based on their status", "pipeline", "=", "[", "]", "group", "=", "{", "'$group'", ":", "{", "'_id'", ":", "'$status'", ",", "'count'", ":", "{", "'$sum'", ":", "1", "}", "}", "}", "subquery", "=", "{", "}", "if", "institute_id", "and", "slice_query", ":", "subquery", "=", "adapter", ".", "cases", "(", "owner", "=", "institute_id", ",", "name_query", "=", "slice_query", ",", "yield_query", "=", "True", ")", "elif", "institute_id", ":", "subquery", "=", "adapter", ".", "cases", "(", "owner", "=", "institute_id", ",", "yield_query", "=", "True", ")", "elif", "slice_query", ":", "subquery", "=", "adapter", ".", "cases", "(", "name_query", "=", "slice_query", ",", "yield_query", "=", "True", ")", "query", "=", "{", "'$match'", ":", "subquery", "}", "if", "subquery", "else", "{", "}", "if", "query", ":", "pipeline", ".", "append", "(", "query", ")", "pipeline", ".", "append", "(", "group", ")", "res", "=", "adapter", ".", "case_collection", ".", "aggregate", "(", "pipeline", ")", "for", "status_group", "in", "res", ":", "cases", ".", "append", "(", "{", "'status'", ":", "status_group", "[", "'_id'", "]", ",", "'count'", ":", "status_group", "[", "'count'", "]", ",", "'percent'", ":", "status_group", "[", "'count'", "]", "/", "total_cases", "}", ")", "return", "cases" ]
Return the information about case groups Args: store(adapter.MongoAdapter) total_cases(int): Total number of cases slice_query(str): Query to filter cases to obtain statistics for. Returns: cases(dict):
[ "Return", "the", "information", "about", "case", "groups" ]
python
test
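Written out, the pipeline for an institute-plus-slice call looks like the sketch below; the $match content is whatever adapter.cases(..., yield_query=True) returns, which is assumed here to be a plain MongoDB filter dict:

pipeline = [
    {'$match': {'collaborators': 'cust000'}},             # assumed subquery shape
    {'$group': {'_id': '$status', 'count': {'$sum': 1}}}, # one bucket per case status
]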
gwpy/gwpy
gwpy/plot/bode.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/bode.py#L228-L269
def add_frequencyseries(self, spectrum, dB=True, power=False, **kwargs):
    """Plot the magnitude and phase of a complex-valued `FrequencySeries`

    Parameters
    ----------
    spectrum : `~gwpy.frequencyseries.FrequencySeries`
        the (complex-valued) `FrequencySeries` to display

    db : `bool`, optional, default: `True`
        if `True`, display magnitude in decibels, otherwise display
        amplitude.

    power : `bool`, optional, default: `False`
        give `True` to indicate that ``spectrum`` holds power values, so
        ``dB = 10 * log(abs(spectrum))``, otherwise
        ``db = 20 * log(abs(spectrum))``. This argument is ignored if
        ``db=False``.

    **kwargs
        any other keyword arguments accepted by
        :meth:`~matplotlib.axes.Axes.plot`

    Returns
    -------
    mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>`
        the lines drawn for the magnitude and phase of the filter.
    """
    # parse spectrum arguments
    kwargs.setdefault('label', spectrum.name)

    # get magnitude
    mag = numpy.absolute(spectrum.value)
    if dB:
        mag = to_db(mag)
        if not power:
            mag *= 2.

    # get phase
    phase = numpy.angle(spectrum.value, deg=True)

    # plot
    w = spectrum.frequencies.value
    mline = self.maxes.plot(w, mag, **kwargs)[0]
    pline = self.paxes.plot(w, phase, **kwargs)[0]
    return mline, pline
[ "def", "add_frequencyseries", "(", "self", ",", "spectrum", ",", "dB", "=", "True", ",", "power", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# parse spectrum arguments", "kwargs", ".", "setdefault", "(", "'label'", ",", "spectrum", ".", "name", ")", "# get magnitude", "mag", "=", "numpy", ".", "absolute", "(", "spectrum", ".", "value", ")", "if", "dB", ":", "mag", "=", "to_db", "(", "mag", ")", "if", "not", "power", ":", "mag", "*=", "2.", "# get phase", "phase", "=", "numpy", ".", "angle", "(", "spectrum", ".", "value", ",", "deg", "=", "True", ")", "# plot", "w", "=", "spectrum", ".", "frequencies", ".", "value", "mline", "=", "self", ".", "maxes", ".", "plot", "(", "w", ",", "mag", ",", "*", "*", "kwargs", ")", "[", "0", "]", "pline", "=", "self", ".", "paxes", ".", "plot", "(", "w", ",", "phase", ",", "*", "*", "kwargs", ")", "[", "0", "]", "return", "mline", ",", "pline" ]
Plot the magnitude and phase of a complex-valued `FrequencySeries` Parameters ---------- spectrum : `~gwpy.frequencyseries.FrequencySeries` the (complex-valued) `FrequencySeries` to display db : `bool`, optional, default: `True` if `True`, display magnitude in decibels, otherwise display amplitude. power : `bool`, optional, default: `False` give `True` to incidate that ``spectrum`` holds power values, so ``dB = 10 * log(abs(spectrum))``, otherwise ``db = 20 * log(abs(spectrum))``. This argument is ignored if ``db=False``. **kwargs any other keyword arguments accepted by :meth:`~matplotlib.axes.Axes.plot` Returns ------- mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>` the lines drawn for the magnitude and phase of the filter.
[ "Plot", "the", "magnitude", "and", "phase", "of", "a", "complex", "-", "valued", "FrequencySeries" ]
python
train
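A usage sketch with gwpy's BodePlot, which provides this method; the transfer function is a toy one-pole low-pass:

import numpy
from gwpy.frequencyseries import FrequencySeries
from gwpy.plot import BodePlot

freqs = numpy.logspace(0, 3, 200)
tf = FrequencySeries(1 / (1 + 1j * freqs / 100.), frequencies=freqs,
                     name='one-pole low-pass')
plot = BodePlot()
plot.add_frequencyseries(tf, dB=True)
plot.save('bode.png')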
LISE-B26/pylabcontrol
build/lib/pylabcontrol/gui/windows_and_widgets/main_window.py
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/gui/windows_and_widgets/main_window.py#L1331-L1361
def _hide_parameters(self, file_name):
    """
    hide the parameters that had been hidden
    Args:
        file_name: config file that has the information about which parameters are hidden

    """
    try:
        in_data = load_b26_file(file_name)
    except:
        in_data = {}

    def set_item_visible(item, is_visible):
        if isinstance(is_visible, dict):
            for child_id in range(item.childCount()):
                child = item.child(child_id)
                if child.name in is_visible:
                    set_item_visible(child, is_visible[child.name])
        else:
            item.visible = is_visible

    if "scripts_hidden_parameters" in in_data:
        # consistency check
        if len(list(in_data["scripts_hidden_parameters"].keys())) == self.tree_scripts.topLevelItemCount():
            for index in range(self.tree_scripts.topLevelItemCount()):
                item = self.tree_scripts.topLevelItem(index)
                # if item.name in in_data["scripts_hidden_parameters"]:
                set_item_visible(item, in_data["scripts_hidden_parameters"][item.name])
        else:
            print("WARNING: settings for hiding parameters doesn't seem to match other settings")
[ "def", "_hide_parameters", "(", "self", ",", "file_name", ")", ":", "try", ":", "in_data", "=", "load_b26_file", "(", "file_name", ")", "except", ":", "in_data", "=", "{", "}", "def", "set_item_visible", "(", "item", ",", "is_visible", ")", ":", "if", "isinstance", "(", "is_visible", ",", "dict", ")", ":", "for", "child_id", "in", "range", "(", "item", ".", "childCount", "(", ")", ")", ":", "child", "=", "item", ".", "child", "(", "child_id", ")", "if", "child", ".", "name", "in", "is_visible", ":", "set_item_visible", "(", "child", ",", "is_visible", "[", "child", ".", "name", "]", ")", "else", ":", "item", ".", "visible", "=", "is_visible", "if", "\"scripts_hidden_parameters\"", "in", "in_data", ":", "# consistency check", "if", "len", "(", "list", "(", "in_data", "[", "\"scripts_hidden_parameters\"", "]", ".", "keys", "(", ")", ")", ")", "==", "self", ".", "tree_scripts", ".", "topLevelItemCount", "(", ")", ":", "for", "index", "in", "range", "(", "self", ".", "tree_scripts", ".", "topLevelItemCount", "(", ")", ")", ":", "item", "=", "self", ".", "tree_scripts", ".", "topLevelItem", "(", "index", ")", "# if item.name in in_data[\"scripts_hidden_parameters\"]:", "set_item_visible", "(", "item", ",", "in_data", "[", "\"scripts_hidden_parameters\"", "]", "[", "item", ".", "name", "]", ")", "else", ":", "print", "(", "'WARNING: settings for hiding parameters does\\'t seem to match other settings'", ")" ]
hide the parameters that had been hidden Args: file_name: config file that has the information about which parameters are hidden
[ "hide", "the", "parameters", "that", "had", "been", "hidden", "Args", ":", "file_name", ":", "config", "file", "that", "has", "the", "information", "about", "which", "parameters", "are", "hidden" ]
python
train
phoebe-project/phoebe2
phoebe/parameters/parameters.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/parameters.py#L1078-L1087
def to_list_of_dicts(self, **kwargs):
    """
    Convert the :class:`ParameterSet` to a list of the dictionary
    representation of each :class:`Parameter`

    :return: list of dicts
    """
    if kwargs:
        return self.filter(**kwargs).to_list_of_dicts()
    return [param.to_dict() for param in self._params]
[ "def", "to_list_of_dicts", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ":", "return", "self", ".", "filter", "(", "*", "*", "kwargs", ")", ".", "to_list_of_dicts", "(", ")", "return", "[", "param", ".", "to_dict", "(", ")", "for", "param", "in", "self", ".", "_params", "]" ]
Convert the :class:`ParameterSet` to a list of the dictionary representation of each :class:`Parameter` :return: list of dicts
[ "Convert", "the", ":", "class", ":", "ParameterSet", "to", "a", "list", "of", "the", "dictionary", "representation", "of", "each", ":", "class", ":", "Parameter" ]
python
train
log2timeline/plaso
plaso/parsers/sqlite.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/sqlite.py#L168-L198
def Close(self):
    """Closes the database connection and cleans up the temporary file."""
    self.schema = {}

    if self._is_open:
        self._database.close()
    self._database = None

    if os.path.exists(self._temp_db_file_path):
        try:
            os.remove(self._temp_db_file_path)
        except (OSError, IOError) as exception:
            logger.warning((
                'Unable to remove temporary copy: {0:s} of SQLite database: '
                '{1:s} with error: {2!s}').format(
                    self._temp_db_file_path, self._filename, exception))

    self._temp_db_file_path = ''

    if os.path.exists(self._temp_wal_file_path):
        try:
            os.remove(self._temp_wal_file_path)
        except (OSError, IOError) as exception:
            logger.warning((
                'Unable to remove temporary copy: {0:s} of SQLite database: '
                '{1:s} with error: {2!s}').format(
                    self._temp_wal_file_path, self._filename, exception))

    self._temp_wal_file_path = ''

    self._is_open = False
[ "def", "Close", "(", "self", ")", ":", "self", ".", "schema", "=", "{", "}", "if", "self", ".", "_is_open", ":", "self", ".", "_database", ".", "close", "(", ")", "self", ".", "_database", "=", "None", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "_temp_db_file_path", ")", ":", "try", ":", "os", ".", "remove", "(", "self", ".", "_temp_db_file_path", ")", "except", "(", "OSError", ",", "IOError", ")", "as", "exception", ":", "logger", ".", "warning", "(", "(", "'Unable to remove temporary copy: {0:s} of SQLite database: '", "'{1:s} with error: {2!s}'", ")", ".", "format", "(", "self", ".", "_temp_db_file_path", ",", "self", ".", "_filename", ",", "exception", ")", ")", "self", ".", "_temp_db_file_path", "=", "''", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "_temp_wal_file_path", ")", ":", "try", ":", "os", ".", "remove", "(", "self", ".", "_temp_wal_file_path", ")", "except", "(", "OSError", ",", "IOError", ")", "as", "exception", ":", "logger", ".", "warning", "(", "(", "'Unable to remove temporary copy: {0:s} of SQLite database: '", "'{1:s} with error: {2!s}'", ")", ".", "format", "(", "self", ".", "_temp_wal_file_path", ",", "self", ".", "_filename", ",", "exception", ")", ")", "self", ".", "_temp_wal_file_path", "=", "''", "self", ".", "_is_open", "=", "False" ]
Closes the database connection and cleans up the temporary file.
[ "Closes", "the", "database", "connection", "and", "cleans", "up", "the", "temporary", "file", "." ]
python
train
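Close's cleanup idiom (remove a temporary file if it exists, warn rather than raise on failure, and reset the bookkeeping attribute to an empty string) can be distilled into a small helper. A sketch under the assumption of a plain stdlib logger; the helper name remove_temp_copy is made up:

import logging
import os

logger = logging.getLogger(__name__)

def remove_temp_copy(path, description):
    # Best-effort removal of a temporary file; warns instead of raising.
    if path and os.path.exists(path):
        try:
            os.remove(path)
        except (OSError, IOError) as exception:
            logger.warning(
                'Unable to remove temporary copy: {0:s} of {1:s} '
                'with error: {2!s}'.format(path, description, exception))
    return ''  # callers assign this back to the path attribute

# Usage, mirroring Close():
#   self._temp_db_file_path = remove_temp_copy(self._temp_db_file_path, self._filename)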
ze-phyr-us/django-libretto
django_libretto/decorators.py
https://github.com/ze-phyr-us/django-libretto/blob/b19d8aa21b9579ee91e81967a44d1c40f5588b17/django_libretto/decorators.py#L5-L19
def view_decorator(function_decorator): """Convert a function based decorator into a class based decorator usable on class based Views. Can't subclass the `View` as it breaks inheritance (super in particular), so we monkey-patch instead. Based on http://stackoverflow.com/a/8429311 """ def simple_decorator(View): View.dispatch = method_decorator(function_decorator)(View.dispatch) return View return simple_decorator
[ "def", "view_decorator", "(", "function_decorator", ")", ":", "def", "simple_decorator", "(", "View", ")", ":", "View", ".", "dispatch", "=", "method_decorator", "(", "function_decorator", ")", "(", "View", ".", "dispatch", ")", "return", "View", "return", "simple_decorator" ]
Convert a function based decorator into a class based decorator usable on class based Views. Can't subclass the `View` as it breaks inheritance (super in particular), so we monkey-patch instead. Based on http://stackoverflow.com/a/8429311
[ "Convert", "a", "function", "based", "decorator", "into", "a", "class", "based", "decorator", "usable", "on", "class", "based", "Views", "." ]
python
test
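The monkey-patching trick in view_decorator works with any function decorator whose wrapper forwards positional arguments, because the wrapped dispatch receives self as its first argument. A self-contained sketch with a toy View class; Django's real version goes through method_decorator, which is skipped here:

def view_decorator(function_decorator):
    def simple_decorator(View):
        View.dispatch = function_decorator(View.dispatch)
        return View
    return simple_decorator

def log_calls(func):
    def wrapper(*args, **kwargs):
        print('dispatching via', func.__qualname__)
        return func(*args, **kwargs)
    return wrapper

@view_decorator(log_calls)
class MyView:
    def dispatch(self, request):
        return 'response for %s' % request

print(MyView().dispatch('GET /'))  # logs, then prints 'response for GET /'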
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/tex.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/tex.py#L617-L664
def ScanFiles(theFile, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files): """ For theFile (a Node) update any file_tests and search for graphics files then find all included files and call ScanFiles recursively for each of them""" content = theFile.get_text_contents() if Verbose: print(" scanning ",str(theFile)) for i in range(len(file_tests_search)): if file_tests[i][0] is None: if Verbose: print("scan i ",i," files_tests[i] ",file_tests[i], file_tests[i][1]) file_tests[i][0] = file_tests_search[i].search(content) if Verbose and file_tests[i][0]: print(" found match for ",file_tests[i][1][-1]) # for newglossary insert the suffixes in file_tests[i] if file_tests[i][0] and file_tests[i][1][-1] == 'newglossary': findresult = file_tests_search[i].findall(content) for l in range(len(findresult)) : (file_tests[i][1]).insert(0,'.'+findresult[l][3]) (file_tests[i][1]).insert(0,'.'+findresult[l][2]) (file_tests[i][1]).insert(0,'.'+findresult[l][0]) suffix_list = ['.'+findresult[l][0],'.'+findresult[l][2],'.'+findresult[l][3] ] newglossary_suffix.append(suffix_list) if Verbose: print(" new suffixes for newglossary ",newglossary_suffix) incResult = includeOnly_re.search(content) if incResult: aux_files.append(os.path.join(targetdir, incResult.group(1))) if Verbose: print("\include file names : ", aux_files) # recursively call this on each of the included files inc_files = [ ] inc_files.extend( include_re.findall(content) ) if Verbose: print("files included by '%s': "%str(theFile),inc_files) # inc_files is list of file names as given. need to find them # using TEXINPUTS paths. for src in inc_files: srcNode = FindFile(src,['.tex','.ltx','.latex'],paths,env,requireExt=False) if srcNode is not None: file_tests = ScanFiles(srcNode, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files) if Verbose: print(" done scanning ",str(theFile)) return file_tests
[ "def", "ScanFiles", "(", "theFile", ",", "target", ",", "paths", ",", "file_tests", ",", "file_tests_search", ",", "env", ",", "graphics_extensions", ",", "targetdir", ",", "aux_files", ")", ":", "content", "=", "theFile", ".", "get_text_contents", "(", ")", "if", "Verbose", ":", "print", "(", "\" scanning \"", ",", "str", "(", "theFile", ")", ")", "for", "i", "in", "range", "(", "len", "(", "file_tests_search", ")", ")", ":", "if", "file_tests", "[", "i", "]", "[", "0", "]", "is", "None", ":", "if", "Verbose", ":", "print", "(", "\"scan i \"", ",", "i", ",", "\" files_tests[i] \"", ",", "file_tests", "[", "i", "]", ",", "file_tests", "[", "i", "]", "[", "1", "]", ")", "file_tests", "[", "i", "]", "[", "0", "]", "=", "file_tests_search", "[", "i", "]", ".", "search", "(", "content", ")", "if", "Verbose", "and", "file_tests", "[", "i", "]", "[", "0", "]", ":", "print", "(", "\" found match for \"", ",", "file_tests", "[", "i", "]", "[", "1", "]", "[", "-", "1", "]", ")", "# for newglossary insert the suffixes in file_tests[i]", "if", "file_tests", "[", "i", "]", "[", "0", "]", "and", "file_tests", "[", "i", "]", "[", "1", "]", "[", "-", "1", "]", "==", "'newglossary'", ":", "findresult", "=", "file_tests_search", "[", "i", "]", ".", "findall", "(", "content", ")", "for", "l", "in", "range", "(", "len", "(", "findresult", ")", ")", ":", "(", "file_tests", "[", "i", "]", "[", "1", "]", ")", ".", "insert", "(", "0", ",", "'.'", "+", "findresult", "[", "l", "]", "[", "3", "]", ")", "(", "file_tests", "[", "i", "]", "[", "1", "]", ")", ".", "insert", "(", "0", ",", "'.'", "+", "findresult", "[", "l", "]", "[", "2", "]", ")", "(", "file_tests", "[", "i", "]", "[", "1", "]", ")", ".", "insert", "(", "0", ",", "'.'", "+", "findresult", "[", "l", "]", "[", "0", "]", ")", "suffix_list", "=", "[", "'.'", "+", "findresult", "[", "l", "]", "[", "0", "]", ",", "'.'", "+", "findresult", "[", "l", "]", "[", "2", "]", ",", "'.'", "+", "findresult", "[", "l", "]", "[", "3", "]", "]", "newglossary_suffix", ".", "append", "(", "suffix_list", ")", "if", "Verbose", ":", "print", "(", "\" new suffixes for newglossary \"", ",", "newglossary_suffix", ")", "incResult", "=", "includeOnly_re", ".", "search", "(", "content", ")", "if", "incResult", ":", "aux_files", ".", "append", "(", "os", ".", "path", ".", "join", "(", "targetdir", ",", "incResult", ".", "group", "(", "1", ")", ")", ")", "if", "Verbose", ":", "print", "(", "\"\\include file names : \"", ",", "aux_files", ")", "# recursively call this on each of the included files", "inc_files", "=", "[", "]", "inc_files", ".", "extend", "(", "include_re", ".", "findall", "(", "content", ")", ")", "if", "Verbose", ":", "print", "(", "\"files included by '%s': \"", "%", "str", "(", "theFile", ")", ",", "inc_files", ")", "# inc_files is list of file names as given. need to find them", "# using TEXINPUTS paths.", "for", "src", "in", "inc_files", ":", "srcNode", "=", "FindFile", "(", "src", ",", "[", "'.tex'", ",", "'.ltx'", ",", "'.latex'", "]", ",", "paths", ",", "env", ",", "requireExt", "=", "False", ")", "if", "srcNode", "is", "not", "None", ":", "file_tests", "=", "ScanFiles", "(", "srcNode", ",", "target", ",", "paths", ",", "file_tests", ",", "file_tests_search", ",", "env", ",", "graphics_extensions", ",", "targetdir", ",", "aux_files", ")", "if", "Verbose", ":", "print", "(", "\" done scanning \"", ",", "str", "(", "theFile", ")", ")", "return", "file_tests" ]
For theFile (a Node), update any file_tests and search for graphics files, then find all included files and call ScanFiles recursively for each of them
[ "For", "theFile", "(", "a", "Node", ")", "update", "any", "file_tests", "and", "search", "for", "graphics", "files", "then", "find", "all", "included", "files", "and", "call", "ScanFiles", "recursively", "for", "each", "of", "them" ]
python
train
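ScanFiles' recursion boils down to: regex the current file's contents for \include/\input targets, then rescan each hit. A self-contained sketch over an in-memory file tree; the regex is simplified, and unlike SCons no TEXINPUTS path or suffix resolution is done (a cycle guard is added for safety):

import re

include_re = re.compile(r'\\(?:include|input)\{([^}]*)\}')

FILES = {
    'main': r'\input{chapter1} \include{chapter2}',
    'chapter1': r'\input{appendix}',
    'chapter2': 'no further includes',
    'appendix': 'leaf file',
}

def scan(name, seen=None):
    seen = set() if seen is None else seen
    if name in seen:
        return seen
    seen.add(name)
    for child in include_re.findall(FILES.get(name, '')):
        scan(child, seen)
    return seen

print(sorted(scan('main')))  # ['appendix', 'chapter1', 'chapter2', 'main']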
scopus-api/scopus
scopus/deprecated_/scopus_api.py
https://github.com/scopus-api/scopus/blob/27ce02dd3095bfdab9d3e8475543d7c17767d1ab/scopus/deprecated_/scopus_api.py#L283-L308
def get_corresponding_author_info(self): """Try to get corresponding author information. Returns (scopus-id, name, email). """ resp = requests.get(self.scopus_url) from lxml import html parsed_doc = html.fromstring(resp.content) for div in parsed_doc.body.xpath('.//div'): for a in div.xpath('a'): if '/cdn-cgi/l/email-protection' not in a.get('href', ''): continue encoded_text = a.attrib['href'].replace('/cdn-cgi/l/email-protection#', '') key = int(encoded_text[0:2], 16) email = ''.join([chr(int('0x{}'.format(x), 16) ^ key) for x in map(''.join, zip(*[iter(encoded_text[2:])]*2))]) for aa in div.xpath('a'): if 'http://www.scopus.com/authid/detail.url' in aa.get('href', ''): scopus_url = aa.attrib['href'] name = aa.text else: scopus_url, name = None, None return (scopus_url, name, email)
[ "def", "get_corresponding_author_info", "(", "self", ")", ":", "resp", "=", "requests", ".", "get", "(", "self", ".", "scopus_url", ")", "from", "lxml", "import", "html", "parsed_doc", "=", "html", ".", "fromstring", "(", "resp", ".", "content", ")", "for", "div", "in", "parsed_doc", ".", "body", ".", "xpath", "(", "'.//div'", ")", ":", "for", "a", "in", "div", ".", "xpath", "(", "'a'", ")", ":", "if", "'/cdn-cgi/l/email-protection'", "not", "in", "a", ".", "get", "(", "'href'", ",", "''", ")", ":", "continue", "encoded_text", "=", "a", ".", "attrib", "[", "'href'", "]", ".", "replace", "(", "'/cdn-cgi/l/email-protection#'", ",", "''", ")", "key", "=", "int", "(", "encoded_text", "[", "0", ":", "2", "]", ",", "16", ")", "email", "=", "''", ".", "join", "(", "[", "chr", "(", "int", "(", "'0x{}'", ".", "format", "(", "x", ")", ",", "16", ")", "^", "key", ")", "for", "x", "in", "map", "(", "''", ".", "join", ",", "zip", "(", "*", "[", "iter", "(", "encoded_text", "[", "2", ":", "]", ")", "]", "*", "2", ")", ")", "]", ")", "for", "aa", "in", "div", ".", "xpath", "(", "'a'", ")", ":", "if", "'http://www.scopus.com/authid/detail.url'", "in", "aa", ".", "get", "(", "'href'", ",", "''", ")", ":", "scopus_url", "=", "aa", ".", "attrib", "[", "'href'", "]", "name", "=", "aa", ".", "text", "else", ":", "scopus_url", ",", "name", "=", "None", ",", "None", "return", "(", "scopus_url", ",", "name", ",", "email", ")" ]
Try to get corresponding author information. Returns (scopus-id, name, email).
[ "Try", "to", "get", "corresponding", "author", "information", "." ]
python
train
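The interesting piece of get_corresponding_author_info is decoding Cloudflare's email protection: the first hex byte of the token is an XOR key, and every subsequent hex pair is one character XORed with that key. A round-trip sketch; the encoder exists only to demonstrate that the decoder inverts it:

def cf_encode(email, key=0x42):
    # Demonstration-only encoder: key byte first, then each char XOR key.
    return '{:02x}'.format(key) + ''.join(
        '{:02x}'.format(ord(ch) ^ key) for ch in email)

def cf_decode(encoded_text):
    key = int(encoded_text[:2], 16)
    return ''.join(
        chr(int(encoded_text[i:i + 2], 16) ^ key)
        for i in range(2, len(encoded_text), 2))

token = cf_encode('[email protected]')
print(token)  # hex string starting with the key byte '42'
assert cf_decode(token) == '[email protected]'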
EmbodiedCognition/pagoda
pagoda/cooper.py
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/cooper.py#L547-L596
def _step_to_marker_frame(self, frame_no, dt=None): '''Update the simulator to a specific frame of marker data. This method returns a generator of body states for the skeleton! This generator must be exhausted (e.g., by consuming this call in a for loop) for the simulator to work properly. This process involves the following steps: - Move the markers to their new location: - Detach from the skeleton - Update marker locations - Reattach to the skeleton - Detect ODE collisions - Yield the states of the bodies in the skeleton - Advance the ODE world one step Parameters ---------- frame_no : int Step to this frame of marker data. dt : float, optional Step with this time duration. Defaults to ``self.dt``. Returns ------- states : sequence of state tuples A generator of a sequence of one body state for the skeleton. This generator must be exhausted for the simulation to work properly. ''' # update the positions and velocities of the markers. self.markers.detach() self.markers.reposition(frame_no) self.markers.attach(frame_no) # detect collisions. self.ode_space.collide(None, self.on_collision) # record the state of each skeleton body. states = self.skeleton.get_body_states() self.skeleton.set_body_states(states) # yield the current simulation state to our caller. yield states # update the ode world. self.ode_world.step(dt or self.dt) # clear out contact joints to prepare for the next frame. self.ode_contactgroup.empty()
[ "def", "_step_to_marker_frame", "(", "self", ",", "frame_no", ",", "dt", "=", "None", ")", ":", "# update the positions and velocities of the markers.", "self", ".", "markers", ".", "detach", "(", ")", "self", ".", "markers", ".", "reposition", "(", "frame_no", ")", "self", ".", "markers", ".", "attach", "(", "frame_no", ")", "# detect collisions.", "self", ".", "ode_space", ".", "collide", "(", "None", ",", "self", ".", "on_collision", ")", "# record the state of each skeleton body.", "states", "=", "self", ".", "skeleton", ".", "get_body_states", "(", ")", "self", ".", "skeleton", ".", "set_body_states", "(", "states", ")", "# yield the current simulation state to our caller.", "yield", "states", "# update the ode world.", "self", ".", "ode_world", ".", "step", "(", "dt", "or", "self", ".", "dt", ")", "# clear out contact joints to prepare for the next frame.", "self", ".", "ode_contactgroup", ".", "empty", "(", ")" ]
Update the simulator to a specific frame of marker data. This method returns a generator of body states for the skeleton! This generator must be exhausted (e.g., by consuming this call in a for loop) for the simulator to work properly. This process involves the following steps: - Move the markers to their new location: - Detach from the skeleton - Update marker locations - Reattach to the skeleton - Detect ODE collisions - Yield the states of the bodies in the skeleton - Advance the ODE world one step Parameters ---------- frame_no : int Step to this frame of marker data. dt : float, optional Step with this time duration. Defaults to ``self.dt``. Returns ------- states : sequence of state tuples A generator of a sequence of one body state for the skeleton. This generator must be exhausted for the simulation to work properly.
[ "Update", "the", "simulator", "to", "a", "specific", "frame", "of", "marker", "data", "." ]
python
valid
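The docstring's warning that the generator "must be exhausted" matters because the world update happens after the yield: a caller that never resumes the generator silently skips the physics step. A toy generator with the same shape (not pagoda's API) makes the failure mode visible:

def step(world):
    world['collisions_checked'] = True
    yield dict(world)        # caller observes the pre-step state here
    world['time'] += 1       # only runs when the generator is resumed

world = {'time': 0, 'collisions_checked': False}

list(step(world))            # exhausting the generator advances time
print(world['time'])         # 1

gen = step(world)
next(gen)                    # yields the state, then is abandoned...
print(world['time'])         # still 1: the code after yield never ran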
CivicSpleen/ambry
ambry/bundle/concurrent.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/concurrent.py#L166-L172
def unify_mp(b, partition_name): """Unify all of the segment partitions for a parent partition, then run stats on the MPR file""" with b.progress.start('coalesce_mp',0,message="MP coalesce {}".format(partition_name)) as ps: r = b.unify_partition(partition_name, None, ps) return r
[ "def", "unify_mp", "(", "b", ",", "partition_name", ")", ":", "with", "b", ".", "progress", ".", "start", "(", "'coalesce_mp'", ",", "0", ",", "message", "=", "\"MP coalesce {}\"", ".", "format", "(", "partition_name", ")", ")", "as", "ps", ":", "r", "=", "b", ".", "unify_partition", "(", "partition_name", ",", "None", ",", "ps", ")", "return", "r" ]
Unify all of the segment partitions for a parent partition, then run stats on the MPR file
[ "Unify", "all", "of", "the", "segment", "partitions", "for", "a", "parent", "partition", "then", "run", "stats", "on", "the", "MPR", "file" ]
python
train
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/type.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/type.py#L59-L115
def register (type, suffixes = [], base_type = None): """ Registers a target type, possibly derived from a 'base-type'. If 'suffixes' are provided, they list all the suffixes that mean a file is of 'type'. Also, the first element gives the suffix to be used when constructing and object of 'type'. type: a string suffixes: None or a sequence of strings base_type: None or a string """ # Type names cannot contain hyphens, because when used as # feature-values they will be interpreted as composite features # which need to be decomposed. if __re_hyphen.search (type): raise BaseException ('type name "%s" contains a hyphen' % type) # it's possible for a type to be registered with a # base type that hasn't been registered yet. in the # check for base_type below and the following calls to setdefault() # the key `type` will be added to __types. When the base type # actually gets registered, it would fail after the simple check # of "type in __types"; thus the check for "'base' in __types[type]" if type in __types and 'base' in __types[type]: raise BaseException ('Type "%s" is already registered.' % type) entry = __types.setdefault(type, {}) entry['base'] = base_type entry.setdefault('derived', []) entry.setdefault('scanner', None) if base_type: __types.setdefault(base_type, {}).setdefault('derived', []).append(type) if len (suffixes) > 0: # Generated targets of 'type' will use the first of 'suffixes' # (this may be overriden) set_generated_target_suffix (type, [], suffixes [0]) # Specify mapping from suffixes to type register_suffixes (suffixes, type) feature.extend('target-type', [type]) feature.extend('main-target-type', [type]) feature.extend('base-target-type', [type]) if base_type: feature.compose ('<target-type>' + type, [replace_grist (base_type, '<base-target-type>')]) feature.compose ('<base-target-type>' + type, ['<base-target-type>' + base_type]) import b2.build.generators as generators # Adding a new derived type affects generator selection so we need to # make the generator selection module update any of its cached # information related to a new derived type being defined. generators.update_cached_information_with_a_new_type(type) # FIXME: resolving recursive dependency. from b2.manager import get_manager get_manager().projects().project_rules().add_rule_for_type(type)
[ "def", "register", "(", "type", ",", "suffixes", "=", "[", "]", ",", "base_type", "=", "None", ")", ":", "# Type names cannot contain hyphens, because when used as", "# feature-values they will be interpreted as composite features", "# which need to be decomposed.", "if", "__re_hyphen", ".", "search", "(", "type", ")", ":", "raise", "BaseException", "(", "'type name \"%s\" contains a hyphen'", "%", "type", ")", "# it's possible for a type to be registered with a", "# base type that hasn't been registered yet. in the", "# check for base_type below and the following calls to setdefault()", "# the key `type` will be added to __types. When the base type", "# actually gets registered, it would fail after the simple check", "# of \"type in __types\"; thus the check for \"'base' in __types[type]\"", "if", "type", "in", "__types", "and", "'base'", "in", "__types", "[", "type", "]", ":", "raise", "BaseException", "(", "'Type \"%s\" is already registered.'", "%", "type", ")", "entry", "=", "__types", ".", "setdefault", "(", "type", ",", "{", "}", ")", "entry", "[", "'base'", "]", "=", "base_type", "entry", ".", "setdefault", "(", "'derived'", ",", "[", "]", ")", "entry", ".", "setdefault", "(", "'scanner'", ",", "None", ")", "if", "base_type", ":", "__types", ".", "setdefault", "(", "base_type", ",", "{", "}", ")", ".", "setdefault", "(", "'derived'", ",", "[", "]", ")", ".", "append", "(", "type", ")", "if", "len", "(", "suffixes", ")", ">", "0", ":", "# Generated targets of 'type' will use the first of 'suffixes'", "# (this may be overriden)", "set_generated_target_suffix", "(", "type", ",", "[", "]", ",", "suffixes", "[", "0", "]", ")", "# Specify mapping from suffixes to type", "register_suffixes", "(", "suffixes", ",", "type", ")", "feature", ".", "extend", "(", "'target-type'", ",", "[", "type", "]", ")", "feature", ".", "extend", "(", "'main-target-type'", ",", "[", "type", "]", ")", "feature", ".", "extend", "(", "'base-target-type'", ",", "[", "type", "]", ")", "if", "base_type", ":", "feature", ".", "compose", "(", "'<target-type>'", "+", "type", ",", "[", "replace_grist", "(", "base_type", ",", "'<base-target-type>'", ")", "]", ")", "feature", ".", "compose", "(", "'<base-target-type>'", "+", "type", ",", "[", "'<base-target-type>'", "+", "base_type", "]", ")", "import", "b2", ".", "build", ".", "generators", "as", "generators", "# Adding a new derived type affects generator selection so we need to", "# make the generator selection module update any of its cached", "# information related to a new derived type being defined.", "generators", ".", "update_cached_information_with_a_new_type", "(", "type", ")", "# FIXME: resolving recursive dependency.", "from", "b2", ".", "manager", "import", "get_manager", "get_manager", "(", ")", ".", "projects", "(", ")", ".", "project_rules", "(", ")", ".", "add_rule_for_type", "(", "type", ")" ]
Registers a target type, possibly derived from a 'base-type'. If 'suffixes' are provided, they list all the suffixes that mean a file is of 'type'. Also, the first element gives the suffix to be used when constructing an object of 'type'. type: a string suffixes: None or a sequence of strings base_type: None or a string
[ "Registers", "a", "target", "type", "possibly", "derived", "from", "a", "base", "-", "type", ".", "If", "suffixes", "are", "provided", "they", "list", "all", "the", "suffixes", "that", "mean", "a", "file", "is", "of", "type", ".", "Also", "the", "first", "element", "gives", "the", "suffix", "to", "be", "used", "when", "constructing", "and", "object", "of", "type", ".", "type", ":", "a", "string", "suffixes", ":", "None", "or", "a", "sequence", "of", "strings", "base_type", ":", "None", "or", "a", "string" ]
python
train
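Most of register is bookkeeping over two maps: type name -> {'base': ..., 'derived': [...]} and file suffix -> type name. A stripped-down sketch of just those data structures; feature composition, scanners, and generator-cache invalidation are omitted:

import re

_re_hyphen = re.compile('-')
_types = {}      # type name -> {'base': ..., 'derived': [...]}
_suffixes = {}   # file suffix -> type name

def register(type_name, suffixes=(), base_type=None):
    if _re_hyphen.search(type_name):
        raise ValueError('type name "%s" contains a hyphen' % type_name)
    if type_name in _types and 'base' in _types[type_name]:
        raise ValueError('Type "%s" is already registered.' % type_name)
    entry = _types.setdefault(type_name, {})
    entry['base'] = base_type
    entry.setdefault('derived', [])
    if base_type:
        _types.setdefault(base_type, {}).setdefault('derived', []).append(type_name)
    for suffix in suffixes:
        _suffixes[suffix] = type_name

register('CPP', ['cpp', 'cxx'])
register('OBJ', ['obj'], base_type='CPP')
print(_suffixes['cxx'], _types['CPP']['derived'])  # CPP ['OBJ']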
Hackerfleet/hfos
modules/camera/hfos/camera/manager.py
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/modules/camera/hfos/camera/manager.py#L99-L105
def rec(self): """Records a single snapshot""" try: self._snapshot() except Exception as e: self.log("Timer error: ", e, type(e), lvl=error)
[ "def", "rec", "(", "self", ")", ":", "try", ":", "self", ".", "_snapshot", "(", ")", "except", "Exception", "as", "e", ":", "self", ".", "log", "(", "\"Timer error: \"", ",", "e", ",", "type", "(", "e", ")", ",", "lvl", "=", "error", ")" ]
Records a single snapshot
[ "Records", "a", "single", "snapshot" ]
python
train
cocaine/cocaine-tools
cocaine/tools/dispatch.py
https://github.com/cocaine/cocaine-tools/blob/d8834f8e04ca42817d5f4e368d471484d4b3419f/cocaine/tools/dispatch.py#L1369-L1380
def group_refresh(name, **kwargs): """ Refresh routing group. If the name option is empty, this command will refresh all groups. """ ctx = Context(**kwargs) ctx.execute_action('group:refresh', **{ 'locator': ctx.locator, 'storage': ctx.repo.create_secure_service('storage'), 'name': name, })
[ "def", "group_refresh", "(", "name", ",", "*", "*", "kwargs", ")", ":", "ctx", "=", "Context", "(", "*", "*", "kwargs", ")", "ctx", ".", "execute_action", "(", "'group:refresh'", ",", "*", "*", "{", "'locator'", ":", "ctx", ".", "locator", ",", "'storage'", ":", "ctx", ".", "repo", ".", "create_secure_service", "(", "'storage'", ")", ",", "'name'", ":", "name", ",", "}", ")" ]
Refresh routing group. If the name option is empty, this command will refresh all groups.
[ "Refresh", "routing", "group", "." ]
python
train
KenjiTakahashi/td
td/main.py
https://github.com/KenjiTakahashi/td/blob/7311eabc63efe6fe6600687c3026f0837454c2e4/td/main.py#L148-L334
def rock(self): """Starts and does the parsing.""" if not self.argv: self.arg.view() while(self.argv): arg = self.argv.popleft() if arg == "-h" or arg == "--help": print( """Usage: td [-h (--help)] [-v (--version)] [command]""" """, where [command] is one of:\n\n""" """v (view)\tChanges the way next output""" """ will look like. See [td v -h].\n""" """m (modify)\tApplies one time changes to""" """ the database. See [td m -h].\n""" """o (options)\tSets persistent options, applied""" """ on every next execution. See [td o -h].\n""" """a (add)\t\tAdds new item. See [td a -h].\n""" """e (edit)\tEdits existing item. See [td e -h].\n""" """r (rm)\t\tRemoves existing item. See [td r -h].\n""" """d (done)\tMarks items as done. See [td d -h].\n""" """D (undone)\tMarks items as not done. See [td D -h].\n""" """\nAdditional options:\n""" """ -h (--help)\tShows this screen.\n""" """ -v (--version)Shows version number.""" ) elif arg == "-v" or arg == "--version": print("td :: {}".format(__version__)) elif arg == "v" or arg == "view": self._part("view", self.arg.view, { "--no-color": ("nocolor", False), "-s": ("sort", True), "--sort": ("sort", True), "-p": ("purge", False), "--purge": ("purge", False), "-d": ("done", True), "--done": ("done", True), "-D": ("undone", True), "--undone": ("undone", True) }, """Usage: td v [-h (--help)] [command(s)]""" """, where [command(s)] are any of:\n\n""" """-s (--sort) <pattern>\tSorts the output using""" """ <pattern>.\n""" """-p (--purge)\t\tHides items marked as done.\n""" """-d (--done) <pattern>\tDisplays items matching""" """ <pattern> as done.\n""" """-D (--undone) <pattern>\tDisplays items matching""" """ <pattern> as not done.\n""" """--no-color\t\tDo not add color codes to the output.\n""" """\nAdditional options:\n""" """ -h (--help)\t\tShows this screen.""" ) elif arg == "m" or arg == "modify": self._part("modify", self.arg.modify, { "-s": ("sort", True), "--sort": ("sort", True), "-p": ("purge", False), "--purge": ("purge", False), "-d": ("done", True), "--done": ("done", True), "-D": ("undone", True), "--undone": ("undone", True) }, """Usage: td m [-h (--help)] [command(s)]""" """, where [command(s)] are any of:\n\n""" """-s (--sort) <pattern>\tSorts database using""" """ <pattern>.\n""" """-p (--purge)\t\tRemoves items marked as done.\n""" """-d (--done) <pattern>\tMarks items matching""" """ <pattern> as done.\n""" """-D (--undone) <pattern>\tMarks items matching""" """ <pattern> as not done.\n""" """\nAdditional options:\n""" """ -h (--help)\t\tShows this screen.""" ) elif arg == "a" or arg == "add": args = dict() if self.argv and self.arg.model.exists(self.argv[0]): args["parent"] = self.argv.popleft() self._part("add", self.arg.add, { "-n": ("name", True), "--name": ("name", True), "-p": ("priority", True), "--priority": ("priority", True), "-c": ("comment", True), "--comment": ("comment", True) }, """Usage: td a [-h (--help)] [parent] [command(s)]""" """, where [command(s)] are any of:\n\n""" """-n (--name) <text>\t\tSets item's name.\n""" """-p (--priority) <no|name>\tSets item's priority.\n""" """-c (--comment) <text>\t\tSets item's comment.\n""" """\nIf [parent] index is specified, new item will""" """ become it's child.\n""" """If any of the arguments is omitted,""" """ this command will launch an interactive session""" """ letting the user supply the rest of them.\n""" """\nAdditional options:\n""" """ -h (--help)\t\t\tShows this screen.""", **args ) elif arg == "e" or arg == "edit": if not self.argv: raise NotEnoughArgumentsError("edit") 
args = dict() if self.argv[0] not in ["-h", "--help"]: args["index"] = self.argv.popleft() self._part("edit", self.arg.edit, { "--parent": ("parent", True), "-n": ("name", True), "--name": ("name", True), "-p": ("priority", True), "--priority": ("priority", True), "-c": ("comment", True), "--comment": ("comment", True) }, """Usage: td e [-h (--help)] <index> [command(s)]""" """, where [command(s)] are any of:\n\n""" """--parent <index>\t\tChanges item's parent.\n""" """-n (--name) <text>\t\tChanges item's name.\n""" """-p (--priority) <no|name>\tChanges item's priority.\n""" """-c (--comment) <text>\t\tChanges item's comment.\n""" """\nIndex argument is required and has to point at""" """ an existing item.\n""" """If any of the arguments is omitted, it will launch""" """ an interactive session letting the user supply the""" """ rest of them.\n""" """\nAdditions options:\n""" """ -h (--help)\t\t\tShows this screen.""", **args ) elif arg == "r" or arg == "rm": args = dict() if not self.argv: raise NotEnoughArgumentsError("rm") elif self.argv[0] not in ["-h", "--help"]: args["index"] = self.argv.popleft() self._part("rm", self.arg.rm, { }, """Usage: td r [-h (--help)] <index>\n\n""" """Index argument is required and has to point at""" """ an existing item.\n""" """\nAdditions options:\n""" """ -h (--help)\tShows this screen.""", **args ) elif arg == "d" or arg == "done": args = dict() if not self.argv: raise NotEnoughArgumentsError("done") elif self.argv[0] not in ["-h", "--help"]: args["index"] = self.argv.popleft() self._part("done", self.arg.done, { }, """Usage: td d [-h (--help)] <index>\n\n""" """Index argument is required and has to point at""" """ an existing item.\n""" """\nAdditional options:\n""" """ -h (--help)\tShows this screen.""", **args ) elif arg == "D" or arg == "undone": args = dict() if not self.argv: raise NotEnoughArgumentsError("undone") elif self.argv[0] not in ["-h", "--help"]: args["index"] = self.argv.popleft() self._part("undone", self.arg.undone, { }, """Usage: td D [-h (--help)] <index>\n\n""" """Index argument is required and has to point at""" """ an existing item.\n""" """\nAdditional options:\n""" """ -h (--help)\tShows this screen.""", **args ) elif arg == "o" or arg == "options": self._part("options", self.arg.options, { "-g": ("glob", False), "--global": ("glob", False), "-s": ("sort", True), "--sort": ("sort", True), "-p": ("purge", False), "--purge": ("purge", False), "-d": ("done", True), "--done": ("done", True), "-D": ("undone", True), "--undone": ("undone", True) }, """Usage: td o [-h (--help)] [command(s)]""" """, where [command(s)] are any of:\n\n""" """-g (--global)\t\tApply specified options to all""" """ ToDo lists (store in ~/.tdrc).\n""" """-s (--sort) <pattern>\tAlways sorts using""" """ <pattern>.\n""" """-p (--purge)\t\tAlways removes items marked""" """as done.\n""" """-d (--done) <pattern>\tAlways marks items maching""" """ <pattern> as done.\n""" """-D (--undone) <pattern>\tAlways marks items maching""" """ <pattern> as not done.\n""" """\nAdditional options:\n""" """ -h (--help)\t\tShows this screen.""" ) else: raise UnrecognizedCommandError("td", arg)
[ "def", "rock", "(", "self", ")", ":", "if", "not", "self", ".", "argv", ":", "self", ".", "arg", ".", "view", "(", ")", "while", "(", "self", ".", "argv", ")", ":", "arg", "=", "self", ".", "argv", ".", "popleft", "(", ")", "if", "arg", "==", "\"-h\"", "or", "arg", "==", "\"--help\"", ":", "print", "(", "\"\"\"Usage: td [-h (--help)] [-v (--version)] [command]\"\"\"", "\"\"\", where [command] is one of:\\n\\n\"\"\"", "\"\"\"v (view)\\tChanges the way next output\"\"\"", "\"\"\" will look like. See [td v -h].\\n\"\"\"", "\"\"\"m (modify)\\tApplies one time changes to\"\"\"", "\"\"\" the database. See [td m -h].\\n\"\"\"", "\"\"\"o (options)\\tSets persistent options, applied\"\"\"", "\"\"\" on every next execution. See [td o -h].\\n\"\"\"", "\"\"\"a (add)\\t\\tAdds new item. See [td a -h].\\n\"\"\"", "\"\"\"e (edit)\\tEdits existing item. See [td e -h].\\n\"\"\"", "\"\"\"r (rm)\\t\\tRemoves existing item. See [td r -h].\\n\"\"\"", "\"\"\"d (done)\\tMarks items as done. See [td d -h].\\n\"\"\"", "\"\"\"D (undone)\\tMarks items as not done. See [td D -h].\\n\"\"\"", "\"\"\"\\nAdditional options:\\n\"\"\"", "\"\"\" -h (--help)\\tShows this screen.\\n\"\"\"", "\"\"\" -v (--version)Shows version number.\"\"\"", ")", "elif", "arg", "==", "\"-v\"", "or", "arg", "==", "\"--version\"", ":", "print", "(", "\"td :: {}\"", ".", "format", "(", "__version__", ")", ")", "elif", "arg", "==", "\"v\"", "or", "arg", "==", "\"view\"", ":", "self", ".", "_part", "(", "\"view\"", ",", "self", ".", "arg", ".", "view", ",", "{", "\"--no-color\"", ":", "(", "\"nocolor\"", ",", "False", ")", ",", "\"-s\"", ":", "(", "\"sort\"", ",", "True", ")", ",", "\"--sort\"", ":", "(", "\"sort\"", ",", "True", ")", ",", "\"-p\"", ":", "(", "\"purge\"", ",", "False", ")", ",", "\"--purge\"", ":", "(", "\"purge\"", ",", "False", ")", ",", "\"-d\"", ":", "(", "\"done\"", ",", "True", ")", ",", "\"--done\"", ":", "(", "\"done\"", ",", "True", ")", ",", "\"-D\"", ":", "(", "\"undone\"", ",", "True", ")", ",", "\"--undone\"", ":", "(", "\"undone\"", ",", "True", ")", "}", ",", "\"\"\"Usage: td v [-h (--help)] [command(s)]\"\"\"", "\"\"\", where [command(s)] are any of:\\n\\n\"\"\"", "\"\"\"-s (--sort) <pattern>\\tSorts the output using\"\"\"", "\"\"\" <pattern>.\\n\"\"\"", "\"\"\"-p (--purge)\\t\\tHides items marked as done.\\n\"\"\"", "\"\"\"-d (--done) <pattern>\\tDisplays items matching\"\"\"", "\"\"\" <pattern> as done.\\n\"\"\"", "\"\"\"-D (--undone) <pattern>\\tDisplays items matching\"\"\"", "\"\"\" <pattern> as not done.\\n\"\"\"", "\"\"\"--no-color\\t\\tDo not add color codes to the output.\\n\"\"\"", "\"\"\"\\nAdditional options:\\n\"\"\"", "\"\"\" -h (--help)\\t\\tShows this screen.\"\"\"", ")", "elif", "arg", "==", "\"m\"", "or", "arg", "==", "\"modify\"", ":", "self", ".", "_part", "(", "\"modify\"", ",", "self", ".", "arg", ".", "modify", ",", "{", "\"-s\"", ":", "(", "\"sort\"", ",", "True", ")", ",", "\"--sort\"", ":", "(", "\"sort\"", ",", "True", ")", ",", "\"-p\"", ":", "(", "\"purge\"", ",", "False", ")", ",", "\"--purge\"", ":", "(", "\"purge\"", ",", "False", ")", ",", "\"-d\"", ":", "(", "\"done\"", ",", "True", ")", ",", "\"--done\"", ":", "(", "\"done\"", ",", "True", ")", ",", "\"-D\"", ":", "(", "\"undone\"", ",", "True", ")", ",", "\"--undone\"", ":", "(", "\"undone\"", ",", "True", ")", "}", ",", "\"\"\"Usage: td m [-h (--help)] [command(s)]\"\"\"", "\"\"\", where [command(s)] are any of:\\n\\n\"\"\"", "\"\"\"-s (--sort) <pattern>\\tSorts database using\"\"\"", "\"\"\" <pattern>.\\n\"\"\"", "\"\"\"-p 
(--purge)\\t\\tRemoves items marked as done.\\n\"\"\"", "\"\"\"-d (--done) <pattern>\\tMarks items matching\"\"\"", "\"\"\" <pattern> as done.\\n\"\"\"", "\"\"\"-D (--undone) <pattern>\\tMarks items matching\"\"\"", "\"\"\" <pattern> as not done.\\n\"\"\"", "\"\"\"\\nAdditional options:\\n\"\"\"", "\"\"\" -h (--help)\\t\\tShows this screen.\"\"\"", ")", "elif", "arg", "==", "\"a\"", "or", "arg", "==", "\"add\"", ":", "args", "=", "dict", "(", ")", "if", "self", ".", "argv", "and", "self", ".", "arg", ".", "model", ".", "exists", "(", "self", ".", "argv", "[", "0", "]", ")", ":", "args", "[", "\"parent\"", "]", "=", "self", ".", "argv", ".", "popleft", "(", ")", "self", ".", "_part", "(", "\"add\"", ",", "self", ".", "arg", ".", "add", ",", "{", "\"-n\"", ":", "(", "\"name\"", ",", "True", ")", ",", "\"--name\"", ":", "(", "\"name\"", ",", "True", ")", ",", "\"-p\"", ":", "(", "\"priority\"", ",", "True", ")", ",", "\"--priority\"", ":", "(", "\"priority\"", ",", "True", ")", ",", "\"-c\"", ":", "(", "\"comment\"", ",", "True", ")", ",", "\"--comment\"", ":", "(", "\"comment\"", ",", "True", ")", "}", ",", "\"\"\"Usage: td a [-h (--help)] [parent] [command(s)]\"\"\"", "\"\"\", where [command(s)] are any of:\\n\\n\"\"\"", "\"\"\"-n (--name) <text>\\t\\tSets item's name.\\n\"\"\"", "\"\"\"-p (--priority) <no|name>\\tSets item's priority.\\n\"\"\"", "\"\"\"-c (--comment) <text>\\t\\tSets item's comment.\\n\"\"\"", "\"\"\"\\nIf [parent] index is specified, new item will\"\"\"", "\"\"\" become it's child.\\n\"\"\"", "\"\"\"If any of the arguments is omitted,\"\"\"", "\"\"\" this command will launch an interactive session\"\"\"", "\"\"\" letting the user supply the rest of them.\\n\"\"\"", "\"\"\"\\nAdditional options:\\n\"\"\"", "\"\"\" -h (--help)\\t\\t\\tShows this screen.\"\"\"", ",", "*", "*", "args", ")", "elif", "arg", "==", "\"e\"", "or", "arg", "==", "\"edit\"", ":", "if", "not", "self", ".", "argv", ":", "raise", "NotEnoughArgumentsError", "(", "\"edit\"", ")", "args", "=", "dict", "(", ")", "if", "self", ".", "argv", "[", "0", "]", "not", "in", "[", "\"-h\"", ",", "\"--help\"", "]", ":", "args", "[", "\"index\"", "]", "=", "self", ".", "argv", ".", "popleft", "(", ")", "self", ".", "_part", "(", "\"edit\"", ",", "self", ".", "arg", ".", "edit", ",", "{", "\"--parent\"", ":", "(", "\"parent\"", ",", "True", ")", ",", "\"-n\"", ":", "(", "\"name\"", ",", "True", ")", ",", "\"--name\"", ":", "(", "\"name\"", ",", "True", ")", ",", "\"-p\"", ":", "(", "\"priority\"", ",", "True", ")", ",", "\"--priority\"", ":", "(", "\"priority\"", ",", "True", ")", ",", "\"-c\"", ":", "(", "\"comment\"", ",", "True", ")", ",", "\"--comment\"", ":", "(", "\"comment\"", ",", "True", ")", "}", ",", "\"\"\"Usage: td e [-h (--help)] <index> [command(s)]\"\"\"", "\"\"\", where [command(s)] are any of:\\n\\n\"\"\"", "\"\"\"--parent <index>\\t\\tChanges item's parent.\\n\"\"\"", "\"\"\"-n (--name) <text>\\t\\tChanges item's name.\\n\"\"\"", "\"\"\"-p (--priority) <no|name>\\tChanges item's priority.\\n\"\"\"", "\"\"\"-c (--comment) <text>\\t\\tChanges item's comment.\\n\"\"\"", "\"\"\"\\nIndex argument is required and has to point at\"\"\"", "\"\"\" an existing item.\\n\"\"\"", "\"\"\"If any of the arguments is omitted, it will launch\"\"\"", "\"\"\" an interactive session letting the user supply the\"\"\"", "\"\"\" rest of them.\\n\"\"\"", "\"\"\"\\nAdditions options:\\n\"\"\"", "\"\"\" -h (--help)\\t\\t\\tShows this screen.\"\"\"", ",", "*", "*", "args", ")", "elif", "arg", "==", "\"r\"", "or", "arg", "==", 
"\"rm\"", ":", "args", "=", "dict", "(", ")", "if", "not", "self", ".", "argv", ":", "raise", "NotEnoughArgumentsError", "(", "\"rm\"", ")", "elif", "self", ".", "argv", "[", "0", "]", "not", "in", "[", "\"-h\"", ",", "\"--help\"", "]", ":", "args", "[", "\"index\"", "]", "=", "self", ".", "argv", ".", "popleft", "(", ")", "self", ".", "_part", "(", "\"rm\"", ",", "self", ".", "arg", ".", "rm", ",", "{", "}", ",", "\"\"\"Usage: td r [-h (--help)] <index>\\n\\n\"\"\"", "\"\"\"Index argument is required and has to point at\"\"\"", "\"\"\" an existing item.\\n\"\"\"", "\"\"\"\\nAdditions options:\\n\"\"\"", "\"\"\" -h (--help)\\tShows this screen.\"\"\"", ",", "*", "*", "args", ")", "elif", "arg", "==", "\"d\"", "or", "arg", "==", "\"done\"", ":", "args", "=", "dict", "(", ")", "if", "not", "self", ".", "argv", ":", "raise", "NotEnoughArgumentsError", "(", "\"done\"", ")", "elif", "self", ".", "argv", "[", "0", "]", "not", "in", "[", "\"-h\"", ",", "\"--help\"", "]", ":", "args", "[", "\"index\"", "]", "=", "self", ".", "argv", ".", "popleft", "(", ")", "self", ".", "_part", "(", "\"done\"", ",", "self", ".", "arg", ".", "done", ",", "{", "}", ",", "\"\"\"Usage: td d [-h (--help)] <index>\\n\\n\"\"\"", "\"\"\"Index argument is required and has to point at\"\"\"", "\"\"\" an existing item.\\n\"\"\"", "\"\"\"\\nAdditional options:\\n\"\"\"", "\"\"\" -h (--help)\\tShows this screen.\"\"\"", ",", "*", "*", "args", ")", "elif", "arg", "==", "\"D\"", "or", "arg", "==", "\"undone\"", ":", "args", "=", "dict", "(", ")", "if", "not", "self", ".", "argv", ":", "raise", "NotEnoughArgumentsError", "(", "\"undone\"", ")", "elif", "self", ".", "argv", "[", "0", "]", "not", "in", "[", "\"-h\"", ",", "\"--help\"", "]", ":", "args", "[", "\"index\"", "]", "=", "self", ".", "argv", ".", "popleft", "(", ")", "self", ".", "_part", "(", "\"undone\"", ",", "self", ".", "arg", ".", "undone", ",", "{", "}", ",", "\"\"\"Usage: td D [-h (--help)] <index>\\n\\n\"\"\"", "\"\"\"Index argument is required and has to point at\"\"\"", "\"\"\" an existing item.\\n\"\"\"", "\"\"\"\\nAdditional options:\\n\"\"\"", "\"\"\" -h (--help)\\tShows this screen.\"\"\"", ",", "*", "*", "args", ")", "elif", "arg", "==", "\"o\"", "or", "arg", "==", "\"options\"", ":", "self", ".", "_part", "(", "\"options\"", ",", "self", ".", "arg", ".", "options", ",", "{", "\"-g\"", ":", "(", "\"glob\"", ",", "False", ")", ",", "\"--global\"", ":", "(", "\"glob\"", ",", "False", ")", ",", "\"-s\"", ":", "(", "\"sort\"", ",", "True", ")", ",", "\"--sort\"", ":", "(", "\"sort\"", ",", "True", ")", ",", "\"-p\"", ":", "(", "\"purge\"", ",", "False", ")", ",", "\"--purge\"", ":", "(", "\"purge\"", ",", "False", ")", ",", "\"-d\"", ":", "(", "\"done\"", ",", "True", ")", ",", "\"--done\"", ":", "(", "\"done\"", ",", "True", ")", ",", "\"-D\"", ":", "(", "\"undone\"", ",", "True", ")", ",", "\"--undone\"", ":", "(", "\"undone\"", ",", "True", ")", "}", ",", "\"\"\"Usage: td o [-h (--help)] [command(s)]\"\"\"", "\"\"\", where [command(s)] are any of:\\n\\n\"\"\"", "\"\"\"-g (--global)\\t\\tApply specified options to all\"\"\"", "\"\"\" ToDo lists (store in ~/.tdrc).\\n\"\"\"", "\"\"\"-s (--sort) <pattern>\\tAlways sorts using\"\"\"", "\"\"\" <pattern>.\\n\"\"\"", "\"\"\"-p (--purge)\\t\\tAlways removes items marked\"\"\"", "\"\"\"as done.\\n\"\"\"", "\"\"\"-d (--done) <pattern>\\tAlways marks items maching\"\"\"", "\"\"\" <pattern> as done.\\n\"\"\"", "\"\"\"-D (--undone) <pattern>\\tAlways marks items maching\"\"\"", "\"\"\" <pattern> as not 
done.\\n\"\"\"", "\"\"\"\\nAdditional options:\\n\"\"\"", "\"\"\" -h (--help)\\t\\tShows this screen.\"\"\"", ")", "else", ":", "raise", "UnrecognizedCommandError", "(", "\"td\"", ",", "arg", ")" ]
Starts and does the parsing.
[ "Starts", "and", "does", "the", "parsing", "." ]
python
train
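rock consumes argv tokens from a deque via popleft, dispatching on the first token and handing each subcommand an option map of the shape option-string -> (destination, takes_value); that is what the _part calls above pass in. The _part helper itself is not shown in this record, so the loop below is a guess at its core, written as a standalone sketch:

from collections import deque

def parse_options(argv, spec):
    # spec maps option strings to (dest, takes_value).
    args = {}
    while argv and argv[0] in spec:
        dest, takes_value = spec[argv.popleft()]
        args[dest] = argv.popleft() if takes_value else True
    return args

argv = deque(['-s', 'priority+', '--purge'])
spec = {'-s': ('sort', True), '--sort': ('sort', True),
        '-p': ('purge', False), '--purge': ('purge', False)}
print(parse_options(argv, spec))  # {'sort': 'priority+', 'purge': True}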
secdev/scapy
scapy/utils.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/utils.py#L1458-L1468
def tdecode(pktlist, args=None, **kwargs): """ Run tshark on a list of packets. :param args: If not specified, defaults to ``tshark -V``. See :func:`tcpdump` for more parameters. """ if args is None: args = ["-V"] return tcpdump(pktlist, prog=conf.prog.tshark, args=args, **kwargs)
[ "def", "tdecode", "(", "pktlist", ",", "args", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "args", "is", "None", ":", "args", "=", "[", "\"-V\"", "]", "return", "tcpdump", "(", "pktlist", ",", "prog", "=", "conf", ".", "prog", ".", "tshark", ",", "args", "=", "args", ",", "*", "*", "kwargs", ")" ]
Run tshark on a list of packets. :param args: If not specified, defaults to ``tshark -V``. See :func:`tcpdump` for more parameters.
[ "Run", "tshark", "on", "a", "list", "of", "packets", "." ]
python
train
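tdecode defaults args to None and substitutes ["-V"] inside the body instead of writing args=["-V"] in the signature. That is the standard guard against Python's shared mutable default, worth spelling out since the function goes on to pass args along:

def bad(args=[]):          # one list object shared by every call
    args.append('-V')
    return args

def good(args=None):       # fresh default per call, as in tdecode
    if args is None:
        args = []
    args.append('-V')
    return args

print(bad(), bad())        # ['-V', '-V'] ['-V', '-V']: both names are the same list
print(good(), good())      # ['-V'] ['-V']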
tjvr/kurt
kurt/scratch14/objtable.py
https://github.com/tjvr/kurt/blob/fcccd80cae11dc233f6dd02b40ec9a388c62f259/kurt/scratch14/objtable.py#L251-L298
def decode_network(objects): """Return root object from ref-containing obj table entries""" def resolve_ref(obj, objects=objects): if isinstance(obj, Ref): # first entry is 1 return objects[obj.index - 1] else: return obj # Reading the ObjTable backwards somehow makes more sense. for i in xrange(len(objects)-1, -1, -1): obj = objects[i] if isinstance(obj, Container): obj.update((k, resolve_ref(v)) for (k, v) in obj.items()) elif isinstance(obj, Dictionary): obj.value = dict( (resolve_ref(field), resolve_ref(value)) for (field, value) in obj.value.items() ) elif isinstance(obj, dict): obj = dict( (resolve_ref(field), resolve_ref(value)) for (field, value) in obj.items() ) elif isinstance(obj, list): obj = [resolve_ref(field) for field in obj] elif isinstance(obj, Form): for field in obj.value: value = getattr(obj, field) value = resolve_ref(value) setattr(obj, field, value) elif isinstance(obj, ContainsRefs): obj.value = [resolve_ref(field) for field in obj.value] objects[i] = obj for obj in objects: if isinstance(obj, Form): obj.built() root = objects[0] return root
[ "def", "decode_network", "(", "objects", ")", ":", "def", "resolve_ref", "(", "obj", ",", "objects", "=", "objects", ")", ":", "if", "isinstance", "(", "obj", ",", "Ref", ")", ":", "# first entry is 1", "return", "objects", "[", "obj", ".", "index", "-", "1", "]", "else", ":", "return", "obj", "# Reading the ObjTable backwards somehow makes more sense.", "for", "i", "in", "xrange", "(", "len", "(", "objects", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "obj", "=", "objects", "[", "i", "]", "if", "isinstance", "(", "obj", ",", "Container", ")", ":", "obj", ".", "update", "(", "(", "k", ",", "resolve_ref", "(", "v", ")", ")", "for", "(", "k", ",", "v", ")", "in", "obj", ".", "items", "(", ")", ")", "elif", "isinstance", "(", "obj", ",", "Dictionary", ")", ":", "obj", ".", "value", "=", "dict", "(", "(", "resolve_ref", "(", "field", ")", ",", "resolve_ref", "(", "value", ")", ")", "for", "(", "field", ",", "value", ")", "in", "obj", ".", "value", ".", "items", "(", ")", ")", "elif", "isinstance", "(", "obj", ",", "dict", ")", ":", "obj", "=", "dict", "(", "(", "resolve_ref", "(", "field", ")", ",", "resolve_ref", "(", "value", ")", ")", "for", "(", "field", ",", "value", ")", "in", "obj", ".", "items", "(", ")", ")", "elif", "isinstance", "(", "obj", ",", "list", ")", ":", "obj", "=", "[", "resolve_ref", "(", "field", ")", "for", "field", "in", "obj", "]", "elif", "isinstance", "(", "obj", ",", "Form", ")", ":", "for", "field", "in", "obj", ".", "value", ":", "value", "=", "getattr", "(", "obj", ",", "field", ")", "value", "=", "resolve_ref", "(", "value", ")", "setattr", "(", "obj", ",", "field", ",", "value", ")", "elif", "isinstance", "(", "obj", ",", "ContainsRefs", ")", ":", "obj", ".", "value", "=", "[", "resolve_ref", "(", "field", ")", "for", "field", "in", "obj", ".", "value", "]", "objects", "[", "i", "]", "=", "obj", "for", "obj", "in", "objects", ":", "if", "isinstance", "(", "obj", ",", "Form", ")", ":", "obj", ".", "built", "(", ")", "root", "=", "objects", "[", "0", "]", "return", "root" ]
Return root object from ref-containing obj table entries
[ "Return", "root", "object", "from", "ref", "-", "containing", "obj", "table", "entries" ]
python
train
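decode_network's core move is replacing 1-based Ref placeholders with the table entries they point at; walking the table backwards means nested containers are usually already resolved by the time their parents are processed. A minimal sketch covering just dicts and lists:

class Ref:
    def __init__(self, index):
        self.index = index          # 1-based, as in the Scratch object table

def decode(objects):
    def resolve(value):
        return objects[value.index - 1] if isinstance(value, Ref) else value

    for i in range(len(objects) - 1, -1, -1):    # walk backwards
        obj = objects[i]
        if isinstance(obj, dict):
            objects[i] = {resolve(k): resolve(v) for k, v in obj.items()}
        elif isinstance(obj, list):
            objects[i] = [resolve(v) for v in obj]
    return objects[0]               # entry 1 is the root

table = [{'stage': Ref(2)}, ['sprite', Ref(3)], 'costume']
print(decode(table))                # {'stage': ['sprite', 'costume']}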
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L575-L585
def _check_algorithm_keys(item): """Check for unexpected keys in the algorithm section. Needs to be manually updated when introducing new keys, but avoids silent bugs with typos in key names. """ problem_keys = [k for k in item["algorithm"].keys() if k not in ALGORITHM_KEYS] if len(problem_keys) > 0: raise ValueError("Unexpected configuration keyword in 'algorithm' section: %s\n" "See configuration documentation for supported options:\n%s\n" % (problem_keys, ALG_DOC_URL))
[ "def", "_check_algorithm_keys", "(", "item", ")", ":", "problem_keys", "=", "[", "k", "for", "k", "in", "item", "[", "\"algorithm\"", "]", ".", "keys", "(", ")", "if", "k", "not", "in", "ALGORITHM_KEYS", "]", "if", "len", "(", "problem_keys", ")", ">", "0", ":", "raise", "ValueError", "(", "\"Unexpected configuration keyword in 'algorithm' section: %s\\n\"", "\"See configuration documentation for supported options:\\n%s\\n\"", "%", "(", "problem_keys", ",", "ALG_DOC_URL", ")", ")" ]
Check for unexpected keys in the algorithm section. Needs to be manually updated when introducing new keys, but avoids silent bugs with typos in key names.
[ "Check", "for", "unexpected", "keys", "in", "the", "algorithm", "section", "." ]
python
train
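The validation idiom here generalizes well: diff the supplied keys against an allow-list and report every offender at once instead of failing on the first typo. A sketch with an invented three-key allow-list (the real ALGORITHM_KEYS is much larger):

ALGORITHM_KEYS = {"aligner", "variantcaller", "coverage_interval"}

def check_algorithm_keys(item):
    problem_keys = [k for k in item["algorithm"] if k not in ALGORITHM_KEYS]
    if problem_keys:
        raise ValueError(
            "Unexpected configuration keyword in 'algorithm' section: %s"
            % problem_keys)

check_algorithm_keys({"algorithm": {"aligner": "bwa"}})       # passes
try:
    check_algorithm_keys({"algorithm": {"alinger": "bwa"}})   # typo
except ValueError as err:
    print(err)  # names ['alinger'] instead of failing silently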
src-d/modelforge
modelforge/tools.py
https://github.com/src-d/modelforge/blob/4f73c2bf0318261ac01bc8b6c0d4250a5d303418/modelforge/tools.py#L13-L32
def install_environment(args: argparse.Namespace, backend: StorageBackend, log: logging.Logger): """ Install the packages mentioned in the model's metadata. :param args: :param args: :class:`argparse.Namespace` with "input", "reproduce", "backend", \ "args", "username", "password", "remote_repo" and "log_level". :param backend: Backend which is responsible for working with model files. :param log: Logger supplied by supply_backend :return: None """ model = _load_generic_model(args.input, backend, log) if model is None: return 1 packages = ["%s==%s" % (pkg, ver) for pkg, ver in model.environment["packages"]] cmdline = [sys.executable, "-m", "pip", "install"] + args.pip + packages log.info(" ".join(cmdline)) subprocess.check_call(cmdline) if args.reproduce: for dataset in model.datasets: download_http(dataset[0], dataset[1], log)
[ "def", "install_environment", "(", "args", ":", "argparse", ".", "Namespace", ",", "backend", ":", "StorageBackend", ",", "log", ":", "logging", ".", "Logger", ")", ":", "model", "=", "_load_generic_model", "(", "args", ".", "input", ",", "backend", ",", "log", ")", "if", "model", "is", "None", ":", "return", "1", "packages", "=", "[", "\"%s==%s\"", "%", "(", "pkg", ",", "ver", ")", "for", "pkg", ",", "ver", "in", "model", ".", "environment", "[", "\"packages\"", "]", "]", "cmdline", "=", "[", "sys", ".", "executable", ",", "\"-m\"", ",", "\"pip\"", ",", "\"install\"", "]", "+", "args", ".", "pip", "+", "packages", "log", ".", "info", "(", "\" \"", ".", "join", "(", "cmdline", ")", ")", "subprocess", ".", "check_call", "(", "cmdline", ")", "if", "args", ".", "reproduce", ":", "for", "dataset", "in", "model", ".", "datasets", ":", "download_http", "(", "dataset", "[", "0", "]", ",", "dataset", "[", "1", "]", ",", "log", ")" ]
Install the packages mentioned in the model's metadata. :param args: :class:`argparse.Namespace` with "input", "reproduce", "backend", \ "args", "username", "password", "remote_repo" and "log_level". :param backend: Backend which is responsible for working with model files. :param log: Logger supplied by supply_backend :return: None
[ "Install", "the", "packages", "mentioned", "in", "the", "model", "s", "metadata", "." ]
python
train
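Building the pinned requirement strings and the pip command line is the reusable piece of install_environment. A dry-run sketch that prints the command rather than executing it; the package list and extra pip args are invented:

import sys

environment = {"packages": [("numpy", "1.16.2"), ("scipy", "1.2.1")]}
extra_pip_args = ["--no-cache-dir"]

packages = ["%s==%s" % (pkg, ver) for pkg, ver in environment["packages"]]
cmdline = [sys.executable, "-m", "pip", "install"] + extra_pip_args + packages
print(" ".join(cmdline))
# .../python -m pip install --no-cache-dir numpy==1.16.2 scipy==1.2.1
# subprocess.check_call(cmdline) would actually run it, as the original does.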
PrefPy/prefpy
prefpy/mechanismMcmc.py
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmc.py#L365-L396
def getCandScoresMapBruteForce(self, profile): """ Returns a dictonary that associates the integer representation of each candidate with the bayesian losses that we calculate using brute force. :ivar Profile profile: A Profile object that represents an election profile. """ wmg = profile.getWmg(True) m = len(wmg.keys()) cands = range(m) V = self.createBinaryRelation(m) gains = dict() for cand in wmg.keys(): gains[cand] = 0 graphs = itertools.product(range(2), repeat=m*(m-1)/2) for comb in graphs: prob = 1 i = 0 for a, b in itertools.combinations(cands,2): V[a][b] = comb[i] V[b][a] = 1-comb[i] if comb[i] > 0: prob *= 1/(1+self.phi ** float(wmg[a+1][b+1])) else: prob *= 1/(1+self.phi ** float(wmg[b+1][a+1])) i += 1 if i >= m*(m-1)/2: break for cand in wmg.keys(): gains[cand] += self.utilityFunction.getUtility([cand], V)*prob return gains
[ "def", "getCandScoresMapBruteForce", "(", "self", ",", "profile", ")", ":", "wmg", "=", "profile", ".", "getWmg", "(", "True", ")", "m", "=", "len", "(", "wmg", ".", "keys", "(", ")", ")", "cands", "=", "range", "(", "m", ")", "V", "=", "self", ".", "createBinaryRelation", "(", "m", ")", "gains", "=", "dict", "(", ")", "for", "cand", "in", "wmg", ".", "keys", "(", ")", ":", "gains", "[", "cand", "]", "=", "0", "graphs", "=", "itertools", ".", "product", "(", "range", "(", "2", ")", ",", "repeat", "=", "m", "*", "(", "m", "-", "1", ")", "/", "2", ")", "for", "comb", "in", "graphs", ":", "prob", "=", "1", "i", "=", "0", "for", "a", ",", "b", "in", "itertools", ".", "combinations", "(", "cands", ",", "2", ")", ":", "V", "[", "a", "]", "[", "b", "]", "=", "comb", "[", "i", "]", "V", "[", "b", "]", "[", "a", "]", "=", "1", "-", "comb", "[", "i", "]", "if", "comb", "[", "i", "]", ">", "0", ":", "prob", "*=", "1", "/", "(", "1", "+", "self", ".", "phi", "**", "float", "(", "wmg", "[", "a", "+", "1", "]", "[", "b", "+", "1", "]", ")", ")", "else", ":", "prob", "*=", "1", "/", "(", "1", "+", "self", ".", "phi", "**", "float", "(", "wmg", "[", "b", "+", "1", "]", "[", "a", "+", "1", "]", ")", ")", "i", "+=", "1", "if", "i", ">=", "m", "*", "(", "m", "-", "1", ")", "/", "2", ":", "break", "for", "cand", "in", "wmg", ".", "keys", "(", ")", ":", "gains", "[", "cand", "]", "+=", "self", ".", "utilityFunction", ".", "getUtility", "(", "[", "cand", "]", ",", "V", ")", "*", "prob", "return", "gains" ]
Returns a dictionary that associates the integer representation of each candidate with the Bayesian losses that we calculate using brute force. :ivar Profile profile: A Profile object that represents an election profile.
[ "Returns", "a", "dictonary", "that", "associates", "the", "integer", "representation", "of", "each", "candidate", "with", "the", "bayesian", "losses", "that", "we", "calculate", "using", "brute", "force", "." ]
python
train
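Enumerating every tournament on m candidates as itertools.product(range(2), repeat=m*(m-1)//2) is the heart of the brute-force scorer; note that on Python 3 repeat must be an integer, so the original's m*(m-1)/2 would need floor division. A small sketch that fills the adjacency matrix the same way and counts orientations:

import itertools

m = 4
pairs = list(itertools.combinations(range(m), 2))
total = 0
cand0_dominates = 0
for comb in itertools.product(range(2), repeat=m * (m - 1) // 2):
    V = [[0] * m for _ in range(m)]
    for i, (a, b) in enumerate(pairs):
        V[a][b] = comb[i]          # comb[i] == 1 means a beats b
        V[b][a] = 1 - comb[i]
    total += 1
    cand0_dominates += all(V[0][b] for b in range(1, m))

print(total, cand0_dominates)      # 64 8: candidate 0 beats everyone in 2**3 of 2**6 graphs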
google/flatbuffers
python/flatbuffers/table.py
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/table.py#L56-L64
def VectorLen(self, off): """VectorLen retrieves the length of the vector whose offset is stored at "off" in this object.""" N.enforce_number(off, N.UOffsetTFlags) off += self.Pos off += encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off) ret = encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off) return ret
[ "def", "VectorLen", "(", "self", ",", "off", ")", ":", "N", ".", "enforce_number", "(", "off", ",", "N", ".", "UOffsetTFlags", ")", "off", "+=", "self", ".", "Pos", "off", "+=", "encode", ".", "Get", "(", "N", ".", "UOffsetTFlags", ".", "packer_type", ",", "self", ".", "Bytes", ",", "off", ")", "ret", "=", "encode", ".", "Get", "(", "N", ".", "UOffsetTFlags", ".", "packer_type", ",", "self", ".", "Bytes", ",", "off", ")", "return", "ret" ]
VectorLen retrieves the length of the vector whose offset is stored at "off" in this object.
[ "VectorLen", "retrieves", "the", "length", "of", "the", "vector", "whose", "offset", "is", "stored", "at", "off", "in", "this", "object", "." ]
python
train
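VectorLen performs a double read: the little-endian UOffsetT at pos+off is a relative offset to the vector, and the vector itself begins with a UOffsetT length. A hand-built buffer makes the indirection concrete; struct stands in for flatbuffers' encode.Get, and the layout is simplified:

import struct

def vector_len(buf, pos, off):
    off += pos
    off += struct.unpack_from('<I', buf, off)[0]   # follow the relative offset
    return struct.unpack_from('<I', buf, off)[0]   # read the vector length

buf = bytearray(20)
struct.pack_into('<I', buf, 4, 8)    # at offset 4: vector lives 8 bytes ahead
struct.pack_into('<I', buf, 12, 3)   # at offset 12: vector holds 3 elements

print(vector_len(bytes(buf), 0, 4))  # 3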
welbornprod/colr
colr/colr.py
https://github.com/welbornprod/colr/blob/417117fdbddbc53142096685ac2af006b2bd0220/colr/colr.py#L1723-L1744
def hex(self, value, text=None, back=None, style=None, rgb_mode=False): """ A chained method that sets the fore color to an hex value. Arguments: value : Hex value to convert. text : Text to style if not building up color codes. back : Back color for the text. style : Style for the text. rgb_mode : If False, the closest extended code is used, otherwise true color (rgb) mode is used. """ if rgb_mode: try: colrval = hex2rgb(value, allow_short=True) except ValueError: raise InvalidColr(value) else: try: colrval = hex2term(value, allow_short=True) except ValueError: raise InvalidColr(value) return self.chained(text=text, fore=colrval, back=back, style=style)
[ "def", "hex", "(", "self", ",", "value", ",", "text", "=", "None", ",", "back", "=", "None", ",", "style", "=", "None", ",", "rgb_mode", "=", "False", ")", ":", "if", "rgb_mode", ":", "try", ":", "colrval", "=", "hex2rgb", "(", "value", ",", "allow_short", "=", "True", ")", "except", "ValueError", ":", "raise", "InvalidColr", "(", "value", ")", "else", ":", "try", ":", "colrval", "=", "hex2term", "(", "value", ",", "allow_short", "=", "True", ")", "except", "ValueError", ":", "raise", "InvalidColr", "(", "value", ")", "return", "self", ".", "chained", "(", "text", "=", "text", ",", "fore", "=", "colrval", ",", "back", "=", "back", ",", "style", "=", "style", ")" ]
A chained method that sets the fore color to a hex value. Arguments: value : Hex value to convert. text : Text to style if not building up color codes. back : Back color for the text. style : Style for the text. rgb_mode : If False, the closest extended code is used, otherwise true color (rgb) mode is used.
[ "A", "chained", "method", "that", "sets", "the", "fore", "color", "to", "an", "hex", "value", ".", "Arguments", ":", "value", ":", "Hex", "value", "to", "convert", ".", "text", ":", "Text", "to", "style", "if", "not", "building", "up", "color", "codes", ".", "back", ":", "Back", "color", "for", "the", "text", ".", "style", ":", "Style", "for", "the", "text", ".", "rgb_mode", ":", "If", "False", "the", "closest", "extended", "code", "is", "used", "otherwise", "true", "color", "(", "rgb", ")", "mode", "is", "used", "." ]
python
train
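hex2term/hex2rgb with allow_short=True presumably accept 3-digit CSS-style values; the RGB half of that conversion is easy to write independently. This helper is an assumption for illustration, not Colr's actual implementation:

def hex_to_rgb(value, allow_short=True):
    value = value.lstrip('#')
    if allow_short and len(value) == 3:
        value = ''.join(ch * 2 for ch in value)    # 'f0a' -> 'ff00aa'
    if len(value) != 6:
        raise ValueError('invalid hex color: %r' % value)
    return tuple(int(value[i:i + 2], 16) for i in (0, 2, 4))

print(hex_to_rgb('fff'))      # (255, 255, 255)
print(hex_to_rgb('#3366cc'))  # (51, 102, 204)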
coderanger/depot
depot/storage.py
https://github.com/coderanger/depot/blob/d1a96f13204ad7028432096d25718e611d4d3d9d/depot/storage.py#L122-L157
def _get_storage(cls, uri): """ Given a URI like local:///srv/repo or s3://key:[email protected], return a libcloud storage or container object. """ driver = cls._get_driver(uri.scheme) key = uri.username secret = uri.password container = uri.netloc driver_kwargs = {} if uri.scheme.startswith('s3'): if not key: key = os.environ.get('AWS_ACCESS_KEY_ID') if not secret: secret = os.environ.get('AWS_SECRET_ACCESS_KEY') if not (key and secret and container): raise ValueError('For S3 you must provide an access key ID, secret access key, and bucket name') # No way to store this in the URI, what about a CLI option too? if 'AWS_TOKEN' in os.environ: driver_kwargs['token'] = os.environ['AWS_TOKEN'] elif uri.scheme == 'local': parts = [] if uri.netloc: parts.append(uri.netloc) if uri.path: parts.append(uri.path) if not parts: parts.append('.') base_path = os.path.abspath(''.join(parts)) key = os.path.dirname(base_path) container = os.path.basename(base_path) storage = driver(key, secret, **driver_kwargs) try: return storage.get_container(container) except ContainerDoesNotExistError: return storage.create_container(container)
[ "def", "_get_storage", "(", "cls", ",", "uri", ")", ":", "driver", "=", "cls", ".", "_get_driver", "(", "uri", ".", "scheme", ")", "key", "=", "uri", ".", "username", "secret", "=", "uri", ".", "password", "container", "=", "uri", ".", "netloc", "driver_kwargs", "=", "{", "}", "if", "uri", ".", "scheme", ".", "startswith", "(", "'s3'", ")", ":", "if", "not", "key", ":", "key", "=", "os", ".", "environ", ".", "get", "(", "'AWS_ACCESS_KEY_ID'", ")", "if", "not", "secret", ":", "secret", "=", "os", ".", "environ", ".", "get", "(", "'AWS_SECRET_ACCESS_KEY'", ")", "if", "not", "(", "key", "and", "secret", "and", "container", ")", ":", "raise", "ValueError", "(", "'For S3 you must provide an access key ID, secret access key, and bucket name'", ")", "# No way to store this in the URI, what about a CLI option too?", "if", "'AWS_TOKEN'", "in", "os", ".", "environ", ":", "driver_kwargs", "[", "'token'", "]", "=", "os", ".", "environ", "[", "'AWS_TOKEN'", "]", "elif", "uri", ".", "scheme", "==", "'local'", ":", "parts", "=", "[", "]", "if", "uri", ".", "netloc", ":", "parts", ".", "append", "(", "uri", ".", "netloc", ")", "if", "uri", ".", "path", ":", "parts", ".", "append", "(", "uri", ".", "path", ")", "if", "not", "parts", ":", "parts", ".", "append", "(", "'.'", ")", "base_path", "=", "os", ".", "path", ".", "abspath", "(", "''", ".", "join", "(", "parts", ")", ")", "key", "=", "os", ".", "path", ".", "dirname", "(", "base_path", ")", "container", "=", "os", ".", "path", ".", "basename", "(", "base_path", ")", "storage", "=", "driver", "(", "key", ",", "secret", ",", "*", "*", "driver_kwargs", ")", "try", ":", "return", "storage", ".", "get_container", "(", "container", ")", "except", "ContainerDoesNotExistError", ":", "return", "storage", ".", "create_container", "(", "container", ")" ]
Given a URI like local:///srv/repo or s3://key:secret@bucket, return a libcloud storage or container object.
[ "Given", "a", "URI", "like", "local", ":", "///", "srv", "/", "repo", "or", "s3", ":", "//", "key", ":", "secret" ]
python
train
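A minimal, self-contained sketch of the credential/container resolution in _get_storage above, without libcloud; the helper name and the example URI are invented for illustration.

import os
from urllib.parse import urlparse

def resolve_s3_parts(uri_string):
    """Return (key, secret, container) for an s3:// URI, with env-var fallback."""
    uri = urlparse(uri_string)
    key = uri.username or os.environ.get('AWS_ACCESS_KEY_ID')
    secret = uri.password or os.environ.get('AWS_SECRET_ACCESS_KEY')
    container = uri.hostname  # the netloc with any user:pass@ prefix stripped
    if not (key and secret and container):
        raise ValueError('need an access key ID, secret access key, and bucket name')
    return key, secret, container

print(resolve_s3_parts('s3://AKIA123:topsecret@my-bucket'))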
jasonrollins/shareplum
shareplum/shareplum.py
https://github.com/jasonrollins/shareplum/blob/404f320808912619920e2d787f2c4387225a14e0/shareplum/shareplum.py#L562-L600
def GetView(self, viewname): """Get Info on View Name """ # Build Request soap_request = soap('GetView') soap_request.add_parameter('listName', self.listName) if viewname == None: views = self.GetViewCollection() for view in views: if 'DefaultView' in view: if views[view]['DefaultView'] == 'TRUE': viewname = view break if self.listName not in ['UserInfo', 'User Information List']: soap_request.add_parameter('viewName', self.views[viewname]['Name'][1:-1]) else: soap_request.add_parameter('viewName', viewname) self.last_request = str(soap_request) # Send Request response = self._session.post(url=self._url('Views'), headers=self._headers('GetView'), data=str(soap_request), verify=self._verify_ssl, timeout=self.timeout) # Parse Response if response.status_code == 200: envelope = etree.fromstring(response.text.encode('utf-8'), parser=etree.XMLParser(huge_tree=self.huge_tree)) view = envelope[0][0][0][0] info = {key: value for (key, value) in view.items()} fields = [x.items()[0][1] for x in view[1]] return {'info': info, 'fields': fields} else: raise Exception("ERROR:", response.status_code, response.text)
[ "def", "GetView", "(", "self", ",", "viewname", ")", ":", "# Build Request", "soap_request", "=", "soap", "(", "'GetView'", ")", "soap_request", ".", "add_parameter", "(", "'listName'", ",", "self", ".", "listName", ")", "if", "viewname", "==", "None", ":", "views", "=", "self", ".", "GetViewCollection", "(", ")", "for", "view", "in", "views", ":", "if", "'DefaultView'", "in", "view", ":", "if", "views", "[", "view", "]", "[", "'DefaultView'", "]", "==", "'TRUE'", ":", "viewname", "=", "view", "break", "if", "self", ".", "listName", "not", "in", "[", "'UserInfo'", ",", "'User Information List'", "]", ":", "soap_request", ".", "add_parameter", "(", "'viewName'", ",", "self", ".", "views", "[", "viewname", "]", "[", "'Name'", "]", "[", "1", ":", "-", "1", "]", ")", "else", ":", "soap_request", ".", "add_parameter", "(", "'viewName'", ",", "viewname", ")", "self", ".", "last_request", "=", "str", "(", "soap_request", ")", "# Send Request", "response", "=", "self", ".", "_session", ".", "post", "(", "url", "=", "self", ".", "_url", "(", "'Views'", ")", ",", "headers", "=", "self", ".", "_headers", "(", "'GetView'", ")", ",", "data", "=", "str", "(", "soap_request", ")", ",", "verify", "=", "self", ".", "_verify_ssl", ",", "timeout", "=", "self", ".", "timeout", ")", "# Parse Response", "if", "response", ".", "status_code", "==", "200", ":", "envelope", "=", "etree", ".", "fromstring", "(", "response", ".", "text", ".", "encode", "(", "'utf-8'", ")", ",", "parser", "=", "etree", ".", "XMLParser", "(", "huge_tree", "=", "self", ".", "huge_tree", ")", ")", "view", "=", "envelope", "[", "0", "]", "[", "0", "]", "[", "0", "]", "[", "0", "]", "info", "=", "{", "key", ":", "value", "for", "(", "key", ",", "value", ")", "in", "view", ".", "items", "(", ")", "}", "fields", "=", "[", "x", ".", "items", "(", ")", "[", "0", "]", "[", "1", "]", "for", "x", "in", "view", "[", "1", "]", "]", "return", "{", "'info'", ":", "info", ",", "'fields'", ":", "fields", "}", "else", ":", "raise", "Exception", "(", "\"ERROR:\"", ",", "response", ".", "status_code", ",", "response", ".", "text", ")" ]
Get Info on View Name
[ "Get", "Info", "on", "View", "Name" ]
python
train
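The response-parsing half of GetView, replayed on a toy envelope with the standard library. The XML layout here is invented; only the envelope[0][0][0][0] drill-down and the info/fields extraction mirror the method above.

import xml.etree.ElementTree as ET

SAMPLE = ('<Envelope><Body><GetViewResponse><GetViewResult>'
          '<View Name="{GUID}" DisplayName="All Items">'
          '<Fields><Field Name="Title"/><Field Name="Modified"/></Fields>'
          '</View></GetViewResult></GetViewResponse></Body></Envelope>')

envelope = ET.fromstring(SAMPLE)
view = envelope[0][0][0][0]                # Body -> Response -> Result -> View
info = dict(view.items())                  # the <View> element's attributes
fields = [f.get('Name') for f in view[0]]  # children of the field list
print(info, fields)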
eventbrite/pysoa
pysoa/server/django/cache.py
https://github.com/eventbrite/pysoa/blob/9c052cae2397d13de3df8ae2c790846a70b53f18/pysoa/server/django/cache.py#L71-L78
def close(self, for_shutdown=False, **_kwargs): """ Only call super().close() if the server is shutting down (not between requests). :param for_shutdown: If `False` (the default) """ if for_shutdown: super(PySOAPyLibMCCache, self).close()
[ "def", "close", "(", "self", ",", "for_shutdown", "=", "False", ",", "*", "*", "_kwargs", ")", ":", "if", "for_shutdown", ":", "super", "(", "PySOAPyLibMCCache", ",", "self", ")", ".", "close", "(", ")" ]
Only call super().close() if the server is shutting down (not between requests). :param for_shutdown: If `False` (the default)
[ "Only", "call", "super", "()", ".", "close", "()", "if", "the", "server", "is", "shutting", "down", "(", "not", "between", "requests", ")", "." ]
python
train
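The shutdown-gated close() pattern in isolation, with made-up class names: the per-request close becomes a no-op, and the backend is only torn down when for_shutdown is passed.

class BackendCache(object):
    def close(self, **kwargs):
        print('backend connections closed')

class RequestScopedCache(BackendCache):
    def close(self, for_shutdown=False, **_kwargs):
        if for_shutdown:
            super(RequestScopedCache, self).close()

cache = RequestScopedCache()
cache.close()                   # end of a request: nothing happens
cache.close(for_shutdown=True)  # real shutdown: prints once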
gitpython-developers/GitPython
git/refs/symbolic.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/refs/symbolic.py#L89-L121
def _iter_packed_refs(cls, repo): """Returns an iterator yielding pairs of sha1/path pairs (as bytes) for the corresponding refs. :note: The packed refs file will be kept open as long as we iterate""" try: with open(cls._get_packed_refs_path(repo), 'rt') as fp: for line in fp: line = line.strip() if not line: continue if line.startswith('#'): # "# pack-refs with: peeled fully-peeled sorted" # the git source code shows "peeled", # "fully-peeled" and "sorted" as the keywords # that can go on this line, as per comments in git file # refs/packed-backend.c # I looked at master on 2017-10-11, # commit 111ef79afe, after tag v2.15.0-rc1 # from repo https://github.com/git/git.git if line.startswith('# pack-refs with:') and 'peeled' not in line: raise TypeError("PackingType of packed-Refs not understood: %r" % line) # END abort if we do not understand the packing scheme continue # END parse comment # skip dereferenced tag object entries - previous line was actual # tag reference for it if line[0] == '^': continue yield tuple(line.split(' ', 1)) # END for each line except (OSError, IOError): return
[ "def", "_iter_packed_refs", "(", "cls", ",", "repo", ")", ":", "try", ":", "with", "open", "(", "cls", ".", "_get_packed_refs_path", "(", "repo", ")", ",", "'rt'", ")", "as", "fp", ":", "for", "line", "in", "fp", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "not", "line", ":", "continue", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "# \"# pack-refs with: peeled fully-peeled sorted\"", "# the git source code shows \"peeled\",", "# \"fully-peeled\" and \"sorted\" as the keywords", "# that can go on this line, as per comments in git file", "# refs/packed-backend.c", "# I looked at master on 2017-10-11,", "# commit 111ef79afe, after tag v2.15.0-rc1", "# from repo https://github.com/git/git.git", "if", "line", ".", "startswith", "(", "'# pack-refs with:'", ")", "and", "'peeled'", "not", "in", "line", ":", "raise", "TypeError", "(", "\"PackingType of packed-Refs not understood: %r\"", "%", "line", ")", "# END abort if we do not understand the packing scheme", "continue", "# END parse comment", "# skip dereferenced tag object entries - previous line was actual", "# tag reference for it", "if", "line", "[", "0", "]", "==", "'^'", ":", "continue", "yield", "tuple", "(", "line", ".", "split", "(", "' '", ",", "1", ")", ")", "# END for each line", "except", "(", "OSError", ",", "IOError", ")", ":", "return" ]
Returns an iterator yielding pairs of sha1/path pairs (as bytes) for the corresponding refs. :note: The packed refs file will be kept open as long as we iterate
[ "Returns", "an", "iterator", "yielding", "pairs", "of", "sha1", "/", "path", "pairs", "(", "as", "bytes", ")", "for", "the", "corresponding", "refs", ".", ":", "note", ":", "The", "packed", "refs", "file", "will", "be", "kept", "open", "as", "long", "as", "we", "iterate" ]
python
train
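The same parsing rules rerun on an inline packed-refs sample: comment lines are checked for the 'peeled' capability, '^' lines (peeled tag objects) are skipped, and the rest split into (sha, ref) pairs. The hashes are fabricated.

import io

SAMPLE = (
    '# pack-refs with: peeled fully-peeled sorted\n'
    'aaaa111122223333444455556666777788889999 refs/heads/master\n'
    'bbbb111122223333444455556666777788889999 refs/tags/v1.0\n'
    '^cccc111122223333444455556666777788889999\n'
)

def iter_packed_refs(fp):
    for line in fp:
        line = line.strip()
        if not line:
            continue
        if line.startswith('#'):
            if line.startswith('# pack-refs with:') and 'peeled' not in line:
                raise TypeError('PackingType of packed-Refs not understood: %r' % line)
            continue
        if line[0] == '^':  # peeled object for the previous tag line
            continue
        yield tuple(line.split(' ', 1))

print(list(iter_packed_refs(io.StringIO(SAMPLE))))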
dixudx/rtcclient
rtcclient/workitem.py
https://github.com/dixudx/rtcclient/blob/1721dd0b047478f5bdd6359b07a2c503cfafd86f/rtcclient/workitem.py#L435-L452
def getIncludedInBuilds(self): """Get all :class:`rtcclient.models.IncludedInBuild` objects that have already included this workitem WARNING: If one of the IncludedInBuilds is removed or cannot be retrieved/found correctly, then 404 error will be raised. :return: a :class:`list` contains all the :class:`rtcclient.models.IncludedInBuild` objects :rtype: list """ build_tag = ("rtc_cm:com.ibm.team.build.linktype.includedWorkItems." "com.ibm.team.build.common.link.includedInBuilds") return self.rtc_obj._get_paged_resources("IncludedInBuild", workitem_id=self.identifier, customized_attr=build_tag, page_size="5")
[ "def", "getIncludedInBuilds", "(", "self", ")", ":", "build_tag", "=", "(", "\"rtc_cm:com.ibm.team.build.linktype.includedWorkItems.\"", "\"com.ibm.team.build.common.link.includedInBuilds\"", ")", "return", "self", ".", "rtc_obj", ".", "_get_paged_resources", "(", "\"IncludedInBuild\"", ",", "workitem_id", "=", "self", ".", "identifier", ",", "customized_attr", "=", "build_tag", ",", "page_size", "=", "\"5\"", ")" ]
Get all :class:`rtcclient.models.IncludedInBuild` objects that have already included this workitem WARNING: If one of the IncludedInBuilds is removed or cannot be retrieved/found correctly, then 404 error will be raised. :return: a :class:`list` contains all the :class:`rtcclient.models.IncludedInBuild` objects :rtype: list
[ "Get", "all", ":", "class", ":", "rtcclient", ".", "models", ".", "IncludedInBuild", "objects", "that", "have", "already", "included", "this", "workitem" ]
python
train
trustar/trustar-python
trustar/utils.py
https://github.com/trustar/trustar-python/blob/707d51adc58d68aed7de12a4ca37949cb75cf122/trustar/utils.py#L117-L138
def parse_boolean(value): """ Coerce a value to boolean. :param value: the value, could be a string, boolean, or None :return: the value as coerced to a boolean """ if value is None: return None if isinstance(value, bool): return value if isinstance(value, string_types): value = value.lower() if value == 'false': return False if value == 'true': return True raise ValueError("Could not convert value to boolean: {}".format(value))
[ "def", "parse_boolean", "(", "value", ")", ":", "if", "value", "is", "None", ":", "return", "None", "if", "isinstance", "(", "value", ",", "bool", ")", ":", "return", "value", "if", "isinstance", "(", "value", ",", "string_types", ")", ":", "value", "=", "value", ".", "lower", "(", ")", "if", "value", "==", "'false'", ":", "return", "False", "if", "value", "==", "'true'", ":", "return", "True", "raise", "ValueError", "(", "\"Could not convert value to boolean: {}\"", ".", "format", "(", "value", ")", ")" ]
Coerce a value to boolean. :param value: the value, could be a string, boolean, or None :return: the value as coerced to a boolean
[ "Coerce", "a", "value", "to", "boolean", "." ]
python
train
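A behaviour table for the coercion above, using a local copy that swaps six.string_types for plain str so the snippet runs on Python 3 alone; note the deliberate tri-state (None passes through untouched).

def parse_boolean(value):
    if value is None:
        return None
    if isinstance(value, bool):
        return value
    if isinstance(value, str) and value.lower() in ('true', 'false'):
        return value.lower() == 'true'
    raise ValueError('Could not convert value to boolean: {}'.format(value))

for value in (None, True, 'true', 'FALSE', 1):
    try:
        print(repr(value), '->', repr(parse_boolean(value)))
    except ValueError as exc:
        print(repr(value), '->', exc)  # 1 is rejected, as in the original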
codelv/enaml-native
src/enamlnative/ios/uikit_activity_indicator.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/ios/uikit_activity_indicator.py#L44-L61
def init_widget(self): """ Initialize the state of the toolkit widget. This method is called during the top-down pass, just after the 'create_widget()' method is called. This method should init the state of the widget. The child widgets will not yet be created. """ super(UiKitActivityIndicator, self).init_widget() d = self.declaration if d.size != 'normal': self.set_size(d.size) if d.color: self.set_color(d.color) #: Why would you want to stop an activity indicator? self.widget.startAnimating()
[ "def", "init_widget", "(", "self", ")", ":", "super", "(", "UiKitActivityIndicator", ",", "self", ")", ".", "init_widget", "(", ")", "d", "=", "self", ".", "declaration", "if", "d", ".", "size", "!=", "'normal'", ":", "self", ".", "set_size", "(", "d", ".", "size", ")", "if", "d", ".", "color", ":", "self", ".", "set_color", "(", "d", ".", "color", ")", "#: Why would you want to stop an activity indicator?", "self", ".", "widget", ".", "startAnimating", "(", ")" ]
Initialize the state of the toolkit widget. This method is called during the top-down pass, just after the 'create_widget()' method is called. This method should init the state of the widget. The child widgets will not yet be created.
[ "Initialize", "the", "state", "of", "the", "toolkit", "widget", "." ]
python
train
mitsei/dlkit
dlkit/json_/grading/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/sessions.py#L5099-L5115
def get_child_gradebook_ids(self, gradebook_id): """Gets the child ``Ids`` of the given gradebook. arg: gradebook_id (osid.id.Id): the ``Id`` to query return: (osid.id.IdList) - the children of the gradebook raise: NotFound - ``gradebook_id`` is not found raise: NullArgument - ``gradebook_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchySession.get_child_bin_ids if self._catalog_session is not None: return self._catalog_session.get_child_catalog_ids(catalog_id=gradebook_id) return self._hierarchy_session.get_children(id_=gradebook_id)
[ "def", "get_child_gradebook_ids", "(", "self", ",", "gradebook_id", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchySession.get_child_bin_ids", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "get_child_catalog_ids", "(", "catalog_id", "=", "gradebook_id", ")", "return", "self", ".", "_hierarchy_session", ".", "get_children", "(", "id_", "=", "gradebook_id", ")" ]
Gets the child ``Ids`` of the given gradebook. arg: gradebook_id (osid.id.Id): the ``Id`` to query return: (osid.id.IdList) - the children of the gradebook raise: NotFound - ``gradebook_id`` is not found raise: NullArgument - ``gradebook_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "child", "Ids", "of", "the", "given", "gradebook", "." ]
python
train
annoviko/pyclustering
pyclustering/container/cftree.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/container/cftree.py#L544-L570
def get_farthest_successors(self, type_measurement): """! @brief Find pair of farthest successors of the node in line with measurement type. @param[in] type_measurement (measurement_type): Measurement type that is used for obtaining farthest successors. @return (list) Pair of farthest successors represented by list [cfnode1, cfnode2]. """ farthest_node1 = None; farthest_node2 = None; farthest_distance = 0; for i in range(0, len(self.successors)): candidate1 = self.successors[i]; for j in range(i + 1, len(self.successors)): candidate2 = self.successors[j]; candidate_distance = candidate1.get_distance(candidate2, type_measurement); if (candidate_distance > farthest_distance): farthest_distance = candidate_distance; farthest_node1 = candidate1; farthest_node2 = candidate2; return [farthest_node1, farthest_node2];
[ "def", "get_farthest_successors", "(", "self", ",", "type_measurement", ")", ":", "farthest_node1", "=", "None", "farthest_node2", "=", "None", "farthest_distance", "=", "0", "for", "i", "in", "range", "(", "0", ",", "len", "(", "self", ".", "successors", ")", ")", ":", "candidate1", "=", "self", ".", "successors", "[", "i", "]", "for", "j", "in", "range", "(", "i", "+", "1", ",", "len", "(", "self", ".", "successors", ")", ")", ":", "candidate2", "=", "self", ".", "successors", "[", "j", "]", "candidate_distance", "=", "candidate1", ".", "get_distance", "(", "candidate2", ",", "type_measurement", ")", "if", "(", "candidate_distance", ">", "farthest_distance", ")", ":", "farthest_distance", "=", "candidate_distance", "farthest_node1", "=", "candidate1", "farthest_node2", "=", "candidate2", "return", "[", "farthest_node1", ",", "farthest_node2", "]" ]
! @brief Find pair of farthest successors of the node in line with measurement type. @param[in] type_measurement (measurement_type): Measurement type that is used for obtaining farthest successors. @return (list) Pair of farthest successors represented by list [cfnode1, cfnode2].
[ "!" ]
python
valid
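The same O(n^2) farthest-pair scan, restated over plain 2-D points with a Euclidean metric standing in for the CF-tree distance measures; the data is invented.

import itertools
import math

def farthest_pair(points):
    best, best_dist = (None, None), 0.0
    for p, q in itertools.combinations(points, 2):
        d = math.dist(p, q)  # Python 3.8+
        if d > best_dist:
            best, best_dist = (p, q), d
    return best

print(farthest_pair([(0, 0), (1, 1), (5, 2), (-3, 4)]))  # ((5, 2), (-3, 4))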
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_notification_stream.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_notification_stream.py#L528-L537
def RIBVRFRouteLimitExceeded_VRFName(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") RIBVRFRouteLimitExceeded = ET.SubElement(config, "RIBVRFRouteLimitExceeded", xmlns="http://brocade.com/ns/brocade-notification-stream") VRFName = ET.SubElement(RIBVRFRouteLimitExceeded, "VRFName") VRFName.text = kwargs.pop('VRFName') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "RIBVRFRouteLimitExceeded_VRFName", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "RIBVRFRouteLimitExceeded", "=", "ET", ".", "SubElement", "(", "config", ",", "\"RIBVRFRouteLimitExceeded\"", ",", "xmlns", "=", "\"http://brocade.com/ns/brocade-notification-stream\"", ")", "VRFName", "=", "ET", ".", "SubElement", "(", "RIBVRFRouteLimitExceeded", ",", "\"VRFName\"", ")", "VRFName", ".", "text", "=", "kwargs", ".", "pop", "(", "'VRFName'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
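What the generated method emits, reproduced with the standard library and no callback plumbing; the VRF name is a stand-in. ElementTree treats the xmlns keyword as a literal attribute, matching the original.

import xml.etree.ElementTree as ET

config = ET.Element('config')
exceeded = ET.SubElement(
    config, 'RIBVRFRouteLimitExceeded',
    xmlns='http://brocade.com/ns/brocade-notification-stream')
vrf = ET.SubElement(exceeded, 'VRFName')
vrf.text = 'mgmt-vrf'  # stand-in value
print(ET.tostring(config, encoding='unicode'))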
log2timeline/plaso
plaso/cli/storage_media_tool.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/storage_media_tool.py#L379-L392
def _ParseSourcePathOption(self, options): """Parses the source path option. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid. """ self._source_path = self.ParseStringOption(options, self._SOURCE_OPTION) if not self._source_path: raise errors.BadConfigOption('Missing source path.') self._source_path = os.path.abspath(self._source_path)
[ "def", "_ParseSourcePathOption", "(", "self", ",", "options", ")", ":", "self", ".", "_source_path", "=", "self", ".", "ParseStringOption", "(", "options", ",", "self", ".", "_SOURCE_OPTION", ")", "if", "not", "self", ".", "_source_path", ":", "raise", "errors", ".", "BadConfigOption", "(", "'Missing source path.'", ")", "self", ".", "_source_path", "=", "os", ".", "path", ".", "abspath", "(", "self", ".", "_source_path", ")" ]
Parses the source path option. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
[ "Parses", "the", "source", "path", "option", "." ]
python
train
clinicedc/edc-permissions
edc_permissions/utils/generic.py
https://github.com/clinicedc/edc-permissions/blob/d1aee39a8ddaf4b7741d9306139ddd03625d4e1a/edc_permissions/utils/generic.py#L108-L124
def create_permissions_from_tuples(model, codename_tpls): """Creates custom permissions on model "model". """ if codename_tpls: model_cls = django_apps.get_model(model) content_type = ContentType.objects.get_for_model(model_cls) for codename_tpl in codename_tpls: app_label, codename, name = get_from_codename_tuple( codename_tpl, model_cls._meta.app_label ) try: Permission.objects.get(codename=codename, content_type=content_type) except ObjectDoesNotExist: Permission.objects.create( name=name, codename=codename, content_type=content_type ) verify_codename_exists(f"{app_label}.{codename}")
[ "def", "create_permissions_from_tuples", "(", "model", ",", "codename_tpls", ")", ":", "if", "codename_tpls", ":", "model_cls", "=", "django_apps", ".", "get_model", "(", "model", ")", "content_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "model_cls", ")", "for", "codename_tpl", "in", "codename_tpls", ":", "app_label", ",", "codename", ",", "name", "=", "get_from_codename_tuple", "(", "codename_tpl", ",", "model_cls", ".", "_meta", ".", "app_label", ")", "try", ":", "Permission", ".", "objects", ".", "get", "(", "codename", "=", "codename", ",", "content_type", "=", "content_type", ")", "except", "ObjectDoesNotExist", ":", "Permission", ".", "objects", ".", "create", "(", "name", "=", "name", ",", "codename", "=", "codename", ",", "content_type", "=", "content_type", ")", "verify_codename_exists", "(", "f\"{app_label}.{codename}\"", ")" ]
Creates custom permissions on model "model".
[ "Creates", "custom", "permissions", "on", "model", "model", "." ]
python
train
google/grr
grr/server/grr_response_server/flows/cron/system.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/flows/cron/system.py#L70-L90
def Add(self, category, label, age): """Adds another instance of this category into the active_days counter. We automatically count the event towards all relevant active_days. For example, if the category "Windows" was seen 8 days ago it will be counted towards the 30 day active, 14 day active but not against the 7 and 1 day actives. Args: category: The category name to account this instance against. label: Client label to which this should be applied. age: When this instance occurred. """ now = rdfvalue.RDFDatetime.Now() category = utils.SmartUnicode(category) for active_time in self.active_days: self.categories[active_time].setdefault(label, {}) if (now - age).seconds < active_time * 24 * 60 * 60: self.categories[active_time][label][ category] = self.categories[active_time][label].get(category, 0) + 1
[ "def", "Add", "(", "self", ",", "category", ",", "label", ",", "age", ")", ":", "now", "=", "rdfvalue", ".", "RDFDatetime", ".", "Now", "(", ")", "category", "=", "utils", ".", "SmartUnicode", "(", "category", ")", "for", "active_time", "in", "self", ".", "active_days", ":", "self", ".", "categories", "[", "active_time", "]", ".", "setdefault", "(", "label", ",", "{", "}", ")", "if", "(", "now", "-", "age", ")", ".", "seconds", "<", "active_time", "*", "24", "*", "60", "*", "60", ":", "self", ".", "categories", "[", "active_time", "]", "[", "label", "]", "[", "category", "]", "=", "self", ".", "categories", "[", "active_time", "]", "[", "label", "]", ".", "get", "(", "category", ",", "0", ")", "+", "1" ]
Adds another instance of this category into the active_days counter. We automatically count the event towards all relevant active_days. For example, if the category "Windows" was seen 8 days ago it will be counted towards the 30 day active, 14 day active but not against the 7 and 1 day actives. Args: category: The category name to account this instance against. label: Client label to which this should be applied. age: When this instance occurred.
[ "Adds", "another", "instance", "of", "this", "category", "into", "the", "active_days", "counter", "." ]
python
train
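The bucketing rule in isolation: an event N days old counts toward every window of at least N days. The windows mirror the 1/7/14/30-day example in the docstring; everything else is invented.

from datetime import datetime, timedelta

active_days = (1, 7, 14, 30)
categories = {w: {} for w in active_days}
now = datetime.now()

def add(category, label, occurred_at):
    for window in active_days:
        bucket = categories[window].setdefault(label, {})
        if now - occurred_at < timedelta(days=window):
            bucket[category] = bucket.get(category, 0) + 1

add('Windows', 'all', now - timedelta(days=8))  # counts for 14 and 30 only
print({w: categories[w]['all'] for w in active_days})
# {1: {}, 7: {}, 14: {'Windows': 1}, 30: {'Windows': 1}}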
danilobellini/audiolazy
audiolazy/lazy_compat.py
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_compat.py#L85-L126
def meta(*bases, **kwargs): """ Allows unique syntax similar to Python 3 for working with metaclasses in both Python 2 and Python 3. Examples -------- >>> class BadMeta(type): # An usual metaclass definition ... def __new__(mcls, name, bases, namespace): ... if "bad" not in namespace: # A bad constraint ... raise Exception("Oops, not bad enough") ... value = len(name) # To ensure this metaclass is called again ... def really_bad(self): ... return self.bad() * value ... namespace["really_bad"] = really_bad ... return super(BadMeta, mcls).__new__(mcls, name, bases, namespace) ... >>> class Bady(meta(object, metaclass=BadMeta)): ... def bad(self): ... return "HUA " ... >>> class BadGuy(Bady): ... def bad(self): ... return "R" ... >>> issubclass(BadGuy, Bady) True >>> Bady().really_bad() # Here value = 4 'HUA HUA HUA HUA ' >>> BadGuy().really_bad() # Called metaclass ``__new__`` again, so value = 6 'RRRRRR' """ metaclass = kwargs.get("metaclass", type) if not bases: bases = (object,) class NewMeta(type): def __new__(mcls, name, mbases, namespace): if name: return metaclass.__new__(metaclass, name, bases, namespace) return super(NewMeta, mcls).__new__(mcls, "", mbases, {}) return NewMeta("", tuple(), {})
[ "def", "meta", "(", "*", "bases", ",", "*", "*", "kwargs", ")", ":", "metaclass", "=", "kwargs", ".", "get", "(", "\"metaclass\"", ",", "type", ")", "if", "not", "bases", ":", "bases", "=", "(", "object", ",", ")", "class", "NewMeta", "(", "type", ")", ":", "def", "__new__", "(", "mcls", ",", "name", ",", "mbases", ",", "namespace", ")", ":", "if", "name", ":", "return", "metaclass", ".", "__new__", "(", "metaclass", ",", "name", ",", "bases", ",", "namespace", ")", "return", "super", "(", "NewMeta", ",", "mcls", ")", ".", "__new__", "(", "mcls", ",", "\"\"", ",", "mbases", ",", "{", "}", ")", "return", "NewMeta", "(", "\"\"", ",", "tuple", "(", ")", ",", "{", "}", ")" ]
Allows unique syntax similar to Python 3 for working with metaclasses in both Python 2 and Python 3. Examples -------- >>> class BadMeta(type): # An usual metaclass definition ... def __new__(mcls, name, bases, namespace): ... if "bad" not in namespace: # A bad constraint ... raise Exception("Oops, not bad enough") ... value = len(name) # To ensure this metaclass is called again ... def really_bad(self): ... return self.bad() * value ... namespace["really_bad"] = really_bad ... return super(BadMeta, mcls).__new__(mcls, name, bases, namespace) ... >>> class Bady(meta(object, metaclass=BadMeta)): ... def bad(self): ... return "HUA " ... >>> class BadGuy(Bady): ... def bad(self): ... return "R" ... >>> issubclass(BadGuy, Bady) True >>> Bady().really_bad() # Here value = 4 'HUA HUA HUA HUA ' >>> BadGuy().really_bad() # Called metaclass ``__new__`` again, so value = 6 'RRRRRR'
[ "Allows", "unique", "syntax", "similar", "to", "Python", "3", "for", "working", "with", "metaclasses", "in", "both", "Python", "2", "and", "Python", "3", "." ]
python
train
user-cont/colin
colin/core/target.py
https://github.com/user-cont/colin/blob/00bb80e6e91522e15361935f813e8cf13d7e76dc/colin/core/target.py#L360-L366
def mount_point(self): """ ostree checkout -- real filesystem """ if self._mount_point is None: self._mount_point = os.path.join(self.tmpdir, "checkout") os.makedirs(self._mount_point) self._checkout() return self._mount_point
[ "def", "mount_point", "(", "self", ")", ":", "if", "self", ".", "_mount_point", "is", "None", ":", "self", ".", "_mount_point", "=", "os", ".", "path", ".", "join", "(", "self", ".", "tmpdir", ",", "\"checkout\"", ")", "os", ".", "makedirs", "(", "self", ".", "_mount_point", ")", "self", ".", "_checkout", "(", ")", "return", "self", ".", "_mount_point" ]
ostree checkout -- real filesystem
[ "ostree", "checkout", "--", "real", "filesystem" ]
python
train
mbedmicro/pyOCD
pyocd/coresight/cortex_m.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/coresight/cortex_m.py#L713-L792
def _perform_emulated_reset(self): """! @brief Emulate a software reset by writing registers. All core registers are written to reset values. This includes setting the initial PC and SP to values read from the vector table, which is assumed to be located at the based of the boot memory region. If the memory map does not provide a boot region, then the current value of the VTOR register is reused, as it should at least point to a valid vector table. The current value of DEMCR.VC_CORERESET determines whether the core will be resumed or left halted. Note that this reset method will not set DHCSR.S_RESET_ST or DFSR.VCATCH. """ # Halt the core before making changes. self.halt() bootMemory = self.memory_map.get_boot_memory() if bootMemory is None: # Reuse current VTOR value if we don't know the boot memory region. vectorBase = self.read32(self.VTOR) else: vectorBase = bootMemory.start # Read initial SP and PC. initialSp = self.read32(vectorBase) initialPc = self.read32(vectorBase + 4) # Init core registers. regList = ['r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10', 'r11', 'r12', 'psp', 'msp', 'lr', 'pc', 'xpsr', 'cfbp'] valueList = [0] * 13 + \ [ 0, # PSP initialSp, # MSP 0xffffffff, # LR initialPc, # PC 0x01000000, # XPSR 0, # CFBP ] if self.has_fpu: regList += [('s%d' % n) for n in range(32)] + ['fpscr'] valueList += [0] * 33 self.write_core_registers_raw(regList, valueList) # "Reset" SCS registers. data = [ (self.ICSR_PENDSVCLR | self.ICSR_PENDSTCLR), # ICSR vectorBase, # VTOR (self.NVIC_AIRCR_VECTKEY | self.NVIC_AIRCR_VECTCLRACTIVE), # AIRCR 0, # SCR 0, # CCR 0, # SHPR1 0, # SHPR2 0, # SHPR3 0, # SHCSR 0, # CFSR ] self.write_memory_block32(self.ICSR, data) self.write32(self.CPACR, 0) if self.has_fpu: data = [ 0, # FPCCR 0, # FPCAR 0, # FPDSCR ] self.write_memory_block32(self.FPCCR, data) # "Reset" SysTick. self.write_memory_block32(self.SYSTICK_CSR, [0] * 3) # "Reset" NVIC registers. numregs = (self.read32(self.ICTR) & 0xf) + 1 self.write_memory_block32(self.NVIC_ICER0, [0xffffffff] * numregs) self.write_memory_block32(self.NVIC_ICPR0, [0xffffffff] * numregs) self.write_memory_block32(self.NVIC_IPR0, [0xffffffff] * (numregs * 8))
[ "def", "_perform_emulated_reset", "(", "self", ")", ":", "# Halt the core before making changes.", "self", ".", "halt", "(", ")", "bootMemory", "=", "self", ".", "memory_map", ".", "get_boot_memory", "(", ")", "if", "bootMemory", "is", "None", ":", "# Reuse current VTOR value if we don't know the boot memory region.", "vectorBase", "=", "self", ".", "read32", "(", "self", ".", "VTOR", ")", "else", ":", "vectorBase", "=", "bootMemory", ".", "start", "# Read initial SP and PC.", "initialSp", "=", "self", ".", "read32", "(", "vectorBase", ")", "initialPc", "=", "self", ".", "read32", "(", "vectorBase", "+", "4", ")", "# Init core registers.", "regList", "=", "[", "'r0'", ",", "'r1'", ",", "'r2'", ",", "'r3'", ",", "'r4'", ",", "'r5'", ",", "'r6'", ",", "'r7'", ",", "'r8'", ",", "'r9'", ",", "'r10'", ",", "'r11'", ",", "'r12'", ",", "'psp'", ",", "'msp'", ",", "'lr'", ",", "'pc'", ",", "'xpsr'", ",", "'cfbp'", "]", "valueList", "=", "[", "0", "]", "*", "13", "+", "[", "0", ",", "# PSP", "initialSp", ",", "# MSP", "0xffffffff", ",", "# LR", "initialPc", ",", "# PC", "0x01000000", ",", "# XPSR", "0", ",", "# CFBP", "]", "if", "self", ".", "has_fpu", ":", "regList", "+=", "[", "(", "'s%d'", "%", "n", ")", "for", "n", "in", "range", "(", "32", ")", "]", "+", "[", "'fpscr'", "]", "valueList", "+=", "[", "0", "]", "*", "33", "self", ".", "write_core_registers_raw", "(", "regList", ",", "valueList", ")", "# \"Reset\" SCS registers.", "data", "=", "[", "(", "self", ".", "ICSR_PENDSVCLR", "|", "self", ".", "ICSR_PENDSTCLR", ")", ",", "# ICSR", "vectorBase", ",", "# VTOR", "(", "self", ".", "NVIC_AIRCR_VECTKEY", "|", "self", ".", "NVIC_AIRCR_VECTCLRACTIVE", ")", ",", "# AIRCR", "0", ",", "# SCR", "0", ",", "# CCR", "0", ",", "# SHPR1", "0", ",", "# SHPR2", "0", ",", "# SHPR3", "0", ",", "# SHCSR", "0", ",", "# CFSR", "]", "self", ".", "write_memory_block32", "(", "self", ".", "ICSR", ",", "data", ")", "self", ".", "write32", "(", "self", ".", "CPACR", ",", "0", ")", "if", "self", ".", "has_fpu", ":", "data", "=", "[", "0", ",", "# FPCCR", "0", ",", "# FPCAR", "0", ",", "# FPDSCR", "]", "self", ".", "write_memory_block32", "(", "self", ".", "FPCCR", ",", "data", ")", "# \"Reset\" SysTick.", "self", ".", "write_memory_block32", "(", "self", ".", "SYSTICK_CSR", ",", "[", "0", "]", "*", "3", ")", "# \"Reset\" NVIC registers.", "numregs", "=", "(", "self", ".", "read32", "(", "self", ".", "ICTR", ")", "&", "0xf", ")", "+", "1", "self", ".", "write_memory_block32", "(", "self", ".", "NVIC_ICER0", ",", "[", "0xffffffff", "]", "*", "numregs", ")", "self", ".", "write_memory_block32", "(", "self", ".", "NVIC_ICPR0", ",", "[", "0xffffffff", "]", "*", "numregs", ")", "self", ".", "write_memory_block32", "(", "self", ".", "NVIC_IPR0", ",", "[", "0xffffffff", "]", "*", "(", "numregs", "*", "8", ")", ")" ]
! @brief Emulate a software reset by writing registers. All core registers are written to reset values. This includes setting the initial PC and SP to values read from the vector table, which is assumed to be located at the based of the boot memory region. If the memory map does not provide a boot region, then the current value of the VTOR register is reused, as it should at least point to a valid vector table. The current value of DEMCR.VC_CORERESET determines whether the core will be resumed or left halted. Note that this reset method will not set DHCSR.S_RESET_ST or DFSR.VCATCH.
[ "!" ]
python
train
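The vector-table step above shown on raw bytes: word 0 holds the initial MSP and word 1 the reset handler (initial PC). The two values are made up.

import struct

vector_table = struct.pack('<2I', 0x20008000, 0x000004C1)
initial_sp, initial_pc = struct.unpack_from('<2I', vector_table, 0)
print(hex(initial_sp), hex(initial_pc))  # 0x20008000 0x4c1 (Thumb bit set)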
Azure/blobxfer
blobxfer/util.py
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/util.py#L340-L364
def parse_fileshare_or_file_snapshot_parameter(url): # type: (str) -> Tuple[str, str] """Checks if the fileshare or file is a snapshot :param url str: file url :rtype: tuple :return: (url, snapshot) """ if is_not_empty(url): if '?sharesnapshot=' in url: try: tmp = url.split('?sharesnapshot=') if len(tmp) == 2: dateutil.parser.parse(tmp[1]) return tmp[0], tmp[1] except (ValueError, OverflowError): pass elif '?snapshot=' in url: try: tmp = url.split('?snapshot=') if len(tmp) == 2: dateutil.parser.parse(tmp[1]) return tmp[0], tmp[1] except (ValueError, OverflowError): pass return url, None
[ "def", "parse_fileshare_or_file_snapshot_parameter", "(", "url", ")", ":", "# type: (str) -> Tuple[str, str]", "if", "is_not_empty", "(", "url", ")", ":", "if", "'?sharesnapshot='", "in", "url", ":", "try", ":", "tmp", "=", "url", ".", "split", "(", "'?sharesnapshot='", ")", "if", "len", "(", "tmp", ")", "==", "2", ":", "dateutil", ".", "parser", ".", "parse", "(", "tmp", "[", "1", "]", ")", "return", "tmp", "[", "0", "]", ",", "tmp", "[", "1", "]", "except", "(", "ValueError", ",", "OverflowError", ")", ":", "pass", "elif", "'?snapshot='", "in", "url", ":", "try", ":", "tmp", "=", "url", ".", "split", "(", "'?snapshot='", ")", "if", "len", "(", "tmp", ")", "==", "2", ":", "dateutil", ".", "parser", ".", "parse", "(", "tmp", "[", "1", "]", ")", "return", "tmp", "[", "0", "]", ",", "tmp", "[", "1", "]", "except", "(", "ValueError", ",", "OverflowError", ")", ":", "pass", "return", "url", ",", "None" ]
Checks if the fileshare or file is a snapshot :param url str: file url :rtype: tuple :return: (url, snapshot)
[ "Checks", "if", "the", "fileshare", "or", "file", "is", "a", "snapshot", ":", "param", "url", "str", ":", "file", "url", ":", "rtype", ":", "tuple", ":", "return", ":", "(", "url", "snapshot", ")" ]
python
train
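The same split-and-validate idea with only the standard library; datetime.fromisoformat stands in for dateutil's looser parser, and the URL is fabricated.

from datetime import datetime

def split_snapshot(url, marker='?snapshot='):
    if url and marker in url:
        base, _, stamp = url.partition(marker)
        try:
            datetime.fromisoformat(stamp)  # reject non-timestamp suffixes
            return base, stamp
        except ValueError:
            pass
    return url, None

print(split_snapshot(
    'https://acct.file.core.windows.net/share/f?snapshot=2021-01-02T03:04:05'))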
fmfn/BayesianOptimization
bayes_opt/target_space.py
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/target_space.py#L221-L232
def max(self):
        """Get maximum target value found and corresponding parameters."""
        try:
            res = {
                'target': self.target.max(),
                'params': dict(
                    zip(self.keys, self.params[self.target.argmax()])
                )
            }
        except ValueError:
            res = {}
        return res
[ "def", "max", "(", "self", ")", ":", "try", ":", "res", "=", "{", "'target'", ":", "self", ".", "target", ".", "max", "(", ")", ",", "'params'", ":", "dict", "(", "zip", "(", "self", ".", "keys", ",", "self", ".", "params", "[", "self", ".", "target", ".", "argmax", "(", ")", "]", ")", ")", "}", "except", "ValueError", ":", "res", "=", "{", "}", "return", "res" ]
Get maximum target value found and corresponding parameters.
[ "Get", "maximum", "target", "value", "found", "and", "corresponding", "parameters", "." ]
python
train
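The argmax lookup above on plain numpy arrays; the except ValueError arm in the original covers the empty-space case, since argmax of an empty array raises. Data invented.

import numpy as np

keys = ['x', 'y']
params = np.array([[0.1, 0.2], [0.7, 0.4], [0.3, 0.9]])
target = np.array([1.0, 3.5, 2.0])

best = {'target': target.max(),
        'params': dict(zip(keys, params[target.argmax()]))}
print(best)  # best target 3.5 with params x=0.7, y=0.4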
xtuml/pyxtuml
bridgepoint/oal.py
https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/bridgepoint/oal.py#L1507-L1512
def p_unrelate_statement_1(self, p): '''statement : UNRELATE instance_name FROM instance_name ACROSS rel_id''' p[0] = UnrelateNode(from_variable_name=p[2], to_variable_name=p[4], rel_id=p[6], phrase=None)
[ "def", "p_unrelate_statement_1", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "UnrelateNode", "(", "from_variable_name", "=", "p", "[", "2", "]", ",", "to_variable_name", "=", "p", "[", "4", "]", ",", "rel_id", "=", "p", "[", "6", "]", ",", "phrase", "=", "None", ")" ]
statement : UNRELATE instance_name FROM instance_name ACROSS rel_id
[ "statement", ":", "UNRELATE", "instance_name", "FROM", "instance_name", "ACROSS", "rel_id" ]
python
test
SatelliteQE/nailgun
nailgun/entities.py
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entities.py#L5220-L5227
def create_payload(self): """Remove ``smart_class_parameter_id`` or ``smart_variable_id``""" payload = super(OverrideValue, self).create_payload() if hasattr(self, 'smart_class_parameter'): del payload['smart_class_parameter_id'] if hasattr(self, 'smart_variable'): del payload['smart_variable_id'] return payload
[ "def", "create_payload", "(", "self", ")", ":", "payload", "=", "super", "(", "OverrideValue", ",", "self", ")", ".", "create_payload", "(", ")", "if", "hasattr", "(", "self", ",", "'smart_class_parameter'", ")", ":", "del", "payload", "[", "'smart_class_parameter_id'", "]", "if", "hasattr", "(", "self", ",", "'smart_variable'", ")", ":", "del", "payload", "[", "'smart_variable_id'", "]", "return", "payload" ]
Remove ``smart_class_parameter_id`` or ``smart_variable_id``
[ "Remove", "smart_class_parameter_id", "or", "smart_variable_id" ]
python
train
marccarre/py_sak
py_sak/validation.py
https://github.com/marccarre/py_sak/blob/8ad4cafbd725d2700a31b50526804c0330d828dd/py_sak/validation.py#L47-L49
def is_valid_dir(path): ''' Returns True if provided directory exists and is a directory, or False otherwise. ''' return os.path.exists(path) and os.path.isdir(path)
[ "def", "is_valid_dir", "(", "path", ")", ":", "return", "os", ".", "path", ".", "exists", "(", "path", ")", "and", "os", ".", "path", ".", "isdir", "(", "path", ")" ]
Returns True if provided directory exists and is a directory, or False otherwise.
[ "Returns", "True", "if", "provided", "directory", "exists", "and", "is", "a", "directory", "or", "False", "otherwise", "." ]
python
train
d0c-s4vage/pfp
pfp/fields.py
https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/fields.py#L627-L640
def _pfp__set_value(self, value): """Initialize the struct. Value should be an array of fields, one each for each struct member. :value: An array of fields to initialize the struct with :returns: None """ if self._pfp__frozen: raise errors.UnmodifiableConst() if len(value) != len(self._pfp__children): raise errors.PfpError("struct initialization has wrong number of members") for x in six.moves.range(len(self._pfp__children)): self._pfp__children[x]._pfp__set_value(value[x])
[ "def", "_pfp__set_value", "(", "self", ",", "value", ")", ":", "if", "self", ".", "_pfp__frozen", ":", "raise", "errors", ".", "UnmodifiableConst", "(", ")", "if", "len", "(", "value", ")", "!=", "len", "(", "self", ".", "_pfp__children", ")", ":", "raise", "errors", ".", "PfpError", "(", "\"struct initialization has wrong number of members\"", ")", "for", "x", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "self", ".", "_pfp__children", ")", ")", ":", "self", ".", "_pfp__children", "[", "x", "]", ".", "_pfp__set_value", "(", "value", "[", "x", "]", ")" ]
Initialize the struct. Value should be an array of fields, one each for each struct member. :value: An array of fields to initialize the struct with :returns: None
[ "Initialize", "the", "struct", ".", "Value", "should", "be", "an", "array", "of", "fields", "one", "each", "for", "each", "struct", "member", "." ]
python
train
saltstack/salt
salt/utils/state.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/state.py#L27-L77
def search_onfail_requisites(sid, highstate): ''' For a particular low chunk, search relevant onfail related states ''' onfails = [] if '_|-' in sid: st = salt.state.split_low_tag(sid) else: st = {'__id__': sid} for fstate, fchunks in six.iteritems(highstate): if fstate == st['__id__']: continue else: for mod_, fchunk in six.iteritems(fchunks): if ( not isinstance(mod_, six.string_types) or mod_.startswith('__') ): continue else: if not isinstance(fchunk, list): continue else: # bydefault onfail will fail, but you can # set onfail_stop: False to prevent the highstate # to stop if you handle it onfail_handled = False for fdata in fchunk: if not isinstance(fdata, dict): continue onfail_handled = (fdata.get('onfail_stop', True) is False) if onfail_handled: break if not onfail_handled: continue for fdata in fchunk: if not isinstance(fdata, dict): continue for knob, fvalue in six.iteritems(fdata): if knob != 'onfail': continue for freqs in fvalue: for fmod, fid in six.iteritems(freqs): if not ( fid == st['__id__'] and fmod == st.get('state', fmod) ): continue onfails.append((fstate, mod_, fchunk)) return onfails
[ "def", "search_onfail_requisites", "(", "sid", ",", "highstate", ")", ":", "onfails", "=", "[", "]", "if", "'_|-'", "in", "sid", ":", "st", "=", "salt", ".", "state", ".", "split_low_tag", "(", "sid", ")", "else", ":", "st", "=", "{", "'__id__'", ":", "sid", "}", "for", "fstate", ",", "fchunks", "in", "six", ".", "iteritems", "(", "highstate", ")", ":", "if", "fstate", "==", "st", "[", "'__id__'", "]", ":", "continue", "else", ":", "for", "mod_", ",", "fchunk", "in", "six", ".", "iteritems", "(", "fchunks", ")", ":", "if", "(", "not", "isinstance", "(", "mod_", ",", "six", ".", "string_types", ")", "or", "mod_", ".", "startswith", "(", "'__'", ")", ")", ":", "continue", "else", ":", "if", "not", "isinstance", "(", "fchunk", ",", "list", ")", ":", "continue", "else", ":", "# bydefault onfail will fail, but you can", "# set onfail_stop: False to prevent the highstate", "# to stop if you handle it", "onfail_handled", "=", "False", "for", "fdata", "in", "fchunk", ":", "if", "not", "isinstance", "(", "fdata", ",", "dict", ")", ":", "continue", "onfail_handled", "=", "(", "fdata", ".", "get", "(", "'onfail_stop'", ",", "True", ")", "is", "False", ")", "if", "onfail_handled", ":", "break", "if", "not", "onfail_handled", ":", "continue", "for", "fdata", "in", "fchunk", ":", "if", "not", "isinstance", "(", "fdata", ",", "dict", ")", ":", "continue", "for", "knob", ",", "fvalue", "in", "six", ".", "iteritems", "(", "fdata", ")", ":", "if", "knob", "!=", "'onfail'", ":", "continue", "for", "freqs", "in", "fvalue", ":", "for", "fmod", ",", "fid", "in", "six", ".", "iteritems", "(", "freqs", ")", ":", "if", "not", "(", "fid", "==", "st", "[", "'__id__'", "]", "and", "fmod", "==", "st", ".", "get", "(", "'state'", ",", "fmod", ")", ")", ":", "continue", "onfails", ".", "append", "(", "(", "fstate", ",", "mod_", ",", "fchunk", ")", ")", "return", "onfails" ]
For a particular low chunk, search relevant onfail related states
[ "For", "a", "particular", "low", "chunk", "search", "relevant", "onfail", "related", "states" ]
python
train
goerz/clusterjob
clusterjob/backends/sge.py
https://github.com/goerz/clusterjob/blob/361760d1a6dd3cbde49c5c2158a3acd0c314a749/clusterjob/backends/sge.py#L119-L155
def resource_headers(self, jobscript): """Given a :class:`~clusterjob.JobScript` instance, return a list of lines that encode the resource requirements, to be added at the top of the rendered job script """ lines = [] for (key, val) in jobscript.resources.items(): if key in self.resource_replacements: pbs_key = self.resource_replacements[key] if key == 'mem': val = str(val) + "m" else: pbs_key = key if key in ['nodes', 'threads', 'ppn']: raise ResourcesNotSupportedError("The SGE scheduling system " "uses 'parallel environments' to request resources " "for parallelization. SgeBackend should be subclassed " "for a specific cluster configuration in order to " "encode 'nodes', 'threads', and 'ppn'.") if key in ['-cwd', 'cwd']: continue if val is None: continue if type(val) is bool: if val: if not pbs_key.startswith('-'): pbs_key = '-' + pbs_key lines.append("%s %s" % (self.prefix, pbs_key)) else: if not pbs_key.startswith('-'): pbs_key = '-l %s=' % pbs_key if pbs_key.endswith('='): lines.append('%s %s%s' % (self.prefix, pbs_key, str(val))) else: lines.append('%s %s %s' % (self.prefix, pbs_key, str(val))) lines.append("%s -cwd" % self.prefix) return lines
[ "def", "resource_headers", "(", "self", ",", "jobscript", ")", ":", "lines", "=", "[", "]", "for", "(", "key", ",", "val", ")", "in", "jobscript", ".", "resources", ".", "items", "(", ")", ":", "if", "key", "in", "self", ".", "resource_replacements", ":", "pbs_key", "=", "self", ".", "resource_replacements", "[", "key", "]", "if", "key", "==", "'mem'", ":", "val", "=", "str", "(", "val", ")", "+", "\"m\"", "else", ":", "pbs_key", "=", "key", "if", "key", "in", "[", "'nodes'", ",", "'threads'", ",", "'ppn'", "]", ":", "raise", "ResourcesNotSupportedError", "(", "\"The SGE scheduling system \"", "\"uses 'parallel environments' to request resources \"", "\"for parallelization. SgeBackend should be subclassed \"", "\"for a specific cluster configuration in order to \"", "\"encode 'nodes', 'threads', and 'ppn'.\"", ")", "if", "key", "in", "[", "'-cwd'", ",", "'cwd'", "]", ":", "continue", "if", "val", "is", "None", ":", "continue", "if", "type", "(", "val", ")", "is", "bool", ":", "if", "val", ":", "if", "not", "pbs_key", ".", "startswith", "(", "'-'", ")", ":", "pbs_key", "=", "'-'", "+", "pbs_key", "lines", ".", "append", "(", "\"%s %s\"", "%", "(", "self", ".", "prefix", ",", "pbs_key", ")", ")", "else", ":", "if", "not", "pbs_key", ".", "startswith", "(", "'-'", ")", ":", "pbs_key", "=", "'-l %s='", "%", "pbs_key", "if", "pbs_key", ".", "endswith", "(", "'='", ")", ":", "lines", ".", "append", "(", "'%s %s%s'", "%", "(", "self", ".", "prefix", ",", "pbs_key", ",", "str", "(", "val", ")", ")", ")", "else", ":", "lines", ".", "append", "(", "'%s %s %s'", "%", "(", "self", ".", "prefix", ",", "pbs_key", ",", "str", "(", "val", ")", ")", ")", "lines", ".", "append", "(", "\"%s -cwd\"", "%", "self", ".", "prefix", ")", "return", "lines" ]
Given a :class:`~clusterjob.JobScript` instance, return a list of lines that encode the resource requirements, to be added at the top of the rendered job script
[ "Given", "a", ":", "class", ":", "~clusterjob", ".", "JobScript", "instance", "return", "a", "list", "of", "lines", "that", "encode", "the", "resource", "requirements", "to", "be", "added", "at", "the", "top", "of", "the", "rendered", "job", "script" ]
python
train
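A reduced rendering of the same header rules: booleans become bare flags, everything else becomes '-l key=value' (or a direct flag when the key already starts with '-'), and '-cwd' is always appended. The prefix and replacement table are simplified stand-ins.

PREFIX = '#$'
REPLACEMENTS = {'queue': '-q', 'mem': 'h_vmem'}

def sge_headers(resources):
    lines = []
    for key, val in resources.items():
        key = REPLACEMENTS.get(key, key)
        if val is None:
            continue
        if isinstance(val, bool):
            if val:
                lines.append('%s %s' % (PREFIX, key if key.startswith('-') else '-' + key))
        elif key.startswith('-'):
            lines.append('%s %s %s' % (PREFIX, key, val))
        else:
            lines.append('%s -l %s=%s' % (PREFIX, key, val))
    lines.append('%s -cwd' % PREFIX)
    return lines

print('\n'.join(sge_headers({'queue': 'short', 'mem': '2048m', 'notify': True})))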
awslabs/serverless-application-model
samtranslator/model/sam_resources.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/model/sam_resources.py#L690-L704
def _get_retention_policy_value(self): """ Sets the deletion policy on this resource. The default is 'Retain'. :return: value for the DeletionPolicy attribute. """ if self.RetentionPolicy is None or self.RetentionPolicy.lower() == self.RETAIN.lower(): return self.RETAIN elif self.RetentionPolicy.lower() == self.DELETE.lower(): return self.DELETE elif self.RetentionPolicy.lower() not in self.retention_policy_options: raise InvalidResourceException(self.logical_id, "'{}' must be one of the following options: {}." .format('RetentionPolicy', [self.RETAIN, self.DELETE]))
[ "def", "_get_retention_policy_value", "(", "self", ")", ":", "if", "self", ".", "RetentionPolicy", "is", "None", "or", "self", ".", "RetentionPolicy", ".", "lower", "(", ")", "==", "self", ".", "RETAIN", ".", "lower", "(", ")", ":", "return", "self", ".", "RETAIN", "elif", "self", ".", "RetentionPolicy", ".", "lower", "(", ")", "==", "self", ".", "DELETE", ".", "lower", "(", ")", ":", "return", "self", ".", "DELETE", "elif", "self", ".", "RetentionPolicy", ".", "lower", "(", ")", "not", "in", "self", ".", "retention_policy_options", ":", "raise", "InvalidResourceException", "(", "self", ".", "logical_id", ",", "\"'{}' must be one of the following options: {}.\"", ".", "format", "(", "'RetentionPolicy'", ",", "[", "self", ".", "RETAIN", ",", "self", ".", "DELETE", "]", ")", ")" ]
Sets the deletion policy on this resource. The default is 'Retain'. :return: value for the DeletionPolicy attribute.
[ "Sets", "the", "deletion", "policy", "on", "this", "resource", ".", "The", "default", "is", "Retain", "." ]
python
train
andymccurdy/redis-py
redis/client.py
https://github.com/andymccurdy/redis-py/blob/cdfe2befbe00db4a3c48c9ddd6d64dea15f6f0db/redis/client.py#L1019-L1031
def memory_usage(self, key, samples=None): """ Return the total memory usage for key, its value and associated administrative overheads. For nested data structures, ``samples`` is the number of elements to sample. If left unspecified, the server's default is 5. Use 0 to sample all elements. """ args = [] if isinstance(samples, int): args.extend([Token.get_token('SAMPLES'), samples]) return self.execute_command('MEMORY USAGE', key, *args)
[ "def", "memory_usage", "(", "self", ",", "key", ",", "samples", "=", "None", ")", ":", "args", "=", "[", "]", "if", "isinstance", "(", "samples", ",", "int", ")", ":", "args", ".", "extend", "(", "[", "Token", ".", "get_token", "(", "'SAMPLES'", ")", ",", "samples", "]", ")", "return", "self", ".", "execute_command", "(", "'MEMORY USAGE'", ",", "key", ",", "*", "args", ")" ]
Return the total memory usage for key, its value and associated administrative overheads. For nested data structures, ``samples`` is the number of elements to sample. If left unspecified, the server's default is 5. Use 0 to sample all elements.
[ "Return", "the", "total", "memory", "usage", "for", "key", "its", "value", "and", "associated", "administrative", "overheads", "." ]
python
train
dereneaton/ipyrad
ipyrad/core/assembly.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/core/assembly.py#L560-L620
def _link_barcodes(self): """ Private function. Links Sample barcodes in a dictionary as [Assembly].barcodes, with barcodes parsed from the 'barcodes_path' parameter. This function is called during set_params() when setting the barcodes_path. """ ## parse barcodefile try: ## allows fuzzy match to barcodefile name barcodefile = glob.glob(self.paramsdict["barcodes_path"])[0] ## read in the file bdf = pd.read_csv(barcodefile, header=None, delim_whitespace=1, dtype=str) bdf = bdf.dropna() ## make sure bars are upper case bdf[1] = bdf[1].str.upper() ## if replicates are present then print a warning reps = bdf[0].unique().shape[0] != bdf[0].shape[0] if reps: print("{spacer}Warning: technical replicates (same name) will be combined."\ .format(**{'spacer': self._spacer})) ## add -technical-replicate-N to replicate names reps = [i for i in bdf[0] if list(bdf[0]).count(i) > 1] ureps = list(set(reps)) for name in ureps: idxs = bdf[bdf[0] == ureps[0]].index.tolist() for num, idx in enumerate(idxs): bdf.ix[idx][0] = bdf.ix[idx][0] + "-technical-replicate-" + str(num+1) ## make sure chars are all proper if not all(bdf[1].apply(set("RKSYWMCATG").issuperset)): LOGGER.warn(BAD_BARCODE) raise IPyradError(BAD_BARCODE) ## 3rad/seqcap use multiplexed barcodes ## We'll concatenate them with a plus and split them later if "3rad" in self.paramsdict["datatype"]: try: bdf[2] = bdf[2].str.upper() self.barcodes = dict(zip(bdf[0], bdf[1] + "+" + bdf[2])) except KeyError as inst: msg = " 3rad assumes multiplexed barcodes. Doublecheck your barcodes file." LOGGER.error(msg) raise IPyradError(msg) else: ## set attribute on Assembly object self.barcodes = dict(zip(bdf[0], bdf[1])) except (IOError, IndexError): raise IPyradWarningExit(\ " Barcodes file not found. You entered: {}"\ .format(self.paramsdict["barcodes_path"])) except ValueError as inst: msg = " Barcodes file format error." LOGGER.warn(msg) raise IPyradError(inst)
[ "def", "_link_barcodes", "(", "self", ")", ":", "## parse barcodefile", "try", ":", "## allows fuzzy match to barcodefile name", "barcodefile", "=", "glob", ".", "glob", "(", "self", ".", "paramsdict", "[", "\"barcodes_path\"", "]", ")", "[", "0", "]", "## read in the file", "bdf", "=", "pd", ".", "read_csv", "(", "barcodefile", ",", "header", "=", "None", ",", "delim_whitespace", "=", "1", ",", "dtype", "=", "str", ")", "bdf", "=", "bdf", ".", "dropna", "(", ")", "## make sure bars are upper case", "bdf", "[", "1", "]", "=", "bdf", "[", "1", "]", ".", "str", ".", "upper", "(", ")", "## if replicates are present then print a warning", "reps", "=", "bdf", "[", "0", "]", ".", "unique", "(", ")", ".", "shape", "[", "0", "]", "!=", "bdf", "[", "0", "]", ".", "shape", "[", "0", "]", "if", "reps", ":", "print", "(", "\"{spacer}Warning: technical replicates (same name) will be combined.\"", ".", "format", "(", "*", "*", "{", "'spacer'", ":", "self", ".", "_spacer", "}", ")", ")", "## add -technical-replicate-N to replicate names", "reps", "=", "[", "i", "for", "i", "in", "bdf", "[", "0", "]", "if", "list", "(", "bdf", "[", "0", "]", ")", ".", "count", "(", "i", ")", ">", "1", "]", "ureps", "=", "list", "(", "set", "(", "reps", ")", ")", "for", "name", "in", "ureps", ":", "idxs", "=", "bdf", "[", "bdf", "[", "0", "]", "==", "ureps", "[", "0", "]", "]", ".", "index", ".", "tolist", "(", ")", "for", "num", ",", "idx", "in", "enumerate", "(", "idxs", ")", ":", "bdf", ".", "ix", "[", "idx", "]", "[", "0", "]", "=", "bdf", ".", "ix", "[", "idx", "]", "[", "0", "]", "+", "\"-technical-replicate-\"", "+", "str", "(", "num", "+", "1", ")", "## make sure chars are all proper", "if", "not", "all", "(", "bdf", "[", "1", "]", ".", "apply", "(", "set", "(", "\"RKSYWMCATG\"", ")", ".", "issuperset", ")", ")", ":", "LOGGER", ".", "warn", "(", "BAD_BARCODE", ")", "raise", "IPyradError", "(", "BAD_BARCODE", ")", "## 3rad/seqcap use multiplexed barcodes", "## We'll concatenate them with a plus and split them later", "if", "\"3rad\"", "in", "self", ".", "paramsdict", "[", "\"datatype\"", "]", ":", "try", ":", "bdf", "[", "2", "]", "=", "bdf", "[", "2", "]", ".", "str", ".", "upper", "(", ")", "self", ".", "barcodes", "=", "dict", "(", "zip", "(", "bdf", "[", "0", "]", ",", "bdf", "[", "1", "]", "+", "\"+\"", "+", "bdf", "[", "2", "]", ")", ")", "except", "KeyError", "as", "inst", ":", "msg", "=", "\" 3rad assumes multiplexed barcodes. Doublecheck your barcodes file.\"", "LOGGER", ".", "error", "(", "msg", ")", "raise", "IPyradError", "(", "msg", ")", "else", ":", "## set attribute on Assembly object", "self", ".", "barcodes", "=", "dict", "(", "zip", "(", "bdf", "[", "0", "]", ",", "bdf", "[", "1", "]", ")", ")", "except", "(", "IOError", ",", "IndexError", ")", ":", "raise", "IPyradWarningExit", "(", "\" Barcodes file not found. You entered: {}\"", ".", "format", "(", "self", ".", "paramsdict", "[", "\"barcodes_path\"", "]", ")", ")", "except", "ValueError", "as", "inst", ":", "msg", "=", "\" Barcodes file format error.\"", "LOGGER", ".", "warn", "(", "msg", ")", "raise", "IPyradError", "(", "inst", ")" ]
Private function. Links Sample barcodes in a dictionary as [Assembly].barcodes, with barcodes parsed from the 'barcodes_path' parameter. This function is called during set_params() when setting the barcodes_path.
[ "Private", "function", ".", "Links", "Sample", "barcodes", "in", "a", "dictionary", "as", "[", "Assembly", "]", ".", "barcodes", "with", "barcodes", "parsed", "from", "the", "barcodes_path", "parameter", ".", "This", "function", "is", "called", "during", "set_params", "()", "when", "setting", "the", "barcodes_path", "." ]
python
valid
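A stand-alone rerun of the barcode parsing on an inline table. One apparent slip in the original is worth flagging: inside "for name in ureps" the filter uses ureps[0] on every iteration, so only the first replicated name is ever renamed; the sketch below filters on name itself.

import io
import pandas as pd

RAW = 'sampleA ACGT\nsampleB TTAG\nsampleA GGCA\n'
bdf = pd.read_csv(io.StringIO(RAW), sep=r'\s+', header=None, dtype=str)
bdf[1] = bdf[1].str.upper()

reps = [i for i in bdf[0] if list(bdf[0]).count(i) > 1]
for name in set(reps):
    idxs = bdf[bdf[0] == name].index.tolist()
    for num, idx in enumerate(idxs):
        bdf.loc[idx, 0] = '%s-technical-replicate-%d' % (name, num + 1)

assert all(bdf[1].apply(set('RKSYWMCATG').issuperset))
print(dict(zip(bdf[0], bdf[1])))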
noahbenson/neuropythy
neuropythy/mri/images.py
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/mri/images.py#L54-L64
def parse_type(self, hdat, dataobj=None): ''' Parses the dtype out of the header data or the array, depending on which is given; if both, then the header-data overrides the array; if neither, then np.float32. ''' try: dataobj = dataobj.dataobj except Exception: pass dtype = np.asarray(dataobj).dtype if dataobj else self.default_type() if hdat and 'type' in hdat: dtype = np.dtype(hdat['type']) elif hdat and 'dtype' in hdat: dtype = np.dtype(hdat['dtype']) return dtype
[ "def", "parse_type", "(", "self", ",", "hdat", ",", "dataobj", "=", "None", ")", ":", "try", ":", "dataobj", "=", "dataobj", ".", "dataobj", "except", "Exception", ":", "pass", "dtype", "=", "np", ".", "asarray", "(", "dataobj", ")", ".", "dtype", "if", "dataobj", "else", "self", ".", "default_type", "(", ")", "if", "hdat", "and", "'type'", "in", "hdat", ":", "dtype", "=", "np", ".", "dtype", "(", "hdat", "[", "'type'", "]", ")", "elif", "hdat", "and", "'dtype'", "in", "hdat", ":", "dtype", "=", "np", ".", "dtype", "(", "hdat", "[", "'dtype'", "]", ")", "return", "dtype" ]
Parses the dtype out of the header data or the array, depending on which is given; if both, then the header-data overrides the array; if neither, then np.float32.
[ "Parses", "the", "dtype", "out", "of", "the", "header", "data", "or", "the", "array", "depending", "on", "which", "is", "given", ";", "if", "both", "then", "the", "header", "-", "data", "overrides", "the", "array", ";", "if", "neither", "then", "np", ".", "float32", "." ]
python
train
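A small standalone sketch of the dtype-resolution precedence that `parse_type` implements: a header entry beats the array's own dtype, with a default as the fallback. `resolve_dtype` is an illustrative name, not neuropythy API; note that it tests `dataobj` against `None`, since the original's plain truthiness test would raise on multi-element arrays.

import numpy as np

def resolve_dtype(hdat, dataobj=None, default=np.float32):
    # Same precedence as parse_type: header entry beats the array's own
    # dtype, and the default is used when neither source is available.
    dtype = np.asarray(dataobj).dtype if dataobj is not None else np.dtype(default)
    if hdat and 'type' in hdat:
        dtype = np.dtype(hdat['type'])
    elif hdat and 'dtype' in hdat:
        dtype = np.dtype(hdat['dtype'])
    return dtype

print(resolve_dtype({'dtype': 'int16'}, np.zeros(3)))  # int16 (header wins)
print(resolve_dtype(None, np.zeros(3)))                # float64 (from the array)
print(resolve_dtype(None))                             # float32 (default)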
Microsoft/LightGBM
python-package/lightgbm/plotting.py
https://github.com/Microsoft/LightGBM/blob/8d2ec69f4f685b0ab1c4624d59ee2d3287bb3147/python-package/lightgbm/plotting.py#L391-L456
def plot_tree(booster, ax=None, tree_index=0, figsize=None, old_graph_attr=None, old_node_attr=None, old_edge_attr=None, show_info=None, precision=None, **kwargs): """Plot specified tree. Note ---- It is preferable to use ``create_tree_digraph()`` because of its lossless quality and returned objects can be also rendered and displayed directly inside a Jupyter notebook. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance to be plotted. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. tree_index : int, optional (default=0) The index of a target tree to plot. figsize : tuple of 2 elements or None, optional (default=None) Figure size. show_info : list of strings or None, optional (default=None) What information should be shown in nodes. Possible values of list items: 'split_gain', 'internal_value', 'internal_count', 'leaf_count'. precision : int or None, optional (default=None) Used to restrict the display of floating point values to a certain precision. **kwargs Other parameters passed to ``Digraph`` constructor. Check https://graphviz.readthedocs.io/en/stable/api.html#digraph for the full list of supported parameters. Returns ------- ax : matplotlib.axes.Axes The plot with single tree. """ if MATPLOTLIB_INSTALLED: import matplotlib.pyplot as plt import matplotlib.image as image else: raise ImportError('You must install matplotlib to plot tree.') for param_name in ['old_graph_attr', 'old_node_attr', 'old_edge_attr']: param = locals().get(param_name) if param is not None: warnings.warn('{0} parameter is deprecated and will be removed in 2.4 version.\n' 'Please use **kwargs to pass {1} parameter.'.format(param_name, param_name[4:]), LGBMDeprecationWarning) if param_name[4:] not in kwargs: kwargs[param_name[4:]] = param if ax is None: if figsize is not None: _check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize) graph = create_tree_digraph(booster=booster, tree_index=tree_index, show_info=show_info, precision=precision, **kwargs) s = BytesIO() s.write(graph.pipe(format='png')) s.seek(0) img = image.imread(s) ax.imshow(img) ax.axis('off') return ax
[ "def", "plot_tree", "(", "booster", ",", "ax", "=", "None", ",", "tree_index", "=", "0", ",", "figsize", "=", "None", ",", "old_graph_attr", "=", "None", ",", "old_node_attr", "=", "None", ",", "old_edge_attr", "=", "None", ",", "show_info", "=", "None", ",", "precision", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "MATPLOTLIB_INSTALLED", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "import", "matplotlib", ".", "image", "as", "image", "else", ":", "raise", "ImportError", "(", "'You must install matplotlib to plot tree.'", ")", "for", "param_name", "in", "[", "'old_graph_attr'", ",", "'old_node_attr'", ",", "'old_edge_attr'", "]", ":", "param", "=", "locals", "(", ")", ".", "get", "(", "param_name", ")", "if", "param", "is", "not", "None", ":", "warnings", ".", "warn", "(", "'{0} parameter is deprecated and will be removed in 2.4 version.\\n'", "'Please use **kwargs to pass {1} parameter.'", ".", "format", "(", "param_name", ",", "param_name", "[", "4", ":", "]", ")", ",", "LGBMDeprecationWarning", ")", "if", "param_name", "[", "4", ":", "]", "not", "in", "kwargs", ":", "kwargs", "[", "param_name", "[", "4", ":", "]", "]", "=", "param", "if", "ax", "is", "None", ":", "if", "figsize", "is", "not", "None", ":", "_check_not_tuple_of_2_elements", "(", "figsize", ",", "'figsize'", ")", "_", ",", "ax", "=", "plt", ".", "subplots", "(", "1", ",", "1", ",", "figsize", "=", "figsize", ")", "graph", "=", "create_tree_digraph", "(", "booster", "=", "booster", ",", "tree_index", "=", "tree_index", ",", "show_info", "=", "show_info", ",", "precision", "=", "precision", ",", "*", "*", "kwargs", ")", "s", "=", "BytesIO", "(", ")", "s", ".", "write", "(", "graph", ".", "pipe", "(", "format", "=", "'png'", ")", ")", "s", ".", "seek", "(", "0", ")", "img", "=", "image", ".", "imread", "(", "s", ")", "ax", ".", "imshow", "(", "img", ")", "ax", ".", "axis", "(", "'off'", ")", "return", "ax" ]
Plot specified tree. Note ---- It is preferable to use ``create_tree_digraph()`` because of its lossless quality and returned objects can be also rendered and displayed directly inside a Jupyter notebook. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance to be plotted. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. tree_index : int, optional (default=0) The index of a target tree to plot. figsize : tuple of 2 elements or None, optional (default=None) Figure size. show_info : list of strings or None, optional (default=None) What information should be shown in nodes. Possible values of list items: 'split_gain', 'internal_value', 'internal_count', 'leaf_count'. precision : int or None, optional (default=None) Used to restrict the display of floating point values to a certain precision. **kwargs Other parameters passed to ``Digraph`` constructor. Check https://graphviz.readthedocs.io/en/stable/api.html#digraph for the full list of supported parameters. Returns ------- ax : matplotlib.axes.Axes The plot with single tree.
[ "Plot", "specified", "tree", "." ]
python
train
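A hedged usage sketch for `plot_tree`, assuming `lightgbm`, `matplotlib`, and a working `graphviz` binary are installed; the synthetic dataset exists only to produce a booster worth plotting, and the `show_info` values come straight from the docstring above.

import lightgbm as lgb
import matplotlib.pyplot as plt
import numpy as np

# Tiny synthetic problem just to produce a booster worth plotting.
X = np.random.rand(100, 4)
y = (X[:, 0] + X[:, 1] > 1).astype(int)
booster = lgb.train({'objective': 'binary', 'verbose': -1},
                    lgb.Dataset(X, label=y), num_boost_round=5)

ax = lgb.plot_tree(booster, tree_index=0, figsize=(12, 8),
                   show_info=['internal_count', 'leaf_count'])
plt.show()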
ihgazni2/elist
elist/elist.py
https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L6561-L6595
def getStr_to_pathlist(gs): ''' gs = "[1]['1'][2]" getStr_to_pathlist(gs) gs = "['u']['u1']" getStr_to_pathlist(gs) ''' def numize(w): try: int(w) except: try: float(w) except: return(w) else: return(float(w)) else: return(int(w)) def strip_quote(w): if(type(w) == type('')): if(w[0]==w[-1]): if((w[0]=="'") |(w[0]=='"')): return(w[1:-1]) else: return(w) else: return(w) else: return(w) gs = gs[1:-1] pl = gs.split("][") pl = array_map(pl,numize) pl = array_map(pl,strip_quote) return(pl)
[ "def", "getStr_to_pathlist", "(", "gs", ")", ":", "def", "numize", "(", "w", ")", ":", "try", ":", "int", "(", "w", ")", "except", ":", "try", ":", "float", "(", "w", ")", "except", ":", "return", "(", "w", ")", "else", ":", "return", "(", "float", "(", "w", ")", ")", "else", ":", "return", "(", "int", "(", "w", ")", ")", "def", "strip_quote", "(", "w", ")", ":", "if", "(", "type", "(", "w", ")", "==", "type", "(", "''", ")", ")", ":", "if", "(", "w", "[", "0", "]", "==", "w", "[", "-", "1", "]", ")", ":", "if", "(", "(", "w", "[", "0", "]", "==", "\"'\"", ")", "|", "(", "w", "[", "0", "]", "==", "'\"'", ")", ")", ":", "return", "(", "w", "[", "1", ":", "-", "1", "]", ")", "else", ":", "return", "(", "w", ")", "else", ":", "return", "(", "w", ")", "else", ":", "return", "(", "w", ")", "gs", "=", "gs", "[", "1", ":", "-", "1", "]", "pl", "=", "gs", ".", "split", "(", "\"][\"", ")", "pl", "=", "array_map", "(", "pl", ",", "numize", ")", "pl", "=", "array_map", "(", "pl", ",", "strip_quote", ")", "return", "(", "pl", ")" ]
gs = "[1]['1'][2]" getStr_to_pathlist(gs) gs = "['u']['u1']" getStr_to_pathlist(gs)
[ "gs", "=", "[", "1", "]", "[", "1", "]", "[", "2", "]", "getStr_to_pathlist", "(", "gs", ")", "gs", "=", "[", "u", "]", "[", "u1", "]", "getStr_to_pathlist", "(", "gs", ")" ]
python
valid
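To make the docstring's examples concrete, here is a compact standalone re-implementation of the same bracket-splitting idea; it handles the integer, float, and quoted-string shapes shown above, but it is only a sketch and not a drop-in for elist's `array_map`-based version.

import re

def get_str_to_pathlist(gs):
    # Split "[1]['1'][2]" into its bracketed parts, then numify unquoted
    # entries and strip quotes from the rest, as the function above does.
    parts = gs[1:-1].split("][")
    out = []
    for p in parts:
        if re.fullmatch(r"-?\d+", p):
            out.append(int(p))
        elif re.fullmatch(r"-?\d*\.\d+", p):
            out.append(float(p))
        elif len(p) >= 2 and p[0] == p[-1] and p[0] in "'\"":
            out.append(p[1:-1])
        else:
            out.append(p)
    return out

print(get_str_to_pathlist("[1]['1'][2]"))  # [1, '1', 2]
print(get_str_to_pathlist("['u']['u1']"))  # ['u', 'u1']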
Kaggle/kaggle-api
kaggle/api/kaggle_api.py
https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api.py#L2144-L2164
def kernel_pull(self, user_name, kernel_slug, **kwargs): # noqa: E501 """Pull the latest code from a kernel # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.kernel_pull(user_name, kernel_slug, async_req=True) >>> result = thread.get() :param async_req bool :param str user_name: Kernel owner (required) :param str kernel_slug: Kernel name (required) :return: Result If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.kernel_pull_with_http_info(user_name, kernel_slug, **kwargs) # noqa: E501 else: (data) = self.kernel_pull_with_http_info(user_name, kernel_slug, **kwargs) # noqa: E501 return data
[ "def", "kernel_pull", "(", "self", ",", "user_name", ",", "kernel_slug", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "kernel_pull_with_http_info", "(", "user_name", ",", "kernel_slug", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "kernel_pull_with_http_info", "(", "user_name", ",", "kernel_slug", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
Pull the latest code from a kernel # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.kernel_pull(user_name, kernel_slug, async_req=True) >>> result = thread.get() :param async_req bool :param str user_name: Kernel owner (required) :param str kernel_slug: Kernel name (required) :return: Result If the method is called asynchronously, returns the request thread.
[ "Pull", "the", "latest", "code", "from", "a", "kernel", "#", "noqa", ":", "E501" ]
python
train
senaite/senaite.core
bika/lims/setuphandlers.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/setuphandlers.py#L444-L493
def setup_core_catalogs(portal): """Setup core catalogs """ logger.info("*** Setup Core Catalogs ***") to_reindex = [] for catalog, name, attribute, meta_type in INDEXES: c = api.get_tool(catalog) indexes = c.indexes() if name in indexes: logger.info("*** Index '%s' already in Catalog [SKIP]" % name) continue logger.info("*** Adding Index '%s' for field '%s' to catalog ..." % (meta_type, name)) # do we still need ZCTextIndexes? if meta_type == "ZCTextIndex": addZCTextIndex(c, name) else: c.addIndex(name, meta_type) # get the new created index index = c._catalog.getIndex(name) # set the indexed attributes if hasattr(index, "indexed_attrs"): index.indexed_attrs = [attribute or name] to_reindex.append((c, name)) logger.info("*** Added Index '%s' for field '%s' to catalog [DONE]" % (meta_type, name)) # catalog columns for catalog, name in COLUMNS: c = api.get_tool(catalog) if name not in c.schema(): logger.info("*** Adding Column '%s' to catalog '%s' ..." % (name, catalog)) c.addColumn(name) logger.info("*** Added Column '%s' to catalog '%s' [DONE]" % (name, catalog)) else: logger.info("*** Column '%s' already in catalog '%s' [SKIP]" % (name, catalog)) continue for catalog, name in to_reindex: logger.info("*** Indexing new index '%s' ..." % name) catalog.manage_reindexIndex(name) logger.info("*** Indexing new index '%s' [DONE]" % name)
[ "def", "setup_core_catalogs", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"*** Setup Core Catalogs ***\"", ")", "to_reindex", "=", "[", "]", "for", "catalog", ",", "name", ",", "attribute", ",", "meta_type", "in", "INDEXES", ":", "c", "=", "api", ".", "get_tool", "(", "catalog", ")", "indexes", "=", "c", ".", "indexes", "(", ")", "if", "name", "in", "indexes", ":", "logger", ".", "info", "(", "\"*** Index '%s' already in Catalog [SKIP]\"", "%", "name", ")", "continue", "logger", ".", "info", "(", "\"*** Adding Index '%s' for field '%s' to catalog ...\"", "%", "(", "meta_type", ",", "name", ")", ")", "# do we still need ZCTextIndexes?", "if", "meta_type", "==", "\"ZCTextIndex\"", ":", "addZCTextIndex", "(", "c", ",", "name", ")", "else", ":", "c", ".", "addIndex", "(", "name", ",", "meta_type", ")", "# get the new created index", "index", "=", "c", ".", "_catalog", ".", "getIndex", "(", "name", ")", "# set the indexed attributes", "if", "hasattr", "(", "index", ",", "\"indexed_attrs\"", ")", ":", "index", ".", "indexed_attrs", "=", "[", "attribute", "or", "name", "]", "to_reindex", ".", "append", "(", "(", "c", ",", "name", ")", ")", "logger", ".", "info", "(", "\"*** Added Index '%s' for field '%s' to catalog [DONE]\"", "%", "(", "meta_type", ",", "name", ")", ")", "# catalog columns", "for", "catalog", ",", "name", "in", "COLUMNS", ":", "c", "=", "api", ".", "get_tool", "(", "catalog", ")", "if", "name", "not", "in", "c", ".", "schema", "(", ")", ":", "logger", ".", "info", "(", "\"*** Adding Column '%s' to catalog '%s' ...\"", "%", "(", "name", ",", "catalog", ")", ")", "c", ".", "addColumn", "(", "name", ")", "logger", ".", "info", "(", "\"*** Added Column '%s' to catalog '%s' [DONE]\"", "%", "(", "name", ",", "catalog", ")", ")", "else", ":", "logger", ".", "info", "(", "\"*** Column '%s' already in catalog '%s' [SKIP]\"", "%", "(", "name", ",", "catalog", ")", ")", "continue", "for", "catalog", ",", "name", "in", "to_reindex", ":", "logger", ".", "info", "(", "\"*** Indexing new index '%s' ...\"", "%", "name", ")", "catalog", ".", "manage_reindexIndex", "(", "name", ")", "logger", ".", "info", "(", "\"*** Indexing new index '%s' [DONE]\"", "%", "name", ")" ]
Setup core catalogs
[ "Setup", "core", "catalogs" ]
python
train
arne-cl/discoursegraphs
src/discoursegraphs/util.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/util.py#L43-L67
def get_segment_token_offsets(segment_token_list, token_map): """ given a list of token node IDs, returns the index of its first and last elements. this actually calculates the int indices, as there are weird formats like RS3, which use unordered / wrongly ordered IDs. Parameters ---------- segment_token_list : list of str sorted list of token IDs (i.e. the tokens that this segment spans) token_map : dict of (str, int) a map from token IDs to token indices Returns ------- first_token_index : int index of the first token of the segment last_token_index : int index of the last token of the segment """ token_indices = [token_map[token_id] for token_id in segment_token_list] # we need to foolproof this for nasty RS3 files or other input formats # with unordered or wrongly ordered IDs return min(token_indices), max(token_indices)
[ "def", "get_segment_token_offsets", "(", "segment_token_list", ",", "token_map", ")", ":", "token_indices", "=", "[", "token_map", "[", "token_id", "]", "for", "token_id", "in", "segment_token_list", "]", "# we need to foolproof this for nasty RS3 files or other input formats", "# with unordered or wrongly orderd IDs", "return", "min", "(", "token_indices", ")", ",", "max", "(", "token_indices", ")" ]
given a list of token node IDs, returns the index of its first and last elements. this actually calculates the int indices, as there are weird formats like RS3, which use unordered / wrongly ordered IDs. Parameters ---------- segment_token_list : list of str sorted list of token IDs (i.e. the tokens that this segment spans) token_map : dict of (str, int) a map from token IDs to token indices Returns ------- first_token_index : int index of the first token of the segment last_token_index : int index of the last token of the segment
[ "given", "a", "list", "of", "token", "node", "IDs", "returns", "the", "index", "of", "its", "first", "and", "last", "elements", ".", "this", "actually", "calculates", "the", "int", "indices", "as", "there", "are", "weird", "formats", "like", "RS3", "which", "use", "unordered", "/", "wrongly", "ordered", "IDs", "." ]
python
train
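The function reduces to a min/max over mapped indices. A tiny self-contained demonstration, with a made-up `token_map` and an unordered RS3-style segment:

# Illustrative token map and an unordered segment, as RS3 exports may produce.
token_map = {'tok_5': 0, 'tok_9': 1, 'tok_2': 2, 'tok_7': 3}
segment = ['tok_9', 'tok_2', 'tok_5']  # IDs in arbitrary order

indices = [token_map[t] for t in segment]
first, last = min(indices), max(indices)
print(first, last)  # 0 2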
vals/umis
umis/umis.py
https://github.com/vals/umis/blob/e8adb8486d9e9134ab8a6cad9811a7e74dcc4a2c/umis/umis.py#L1256-L1305
def demultiplex_samples(fastq, out_dir, nedit, barcodes): ''' Demultiplex a fastqtransformed FASTQ file into a FASTQ file for each sample. ''' annotations = detect_fastq_annotations(fastq) re_string = construct_transformed_regex(annotations) parser_re = re.compile(re_string) if barcodes: barcodes = set(barcode.strip() for barcode in barcodes) else: barcodes = set() if nedit == 0: filter_bc = partial(exact_sample_filter, barcodes=barcodes) else: barcodehash = MutationHash(barcodes, nedit) filter_bc = partial(correcting_sample_filter, barcodehash=barcodehash) sample_set = set() batch = collections.defaultdict(list) parsed = 0 safe_makedir(out_dir) for read in read_fastq(fastq): parsed += 1 read = filter_bc(read) if not read: continue match = parser_re.search(read).groupdict() sample = match['SB'] sample_set.add(sample) batch[sample].append(read) # write in batches to avoid opening up file handles repeatedly if not parsed % 10000000: for sample, reads in batch.items(): out_file = os.path.join(out_dir, sample + ".fq") with open(out_file, "a") as out_handle: for read in reads: fixed = filter_bc(read) if fixed: out_handle.write(fixed) batch = collections.defaultdict(list) for sample, reads in batch.items(): out_file = os.path.join(out_dir, sample + ".fq") with open(out_file, "a") as out_handle: for read in reads: fixed = filter_bc(read) if fixed: out_handle.write(read)
[ "def", "demultiplex_samples", "(", "fastq", ",", "out_dir", ",", "nedit", ",", "barcodes", ")", ":", "annotations", "=", "detect_fastq_annotations", "(", "fastq", ")", "re_string", "=", "construct_transformed_regex", "(", "annotations", ")", "parser_re", "=", "re", ".", "compile", "(", "re_string", ")", "if", "barcodes", ":", "barcodes", "=", "set", "(", "barcode", ".", "strip", "(", ")", "for", "barcode", "in", "barcodes", ")", "else", ":", "barcodes", "=", "set", "(", ")", "if", "nedit", "==", "0", ":", "filter_bc", "=", "partial", "(", "exact_sample_filter", ",", "barcodes", "=", "barcodes", ")", "else", ":", "barcodehash", "=", "MutationHash", "(", "barcodes", ",", "nedit", ")", "filter_bc", "=", "partial", "(", "correcting_sample_filter", ",", "barcodehash", "=", "barcodehash", ")", "sample_set", "=", "set", "(", ")", "batch", "=", "collections", ".", "defaultdict", "(", "list", ")", "parsed", "=", "0", "safe_makedir", "(", "out_dir", ")", "for", "read", "in", "read_fastq", "(", "fastq", ")", ":", "parsed", "+=", "1", "read", "=", "filter_bc", "(", "read", ")", "if", "not", "read", ":", "continue", "match", "=", "parser_re", ".", "search", "(", "read", ")", ".", "groupdict", "(", ")", "sample", "=", "match", "[", "'SB'", "]", "sample_set", ".", "add", "(", "sample", ")", "batch", "[", "sample", "]", ".", "append", "(", "read", ")", "# write in batches to avoid opening up file handles repeatedly", "if", "not", "parsed", "%", "10000000", ":", "for", "sample", ",", "reads", "in", "batch", ".", "items", "(", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "sample", "+", "\".fq\"", ")", "with", "open", "(", "out_file", ",", "\"a\"", ")", "as", "out_handle", ":", "for", "read", "in", "reads", ":", "fixed", "=", "filter_bc", "(", "read", ")", "if", "fixed", ":", "out_handle", ".", "write", "(", "fixed", ")", "batch", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "sample", ",", "reads", "in", "batch", ".", "items", "(", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "sample", "+", "\".fq\"", ")", "with", "open", "(", "out_file", ",", "\"a\"", ")", "as", "out_handle", ":", "for", "read", "in", "reads", ":", "fixed", "=", "filter_bc", "(", "read", ")", "if", "fixed", ":", "out_handle", ".", "write", "(", "read", ")" ]
Demultiplex a fastqtransformed FASTQ file into a FASTQ file for each sample.
[ "Demultiplex", "a", "fastqtransformed", "FASTQ", "file", "into", "a", "FASTQ", "file", "for", "each", "sample", "." ]
python
train
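The batching pattern in `demultiplex_samples` (accumulate reads per sample, flush periodically, append on write so handles are only open briefly) can be shown in isolation; the `records` list below stands in for parsed FASTQ reads and is purely illustrative.

import collections
import os
import tempfile

# Group records per sample and flush in batches, so file handles are only
# opened briefly instead of being held for every sample at once.
records = [('s1', '@r1\nACGT\n+\nIIII\n'), ('s2', '@r2\nTTTT\n+\nIIII\n'),
           ('s1', '@r3\nGGGG\n+\nIIII\n')]
out_dir = tempfile.mkdtemp()
batch = collections.defaultdict(list)

for sample, read in records:
    batch[sample].append(read)

for sample, reads in batch.items():
    with open(os.path.join(out_dir, sample + '.fq'), 'a') as fh:
        fh.writelines(reads)

print(sorted(os.listdir(out_dir)))  # ['s1.fq', 's2.fq']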
PyHDI/Pyverilog
pyverilog/vparser/parser.py
https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L1534-L1537
def p_if_statement_delay(self, p): 'if_statement : delays IF LPAREN cond RPAREN true_statement ELSE else_statement' p[0] = IfStatement(p[4], p[6], p[8], lineno=p.lineno(2)) p.set_lineno(0, p.lineno(2))
[ "def", "p_if_statement_delay", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "IfStatement", "(", "p", "[", "4", "]", ",", "p", "[", "6", "]", ",", "p", "[", "8", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "2", ")", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "lineno", "(", "2", ")", ")" ]
if_statement : delays IF LPAREN cond RPAREN true_statement ELSE else_statement
[ "if_statement", ":", "delays", "IF", "LPAREN", "cond", "RPAREN", "true_statement", "ELSE", "else_statement" ]
python
train
PmagPy/PmagPy
SPD/lib/new_lib_curvature.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/lib/new_lib_curvature.py#L29-L75
def fitcircle(n, x, y): # n points, x points, y points """c Fit circle to arbitrary number of x,y pairs, based on the c modified least squares method of Umback and Jones (2000), c IEEE Transactions on Instrumentation and Measurement.""" # adding in normalize vectors step #x = numpy.array(x) / max(x) #y = numpy.array(y) / max(y) # sx, sx2, sx3, sy, sy2, sy3, sxy, sxy2, syx2 = (0,) * 9 print(type(sx), sx) for i in range(n): sx = sx + x[i] sx2 = sx2 + x[i]**2 sx3 = sx3 + x[i]**3 sy = sy + y[i] sy2 = sy2 + y[i]**2 sy3 = sy3 + y[i]**3 sxy = sxy + x[i] * y[i] sxy2 = sxy2 + x[i] * y[i]**2 syx2 = syx2 + y[i] * x[i]**2 A = n * sx2 - sx**2 B = n * sxy - sx*sy C = n * sy2 - sy**2 D = 0.5 * (n * sxy2 - sx * sy2 + n * sx3 - sx * sx2) E = 0.5 * (n * syx2 - sy * sx2 + n * sy3 - sy * sy2) # values check out up to here xo = old_div((D * C - B * E), (A * C - B**2)) yo = old_div((A * E - B * D), (A * C - B**2)) print("xo", xo) print("yo", yo) r = 0 for z in range(n): r = r + old_div(numpy.sqrt( (x[z]-xo)**2 + (y[z]-yo)**2 ), n) if xo <= numpy.mean(x) and yo <= numpy.mean(y): k = old_div(-1.,r) else: k = old_div(1.,r) SSE = lib_k.get_SSE(xo, yo, r, x, y) print("r", r) return k, xo, yo, SSE
[ "def", "fitcircle", "(", "n", ",", "x", ",", "y", ")", ":", "# n points, x points, y points", "# adding in normalize vectors step", "#x = numpy.array(x) / max(x)", "#y = numpy.array(y) / max(y)", "#", "sx", ",", "sx2", ",", "sx3", ",", "sy", ",", "sy2", ",", "sy3", ",", "sxy", ",", "sxy2", ",", "syx2", "=", "(", "0", ",", ")", "*", "9", "print", "(", "type", "(", "sx", ")", ",", "sx", ")", "for", "i", "in", "range", "(", "n", ")", ":", "sx", "=", "sx", "+", "x", "[", "i", "]", "sx2", "=", "sx2", "+", "x", "[", "i", "]", "**", "2", "sx3", "=", "sx3", "+", "x", "[", "i", "]", "**", "3", "sy", "=", "sy", "+", "y", "[", "i", "]", "sy2", "=", "sy2", "+", "y", "[", "i", "]", "**", "2", "sy3", "=", "sy3", "+", "y", "[", "i", "]", "**", "3", "sxy", "=", "sxy", "+", "x", "[", "i", "]", "*", "y", "[", "i", "]", "sxy2", "=", "sxy2", "+", "x", "[", "i", "]", "*", "y", "[", "i", "]", "**", "2", "syx2", "=", "syx2", "+", "y", "[", "i", "]", "*", "x", "[", "i", "]", "**", "2", "A", "=", "n", "*", "sx2", "-", "sx", "**", "2", "B", "=", "n", "*", "sxy", "-", "sx", "*", "sy", "C", "=", "n", "*", "sy2", "-", "sy", "**", "2", "D", "=", "0.5", "*", "(", "n", "*", "sxy2", "-", "sx", "*", "sy2", "+", "n", "*", "sx3", "-", "sx", "*", "sx2", ")", "E", "=", "0.5", "*", "(", "n", "*", "syx2", "-", "sy", "*", "sx2", "+", "n", "*", "sy3", "-", "sy", "*", "sy2", ")", "# values check out up to here", "xo", "=", "old_div", "(", "(", "D", "*", "C", "-", "B", "*", "E", ")", ",", "(", "A", "*", "C", "-", "B", "**", "2", ")", ")", "yo", "=", "old_div", "(", "(", "A", "*", "E", "-", "B", "*", "D", ")", ",", "(", "A", "*", "C", "-", "B", "**", "2", ")", ")", "print", "(", "\"xo\"", ",", "xo", ")", "print", "(", "\"yo\"", ",", "yo", ")", "r", "=", "0", "for", "z", "in", "range", "(", "n", ")", ":", "r", "=", "r", "+", "old_div", "(", "numpy", ".", "sqrt", "(", "(", "x", "[", "z", "]", "-", "xo", ")", "**", "2", "+", "(", "y", "[", "z", "]", "-", "yo", ")", "**", "2", ")", ",", "n", ")", "if", "xo", "<=", "numpy", ".", "mean", "(", "x", ")", "and", "yo", "<=", "numpy", ".", "mean", "(", "y", ")", ":", "k", "=", "old_div", "(", "-", "1.", ",", "r", ")", "else", ":", "k", "=", "old_div", "(", "1.", ",", "r", ")", "SSE", "=", "lib_k", ".", "get_SSE", "(", "xo", ",", "yo", ",", "r", ",", "x", ",", "y", ")", "print", "(", "\"r\"", ",", "r", ")", "return", "k", ",", "xo", ",", "yo", ",", "SSE" ]
c Fit circle to arbitrary number of x,y pairs, based on the c modified least squares method of Umback and Jones (2000), c IEEE Transactions on Instrumentation and Measurement.
[ "c", "Fit", "circle", "to", "arbitrary", "number", "of", "x", "y", "pairs", "based", "on", "the", "c", "modified", "least", "squares", "method", "of", "Umback", "and", "Jones", "(", "2000", ")", "c", "IEEE", "Transactions", "on", "Instrumentation", "and", "Measurement", "." ]
python
train
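For reference, a vectorized NumPy sketch of the same modified least squares circle fit (Umbach & Jones, 2000; spelled "Umback" in the source docstring), without the debug prints or the curvature sign and SSE steps; `fit_circle_mls` is an illustrative name. Points on a known circle verify that the normal equations recover it exactly.

import numpy as np

def fit_circle_mls(x, y):
    # Vectorized form of the fit above: solve the 2x2 normal equations
    # for the center, then average point-to-center distances for the radius.
    x, y = np.asarray(x, float), np.asarray(y, float)
    n = len(x)
    A = n * np.sum(x * x) - np.sum(x) ** 2
    B = n * np.sum(x * y) - np.sum(x) * np.sum(y)
    C = n * np.sum(y * y) - np.sum(y) ** 2
    D = 0.5 * (n * np.sum(x * y * y) - np.sum(x) * np.sum(y * y)
               + n * np.sum(x ** 3) - np.sum(x) * np.sum(x * x))
    E = 0.5 * (n * np.sum(y * x * x) - np.sum(y) * np.sum(x * x)
               + n * np.sum(y ** 3) - np.sum(y) * np.sum(y * y))
    xo = (D * C - B * E) / (A * C - B ** 2)
    yo = (A * E - B * D) / (A * C - B ** 2)
    r = np.mean(np.hypot(x - xo, y - yo))
    return xo, yo, r

theta = np.linspace(0, np.pi, 50)
xo, yo, r = fit_circle_mls(3 + 2 * np.cos(theta), -1 + 2 * np.sin(theta))
print(round(xo, 6), round(yo, 6), round(r, 6))  # ~3.0 -1.0 2.0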
zero-os/zerotier_client
zerotier/Network.py
https://github.com/zero-os/zerotier_client/blob/03993da11e69d837a0308a2f41ae7b378692fd82/zerotier/Network.py#L15-L29
def create(annot=None, config=None, id=None, ui=None): """ :type annot: dict :type config: NetworkConfig :type id: str :type ui: dict :rtype: Network """ return Network( annot=annot, config=config, id=id, ui=ui, )
[ "def", "create", "(", "annot", "=", "None", ",", "config", "=", "None", ",", "id", "=", "None", ",", "ui", "=", "None", ")", ":", "return", "Network", "(", "annot", "=", "annot", ",", "config", "=", "config", ",", "id", "=", "id", ",", "ui", "=", "ui", ",", ")" ]
:type annot: dict :type config: NetworkConfig :type id: str :type ui: dict :rtype: Network
[ ":", "type", "annot", ":", "dict", ":", "type", "config", ":", "NetworkConfig", ":", "type", "id", ":", "str", ":", "type", "ui", ":", "dict", ":", "rtype", ":", "Network" ]
python
train
deepmind/sonnet
sonnet/python/modules/batch_norm.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/batch_norm.py#L400-L442
def _batch_norm_op(self, input_batch, mean, variance, use_batch_stats, stat_dtype): """Creates a batch normalization op. It uses the tf.nn.batch_normalization op by default and the tf.nn.fused_batch_norm op to support fused batch normalization. Args: input_batch: A input Tensor of arbitrary dimension. mean: A mean tensor, of the same dtype as `input_batch`. variance: A variance tensor, of the same dtype as `input_batch`. use_batch_stats: A bool value that indicates whether the operation should use the batch statistics. stat_dtype: TensorFlow datatype used for the moving mean and variance. Returns: A batch normalization operation. The current mean tensor, of datatype `stat_dtype`. The current variance tensor, of datatype `stat_dtype`. """ if self._fused: # For the non-training case where not using batch stats, # pass in the moving statistic variables directly. # These will already be in the correct dtype, even for float16 input. batch_norm_op, mean, variance = self._fused_batch_norm_op( input_batch, self._moving_mean, self._moving_variance, use_batch_stats) else: batch_norm_op = tf.nn.batch_normalization( input_batch, mean, variance, self._beta, self._gamma, self._eps, name="batch_norm") # We'll echo the supplied mean and variance so that they can also be used # to update the moving statistics. Cast to matching type if necessary. if input_batch.dtype.base_dtype != stat_dtype: mean = tf.cast(mean, stat_dtype) variance = tf.cast(variance, stat_dtype) return batch_norm_op, mean, variance
[ "def", "_batch_norm_op", "(", "self", ",", "input_batch", ",", "mean", ",", "variance", ",", "use_batch_stats", ",", "stat_dtype", ")", ":", "if", "self", ".", "_fused", ":", "# For the non-training case where not using batch stats,", "# pass in the moving statistic variables directly.", "# These will already be in the correct dtype, even for float16 input.", "batch_norm_op", ",", "mean", ",", "variance", "=", "self", ".", "_fused_batch_norm_op", "(", "input_batch", ",", "self", ".", "_moving_mean", ",", "self", ".", "_moving_variance", ",", "use_batch_stats", ")", "else", ":", "batch_norm_op", "=", "tf", ".", "nn", ".", "batch_normalization", "(", "input_batch", ",", "mean", ",", "variance", ",", "self", ".", "_beta", ",", "self", ".", "_gamma", ",", "self", ".", "_eps", ",", "name", "=", "\"batch_norm\"", ")", "# We'll echo the supplied mean and variance so that they can also be used", "# to update the moving statistics. Cast to matching type if necessary.", "if", "input_batch", ".", "dtype", ".", "base_dtype", "!=", "stat_dtype", ":", "mean", "=", "tf", ".", "cast", "(", "mean", ",", "stat_dtype", ")", "variance", "=", "tf", ".", "cast", "(", "variance", ",", "stat_dtype", ")", "return", "batch_norm_op", ",", "mean", ",", "variance" ]
Creates a batch normalization op. It uses the tf.nn.batch_normalization op by default and the tf.nn.fused_batch_norm op to support fused batch normalization. Args: input_batch: A input Tensor of arbitrary dimension. mean: A mean tensor, of the same dtype as `input_batch`. variance: A variance tensor, of the same dtype as `input_batch`. use_batch_stats: A bool value that indicates whether the operation should use the batch statistics. stat_dtype: TensorFlow datatype used for the moving mean and variance. Returns: A batch normalization operation. The current mean tensor, of datatype `stat_dtype`. The current variance tensor, of datatype `stat_dtype`.
[ "Creates", "a", "batch", "normalization", "op", "." ]
python
train
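The non-fused branch above delegates to `tf.nn.batch_normalization`; its arithmetic, written out in plain NumPy as a sanity check (this is the textbook formula, not Sonnet code):

import numpy as np

# The normalization applied by tf.nn.batch_normalization, written out in
# NumPy: standardize the input, then scale by gamma and shift by beta.
def batch_norm(x, mean, variance, beta, gamma, eps=1e-3):
    return gamma * (x - mean) / np.sqrt(variance + eps) + beta

x = np.random.randn(4, 3).astype(np.float32)
mean, variance = x.mean(axis=0), x.var(axis=0)  # batch statistics
out = batch_norm(x, mean, variance, beta=0.0, gamma=1.0)
print(out.mean(axis=0))  # ~0 per feature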
inveniosoftware-attic/invenio-documents
invenio_documents/api.py
https://github.com/inveniosoftware-attic/invenio-documents/blob/cdfcd21ea5d9ad26f4405f418863fcc3df636176/invenio_documents/api.py#L67-L75
def copy(self, dst, **kwargs): """Copy file to a new destination. Returns JSON Patch with proposed change pointing to new copy. """ _fs, filename = opener.parse(self.uri) _fs_dst, filename_dst = opener.parse(dst) copyfile(_fs, filename, _fs_dst, filename_dst, **kwargs) return [{'op': 'replace', 'path': self.pointer, 'value': dst}]
[ "def", "copy", "(", "self", ",", "dst", ",", "*", "*", "kwargs", ")", ":", "_fs", ",", "filename", "=", "opener", ".", "parse", "(", "self", ".", "uri", ")", "_fs_dst", ",", "filename_dst", "=", "opener", ".", "parse", "(", "dst", ")", "copyfile", "(", "_fs", ",", "filename", ",", "_fs_dst", ",", "filename_dst", ",", "*", "*", "kwargs", ")", "return", "[", "{", "'op'", ":", "'replace'", ",", "'path'", ":", "self", ".", "pointer", ",", "'value'", ":", "dst", "}", "]" ]
Copy file to a new destination. Returns JSON Patch with proposed change pointing to new copy.
[ "Copy", "file", "to", "a", "new", "destination", "." ]
python
train
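The method returns an RFC 6902 'replace' operation rather than mutating the record. A stdlib-only sketch of applying such a patch to a JSON-like document; the document and pointer here are made up for illustration.

# Applying a 'replace' patch like the one returned above, using only the
# standard library; walk the JSON Pointer path, then assign the new value.
doc = {'files': [{'uri': 'file:///old/location.txt'}]}
patch = [{'op': 'replace', 'path': '/files/0/uri',
          'value': 'file:///new/location.txt'}]

for op in patch:
    *parents, leaf = [p for p in op['path'].split('/') if p]
    target = doc
    for key in parents:
        target = target[int(key)] if isinstance(target, list) else target[key]
    if isinstance(target, list):
        target[int(leaf)] = op['value']
    else:
        target[leaf] = op['value']

print(doc)  # {'files': [{'uri': 'file:///new/location.txt'}]}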
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/proto_builder.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/proto_builder.py#L44-L57
def _GetMessageFromFactory(factory, full_name): """Get a proto class from the MessageFactory by name. Args: factory: a MessageFactory instance. full_name: str, the fully qualified name of the proto type. Returns: A class, for the type identified by full_name. Raises: KeyError, if the proto is not found in the factory's descriptor pool. """ proto_descriptor = factory.pool.FindMessageTypeByName(full_name) proto_cls = factory.GetPrototype(proto_descriptor) return proto_cls
[ "def", "_GetMessageFromFactory", "(", "factory", ",", "full_name", ")", ":", "proto_descriptor", "=", "factory", ".", "pool", ".", "FindMessageTypeByName", "(", "full_name", ")", "proto_cls", "=", "factory", ".", "GetPrototype", "(", "proto_descriptor", ")", "return", "proto_cls" ]
Get a proto class from the MessageFactory by name. Args: factory: a MessageFactory instance. full_name: str, the fully qualified name of the proto type. Returns: A class, for the type identified by full_name. Raises: KeyError, if the proto is not found in the factory's descriptor pool.
[ "Get", "a", "proto", "class", "from", "the", "MessageFactory", "by", "name", "." ]
python
train
Scifabric/pybossa-client
pbclient/__init__.py
https://github.com/Scifabric/pybossa-client/blob/998d7cb0207ff5030dc800f0c2577c5692316c2c/pbclient/__init__.py#L768-L787
def find_helping_materials(project_id, **kwargs): """Return a list of matched helping materials for a given project ID. :param project_id: PYBOSSA Project ID :type project_id: integer :param kwargs: PYBOSSA HelpingMaterial members :type info: dict :rtype: list :returns: A list of helping materials that match the kwargs """ try: kwargs['project_id'] = project_id res = _pybossa_req('get', 'helpingmaterial', params=kwargs) if type(res).__name__ == 'list': return [HelpingMaterial(helping) for helping in res] else: return res except: # pragma: no cover raise
[ "def", "find_helping_materials", "(", "project_id", ",", "*", "*", "kwargs", ")", ":", "try", ":", "kwargs", "[", "'project_id'", "]", "=", "project_id", "res", "=", "_pybossa_req", "(", "'get'", ",", "'helpingmaterial'", ",", "params", "=", "kwargs", ")", "if", "type", "(", "res", ")", ".", "__name__", "==", "'list'", ":", "return", "[", "HelpingMaterial", "(", "helping", ")", "for", "helping", "in", "res", "]", "else", ":", "return", "res", "except", ":", "# pragma: no cover", "raise" ]
Return a list of matched helping materials for a given project ID. :param project_id: PYBOSSA Project ID :type project_id: integer :param kwargs: PYBOSSA HelpingMaterial members :type info: dict :rtype: list :returns: A list of helping materials that match the kwargs
[ "Return", "a", "list", "of", "matched", "helping", "materials", "for", "a", "given", "project", "ID", "." ]
python
valid
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/build.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/build.py#L209-L252
def _rmdir(path): """ Remove dirname(os.path.abspath(path)) and all its contents, but only if: 1. It doesn't start with BUILDPATH 2. It is a directory and not empty (otherwise continue without removing the directory) 3. BUILDPATH and SPECPATH don't start with it 4. The --noconfirm option is set, or sys.stdout is a tty and the user confirms directory removal Otherwise, error out. """ if not os.path.abspath(path): path = os.path.abspath(path) if not path.startswith(BUILDPATH) and os.path.isdir(path) and os.listdir(path): specerr = 0 if BUILDPATH.startswith(path): logger.error('specfile error: The output path "%s" contains ' 'BUILDPATH (%s)', path, BUILDPATH) specerr += 1 if SPECPATH.startswith(path): logger.error('Specfile error: The output path "%s" contains ' 'SPECPATH (%s)', path, SPECPATH) specerr += 1 if specerr: raise SystemExit('Error: Please edit/recreate the specfile (%s) ' 'and set a different output name (e.g. "dist").' % SPEC) if NOCONFIRM: choice = 'y' elif sys.stdout.isatty(): choice = raw_input('WARNING: The output directory "%s" and ALL ITS ' 'CONTENTS will be REMOVED! Continue? (y/n)' % path) else: raise SystemExit('Error: The output directory "%s" is not empty. ' 'Please remove all its contents or use the ' '-y option (remove output directory without ' 'confirmation).' % path) if choice.strip().lower() == 'y': logger.info('Removing %s', path) shutil.rmtree(path) else: raise SystemExit('User aborted')
[ "def", "_rmdir", "(", "path", ")", ":", "if", "not", "os", ".", "path", ".", "abspath", "(", "path", ")", ":", "path", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "if", "not", "path", ".", "startswith", "(", "BUILDPATH", ")", "and", "os", ".", "path", ".", "isdir", "(", "path", ")", "and", "os", ".", "listdir", "(", "path", ")", ":", "specerr", "=", "0", "if", "BUILDPATH", ".", "startswith", "(", "path", ")", ":", "logger", ".", "error", "(", "'specfile error: The output path \"%s\" contains '", "'BUILDPATH (%s)'", ",", "path", ",", "BUILDPATH", ")", "specerr", "+=", "1", "if", "SPECPATH", ".", "startswith", "(", "path", ")", ":", "logger", ".", "error", "(", "'Specfile error: The output path \"%s\" contains '", "'SPECPATH (%s)'", ",", "path", ",", "SPECPATH", ")", "specerr", "+=", "1", "if", "specerr", ":", "raise", "SystemExit", "(", "'Error: Please edit/recreate the specfile (%s) '", "'and set a different output name (e.g. \"dist\").'", "%", "SPEC", ")", "if", "NOCONFIRM", ":", "choice", "=", "'y'", "elif", "sys", ".", "stdout", ".", "isatty", "(", ")", ":", "choice", "=", "raw_input", "(", "'WARNING: The output directory \"%s\" and ALL ITS '", "'CONTENTS will be REMOVED! Continue? (y/n)'", "%", "path", ")", "else", ":", "raise", "SystemExit", "(", "'Error: The output directory \"%s\" is not empty. '", "'Please remove all its contents or use the '", "'-y option (remove output directory without '", "'confirmation).'", "%", "path", ")", "if", "choice", ".", "strip", "(", ")", ".", "lower", "(", ")", "==", "'y'", ":", "logger", ".", "info", "(", "'Removing %s'", ",", "path", ")", "shutil", ".", "rmtree", "(", "path", ")", "else", ":", "raise", "SystemExit", "(", "'User aborted'", ")" ]
Remove dirname(os.path.abspath(path)) and all its contents, but only if: 1. It doesn't start with BUILDPATH 2. It is a directory and not empty (otherwise continue without removing the directory) 3. BUILDPATH and SPECPATH don't start with it 4. The --noconfirm option is set, or sys.stdout is a tty and the user confirms directory removal Otherwise, error out.
[ "Remove", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "path", "))", "and", "all", "its", "contents", "but", "only", "if", ":" ]
python
train
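The same guard ordering (protect configured paths, confirm before deleting anything non-empty) in a simplified standalone sketch; `remove_dir_safely` and its `protected` parameter are illustrative names, not PyInstaller API, and the spec-editing hint is elided.

import os
import shutil
import tempfile

def remove_dir_safely(path, protected=(), assume_yes=False):
    # Same guard ordering as _rmdir: refuse to delete a directory that
    # contains one of the protected paths, and ask before deleting
    # anything non-empty unless explicitly told not to.
    path = os.path.abspath(path)
    for p in protected:
        if os.path.abspath(p).startswith(path):
            raise SystemExit('refusing to remove %s: it contains %s' % (path, p))
    if os.path.isdir(path) and os.listdir(path) and not assume_yes:
        if input('Remove %s and ALL ITS CONTENTS? (y/n) ' % path).strip().lower() != 'y':
            raise SystemExit('User aborted')
    shutil.rmtree(path, ignore_errors=True)

d = tempfile.mkdtemp()
try:
    remove_dir_safely(d, protected=[os.path.join(d, 'build')])
except SystemExit as exc:
    print(exc)  # refuses, because a protected path lives inside d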
Syndace/python-x3dh
x3dh/state.py
https://github.com/Syndace/python-x3dh/blob/a6cec1ae858121b88bef1b178f5cda5e43d5c391/x3dh/state.py#L230-L236
def __checkSPKTimestamp(self): """ Check whether the SPK is too old and generate a new one in that case. """ if time.time() - self.__spk["timestamp"] > self.__spk_timeout: self.__generateSPK()
[ "def", "__checkSPKTimestamp", "(", "self", ")", ":", "if", "time", ".", "time", "(", ")", "-", "self", ".", "__spk", "[", "\"timestamp\"", "]", ">", "self", ".", "__spk_timeout", ":", "self", ".", "__generateSPK", "(", ")" ]
Check whether the SPK is too old and generate a new one in that case.
[ "Check", "whether", "the", "SPK", "is", "too", "old", "and", "generate", "a", "new", "one", "in", "that", "case", "." ]
python
train
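The rotation-on-read pattern behind `__checkSPKTimestamp`, reduced to a minimal stand-in class (`RotatingKey` is an illustrative name; the real X3DH signed-pre-key generation is elided):

import time

class RotatingKey(object):
    # Minimal stand-in for the SPK rotation check: regenerate the key
    # once its timestamp is older than the configured timeout.
    def __init__(self, timeout):
        self._timeout = timeout
        self._generate()

    def _generate(self):
        self._key = {'value': object(), 'timestamp': time.time()}

    def check(self):
        if time.time() - self._key['timestamp'] > self._timeout:
            self._generate()

k = RotatingKey(timeout=0.01)
old = k._key['value']
time.sleep(0.02)
k.check()
print(k._key['value'] is not old)  # True: the key was rotated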
zhmcclient/python-zhmcclient
zhmcclient/_lpar.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_lpar.py#L429-L567
def scsi_load(self, load_address, wwpn, lun, load_parameter=None, disk_partition_id=None, operating_system_specific_load_parameters=None, boot_record_logical_block_address=None, force=False, wait_for_completion=True, operation_timeout=None, status_timeout=None, allow_status_exceptions=False): """ Load (boot) this LPAR from a designated SCSI device, using the HMC operation "SCSI Load". This HMC operation has deferred status behavior: If the asynchronous job on the HMC is complete, it takes a few seconds until the LPAR status has reached the desired value. If `wait_for_completion=True`, this method repeatedly checks the status of the LPAR after the HMC operation has completed, and waits until the status is in the desired state "operating", or if `allow_status_exceptions` was set additionally in the state "exceptions". Authorization requirements: * Object-access permission to the CPC containing this LPAR. * Object-access permission to this LPAR. * Task permission for the "SCSI Load" task. Parameters: load_address (:term:`string`): Device number of the boot device. wwpn (:term:`string`): Worldwide port name (WWPN) of the target SCSI device to be used for this operation, in hexadecimal. lun (:term:`string`): Hexadecimal logical unit number (LUN) to be used for the SCSI Load. load_parameter (:term:`string`): Optional load control string. If empty string or `None`, it is not passed to the HMC. disk_partition_id (:term:`integer`): Optional disk-partition-id (also called the boot program selector) to be used for the SCSI Load. If `None`, it is not passed to the HMC. operating_system_specific_load_parameters (:term:`string`): Optional operating system specific load parameters to be used for the SCSI Load. boot_record_logical_block_address (:term:`string`): Optional hexadecimal boot record logical block address to be used for the SCSI Load. force (bool): Boolean controlling whether this operation is permitted when the LPAR is in the "operating" status. The default value is `True`. wait_for_completion (bool): Boolean controlling whether this method should wait for completion of the requested asynchronous HMC operation, as follows: * If `True`, this method will wait for completion of the asynchronous job performing the operation, and for the status becoming "operating" (or in addition "exceptions", if `allow_status_exceptions` was set. * If `False`, this method will return immediately once the HMC has accepted the request to perform the operation. operation_timeout (:term:`number`): Timeout in seconds, for waiting for completion of the asynchronous job performing the operation. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.OperationTimeout` is raised. status_timeout (:term:`number`): Timeout in seconds, for waiting that the status of the LPAR has reached the desired status, after the HMC operation has completed. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.StatusTimeout` is raised. allow_status_exceptions (bool): Boolean controlling whether LPAR status "exceptions" is considered an additional acceptable end status when `wait_for_completion` is set. Returns: `None` or :class:`~zhmcclient.Job`: If `wait_for_completion` is `True`, returns `None`. If `wait_for_completion` is `False`, returns a :class:`~zhmcclient.Job` object representing the asynchronously executing job on the HMC. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for completion of the operation. :exc:`~zhmcclient.StatusTimeout`: The timeout expired while waiting for the desired LPAR status. """ body = {} body['load-address'] = load_address body['world-wide-port-name'] = wwpn body['logical-unit-number'] = lun if load_parameter: body['load-parameter'] = load_parameter if disk_partition_id is not None: body['disk-partition-id'] = disk_partition_id if operating_system_specific_load_parameters: body['operating-system-specific-load-parameters'] = \ operating_system_specific_load_parameters if boot_record_logical_block_address: body['boot-record-logical-block-address'] = \ boot_record_logical_block_address if force: body['force'] = force result = self.manager.session.post( self.uri + '/operations/scsi-load', body, wait_for_completion=wait_for_completion, operation_timeout=operation_timeout) if wait_for_completion: statuses = ["operating"] if allow_status_exceptions: statuses.append("exceptions") self.wait_for_status(statuses, status_timeout) return result
[ "def", "scsi_load", "(", "self", ",", "load_address", ",", "wwpn", ",", "lun", ",", "load_parameter", "=", "None", ",", "disk_partition_id", "=", "None", ",", "operating_system_specific_load_parameters", "=", "None", ",", "boot_record_logical_block_address", "=", "None", ",", "force", "=", "False", ",", "wait_for_completion", "=", "True", ",", "operation_timeout", "=", "None", ",", "status_timeout", "=", "None", ",", "allow_status_exceptions", "=", "False", ")", ":", "body", "=", "{", "}", "body", "[", "'load-address'", "]", "=", "load_address", "body", "[", "'world-wide-port-name'", "]", "=", "wwpn", "body", "[", "'logical-unit-number'", "]", "=", "lun", "if", "load_parameter", ":", "body", "[", "'load-parameter'", "]", "=", "load_parameter", "if", "disk_partition_id", "is", "not", "None", ":", "body", "[", "'disk-partition-id'", "]", "=", "disk_partition_id", "if", "operating_system_specific_load_parameters", ":", "body", "[", "'operating-system-specific-load-parameters'", "]", "=", "operating_system_specific_load_parameters", "if", "boot_record_logical_block_address", ":", "body", "[", "'boot-record-logical-block-address'", "]", "=", "boot_record_logical_block_address", "if", "force", ":", "body", "[", "'force'", "]", "=", "force", "result", "=", "self", ".", "manager", ".", "session", ".", "post", "(", "self", ".", "uri", "+", "'/operations/scsi-load'", ",", "body", ",", "wait_for_completion", "=", "wait_for_completion", ",", "operation_timeout", "=", "operation_timeout", ")", "if", "wait_for_completion", ":", "statuses", "=", "[", "\"operating\"", "]", "if", "allow_status_exceptions", ":", "statuses", ".", "append", "(", "\"exceptions\"", ")", "self", ".", "wait_for_status", "(", "statuses", ",", "status_timeout", ")", "return", "result" ]
Load (boot) this LPAR from a designated SCSI device, using the HMC operation "SCSI Load". This HMC operation has deferred status behavior: If the asynchronous job on the HMC is complete, it takes a few seconds until the LPAR status has reached the desired value. If `wait_for_completion=True`, this method repeatedly checks the status of the LPAR after the HMC operation has completed, and waits until the status is in the desired state "operating", or if `allow_status_exceptions` was set additionally in the state "exceptions". Authorization requirements: * Object-access permission to the CPC containing this LPAR. * Object-access permission to this LPAR. * Task permission for the "SCSI Load" task. Parameters: load_address (:term:`string`): Device number of the boot device. wwpn (:term:`string`): Worldwide port name (WWPN) of the target SCSI device to be used for this operation, in hexadecimal. lun (:term:`string`): Hexadecimal logical unit number (LUN) to be used for the SCSI Load. load_parameter (:term:`string`): Optional load control string. If empty string or `None`, it is not passed to the HMC. disk_partition_id (:term:`integer`): Optional disk-partition-id (also called the boot program selector) to be used for the SCSI Load. If `None`, it is not passed to the HMC. operating_system_specific_load_parameters (:term:`string`): Optional operating system specific load parameters to be used for the SCSI Load. boot_record_logical_block_address (:term:`string`): Optional hexadecimal boot record logical block address to be used for the SCSI Load. force (bool): Boolean controlling whether this operation is permitted when the LPAR is in the "operating" status. The default value is `True`. wait_for_completion (bool): Boolean controlling whether this method should wait for completion of the requested asynchronous HMC operation, as follows: * If `True`, this method will wait for completion of the asynchronous job performing the operation, and for the status becoming "operating" (or in addition "exceptions", if `allow_status_exceptions` was set. * If `False`, this method will return immediately once the HMC has accepted the request to perform the operation. operation_timeout (:term:`number`): Timeout in seconds, for waiting for completion of the asynchronous job performing the operation. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.OperationTimeout` is raised. status_timeout (:term:`number`): Timeout in seconds, for waiting that the status of the LPAR has reached the desired status, after the HMC operation has completed. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.StatusTimeout` is raised. allow_status_exceptions (bool): Boolean controlling whether LPAR status "exceptions" is considered an additional acceptable end status when `wait_for_completion` is set. Returns: `None` or :class:`~zhmcclient.Job`: If `wait_for_completion` is `True`, returns `None`. If `wait_for_completion` is `False`, returns a :class:`~zhmcclient.Job` object representing the asynchronously executing job on the HMC. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for completion of the operation. :exc:`~zhmcclient.StatusTimeout`: The timeout expired while waiting for the desired LPAR status.
[ "Load", "(", "boot", ")", "this", "LPAR", "from", "a", "designated", "SCSI", "device", "using", "the", "HMC", "operation", "SCSI", "Load", "." ]
python
train
avelino/bottle-auth
bottle_auth/core/httputil.py
https://github.com/avelino/bottle-auth/blob/db07e526864aeac05ee68444b47e5db29540ce18/bottle_auth/core/httputil.py#L191-L233
def parse_multipart_form_data(boundary, data, arguments, files): """Parses a multipart/form-data body. The boundary and data parameters are both byte strings. The dictionaries given in the arguments and files parameters will be updated with the contents of the body. """ # The standard allows for the boundary to be quoted in the header, # although it's rare (it happens at least for google app engine # xmpp). I think we're also supposed to handle backslash-escapes # here but I'll save that until we see a client that uses them # in the wild. if boundary.startswith(b('"')) and boundary.endswith(b('"')): boundary = boundary[1:-1] if data.endswith(b("\r\n")): footer_length = len(boundary) + 6 else: footer_length = len(boundary) + 4 parts = data[:-footer_length].split(b("--") + boundary + b("\r\n")) for part in parts: if not part: continue eoh = part.find(b("\r\n\r\n")) if eoh == -1: logging.warning("multipart/form-data missing headers") continue headers = HTTPHeaders.parse(part[:eoh].decode("utf-8")) disp_header = headers.get("Content-Disposition", "") disposition, disp_params = _parse_header(disp_header) if disposition != "form-data" or not part.endswith(b("\r\n")): logging.warning("Invalid multipart/form-data") continue value = part[eoh + 4:-2] if not disp_params.get("name"): logging.warning("multipart/form-data value missing name") continue name = disp_params["name"] if disp_params.get("filename"): ctype = headers.get("Content-Type", "application/unknown") files.setdefault(name, []).append(dict( filename=disp_params["filename"], body=value, content_type=ctype)) else: arguments.setdefault(name, []).append(value)
[ "def", "parse_multipart_form_data", "(", "boundary", ",", "data", ",", "arguments", ",", "files", ")", ":", "# The standard allows for the boundary to be quoted in the header,", "# although it's rare (it happens at least for google app engine", "# xmpp). I think we're also supposed to handle backslash-escapes", "# here but I'll save that until we see a client that uses them", "# in the wild.", "if", "boundary", ".", "startswith", "(", "b", "(", "'\"'", ")", ")", "and", "boundary", ".", "endswith", "(", "b", "(", "'\"'", ")", ")", ":", "boundary", "=", "boundary", "[", "1", ":", "-", "1", "]", "if", "data", ".", "endswith", "(", "b", "(", "\"\\r\\n\"", ")", ")", ":", "footer_length", "=", "len", "(", "boundary", ")", "+", "6", "else", ":", "footer_length", "=", "len", "(", "boundary", ")", "+", "4", "parts", "=", "data", "[", ":", "-", "footer_length", "]", ".", "split", "(", "b", "(", "\"--\"", ")", "+", "boundary", "+", "b", "(", "\"\\r\\n\"", ")", ")", "for", "part", "in", "parts", ":", "if", "not", "part", ":", "continue", "eoh", "=", "part", ".", "find", "(", "b", "(", "\"\\r\\n\\r\\n\"", ")", ")", "if", "eoh", "==", "-", "1", ":", "logging", ".", "warning", "(", "\"multipart/form-data missing headers\"", ")", "continue", "headers", "=", "HTTPHeaders", ".", "parse", "(", "part", "[", ":", "eoh", "]", ".", "decode", "(", "\"utf-8\"", ")", ")", "disp_header", "=", "headers", ".", "get", "(", "\"Content-Disposition\"", ",", "\"\"", ")", "disposition", ",", "disp_params", "=", "_parse_header", "(", "disp_header", ")", "if", "disposition", "!=", "\"form-data\"", "or", "not", "part", ".", "endswith", "(", "b", "(", "\"\\r\\n\"", ")", ")", ":", "logging", ".", "warning", "(", "\"Invalid multipart/form-data\"", ")", "continue", "value", "=", "part", "[", "eoh", "+", "4", ":", "-", "2", "]", "if", "not", "disp_params", ".", "get", "(", "\"name\"", ")", ":", "logging", ".", "warning", "(", "\"multipart/form-data value missing name\"", ")", "continue", "name", "=", "disp_params", "[", "\"name\"", "]", "if", "disp_params", ".", "get", "(", "\"filename\"", ")", ":", "ctype", "=", "headers", ".", "get", "(", "\"Content-Type\"", ",", "\"application/unknown\"", ")", "files", ".", "setdefault", "(", "name", ",", "[", "]", ")", ".", "append", "(", "dict", "(", "filename", "=", "disp_params", "[", "\"filename\"", "]", ",", "body", "=", "value", ",", "content_type", "=", "ctype", ")", ")", "else", ":", "arguments", ".", "setdefault", "(", "name", ",", "[", "]", ")", ".", "append", "(", "value", ")" ]
Parses a multipart/form-data body. The boundary and data parameters are both byte strings. The dictionaries given in the arguments and files parameters will be updated with the contents of the body.
[ "Parses", "a", "multipart", "/", "form", "-", "data", "body", "." ]
python
test
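A hand-rolled round trip of the boundary arithmetic above: build a small multipart/form-data body, strip the closing delimiter using the same `len(boundary) + 6` footer length, split on the boundary, and separate each part's headers from its value. This sketch skips the Content-Disposition parsing and quoted-boundary handling that the full function performs.

# Build a two-part body by hand, then parse it with the same slicing logic.
boundary = b'myboundary'
body = (b'--myboundary\r\n'
        b'Content-Disposition: form-data; name="title"\r\n\r\n'
        b'hello\r\n'
        b'--myboundary\r\n'
        b'Content-Disposition: form-data; name="file"; filename="a.txt"\r\n'
        b'Content-Type: text/plain\r\n\r\n'
        b'file-contents\r\n'
        b'--myboundary--\r\n')

footer_length = len(boundary) + 6  # body ends with b'\r\n'
parts = body[:-footer_length].split(b'--' + boundary + b'\r\n')
for part in parts:
    if not part:
        continue
    eoh = part.find(b'\r\n\r\n')
    headers, value = part[:eoh], part[eoh + 4:-2]
    print(headers.split(b'\r\n')[0], b'=>', value)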
proycon/clam
clam/common/data.py
https://github.com/proycon/clam/blob/09d15cfc26d7cbe0f5976cdd5424dc446d10dbf3/clam/common/data.py#L1825-L1867
def fromxml(node): """Static method return an OutputTemplate instance from the given XML description. Node can be a string or an etree._Element.""" if not isinstance(node,ElementTree._Element): #pylint: disable=protected-access node = parsexmlstring(node) assert node.tag.lower() == 'outputtemplate' template_id = node.attrib['id'] dataformat = node.attrib['format'] label = node.attrib['label'] kwargs = {} if 'filename' in node.attrib: kwargs['filename'] = node.attrib['filename'] if 'extension' in node.attrib: kwargs['extension'] = node.attrib['extension'] if 'unique' in node.attrib: kwargs['unique'] = node.attrib['unique'].lower() == 'yes' or node.attrib['unique'].lower() == 'true' or node.attrib['unique'].lower() == '1' if 'parent' in node.attrib: kwargs['parent'] = node.attrib['parent'] #find formatclass formatcls = None for C in CUSTOM_FORMATS: #CUSTOM_FORMATS will be injected by clamservice.py if C.__name__ == dataformat: formatcls = C break if formatcls is None: if dataformat in vars(clam.common.formats): formatcls = vars(clam.common.formats)[dataformat] else: raise Exception("Specified format not defined! (" + dataformat + ")") args = [] for subnode in node: if subnode.tag == 'parametercondition': args.append(ParameterCondition.fromxml(subnode)) elif subnode.tag == 'converter': pass #MAYBE TODO: Reading converters from XML is not implemented (and not necessary at this stage) elif subnode.tag == 'viewer': pass #MAYBE TODO: Reading viewers from XML is not implemented (and not necessary at this stage) else: args.append(AbstractMetaField.fromxml(subnode)) return OutputTemplate(template_id,formatcls,label, *args, **kwargs)
[ "def", "fromxml", "(", "node", ")", ":", "if", "not", "isinstance", "(", "node", ",", "ElementTree", ".", "_Element", ")", ":", "#pylint: disable=protected-access", "node", "=", "parsexmlstring", "(", "node", ")", "assert", "node", ".", "tag", ".", "lower", "(", ")", "==", "'outputtemplate'", "template_id", "=", "node", ".", "attrib", "[", "'id'", "]", "dataformat", "=", "node", ".", "attrib", "[", "'format'", "]", "label", "=", "node", ".", "attrib", "[", "'label'", "]", "kwargs", "=", "{", "}", "if", "'filename'", "in", "node", ".", "attrib", ":", "kwargs", "[", "'filename'", "]", "=", "node", ".", "attrib", "[", "'filename'", "]", "if", "'extension'", "in", "node", ".", "attrib", ":", "kwargs", "[", "'extension'", "]", "=", "node", ".", "attrib", "[", "'extension'", "]", "if", "'unique'", "in", "node", ".", "attrib", ":", "kwargs", "[", "'unique'", "]", "=", "node", ".", "attrib", "[", "'unique'", "]", ".", "lower", "(", ")", "==", "'yes'", "or", "node", ".", "attrib", "[", "'unique'", "]", ".", "lower", "(", ")", "==", "'true'", "or", "node", ".", "attrib", "[", "'unique'", "]", ".", "lower", "(", ")", "==", "'1'", "if", "'parent'", "in", "node", ".", "attrib", ":", "kwargs", "[", "'parent'", "]", "=", "node", ".", "attrib", "[", "'parent'", "]", "#find formatclass", "formatcls", "=", "None", "for", "C", "in", "CUSTOM_FORMATS", ":", "#CUSTOM_FORMATS will be injected by clamservice.py", "if", "C", ".", "__name__", "==", "dataformat", ":", "formatcls", "=", "C", "break", "if", "formatcls", "is", "None", ":", "if", "dataformat", "in", "vars", "(", "clam", ".", "common", ".", "formats", ")", ":", "formatcls", "=", "vars", "(", "clam", ".", "common", ".", "formats", ")", "[", "dataformat", "]", "else", ":", "raise", "Exception", "(", "\"Specified format not defined! (\"", "+", "dataformat", "+", "\")\"", ")", "args", "=", "[", "]", "for", "subnode", "in", "node", ":", "if", "subnode", ".", "tag", "==", "'parametercondition'", ":", "args", ".", "append", "(", "ParameterCondition", ".", "fromxml", "(", "subnode", ")", ")", "elif", "subnode", ".", "tag", "==", "'converter'", ":", "pass", "#MAYBE TODO: Reading converters from XML is not implemented (and not necessary at this stage)", "elif", "subnode", ".", "tag", "==", "'viewer'", ":", "pass", "#MAYBE TODO: Reading viewers from XML is not implemented (and not necessary at this stage)", "else", ":", "args", ".", "append", "(", "AbstractMetaField", ".", "fromxml", "(", "subnode", ")", ")", "return", "OutputTemplate", "(", "template_id", ",", "formatcls", ",", "label", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Static method that returns an OutputTemplate instance from the given XML description. Node can be a string or an etree._Element.
[ "Static", "method", "return", "an", "OutputTemplate", "instance", "from", "the", "given", "XML", "description", ".", "Node", "can", "be", "a", "string", "or", "an", "etree", ".", "_Element", "." ]
python
train
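Usage sketch for the record above: a minimal round-trip from an XML string, assuming fromxml is the static method of clam.common.data.OutputTemplate and that a PlainTextFormat class is registered in clam.common.formats (both names are assumptions, not confirmed by this record):

from clam.common.data import OutputTemplate  # assumed import path for the class above

xml = '<OutputTemplate id="out" format="PlainTextFormat" label="Output" extension=".txt" unique="yes" />'
template = OutputTemplate.fromxml(xml)  # a parsed etree._Element is accepted as well
# unique="yes" is parsed to True; the tag check is case-insensitive ('outputtemplate')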
mikedh/trimesh
trimesh/comparison.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/comparison.py#L107-L134
def identifier_hash(identifier, sigfig=None): """ Hash an identifier array to a specified number of significant figures. Parameters ---------- identifier : (n,) float Vector of properties sigfig : (n,) int Number of sigfigs per property Returns ---------- md5 : str MD5 hash of identifier """ if sigfig is None: sigfig = id_sigfig # convert identifier to integers and order of magnitude as_int, multiplier = util.sigfig_int(identifier, sigfig) # make all scales positive if (multiplier < 0).any(): multiplier += np.abs(multiplier.min()) hashable = (as_int * (10 ** multiplier)).astype(np.int64) md5 = util.md5_object(hashable) return md5
[ "def", "identifier_hash", "(", "identifier", ",", "sigfig", "=", "None", ")", ":", "if", "sigfig", "is", "None", ":", "sigfig", "=", "id_sigfig", "# convert identifier to integers and order of magnitude", "as_int", ",", "multiplier", "=", "util", ".", "sigfig_int", "(", "identifier", ",", "sigfig", ")", "# make all scales positive", "if", "(", "multiplier", "<", "0", ")", ".", "any", "(", ")", ":", "multiplier", "+=", "np", ".", "abs", "(", "multiplier", ".", "min", "(", ")", ")", "hashable", "=", "(", "as_int", "*", "(", "10", "**", "multiplier", ")", ")", ".", "astype", "(", "np", ".", "int64", ")", "md5", "=", "util", ".", "md5_object", "(", "hashable", ")", "return", "md5" ]
Hash an identifier array to a specified number of significant figures. Parameters ---------- identifier : (n,) float Vector of properties sigfig : (n,) int Number of sigfigs per property Returns ---------- md5 : str MD5 hash of identifier
[ "Hash", "an", "identifier", "array", "to", "a", "specified", "number", "of", "significant", "figures", "." ]
python
train
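To see the hashing scheme outside of trimesh, here is a self-contained sketch of the same idea (round each property to a number of significant figures, then hash); util.sigfig_int and util.md5_object are trimesh internals, so plain hashlib/numpy stand in for them and the integer encoding differs from the library's:

import hashlib
import numpy as np

def sketch_identifier_hash(identifier, sigfig=8):
    # round each property to `sigfig` significant figures, then hash the raw bytes
    ident = np.asarray(identifier, dtype=np.float64)
    rounded = np.array([float(f"%.{sigfig}g" % v) for v in ident])
    return hashlib.md5(rounded.tobytes()).hexdigest()

print(sketch_identifier_hash([3.14159265, 2.71828182, 1.41421356]))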
jashandeep-sohi/python-blowfish
blowfish.py
https://github.com/jashandeep-sohi/python-blowfish/blob/5ce7f6d54dcef7efd715b26f9a9ffee0d543047e/blowfish.py#L657-L702
def encrypt_cbc(self, data, init_vector): """ Return an iterator that encrypts `data` using the Cipher-Block Chaining (CBC) mode of operation. CBC mode can only operate on `data` that is a multiple of the block-size in length. Each iteration returns a block-sized :obj:`bytes` object (i.e. 8 bytes) containing the encrypted bytes of the corresponding block in `data`. `init_vector` is the initialization vector and should be a :obj:`bytes`-like object with exactly 8 bytes. If it is not, a :exc:`ValueError` exception is raised. `data` should be a :obj:`bytes`-like object that is a multiple of the block-size in length (i.e. 8, 16, 32, etc.). If it is not, a :exc:`ValueError` exception is raised. """ S1, S2, S3, S4 = self.S P = self.P u4_1_pack = self._u4_1_pack u1_4_unpack = self._u1_4_unpack encrypt = self._encrypt u4_2_pack = self._u4_2_pack try: prev_cipher_L, prev_cipher_R = self._u4_2_unpack(init_vector) except struct_error: raise ValueError("initialization vector is not 8 bytes in length") try: LR_iter = self._u4_2_iter_unpack(data) except struct_error: raise ValueError("data is not a multiple of the block-size in length") for plain_L, plain_R in LR_iter: prev_cipher_L, prev_cipher_R = encrypt( prev_cipher_L ^ plain_L, prev_cipher_R ^ plain_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack ) yield u4_2_pack(prev_cipher_L, prev_cipher_R)
[ "def", "encrypt_cbc", "(", "self", ",", "data", ",", "init_vector", ")", ":", "S1", ",", "S2", ",", "S3", ",", "S4", "=", "self", ".", "S", "P", "=", "self", ".", "P", "u4_1_pack", "=", "self", ".", "_u4_1_pack", "u1_4_unpack", "=", "self", ".", "_u1_4_unpack", "encrypt", "=", "self", ".", "_encrypt", "u4_2_pack", "=", "self", ".", "_u4_2_pack", "try", ":", "prev_cipher_L", ",", "prev_cipher_R", "=", "self", ".", "_u4_2_unpack", "(", "init_vector", ")", "except", "struct_error", ":", "raise", "ValueError", "(", "\"initialization vector is not 8 bytes in length\"", ")", "try", ":", "LR_iter", "=", "self", ".", "_u4_2_iter_unpack", "(", "data", ")", "except", "struct_error", ":", "raise", "ValueError", "(", "\"data is not a multiple of the block-size in length\"", ")", "for", "plain_L", ",", "plain_R", "in", "LR_iter", ":", "prev_cipher_L", ",", "prev_cipher_R", "=", "encrypt", "(", "prev_cipher_L", "^", "plain_L", ",", "prev_cipher_R", "^", "plain_R", ",", "P", ",", "S1", ",", "S2", ",", "S3", ",", "S4", ",", "u4_1_pack", ",", "u1_4_unpack", ")", "yield", "u4_2_pack", "(", "prev_cipher_L", ",", "prev_cipher_R", ")" ]
Return an iterator that encrypts `data` using the Cipher-Block Chaining (CBC) mode of operation. CBC mode can only operate on `data` that is a multiple of the block-size in length. Each iteration returns a block-sized :obj:`bytes` object (i.e. 8 bytes) containing the encrypted bytes of the corresponding block in `data`. `init_vector` is the initialization vector and should be a :obj:`bytes`-like object with exactly 8 bytes. If it is not, a :exc:`ValueError` exception is raised. `data` should be a :obj:`bytes`-like object that is a multiple of the block-size in length (i.e. 8, 16, 32, etc.). If it is not, a :exc:`ValueError` exception is raised.
[ "Return", "an", "iterator", "that", "encrypts", "data", "using", "the", "Cipher", "-", "Block", "Chaining", "(", "CBC", ")", "mode", "of", "operation", ".", "CBC", "mode", "can", "only", "operate", "on", "data", "that", "is", "a", "multiple", "of", "the", "block", "-", "size", "in", "length", ".", "Each", "iteration", "returns", "a", "block", "-", "sized", ":", "obj", ":", "bytes", "object", "(", "i", ".", "e", ".", "8", "bytes", ")", "containing", "the", "encrypted", "bytes", "of", "the", "corresponding", "block", "in", "data", ".", "init_vector", "is", "the", "initialization", "vector", "and", "should", "be", "a", ":", "obj", ":", "bytes", "-", "like", "object", "with", "exactly", "8", "bytes", ".", "If", "it", "is", "not", "a", ":", "exc", ":", "ValueError", "exception", "is", "raised", ".", "data", "should", "be", "a", ":", "obj", ":", "bytes", "-", "like", "object", "that", "is", "a", "multiple", "of", "the", "block", "-", "size", "in", "length", "(", "i", ".", "e", ".", "8", "16", "32", "etc", ".", ")", ".", "If", "it", "is", "not", "a", ":", "exc", ":", "ValueError", "exception", "is", "raised", "." ]
python
train
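A usage sketch, assuming the surrounding class is instantiated as blowfish.Cipher(key) and that a matching decrypt_cbc generator exists, as the project's README suggests (both are assumptions here):

import os
import blowfish

cipher = blowfish.Cipher(b"some key between 4 and 56 bytes")
iv = os.urandom(8)             # must be exactly 8 bytes
data = b"0123456789abcdef"     # length must be a multiple of the 8-byte block size
ciphertext = b"".join(cipher.encrypt_cbc(data, iv))  # the generator yields 8-byte blocks
assert b"".join(cipher.decrypt_cbc(ciphertext, iv)) == data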
esterhui/pypu
pypu/service_facebook.py
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_facebook.py#L500-L517
def _already_resized_on_fb(self,fn,pid,_megapixels): """Checks if image file (fn) with photo_id (pid) has already been resized on fb. If so, returns True""" logger.debug("%s - resize requested"%(fn)) # Get width/height from fb width_fb,height_fb=self._getphoto_originalsize(pid) # Now compute what image will be if we resize it new_width,new_height=pusher_utils.resize_compute_width_height(\ fn,_megapixels) logger.debug("%s - fb %d/%d, current %d/%d"\ %(fn,width_fb,height_fb,new_width,new_height)) # Check both cases since FB sometimes rotates photos if width_fb==new_width and height_fb==new_height: return True elif width_fb==new_height and height_fb==new_width: return True return False
[ "def", "_already_resized_on_fb", "(", "self", ",", "fn", ",", "pid", ",", "_megapixels", ")", ":", "logger", ".", "debug", "(", "\"%s - resize requested\"", "%", "(", "fn", ")", ")", "# Get width/height from fb", "width_fb", ",", "height_fb", "=", "self", ".", "_getphoto_originalsize", "(", "pid", ")", "# Now compute what image will be if we resize it", "new_width", ",", "new_height", "=", "pusher_utils", ".", "resize_compute_width_height", "(", "fn", ",", "_megapixels", ")", "logger", ".", "debug", "(", "\"%s - fb %d/%d, current %d/%d\"", "%", "(", "fn", ",", "width_fb", ",", "height_fb", ",", "new_width", ",", "new_height", ")", ")", "# Check both cases since FB sometimes rotates photos", "if", "width_fb", "==", "new_width", "and", "height_fb", "==", "new_height", ":", "return", "True", "elif", "width_fb", "==", "new_height", "and", "height_fb", "==", "new_width", ":", "return", "True", "return", "False" ]
Checks if image file (fn) with photo_id (pid) has already been resized on fb. If so, returns True
[ "Checks", "if", "image", "file", "(", "fn", ")", "with", "photo_id", "(", "pid", ")", "has", "already", "been", "resized", "on", "fb", ".", "If", "so", "returns", "True" ]
python
train
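The rotation-tolerant comparison at the end of the method is easy to check in isolation; this sketch re-creates just that step with plain tuples (the helper name is illustrative, not part of pypu):

def matches_fb_size(fb_size, resized_size):
    # Facebook sometimes rotates photos, so accept either orientation
    w_fb, h_fb = fb_size
    w_new, h_new = resized_size
    return (w_fb, h_fb) == (w_new, h_new) or (w_fb, h_fb) == (h_new, w_new)

print(matches_fb_size((1024, 768), (768, 1024)))  # True: same photo, rotated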
astropy/photutils
photutils/isophote/isophote.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/isophote.py#L259-L295
def _compute_errors(self): """ Compute parameter errors based on the diagonal of the covariance matrix of the four harmonic coefficients for harmonics n=1 and n=2. """ try: coeffs = fit_first_and_second_harmonics(self.sample.values[0], self.sample.values[2]) covariance = coeffs[1] coeffs = coeffs[0] model = first_and_second_harmonic_function(self.sample.values[0], coeffs) residual_rms = np.std(self.sample.values[2] - model) errors = np.diagonal(covariance) * residual_rms eps = self.sample.geometry.eps pa = self.sample.geometry.pa # parameter errors result from direct projection of # coefficient errors. These showed to be the error estimators # that best convey the errors measured in Monte Carlo # experiments (see Busko 1996; ASPC 101, 139). ea = abs(errors[2] / self.grad) eb = abs(errors[1] * (1. - eps) / self.grad) self.x0_err = np.sqrt((ea * np.cos(pa))**2 + (eb * np.sin(pa))**2) self.y0_err = np.sqrt((ea * np.sin(pa))**2 + (eb * np.cos(pa))**2) self.ellip_err = (abs(2. * errors[4] * (1. - eps) / self.sma / self.grad)) if (abs(eps) > np.finfo(float).resolution): self.pa_err = (abs(2. * errors[3] * (1. - eps) / self.sma / self.grad / (1. - (1. - eps)**2))) else: self.pa_err = 0. except Exception: # we want to catch everything self.x0_err = self.y0_err = self.pa_err = self.ellip_err = 0.
[ "def", "_compute_errors", "(", "self", ")", ":", "try", ":", "coeffs", "=", "fit_first_and_second_harmonics", "(", "self", ".", "sample", ".", "values", "[", "0", "]", ",", "self", ".", "sample", ".", "values", "[", "2", "]", ")", "covariance", "=", "coeffs", "[", "1", "]", "coeffs", "=", "coeffs", "[", "0", "]", "model", "=", "first_and_second_harmonic_function", "(", "self", ".", "sample", ".", "values", "[", "0", "]", ",", "coeffs", ")", "residual_rms", "=", "np", ".", "std", "(", "self", ".", "sample", ".", "values", "[", "2", "]", "-", "model", ")", "errors", "=", "np", ".", "diagonal", "(", "covariance", ")", "*", "residual_rms", "eps", "=", "self", ".", "sample", ".", "geometry", ".", "eps", "pa", "=", "self", ".", "sample", ".", "geometry", ".", "pa", "# parameter errors result from direct projection of", "# coefficient errors. These showed to be the error estimators", "# that best convey the errors measured in Monte Carlo", "# experiments (see Busko 1996; ASPC 101, 139).", "ea", "=", "abs", "(", "errors", "[", "2", "]", "/", "self", ".", "grad", ")", "eb", "=", "abs", "(", "errors", "[", "1", "]", "*", "(", "1.", "-", "eps", ")", "/", "self", ".", "grad", ")", "self", ".", "x0_err", "=", "np", ".", "sqrt", "(", "(", "ea", "*", "np", ".", "cos", "(", "pa", ")", ")", "**", "2", "+", "(", "eb", "*", "np", ".", "sin", "(", "pa", ")", ")", "**", "2", ")", "self", ".", "y0_err", "=", "np", ".", "sqrt", "(", "(", "ea", "*", "np", ".", "sin", "(", "pa", ")", ")", "**", "2", "+", "(", "eb", "*", "np", ".", "cos", "(", "pa", ")", ")", "**", "2", ")", "self", ".", "ellip_err", "=", "(", "abs", "(", "2.", "*", "errors", "[", "4", "]", "*", "(", "1.", "-", "eps", ")", "/", "self", ".", "sma", "/", "self", ".", "grad", ")", ")", "if", "(", "abs", "(", "eps", ")", ">", "np", ".", "finfo", "(", "float", ")", ".", "resolution", ")", ":", "self", ".", "pa_err", "=", "(", "abs", "(", "2.", "*", "errors", "[", "3", "]", "*", "(", "1.", "-", "eps", ")", "/", "self", ".", "sma", "/", "self", ".", "grad", "/", "(", "1.", "-", "(", "1.", "-", "eps", ")", "**", "2", ")", ")", ")", "else", ":", "self", ".", "pa_err", "=", "0.", "except", "Exception", ":", "# we want to catch everything", "self", ".", "x0_err", "=", "self", ".", "y0_err", "=", "self", ".", "pa_err", "=", "self", ".", "ellip_err", "=", "0." ]
Compute parameter errors based on the diagonal of the covariance matrix of the four harmonic coefficients for harmonics n=1 and n=2.
[ "Compute", "parameter", "errors", "based", "on", "the", "diagonal", "of", "the", "covariance", "matrix", "of", "the", "four", "harmonic", "coefficients", "for", "harmonics", "n", "=", "1", "and", "n", "=", "2", "." ]
python
train
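The propagation step itself is a few lines of numpy; this standalone sketch reproduces the x0/y0 projection from the method above with made-up inputs (the values of errors, grad, eps and pa are hypothetical):

import numpy as np

errors = np.array([0.01, 0.02, 0.03, 0.01, 0.02])  # hypothetical diag(covariance) * residual_rms
grad, eps, pa = -0.5, 0.2, np.radians(30.0)        # hypothetical gradient, ellipticity, position angle

ea = abs(errors[2] / grad)
eb = abs(errors[1] * (1.0 - eps) / grad)
x0_err = np.sqrt((ea * np.cos(pa))**2 + (eb * np.sin(pa))**2)
y0_err = np.sqrt((ea * np.sin(pa))**2 + (eb * np.cos(pa))**2)
print(x0_err, y0_err)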
pyQode/pyqode.core
pyqode/core/widgets/output_window.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/output_window.py#L1329-L1353
def _get_line_and_col(data):
    """
    Gets line and column from a string like the following: "1;5" or "1;" or
    ";5" and converts the column/line numbers to 0-based indices.
    """
    try:
        line, column = data.split(';')
    except AttributeError:
        line = int(data)
        column = 1
    # handle empty values and convert them to 0 based indices
    if not line:
        line = 0
    else:
        line = int(line) - 1
        if line < 0:
            line = 0
    if not column:
        column = 0
    else:
        column = int(column) - 1
        if column < 0:
            column = 0
    return column, line
[ "def", "_get_line_and_col", "(", "data", ")", ":", "try", ":", "line", ",", "column", "=", "data", ".", "split", "(", "';'", ")", "except", "AttributeError", ":", "line", "=", "int", "(", "data", ")", "column", "=", "1", "# handle empty values and convert them to 0 based indices", "if", "not", "line", ":", "line", "=", "0", "else", ":", "line", "=", "int", "(", "line", ")", "-", "1", "if", "line", "<", "0", ":", "line", "=", "0", "if", "not", "column", ":", "column", "=", "0", "else", ":", "column", "=", "int", "(", "column", ")", "-", "1", "if", "column", "<", "0", ":", "column", "=", "0", "return", "column", ",", "line" ]
Gets line and column from a string like the following: "1;5" or "1;" or ";5" and converts the column/line numbers to 0-based indices.
[ "Gets", "line", "and", "column", "from", "a", "string", "like", "the", "following", ":", "1", ";", "5", "or", "1", ";", "or", ";", "5" ]
python
train
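Worth noting that the helper returns (column, line), in that order, both 0-based; quick checks, calling the module-private function directly:

print(_get_line_and_col("12;4"))  # (3, 11): column 4 -> 3, line 12 -> 11
print(_get_line_and_col("12;"))   # (0, 11): empty column defaults to 0
print(_get_line_and_col(";4"))    # (3, 0):  empty line defaults to 0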
mattja/nsim
nsim/analyses1/_cwtmorlet.py
https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/_cwtmorlet.py#L20-L69
def roughcwt(data, wavelet, widths):
    """
    Continuous wavelet transform.

    Performs a continuous wavelet transform on `data`,
    using the `wavelet` function. A CWT performs a convolution
    with `data` using the `wavelet` function, which is characterized
    by a width parameter and length parameter.

    Parameters
    ----------
    data : (N,) ndarray
        data on which to perform the transform.
    wavelet : function
        Wavelet function, which should take 2 arguments.
        The first argument is the number of points that the returned vector
        will have (len(wavelet(width,length)) == length).
        The second is a width parameter, defining the size of the wavelet
        (e.g. standard deviation of a gaussian). See `ricker`, which
        satisfies these requirements.
    widths : (M,) sequence
        Widths to use for transform.

    Returns
    -------
    cwt: (M, N) ndarray
        Will have shape of (len(widths), len(data)).

    Notes
    -----
    >>> length = min(3 * width[ii], len(data))
    >>> cwt[ii,:] = scipy.signal.convolve(data, wavelet(length,
    ...   width[ii]), mode='same')

    Examples
    --------
    >>> from scipy import signal
    >>> sig = np.random.rand(20) - 0.5
    >>> wavelet = signal.ricker
    >>> widths = np.arange(1, 11)
    >>> cwtmatr = signal.cwt(sig, wavelet, widths)
    """
    out_dtype = wavelet(widths[0], widths[0]).dtype
    output = np.zeros([len(widths), len(data)], dtype=out_dtype)
    for ind, width in enumerate(widths):
        wavelet_data = wavelet(min(3 * width, len(data)), width)
        output[ind, :] = convolve(data, wavelet_data, mode='same')
    return output
[ "def", "roughcwt", "(", "data", ",", "wavelet", ",", "widths", ")", ":", "out_dtype", "=", "wavelet", "(", "widths", "[", "0", "]", ",", "widths", "[", "0", "]", ")", ".", "dtype", "output", "=", "np", ".", "zeros", "(", "[", "len", "(", "widths", ")", ",", "len", "(", "data", ")", "]", ",", "dtype", "=", "out_dtype", ")", "for", "ind", ",", "width", "in", "enumerate", "(", "widths", ")", ":", "wavelet_data", "=", "wavelet", "(", "min", "(", "3", "*", "width", ",", "len", "(", "data", ")", ")", ",", "width", ")", "output", "[", "ind", ",", ":", "]", "=", "convolve", "(", "data", ",", "wavelet_data", ",", "mode", "=", "'same'", ")", "return", "output" ]
Continuous wavelet transform.

Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter.

Parameters
----------
data : (N,) ndarray
    data on which to perform the transform.
wavelet : function
    Wavelet function, which should take 2 arguments.
    The first argument is the number of points that the returned vector
    will have (len(wavelet(width,length)) == length).
    The second is a width parameter, defining the size of the wavelet
    (e.g. standard deviation of a gaussian). See `ricker`, which
    satisfies these requirements.
widths : (M,) sequence
    Widths to use for transform.

Returns
-------
cwt: (M, N) ndarray
    Will have shape of (len(widths), len(data)).

Notes
-----
>>> length = min(3 * width[ii], len(data))
>>> cwt[ii,:] = scipy.signal.convolve(data, wavelet(length,
...   width[ii]), mode='same')

Examples
--------
>>> from scipy import signal
>>> sig = np.random.rand(20) - 0.5
>>> wavelet = signal.ricker
>>> widths = np.arange(1, 11)
>>> cwtmatr = signal.cwt(sig, wavelet, widths)
[ "Continuous", "wavelet", "transform", "." ]
python
train
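Usage mirrors scipy.signal.cwt, only with the shorter 3 * width wavelet support; a sketch, assuming an older SciPy that still ships signal.ricker and that the module-private path from this record is importable:

import numpy as np
from scipy import signal
from nsim.analyses1._cwtmorlet import roughcwt  # path taken from this record

sig = np.random.rand(200) - 0.5
widths = np.arange(1, 11)
cwtmatr = roughcwt(sig, signal.ricker, widths)
print(cwtmatr.shape)  # (10, 200): one row per width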
gwastro/pycbc
pycbc/tmpltbank/partitioned_bank.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/tmpltbank/partitioned_bank.py#L297-L361
def calc_point_distance_vary(self, chi_coords, point_fupper, mus): """ Calculate distance between point and the bank allowing the metric to vary based on varying upper frequency cutoff. Slower than calc_point_distance, but more reliable when upper frequency cutoff can change a lot. Parameters ----------- chi_coords : numpy.array The position of the point in the chi coordinates. point_fupper : float The upper frequency cutoff to use for this point. This value must be one of the ones already calculated in the metric. mus : numpy.array A 2D array where idx 0 holds the upper frequency cutoff and idx 1 holds the coordinates in the [not covaried] mu parameter space for each value of the upper frequency cutoff. Returns -------- min_dist : float The smallest **SQUARED** metric distance between the test point and the bank. indexes : The chi1_bin, chi2_bin and position within that bin at which the closest matching point lies. """ chi1_bin, chi2_bin = self.find_point_bin(chi_coords) min_dist = 1000000000 indexes = None for chi1_bin_offset, chi2_bin_offset in self.bin_loop_order: curr_chi1_bin = chi1_bin + chi1_bin_offset curr_chi2_bin = chi2_bin + chi2_bin_offset # No points = Next iteration curr_bank = self.massbank[curr_chi1_bin][curr_chi2_bin] if not curr_bank['mass1s'].size: continue # *NOT* the same of .min and .max f_upper = numpy.minimum(point_fupper, curr_bank['freqcuts']) f_other = numpy.maximum(point_fupper, curr_bank['freqcuts']) # NOTE: freq_idxes is a vector! freq_idxes = numpy.array([self.frequency_map[f] for f in f_upper]) # vecs1 gives a 2x2 vector: idx0 = stored index, idx1 = mu index vecs1 = mus[freq_idxes, :] # vecs2 gives a 2x2 vector: idx0 = stored index, idx1 = mu index range_idxes = numpy.arange(len(freq_idxes)) vecs2 = curr_bank['mus'][range_idxes, freq_idxes, :] # Now do the sums dists = (vecs1 - vecs2)*(vecs1 - vecs2) # This reduces to 1D: idx = stored index dists = numpy.sum(dists, axis=1) norm_upper = numpy.array([self.normalization_map[f] \ for f in f_upper]) norm_other = numpy.array([self.normalization_map[f] \ for f in f_other]) norm_fac = norm_upper / norm_other renormed_dists = 1 - (1 - dists)*norm_fac curr_min_dist = renormed_dists.min() if curr_min_dist < min_dist: min_dist = curr_min_dist indexes = curr_chi1_bin, curr_chi2_bin, renormed_dists.argmin() return min_dist, indexes
[ "def", "calc_point_distance_vary", "(", "self", ",", "chi_coords", ",", "point_fupper", ",", "mus", ")", ":", "chi1_bin", ",", "chi2_bin", "=", "self", ".", "find_point_bin", "(", "chi_coords", ")", "min_dist", "=", "1000000000", "indexes", "=", "None", "for", "chi1_bin_offset", ",", "chi2_bin_offset", "in", "self", ".", "bin_loop_order", ":", "curr_chi1_bin", "=", "chi1_bin", "+", "chi1_bin_offset", "curr_chi2_bin", "=", "chi2_bin", "+", "chi2_bin_offset", "# No points = Next iteration", "curr_bank", "=", "self", ".", "massbank", "[", "curr_chi1_bin", "]", "[", "curr_chi2_bin", "]", "if", "not", "curr_bank", "[", "'mass1s'", "]", ".", "size", ":", "continue", "# *NOT* the same of .min and .max", "f_upper", "=", "numpy", ".", "minimum", "(", "point_fupper", ",", "curr_bank", "[", "'freqcuts'", "]", ")", "f_other", "=", "numpy", ".", "maximum", "(", "point_fupper", ",", "curr_bank", "[", "'freqcuts'", "]", ")", "# NOTE: freq_idxes is a vector!", "freq_idxes", "=", "numpy", ".", "array", "(", "[", "self", ".", "frequency_map", "[", "f", "]", "for", "f", "in", "f_upper", "]", ")", "# vecs1 gives a 2x2 vector: idx0 = stored index, idx1 = mu index", "vecs1", "=", "mus", "[", "freq_idxes", ",", ":", "]", "# vecs2 gives a 2x2 vector: idx0 = stored index, idx1 = mu index", "range_idxes", "=", "numpy", ".", "arange", "(", "len", "(", "freq_idxes", ")", ")", "vecs2", "=", "curr_bank", "[", "'mus'", "]", "[", "range_idxes", ",", "freq_idxes", ",", ":", "]", "# Now do the sums", "dists", "=", "(", "vecs1", "-", "vecs2", ")", "*", "(", "vecs1", "-", "vecs2", ")", "# This reduces to 1D: idx = stored index", "dists", "=", "numpy", ".", "sum", "(", "dists", ",", "axis", "=", "1", ")", "norm_upper", "=", "numpy", ".", "array", "(", "[", "self", ".", "normalization_map", "[", "f", "]", "for", "f", "in", "f_upper", "]", ")", "norm_other", "=", "numpy", ".", "array", "(", "[", "self", ".", "normalization_map", "[", "f", "]", "for", "f", "in", "f_other", "]", ")", "norm_fac", "=", "norm_upper", "/", "norm_other", "renormed_dists", "=", "1", "-", "(", "1", "-", "dists", ")", "*", "norm_fac", "curr_min_dist", "=", "renormed_dists", ".", "min", "(", ")", "if", "curr_min_dist", "<", "min_dist", ":", "min_dist", "=", "curr_min_dist", "indexes", "=", "curr_chi1_bin", ",", "curr_chi2_bin", ",", "renormed_dists", ".", "argmin", "(", ")", "return", "min_dist", ",", "indexes" ]
Calculate distance between point and the bank allowing the metric to vary based on varying upper frequency cutoff. Slower than calc_point_distance, but more reliable when upper frequency cutoff can change a lot. Parameters ----------- chi_coords : numpy.array The position of the point in the chi coordinates. point_fupper : float The upper frequency cutoff to use for this point. This value must be one of the ones already calculated in the metric. mus : numpy.array A 2D array where idx 0 holds the upper frequency cutoff and idx 1 holds the coordinates in the [not covaried] mu parameter space for each value of the upper frequency cutoff. Returns -------- min_dist : float The smallest **SQUARED** metric distance between the test point and the bank. indexes : The chi1_bin, chi2_bin and position within that bin at which the closest matching point lies.
[ "Calculate", "distance", "between", "point", "and", "the", "bank", "allowing", "the", "metric", "to", "vary", "based", "on", "varying", "upper", "frequency", "cutoff", ".", "Slower", "than", "calc_point_distance", "but", "more", "reliable", "when", "upper", "frequency", "cutoff", "can", "change", "a", "lot", "." ]
python
train
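The heart of the loop body is a vectorized, renormalized distance over one (chi1, chi2) bin; this sketch isolates that arithmetic with hypothetical arrays (the mu vectors and normalization factors are made up, not pycbc data):

import numpy as np

vecs1 = np.array([[0.10, 0.20], [0.11, 0.19]])  # point's mus evaluated at each template's f_upper
vecs2 = np.array([[0.12, 0.18], [0.10, 0.21]])  # stored template mus
norm_fac = np.array([0.98, 1.00])               # norm(f_upper) / norm(f_other) per template

dists = np.sum((vecs1 - vecs2) ** 2, axis=1)    # squared metric distance per template
renormed = 1 - (1 - dists) * norm_fac
print(renormed.min(), renormed.argmin())        # best squared distance and its index in the bin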
blue-yonder/tsfresh
tsfresh/feature_extraction/feature_calculators.py
https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/feature_extraction/feature_calculators.py#L803-L826
def percentage_of_reoccurring_datapoints_to_all_datapoints(x): """ Returns the percentage of unique values, that are present in the time series more than once. len(different values occurring more than once) / len(different values) This means the percentage is normalized to the number of unique values, in contrast to the percentage_of_reoccurring_values_to_all_values. :param x: the time series to calculate the feature of :type x: numpy.ndarray :return: the value of this feature :return type: float """ if len(x) == 0: return np.nan unique, counts = np.unique(x, return_counts=True) if counts.shape[0] == 0: return 0 return np.sum(counts > 1) / float(counts.shape[0])
[ "def", "percentage_of_reoccurring_datapoints_to_all_datapoints", "(", "x", ")", ":", "if", "len", "(", "x", ")", "==", "0", ":", "return", "np", ".", "nan", "unique", ",", "counts", "=", "np", ".", "unique", "(", "x", ",", "return_counts", "=", "True", ")", "if", "counts", ".", "shape", "[", "0", "]", "==", "0", ":", "return", "0", "return", "np", ".", "sum", "(", "counts", ">", "1", ")", "/", "float", "(", "counts", ".", "shape", "[", "0", "]", ")" ]
Returns the percentage of unique values, that are present in the time series more than once. len(different values occurring more than once) / len(different values) This means the percentage is normalized to the number of unique values, in contrast to the percentage_of_reoccurring_values_to_all_values. :param x: the time series to calculate the feature of :type x: numpy.ndarray :return: the value of this feature :return type: float
[ "Returns", "the", "percentage", "of", "unique", "values", "that", "are", "present", "in", "the", "time", "series", "more", "than", "once", "." ]
python
train
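A worked example of the ratio being computed: for x = [1, 1, 2, 3, 4] the unique values are [1, 2, 3, 4] and only 1 occurs more than once, so the feature is 1/4:

import numpy as np
from tsfresh.feature_extraction.feature_calculators import \
    percentage_of_reoccurring_datapoints_to_all_datapoints

x = np.array([1, 1, 2, 3, 4])
print(percentage_of_reoccurring_datapoints_to_all_datapoints(x))  # 0.25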
tensorflow/probability
tensorflow_probability/python/internal/special_math.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/special_math.py#L157-L183
def ndtri(p, name="ndtri"): """The inverse of the CDF of the Normal distribution function. Returns x such that the area under the pdf from minus infinity to x is equal to p. A piece-wise rational approximation is done for the function. This is a port of the implementation in netlib. Args: p: `Tensor` of type `float32`, `float64`. name: Python string. A name for the operation (default="ndtri"). Returns: x: `Tensor` with `dtype=p.dtype`. Raises: TypeError: if `p` is not floating-type. """ with tf.name_scope(name): p = tf.convert_to_tensor(value=p, name="p") if dtype_util.as_numpy_dtype(p.dtype) not in [np.float32, np.float64]: raise TypeError( "p.dtype=%s is not handled, see docstring for supported types." % p.dtype) return _ndtri(p)
[ "def", "ndtri", "(", "p", ",", "name", "=", "\"ndtri\"", ")", ":", "with", "tf", ".", "name_scope", "(", "name", ")", ":", "p", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "p", ",", "name", "=", "\"p\"", ")", "if", "dtype_util", ".", "as_numpy_dtype", "(", "p", ".", "dtype", ")", "not", "in", "[", "np", ".", "float32", ",", "np", ".", "float64", "]", ":", "raise", "TypeError", "(", "\"p.dtype=%s is not handled, see docstring for supported types.\"", "%", "p", ".", "dtype", ")", "return", "_ndtri", "(", "p", ")" ]
The inverse of the CDF of the Normal distribution function. Returns x such that the area under the pdf from minus infinity to x is equal to p. A piece-wise rational approximation is done for the function. This is a port of the implementation in netlib. Args: p: `Tensor` of type `float32`, `float64`. name: Python string. A name for the operation (default="ndtri"). Returns: x: `Tensor` with `dtype=p.dtype`. Raises: TypeError: if `p` is not floating-type.
[ "The", "inverse", "of", "the", "CDF", "of", "the", "Normal", "distribution", "function", "." ]
python
test
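Since this lives in an internal module, a sketch that checks it against the textbook normal quantiles (import path taken from this record's own file path; TF2 eager execution assumed):

import tensorflow as tf
from tensorflow_probability.python.internal import special_math

p = tf.constant([0.025, 0.5, 0.975], dtype=tf.float64)
x = special_math.ndtri(p)
print(x.numpy())  # approximately [-1.95996, 0.0, 1.95996]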
nezhar/updatable
updatable/__init__.py
https://github.com/nezhar/updatable/blob/654c70a40d9cabcfdd762acf82b49f66057438af/updatable/__init__.py#L24-L36
def get_environment_requirements_list():
    """
    Take the requirements list from the current running environment

    :return: list of str
    """
    requirement_list = []
    requirements = check_output([sys.executable, '-m', 'pip', 'freeze'])

    for requirement in requirements.split():
        requirement_list.append(requirement.decode("utf-8"))

    return requirement_list
[ "def", "get_environment_requirements_list", "(", ")", ":", "requirement_list", "=", "[", "]", "requirements", "=", "check_output", "(", "[", "sys", ".", "executable", ",", "'-m'", ",", "'pip'", ",", "'freeze'", "]", ")", "for", "requirement", "in", "requirements", ".", "split", "(", ")", ":", "requirement_list", ".", "append", "(", "requirement", ".", "decode", "(", "\"utf-8\"", ")", ")", "return", "requirement_list" ]
Take the requirements list from the current running environment

:return: list of str
[ "Take", "the", "requirements", "list", "from", "the", "current", "running", "environment" ]
python
train
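Usage sketch; the function shells out to `pip freeze` in the current interpreter and returns a list of 'name==version' strings. The package names shown are illustrative only:

reqs = get_environment_requirements_list()
print(reqs[:3])  # e.g. ['certifi==2024.2.2', 'idna==3.6', ...] -- depends on the environment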
telefonicaid/fiware-sdc
python-sdcclient/utils/logger_utils.py
https://github.com/telefonicaid/fiware-sdc/blob/d2d5f87fc574caf6bcc49594bbcb31f620ba8c51/python-sdcclient/utils/logger_utils.py#L129-L145
def log_print_response(logger, response):
    """
    Log HTTP response data
    :param logger: logger to use
    :param response: HTTP response ('Requests' lib)
    :return: None
    """
    log_msg = '<<<<<<<<<<<<<<<<<<<<<< Response <<<<<<<<<<<<<<<<<<\n'
    log_msg += '\t< Response code: {}\n'.format(str(response.status_code))
    log_msg += '\t< Headers: {}\n'.format(str(dict(response.headers)))
    try:
        log_msg += '\t< Payload received:\n {}'.format(_get_pretty_body(dict(response.headers), response.content))
    except ValueError:
        # response.content is bytes and has no .text attribute; fall back to the decoded body
        log_msg += '\t< Payload received:\n {}'.format(_get_pretty_body(dict(response.headers), response.text))
    logger.debug(log_msg)
[ "def", "log_print_response", "(", "logger", ",", "response", ")", ":", "log_msg", "=", "'<<<<<<<<<<<<<<<<<<<<<< Response <<<<<<<<<<<<<<<<<<\\n'", "log_msg", "+=", "'\\t< Response code: {}\\n'", ".", "format", "(", "str", "(", "response", ".", "status_code", ")", ")", "log_msg", "+=", "'\\t< Headers: {}\\n'", ".", "format", "(", "str", "(", "dict", "(", "response", ".", "headers", ")", ")", ")", "try", ":", "log_msg", "+=", "'\\t< Payload received:\\n {}'", ".", "format", "(", "_get_pretty_body", "(", "dict", "(", "response", ".", "headers", ")", ",", "response", ".", "content", ")", ")", "except", "ValueError", ":", "log_msg", "+=", "'\\t< Payload received:\\n {}'", ".", "format", "(", "_get_pretty_body", "(", "dict", "(", "response", ".", "headers", ")", ",", "response", ".", "content", ".", "text", ")", ")", "logger", ".", "debug", "(", "log_msg", ")" ]
Log HTTP response data
:param logger: logger to use
:param response: HTTP response ('Requests' lib)
:return: None
[ "Log", "an", "HTTP", "response", "data", ":", "param", "logger", ":", "logger", "to", "use", ":", "param", "response", ":", "HTTP", "response", "(", "Requests", "lib", ")", ":", "return", ":", "None" ]
python
train
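A usage sketch with the requests library, which the docstring references; _get_pretty_body is a module-private helper, so this runs inside that module (the endpoint URL is hypothetical):

import logging
import requests

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("sdc-client")

response = requests.get("https://example.org/api/resource")  # hypothetical endpoint
log_print_response(logger, response)  # logs status code, headers and a pretty-printed body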
tensorflow/mesh
mesh_tensorflow/transformer/utils.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/utils.py#L499-L520
def auto_batch_size(sequence_length, mesh_shape, layout_rules, tokens_per_split=2048): """Automatically compute batch size. Args: sequence_length: an integer mesh_shape: an input to mtf.convert_to_shape() layout_rules: an input to mtf.convert_to_layout_rules() tokens_per_split: an integer Returns: an integer """ num_splits = mtf.tensor_dim_to_mesh_dim_size( layout_rules, mesh_shape, mtf.Dimension("batch", 0)) ret = max(1, tokens_per_split // sequence_length) * num_splits tf.logging.info( "AUTO_BATCH_SIZE tokens_per_split=%s num_splits=%s" " sequence_length=%s batch_size=%s" % (tokens_per_split, num_splits, sequence_length, ret)) return ret
[ "def", "auto_batch_size", "(", "sequence_length", ",", "mesh_shape", ",", "layout_rules", ",", "tokens_per_split", "=", "2048", ")", ":", "num_splits", "=", "mtf", ".", "tensor_dim_to_mesh_dim_size", "(", "layout_rules", ",", "mesh_shape", ",", "mtf", ".", "Dimension", "(", "\"batch\"", ",", "0", ")", ")", "ret", "=", "max", "(", "1", ",", "tokens_per_split", "//", "sequence_length", ")", "*", "num_splits", "tf", ".", "logging", ".", "info", "(", "\"AUTO_BATCH_SIZE tokens_per_split=%s num_splits=%s\"", "\" sequence_length=%s batch_size=%s\"", "%", "(", "tokens_per_split", ",", "num_splits", ",", "sequence_length", ",", "ret", ")", ")", "return", "ret" ]
Automatically compute batch size. Args: sequence_length: an integer mesh_shape: an input to mtf.convert_to_shape() layout_rules: an input to mtf.convert_to_layout_rules() tokens_per_split: an integer Returns: an integer
[ "Automatically", "compute", "batch", "size", "." ]
python
train
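The arithmetic is easy to check by hand: with tokens_per_split=2048 and sequence_length=512, each split holds max(1, 2048 // 512) = 4 sequences, times however many ways the batch dimension is split. A sketch of just that computation, with num_splits standing in for the mesh lookup:

def sketch_auto_batch_size(sequence_length, num_splits, tokens_per_split=2048):
    # mirrors auto_batch_size above, minus the mesh_shape/layout_rules lookup
    return max(1, tokens_per_split // sequence_length) * num_splits

print(sketch_auto_batch_size(512, num_splits=8))   # 32
print(sketch_auto_batch_size(4096, num_splits=8))  # 8: the max(1, ...) floor kicks in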
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/enrollment/models/enrollment_id.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/enrollment/models/enrollment_id.py#L61-L74
def enrollment_identity(self, enrollment_identity): """ Sets the enrollment_identity of this EnrollmentId. Enrollment identity. :param enrollment_identity: The enrollment_identity of this EnrollmentId. :type: str """ if enrollment_identity is None: raise ValueError("Invalid value for `enrollment_identity`, must not be `None`") if enrollment_identity is not None and not re.search('^A-[A-Za-z0-9:]{95}$', enrollment_identity): raise ValueError("Invalid value for `enrollment_identity`, must be a follow pattern or equal to `/^A-[A-Za-z0-9:]{95}$/`") self._enrollment_identity = enrollment_identity
[ "def", "enrollment_identity", "(", "self", ",", "enrollment_identity", ")", ":", "if", "enrollment_identity", "is", "None", ":", "raise", "ValueError", "(", "\"Invalid value for `enrollment_identity`, must not be `None`\"", ")", "if", "enrollment_identity", "is", "not", "None", "and", "not", "re", ".", "search", "(", "'^A-[A-Za-z0-9:]{95}$'", ",", "enrollment_identity", ")", ":", "raise", "ValueError", "(", "\"Invalid value for `enrollment_identity`, must be a follow pattern or equal to `/^A-[A-Za-z0-9:]{95}$/`\"", ")", "self", ".", "_enrollment_identity", "=", "enrollment_identity" ]
Sets the enrollment_identity of this EnrollmentId. Enrollment identity. :param enrollment_identity: The enrollment_identity of this EnrollmentId. :type: str
[ "Sets", "the", "enrollment_identity", "of", "this", "EnrollmentId", ".", "Enrollment", "identity", "." ]
python
train
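The accepted format is 'A-' followed by exactly 95 characters drawn from [A-Za-z0-9:]; a quick check of the validation regex in isolation (the identity string is a dummy of the right shape):

import re

pattern = r'^A-[A-Za-z0-9:]{95}$'
good = "A-" + "0" * 95   # dummy enrollment identity, 97 characters total
bad = "A-" + "0" * 94    # one character short
print(bool(re.search(pattern, good)), bool(re.search(pattern, bad)))  # True False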