Dataset columns:

  repo              string   (7–54 chars)
  path              string   (4–192 chars)
  url               string   (87–284 chars)
  code              string   (78–104k chars)
  code_tokens       sequence
  docstring         string   (1–46.9k chars)
  docstring_tokens  sequence
  language          string   (1 distinct value)
  partition         string   (3 distinct values)
nicolargo/glances
glances/exports/glances_riemann.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/exports/glances_riemann.py#L61-L70
def init(self):
    """Init the connection to the Riemann server."""
    if not self.export_enable:
        return None
    try:
        client = bernhard.Client(host=self.host, port=self.port)
        return client
    except Exception as e:
        logger.critical("Connection to Riemann failed : %s " % e)
        return None
[ "def", "init", "(", "self", ")", ":", "if", "not", "self", ".", "export_enable", ":", "return", "None", "try", ":", "client", "=", "bernhard", ".", "Client", "(", "host", "=", "self", ".", "host", ",", "port", "=", "self", ".", "port", ")", "return", "client", "except", "Exception", "as", "e", ":", "logger", ".", "critical", "(", "\"Connection to Riemann failed : %s \"", "%", "e", ")", "return", "None" ]
Init the connection to the Riemann server.
[ "Init", "the", "connection", "to", "the", "Riemann", "server", "." ]
python
train
rmed/dev-init
dev_init/dev_init.py
https://github.com/rmed/dev-init/blob/afc5da13002e563324c6291dede0bf2e0f58171f/dev_init/dev_init.py#L192-L204
def read_config():
    """ Read the configuration file and parse the
        different environments.

        Returns:
            ConfigParser object
    """
    if not os.path.isfile(CONFIG):
        with open(CONFIG, "w"):
            pass

    parser = ConfigParser()
    parser.read(CONFIG)

    return parser
[ "def", "read_config", "(", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "CONFIG", ")", ":", "with", "open", "(", "CONFIG", ",", "\"w\"", ")", ":", "pass", "parser", "=", "ConfigParser", "(", ")", "parser", ".", "read", "(", "CONFIG", ")", "return", "parser" ]
Read the configuration file and parse the different environments. Returns: ConfigParser object
[ "Read", "the", "configuration", "file", "and", "parse", "the", "different", "environments", "." ]
python
train
Erotemic/utool
utool/util_ubuntu.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_ubuntu.py#L8-L120
def add_new_mimetype_association(ext, mime_name, exe_fpath=None, dry=True):
    """
    TODO: move to external manager and generalize

    Args:
        ext (str): extension to associate
        mime_name (str): the name of the mime_name to create (defaults to ext)
        exe_fpath (str): executable location if this is for one specific file

    References:
        https://wiki.archlinux.org/index.php/Default_applications#Custom_file_associations

    CommandLine:
        python -m utool.util_ubuntu --exec-add_new_mimetype_association
        # Add ability to open ipython notebooks via double click
        python -m utool.util_ubuntu --exec-add_new_mimetype_association --mime-name=ipynb+json --ext=.ipynb --exe-fpath=/usr/local/bin/ipynb
        python -m utool.util_ubuntu --exec-add_new_mimetype_association --mime-name=ipynb+json --ext=.ipynb --exe-fpath=jupyter-notebook --force

        python -m utool.util_ubuntu --exec-add_new_mimetype_association --mime-name=sqlite --ext=.sqlite --exe-fpath=sqlitebrowser

    Example:
        >>> # DISABLE_DOCTEST
        >>> # SCRIPT
        >>> from utool.util_ubuntu import *  # NOQA
        >>> import utool as ut
        >>> ext = ut.get_argval('--ext', type_=str, default=None)
        >>> mime_name = ut.get_argval('--mime_name', type_=str, default=None)
        >>> exe_fpath = ut.get_argval('--exe_fpath', type_=str, default=None)
        >>> dry = not ut.get_argflag('--force')
        >>> result = add_new_mimetype_association(ext, mime_name, exe_fpath, dry)
        >>> print(result)
    """
    import utool as ut
    terminal = True

    mime_codeblock = ut.codeblock(
        '''
        <?xml version="1.0" encoding="UTF-8"?>
        <mime-info xmlns="http://www.freedesktop.org/standards/shared-mime-info">
            <mime-type type="application/x-{mime_name}">
                <glob-deleteall/>
                <glob pattern="*{ext}"/>
            </mime-type>
        </mime-info>
        '''
    ).format(**locals())

    prefix = ut.truepath('~/.local/share')
    mime_dpath = join(prefix, 'mime/packages')
    mime_fpath = join(mime_dpath, 'application-x-{mime_name}.xml'.format(**locals()))

    print(mime_codeblock)
    print('---')
    print(mime_fpath)
    print('L___')

    if exe_fpath is not None:
        exe_fname_noext = splitext(basename(exe_fpath))[0]
        app_name = exe_fname_noext.replace('_', '-')
        nice_name = ' '.join(
            [word[0].upper() + word[1:].lower()
             for word in app_name.replace('-', ' ').split(' ')]
        )
        app_codeblock = ut.codeblock(
            '''
            [Desktop Entry]
            Name={nice_name}
            Exec={exe_fpath}
            MimeType=application/x-{mime_name}
            Terminal={terminal}
            Type=Application
            Categories=Utility;Application;
            Comment=Custom App
            '''
        ).format(**locals())
        app_dpath = join(prefix, 'applications')
        app_fpath = join(app_dpath, '{app_name}.desktop'.format(**locals()))

        print(app_codeblock)
        print('---')
        print(app_fpath)
        print('L___')

    # WRITE FILES
    if not dry:
        ut.ensuredir(mime_dpath)
        ut.ensuredir(app_dpath)

        ut.writeto(mime_fpath, mime_codeblock, verbose=ut.NOT_QUIET, n=None)
        if exe_fpath is not None:
            ut.writeto(app_fpath, app_codeblock, verbose=ut.NOT_QUIET, n=None)

        # UPDATE BACKENDS
        #ut.cmd('update-mime-database /usr/share/mime')
        #~/.local/share/applications/mimeapps.list
        print(ut.codeblock(
            '''
            Run these commands:
            update-desktop-database ~/.local/share/applications
            update-mime-database ~/.local/share/mime
            '''
        ))
        if exe_fpath is not None:
            ut.cmd('update-desktop-database ~/.local/share/applications')
            ut.cmd('update-mime-database ~/.local/share/mime')
    else:
        print('dry_run')
[ "def", "add_new_mimetype_association", "(", "ext", ",", "mime_name", ",", "exe_fpath", "=", "None", ",", "dry", "=", "True", ")", ":", "import", "utool", "as", "ut", "terminal", "=", "True", "mime_codeblock", "=", "ut", ".", "codeblock", "(", "'''\n <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <mime-info xmlns=\"http://www.freedesktop.org/standards/shared-mime-info\">\n <mime-type type=\"application/x-{mime_name}\">\n <glob-deleteall/>\n <glob pattern=\"*{ext}\"/>\n </mime-type>\n </mime-info>\n '''", ")", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "prefix", "=", "ut", ".", "truepath", "(", "'~/.local/share'", ")", "mime_dpath", "=", "join", "(", "prefix", ",", "'mime/packages'", ")", "mime_fpath", "=", "join", "(", "mime_dpath", ",", "'application-x-{mime_name}.xml'", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ")", "print", "(", "mime_codeblock", ")", "print", "(", "'---'", ")", "print", "(", "mime_fpath", ")", "print", "(", "'L___'", ")", "if", "exe_fpath", "is", "not", "None", ":", "exe_fname_noext", "=", "splitext", "(", "basename", "(", "exe_fpath", ")", ")", "[", "0", "]", "app_name", "=", "exe_fname_noext", ".", "replace", "(", "'_'", ",", "'-'", ")", "nice_name", "=", "' '", ".", "join", "(", "[", "word", "[", "0", "]", ".", "upper", "(", ")", "+", "word", "[", "1", ":", "]", ".", "lower", "(", ")", "for", "word", "in", "app_name", ".", "replace", "(", "'-'", ",", "' '", ")", ".", "split", "(", "' '", ")", "]", ")", "app_codeblock", "=", "ut", ".", "codeblock", "(", "'''\n [Desktop Entry]\n Name={nice_name}\n Exec={exe_fpath}\n MimeType=application/x-{mime_name}\n Terminal={terminal}\n Type=Application\n Categories=Utility;Application;\n Comment=Custom App\n '''", ")", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "app_dpath", "=", "join", "(", "prefix", ",", "'applications'", ")", "app_fpath", "=", "join", "(", "app_dpath", ",", "'{app_name}.desktop'", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ")", "print", "(", "app_codeblock", ")", "print", "(", "'---'", ")", "print", "(", "app_fpath", ")", "print", "(", "'L___'", ")", "# WRITE FILES", "if", "not", "dry", ":", "ut", ".", "ensuredir", "(", "mime_dpath", ")", "ut", ".", "ensuredir", "(", "app_dpath", ")", "ut", ".", "writeto", "(", "mime_fpath", ",", "mime_codeblock", ",", "verbose", "=", "ut", ".", "NOT_QUIET", ",", "n", "=", "None", ")", "if", "exe_fpath", "is", "not", "None", ":", "ut", ".", "writeto", "(", "app_fpath", ",", "app_codeblock", ",", "verbose", "=", "ut", ".", "NOT_QUIET", ",", "n", "=", "None", ")", "# UPDATE BACKENDS", "#ut.cmd('update-mime-database /usr/share/mime')", "#~/.local/share/applications/mimeapps.list", "print", "(", "ut", ".", "codeblock", "(", "'''\n Run these commands:\n update-desktop-database ~/.local/share/applications\n update-mime-database ~/.local/share/mime\n '''", ")", ")", "if", "exe_fpath", "is", "not", "None", ":", "ut", ".", "cmd", "(", "'update-desktop-database ~/.local/share/applications'", ")", "ut", ".", "cmd", "(", "'update-mime-database ~/.local/share/mime'", ")", "else", ":", "print", "(", "'dry_run'", ")" ]
TODO: move to external manager and generalize Args: ext (str): extension to associate mime_name (str): the name of the mime_name to create (defaults to ext) exe_fpath (str): executable location if this is for one specific file References: https://wiki.archlinux.org/index.php/Default_applications#Custom_file_associations Args: ext (str): extension to associate exe_fpath (str): executable location mime_name (str): the name of the mime_name to create (defaults to ext) CommandLine: python -m utool.util_ubuntu --exec-add_new_mimetype_association # Add ability to open ipython notebooks via double click python -m utool.util_ubuntu --exec-add_new_mimetype_association --mime-name=ipynb+json --ext=.ipynb --exe-fpath=/usr/local/bin/ipynb python -m utool.util_ubuntu --exec-add_new_mimetype_association --mime-name=ipynb+json --ext=.ipynb --exe-fpath=jupyter-notebook --force python -m utool.util_ubuntu --exec-add_new_mimetype_association --mime-name=sqlite --ext=.sqlite --exe-fpath=sqlitebrowser Example: >>> # DISABLE_DOCTEST >>> # SCRIPT >>> from utool.util_ubuntu import * # NOQA >>> import utool as ut >>> ext = ut.get_argval('--ext', type_=str, default=None) >>> mime_name = ut.get_argval('--mime_name', type_=str, default=None) >>> exe_fpath = ut.get_argval('--exe_fpath', type_=str, default=None) >>> dry = not ut.get_argflag('--force') >>> result = add_new_mimetype_association(ext, mime_name, exe_fpath, dry) >>> print(result)
[ "TODO", ":", "move", "to", "external", "manager", "and", "generalize" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/layers/common_layers.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L1738-L1800
def padded_cross_entropy(logits,
                         labels,
                         label_smoothing,
                         weights_fn=weights_nonzero,
                         reduce_sum=True,
                         cutoff=0.0,
                         gaussian=False):
  """Compute cross-entropy assuming 0s are padding.

  Computes a loss numerator (the sum of losses), and loss denominator
  (the number of non-padding tokens).

  Args:
    logits: a `Tensor` with shape `[batch, timesteps, vocab_size]`.
      optionally a FactoredTensor.
    labels: an integer `Tensor` with shape `[batch, timesteps]`.
    label_smoothing: a floating point `Scalar`.
    weights_fn: A function from labels to weights.
    reduce_sum: a Boolean, whether to sum at the end or not.
    cutoff: a float, at which point to have no loss.
    gaussian: If true, use a Gaussian distribution for label smoothing

  Returns:
    loss_numerator: a `Scalar`.  Sum of losses.
    loss_denominator: a `Scalar`.  The number of non-padding target tokens.

  Raises:
    ValueError: in case of unsupported argument types.
  """
  if isinstance(logits, FactoredTensor):
    if gaussian:
      raise ValueError("Factored padded cross entropy with Gaussian smoothing "
                       "is not implemented yet.")
    return padded_cross_entropy_factored(
        logits,
        labels,
        label_smoothing,
        weights_fn=weights_fn,
        reduce_sum=reduce_sum)
  confidence = 1.0 - label_smoothing
  logits_shape = shape_list(logits)
  vocab_size = logits_shape[-1]
  with tf.name_scope("padded_cross_entropy", values=[logits, labels]):
    if len(logits_shape) == 2:
      # Deal with the case where we did not insert extra dimensions due to
      # TPU issues.  No pad-to-same-length happens in this case.
      # TODO(noam): remove this logic once TPU can handle extra dimensions.
      labels = tf.reshape(labels, [-1])
    else:
      logits, labels = pad_with_zeros(logits, labels)
    logits = tf.reshape(
        logits,
        shape_list(labels) + [vocab_size],
        name="padded_cross_entropy_size_check")
    logits = tf.cast(logits, tf.float32)
    xent = smoothing_cross_entropy(
        logits, labels, vocab_size, confidence, gaussian=gaussian)
    weights = weights_fn(labels)
    if cutoff > 0.0:
      xent = tf.nn.relu(xent - cutoff)
    if not reduce_sum:
      return xent * weights, weights
    return tf.reduce_sum(xent * weights), tf.reduce_sum(weights)
[ "def", "padded_cross_entropy", "(", "logits", ",", "labels", ",", "label_smoothing", ",", "weights_fn", "=", "weights_nonzero", ",", "reduce_sum", "=", "True", ",", "cutoff", "=", "0.0", ",", "gaussian", "=", "False", ")", ":", "if", "isinstance", "(", "logits", ",", "FactoredTensor", ")", ":", "if", "gaussian", ":", "raise", "ValueError", "(", "\"Factored padded cross entropy with Gaussian smoothing \"", "\"is not implemented yet.\"", ")", "return", "padded_cross_entropy_factored", "(", "logits", ",", "labels", ",", "label_smoothing", ",", "weights_fn", "=", "weights_fn", ",", "reduce_sum", "=", "reduce_sum", ")", "confidence", "=", "1.0", "-", "label_smoothing", "logits_shape", "=", "shape_list", "(", "logits", ")", "vocab_size", "=", "logits_shape", "[", "-", "1", "]", "with", "tf", ".", "name_scope", "(", "\"padded_cross_entropy\"", ",", "values", "=", "[", "logits", ",", "labels", "]", ")", ":", "if", "len", "(", "logits_shape", ")", "==", "2", ":", "# Deal with the case where we did not insert extra dimensions due to", "# TPU issues. No pad-to-same-length happens in this case.", "# TODO(noam): remove this logic once TPU can handle extra dimensions.", "labels", "=", "tf", ".", "reshape", "(", "labels", ",", "[", "-", "1", "]", ")", "else", ":", "logits", ",", "labels", "=", "pad_with_zeros", "(", "logits", ",", "labels", ")", "logits", "=", "tf", ".", "reshape", "(", "logits", ",", "shape_list", "(", "labels", ")", "+", "[", "vocab_size", "]", ",", "name", "=", "\"padded_cross_entropy_size_check\"", ")", "logits", "=", "tf", ".", "cast", "(", "logits", ",", "tf", ".", "float32", ")", "xent", "=", "smoothing_cross_entropy", "(", "logits", ",", "labels", ",", "vocab_size", ",", "confidence", ",", "gaussian", "=", "gaussian", ")", "weights", "=", "weights_fn", "(", "labels", ")", "if", "cutoff", ">", "0.0", ":", "xent", "=", "tf", ".", "nn", ".", "relu", "(", "xent", "-", "cutoff", ")", "if", "not", "reduce_sum", ":", "return", "xent", "*", "weights", ",", "weights", "return", "tf", ".", "reduce_sum", "(", "xent", "*", "weights", ")", ",", "tf", ".", "reduce_sum", "(", "weights", ")" ]
Compute cross-entropy assuming 0s are padding. Computes a loss numerator (the sum of losses), and loss denominator (the number of non-padding tokens). Args: logits: a `Tensor` with shape `[batch, timesteps, vocab_size]`. optionally a FactoredTensor. labels: an integer `Tensor` with shape `[batch, timesteps]`. label_smoothing: a floating point `Scalar`. weights_fn: A function from labels to weights. reduce_sum: a Boolean, whether to sum at the end or not. cutoff: a float, at which point to have no loss. gaussian: If true, use a Gaussian distribution for label smoothing Returns: loss_numerator: a `Scalar`. Sum of losses. loss_denominator: a `Scalar. The number of non-padding target tokens. Raises: ValueError: in case of unsupported argument types.
[ "Compute", "cross", "-", "entropy", "assuming", "0s", "are", "padding", "." ]
python
train
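To make the numerator/denominator convention above concrete, here is a toy NumPy sketch; it is not the tensor2tensor implementation, just the same two quantities computed for a small batch, with label id 0 treated as padding (the `weights_nonzero` behavior):

import numpy as np

def toy_padded_cross_entropy(logits, labels):
    # logits: [batch, timesteps, vocab], labels: [batch, timesteps], 0 = padding
    probs = np.exp(logits) / np.exp(logits).sum(-1, keepdims=True)  # softmax
    xent = -np.log(np.take_along_axis(probs, labels[..., None], axis=-1))[..., 0]
    weights = (labels != 0).astype(float)      # 1 for real tokens, 0 for padding
    return (xent * weights).sum(), weights.sum()

logits = np.random.randn(2, 3, 5)
labels = np.array([[1, 4, 0], [2, 0, 0]])      # trailing zeros are padding
num, den = toy_padded_cross_entropy(logits, labels)
print(num / den)                               # mean loss over non-padding tokens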
chrippa/ds4drv
ds4drv/device.py
https://github.com/chrippa/ds4drv/blob/be7327fc3f5abb8717815f2a1a2ad3d335535d8a/ds4drv/device.py#L91-L93
def rumble(self, small=0, big=0):
    """Sets the intensity of the rumble motors. Valid range is 0-255."""
    self._control(small_rumble=small, big_rumble=big)
[ "def", "rumble", "(", "self", ",", "small", "=", "0", ",", "big", "=", "0", ")", ":", "self", ".", "_control", "(", "small_rumble", "=", "small", ",", "big_rumble", "=", "big", ")" ]
Sets the intensity of the rumble motors. Valid range is 0-255.
[ "Sets", "the", "intensity", "of", "the", "rumble", "motors", ".", "Valid", "range", "is", "0", "-", "255", "." ]
python
train
OLC-Bioinformatics/ConFindr
confindr_src/confindr.py
https://github.com/OLC-Bioinformatics/ConFindr/blob/4c292617c3f270ebd5ff138cbc5a107f6d01200d/confindr_src/confindr.py#L109-L126
def find_genusspecific_allele_list(profiles_file, target_genus):
    """
    A new way of making our specific databases: Make our profiles file have lists of every gene/allele present
    for each genus instead of just excluding a few genes for each. This way, should have much smaller databases
    while managing to make ConFindr a decent bit faster (maybe)
    :param profiles_file: Path to profiles file.
    :param target_genus:
    :return: List of gene/allele combinations that should be part of species-specific database.
    """
    alleles = list()
    with open(profiles_file) as f:
        lines = f.readlines()
    for line in lines:
        line = line.rstrip()
        genus = line.split(':')[0]
        if genus == target_genus:
            alleles = line.split(':')[1].split(',')[:-1]
    return alleles
[ "def", "find_genusspecific_allele_list", "(", "profiles_file", ",", "target_genus", ")", ":", "alleles", "=", "list", "(", ")", "with", "open", "(", "profiles_file", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "for", "line", "in", "lines", ":", "line", "=", "line", ".", "rstrip", "(", ")", "genus", "=", "line", ".", "split", "(", "':'", ")", "[", "0", "]", "if", "genus", "==", "target_genus", ":", "alleles", "=", "line", ".", "split", "(", "':'", ")", "[", "1", "]", ".", "split", "(", "','", ")", "[", ":", "-", "1", "]", "return", "alleles" ]
A new way of making our specific databases: Make our profiles file have lists of every gene/allele present for each genus instead of just excluding a few genes for each. This way, should have much smaller databases while managing to make ConFindr a decent bit faster (maybe) :param profiles_file: Path to profiles file. :param target_genus: :return: List of gene/allele combinations that should be part of species-specific database.
[ "A", "new", "way", "of", "making", "our", "specific", "databases", ":", "Make", "our", "profiles", "file", "have", "lists", "of", "every", "gene", "/", "allele", "present", "for", "each", "genus", "instead", "of", "just", "excluding", "a", "few", "genes", "for", "each", ".", "This", "way", "should", "have", "much", "smaller", "databases", "while", "managing", "to", "make", "ConFindr", "a", "decent", "bit", "faster", "(", "maybe", ")", ":", "param", "profiles_file", ":", "Path", "to", "profiles", "file", ".", ":", "param", "target_genus", ":", ":", "return", ":", "List", "of", "gene", "/", "allele", "combinations", "that", "should", "be", "part", "of", "species", "-", "specific", "database", "." ]
python
train
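To make the parsing logic concrete, here is a sketch with a hypothetical profiles file; the "Genus:allele1,allele2,...," line format, including the trailing comma that `[:-1]` drops, is inferred from the code itself, not from ConFindr documentation:

# Hypothetical profiles file; each line is "Genus:gene_allele1,gene_allele2,...,"
with open('profiles.txt', 'w') as f:
    f.write('Escherichia:adk_1,fumC_4,gyrB_2,\n')
    f.write('Salmonella:aroC_5,dnaN_2,\n')

print(find_genusspecific_allele_list('profiles.txt', 'Escherichia'))
# ['adk_1', 'fumC_4', 'gyrB_2']  -- the trailing empty field is dropped by [:-1]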
c-w/gutenberg
gutenberg/acquire/metadata.py
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L252-L266
def _check_can_be_instantiated(cls, cache_location):
    """Pre-conditions: the cache location is the URL to a Fuseki server
    and the SPARQLWrapper library exists (transitive dependency of
    RDFlib's sparqlstore).
    """
    if not any(cache_location.startswith(prefix)
               for prefix in cls._CACHE_URL_PREFIXES):
        raise InvalidCacheException('cache location is not a Fuseki url')

    try:
        from rdflib.plugins.stores.sparqlstore import SPARQLUpdateStore
    except ImportError:
        raise InvalidCacheException('unable to import sparql store')
    del SPARQLUpdateStore
[ "def", "_check_can_be_instantiated", "(", "cls", ",", "cache_location", ")", ":", "if", "not", "any", "(", "cache_location", ".", "startswith", "(", "prefix", ")", "for", "prefix", "in", "cls", ".", "_CACHE_URL_PREFIXES", ")", ":", "raise", "InvalidCacheException", "(", "'cache location is not a Fuseki url'", ")", "try", ":", "from", "rdflib", ".", "plugins", ".", "stores", ".", "sparqlstore", "import", "SPARQLUpdateStore", "except", "ImportError", ":", "raise", "InvalidCacheException", "(", "'unable to import sparql store'", ")", "del", "SPARQLUpdateStore" ]
Pre-conditions: the cache location is the URL to a Fuseki server and the SPARQLWrapper library exists (transitive dependency of RDFlib's sparqlstore).
[ "Pre", "-", "conditions", ":", "the", "cache", "location", "is", "the", "URL", "to", "a", "Fuseki", "server", "and", "the", "SPARQLWrapper", "library", "exists", "(", "transitive", "dependency", "of", "RDFlib", "s", "sparqlstore", ")", "." ]
python
train
QuantEcon/QuantEcon.py
quantecon/estspec.py
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/estspec.py#L70-L108
def periodogram(x, window=None, window_len=7):
    r"""
    Computes the periodogram

    .. math::

        I(w) = \frac{1}{n} \Big[ \sum_{t=0}^{n-1} x_t e^{itw} \Big] ^2

    at the Fourier frequencies :math:`w_j := \frac{2 \pi j}{n}`,
    :math:`j = 0, \dots, n - 1`, using the fast Fourier transform.  Only the
    frequencies :math:`w_j` in :math:`[0, \pi]` and corresponding values
    :math:`I(w_j)` are returned.  If a window type is given then smoothing
    is performed.

    Parameters
    ----------
    x : array_like(float)
        A flat NumPy array containing the data to smooth
    window_len : scalar(int), optional(default=7)
        An odd integer giving the length of the window.  Defaults to 7.
    window : string
        A string giving the window type. Possible values are 'flat',
        'hanning', 'hamming', 'bartlett' or 'blackman'

    Returns
    -------
    w : array_like(float)
        Fourier frequencies at which periodogram is evaluated
    I_w : array_like(float)
        Values of periodogram at the Fourier frequencies

    """
    n = len(x)
    I_w = np.abs(fft(x))**2 / n
    w = 2 * np.pi * np.arange(n) / n            # Fourier frequencies
    w, I_w = w[:int(n/2)+1], I_w[:int(n/2)+1]   # Take only values on [0, pi]
    if window:
        I_w = smooth(I_w, window_len=window_len, window=window)

    return w, I_w
[ "def", "periodogram", "(", "x", ",", "window", "=", "None", ",", "window_len", "=", "7", ")", ":", "n", "=", "len", "(", "x", ")", "I_w", "=", "np", ".", "abs", "(", "fft", "(", "x", ")", ")", "**", "2", "/", "n", "w", "=", "2", "*", "np", ".", "pi", "*", "np", ".", "arange", "(", "n", ")", "/", "n", "# Fourier frequencies", "w", ",", "I_w", "=", "w", "[", ":", "int", "(", "n", "/", "2", ")", "+", "1", "]", ",", "I_w", "[", ":", "int", "(", "n", "/", "2", ")", "+", "1", "]", "# Take only values on [0, pi]", "if", "window", ":", "I_w", "=", "smooth", "(", "I_w", ",", "window_len", "=", "window_len", ",", "window", "=", "window", ")", "return", "w", ",", "I_w" ]
r""" Computes the periodogram .. math:: I(w) = \frac{1}{n} \Big[ \sum_{t=0}^{n-1} x_t e^{itw} \Big] ^2 at the Fourier frequences :math:`w_j := \frac{2 \pi j}{n}`, :math:`j = 0, \dots, n - 1`, using the fast Fourier transform. Only the frequences :math:`w_j` in :math:`[0, \pi]` and corresponding values :math:`I(w_j)` are returned. If a window type is given then smoothing is performed. Parameters ---------- x : array_like(float) A flat NumPy array containing the data to smooth window_len : scalar(int), optional(default=7) An odd integer giving the length of the window. Defaults to 7. window : string A string giving the window type. Possible values are 'flat', 'hanning', 'hamming', 'bartlett' or 'blackman' Returns ------- w : array_like(float) Fourier frequences at which periodogram is evaluated I_w : array_like(float) Values of periodogram at the Fourier frequences
[ "r", "Computes", "the", "periodogram" ]
python
train
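A minimal usage sketch for the function above, assuming `periodogram` is importable from the `quantecon` package top level:

import numpy as np
from quantecon import periodogram  # assumed top-level export

x = np.random.randn(400)                       # white noise has a flat spectrum
w, I_w = periodogram(x)                        # raw periodogram on [0, pi]
w_s, I_w_s = periodogram(x, window='hamming')  # smoothed estimate
print(w.shape, I_w_s.shape)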
tdeck/rodong
rodong.py
https://github.com/tdeck/rodong/blob/6247148e585ee323925cefb2494e9833e138e293/rodong.py#L36-L80
def __load_section(self, section_key):
    """ Reads the set of article links for a section if they are not cached. """
    if self._sections[section_key] is not None:
        return

    articles = []
    for page in count(1):
        if page > 50:
            raise Exception('Last page detection is probably broken')

        url = '{domain}{section}&iMenuID=1&iSubMenuID={page}'.format(
            domain = DOMAIN,
            section = SECTIONS[section_key],
            page = page
        )

        body = self._session.get(url).content

        # This is a very hacky way of detecting the last page
        # that will probably break again in the future
        if "알수 없는 주소" in body:  # "Unknown Address"
            break

        # Parse out all the article links
        root = html.fromstring(body)
        title_lines = root.find_class('ListNewsLineTitle')
        for title_line in title_lines:
            title_link = title_line.find('a')

            # The links do a JS open in a new window, so we need to parse
            # it out using this ugly, brittle junk
            href = title_link.get('href')
            match = re.match("javascript:article_open\('(.+)'\)", href)
            if not match:
                raise Exception("The site's link format has changed and is not compatible")

            path = match.group(1).decode('string_escape')

            articles.append(Article(
                self._session,
                title_link.text_content().strip(),
                DOMAIN + '/en/' + path
            ))

    self._sections[section_key] = articles
[ "def", "__load_section", "(", "self", ",", "section_key", ")", ":", "if", "self", ".", "_sections", "[", "section_key", "]", "is", "not", "None", ":", "return", "articles", "=", "[", "]", "for", "page", "in", "count", "(", "1", ")", ":", "if", "page", ">", "50", ":", "raise", "Exception", "(", "'Last page detection is probably broken'", ")", "url", "=", "'{domain}{section}&iMenuID=1&iSubMenuID={page}'", ".", "format", "(", "domain", "=", "DOMAIN", ",", "section", "=", "SECTIONS", "[", "section_key", "]", ",", "page", "=", "page", ")", "body", "=", "self", ".", "_session", ".", "get", "(", "url", ")", ".", "content", "# This is a very hacky way of detecting the last page", "# that will probably break again in the future", "if", "\"알수 없는 주소\" in body: # ", "Un", "nown", " ", "ddress\"", "break", "# Parse out all the article links", "root", "=", "html", ".", "fromstring", "(", "body", ")", "title_lines", "=", "root", ".", "find_class", "(", "'ListNewsLineTitle'", ")", "for", "title_line", "in", "title_lines", ":", "title_link", "=", "title_line", ".", "find", "(", "'a'", ")", "# The links do a JS open in a new window, so we need to parse", "# it out using this ugly, brittle junk", "href", "=", "title_link", ".", "get", "(", "'href'", ")", "match", "=", "re", ".", "match", "(", "\"javascript:article_open\\('(.+)'\\)\"", ",", "href", ")", "if", "not", "match", ":", "raise", "Exception", "(", "\"The site's link format has changed and is not compatible\"", ")", "path", "=", "match", ".", "group", "(", "1", ")", ".", "decode", "(", "'string_escape'", ")", "articles", ".", "append", "(", "Article", "(", "self", ".", "_session", ",", "title_link", ".", "text_content", "(", ")", ".", "strip", "(", ")", ",", "DOMAIN", "+", "'/en/'", "+", "path", ")", ")", "self", ".", "_sections", "[", "section_key", "]", "=", "articles" ]
Reads the set of article links for a section if they are not cached.
[ "Reads", "the", "set", "of", "article", "links", "for", "a", "section", "if", "they", "are", "not", "cached", "." ]
python
train
sv0/django-markdown-app
django_markdown/views.py
https://github.com/sv0/django-markdown-app/blob/973968c68d79cbe35304e9d6da876ad33f427d2d/django_markdown/views.py#L7-L23
def preview(request):
    """ Render preview page.

    :returns: A rendered preview
    """
    if settings.MARKDOWN_PROTECT_PREVIEW:
        user = getattr(request, 'user', None)
        if not user or not user.is_staff:
            from django.contrib.auth.views import redirect_to_login
            return redirect_to_login(request.get_full_path())

    return render(
        request, settings.MARKDOWN_PREVIEW_TEMPLATE, dict(
            content=request.POST.get('data', 'No content posted'),
            css=settings.MARKDOWN_STYLE
        ))
[ "def", "preview", "(", "request", ")", ":", "if", "settings", ".", "MARKDOWN_PROTECT_PREVIEW", ":", "user", "=", "getattr", "(", "request", ",", "'user'", ",", "None", ")", "if", "not", "user", "or", "not", "user", ".", "is_staff", ":", "from", "django", ".", "contrib", ".", "auth", ".", "views", "import", "redirect_to_login", "return", "redirect_to_login", "(", "request", ".", "get_full_path", "(", ")", ")", "return", "render", "(", "request", ",", "settings", ".", "MARKDOWN_PREVIEW_TEMPLATE", ",", "dict", "(", "content", "=", "request", ".", "POST", ".", "get", "(", "'data'", ",", "'No content posted'", ")", ",", "css", "=", "settings", ".", "MARKDOWN_STYLE", ")", ")" ]
Render preview page. :returns: A rendered preview
[ "Render", "preview", "page", "." ]
python
train
KieranWynn/pyquaternion
pyquaternion/quaternion.py
https://github.com/KieranWynn/pyquaternion/blob/d2aad7f3fb0d4b9cc23aa72b390e9b2e1273eae9/pyquaternion/quaternion.py#L648-L672
def log(cls, q):
    """Quaternion Logarithm.

    Find the logarithm of a quaternion amount.

    Params:
         q: the input quaternion/argument as a Quaternion object.

    Returns:
         A quaternion amount representing log(q) := (log(|q|), v/|v|acos(w/|q|)).

    Note:
        The method computes the logarithm of general quaternions.
        See [Source](https://math.stackexchange.com/questions/2552/the-logarithm-of-quaternion/2554#2554)
        for more details.
    """
    v_norm = np.linalg.norm(q.vector)
    q_norm = q.norm
    tolerance = 1e-17
    if q_norm < tolerance:
        # 0 quaternion - undefined
        return Quaternion(scalar=-float('inf'), vector=float('nan')*q.vector)
    if v_norm < tolerance:
        # real quaternions - no imaginary part
        return Quaternion(scalar=log(q_norm), vector=[0, 0, 0])
    vec = q.vector / v_norm
    return Quaternion(scalar=log(q_norm), vector=acos(q.scalar/q_norm)*vec)
[ "def", "log", "(", "cls", ",", "q", ")", ":", "v_norm", "=", "np", ".", "linalg", ".", "norm", "(", "q", ".", "vector", ")", "q_norm", "=", "q", ".", "norm", "tolerance", "=", "1e-17", "if", "q_norm", "<", "tolerance", ":", "# 0 quaternion - undefined", "return", "Quaternion", "(", "scalar", "=", "-", "float", "(", "'inf'", ")", ",", "vector", "=", "float", "(", "'nan'", ")", "*", "q", ".", "vector", ")", "if", "v_norm", "<", "tolerance", ":", "# real quaternions - no imaginary part", "return", "Quaternion", "(", "scalar", "=", "log", "(", "q_norm", ")", ",", "vector", "=", "[", "0", ",", "0", ",", "0", "]", ")", "vec", "=", "q", ".", "vector", "/", "v_norm", "return", "Quaternion", "(", "scalar", "=", "log", "(", "q_norm", ")", ",", "vector", "=", "acos", "(", "q", ".", "scalar", "/", "q_norm", ")", "*", "vec", ")" ]
Quaternion Logarithm. Find the logarithm of a quaternion amount. Params: q: the input quaternion/argument as a Quaternion object. Returns: A quaternion amount representing log(q) := (log(|q|), v/|v|acos(w/|q|)). Note: The method computes the logarithm of general quaternions. See [Source](https://math.stackexchange.com/questions/2552/the-logarithm-of-quaternion/2554#2554) for more details.
[ "Quaternion", "Logarithm", "." ]
python
train
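A short usage sketch for `Quaternion.log`, assuming pyquaternion's axis/angle constructor and its companion `Quaternion.exp` classmethod:

import numpy as np
from pyquaternion import Quaternion

q = Quaternion(axis=[0, 0, 1], angle=np.pi / 2)  # 90-degree rotation about z
lq = Quaternion.log(q)
print(lq)                    # scalar part is log(|q|) = 0 for a unit quaternion
print(Quaternion.exp(lq))    # exp should recover q, up to floating point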
sendgrid/sendgrid-python
sendgrid/helpers/mail/mail.py
https://github.com/sendgrid/sendgrid-python/blob/266c2abde7a35dfcce263e06bedc6a0bbdebeac9/sendgrid/helpers/mail/mail.py#L844-L849
def add_category(self, category):
    """Add a category assigned to this message

    :rtype: Category
    """
    self._categories = self._ensure_append(category, self._categories)
[ "def", "add_category", "(", "self", ",", "category", ")", ":", "self", ".", "_categories", "=", "self", ".", "_ensure_append", "(", "category", ",", "self", ".", "_categories", ")" ]
Add a category assigned to this message :rtype: Category
[ "Add", "a", "category", "assigned", "to", "this", "message" ]
python
train
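A usage sketch for `add_category`, assuming the v3 `Mail` helper constructor from the same package; repeated calls accumulate categories via `_ensure_append`:

from sendgrid.helpers.mail import Mail, Category

message = Mail(
    from_email='from@example.com',
    to_emails='to@example.com',
    subject='Hello',
    plain_text_content='Body')
message.add_category(Category('newsletter'))
message.add_category(Category('marketing'))  # categories build up in a list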
ECESeniorDesign/greenhouse_envmgmt
greenhouse_envmgmt/control.py
https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/control.py#L44-L60
def compile_instance_masks(cls):
    """ Compiles instance masks into a master mask
            that is usable by the IO expander.
        Also determines whether or not the pump should be on.
        Method is generalized to support multiple IO expanders
            for possible future expansion.
    """
    # Compute required # of IO expanders needed, clear mask variable.
    number_IO_expanders = ((len(cls._list) - 1) / 4) + 1
    cls.master_mask = [0, 0] * number_IO_expanders

    for ctrlobj in cls:
        # Or masks together bank-by-bank
        cls.master_mask[ctrlobj.bank] |= ctrlobj.mask
        # Handle the pump request separately
        if ctrlobj.pump_request == 1:
            cls.master_mask[cls.pump_bank] |= 1 << cls.pump_pin
[ "def", "compile_instance_masks", "(", "cls", ")", ":", "# Compute required # of IO expanders needed, clear mask variable.", "number_IO_expanders", "=", "(", "(", "len", "(", "cls", ".", "_list", ")", "-", "1", ")", "/", "4", ")", "+", "1", "cls", ".", "master_mask", "=", "[", "0", ",", "0", "]", "*", "number_IO_expanders", "for", "ctrlobj", "in", "cls", ":", "# Or masks together bank-by-banl", "cls", ".", "master_mask", "[", "ctrlobj", ".", "bank", "]", "|=", "ctrlobj", ".", "mask", "# Handle the pump request seperately", "if", "ctrlobj", ".", "pump_request", "==", "1", ":", "cls", ".", "master_mask", "[", "cls", ".", "pump_bank", "]", "|=", "1", "<<", "cls", ".", "pump_pin" ]
Compiles instance masks into a master mask that is usable by the IO expander. Also determines whether or not the pump should be on. Method is generalized to support multiple IO expanders for possible future expansion.
[ "Compiles", "instance", "masks", "into", "a", "master", "mask", "that", "is", "usable", "by", "the", "IO", "expander", ".", "Also", "determines", "whether", "or", "not", "the", "pump", "should", "be", "on", ".", "Method", "is", "generalized", "to", "support", "multiple", "IO", "expanders", "for", "possible", "future", "expansion", "." ]
python
train
tus/tus-py-client
tusclient/uploader.py
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L219-L233
def create_url(self):
    """
    Return upload url.

    Makes request to tus server to create a new upload url for the required file upload.
    """
    headers = self.headers
    headers['upload-length'] = str(self.file_size)
    headers['upload-metadata'] = ','.join(self.encode_metadata())
    resp = requests.post(self.client.url, headers=headers)
    url = resp.headers.get("location")
    if url is None:
        msg = 'Attempt to retrieve create file url with status {}'.format(resp.status_code)
        raise TusCommunicationError(msg, resp.status_code, resp.content)
    return urljoin(self.client.url, url)
[ "def", "create_url", "(", "self", ")", ":", "headers", "=", "self", ".", "headers", "headers", "[", "'upload-length'", "]", "=", "str", "(", "self", ".", "file_size", ")", "headers", "[", "'upload-metadata'", "]", "=", "','", ".", "join", "(", "self", ".", "encode_metadata", "(", ")", ")", "resp", "=", "requests", ".", "post", "(", "self", ".", "client", ".", "url", ",", "headers", "=", "headers", ")", "url", "=", "resp", ".", "headers", ".", "get", "(", "\"location\"", ")", "if", "url", "is", "None", ":", "msg", "=", "'Attempt to retrieve create file url with status {}'", ".", "format", "(", "resp", ".", "status_code", ")", "raise", "TusCommunicationError", "(", "msg", ",", "resp", ".", "status_code", ",", "resp", ".", "content", ")", "return", "urljoin", "(", "self", ".", "client", ".", "url", ",", "url", ")" ]
Return upload url. Makes request to tus server to create a new upload url for the required file upload.
[ "Return", "upload", "url", "." ]
python
train
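`create_url` is called internally by the uploader; a typical client-side sketch looks like the following (the endpoint URL and file path are placeholders):

from tusclient import client

my_client = client.TusClient('https://tusd.tusdemo.net/files/')
uploader = my_client.uploader('./sample.mp4', chunk_size=2 * 1024 * 1024)
uploader.upload()     # creates the upload URL, then PATCHes chunks until done
print(uploader.url)   # the server-assigned upload URL from the Location header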
ethereum/py_ecc
py_ecc/bls/utils.py
https://github.com/ethereum/py_ecc/blob/2088796c59574b256dc8e18f8c9351bc3688ca71/py_ecc/bls/utils.py#L157-L186
def compress_G2(pt: G2Uncompressed) -> G2Compressed:
    """
    The compressed point (z1, z2) has the bit order:
    z1: (c_flag1, b_flag1, a_flag1, x1)
    z2: (c_flag2, b_flag2, a_flag2, x2)
    where
    - c_flag1 is always set to 1
    - b_flag1 indicates infinity when set to 1
    - a_flag1 helps determine the y-coordinate when decompressing,
    - a_flag2, b_flag2, and c_flag2 are always set to 0
    """
    if not is_on_curve(pt, b2):
        raise ValueError(
            "The given point is not on the twisted curve over FQ**2"
        )
    if is_inf(pt):
        return G2Compressed((POW_2_383 + POW_2_382, 0))

    x, y = normalize(pt)
    x_re, x_im = x.coeffs
    y_re, y_im = y.coeffs
    # Record the leftmost bit of y_im to the a_flag1
    # If y_im happens to be zero, then use the bit of y_re
    a_flag1 = (y_im * 2) // q if y_im > 0 else (y_re * 2) // q

    # Imaginary part of x goes to z1, real part goes to z2
    # c_flag1 = 1, b_flag1 = 0
    z1 = x_im + a_flag1 * POW_2_381 + POW_2_383
    # a_flag2 = b_flag2 = c_flag2 = 0
    z2 = x_re
    return G2Compressed((z1, z2))
[ "def", "compress_G2", "(", "pt", ":", "G2Uncompressed", ")", "->", "G2Compressed", ":", "if", "not", "is_on_curve", "(", "pt", ",", "b2", ")", ":", "raise", "ValueError", "(", "\"The given point is not on the twisted curve over FQ**2\"", ")", "if", "is_inf", "(", "pt", ")", ":", "return", "G2Compressed", "(", "(", "POW_2_383", "+", "POW_2_382", ",", "0", ")", ")", "x", ",", "y", "=", "normalize", "(", "pt", ")", "x_re", ",", "x_im", "=", "x", ".", "coeffs", "y_re", ",", "y_im", "=", "y", ".", "coeffs", "# Record the leftmost bit of y_im to the a_flag1", "# If y_im happens to be zero, then use the bit of y_re", "a_flag1", "=", "(", "y_im", "*", "2", ")", "//", "q", "if", "y_im", ">", "0", "else", "(", "y_re", "*", "2", ")", "//", "q", "# Imaginary part of x goes to z1, real part goes to z2", "# c_flag1 = 1, b_flag1 = 0", "z1", "=", "x_im", "+", "a_flag1", "*", "POW_2_381", "+", "POW_2_383", "# a_flag2 = b_flag2 = c_flag2 = 0", "z2", "=", "x_re", "return", "G2Compressed", "(", "(", "z1", ",", "z2", ")", ")" ]
The compressed point (z1, z2) has the bit order: z1: (c_flag1, b_flag1, a_flag1, x1) z2: (c_flag2, b_flag2, a_flag2, x2) where - c_flag1 is always set to 1 - b_flag1 indicates infinity when set to 1 - a_flag1 helps determine the y-coordinate when decompressing, - a_flag2, b_flag2, and c_flag2 are always set to 0
[ "The", "compressed", "point", "(", "z1", "z2", ")", "has", "the", "bit", "order", ":", "z1", ":", "(", "c_flag1", "b_flag1", "a_flag1", "x1", ")", "z2", ":", "(", "c_flag2", "b_flag2", "a_flag2", "x2", ")", "where", "-", "c_flag1", "is", "always", "set", "to", "1", "-", "b_flag1", "indicates", "infinity", "when", "set", "to", "1", "-", "a_flag1", "helps", "determine", "the", "y", "-", "coordinate", "when", "decompressing", "-", "a_flag2", "b_flag2", "and", "c_flag2", "are", "always", "set", "to", "0" ]
python
test
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/qt/console/bracket_matcher.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/bracket_matcher.py#L86-L100
def _cursor_position_changed(self):
    """ Updates the document formatting based on the new cursor position.
    """
    # Clear out the old formatting.
    self._text_edit.setExtraSelections([])

    # Attempt to match a bracket for the new cursor position.
    cursor = self._text_edit.textCursor()
    if not cursor.hasSelection():
        position = cursor.position() - 1
        match_position = self._find_match(position)
        if match_position != -1:
            extra_selections = [ self._selection_for_character(pos)
                                 for pos in (position, match_position) ]
            self._text_edit.setExtraSelections(extra_selections)
[ "def", "_cursor_position_changed", "(", "self", ")", ":", "# Clear out the old formatting.", "self", ".", "_text_edit", ".", "setExtraSelections", "(", "[", "]", ")", "# Attempt to match a bracket for the new cursor position.", "cursor", "=", "self", ".", "_text_edit", ".", "textCursor", "(", ")", "if", "not", "cursor", ".", "hasSelection", "(", ")", ":", "position", "=", "cursor", ".", "position", "(", ")", "-", "1", "match_position", "=", "self", ".", "_find_match", "(", "position", ")", "if", "match_position", "!=", "-", "1", ":", "extra_selections", "=", "[", "self", ".", "_selection_for_character", "(", "pos", ")", "for", "pos", "in", "(", "position", ",", "match_position", ")", "]", "self", ".", "_text_edit", ".", "setExtraSelections", "(", "extra_selections", ")" ]
Updates the document formatting based on the new cursor position.
[ "Updates", "the", "document", "formatting", "based", "on", "the", "new", "cursor", "position", "." ]
python
test
casacore/python-casacore
casacore/tables/table.py
https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/table.py#L1036-L1054
def getcolnp(self, columnname, nparray, startrow=0, nrow=-1, rowincr=1):
    """Get the contents of a column or part of it into the given numpy array.

    The numpy array has to be C-contiguous with a shape matching the
    shape of the column (part). Data type coercion will be done as needed.

    If the column contains arrays, they should all have the same shape.
    An exception is thrown if they differ in shape. In that case the
    method :func:`getvarcol` should be used instead.

    The column can be sliced by giving a start row (default 0), number of
    rows (default all), and row stride (default 1).
    """
    if (not nparray.flags.c_contiguous) or nparray.size == 0:
        raise ValueError("Argument 'nparray' has to be a contiguous " +
                         "numpy array")
    return self._getcolvh(columnname, startrow, nrow, rowincr, nparray)
[ "def", "getcolnp", "(", "self", ",", "columnname", ",", "nparray", ",", "startrow", "=", "0", ",", "nrow", "=", "-", "1", ",", "rowincr", "=", "1", ")", ":", "if", "(", "not", "nparray", ".", "flags", ".", "c_contiguous", ")", "or", "nparray", ".", "size", "==", "0", ":", "raise", "ValueError", "(", "\"Argument 'nparray' has to be a contiguous \"", "+", "\"numpy array\"", ")", "return", "self", ".", "_getcolvh", "(", "columnname", ",", "startrow", ",", "nrow", ",", "rowincr", ",", "nparray", ")" ]
Get the contents of a column or part of it into the given numpy array. The numpy array has to be C-contiguous with a shape matching the shape of the column (part). Data type coercion will be done as needed. If the column contains arrays, they should all have the same shape. An exception is thrown if they differ in shape. In that case the method :func:`getvarcol` should be used instead. The column can be sliced by giving a start row (default 0), number of rows (default all), and row stride (default 1).
[ "Get", "the", "contents", "of", "a", "column", "or", "part", "of", "it", "into", "the", "given", "numpy", "array", "." ]
python
train
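A usage sketch for `getcolnp`; the table name, column shape, and dtype below are hypothetical and must match the actual column being read:

import numpy as np
from casacore.tables import table

t = table('my.ms')                                    # hypothetical table
nrows = 10
data = np.empty((nrows, 64, 4), dtype=np.complex64)  # C-contiguous by default
t.getcolnp('DATA', data, startrow=0, nrow=nrows)     # fills `data` in place
print(abs(data).mean())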
Azure/azure-sdk-for-python
azure-servicebus/azure/servicebus/aio/async_client.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicebus/azure/servicebus/aio/async_client.py#L264-L321
async def send(self, messages, message_timeout=0, session=None, **kwargs):
    """Send one or more messages to the current entity.

    This operation will open a single-use connection, send the supplied messages, and close
    connection. If the entity requires sessions, a session ID must be either
    provided here, or set on each outgoing message.

    :param messages: One or more messages to be sent.
    :type messages: ~azure.servicebus.aio.async_message.Message or
     list[~azure.servicebus.aio.async_message.Message]
    :param message_timeout: The period in seconds during which the Message must be
     sent. If the send is not completed in this time it will return a failure result.
    :type message_timeout: int
    :param session: An optional session ID. If supplied this session ID will be
     applied to every outgoing message sent with this Sender.
     If an individual message already has a session ID, that will be
     used instead. If no session ID is supplied here, nor set on an outgoing
     message, a ValueError will be raised if the entity is sessionful.
    :type session: str or ~uuid.Guid
    :raises: ~azure.servicebus.common.errors.MessageSendFailed
    :returns: A list of the send results of all the messages. Each
     send result is a tuple with two values. The first is a boolean, indicating `True`
     if the message sent, or `False` if it failed. The second is an error if the message
     failed, otherwise it will be `None`.
    :rtype: list[tuple[bool, ~azure.servicebus.common.errors.MessageSendFailed]]

    Example:
        .. literalinclude:: ../examples/async_examples/test_examples_async.py
            :start-after: [START queue_client_send]
            :end-before: [END queue_client_send]
            :language: python
            :dedent: 4
            :caption: Send a single message.

        .. literalinclude:: ../examples/async_examples/test_examples_async.py
            :start-after: [START queue_client_send_multiple]
            :end-before: [END queue_client_send_multiple]
            :language: python
            :dedent: 4
            :caption: Send multiple messages.

    """
    async with self.get_sender(message_timeout=message_timeout, session=session, **kwargs) as sender:
        if isinstance(messages, Message):
            sender.queue_message(messages)
        else:
            try:
                messages = list(messages)
            except TypeError:
                raise TypeError(
                    "Value of messages must be a 'Message' object or a synchronous iterable of 'Message' objects.")
            for m in messages:
                if not isinstance(m, Message):
                    raise TypeError("Item in iterator is not of type 'Message'.")
                sender.queue_message(m)
        return await sender.send_pending_messages()
[ "async", "def", "send", "(", "self", ",", "messages", ",", "message_timeout", "=", "0", ",", "session", "=", "None", ",", "*", "*", "kwargs", ")", ":", "async", "with", "self", ".", "get_sender", "(", "message_timeout", "=", "message_timeout", ",", "session", "=", "session", ",", "*", "*", "kwargs", ")", "as", "sender", ":", "if", "isinstance", "(", "messages", ",", "Message", ")", ":", "sender", ".", "queue_message", "(", "messages", ")", "else", ":", "try", ":", "messages", "=", "list", "(", "messages", ")", "except", "TypeError", ":", "raise", "TypeError", "(", "\"Value of messages must be a 'Message' object or a synchronous iterable of 'Message' objects.\"", ")", "for", "m", "in", "messages", ":", "if", "not", "isinstance", "(", "m", ",", "Message", ")", ":", "raise", "TypeError", "(", "\"Item in iterator is not of type 'Message'.\"", ")", "sender", ".", "queue_message", "(", "m", ")", "return", "await", "sender", ".", "send_pending_messages", "(", ")" ]
Send one or more messages to the current entity. This operation will open a single-use connection, send the supplied messages, and close connection. If the entity requires sessions, a session ID must be either provided here, or set on each outgoing message. :param messages: One or more messages to be sent. :type messages: ~azure.servicebus.aio.async_message.Message or list[~azure.servicebus.aio.async_message.Message] :param message_timeout: The period in seconds during which the Message must be sent. If the send is not completed in this time it will return a failure result. :type message_timeout: int :param session: An optional session ID. If supplied this session ID will be applied to every outgoing message sent with this Sender. If an individual message already has a session ID, that will be used instead. If no session ID is supplied here, nor set on an outgoing message, a ValueError will be raised if the entity is sessionful. :type session: str or ~uuid.Guid :raises: ~azure.servicebus.common.errors.MessageSendFailed :returns: A list of the send results of all the messages. Each send result is a tuple with two values. The first is a boolean, indicating `True` if the message sent, or `False` if it failed. The second is an error if the message failed, otherwise it will be `None`. :rtype: list[tuple[bool, ~azure.servicebus.common.errors.MessageSendFailed]] Example: .. literalinclude:: ../examples/async_examples/test_examples_async.py :start-after: [START queue_client_send] :end-before: [END queue_client_send] :language: python :dedent: 4 :caption: Send a single message. .. literalinclude:: ../examples/async_examples/test_examples_async.py :start-after: [START queue_client_send_multiple] :end-before: [END queue_client_send_multiple] :language: python :dedent: 4 :caption: Send multiple messages.
[ "Send", "one", "or", "more", "messages", "to", "the", "current", "entity", "." ]
python
test
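A hedged usage sketch for `send`, assuming the 0.50-era `azure.servicebus.aio` client API this code ships with; the connection string and queue name are placeholders:

import asyncio
from azure.servicebus.aio import ServiceBusClient, Message

async def main():
    sb_client = ServiceBusClient.from_connection_string('<CONNECTION STRING>')
    queue_client = sb_client.get_queue('myqueue')
    results = await queue_client.send(
        [Message('first'), Message('second')], session='session-1')
    for sent, error in results:  # (bool, MessageSendFailed or None) per message
        print(sent, error)

asyncio.run(main())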
ibm-watson-iot/iot-python
tmp/src/things/things.py
https://github.com/ibm-watson-iot/iot-python/blob/195f05adce3fba4ec997017e41e02ebd85c0c4cc/tmp/src/things/things.py#L1597-L1615
def getMappingsOnThingTypeForLogicalInterface(self, thingTypeId, logicalInterfaceId, draft=False):
    """
    Gets the mappings for a logical interface from a thing type.
    Parameters:
        - thingTypeId (string) - the thing type
        - logicalInterfaceId (string) - the platform returned id of the logical interface
    Throws APIException on failure.
    """
    if draft:
        req = ApiClient.oneThingTypeMappingUrl % (self.host, "/draft", thingTypeId, logicalInterfaceId)
    else:
        req = ApiClient.oneThingTypeMappingUrl % (self.host, "", thingTypeId, logicalInterfaceId)

    resp = requests.get(req, auth=self.credentials, verify=self.verify)
    if resp.status_code == 200:
        self.logger.debug("Mappings retrieved from the thing type")
    else:
        raise ibmiotf.APIException(resp.status_code, "HTTP error getting mappings for a logical interface from a thing type", resp)

    return resp.json()
[ "def", "getMappingsOnThingTypeForLogicalInterface", "(", "self", ",", "thingTypeId", ",", "logicalInterfaceId", ",", "draft", "=", "False", ")", ":", "if", "draft", ":", "req", "=", "ApiClient", ".", "oneThingTypeMappingUrl", "%", "(", "self", ".", "host", ",", "\"/draft\"", ",", "thingTypeId", ",", "logicalInterfaceId", ")", "else", ":", "req", "=", "ApiClient", ".", "oneThingTypeMappingUrl", "%", "(", "self", ".", "host", ",", "\"\"", ",", "thingTypeId", ",", "logicalInterfaceId", ")", "resp", "=", "requests", ".", "get", "(", "req", ",", "auth", "=", "self", ".", "credentials", ",", "verify", "=", "self", ".", "verify", ")", "if", "resp", ".", "status_code", "==", "200", ":", "self", ".", "logger", ".", "debug", "(", "\"Mappings retrieved from the thing type\"", ")", "else", ":", "raise", "ibmiotf", ".", "APIException", "(", "resp", ".", "status_code", ",", "\"HTTP error getting mappings for a logical interface from a thing type\"", ",", "resp", ")", "return", "resp", ".", "json", "(", ")" ]
Gets the mappings for a logical interface from a thing type. Parameters: - thingTypeId (string) - the thing type - logicalInterfaceId (string) - the platform returned id of the logical interface Throws APIException on failure.
[ "Gets", "the", "mappings", "for", "a", "logical", "interface", "from", "a", "thing", "type", ".", "Parameters", ":", "-", "thingTypeId", "(", "string", ")", "-", "the", "thing", "type", "-", "logicalInterfaceId", "(", "string", ")", "-", "the", "platform", "returned", "id", "of", "the", "logical", "interface", "Throws", "APIException", "on", "failure", "." ]
python
test
google/grr
grr/server/grr_response_server/console_utils.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/console_utils.py#L136-L142
def GetNotifications(user=None, token=None):
    """Show pending notifications for a user."""
    if not user:
        user = getpass.getuser()
    user_obj = aff4.FACTORY.Open(
        aff4.ROOT_URN.Add("users").Add(user), token=token)
    return list(user_obj.Get(user_obj.Schema.PENDING_NOTIFICATIONS))
[ "def", "GetNotifications", "(", "user", "=", "None", ",", "token", "=", "None", ")", ":", "if", "not", "user", ":", "user", "=", "getpass", ".", "getuser", "(", ")", "user_obj", "=", "aff4", ".", "FACTORY", ".", "Open", "(", "aff4", ".", "ROOT_URN", ".", "Add", "(", "\"users\"", ")", ".", "Add", "(", "user", ")", ",", "token", "=", "token", ")", "return", "list", "(", "user_obj", ".", "Get", "(", "user_obj", ".", "Schema", ".", "PENDING_NOTIFICATIONS", ")", ")" ]
Show pending notifications for a user.
[ "Show", "pending", "notifications", "for", "a", "user", "." ]
python
train
mabuchilab/QNET
docs/_extensions/inheritance_diagram.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/docs/_extensions/inheritance_diagram.py#L173-L179
def _import_classes(self, class_names, currmodule):
    # type: (unicode, str) -> List[Any]
    """Import a list of classes."""
    classes = []  # type: List[Any]
    for name in class_names:
        classes.extend(import_classes(name, currmodule))
    return classes
[ "def", "_import_classes", "(", "self", ",", "class_names", ",", "currmodule", ")", ":", "# type: (unicode, str) -> List[Any]", "classes", "=", "[", "]", "# type: List[Any]", "for", "name", "in", "class_names", ":", "classes", ".", "extend", "(", "import_classes", "(", "name", ",", "currmodule", ")", ")", "return", "classes" ]
Import a list of classes.
[ "Import", "a", "list", "of", "classes", "." ]
python
train
atlassian-api/atlassian-python-api
atlassian/confluence.py
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/confluence.py#L281-L302
def create_page(self, space, title, body, parent_id=None, type='page'):
    """
    Create page from scratch
    :param space:
    :param title:
    :param body:
    :param parent_id:
    :param type:
    :return:
    """
    log.info('Creating {type} "{space}" -> "{title}"'.format(space=space, title=title, type=type))
    url = 'rest/api/content/'
    data = {
        'type': type,
        'title': title,
        'space': {'key': space},
        'body': {'storage': {
            'value': body,
            'representation': 'storage'}}}
    if parent_id:
        data['ancestors'] = [{'type': type, 'id': parent_id}]
    return self.post(url, data=data)
[ "def", "create_page", "(", "self", ",", "space", ",", "title", ",", "body", ",", "parent_id", "=", "None", ",", "type", "=", "'page'", ")", ":", "log", ".", "info", "(", "'Creating {type} \"{space}\" -> \"{title}\"'", ".", "format", "(", "space", "=", "space", ",", "title", "=", "title", ",", "type", "=", "type", ")", ")", "url", "=", "'rest/api/content/'", "data", "=", "{", "'type'", ":", "type", ",", "'title'", ":", "title", ",", "'space'", ":", "{", "'key'", ":", "space", "}", ",", "'body'", ":", "{", "'storage'", ":", "{", "'value'", ":", "body", ",", "'representation'", ":", "'storage'", "}", "}", "}", "if", "parent_id", ":", "data", "[", "'ancestors'", "]", "=", "[", "{", "'type'", ":", "type", ",", "'id'", ":", "parent_id", "}", "]", "return", "self", ".", "post", "(", "url", ",", "data", "=", "data", ")" ]
Create page from scratch :param space: :param title: :param body: :param parent_id: :param type: :return:
[ "Create", "page", "from", "scratch", ":", "param", "space", ":", ":", "param", "title", ":", ":", "param", "body", ":", ":", "param", "parent_id", ":", ":", "param", "type", ":", ":", "return", ":" ]
python
train
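A usage sketch for `create_page`, assuming the `Confluence` client constructor from the same package; the URL, credentials, and space key are placeholders, and the returned JSON is assumed to carry the new page's `id`:

from atlassian import Confluence

confluence = Confluence(
    url='https://confluence.example.com',
    username='user',
    password='secret')
parent = confluence.create_page('DEMO', 'Release notes', '<p>Index page</p>')
child = confluence.create_page(
    'DEMO', '1.0.0', '<p>Initial release</p>', parent_id=parent['id'])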
a1ezzz/wasp-general
wasp_general/task/dependency.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/task/dependency.py#L153-L178
def started_tasks(self, task_registry_id=None, task_cls=None):
    """ Return tasks that were started. Result may be filtered by the given arguments.

    :param task_registry_id: if it is specified, then try to return single task which id is the same as \
    this value.
    :param task_cls: if it is specified then result will consist of this subclass only

    :return: None or WTask or tuple of WTask
    """
    if task_registry_id is not None:
        task = None
        for registered_task in self.__started:
            if registered_task.__registry_tag__ == task_registry_id:
                task = registered_task

        if task_cls is not None and task is not None:
            if isinstance(task, task_cls) is True:
                return task
            return None
        return task

    result = filter(lambda x: x is not None, self.__started)
    if task_cls is not None:
        result = filter(lambda x: isinstance(x, task_cls), result)
    return tuple(result)
[ "def", "started_tasks", "(", "self", ",", "task_registry_id", "=", "None", ",", "task_cls", "=", "None", ")", ":", "if", "task_registry_id", "is", "not", "None", ":", "task", "=", "None", "for", "registered_task", "in", "self", ".", "__started", ":", "if", "registered_task", ".", "__registry_tag__", "==", "task_registry_id", ":", "task", "=", "registered_task", "if", "task_cls", "is", "not", "None", "and", "task", "is", "not", "None", ":", "if", "isinstance", "(", "task", ",", "task_cls", ")", "is", "True", ":", "return", "task", "return", "None", "return", "task", "result", "=", "filter", "(", "lambda", "x", ":", "x", "is", "not", "None", ",", "self", ".", "__started", ")", "if", "task_cls", "is", "not", "None", ":", "result", "=", "filter", "(", "lambda", "x", ":", "isinstance", "(", "x", ",", "task_cls", ")", ",", "result", ")", "return", "tuple", "(", "result", ")" ]
Return tasks that was started. Result way be filtered by the given arguments. :param task_registry_id: if it is specified, then try to return single task which id is the same as \ this value. :param task_cls: if it is specified then result will be consists of this subclass only :return: None or WTask or tuple of WTask
[ "Return", "tasks", "that", "was", "started", ".", "Result", "way", "be", "filtered", "by", "the", "given", "arguments", "." ]
python
train
autokey/autokey
lib/autokey/scripting.py
https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/scripting.py#L1198-L1235
def create_hotkey(self, folder, description, modifiers, key, contents):
    """
    Create a text hotkey

    Usage: C{engine.create_hotkey(folder, description, modifiers, key, contents)}

    When the given hotkey is pressed, it will be replaced with the given
    text. Modifiers must be given as a list of strings, with the following
    values permitted:

    <ctrl>
    <alt>
    <super>
    <hyper>
    <meta>
    <shift>

    The key must be an unshifted character (i.e. lowercase)

    @param folder: folder to place the abbreviation in, retrieved using C{engine.get_folder()}
    @param description: description for the phrase
    @param modifiers: modifiers to use with the hotkey (as a list)
    @param key: the hotkey
    @param contents: the expansion text
    @raise Exception: if the specified hotkey is not unique
    """
    modifiers.sort()
    if not self.configManager.check_hotkey_unique(modifiers, key, None, None):
        raise Exception("The specified hotkey and modifier combination is already in use")

    self.monitor.suspend()
    p = model.Phrase(description, contents)
    p.modes.append(model.TriggerMode.HOTKEY)
    p.set_hotkey(modifiers, key)
    folder.add_item(p)
    p.persist()
    self.monitor.unsuspend()
    self.configManager.config_altered(False)
[ "def", "create_hotkey", "(", "self", ",", "folder", ",", "description", ",", "modifiers", ",", "key", ",", "contents", ")", ":", "modifiers", ".", "sort", "(", ")", "if", "not", "self", ".", "configManager", ".", "check_hotkey_unique", "(", "modifiers", ",", "key", ",", "None", ",", "None", ")", ":", "raise", "Exception", "(", "\"The specified hotkey and modifier combination is already in use\"", ")", "self", ".", "monitor", ".", "suspend", "(", ")", "p", "=", "model", ".", "Phrase", "(", "description", ",", "contents", ")", "p", ".", "modes", ".", "append", "(", "model", ".", "TriggerMode", ".", "HOTKEY", ")", "p", ".", "set_hotkey", "(", "modifiers", ",", "key", ")", "folder", ".", "add_item", "(", "p", ")", "p", ".", "persist", "(", ")", "self", ".", "monitor", ".", "unsuspend", "(", ")", "self", ".", "configManager", ".", "config_altered", "(", "False", ")" ]
Create a text hotkey Usage: C{engine.create_hotkey(folder, description, modifiers, key, contents)} When the given hotkey is pressed, it will be replaced with the given text. Modifiers must be given as a list of strings, with the following values permitted: <ctrl> <alt> <super> <hyper> <meta> <shift> The key must be an unshifted character (i.e. lowercase) @param folder: folder to place the abbreviation in, retrieved using C{engine.get_folder()} @param description: description for the phrase @param modifiers: modifiers to use with the hotkey (as a list) @param key: the hotkey @param contents: the expansion text @raise Exception: if the specified hotkey is not unique
[ "Create", "a", "text", "hotkey", "Usage", ":", "C", "{", "engine", ".", "create_hotkey", "(", "folder", "description", "modifiers", "key", "contents", ")", "}", "When", "the", "given", "hotkey", "is", "pressed", "it", "will", "be", "replaced", "with", "the", "given", "text", ".", "Modifiers", "must", "be", "given", "as", "a", "list", "of", "strings", "with", "the", "following", "values", "permitted", ":", "<ctrl", ">", "<alt", ">", "<super", ">", "<hyper", ">", "<meta", ">", "<shift", ">", "The", "key", "must", "be", "an", "unshifted", "character", "(", "i", ".", "e", ".", "lowercase", ")" ]
python
train
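As a hedged usage sketch: the call below would live in the body of an AutoKey user script, where the host application injects the engine object (the folder title and hotkey binding here are invented for the example, not taken from the record):

folder = engine.get_folder("My Phrases")   # assumes a folder with this title exists
engine.create_hotkey(folder, "Sign-off", ["<ctrl>", "<alt>"], "s",
                     "Best regards,\nJane")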
MAVENSDC/cdflib
cdflib/cdfwrite.py
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L2008-L2044
def _write_ccr(self, f, g, level: int): ''' Write a CCR to file "g" from file "f" with level "level". Currently, only handles gzip compression. Parameters: f : file Uncompressed file to read from g : file File to write the compressed data into level : int The level of the compression from 0 to 9 Returns: None ''' f.seek(8) data = f.read() uSize = len(data) section_type = CDF.CCR_ rfuA = 0 cData = gzip.compress(data, level) block_size = CDF.CCR_BASE_SIZE64 + len(cData) cprOffset = 0 ccr1 = bytearray(32) #ccr1[0:4] = binascii.unhexlify(CDF.V3magicNUMBER_1) #ccr1[4:8] = binascii.unhexlify(CDF.V3magicNUMBER_2c) ccr1[0:8] = struct.pack('>q', block_size) ccr1[8:12] = struct.pack('>i', section_type) ccr1[12:20] = struct.pack('>q', cprOffset) ccr1[20:28] = struct.pack('>q', uSize) ccr1[28:32] = struct.pack('>i', rfuA) g.seek(0, 2) g.write(ccr1) g.write(cData) cprOffset = self._write_cpr(g, CDF.GZIP_COMPRESSION, level) self._update_offset_value(g, 20, 8, cprOffset)
[ "def", "_write_ccr", "(", "self", ",", "f", ",", "g", ",", "level", ":", "int", ")", ":", "f", ".", "seek", "(", "8", ")", "data", "=", "f", ".", "read", "(", ")", "uSize", "=", "len", "(", "data", ")", "section_type", "=", "CDF", ".", "CCR_", "rfuA", "=", "0", "cData", "=", "gzip", ".", "compress", "(", "data", ",", "level", ")", "block_size", "=", "CDF", ".", "CCR_BASE_SIZE64", "+", "len", "(", "cData", ")", "cprOffset", "=", "0", "ccr1", "=", "bytearray", "(", "32", ")", "#ccr1[0:4] = binascii.unhexlify(CDF.V3magicNUMBER_1)", "#ccr1[4:8] = binascii.unhexlify(CDF.V3magicNUMBER_2c)", "ccr1", "[", "0", ":", "8", "]", "=", "struct", ".", "pack", "(", "'>q'", ",", "block_size", ")", "ccr1", "[", "8", ":", "12", "]", "=", "struct", ".", "pack", "(", "'>i'", ",", "section_type", ")", "ccr1", "[", "12", ":", "20", "]", "=", "struct", ".", "pack", "(", "'>q'", ",", "cprOffset", ")", "ccr1", "[", "20", ":", "28", "]", "=", "struct", ".", "pack", "(", "'>q'", ",", "uSize", ")", "ccr1", "[", "28", ":", "32", "]", "=", "struct", ".", "pack", "(", "'>i'", ",", "rfuA", ")", "g", ".", "seek", "(", "0", ",", "2", ")", "g", ".", "write", "(", "ccr1", ")", "g", ".", "write", "(", "cData", ")", "cprOffset", "=", "self", ".", "_write_cpr", "(", "g", ",", "CDF", ".", "GZIP_COMPRESSION", ",", "level", ")", "self", ".", "_update_offset_value", "(", "g", ",", "20", ",", "8", ",", "cprOffset", ")" ]
Write a CCR to file "g" from file "f" with level "level". Currently, only handles gzip compression. Parameters: f : file Uncompressed file to read from g : file File to write the compressed data into level : int The level of the compression from 0 to 9 Returns: None
[ "Write", "a", "CCR", "to", "file", "g", "from", "file", "f", "with", "level", "level", ".", "Currently", "only", "handles", "gzip", "compression", "." ]
python
train
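The fixed 32-byte header layout written above can be reproduced with the standard library alone; this sketch uses an arbitrary payload and a stand-in value for the CDF.CCR_ section-type constant:

import gzip
import struct

data = b'x' * 100                 # stand-in for the uncompressed CDF body
cdata = gzip.compress(data, 6)
CCR_ = 10                         # hypothetical stand-in for CDF.CCR_
block_size = 32 + len(cdata)      # 32 plays the role of CCR_BASE_SIZE64

header = bytearray(32)
header[0:8] = struct.pack('>q', block_size)    # record size, big-endian int64
header[8:12] = struct.pack('>i', CCR_)         # section type
header[12:20] = struct.pack('>q', 0)           # CPR offset, patched in later
header[20:28] = struct.pack('>q', len(data))   # uncompressed size
header[28:32] = struct.pack('>i', 0)           # reserved field (rfuA)
print(len(header), block_size)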
SeattleTestbed/seash
pyreadline/console/ironpython_console.py
https://github.com/SeattleTestbed/seash/blob/40f9d2285662ff8b61e0468b4196acee089b273b/pyreadline/console/ironpython_console.py#L325-L336
def size(self, width=None, height=None): u'''Set/get window size.''' sc = System.Console if width is not None and height is not None: sc.BufferWidth, sc.BufferHeight = width,height sc.WindowWidth, sc.WindowHeight = width,height else: return sc.BufferWidth, sc.BufferHeight
[ "def", "size", "(", "self", ",", "width", "=", "None", ",", "height", "=", "None", ")", ":", "sc", "=", "System", ".", "Console", "if", "width", "is", "not", "None", "and", "height", "is", "not", "None", ":", "sc", ".", "BufferWidth", ",", "sc", ".", "BufferHeight", "=", "width", ",", "height", "else", ":", "return", "sc", ".", "BufferWidth", ",", "sc", ".", "BufferHeight", "if", "width", "is", "not", "None", "and", "height", "is", "not", "None", ":", "sc", ".", "WindowWidth", ",", "sc", ".", "WindowHeight", "=", "width", ",", "height", "else", ":", "return", "sc", ".", "WindowWidth", "-", "1", ",", "sc", ".", "WindowHeight", "-", "1" ]
u'''Set/get window size.
[ "u", "Set", "/", "get", "window", "size", "." ]
python
train
gwastro/pycbc
pycbc/types/timeseries.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/types/timeseries.py#L435-L458
def psd(self, segment_duration, **kwds): """ Calculate the power spectral density of this time series. Use the `pycbc.psd.welch` method to estimate the psd of this time segment. For more complete options, please see that function. Parameters ---------- segment_duration: float Duration in seconds to use for each sample of the spectrum. kwds : keywords Additional keyword arguments are passed on to the `pycbc.psd.welch` method. Returns ------- psd : FrequencySeries Frequency series containing the estimated PSD. """ from pycbc.psd import welch seg_len = int(segment_duration * self.sample_rate) seg_stride = int(seg_len / 2) return welch(self, seg_len=seg_len, seg_stride=seg_stride, **kwds)
[ "def", "psd", "(", "self", ",", "segment_duration", ",", "*", "*", "kwds", ")", ":", "from", "pycbc", ".", "psd", "import", "welch", "seg_len", "=", "int", "(", "segment_duration", "*", "self", ".", "sample_rate", ")", "seg_stride", "=", "int", "(", "seg_len", "/", "2", ")", "return", "welch", "(", "self", ",", "seg_len", "=", "seg_len", ",", "seg_stride", "=", "seg_stride", ",", "*", "*", "kwds", ")" ]
Calculate the power spectral density of this time series. Use the `pycbc.psd.welch` method to estimate the psd of this time segment. For more complete options, please see that function. Parameters ---------- segment_duration: float Duration in seconds to use for each sample of the spectrum. kwds : keywords Additional keyword arguments are passed on to the `pycbc.psd.welch` method. Returns ------- psd : FrequencySeries Frequency series containing the estimated PSD.
[ "Calculate", "the", "power", "spectral", "density", "of", "this", "time", "series", "." ]
python
train
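A hedged usage sketch for the method above (pycbc must be installed; the sine-wave input is arbitrary test data, not taken from the record):

import numpy
from pycbc.types import TimeSeries

rate = 4096
t = numpy.arange(0, 16, 1.0 / rate)
ts = TimeSeries(numpy.sin(2 * numpy.pi * 100 * t), delta_t=1.0 / rate)

# 4 s per Welch segment -> seg_len = 16384 samples, seg_stride = 8192
psd = ts.psd(4)
print(psd.delta_f, len(psd))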
tanghaibao/goatools
goatools/wr_tbl_class.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/wr_tbl_class.py#L218-L228
def get_hdrs(flds_all, **kws): """Return headers, given user-specified key-word args.""" # Return Headers if the user explicitly lists them. hdrs = kws.get('hdrs', None) if hdrs is not None: return hdrs # User may specify a subset of fields or a column order using prt_flds if 'prt_flds' in kws: return kws['prt_flds'] # All fields in the namedtuple will be in the headers return flds_all
[ "def", "get_hdrs", "(", "flds_all", ",", "*", "*", "kws", ")", ":", "# Return Headers if the user explicitly lists them.", "hdrs", "=", "kws", ".", "get", "(", "'hdrs'", ",", "None", ")", "if", "hdrs", "is", "not", "None", ":", "return", "hdrs", "# User may specify a subset of fields or a column order using prt_flds", "if", "'prt_flds'", "in", "kws", ":", "return", "kws", "[", "'prt_flds'", "]", "# All fields in the namedtuple will be in the headers", "return", "flds_all" ]
Return headers, given user-specified key-word args.
[ "Return", "headers", "given", "user", "-", "specified", "key", "-", "word", "args", "." ]
python
train
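The precedence (explicit hdrs first, then prt_flds, then every field) can be exercised directly; assuming the get_hdrs function from the record is in scope, with made-up field names:

flds = ('GO', 'name', 'p_value')
print(get_hdrs(flds))                              # ('GO', 'name', 'p_value')
print(get_hdrs(flds, prt_flds=['GO', 'p_value']))  # ['GO', 'p_value']
print(get_hdrs(flds, hdrs=['col_a', 'col_b']))     # ['col_a', 'col_b']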
jmcarp/robobrowser
robobrowser/forms/form.py
https://github.com/jmcarp/robobrowser/blob/4284c11d00ae1397983e269aa180e5cf7ee5f4cf/robobrowser/forms/form.py#L218-L227
def serialize(self, submit=None): """Serialize each form field to a Payload container. :param Submit submit: Optional `Submit` to click, if form includes multiple submits :return: Payload instance """ include_fields = prepare_fields(self.fields, self.submit_fields, submit) return Payload.from_fields(include_fields)
[ "def", "serialize", "(", "self", ",", "submit", "=", "None", ")", ":", "include_fields", "=", "prepare_fields", "(", "self", ".", "fields", ",", "self", ".", "submit_fields", ",", "submit", ")", "return", "Payload", ".", "from_fields", "(", "include_fields", ")" ]
Serialize each form field to a Payload container. :param Submit submit: Optional `Submit` to click, if form includes multiple submits :return: Payload instance
[ "Serialize", "each", "form", "field", "to", "a", "Payload", "container", "." ]
python
train
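In normal use serialize() is invoked for you by browser.submit_form(); a direct call looks roughly like this (RoboBrowser must be installed, and the URL and field name are placeholders rather than a real endpoint):

from robobrowser import RoboBrowser

browser = RoboBrowser(parser='html.parser')
browser.open('https://example.com/search')
form = browser.get_form()        # first form on the page
form['q'].value = 'hello'        # assumes the form has a field named 'q'
payload = form.serialize()       # Payload wrapping the prepared fields
print(payload)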
esheldon/fitsio
fitsio/hdu/table.py
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L128-L240
def write(self, data, **keys): """ Write data into this HDU parameters ---------- data: ndarray or list of ndarray A numerical python array. Should be an ordinary array for image HDUs, should have fields for tables. To write an ordinary array to a column in a table HDU, use write_column. If data already exists in this HDU, it will be overwritten. See the append() method to append new rows to a table HDU. firstrow: integer, optional At which row you should begin writing to tables. Be sure you know what you are doing! For appending see the append() method. Default 0. columns: list, optional If data is a list of arrays, you must send columns as a list of names or column numbers You can also send names= names: list, optional same as columns= """ slow = keys.get('slow', False) isrec = False if isinstance(data, (list, dict)): if isinstance(data, list): data_list = data columns_all = keys.get('columns', None) if columns_all is None: columns_all = keys.get('names', None) if columns_all is None: raise ValueError( "you must send columns with a list of arrays") else: columns_all = list(data.keys()) data_list = [data[n] for n in columns_all] colnums_all = [self._extract_colnum(c) for c in columns_all] names = [self.get_colname(c) for c in colnums_all] isobj = numpy.zeros(len(data_list), dtype=numpy.bool) for i in xrange(len(data_list)): isobj[i] = is_object(data_list[i]) else: if data.dtype.fields is None: raise ValueError("You are writing to a table, so I expected " "an array with fields as input. If you want " "to write a simple array, you should use " "write_column to write to a single column, " "or instead write to an image hdu") if data.shape == (): raise ValueError("cannot write data with shape ()") isrec = True names = data.dtype.names # only write object types (variable-length columns) after # writing the main table isobj = fields_are_object(data) data_list = [] colnums_all = [] for i, name in enumerate(names): colnum = self._extract_colnum(name) data_list.append(data[name]) colnums_all.append(colnum) if slow: for i, name in enumerate(names): if not isobj[i]: self.write_column(name, data_list[i], **keys) else: nonobj_colnums = [] nonobj_arrays = [] for i in xrange(len(data_list)): if not isobj[i]: nonobj_colnums.append(colnums_all[i]) if isrec: # this still leaves possibility of f-order sub-arrays.. colref = array_to_native(data_list[i], inplace=False) else: colref = array_to_native_c(data_list[i], inplace=False) if IS_PY3 and colref.dtype.char == 'U': # for python3, we convert unicode to ascii # this will error if the character is not in ascii colref = colref.astype('S', copy=False) nonobj_arrays.append(colref) for tcolnum, tdata in zip(nonobj_colnums, nonobj_arrays): self._verify_column_data(tcolnum, tdata) if len(nonobj_arrays) > 0: firstrow = keys.get('firstrow', 0) self._FITS.write_columns( self._ext+1, nonobj_colnums, nonobj_arrays, firstrow=firstrow+1, write_bitcols=self.write_bitcols) # writing the object arrays always occurs the same way # need to make sure this works for array fields for i, name in enumerate(names): if isobj[i]: self.write_var_column(name, data_list[i], **keys) self._update_info()
[ "def", "write", "(", "self", ",", "data", ",", "*", "*", "keys", ")", ":", "slow", "=", "keys", ".", "get", "(", "'slow'", ",", "False", ")", "isrec", "=", "False", "if", "isinstance", "(", "data", ",", "(", "list", ",", "dict", ")", ")", ":", "if", "isinstance", "(", "data", ",", "list", ")", ":", "data_list", "=", "data", "columns_all", "=", "keys", ".", "get", "(", "'columns'", ",", "None", ")", "if", "columns_all", "is", "None", ":", "columns_all", "=", "keys", ".", "get", "(", "'names'", ",", "None", ")", "if", "columns_all", "is", "None", ":", "raise", "ValueError", "(", "\"you must send columns with a list of arrays\"", ")", "else", ":", "columns_all", "=", "list", "(", "data", ".", "keys", "(", ")", ")", "data_list", "=", "[", "data", "[", "n", "]", "for", "n", "in", "columns_all", "]", "colnums_all", "=", "[", "self", ".", "_extract_colnum", "(", "c", ")", "for", "c", "in", "columns_all", "]", "names", "=", "[", "self", ".", "get_colname", "(", "c", ")", "for", "c", "in", "colnums_all", "]", "isobj", "=", "numpy", ".", "zeros", "(", "len", "(", "data_list", ")", ",", "dtype", "=", "numpy", ".", "bool", ")", "for", "i", "in", "xrange", "(", "len", "(", "data_list", ")", ")", ":", "isobj", "[", "i", "]", "=", "is_object", "(", "data_list", "[", "i", "]", ")", "else", ":", "if", "data", ".", "dtype", ".", "fields", "is", "None", ":", "raise", "ValueError", "(", "\"You are writing to a table, so I expected \"", "\"an array with fields as input. If you want \"", "\"to write a simple array, you should use \"", "\"write_column to write to a single column, \"", "\"or instead write to an image hdu\"", ")", "if", "data", ".", "shape", "is", "(", ")", ":", "raise", "ValueError", "(", "\"cannot write data with shape ()\"", ")", "isrec", "=", "True", "names", "=", "data", ".", "dtype", ".", "names", "# only write object types (variable-length columns) after", "# writing the main table", "isobj", "=", "fields_are_object", "(", "data", ")", "data_list", "=", "[", "]", "colnums_all", "=", "[", "]", "for", "i", ",", "name", "in", "enumerate", "(", "names", ")", ":", "colnum", "=", "self", ".", "_extract_colnum", "(", "name", ")", "data_list", ".", "append", "(", "data", "[", "name", "]", ")", "colnums_all", ".", "append", "(", "colnum", ")", "if", "slow", ":", "for", "i", ",", "name", "in", "enumerate", "(", "names", ")", ":", "if", "not", "isobj", "[", "i", "]", ":", "self", ".", "write_column", "(", "name", ",", "data_list", "[", "i", "]", ",", "*", "*", "keys", ")", "else", ":", "nonobj_colnums", "=", "[", "]", "nonobj_arrays", "=", "[", "]", "for", "i", "in", "xrange", "(", "len", "(", "data_list", ")", ")", ":", "if", "not", "isobj", "[", "i", "]", ":", "nonobj_colnums", ".", "append", "(", "colnums_all", "[", "i", "]", ")", "if", "isrec", ":", "# this still leaves possibility of f-order sub-arrays..", "colref", "=", "array_to_native", "(", "data_list", "[", "i", "]", ",", "inplace", "=", "False", ")", "else", ":", "colref", "=", "array_to_native_c", "(", "data_list", "[", "i", "]", ",", "inplace", "=", "False", ")", "if", "IS_PY3", "and", "colref", ".", "dtype", ".", "char", "==", "'U'", ":", "# for python3, we convert unicode to ascii", "# this will error if the character is not in ascii", "colref", "=", "colref", ".", "astype", "(", "'S'", ",", "copy", "=", "False", ")", "nonobj_arrays", ".", "append", "(", "colref", ")", "for", "tcolnum", ",", "tdata", "in", "zip", "(", "nonobj_colnums", ",", "nonobj_arrays", ")", ":", "self", ".", "_verify_column_data", "(", "tcolnum", ",", "tdata", 
")", "if", "len", "(", "nonobj_arrays", ")", ">", "0", ":", "firstrow", "=", "keys", ".", "get", "(", "'firstrow'", ",", "0", ")", "self", ".", "_FITS", ".", "write_columns", "(", "self", ".", "_ext", "+", "1", ",", "nonobj_colnums", ",", "nonobj_arrays", ",", "firstrow", "=", "firstrow", "+", "1", ",", "write_bitcols", "=", "self", ".", "write_bitcols", ")", "# writing the object arrays always occurs the same way", "# need to make sure this works for array fields", "for", "i", ",", "name", "in", "enumerate", "(", "names", ")", ":", "if", "isobj", "[", "i", "]", ":", "self", ".", "write_var_column", "(", "name", ",", "data_list", "[", "i", "]", ",", "*", "*", "keys", ")", "self", ".", "_update_info", "(", ")" ]
Write data into this HDU parameters ---------- data: ndarray or list of ndarray A numerical python array. Should be an ordinary array for image HDUs, should have fields for tables. To write an ordinary array to a column in a table HDU, use write_column. If data already exists in this HDU, it will be overwritten. See the append() method to append new rows to a table HDU. firstrow: integer, optional At which row you should begin writing to tables. Be sure you know what you are doing! For appending see the append() method. Default 0. columns: list, optional If data is a list of arrays, you must send columns as a list of names or column numbers You can also send names= names: list, optional same as columns=
[ "Write", "data", "into", "this", "HDU" ]
python
train
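A hedged end-to-end sketch for the table branch of write() (fitsio and numpy must be installed; the file name and columns are invented for the example):

import numpy
import fitsio

data = numpy.zeros(4, dtype=[('ra', 'f8'), ('dec', 'f8'), ('flux', 'f4')])
data['ra'] = numpy.linspace(0.0, 1.0, 4)

with fitsio.FITS('demo.fits', 'rw', clobber=True) as fits:
    fits.write(data)                 # creates a new table HDU and fills it
    fits[1].write(data, firstrow=0)  # overwrite the same rows in place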
PMEAL/OpenPNM
openpnm/models/phases/vapor_pressure.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/models/phases/vapor_pressure.py#L35-L85
def water(target, temperature='pore.temperature', salinity='pore.salinity'): r""" Calculates vapor pressure of pure water or seawater given by [1] based on Raoult's law. The pure water vapor pressure is given by [2] Parameters ---------- target : OpenPNM Object The object for which these values are being calculated. This controls the length of the calculated array, and also provides access to other necessary thermofluid properties. temperature : string The dictionary key containing the phase temperature values salinity : string The dictionary key containing the phase salinity values Returns ------- The vapor pressure of water/seawater in [Pa] Notes ----- T must be in K, and S in g of salt per kg of phase, or ppt (parts per thousand) VALIDITY: 273 < T < 473 K; 0 < S < 240 g/kg; ACCURACY: 0.5 % References ---------- [1] Sharqawy M. H., Lienhard J. H., and Zubair, S. M., Desalination and Water Treatment, 2010. [2] ASHRAE handbook: Fundamentals, ASHRAE; 2005. """ T = target[temperature] if salinity in target.keys(): S = target[salinity] else: S = 0 a1 = -5.8002206E+03 a2 = 1.3914993E+00 a3 = -4.8640239E-02 a4 = 4.1764768E-05 a5 = -1.4452093E-08 a6 = 6.5459673E+00 Pv_w = np.exp((a1/T) + a2 + a3*T + a4*T**2 + a5*T**3 + a6*np.log(T)) Pv_sw = Pv_w/(1+0.57357*(S/(1000-S))) value = Pv_sw return value
[ "def", "water", "(", "target", ",", "temperature", "=", "'pore.temperature'", ",", "salinity", "=", "'pore.salinity'", ")", ":", "T", "=", "target", "[", "temperature", "]", "if", "salinity", "in", "target", ".", "keys", "(", ")", ":", "S", "=", "target", "[", "salinity", "]", "else", ":", "S", "=", "0", "a1", "=", "-", "5.8002206E+03", "a2", "=", "1.3914993E+00", "a3", "=", "-", "4.8640239E-02", "a4", "=", "4.1764768E-05", "a5", "=", "-", "1.4452093E-08", "a6", "=", "6.5459673E+00", "Pv_w", "=", "np", ".", "exp", "(", "(", "a1", "/", "T", ")", "+", "a2", "+", "a3", "*", "T", "+", "a4", "*", "T", "**", "2", "+", "a5", "*", "T", "**", "3", "+", "a6", "*", "np", ".", "log", "(", "T", ")", ")", "Pv_sw", "=", "Pv_w", "/", "(", "1", "+", "0.57357", "*", "(", "S", "/", "(", "1000", "-", "S", ")", ")", ")", "value", "=", "Pv_sw", "return", "value" ]
r""" Calculates vapor pressure of pure water or seawater given by [1] based on Raoult's law. The pure water vapor pressure is given by [2] Parameters ---------- target : OpenPNM Object The object for which these values are being calculated. This controls the length of the calculated array, and also provides access to other necessary thermofluid properties. temperature : string The dictionary key containing the phase temperature values salinity : string The dictionary key containing the phase salinity values Returns ------- The vapor pressure of water/seawater in [Pa] Notes ----- T must be in K, and S in g of salt per kg of phase, or ppt (parts per thousand) VALIDITY: 273 < T < 473 K; 0 < S < 240 g/kg; ACCURACY: 0.5 % References ---------- [1] Sharqawy M. H., Lienhard J. H., and Zubair, S. M., Desalination and Water Treatment, 2010. [2] ASHRAE handbook: Fundamentals, ASHRAE; 2005.
[ "r", "Calculates", "vapor", "pressure", "of", "pure", "water", "or", "seawater", "given", "by", "[", "1", "]", "based", "on", "Raoult", "s", "law", ".", "The", "pure", "water", "vapor", "pressure", "is", "given", "by", "[", "2", "]" ]
python
train
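The correlation in the record is easy to spot-check outside OpenPNM; at T = 298.15 K the pure-water branch should land near the familiar 3.17 kPa, and the salinity correction only nudges it down:

import numpy as np

T, S = 298.15, 35.0    # kelvin; grams of salt per kg (typical seawater)
a1, a2, a3 = -5.8002206E+03, 1.3914993E+00, -4.8640239E-02
a4, a5, a6 = 4.1764768E-05, -1.4452093E-08, 6.5459673E+00
Pv_w = np.exp(a1/T + a2 + a3*T + a4*T**2 + a5*T**3 + a6*np.log(T))
Pv_sw = Pv_w / (1 + 0.57357 * (S / (1000 - S)))
print(round(float(Pv_w)), round(float(Pv_sw)))   # roughly 3170 and 3100 Pa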
OpenTreeOfLife/peyotl
peyotl/collections_store/collections_shard.py
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/collections_store/collections_shard.py#L105-L115
def write_configuration(self, out, secret_attrs=False): """Generic configuration, may be overridden by type-specific version""" key_order = ['name', 'path', 'git_dir', 'doc_dir', 'assumed_doc_version', 'git_ssh', 'pkey', 'has_aliases', 'number of collections'] cd = self.get_configuration_dict(secret_attrs=secret_attrs) for k in key_order: if k in cd: out.write(' {} = {}'.format(k, cd[k])) out.write(' collections in alias groups:\n') for o in cd['collections']: out.write(' {} ==> {}\n'.format(o['keys'], o['relpath']))
[ "def", "write_configuration", "(", "self", ",", "out", ",", "secret_attrs", "=", "False", ")", ":", "key_order", "=", "[", "'name'", ",", "'path'", ",", "'git_dir'", ",", "'doc_dir'", ",", "'assumed_doc_version'", ",", "'git_ssh'", ",", "'pkey'", ",", "'has_aliases'", ",", "'number of collections'", "]", "cd", "=", "self", ".", "get_configuration_dict", "(", "secret_attrs", "=", "secret_attrs", ")", "for", "k", "in", "key_order", ":", "if", "k", "in", "cd", ":", "out", ".", "write", "(", "' {} = {}'", ".", "format", "(", "k", ",", "cd", "[", "k", "]", ")", ")", "out", ".", "write", "(", "' collections in alias groups:\\n'", ")", "for", "o", "in", "cd", "[", "'collections'", "]", ":", "out", ".", "write", "(", "' {} ==> {}\\n'", ".", "format", "(", "o", "[", "'keys'", "]", ",", "o", "[", "'relpath'", "]", ")", ")" ]
Generic configuration, may be overridden by type-specific version
[ "Generic", "configuration", "may", "be", "overridden", "by", "type", "-", "specific", "version" ]
python
train
dirko/pyhacrf
pyhacrf/state_machine.py
https://github.com/dirko/pyhacrf/blob/51455681d4edf88e5323313fc0f6b85577ae185c/pyhacrf/state_machine.py#L164-L168
def build_lattice(self, x): """ Construct the list of nodes and edges for input features. """ I, J, _ = x.shape lattice = self._subset_independent_lattice((I, J)) return lattice
[ "def", "build_lattice", "(", "self", ",", "x", ")", ":", "I", ",", "J", ",", "_", "=", "x", ".", "shape", "lattice", "=", "self", ".", "_subset_independent_lattice", "(", "(", "I", ",", "J", ")", ")", "return", "lattice" ]
Construct the list of nodes and edges for input features.
[ "Construct", "the", "list", "of", "nodes", "and", "edges", "for", "input", "features", "." ]
python
train
pydata/xarray
xarray/conventions.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/conventions.py#L595-L624
def cf_encoder(variables, attributes): """ A function which takes dicts of variables and attributes and encodes them to conform to CF conventions as much as possible. This includes masking, scaling, character array handling, and CF-time encoding. Parameters ---------- variables : dict A dictionary mapping from variable name to xarray.Variable attributes : dict A dictionary mapping from attribute name to value Returns ------- encoded_variables : dict A dictionary mapping from variable name to xarray.Variable, encoded_attributes : dict A dictionary mapping from attribute name to value See also: encode_cf_variable """ new_vars = OrderedDict((k, encode_cf_variable(v, name=k)) for k, v in variables.items()) return new_vars, attributes
[ "def", "cf_encoder", "(", "variables", ",", "attributes", ")", ":", "new_vars", "=", "OrderedDict", "(", "(", "k", ",", "encode_cf_variable", "(", "v", ",", "name", "=", "k", ")", ")", "for", "k", ",", "v", "in", "variables", ".", "items", "(", ")", ")", "return", "new_vars", ",", "attributes" ]
A function which takes dicts of variables and attributes and encodes them to conform to CF conventions as much as possible. This includes masking, scaling, character array handling, and CF-time encoding. Parameters ---------- variables : dict A dictionary mapping from variable name to xarray.Variable attributes : dict A dictionary mapping from attribute name to value Returns ------- encoded_variables : dict A dictionary mapping from variable name to xarray.Variable, encoded_attributes : dict A dictionary mapping from attribute name to value See also: encode_cf_variable
[ "A", "function", "which", "takes", "a", "dicts", "of", "variables", "and", "attributes", "and", "encodes", "them", "to", "conform", "to", "CF", "conventions", "as", "much", "as", "possible", ".", "This", "includes", "masking", "scaling", "character", "array", "handling", "and", "CF", "-", "time", "encoding", "." ]
python
train
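A hedged sketch of calling the encoder directly (xarray must be installed; user code normally goes through the dataset writers, which call this for you):

import numpy as np
import xarray as xr
from xarray.conventions import cf_encoder

var = xr.Variable(('x',), np.array([1.0, np.nan, 3.0]),
                  encoding={'_FillValue': -999.0})
new_vars, attrs = cf_encoder({'data': var}, {'title': 'demo'})
print(new_vars['data'].attrs.get('_FillValue'))   # -999.0, moved into attrs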
digidotcom/python-wvalib
wva/vehicle.py
https://github.com/digidotcom/python-wvalib/blob/4252735e2775f80ebaffd813fbe84046d26906b3/wva/vehicle.py#L20-L36
def sample(self): """Get the current value of this vehicle data element The returned value will be a namedtuple with 'value' and 'timestamp' elements. Example:: speed_el = wva.get_vehicle_data_element('VehicleSpeed') for i in xrange(10): speed = speed_el.sample() print("Speed: %0.2f @ %s" % (speed.value, speed.timestamp)) time.sleep(1) """ # Response: {'VehicleSpeed': {'timestamp': '2015-03-20T18:00:49Z', 'value': 223.368515}} data = self._http_client.get("vehicle/data/{}".format(self.name))[self.name] dt = arrow.get(data["timestamp"]).datetime value = data["value"] return VehicleDataSample(value, dt)
[ "def", "sample", "(", "self", ")", ":", "# Response: {'VehicleSpeed': {'timestamp': '2015-03-20T18:00:49Z', 'value': 223.368515}}", "data", "=", "self", ".", "_http_client", ".", "get", "(", "\"vehicle/data/{}\"", ".", "format", "(", "self", ".", "name", ")", ")", "[", "self", ".", "name", "]", "dt", "=", "arrow", ".", "get", "(", "data", "[", "\"timestamp\"", "]", ")", ".", "datetime", "value", "=", "data", "[", "\"value\"", "]", "return", "VehicleDataSample", "(", "value", ",", "dt", ")" ]
Get the current value of this vehicle data element The returned value will be a namedtuple with 'value' and 'timestamp' elements. Example:: speed_el = wva.get_vehicle_data_element('VehicleSpeed') for i in xrange(10): speed = speed_el.sample() print("Speed: %0.2f @ %s" % (speed.value, speed.timestamp)) time.sleep(1)
[ "Get", "the", "current", "value", "of", "this", "vehicle", "data", "element" ]
python
train
tanghaibao/jcvi
jcvi/variation/cnv.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/cnv.py#L645-L669
def coverage(args): """ %prog coverage *.coverage Plot coverage along chromosome. The coverage file can be generated with: $ samtools depth a.bam > a.coverage The plot is a simple line plot using matplotlib. """ from jcvi.graphics.base import savefig p = OptionParser(coverage.__doc__) opts, args, iopts = p.set_image_options(args, format="png") if len(args) != 1: sys.exit(not p.print_help()) covfile, = args df = pd.read_csv(covfile, sep='\t', names=["Ref", "Position", "Depth"]) xlabel, ylabel = "Position", "Depth" df.plot(xlabel, ylabel, color='g') image_name = covfile + "." + iopts.format savefig(image_name)
[ "def", "coverage", "(", "args", ")", ":", "from", "jcvi", ".", "graphics", ".", "base", "import", "savefig", "p", "=", "OptionParser", "(", "coverage", ".", "__doc__", ")", "opts", ",", "args", ",", "iopts", "=", "p", ".", "set_image_options", "(", "args", ",", "format", "=", "\"png\"", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "covfile", ",", "=", "args", "df", "=", "pd", ".", "read_csv", "(", "covfile", ",", "sep", "=", "'\\t'", ",", "names", "=", "[", "\"Ref\"", ",", "\"Position\"", ",", "\"Depth\"", "]", ")", "xlabel", ",", "ylabel", "=", "\"Position\"", ",", "\"Depth\"", "df", ".", "plot", "(", "xlabel", ",", "ylabel", ",", "color", "=", "'g'", ")", "image_name", "=", "covfile", "+", "\".\"", "+", "iopts", ".", "format", "savefig", "(", "image_name", ")" ]
%prog coverage *.coverage Plot coverage along chromosome. The coverage file can be generated with: $ samtools depth a.bam > a.coverage The plot is a simple line plot using matplotlib.
[ "%prog", "coverage", "*", ".", "coverage" ]
python
train
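Stripped of the jcvi option parser, the plotting core is a few lines of pandas; a standalone equivalent (pandas and matplotlib required; 'a.coverage' must already exist, e.g. from samtools depth):

import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv('a.coverage', sep='\t', names=['Ref', 'Position', 'Depth'])
df.plot('Position', 'Depth', color='g')
plt.savefig('a.coverage.png')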
apache/incubator-heron
heron/instance/src/python/utils/misc/pplan_helper.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/instance/src/python/utils/misc/pplan_helper.py#L158-L163
def get_topology_config(self): """Returns the topology config""" if self.pplan.topology.HasField("topology_config"): return self._get_dict_from_config(self.pplan.topology.topology_config) else: return {}
[ "def", "get_topology_config", "(", "self", ")", ":", "if", "self", ".", "pplan", ".", "topology", ".", "HasField", "(", "\"topology_config\"", ")", ":", "return", "self", ".", "_get_dict_from_config", "(", "self", ".", "pplan", ".", "topology", ".", "topology_config", ")", "else", ":", "return", "{", "}" ]
Returns the topology config
[ "Returns", "the", "topology", "config" ]
python
valid
usc-isi-i2/etk
etk/etk.py
https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/etk.py#L85-L103
def parse_json_path(self, jsonpath): """ Parse a jsonpath Args: jsonpath: str Returns: a parsed json path """ if jsonpath not in self.parsed: try: self.parsed[jsonpath] = self.parser(jsonpath) except Exception: self.log("Invalid Json Path: " + jsonpath, "error") raise InvalidJsonPathError("Invalid Json Path") return self.parsed[jsonpath]
[ "def", "parse_json_path", "(", "self", ",", "jsonpath", ")", ":", "if", "jsonpath", "not", "in", "self", ".", "parsed", ":", "try", ":", "self", ".", "parsed", "[", "jsonpath", "]", "=", "self", ".", "parser", "(", "jsonpath", ")", "except", "Exception", ":", "self", ".", "log", "(", "\"Invalid Json Path: \"", "+", "jsonpath", ",", "\"error\"", ")", "raise", "InvalidJsonPathError", "(", "\"Invalid Json Path\"", ")", "return", "self", ".", "parsed", "[", "jsonpath", "]" ]
Parse a jsonpath Args: jsonpath: str Returns: a parsed json path
[ "Parse", "a", "jsonpath" ]
python
train
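The method is a small memoising wrapper around a jsonpath parser; the same pattern stands alone with jsonpath_ng (treating that library choice as an assumption rather than a fact about etk's internals):

from jsonpath_ng.ext import parse

_parsed = {}

def parse_json_path(jsonpath):
    if jsonpath not in _parsed:
        _parsed[jsonpath] = parse(jsonpath)   # raises on an invalid path
    return _parsed[jsonpath]

expr = parse_json_path('$.projects[*].name')
print([m.value for m in expr.find({'projects': [{'name': 'etk'}]})])   # ['etk']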
Capitains/MyCapytain
MyCapytain/common/reference/_dts_1.py
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/reference/_dts_1.py#L130-L144
def ingest(cls, resource): """ Ingest a list of DTS Citation objects (as parsed JSON-LD) and create the Citation Graph :param resource: List of Citation objects from the DTS Collection Endpoint (as expanded JSON-LD) :type resource: list :return: Citation Graph """ _set = cls() for data in resource: _set.add_child( cls.CitationClass.ingest(data, root=_set) ) return _set
[ "def", "ingest", "(", "cls", ",", "resource", ")", ":", "_set", "=", "cls", "(", ")", "for", "data", "in", "resource", ":", "_set", ".", "add_child", "(", "cls", ".", "CitationClass", ".", "ingest", "(", "data", ",", "root", "=", "_set", ")", ")", "return", "_set" ]
Ingest a list of DTS Citation objects (as parsed JSON-LD) and create the Citation Graph :param resource: List of Citation objects from the DTS Collection Endpoint (as expanded JSON-LD) :type resource: list :return: Citation Graph
[ "Ingest", "a", "list", "of", "DTS", "Citation", "object", "(", "as", "parsed", "JSON", "-", "LD", ")", "and", "creates", "the", "Citation", "Graph" ]
python
train
abhishek-ram/pyas2-lib
pyas2lib/as2.py
https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/as2.py#L668-L785
def build(self, message, status, detailed_status=None): """ Function builds and signs an AS2 MDN message. :param message: The received AS2 message for which this is an MDN. :param status: The status of processing of the received AS2 message. :param detailed_status: The optional detailed status of processing of the received AS2 message. Used to give additional error info (default "None") """ # Generate message id using UUID 1 as it uses both hostname and time self.message_id = email_utils.make_msgid().lstrip('<').rstrip('>') self.orig_message_id = message.message_id # Set up the message headers mdn_headers = { 'AS2-Version': AS2_VERSION, 'ediint-features': EDIINT_FEATURES, 'Message-ID': '<{}>'.format(self.message_id), 'AS2-From': quote_as2name(message.headers.get('as2-to')), 'AS2-To': quote_as2name(message.headers.get('as2-from')), 'Date': email_utils.formatdate(localtime=True), 'user-agent': 'pyAS2 Open Source AS2 Software' } # Set the confirmation text message here confirmation_text = MDN_CONFIRM_TEXT # overwrite with organization specific message if message.receiver and message.receiver.mdn_confirm_text: confirmation_text = message.receiver.mdn_confirm_text # overwrite with partner specific message if message.sender and message.sender.mdn_confirm_text: confirmation_text = message.sender.mdn_confirm_text if status != 'processed': confirmation_text = MDN_FAILED_TEXT self.payload = MIMEMultipart( 'report', report_type='disposition-notification') # Create and attach the MDN Text Message mdn_text = email_message.Message() mdn_text.set_payload('%s\n' % confirmation_text) mdn_text.set_type('text/plain') del mdn_text['MIME-Version'] encoders.encode_7or8bit(mdn_text) self.payload.attach(mdn_text) # Create and attach the MDN Report Message mdn_base = email_message.Message() mdn_base.set_type('message/disposition-notification') mdn_report = 'Reporting-UA: pyAS2 Open Source AS2 Software\n' mdn_report += 'Original-Recipient: rfc822; {}\n'.format( message.headers.get('as2-to')) mdn_report += 'Final-Recipient: rfc822; {}\n'.format( message.headers.get('as2-to')) mdn_report += 'Original-Message-ID: <{}>\n'.format(message.message_id) mdn_report += 'Disposition: automatic-action/' \ 'MDN-sent-automatically; {}'.format(status) if detailed_status: mdn_report += ': {}'.format(detailed_status) mdn_report += '\n' if message.mic: mdn_report += 'Received-content-MIC: {}, {}\n'.format( message.mic.decode(), message.digest_alg) mdn_base.set_payload(mdn_report) del mdn_base['MIME-Version'] encoders.encode_7or8bit(mdn_base) self.payload.attach(mdn_base) # logger.debug('MDN for message %s created:\n%s' % ( # message.message_id, mdn_base.as_string())) # Sign the MDN if it is requested by the sender if message.headers.get('disposition-notification-options') and \ message.receiver and message.receiver.sign_key: self.digest_alg = \ message.headers['disposition-notification-options'].split( ';')[-1].split(',')[-1].strip().replace('-', '') signed_mdn = MIMEMultipart( 'signed', protocol="application/pkcs7-signature") del signed_mdn['MIME-Version'] signed_mdn.attach(self.payload) # Create the signature mime message signature = email_message.Message() signature.set_type('application/pkcs7-signature') signature.set_param('name', 'smime.p7s') signature.set_param('smime-type', 'signed-data') signature.add_header( 'Content-Disposition', 'attachment', filename='smime.p7s') del signature['MIME-Version'] signature.set_payload(sign_message( canonicalize(self.payload), self.digest_alg, message.receiver.sign_key )) encoders.encode_base64(signature) # logger.debug( # 'Signature for MDN created:\n%s' % signature.as_string()) signed_mdn.set_param('micalg', self.digest_alg) signed_mdn.attach(signature) self.payload = signed_mdn # Update the headers of the final payload and set message boundary for k, v in mdn_headers.items(): if self.payload.get(k): self.payload.replace_header(k, v) else: self.payload.add_header(k, v) if self.payload.is_multipart(): self.payload.set_boundary(make_mime_boundary())
[ "def", "build", "(", "self", ",", "message", ",", "status", ",", "detailed_status", "=", "None", ")", ":", "# Generate message id using UUID 1 as it uses both hostname and time", "self", ".", "message_id", "=", "email_utils", ".", "make_msgid", "(", ")", ".", "lstrip", "(", "'<'", ")", ".", "rstrip", "(", "'>'", ")", "self", ".", "orig_message_id", "=", "message", ".", "message_id", "# Set up the message headers", "mdn_headers", "=", "{", "'AS2-Version'", ":", "AS2_VERSION", ",", "'ediint-features'", ":", "EDIINT_FEATURES", ",", "'Message-ID'", ":", "'<{}>'", ".", "format", "(", "self", ".", "message_id", ")", ",", "'AS2-From'", ":", "quote_as2name", "(", "message", ".", "headers", ".", "get", "(", "'as2-to'", ")", ")", ",", "'AS2-To'", ":", "quote_as2name", "(", "message", ".", "headers", ".", "get", "(", "'as2-from'", ")", ")", ",", "'Date'", ":", "email_utils", ".", "formatdate", "(", "localtime", "=", "True", ")", ",", "'user-agent'", ":", "'pyAS2 Open Source AS2 Software'", "}", "# Set the confirmation text message here", "confirmation_text", "=", "MDN_CONFIRM_TEXT", "# overwrite with organization specific message", "if", "message", ".", "receiver", "and", "message", ".", "receiver", ".", "mdn_confirm_text", ":", "confirmation_text", "=", "message", ".", "receiver", ".", "mdn_confirm_text", "# overwrite with partner specific message", "if", "message", ".", "sender", "and", "message", ".", "sender", ".", "mdn_confirm_text", ":", "confirmation_text", "=", "message", ".", "sender", ".", "mdn_confirm_text", "if", "status", "!=", "'processed'", ":", "confirmation_text", "=", "MDN_FAILED_TEXT", "self", ".", "payload", "=", "MIMEMultipart", "(", "'report'", ",", "report_type", "=", "'disposition-notification'", ")", "# Create and attach the MDN Text Message", "mdn_text", "=", "email_message", ".", "Message", "(", ")", "mdn_text", ".", "set_payload", "(", "'%s\\n'", "%", "confirmation_text", ")", "mdn_text", ".", "set_type", "(", "'text/plain'", ")", "del", "mdn_text", "[", "'MIME-Version'", "]", "encoders", ".", "encode_7or8bit", "(", "mdn_text", ")", "self", ".", "payload", ".", "attach", "(", "mdn_text", ")", "# Create and attache the MDN Report Message", "mdn_base", "=", "email_message", ".", "Message", "(", ")", "mdn_base", ".", "set_type", "(", "'message/disposition-notification'", ")", "mdn_report", "=", "'Reporting-UA: pyAS2 Open Source AS2 Software\\n'", "mdn_report", "+=", "'Original-Recipient: rfc822; {}\\n'", ".", "format", "(", "message", ".", "headers", ".", "get", "(", "'as2-to'", ")", ")", "mdn_report", "+=", "'Final-Recipient: rfc822; {}\\n'", ".", "format", "(", "message", ".", "headers", ".", "get", "(", "'as2-to'", ")", ")", "mdn_report", "+=", "'Original-Message-ID: <{}>\\n'", ".", "format", "(", "message", ".", "message_id", ")", "mdn_report", "+=", "'Disposition: automatic-action/'", "'MDN-sent-automatically; {}'", ".", "format", "(", "status", ")", "if", "detailed_status", ":", "mdn_report", "+=", "': {}'", ".", "format", "(", "detailed_status", ")", "mdn_report", "+=", "'\\n'", "if", "message", ".", "mic", ":", "mdn_report", "+=", "'Received-content-MIC: {}, {}\\n'", ".", "format", "(", "message", ".", "mic", ".", "decode", "(", ")", ",", "message", ".", "digest_alg", ")", "mdn_base", ".", "set_payload", "(", "mdn_report", ")", "del", "mdn_base", "[", "'MIME-Version'", "]", "encoders", ".", "encode_7or8bit", "(", "mdn_base", ")", "self", ".", "payload", ".", "attach", "(", "mdn_base", ")", "# logger.debug('MDN for message %s created:\\n%s' % (", "# 
message.message_id, mdn_base.as_string()))", "# Sign the MDN if it is requested by the sender", "if", "message", ".", "headers", ".", "get", "(", "'disposition-notification-options'", ")", "and", "message", ".", "receiver", "and", "message", ".", "receiver", ".", "sign_key", ":", "self", ".", "digest_alg", "=", "message", ".", "headers", "[", "'disposition-notification-options'", "]", ".", "split", "(", "';'", ")", "[", "-", "1", "]", ".", "split", "(", "','", ")", "[", "-", "1", "]", ".", "strip", "(", ")", ".", "replace", "(", "'-'", ",", "''", ")", "signed_mdn", "=", "MIMEMultipart", "(", "'signed'", ",", "protocol", "=", "\"application/pkcs7-signature\"", ")", "del", "signed_mdn", "[", "'MIME-Version'", "]", "signed_mdn", ".", "attach", "(", "self", ".", "payload", ")", "# Create the signature mime message", "signature", "=", "email_message", ".", "Message", "(", ")", "signature", ".", "set_type", "(", "'application/pkcs7-signature'", ")", "signature", ".", "set_param", "(", "'name'", ",", "'smime.p7s'", ")", "signature", ".", "set_param", "(", "'smime-type'", ",", "'signed-data'", ")", "signature", ".", "add_header", "(", "'Content-Disposition'", ",", "'attachment'", ",", "filename", "=", "'smime.p7s'", ")", "del", "signature", "[", "'MIME-Version'", "]", "signature", ".", "set_payload", "(", "sign_message", "(", "canonicalize", "(", "self", ".", "payload", ")", ",", "self", ".", "digest_alg", ",", "message", ".", "receiver", ".", "sign_key", ")", ")", "encoders", ".", "encode_base64", "(", "signature", ")", "# logger.debug(", "# 'Signature for MDN created:\\n%s' % signature.as_string())", "signed_mdn", ".", "set_param", "(", "'micalg'", ",", "self", ".", "digest_alg", ")", "signed_mdn", ".", "attach", "(", "signature", ")", "self", ".", "payload", "=", "signed_mdn", "# Update the headers of the final payload and set message boundary", "for", "k", ",", "v", "in", "mdn_headers", ".", "items", "(", ")", ":", "if", "self", ".", "payload", ".", "get", "(", "k", ")", ":", "self", ".", "payload", ".", "replace_header", "(", "k", ",", "v", ")", "else", ":", "self", ".", "payload", ".", "add_header", "(", "k", ",", "v", ")", "if", "self", ".", "payload", ".", "is_multipart", "(", ")", ":", "self", ".", "payload", ".", "set_boundary", "(", "make_mime_boundary", "(", ")", ")" ]
Function builds and signs an AS2 MDN message. :param message: The received AS2 message for which this is an MDN. :param status: The status of processing of the received AS2 message. :param detailed_status: The optional detailed status of processing of the received AS2 message. Used to give additional error info (default "None")
[ "Function", "builds", "and", "signs", "an", "AS2", "MDN", "message", "." ]
python
train
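The multipart/report scaffolding in the record can be reproduced with the standard library alone; this sketch builds just the two-part MDN body, with made-up status text:

from email.mime.multipart import MIMEMultipart
from email.message import Message

report = MIMEMultipart('report', report_type='disposition-notification')

human = Message()                      # human-readable half
human.set_type('text/plain')
human.set_payload('The message was processed successfully.\n')
report.attach(human)

machine = Message()                    # machine-readable half
machine.set_type('message/disposition-notification')
machine.set_payload('Original-Message-ID: <example-id>\n'
                    'Disposition: automatic-action/MDN-sent-automatically; processed\n')
report.attach(machine)

print(report.get_content_type())       # multipart/report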
shapiromatron/bmds
bmds/datasets.py
https://github.com/shapiromatron/bmds/blob/395c6ce84ad82876fd9fa4a89a3497fb61616de0/bmds/datasets.py#L482-L492
def drop_dose(self): """ Drop the maximum dose and related response values. """ doses = np.array(self.individual_doses) responses = np.array(self.responses) mask = doses != doses.max() self.individual_doses = doses[mask].tolist() self.responses = responses[mask].tolist() self.set_summary_data() self._validate()
[ "def", "drop_dose", "(", "self", ")", ":", "doses", "=", "np", ".", "array", "(", "self", ".", "individual_doses", ")", "responses", "=", "np", ".", "array", "(", "self", ".", "responses", ")", "mask", "=", "doses", "!=", "doses", ".", "max", "(", ")", "self", ".", "individual_doses", "=", "doses", "[", "mask", "]", ".", "tolist", "(", ")", "self", ".", "responses", "=", "responses", "[", "mask", "]", ".", "tolist", "(", ")", "self", ".", "set_summary_data", "(", ")", "self", ".", "_validate", "(", ")" ]
Drop the maximum dose and related response values.
[ "Drop", "the", "maximum", "dose", "and", "related", "response", "values", "." ]
python
train
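The masking step generalises to any pair of aligned arrays; a standalone check of the drop-the-top-dose logic with invented numbers:

import numpy as np

doses = np.array([0, 0, 10, 10, 50, 50])
responses = np.array([1.0, 1.2, 2.0, 2.1, 5.0, 5.2])
mask = doses != doses.max()
print(doses[mask].tolist())       # [0, 0, 10, 10]
print(responses[mask].tolist())   # [1.0, 1.2, 2.0, 2.1]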
trivago/Protector
protector/parser/query_parser.py
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/parser/query_parser.py#L182-L197
def create_delete_query(self, tokens): """ Parse tokens of delete query :param tokens: A list of InfluxDB query tokens """ # From keyword is required if not tokens[Keyword.FROM]: return None where_stmt = self.parse_keyword(Keyword.WHERE, tokens) if where_stmt: if not where_stmt.startswith('time'): return None return DeleteQuery( self.parse_keyword(Keyword.FROM, tokens), self.parse_keyword(Keyword.WHERE, tokens) )
[ "def", "create_delete_query", "(", "self", ",", "tokens", ")", ":", "# From keyword is required", "if", "not", "tokens", "[", "Keyword", ".", "FROM", "]", ":", "return", "None", "where_stmt", "=", "self", ".", "parse_keyword", "(", "Keyword", ".", "WHERE", ",", "tokens", ")", "if", "where_stmt", ":", "if", "not", "where_stmt", ".", "startswith", "(", "'time'", ")", ":", "return", "None", "return", "DeleteQuery", "(", "self", ".", "parse_keyword", "(", "Keyword", ".", "FROM", ",", "tokens", ")", ",", "self", ".", "parse_keyword", "(", "Keyword", ".", "WHERE", ",", "tokens", ")", ")" ]
Parse tokens of delete query :param tokens: A list of InfluxDB query tokens
[ "Parse", "tokens", "of", "delete", "query", ":", "param", "tokens", ":", "A", "list", "of", "InfluxDB", "query", "tokens" ]
python
valid
OpenTreeOfLife/peyotl
tutorials/ot-info-for-taxon-name.py
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/tutorials/ot-info-for-taxon-name.py#L163-L206
def main(argv): """This function sets up a command-line option parser and then calls match_and_print to do all of the real work. """ import argparse description = 'Uses Open Tree of Life web services to try to find a taxon ID for each name supplied. ' \ 'Using a --context-name=NAME to provide a limited taxonomic context and using the ' \ ' --prohibit-fuzzy-matching option can make the matching faster. If only ' \ 'one match is found, then it also calls the equivalent of the ot-taxon-info.py and ot-taxon-subtree.py scripts.' parser = argparse.ArgumentParser(prog='ot-tnrs-match-names', description=description) parser.add_argument('names', nargs='+', help='name(s) for which we will try to find OTT IDs') parser.add_argument('--context-name', default=None, type=str, required=False) parser.add_argument('--include-dubious', action='store_true', default=False, required=False, help='return matches to taxa that are not included in the synthetic tree because their taxonomic status is doubtful') parser.add_argument('--subtree', action='store_true', default=False, required=False, help='print the newick representation of the taxonomic subtree if there is only one matching OTT ID') parser.add_argument('--include-deprecated', action='store_true', default=False, required=False) parser.add_argument('--prohibit-fuzzy-matching', action='store_true', default=False, required=False) args = parser.parse_args(argv) # The service takes do_approximate_matching # We use the opposite to make the command-line just include positive directives # (as opposed to requiring --do-approximate-matching=False) so we use "not" do_approximate_matching = not args.prohibit_fuzzy_matching name_list = args.names if len(name_list) == 0: name_list = ["Homo sapiens", "Gorilla gorilla"] sys.stderr.write('Running a demonstration query with {}\n'.format(name_list)) else: for name in name_list: if name.startswith('-'): parser.print_help() match_and_print(name_list, context_name=args.context_name, do_approximate_matching=do_approximate_matching, include_dubious=args.include_dubious, include_deprecated=args.include_deprecated, include_subtree=args.subtree, output=sys.stdout)
[ "def", "main", "(", "argv", ")", ":", "import", "argparse", "description", "=", "'Uses Open Tree of Life web services to try to find a taxon ID for each name supplied. '", "'Using a --context-name=NAME to provide a limited taxonomic context and using the '", "' --prohibit-fuzzy-matching option can make the matching faster. If there is only'", "'one match finds, then it also calls the equivalent of the ot-taxon-info.py and ot-taxon-subtree.py scripts.'", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "'ot-tnrs-match-names'", ",", "description", "=", "description", ")", "parser", ".", "add_argument", "(", "'names'", ",", "nargs", "=", "'+'", ",", "help", "=", "'name(s) for which we will try to find OTT IDs'", ")", "parser", ".", "add_argument", "(", "'--context-name'", ",", "default", "=", "None", ",", "type", "=", "str", ",", "required", "=", "False", ")", "parser", ".", "add_argument", "(", "'--include-dubious'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "required", "=", "False", ",", "help", "=", "'return matches to taxa that are not included the synthetic tree because their taxonomic status is doubtful'", ")", "parser", ".", "add_argument", "(", "'--subtree'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "required", "=", "False", ",", "help", "=", "'print the newick representation of the taxonomic subtree if there is only one matching OTT ID'", ")", "parser", ".", "add_argument", "(", "'--include-deprecated'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "required", "=", "False", ")", "parser", ".", "add_argument", "(", "'--prohibit-fuzzy-matching'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "required", "=", "False", ")", "args", "=", "parser", ".", "parse_args", "(", "argv", ")", "# The service takes do_approximate_matching", "# We use the opposite to make the command-line just include positive directives", "# (as opposed to requiring --do-approximate-matching=False) so we use \"not\"", "do_approximate_matching", "=", "not", "args", ".", "prohibit_fuzzy_matching", "name_list", "=", "args", ".", "names", "if", "len", "(", "name_list", ")", "==", "0", ":", "name_list", "=", "[", "\"Homo sapiens\"", ",", "\"Gorilla gorilla\"", "]", "sys", ".", "stderr", ".", "write", "(", "'Running a demonstration query with {}\\n'", ".", "format", "(", "name_list", ")", ")", "else", ":", "for", "name", "in", "name_list", ":", "if", "name", ".", "startswith", "(", "'-'", ")", ":", "parser", ".", "print_help", "(", ")", "match_and_print", "(", "name_list", ",", "context_name", "=", "args", ".", "context_name", ",", "do_approximate_matching", "=", "do_approximate_matching", ",", "include_dubious", "=", "args", ".", "include_dubious", ",", "include_deprecated", "=", "args", ".", "include_deprecated", ",", "include_subtree", "=", "args", ".", "subtree", ",", "output", "=", "sys", ".", "stdout", ")" ]
This function sets up a command-line option parser and then calls match_and_print to do all of the real work.
[ "This", "function", "sets", "up", "a", "command", "-", "line", "option", "parser", "and", "then", "calls", "match_and_print", "to", "do", "all", "of", "the", "real", "work", "." ]
python
train
GemHQ/round-py
round/__init__.py
https://github.com/GemHQ/round-py/blob/d0838f849cd260b1eb5df67ed3c6f2fe56c91c21/round/__init__.py#L113-L127
def has_auth_params(self, scheme): """Check whether all information required for a given auth scheme has been supplied. Args: scheme (str): Name of the authentication scheme to check. One of Gem-Identify, Gem-Device, Gem-Application Returns: True if all required parameters for the specified scheme are present or False otherwise. """ for k, v in iteritems(self.schemes[scheme][u'params']): if not v: return False return True
[ "def", "has_auth_params", "(", "self", ",", "scheme", ")", ":", "for", "k", ",", "v", "in", "iteritems", "(", "self", ".", "schemes", "[", "scheme", "]", "[", "u'params'", "]", ")", ":", "if", "not", "v", ":", "return", "False", "return", "True" ]
Check whether all information required for a given auth scheme has been supplied. Args: scheme (str): Name of the authentication scheme to check. One of Gem-Identify, Gem-Device, Gem-Application Returns: True if all required parameters for the specified scheme are present or False otherwise.
[ "Check", "whether", "all", "information", "required", "for", "a", "given", "auth", "scheme", "have", "been", "supplied", "." ]
python
train
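The check reduces to "every parameter for the scheme is truthy"; a standalone equivalent over a made-up scheme table:

schemes = {'Gem-Device': {'params': {'api_token': 'abc123', 'device_id': ''}}}

def has_auth_params(scheme):
    return all(v for v in schemes[scheme]['params'].values())

print(has_auth_params('Gem-Device'))   # False: device_id is still empty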
vbwagner/ctypescrypto
ctypescrypto/cms.py
https://github.com/vbwagner/ctypescrypto/blob/33c32904cf5e04901f87f90e2499634b8feecd3e/ctypescrypto/cms.py#L58-L83
def CMS(data, format="PEM"): """ Factory function to create CMS objects from received messages. Parses CMS data and returns either SignedData or EnvelopedData object. format argument can be either "PEM" or "DER". It determines object type from the contents of received CMS structure. """ bio = Membio(data) if format == "PEM": ptr = libcrypto.PEM_read_bio_CMS(bio.bio, None, None, None) else: ptr = libcrypto.d2i_CMS_bio(bio.bio, None) if ptr is None: raise CMSError("Error parsing CMS data") typeoid = Oid(libcrypto.OBJ_obj2nid(libcrypto.CMS_get0_type(ptr))) if typeoid.shortname() == "pkcs7-signedData": return SignedData(ptr) elif typeoid.shortname() == "pkcs7-envelopedData": return EnvelopedData(ptr) elif typeoid.shortname() == "pkcs7-encryptedData": return EncryptedData(ptr) else: raise NotImplementedError("cannot handle "+typeoid.shortname())
[ "def", "CMS", "(", "data", ",", "format", "=", "\"PEM\"", ")", ":", "bio", "=", "Membio", "(", "data", ")", "if", "format", "==", "\"PEM\"", ":", "ptr", "=", "libcrypto", ".", "PEM_read_bio_CMS", "(", "bio", ".", "bio", ",", "None", ",", "None", ",", "None", ")", "else", ":", "ptr", "=", "libcrypto", ".", "d2i_CMS_bio", "(", "bio", ".", "bio", ",", "None", ")", "if", "ptr", "is", "None", ":", "raise", "CMSError", "(", "\"Error parsing CMS data\"", ")", "typeoid", "=", "Oid", "(", "libcrypto", ".", "OBJ_obj2nid", "(", "libcrypto", ".", "CMS_get0_type", "(", "ptr", ")", ")", ")", "if", "typeoid", ".", "shortname", "(", ")", "==", "\"pkcs7-signedData\"", ":", "return", "SignedData", "(", "ptr", ")", "elif", "typeoid", ".", "shortname", "(", ")", "==", "\"pkcs7-envelopedData\"", ":", "return", "EnvelopedData", "(", "ptr", ")", "elif", "typeoid", ".", "shortname", "(", ")", "==", "\"pkcs7-encryptedData\"", ":", "return", "EncryptedData", "(", "ptr", ")", "else", ":", "raise", "NotImplementedError", "(", "\"cannot handle \"", "+", "typeoid", ".", "shortname", "(", ")", ")" ]
Factory function to create CMS objects from received messages. Parses CMS data and returns either SignedData or EnvelopedData object. format argument can be either "PEM" or "DER". It determines object type from the contents of received CMS structure.
[ "Factory", "function", "to", "create", "CMS", "objects", "from", "received", "messages", ".", "Parses", "CMS", "data", "and", "returns", "either", "SignedData", "or", "EnvelopedData", "object", ".", "format", "argument", "can", "be", "either", "PEM", "or", "DER", "." ]
python
train
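A hedged usage sketch for the factory (ctypescrypto and a PEM-encoded CMS structure are required; smime.pem is a placeholder path):

from ctypescrypto.cms import CMS, SignedData

with open('smime.pem') as f:
    msg = CMS(f.read(), format='PEM')
if isinstance(msg, SignedData):
    print('parsed a pkcs7-signedData structure')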
opencobra/memote
memote/experimental/config.py
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/config.py#L142-L169
def load_growth(self, model): """Load and validate all data files.""" data = self.config.get("growth") if data is None: return experiments = data.get("experiments") if experiments is None or len(experiments) == 0: return path = self.get_path(data, join("data", "experimental", "growth")) for exp_id, exp in iteritems(experiments): if exp is None: exp = dict() filename = exp.get("filename") if filename is None: filename = join(path, "{}.csv".format(exp_id)) elif not isabs(filename): filename = join(path, filename) growth = GrowthExperiment( identifier=exp_id, obj=exp, filename=filename) if growth.medium is not None: assert growth.medium in self.media, \ "Growth-experiment '{}' has an undefined medium '{}'." \ "".format(exp_id, growth.medium) growth.medium = self.media[growth.medium] growth.load() growth.validate(model) self.growth[exp_id] = growth
[ "def", "load_growth", "(", "self", ",", "model", ")", ":", "data", "=", "self", ".", "config", ".", "get", "(", "\"growth\"", ")", "if", "data", "is", "None", ":", "return", "experiments", "=", "data", ".", "get", "(", "\"experiments\"", ")", "if", "experiments", "is", "None", "or", "len", "(", "experiments", ")", "==", "0", ":", "return", "path", "=", "self", ".", "get_path", "(", "data", ",", "join", "(", "\"data\"", ",", "\"experimental\"", ",", "\"growth\"", ")", ")", "for", "exp_id", ",", "exp", "in", "iteritems", "(", "experiments", ")", ":", "if", "exp", "is", "None", ":", "exp", "=", "dict", "(", ")", "filename", "=", "exp", ".", "get", "(", "\"filename\"", ")", "if", "filename", "is", "None", ":", "filename", "=", "join", "(", "path", ",", "\"{}.csv\"", ".", "format", "(", "exp_id", ")", ")", "elif", "not", "isabs", "(", "filename", ")", ":", "filename", "=", "join", "(", "path", ",", "filename", ")", "growth", "=", "GrowthExperiment", "(", "identifier", "=", "exp_id", ",", "obj", "=", "exp", ",", "filename", "=", "filename", ")", "if", "growth", ".", "medium", "is", "not", "None", ":", "assert", "growth", ".", "medium", "in", "self", ".", "media", ",", "\"Growth-experiment '{}' has an undefined medium '{}'.\"", "\"\"", ".", "format", "(", "exp_id", ",", "growth", ".", "medium", ")", "growth", ".", "medium", "=", "self", ".", "media", "[", "growth", ".", "medium", "]", "growth", ".", "load", "(", ")", "growth", ".", "validate", "(", "model", ")", "self", ".", "growth", "[", "exp_id", "]", "=", "growth" ]
Load and validate all data files.
[ "Load", "and", "validate", "all", "data", "files", "." ]
python
train
googleapis/protoc-java-resource-names-plugin
plugin/utils/path_template.py
https://github.com/googleapis/protoc-java-resource-names-plugin/blob/3fb2ec9b778f62646c05a7b960c893464c7791c0/plugin/utils/path_template.py#L82-L113
def render(self, bindings):
    """Renders a string from a path template using the provided bindings.

    Args:
        bindings (dict): A dictionary of var names to binding strings.

    Returns:
        str: The rendered instantiation of this path template.

    Raises:
        ValidationException: If a key isn't provided or if a sub-template
            can't be parsed.
    """
    out = []
    binding = False
    for segment in self.segments:
        if segment.kind == _BINDING:
            if segment.literal not in bindings:
                raise ValidationException(
                    ('rendering error: value for key \'{}\' '
                     'not provided').format(segment.literal))
            out.extend(PathTemplate(bindings[segment.literal]).segments)
            binding = True
        elif segment.kind == _END_BINDING:
            binding = False
        else:
            if binding:
                continue
            out.append(segment)
    path = _format(out)
    self.match(path)
    return path
[ "def", "render", "(", "self", ",", "bindings", ")", ":", "out", "=", "[", "]", "binding", "=", "False", "for", "segment", "in", "self", ".", "segments", ":", "if", "segment", ".", "kind", "==", "_BINDING", ":", "if", "segment", ".", "literal", "not", "in", "bindings", ":", "raise", "ValidationException", "(", "(", "'rendering error: value for key \\'{}\\' '", "'not provided'", ")", ".", "format", "(", "segment", ".", "literal", ")", ")", "out", ".", "extend", "(", "PathTemplate", "(", "bindings", "[", "segment", ".", "literal", "]", ")", ".", "segments", ")", "binding", "=", "True", "elif", "segment", ".", "kind", "==", "_END_BINDING", ":", "binding", "=", "False", "else", ":", "if", "binding", ":", "continue", "out", ".", "append", "(", "segment", ")", "path", "=", "_format", "(", "out", ")", "self", ".", "match", "(", "path", ")", "return", "path" ]
Renders a string from a path template using the provided bindings.

Args:
    bindings (dict): A dictionary of var names to binding strings.

Returns:
    str: The rendered instantiation of this path template.

Raises:
    ValidationException: If a key isn't provided or if a sub-template
        can't be parsed.
[ "Renders", "a", "string", "from", "a", "path", "template", "using", "the", "provided", "bindings", "." ]
python
train
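A hedged illustration of render(); the template string and binding names below are hypothetical but follow the {var} binding syntax this class parses:

# Illustrative only; PathTemplate is the class defined in this module.
template = PathTemplate('shelves/{shelf}/books/{book}')
path = template.render({'shelf': 's1', 'book': 'b2'})
# path == 'shelves/s1/books/b2'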
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L4794-L4801
def validatePushElement(self, ctxt, elem, qname): """Push a new element start on the validation stack. """ if ctxt is None: ctxt__o = None else: ctxt__o = ctxt._o if elem is None: elem__o = None else: elem__o = elem._o ret = libxml2mod.xmlValidatePushElement(ctxt__o, self._o, elem__o, qname) return ret
[ "def", "validatePushElement", "(", "self", ",", "ctxt", ",", "elem", ",", "qname", ")", ":", "if", "ctxt", "is", "None", ":", "ctxt__o", "=", "None", "else", ":", "ctxt__o", "=", "ctxt", ".", "_o", "if", "elem", "is", "None", ":", "elem__o", "=", "None", "else", ":", "elem__o", "=", "elem", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlValidatePushElement", "(", "ctxt__o", ",", "self", ".", "_o", ",", "elem__o", ",", "qname", ")", "return", "ret" ]
Push a new element start on the validation stack.
[ "Push", "a", "new", "element", "start", "on", "the", "validation", "stack", "." ]
python
train
jazzband/django-widget-tweaks
widget_tweaks/templatetags/widget_tweaks.py
https://github.com/jazzband/django-widget-tweaks/blob/f50ee92410d68e81528a7643a10544e7331af8fb/widget_tweaks/templatetags/widget_tweaks.py#L109-L117
def widget_type(field): """ Template filter that returns field widget class name (in lower case). E.g. if field's widget is TextInput then {{ field|widget_type }} will return 'textinput'. """ if hasattr(field, 'field') and hasattr(field.field, 'widget') and field.field.widget: return field.field.widget.__class__.__name__.lower() return ''
[ "def", "widget_type", "(", "field", ")", ":", "if", "hasattr", "(", "field", ",", "'field'", ")", "and", "hasattr", "(", "field", ".", "field", ",", "'widget'", ")", "and", "field", ".", "field", ".", "widget", ":", "return", "field", ".", "field", ".", "widget", ".", "__class__", ".", "__name__", ".", "lower", "(", ")", "return", "''" ]
Template filter that returns field widget class name (in lower case). E.g. if field's widget is TextInput then {{ field|widget_type }} will return 'textinput'.
[ "Template", "filter", "that", "returns", "field", "widget", "class", "name", "(", "in", "lower", "case", ")", ".", "E", ".", "g", ".", "if", "field", "s", "widget", "is", "TextInput", "then", "{{", "field|widget_type", "}}", "will", "return", "textinput", "." ]
python
train
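A short sketch calling the filter directly from Python (ContactForm and its field are hypothetical; in a template the equivalent is {{ form.name|widget_type }}):

# Assumes a configured Django project.
from django import forms

class ContactForm(forms.Form):
    name = forms.CharField()  # CharField renders with a TextInput widget by default

form = ContactForm()
print(widget_type(form['name']))  # -> 'textinput'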
NiklasRosenstein/pydoc-markdown
pydocmd/imp.py
https://github.com/NiklasRosenstein/pydoc-markdown/blob/e7e93b2bf7f7535e0de4cd275058fc9865dff21b/pydocmd/imp.py#L87-L96
def force_lazy_import(name): """ Import any modules off of "name" by iterating a new list rather than a generator so that this library works with lazy imports. """ obj = import_object(name) module_items = list(getattr(obj, '__dict__', {}).items()) for key, value in module_items: if getattr(value, '__module__', None): import_object(name + '.' + key)
[ "def", "force_lazy_import", "(", "name", ")", ":", "obj", "=", "import_object", "(", "name", ")", "module_items", "=", "list", "(", "getattr", "(", "obj", ",", "'__dict__'", ",", "{", "}", ")", ".", "items", "(", ")", ")", "for", "key", ",", "value", "in", "module_items", ":", "if", "getattr", "(", "value", ",", "'__module__'", ",", "None", ")", ":", "import_object", "(", "name", "+", "'.'", "+", "key", ")" ]
Import any modules off of "name" by iterating a new list rather than a generator so that this library works with lazy imports.
[ "Import", "any", "modules", "off", "of", "name", "by", "iterating", "a", "new", "list", "rather", "than", "a", "generator", "so", "that", "this", "library", "works", "with", "lazy", "imports", "." ]
python
train
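A minimal sketch of the call (the module name is illustrative; any importable dotted name should work):

# Eagerly import the members reachable from a dotted module name so that
# later attribute access cannot trigger a lazy import.
force_lazy_import('os.path')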
sibirrer/lenstronomy
lenstronomy/LensModel/lens_model.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/lens_model.py#L105-L117
def alpha(self, x, y, kwargs, k=None): """ deflection angles :param x: x-position (preferentially arcsec) :type x: numpy array :param y: y-position (preferentially arcsec) :type y: numpy array :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes :param k: only evaluate the k-th lens model :return: deflection angles in units of arcsec """ return self.lens_model.alpha(x, y, kwargs, k=k)
[ "def", "alpha", "(", "self", ",", "x", ",", "y", ",", "kwargs", ",", "k", "=", "None", ")", ":", "return", "self", ".", "lens_model", ".", "alpha", "(", "x", ",", "y", ",", "kwargs", ",", "k", "=", "k", ")" ]
deflection angles :param x: x-position (preferentially arcsec) :type x: numpy array :param y: y-position (preferentially arcsec) :type y: numpy array :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes :param k: only evaluate the k-th lens model :return: deflection angles in units of arcsec
[ "deflection", "angles" ]
python
train
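A brief usage sketch for the deflection call (assumes lenstronomy is installed; the single SIS profile and its parameter values are illustrative):

from lenstronomy.LensModel.lens_model import LensModel

lens = LensModel(lens_model_list=['SIS'])
kwargs_lens = [{'theta_E': 1.0, 'center_x': 0.0, 'center_y': 0.0}]
alpha_x, alpha_y = lens.alpha(x=1.2, y=0.4, kwargs=kwargs_lens)  # arcsec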
NoneGG/aredis
aredis/cache.py
https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/cache.py#L243-L256
async def set(self, key, value, param=None, expire_time=None, herd_timeout=None):
    """
    Use key and param to generate identity and pack the content,
    expire the key within real_timeout if expire_time is given.
    real_timeout is equal to the sum of expire_time and herd_timeout.
    The content is cached with expire_time.
    """
    identity = self._gen_identity(key, param)
    expected_expired_ts = int(time.time())
    if expire_time:
        expected_expired_ts += expire_time
    expected_expired_ts += herd_timeout or self.default_herd_timeout
    value = self._pack([value, expected_expired_ts])
    return await self.client.set(identity, value, ex=expire_time)
[ "async", "def", "set", "(", "self", ",", "key", ",", "value", ",", "param", "=", "None", ",", "expire_time", "=", "None", ",", "herd_timeout", "=", "None", ")", ":", "identity", "=", "self", ".", "_gen_identity", "(", "key", ",", "param", ")", "expected_expired_ts", "=", "int", "(", "time", ".", "time", "(", ")", ")", "if", "expire_time", ":", "expected_expired_ts", "+=", "expire_time", "expected_expired_ts", "+=", "herd_timeout", "or", "self", ".", "default_herd_timeout", "value", "=", "self", ".", "_pack", "(", "[", "value", ",", "expected_expired_ts", "]", ")", "return", "await", "self", ".", "client", ".", "set", "(", "identity", ",", "value", ",", "ex", "=", "expire_time", ")" ]
Use key and param to generate identity and pack the content, expire the key within real_timeout if expire_time is given. real_timeout is equal to the sum of expire_time and herd_timeout. The content is cached with expire_time.
[ "Use", "key", "and", "param", "to", "generate", "identity", "and", "pack", "the", "content", "expire", "the", "key", "within", "real_timeout", "if", "expire_time", "is", "given", ".", "real_timeout", "is", "equal", "to", "the", "sum", "of", "expire_time", "and", "herd_time", ".", "The", "content", "is", "cached", "with", "expire_time", "." ]
python
train
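A minimal sketch of writing through the herd cache (assumes cache is an already-constructed HerdCache instance bound to an aredis client; the key and payload are hypothetical):

import asyncio

async def demo(cache):
    # expire_time/herd_timeout semantics as described in the docstring above
    await cache.set('greeting', {'msg': 'hello'}, expire_time=60, herd_timeout=5)

# asyncio.get_event_loop().run_until_complete(demo(cache))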
helixyte/everest
everest/representers/registry.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/representers/registry.py#L62-L112
def register(self, resource_class, content_type, configuration=None): """ Registers a representer factory for the given combination of resource class and content type. :param configuration: representer configuration. A default instance will be created if this is not given. :type configuration: :class:`everest.representers.config.RepresenterConfiguration` """ if not issubclass(resource_class, Resource): raise ValueError('Representers can only be registered for ' 'resource classes (got: %s).' % resource_class) if not content_type in self.__rpr_classes: raise ValueError('No representer class has been registered for ' 'content type "%s".' % content_type) # Register a factory resource -> representer for the given combination # of resource class and content type. rpr_cls = self.__rpr_classes[content_type] self.__rpr_factories[(resource_class, content_type)] = \ rpr_cls.create_from_resource_class if issubclass(rpr_cls, MappingResourceRepresenter): # Create or update an attribute mapping. mp_reg = self.__mp_regs[content_type] mp = mp_reg.find_mapping(resource_class) if mp is None: # No mapping was registered yet for this resource class or any # of its base classes; create a new one on the fly. new_mp = mp_reg.create_mapping(resource_class, configuration) elif not configuration is None: if resource_class is mp.mapped_class: # We have additional configuration for an existing mapping. mp.configuration.update(configuration) new_mp = mp else: # We have a derived class with additional configuration. new_mp = mp_reg.create_mapping( resource_class, configuration=mp.configuration) new_mp.configuration.update(configuration) elif not resource_class is mp.mapped_class: # We have a derived class without additional configuration. new_mp = mp_reg.create_mapping(resource_class, configuration=mp.configuration) else: # We found a dynamically created mapping for the right class # without additional configuration; do not create a new one. new_mp = None if not new_mp is None: # Store the new (or updated) mapping. mp_reg.set_mapping(new_mp)
[ "def", "register", "(", "self", ",", "resource_class", ",", "content_type", ",", "configuration", "=", "None", ")", ":", "if", "not", "issubclass", "(", "resource_class", ",", "Resource", ")", ":", "raise", "ValueError", "(", "'Representers can only be registered for '", "'resource classes (got: %s).'", "%", "resource_class", ")", "if", "not", "content_type", "in", "self", ".", "__rpr_classes", ":", "raise", "ValueError", "(", "'No representer class has been registered for '", "'content type \"%s\".'", "%", "content_type", ")", "# Register a factory resource -> representer for the given combination", "# of resource class and content type.", "rpr_cls", "=", "self", ".", "__rpr_classes", "[", "content_type", "]", "self", ".", "__rpr_factories", "[", "(", "resource_class", ",", "content_type", ")", "]", "=", "rpr_cls", ".", "create_from_resource_class", "if", "issubclass", "(", "rpr_cls", ",", "MappingResourceRepresenter", ")", ":", "# Create or update an attribute mapping.", "mp_reg", "=", "self", ".", "__mp_regs", "[", "content_type", "]", "mp", "=", "mp_reg", ".", "find_mapping", "(", "resource_class", ")", "if", "mp", "is", "None", ":", "# No mapping was registered yet for this resource class or any", "# of its base classes; create a new one on the fly.", "new_mp", "=", "mp_reg", ".", "create_mapping", "(", "resource_class", ",", "configuration", ")", "elif", "not", "configuration", "is", "None", ":", "if", "resource_class", "is", "mp", ".", "mapped_class", ":", "# We have additional configuration for an existing mapping.", "mp", ".", "configuration", ".", "update", "(", "configuration", ")", "new_mp", "=", "mp", "else", ":", "# We have a derived class with additional configuration.", "new_mp", "=", "mp_reg", ".", "create_mapping", "(", "resource_class", ",", "configuration", "=", "mp", ".", "configuration", ")", "new_mp", ".", "configuration", ".", "update", "(", "configuration", ")", "elif", "not", "resource_class", "is", "mp", ".", "mapped_class", ":", "# We have a derived class without additional configuration.", "new_mp", "=", "mp_reg", ".", "create_mapping", "(", "resource_class", ",", "configuration", "=", "mp", ".", "configuration", ")", "else", ":", "# We found a dynamically created mapping for the right class", "# without additional configuration; do not create a new one.", "new_mp", "=", "None", "if", "not", "new_mp", "is", "None", ":", "# Store the new (or updated) mapping.", "mp_reg", ".", "set_mapping", "(", "new_mp", ")" ]
Registers a representer factory for the given combination of resource class and content type. :param configuration: representer configuration. A default instance will be created if this is not given. :type configuration: :class:`everest.representers.config.RepresenterConfiguration`
[ "Registers", "a", "representer", "factory", "for", "the", "given", "combination", "of", "resource", "class", "and", "content", "type", "." ]
python
train
onecodex/onecodex
onecodex/distance.py
https://github.com/onecodex/onecodex/blob/326a0a1af140e3a57ccf31c3c9c5e17a5775c13d/onecodex/distance.py#L9-L42
def alpha_diversity(self, metric="simpson", rank="auto"):
    """Calculate the diversity within a community.

    Parameters
    ----------
    metric : {'simpson', 'chao1', 'shannon'}
        The diversity metric to calculate.
    rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
        Analysis will be restricted to abundances of taxa at the specified level.

    Returns
    -------
    pandas.DataFrame, one row per classification containing the computed metric.
    """
    if metric not in ("simpson", "chao1", "shannon"):
        raise OneCodexException(
            "For alpha diversity, metric must be one of: simpson, chao1, shannon"
        )

    # needs read counts, not relative abundances
    if self._guess_normalized():
        raise OneCodexException("Alpha diversity requires unnormalized read counts.")

    df = self.to_df(rank=rank, normalize=False)

    output = {"classification_id": [], metric: []}

    for c_id in df.index:
        output["classification_id"].append(c_id)
        output[metric].append(
            skbio.diversity.alpha_diversity(metric, df.loc[c_id].tolist(), [c_id]).values[0]
        )

    return pd.DataFrame(output).set_index("classification_id")
[ "def", "alpha_diversity", "(", "self", ",", "metric", "=", "\"simpson\"", ",", "rank", "=", "\"auto\"", ")", ":", "if", "metric", "not", "in", "(", "\"simpson\"", ",", "\"chao1\"", ",", "\"shannon\"", ")", ":", "raise", "OneCodexException", "(", "\"For alpha diversity, metric must be one of: simpson, chao1, shannon\"", ")", "# needs read counts, not relative abundances", "if", "self", ".", "_guess_normalized", "(", ")", ":", "raise", "OneCodexException", "(", "\"Alpha diversity requires unnormalized read counts.\"", ")", "df", "=", "self", ".", "to_df", "(", "rank", "=", "rank", ",", "normalize", "=", "False", ")", "output", "=", "{", "\"classification_id\"", ":", "[", "]", ",", "metric", ":", "[", "]", "}", "for", "c_id", "in", "df", ".", "index", ":", "output", "[", "\"classification_id\"", "]", ".", "append", "(", "c_id", ")", "output", "[", "metric", "]", ".", "append", "(", "skbio", ".", "diversity", ".", "alpha_diversity", "(", "metric", ",", "df", ".", "loc", "[", "c_id", "]", ".", "tolist", "(", ")", ",", "[", "c_id", "]", ")", ".", "values", "[", "0", "]", ")", "return", "pd", ".", "DataFrame", "(", "output", ")", ".", "set_index", "(", "\"classification_id\"", ")" ]
Calculate the diversity within a community.

Parameters
----------
metric : {'simpson', 'chao1', 'shannon'}
    The diversity metric to calculate.
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
    Analysis will be restricted to abundances of taxa at the specified level.

Returns
-------
pandas.DataFrame, one row per classification containing the computed metric.
[ "Caculate", "the", "diversity", "within", "a", "community", "." ]
python
train
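A hedged call sketch (samples is assumed to be a One Codex analysis collection exposing this method; the metric and rank values come from the docstring):

shannon = samples.alpha_diversity(metric='shannon', rank='genus')
print(shannon.head())  # one row per classification_id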
evyatarmeged/Raccoon
raccoon_src/lib/dns_handler.py
https://github.com/evyatarmeged/Raccoon/blob/985797f73329976ec9c3fefbe4bbb3c74096ca51/raccoon_src/lib/dns_handler.py#L17-L35
def query_dns(cls, domains, records): """ Query DNS records for host. :param domains: Iterable of domains to get DNS Records for :param records: Iterable of DNS records to get from domain. """ results = {k: set() for k in records} for record in records: for domain in domains: try: answers = cls.resolver.query(domain, record) for answer in answers: # Add value to record type results.get(record).add(answer) except (resolver.NoAnswer, resolver.NXDOMAIN, resolver.NoNameservers): # Type of record doesn't fit domain or no answer from ns continue return {k: v for k, v in results.items() if v}
[ "def", "query_dns", "(", "cls", ",", "domains", ",", "records", ")", ":", "results", "=", "{", "k", ":", "set", "(", ")", "for", "k", "in", "records", "}", "for", "record", "in", "records", ":", "for", "domain", "in", "domains", ":", "try", ":", "answers", "=", "cls", ".", "resolver", ".", "query", "(", "domain", ",", "record", ")", "for", "answer", "in", "answers", ":", "# Add value to record type", "results", ".", "get", "(", "record", ")", ".", "add", "(", "answer", ")", "except", "(", "resolver", ".", "NoAnswer", ",", "resolver", ".", "NXDOMAIN", ",", "resolver", ".", "NoNameservers", ")", ":", "# Type of record doesn't fit domain or no answer from ns", "continue", "return", "{", "k", ":", "v", "for", "k", ",", "v", "in", "results", ".", "items", "(", ")", "if", "v", "}" ]
Query DNS records for host. :param domains: Iterable of domains to get DNS Records for :param records: Iterable of DNS records to get from domain.
[ "Query", "DNS", "records", "for", "host", ".", ":", "param", "domains", ":", "Iterable", "of", "domains", "to", "get", "DNS", "Records", "for", ":", "param", "records", ":", "Iterable", "of", "DNS", "records", "to", "get", "from", "domain", "." ]
python
train
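An illustrative call; the DNSHandler class name is inferred from the module path, and the domains and record types are hypothetical:

results = DNSHandler.query_dns({'example.com', 'www.example.com'}, ('A', 'MX', 'NS'))
for record_type, answers in results.items():
    print(record_type, [str(answer) for answer in answers])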
mthornhill/django-postal
src/postal/utils.py
https://github.com/mthornhill/django-postal/blob/21d65e09b45f0515cde6166345f46c3f506dd08f/src/postal/utils.py#L136-L144
def loader_for_type(self, ctype): """ Gets a function ref to deserialize content for a certain mimetype. """ for loadee, mimes in Mimer.TYPES.iteritems(): for mime in mimes: if ctype.startswith(mime): return loadee
[ "def", "loader_for_type", "(", "self", ",", "ctype", ")", ":", "for", "loadee", ",", "mimes", "in", "Mimer", ".", "TYPES", ".", "iteritems", "(", ")", ":", "for", "mime", "in", "mimes", ":", "if", "ctype", ".", "startswith", "(", "mime", ")", ":", "return", "loadee" ]
Gets a function ref to deserialize content for a certain mimetype.
[ "Gets", "a", "function", "ref", "to", "deserialize", "content", "for", "a", "certain", "mimetype", "." ]
python
train
django-danceschool/django-danceschool
danceschool/discounts/models.py
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/discounts/models.py#L203-L213
def getFlatPrice(self,payAtDoor=False): ''' Rather than embedding logic re: door pricing, other code can call this method. ''' if self.discountType is not DiscountCombo.DiscountType.flatPrice: return None if payAtDoor: return self.doorPrice else: return self.onlinePrice
[ "def", "getFlatPrice", "(", "self", ",", "payAtDoor", "=", "False", ")", ":", "if", "self", ".", "discountType", "is", "not", "DiscountCombo", ".", "DiscountType", ".", "flatPrice", ":", "return", "None", "if", "payAtDoor", ":", "return", "self", ".", "doorPrice", "else", ":", "return", "self", ".", "onlinePrice" ]
Rather than embedding logic re: door pricing, other code can call this method.
[ "Rather", "than", "embedding", "logic", "re", ":", "door", "pricing", "other", "code", "can", "call", "this", "method", "." ]
python
train
HDI-Project/MLBlocks
mlblocks/datasets.py
https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/datasets.py#L440-L444
def load_iris(): """Iris Dataset.""" dataset = datasets.load_iris() return Dataset(load_iris.__doc__, dataset.data, dataset.target, accuracy_score, stratify=True)
[ "def", "load_iris", "(", ")", ":", "dataset", "=", "datasets", ".", "load_iris", "(", ")", "return", "Dataset", "(", "load_iris", ".", "__doc__", ",", "dataset", ".", "data", ",", "dataset", ".", "target", ",", "accuracy_score", ",", "stratify", "=", "True", ")" ]
Iris Dataset.
[ "Iris", "Dataset", "." ]
python
train
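Direct usage, assuming the Dataset wrapper exposes the data and target it was constructed with, as elsewhere in this module:

dataset = load_iris()
X, y = dataset.data, dataset.target  # assumption: attributes mirror the constructor args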
napalm-automation/napalm-junos
napalm_junos/junos.py
https://github.com/napalm-automation/napalm-junos/blob/78c0d161daf2abf26af5835b773f6db57c46efff/napalm_junos/junos.py#L229-L236
def compare_config(self): """Compare candidate config with running.""" diff = self.device.cu.diff() if diff is None: return '' else: return diff.strip()
[ "def", "compare_config", "(", "self", ")", ":", "diff", "=", "self", ".", "device", ".", "cu", ".", "diff", "(", ")", "if", "diff", "is", "None", ":", "return", "''", "else", ":", "return", "diff", ".", "strip", "(", ")" ]
Compare candidate config with running.
[ "Compare", "candidate", "config", "with", "running", "." ]
python
train
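A hedged sketch of the surrounding NAPALM workflow (host, credentials and the candidate config line are placeholders):

from napalm import get_network_driver

driver = get_network_driver('junos')
device = driver('192.0.2.1', 'admin', 'password')
device.open()
device.load_merge_candidate(config='set system host-name lab-router')
print(device.compare_config())  # empty string means no pending diff
device.discard_config()
device.close()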
saltstack/salt
salt/master.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1024-L1038
def __bind(self): ''' Bind to the local port ''' # using ZMQIOLoop since we *might* need zmq in there install_zmq() self.io_loop = ZMQDefaultLoop() self.io_loop.make_current() for req_channel in self.req_channels: req_channel.post_fork(self._handle_payload, io_loop=self.io_loop) # TODO: cleaner? Maybe lazily? try: self.io_loop.start() except (KeyboardInterrupt, SystemExit): # Tornado knows what to do pass
[ "def", "__bind", "(", "self", ")", ":", "# using ZMQIOLoop since we *might* need zmq in there", "install_zmq", "(", ")", "self", ".", "io_loop", "=", "ZMQDefaultLoop", "(", ")", "self", ".", "io_loop", ".", "make_current", "(", ")", "for", "req_channel", "in", "self", ".", "req_channels", ":", "req_channel", ".", "post_fork", "(", "self", ".", "_handle_payload", ",", "io_loop", "=", "self", ".", "io_loop", ")", "# TODO: cleaner? Maybe lazily?", "try", ":", "self", ".", "io_loop", ".", "start", "(", ")", "except", "(", "KeyboardInterrupt", ",", "SystemExit", ")", ":", "# Tornado knows what to do", "pass" ]
Bind to the local port
[ "Bind", "to", "the", "local", "port" ]
python
train
blockstack/blockstack-core
blockstack/lib/nameset/namedb.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/namedb.py#L1088-L1094
def get_all_namespace_ids( self ): """ Get the set of all existing, READY namespace IDs. """ cur = self.db.cursor() namespace_ids = namedb_get_all_namespace_ids( cur ) return namespace_ids
[ "def", "get_all_namespace_ids", "(", "self", ")", ":", "cur", "=", "self", ".", "db", ".", "cursor", "(", ")", "namespace_ids", "=", "namedb_get_all_namespace_ids", "(", "cur", ")", "return", "namespace_ids" ]
Get the set of all existing, READY namespace IDs.
[ "Get", "the", "set", "of", "all", "existing", "READY", "namespace", "IDs", "." ]
python
train
openstates/billy
billy/web/public/views/legislators.py
https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/web/public/views/legislators.py#L25-L106
def legislators(request, abbr): ''' Context: - metadata - chamber - chamber_title - chamber_select_template - chamber_select_collection - chamber_select_chambers - show_chamber_column - abbr - legislators - sort_order - sort_key - legislator_table - nav_active Templates: - billy/web/public/legislators.html - billy/web/public/chamber_select_form.html - billy/web/public/legislator_table.html ''' try: meta = Metadata.get_object(abbr) except DoesNotExist: raise Http404 spec = {'active': True, 'district': {'$exists': True}} chambers = dict((k, v['name']) for k, v in meta['chambers'].items()) chamber = request.GET.get('chamber', 'both') if chamber in chambers: spec['chamber'] = chamber chamber_title = meta['chambers'][chamber]['title'] + 's' else: chamber = 'both' chamber_title = 'Legislators' fields = mongo_fields('leg_id', 'full_name', 'photo_url', 'district', 'party', 'first_name', 'last_name', 'chamber', billy_settings.LEVEL_FIELD, 'last_name') sort_key = 'district' sort_order = 1 if request.GET: sort_key = request.GET.get('key', sort_key) sort_order = int(request.GET.get('order', sort_order)) legislators = meta.legislators(extra_spec=spec, fields=fields) def sort_by_district(obj): matchobj = re.search(r'\d+', obj.get('district', '') or '') if matchobj: return int(matchobj.group()) else: return obj.get('district', '') legislators = sorted(legislators, key=sort_by_district) if sort_key != 'district': legislators = sorted(legislators, key=operator.itemgetter(sort_key), reverse=(sort_order == -1)) else: legislators = sorted(legislators, key=sort_by_district, reverse=bool(0 > sort_order)) sort_order = {1: -1, -1: 1}[sort_order] legislators = list(legislators) return TemplateResponse( request, templatename('legislators'), dict(metadata=meta, chamber=chamber, chamber_title=chamber_title, chamber_select_template=templatename('chamber_select_form'), chamber_select_collection='legislators', chamber_select_chambers=chambers, show_chamber_column=True, abbr=abbr, legislators=legislators, sort_order=sort_order, sort_key=sort_key, legislator_table=templatename('legislator_table'), nav_active='legislators'))
[ "def", "legislators", "(", "request", ",", "abbr", ")", ":", "try", ":", "meta", "=", "Metadata", ".", "get_object", "(", "abbr", ")", "except", "DoesNotExist", ":", "raise", "Http404", "spec", "=", "{", "'active'", ":", "True", ",", "'district'", ":", "{", "'$exists'", ":", "True", "}", "}", "chambers", "=", "dict", "(", "(", "k", ",", "v", "[", "'name'", "]", ")", "for", "k", ",", "v", "in", "meta", "[", "'chambers'", "]", ".", "items", "(", ")", ")", "chamber", "=", "request", ".", "GET", ".", "get", "(", "'chamber'", ",", "'both'", ")", "if", "chamber", "in", "chambers", ":", "spec", "[", "'chamber'", "]", "=", "chamber", "chamber_title", "=", "meta", "[", "'chambers'", "]", "[", "chamber", "]", "[", "'title'", "]", "+", "'s'", "else", ":", "chamber", "=", "'both'", "chamber_title", "=", "'Legislators'", "fields", "=", "mongo_fields", "(", "'leg_id'", ",", "'full_name'", ",", "'photo_url'", ",", "'district'", ",", "'party'", ",", "'first_name'", ",", "'last_name'", ",", "'chamber'", ",", "billy_settings", ".", "LEVEL_FIELD", ",", "'last_name'", ")", "sort_key", "=", "'district'", "sort_order", "=", "1", "if", "request", ".", "GET", ":", "sort_key", "=", "request", ".", "GET", ".", "get", "(", "'key'", ",", "sort_key", ")", "sort_order", "=", "int", "(", "request", ".", "GET", ".", "get", "(", "'order'", ",", "sort_order", ")", ")", "legislators", "=", "meta", ".", "legislators", "(", "extra_spec", "=", "spec", ",", "fields", "=", "fields", ")", "def", "sort_by_district", "(", "obj", ")", ":", "matchobj", "=", "re", ".", "search", "(", "r'\\d+'", ",", "obj", ".", "get", "(", "'district'", ",", "''", ")", "or", "''", ")", "if", "matchobj", ":", "return", "int", "(", "matchobj", ".", "group", "(", ")", ")", "else", ":", "return", "obj", ".", "get", "(", "'district'", ",", "''", ")", "legislators", "=", "sorted", "(", "legislators", ",", "key", "=", "sort_by_district", ")", "if", "sort_key", "!=", "'district'", ":", "legislators", "=", "sorted", "(", "legislators", ",", "key", "=", "operator", ".", "itemgetter", "(", "sort_key", ")", ",", "reverse", "=", "(", "sort_order", "==", "-", "1", ")", ")", "else", ":", "legislators", "=", "sorted", "(", "legislators", ",", "key", "=", "sort_by_district", ",", "reverse", "=", "bool", "(", "0", ">", "sort_order", ")", ")", "sort_order", "=", "{", "1", ":", "-", "1", ",", "-", "1", ":", "1", "}", "[", "sort_order", "]", "legislators", "=", "list", "(", "legislators", ")", "return", "TemplateResponse", "(", "request", ",", "templatename", "(", "'legislators'", ")", ",", "dict", "(", "metadata", "=", "meta", ",", "chamber", "=", "chamber", ",", "chamber_title", "=", "chamber_title", ",", "chamber_select_template", "=", "templatename", "(", "'chamber_select_form'", ")", ",", "chamber_select_collection", "=", "'legislators'", ",", "chamber_select_chambers", "=", "chambers", ",", "show_chamber_column", "=", "True", ",", "abbr", "=", "abbr", ",", "legislators", "=", "legislators", ",", "sort_order", "=", "sort_order", ",", "sort_key", "=", "sort_key", ",", "legislator_table", "=", "templatename", "(", "'legislator_table'", ")", ",", "nav_active", "=", "'legislators'", ")", ")" ]
Context: - metadata - chamber - chamber_title - chamber_select_template - chamber_select_collection - chamber_select_chambers - show_chamber_column - abbr - legislators - sort_order - sort_key - legislator_table - nav_active Templates: - billy/web/public/legislators.html - billy/web/public/chamber_select_form.html - billy/web/public/legislator_table.html
[ "Context", ":", "-", "metadata", "-", "chamber", "-", "chamber_title", "-", "chamber_select_template", "-", "chamber_select_collection", "-", "chamber_select_chambers", "-", "show_chamber_column", "-", "abbr", "-", "legislators", "-", "sort_order", "-", "sort_key", "-", "legislator_table", "-", "nav_active" ]
python
train
pkgw/pwkit
pwkit/io.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/io.py#L645-L656
def try_unlink (self):
    """Try to unlink this path. If it doesn't exist, no exception is
    raised. Returns a boolean indicating whether the path was really
    unlinked.

    """
    try:
        self.unlink ()
        return True
    except OSError as e:
        if e.errno == 2:
            return False # ENOENT
        raise
[ "def", "try_unlink", "(", "self", ")", ":", "try", ":", "self", ".", "unlink", "(", ")", "return", "True", "except", "OSError", "as", "e", ":", "if", "e", ".", "errno", "==", "2", ":", "return", "False", "# ENOENT", "raise" ]
Try to unlink this path. If it doesn't exist, no exception is raised. Returns a boolean indicating whether the path was really unlinked.
[ "Try", "to", "unlink", "this", "path", ".", "If", "it", "doesn", "t", "exist", "no", "error", "is", "returned", ".", "Returns", "a", "boolean", "indicating", "whether", "the", "path", "was", "really", "unlinked", "." ]
python
train
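A short sketch using pwkit's Path subclass (the file name is hypothetical):

from pwkit.io import Path

removed = Path('scratch.dat').try_unlink()
print('deleted' if removed else 'nothing to delete')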
mattiaslinnap/django-partial-index
partial_index/query.py
https://github.com/mattiaslinnap/django-partial-index/blob/6e60fd9484f95499587365fda34a881050bcd804/partial_index/query.py#L107-L114
def q_mentioned_fields(q, model): """Returns list of field names mentioned in Q object. Q(a__isnull=True, b=F('c')) -> ['a', 'b', 'c'] """ query = Query(model) where = query._add_q(q, used_aliases=set(), allow_joins=False)[0] return list(sorted(set(expression_mentioned_fields(where))))
[ "def", "q_mentioned_fields", "(", "q", ",", "model", ")", ":", "query", "=", "Query", "(", "model", ")", "where", "=", "query", ".", "_add_q", "(", "q", ",", "used_aliases", "=", "set", "(", ")", ",", "allow_joins", "=", "False", ")", "[", "0", "]", "return", "list", "(", "sorted", "(", "set", "(", "expression_mentioned_fields", "(", "where", ")", ")", ")", ")" ]
Returns list of field names mentioned in Q object. Q(a__isnull=True, b=F('c')) -> ['a', 'b', 'c']
[ "Returns", "list", "of", "field", "names", "mentioned", "in", "Q", "object", "." ]
python
train
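The docstring's own example, spelled out; MyModel stands in for any model defining fields a, b and c:

from django.db.models import F, Q

fields = q_mentioned_fields(Q(a__isnull=True, b=F('c')), MyModel)
# fields == ['a', 'b', 'c']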
langloisjp/pysvcmetrics
statsdclient.py
https://github.com/langloisjp/pysvcmetrics/blob/a126fc029ab645d9db46c0f5712c416cdf80e370/statsdclient.py#L79-L86
def count(self, stats, value, sample_rate=1): """ Updates one or more stats counters by arbitrary value >>> client = StatsdClient() >>> client.count('example.counter', 17) """ self.update_stats(stats, value, self.SC_COUNT, sample_rate)
[ "def", "count", "(", "self", ",", "stats", ",", "value", ",", "sample_rate", "=", "1", ")", ":", "self", ".", "update_stats", "(", "stats", ",", "value", ",", "self", ".", "SC_COUNT", ",", "sample_rate", ")" ]
Updates one or more stats counters by arbitrary value >>> client = StatsdClient() >>> client.count('example.counter', 17)
[ "Updates", "one", "or", "more", "stats", "counters", "by", "arbitrary", "value" ]
python
train
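The doctest above as a plain snippet, plus a sampled update (statsd-style sample_rate means the update is only sent probabilistically):

client = StatsdClient()
client.count('example.counter', 17)
client.count('example.counter', 3, sample_rate=0.5)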
StackStorm/pybind
pybind/nos/v6_0_2f/rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/neighbor_ips/neighbor_addr/update_source/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/neighbor_ips/neighbor_addr/update_source/__init__.py#L95-L116
def _set_sip_ipv4_address(self, v, load=False): """ Setter method for sip_ipv4_address, mapped from YANG variable /rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/neighbor_ips/neighbor_addr/update_source/sip_ipv4_address (sip-ipv4-address) If this variable is read-only (config: false) in the source YANG file, then _set_sip_ipv4_address is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_sip_ipv4_address() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="sip-ipv4-address", rest_name="sip-ipv4-address", parent=self, choice=(u'ch-update-source', u'ca-ipv4'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='sip-ipv4-address', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """sip_ipv4_address must be of a type compatible with sip-ipv4-address""", 'defined-type': "brocade-bgp:sip-ipv4-address", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="sip-ipv4-address", rest_name="sip-ipv4-address", parent=self, choice=(u'ch-update-source', u'ca-ipv4'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='sip-ipv4-address', is_config=True)""", }) self.__sip_ipv4_address = t if hasattr(self, '_set'): self._set()
[ "def", "_set_sip_ipv4_address", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "RestrictedClassType", "(", "base_type", "=", "unicode", ",", "restriction_dict", "=", "{", "'pattern'", ":", "u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\\\p{N}\\\\p{L}]+)?'", "}", ")", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"sip-ipv4-address\"", ",", "rest_name", "=", "\"sip-ipv4-address\"", ",", "parent", "=", "self", ",", "choice", "=", "(", "u'ch-update-source'", ",", "u'ca-ipv4'", ")", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'cli-drop-node-name'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-bgp'", ",", "defining_module", "=", "'brocade-bgp'", ",", "yang_type", "=", "'sip-ipv4-address'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"sip_ipv4_address must be of a type compatible with sip-ipv4-address\"\"\"", ",", "'defined-type'", ":", "\"brocade-bgp:sip-ipv4-address\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\\\p{N}\\\\p{L}]+)?'}), is_leaf=True, yang_name=\"sip-ipv4-address\", rest_name=\"sip-ipv4-address\", parent=self, choice=(u'ch-update-source', u'ca-ipv4'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='sip-ipv4-address', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__sip_ipv4_address", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for sip_ipv4_address, mapped from YANG variable /rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/neighbor_ips/neighbor_addr/update_source/sip_ipv4_address (sip-ipv4-address) If this variable is read-only (config: false) in the source YANG file, then _set_sip_ipv4_address is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_sip_ipv4_address() directly.
[ "Setter", "method", "for", "sip_ipv4_address", "mapped", "from", "YANG", "variable", "/", "rbridge_id", "/", "router", "/", "router_bgp", "/", "router_bgp_attributes", "/", "neighbor", "/", "neighbor_ips", "/", "neighbor_addr", "/", "update_source", "/", "sip_ipv4_address", "(", "sip", "-", "ipv4", "-", "address", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_sip_ipv4_address", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_sip_ipv4_address", "()", "directly", "." ]
python
train
aleju/imgaug
imgaug/augmentables/kps.py
https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/kps.py#L414-L435
def on(self, image): """ Project keypoints from one image to a new one. Parameters ---------- image : ndarray or tuple of int New image onto which the keypoints are to be projected. May also simply be that new image's shape tuple. Returns ------- keypoints : imgaug.KeypointsOnImage Object containing all projected keypoints. """ shape = normalize_shape(image) if shape[0:2] == self.shape[0:2]: return self.deepcopy() else: keypoints = [kp.project(self.shape, shape) for kp in self.keypoints] return self.deepcopy(keypoints, shape)
[ "def", "on", "(", "self", ",", "image", ")", ":", "shape", "=", "normalize_shape", "(", "image", ")", "if", "shape", "[", "0", ":", "2", "]", "==", "self", ".", "shape", "[", "0", ":", "2", "]", ":", "return", "self", ".", "deepcopy", "(", ")", "else", ":", "keypoints", "=", "[", "kp", ".", "project", "(", "self", ".", "shape", ",", "shape", ")", "for", "kp", "in", "self", ".", "keypoints", "]", "return", "self", ".", "deepcopy", "(", "keypoints", ",", "shape", ")" ]
Project keypoints from one image to a new one. Parameters ---------- image : ndarray or tuple of int New image onto which the keypoints are to be projected. May also simply be that new image's shape tuple. Returns ------- keypoints : imgaug.KeypointsOnImage Object containing all projected keypoints.
[ "Project", "keypoints", "from", "one", "image", "to", "a", "new", "one", "." ]
python
valid
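A hedged projection sketch (assumes imgaug is installed; shapes and coordinates are illustrative):

import numpy as np
from imgaug.augmentables.kps import Keypoint, KeypointsOnImage

image = np.zeros((100, 200, 3), dtype=np.uint8)
smaller = np.zeros((50, 100, 3), dtype=np.uint8)
kps = KeypointsOnImage([Keypoint(x=40, y=30)], shape=image.shape)
kps_small = kps.on(smaller)  # keypoint rescaled to the 50x100 image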
allenai/allennlp
allennlp/semparse/domain_languages/domain_language.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/domain_language.py#L609-L645
def _get_transitions(self, expression: Any, expected_type: PredicateType) -> Tuple[List[str], PredicateType]: """ This is used when converting a logical form into an action sequence. This piece recursively translates a lisp expression into an action sequence, making sure we match the expected type (or using the expected type to get the right type for constant expressions). """ if isinstance(expression, (list, tuple)): function_transitions, return_type, argument_types = self._get_function_transitions(expression[0], expected_type) if len(argument_types) != len(expression[1:]): raise ParsingError(f'Wrong number of arguments for function in {expression}') argument_transitions = [] for argument_type, subexpression in zip(argument_types, expression[1:]): argument_transitions.extend(self._get_transitions(subexpression, argument_type)[0]) return function_transitions + argument_transitions, return_type elif isinstance(expression, str): if expression not in self._functions: raise ParsingError(f"Unrecognized constant: {expression}") constant_types = self._function_types[expression] if len(constant_types) == 1: constant_type = constant_types[0] # This constant had only one type; that's the easy case. if expected_type and expected_type != constant_type: raise ParsingError(f'{expression} did not have expected type {expected_type} ' f'(found {constant_type})') return [f'{constant_type} -> {expression}'], constant_type else: if not expected_type: raise ParsingError('With no expected type and multiple types to pick from ' f"I don't know what type to use (constant was {expression})") if expected_type not in constant_types: raise ParsingError(f'{expression} did not have expected type {expected_type} ' f'(found these options: {constant_types}; none matched)') return [f'{expected_type} -> {expression}'], expected_type else: raise ParsingError('Not sure how you got here. Please open an issue on github with details.')
[ "def", "_get_transitions", "(", "self", ",", "expression", ":", "Any", ",", "expected_type", ":", "PredicateType", ")", "->", "Tuple", "[", "List", "[", "str", "]", ",", "PredicateType", "]", ":", "if", "isinstance", "(", "expression", ",", "(", "list", ",", "tuple", ")", ")", ":", "function_transitions", ",", "return_type", ",", "argument_types", "=", "self", ".", "_get_function_transitions", "(", "expression", "[", "0", "]", ",", "expected_type", ")", "if", "len", "(", "argument_types", ")", "!=", "len", "(", "expression", "[", "1", ":", "]", ")", ":", "raise", "ParsingError", "(", "f'Wrong number of arguments for function in {expression}'", ")", "argument_transitions", "=", "[", "]", "for", "argument_type", ",", "subexpression", "in", "zip", "(", "argument_types", ",", "expression", "[", "1", ":", "]", ")", ":", "argument_transitions", ".", "extend", "(", "self", ".", "_get_transitions", "(", "subexpression", ",", "argument_type", ")", "[", "0", "]", ")", "return", "function_transitions", "+", "argument_transitions", ",", "return_type", "elif", "isinstance", "(", "expression", ",", "str", ")", ":", "if", "expression", "not", "in", "self", ".", "_functions", ":", "raise", "ParsingError", "(", "f\"Unrecognized constant: {expression}\"", ")", "constant_types", "=", "self", ".", "_function_types", "[", "expression", "]", "if", "len", "(", "constant_types", ")", "==", "1", ":", "constant_type", "=", "constant_types", "[", "0", "]", "# This constant had only one type; that's the easy case.", "if", "expected_type", "and", "expected_type", "!=", "constant_type", ":", "raise", "ParsingError", "(", "f'{expression} did not have expected type {expected_type} '", "f'(found {constant_type})'", ")", "return", "[", "f'{constant_type} -> {expression}'", "]", ",", "constant_type", "else", ":", "if", "not", "expected_type", ":", "raise", "ParsingError", "(", "'With no expected type and multiple types to pick from '", "f\"I don't know what type to use (constant was {expression})\"", ")", "if", "expected_type", "not", "in", "constant_types", ":", "raise", "ParsingError", "(", "f'{expression} did not have expected type {expected_type} '", "f'(found these options: {constant_types}; none matched)'", ")", "return", "[", "f'{expected_type} -> {expression}'", "]", ",", "expected_type", "else", ":", "raise", "ParsingError", "(", "'Not sure how you got here. Please open an issue on github with details.'", ")" ]
This is used when converting a logical form into an action sequence. This piece recursively translates a lisp expression into an action sequence, making sure we match the expected type (or using the expected type to get the right type for constant expressions).
[ "This", "is", "used", "when", "converting", "a", "logical", "form", "into", "an", "action", "sequence", ".", "This", "piece", "recursively", "translates", "a", "lisp", "expression", "into", "an", "action", "sequence", "making", "sure", "we", "match", "the", "expected", "type", "(", "or", "using", "the", "expected", "type", "to", "get", "the", "right", "type", "for", "constant", "expressions", ")", "." ]
python
train
yahoo/TensorFlowOnSpark
examples/wide_deep/census_dataset.py
https://github.com/yahoo/TensorFlowOnSpark/blob/5e4b6c185ab722fd0104ede0377e1149ea8d6f7c/examples/wide_deep/census_dataset.py#L76-L86
def download(data_dir): """Download census data if it is not already present.""" tf.gfile.MakeDirs(data_dir) training_file_path = os.path.join(data_dir, TRAINING_FILE) if not tf.gfile.Exists(training_file_path): _download_and_clean_file(training_file_path, TRAINING_URL) eval_file_path = os.path.join(data_dir, EVAL_FILE) if not tf.gfile.Exists(eval_file_path): _download_and_clean_file(eval_file_path, EVAL_URL)
[ "def", "download", "(", "data_dir", ")", ":", "tf", ".", "gfile", ".", "MakeDirs", "(", "data_dir", ")", "training_file_path", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "TRAINING_FILE", ")", "if", "not", "tf", ".", "gfile", ".", "Exists", "(", "training_file_path", ")", ":", "_download_and_clean_file", "(", "training_file_path", ",", "TRAINING_URL", ")", "eval_file_path", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "EVAL_FILE", ")", "if", "not", "tf", ".", "gfile", ".", "Exists", "(", "eval_file_path", ")", ":", "_download_and_clean_file", "(", "eval_file_path", ",", "EVAL_URL", ")" ]
Download census data if it is not already present.
[ "Download", "census", "data", "if", "it", "is", "not", "already", "present", "." ]
python
train
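Direct usage; the destination directory is a placeholder:

download('/tmp/census_data')  # downloads the training and eval CSVs only if missing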
kobejohn/PQHelper
pqhelper/base.py
https://github.com/kobejohn/PQHelper/blob/d2b78a22dcb631794295e6a159b06f39c3f10db6/pqhelper/base.py#L181-L226
def _actor_from_game_image(self, name, game_image): """Return an actor object matching the one in the game image. Note: Health and mana are based on measured percentage of a fixed maximum rather than the actual maximum in the game. Arguments: name: must be 'player' or 'opponent' game_image: opencv image of the main game area """ HEALTH_MAX = 100 MANA_MAX = 40 # get the set of tools for investigating this actor tools = {'player': self._player_tools, 'opponent': self._oppnt_tools}[name] # setup the arguments to be set: args = [name] # health: t, l, b, r = tools['health_region'].region_in(game_image) health_image = game_image[t:b, l:r] health_image = numpy.rot90(health_image) # upright for the TankLevel how_full = tools['health_tank'].how_full(health_image) if how_full is None: return None # failure health = int(round(HEALTH_MAX * how_full)) args.append((health, HEALTH_MAX)) # mana for color in ('r', 'g', 'b', 'y'): t, l, b, r = tools[color + '_region'].region_in(game_image) mana_image = game_image[t:b, l:r] how_full = tools[color + '_tank'].how_full(mana_image) if how_full is None: return None # failure mana = int(round(MANA_MAX * how_full)) args.append((mana, MANA_MAX)) # experience and coins simply start at zero x_m = (0, 1000), (0, 1000) args.extend(x_m) # hammer and scroll are unused h_c = (0, 0), (0, 0) args.extend(h_c) # build the actor and return it return Actor(*args)
[ "def", "_actor_from_game_image", "(", "self", ",", "name", ",", "game_image", ")", ":", "HEALTH_MAX", "=", "100", "MANA_MAX", "=", "40", "# get the set of tools for investigating this actor", "tools", "=", "{", "'player'", ":", "self", ".", "_player_tools", ",", "'opponent'", ":", "self", ".", "_oppnt_tools", "}", "[", "name", "]", "# setup the arguments to be set:", "args", "=", "[", "name", "]", "# health:", "t", ",", "l", ",", "b", ",", "r", "=", "tools", "[", "'health_region'", "]", ".", "region_in", "(", "game_image", ")", "health_image", "=", "game_image", "[", "t", ":", "b", ",", "l", ":", "r", "]", "health_image", "=", "numpy", ".", "rot90", "(", "health_image", ")", "# upright for the TankLevel", "how_full", "=", "tools", "[", "'health_tank'", "]", ".", "how_full", "(", "health_image", ")", "if", "how_full", "is", "None", ":", "return", "None", "# failure", "health", "=", "int", "(", "round", "(", "HEALTH_MAX", "*", "how_full", ")", ")", "args", ".", "append", "(", "(", "health", ",", "HEALTH_MAX", ")", ")", "# mana", "for", "color", "in", "(", "'r'", ",", "'g'", ",", "'b'", ",", "'y'", ")", ":", "t", ",", "l", ",", "b", ",", "r", "=", "tools", "[", "color", "+", "'_region'", "]", ".", "region_in", "(", "game_image", ")", "mana_image", "=", "game_image", "[", "t", ":", "b", ",", "l", ":", "r", "]", "how_full", "=", "tools", "[", "color", "+", "'_tank'", "]", ".", "how_full", "(", "mana_image", ")", "if", "how_full", "is", "None", ":", "return", "None", "# failure", "mana", "=", "int", "(", "round", "(", "MANA_MAX", "*", "how_full", ")", ")", "args", ".", "append", "(", "(", "mana", ",", "MANA_MAX", ")", ")", "# experience and coins simply start at zero", "x_m", "=", "(", "0", ",", "1000", ")", ",", "(", "0", ",", "1000", ")", "args", ".", "extend", "(", "x_m", ")", "# hammer and scroll are unused", "h_c", "=", "(", "0", ",", "0", ")", ",", "(", "0", ",", "0", ")", "args", ".", "extend", "(", "h_c", ")", "# build the actor and return it", "return", "Actor", "(", "*", "args", ")" ]
Return an actor object matching the one in the game image. Note: Health and mana are based on measured percentage of a fixed maximum rather than the actual maximum in the game. Arguments: name: must be 'player' or 'opponent' game_image: opencv image of the main game area
[ "Return", "an", "actor", "object", "matching", "the", "one", "in", "the", "game", "image", "." ]
python
train
ebu/PlugIt
examples/simple_service/actions.py
https://github.com/ebu/PlugIt/blob/de5f1e870f67caaef7a4a58e4bb1ed54d9c5dc53/examples/simple_service/actions.py#L31-L39
def home(request): """Show the home page. Send the list of polls""" polls = [] for row in curDB.execute('SELECT id, title FROM Poll ORDER BY title'): polls.append({'id': row[0], 'name': row[1]}) return {'polls': polls}
[ "def", "home", "(", "request", ")", ":", "polls", "=", "[", "]", "for", "row", "in", "curDB", ".", "execute", "(", "'SELECT id, title FROM Poll ORDER BY title'", ")", ":", "polls", ".", "append", "(", "{", "'id'", ":", "row", "[", "0", "]", ",", "'name'", ":", "row", "[", "1", "]", "}", ")", "return", "{", "'polls'", ":", "polls", "}" ]
Show the home page. Send the list of polls
[ "Show", "the", "home", "page", ".", "Send", "the", "list", "of", "polls" ]
python
train
edx/i18n-tools
i18n/extract.py
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/extract.py#L184-L216
def fix_header(pofile): """ Replace default headers with edX headers """ # By default, django-admin.py makemessages creates this header: # # SOME DESCRIPTIVE TITLE. # Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER # This file is distributed under the same license as the PACKAGE package. # FIRST AUTHOR <EMAIL@ADDRESS>, YEAR. pofile.metadata_is_fuzzy = [] # remove [u'fuzzy'] header = pofile.header fixes = ( ('SOME DESCRIPTIVE TITLE', EDX_MARKER), ('Translations template for PROJECT.', EDX_MARKER), ('YEAR', str(datetime.utcnow().year)), ('ORGANIZATION', 'edX'), ("THE PACKAGE'S COPYRIGHT HOLDER", "EdX"), ( 'This file is distributed under the same license as the PROJECT project.', 'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.' ), ( 'This file is distributed under the same license as the PACKAGE package.', 'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.' ), ('FIRST AUTHOR <EMAIL@ADDRESS>', 'EdX Team <[email protected]>'), ) for src, dest in fixes: header = header.replace(src, dest) pofile.header = header
[ "def", "fix_header", "(", "pofile", ")", ":", "# By default, django-admin.py makemessages creates this header:", "#", "# SOME DESCRIPTIVE TITLE.", "# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER", "# This file is distributed under the same license as the PACKAGE package.", "# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.", "pofile", ".", "metadata_is_fuzzy", "=", "[", "]", "# remove [u'fuzzy']", "header", "=", "pofile", ".", "header", "fixes", "=", "(", "(", "'SOME DESCRIPTIVE TITLE'", ",", "EDX_MARKER", ")", ",", "(", "'Translations template for PROJECT.'", ",", "EDX_MARKER", ")", ",", "(", "'YEAR'", ",", "str", "(", "datetime", ".", "utcnow", "(", ")", ".", "year", ")", ")", ",", "(", "'ORGANIZATION'", ",", "'edX'", ")", ",", "(", "\"THE PACKAGE'S COPYRIGHT HOLDER\"", ",", "\"EdX\"", ")", ",", "(", "'This file is distributed under the same license as the PROJECT project.'", ",", "'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'", ")", ",", "(", "'This file is distributed under the same license as the PACKAGE package.'", ",", "'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'", ")", ",", "(", "'FIRST AUTHOR <EMAIL@ADDRESS>'", ",", "'EdX Team <[email protected]>'", ")", ",", ")", "for", "src", ",", "dest", "in", "fixes", ":", "header", "=", "header", ".", "replace", "(", "src", ",", "dest", ")", "pofile", ".", "header", "=", "header" ]
Replace default headers with edX headers
[ "Replace", "default", "headers", "with", "edX", "headers" ]
python
train
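A hedged sketch (assumes the .po file is parsed with polib, which supplies the header and metadata attributes this function manipulates; the path is a placeholder):

import polib

po = polib.pofile('conf/locale/en/LC_MESSAGES/django.po')
fix_header(po)
po.save()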
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L7719-L7731
def xpathNextPreceding(self, cur): """Traversal function for the "preceding" direction the preceding axis contains all nodes in the same document as the context node that are before the context node in document order, excluding any ancestors and excluding attribute nodes and namespace nodes; the nodes are ordered in reverse document order """ if cur is None: cur__o = None else: cur__o = cur._o ret = libxml2mod.xmlXPathNextPreceding(self._o, cur__o) if ret is None:raise xpathError('xmlXPathNextPreceding() failed') __tmp = xmlNode(_obj=ret) return __tmp
[ "def", "xpathNextPreceding", "(", "self", ",", "cur", ")", ":", "if", "cur", "is", "None", ":", "cur__o", "=", "None", "else", ":", "cur__o", "=", "cur", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlXPathNextPreceding", "(", "self", ".", "_o", ",", "cur__o", ")", "if", "ret", "is", "None", ":", "raise", "xpathError", "(", "'xmlXPathNextPreceding() failed'", ")", "__tmp", "=", "xmlNode", "(", "_obj", "=", "ret", ")", "return", "__tmp" ]
Traversal function for the "preceding" direction the preceding axis contains all nodes in the same document as the context node that are before the context node in document order, excluding any ancestors and excluding attribute nodes and namespace nodes; the nodes are ordered in reverse document order
[ "Traversal", "function", "for", "the", "preceding", "direction", "the", "preceding", "axis", "contains", "all", "nodes", "in", "the", "same", "document", "as", "the", "context", "node", "that", "are", "before", "the", "context", "node", "in", "document", "order", "excluding", "any", "ancestors", "and", "excluding", "attribute", "nodes", "and", "namespace", "nodes", ";", "the", "nodes", "are", "ordered", "in", "reverse", "document", "order" ]
python
train
saltstack/salt
salt/states/glusterfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/glusterfs.py#L269-L323
def add_volume_bricks(name, bricks): ''' Add brick(s) to an existing volume name Volume name bricks List of bricks to add to the volume .. code-block:: yaml myvolume: glusterfs.add_volume_bricks: - bricks: - host1:/srv/gluster/drive1 - host2:/srv/gluster/drive2 Replicated Volume: glusterfs.add_volume_bricks: - name: volume2 - bricks: - host1:/srv/gluster/drive2 - host2:/srv/gluster/drive3 ''' ret = {'name': name, 'changes': {}, 'comment': '', 'result': False} volinfo = __salt__['glusterfs.info']() if name not in volinfo: ret['comment'] = 'Volume {0} does not exist'.format(name) return ret if int(volinfo[name]['status']) != 1: ret['comment'] = 'Volume {0} is not started'.format(name) return ret current_bricks = [brick['path'] for brick in volinfo[name]['bricks'].values()] if not set(bricks) - set(current_bricks): ret['result'] = True ret['comment'] = 'Bricks already added in volume {0}'.format(name) return ret bricks_added = __salt__['glusterfs.add_volume_bricks'](name, bricks) if bricks_added: ret['result'] = True ret['comment'] = 'Bricks successfully added to volume {0}'.format(name) new_bricks = [brick['path'] for brick in __salt__['glusterfs.info']()[name]['bricks'].values()] ret['changes'] = {'new': new_bricks, 'old': current_bricks} return ret ret['comment'] = 'Adding bricks to volume {0} failed'.format(name) return ret
[ "def", "add_volume_bricks", "(", "name", ",", "bricks", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", ",", "'result'", ":", "False", "}", "volinfo", "=", "__salt__", "[", "'glusterfs.info'", "]", "(", ")", "if", "name", "not", "in", "volinfo", ":", "ret", "[", "'comment'", "]", "=", "'Volume {0} does not exist'", ".", "format", "(", "name", ")", "return", "ret", "if", "int", "(", "volinfo", "[", "name", "]", "[", "'status'", "]", ")", "!=", "1", ":", "ret", "[", "'comment'", "]", "=", "'Volume {0} is not started'", ".", "format", "(", "name", ")", "return", "ret", "current_bricks", "=", "[", "brick", "[", "'path'", "]", "for", "brick", "in", "volinfo", "[", "name", "]", "[", "'bricks'", "]", ".", "values", "(", ")", "]", "if", "not", "set", "(", "bricks", ")", "-", "set", "(", "current_bricks", ")", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Bricks already added in volume {0}'", ".", "format", "(", "name", ")", "return", "ret", "bricks_added", "=", "__salt__", "[", "'glusterfs.add_volume_bricks'", "]", "(", "name", ",", "bricks", ")", "if", "bricks_added", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Bricks successfully added to volume {0}'", ".", "format", "(", "name", ")", "new_bricks", "=", "[", "brick", "[", "'path'", "]", "for", "brick", "in", "__salt__", "[", "'glusterfs.info'", "]", "(", ")", "[", "name", "]", "[", "'bricks'", "]", ".", "values", "(", ")", "]", "ret", "[", "'changes'", "]", "=", "{", "'new'", ":", "new_bricks", ",", "'old'", ":", "current_bricks", "}", "return", "ret", "ret", "[", "'comment'", "]", "=", "'Adding bricks to volume {0} failed'", ".", "format", "(", "name", ")", "return", "ret" ]
Add brick(s) to an existing volume name Volume name bricks List of bricks to add to the volume .. code-block:: yaml myvolume: glusterfs.add_volume_bricks: - bricks: - host1:/srv/gluster/drive1 - host2:/srv/gluster/drive2 Replicated Volume: glusterfs.add_volume_bricks: - name: volume2 - bricks: - host1:/srv/gluster/drive2 - host2:/srv/gluster/drive3
[ "Add", "brick", "(", "s", ")", "to", "an", "existing", "volume" ]
python
train
IDSIA/sacred
sacred/stdout_capturing.py
https://github.com/IDSIA/sacred/blob/72633776bed9b5bddf93ae7d215188e61970973a/sacred/stdout_capturing.py#L97-L110
def tee_output_python(): """Duplicate sys.stdout and sys.stderr to new StringIO.""" buffer = StringIO() out = CapturedStdout(buffer) orig_stdout, orig_stderr = sys.stdout, sys.stderr flush() sys.stdout = TeeingStreamProxy(sys.stdout, buffer) sys.stderr = TeeingStreamProxy(sys.stderr, buffer) try: yield out finally: flush() out.finalize() sys.stdout, sys.stderr = orig_stdout, orig_stderr
[ "def", "tee_output_python", "(", ")", ":", "buffer", "=", "StringIO", "(", ")", "out", "=", "CapturedStdout", "(", "buffer", ")", "orig_stdout", ",", "orig_stderr", "=", "sys", ".", "stdout", ",", "sys", ".", "stderr", "flush", "(", ")", "sys", ".", "stdout", "=", "TeeingStreamProxy", "(", "sys", ".", "stdout", ",", "buffer", ")", "sys", ".", "stderr", "=", "TeeingStreamProxy", "(", "sys", ".", "stderr", ",", "buffer", ")", "try", ":", "yield", "out", "finally", ":", "flush", "(", ")", "out", ".", "finalize", "(", ")", "sys", ".", "stdout", ",", "sys", ".", "stderr", "=", "orig_stdout", ",", "orig_stderr" ]
Duplicate sys.stdout and sys.stderr to new StringIO.
[ "Duplicate", "sys", ".", "stdout", "and", "sys", ".", "stderr", "to", "new", "StringIO", "." ]
python
train
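A hedged usage sketch for tee_output_python above: in sacred the generator is wrapped with contextlib.contextmanager, so assuming that decoration, anything written inside the block is both printed and captured:

import sys

with tee_output_python() as captured:
    print("goes to the console and to the buffer")
    sys.stderr.write("stderr is teed as well\n")
# after the block, `captured` (a CapturedStdout) holds the teed text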
horejsek/python-webdriverwrapper
webdriverwrapper/forms.py
https://github.com/horejsek/python-webdriverwrapper/blob/a492f79ab60ed83d860dd817b6a0961500d7e3f5/webdriverwrapper/forms.py#L20-L55
def fill_out(self, data, prefix='', skip_reset=False):
    """
    Fill out ``data`` by dictionary (key is the name attribute of inputs).
    You can pass normal Pythonic data and don't have to care about how
    to use the WebDriver API.

    By ``prefix`` you can specify a prefix for all name attributes. For
    example you can have inputs called ``client.name`` and
    ``client.surname`` - then you pass the string ``"client."`` to
    ``prefix`` and just ``"name"`` in the dictionary.

    Option ``skip_reset`` skips the reset step, so filling can go faster.
    For example for multiple selects it calls ``deselect_all`` first, but
    that needs to check for every option whether it is selected, which is
    very slow for really big multiple selects. If you know the element is
    not filled yet, you can skip the reset and save up to one minute in
    some cases! The same applies to common text inputs, where ``clear``
    is called first.

    Example:

    .. code-block:: python

        driver.get_elm('formid').fill_out({
            'name': 'Michael',
            'surname': 'Horejsek',
            'age': 24,
            'enabled': True,
            'multibox': ['value1', 'value2']
        }, prefix='user_')

    .. versionchanged:: 2.2
        ``turbo`` renamed to ``skip_reset`` and used also for common
        elements like text inputs or textareas.
    """
    for elm_name, value in data.items():
        FormElement(self, prefix + elm_name).fill_out(value, skip_reset)
[ "def", "fill_out", "(", "self", ",", "data", ",", "prefix", "=", "''", ",", "skip_reset", "=", "False", ")", ":", "for", "elm_name", ",", "value", "in", "data", ".", "items", "(", ")", ":", "FormElement", "(", "self", ",", "prefix", "+", "elm_name", ")", ".", "fill_out", "(", "value", ",", "skip_reset", ")" ]
Fill out ``data`` by dictionary (key is the name attribute of inputs). You can pass normal Pythonic data and don't have to care about how to use the WebDriver API. By ``prefix`` you can specify a prefix for all name attributes. For example you can have inputs called ``client.name`` and ``client.surname`` - then you pass the string ``"client."`` to ``prefix`` and just ``"name"`` in the dictionary. Option ``skip_reset`` skips the reset step, so filling can go faster. For example for multiple selects it calls ``deselect_all`` first, but that needs to check for every option whether it is selected, which is very slow for really big multiple selects. If you know the element is not filled yet, you can skip the reset and save up to one minute in some cases! The same applies to common text inputs, where ``clear`` is called first. Example: .. code-block:: python driver.get_elm('formid').fill_out({ 'name': 'Michael', 'surname': 'Horejsek', 'age': 24, 'enabled': True, 'multibox': ['value1', 'value2'] }, prefix='user_') .. versionchanged:: 2.2 ``turbo`` renamed to ``skip_reset`` and used also for common elements like text inputs or textareas.
[ "Fill", "out", "data", "by", "dictionary", "(", "key", "is", "name", "attribute", "of", "inputs", ")", ".", "You", "can", "pass", "normal", "Pythonic", "data", "and", "don", "t", "have", "to", "care", "about", "how", "to", "use", "API", "of", "WebDriver", "." ]
python
train
vtkiorg/vtki
vtki/plotting.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/plotting.py#L2021-L2025
def write_frame(self): """ Writes a single frame to the movie file """ if not hasattr(self, 'mwriter'): raise AssertionError('This plotter has not opened a movie or GIF file.') self.mwriter.append_data(self.image)
[ "def", "write_frame", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'mwriter'", ")", ":", "raise", "AssertionError", "(", "'This plotter has not opened a movie or GIF file.'", ")", "self", ".", "mwriter", ".", "append_data", "(", "self", ".", "image", ")" ]
Writes a single frame to the movie file
[ "Writes", "a", "single", "frame", "to", "the", "movie", "file" ]
python
train
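A sketch of the intended call order for write_frame above; the assertion message implies an open_movie() step must come first. The plotter setup details here are illustrative, not prescriptive:

import vtki

plotter = vtki.Plotter(off_screen=True)
plotter.add_mesh(vtki.Sphere())
plotter.open_movie('sphere.mp4')   # creates self.mwriter
for _ in range(30):
    plotter.write_frame()          # appends the current render to the movie
plotter.close()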
JarryShaw/PyPCAPKit
src/protocols/internet/hip.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/internet/hip.py#L2476-L2511
def _read_para_relay_hmac(self, code, cbit, clen, *, desc, length, version): """Read HIP RELAY_HMAC parameter. Structure of HIP RELAY_HMAC parameter [RFC 5770]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | HMAC | / / / +-------------------------------+ | | Padding | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 relay_hmac.type Parameter Type 1 15 relay_hmac.critical Critical Bit 2 16 relay_hmac.length Length of Contents 4 32 relay_hmac.hmac HMAC ? ? - Padding """ _hmac = self._read_fileng(clen) relay_hmac = dict( type=desc, critical=cbit, length=clen, hmac=_hmac, ) _plen = length - clen if _plen: self._read_fileng(_plen) return relay_hmac
[ "def", "_read_para_relay_hmac", "(", "self", ",", "code", ",", "cbit", ",", "clen", ",", "*", ",", "desc", ",", "length", ",", "version", ")", ":", "_hmac", "=", "self", ".", "_read_fileng", "(", "clen", ")", "relay_hmac", "=", "dict", "(", "type", "=", "desc", ",", "critical", "=", "cbit", ",", "length", "=", "clen", ",", "hmac", "=", "_hmac", ",", ")", "_plen", "=", "length", "-", "clen", "if", "_plen", ":", "self", ".", "_read_fileng", "(", "_plen", ")", "return", "relay_hmac" ]
Read HIP RELAY_HMAC parameter. Structure of HIP RELAY_HMAC parameter [RFC 5770]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | HMAC | / / / +-------------------------------+ | | Padding | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 relay_hmac.type Parameter Type 1 15 relay_hmac.critical Critical Bit 2 16 relay_hmac.length Length of Contents 4 32 relay_hmac.hmac HMAC ? ? - Padding
[ "Read", "HIP", "RELAY_HMAC", "parameter", "." ]
python
train
DBuildService/dockerfile-parse
dockerfile_parse/parser.py
https://github.com/DBuildService/dockerfile-parse/blob/3d7b514d8b8eded1b33529cf0f6a0770a573aee0/dockerfile_parse/parser.py#L169-L184
def content(self): """ :return: string (unicode) with Dockerfile content """ if self.cache_content and self.cached_content: return self.cached_content try: with self._open_dockerfile('rb') as dockerfile: content = b2u(dockerfile.read()) if self.cache_content: self.cached_content = content return content except (IOError, OSError) as ex: logger.error("Couldn't retrieve content of dockerfile: %r", ex) raise
[ "def", "content", "(", "self", ")", ":", "if", "self", ".", "cache_content", "and", "self", ".", "cached_content", ":", "return", "self", ".", "cached_content", "try", ":", "with", "self", ".", "_open_dockerfile", "(", "'rb'", ")", "as", "dockerfile", ":", "content", "=", "b2u", "(", "dockerfile", ".", "read", "(", ")", ")", "if", "self", ".", "cache_content", ":", "self", ".", "cached_content", "=", "content", "return", "content", "except", "(", "IOError", ",", "OSError", ")", "as", "ex", ":", "logger", ".", "error", "(", "\"Couldn't retrieve content of dockerfile: %r\"", ",", "ex", ")", "raise" ]
:return: string (unicode) with Dockerfile content
[ ":", "return", ":", "string", "(", "unicode", ")", "with", "Dockerfile", "content" ]
python
train
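Illustrative read of the cached property above, assuming the package's DockerfileParser constructor with its cache_content flag (the path is made up):

from dockerfile_parse import DockerfileParser

dfp = DockerfileParser(path='/tmp/build', cache_content=True)  # directory holding a Dockerfile
print(dfp.content)   # unicode Dockerfile text; served from cache on repeat access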
klahnakoski/pyLibrary
jx_base/query.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/jx_base/query.py#L309-L370
def _normalize_select(select, frum, schema=None):
    """
    :param select: ONE SELECT COLUMN
    :param frum: TABLE TO get_columns()
    :param schema: SCHEMA TO LOOKUP NAMES FOR DEFINITIONS
    :return: AN ARRAY OF SELECT COLUMNS
    """
    if not _Column:
        _late_import()

    if is_text(select):
        canonical = select = Data(value=select)
    else:
        select = wrap(select)
        canonical = select.copy()

    canonical.aggregate = coalesce(canonical_aggregates[select.aggregate].name, select.aggregate, "none")
    canonical.default = coalesce(select.default, canonical_aggregates[canonical.aggregate].default)

    if hasattr(unwrap(frum), "_normalize_select"):
        return frum._normalize_select(canonical)

    output = []

    if not select.value or select.value == ".":
        output.extend([
            set_default(
                {
                    "name": c.name,
                    "value": jx_expression(c.name, schema=schema)
                },
                canonical
            )
            for c in frum.get_leaves()
        ])
    elif is_text(select.value):
        if select.value.endswith(".*"):
            canonical.name = coalesce(select.name, ".")
            value = jx_expression(select.value[:-2], schema=schema)  # slice the value string, not the select object
            if not is_op(value, Variable):
                Log.error("`*` over general expression not supported yet")
            output.extend([  # extend, as in the leaf branch above; append would nest a list
                set_default(
                    {
                        "value": LeavesOp(value, prefix=select.prefix),
                        "format": "dict"  # MARKUP FOR DECODING
                    },
                    canonical
                )
                for c in frum.get_columns()
                if c.jx_type not in STRUCT
            ])
        else:
            Log.error("do not know what to do")
    else:
        canonical.name = coalesce(select.name, select.value, select.aggregate)
        canonical.value = jx_expression(select.value, schema=schema)
        output.append(canonical)

    output = wrap(output)
    if any(n == None for n in output.name):
        Log.error("expecting select to have a name: {{select}}", select=select)
    return output
[ "def", "_normalize_select", "(", "select", ",", "frum", ",", "schema", "=", "None", ")", ":", "if", "not", "_Column", ":", "_late_import", "(", ")", "if", "is_text", "(", "select", ")", ":", "canonical", "=", "select", "=", "Data", "(", "value", "=", "select", ")", "else", ":", "select", "=", "wrap", "(", "select", ")", "canonical", "=", "select", ".", "copy", "(", ")", "canonical", ".", "aggregate", "=", "coalesce", "(", "canonical_aggregates", "[", "select", ".", "aggregate", "]", ".", "name", ",", "select", ".", "aggregate", ",", "\"none\"", ")", "canonical", ".", "default", "=", "coalesce", "(", "select", ".", "default", ",", "canonical_aggregates", "[", "canonical", ".", "aggregate", "]", ".", "default", ")", "if", "hasattr", "(", "unwrap", "(", "frum", ")", ",", "\"_normalize_select\"", ")", ":", "return", "frum", ".", "_normalize_select", "(", "canonical", ")", "output", "=", "[", "]", "if", "not", "select", ".", "value", "or", "select", ".", "value", "==", "\".\"", ":", "output", ".", "extend", "(", "[", "set_default", "(", "{", "\"name\"", ":", "c", ".", "name", ",", "\"value\"", ":", "jx_expression", "(", "c", ".", "name", ",", "schema", "=", "schema", ")", "}", ",", "canonical", ")", "for", "c", "in", "frum", ".", "get_leaves", "(", ")", "]", ")", "elif", "is_text", "(", "select", ".", "value", ")", ":", "if", "select", ".", "value", ".", "endswith", "(", "\".*\"", ")", ":", "canonical", ".", "name", "=", "coalesce", "(", "select", ".", "name", ",", "\".\"", ")", "value", "=", "jx_expression", "(", "select", "[", ":", "-", "2", "]", ",", "schema", "=", "schema", ")", "if", "not", "is_op", "(", "value", ",", "Variable", ")", ":", "Log", ".", "error", "(", "\"`*` over general expression not supported yet\"", ")", "output", ".", "append", "(", "[", "set_default", "(", "{", "\"value\"", ":", "LeavesOp", "(", "value", ",", "prefix", "=", "select", ".", "prefix", ")", ",", "\"format\"", ":", "\"dict\"", "# MARKUP FOR DECODING", "}", ",", "canonical", ")", "for", "c", "in", "frum", ".", "get_columns", "(", ")", "if", "c", ".", "jx_type", "not", "in", "STRUCT", "]", ")", "else", ":", "Log", ".", "error", "(", "\"do not know what to do\"", ")", "else", ":", "canonical", ".", "name", "=", "coalesce", "(", "select", ".", "name", ",", "select", ".", "value", ",", "select", ".", "aggregate", ")", "canonical", ".", "value", "=", "jx_expression", "(", "select", ".", "value", ",", "schema", "=", "schema", ")", "output", ".", "append", "(", "canonical", ")", "output", "=", "wrap", "(", "output", ")", "if", "any", "(", "n", "==", "None", "for", "n", "in", "output", ".", "name", ")", ":", "Log", ".", "error", "(", "\"expecting select to have a name: {{select}}\"", ",", "select", "=", "select", ")", "return", "output" ]
:param select: ONE SELECT COLUMN :param frum: TABLE TO get_columns() :param schema: SCHEMA TO LOOKUP NAMES FOR DEFINITIONS :return: AN ARRAY OF SELECT COLUMNS
[ ":", "param", "select", ":", "ONE", "SELECT", "COLUMN", ":", "param", "frum", ":", "TABLE", "TO", "get_columns", "()", ":", "param", "schema", ":", "SCHEMA", "TO", "LOOKUP", "NAMES", "FOR", "DEFINITIONS", ":", "return", ":", "AN", "ARRAY", "OF", "SELECT", "COLUMNS" ]
python
train
SeattleTestbed/seash
pyreadline/console/console.py
https://github.com/SeattleTestbed/seash/blob/40f9d2285662ff8b61e0468b4196acee089b273b/pyreadline/console/console.py#L253-L261
def pos(self, x=None, y=None): u'''Move or query the window cursor.''' if x is None: info = CONSOLE_SCREEN_BUFFER_INFO() self.GetConsoleScreenBufferInfo(self.hout, byref(info)) return (info.dwCursorPosition.X, info.dwCursorPosition.Y) else: return self.SetConsoleCursorPosition(self.hout, self.fixcoord(x, y))
[ "def", "pos", "(", "self", ",", "x", "=", "None", ",", "y", "=", "None", ")", ":", "if", "x", "is", "None", ":", "info", "=", "CONSOLE_SCREEN_BUFFER_INFO", "(", ")", "self", ".", "GetConsoleScreenBufferInfo", "(", "self", ".", "hout", ",", "byref", "(", "info", ")", ")", "return", "(", "info", ".", "dwCursorPosition", ".", "X", ",", "info", ".", "dwCursorPosition", ".", "Y", ")", "else", ":", "return", "self", ".", "SetConsoleCursorPosition", "(", "self", ".", "hout", ",", "self", ".", "fixcoord", "(", "x", ",", "y", ")", ")" ]
Move or query the window cursor.
[ "u", "Move", "or", "query", "the", "window", "cursor", "." ]
python
train
dswah/pyGAM
pygam/terms.py
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/terms.py#L1370-L1412
def _build_marginal_constraints(self, i, coef, constraint_lam, constraint_l2): """builds a constraint matrix for a marginal term in the tensor term takes a tensor's coef vector, and slices it into pieces corresponding to term i, then builds a constraint matrix for each piece of the coef vector, and assembles them into a composite constraint matrix Parameters ---------- i : int, index of the marginal term for which to build a constraint matrix coefs : array-like containing the coefficients of the tensor term constraint_lam : float, penalty to impose on the constraint. typically this is a very large number. constraint_l2 : float, loading to improve the numerical conditioning of the constraint matrix. typically this is a very small number. Returns ------- C : sparse CSC matrix containing the model constraints in quadratic form """ composite_C = np.zeros((len(coef), len(coef))) for slice_ in self._iterate_marginal_coef_slices(i): # get the slice of coefficient vector coef_slice = coef[slice_] # build the constraint matrix for that slice slice_C = self._terms[i].build_constraints(coef_slice, constraint_lam, constraint_l2) # now enter it into the composite composite_C[tuple(np.meshgrid(slice_, slice_))] = slice_C.A return sp.sparse.csc_matrix(composite_C)
[ "def", "_build_marginal_constraints", "(", "self", ",", "i", ",", "coef", ",", "constraint_lam", ",", "constraint_l2", ")", ":", "composite_C", "=", "np", ".", "zeros", "(", "(", "len", "(", "coef", ")", ",", "len", "(", "coef", ")", ")", ")", "for", "slice_", "in", "self", ".", "_iterate_marginal_coef_slices", "(", "i", ")", ":", "# get the slice of coefficient vector", "coef_slice", "=", "coef", "[", "slice_", "]", "# build the constraint matrix for that slice", "slice_C", "=", "self", ".", "_terms", "[", "i", "]", ".", "build_constraints", "(", "coef_slice", ",", "constraint_lam", ",", "constraint_l2", ")", "# now enter it into the composite", "composite_C", "[", "tuple", "(", "np", ".", "meshgrid", "(", "slice_", ",", "slice_", ")", ")", "]", "=", "slice_C", ".", "A", "return", "sp", ".", "sparse", ".", "csc_matrix", "(", "composite_C", ")" ]
builds a constraint matrix for a marginal term in the tensor term takes a tensor's coef vector, and slices it into pieces corresponding to term i, then builds a constraint matrix for each piece of the coef vector, and assembles them into a composite constraint matrix Parameters ---------- i : int, index of the marginal term for which to build a constraint matrix coefs : array-like containing the coefficients of the tensor term constraint_lam : float, penalty to impose on the constraint. typically this is a very large number. constraint_l2 : float, loading to improve the numerical conditioning of the constraint matrix. typically this is a very small number. Returns ------- C : sparse CSC matrix containing the model constraints in quadratic form
[ "builds", "a", "constraint", "matrix", "for", "a", "marginal", "term", "in", "the", "tensor", "term" ]
python
train
tensorflow/tensorboard
tensorboard/plugins/hparams/list_session_groups.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/list_session_groups.py#L310-L327
def _find_metric_value(session_or_group, metric_name): """Returns the metric_value for a given metric in a session or session group. Args: session_or_group: A Session protobuffer or SessionGroup protobuffer. metric_name: A MetricName protobuffer. The metric to search for. Returns: A MetricValue protobuffer representing the value of the given metric or None if no such metric was found in session_or_group. """ # Note: We can speed this up by converting the metric_values field # to a dictionary on initialization, to avoid a linear search here. We'll # need to wrap the SessionGroup and Session protos in a python object for # that. for metric_value in session_or_group.metric_values: if (metric_value.name.tag == metric_name.tag and metric_value.name.group == metric_name.group): return metric_value
[ "def", "_find_metric_value", "(", "session_or_group", ",", "metric_name", ")", ":", "# Note: We can speed this up by converting the metric_values field", "# to a dictionary on initialization, to avoid a linear search here. We'll", "# need to wrap the SessionGroup and Session protos in a python object for", "# that.", "for", "metric_value", "in", "session_or_group", ".", "metric_values", ":", "if", "(", "metric_value", ".", "name", ".", "tag", "==", "metric_name", ".", "tag", "and", "metric_value", ".", "name", ".", "group", "==", "metric_name", ".", "group", ")", ":", "return", "metric_value" ]
Returns the metric_value for a given metric in a session or session group. Args: session_or_group: A Session protobuffer or SessionGroup protobuffer. metric_name: A MetricName protobuffer. The metric to search for. Returns: A MetricValue protobuffer representing the value of the given metric or None if no such metric was found in session_or_group.
[ "Returns", "the", "metric_value", "for", "a", "given", "metric", "in", "a", "session", "or", "session", "group", "." ]
python
train
sighingnow/parsec.py
src/parsec/__init__.py
https://github.com/sighingnow/parsec.py/blob/ed50e1e259142757470b925f8d20dfe5ad223af0/src/parsec/__init__.py#L29-L35
def loc_info(text, index): '''Location of `index` in source code `text`.''' if index > len(text): raise ValueError('Invalid index.') line, last_ln = text.count('\n', 0, index), text.rfind('\n', 0, index) col = index - (last_ln + 1) return (line, col)
[ "def", "loc_info", "(", "text", ",", "index", ")", ":", "if", "index", ">", "len", "(", "text", ")", ":", "raise", "ValueError", "(", "'Invalid index.'", ")", "line", ",", "last_ln", "=", "text", ".", "count", "(", "'\\n'", ",", "0", ",", "index", ")", ",", "text", ".", "rfind", "(", "'\\n'", ",", "0", ",", "index", ")", "col", "=", "index", "-", "(", "last_ln", "+", "1", ")", "return", "(", "line", ",", "col", ")" ]
Location of `index` in source code `text`.
[ "Location", "of", "index", "in", "source", "code", "text", "." ]
python
train
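A concrete check of the zero-based (line, column) arithmetic in loc_info above:

text = "first line\nsecond"
assert loc_info(text, 0) == (0, 0)    # 'f' on the first line
assert loc_info(text, 13) == (1, 2)   # 'c' in "second" sits on line 1, column 2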
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/util/bottle3.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/util/bottle3.py#L784-L789
def wsgiheader(self): ''' Returns a wsgi conform list of header/value pairs. ''' for c in list(self.COOKIES.values()): if c.OutputString() not in self.headers.getall('Set-Cookie'): self.headers.append('Set-Cookie', c.OutputString()) return list(self.headers.iterallitems())
[ "def", "wsgiheader", "(", "self", ")", ":", "for", "c", "in", "list", "(", "self", ".", "COOKIES", ".", "values", "(", ")", ")", ":", "if", "c", ".", "OutputString", "(", ")", "not", "in", "self", ".", "headers", ".", "getall", "(", "'Set-Cookie'", ")", ":", "self", ".", "headers", ".", "append", "(", "'Set-Cookie'", ",", "c", ".", "OutputString", "(", ")", ")", "return", "list", "(", "self", ".", "headers", ".", "iterallitems", "(", ")", ")" ]
Returns a WSGI-conformant list of header/value pairs.
[ "Returns", "a", "wsgi", "conform", "list", "of", "header", "/", "value", "pairs", "." ]
python
train
facelessuser/wcmatch
wcmatch/glob.py
https://github.com/facelessuser/wcmatch/blob/d153e7007cc73b994ae1ba553dc4584039f5c212/wcmatch/glob.py#L459-L463
def raw_escape(pattern, unix=False): """Apply raw character transform before applying escape.""" pattern = util.norm_pattern(pattern, False, True) return escape(pattern, unix)
[ "def", "raw_escape", "(", "pattern", ",", "unix", "=", "False", ")", ":", "pattern", "=", "util", ".", "norm_pattern", "(", "pattern", ",", "False", ",", "True", ")", "return", "escape", "(", "pattern", ",", "unix", ")" ]
Apply raw character transform before applying escape.
[ "Apply", "raw", "character", "transform", "before", "applying", "escape", "." ]
python
train
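A hedged example for raw_escape above: norm_pattern with those flags realizes raw-string escapes before escape() neutralizes glob magic, so roughly (the exact escaping is decided inside wcmatch):

from wcmatch import glob

pattern = glob.raw_escape(r'file\x31[abc].txt')
# the \x31 escape becomes '1', and magic characters such as '[' end up backslash-escaped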
opinkerfi/nago
nago/core/__init__.py
https://github.com/opinkerfi/nago/blob/85e1bdd1de0122f56868a483e7599e1b36a439b0/nago/core/__init__.py#L151-L170
def save(self):
    """ Save this node (and all its attributes) to config """
    cfg_file = "/etc/nago/nago.ini"
    config = ConfigParser.ConfigParser()
    config.read(cfg_file)
    token = self.data.pop("token", self.token)
    if token != self._original_token:
        config.remove_section(self._original_token)
        config.add_section(token)
    if token not in config.sections():
        config.add_section(token)
    for key, value in self.data.items():
        config.set(token, key, value)
    for key, value in config.items(token):
        if key not in self.data:
            config.set(token, key, None)
    with open(cfg_file, 'w') as f:
        return config.write(f)
[ "def", "save", "(", "self", ")", ":", "cfg_file", "=", "\"/etc/nago/nago.ini\"", "config", "=", "ConfigParser", ".", "ConfigParser", "(", ")", "config", ".", "read", "(", "cfg_file", ")", "result", "=", "{", "}", "token", "=", "self", ".", "data", ".", "pop", "(", "\"token\"", ",", "self", ".", "token", ")", "if", "token", "!=", "self", ".", "_original_token", ":", "config", ".", "remove_section", "(", "self", ".", "_original_token", ")", "config", ".", "add_section", "(", "token", ")", "if", "token", "not", "in", "config", ".", "sections", "(", ")", ":", "config", ".", "add_section", "(", "token", ")", "for", "key", ",", "value", "in", "self", ".", "data", ".", "items", "(", ")", ":", "config", ".", "set", "(", "token", ",", "key", ",", "value", ")", "for", "key", ",", "value", "in", "config", ".", "items", "(", "token", ")", ":", "if", "key", "not", "in", "self", ".", "data", ":", "config", ".", "set", "(", "token", ",", "key", ",", "None", ")", "with", "open", "(", "cfg_file", ",", "'w'", ")", "as", "f", ":", "return", "config", ".", "write", "(", "f", ")" ]
Save this node (and all its attributes) to config
[ "Save", "this", "node", "(", "and", "all", "its", "attributes", ")", "to", "config" ]
python
train
python-escpos/python-escpos
src/escpos/printer.py
https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/printer.py#L266-L271
def open(self):
    """ Open system file """
    try:
        # the builtin open() raises on failure rather than returning None
        self.device = open(self.devfile, "wb")
    except (IOError, OSError):
        self.device = None
        print("Could not open the specified file {0}".format(self.devfile))
[ "def", "open", "(", "self", ")", ":", "self", ".", "device", "=", "open", "(", "self", ".", "devfile", ",", "\"wb\"", ")", "if", "self", ".", "device", "is", "None", ":", "print", "(", "\"Could not open the specified file {0}\"", ".", "format", "(", "self", ".", "devfile", ")", ")" ]
Open system file
[ "Open", "system", "file" ]
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/color/colormap.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/color/colormap.py#L69-L72
def _mix_simple(a, b, x): """Mix b (with proportion x) with a.""" x = np.clip(x, 0.0, 1.0) return (1.0 - x)*a + x*b
[ "def", "_mix_simple", "(", "a", ",", "b", ",", "x", ")", ":", "x", "=", "np", ".", "clip", "(", "x", ",", "0.0", ",", "1.0", ")", "return", "(", "1.0", "-", "x", ")", "*", "a", "+", "x", "*", "b" ]
Mix b (with proportion x) with a.
[ "Mix", "b", "(", "with", "proportion", "x", ")", "with", "a", "." ]
python
train
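A worked example of the linear blend above; the numbers are easy to verify by hand:

import numpy as np

red = np.array([1.0, 0.0, 0.0])
blue = np.array([0.0, 0.0, 1.0])
print(_mix_simple(red, blue, 0.25))   # [0.75 0.   0.25] -> 75% red, 25% blue
print(_mix_simple(red, blue, 2.0))    # x is clipped to 1.0, so pure blue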
google/grumpy
third_party/stdlib/dummy_thread.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/dummy_thread.py#L95-L114
def acquire(self, waitflag=None): """Dummy implementation of acquire(). For blocking calls, self.locked_status is automatically set to True and returned appropriately based on value of ``waitflag``. If it is non-blocking, then the value is actually checked and not set if it is already acquired. This is all done so that threading.Condition's assert statements aren't triggered and throw a little fit. """ if waitflag is None or waitflag: self.locked_status = True return True else: if not self.locked_status: self.locked_status = True return True else: return False
[ "def", "acquire", "(", "self", ",", "waitflag", "=", "None", ")", ":", "if", "waitflag", "is", "None", "or", "waitflag", ":", "self", ".", "locked_status", "=", "True", "return", "True", "else", ":", "if", "not", "self", ".", "locked_status", ":", "self", ".", "locked_status", "=", "True", "return", "True", "else", ":", "return", "False" ]
Dummy implementation of acquire(). For blocking calls, self.locked_status is automatically set to True and returned appropriately based on value of ``waitflag``. If it is non-blocking, then the value is actually checked and not set if it is already acquired. This is all done so that threading.Condition's assert statements aren't triggered and throw a little fit.
[ "Dummy", "implementation", "of", "acquire", "()", "." ]
python
valid
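A behavior sketch for the dummy acquire above, assuming the surrounding LockType class (whose release() flips locked_status back to False):

lock = LockType()                  # what dummy_thread.allocate_lock() returns
assert lock.acquire() is True      # blocking form always "succeeds"
assert lock.acquire(0) is False    # non-blocking on a held lock fails
lock.release()
assert lock.acquire(0) is True     # non-blocking on a free lock succeeds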
Alignak-monitoring/alignak
alignak/external_command.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L1445-L1461
def change_max_svc_check_attempts(self, service, check_attempts): """Modify max service check attempt Format of the line that triggers function call:: CHANGE_MAX_SVC_CHECK_ATTEMPTS;<host_name>;<service_description>;<check_attempts> :param service: service to edit :type service: alignak.objects.service.Service :param check_attempts: new value to set :type check_attempts: int :return: None """ service.modified_attributes |= DICT_MODATTR["MODATTR_MAX_CHECK_ATTEMPTS"].value service.max_check_attempts = check_attempts if service.state_type == u'HARD' and service.state == u'OK' and service.attempt > 1: service.attempt = service.max_check_attempts self.send_an_element(service.get_update_status_brok())
[ "def", "change_max_svc_check_attempts", "(", "self", ",", "service", ",", "check_attempts", ")", ":", "service", ".", "modified_attributes", "|=", "DICT_MODATTR", "[", "\"MODATTR_MAX_CHECK_ATTEMPTS\"", "]", ".", "value", "service", ".", "max_check_attempts", "=", "check_attempts", "if", "service", ".", "state_type", "==", "u'HARD'", "and", "service", ".", "state", "==", "u'OK'", "and", "service", ".", "attempt", ">", "1", ":", "service", ".", "attempt", "=", "service", ".", "max_check_attempts", "self", ".", "send_an_element", "(", "service", ".", "get_update_status_brok", "(", ")", ")" ]
Modify max service check attempt Format of the line that triggers function call:: CHANGE_MAX_SVC_CHECK_ATTEMPTS;<host_name>;<service_description>;<check_attempts> :param service: service to edit :type service: alignak.objects.service.Service :param check_attempts: new value to set :type check_attempts: int :return: None
[ "Modify", "max", "service", "check", "attempt", "Format", "of", "the", "line", "that", "triggers", "function", "call", "::" ]
python
train
pudo/normality
normality/transliteration.py
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/transliteration.py#L39-L44
def ascii_text(text): """Transliterate the given text and make sure it ends up as ASCII.""" text = latinize_text(text, ascii=True) if isinstance(text, six.text_type): text = text.encode('ascii', 'ignore').decode('ascii') return text
[ "def", "ascii_text", "(", "text", ")", ":", "text", "=", "latinize_text", "(", "text", ",", "ascii", "=", "True", ")", "if", "isinstance", "(", "text", ",", "six", ".", "text_type", ")", ":", "text", "=", "text", ".", "encode", "(", "'ascii'", ",", "'ignore'", ")", ".", "decode", "(", "'ascii'", ")", "return", "text" ]
Transliterate the given text and make sure it ends up as ASCII.
[ "Transliterate", "the", "given", "text", "and", "make", "sure", "it", "ends", "up", "as", "ASCII", "." ]
python
train
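An illustrative call for ascii_text above; the precise output depends on latinize_text's transliteration tables, so the result shown is indicative:

print(ascii_text(u'Häuser & Straßen'))   # e.g. 'Hauser & Strassen' - pure ASCII either way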
NarrativeScience/lsi
src/lsi/utils/hosts.py
https://github.com/NarrativeScience/lsi/blob/7d901b03fdb1a34ef795e5412bfe9685d948e32d/src/lsi/utils/hosts.py#L408-L429
def _match_regex(regex, obj): """ Returns true if the regex matches the object, or a string in the object if it is some sort of container. :param regex: A regex. :type regex: ``regex`` :param obj: An arbitrary object. :type object: ``object`` :rtype: ``bool`` """ if isinstance(obj, six.string_types): return len(regex.findall(obj)) > 0 elif isinstance(obj, dict): return _match_regex(regex, obj.values()) elif hasattr(obj, '__iter__'): # Object is a list or some other iterable. return any(_match_regex(regex, s) for s in obj if isinstance(s, six.string_types)) else: return False
[ "def", "_match_regex", "(", "regex", ",", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "six", ".", "string_types", ")", ":", "return", "len", "(", "regex", ".", "findall", "(", "obj", ")", ")", ">", "0", "elif", "isinstance", "(", "obj", ",", "dict", ")", ":", "return", "_match_regex", "(", "regex", ",", "obj", ".", "values", "(", ")", ")", "elif", "hasattr", "(", "obj", ",", "'__iter__'", ")", ":", "# Object is a list or some other iterable.", "return", "any", "(", "_match_regex", "(", "regex", ",", "s", ")", "for", "s", "in", "obj", "if", "isinstance", "(", "s", ",", "six", ".", "string_types", ")", ")", "else", ":", "return", "False" ]
Returns true if the regex matches the object, or a string in the object if it is some sort of container. :param regex: A regex. :type regex: ``regex`` :param obj: An arbitrary object. :type object: ``object`` :rtype: ``bool``
[ "Returns", "true", "if", "the", "regex", "matches", "the", "object", "or", "a", "string", "in", "the", "object", "if", "it", "is", "some", "sort", "of", "container", "." ]
python
test
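The container recursion in _match_regex above, traced on small inputs:

import re

pat = re.compile(r'web-\d+')
assert _match_regex(pat, 'web-01')                  # plain string
assert _match_regex(pat, {'host': 'web-01'})        # dict: recurses into the values
assert _match_regex(pat, ['db-01', 'web-02'])       # iterable of strings
assert not _match_regex(pat, 42)                    # neither string, dict, nor iterable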
googleapis/google-cloud-python
talent/google/cloud/talent_v4beta1/gapic/profile_service_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/talent/google/cloud/talent_v4beta1/gapic/profile_service_client.py#L98-L105
def profile_path(cls, project, tenant, profile): """Return a fully-qualified profile string.""" return google.api_core.path_template.expand( "projects/{project}/tenants/{tenant}/profiles/{profile}", project=project, tenant=tenant, profile=profile, )
[ "def", "profile_path", "(", "cls", ",", "project", ",", "tenant", ",", "profile", ")", ":", "return", "google", ".", "api_core", ".", "path_template", ".", "expand", "(", "\"projects/{project}/tenants/{tenant}/profiles/{profile}\"", ",", "project", "=", "project", ",", "tenant", "=", "tenant", ",", "profile", "=", "profile", ",", ")" ]
Return a fully-qualified profile string.
[ "Return", "a", "fully", "-", "qualified", "profile", "string", "." ]
python
train
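The template expansion in profile_path above is predictable enough to show inline (resource names are made up):

path = ProfileServiceClient.profile_path('my-project', 'my-tenant', 'p-123')
# 'projects/my-project/tenants/my-tenant/profiles/p-123'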
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_event_handler.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_event_handler.py#L12-L22
def event_handler_event_handler_list_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") event_handler = ET.SubElement(config, "event-handler", xmlns="urn:brocade.com:mgmt:brocade-event-handler") event_handler_list = ET.SubElement(event_handler, "event-handler-list") name = ET.SubElement(event_handler_list, "name") name.text = kwargs.pop('name') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "event_handler_event_handler_list_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "event_handler", "=", "ET", ".", "SubElement", "(", "config", ",", "\"event-handler\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-event-handler\"", ")", "event_handler_list", "=", "ET", ".", "SubElement", "(", "event_handler", ",", "\"event-handler-list\"", ")", "name", "=", "ET", ".", "SubElement", "(", "event_handler_list", ",", "\"name\"", ")", "name", ".", "text", "=", "kwargs", ".", "pop", "(", "'name'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
geophysics-ubonn/crtomo_tools
lib/crtomo/parManager.py
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/parManager.py#L40-L112
def add_data(self, data, metadata=None):
    """Add data to the parameter set

    Parameters
    ----------
    data: numpy.ndarray
        one or more parameter sets. It must either be 1D or 2D, with the
        first dimension the number of parameter sets (K), and the second
        the number of elements (N): K x N
    metadata: object, optional
        the provided object will be stored in the metadata dict and can
        be retrieved with the ID that is returned. If multiple (K)
        datasets are added at once, provide a list of objects with len K.

    Returns
    -------
    int, ID
        ID which can be used to access the parameter set

    Examples
    --------

    >>> # suppose that grid is a fully initialized grid object with 100
    # elements
    parman = ParMan(grid)
    #
    one_data_set = np.ones(100)
    cid = parman.add_data(one_data_set)
    print(parman.parsets[cid])
    two_data_sets = np.ones((2, 100))
    cids = parman.add_data(two_data_sets)
    print(cids)
    [0, ]
    [1, 2]

    """
    subdata = np.atleast_2d(data)

    # we try to accommodate transposed input
    if subdata.shape[1] != self.grid.nr_of_elements:
        if subdata.shape[0] == self.grid.nr_of_elements:
            subdata = subdata.T
        else:
            raise Exception(
                'Number of values does not match the number of ' +
                'elements in the grid'
            )

    # now make sure that metadata can be zipped with the subdata
    K = subdata.shape[0]
    if metadata is not None:
        if K > 1:
            if(not isinstance(metadata, (list, tuple)) or
               len(metadata) != K):
                raise Exception('metadata does not fit the provided data')
        else:
            # K == 1
            metadata = [metadata, ]

    if metadata is None:
        metadata = [None for i in range(0, K)]

    return_ids = []
    for dataset, meta in zip(subdata, metadata):
        cid = self._get_next_index()
        self.parsets[cid] = dataset
        self.metadata[cid] = meta
        return_ids.append(cid)

    if len(return_ids) == 1:
        return return_ids[0]
    else:
        return return_ids
[ "def", "add_data", "(", "self", ",", "data", ",", "metadata", "=", "None", ")", ":", "subdata", "=", "np", ".", "atleast_2d", "(", "data", ")", "# we try to accommodate transposed input", "if", "subdata", ".", "shape", "[", "1", "]", "!=", "self", ".", "grid", ".", "nr_of_elements", ":", "if", "subdata", ".", "shape", "[", "0", "]", "==", "self", ".", "grid", ".", "nr_of_elements", ":", "subdata", "=", "subdata", ".", "T", "else", ":", "raise", "Exception", "(", "'Number of values does not match the number of '", "+", "'elements in the grid'", ")", "# now make sure that metadata can be zipped with the subdata", "K", "=", "subdata", ".", "shape", "[", "0", "]", "if", "metadata", "is", "not", "None", ":", "if", "K", ">", "1", ":", "if", "(", "not", "isinstance", "(", "metadata", ",", "(", "list", ",", "tuple", ")", ")", "or", "len", "(", "metadata", ")", "!=", "K", ")", ":", "raise", "Exception", "(", "'metadata does not fit the provided data'", ")", "else", ":", "# K == 1", "metadata", "=", "[", "metadata", ",", "]", "if", "metadata", "is", "None", ":", "metadata", "=", "[", "None", "for", "i", "in", "range", "(", "0", ",", "K", ")", "]", "return_ids", "=", "[", "]", "for", "dataset", ",", "meta", "in", "zip", "(", "subdata", ",", "metadata", ")", ":", "cid", "=", "self", ".", "_get_next_index", "(", ")", "self", ".", "parsets", "[", "cid", "]", "=", "dataset", "self", ".", "metadata", "[", "cid", "]", "=", "meta", "return_ids", ".", "append", "(", "cid", ")", "if", "len", "(", "return_ids", ")", "==", "1", ":", "return", "return_ids", "[", "0", "]", "else", ":", "return", "return_ids" ]
Add data to the parameter set Parameters ---------- data: numpy.ndarray one or more parameter sets. It must either be 1D or 2D, with the first dimension the number of parameter sets (K), and the second the number of elements (N): K x N metadata: object, optional the provided object will be stored in the metadata dict and can be retrieved with the ID that is returned. If multiple (K) datasets are added at once, provide a list of objects with len K. Returns ------- int, ID ID which can be used to access the parameter set Examples -------- >>> # suppose that grid is a fully initialized grid object with 100 # elements parman = ParMan(grid) # one_data_set = np.ones(100) cid = parman.add_data(one_data_set) print(parman.parsets[cid]) two_data_sets = np.ones((2, 100)) cids = parman.add_data(two_data_sets) print(cids) [0, ] [1, 2]
[ "Add", "data", "to", "the", "parameter", "set" ]
python
train
ndrlslz/ternya
ternya/annotation.py
https://github.com/ndrlslz/ternya/blob/c05aec10029e645d63ff04313dbcf2644743481f/ternya/annotation.py#L53-L79
def nova(*arg):
    """
    Nova annotation for adding a function to process nova notifications.

    If event_type includes a wildcard, {pattern: function} will be put into the process_wildcard dict,
    else {event_type: function} will be put into the process dict.
    :param arg: event_type of notification
    """
    check_event_type(Openstack.Nova, *arg)
    event_type = arg[0]

    def decorator(func):
        if event_type.find("*") != -1:
            event_type_pattern = pre_compile(event_type)
            nova_customer_process_wildcard[event_type_pattern] = func
        else:
            nova_customer_process[event_type] = func
        log.info("add function {0} to process event_type:{1}".format(func.__name__, event_type))

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            func(*args, **kwargs)
        return wrapper

    return decorator
[ "def", "nova", "(", "*", "arg", ")", ":", "check_event_type", "(", "Openstack", ".", "Nova", ",", "*", "arg", ")", "event_type", "=", "arg", "[", "0", "]", "def", "decorator", "(", "func", ")", ":", "if", "event_type", ".", "find", "(", "\"*\"", ")", "!=", "-", "1", ":", "event_type_pattern", "=", "pre_compile", "(", "event_type", ")", "nova_customer_process_wildcard", "[", "event_type_pattern", "]", "=", "func", "else", ":", "nova_customer_process", "[", "event_type", "]", "=", "func", "log", ".", "info", "(", "\"add function {0} to process event_type:{1}\"", ".", "format", "(", "func", ".", "__name__", ",", "event_type", ")", ")", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper", "return", "decorator" ]
Nova annotation for adding a function to process nova notifications. If event_type includes a wildcard, {pattern: function} will be put into the process_wildcard dict, else {event_type: function} will be put into the process dict. :param arg: event_type of notification
[ "Nova", "annotation", "for", "adding", "function", "to", "process", "nova", "notification", "." ]
python
test
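A registration sketch for the nova decorator above. How ternya later invokes the callbacks is not shown here, so the handler signature is an assumption:

@nova('compute.instance.create.end')
def on_instance_created(body, message):     # hypothetical signature
    print(body.get('event_type'))

@nova('compute.instance.*')                 # wildcard: stored under a compiled pattern
def on_any_instance_event(body, message):   # hypothetical signature
    pass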
iotile/coretools
iotilesensorgraph/iotile/sg/walker.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilesensorgraph/iotile/sg/walker.py#L525-L543
def restore(self, state): """Restore the contents of this virtual stream walker. Args: state (dict): The previously serialized state. Raises: ArgumentError: If the serialized state does not have a matching selector. """ selector = DataStreamSelector.FromString(state.get(u'selector')) if self.selector != selector: raise ArgumentError("Attempted to restore an InvalidStreamWalker with a different selector", selector=self.selector, serialized_data=state) if state.get(u'type') != u'invalid': raise ArgumentError("Invalid serialized state for InvalidStreamWalker", serialized_data=state)
[ "def", "restore", "(", "self", ",", "state", ")", ":", "selector", "=", "DataStreamSelector", ".", "FromString", "(", "state", ".", "get", "(", "u'selector'", ")", ")", "if", "self", ".", "selector", "!=", "selector", ":", "raise", "ArgumentError", "(", "\"Attempted to restore an InvalidStreamWalker with a different selector\"", ",", "selector", "=", "self", ".", "selector", ",", "serialized_data", "=", "state", ")", "if", "state", ".", "get", "(", "u'type'", ")", "!=", "u'invalid'", ":", "raise", "ArgumentError", "(", "\"Invalid serialized state for InvalidStreamWalker\"", ",", "serialized_data", "=", "state", ")" ]
Restore the contents of this virtual stream walker. Args: state (dict): The previously serialized state. Raises: ArgumentError: If the serialized state does not have a matching selector.
[ "Restore", "the", "contents", "of", "this", "virtual", "stream", "walker", "." ]
python
train
numenta/htmresearch
htmresearch/frameworks/grid_cell_learning/CAN.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/grid_cell_learning/CAN.py#L660-L674
def computeEnvelope(self, placeCode): """ Compute an envelope for use in suppressing border cells. :param placeCode: The place code representing the population the envelope will be used for. :return: A numpy array that can be elementwise-multiplied with activations for the given cell population to apply the envelope. """ places = np.abs(placeCode - 0.5) envelope = [1 if p < 1 - self.envelopeWidth else np.exp(-1.*self.envelopeFactor * ((p - 1 + self.envelopeWidth)/self.envelopeWidth)**2) for p in places] return np.asarray(envelope)
[ "def", "computeEnvelope", "(", "self", ",", "placeCode", ")", ":", "places", "=", "np", ".", "abs", "(", "placeCode", "-", "0.5", ")", "envelope", "=", "[", "1", "if", "p", "<", "1", "-", "self", ".", "envelopeWidth", "else", "np", ".", "exp", "(", "-", "1.", "*", "self", ".", "envelopeFactor", "*", "(", "(", "p", "-", "1", "+", "self", ".", "envelopeWidth", ")", "/", "self", ".", "envelopeWidth", ")", "**", "2", ")", "for", "p", "in", "places", "]", "return", "np", ".", "asarray", "(", "envelope", ")" ]
Compute an envelope for use in suppressing border cells. :param placeCode: The place code representing the population the envelope will be used for. :return: A numpy array that can be elementwise-multiplied with activations for the given cell population to apply the envelope.
[ "Compute", "an", "envelope", "for", "use", "in", "suppressing", "border", "cells", ".", ":", "param", "placeCode", ":", "The", "place", "code", "representing", "the", "population", "the", "envelope", "will", "be", "used", "for", ".", ":", "return", ":", "A", "numpy", "array", "that", "can", "be", "elementwise", "-", "multiplied", "with", "activations", "for", "the", "given", "cell", "population", "to", "apply", "the", "envelope", "." ]
python
train
Shinichi-Nakagawa/pitchpx
pitchpx/mlbam.py
https://github.com/Shinichi-Nakagawa/pitchpx/blob/5747402a0b3416f5e910b479e100df858f0b6440/pitchpx/mlbam.py#L159-L167
def _validate_datetime_from_to(cls, start, end):
    """
    validate that the from-to range is ordered
    :param start: Start Day(YYYYMMDD)
    :param end: End Day(YYYYMMDD)
    :return: None
    :raises MlbAmBadParameter: when not start <= end
    """
    if not start <= end:
        raise MlbAmBadParameter("not Start Day({start}) <= End Day({end})".format(start=start, end=end))
[ "def", "_validate_datetime_from_to", "(", "cls", ",", "start", ",", "end", ")", ":", "if", "not", "start", "<=", "end", ":", "raise", "MlbAmBadParameter", "(", "\"not Start Day({start}) <= End Day({end})\"", ".", "format", "(", "start", "=", "start", ",", "end", "=", "end", ")", ")" ]
validate that the from-to range is ordered :param start: Start Day(YYYYMMDD) :param end: End Day(YYYYMMDD) :return: None :raises MlbAmBadParameter: when not start <= end
[ "validate", "from", "-", "to", ":", "param", "start", ":", "Start", "Day", "(", "YYYYMMDD", ")", ":", "param", "end", ":", "End", "Day", "(", "YYYYMMDD", ")", ":", "return", ":", "None", "or", "MlbAmException" ]
python
train
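A quick behavioral check for the classmethod above, assuming the enclosing class is the module's MlbAm and dates are YYYYMMDD strings as documented:

MlbAm._validate_datetime_from_to('20160401', '20161002')     # passes silently
try:
    MlbAm._validate_datetime_from_to('20161002', '20160401')
except MlbAmBadParameter as exc:
    print(exc)   # not Start Day(20161002) <= End Day(20160401)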