Dataset schema (column name : type : observed range):

repo             : string  : 7 to 55 chars
path             : string  : 4 to 223 chars
url              : string  : 87 to 315 chars
code             : string  : 75 to 104k chars
code_tokens      : list
docstring        : string  : 1 to 46.9k chars
docstring_tokens : list
language         : string  : 1 class
partition        : string  : 3 classes
avg_line_len     : float64 : 7.91 to 980
odlgroup/odl
odl/tomo/analytic/filtered_back_projection.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/analytic/filtered_back_projection.py#L49-L101
def _fbp_filter(norm_freq, filter_type, frequency_scaling):
    """Create a smoothing filter for FBP.

    Parameters
    ----------
    norm_freq : `array-like`
        Frequencies normalized to lie in the interval [0, 1].
    filter_type : {'Ram-Lak', 'Shepp-Logan', 'Cosine', 'Hamming', 'Hann', callable}
        The type of filter to be used.
        If a string is given, use one of the standard filters with that name.
        A callable should take an array of values in [0, 1] and return the
        filter for these frequencies.
    frequency_scaling : float
        Scaling of the frequencies for the filter. All frequencies are scaled
        by this number, any relative frequency above ``frequency_scaling`` is
        set to 0.

    Returns
    -------
    smoothing_filter : `numpy.ndarray`

    Examples
    --------
    Create an FBP filter

    >>> norm_freq = np.linspace(0, 1, 10)
    >>> filt = _fbp_filter(norm_freq,
    ...                    filter_type='Hann',
    ...                    frequency_scaling=0.8)
    """
    filter_type, filter_type_in = str(filter_type).lower(), filter_type
    if callable(filter_type):
        filt = filter_type(norm_freq)
    elif filter_type == 'ram-lak':
        filt = np.copy(norm_freq)
    elif filter_type == 'shepp-logan':
        filt = norm_freq * np.sinc(norm_freq / (2 * frequency_scaling))
    elif filter_type == 'cosine':
        filt = norm_freq * np.cos(norm_freq * np.pi / (2 * frequency_scaling))
    elif filter_type == 'hamming':
        filt = norm_freq * (
            0.54 + 0.46 * np.cos(norm_freq * np.pi / (frequency_scaling)))
    elif filter_type == 'hann':
        filt = norm_freq * (
            np.cos(norm_freq * np.pi / (2 * frequency_scaling)) ** 2)
    else:
        raise ValueError('unknown `filter_type` ({})'
                         ''.format(filter_type_in))

    indicator = (norm_freq <= frequency_scaling)
    filt *= indicator
    return filt
[ "def", "_fbp_filter", "(", "norm_freq", ",", "filter_type", ",", "frequency_scaling", ")", ":", "filter_type", ",", "filter_type_in", "=", "str", "(", "filter_type", ")", ".", "lower", "(", ")", ",", "filter_type", "if", "callable", "(", "filter_type", ")", ":", "filt", "=", "filter_type", "(", "norm_freq", ")", "elif", "filter_type", "==", "'ram-lak'", ":", "filt", "=", "np", ".", "copy", "(", "norm_freq", ")", "elif", "filter_type", "==", "'shepp-logan'", ":", "filt", "=", "norm_freq", "*", "np", ".", "sinc", "(", "norm_freq", "/", "(", "2", "*", "frequency_scaling", ")", ")", "elif", "filter_type", "==", "'cosine'", ":", "filt", "=", "norm_freq", "*", "np", ".", "cos", "(", "norm_freq", "*", "np", ".", "pi", "/", "(", "2", "*", "frequency_scaling", ")", ")", "elif", "filter_type", "==", "'hamming'", ":", "filt", "=", "norm_freq", "*", "(", "0.54", "+", "0.46", "*", "np", ".", "cos", "(", "norm_freq", "*", "np", ".", "pi", "/", "(", "frequency_scaling", ")", ")", ")", "elif", "filter_type", "==", "'hann'", ":", "filt", "=", "norm_freq", "*", "(", "np", ".", "cos", "(", "norm_freq", "*", "np", ".", "pi", "/", "(", "2", "*", "frequency_scaling", ")", ")", "**", "2", ")", "else", ":", "raise", "ValueError", "(", "'unknown `filter_type` ({})'", "''", ".", "format", "(", "filter_type_in", ")", ")", "indicator", "=", "(", "norm_freq", "<=", "frequency_scaling", ")", "filt", "*=", "indicator", "return", "filt" ]
Create a smoothing filter for FBP. Parameters ---------- norm_freq : `array-like` Frequencies normalized to lie in the interval [0, 1]. filter_type : {'Ram-Lak', 'Shepp-Logan', 'Cosine', 'Hamming', 'Hann', callable} The type of filter to be used. If a string is given, use one of the standard filters with that name. A callable should take an array of values in [0, 1] and return the filter for these frequencies. frequency_scaling : float Scaling of the frequencies for the filter. All frequencies are scaled by this number, any relative frequency above ``frequency_scaling`` is set to 0. Returns ------- smoothing_filter : `numpy.ndarray` Examples -------- Create an FBP filter >>> norm_freq = np.linspace(0, 1, 10) >>> filt = _fbp_filter(norm_freq, ... filter_type='Hann', ... frequency_scaling=0.8)
[ "Create", "a", "smoothing", "filter", "for", "FBP", "." ]
python
train
36.169811
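A minimal NumPy-only sketch of what the 'hann' branch of _fbp_filter above computes; the helper name and the 0.8 cutoff are illustrative assumptions, not part of the odl API.

import numpy as np

def hann_filter_sketch(norm_freq, frequency_scaling=0.8):
    # Hann window applied to the ramp filter, as in the 'hann' branch above
    filt = norm_freq * np.cos(norm_freq * np.pi / (2 * frequency_scaling)) ** 2
    # any relative frequency above the cutoff is zeroed out
    filt *= (norm_freq <= frequency_scaling)
    return filt

norm_freq = np.linspace(0, 1, 10)
print(hann_filter_sketch(norm_freq))  # trailing entries are 0 because they exceed the 0.8 cutoff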
multiformats/py-multibase
multibase/converters.py
https://github.com/multiformats/py-multibase/blob/8f435762b50a17f921c13b59eb0c7b9c52afc879/multibase/converters.py#L47-L51
def _chunk_with_padding(self, iterable, n, fillvalue=None):
    "Collect data into fixed-length chunks or blocks"
    # _chunk_with_padding('ABCDEFG', 3, 'x') --> ABC DEF Gxx"
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)
[ "def", "_chunk_with_padding", "(", "self", ",", "iterable", ",", "n", ",", "fillvalue", "=", "None", ")", ":", "# _chunk_with_padding('ABCDEFG', 3, 'x') --> ABC DEF Gxx\"", "args", "=", "[", "iter", "(", "iterable", ")", "]", "*", "n", "return", "zip_longest", "(", "*", "args", ",", "fillvalue", "=", "fillvalue", ")" ]
Collect data into fixed-length chunks or blocks
[ "Collect", "data", "into", "fixed", "-", "length", "chunks", "or", "blocks" ]
python
train
54
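The function above is the classic itertools "grouper" recipe; a standalone, stdlib-only sketch of the same idea:

from itertools import zip_longest

def chunk_with_padding(iterable, n, fillvalue=None):
    # repeating the same iterator n times makes zip_longest pull n items per chunk
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)

print(list(chunk_with_padding('ABCDEFG', 3, 'x')))
# [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]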
bwohlberg/sporco
sporco/linalg.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/linalg.py#L246-L273
def irfftn(a, s, axes=None):
    """
    Compute the inverse of the multi-dimensional discrete Fourier transform
    for real input. This function is a wrapper for
    :func:`pyfftw.interfaces.numpy_fft.irfftn`, with an interface similar to
    that of :func:`numpy.fft.irfftn`.

    Parameters
    ----------
    a : array_like
        Input array
    s : sequence of ints
        Shape of the output along each transformed axis (input is cropped or
        zero-padded to match). This parameter is not optional because, unlike
        :func:`ifftn`, the output shape cannot be uniquely determined from the
        input shape.
    axes : sequence of ints, optional (default None)
        Axes over which to compute the inverse DFT.

    Returns
    -------
    af : ndarray
        Inverse DFT of input array
    """

    return pyfftw.interfaces.numpy_fft.irfftn(
        a, s=s, axes=axes, overwrite_input=False,
        planner_effort='FFTW_MEASURE', threads=pyfftw_threads)
[ "def", "irfftn", "(", "a", ",", "s", ",", "axes", "=", "None", ")", ":", "return", "pyfftw", ".", "interfaces", ".", "numpy_fft", ".", "irfftn", "(", "a", ",", "s", "=", "s", ",", "axes", "=", "axes", ",", "overwrite_input", "=", "False", ",", "planner_effort", "=", "'FFTW_MEASURE'", ",", "threads", "=", "pyfftw_threads", ")" ]
Compute the inverse of the multi-dimensional discrete Fourier transform for real input. This function is a wrapper for :func:`pyfftw.interfaces.numpy_fft.irfftn`, with an interface similar to that of :func:`numpy.fft.irfftn`. Parameters ---------- a : array_like Input array s : sequence of ints Shape of the output along each transformed axis (input is cropped or zero-padded to match). This parameter is not optional because, unlike :func:`ifftn`, the output shape cannot be uniquely determined from the input shape. axes : sequence of ints, optional (default None) Axes over which to compute the inverse DFT. Returns ------- af : ndarray Inverse DFT of input array
[ "Compute", "the", "inverse", "of", "the", "multi", "-", "dimensional", "discrete", "Fourier", "transform", "for", "real", "input", ".", "This", "function", "is", "a", "wrapper", "for", ":", "func", ":", "pyfftw", ".", "interfaces", ".", "numpy_fft", ".", "irfftn", "with", "an", "interface", "similar", "to", "that", "of", ":", "func", ":", "numpy", ".", "fft", ".", "irfftn", "." ]
python
train
33.5
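Because the wrapper above mirrors the numpy.fft interface, a NumPy-only round trip (no pyfftw) shows why the shape argument s is required:

import numpy as np

x = np.random.rand(5, 7)           # real input with an odd last dimension
X = np.fft.rfftn(x)                # forward real FFT: last axis shrinks to 7 // 2 + 1 = 4
# without s the inverse would assume an even last dimension; passing s restores the original shape
x_back = np.fft.irfftn(X, s=x.shape)
print(np.allclose(x, x_back))      # True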
gtaylor/EVE-Market-Data-Structures
emds/formats/unified/unified_utils.py
https://github.com/gtaylor/EVE-Market-Data-Structures/blob/77d69b24f2aada3aeff8fba3d75891bfba8fdcf3/emds/formats/unified/unified_utils.py#L10-L31
def _columns_to_kwargs(conversion_table, columns, row):
    """
    Given a list of column names, and a list of values (a row), return a dict
    of kwargs that may be used to instantiate a MarketHistoryEntry
    or MarketOrder object.

    :param dict conversion_table: The conversion table to use for mapping
        spec names to kwargs.
    :param list columns: A list of column names.
    :param list row: A list of values.
    """
    kwdict = {}

    counter = 0
    for column in columns:
        # Map the column name to the correct MarketHistoryEntry kwarg.
        kwarg_name = conversion_table[column]
        # Set the kwarg to the correct value from the row.
        kwdict[kwarg_name] = row[counter]
        counter += 1

    return kwdict
[ "def", "_columns_to_kwargs", "(", "conversion_table", ",", "columns", ",", "row", ")", ":", "kwdict", "=", "{", "}", "counter", "=", "0", "for", "column", "in", "columns", ":", "# Map the column name to the correct MarketHistoryEntry kwarg.", "kwarg_name", "=", "conversion_table", "[", "column", "]", "# Set the kwarg to the correct value from the row.", "kwdict", "[", "kwarg_name", "]", "=", "row", "[", "counter", "]", "counter", "+=", "1", "return", "kwdict" ]
Given a list of column names, and a list of values (a row), return a dict of kwargs that may be used to instantiate a MarketHistoryEntry or MarketOrder object. :param dict conversion_table: The conversion table to use for mapping spec names to kwargs. :param list columns: A list of column names. :param list row: A list of values.
[ "Given", "a", "list", "of", "column", "names", "and", "a", "list", "of", "values", "(", "a", "row", ")", "return", "a", "dict", "of", "kwargs", "that", "may", "be", "used", "to", "instantiate", "a", "MarketHistoryEntry", "or", "MarketOrder", "object", "." ]
python
train
33.318182
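A standalone sketch of the same column-to-kwarg mapping; the conversion table and row below are invented for illustration, not EMDS data:

def columns_to_kwargs(conversion_table, columns, row):
    # pair each column name with its value, then rename via the conversion table
    return {conversion_table[col]: val for col, val in zip(columns, row)}

conversion = {'price': 'unit_price', 'volRemaining': 'volume_remaining'}
print(columns_to_kwargs(conversion, ['price', 'volRemaining'], [4.25, 1000]))
# {'unit_price': 4.25, 'volume_remaining': 1000}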
lmjohns3/downhill
downhill/base.py
https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/downhill/base.py#L281-L301
def evaluate(self, dataset):
    '''Evaluate the current model parameters on a dataset.

    Parameters
    ----------
    dataset : :class:`Dataset <downhill.dataset.Dataset>`
        A set of data to use for evaluating the model.

    Returns
    -------
    monitors : OrderedDict
        A dictionary mapping monitor names to values. Monitors are
        quantities of interest during optimization---for example, loss
        function, accuracy, or whatever the optimization task requires.
    '''
    if dataset is None:
        values = [self.f_eval()]
    else:
        values = [self.f_eval(*x) for x in dataset]
    monitors = zip(self._monitor_names, np.mean(values, axis=0))
    return collections.OrderedDict(monitors)
[ "def", "evaluate", "(", "self", ",", "dataset", ")", ":", "if", "dataset", "is", "None", ":", "values", "=", "[", "self", ".", "f_eval", "(", ")", "]", "else", ":", "values", "=", "[", "self", ".", "f_eval", "(", "*", "x", ")", "for", "x", "in", "dataset", "]", "monitors", "=", "zip", "(", "self", ".", "_monitor_names", ",", "np", ".", "mean", "(", "values", ",", "axis", "=", "0", ")", ")", "return", "collections", ".", "OrderedDict", "(", "monitors", ")" ]
Evaluate the current model parameters on a dataset. Parameters ---------- dataset : :class:`Dataset <downhill.dataset.Dataset>` A set of data to use for evaluating the model. Returns ------- monitors : OrderedDict A dictionary mapping monitor names to values. Monitors are quantities of interest during optimization---for example, loss function, accuracy, or whatever the optimization task requires.
[ "Evaluate", "the", "current", "model", "parameters", "on", "a", "dataset", "." ]
python
train
37.238095
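The core of evaluate is a column-wise average of per-batch monitor values; a minimal sketch with invented monitor names and batch values (no downhill dependency):

import collections
import numpy as np

monitor_names = ['loss', 'accuracy']        # hypothetical monitors
batch_values = [[0.9, 0.70], [1.1, 0.74]]   # one row of monitor values per batch

averaged = np.mean(batch_values, axis=0)    # mean over batches, per monitor
print(collections.OrderedDict(zip(monitor_names, averaged)))
# OrderedDict([('loss', 1.0), ('accuracy', 0.72)])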
codenerix/django-codenerix-invoicing
codenerix_invoicing/views_purchases.py
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/views_purchases.py#L669-L780
def form_valid(self, form):
    if self.__pk:
        obj = PurchasesAlbaran.objects.get(pk=self.__pk)
        self.request.albaran = obj
        form.instance.albaran = obj

    form.instance.validator_user = self.request.user

    raise Exception("revisar StorageBatch")
    """
    batch = StorageBatch.objects.filter(pk=form.data['batch']).first()
    if not batch:
        errors = form._errors.setdefault("batch", ErrorList())
        errors.append(_("Batch invalid"))
        return super(LineAlbaranCreate, self).form_invalid(form)
    """

    # comprueba si el producto comprado requiere un valor de atributo especial
    product_final = ProductFinal.objects.filter(pk=form.data['product']).first()
    feature_special_value = None
    if not product_final:
        errors = form._errors.setdefault("feature_special_value", ErrorList())
        errors.append(_("Product not selected"))
        return super(LineAlbaranCreate, self).form_invalid(form)
    elif product_final.product.feature_special:
        # es obligatorio la informacion de caracteristicas especiales
        if 'feature_special_value' not in form.data or not form.data['feature_special_value']:
            errors = form._errors.setdefault("feature_special_value", ErrorList())
            errors.append(_("Product needs information of feature special"))
            return super(LineAlbaranCreate, self).form_invalid(form)
        else:
            feature_special_value = list(set(filter(None, form.data['feature_special_value'].split('\n'))))

            try:
                quantity = int(float(form.data['quantity']))
            except ValueError:
                errors = form._errors.setdefault("quantity", ErrorList())
                errors.append(_("Quantity is not valid"))
                return super(LineAlbaranCreate, self).form_invalid(form)

            if product_final.product.feature_special.unique:
                # mismo numero de caracteristicas que de cantidades
                # si el feature special esta marcado como 'unico'
                if len(feature_special_value) != quantity:
                    errors = form._errors.setdefault("feature_special_value", ErrorList())
                    errors.append(_("Quantity and values of feature special not equals"))
                    return super(LineAlbaranCreate, self).form_invalid(form)
                # no existen las caracteristicas especiales dadas de alta en el sistema
                elif ProductUnique.objects.filter(product_final=product_final, value__in=feature_special_value).exists():
                    errors = form._errors.setdefault("feature_special_value", ErrorList())
                    errors.append(_("Some value of feature special exists"))
                    return super(LineAlbaranCreate, self).form_invalid(form)
            elif len(feature_special_value) != 1:
                errors = form._errors.setdefault("feature_special_value", ErrorList())
                errors.append(_("The special feature must be unique for all products"))
                return super(LineAlbaranCreate, self).form_invalid(form)

    try:
        with transaction.atomic():
            # save line albaran
            result = super(LineAlbaranCreate, self).form_valid(form)
            raise Exception("Cambiar ProductStock por ProductUnique")
            """
            if self.object.status != PURCHASE_ALBARAN_LINE_STATUS_REJECTED:
                # prepare stock
                ps = ProductStock()
                ps.product_final = product_final
                ps.line_albaran = self.object
                ps.batch = batch
                # save stock
                ps.quantity = self.object.quantity
                ps.save()

                if feature_special_value:
                    # prepare product feature special
                    if product_final.product.feature_special.unique:
                        pfs = ProductUnique()
                        pfs.product_final = product_final
                        # save product featureSpecial and stock
                        for fs in feature_special_value:
                            pfs.pk = None
                            pfs.value = fs
                            pfs.save()
                    else:
                        pfs = ProductUnique.objects.filter(
                            value=feature_special_value[0],
                            product_final=product_final
                        ).first()
                        if pfs:
                            pfs.stock_real += self.object.quantity
                        else:
                            pfs = ProductUnique()
                            pfs.product_final = product_final
                            pfs.value = feature_special_value[0]
                            pfs.stock_real = self.object.quantity
                            pfs.save()
                else:
                    # product unique by default
                    pfs = ProductUnique.objects.filter(product_final=product_final).first()
                    if not pfs:
                        pfs = ProductUnique()
                        pfs.product_final = product_final
                        pfs.stock_real = self.object.quantity
                    else:
                        pfs.stock_real += self.object.quantity
                    pfs.save()
            """
            return result
    except IntegrityError as e:
        errors = form._errors.setdefault("product", ErrorList())
        errors.append(_("Integrity Error: {}".format(e)))
        return super(LineAlbaranCreate, self).form_invalid(form)
[ "def", "form_valid", "(", "self", ",", "form", ")", ":", "if", "self", ".", "__pk", ":", "obj", "=", "PurchasesAlbaran", ".", "objects", ".", "get", "(", "pk", "=", "self", ".", "__pk", ")", "self", ".", "request", ".", "albaran", "=", "obj", "form", ".", "instance", ".", "albaran", "=", "obj", "form", ".", "instance", ".", "validator_user", "=", "self", ".", "request", ".", "user", "raise", "Exception", "(", "\"revisar StorageBatch\"", ")", "# comprueba si el producto comprado requiere un valor de atributo especial", "product_final", "=", "ProductFinal", ".", "objects", ".", "filter", "(", "pk", "=", "form", ".", "data", "[", "'product'", "]", ")", ".", "first", "(", ")", "feature_special_value", "=", "None", "if", "not", "product_final", ":", "errors", "=", "form", ".", "_errors", ".", "setdefault", "(", "\"feature_special_value\"", ",", "ErrorList", "(", ")", ")", "errors", ".", "append", "(", "_", "(", "\"Product not selected\"", ")", ")", "return", "super", "(", "LineAlbaranCreate", ",", "self", ")", ".", "form_invalid", "(", "form", ")", "elif", "product_final", ".", "product", ".", "feature_special", ":", "# es obligatorio la informacion de caracteristicas especiales", "if", "'feature_special_value'", "not", "in", "form", ".", "data", "or", "not", "form", ".", "data", "[", "'feature_special_value'", "]", ":", "errors", "=", "form", ".", "_errors", ".", "setdefault", "(", "\"feature_special_value\"", ",", "ErrorList", "(", ")", ")", "errors", ".", "append", "(", "_", "(", "\"Product needs information of feature special\"", ")", ")", "return", "super", "(", "LineAlbaranCreate", ",", "self", ")", ".", "form_invalid", "(", "form", ")", "else", ":", "feature_special_value", "=", "list", "(", "set", "(", "filter", "(", "None", ",", "form", ".", "data", "[", "'feature_special_value'", "]", ".", "split", "(", "'\\n'", ")", ")", ")", ")", "try", ":", "quantity", "=", "int", "(", "float", "(", "form", ".", "data", "[", "'quantity'", "]", ")", ")", "except", "ValueError", ":", "errors", "=", "form", ".", "_errors", ".", "setdefault", "(", "\"quantity\"", ",", "ErrorList", "(", ")", ")", "errors", ".", "append", "(", "_", "(", "\"Quantity is not valid\"", ")", ")", "return", "super", "(", "LineAlbaranCreate", ",", "self", ")", ".", "form_invalid", "(", "form", ")", "if", "product_final", ".", "product", ".", "feature_special", ".", "unique", ":", "# mismo numero de caracteristicas que de cantidades", "# si el feature special esta marcado como 'unico'", "if", "len", "(", "feature_special_value", ")", "!=", "quantity", ":", "errors", "=", "form", ".", "_errors", ".", "setdefault", "(", "\"feature_special_value\"", ",", "ErrorList", "(", ")", ")", "errors", ".", "append", "(", "_", "(", "\"Quantity and values of feature special not equals\"", ")", ")", "return", "super", "(", "LineAlbaranCreate", ",", "self", ")", ".", "form_invalid", "(", "form", ")", "# no existen las caracteristicas especiales dadas de alta en el sistema", "elif", "ProductUnique", ".", "objects", ".", "filter", "(", "product_final", "=", "product_final", ",", "value__in", "=", "feature_special_value", ")", ".", "exists", "(", ")", ":", "errors", "=", "form", ".", "_errors", ".", "setdefault", "(", "\"feature_special_value\"", ",", "ErrorList", "(", ")", ")", "errors", ".", "append", "(", "_", "(", "\"Some value of feature special exists\"", ")", ")", "return", "super", "(", "LineAlbaranCreate", ",", "self", ")", ".", "form_invalid", "(", "form", ")", "elif", "len", "(", "feature_special_value", ")", "!=", "1", ":", "errors", 
"=", "form", ".", "_errors", ".", "setdefault", "(", "\"feature_special_value\"", ",", "ErrorList", "(", ")", ")", "errors", ".", "append", "(", "_", "(", "\"The special feature must be unique for all products\"", ")", ")", "return", "super", "(", "LineAlbaranCreate", ",", "self", ")", ".", "form_invalid", "(", "form", ")", "try", ":", "with", "transaction", ".", "atomic", "(", ")", ":", "# save line albaran", "result", "=", "super", "(", "LineAlbaranCreate", ",", "self", ")", ".", "form_valid", "(", "form", ")", "raise", "Exception", "(", "\"Cambiar ProductStock por ProductUnique\"", ")", "\"\"\"\n if self.object.status != PURCHASE_ALBARAN_LINE_STATUS_REJECTED:\n # prepare stock\n ps = ProductStock()\n ps.product_final = product_final\n ps.line_albaran = self.object\n ps.batch = batch\n # save stock\n ps.quantity = self.object.quantity\n ps.save()\n\n if feature_special_value:\n # prepare product feature special\n if product_final.product.feature_special.unique:\n pfs = ProductUnique()\n pfs.product_final = product_final\n # save product featureSpecial and stock\n for fs in feature_special_value:\n pfs.pk = None\n pfs.value = fs\n pfs.save()\n\n else:\n pfs = ProductUnique.objects.filter(\n value=feature_special_value[0],\n product_final=product_final\n ).first()\n if pfs:\n pfs.stock_real += self.object.quantity\n else:\n pfs = ProductUnique()\n pfs.product_final = product_final\n pfs.value = feature_special_value[0]\n pfs.stock_real = self.object.quantity\n pfs.save()\n else:\n # product unique by default\n pfs = ProductUnique.objects.filter(product_final=product_final).first()\n if not pfs:\n pfs = ProductUnique()\n pfs.product_final = product_final\n pfs.stock_real = self.object.quantity\n else:\n pfs.stock_real += self.object.quantity\n pfs.save()\n \"\"\"", "return", "result", "except", "IntegrityError", "as", "e", ":", "errors", "=", "form", ".", "_errors", ".", "setdefault", "(", "\"product\"", ",", "ErrorList", "(", ")", ")", "errors", ".", "append", "(", "_", "(", "\"Integrity Error: {}\"", ".", "format", "(", "e", ")", ")", ")", "return", "super", "(", "LineAlbaranCreate", ",", "self", ")", ".", "form_invalid", "(", "form", ")" ]
batch = StorageBatch.objects.filter(pk=form.data['batch']).first() if not batch: errors = form._errors.setdefault("batch", ErrorList()) errors.append(_("Batch invalid")) return super(LineAlbaranCreate, self).form_invalid(form)
[ "batch", "=", "StorageBatch", ".", "objects", ".", "filter", "(", "pk", "=", "form", ".", "data", "[", "batch", "]", ")", ".", "first", "()", "if", "not", "batch", ":", "errors", "=", "form", ".", "_errors", ".", "setdefault", "(", "batch", "ErrorList", "()", ")", "errors", ".", "append", "(", "_", "(", "Batch", "invalid", "))", "return", "super", "(", "LineAlbaranCreate", "self", ")", ".", "form_invalid", "(", "form", ")" ]
python
train
52.857143
apache/spark
python/pyspark/sql/dataframe.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L1097-L1099
def _jseq(self, cols, converter=None):
    """Return a JVM Seq of Columns from a list of Column or names"""
    return _to_seq(self.sql_ctx._sc, cols, converter)
[ "def", "_jseq", "(", "self", ",", "cols", ",", "converter", "=", "None", ")", ":", "return", "_to_seq", "(", "self", ".", "sql_ctx", ".", "_sc", ",", "cols", ",", "converter", ")" ]
Return a JVM Seq of Columns from a list of Column or names
[ "Return", "a", "JVM", "Seq", "of", "Columns", "from", "a", "list", "of", "Column", "or", "names" ]
python
train
55.666667
NLeSC/noodles
noodles/lib/streams.py
https://github.com/NLeSC/noodles/blob/3759e24e6e54a3a1a364431309dbb1061f617c04/noodles/lib/streams.py#L212-L226
def broadcast(*sinks_):
    """The |broadcast| decorator creates a |push| object that receives a
    message by ``yield`` and then sends this message on to all the given
    sinks.

    .. |broadcast| replace:: :py:func:`broadcast`
    """
    @push
    def bc():
        sinks = [s() for s in sinks_]
        while True:
            msg = yield
            for s in sinks:
                s.send(msg)

    return bc
[ "def", "broadcast", "(", "*", "sinks_", ")", ":", "@", "push", "def", "bc", "(", ")", ":", "sinks", "=", "[", "s", "(", ")", "for", "s", "in", "sinks_", "]", "while", "True", ":", "msg", "=", "yield", "for", "s", "in", "sinks", ":", "s", ".", "send", "(", "msg", ")", "return", "bc" ]
The |broadcast| decorator creates a |push| object that receives a message by ``yield`` and then sends this message on to all the given sinks. .. |broadcast| replace:: :py:func:`broadcast`
[ "The", "|broadcast|", "decorator", "creates", "a", "|push|", "object", "that", "receives", "a", "message", "by", "yield", "and", "then", "sends", "this", "message", "on", "to", "all", "the", "given", "sinks", "." ]
python
train
26.533333
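A decorator-free sketch of the same broadcast idea using plain generator coroutines; the collector sink is invented for illustration and nothing from noodles is assumed:

def collector(out):
    # simple sink coroutine: append every received message to a list
    while True:
        out.append((yield))

def broadcast_to(*sinks):
    for s in sinks:
        next(s)            # prime each sink coroutine
    while True:
        msg = yield
        for s in sinks:
            s.send(msg)

a, b = [], []
bc = broadcast_to(collector(a), collector(b))
next(bc)                   # prime the broadcaster
bc.send('hello')
print(a, b)                # ['hello'] ['hello']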
BD2KGenomics/toil-scripts
src/toil_scripts/adam_kmers/count_kmers.py
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/adam_kmers/count_kmers.py#L175-L223
def main():
    '''
    Sets up command line parser for Toil/ADAM based k-mer counter, and launches
    k-mer counter with optional Spark cluster.
    '''
    parser = argparse.ArgumentParser()

    # add parser arguments
    parser.add_argument('--input_path',
                        help='The full path to the input SAM/BAM/ADAM/FASTQ file.')
    parser.add_argument('--output-path',
                        help='full path where final results will be output.')
    parser.add_argument('--kmer-length',
                        help='Length to use for k-mer counting. Defaults to 20.',
                        default=20,
                        type=int)
    parser.add_argument('--spark-conf',
                        help='Optional configuration to pass to Spark commands. Either this or --workers must be specified.',
                        default=None)
    parser.add_argument('--memory',
                        help='Optional memory configuration for Spark workers/driver. This must be specified if --workers is specified.',
                        default=None,
                        type=int)
    parser.add_argument('--cores',
                        help='Optional core configuration for Spark workers/driver. This must be specified if --workers is specified.',
                        default=None,
                        type=int)
    parser.add_argument('--workers',
                        help='Number of workers to spin up in Toil. Either this or --spark-conf must be specified. If this is specified, --memory and --cores must be specified.',
                        default=None,
                        type=int)
    parser.add_argument('--sudo',
                        help='Run docker containers with sudo. Defaults to False.',
                        default=False,
                        action='store_true')

    Job.Runner.addToilOptions(parser)
    args = parser.parse_args()
    Job.Runner.startToil(Job.wrapJobFn(kmer_dag,
                                       args.kmer_length,
                                       args.input_path,
                                       args.output_path,
                                       args.spark_conf,
                                       args.workers,
                                       args.cores,
                                       args.memory,
                                       args.sudo,
                                       checkpoint=True), args)
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "# add parser arguments", "parser", ".", "add_argument", "(", "'--input_path'", ",", "help", "=", "'The full path to the input SAM/BAM/ADAM/FASTQ file.'", ")", "parser", ".", "add_argument", "(", "'--output-path'", ",", "help", "=", "'full path where final results will be output.'", ")", "parser", ".", "add_argument", "(", "'--kmer-length'", ",", "help", "=", "'Length to use for k-mer counting. Defaults to 20.'", ",", "default", "=", "20", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'--spark-conf'", ",", "help", "=", "'Optional configuration to pass to Spark commands. Either this or --workers must be specified.'", ",", "default", "=", "None", ")", "parser", ".", "add_argument", "(", "'--memory'", ",", "help", "=", "'Optional memory configuration for Spark workers/driver. This must be specified if --workers is specified.'", ",", "default", "=", "None", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'--cores'", ",", "help", "=", "'Optional core configuration for Spark workers/driver. This must be specified if --workers is specified.'", ",", "default", "=", "None", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'--workers'", ",", "help", "=", "'Number of workers to spin up in Toil. Either this or --spark-conf must be specified. If this is specified, --memory and --cores must be specified.'", ",", "default", "=", "None", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'--sudo'", ",", "help", "=", "'Run docker containers with sudo. Defaults to False.'", ",", "default", "=", "False", ",", "action", "=", "'store_true'", ")", "Job", ".", "Runner", ".", "addToilOptions", "(", "parser", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "Job", ".", "Runner", ".", "startToil", "(", "Job", ".", "wrapJobFn", "(", "kmer_dag", ",", "args", ".", "kmer_length", ",", "args", ".", "input_path", ",", "args", ".", "output_path", ",", "args", ".", "spark_conf", ",", "args", ".", "workers", ",", "args", ".", "cores", ",", "args", ".", "memory", ",", "args", ".", "sudo", ",", "checkpoint", "=", "True", ")", ",", "args", ")" ]
Sets up command line parser for Toil/ADAM based k-mer counter, and launches k-mer counter with optional Spark cluster.
[ "Sets", "up", "command", "line", "parser", "for", "Toil", "/", "ADAM", "based", "k", "-", "mer", "counter", "and", "launches", "k", "-", "mer", "counter", "with", "optional", "Spark", "cluster", "." ]
python
train
49.122449
pypa/pipenv
pipenv/vendor/pep517/_in_process.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pep517/_in_process.py#L87-L103
def _get_wheel_metadata_from_wheel(
        backend, metadata_directory, config_settings):
    """Build a wheel and extract the metadata from it.

    Fallback for when the build backend does not define the
    'get_wheel_metadata' hook.
    """
    from zipfile import ZipFile
    whl_basename = backend.build_wheel(metadata_directory, config_settings)
    with open(os.path.join(metadata_directory, WHEEL_BUILT_MARKER), 'wb'):
        pass  # Touch marker file

    whl_file = os.path.join(metadata_directory, whl_basename)
    with ZipFile(whl_file) as zipf:
        dist_info = _dist_info_files(zipf)
        zipf.extractall(path=metadata_directory, members=dist_info)

    return dist_info[0].split('/')[0]
[ "def", "_get_wheel_metadata_from_wheel", "(", "backend", ",", "metadata_directory", ",", "config_settings", ")", ":", "from", "zipfile", "import", "ZipFile", "whl_basename", "=", "backend", ".", "build_wheel", "(", "metadata_directory", ",", "config_settings", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "metadata_directory", ",", "WHEEL_BUILT_MARKER", ")", ",", "'wb'", ")", ":", "pass", "# Touch marker file", "whl_file", "=", "os", ".", "path", ".", "join", "(", "metadata_directory", ",", "whl_basename", ")", "with", "ZipFile", "(", "whl_file", ")", "as", "zipf", ":", "dist_info", "=", "_dist_info_files", "(", "zipf", ")", "zipf", ".", "extractall", "(", "path", "=", "metadata_directory", ",", "members", "=", "dist_info", ")", "return", "dist_info", "[", "0", "]", ".", "split", "(", "'/'", ")", "[", "0", "]" ]
Build a wheel and extract the metadata from it. Fallback for when the build backend does not define the 'get_wheel_metadata' hook.
[ "Build", "a", "wheel", "and", "extract", "the", "metadata", "from", "it", "." ]
python
train
40.823529
econ-ark/HARK
HARK/ConsumptionSaving/ConsAggShockModel.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsAggShockModel.py#L1173-L1198
def update(self):
    '''
    Use primitive parameters to set basic objects. This is an extremely stripped-down version
    of update for CobbDouglasEconomy.

    Parameters
    ----------
    none

    Returns
    -------
    none
    '''
    self.kSS = 1.0
    self.MSS = 1.0
    self.KtoLnow_init = self.kSS
    self.Rfunc = ConstantFunction(self.Rfree)
    self.wFunc = ConstantFunction(self.wRte)
    self.RfreeNow_init = self.Rfunc(self.kSS)
    self.wRteNow_init = self.wFunc(self.kSS)
    self.MaggNow_init = self.kSS
    self.AaggNow_init = self.kSS
    self.PermShkAggNow_init = 1.0
    self.TranShkAggNow_init = 1.0
    self.makeAggShkDstn()
    self.AFunc = ConstantFunction(1.0)
[ "def", "update", "(", "self", ")", ":", "self", ".", "kSS", "=", "1.0", "self", ".", "MSS", "=", "1.0", "self", ".", "KtoLnow_init", "=", "self", ".", "kSS", "self", ".", "Rfunc", "=", "ConstantFunction", "(", "self", ".", "Rfree", ")", "self", ".", "wFunc", "=", "ConstantFunction", "(", "self", ".", "wRte", ")", "self", ".", "RfreeNow_init", "=", "self", ".", "Rfunc", "(", "self", ".", "kSS", ")", "self", ".", "wRteNow_init", "=", "self", ".", "wFunc", "(", "self", ".", "kSS", ")", "self", ".", "MaggNow_init", "=", "self", ".", "kSS", "self", ".", "AaggNow_init", "=", "self", ".", "kSS", "self", ".", "PermShkAggNow_init", "=", "1.0", "self", ".", "TranShkAggNow_init", "=", "1.0", "self", ".", "makeAggShkDstn", "(", ")", "self", ".", "AFunc", "=", "ConstantFunction", "(", "1.0", ")" ]
Use primitive parameters to set basic objects. This is an extremely stripped-down version of update for CobbDouglasEconomy. Parameters ---------- none Returns ------- none
[ "Use", "primitive", "parameters", "to", "set", "basic", "objects", ".", "This", "is", "an", "extremely", "stripped", "-", "down", "version", "of", "update", "for", "CobbDouglasEconomy", "." ]
python
train
29.192308
openvax/isovar
isovar/cli/reference_context_args.py
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/cli/reference_context_args.py#L26-L36
def add_reference_context_args(parser):
    """
    Extends an ArgumentParser instance with the following commandline arguments:
        --context-size
    """
    reference_context_group = parser.add_argument_group("Reference Transcripts")
    parser.add_argument(
        "--context-size",
        default=CDNA_CONTEXT_SIZE,
        type=int)
    return reference_context_group
[ "def", "add_reference_context_args", "(", "parser", ")", ":", "reference_context_group", "=", "parser", ".", "add_argument_group", "(", "\"Reference Transcripts\"", ")", "parser", ".", "add_argument", "(", "\"--context-size\"", ",", "default", "=", "CDNA_CONTEXT_SIZE", ",", "type", "=", "int", ")", "return", "reference_context_group" ]
Extends an ArgumentParser instance with the following commandline arguments: --context-size
[ "Extends", "an", "ArgumentParser", "instance", "with", "the", "following", "commandline", "arguments", ":", "--", "context", "-", "size" ]
python
train
33.545455
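A self-contained sketch of the argparse pattern used above; the default of 45 is a placeholder, not the isovar CDNA_CONTEXT_SIZE constant. Unlike the original, this sketch attaches the option to the group so it appears under the group heading in --help output:

import argparse

def add_context_args(parser, default_context_size=45):   # illustrative default
    group = parser.add_argument_group("Reference Transcripts")
    # adding via `group` lists the option under the group heading in --help
    group.add_argument("--context-size", default=default_context_size, type=int)
    return group

parser = argparse.ArgumentParser()
add_context_args(parser)
print(parser.parse_args(["--context-size", "60"]).context_size)   # 60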
gaqzi/py-gocd
gocd/api/pipeline.py
https://github.com/gaqzi/py-gocd/blob/6fe5b62dea51e665c11a343aba5fc98e130c5c63/gocd/api/pipeline.py#L99-L120
def instance(self, counter=None):
    """Returns all the information regarding a specific pipeline run

    See the `Go pipeline instance documentation`__ for examples.

    .. __: http://api.go.cd/current/#get-pipeline-instance

    Args:
      counter (int): The pipeline instance to fetch.
        If falsey returns the latest pipeline instance from :meth:`history`.

    Returns:
      Response: :class:`gocd.api.response.Response` object
    """
    if not counter:
        history = self.history()
        if not history:
            return history
        else:
            return Response._from_json(history['pipelines'][0])

    return self._get('/instance/{counter:d}'.format(counter=counter))
[ "def", "instance", "(", "self", ",", "counter", "=", "None", ")", ":", "if", "not", "counter", ":", "history", "=", "self", ".", "history", "(", ")", "if", "not", "history", ":", "return", "history", "else", ":", "return", "Response", ".", "_from_json", "(", "history", "[", "'pipelines'", "]", "[", "0", "]", ")", "return", "self", ".", "_get", "(", "'/instance/{counter:d}'", ".", "format", "(", "counter", "=", "counter", ")", ")" ]
Returns all the information regarding a specific pipeline run See the `Go pipeline instance documentation`__ for examples. .. __: http://api.go.cd/current/#get-pipeline-instance Args: counter (int): The pipeline instance to fetch. If falsey returns the latest pipeline instance from :meth:`history`. Returns: Response: :class:`gocd.api.response.Response` object
[ "Returns", "all", "the", "information", "regarding", "a", "specific", "pipeline", "run" ]
python
valid
33.909091
rdireen/spherepy
spherepy/pysphi.py
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/pysphi.py#L251-L274
def sc_to_fc(spvec, nmax, mmax, nrows, ncols):
    """assume Ncols is even"""

    fdata = np.zeros([int(nrows), ncols], dtype=np.complex128)

    for k in xrange(0, int(ncols / 2)):
        if k < mmax:
            kk = k
            ind = mindx(kk, nmax, mmax)
            vec = spvec[ind:ind + nmax - np.abs(kk) + 1]
            fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows)

            kk = -(k + 1)
            ind = mindx(kk, nmax, mmax)
            vec = spvec[ind:ind + nmax - np.abs(kk) + 1]
            fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows)

        if k == mmax:
            kk = k
            ind = mindx(kk, nmax, mmax)
            vec = spvec[ind:ind + nmax - np.abs(kk) + 1]
            fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows)

    return fdata
[ "def", "sc_to_fc", "(", "spvec", ",", "nmax", ",", "mmax", ",", "nrows", ",", "ncols", ")", ":", "fdata", "=", "np", ".", "zeros", "(", "[", "int", "(", "nrows", ")", ",", "ncols", "]", ",", "dtype", "=", "np", ".", "complex128", ")", "for", "k", "in", "xrange", "(", "0", ",", "int", "(", "ncols", "/", "2", ")", ")", ":", "if", "k", "<", "mmax", ":", "kk", "=", "k", "ind", "=", "mindx", "(", "kk", ",", "nmax", ",", "mmax", ")", "vec", "=", "spvec", "[", "ind", ":", "ind", "+", "nmax", "-", "np", ".", "abs", "(", "kk", ")", "+", "1", "]", "fdata", "[", ":", ",", "kk", "]", "=", "fcvec_m_sc", "(", "vec", ",", "kk", ",", "nmax", ",", "nrows", ")", "kk", "=", "-", "(", "k", "+", "1", ")", "ind", "=", "mindx", "(", "kk", ",", "nmax", ",", "mmax", ")", "vec", "=", "spvec", "[", "ind", ":", "ind", "+", "nmax", "-", "np", ".", "abs", "(", "kk", ")", "+", "1", "]", "fdata", "[", ":", ",", "kk", "]", "=", "fcvec_m_sc", "(", "vec", ",", "kk", ",", "nmax", ",", "nrows", ")", "if", "k", "==", "mmax", ":", "kk", "=", "k", "ind", "=", "mindx", "(", "kk", ",", "nmax", ",", "mmax", ")", "vec", "=", "spvec", "[", "ind", ":", "ind", "+", "nmax", "-", "np", ".", "abs", "(", "kk", ")", "+", "1", "]", "fdata", "[", ":", ",", "kk", "]", "=", "fcvec_m_sc", "(", "vec", ",", "kk", ",", "nmax", ",", "nrows", ")", "return", "fdata" ]
assume Ncols is even
[ "assume", "Ncols", "is", "even" ]
python
train
32.5
gwastro/pycbc-glue
pycbc_glue/ligolw/utils/print_tables.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/utils/print_tables.py#L131-L136
def format_header_cell(val):
    """
    Formats given header column. This involves changing '_Px_' to '(', '_xP_'
    to ')' and all other '_' to spaces.
    """
    return re.sub('_', ' ', re.sub(r'(_Px_)', '(', re.sub(r'(_xP_)', ')', str(val))))
[ "def", "format_header_cell", "(", "val", ")", ":", "return", "re", ".", "sub", "(", "'_'", ",", "' '", ",", "re", ".", "sub", "(", "r'(_Px_)'", ",", "'('", ",", "re", ".", "sub", "(", "r'(_xP_)'", ",", "')'", ",", "str", "(", "val", ")", ")", ")", ")" ]
Formats given header column. This involves changing '_Px_' to '(', '_xP_' to ')' and all other '_' to spaces.
[ "Formats", "given", "header", "column", ".", "This", "involves", "changing", "_Px_", "to", "(", "_xP_", "to", ")", "and", "all", "other", "_", "to", "spaces", "." ]
python
train
40.666667
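What the nested re.sub calls above do, traced on a made-up column name:

import re

def format_header_cell(val):
    # innermost sub runs first: '_xP_' -> ')', then '_Px_' -> '(', then any remaining '_' -> ' '
    return re.sub('_', ' ', re.sub(r'(_Px_)', '(', re.sub(r'(_xP_)', ')', str(val))))

print(format_header_cell('peak_freq_Px_Hz_xP_'))   # 'peak freq(Hz)'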
spotify/luigi
luigi/interface.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/interface.py#L186-L195
def run(*args, **kwargs):
    """
    Please dont use. Instead use `luigi` binary.

    Run from cmdline using argparse.

    :param use_dynamic_argparse: Deprecated and ignored
    """
    luigi_run_result = _run(*args, **kwargs)
    return luigi_run_result if kwargs.get('detailed_summary') else luigi_run_result.scheduling_succeeded
[ "def", "run", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "luigi_run_result", "=", "_run", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "luigi_run_result", "if", "kwargs", ".", "get", "(", "'detailed_summary'", ")", "else", "luigi_run_result", ".", "scheduling_succeeded" ]
Please dont use. Instead use `luigi` binary. Run from cmdline using argparse. :param use_dynamic_argparse: Deprecated and ignored
[ "Please", "dont", "use", ".", "Instead", "use", "luigi", "binary", "." ]
python
train
32.6
Opentrons/opentrons
api/src/opentrons/legacy_api/containers/placeable.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/legacy_api/containers/placeable.py#L440-L458
def from_center(self, x=None, y=None, z=None, r=None,
                theta=None, h=None, reference=None):
    """
    Accepts a set of (:x:, :y:, :z:) ratios for Cartesian or
    (:r:, :theta:, :h:) rations/angle for Polar and returns
    :Vector: using :reference: as origin
    """
    coords_to_endpoint = None
    if all([isinstance(i, numbers.Number) for i in (x, y, z)]):
        coords_to_endpoint = self.from_cartesian(x, y, z)

    if all([isinstance(i, numbers.Number) for i in (r, theta, h)]):
        coords_to_endpoint = self.from_polar(r, theta, h)

    coords_to_reference = Vector(0, 0, 0)
    if reference:
        coords_to_reference = self.coordinates(reference)

    return coords_to_reference + coords_to_endpoint
[ "def", "from_center", "(", "self", ",", "x", "=", "None", ",", "y", "=", "None", ",", "z", "=", "None", ",", "r", "=", "None", ",", "theta", "=", "None", ",", "h", "=", "None", ",", "reference", "=", "None", ")", ":", "coords_to_endpoint", "=", "None", "if", "all", "(", "[", "isinstance", "(", "i", ",", "numbers", ".", "Number", ")", "for", "i", "in", "(", "x", ",", "y", ",", "z", ")", "]", ")", ":", "coords_to_endpoint", "=", "self", ".", "from_cartesian", "(", "x", ",", "y", ",", "z", ")", "if", "all", "(", "[", "isinstance", "(", "i", ",", "numbers", ".", "Number", ")", "for", "i", "in", "(", "r", ",", "theta", ",", "h", ")", "]", ")", ":", "coords_to_endpoint", "=", "self", ".", "from_polar", "(", "r", ",", "theta", ",", "h", ")", "coords_to_reference", "=", "Vector", "(", "0", ",", "0", ",", "0", ")", "if", "reference", ":", "coords_to_reference", "=", "self", ".", "coordinates", "(", "reference", ")", "return", "coords_to_reference", "+", "coords_to_endpoint" ]
Accepts a set of (:x:, :y:, :z:) ratios for Cartesian or (:r:, :theta:, :h:) rations/angle for Polar and returns :Vector: using :reference: as origin
[ "Accepts", "a", "set", "of", "(", ":", "x", ":", ":", "y", ":", ":", "z", ":", ")", "ratios", "for", "Cartesian", "or", "(", ":", "r", ":", ":", "theta", ":", ":", "h", ":", ")", "rations", "/", "angle", "for", "Polar", "and", "returns", ":", "Vector", ":", "using", ":", "reference", ":", "as", "origin" ]
python
train
40.894737
sanskrit-coders/indic_transliteration
indic_transliteration_unmaintained/little/transliterator_tam.py
https://github.com/sanskrit-coders/indic_transliteration/blob/b7c5166a275c15a612fbb96fd3d765bc9004b299/indic_transliteration_unmaintained/little/transliterator_tam.py#L598-L618
def _equivalent(self, char, prev, next, implicitA):
    """ Transliterate a Latin character equivalent to Devanagari.

    Add VIRAMA for ligatures.
    Convert standalone to dependent vowels.
    """
    result = []
    if char.isVowel == False:
        result.append(char.chr)
        if char.isConsonant \
                and ((next is not None and next.isConsonant)
                     or next is None):
            result.append(DevanagariCharacter._VIRAMA)
    else:
        if prev is None or prev.isConsonant == False:
            result.append(char.chr)
        else:
            if char._dependentVowel is not None:
                result.append(char._dependentVowel)
    return result
[ "def", "_equivalent", "(", "self", ",", "char", ",", "prev", ",", "next", ",", "implicitA", ")", ":", "result", "=", "[", "]", "if", "char", ".", "isVowel", "==", "False", ":", "result", ".", "append", "(", "char", ".", "chr", ")", "if", "char", ".", "isConsonant", "and", "(", "(", "next", "is", "not", "None", "and", "next", ".", "isConsonant", ")", "or", "next", "is", "None", ")", ":", "result", ".", "append", "(", "DevanagariCharacter", ".", "_VIRAMA", ")", "else", ":", "if", "prev", "is", "None", "or", "prev", ".", "isConsonant", "==", "False", ":", "result", ".", "append", "(", "char", ".", "chr", ")", "else", ":", "if", "char", ".", "_dependentVowel", "is", "not", "None", ":", "result", ".", "append", "(", "char", ".", "_dependentVowel", ")", "return", "result" ]
Transliterate a Latin character equivalent to Devanagari. Add VIRAMA for ligatures. Convert standalone to dependent vowels.
[ "Transliterate", "a", "Latin", "character", "equivalent", "to", "Devanagari", ".", "Add", "VIRAMA", "for", "ligatures", ".", "Convert", "standalone", "to", "dependent", "vowels", "." ]
python
valid
35.571429
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/mmax2.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/mmax2.py#L270-L338
def add_annotation_layer(self, annotation_file, layer_name):
    """
    adds all markables from the given annotation layer to the discourse
    graph.
    """
    assert os.path.isfile(annotation_file), \
        "Annotation file doesn't exist: {}".format(annotation_file)
    tree = etree.parse(annotation_file)
    root = tree.getroot()

    default_layers = {self.ns, self.ns+':markable', self.ns+':'+layer_name}

    # avoids eml.org namespace handling
    for markable in root.iterchildren():
        markable_node_id = markable.attrib['id']
        markable_attribs = add_prefix(markable.attrib, self.ns+':')
        self.add_node(markable_node_id,
                      layers=default_layers,
                      attr_dict=markable_attribs,
                      label=markable_node_id+':'+layer_name)

        for target_node_id in spanstring2tokens(self, markable.attrib['span']):
            # manually add to_node if it's not in the graph, yet
            # cf. issue #39
            if target_node_id not in self:
                self.add_node(target_node_id,
                              # adding 'mmax:layer_name' here could be
                              # misleading (e.g. each token would be part
                              # of the 'mmax:sentence' layer
                              layers={self.ns, self.ns+':markable'},
                              label=target_node_id)

            self.add_edge(markable_node_id, target_node_id,
                          layers=default_layers,
                          edge_type=EdgeTypes.spanning_relation,
                          label=self.ns+':'+layer_name)

        # this is a workaround for Chiarcos-style MMAX files
        if has_antecedent(markable):
            antecedent_pointer = markable.attrib['anaphor_antecedent']
            # mmax2 supports weird double antecedents,
            # e.g. "markable_1000131;markable_1000132", cf. Issue #40
            #
            # handling these double antecendents increases the number of
            # chains, cf. commit edc28abdc4fd36065e8bbf5900eeb4d1326db153
            for antecedent in antecedent_pointer.split(';'):
                ante_split = antecedent.split(":")
                if len(ante_split) == 2:
                    # mark group:markable_n or secmark:markable_n as such
                    edge_label = '{}:antecedent'.format(ante_split[0])
                else:
                    edge_label = ':antecedent'

                # handles both 'markable_n' and 'layer:markable_n'
                antecedent_node_id = ante_split[-1]

                if len(ante_split) == 2:
                    antecedent_layer = ante_split[0]
                    default_layers.add('{0}:{1}'.format(self.ns, antecedent_layer))

                # manually add antecedent node if it's not yet in the graph
                # cf. issue #39
                if antecedent_node_id not in self:
                    self.add_node(antecedent_node_id, layers=default_layers)

                self.add_edge(markable_node_id, antecedent_node_id,
                              layers=default_layers,
                              edge_type=EdgeTypes.pointing_relation,
                              label=self.ns+edge_label)
[ "def", "add_annotation_layer", "(", "self", ",", "annotation_file", ",", "layer_name", ")", ":", "assert", "os", ".", "path", ".", "isfile", "(", "annotation_file", ")", ",", "\"Annotation file doesn't exist: {}\"", ".", "format", "(", "annotation_file", ")", "tree", "=", "etree", ".", "parse", "(", "annotation_file", ")", "root", "=", "tree", ".", "getroot", "(", ")", "default_layers", "=", "{", "self", ".", "ns", ",", "self", ".", "ns", "+", "':markable'", ",", "self", ".", "ns", "+", "':'", "+", "layer_name", "}", "# avoids eml.org namespace handling", "for", "markable", "in", "root", ".", "iterchildren", "(", ")", ":", "markable_node_id", "=", "markable", ".", "attrib", "[", "'id'", "]", "markable_attribs", "=", "add_prefix", "(", "markable", ".", "attrib", ",", "self", ".", "ns", "+", "':'", ")", "self", ".", "add_node", "(", "markable_node_id", ",", "layers", "=", "default_layers", ",", "attr_dict", "=", "markable_attribs", ",", "label", "=", "markable_node_id", "+", "':'", "+", "layer_name", ")", "for", "target_node_id", "in", "spanstring2tokens", "(", "self", ",", "markable", ".", "attrib", "[", "'span'", "]", ")", ":", "# manually add to_node if it's not in the graph, yet", "# cf. issue #39", "if", "target_node_id", "not", "in", "self", ":", "self", ".", "add_node", "(", "target_node_id", ",", "# adding 'mmax:layer_name' here could be", "# misleading (e.g. each token would be part", "# of the 'mmax:sentence' layer", "layers", "=", "{", "self", ".", "ns", ",", "self", ".", "ns", "+", "':markable'", "}", ",", "label", "=", "target_node_id", ")", "self", ".", "add_edge", "(", "markable_node_id", ",", "target_node_id", ",", "layers", "=", "default_layers", ",", "edge_type", "=", "EdgeTypes", ".", "spanning_relation", ",", "label", "=", "self", ".", "ns", "+", "':'", "+", "layer_name", ")", "# this is a workaround for Chiarcos-style MMAX files", "if", "has_antecedent", "(", "markable", ")", ":", "antecedent_pointer", "=", "markable", ".", "attrib", "[", "'anaphor_antecedent'", "]", "# mmax2 supports weird double antecedents,", "# e.g. \"markable_1000131;markable_1000132\", cf. Issue #40", "#", "# handling these double antecendents increases the number of", "# chains, cf. commit edc28abdc4fd36065e8bbf5900eeb4d1326db153", "for", "antecedent", "in", "antecedent_pointer", ".", "split", "(", "';'", ")", ":", "ante_split", "=", "antecedent", ".", "split", "(", "\":\"", ")", "if", "len", "(", "ante_split", ")", "==", "2", ":", "# mark group:markable_n or secmark:markable_n as such", "edge_label", "=", "'{}:antecedent'", ".", "format", "(", "ante_split", "[", "0", "]", ")", "else", ":", "edge_label", "=", "':antecedent'", "# handles both 'markable_n' and 'layer:markable_n'", "antecedent_node_id", "=", "ante_split", "[", "-", "1", "]", "if", "len", "(", "ante_split", ")", "==", "2", ":", "antecedent_layer", "=", "ante_split", "[", "0", "]", "default_layers", ".", "add", "(", "'{0}:{1}'", ".", "format", "(", "self", ".", "ns", ",", "antecedent_layer", ")", ")", "# manually add antecedent node if it's not yet in the graph", "# cf. issue #39", "if", "antecedent_node_id", "not", "in", "self", ":", "self", ".", "add_node", "(", "antecedent_node_id", ",", "layers", "=", "default_layers", ")", "self", ".", "add_edge", "(", "markable_node_id", ",", "antecedent_node_id", ",", "layers", "=", "default_layers", ",", "edge_type", "=", "EdgeTypes", ".", "pointing_relation", ",", "label", "=", "self", ".", "ns", "+", "edge_label", ")" ]
adds all markables from the given annotation layer to the discourse graph.
[ "adds", "all", "markables", "from", "the", "given", "annotation", "layer", "to", "the", "discourse", "graph", "." ]
python
train
49.898551
TomasTomecek/sen
sen/tui/ui.py
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/tui/ui.py#L106-L116
def display_buffer(self, buffer, redraw=True):
    """
    display provided buffer

    :param buffer: Buffer
    :return:
    """
    logger.debug("display buffer %r", buffer)
    self.buffer_movement_history.append(buffer)
    self.current_buffer = buffer
    self._set_main_widget(buffer.widget, redraw=redraw)
[ "def", "display_buffer", "(", "self", ",", "buffer", ",", "redraw", "=", "True", ")", ":", "logger", ".", "debug", "(", "\"display buffer %r\"", ",", "buffer", ")", "self", ".", "buffer_movement_history", ".", "append", "(", "buffer", ")", "self", ".", "current_buffer", "=", "buffer", "self", ".", "_set_main_widget", "(", "buffer", ".", "widget", ",", "redraw", "=", "redraw", ")" ]
display provided buffer :param buffer: Buffer :return:
[ "display", "provided", "buffer" ]
python
train
30.818182
kensho-technologies/graphql-compiler
graphql_compiler/compiler/blocks.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/blocks.py#L180-L186
def visit_and_update_expressions(self, visitor_fn):
    """Create an updated version (if needed) of the Filter via the visitor pattern."""
    new_predicate = self.predicate.visit_and_update(visitor_fn)
    if new_predicate is not self.predicate:
        return Filter(new_predicate)
    else:
        return self
[ "def", "visit_and_update_expressions", "(", "self", ",", "visitor_fn", ")", ":", "new_predicate", "=", "self", ".", "predicate", ".", "visit_and_update", "(", "visitor_fn", ")", "if", "new_predicate", "is", "not", "self", ".", "predicate", ":", "return", "Filter", "(", "new_predicate", ")", "else", ":", "return", "self" ]
Create an updated version (if needed) of the Filter via the visitor pattern.
[ "Create", "an", "updated", "version", "(", "if", "needed", ")", "of", "the", "Filter", "via", "the", "visitor", "pattern", "." ]
python
train
47.285714
edx/edx-enterprise
enterprise/decorators.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/decorators.py#L93-L158
def enterprise_login_required(view):
    """
    View decorator for allowing authenticated user with valid enterprise UUID.

    This decorator requires enterprise identifier as a parameter
    `enterprise_uuid`.

    This decorator will throw 404 if no kwarg `enterprise_uuid` is provided to
    the decorated view .

    If there is no enterprise in database against the kwarg `enterprise_uuid`
    or if the user is not authenticated then it will redirect the user to the
    enterprise-linked SSO login page.

    Usage::
        @enterprise_login_required()
        def my_view(request, enterprise_uuid):
            # Some functionality ...

        OR

        class MyView(View):
            ...
            @method_decorator(enterprise_login_required)
            def get(self, request, enterprise_uuid):
                # Some functionality ...
    """
    @wraps(view)
    def wrapper(request, *args, **kwargs):
        """
        Wrap the decorator.
        """
        if 'enterprise_uuid' not in kwargs:
            raise Http404

        enterprise_uuid = kwargs['enterprise_uuid']
        enterprise_customer = get_enterprise_customer_or_404(enterprise_uuid)

        # Now verify if the user is logged in. If user is not logged in then
        # send the user to the login screen to sign in with an
        # Enterprise-linked IdP and the pipeline will get them back here.
        if not request.user.is_authenticated:
            parsed_current_url = urlparse(request.get_full_path())
            parsed_query_string = parse_qs(parsed_current_url.query)
            parsed_query_string.update({
                'tpa_hint': enterprise_customer.identity_provider,
                FRESH_LOGIN_PARAMETER: 'yes'
            })
            next_url = '{current_path}?{query_string}'.format(
                current_path=quote(parsed_current_url.path),
                query_string=urlencode(parsed_query_string, doseq=True)
            )
            return redirect(
                '{login_url}?{params}'.format(
                    login_url='/login',
                    params=urlencode(
                        {'next': next_url}
                    )
                )
            )

        # Otherwise, they can proceed to the original view.
        return view(request, *args, **kwargs)

    return wrapper
[ "def", "enterprise_login_required", "(", "view", ")", ":", "@", "wraps", "(", "view", ")", "def", "wrapper", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"\n Wrap the decorator.\n \"\"\"", "if", "'enterprise_uuid'", "not", "in", "kwargs", ":", "raise", "Http404", "enterprise_uuid", "=", "kwargs", "[", "'enterprise_uuid'", "]", "enterprise_customer", "=", "get_enterprise_customer_or_404", "(", "enterprise_uuid", ")", "# Now verify if the user is logged in. If user is not logged in then", "# send the user to the login screen to sign in with an", "# Enterprise-linked IdP and the pipeline will get them back here.", "if", "not", "request", ".", "user", ".", "is_authenticated", ":", "parsed_current_url", "=", "urlparse", "(", "request", ".", "get_full_path", "(", ")", ")", "parsed_query_string", "=", "parse_qs", "(", "parsed_current_url", ".", "query", ")", "parsed_query_string", ".", "update", "(", "{", "'tpa_hint'", ":", "enterprise_customer", ".", "identity_provider", ",", "FRESH_LOGIN_PARAMETER", ":", "'yes'", "}", ")", "next_url", "=", "'{current_path}?{query_string}'", ".", "format", "(", "current_path", "=", "quote", "(", "parsed_current_url", ".", "path", ")", ",", "query_string", "=", "urlencode", "(", "parsed_query_string", ",", "doseq", "=", "True", ")", ")", "return", "redirect", "(", "'{login_url}?{params}'", ".", "format", "(", "login_url", "=", "'/login'", ",", "params", "=", "urlencode", "(", "{", "'next'", ":", "next_url", "}", ")", ")", ")", "# Otherwise, they can proceed to the original view.", "return", "view", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
View decorator for allowing authenticated user with valid enterprise UUID. This decorator requires enterprise identifier as a parameter `enterprise_uuid`. This decorator will throw 404 if no kwarg `enterprise_uuid` is provided to the decorated view . If there is no enterprise in database against the kwarg `enterprise_uuid` or if the user is not authenticated then it will redirect the user to the enterprise-linked SSO login page. Usage:: @enterprise_login_required() def my_view(request, enterprise_uuid): # Some functionality ... OR class MyView(View): ... @method_decorator(enterprise_login_required) def get(self, request, enterprise_uuid): # Some functionality ...
[ "View", "decorator", "for", "allowing", "authenticated", "user", "with", "valid", "enterprise", "UUID", "." ]
python
valid
34.30303
bxlab/bx-python
lib/bx_extras/pyparsing.py
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/pyparsing.py#L3443-L3481
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString):
    """Helper method for defining nested lists enclosed in opening and closing
       delimiters ("(" and ")" are the default).

       Parameters:
        - opener - opening character for a nested list (default="("); can also be a pyparsing expression
        - closer - closing character for a nested list (default=")"); can also be a pyparsing expression
        - content - expression for items within the nested lists (default=None)
        - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)

       If an expression is not provided for the content argument, the nested
       expression will capture all whitespace-delimited content between delimiters
       as a list of separate values.

       Use the ignoreExpr argument to define expressions that may contain
       opening or closing characters that should not be treated as opening or
       closing characters for nesting, such as quotedString or a comment expression.
       Specify multiple expressions using an Or or MatchFirst.
       The default is quotedString, but if no expressions are to be ignored,
       then pass None for this argument.
    """
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        if isinstance(opener, basestring) and isinstance(closer, basestring):
            if ignoreExpr is not None:
                content = (Combine(OneOrMore(~ignoreExpr +
                                             CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS, exact=1))
                                   ).setParseAction(lambda t: t[0].strip()))
            else:
                content = (empty + CharsNotIn(
                    opener+closer+ParserElement.DEFAULT_WHITE_CHARS).setParseAction(lambda t: t[0].strip()))
        else:
            raise ValueError("opening and closing arguments must be strings if no content expression is given")
    ret = Forward()
    if ignoreExpr is not None:
        ret << Group(Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer))
    else:
        ret << Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
    return ret
[ "def", "nestedExpr", "(", "opener", "=", "\"(\"", ",", "closer", "=", "\")\"", ",", "content", "=", "None", ",", "ignoreExpr", "=", "quotedString", ")", ":", "if", "opener", "==", "closer", ":", "raise", "ValueError", "(", "\"opening and closing strings cannot be the same\"", ")", "if", "content", "is", "None", ":", "if", "isinstance", "(", "opener", ",", "basestring", ")", "and", "isinstance", "(", "closer", ",", "basestring", ")", ":", "if", "ignoreExpr", "is", "not", "None", ":", "content", "=", "(", "Combine", "(", "OneOrMore", "(", "~", "ignoreExpr", "+", "CharsNotIn", "(", "opener", "+", "closer", "+", "ParserElement", ".", "DEFAULT_WHITE_CHARS", ",", "exact", "=", "1", ")", ")", ")", ".", "setParseAction", "(", "lambda", "t", ":", "t", "[", "0", "]", ".", "strip", "(", ")", ")", ")", "else", ":", "content", "=", "(", "empty", "+", "CharsNotIn", "(", "opener", "+", "closer", "+", "ParserElement", ".", "DEFAULT_WHITE_CHARS", ")", ".", "setParseAction", "(", "lambda", "t", ":", "t", "[", "0", "]", ".", "strip", "(", ")", ")", ")", "else", ":", "raise", "ValueError", "(", "\"opening and closing arguments must be strings if no content expression is given\"", ")", "ret", "=", "Forward", "(", ")", "if", "ignoreExpr", "is", "not", "None", ":", "ret", "<<", "Group", "(", "Suppress", "(", "opener", ")", "+", "ZeroOrMore", "(", "ignoreExpr", "|", "ret", "|", "content", ")", "+", "Suppress", "(", "closer", ")", ")", "else", ":", "ret", "<<", "Group", "(", "Suppress", "(", "opener", ")", "+", "ZeroOrMore", "(", "ret", "|", "content", ")", "+", "Suppress", "(", "closer", ")", ")", "return", "ret" ]
Helper method for defining nested lists enclosed in opening and closing delimiters ("(" and ")" are the default). Parameters: - opener - opening character for a nested list (default="("); can also be a pyparsing expression - closer - closing character for a nested list (default=")"); can also be a pyparsing expression - content - expression for items within the nested lists (default=None) - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString) If an expression is not provided for the content argument, the nested expression will capture all whitespace-delimited content between delimiters as a list of separate values. Use the ignoreExpr argument to define expressions that may contain opening or closing characters that should not be treated as opening or closing characters for nesting, such as quotedString or a comment expression. Specify multiple expressions using an Or or MatchFirst. The default is quotedString, but if no expressions are to be ignored, then pass None for this argument.
[ "Helper", "method", "for", "defining", "nested", "lists", "enclosed", "in", "opening", "and", "closing", "delimiters", "(", "(", "and", ")", "are", "the", "default", ")", "." ]
python
train
56.897436
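A usage sketch for nestedExpr; the import path assumes the bundled module above (the standalone pyparsing package exposes the same helper):

from bx_extras.pyparsing import nestedExpr

parsed = nestedExpr().parseString("(lambda (x) (plus x 1))")
print(parsed.asList())
# [['lambda', ['x'], ['plus', 'x', '1']]]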
Neurita/boyle
boyle/files/names.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/files/names.py#L43-L74
def get_extension(filepath, check_if_exists=False, allowed_exts=ALLOWED_EXTS):
    """Return the extension of filepath.

    Parameters
    ----------
    filepath: string
        File name or path

    check_if_exists: bool

    allowed_exts: dict
        Dictionary of strings, where the key is the last part of a complex
        ('.' separated) extension and the value is the previous part.
        For example: for the '.nii.gz' extension I would have a dict as
        {'.gz': ['.nii',]}

    Returns
    -------
    str
        The extension of the file name or path
    """
    if check_if_exists:
        if not op.exists(filepath):
            raise IOError('File not found: ' + filepath)

    rest, ext = op.splitext(filepath)
    if ext in allowed_exts:
        alloweds = allowed_exts[ext]
        _, ext2 = op.splitext(rest)
        if ext2 in alloweds:
            ext = ext2 + ext

    return ext
[ "def", "get_extension", "(", "filepath", ",", "check_if_exists", "=", "False", ",", "allowed_exts", "=", "ALLOWED_EXTS", ")", ":", "if", "check_if_exists", ":", "if", "not", "op", ".", "exists", "(", "filepath", ")", ":", "raise", "IOError", "(", "'File not found: '", "+", "filepath", ")", "rest", ",", "ext", "=", "op", ".", "splitext", "(", "filepath", ")", "if", "ext", "in", "allowed_exts", ":", "alloweds", "=", "allowed_exts", "[", "ext", "]", "_", ",", "ext2", "=", "op", ".", "splitext", "(", "rest", ")", "if", "ext2", "in", "alloweds", ":", "ext", "=", "ext2", "+", "ext", "return", "ext" ]
Return the extension of filepath.

Parameters
----------
filepath: string
    File name or path

check_if_exists: bool

allowed_exts: dict
    Dictionary of strings, where the key is the last part of a complex
    ('.' separated) extension and the value is the previous part.
    For example: for the '.nii.gz' extension I would have a dict as
    {'.gz': ['.nii',]}

Returns
-------
str
    The extension of the file name or path
[ "Return", "the", "extension", "of", "fpath", "." ]
python
valid
26.4375
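A usage sketch, assuming the default ALLOWED_EXTS maps '.gz' to ['.nii'] as the docstring example suggests:

from boyle.files.names import get_extension

print(get_extension('subject01.nii.gz'))   # expected: '.nii.gz' (two-level extension kept together)
print(get_extension('subject01.mha'))      # expected: '.mha'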
mozilla/python-zeppelin
zeppelin/converters/markdown.py
https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/converters/markdown.py#L261-L268
def process_results(self, paragraph): """Routes Zeppelin output types to corresponding handlers.""" if 'editorMode' in paragraph['config']: mode = paragraph['config']['editorMode'].split('/')[-1] if 'results' in paragraph and paragraph['results']['msg']: msg = paragraph['results']['msg'][0] if mode not in ('text', 'markdown'): self.output_options[msg['type']](msg['data'])
[ "def", "process_results", "(", "self", ",", "paragraph", ")", ":", "if", "'editorMode'", "in", "paragraph", "[", "'config'", "]", ":", "mode", "=", "paragraph", "[", "'config'", "]", "[", "'editorMode'", "]", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "if", "'results'", "in", "paragraph", "and", "paragraph", "[", "'results'", "]", "[", "'msg'", "]", ":", "msg", "=", "paragraph", "[", "'results'", "]", "[", "'msg'", "]", "[", "0", "]", "if", "mode", "not", "in", "(", "'text'", ",", "'markdown'", ")", ":", "self", ".", "output_options", "[", "msg", "[", "'type'", "]", "]", "(", "msg", "[", "'data'", "]", ")" ]
Routes Zeppelin output types to corresponding handlers.
[ "Routes", "Zeppelin", "output", "types", "to", "corresponding", "handlers", "." ]
python
train
57.375
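A self-contained sketch of the dispatch pattern process_results relies on: result types map to handler callables, and anything from a text/markdown paragraph is skipped. The handler behavior and the sample paragraph are made up for illustration:

output_options = {
    'TEXT': lambda data: print('text block:', data),
    'TABLE': lambda data: print('table block:', data),
}

paragraph = {
    'config': {'editorMode': 'ace/mode/scala'},
    'results': {'msg': [{'type': 'TEXT', 'data': 'hello'}]},
}

mode = paragraph['config']['editorMode'].split('/')[-1]
msg = paragraph['results']['msg'][0]
if mode not in ('text', 'markdown'):
    output_options[msg['type']](msg['data'])   # routes to the TEXT handler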
mikicz/arca
arca/backend/docker.py
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L603-L618
def container_running(self, container_name): """ Finds out if a container with name ``container_name`` is running. :return: :class:`Container <docker.models.containers.Container>` if it's running, ``None`` otherwise. :rtype: Optional[docker.models.container.Container] """ filters = { "name": container_name, "status": "running", } for container in self.client.containers.list(filters=filters): if container_name == container.name: return container return None
[ "def", "container_running", "(", "self", ",", "container_name", ")", ":", "filters", "=", "{", "\"name\"", ":", "container_name", ",", "\"status\"", ":", "\"running\"", ",", "}", "for", "container", "in", "self", ".", "client", ".", "containers", ".", "list", "(", "filters", "=", "filters", ")", ":", "if", "container_name", "==", "container", ".", "name", ":", "return", "container", "return", "None" ]
Finds out if a container with name ``container_name`` is running. :return: :class:`Container <docker.models.containers.Container>` if it's running, ``None`` otherwise. :rtype: Optional[docker.models.container.Container]
[ "Finds", "out", "if", "a", "container", "with", "name", "container_name", "is", "running", "." ]
python
train
35.6875
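A usage sketch of the same filtered lookup with the docker SDK; it assumes a reachable Docker daemon, and the container name is a placeholder:

import docker

client = docker.from_env()
matches = client.containers.list(filters={'name': 'arca_worker', 'status': 'running'})
running = next((c for c in matches if c.name == 'arca_worker'), None)
print(running)   # Container object if running, otherwise None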
lucapinello/Haystack
haystack/external.py
https://github.com/lucapinello/Haystack/blob/cc080d741f36cd77b07c0b59d08ea6a4cf0ef2f7/haystack/external.py#L391-L395
def giflogo(self,id,title=None,scale=0.8,info_str=''): """ m.giflogo(id,title=None,scale=0.8) -- (Requires seqlogo package) Make a gif sequence logo """ return giflogo(self,id,title,scale)
[ "def", "giflogo", "(", "self", ",", "id", ",", "title", "=", "None", ",", "scale", "=", "0.8", ",", "info_str", "=", "''", ")", ":", "return", "giflogo", "(", "self", ",", "id", ",", "title", ",", "scale", ")" ]
m.giflogo(id,title=None,scale=0.8) -- (Requires seqlogo package) Make a gif sequence logo
[ "m", ".", "giflogo", "(", "id", "title", "=", "None", "scale", "=", "0", ".", "8", ")", "--", "(", "Requires", "seqlogo", "package", ")", "Make", "a", "gif", "sequence", "logo" ]
python
train
43.2
cebel/pyctd
src/pyctd/manager/query.py
https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/manager/query.py#L124-L181
def get_disease(self, disease_name=None, disease_id=None, definition=None, parent_ids=None, tree_numbers=None, parent_tree_numbers=None, slim_mapping=None, synonym=None, alt_disease_id=None, limit=None, as_df=False): """ Get diseases :param bool as_df: if set to True result returns as `pandas.DataFrame` :param int limit: maximum number of results :param str disease_name: disease name :param str disease_id: disease identifier :param str definition: definition of disease :param str parent_ids: parent identifiers, delimiter | :param str tree_numbers: tree numbers, delimiter | :param str parent_tree_numbers: parent tree numbers, delimiter :param str slim_mapping: term derived from the MeSH tree structure for the “Diseases” [C] branch, \ that classifies MEDIC diseases into high-level categories :param str synonym: disease synonyms :param str alt_disease_id: alternative disease identifiers :return: list of :class:`pyctd.manager.models.Disease` object .. seealso:: :class:`pyctd.manager.models.Disease` .. todo:: normalize parent_ids, tree_numbers and parent_tree_numbers in :class:`pyctd.manager.models.Disease` """ q = self.session.query(models.Disease) if disease_name: q = q.filter(models.Disease.disease_name.like(disease_name)) if disease_id: q = q.filter(models.Disease.disease_id == disease_id) if definition: q = q.filter(models.Disease.definition.like(definition)) if parent_ids: q = q.filter(models.Disease.parent_ids.like(parent_ids)) if tree_numbers: q = q.filter(models.Disease.tree_numbers.like(tree_numbers)) if parent_tree_numbers: q = q.filter(models.Disease.parent_tree_numbers.like(parent_tree_numbers)) if slim_mapping: q = q.join(models.DiseaseSlimmapping).filter(models.DiseaseSlimmapping.slim_mapping.like(slim_mapping)) if synonym: q = q.join(models.DiseaseSynonym).filter(models.DiseaseSynonym.synonym.like(synonym)) if alt_disease_id: q = q.join(models.DiseaseAltdiseaseid).filter(models.DiseaseAltdiseaseid.alt_disease_id == alt_disease_id) return self._limit_and_df(q, limit, as_df)
[ "def", "get_disease", "(", "self", ",", "disease_name", "=", "None", ",", "disease_id", "=", "None", ",", "definition", "=", "None", ",", "parent_ids", "=", "None", ",", "tree_numbers", "=", "None", ",", "parent_tree_numbers", "=", "None", ",", "slim_mapping", "=", "None", ",", "synonym", "=", "None", ",", "alt_disease_id", "=", "None", ",", "limit", "=", "None", ",", "as_df", "=", "False", ")", ":", "q", "=", "self", ".", "session", ".", "query", "(", "models", ".", "Disease", ")", "if", "disease_name", ":", "q", "=", "q", ".", "filter", "(", "models", ".", "Disease", ".", "disease_name", ".", "like", "(", "disease_name", ")", ")", "if", "disease_id", ":", "q", "=", "q", ".", "filter", "(", "models", ".", "Disease", ".", "disease_id", "==", "disease_id", ")", "if", "definition", ":", "q", "=", "q", ".", "filter", "(", "models", ".", "Disease", ".", "definition", ".", "like", "(", "definition", ")", ")", "if", "parent_ids", ":", "q", "=", "q", ".", "filter", "(", "models", ".", "Disease", ".", "parent_ids", ".", "like", "(", "parent_ids", ")", ")", "if", "tree_numbers", ":", "q", "=", "q", ".", "filter", "(", "models", ".", "Disease", ".", "tree_numbers", ".", "like", "(", "tree_numbers", ")", ")", "if", "parent_tree_numbers", ":", "q", "=", "q", ".", "filter", "(", "models", ".", "Disease", ".", "parent_tree_numbers", ".", "like", "(", "parent_tree_numbers", ")", ")", "if", "slim_mapping", ":", "q", "=", "q", ".", "join", "(", "models", ".", "DiseaseSlimmapping", ")", ".", "filter", "(", "models", ".", "DiseaseSlimmapping", ".", "slim_mapping", ".", "like", "(", "slim_mapping", ")", ")", "if", "synonym", ":", "q", "=", "q", ".", "join", "(", "models", ".", "DiseaseSynonym", ")", ".", "filter", "(", "models", ".", "DiseaseSynonym", ".", "synonym", ".", "like", "(", "synonym", ")", ")", "if", "alt_disease_id", ":", "q", "=", "q", ".", "join", "(", "models", ".", "DiseaseAltdiseaseid", ")", ".", "filter", "(", "models", ".", "DiseaseAltdiseaseid", ".", "alt_disease_id", "==", "alt_disease_id", ")", "return", "self", ".", "_limit_and_df", "(", "q", ",", "limit", ",", "as_df", ")" ]
Get diseases :param bool as_df: if set to True result returns as `pandas.DataFrame` :param int limit: maximum number of results :param str disease_name: disease name :param str disease_id: disease identifier :param str definition: definition of disease :param str parent_ids: parent identifiers, delimiter | :param str tree_numbers: tree numbers, delimiter | :param str parent_tree_numbers: parent tree numbers, delimiter :param str slim_mapping: term derived from the MeSH tree structure for the “Diseases” [C] branch, \ that classifies MEDIC diseases into high-level categories :param str synonym: disease synonyms :param str alt_disease_id: alternative disease identifiers :return: list of :class:`pyctd.manager.models.Disease` object .. seealso:: :class:`pyctd.manager.models.Disease` .. todo:: normalize parent_ids, tree_numbers and parent_tree_numbers in :class:`pyctd.manager.models.Disease`
[ "Get", "diseases" ]
python
train
41.051724
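A usage sketch; `query` is assumed to be an instance of the manager class that defines get_disease, and the filter values go through SQL LIKE, so '%' wildcards work:

hits = query.get_disease(disease_name='%Parkinson%', limit=5)
for disease in hits:
    print(disease.disease_id, disease.disease_name)

# as_df=True returns the same rows as a pandas.DataFrame instead of model objects
df = query.get_disease(synonym='%tremor%', limit=10, as_df=True)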
tensorpack/tensorpack
tensorpack/graph_builder/distributed.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/graph_builder/distributed.py#L226-L255
def _shadow_model_variables(shadow_vars): """ Create shadow vars for model_variables as well, and add to the list of ``shadow_vars``. Returns: list of (shadow_model_var, local_model_var) used for syncing. """ G = tf.get_default_graph() curr_shadow_vars = set([v.name for v in shadow_vars]) model_vars = tf.model_variables() shadow_model_vars = [] for v in model_vars: assert v.name.startswith('tower'), "Found some MODEL_VARIABLES created outside of the tower function!" stripped_op_name, stripped_var_name = get_op_tensor_name(re.sub('^tower[0-9]+/', '', v.name)) if stripped_op_name in curr_shadow_vars: continue try: G.get_tensor_by_name(stripped_var_name) logger.warn("Model Variable {} also appears in other collections.".format(stripped_var_name)) continue except KeyError: pass new_v = tf.get_variable(stripped_op_name, dtype=v.dtype.base_dtype, initializer=v.initial_value, trainable=False) curr_shadow_vars.add(stripped_op_name) # avoid duplicated shadow_model_vars shadow_vars.append(new_v) shadow_model_vars.append((new_v, v)) # only need to sync model_var from one tower return shadow_model_vars
[ "def", "_shadow_model_variables", "(", "shadow_vars", ")", ":", "G", "=", "tf", ".", "get_default_graph", "(", ")", "curr_shadow_vars", "=", "set", "(", "[", "v", ".", "name", "for", "v", "in", "shadow_vars", "]", ")", "model_vars", "=", "tf", ".", "model_variables", "(", ")", "shadow_model_vars", "=", "[", "]", "for", "v", "in", "model_vars", ":", "assert", "v", ".", "name", ".", "startswith", "(", "'tower'", ")", ",", "\"Found some MODEL_VARIABLES created outside of the tower function!\"", "stripped_op_name", ",", "stripped_var_name", "=", "get_op_tensor_name", "(", "re", ".", "sub", "(", "'^tower[0-9]+/'", ",", "''", ",", "v", ".", "name", ")", ")", "if", "stripped_op_name", "in", "curr_shadow_vars", ":", "continue", "try", ":", "G", ".", "get_tensor_by_name", "(", "stripped_var_name", ")", "logger", ".", "warn", "(", "\"Model Variable {} also appears in other collections.\"", ".", "format", "(", "stripped_var_name", ")", ")", "continue", "except", "KeyError", ":", "pass", "new_v", "=", "tf", ".", "get_variable", "(", "stripped_op_name", ",", "dtype", "=", "v", ".", "dtype", ".", "base_dtype", ",", "initializer", "=", "v", ".", "initial_value", ",", "trainable", "=", "False", ")", "curr_shadow_vars", ".", "add", "(", "stripped_op_name", ")", "# avoid duplicated shadow_model_vars", "shadow_vars", ".", "append", "(", "new_v", ")", "shadow_model_vars", ".", "append", "(", "(", "new_v", ",", "v", ")", ")", "# only need to sync model_var from one tower", "return", "shadow_model_vars" ]
Create shadow vars for model_variables as well, and add to the list of ``shadow_vars``. Returns: list of (shadow_model_var, local_model_var) used for syncing.
[ "Create", "shadow", "vars", "for", "model_variables", "as", "well", "and", "add", "to", "the", "list", "of", "shadow_vars", "." ]
python
train
47.8
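A self-contained illustration of the name normalization step above: model variables created in different tower scopes collapse to a single shadow name on the parameter server:

import re

tower_names = ['tower0/resnet/bn/moving_mean:0', 'tower1/resnet/bn/moving_mean:0']
stripped = {re.sub('^tower[0-9]+/', '', name) for name in tower_names}
print(stripped)   # {'resnet/bn/moving_mean:0'} -- both towers map to one shadow variable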
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3579-L3611
def com_google_fonts_check_family_tnum_horizontal_metrics(fonts): """All tabular figures must have the same width across the RIBBI-family.""" from fontbakery.constants import RIBBI_STYLE_NAMES from fontTools.ttLib import TTFont RIBBI_ttFonts = [TTFont(f) for f in fonts if style(f) in RIBBI_STYLE_NAMES] tnum_widths = {} for ttFont in RIBBI_ttFonts: glyphs = ttFont.getGlyphSet() tnum_glyphs = [(glyph_id, glyphs[glyph_id]) for glyph_id in glyphs.keys() if glyph_id.endswith(".tnum")] for glyph_id, glyph in tnum_glyphs: if glyph.width not in tnum_widths: tnum_widths[glyph.width] = [glyph_id] else: tnum_widths[glyph.width].append(glyph_id) if len(tnum_widths.keys()) > 1: max_num = 0 most_common_width = None for width, glyphs in tnum_widths.items(): if len(glyphs) > max_num: max_num = len(glyphs) most_common_width = width del tnum_widths[most_common_width] yield FAIL, (f"The most common tabular glyph width is {most_common_width}." " But there are other tabular glyphs with different widths" f" such as the following ones:\n\t{tnum_widths}.") else: yield PASS, "OK"
[ "def", "com_google_fonts_check_family_tnum_horizontal_metrics", "(", "fonts", ")", ":", "from", "fontbakery", ".", "constants", "import", "RIBBI_STYLE_NAMES", "from", "fontTools", ".", "ttLib", "import", "TTFont", "RIBBI_ttFonts", "=", "[", "TTFont", "(", "f", ")", "for", "f", "in", "fonts", "if", "style", "(", "f", ")", "in", "RIBBI_STYLE_NAMES", "]", "tnum_widths", "=", "{", "}", "for", "ttFont", "in", "RIBBI_ttFonts", ":", "glyphs", "=", "ttFont", ".", "getGlyphSet", "(", ")", "tnum_glyphs", "=", "[", "(", "glyph_id", ",", "glyphs", "[", "glyph_id", "]", ")", "for", "glyph_id", "in", "glyphs", ".", "keys", "(", ")", "if", "glyph_id", ".", "endswith", "(", "\".tnum\"", ")", "]", "for", "glyph_id", ",", "glyph", "in", "tnum_glyphs", ":", "if", "glyph", ".", "width", "not", "in", "tnum_widths", ":", "tnum_widths", "[", "glyph", ".", "width", "]", "=", "[", "glyph_id", "]", "else", ":", "tnum_widths", "[", "glyph", ".", "width", "]", ".", "append", "(", "glyph_id", ")", "if", "len", "(", "tnum_widths", ".", "keys", "(", ")", ")", ">", "1", ":", "max_num", "=", "0", "most_common_width", "=", "None", "for", "width", ",", "glyphs", "in", "tnum_widths", ".", "items", "(", ")", ":", "if", "len", "(", "glyphs", ")", ">", "max_num", ":", "max_num", "=", "len", "(", "glyphs", ")", "most_common_width", "=", "width", "del", "tnum_widths", "[", "most_common_width", "]", "yield", "FAIL", ",", "(", "f\"The most common tabular glyph width is {most_common_width}.\"", "\" But there are other tabular glyphs with different widths\"", "f\" such as the following ones:\\n\\t{tnum_widths}.\"", ")", "else", ":", "yield", "PASS", ",", "\"OK\"" ]
All tabular figures must have the same width across the RIBBI-family.
[ "All", "tabular", "figures", "must", "have", "the", "same", "width", "across", "the", "RIBBI", "-", "family", "." ]
python
train
38
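A self-contained illustration of the width bucketing the check performs; glyph names and widths are made up:

tnum_widths = {}
for glyph_id, width in [('one.tnum', 600), ('two.tnum', 600), ('three.tnum', 610)]:
    tnum_widths.setdefault(width, []).append(glyph_id)

# more than one bucket means the tabular figures are not all the same width -> FAIL
print(len(tnum_widths) > 1, tnum_widths)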
brian-rose/climlab
climlab/radiation/transmissivity.py
https://github.com/brian-rose/climlab/blob/eae188a2ae9308229b8cbb8fe0b65f51b50ee1e6/climlab/radiation/transmissivity.py#L121-L140
def flux_up(self, fluxUpBottom, emission=None): '''Compute upwelling radiative flux at interfaces between layers. Inputs: * fluxUpBottom: flux up at the bottom boundary (surface) * emission: emission from atmospheric levels (N) defaults to zero if not given Returns: * vector of upwelling radiative flux between levels (N+1), including the boundary flux up from the surface. ''' if emission is None: emission = np.zeros_like(self.absorptivity) E = np.concatenate((emission, np.atleast_1d(fluxUpBottom)), axis=-1) # dot product (matrix multiplication) along last axes return np.squeeze(matrix_multiply(self.Tup, E[..., np.newaxis]))
[ "def", "flux_up", "(", "self", ",", "fluxUpBottom", ",", "emission", "=", "None", ")", ":", "if", "emission", "is", "None", ":", "emission", "=", "np", ".", "zeros_like", "(", "self", ".", "absorptivity", ")", "E", "=", "np", ".", "concatenate", "(", "(", "emission", ",", "np", ".", "atleast_1d", "(", "fluxUpBottom", ")", ")", ",", "axis", "=", "-", "1", ")", "# dot product (matrix multiplication) along last axes", "return", "np", ".", "squeeze", "(", "matrix_multiply", "(", "self", ".", "Tup", ",", "E", "[", "...", ",", "np", ".", "newaxis", "]", ")", ")" ]
Compute upwelling radiative flux at interfaces between layers. Inputs: * fluxUpBottom: flux up at the bottom boundary (surface) * emission: emission from atmospheric levels (N) defaults to zero if not given Returns: * vector of upwelling radiative flux between levels (N+1), including the boundary flux up from the surface.
[ "Compute", "downwelling", "radiative", "flux", "at", "interfaces", "between", "layers", "." ]
python
train
36.5
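A small numpy sketch of the batched "dot product along the last axes" used above; np.matmul behaves like the older matrix_multiply from numpy.core.umath_tests, and the operator here is a stand-in identity rather than a real transmissivity matrix:

import numpy as np

Tup = np.eye(4)[np.newaxis, ...]      # stand-in operator, shape (1, 4, 4)
E = np.arange(4.0)[np.newaxis, :]     # emissions plus boundary flux, shape (1, 4)
flux = np.squeeze(np.matmul(Tup, E[..., np.newaxis]))
print(flux)   # [0. 1. 2. 3.]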
datajoint/datajoint-python
datajoint/table.py
https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/table.py#L98-L105
def children(self, primary=None): """ :param primary: if None, then all children are returned. If True, then only foreign keys composed of primary key attributes are considered. If False, only foreign keys including at least one non-primary attribute are considered. :return: dict of tables with foreign keys referencing self """ return self.connection.dependencies.children(self.full_table_name, primary)
[ "def", "children", "(", "self", ",", "primary", "=", "None", ")", ":", "return", "self", ".", "connection", ".", "dependencies", ".", "children", "(", "self", ".", "full_table_name", ",", "primary", ")" ]
:param primary: if None, then all children are returned. If True, then only foreign keys composed of primary key attributes are considered. If False, only foreign keys including at least one non-primary attribute are considered. :return: dict of tables with foreign keys referencing self
[ ":", "param", "primary", ":", "if", "None", "then", "all", "parents", "are", "returned", ".", "If", "True", "then", "only", "foreign", "keys", "composed", "of", "primary", "key", "attributes", "are", "considered", ".", "If", "False", "the", "only", "foreign", "keys", "including", "at", "least", "one", "non", "-", "primary", "attribute", "are", "considered", ".", ":", "return", ":", "dict", "of", "tables", "with", "foreign", "keys", "referencing", "self" ]
python
train
58.25
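A usage sketch; `Session` is assumed to be an existing dj.Manual table class on a configured DataJoint connection:

deps = Session().children()                        # all tables whose foreign keys reference Session
primary_only = Session().children(primary=True)    # only FKs made entirely of primary-key attributes
print(list(deps))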
klen/aioauth-client
aioauth_client.py
https://github.com/klen/aioauth-client/blob/54f58249496c26965adb4f752f2b24cfe18d0084/aioauth_client.py#L141-L165
async def _request(self, method, url, loop=None, timeout=None, **kwargs): """Make a request through AIOHTTP.""" session = self.session or aiohttp.ClientSession( loop=loop, conn_timeout=timeout, read_timeout=timeout) try: async with session.request(method, url, **kwargs) as response: if response.status / 100 > 2: raise web.HTTPBadRequest( reason='HTTP status code: %s' % response.status) if 'json' in response.headers.get('CONTENT-TYPE'): data = await response.json() else: data = await response.text() data = dict(parse_qsl(data)) return data except asyncio.TimeoutError: raise web.HTTPBadRequest(reason='HTTP Timeout') finally: if not self.session and not session.closed: await session.close()
[ "async", "def", "_request", "(", "self", ",", "method", ",", "url", ",", "loop", "=", "None", ",", "timeout", "=", "None", ",", "*", "*", "kwargs", ")", ":", "session", "=", "self", ".", "session", "or", "aiohttp", ".", "ClientSession", "(", "loop", "=", "loop", ",", "conn_timeout", "=", "timeout", ",", "read_timeout", "=", "timeout", ")", "try", ":", "async", "with", "session", ".", "request", "(", "method", ",", "url", ",", "*", "*", "kwargs", ")", "as", "response", ":", "if", "response", ".", "status", "/", "100", ">", "2", ":", "raise", "web", ".", "HTTPBadRequest", "(", "reason", "=", "'HTTP status code: %s'", "%", "response", ".", "status", ")", "if", "'json'", "in", "response", ".", "headers", ".", "get", "(", "'CONTENT-TYPE'", ")", ":", "data", "=", "await", "response", ".", "json", "(", ")", "else", ":", "data", "=", "await", "response", ".", "text", "(", ")", "data", "=", "dict", "(", "parse_qsl", "(", "data", ")", ")", "return", "data", "except", "asyncio", ".", "TimeoutError", ":", "raise", "web", ".", "HTTPBadRequest", "(", "reason", "=", "'HTTP Timeout'", ")", "finally", ":", "if", "not", "self", ".", "session", "and", "not", "session", ".", "closed", ":", "await", "session", ".", "close", "(", ")" ]
Make a request through AIOHTTP.
[ "Make", "a", "request", "through", "AIOHTTP", "." ]
python
train
37.96
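A self-contained illustration of the non-JSON fallback in the response handling above: token endpoints that return a urlencoded body are parsed with parse_qsl:

from urllib.parse import parse_qsl

body = 'access_token=abc123&token_type=bearer'
print(dict(parse_qsl(body)))   # {'access_token': 'abc123', 'token_type': 'bearer'}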
gboeing/osmnx
osmnx/projection.py
https://github.com/gboeing/osmnx/blob/be59fd313bcb68af8fc79242c56194f1247e26e2/osmnx/projection.py#L20-L55
def project_geometry(geometry, crs=None, to_crs=None, to_latlong=False): """ Project a shapely Polygon or MultiPolygon from lat-long to UTM, or vice-versa Parameters ---------- geometry : shapely Polygon or MultiPolygon the geometry to project crs : dict the starting coordinate reference system of the passed-in geometry, default value (None) will set settings.default_crs as the CRS to_crs : dict if not None, just project to this CRS instead of to UTM to_latlong : bool if True, project from crs to lat-long, if False, project from crs to local UTM zone Returns ------- tuple (geometry_proj, crs), the projected shapely geometry and the crs of the projected geometry """ if crs is None: crs = settings.default_crs gdf = gpd.GeoDataFrame() gdf.crs = crs gdf.gdf_name = 'geometry to project' gdf['geometry'] = None gdf.loc[0, 'geometry'] = geometry gdf_proj = project_gdf(gdf, to_crs=to_crs, to_latlong=to_latlong) geometry_proj = gdf_proj['geometry'].iloc[0] return geometry_proj, gdf_proj.crs
[ "def", "project_geometry", "(", "geometry", ",", "crs", "=", "None", ",", "to_crs", "=", "None", ",", "to_latlong", "=", "False", ")", ":", "if", "crs", "is", "None", ":", "crs", "=", "settings", ".", "default_crs", "gdf", "=", "gpd", ".", "GeoDataFrame", "(", ")", "gdf", ".", "crs", "=", "crs", "gdf", ".", "gdf_name", "=", "'geometry to project'", "gdf", "[", "'geometry'", "]", "=", "None", "gdf", ".", "loc", "[", "0", ",", "'geometry'", "]", "=", "geometry", "gdf_proj", "=", "project_gdf", "(", "gdf", ",", "to_crs", "=", "to_crs", ",", "to_latlong", "=", "to_latlong", ")", "geometry_proj", "=", "gdf_proj", "[", "'geometry'", "]", ".", "iloc", "[", "0", "]", "return", "geometry_proj", ",", "gdf_proj", ".", "crs" ]
Project a shapely Polygon or MultiPolygon from lat-long to UTM, or vice-versa Parameters ---------- geometry : shapely Polygon or MultiPolygon the geometry to project crs : dict the starting coordinate reference system of the passed-in geometry, default value (None) will set settings.default_crs as the CRS to_crs : dict if not None, just project to this CRS instead of to UTM to_latlong : bool if True, project from crs to lat-long, if False, project from crs to local UTM zone Returns ------- tuple (geometry_proj, crs), the projected shapely geometry and the crs of the projected geometry
[ "Project", "a", "shapely", "Polygon", "or", "MultiPolygon", "from", "lat", "-", "long", "to", "UTM", "or", "vice", "-", "versa" ]
python
train
31.222222
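A usage sketch: project a lat-long polygon to its local UTM zone and back; the coordinates are arbitrary example values:

from shapely.geometry import Polygon
from osmnx.projection import project_geometry

poly = Polygon([(-122.3, 47.6), (-122.2, 47.6), (-122.2, 47.7), (-122.3, 47.7)])
poly_utm, crs_utm = project_geometry(poly)                                   # lat-long -> local UTM
poly_ll, crs_ll = project_geometry(poly_utm, crs=crs_utm, to_latlong=True)   # and back
print(crs_utm)
print(poly_ll.bounds)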
mikedh/trimesh
trimesh/path/path.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/path.py#L367-L419
def apply_transform(self, transform): """ Apply a transformation matrix to the current path in- place Parameters ----------- transform : (d+1, d+1) float Homogenous transformation for vertices """ dimension = self.vertices.shape[1] transform = np.asanyarray(transform, dtype=np.float64) if transform.shape != (dimension + 1, dimension + 1): raise ValueError('transform is incorrect shape!') elif np.abs(transform - np.eye(dimension + 1)).max() < 1e-8: # if we've been passed an identity matrix do nothing return # make sure cache is up to date self._cache.verify() # new cache to transfer items cache = {} # apply transform to discretized paths if 'discrete' in self._cache.cache: cache['discrete'] = np.array([ transformations.transform_points( d, matrix=transform) for d in self.discrete]) # things we can just straight up copy # as they are topological not geometric for key in ['root', 'paths', 'path_valid', 'dangling', 'vertex_graph', 'enclosure', 'enclosure_shell', 'enclosure_directed']: # if they're in cache save them from the purge if key in self._cache.cache: cache[key] = self._cache.cache[key] # transform vertices in place self.vertices = transformations.transform_points( self.vertices, matrix=transform) # explicitly clear the cache self._cache.clear() self._cache.id_set() # populate the things we wangled self._cache.cache.update(cache)
[ "def", "apply_transform", "(", "self", ",", "transform", ")", ":", "dimension", "=", "self", ".", "vertices", ".", "shape", "[", "1", "]", "transform", "=", "np", ".", "asanyarray", "(", "transform", ",", "dtype", "=", "np", ".", "float64", ")", "if", "transform", ".", "shape", "!=", "(", "dimension", "+", "1", ",", "dimension", "+", "1", ")", ":", "raise", "ValueError", "(", "'transform is incorrect shape!'", ")", "elif", "np", ".", "abs", "(", "transform", "-", "np", ".", "eye", "(", "dimension", "+", "1", ")", ")", ".", "max", "(", ")", "<", "1e-8", ":", "# if we've been passed an identity matrix do nothing", "return", "# make sure cache is up to date", "self", ".", "_cache", ".", "verify", "(", ")", "# new cache to transfer items", "cache", "=", "{", "}", "# apply transform to discretized paths", "if", "'discrete'", "in", "self", ".", "_cache", ".", "cache", ":", "cache", "[", "'discrete'", "]", "=", "np", ".", "array", "(", "[", "transformations", ".", "transform_points", "(", "d", ",", "matrix", "=", "transform", ")", "for", "d", "in", "self", ".", "discrete", "]", ")", "# things we can just straight up copy", "# as they are topological not geometric", "for", "key", "in", "[", "'root'", ",", "'paths'", ",", "'path_valid'", ",", "'dangling'", ",", "'vertex_graph'", ",", "'enclosure'", ",", "'enclosure_shell'", ",", "'enclosure_directed'", "]", ":", "# if they're in cache save them from the purge", "if", "key", "in", "self", ".", "_cache", ".", "cache", ":", "cache", "[", "key", "]", "=", "self", ".", "_cache", ".", "cache", "[", "key", "]", "# transform vertices in place", "self", ".", "vertices", "=", "transformations", ".", "transform_points", "(", "self", ".", "vertices", ",", "matrix", "=", "transform", ")", "# explicitly clear the cache", "self", ".", "_cache", ".", "clear", "(", ")", "self", ".", "_cache", ".", "id_set", "(", ")", "# populate the things we wangled", "self", ".", "_cache", ".", "cache", ".", "update", "(", "cache", ")" ]
Apply a transformation matrix to the current path in- place Parameters ----------- transform : (d+1, d+1) float Homogenous transformation for vertices
[ "Apply", "a", "transformation", "matrix", "to", "the", "current", "path", "in", "-", "place" ]
python
train
34.54717
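A usage sketch: rotate a 2D path in place with a homogeneous (3, 3) matrix; the path vertices are arbitrary, and load_path is assumed to accept an (n, 2) array of points:

import numpy as np
import trimesh

path = trimesh.load_path(np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]]))
angle = np.radians(90)
rot = np.array([[np.cos(angle), -np.sin(angle), 0.0],
                [np.sin(angle),  np.cos(angle), 0.0],
                [0.0,            0.0,           1.0]])
path.apply_transform(rot)   # transform shape must be (d+1, d+1) for d-dimensional vertices
print(path.vertices)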
laike9m/pdir2
pdir/api.py
https://github.com/laike9m/pdir2/blob/c4550523fe9b54bf9b755ffa28900a5e9f493d02/pdir/api.py#L119-L131
def methods(self) -> 'PrettyDir': """Returns all methods of the inspected object. Note that "methods" can mean "functions" when inspecting a module. """ return PrettyDir( self.obj, [ pattr for pattr in self.pattrs if category_match(pattr.category, AttrCategory.FUNCTION) ], )
[ "def", "methods", "(", "self", ")", "->", "'PrettyDir'", ":", "return", "PrettyDir", "(", "self", ".", "obj", ",", "[", "pattr", "for", "pattr", "in", "self", ".", "pattrs", "if", "category_match", "(", "pattr", ".", "category", ",", "AttrCategory", ".", "FUNCTION", ")", "]", ",", ")" ]
Returns all methods of the inspected object. Note that "methods" can mean "functions" when inspecting a module.
[ "Returns", "all", "methods", "of", "the", "inspected", "object", "." ]
python
train
29.846154
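A usage sketch: inspect a built-in and keep only its function/method attributes:

import pdir

result = pdir(list).methods   # PrettyDir filtered to AttrCategory.FUNCTION entries
print(result)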
Parallels/artifactory
artifactory.py
https://github.com/Parallels/artifactory/blob/09ddcc4ae15095eec2347d39774c3f8aca6c4654/artifactory.py#L451-L457
def rest_get_stream(self, url, auth=None, verify=True, cert=None): """ Perform a chunked GET request to url with optional authentication This is specifically to download files. """ res = requests.get(url, auth=auth, stream=True, verify=verify, cert=cert) return res.raw, res.status_code
[ "def", "rest_get_stream", "(", "self", ",", "url", ",", "auth", "=", "None", ",", "verify", "=", "True", ",", "cert", "=", "None", ")", ":", "res", "=", "requests", ".", "get", "(", "url", ",", "auth", "=", "auth", ",", "stream", "=", "True", ",", "verify", "=", "verify", ",", "cert", "=", "cert", ")", "return", "res", ".", "raw", ",", "res", ".", "status_code" ]
Perform a chunked GET request to url with optional authentication This is specifically to download files.
[ "Perform", "a", "chunked", "GET", "request", "to", "url", "with", "optional", "authentication", "This", "is", "specifically", "to", "download", "files", "." ]
python
train
46.857143
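A self-contained sketch of the streaming pattern this helper supports: the raw stream it returns can be copied straight to disk; the URL and filename are placeholders:

import shutil
import requests

resp = requests.get('https://example.org/repo/artifact.tar.gz', stream=True)
if resp.status_code == 200:
    with open('artifact.tar.gz', 'wb') as out:
        shutil.copyfileobj(resp.raw, out)   # resp.raw is the same object rest_get_stream returns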
scivision/gridaurora
gridaurora/__init__.py
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/__init__.py#L28-L43
def to_ut1unix(time: Union[str, datetime, float, np.ndarray]) -> np.ndarray: """ converts time inputs to UT1 seconds since Unix epoch """ # keep this order time = totime(time) if isinstance(time, (float, int)): return time if isinstance(time, (tuple, list, np.ndarray)): assert isinstance(time[0], datetime), f'expected datetime, not {type(time[0])}' return np.array(list(map(dt2ut1, time))) else: assert isinstance(time, datetime) return dt2ut1(time)
[ "def", "to_ut1unix", "(", "time", ":", "Union", "[", "str", ",", "datetime", ",", "float", ",", "np", ".", "ndarray", "]", ")", "->", "np", ".", "ndarray", ":", "# keep this order", "time", "=", "totime", "(", "time", ")", "if", "isinstance", "(", "time", ",", "(", "float", ",", "int", ")", ")", ":", "return", "time", "if", "isinstance", "(", "time", ",", "(", "tuple", ",", "list", ",", "np", ".", "ndarray", ")", ")", ":", "assert", "isinstance", "(", "time", "[", "0", "]", ",", "datetime", ")", ",", "f'expected datetime, not {type(time[0])}'", "return", "np", ".", "array", "(", "list", "(", "map", "(", "dt2ut1", ",", "time", ")", ")", ")", "else", ":", "assert", "isinstance", "(", "time", ",", "datetime", ")", "return", "dt2ut1", "(", "time", ")" ]
converts time inputs to UT1 seconds since Unix epoch
[ "converts", "time", "inputs", "to", "UT1", "seconds", "since", "Unix", "epoch" ]
python
train
31.875
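A usage sketch; both a single datetime and a sequence of datetimes are accepted:

from datetime import datetime
from gridaurora import to_ut1unix

print(to_ut1unix(datetime(2015, 3, 17, 12, 0, 0)))                  # single float
print(to_ut1unix([datetime(2015, 3, 17), datetime(2015, 3, 18)]))   # numpy array of floats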
Esri/ArcREST
src/arcrest/ags/_geodataservice.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/ags/_geodataservice.py#L111-L118
def replicasResource(self): """returns a dict of replicas""" if self._replicasResource is None: self._replicasResource = {} for replica in self.replicas: self._replicasResource["replicaName"] = replica.name self._replicasResource["replicaID"] = replica.guid return self._replicasResource
[ "def", "replicasResource", "(", "self", ")", ":", "if", "self", ".", "_replicasResource", "is", "None", ":", "self", ".", "_replicasResource", "=", "{", "}", "for", "replica", "in", "self", ".", "replicas", ":", "self", ".", "_replicasResource", "[", "\"replicaName\"", "]", "=", "replica", ".", "name", "self", ".", "_replicasResource", "[", "\"replicaID\"", "]", "=", "replica", ".", "guid", "return", "self", ".", "_replicasResource" ]
returns a dict of replicas
[ "returns", "a", "list", "of", "replices" ]
python
train
45
clab/dynet
python/dynet_viz.py
https://github.com/clab/dynet/blob/21cc62606b74f81bb4b11a9989a6c2bd0caa09c5/python/dynet_viz.py#L775-L947
def make_network_graph(compact, expression_names, lookup_names): """ Make a network graph, represented as of nodes and a set of edges. The nodes are represented as tuples: (name: string, input_dim: Dim, label: string, output_dim: Dim, children: set[name], features: string) # The edges are represented as dict of children to sets of parents: (child: string) -> [(parent: string, features: string)] """ nodes = set() # edges = defaultdict(set) # parent -> (child, extra) var_name_dict = dict() if expression_names: for e in graphviz_items: # e: Expression if e in expression_names: var_name_dict[e.vindex] = expression_names[e] rnn_bldr_name = defaultdict(lambda: chr(len(rnn_bldr_name)+ord('A'))) def vidx2str(vidx): return '%s%s' % ('N', vidx) for e in graphviz_items: # e: Expression vidx = e.vindex f_name = e.name args = e.args output_dim = e.dim input_dim = None # basically just RNNStates use this since everything else has input_dim==output_dim children = set() node_type = '2_regular' if f_name == 'vecInput': [_dim] = args arg_strs = [] elif f_name == 'inputVector': [_v] = args arg_strs = [] elif f_name == 'matInput': [_d1, _d2] = args arg_strs = [] elif f_name == 'inputMatrix': [_v, _d] = args arg_strs = [] elif f_name == 'parameters': [_dim] = args arg_strs = [] if compact: if vidx in var_name_dict: f_name = var_name_dict[vidx] node_type = '1_param' elif f_name == 'lookup_parameters': [_dim] = args arg_strs = [] if compact: if vidx in var_name_dict: f_name = var_name_dict[vidx] node_type = '1_param' elif f_name == 'lookup': [p, idx, update] = args [_dim] = p.args if vidx in var_name_dict: name = var_name_dict[vidx] else: name = None item_name = None if lookup_names and p in expression_names: param_name = expression_names[p] if param_name in lookup_names: item_name = '\\"%s\\"' % (lookup_names[param_name][idx],) if compact: if item_name is not None: f_name = item_name elif name is not None: f_name = '%s[%s]' % (name, idx) else: f_name = 'lookup(%s)' % (idx) arg_strs = [] else: arg_strs = [var_name_dict.get(p.vindex, 'v%d' % (p.vindex))] if item_name is not None: arg_strs.append(item_name) vocab_size = _dim[0] arg_strs.extend(['%s' % (idx), '%s' % (vocab_size), 'update' if update else 'fixed']) #children.add(vidx2str(p.vindex)) #node_type = '1_param' elif f_name == 'RNNState': [arg, input_dim, bldr_type, bldr_num, state_idx] = args # arg==input_e rnn_name = rnn_bldr_name[bldr_num] if bldr_type.endswith('Builder'): bldr_type[:-len('Builder')] f_name = '%s-%s-%s' % (bldr_type, rnn_name, state_idx) if not compact: i = arg.vindex s = var_name_dict.get(i, 'v%d' % (i)) arg_strs = [s] else: arg_strs = [] children.add(vidx2str(arg.vindex)) node_type = '3_rnn_state' else: arg_strs = [] for arg in args: if isinstance(arg, Expression): if not compact: i = arg.vindex s = var_name_dict.get(i, 'v%d' % (i)) arg_strs.append(s) children.add(vidx2str(arg.vindex)) elif isinstance(arg, float) and compact: s = re.sub('0+$', '', '%.3f' % (arg)) if s == '0.': s = str(arg) arg_strs.append(s) else: arg_strs.append(str(arg)) # f_name = { , # }.get(f_name, f_name) if compact: f_name = { 'add': '+', 'sub': '-', 'mul': '*', 'div': '/', 'cadd': '+', 'cmul': '*', 'cdiv': '/', 'scalarsub': '-', 'concatenate': 'cat', 'esum': 'sum', 'emax': 'max', 'emin': 'min', }.get(f_name, f_name) if arg_strs: str_repr = '%s(%s)' % (f_name, ', '.join(arg_strs)) else: str_repr = f_name elif f_name == 'add': [a,b] = arg_strs str_repr = '%s + %s' % (a,b) elif f_name == 'sub': [a,b] = arg_strs str_repr = '%s - %s' % (a,b) elif f_name == 'mul': 
[a,b] = arg_strs str_repr = '%s * %s' % (a,b) elif f_name == 'div': [a,b] = arg_strs str_repr = '%s / %s' % (a,b) elif f_name == 'neg': [a,] = arg_strs str_repr = '-%s' % (a) elif f_name == 'affine_transform': str_repr = arg_strs[0] for i in xrange(1, len(arg_strs), 2): str_repr += ' + %s*%s' % tuple(arg_strs[i:i+2]) else: if arg_strs is not None: str_repr = '%s(%s)' % (f_name, ', '.join(arg_strs)) else: str_repr = f_name name = vidx2str(vidx) var_name = '%s' % (var_name_dict.get(vidx, 'v%d' % (vidx))) if not compact else '' # if show_dims: # str_repr = '%s\\n%s' % (shape_str(e.dim), str_repr) label = str_repr if not compact: label = '%s = %s' % (var_name, label) features = '' # if output_dim.invalid(): # features += " [color=red,style=filled,fillcolor=red]" # node_def_lines.append(' %s [label="%s%s"] %s;' % (vidx2str(vidx), label_prefix, str_repr, '')) expr_name = expression_names[e] if compact and expression_names and (e in expression_names) and (expression_names[e] != f_name) else None nodes.add(GVNode(name, input_dim, label, output_dim, frozenset(children), features, node_type, expr_name)) return nodes
[ "def", "make_network_graph", "(", "compact", ",", "expression_names", ",", "lookup_names", ")", ":", "nodes", "=", "set", "(", ")", "# edges = defaultdict(set) # parent -> (child, extra)", "var_name_dict", "=", "dict", "(", ")", "if", "expression_names", ":", "for", "e", "in", "graphviz_items", ":", "# e: Expression", "if", "e", "in", "expression_names", ":", "var_name_dict", "[", "e", ".", "vindex", "]", "=", "expression_names", "[", "e", "]", "rnn_bldr_name", "=", "defaultdict", "(", "lambda", ":", "chr", "(", "len", "(", "rnn_bldr_name", ")", "+", "ord", "(", "'A'", ")", ")", ")", "def", "vidx2str", "(", "vidx", ")", ":", "return", "'%s%s'", "%", "(", "'N'", ",", "vidx", ")", "for", "e", "in", "graphviz_items", ":", "# e: Expression", "vidx", "=", "e", ".", "vindex", "f_name", "=", "e", ".", "name", "args", "=", "e", ".", "args", "output_dim", "=", "e", ".", "dim", "input_dim", "=", "None", "# basically just RNNStates use this since everything else has input_dim==output_dim", "children", "=", "set", "(", ")", "node_type", "=", "'2_regular'", "if", "f_name", "==", "'vecInput'", ":", "[", "_dim", "]", "=", "args", "arg_strs", "=", "[", "]", "elif", "f_name", "==", "'inputVector'", ":", "[", "_v", "]", "=", "args", "arg_strs", "=", "[", "]", "elif", "f_name", "==", "'matInput'", ":", "[", "_d1", ",", "_d2", "]", "=", "args", "arg_strs", "=", "[", "]", "elif", "f_name", "==", "'inputMatrix'", ":", "[", "_v", ",", "_d", "]", "=", "args", "arg_strs", "=", "[", "]", "elif", "f_name", "==", "'parameters'", ":", "[", "_dim", "]", "=", "args", "arg_strs", "=", "[", "]", "if", "compact", ":", "if", "vidx", "in", "var_name_dict", ":", "f_name", "=", "var_name_dict", "[", "vidx", "]", "node_type", "=", "'1_param'", "elif", "f_name", "==", "'lookup_parameters'", ":", "[", "_dim", "]", "=", "args", "arg_strs", "=", "[", "]", "if", "compact", ":", "if", "vidx", "in", "var_name_dict", ":", "f_name", "=", "var_name_dict", "[", "vidx", "]", "node_type", "=", "'1_param'", "elif", "f_name", "==", "'lookup'", ":", "[", "p", ",", "idx", ",", "update", "]", "=", "args", "[", "_dim", "]", "=", "p", ".", "args", "if", "vidx", "in", "var_name_dict", ":", "name", "=", "var_name_dict", "[", "vidx", "]", "else", ":", "name", "=", "None", "item_name", "=", "None", "if", "lookup_names", "and", "p", "in", "expression_names", ":", "param_name", "=", "expression_names", "[", "p", "]", "if", "param_name", "in", "lookup_names", ":", "item_name", "=", "'\\\\\"%s\\\\\"'", "%", "(", "lookup_names", "[", "param_name", "]", "[", "idx", "]", ",", ")", "if", "compact", ":", "if", "item_name", "is", "not", "None", ":", "f_name", "=", "item_name", "elif", "name", "is", "not", "None", ":", "f_name", "=", "'%s[%s]'", "%", "(", "name", ",", "idx", ")", "else", ":", "f_name", "=", "'lookup(%s)'", "%", "(", "idx", ")", "arg_strs", "=", "[", "]", "else", ":", "arg_strs", "=", "[", "var_name_dict", ".", "get", "(", "p", ".", "vindex", ",", "'v%d'", "%", "(", "p", ".", "vindex", ")", ")", "]", "if", "item_name", "is", "not", "None", ":", "arg_strs", ".", "append", "(", "item_name", ")", "vocab_size", "=", "_dim", "[", "0", "]", "arg_strs", ".", "extend", "(", "[", "'%s'", "%", "(", "idx", ")", ",", "'%s'", "%", "(", "vocab_size", ")", ",", "'update'", "if", "update", "else", "'fixed'", "]", ")", "#children.add(vidx2str(p.vindex))", "#node_type = '1_param'", "elif", "f_name", "==", "'RNNState'", ":", "[", "arg", ",", "input_dim", ",", "bldr_type", ",", "bldr_num", ",", "state_idx", "]", "=", "args", "# arg==input_e", "rnn_name", "=", 
"rnn_bldr_name", "[", "bldr_num", "]", "if", "bldr_type", ".", "endswith", "(", "'Builder'", ")", ":", "bldr_type", "[", ":", "-", "len", "(", "'Builder'", ")", "]", "f_name", "=", "'%s-%s-%s'", "%", "(", "bldr_type", ",", "rnn_name", ",", "state_idx", ")", "if", "not", "compact", ":", "i", "=", "arg", ".", "vindex", "s", "=", "var_name_dict", ".", "get", "(", "i", ",", "'v%d'", "%", "(", "i", ")", ")", "arg_strs", "=", "[", "s", "]", "else", ":", "arg_strs", "=", "[", "]", "children", ".", "add", "(", "vidx2str", "(", "arg", ".", "vindex", ")", ")", "node_type", "=", "'3_rnn_state'", "else", ":", "arg_strs", "=", "[", "]", "for", "arg", "in", "args", ":", "if", "isinstance", "(", "arg", ",", "Expression", ")", ":", "if", "not", "compact", ":", "i", "=", "arg", ".", "vindex", "s", "=", "var_name_dict", ".", "get", "(", "i", ",", "'v%d'", "%", "(", "i", ")", ")", "arg_strs", ".", "append", "(", "s", ")", "children", ".", "add", "(", "vidx2str", "(", "arg", ".", "vindex", ")", ")", "elif", "isinstance", "(", "arg", ",", "float", ")", "and", "compact", ":", "s", "=", "re", ".", "sub", "(", "'0+$'", ",", "''", ",", "'%.3f'", "%", "(", "arg", ")", ")", "if", "s", "==", "'0.'", ":", "s", "=", "str", "(", "arg", ")", "arg_strs", ".", "append", "(", "s", ")", "else", ":", "arg_strs", ".", "append", "(", "str", "(", "arg", ")", ")", "# f_name = { ,", "# }.get(f_name, f_name)", "if", "compact", ":", "f_name", "=", "{", "'add'", ":", "'+'", ",", "'sub'", ":", "'-'", ",", "'mul'", ":", "'*'", ",", "'div'", ":", "'/'", ",", "'cadd'", ":", "'+'", ",", "'cmul'", ":", "'*'", ",", "'cdiv'", ":", "'/'", ",", "'scalarsub'", ":", "'-'", ",", "'concatenate'", ":", "'cat'", ",", "'esum'", ":", "'sum'", ",", "'emax'", ":", "'max'", ",", "'emin'", ":", "'min'", ",", "}", ".", "get", "(", "f_name", ",", "f_name", ")", "if", "arg_strs", ":", "str_repr", "=", "'%s(%s)'", "%", "(", "f_name", ",", "', '", ".", "join", "(", "arg_strs", ")", ")", "else", ":", "str_repr", "=", "f_name", "elif", "f_name", "==", "'add'", ":", "[", "a", ",", "b", "]", "=", "arg_strs", "str_repr", "=", "'%s + %s'", "%", "(", "a", ",", "b", ")", "elif", "f_name", "==", "'sub'", ":", "[", "a", ",", "b", "]", "=", "arg_strs", "str_repr", "=", "'%s - %s'", "%", "(", "a", ",", "b", ")", "elif", "f_name", "==", "'mul'", ":", "[", "a", ",", "b", "]", "=", "arg_strs", "str_repr", "=", "'%s * %s'", "%", "(", "a", ",", "b", ")", "elif", "f_name", "==", "'div'", ":", "[", "a", ",", "b", "]", "=", "arg_strs", "str_repr", "=", "'%s / %s'", "%", "(", "a", ",", "b", ")", "elif", "f_name", "==", "'neg'", ":", "[", "a", ",", "]", "=", "arg_strs", "str_repr", "=", "'-%s'", "%", "(", "a", ")", "elif", "f_name", "==", "'affine_transform'", ":", "str_repr", "=", "arg_strs", "[", "0", "]", "for", "i", "in", "xrange", "(", "1", ",", "len", "(", "arg_strs", ")", ",", "2", ")", ":", "str_repr", "+=", "' + %s*%s'", "%", "tuple", "(", "arg_strs", "[", "i", ":", "i", "+", "2", "]", ")", "else", ":", "if", "arg_strs", "is", "not", "None", ":", "str_repr", "=", "'%s(%s)'", "%", "(", "f_name", ",", "', '", ".", "join", "(", "arg_strs", ")", ")", "else", ":", "str_repr", "=", "f_name", "name", "=", "vidx2str", "(", "vidx", ")", "var_name", "=", "'%s'", "%", "(", "var_name_dict", ".", "get", "(", "vidx", ",", "'v%d'", "%", "(", "vidx", ")", ")", ")", "if", "not", "compact", "else", "''", "# if show_dims:", "# str_repr = '%s\\\\n%s' % (shape_str(e.dim), str_repr)", "label", "=", "str_repr", "if", "not", "compact", ":", "label", "=", "'%s = %s'", "%", "(", "var_name", ",", 
"label", ")", "features", "=", "''", "# if output_dim.invalid():", "# features += \" [color=red,style=filled,fillcolor=red]\"", "# node_def_lines.append(' %s [label=\"%s%s\"] %s;' % (vidx2str(vidx), label_prefix, str_repr, ''))", "expr_name", "=", "expression_names", "[", "e", "]", "if", "compact", "and", "expression_names", "and", "(", "e", "in", "expression_names", ")", "and", "(", "expression_names", "[", "e", "]", "!=", "f_name", ")", "else", "None", "nodes", ".", "add", "(", "GVNode", "(", "name", ",", "input_dim", ",", "label", ",", "output_dim", ",", "frozenset", "(", "children", ")", ",", "features", ",", "node_type", ",", "expr_name", ")", ")", "return", "nodes" ]
Make a network graph, represented as a set of nodes and a set of edges. The nodes are represented as tuples: (name: string, input_dim: Dim, label: string, output_dim: Dim, children: set[name], features: string) # The edges are represented as dict of children to sets of parents: (child: string) -> [(parent: string, features: string)]
[ "Make", "a", "network", "graph", "represented", "as", "of", "nodes", "and", "a", "set", "of", "edges", ".", "The", "nodes", "are", "represented", "as", "tuples", ":", "(", "name", ":", "string", "input_dim", ":", "Dim", "label", ":", "string", "output_dim", ":", "Dim", "children", ":", "set", "[", "name", "]", "features", ":", "string", ")", "#", "The", "edges", "are", "represented", "as", "dict", "of", "children", "to", "sets", "of", "parents", ":", "(", "child", ":", "string", ")", "-", ">", "[", "(", "parent", ":", "string", "features", ":", "string", ")", "]" ]
python
valid
32.791908
vadimk2016/v-vk-api
v_vk_api/__init__.py
https://github.com/vadimk2016/v-vk-api/blob/ef5656e09944b5319a1f573cfb7b022f3d31c0cf/v_vk_api/__init__.py#L9-L29
def create(app_id: int = None, login: str = None, password: str = None, service_token: str = None, proxies: dict = None) -> API: """ Creates an API instance, requires app ID, login and password or service token to create connection :param app_id: int: specifies app ID :param login: str: specifies login, can be phone number or email :param password: str: specifies password :param service_token: str: specifies service token :param proxies: dict: specifies proxies, require http and https proxy """ session_ = APISession(app_id, login, password, service_token, proxies) return API(session_)
[ "def", "create", "(", "app_id", ":", "int", "=", "None", ",", "login", ":", "str", "=", "None", ",", "password", ":", "str", "=", "None", ",", "service_token", ":", "str", "=", "None", ",", "proxies", ":", "dict", "=", "None", ")", "->", "API", ":", "session_", "=", "APISession", "(", "app_id", ",", "login", ",", "password", ",", "service_token", ",", "proxies", ")", "return", "API", "(", "session_", ")" ]
Creates an API instance, requires app ID, login and password or service token to create connection :param app_id: int: specifies app ID :param login: str: specifies login, can be phone number or email :param password: str: specifies password :param service_token: str: specifies service token :param proxies: dict: specifies proxies, require http and https proxy
[ "Creates", "an", "API", "instance", "requires", "app", "ID", "login", "and", "password", "or", "service", "token", "to", "create", "connection" ]
python
train
36.809524
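A usage sketch; all credentials shown are placeholders:

import v_vk_api

api = v_vk_api.create(service_token='YOUR_SERVICE_TOKEN')
# or, with user credentials:
# api = v_vk_api.create(app_id=1234567, login='+15551234567', password='secret')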
gwastro/pycbc
pycbc/population/rates_functions.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/population/rates_functions.py#L413-L444
def draw_lnm_samples(**kwargs): ''' Draw samples for uniform-in-log model Parameters ---------- **kwargs: string Keyword arguments as model parameters and number of samples Returns ------- array The first mass array The second mass ''' #PDF doesnt match with sampler nsamples = kwargs.get('nsamples', 1) min_mass = kwargs.get('min_mass', 5.) max_mass = kwargs.get('max_mass', 95.) max_mtotal = min_mass + max_mass lnmmin = log(min_mass) lnmmax = log(max_mass) k = nsamples * int(1.5 + log(1 + 100./nsamples)) aa = np.exp(np.random.uniform(lnmmin, lnmmax, k)) bb = np.exp(np.random.uniform(lnmmin, lnmmax, k)) idx = np.where(aa + bb < max_mtotal) m1, m2 = (np.maximum(aa, bb))[idx], (np.minimum(aa, bb))[idx] return np.resize(m1, nsamples), np.resize(m2, nsamples)
[ "def", "draw_lnm_samples", "(", "*", "*", "kwargs", ")", ":", "#PDF doesnt match with sampler", "nsamples", "=", "kwargs", ".", "get", "(", "'nsamples'", ",", "1", ")", "min_mass", "=", "kwargs", ".", "get", "(", "'min_mass'", ",", "5.", ")", "max_mass", "=", "kwargs", ".", "get", "(", "'max_mass'", ",", "95.", ")", "max_mtotal", "=", "min_mass", "+", "max_mass", "lnmmin", "=", "log", "(", "min_mass", ")", "lnmmax", "=", "log", "(", "max_mass", ")", "k", "=", "nsamples", "*", "int", "(", "1.5", "+", "log", "(", "1", "+", "100.", "/", "nsamples", ")", ")", "aa", "=", "np", ".", "exp", "(", "np", ".", "random", ".", "uniform", "(", "lnmmin", ",", "lnmmax", ",", "k", ")", ")", "bb", "=", "np", ".", "exp", "(", "np", ".", "random", ".", "uniform", "(", "lnmmin", ",", "lnmmax", ",", "k", ")", ")", "idx", "=", "np", ".", "where", "(", "aa", "+", "bb", "<", "max_mtotal", ")", "m1", ",", "m2", "=", "(", "np", ".", "maximum", "(", "aa", ",", "bb", ")", ")", "[", "idx", "]", ",", "(", "np", ".", "minimum", "(", "aa", ",", "bb", ")", ")", "[", "idx", "]", "return", "np", ".", "resize", "(", "m1", ",", "nsamples", ")", ",", "np", ".", "resize", "(", "m2", ",", "nsamples", ")" ]
Draw samples for uniform-in-log model Parameters ---------- **kwargs: string Keyword arguments as model parameters and number of samples Returns ------- array The first mass array The second mass
[ "Draw", "samples", "for", "uniform", "-", "in", "-", "log", "model" ]
python
train
27.71875
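A usage sketch: draw a population of binaries from the uniform-in-log model and check the mass ordering:

import numpy as np
from pycbc.population.rates_functions import draw_lnm_samples

m1, m2 = draw_lnm_samples(nsamples=1000, min_mass=5., max_mass=95.)
assert m1.shape == (1000,) and np.all(m1 >= m2)
print(m1[:3], m2[:3])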
inveniosoftware-attic/invenio-utils
invenio_utils/serializers.py
https://github.com/inveniosoftware-attic/invenio-utils/blob/9a1c6db4e3f1370901f329f510480dd8df188296/invenio_utils/serializers.py#L105-L116
def loads(astring): """Decompress and deserialize string into a Python object via pickle.""" try: return pickle.loads(lzma.decompress(astring)) except lzma.LZMAError as e: raise SerializerError( 'Cannot decompress object ("{}")'.format(str(e)) ) except pickle.UnpicklingError as e: raise SerializerError( 'Cannot restore object ("{}")'.format(str(e)) )
[ "def", "loads", "(", "astring", ")", ":", "try", ":", "return", "pickle", ".", "loads", "(", "lzma", ".", "decompress", "(", "astring", ")", ")", "except", "lzma", ".", "LZMAError", "as", "e", ":", "raise", "SerializerError", "(", "'Cannot decompress object (\"{}\")'", ".", "format", "(", "str", "(", "e", ")", ")", ")", "except", "pickle", ".", "UnpicklingError", "as", "e", ":", "raise", "SerializerError", "(", "'Cannot restore object (\"{}\")'", ".", "format", "(", "str", "(", "e", ")", ")", ")" ]
Decompress and deserialize string into a Python object via pickle.
[ "Decompress", "and", "deserialize", "string", "into", "a", "Python", "object", "via", "pickle", "." ]
python
train
38.75
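A round-trip sketch: the matching serializer side is lzma-compressed pickle, so a blob built that way deserializes cleanly:

import lzma
import pickle
from invenio_utils.serializers import loads

blob = lzma.compress(pickle.dumps({'recid': 1, 'title': 'demo'}))
print(loads(blob))   # {'recid': 1, 'title': 'demo'}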
saltstack/salt
salt/states/cloud.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/cloud.py#L218-L313
def profile(name, profile, onlyif=None, unless=None, opts=None, **kwargs): ''' Create a single instance on a cloud provider, using a salt-cloud profile. Note that while profiles used this function do take any configuration argument that would normally be used to create an instance using a profile, this state will not verify the state of any of those arguments on an existing instance. Stateful properties of an instance should be configured using their own individual state (i.e., cloud.tagged, cloud.untagged, etc). name The name of the instance to create profile The name of the cloud profile to use onlyif Do run the state only if is unless succeed unless Do not run the state at least unless succeed kwargs Any profile override or addition opts Any extra opts that need to be used ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} retcode = __salt__['cmd.retcode'] if onlyif is not None: if not isinstance(onlyif, six.string_types): if not onlyif: return _valid(name, comment='onlyif condition is false') elif isinstance(onlyif, six.string_types): if retcode(onlyif, python_shell=True) != 0: return _valid(name, comment='onlyif condition is false') if unless is not None: if not isinstance(unless, six.string_types): if unless: return _valid(name, comment='unless condition is true') elif isinstance(unless, six.string_types): if retcode(unless, python_shell=True) == 0: return _valid(name, comment='unless condition is true') instance = _get_instance([name]) if instance and not any('Not Actioned' in key for key in instance): ret['result'] = True ret['comment'] = 'Already present instance {0}'.format(name) return ret if __opts__['test']: ret['comment'] = 'Instance {0} needs to be created'.format(name) return ret info = __salt__['cloud.profile'](profile, name, vm_overrides=kwargs, opts=opts) # get either {Error: ''} or {namestring: {Error: ''}} # which is what we can get from providers returns main_error = info.get('Error', '') name_error = '' if isinstance(info, dict): subinfo = info.get(name, {}) if isinstance(subinfo, dict): name_error = subinfo.get('Error', None) error = main_error or name_error if info and not error: node_info = info.get(name) ret['result'] = True default_msg = 'Created instance {0} using profile {1}'.format( name, profile,) # some providers support changes if 'changes' in node_info: ret['changes'] = node_info['changes'] ret['comment'] = node_info.get('comment', default_msg) else: ret['changes'] = info ret['comment'] = default_msg elif error: ret['result'] = False ret['comment'] = ('Failed to create instance {0}' ' using profile {1}: {2}').format( name, profile, '{0}\n{1}\n'.format(main_error, name_error).strip(), ) else: ret['result'] = False ret['comment'] = ('Failed to create instance {0}' 'using profile {1}').format( name, profile, ) return ret
[ "def", "profile", "(", "name", ",", "profile", ",", "onlyif", "=", "None", ",", "unless", "=", "None", ",", "opts", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", "}", "retcode", "=", "__salt__", "[", "'cmd.retcode'", "]", "if", "onlyif", "is", "not", "None", ":", "if", "not", "isinstance", "(", "onlyif", ",", "six", ".", "string_types", ")", ":", "if", "not", "onlyif", ":", "return", "_valid", "(", "name", ",", "comment", "=", "'onlyif condition is false'", ")", "elif", "isinstance", "(", "onlyif", ",", "six", ".", "string_types", ")", ":", "if", "retcode", "(", "onlyif", ",", "python_shell", "=", "True", ")", "!=", "0", ":", "return", "_valid", "(", "name", ",", "comment", "=", "'onlyif condition is false'", ")", "if", "unless", "is", "not", "None", ":", "if", "not", "isinstance", "(", "unless", ",", "six", ".", "string_types", ")", ":", "if", "unless", ":", "return", "_valid", "(", "name", ",", "comment", "=", "'unless condition is true'", ")", "elif", "isinstance", "(", "unless", ",", "six", ".", "string_types", ")", ":", "if", "retcode", "(", "unless", ",", "python_shell", "=", "True", ")", "==", "0", ":", "return", "_valid", "(", "name", ",", "comment", "=", "'unless condition is true'", ")", "instance", "=", "_get_instance", "(", "[", "name", "]", ")", "if", "instance", "and", "not", "any", "(", "'Not Actioned'", "in", "key", "for", "key", "in", "instance", ")", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Already present instance {0}'", ".", "format", "(", "name", ")", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'Instance {0} needs to be created'", ".", "format", "(", "name", ")", "return", "ret", "info", "=", "__salt__", "[", "'cloud.profile'", "]", "(", "profile", ",", "name", ",", "vm_overrides", "=", "kwargs", ",", "opts", "=", "opts", ")", "# get either {Error: ''} or {namestring: {Error: ''}}", "# which is what we can get from providers returns", "main_error", "=", "info", ".", "get", "(", "'Error'", ",", "''", ")", "name_error", "=", "''", "if", "isinstance", "(", "info", ",", "dict", ")", ":", "subinfo", "=", "info", ".", "get", "(", "name", ",", "{", "}", ")", "if", "isinstance", "(", "subinfo", ",", "dict", ")", ":", "name_error", "=", "subinfo", ".", "get", "(", "'Error'", ",", "None", ")", "error", "=", "main_error", "or", "name_error", "if", "info", "and", "not", "error", ":", "node_info", "=", "info", ".", "get", "(", "name", ")", "ret", "[", "'result'", "]", "=", "True", "default_msg", "=", "'Created instance {0} using profile {1}'", ".", "format", "(", "name", ",", "profile", ",", ")", "# some providers support changes", "if", "'changes'", "in", "node_info", ":", "ret", "[", "'changes'", "]", "=", "node_info", "[", "'changes'", "]", "ret", "[", "'comment'", "]", "=", "node_info", ".", "get", "(", "'comment'", ",", "default_msg", ")", "else", ":", "ret", "[", "'changes'", "]", "=", "info", "ret", "[", "'comment'", "]", "=", "default_msg", "elif", "error", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "(", "'Failed to create instance {0}'", "' using profile {1}: {2}'", ")", ".", "format", "(", "name", ",", "profile", ",", "'{0}\\n{1}\\n'", ".", "format", "(", "main_error", ",", "name_error", ")", ".", "strip", "(", ")", ",", ")", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", 
"]", "=", "(", "'Failed to create instance {0}'", "'using profile {1}'", ")", ".", "format", "(", "name", ",", "profile", ",", ")", "return", "ret" ]
Create a single instance on a cloud provider, using a salt-cloud profile. Note that while profiles used by this function do take any configuration argument that would normally be used to create an instance using a profile, this state will not verify the state of any of those arguments on an existing instance. Stateful properties of an instance should be configured using their own individual state (i.e., cloud.tagged, cloud.untagged, etc). name The name of the instance to create profile The name of the cloud profile to use onlyif Only run the state if the given command returns a zero exit code unless Do not run the state if the given command returns a zero exit code kwargs Any profile override or addition opts Any extra opts that need to be used
[ "Create", "a", "single", "instance", "on", "a", "cloud", "provider", "using", "a", "salt", "-", "cloud", "profile", "." ]
python
train
35.6875
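A hedged sketch of the onlyif/unless gating used by the cloud.profile state above, rewritten as plain Python so the exit-code semantics are explicit; should_run() is an illustrative name, not part of Salt, and cmd.retcode is replaced by a plain subprocess call.

import subprocess

def should_run(onlyif=None, unless=None):
    # Mirrors the string-command branches in the state function above.
    if onlyif is not None and subprocess.call(onlyif, shell=True) != 0:
        return False   # onlyif command failed -> state is skipped
    if unless is not None and subprocess.call(unless, shell=True) == 0:
        return False   # unless command succeeded -> state is skipped
    return True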
gwastro/pycbc
pycbc/inference/burn_in.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/burn_in.py#L124-L149
def posterior_step(logposts, dim):
    """Finds the last time a chain made a jump > dim/2.

    Parameters
    ----------
    logposts : array
        1D array of values that are proportional to the log posterior
        values.
    dim : int
        The dimension of the parameter space.

    Returns
    -------
    int
        The index of the last time the logpost made a jump > dim/2.
        If that never happened, returns 0.
    """
    if logposts.ndim > 1:
        raise ValueError("logposts must be a 1D array")
    criteria = dim/2.
    dp = numpy.diff(logposts)
    indices = numpy.where(dp >= criteria)[0]
    if indices.size > 0:
        idx = indices[-1] + 1
    else:
        idx = 0
    return idx
[ "def", "posterior_step", "(", "logposts", ",", "dim", ")", ":", "if", "logposts", ".", "ndim", ">", "1", ":", "raise", "ValueError", "(", "\"logposts must be a 1D array\"", ")", "criteria", "=", "dim", "/", "2.", "dp", "=", "numpy", ".", "diff", "(", "logposts", ")", "indices", "=", "numpy", ".", "where", "(", "dp", ">=", "criteria", ")", "[", "0", "]", "if", "indices", ".", "size", ">", "0", ":", "idx", "=", "indices", "[", "-", "1", "]", "+", "1", "else", ":", "idx", "=", "0", "return", "idx" ]
Finds the last time a chain made a jump > dim/2. Parameters ---------- logposts : array 1D array of values that are proportional to the log posterior values. dim : int The dimension of the parameter space. Returns ------- int The index of the last time the logpost made a jump > dim/2. If that never happened, returns 0.
[ "Finds", "the", "last", "time", "a", "chain", "made", "a", "jump", ">", "dim", "/", "2", "." ]
python
train
26.230769
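A small worked example of posterior_step(); the log-posterior values are made up to show where the last jump >= dim/2 lands.

import numpy

logposts = numpy.array([-10.0, -9.5, -2.0, -1.9, -1.8])
# dim=4 gives criteria=2.0; the only diff >= 2.0 is between indices 1 and 2,
# so the returned burn-in index is 2.
print(posterior_step(logposts, dim=4))   # -> 2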
GoogleCloudPlatform/cloud-debug-python
src/googleclouddebugger/gcp_hub_client.py
https://github.com/GoogleCloudPlatform/cloud-debug-python/blob/89ce3782c98b814838a3ecb5479ed3882368cbee/src/googleclouddebugger/gcp_hub_client.py#L475-L497
def _ComputeUniquifier(self, debuggee):
    """Computes debuggee uniquifier.

    The debuggee uniquifier has to be identical on all instances. Therefore
    the uniquifier should not include any random numbers and should only be
    based on inputs that are guaranteed to be the same on all instances.

    Args:
      debuggee: complete debuggee message without the uniquifier

    Returns:
      Hex string of SHA1 hash of project information, debuggee labels and
      debuglet version.
    """
    uniquifier = hashlib.sha1()

    # Compute hash of application files if we don't have source context. This
    # way we can still distinguish between different deployments.
    if ('minorversion' not in debuggee.get('labels', []) and
            'sourceContexts' not in debuggee):
        uniquifier_computer.ComputeApplicationUniquifier(uniquifier)

    return uniquifier.hexdigest()
[ "def", "_ComputeUniquifier", "(", "self", ",", "debuggee", ")", ":", "uniquifier", "=", "hashlib", ".", "sha1", "(", ")", "# Compute hash of application files if we don't have source context. This", "# way we can still distinguish between different deployments.", "if", "(", "'minorversion'", "not", "in", "debuggee", ".", "get", "(", "'labels'", ",", "[", "]", ")", "and", "'sourceContexts'", "not", "in", "debuggee", ")", ":", "uniquifier_computer", ".", "ComputeApplicationUniquifier", "(", "uniquifier", ")", "return", "uniquifier", ".", "hexdigest", "(", ")" ]
Computes debuggee uniquifier. The debuggee uniquifier has to be identical on all instances. Therefore the uniquifier should not include any random numbers and should only be based on inputs that are guaranteed to be the same on all instances. Args: debuggee: complete debuggee message without the uniquifier Returns: Hex string of SHA1 hash of project information, debuggee labels and debuglet version.
[ "Computes", "debuggee", "uniquifier", "." ]
python
train
37.347826
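A rough stand-in for the hashing pattern above; uniquifier_computer is internal to the agent, so the application-file hash step is replaced with a deterministic update over the (made-up) debuggee fields.

import hashlib

debuggee = {'project': 'my-project', 'labels': {'module': 'app', 'version': '1'}}

uniquifier = hashlib.sha1()
if ('minorversion' not in debuggee.get('labels', []) and
        'sourceContexts' not in debuggee):
    # Stand-in for uniquifier_computer.ComputeApplicationUniquifier(uniquifier).
    uniquifier.update(repr(sorted(debuggee.items())).encode('utf-8'))
print(uniquifier.hexdigest())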
readbeyond/aeneas
aeneas/ttswrappers/basettswrapper.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/ttswrappers/basettswrapper.py#L445-L460
def _synthesize_multiple_python(self, text_file, output_file_path, quit_after=None, backwards=False):
    """
    Synthesize multiple fragments via a Python call.

    :rtype: tuple (result, (anchors, current_time, num_chars))
    """
    self.log(u"Synthesizing multiple via a Python call...")
    ret = self._synthesize_multiple_generic(
        helper_function=self._synthesize_single_python_helper,
        text_file=text_file,
        output_file_path=output_file_path,
        quit_after=quit_after,
        backwards=backwards
    )
    self.log(u"Synthesizing multiple via a Python call... done")
    return ret
[ "def", "_synthesize_multiple_python", "(", "self", ",", "text_file", ",", "output_file_path", ",", "quit_after", "=", "None", ",", "backwards", "=", "False", ")", ":", "self", ".", "log", "(", "u\"Synthesizing multiple via a Python call...\"", ")", "ret", "=", "self", ".", "_synthesize_multiple_generic", "(", "helper_function", "=", "self", ".", "_synthesize_single_python_helper", ",", "text_file", "=", "text_file", ",", "output_file_path", "=", "output_file_path", ",", "quit_after", "=", "quit_after", ",", "backwards", "=", "backwards", ")", "self", ".", "log", "(", "u\"Synthesizing multiple via a Python call... done\"", ")", "return", "ret" ]
Synthesize multiple fragments via a Python call. :rtype: tuple (result, (anchors, current_time, num_chars))
[ "Synthesize", "multiple", "fragments", "via", "a", "Python", "call", "." ]
python
train
41.25
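A hypothetical call site for the method above, assuming tts is a configured aeneas TTS wrapper instance and text_file an aeneas TextFile; the output path is illustrative.

# result is a success flag; the second element mirrors the documented tuple.
result, (anchors, current_time, num_chars) = tts._synthesize_multiple_python(
    text_file=text_file,
    output_file_path="/tmp/synthesized.wav",
)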
ethereum/pyethereum
ethereum/tools/_solidity.py
https://github.com/ethereum/pyethereum/blob/b704a5c6577863edc539a1ec3d2620a443b950fb/ethereum/tools/_solidity.py#L447-L480
def combined(cls, code, path=None, extra_args=None):
    """
    Compile combined-json with abi,bin,devdoc,userdoc.

    @param code: literal solidity code as a string.
    @param path: absolute path to solidity-file. Note: code & path are
                 mutually exclusive!
    @param extra_args: Either a space separated string or a list of extra
                       arguments to be passed to the solidity compiler.
    """
    if code and path:
        raise ValueError('sourcecode and path are mutually exclusive.')

    if path:
        contracts = compile_file(path, extra_args=extra_args)

        with open(path) as handler:
            code = handler.read()

    elif code:
        contracts = compile_code(code, extra_args=extra_args)

    else:
        raise ValueError('either code or path needs to be supplied.')

    sorted_contracts = []
    for name in solidity_names(code):
        sorted_contracts.append(
            (
                name[1],
                solidity_get_contract_data(contracts, path, name[1])
            )
        )
    return sorted_contracts
[ "def", "combined", "(", "cls", ",", "code", ",", "path", "=", "None", ",", "extra_args", "=", "None", ")", ":", "if", "code", "and", "path", ":", "raise", "ValueError", "(", "'sourcecode and path are mutually exclusive.'", ")", "if", "path", ":", "contracts", "=", "compile_file", "(", "path", ",", "extra_args", "=", "extra_args", ")", "with", "open", "(", "path", ")", "as", "handler", ":", "code", "=", "handler", ".", "read", "(", ")", "elif", "code", ":", "contracts", "=", "compile_code", "(", "code", ",", "extra_args", "=", "extra_args", ")", "else", ":", "raise", "ValueError", "(", "'either code or path needs to be supplied.'", ")", "sorted_contracts", "=", "[", "]", "for", "name", "in", "solidity_names", "(", "code", ")", ":", "sorted_contracts", ".", "append", "(", "(", "name", "[", "1", "]", ",", "solidity_get_contract_data", "(", "contracts", ",", "path", ",", "name", "[", "1", "]", ")", ")", ")", "return", "sorted_contracts" ]
Compile combined-json with abi,bin,devdoc,userdoc. @param code: literal solidity code as a string. @param path: absolute path to solidity-file. Note: code & path are mutually exclusive! @param extra_args: Either a space separated string or a list of extra arguments to be passed to the solidity compiler.
[ "Compile", "combined", "-", "json", "with", "abi", "bin", "devdoc", "userdoc", "." ]
python
train
34
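A hedged usage sketch, assuming a solc binary is installed and that solc_wrapper names the class exposing the combined() classmethod above.

source = "pragma solidity ^0.4.0; contract Greeter { function greet() public {} }"
contracts = solc_wrapper.combined(code=source)
for contract_name, data in contracts:
    print(contract_name, sorted(data))   # abi, bin, devdoc, userdoc entries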
OpenTreeOfLife/peyotl
peyotl/utility/get_logger.py
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/utility/get_logger.py#L60-L82
def get_logger(name="peyotl"):
    """Returns a logger with name set as given.

    See _read_logging_config for a description of the env var/config file
    cascade that controls configuration of the logger.
    """
    logger = logging.getLogger(name)
    if len(logger.handlers) == 0:
        log_init_warnings = []
        lc = _read_logging_config(log_init_warnings)
        logger.setLevel(lc['level'])
        if lc['filepath'] is not None:
            log_dir = lc['log_dir']
            if log_dir and not os.path.exists(log_dir):
                os.makedirs(log_dir)
            ch = logging.FileHandler(lc['filepath'])
        else:
            ch = logging.StreamHandler()
        ch.setLevel(lc['level'])
        ch.setFormatter(lc['formatter'])
        logger.addHandler(ch)
        if log_init_warnings:
            for w in log_init_warnings:
                logger.warn(w)
    return logger
[ "def", "get_logger", "(", "name", "=", "\"peyotl\"", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "name", ")", "if", "len", "(", "logger", ".", "handlers", ")", "==", "0", ":", "log_init_warnings", "=", "[", "]", "lc", "=", "_read_logging_config", "(", "log_init_warnings", ")", "logger", ".", "setLevel", "(", "lc", "[", "'level'", "]", ")", "if", "lc", "[", "'filepath'", "]", "is", "not", "None", ":", "log_dir", "=", "lc", "[", "'log_dir'", "]", "if", "log_dir", "and", "not", "os", ".", "path", ".", "exists", "(", "log_dir", ")", ":", "os", ".", "makedirs", "(", "log_dir", ")", "ch", "=", "logging", ".", "FileHandler", "(", "lc", "[", "'filepath'", "]", ")", "else", ":", "ch", "=", "logging", ".", "StreamHandler", "(", ")", "ch", ".", "setLevel", "(", "lc", "[", "'level'", "]", ")", "ch", ".", "setFormatter", "(", "lc", "[", "'formatter'", "]", ")", "logger", ".", "addHandler", "(", "ch", ")", "if", "log_init_warnings", ":", "for", "w", "in", "log_init_warnings", ":", "logger", ".", "warn", "(", "w", ")", "return", "logger" ]
Returns a logger with name set as given. See _read_logging_config for a description of the env var/config file cascade that controls configuration of the logger.
[ "Returns", "a", "logger", "with", "name", "set", "as", "given", ".", "See", "_read_logging_config", "for", "a", "description", "of", "the", "env", "var", "/", "config", "file", "cascade", "that", "controls", "configuration", "of", "the", "logger", "." ]
python
train
38.26087
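Typical use of the cascade described above; handlers are only attached on the first call for a given name, so repeated calls return the same configured logger object.

log = get_logger("peyotl.demo")
log.debug("level, handler and formatter come from the env var/config cascade")

same_log = get_logger("peyotl.demo")
assert same_log is log   # no duplicate handlers are added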
clinicedc/edc-form-label
edc_form_label/custom_label_condition.py
https://github.com/clinicedc/edc-form-label/blob/9d90807ddf784045b3867d676bee6e384a8e9d71/edc_form_label/custom_label_condition.py#L64-L78
def previous_obj(self):
    """Returns a model obj that is the first occurrence of a
    previous obj relative to this object's appointment.

    Override this method if not an EDC subject model / CRF.
    """
    previous_obj = None
    if self.previous_visit:
        try:
            previous_obj = self.model.objects.get(
                **{f"{self.model.visit_model_attr()}": self.previous_visit}
            )
        except ObjectDoesNotExist:
            pass
    return previous_obj
[ "def", "previous_obj", "(", "self", ")", ":", "previous_obj", "=", "None", "if", "self", ".", "previous_visit", ":", "try", ":", "previous_obj", "=", "self", ".", "model", ".", "objects", ".", "get", "(", "*", "*", "{", "f\"{self.model.visit_model_attr()}\"", ":", "self", ".", "previous_visit", "}", ")", "except", "ObjectDoesNotExist", ":", "pass", "return", "previous_obj" ]
Returns a model obj that is the first occurrence of a previous obj relative to this object's appointment. Override this method if not an EDC subject model / CRF.
[ "Returns", "a", "model", "obj", "that", "is", "the", "first", "occurrence", "of", "a", "previous", "obj", "relative", "to", "this", "object", "s", "appointment", "." ]
python
train
35.266667
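A hedged plain-Django equivalent of the lookup wrapped by previous_obj(); MyCrfModel and previous_visit are illustrative names, with subject_visit standing in for whatever visit_model_attr() returns.

from django.core.exceptions import ObjectDoesNotExist

try:
    previous = MyCrfModel.objects.get(subject_visit=previous_visit)
except ObjectDoesNotExist:
    previous = None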
quantopian/zipline
zipline/pipeline/factors/factor.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L1076-L1098
def bottom(self, N, mask=NotSpecified, groupby=NotSpecified):
    """
    Construct a Filter matching the bottom N asset values of self each day.

    If ``groupby`` is supplied, returns a Filter matching the bottom N
    asset values for each group.

    Parameters
    ----------
    N : int
        Number of assets passing the returned filter each day.
    mask : zipline.pipeline.Filter, optional
        A Filter representing assets to consider when computing ranks.
        If mask is supplied, bottom values are computed ignoring any
        asset/date pairs for which `mask` produces a value of False.
    groupby : zipline.pipeline.Classifier, optional
        A classifier defining partitions over which to perform ranking.

    Returns
    -------
    filter : zipline.pipeline.Filter
    """
    return self.rank(ascending=True, mask=mask, groupby=groupby) <= N
[ "def", "bottom", "(", "self", ",", "N", ",", "mask", "=", "NotSpecified", ",", "groupby", "=", "NotSpecified", ")", ":", "return", "self", ".", "rank", "(", "ascending", "=", "True", ",", "mask", "=", "mask", ",", "groupby", "=", "groupby", ")", "<=", "N" ]
Construct a Filter matching the bottom N asset values of self each day. If ``groupby`` is supplied, returns a Filter matching the bottom N asset values for each group. Parameters ---------- N : int Number of assets passing the returned filter each day. mask : zipline.pipeline.Filter, optional A Filter representing assets to consider when computing ranks. If mask is supplied, bottom values are computed ignoring any asset/date pairs for which `mask` produces a value of False. groupby : zipline.pipeline.Classifier, optional A classifier defining partitions over which to perform ranking. Returns ------- filter : zipline.pipeline.Filter
[ "Construct", "a", "Filter", "matching", "the", "bottom", "N", "asset", "values", "of", "self", "each", "day", "." ]
python
train
40.347826
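A short Pipeline sketch using bottom(); AverageDollarVolume is a built-in zipline factor, and the window length and asset count here are arbitrary.

from zipline.pipeline import Pipeline
from zipline.pipeline.factors import AverageDollarVolume

# Keep only the 50 least-traded assets (by 10-day average dollar volume) each day.
adv = AverageDollarVolume(window_length=10)
illiquid = adv.bottom(50)
pipe = Pipeline(columns={'adv': adv}, screen=illiquid)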
javipalanca/spade
spade/message.py
https://github.com/javipalanca/spade/blob/59942bd1a1edae4c807d06cabb178d5630cbf61b/spade/message.py#L240-L278
def prepare(self):
    """
    Returns an aioxmpp.stanza.Message built from the Message and prepared to be sent.

    Returns:
      aioxmpp.stanza.Message: the message prepared to be sent
    """
    msg = aioxmpp.stanza.Message(
        to=self.to,
        from_=self.sender,
        type_=aioxmpp.MessageType.CHAT,
    )

    msg.body[None] = self.body

    # Send metadata using xep-0004: Data Forms (https://xmpp.org/extensions/xep-0004.html)
    if len(self.metadata):
        data = forms_xso.Data(type_=forms_xso.DataType.FORM)

        for name, value in self.metadata.items():
            data.fields.append(
                forms_xso.Field(
                    var=name,
                    type_=forms_xso.FieldType.TEXT_SINGLE,
                    values=[value],
                )
            )

        if self.thread:
            data.fields.append(forms_xso.Field(var="_thread_node",
                                               type_=forms_xso.FieldType.TEXT_SINGLE,
                                               values=[self.thread]))

        data.title = SPADE_X_METADATA
        msg.xep0004_data = [data]

    return msg
[ "def", "prepare", "(", "self", ")", ":", "msg", "=", "aioxmpp", ".", "stanza", ".", "Message", "(", "to", "=", "self", ".", "to", ",", "from_", "=", "self", ".", "sender", ",", "type_", "=", "aioxmpp", ".", "MessageType", ".", "CHAT", ",", ")", "msg", ".", "body", "[", "None", "]", "=", "self", ".", "body", "# Send metadata using xep-0004: Data Forms (https://xmpp.org/extensions/xep-0004.html)", "if", "len", "(", "self", ".", "metadata", ")", ":", "data", "=", "forms_xso", ".", "Data", "(", "type_", "=", "forms_xso", ".", "DataType", ".", "FORM", ")", "for", "name", ",", "value", "in", "self", ".", "metadata", ".", "items", "(", ")", ":", "data", ".", "fields", ".", "append", "(", "forms_xso", ".", "Field", "(", "var", "=", "name", ",", "type_", "=", "forms_xso", ".", "FieldType", ".", "TEXT_SINGLE", ",", "values", "=", "[", "value", "]", ",", ")", ")", "if", "self", ".", "thread", ":", "data", ".", "fields", ".", "append", "(", "forms_xso", ".", "Field", "(", "var", "=", "\"_thread_node\"", ",", "type_", "=", "forms_xso", ".", "FieldType", ".", "TEXT_SINGLE", ",", "values", "=", "[", "self", ".", "thread", "]", ")", ")", "data", ".", "title", "=", "SPADE_X_METADATA", "msg", ".", "xep0004_data", "=", "[", "data", "]", "return", "msg" ]
Returns an aioxmpp.stanza.Message built from the Message and prepared to be sent. Returns: aioxmpp.stanza.Message: the message prepared to be sent
[ "Returns", "an", "aioxmpp", ".", "stanza", ".", "Message", "built", "from", "the", "Message", "and", "prepared", "to", "be", "sent", "." ]
python
train
31.410256
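A minimal sketch of building a SPADE message and turning it into a sendable stanza; the JIDs and metadata are placeholders, and prepare() is normally invoked by the agent's send machinery rather than by hand.

from spade.message import Message

msg = Message(to="receiver@localhost",
              body="hello",
              thread="thread-1",
              metadata={"performative": "inform"})
stanza = msg.prepare()   # aioxmpp.stanza.Message carrying the body plus an XEP-0004 form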
titusjan/argos
argos/widgets/mainwindow.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/widgets/mainwindow.py#L562-L575
def _updateNonDefaultsForInspector(self, inspectorRegItem, inspector):
    """ Store the (non-default) config values for the current inspector
        in a local dictionary. This dictionary is later used to store value for
        persistence.

        This function must be called after the inspector was drawn because that
        may update some derived config values (e.g. ranges)
    """
    if inspectorRegItem and inspector:
        key = inspectorRegItem.identifier
        logger.debug("_updateNonDefaultsForInspector: {} {}"
                     .format(key, type(inspector)))
        self._inspectorsNonDefaults[key] = inspector.config.getNonDefaultsDict()
    else:
        logger.debug("_updateNonDefaultsForInspector: no inspector")
[ "def", "_updateNonDefaultsForInspector", "(", "self", ",", "inspectorRegItem", ",", "inspector", ")", ":", "if", "inspectorRegItem", "and", "inspector", ":", "key", "=", "inspectorRegItem", ".", "identifier", "logger", ".", "debug", "(", "\"_updateNonDefaultsForInspector: {} {}\"", ".", "format", "(", "key", ",", "type", "(", "inspector", ")", ")", ")", "self", ".", "_inspectorsNonDefaults", "[", "key", "]", "=", "inspector", ".", "config", ".", "getNonDefaultsDict", "(", ")", "else", ":", "logger", ".", "debug", "(", "\"_updateNonDefaultsForInspector: no inspector\"", ")" ]
Store the (non-default) config values for the current inspector in a local dictionary. This dictionary is later used to store value for persistence. This function must be called after the inspector was drawn because that may update some derived config values (e.g. ranges)
[ "Store", "the", "(", "non", "-", "default", ")", "config", "values", "for", "the", "current", "inspector", "in", "a", "local", "dictionary", ".", "This", "dictionary", "is", "later", "used", "to", "store", "value", "for", "persistence", "." ]
python
train
55.214286
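A self-contained stand-in for the bookkeeping above: keep, per inspector identifier, only the config values that differ from their defaults so they can be persisted later. The identifiers and config dictionaries are illustrative, not argos APIs.

non_defaults = {}

def remember_non_defaults(identifier, config, defaults):
    non_defaults[identifier] = {
        key: value for key, value in config.items() if defaults.get(key) != value
    }

remember_non_defaults("qt/image plot",
                      {"grid": False, "range": (0, 255)},
                      {"grid": False, "range": None})
print(non_defaults)   # {'qt/image plot': {'range': (0, 255)}}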
bfarr/kombine
examples/twoD.py
https://github.com/bfarr/kombine/blob/50c946dee5da33e7baab71d9bd6c265ff02ffb13/examples/twoD.py#L49-L55
def prior_draw(self, N=1):
    """
    Draw ``N`` samples from the prior.
    """
    p = np.random.ranf(size=(N, self.ndim))
    p = (self._upper_right - self._lower_left) * p + self._lower_left
    return p
[ "def", "prior_draw", "(", "self", ",", "N", "=", "1", ")", ":", "p", "=", "np", ".", "random", ".", "ranf", "(", "size", "=", "(", "N", ",", "self", ".", "ndim", ")", ")", "p", "=", "(", "self", ".", "_upper_right", "-", "self", ".", "_lower_left", ")", "*", "p", "+", "self", ".", "_lower_left", "return", "p" ]
Draw ``N`` samples from the prior.
[ "Draw", "N", "samples", "from", "the", "prior", "." ]
python
train
32.285714
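The affine transform inside prior_draw(), shown on its own with explicit bounds; np.random.ranf is an alias of random_sample and returns values in [0, 1), so the result is uniform over the box.

import numpy as np

lower_left = np.array([-1.0, -1.0])
upper_right = np.array([2.0, 3.0])

p = np.random.ranf(size=(5, 2))
samples = (upper_right - lower_left) * p + lower_left   # uniform draws inside the box
assert samples.shape == (5, 2)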
idlesign/uwsgiconf
uwsgiconf/options/routing_routers.py
https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/options/routing_routers.py#L814-L825
def set_connections_params(self, harakiri=None, timeout_socket=None):
    """Sets connection-related parameters.

    :param int harakiri: Set gateway harakiri timeout (seconds).

    :param int timeout_socket: Node socket timeout (seconds). Default: 60.
    """
    self._set_aliased('harakiri', harakiri)
    self._set_aliased('timeout', timeout_socket)

    return self
[ "def", "set_connections_params", "(", "self", ",", "harakiri", "=", "None", ",", "timeout_socket", "=", "None", ")", ":", "self", ".", "_set_aliased", "(", "'harakiri'", ",", "harakiri", ")", "self", ".", "_set_aliased", "(", "'timeout'", ",", "timeout_socket", ")", "return", "self" ]
Sets connection-related parameters. :param int harakiri: Set gateway harakiri timeout (seconds). :param int timeout_socket: Node socket timeout (seconds). Default: 60.
[ "Sets", "connection", "-", "related", "parameters", "." ]
python
train
32.5
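A hypothetical uwsgiconf call; router stands for whichever router section object exposes the method above, and the returned self allows chaining with further configuration calls.

# 30 s gateway harakiri, 120 s node socket timeout; returns the router itself.
router.set_connections_params(harakiri=30, timeout_socket=120)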
SheffieldML/GPy
GPy/likelihoods/exponential.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/likelihoods/exponential.py#L25-L42
def pdf_link(self, link_f, y, Y_metadata=None):
    """
    Likelihood function given link(f)

    .. math::
        p(y_{i}|\\lambda(f_{i})) = \\lambda(f_{i})\\exp (-y\\lambda(f_{i}))

    :param link_f: latent variables link(f)
    :type link_f: Nx1 array
    :param y: data
    :type y: Nx1 array
    :param Y_metadata: Y_metadata which is not used in exponential distribution
    :returns: likelihood evaluated for this point
    :rtype: float
    """
    assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
    log_objective = link_f*np.exp(-y*link_f)
    return np.exp(np.sum(np.log(log_objective)))
[ "def", "pdf_link", "(", "self", ",", "link_f", ",", "y", ",", "Y_metadata", "=", "None", ")", ":", "assert", "np", ".", "atleast_1d", "(", "link_f", ")", ".", "shape", "==", "np", ".", "atleast_1d", "(", "y", ")", ".", "shape", "log_objective", "=", "link_f", "*", "np", ".", "exp", "(", "-", "y", "*", "link_f", ")", "return", "np", ".", "exp", "(", "np", ".", "sum", "(", "np", ".", "log", "(", "log_objective", ")", ")", ")" ]
Likelihood function given link(f) .. math:: p(y_{i}|\\lambda(f_{i})) = \\lambda(f_{i})\\exp (-y\\lambda(f_{i})) :param link_f: latent variables link(f) :type link_f: Nx1 array :param y: data :type y: Nx1 array :param Y_metadata: Y_metadata which is not used in exponential distribution :returns: likelihood evaluated for this point :rtype: float
[ "Likelihood", "function", "given", "link", "(", "f", ")" ]
python
train
36.5
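The same product of exponential densities, computed directly with NumPy for a made-up rate/observation pair; this is the quantity pdf_link() returns.

import numpy as np

link_f = np.array([0.5, 1.0, 2.0])   # rates lambda(f_i)
y = np.array([1.0, 0.5, 0.1])

pdf = np.prod(link_f * np.exp(-y * link_f))   # product of per-point exponential densities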
aktaylor08/RosbagPandas
src/rosbag_pandas/rosbag_pandas.py
https://github.com/aktaylor08/RosbagPandas/blob/c2af9f22537102696dffdf2e61790362726a8403/src/rosbag_pandas/rosbag_pandas.py#L218-L229
def get_topics(yaml_info):
    '''
    Returns the names of all of the topics in the bag, and prints them
    to stdout if requested
    '''
    # Pull out the topic info
    names = []
    # Store all of the topics in a dictionary
    topics = yaml_info['topics']
    for topic in topics:
        names.append(topic['topic'])

    return names
[ "def", "get_topics", "(", "yaml_info", ")", ":", "# Pull out the topic info", "names", "=", "[", "]", "# Store all of the topics in a dictionary", "topics", "=", "yaml_info", "[", "'topics'", "]", "for", "topic", "in", "topics", ":", "names", ".", "append", "(", "topic", "[", "'topic'", "]", ")", "return", "names" ]
Returns the names of all of the topics in the bag, and prints them to stdout if requested
[ "Returns", "the", "names", "of", "all", "of", "the", "topics", "in", "the", "bag", "and", "prints", "them", "to", "stdout", "if", "requested" ]
python
train
27.75
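A tiny example of the yaml_info structure this helper expects (the parsed `rosbag info`-style YAML), with made-up topic names and message counts.

yaml_info = {
    'topics': [
        {'topic': '/odom', 'messages': 120},
        {'topic': '/scan', 'messages': 60},
    ]
}
print(get_topics(yaml_info))   # ['/odom', '/scan']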
cloudera/cm_api
python/examples/bulk_config_update.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/examples/bulk_config_update.py#L108-L118
def do_bulk_config_update(hostnames):
    """
    Given a list of hostnames, update the configs of all the datanodes,
    tasktrackers and regionservers on those hosts.
    """
    api = ApiResource(CM_HOST, username=CM_USER, password=CM_PASSWD)
    hosts = collect_hosts(api, hostnames)

    # Set config
    for h in hosts:
        configure_roles_on_host(api, h)
[ "def", "do_bulk_config_update", "(", "hostnames", ")", ":", "api", "=", "ApiResource", "(", "CM_HOST", ",", "username", "=", "CM_USER", ",", "password", "=", "CM_PASSWD", ")", "hosts", "=", "collect_hosts", "(", "api", ",", "hostnames", ")", "# Set config", "for", "h", "in", "hosts", ":", "configure_roles_on_host", "(", "api", ",", "h", ")" ]
Given a list of hostnames, update the configs of all the datanodes, tasktrackers and regionservers on those hosts.
[ "Given", "a", "list", "of", "hostnames", "update", "the", "configs", "of", "all", "the", "datanodes", "tasktrackers", "and", "regionservers", "on", "those", "hosts", "." ]
python
train
30.454545
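A hypothetical invocation, assuming CM_HOST, CM_USER and CM_PASSWD are defined at module level as in the original example script; the hostnames are placeholders.

do_bulk_config_update([
    'worker-01.example.com',
    'worker-02.example.com',
])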
daler/gffutils
gffutils/interface.py
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/interface.py#L935-L943
def _insert(self, feature, cursor):
    """
    Insert a feature into the database.
    """
    try:
        cursor.execute(constants._INSERT, feature.astuple())
    except sqlite3.ProgrammingError:
        cursor.execute(
            constants._INSERT, feature.astuple(self.default_encoding))
[ "def", "_insert", "(", "self", ",", "feature", ",", "cursor", ")", ":", "try", ":", "cursor", ".", "execute", "(", "constants", ".", "_INSERT", ",", "feature", ".", "astuple", "(", ")", ")", "except", "sqlite3", ".", "ProgrammingError", ":", "cursor", ".", "execute", "(", "constants", ".", "_INSERT", ",", "feature", ".", "astuple", "(", "self", ".", "default_encoding", ")", ")" ]
Insert a feature into the database.
[ "Insert", "a", "feature", "into", "the", "database", "." ]
python
train
35.222222
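The retry-on-encoding shape of _insert(), reduced to a self-contained sqlite3 sketch; the table, statement and fallback encoder are illustrative, not gffutils internals.

import sqlite3

INSERT = "INSERT INTO features VALUES (?, ?)"   # stand-in for constants._INSERT

def insert_row(cursor, row, reencode):
    try:
        cursor.execute(INSERT, row)
    except sqlite3.ProgrammingError:
        # Retry with explicitly encoded values, as _insert() does with
        # feature.astuple(self.default_encoding).
        cursor.execute(INSERT, reencode(row))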
cloudmesh/cloudmesh-common
cloudmesh/common/Shell.py
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Shell.py#L636-L652
def terminal_type(cls):
    """
    returns darwin, cygwin, cmd, or linux
    """
    what = sys.platform

    kind = 'UNDEFINED_TERMINAL_TYPE'
    if 'linux' in what:
        kind = 'linux'
    elif 'darwin' in what:
        kind = 'darwin'
    elif 'cygwin' in what:
        kind = 'cygwin'
    elif 'windows' in what:
        kind = 'windows'

    return kind
[ "def", "terminal_type", "(", "cls", ")", ":", "what", "=", "sys", ".", "platform", "kind", "=", "'UNDEFINED_TERMINAL_TYPE'", "if", "'linux'", "in", "what", ":", "kind", "=", "'linux'", "elif", "'darwin'", "in", "what", ":", "kind", "=", "'darwin'", "elif", "'cygwin'", "in", "what", ":", "kind", "=", "'cygwin'", "elif", "'windows'", "in", "what", ":", "kind", "=", "'windows'", "return", "kind" ]
returns darwin, cygwin, cmd, or linux
[ "returns", "darwin", "cygwin", "cmd", "or", "linux" ]
python
train
23.705882
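Calling the classmethod directly, assuming the class in the module shown above is named Shell; note that sys.platform reports 'win32' on Windows, so the 'windows' branch above will not normally be reached there.

from cloudmesh.common.Shell import Shell

print(Shell.terminal_type())   # e.g. 'linux', 'darwin' or 'cygwin'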
mbr/flask-nav
flask_nav/__init__.py
https://github.com/mbr/flask-nav/blob/06f3b5b2addad29c2fc531a7e8e74958e9e4b793/flask_nav/__init__.py#L114-L128
def navigation(self, id=None):
    """Function decorator for navbar registration.

    Convenience function, calls :meth:`.register_element` with ``id`` and
    the decorated function as ``elem``.

    :param id: ID to pass on. If ``None``, uses the decorated functions
               name.
    """

    def wrapper(f):
        self.register_element(id or f.__name__, f)
        return f

    return wrapper
[ "def", "navigation", "(", "self", ",", "id", "=", "None", ")", ":", "def", "wrapper", "(", "f", ")", ":", "self", ".", "register_element", "(", "id", "or", "f", ".", "__name__", ",", "f", ")", "return", "f", "return", "wrapper" ]
Function decorator for navbar registration. Convenience function, calls :meth:`.register_element` with ``id`` and the decorated function as ``elem``. :param id: ID to pass on. If ``None``, uses the decorated functions name.
[ "Function", "decorator", "for", "navbar", "registration", "." ]
python
train
28.866667
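Standard flask-nav usage of the decorator: register a navbar-producing function under its own name. Navbar and View come from flask_nav.elements; the endpoint name 'index' is a placeholder for a real Flask view.

from flask_nav import Nav
from flask_nav.elements import Navbar, View

nav = Nav()

@nav.navigation()
def top_nav():
    # Registered under the element id 'top_nav' because no id was passed.
    return Navbar('My Site', View('Home', 'index'))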
tensorflow/probability
tensorflow_probability/python/sts/fitting.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/fitting.py#L92-L264
def build_factored_variational_loss(model, observed_time_series, init_batch_shape=(), seed=None, name=None): """Build a loss function for variational inference in STS models. Variational inference searches for the distribution within some family of approximate posteriors that minimizes a divergence between the approximate posterior `q(z)` and true posterior `p(z|observed_time_series)`. By converting inference to optimization, it's generally much faster than sampling-based inference algorithms such as HMC. The tradeoff is that the approximating family rarely contains the true posterior, so it may miss important aspects of posterior structure (in particular, dependence between variables) and should not be blindly trusted. Results may vary; it's generally wise to compare to HMC to evaluate whether inference quality is sufficient for your task at hand. This method constructs a loss function for variational inference using the Kullback-Liebler divergence `KL[q(z) || p(z|observed_time_series)]`, with an approximating family given by independent Normal distributions transformed to the appropriate parameter space for each parameter. Minimizing this loss (the negative ELBO) maximizes a lower bound on the log model evidence `-log p(observed_time_series)`. This is equivalent to the 'mean-field' method implemented in [1]. and is a standard approach. The resulting posterior approximations are unimodal; they will tend to underestimate posterior uncertainty when the true posterior contains multiple modes (the `KL[q||p]` divergence encourages choosing a single mode) or dependence between variables. Args: model: An instance of `StructuralTimeSeries` representing a time-series model. This represents a joint distribution over time-series and their parameters with batch shape `[b1, ..., bN]`. observed_time_series: `float` `Tensor` of shape `concat([sample_shape, model.batch_shape, [num_timesteps, 1]]) where `sample_shape` corresponds to i.i.d. observations, and the trailing `[1]` dimension may (optionally) be omitted if `num_timesteps > 1`. May optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes a mask `Tensor` to specify timesteps with missing observations. init_batch_shape: Batch shape (Python `tuple`, `list`, or `int`) of initial states to optimize in parallel. Default value: `()`. (i.e., just run a single optimization). seed: Python integer to seed the random number generator. name: Python `str` name prefixed to ops created by this function. Default value: `None` (i.e., 'build_factored_variational_loss'). Returns: variational_loss: `float` `Tensor` of shape `concat([init_batch_shape, model.batch_shape])`, encoding a stochastic estimate of an upper bound on the negative model evidence `-log p(y)`. Minimizing this loss performs variational inference; the gap between the variational bound and the true (generally unknown) model evidence corresponds to the divergence `KL[q||p]` between the approximate and true posterior. variational_distributions: `collections.OrderedDict` giving the approximate posterior for each model parameter. The keys are Python `str` parameter names in order, corresponding to `[param.name for param in model.parameters]`. The values are `tfd.Distribution` instances with batch shape `concat([init_batch_shape, model.batch_shape])`; these will typically be of the form `tfd.TransformedDistribution(tfd.Normal(...), bijector=param.bijector)`. 
#### Examples Assume we've built a structural time-series model: ```python day_of_week = tfp.sts.Seasonal( num_seasons=7, observed_time_series=observed_time_series, name='day_of_week') local_linear_trend = tfp.sts.LocalLinearTrend( observed_time_series=observed_time_series, name='local_linear_trend') model = tfp.sts.Sum(components=[day_of_week, local_linear_trend], observed_time_series=observed_time_series) ``` To run variational inference, we simply construct the loss and optimize it: ```python (variational_loss, variational_distributions) = tfp.sts.build_factored_variational_loss( model=model, observed_time_series=observed_time_series) train_op = tf.train.AdamOptimizer(0.1).minimize(variational_loss) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for step in range(200): _, loss_ = sess.run((train_op, variational_loss)) if step % 20 == 0: print("step {} loss {}".format(step, loss_)) posterior_samples_ = sess.run({ param_name: q.sample(50) for param_name, q in variational_distributions.items()}) ``` As a more complex example, we might try to avoid local optima by optimizing from multiple initializations in parallel, and selecting the result with the lowest loss: ```python (variational_loss, variational_distributions) = tfp.sts.build_factored_variational_loss( model=model, observed_time_series=observed_time_series, init_batch_shape=[10]) train_op = tf.train.AdamOptimizer(0.1).minimize(variational_loss) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for step in range(200): _, loss_ = sess.run((train_op, variational_loss)) if step % 20 == 0: print("step {} losses {}".format(step, loss_)) # Draw multiple samples to reduce Monte Carlo error in the optimized # variational bounds. avg_loss = np.mean( [sess.run(variational_loss) for _ in range(25)], axis=0) best_posterior_idx = np.argmin(avg_loss, axis=0).astype(np.int32) ``` #### References [1]: Alp Kucukelbir, Dustin Tran, Rajesh Ranganath, Andrew Gelman, and David M. Blei. Automatic Differentiation Variational Inference. In _Journal of Machine Learning Research_, 2017. https://arxiv.org/abs/1603.00788 """ with tf.compat.v1.name_scope( name, 'build_factored_variational_loss', values=[observed_time_series]) as name: seed = tfd.SeedStream( seed, salt='StructuralTimeSeries_build_factored_variational_loss') variational_distributions = collections.OrderedDict() variational_samples = [] for param in model.parameters: def initial_loc_fn(param): return sample_uniform_initial_state( param, return_constrained=True, init_sample_shape=init_batch_shape, seed=seed()) q = _build_trainable_posterior(param, initial_loc_fn=initial_loc_fn) variational_distributions[param.name] = q variational_samples.append(q.sample(seed=seed())) # Multiple initializations (similar to HMC chains) manifest as an extra # param batch dimension, so we need to add corresponding batch dimension(s) # to `observed_time_series`. observed_time_series = sts_util.pad_batch_dimension_for_multiple_chains( observed_time_series, model, chain_batch_shape=init_batch_shape) # Construct the variational bound. log_prob_fn = model.joint_log_prob(observed_time_series) expected_log_joint = log_prob_fn(*variational_samples) entropy = tf.reduce_sum( input_tensor=[ -q.log_prob(sample) for (q, sample) in zip( variational_distributions.values(), variational_samples) ], axis=0) variational_loss = -(expected_log_joint + entropy) # -ELBO return variational_loss, variational_distributions
[ "def", "build_factored_variational_loss", "(", "model", ",", "observed_time_series", ",", "init_batch_shape", "=", "(", ")", ",", "seed", "=", "None", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'build_factored_variational_loss'", ",", "values", "=", "[", "observed_time_series", "]", ")", "as", "name", ":", "seed", "=", "tfd", ".", "SeedStream", "(", "seed", ",", "salt", "=", "'StructuralTimeSeries_build_factored_variational_loss'", ")", "variational_distributions", "=", "collections", ".", "OrderedDict", "(", ")", "variational_samples", "=", "[", "]", "for", "param", "in", "model", ".", "parameters", ":", "def", "initial_loc_fn", "(", "param", ")", ":", "return", "sample_uniform_initial_state", "(", "param", ",", "return_constrained", "=", "True", ",", "init_sample_shape", "=", "init_batch_shape", ",", "seed", "=", "seed", "(", ")", ")", "q", "=", "_build_trainable_posterior", "(", "param", ",", "initial_loc_fn", "=", "initial_loc_fn", ")", "variational_distributions", "[", "param", ".", "name", "]", "=", "q", "variational_samples", ".", "append", "(", "q", ".", "sample", "(", "seed", "=", "seed", "(", ")", ")", ")", "# Multiple initializations (similar to HMC chains) manifest as an extra", "# param batch dimension, so we need to add corresponding batch dimension(s)", "# to `observed_time_series`.", "observed_time_series", "=", "sts_util", ".", "pad_batch_dimension_for_multiple_chains", "(", "observed_time_series", ",", "model", ",", "chain_batch_shape", "=", "init_batch_shape", ")", "# Construct the variational bound.", "log_prob_fn", "=", "model", ".", "joint_log_prob", "(", "observed_time_series", ")", "expected_log_joint", "=", "log_prob_fn", "(", "*", "variational_samples", ")", "entropy", "=", "tf", ".", "reduce_sum", "(", "input_tensor", "=", "[", "-", "q", ".", "log_prob", "(", "sample", ")", "for", "(", "q", ",", "sample", ")", "in", "zip", "(", "variational_distributions", ".", "values", "(", ")", ",", "variational_samples", ")", "]", ",", "axis", "=", "0", ")", "variational_loss", "=", "-", "(", "expected_log_joint", "+", "entropy", ")", "# -ELBO", "return", "variational_loss", ",", "variational_distributions" ]
Build a loss function for variational inference in STS models. Variational inference searches for the distribution within some family of approximate posteriors that minimizes a divergence between the approximate posterior `q(z)` and true posterior `p(z|observed_time_series)`. By converting inference to optimization, it's generally much faster than sampling-based inference algorithms such as HMC. The tradeoff is that the approximating family rarely contains the true posterior, so it may miss important aspects of posterior structure (in particular, dependence between variables) and should not be blindly trusted. Results may vary; it's generally wise to compare to HMC to evaluate whether inference quality is sufficient for your task at hand. This method constructs a loss function for variational inference using the Kullback-Liebler divergence `KL[q(z) || p(z|observed_time_series)]`, with an approximating family given by independent Normal distributions transformed to the appropriate parameter space for each parameter. Minimizing this loss (the negative ELBO) maximizes a lower bound on the log model evidence `-log p(observed_time_series)`. This is equivalent to the 'mean-field' method implemented in [1]. and is a standard approach. The resulting posterior approximations are unimodal; they will tend to underestimate posterior uncertainty when the true posterior contains multiple modes (the `KL[q||p]` divergence encourages choosing a single mode) or dependence between variables. Args: model: An instance of `StructuralTimeSeries` representing a time-series model. This represents a joint distribution over time-series and their parameters with batch shape `[b1, ..., bN]`. observed_time_series: `float` `Tensor` of shape `concat([sample_shape, model.batch_shape, [num_timesteps, 1]]) where `sample_shape` corresponds to i.i.d. observations, and the trailing `[1]` dimension may (optionally) be omitted if `num_timesteps > 1`. May optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes a mask `Tensor` to specify timesteps with missing observations. init_batch_shape: Batch shape (Python `tuple`, `list`, or `int`) of initial states to optimize in parallel. Default value: `()`. (i.e., just run a single optimization). seed: Python integer to seed the random number generator. name: Python `str` name prefixed to ops created by this function. Default value: `None` (i.e., 'build_factored_variational_loss'). Returns: variational_loss: `float` `Tensor` of shape `concat([init_batch_shape, model.batch_shape])`, encoding a stochastic estimate of an upper bound on the negative model evidence `-log p(y)`. Minimizing this loss performs variational inference; the gap between the variational bound and the true (generally unknown) model evidence corresponds to the divergence `KL[q||p]` between the approximate and true posterior. variational_distributions: `collections.OrderedDict` giving the approximate posterior for each model parameter. The keys are Python `str` parameter names in order, corresponding to `[param.name for param in model.parameters]`. The values are `tfd.Distribution` instances with batch shape `concat([init_batch_shape, model.batch_shape])`; these will typically be of the form `tfd.TransformedDistribution(tfd.Normal(...), bijector=param.bijector)`. 
#### Examples Assume we've built a structural time-series model: ```python day_of_week = tfp.sts.Seasonal( num_seasons=7, observed_time_series=observed_time_series, name='day_of_week') local_linear_trend = tfp.sts.LocalLinearTrend( observed_time_series=observed_time_series, name='local_linear_trend') model = tfp.sts.Sum(components=[day_of_week, local_linear_trend], observed_time_series=observed_time_series) ``` To run variational inference, we simply construct the loss and optimize it: ```python (variational_loss, variational_distributions) = tfp.sts.build_factored_variational_loss( model=model, observed_time_series=observed_time_series) train_op = tf.train.AdamOptimizer(0.1).minimize(variational_loss) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for step in range(200): _, loss_ = sess.run((train_op, variational_loss)) if step % 20 == 0: print("step {} loss {}".format(step, loss_)) posterior_samples_ = sess.run({ param_name: q.sample(50) for param_name, q in variational_distributions.items()}) ``` As a more complex example, we might try to avoid local optima by optimizing from multiple initializations in parallel, and selecting the result with the lowest loss: ```python (variational_loss, variational_distributions) = tfp.sts.build_factored_variational_loss( model=model, observed_time_series=observed_time_series, init_batch_shape=[10]) train_op = tf.train.AdamOptimizer(0.1).minimize(variational_loss) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for step in range(200): _, loss_ = sess.run((train_op, variational_loss)) if step % 20 == 0: print("step {} losses {}".format(step, loss_)) # Draw multiple samples to reduce Monte Carlo error in the optimized # variational bounds. avg_loss = np.mean( [sess.run(variational_loss) for _ in range(25)], axis=0) best_posterior_idx = np.argmin(avg_loss, axis=0).astype(np.int32) ``` #### References [1]: Alp Kucukelbir, Dustin Tran, Rajesh Ranganath, Andrew Gelman, and David M. Blei. Automatic Differentiation Variational Inference. In _Journal of Machine Learning Research_, 2017. https://arxiv.org/abs/1603.00788
[ "Build", "a", "loss", "function", "for", "variational", "inference", "in", "STS", "models", "." ]
python
test
44.063584
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L778-L785
def run_timed(self, **kwargs):
    """
    Run the motor for the amount of time specified in `time_sp`
    and then stop the motor using the action specified by `stop_action`.
    """
    for key in kwargs:
        setattr(self, key, kwargs[key])
    self.command = self.COMMAND_RUN_TIMED
[ "def", "run_timed", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "key", "in", "kwargs", ":", "setattr", "(", "self", ",", "key", ",", "kwargs", "[", "key", "]", ")", "self", ".", "command", "=", "self", ".", "COMMAND_RUN_TIMED" ]
Run the motor for the amount of time specified in `time_sp` and then stop the motor using the action specified by `stop_action`.
[ "Run", "the", "motor", "for", "the", "amount", "of", "time", "specified", "in", "time_sp", "and", "then", "stop", "the", "motor", "using", "the", "action", "specified", "by", "stop_action", "." ]
python
train
38.625
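A hedged EV3 sketch: run a large motor on port A for two seconds. time_sp is in milliseconds and speed_sp in tacho counts per second; the port and values are illustrative.

from ev3dev2.motor import LargeMotor, OUTPUT_A

motor = LargeMotor(OUTPUT_A)
motor.run_timed(time_sp=2000, speed_sp=500, stop_action='brake')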
apragacz/django-rest-registration
rest_registration/notifications/email.py
https://github.com/apragacz/django-rest-registration/blob/7373571264dd567c2a73a97ff4c45b64f113605b/rest_registration/notifications/email.py#L60-L196
def parse_template_config(template_config_data): """ >>> from tests import doctest_utils >>> convert_html_to_text = registration_settings.VERIFICATION_EMAIL_HTML_TO_TEXT_CONVERTER # noqa: E501 >>> parse_template_config({}) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ImproperlyConfigured >>> parse_template_config({ ... 'subject': 'blah', ... }) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ImproperlyConfigured >>> parse_template_config({ ... 'subject': 'blah', ... 'body': 'blah', ... }) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ImproperlyConfigured >>> doctest_utils.equals( ... parse_template_config({ ... 'subject': 'rest_registration/register/subject.txt', ... 'html_body': 'rest_registration/register/body.html', ... 'text_body': 'rest_registration/register/body.txt', ... }), ... EmailTemplateConfig( ... 'rest_registration/register/subject.txt', ... 'rest_registration/register/body.txt', ... 'rest_registration/register/body.html', ... identity)) OK >>> doctest_utils.equals( ... parse_template_config({ ... 'subject': 'rest_registration/register/subject.txt', ... 'html_body': 'rest_registration/register/body.html', ... }), ... EmailTemplateConfig( ... 'rest_registration/register/subject.txt', ... 'rest_registration/register/body.html', ... 'rest_registration/register/body.html', ... convert_html_to_text)) OK >>> doctest_utils.equals( ... parse_template_config({ ... 'subject': 'rest_registration/register/subject.txt', ... 'text_body': 'rest_registration/register/body.txt', ... }), ... EmailTemplateConfig( ... 'rest_registration/register/subject.txt', ... 'rest_registration/register/body.txt', None, ... identity)) OK >>> doctest_utils.equals( ... parse_template_config({ ... 'subject': 'rest_registration/register/subject.txt', ... 'body': 'rest_registration/register/body.txt', ... }), ... EmailTemplateConfig( ... 'rest_registration/register/subject.txt', ... 'rest_registration/register/body.txt', None, ... identity)) OK >>> doctest_utils.equals( ... parse_template_config({ ... 'subject': 'rest_registration/register/subject.txt', ... 'body': 'rest_registration/register/body.html', ... 'is_html': True, ... }), ... EmailTemplateConfig( ... 'rest_registration/register/subject.txt', ... 'rest_registration/register/body.html', ... 'rest_registration/register/body.html', ... 
convert_html_to_text)) OK """ try: subject_template_name = template_config_data['subject'] except KeyError: raise ImproperlyConfigured("No 'subject' key found") body_template_name = template_config_data.get('body') text_body_template_name = template_config_data.get('text_body') html_body_template_name = template_config_data.get('html_body') is_html_body = template_config_data.get('is_html') convert_html_to_text = registration_settings.VERIFICATION_EMAIL_HTML_TO_TEXT_CONVERTER # noqa: E501 if html_body_template_name and text_body_template_name: config = EmailTemplateConfig( subject_template_name=subject_template_name, text_body_template_name=text_body_template_name, html_body_template_name=html_body_template_name, text_body_processor=identity, ) elif html_body_template_name: config = EmailTemplateConfig( subject_template_name=subject_template_name, text_body_template_name=html_body_template_name, html_body_template_name=html_body_template_name, text_body_processor=convert_html_to_text, ) elif text_body_template_name: config = EmailTemplateConfig( subject_template_name=subject_template_name, text_body_template_name=text_body_template_name, html_body_template_name=None, text_body_processor=identity, ) elif body_template_name: if is_html_body: config = EmailTemplateConfig( subject_template_name=subject_template_name, text_body_template_name=body_template_name, html_body_template_name=body_template_name, text_body_processor=convert_html_to_text, ) else: config = EmailTemplateConfig( subject_template_name=subject_template_name, text_body_template_name=body_template_name, html_body_template_name=None, text_body_processor=identity, ) else: raise ImproperlyConfigured( 'Could not parse template config data: {template_config_data}'.format( # noqa: E501 template_config_data=template_config_data)) _validate_template_name_existence(config.subject_template_name) _validate_template_name_existence(config.text_body_template_name) if config.html_body_template_name: _validate_template_name_existence(config.html_body_template_name) assert callable(config.text_body_processor) return config
[ "def", "parse_template_config", "(", "template_config_data", ")", ":", "try", ":", "subject_template_name", "=", "template_config_data", "[", "'subject'", "]", "except", "KeyError", ":", "raise", "ImproperlyConfigured", "(", "\"No 'subject' key found\"", ")", "body_template_name", "=", "template_config_data", ".", "get", "(", "'body'", ")", "text_body_template_name", "=", "template_config_data", ".", "get", "(", "'text_body'", ")", "html_body_template_name", "=", "template_config_data", ".", "get", "(", "'html_body'", ")", "is_html_body", "=", "template_config_data", ".", "get", "(", "'is_html'", ")", "convert_html_to_text", "=", "registration_settings", ".", "VERIFICATION_EMAIL_HTML_TO_TEXT_CONVERTER", "# noqa: E501", "if", "html_body_template_name", "and", "text_body_template_name", ":", "config", "=", "EmailTemplateConfig", "(", "subject_template_name", "=", "subject_template_name", ",", "text_body_template_name", "=", "text_body_template_name", ",", "html_body_template_name", "=", "html_body_template_name", ",", "text_body_processor", "=", "identity", ",", ")", "elif", "html_body_template_name", ":", "config", "=", "EmailTemplateConfig", "(", "subject_template_name", "=", "subject_template_name", ",", "text_body_template_name", "=", "html_body_template_name", ",", "html_body_template_name", "=", "html_body_template_name", ",", "text_body_processor", "=", "convert_html_to_text", ",", ")", "elif", "text_body_template_name", ":", "config", "=", "EmailTemplateConfig", "(", "subject_template_name", "=", "subject_template_name", ",", "text_body_template_name", "=", "text_body_template_name", ",", "html_body_template_name", "=", "None", ",", "text_body_processor", "=", "identity", ",", ")", "elif", "body_template_name", ":", "if", "is_html_body", ":", "config", "=", "EmailTemplateConfig", "(", "subject_template_name", "=", "subject_template_name", ",", "text_body_template_name", "=", "body_template_name", ",", "html_body_template_name", "=", "body_template_name", ",", "text_body_processor", "=", "convert_html_to_text", ",", ")", "else", ":", "config", "=", "EmailTemplateConfig", "(", "subject_template_name", "=", "subject_template_name", ",", "text_body_template_name", "=", "body_template_name", ",", "html_body_template_name", "=", "None", ",", "text_body_processor", "=", "identity", ",", ")", "else", ":", "raise", "ImproperlyConfigured", "(", "'Could not parse template config data: {template_config_data}'", ".", "format", "(", "# noqa: E501", "template_config_data", "=", "template_config_data", ")", ")", "_validate_template_name_existence", "(", "config", ".", "subject_template_name", ")", "_validate_template_name_existence", "(", "config", ".", "text_body_template_name", ")", "if", "config", ".", "html_body_template_name", ":", "_validate_template_name_existence", "(", "config", ".", "html_body_template_name", ")", "assert", "callable", "(", "config", ".", "text_body_processor", ")", "return", "config" ]
>>> from tests import doctest_utils >>> convert_html_to_text = registration_settings.VERIFICATION_EMAIL_HTML_TO_TEXT_CONVERTER # noqa: E501 >>> parse_template_config({}) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ImproperlyConfigured >>> parse_template_config({ ... 'subject': 'blah', ... }) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ImproperlyConfigured >>> parse_template_config({ ... 'subject': 'blah', ... 'body': 'blah', ... }) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ImproperlyConfigured >>> doctest_utils.equals( ... parse_template_config({ ... 'subject': 'rest_registration/register/subject.txt', ... 'html_body': 'rest_registration/register/body.html', ... 'text_body': 'rest_registration/register/body.txt', ... }), ... EmailTemplateConfig( ... 'rest_registration/register/subject.txt', ... 'rest_registration/register/body.txt', ... 'rest_registration/register/body.html', ... identity)) OK >>> doctest_utils.equals( ... parse_template_config({ ... 'subject': 'rest_registration/register/subject.txt', ... 'html_body': 'rest_registration/register/body.html', ... }), ... EmailTemplateConfig( ... 'rest_registration/register/subject.txt', ... 'rest_registration/register/body.html', ... 'rest_registration/register/body.html', ... convert_html_to_text)) OK >>> doctest_utils.equals( ... parse_template_config({ ... 'subject': 'rest_registration/register/subject.txt', ... 'text_body': 'rest_registration/register/body.txt', ... }), ... EmailTemplateConfig( ... 'rest_registration/register/subject.txt', ... 'rest_registration/register/body.txt', None, ... identity)) OK >>> doctest_utils.equals( ... parse_template_config({ ... 'subject': 'rest_registration/register/subject.txt', ... 'body': 'rest_registration/register/body.txt', ... }), ... EmailTemplateConfig( ... 'rest_registration/register/subject.txt', ... 'rest_registration/register/body.txt', None, ... identity)) OK >>> doctest_utils.equals( ... parse_template_config({ ... 'subject': 'rest_registration/register/subject.txt', ... 'body': 'rest_registration/register/body.html', ... 'is_html': True, ... }), ... EmailTemplateConfig( ... 'rest_registration/register/subject.txt', ... 'rest_registration/register/body.html', ... 'rest_registration/register/body.html', ... convert_html_to_text)) OK
[ ">>>", "from", "tests", "import", "doctest_utils", ">>>", "convert_html_to_text", "=", "registration_settings", ".", "VERIFICATION_EMAIL_HTML_TO_TEXT_CONVERTER", "#", "noqa", ":", "E501", ">>>", "parse_template_config", "(", "{}", ")", "#", "doctest", ":", "+", "IGNORE_EXCEPTION_DETAIL", "Traceback", "(", "most", "recent", "call", "last", ")", ":", "...", "ImproperlyConfigured", ">>>", "parse_template_config", "(", "{", "...", "subject", ":", "blah", "...", "}", ")", "#", "doctest", ":", "+", "IGNORE_EXCEPTION_DETAIL", "Traceback", "(", "most", "recent", "call", "last", ")", ":", "...", "ImproperlyConfigured", ">>>", "parse_template_config", "(", "{", "...", "subject", ":", "blah", "...", "body", ":", "blah", "...", "}", ")", "#", "doctest", ":", "+", "IGNORE_EXCEPTION_DETAIL", "Traceback", "(", "most", "recent", "call", "last", ")", ":", "...", "ImproperlyConfigured", ">>>", "doctest_utils", ".", "equals", "(", "...", "parse_template_config", "(", "{", "...", "subject", ":", "rest_registration", "/", "register", "/", "subject", ".", "txt", "...", "html_body", ":", "rest_registration", "/", "register", "/", "body", ".", "html", "...", "text_body", ":", "rest_registration", "/", "register", "/", "body", ".", "txt", "...", "}", ")", "...", "EmailTemplateConfig", "(", "...", "rest_registration", "/", "register", "/", "subject", ".", "txt", "...", "rest_registration", "/", "register", "/", "body", ".", "txt", "...", "rest_registration", "/", "register", "/", "body", ".", "html", "...", "identity", "))", "OK", ">>>", "doctest_utils", ".", "equals", "(", "...", "parse_template_config", "(", "{", "...", "subject", ":", "rest_registration", "/", "register", "/", "subject", ".", "txt", "...", "html_body", ":", "rest_registration", "/", "register", "/", "body", ".", "html", "...", "}", ")", "...", "EmailTemplateConfig", "(", "...", "rest_registration", "/", "register", "/", "subject", ".", "txt", "...", "rest_registration", "/", "register", "/", "body", ".", "html", "...", "rest_registration", "/", "register", "/", "body", ".", "html", "...", "convert_html_to_text", "))", "OK", ">>>", "doctest_utils", ".", "equals", "(", "...", "parse_template_config", "(", "{", "...", "subject", ":", "rest_registration", "/", "register", "/", "subject", ".", "txt", "...", "text_body", ":", "rest_registration", "/", "register", "/", "body", ".", "txt", "...", "}", ")", "...", "EmailTemplateConfig", "(", "...", "rest_registration", "/", "register", "/", "subject", ".", "txt", "...", "rest_registration", "/", "register", "/", "body", ".", "txt", "None", "...", "identity", "))", "OK", ">>>", "doctest_utils", ".", "equals", "(", "...", "parse_template_config", "(", "{", "...", "subject", ":", "rest_registration", "/", "register", "/", "subject", ".", "txt", "...", "body", ":", "rest_registration", "/", "register", "/", "body", ".", "txt", "...", "}", ")", "...", "EmailTemplateConfig", "(", "...", "rest_registration", "/", "register", "/", "subject", ".", "txt", "...", "rest_registration", "/", "register", "/", "body", ".", "txt", "None", "...", "identity", "))", "OK", ">>>", "doctest_utils", ".", "equals", "(", "...", "parse_template_config", "(", "{", "...", "subject", ":", "rest_registration", "/", "register", "/", "subject", ".", "txt", "...", "body", ":", "rest_registration", "/", "register", "/", "body", ".", "html", "...", "is_html", ":", "True", "...", "}", ")", "...", "EmailTemplateConfig", "(", "...", "rest_registration", "/", "register", "/", "subject", ".", "txt", "...", "rest_registration", "/", 
"register", "/", "body", ".", "html", "...", "rest_registration", "/", "register", "/", "body", ".", "html", "...", "convert_html_to_text", "))", "OK" ]
python
train
40.167883
pief/python-netsnmpagent
examples/threading_agent.py
https://github.com/pief/python-netsnmpagent/blob/b1aad1c7f034509c40d9ab17d59be32e809bd31d/examples/threading_agent.py#L131-L157
def UpdateSNMPObjs(): """ Function that does the actual data update. """ global threadingString LogMsg("Beginning data update.") data = "" # Obtain the data by calling an external command. We don't use # subprocess.check_output() here for compatibility with Python versions # older than 2.7. LogMsg("Calling external command \"sleep 5; date\".") proc = subprocess.Popen( "sleep 5; date", shell=True, env={ "LANG": "C" }, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) output = proc.communicate()[0].splitlines()[0] rc = proc.poll() if rc != 0: LogMsg("An error occured executing the command: {0}".format(output)) return msg = "Updating \"threadingString\" object with data \"{0}\"." LogMsg(msg.format(output)) threadingString.update(output) LogMsg("Data update done, exiting thread.")
[ "def", "UpdateSNMPObjs", "(", ")", ":", "global", "threadingString", "LogMsg", "(", "\"Beginning data update.\"", ")", "data", "=", "\"\"", "# Obtain the data by calling an external command. We don't use", "# subprocess.check_output() here for compatibility with Python versions", "# older than 2.7.", "LogMsg", "(", "\"Calling external command \\\"sleep 5; date\\\".\"", ")", "proc", "=", "subprocess", ".", "Popen", "(", "\"sleep 5; date\"", ",", "shell", "=", "True", ",", "env", "=", "{", "\"LANG\"", ":", "\"C\"", "}", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "output", "=", "proc", ".", "communicate", "(", ")", "[", "0", "]", ".", "splitlines", "(", ")", "[", "0", "]", "rc", "=", "proc", ".", "poll", "(", ")", "if", "rc", "!=", "0", ":", "LogMsg", "(", "\"An error occured executing the command: {0}\"", ".", "format", "(", "output", ")", ")", "return", "msg", "=", "\"Updating \\\"threadingString\\\" object with data \\\"{0}\\\".\"", "LogMsg", "(", "msg", ".", "format", "(", "output", ")", ")", "threadingString", ".", "update", "(", "output", ")", "LogMsg", "(", "\"Data update done, exiting thread.\"", ")" ]
Function that does the actual data update.
[ "Function", "that", "does", "the", "actual", "data", "update", "." ]
python
train
29.333333
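The comment in the record above notes that subprocess.check_output() was avoided only for pre-2.7 compatibility. As an illustrative aside, not part of the dataset record, here is a minimal sketch of the same run-a-command-and-keep-the-first-output-line pattern written against Python 3's subprocess.run; the command string and LANG=C environment mirror the example above, and the print calls merely stand in for the agent's logging and update step.

import subprocess

# Run the external command exactly as in the record, but via subprocess.run.
proc = subprocess.run(
    "sleep 5; date", shell=True, env={"LANG": "C"},
    stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
)
first_line = proc.stdout.splitlines()[0] if proc.stdout else b""
if proc.returncode != 0:
    print("An error occurred executing the command: {0}".format(first_line))
else:
    print("Would update the string object with: {0}".format(first_line))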
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_cmdlong.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_cmdlong.py#L24-L44
def cmd_takeoff(self, args):
    '''take off'''
    if ( len(args) != 1):
        print("Usage: takeoff ALTITUDE_IN_METERS")
        return

    if (len(args) == 1):
        altitude = float(args[0])
        print("Take Off started")
        self.master.mav.command_long_send(
            self.settings.target_system,  # target_system
            mavutil.mavlink.MAV_COMP_ID_SYSTEM_CONTROL,  # target_component
            mavutil.mavlink.MAV_CMD_NAV_TAKEOFF,  # command
            0,  # confirmation
            0,  # param1
            0,  # param2
            0,  # param3
            0,  # param4
            0,  # param5
            0,  # param6
            altitude)
[ "def", "cmd_takeoff", "(", "self", ",", "args", ")", ":", "if", "(", "len", "(", "args", ")", "!=", "1", ")", ":", "print", "(", "\"Usage: takeoff ALTITUDE_IN_METERS\"", ")", "return", "if", "(", "len", "(", "args", ")", "==", "1", ")", ":", "altitude", "=", "float", "(", "args", "[", "0", "]", ")", "print", "(", "\"Take Off started\"", ")", "self", ".", "master", ".", "mav", ".", "command_long_send", "(", "self", ".", "settings", ".", "target_system", ",", "# target_system", "mavutil", ".", "mavlink", ".", "MAV_COMP_ID_SYSTEM_CONTROL", ",", "# target_component", "mavutil", ".", "mavlink", ".", "MAV_CMD_NAV_TAKEOFF", ",", "# command", "0", ",", "# confirmation", "0", ",", "# param1", "0", ",", "# param2", "0", ",", "# param3", "0", ",", "# param4", "0", ",", "# param5", "0", ",", "# param6", "altitude", ")" ]
take off
[ "take", "off" ]
python
train
34.285714
Shinichi-Nakagawa/pitchpx
pitchpx/game/inning.py
https://github.com/Shinichi-Nakagawa/pitchpx/blob/5747402a0b3416f5e910b479e100df858f0b6440/pitchpx/game/inning.py#L16-L27
def is_pa_terminal(cls, ball_tally, strike_tally, pitch_res, event_cd):
    """
    Is PA terminal
    :param ball_tally: Ball tally
    :param strike_tally: Strike tally
    :param pitch_res: pitching result(Retrosheet format)
    :param event_cd: Event code
    :return: FLG(T or F)
    """
    if RetroSheet.is_pa_terminal(ball_tally, strike_tally, pitch_res, event_cd):
        return MlbamConst.FLG_TRUE
    return MlbamConst.FLG_FALSE
[ "def", "is_pa_terminal", "(", "cls", ",", "ball_tally", ",", "strike_tally", ",", "pitch_res", ",", "event_cd", ")", ":", "if", "RetroSheet", ".", "is_pa_terminal", "(", "ball_tally", ",", "strike_tally", ",", "pitch_res", ",", "event_cd", ")", ":", "return", "MlbamConst", ".", "FLG_TRUE", "return", "MlbamConst", ".", "FLG_FALSE" ]
Is PA terminal
:param ball_tally: Ball tally
:param strike_tally: Strike tally
:param pitch_res: pitching result(Retrosheet format)
:param event_cd: Event code
:return: FLG(T or F)
[ "Is", "PA", "terminal", ":", "param", "ball_tally", ":", "Ball", "telly", ":", "param", "strike_tally", ":", "Strike", "telly", ":", "param", "pitch_res", ":", "pitching", "result", "(", "Retrosheet", "format", ")", ":", "param", "event_cd", ":", "Event", "code", ":", "return", ":", "FLG", "(", "T", "or", "F", ")" ]
python
train
39.416667
aeguana/PyFileMaker
PyFileMaker/FMServer.py
https://github.com/aeguana/PyFileMaker/blob/ef269b52a97e329d91da3c4851ddac800d7fd7e6/PyFileMaker/FMServer.py#L536-L562
def doDup(self, WHAT={}, **params): """This function will perform the command -dup.""" if hasattr(WHAT, '_modified'): for key, value in WHAT._modified(): if WHAT.__new2old__.has_key(key): self._addDBParam(WHAT.__new2old__[key].encode('utf-8'), value) else: self._addDBParam(key, value) self._addDBParam('RECORDID', WHAT.RECORDID) self._addDBParam('MODID', WHAT.MODID) elif type(WHAT) == dict: for key in WHAT: self._addDBParam(key, WHAT[key]) else: raise FMError, 'Python Runtime: Object type (%s) given to function doDup as argument WHAT cannot be used.' % type(WHAT) if self._layout == '': raise FMError, 'No layout was selected' for key in params: self._addDBParam(key, params[key]) if self._checkRecordID() == 0: raise FMError, 'RecordID is missing' return self._doAction('-dup')
[ "def", "doDup", "(", "self", ",", "WHAT", "=", "{", "}", ",", "*", "*", "params", ")", ":", "if", "hasattr", "(", "WHAT", ",", "'_modified'", ")", ":", "for", "key", ",", "value", "in", "WHAT", ".", "_modified", "(", ")", ":", "if", "WHAT", ".", "__new2old__", ".", "has_key", "(", "key", ")", ":", "self", ".", "_addDBParam", "(", "WHAT", ".", "__new2old__", "[", "key", "]", ".", "encode", "(", "'utf-8'", ")", ",", "value", ")", "else", ":", "self", ".", "_addDBParam", "(", "key", ",", "value", ")", "self", ".", "_addDBParam", "(", "'RECORDID'", ",", "WHAT", ".", "RECORDID", ")", "self", ".", "_addDBParam", "(", "'MODID'", ",", "WHAT", ".", "MODID", ")", "elif", "type", "(", "WHAT", ")", "==", "dict", ":", "for", "key", "in", "WHAT", ":", "self", ".", "_addDBParam", "(", "key", ",", "WHAT", "[", "key", "]", ")", "else", ":", "raise", "FMError", ",", "'Python Runtime: Object type (%s) given to function doDup as argument WHAT cannot be used.'", "%", "type", "(", "WHAT", ")", "if", "self", ".", "_layout", "==", "''", ":", "raise", "FMError", ",", "'No layout was selected'", "for", "key", "in", "params", ":", "self", ".", "_addDBParam", "(", "key", ",", "params", "[", "key", "]", ")", "if", "self", ".", "_checkRecordID", "(", ")", "==", "0", ":", "raise", "FMError", ",", "'RecordID is missing'", "return", "self", ".", "_doAction", "(", "'-dup'", ")" ]
This function will perform the command -dup.
[ "This", "function", "will", "perform", "the", "command", "-", "dup", "." ]
python
train
30.518519
PyGithub/PyGithub
github/Repository.py
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/Repository.py#L2283-L2306
def get_pulls_review_comments(self, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet): """ :calls: `GET /repos/:owner/:repo/pulls/comments <http://developer.github.com/v3/pulls/comments>`_ :param sort: string :param direction: string :param since: datetime.datetime :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment` """ assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since url_parameters = dict() if sort is not github.GithubObject.NotSet: url_parameters["sort"] = sort if direction is not github.GithubObject.NotSet: url_parameters["direction"] = direction if since is not github.GithubObject.NotSet: url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ") return github.PaginatedList.PaginatedList( github.IssueComment.IssueComment, self._requester, self.url + "/pulls/comments", url_parameters )
[ "def", "get_pulls_review_comments", "(", "self", ",", "sort", "=", "github", ".", "GithubObject", ".", "NotSet", ",", "direction", "=", "github", ".", "GithubObject", ".", "NotSet", ",", "since", "=", "github", ".", "GithubObject", ".", "NotSet", ")", ":", "assert", "sort", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "sort", ",", "(", "str", ",", "unicode", ")", ")", ",", "sort", "assert", "direction", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "direction", ",", "(", "str", ",", "unicode", ")", ")", ",", "direction", "assert", "since", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "since", ",", "datetime", ".", "datetime", ")", ",", "since", "url_parameters", "=", "dict", "(", ")", "if", "sort", "is", "not", "github", ".", "GithubObject", ".", "NotSet", ":", "url_parameters", "[", "\"sort\"", "]", "=", "sort", "if", "direction", "is", "not", "github", ".", "GithubObject", ".", "NotSet", ":", "url_parameters", "[", "\"direction\"", "]", "=", "direction", "if", "since", "is", "not", "github", ".", "GithubObject", ".", "NotSet", ":", "url_parameters", "[", "\"since\"", "]", "=", "since", ".", "strftime", "(", "\"%Y-%m-%dT%H:%M:%SZ\"", ")", "return", "github", ".", "PaginatedList", ".", "PaginatedList", "(", "github", ".", "IssueComment", ".", "IssueComment", ",", "self", ".", "_requester", ",", "self", ".", "url", "+", "\"/pulls/comments\"", ",", "url_parameters", ")" ]
:calls: `GET /repos/:owner/:repo/pulls/comments <http://developer.github.com/v3/pulls/comments>`_ :param sort: string :param direction: string :param since: datetime.datetime :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment`
[ ":", "calls", ":", "GET", "/", "repos", "/", ":", "owner", "/", ":", "repo", "/", "pulls", "/", "comments", "<http", ":", "//", "developer", ".", "github", ".", "com", "/", "v3", "/", "pulls", "/", "comments", ">", "_", ":", "param", "sort", ":", "string", ":", "param", "direction", ":", "string", ":", "param", "since", ":", "datetime", ".", "datetime", ":", "rtype", ":", ":", "class", ":", "github", ".", "PaginatedList", ".", "PaginatedList", "of", ":", "class", ":", "github", ".", "PullRequestComment", ".", "PullRequestComment" ]
python
train
55.333333
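A hedged usage sketch for the method documented above; the access token and repository name are placeholders, and the sort/direction/since values are just one valid combination of the documented parameters.

import datetime
from github import Github

gh = Github("YOUR_ACCESS_TOKEN")            # placeholder credentials
repo = gh.get_repo("octocat/Hello-World")   # placeholder repository
since = datetime.datetime(2019, 1, 1)
# Iterate the paginated list of pull-request review comments.
for comment in repo.get_pulls_review_comments(sort="created", direction="asc", since=since):
    print(comment.body)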
bhmm/bhmm
bhmm/output_models/gaussian.py
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/output_models/gaussian.py#L132-L168
def _p_o(self, o): """ Returns the output probability for symbol o from all hidden states Parameters ---------- o : float A single observation. Return ------ p_o : ndarray (N) p_o[i] is the probability density of the observation o from state i emission distribution Examples -------- Create an observation model. >>> output_model = GaussianOutputModel(nstates=3, means=[-1, 0, 1], sigmas=[0.5, 1, 2]) Compute the output probability of a single observation from all hidden states. >>> observation = 0 >>> p_o = output_model._p_o(observation) """ if self.__impl__ == self.__IMPL_C__: return gc.p_o(o, self.means, self.sigmas, out=None, dtype=type(o)) elif self.__impl__ == self.__IMPL_PYTHON__: if np.any(self.sigmas < np.finfo(self.sigmas.dtype).eps): raise RuntimeError('at least one sigma is too small to continue.') C = 1.0 / (np.sqrt(2.0 * np.pi) * self.sigmas) Pobs = C * np.exp(-0.5 * ((o-self.means)/self.sigmas)**2) return Pobs else: raise RuntimeError('Implementation '+str(self.__impl__)+' not available')
[ "def", "_p_o", "(", "self", ",", "o", ")", ":", "if", "self", ".", "__impl__", "==", "self", ".", "__IMPL_C__", ":", "return", "gc", ".", "p_o", "(", "o", ",", "self", ".", "means", ",", "self", ".", "sigmas", ",", "out", "=", "None", ",", "dtype", "=", "type", "(", "o", ")", ")", "elif", "self", ".", "__impl__", "==", "self", ".", "__IMPL_PYTHON__", ":", "if", "np", ".", "any", "(", "self", ".", "sigmas", "<", "np", ".", "finfo", "(", "self", ".", "sigmas", ".", "dtype", ")", ".", "eps", ")", ":", "raise", "RuntimeError", "(", "'at least one sigma is too small to continue.'", ")", "C", "=", "1.0", "/", "(", "np", ".", "sqrt", "(", "2.0", "*", "np", ".", "pi", ")", "*", "self", ".", "sigmas", ")", "Pobs", "=", "C", "*", "np", ".", "exp", "(", "-", "0.5", "*", "(", "(", "o", "-", "self", ".", "means", ")", "/", "self", ".", "sigmas", ")", "**", "2", ")", "return", "Pobs", "else", ":", "raise", "RuntimeError", "(", "'Implementation '", "+", "str", "(", "self", ".", "__impl__", ")", "+", "' not available'", ")" ]
Returns the output probability for symbol o from all hidden states Parameters ---------- o : float A single observation. Return ------ p_o : ndarray (N) p_o[i] is the probability density of the observation o from state i emission distribution Examples -------- Create an observation model. >>> output_model = GaussianOutputModel(nstates=3, means=[-1, 0, 1], sigmas=[0.5, 1, 2]) Compute the output probability of a single observation from all hidden states. >>> observation = 0 >>> p_o = output_model._p_o(observation)
[ "Returns", "the", "output", "probability", "for", "symbol", "o", "from", "all", "hidden", "states" ]
python
train
33.783784
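The pure-Python branch above is just the Gaussian density evaluated once per hidden state. A stand-alone NumPy sketch of that same expression, reusing the means and sigmas from the record's doctest:

import numpy as np

means = np.array([-1.0, 0.0, 1.0])
sigmas = np.array([0.5, 1.0, 2.0])
o = 0.0
# Same formula as the record: C * exp(-0.5 * ((o - mu) / sigma)**2) per state.
C = 1.0 / (np.sqrt(2.0 * np.pi) * sigmas)
p_o = C * np.exp(-0.5 * ((o - means) / sigmas) ** 2)
print(p_o)  # density of the observation o under each state's emission model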
Jarn/jarn.viewdoc
jarn/viewdoc/viewdoc.py
https://github.com/Jarn/jarn.viewdoc/blob/59ae82fd1658889c41096c1d8c08dcb1047dc349/jarn/viewdoc/viewdoc.py#L389-L395
def upgrade(self):
    """Upgrade the config file.
    """
    warn('Upgrading ' + self.filename)
    if self.backup_config(self.filename):
        return self.write_default_config(self.filename)
    return False
[ "def", "upgrade", "(", "self", ")", ":", "warn", "(", "'Upgrading '", "+", "self", ".", "filename", ")", "if", "self", ".", "backup_config", "(", "self", ".", "filename", ")", ":", "return", "self", ".", "write_default_config", "(", "self", ".", "filename", ")", "return", "False" ]
Upgrade the config file.
[ "Upgrade", "the", "config", "file", "." ]
python
train
32.857143
vertexproject/synapse
synapse/lib/agenda.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/agenda.py#L590-L599
async def _execute(self, appt):
    '''
    Fire off the task to make the storm query
    '''
    user = self.core.auth.user(appt.useriden)
    if user is None:
        logger.warning('Unknown user %s in stored appointment', appt.useriden)
        await self._markfailed(appt)
        return

    await self.core.boss.execute(self._runJob(user, appt), f'Agenda {appt.iden}', user)
[ "async", "def", "_execute", "(", "self", ",", "appt", ")", ":", "user", "=", "self", ".", "core", ".", "auth", ".", "user", "(", "appt", ".", "useriden", ")", "if", "user", "is", "None", ":", "logger", ".", "warning", "(", "'Unknown user %s in stored appointment'", ",", "appt", ".", "useriden", ")", "await", "self", ".", "_markfailed", "(", "appt", ")", "return", "await", "self", ".", "core", ".", "boss", ".", "execute", "(", "self", ".", "_runJob", "(", "user", ",", "appt", ")", ",", "f'Agenda {appt.iden}'", ",", "user", ")" ]
Fire off the task to make the storm query
[ "Fire", "off", "the", "task", "to", "make", "the", "storm", "query" ]
python
train
40.6
ihgazni2/elist
elist/elist.py
https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L2570-L2595
def shift(ol,**kwargs): ''' from elist.jprint import pobj from elist.elist import * ol = [1,2,3,4] id(ol) rslt = shift(ol) pobj(rslt) ol id(ol) id(rslt['list']) #### ol = [1,2,3,4] id(ol) rslt = shift(ol,mode="original") rslt ol id(ol) ''' if('mode' in kwargs): mode = kwargs['mode'] else: mode = "new" length = ol.__len__() rslt = pop(ol,0,mode=mode) return(rslt)
[ "def", "shift", "(", "ol", ",", "*", "*", "kwargs", ")", ":", "if", "(", "'mode'", "in", "kwargs", ")", ":", "mode", "=", "kwargs", "[", "'mode'", "]", "else", ":", "mode", "=", "\"new\"", "length", "=", "ol", ".", "__len__", "(", ")", "rslt", "=", "pop", "(", "ol", ",", "0", ",", "mode", "=", "mode", ")", "return", "(", "rslt", ")" ]
from elist.jprint import pobj from elist.elist import * ol = [1,2,3,4] id(ol) rslt = shift(ol) pobj(rslt) ol id(ol) id(rslt['list']) #### ol = [1,2,3,4] id(ol) rslt = shift(ol,mode="original") rslt ol id(ol)
[ "from", "elist", ".", "jprint", "import", "pobj", "from", "elist", ".", "elist", "import", "*", "ol", "=", "[", "1", "2", "3", "4", "]", "id", "(", "ol", ")", "rslt", "=", "shift", "(", "ol", ")", "pobj", "(", "rslt", ")", "ol", "id", "(", "ol", ")", "id", "(", "rslt", "[", "list", "]", ")", "####", "ol", "=", "[", "1", "2", "3", "4", "]", "id", "(", "ol", ")", "rslt", "=", "shift", "(", "ol", "mode", "=", "original", ")", "rslt", "ol", "id", "(", "ol", ")" ]
python
valid
19.653846
avanwyk/cipy
cipy/algorithms/pso/functions.py
https://github.com/avanwyk/cipy/blob/98450dd01767b3615c113e50dc396f135e177b29/cipy/algorithms/pso/functions.py#L40-L71
def std_velocity(particle, social, state): """ Standard particle velocity update according to the equation: :math:`v_{ij}(t+1) &= \\omega v_{ij}(t) + \ c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)]\\:+ \ c_2 r_{2j}(t)[\\hat{y}_{ij}(t) - x_{ij}(t)],\\;\\;\ \\forall\\; j \\in\\; \\{1,...,n\\}` If a v_max parameter is supplied (state.params['v_max'] is not None) the returned velocity is clamped to v_max. Args: particle (cipy.algorithms.pso.types.Particle): Particle to update the velocity for. social (numpy.array): The social best for the particle. state (cipy.algorithms.pso.types.State): The PSO algorithm state. Returns: numpy.array: The calculated velocity, clamped to state.params['v_max']. """ inertia = state.params['inertia'] c_1, c_2 = state.params['c_1'], state.params['c_2'] v_max = state.params['v_max'] size = particle.position.size c1r1 = __acceleration__(state.rng, c_1, size) c2r2 = __acceleration__(state.rng, c_2, size) velocity = __std_velocity_equation__(inertia, c1r1, c2r2, particle, social) return __clamp__(velocity, v_max)
[ "def", "std_velocity", "(", "particle", ",", "social", ",", "state", ")", ":", "inertia", "=", "state", ".", "params", "[", "'inertia'", "]", "c_1", ",", "c_2", "=", "state", ".", "params", "[", "'c_1'", "]", ",", "state", ".", "params", "[", "'c_2'", "]", "v_max", "=", "state", ".", "params", "[", "'v_max'", "]", "size", "=", "particle", ".", "position", ".", "size", "c1r1", "=", "__acceleration__", "(", "state", ".", "rng", ",", "c_1", ",", "size", ")", "c2r2", "=", "__acceleration__", "(", "state", ".", "rng", ",", "c_2", ",", "size", ")", "velocity", "=", "__std_velocity_equation__", "(", "inertia", ",", "c1r1", ",", "c2r2", ",", "particle", ",", "social", ")", "return", "__clamp__", "(", "velocity", ",", "v_max", ")" ]
Standard particle velocity update according to the equation: :math:`v_{ij}(t+1) &= \\omega v_{ij}(t) + \ c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)]\\:+ \ c_2 r_{2j}(t)[\\hat{y}_{ij}(t) - x_{ij}(t)],\\;\\;\ \\forall\\; j \\in\\; \\{1,...,n\\}` If a v_max parameter is supplied (state.params['v_max'] is not None) the returned velocity is clamped to v_max. Args: particle (cipy.algorithms.pso.types.Particle): Particle to update the velocity for. social (numpy.array): The social best for the particle. state (cipy.algorithms.pso.types.State): The PSO algorithm state. Returns: numpy.array: The calculated velocity, clamped to state.params['v_max'].
[ "Standard", "particle", "velocity", "update", "according", "to", "the", "equation", ":" ]
python
train
35.84375
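A minimal stand-alone sketch of the velocity equation quoted in the docstring above (inertia term plus cognitive and social components, then clamping to v_max); the parameter values and random vectors are illustrative and not taken from cipy.

import numpy as np

rng = np.random.default_rng(0)
inertia, c_1, c_2, v_max = 0.72, 1.49, 1.49, 1.0
position = rng.uniform(-1.0, 1.0, 3)
velocity = np.zeros(3)
personal_best = rng.uniform(-1.0, 1.0, 3)   # y_ij(t)
social_best = rng.uniform(-1.0, 1.0, 3)     # social best (y-hat_ij(t))
c1r1 = c_1 * rng.uniform(size=3)
c2r2 = c_2 * rng.uniform(size=3)
velocity = (inertia * velocity
            + c1r1 * (personal_best - position)
            + c2r2 * (social_best - position))
velocity = np.clip(velocity, -v_max, v_max)  # clamp to v_max as described above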
GeorgeArgyros/symautomata
symautomata/pdadiff.py
https://github.com/GeorgeArgyros/symautomata/blob/f5d66533573b27e155bec3f36b8c00b8e3937cb3/symautomata/pdadiff.py#L166-L173
def diff(self):
    """The Difference between a PDA and a DFA"""
    self.mmb.complement(self.alphabet)
    self.mmb.minimize()
    print 'start intersection'
    self.mmc = self._intesect()
    print 'end intersection'
    return self.mmc
[ "def", "diff", "(", "self", ")", ":", "self", ".", "mmb", ".", "complement", "(", "self", ".", "alphabet", ")", "self", ".", "mmb", ".", "minimize", "(", ")", "print", "'start intersection'", "self", ".", "mmc", "=", "self", ".", "_intesect", "(", ")", "print", "'end intersection'", "return", "self", ".", "mmc" ]
The Difference between a PDA and a DFA
[ "The", "Difference", "between", "a", "PDA", "and", "a", "DFA" ]
python
train
32.5
hotdoc/hotdoc
hotdoc/core/extension.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/extension.py#L670-L684
def write_out_sitemap(self, opath): """ Banana banana """ if opath not in self.written_out_sitemaps: Extension.formatted_sitemap = self.formatter.format_navigation( self.app.project) if Extension.formatted_sitemap: escaped_sitemap = Extension.formatted_sitemap.replace( '\\', '\\\\').replace('"', '\\"').replace('\n', '') js_wrapper = 'sitemap_downloaded_cb("%s");' % escaped_sitemap with open(opath, 'w') as _: _.write(js_wrapper) self.written_out_sitemaps.add(opath)
[ "def", "write_out_sitemap", "(", "self", ",", "opath", ")", ":", "if", "opath", "not", "in", "self", ".", "written_out_sitemaps", ":", "Extension", ".", "formatted_sitemap", "=", "self", ".", "formatter", ".", "format_navigation", "(", "self", ".", "app", ".", "project", ")", "if", "Extension", ".", "formatted_sitemap", ":", "escaped_sitemap", "=", "Extension", ".", "formatted_sitemap", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", ".", "replace", "(", "'\"'", ",", "'\\\\\"'", ")", ".", "replace", "(", "'\\n'", ",", "''", ")", "js_wrapper", "=", "'sitemap_downloaded_cb(\"%s\");'", "%", "escaped_sitemap", "with", "open", "(", "opath", ",", "'w'", ")", "as", "_", ":", "_", ".", "write", "(", "js_wrapper", ")", "self", ".", "written_out_sitemaps", ".", "add", "(", "opath", ")" ]
Banana banana
[ "Banana", "banana" ]
python
train
41.533333
scrapinghub/dateparser
dateparser/__init__.py
https://github.com/scrapinghub/dateparser/blob/11a761c99d3ee522a3c63756b70c106a579e8b5c/dateparser/__init__.py#L11-L56
def parse(date_string, date_formats=None, languages=None, locales=None, region=None, settings=None): """Parse date and time from given date string. :param date_string: A string representing date and/or time in a recognizably valid format. :type date_string: str|unicode :param date_formats: A list of format strings using directives as given `here <https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior>`_. The parser applies formats one by one, taking into account the detected languages/locales. :type date_formats: list :param languages: A list of language codes, e.g. ['en', 'es', 'zh-Hant']. If locales are not given, languages and region are used to construct locales for translation. :type languages: list :param locales: A list of locale codes, e.g. ['fr-PF', 'qu-EC', 'af-NA']. The parser uses locales to translate date string. :type locales: list :param region: A region code, e.g. 'IN', '001', 'NE'. If locales are not given, languages and region are used to construct locales for translation. :type region: str|unicode :param settings: Configure customized behavior using settings defined in :mod:`dateparser.conf.Settings`. :type settings: dict :return: Returns :class:`datetime <datetime.datetime>` representing parsed date if successful, else returns None :rtype: :class:`datetime <datetime.datetime>`. :raises: ValueError - Unknown Language """ parser = _default_parser if any([languages, locales, region, not settings._default]): parser = DateDataParser(languages=languages, locales=locales, region=region, settings=settings) data = parser.get_date_data(date_string, date_formats) if data: return data['date_obj']
[ "def", "parse", "(", "date_string", ",", "date_formats", "=", "None", ",", "languages", "=", "None", ",", "locales", "=", "None", ",", "region", "=", "None", ",", "settings", "=", "None", ")", ":", "parser", "=", "_default_parser", "if", "any", "(", "[", "languages", ",", "locales", ",", "region", ",", "not", "settings", ".", "_default", "]", ")", ":", "parser", "=", "DateDataParser", "(", "languages", "=", "languages", ",", "locales", "=", "locales", ",", "region", "=", "region", ",", "settings", "=", "settings", ")", "data", "=", "parser", ".", "get_date_data", "(", "date_string", ",", "date_formats", ")", "if", "data", ":", "return", "data", "[", "'date_obj'", "]" ]
Parse date and time from given date string. :param date_string: A string representing date and/or time in a recognizably valid format. :type date_string: str|unicode :param date_formats: A list of format strings using directives as given `here <https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior>`_. The parser applies formats one by one, taking into account the detected languages/locales. :type date_formats: list :param languages: A list of language codes, e.g. ['en', 'es', 'zh-Hant']. If locales are not given, languages and region are used to construct locales for translation. :type languages: list :param locales: A list of locale codes, e.g. ['fr-PF', 'qu-EC', 'af-NA']. The parser uses locales to translate date string. :type locales: list :param region: A region code, e.g. 'IN', '001', 'NE'. If locales are not given, languages and region are used to construct locales for translation. :type region: str|unicode :param settings: Configure customized behavior using settings defined in :mod:`dateparser.conf.Settings`. :type settings: dict :return: Returns :class:`datetime <datetime.datetime>` representing parsed date if successful, else returns None :rtype: :class:`datetime <datetime.datetime>`. :raises: ValueError - Unknown Language
[ "Parse", "date", "and", "time", "from", "given", "date", "string", "." ]
python
test
40
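Typical calls against the signature documented above; the exact results depend on the installed dateparser version, and relative expressions such as '2 hours ago' also depend on the current time.

import dateparser

print(dateparser.parse('12/12/12'))                                  # language auto-detected
print(dateparser.parse('Le 11 Décembre 2014 à 09:00', languages=['fr']))
print(dateparser.parse('2 hours ago'))                               # relative to now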
ladybug-tools/ladybug
ladybug/dt.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/dt.py#L159-L165
def _calculate_hour_and_minute(float_hour):
    """Calculate hour and minutes as integers from a float hour."""
    hour, minute = int(float_hour), int(round((float_hour - int(float_hour)) * 60))
    if minute == 60:
        return hour + 1, 0
    else:
        return hour, minute
[ "def", "_calculate_hour_and_minute", "(", "float_hour", ")", ":", "hour", ",", "minute", "=", "int", "(", "float_hour", ")", ",", "int", "(", "round", "(", "(", "float_hour", "-", "int", "(", "float_hour", ")", ")", "*", "60", ")", ")", "if", "minute", "==", "60", ":", "return", "hour", "+", "1", ",", "0", "else", ":", "return", "hour", ",", "minute" ]
Calculate hour and minutes as integers from a float hour.
[ "Calculate", "hour", "and", "minutes", "as", "integers", "from", "a", "float", "hour", "." ]
python
train
42.714286
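A worked example of the conversion above, spelled out for the carry case; the arithmetic is easy to check by hand.

float_hour = 9.9999
hour = int(float_hour)                         # 9
minute = int(round((float_hour - hour) * 60))  # round(59.994) -> 60
if minute == 60:                               # carry the full hour
    hour, minute = hour + 1, 0
print(hour, minute)                            # 10 0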
holgern/pyedflib
pyedflib/edfwriter.py
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L193-L211
def setSignalHeader(self, edfsignal, channel_info): """ Sets the parameter for signal edfsignal. channel_info should be a dict with these values: 'label' : channel label (string, <= 16 characters, must be unique) 'dimension' : physical dimension (e.g., mV) (string, <= 8 characters) 'sample_rate' : sample frequency in hertz (int) 'physical_max' : maximum physical value (float) 'physical_min' : minimum physical value (float) 'digital_max' : maximum digital value (int, -2**15 <= x < 2**15) 'digital_min' : minimum digital value (int, -2**15 <= x < 2**15) """ if edfsignal < 0 or edfsignal > self.n_channels: raise ChannelDoesNotExist(edfsignal) self.channels[edfsignal] = channel_info self.update_header()
[ "def", "setSignalHeader", "(", "self", ",", "edfsignal", ",", "channel_info", ")", ":", "if", "edfsignal", "<", "0", "or", "edfsignal", ">", "self", ".", "n_channels", ":", "raise", "ChannelDoesNotExist", "(", "edfsignal", ")", "self", ".", "channels", "[", "edfsignal", "]", "=", "channel_info", "self", ".", "update_header", "(", ")" ]
Sets the parameter for signal edfsignal.

channel_info should be a dict with these values:

'label' : channel label (string, <= 16 characters, must be unique)
'dimension' : physical dimension (e.g., mV) (string, <= 8 characters)
'sample_rate' : sample frequency in hertz (int)
'physical_max' : maximum physical value (float)
'physical_min' : minimum physical value (float)
'digital_max' : maximum digital value (int, -2**15 <= x < 2**15)
'digital_min' : minimum digital value (int, -2**15 <= x < 2**15)
[ "Sets", "the", "parameter", "for", "signal", "edfsignal", "." ]
python
train
44.789474
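A hedged usage sketch for the signal-header call above; the file name and channel values are made up, and depending on the installed pyedflib version the header may also need 'transducer' and 'prefilter' entries before samples can be written.

import pyedflib

writer = pyedflib.EdfWriter('example.edf', 1, file_type=pyedflib.FILETYPE_EDFPLUS)
channel_info = {
    'label': 'EEG Fpz-Cz', 'dimension': 'uV', 'sample_rate': 100,
    'physical_max': 100.0, 'physical_min': -100.0,
    'digital_max': 32767, 'digital_min': -32768,
}
writer.setSignalHeader(0, channel_info)   # signal index 0, header dict as documented
writer.close()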
Capitains/MyCapytain
MyCapytain/resources/texts/remote/cts.py
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/texts/remote/cts.py#L368-L379
def export(self, output=Mimetypes.PLAINTEXT, exclude=None, **kwargs):
    """ Export the collection item in the Mimetype required.

    ..note:: If current implementation does not have special mimetypes,
        reuses default_export method

    :param output: Mimetype to export to (Uses Mimetypes)
    :type output: str
    :param exclude: Informations to exclude. Specific to implementations
    :type exclude: [str]
    :return: Object using a different representation
    """
    return self.getTextualNode().export(output, exclude)
[ "def", "export", "(", "self", ",", "output", "=", "Mimetypes", ".", "PLAINTEXT", ",", "exclude", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "getTextualNode", "(", ")", ".", "export", "(", "output", ",", "exclude", ")" ]
Export the collection item in the Mimetype required. ..note:: If current implementation does not have special mimetypes, reuses default_export method :param output: Mimetype to export to (Uses Mimetypes) :type output: str :param exclude: Informations to exclude. Specific to implementations :type exclude: [str] :return: Object using a different representation
[ "Export", "the", "collection", "item", "in", "the", "Mimetype", "required", "." ]
python
train
46.166667
gem/oq-engine
openquake/hazardlib/gsim/atkinson_boore_2003.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/atkinson_boore_2003.py#L415-L424
def _compute_soil_amplification(cls, C, vs30, pga_rock, imt):
    """
    Compute soil amplification (5th, 6th, and 7th terms in equation 1,
    page 1706) and add the B/C site condition as implemented by NSHMP.

    Call
    :meth:`AtkinsonBoore2003SInterNSHMP2008._compute_soil_amplification`
    """
    return AtkinsonBoore2003SInterNSHMP2008._compute_soil_amplification(
        C, vs30, pga_rock, imt)
[ "def", "_compute_soil_amplification", "(", "cls", ",", "C", ",", "vs30", ",", "pga_rock", ",", "imt", ")", ":", "return", "AtkinsonBoore2003SInterNSHMP2008", ".", "_compute_soil_amplification", "(", "C", ",", "vs30", ",", "pga_rock", ",", "imt", ")" ]
Compute soil amplification (5th, 6th, and 7th terms in equation 1, page 1706) and add the B/C site condition as implemented by NSHMP. Call :meth:`AtkinsonBoore2003SInterNSHMP2008._compute_soil_amplification`
[ "Compute", "soil", "amplification", "(", "5th", "6th", "and", "7th", "terms", "in", "equation", "1", "page", "1706", ")", "and", "add", "the", "B", "/", "C", "site", "condition", "as", "implemented", "by", "NSHMP", "." ]
python
train
43
cni/MRS
doc/tools/apigen.py
https://github.com/cni/MRS/blob/16098b3cf4830780efd787fee9efa46513850283/doc/tools/apigen.py#L249-L310
def generate_api_doc(self, uri): '''Make autodoc documentation template string for a module Parameters ---------- uri : string python location of module - e.g 'sphinx.builder' Returns ------- head : string Module name, table of contents. body : string Function and class docstrings. ''' # get the names of all classes and functions functions, classes = self._parse_module_with_import(uri) if not len(functions) and not len(classes) and DEBUG: print('WARNING: Empty -', uri) # dbg # Make a shorter version of the uri that omits the package name for # titles uri_short = re.sub(r'^%s\.' % self.package_name,'',uri) head = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n' body = '' # Set the chapter title to read 'module' for all modules except for the # main packages if '.' in uri_short: title = 'Module: :mod:`' + uri_short + '`' head += title + '\n' + self.rst_section_levels[2] * len(title) else: title = ':mod:`' + uri_short + '`' head += title + '\n' + self.rst_section_levels[1] * len(title) head += '\n.. automodule:: ' + uri + '\n' head += '\n.. currentmodule:: ' + uri + '\n' body += '\n.. currentmodule:: ' + uri + '\n' for c in classes: body += '\n:class:`' + c + '`\n' \ + self.rst_section_levels[3] * \ (len(c)+9) + '\n\n' body += '\n.. autoclass:: ' + c + '\n' # must NOT exclude from index to keep cross-refs working body += ' :members:\n' \ ' :undoc-members:\n' \ ' :show-inheritance:\n' \ '\n' \ ' .. automethod:: __init__\n\n' head += '.. autosummary::\n\n' for f in classes + functions: head += ' ' + f + '\n' head += '\n' for f in functions: # must NOT exclude from index to keep cross-refs working body += f + '\n' body += self.rst_section_levels[3] * len(f) + '\n' body += '\n.. autofunction:: ' + f + '\n\n' return head, body
[ "def", "generate_api_doc", "(", "self", ",", "uri", ")", ":", "# get the names of all classes and functions", "functions", ",", "classes", "=", "self", ".", "_parse_module_with_import", "(", "uri", ")", "if", "not", "len", "(", "functions", ")", "and", "not", "len", "(", "classes", ")", "and", "DEBUG", ":", "print", "(", "'WARNING: Empty -'", ",", "uri", ")", "# dbg", "# Make a shorter version of the uri that omits the package name for", "# titles", "uri_short", "=", "re", ".", "sub", "(", "r'^%s\\.'", "%", "self", ".", "package_name", ",", "''", ",", "uri", ")", "head", "=", "'.. AUTO-GENERATED FILE -- DO NOT EDIT!\\n\\n'", "body", "=", "''", "# Set the chapter title to read 'module' for all modules except for the", "# main packages", "if", "'.'", "in", "uri_short", ":", "title", "=", "'Module: :mod:`'", "+", "uri_short", "+", "'`'", "head", "+=", "title", "+", "'\\n'", "+", "self", ".", "rst_section_levels", "[", "2", "]", "*", "len", "(", "title", ")", "else", ":", "title", "=", "':mod:`'", "+", "uri_short", "+", "'`'", "head", "+=", "title", "+", "'\\n'", "+", "self", ".", "rst_section_levels", "[", "1", "]", "*", "len", "(", "title", ")", "head", "+=", "'\\n.. automodule:: '", "+", "uri", "+", "'\\n'", "head", "+=", "'\\n.. currentmodule:: '", "+", "uri", "+", "'\\n'", "body", "+=", "'\\n.. currentmodule:: '", "+", "uri", "+", "'\\n'", "for", "c", "in", "classes", ":", "body", "+=", "'\\n:class:`'", "+", "c", "+", "'`\\n'", "+", "self", ".", "rst_section_levels", "[", "3", "]", "*", "(", "len", "(", "c", ")", "+", "9", ")", "+", "'\\n\\n'", "body", "+=", "'\\n.. autoclass:: '", "+", "c", "+", "'\\n'", "# must NOT exclude from index to keep cross-refs working", "body", "+=", "' :members:\\n'", "' :undoc-members:\\n'", "' :show-inheritance:\\n'", "'\\n'", "' .. automethod:: __init__\\n\\n'", "head", "+=", "'.. autosummary::\\n\\n'", "for", "f", "in", "classes", "+", "functions", ":", "head", "+=", "' '", "+", "f", "+", "'\\n'", "head", "+=", "'\\n'", "for", "f", "in", "functions", ":", "# must NOT exclude from index to keep cross-refs working", "body", "+=", "f", "+", "'\\n'", "body", "+=", "self", ".", "rst_section_levels", "[", "3", "]", "*", "len", "(", "f", ")", "+", "'\\n'", "body", "+=", "'\\n.. autofunction:: '", "+", "f", "+", "'\\n\\n'", "return", "head", ",", "body" ]
Make autodoc documentation template string for a module Parameters ---------- uri : string python location of module - e.g 'sphinx.builder' Returns ------- head : string Module name, table of contents. body : string Function and class docstrings.
[ "Make", "autodoc", "documentation", "template", "string", "for", "a", "module" ]
python
train
36.290323
hvac/hvac
hvac/api/system_backend/seal.py
https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/system_backend/seal.py#L15-L30
def read_seal_status(self):
    """Read the seal status of the Vault.

    This is an unauthenticated endpoint.

    Supported methods:
        GET: /sys/seal-status. Produces: 200 application/json

    :return: The JSON response of the request.
    :rtype: dict
    """
    api_path = '/v1/sys/seal-status'
    response = self._adapter.get(
        url=api_path,
    )
    return response.json()
[ "def", "read_seal_status", "(", "self", ")", ":", "api_path", "=", "'/v1/sys/seal-status'", "response", "=", "self", ".", "_adapter", ".", "get", "(", "url", "=", "api_path", ",", ")", "return", "response", ".", "json", "(", ")" ]
Read the seal status of the Vault. This is an unauthenticated endpoint. Supported methods: GET: /sys/seal-status. Produces: 200 application/json :return: The JSON response of the request. :rtype: dict
[ "Read", "the", "seal", "status", "of", "the", "Vault", "." ]
python
train
26.8125
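A hedged usage sketch for the endpoint above, assuming a recent hvac release where the system backend is exposed as client.sys; the URL points at a local dev server, and no token is needed for this unauthenticated call.

import hvac

client = hvac.Client(url='http://127.0.0.1:8200')
status = client.sys.read_seal_status()     # GET /v1/sys/seal-status
print(status['sealed'], status.get('progress'))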
mitsei/dlkit
dlkit/json_/repository/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/objects.py#L583-L604
def _init_metadata(self, **kwargs): """Initialize form metadata""" osid_objects.OsidSourceableForm._init_metadata(self) osid_objects.OsidObjectForm._init_metadata(self, **kwargs) self._copyright_registration_default = self._mdata['copyright_registration']['default_string_values'][0] update_display_text_defaults(self._mdata['copyright'], self._locale_map) self._copyright_default = dict(self._mdata['copyright']['default_string_values'][0]) update_display_text_defaults(self._mdata['title'], self._locale_map) self._title_default = dict(self._mdata['title']['default_string_values'][0]) self._distribute_verbatim_default = self._mdata['distribute_verbatim']['default_boolean_values'][0] self._created_date_default = self._mdata['created_date']['default_date_time_values'][0] self._distribute_alterations_default = self._mdata['distribute_alterations']['default_boolean_values'][0] update_display_text_defaults(self._mdata['principal_credit_string'], self._locale_map) self._principal_credit_string_default = dict(self._mdata['principal_credit_string']['default_string_values'][0]) self._published_date_default = self._mdata['published_date']['default_date_time_values'][0] self._source_default = self._mdata['source']['default_id_values'][0] self._provider_links_default = self._mdata['provider_links']['default_id_values'] self._public_domain_default = self._mdata['public_domain']['default_boolean_values'][0] self._distribute_compositions_default = self._mdata['distribute_compositions']['default_boolean_values'][0] self._composition_default = self._mdata['composition']['default_id_values'][0] self._published_default = self._mdata['published']['default_boolean_values'][0]
[ "def", "_init_metadata", "(", "self", ",", "*", "*", "kwargs", ")", ":", "osid_objects", ".", "OsidSourceableForm", ".", "_init_metadata", "(", "self", ")", "osid_objects", ".", "OsidObjectForm", ".", "_init_metadata", "(", "self", ",", "*", "*", "kwargs", ")", "self", ".", "_copyright_registration_default", "=", "self", ".", "_mdata", "[", "'copyright_registration'", "]", "[", "'default_string_values'", "]", "[", "0", "]", "update_display_text_defaults", "(", "self", ".", "_mdata", "[", "'copyright'", "]", ",", "self", ".", "_locale_map", ")", "self", ".", "_copyright_default", "=", "dict", "(", "self", ".", "_mdata", "[", "'copyright'", "]", "[", "'default_string_values'", "]", "[", "0", "]", ")", "update_display_text_defaults", "(", "self", ".", "_mdata", "[", "'title'", "]", ",", "self", ".", "_locale_map", ")", "self", ".", "_title_default", "=", "dict", "(", "self", ".", "_mdata", "[", "'title'", "]", "[", "'default_string_values'", "]", "[", "0", "]", ")", "self", ".", "_distribute_verbatim_default", "=", "self", ".", "_mdata", "[", "'distribute_verbatim'", "]", "[", "'default_boolean_values'", "]", "[", "0", "]", "self", ".", "_created_date_default", "=", "self", ".", "_mdata", "[", "'created_date'", "]", "[", "'default_date_time_values'", "]", "[", "0", "]", "self", ".", "_distribute_alterations_default", "=", "self", ".", "_mdata", "[", "'distribute_alterations'", "]", "[", "'default_boolean_values'", "]", "[", "0", "]", "update_display_text_defaults", "(", "self", ".", "_mdata", "[", "'principal_credit_string'", "]", ",", "self", ".", "_locale_map", ")", "self", ".", "_principal_credit_string_default", "=", "dict", "(", "self", ".", "_mdata", "[", "'principal_credit_string'", "]", "[", "'default_string_values'", "]", "[", "0", "]", ")", "self", ".", "_published_date_default", "=", "self", ".", "_mdata", "[", "'published_date'", "]", "[", "'default_date_time_values'", "]", "[", "0", "]", "self", ".", "_source_default", "=", "self", ".", "_mdata", "[", "'source'", "]", "[", "'default_id_values'", "]", "[", "0", "]", "self", ".", "_provider_links_default", "=", "self", ".", "_mdata", "[", "'provider_links'", "]", "[", "'default_id_values'", "]", "self", ".", "_public_domain_default", "=", "self", ".", "_mdata", "[", "'public_domain'", "]", "[", "'default_boolean_values'", "]", "[", "0", "]", "self", ".", "_distribute_compositions_default", "=", "self", ".", "_mdata", "[", "'distribute_compositions'", "]", "[", "'default_boolean_values'", "]", "[", "0", "]", "self", ".", "_composition_default", "=", "self", ".", "_mdata", "[", "'composition'", "]", "[", "'default_id_values'", "]", "[", "0", "]", "self", ".", "_published_default", "=", "self", ".", "_mdata", "[", "'published'", "]", "[", "'default_boolean_values'", "]", "[", "0", "]" ]
Initialize form metadata
[ "Initialize", "form", "metadata" ]
python
train
82.681818
rdussurget/py-altimetry
altimetry/data/alti_data.py
https://github.com/rdussurget/py-altimetry/blob/57ce7f2d63c6bbc4993821af0bbe46929e3a2d98/altimetry/data/alti_data.py#L443-L530
def read_slaext(self,filename,params=None,force=False,timerange=None,datatype=None,**kwargs): """ Read AVISO Along-Track SLAEXT regional products :return outStr: Output data structure containing all recorded parameters as specificied by NetCDF file PARAMETER list. :author: Renaud Dussurget """ self.message(2,'Reading SLAext data ({0})'.format(datatype)) self.message(2,'Loading %s' % (filename)) #Open file self._filename = filename try: self._ncfile = ncfile(self._filename, "r") except Exception,e: self.warning(1, repr(e)) return {} #Get delimiter if os.path.basename(filename).count('.') > os.path.basename(filename).count('_'): delim='.' else : delim = '_' #Gat sat name splitted=os.path.basename(filename).split(delim) if (datatype == 'DT') | (datatype == 'NRT') : sat_name = splitted[2] if splitted[0] == 'nrt' else splitted[3] if datatype == 'PISTACH' : sat_name = 'J2' #Get list of recorded parameters: par_list=[i.encode() for i in self._ncfile.variables.keys()] for i in ['time','longitude','latitude'] : par_list.pop(par_list.index(i)) nparam=len(par_list) self.message(2,'Recorded parameters : '+str(nparam)+' -> '+str(par_list)) lon = self.load_ncVar('longitude',**kwargs) lon['data'] = recale(lon['data'], degrees=True, zero_2pi=True) #shift longitudes lat = self.load_ncVar('latitude',**kwargs) #Extract within limits ind, flag = in_limits(lon['data'],lat['data'],limit=self.limit) dim_lon = lon['_dimensions'] lat = lat['data'].compress(flag) lon = lon['data'].compress(flag) dist=cumulative_distance(lat, lon) sz=np.shape(lon) ndims=np.size(sz) id=np.repeat(sat_name,sz) date = self.load_ncVar('time',time=ind,**kwargs) dimStr = date['_dimensions'] date=date['data'] outStr=varStr(dimensions=dimStr) outStr.update({'lon':lon}) outStr.update({'lat':lat}) outStr.update({'date':date}) outStr.update({'id':id}) #{'_dimensions':dimStr,'lon':lon,'lat':lat,'date':date} for param in par_list : dumVar = self.load_ncVar(param,time=ind,**kwargs) #Load variables dimStr=dumVar['_dimensions'] #update dimensions curDim = [str(dimname) for dimname in dimStr.keys()[1:]] #[str(dimname) for dimname in self._ncfile.variables['LONGITUDE'].dimensions] curDimval = [dimStr[dim] for dim in curDim] #[len(self._ncfile.dimensions[dimname]) for dimname in curDim] flag = [(np.array(dimname) == outStr['_dimensions'].keys()).sum() == 0 for dimname in curDim] #find dimensions to update dimUpdate = np.array(curDim).compress(flag) for enum in enumerate(dimUpdate) : self.message(3, 'Appending dimensions {0}:{1} to dataStructure'.format(enum[1],np.array(curDimval).compress(flag)[enum[0]])) outStr['_dimensions'].update({enum[1]:np.array(curDimval).compress(flag)[enum[0]]}) #Append new dimension if not isinstance(outStr['_dimensions'],dimStr) : outStr['_dimensions']['_ndims']+=1 #update dimension counts cmd = 'dumStr = {\''+param.lower()+'\':dumVar[\'data\']}' self.message(4, 'exec : '+cmd) exec(cmd) outStr.update(dumStr) self._ncfile.close() return outStr
[ "def", "read_slaext", "(", "self", ",", "filename", ",", "params", "=", "None", ",", "force", "=", "False", ",", "timerange", "=", "None", ",", "datatype", "=", "None", ",", "*", "*", "kwargs", ")", ":", "self", ".", "message", "(", "2", ",", "'Reading SLAext data ({0})'", ".", "format", "(", "datatype", ")", ")", "self", ".", "message", "(", "2", ",", "'Loading %s'", "%", "(", "filename", ")", ")", "#Open file\r", "self", ".", "_filename", "=", "filename", "try", ":", "self", ".", "_ncfile", "=", "ncfile", "(", "self", ".", "_filename", ",", "\"r\"", ")", "except", "Exception", ",", "e", ":", "self", ".", "warning", "(", "1", ",", "repr", "(", "e", ")", ")", "return", "{", "}", "#Get delimiter\r", "if", "os", ".", "path", ".", "basename", "(", "filename", ")", ".", "count", "(", "'.'", ")", ">", "os", ".", "path", ".", "basename", "(", "filename", ")", ".", "count", "(", "'_'", ")", ":", "delim", "=", "'.'", "else", ":", "delim", "=", "'_'", "#Gat sat name\r", "splitted", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", ".", "split", "(", "delim", ")", "if", "(", "datatype", "==", "'DT'", ")", "|", "(", "datatype", "==", "'NRT'", ")", ":", "sat_name", "=", "splitted", "[", "2", "]", "if", "splitted", "[", "0", "]", "==", "'nrt'", "else", "splitted", "[", "3", "]", "if", "datatype", "==", "'PISTACH'", ":", "sat_name", "=", "'J2'", "#Get list of recorded parameters:\r", "par_list", "=", "[", "i", ".", "encode", "(", ")", "for", "i", "in", "self", ".", "_ncfile", ".", "variables", ".", "keys", "(", ")", "]", "for", "i", "in", "[", "'time'", ",", "'longitude'", ",", "'latitude'", "]", ":", "par_list", ".", "pop", "(", "par_list", ".", "index", "(", "i", ")", ")", "nparam", "=", "len", "(", "par_list", ")", "self", ".", "message", "(", "2", ",", "'Recorded parameters : '", "+", "str", "(", "nparam", ")", "+", "' -> '", "+", "str", "(", "par_list", ")", ")", "lon", "=", "self", ".", "load_ncVar", "(", "'longitude'", ",", "*", "*", "kwargs", ")", "lon", "[", "'data'", "]", "=", "recale", "(", "lon", "[", "'data'", "]", ",", "degrees", "=", "True", ",", "zero_2pi", "=", "True", ")", "#shift longitudes\r", "lat", "=", "self", ".", "load_ncVar", "(", "'latitude'", ",", "*", "*", "kwargs", ")", "#Extract within limits\r", "ind", ",", "flag", "=", "in_limits", "(", "lon", "[", "'data'", "]", ",", "lat", "[", "'data'", "]", ",", "limit", "=", "self", ".", "limit", ")", "dim_lon", "=", "lon", "[", "'_dimensions'", "]", "lat", "=", "lat", "[", "'data'", "]", ".", "compress", "(", "flag", ")", "lon", "=", "lon", "[", "'data'", "]", ".", "compress", "(", "flag", ")", "dist", "=", "cumulative_distance", "(", "lat", ",", "lon", ")", "sz", "=", "np", ".", "shape", "(", "lon", ")", "ndims", "=", "np", ".", "size", "(", "sz", ")", "id", "=", "np", ".", "repeat", "(", "sat_name", ",", "sz", ")", "date", "=", "self", ".", "load_ncVar", "(", "'time'", ",", "time", "=", "ind", ",", "*", "*", "kwargs", ")", "dimStr", "=", "date", "[", "'_dimensions'", "]", "date", "=", "date", "[", "'data'", "]", "outStr", "=", "varStr", "(", "dimensions", "=", "dimStr", ")", "outStr", ".", "update", "(", "{", "'lon'", ":", "lon", "}", ")", "outStr", ".", "update", "(", "{", "'lat'", ":", "lat", "}", ")", "outStr", ".", "update", "(", "{", "'date'", ":", "date", "}", ")", "outStr", ".", "update", "(", "{", "'id'", ":", "id", "}", ")", "#{'_dimensions':dimStr,'lon':lon,'lat':lat,'date':date}\r", "for", "param", "in", "par_list", ":", "dumVar", "=", "self", ".", "load_ncVar", "(", "param", ",", "time", "=", "ind", 
",", "*", "*", "kwargs", ")", "#Load variables\r", "dimStr", "=", "dumVar", "[", "'_dimensions'", "]", "#update dimensions\r", "curDim", "=", "[", "str", "(", "dimname", ")", "for", "dimname", "in", "dimStr", ".", "keys", "(", ")", "[", "1", ":", "]", "]", "#[str(dimname) for dimname in self._ncfile.variables['LONGITUDE'].dimensions]\r", "curDimval", "=", "[", "dimStr", "[", "dim", "]", "for", "dim", "in", "curDim", "]", "#[len(self._ncfile.dimensions[dimname]) for dimname in curDim]\r", "flag", "=", "[", "(", "np", ".", "array", "(", "dimname", ")", "==", "outStr", "[", "'_dimensions'", "]", ".", "keys", "(", ")", ")", ".", "sum", "(", ")", "==", "0", "for", "dimname", "in", "curDim", "]", "#find dimensions to update\r", "dimUpdate", "=", "np", ".", "array", "(", "curDim", ")", ".", "compress", "(", "flag", ")", "for", "enum", "in", "enumerate", "(", "dimUpdate", ")", ":", "self", ".", "message", "(", "3", ",", "'Appending dimensions {0}:{1} to dataStructure'", ".", "format", "(", "enum", "[", "1", "]", ",", "np", ".", "array", "(", "curDimval", ")", ".", "compress", "(", "flag", ")", "[", "enum", "[", "0", "]", "]", ")", ")", "outStr", "[", "'_dimensions'", "]", ".", "update", "(", "{", "enum", "[", "1", "]", ":", "np", ".", "array", "(", "curDimval", ")", ".", "compress", "(", "flag", ")", "[", "enum", "[", "0", "]", "]", "}", ")", "#Append new dimension\r", "if", "not", "isinstance", "(", "outStr", "[", "'_dimensions'", "]", ",", "dimStr", ")", ":", "outStr", "[", "'_dimensions'", "]", "[", "'_ndims'", "]", "+=", "1", "#update dimension counts\r", "cmd", "=", "'dumStr = {\\''", "+", "param", ".", "lower", "(", ")", "+", "'\\':dumVar[\\'data\\']}'", "self", ".", "message", "(", "4", ",", "'exec : '", "+", "cmd", ")", "exec", "(", "cmd", ")", "outStr", ".", "update", "(", "dumStr", ")", "self", ".", "_ncfile", ".", "close", "(", ")", "return", "outStr" ]
Read AVISO Along-Track SLAEXT regional products :return outStr: Output data structure containing all recorded parameters as specificied by NetCDF file PARAMETER list. :author: Renaud Dussurget
[ "Read", "AVISO", "Along", "-", "Track", "SLAEXT", "regional", "products", ":", "return", "outStr", ":", "Output", "data", "structure", "containing", "all", "recorded", "parameters", "as", "specificied", "by", "NetCDF", "file", "PARAMETER", "list", ".", ":", "author", ":", "Renaud", "Dussurget" ]
python
train
42.829545
abseil/abseil-py
absl/flags/_flagvalues.py
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/flags/_flagvalues.py#L1088-L1168
def read_flags_from_files(self, argv, force_gnu=True): """Processes command line args, but also allow args to be read from file. Args: argv: [str], a list of strings, usually sys.argv[1:], which may contain one or more flagfile directives of the form --flagfile="./filename". Note that the name of the program (sys.argv[0]) should be omitted. force_gnu: bool, if False, --flagfile parsing obeys the FLAGS.is_gnu_getopt() value. If True, ignore the value and always follow gnu_getopt semantics. Returns: A new list which has the original list combined with what we read from any flagfile(s). Raises: IllegalFlagValueError: Raised when --flagfile is provided with no argument. This function is called by FLAGS(argv). It scans the input list for a flag that looks like: --flagfile=<somefile>. Then it opens <somefile>, reads all valid key and value pairs and inserts them into the input list in exactly the place where the --flagfile arg is found. Note that your application's flags are still defined the usual way using absl.flags DEFINE_flag() type functions. Notes (assuming we're getting a commandline of some sort as our input): --> For duplicate flags, the last one we hit should "win". --> Since flags that appear later win, a flagfile's settings can be "weak" if the --flagfile comes at the beginning of the argument sequence, and it can be "strong" if the --flagfile comes at the end. --> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile. It will be expanded in exactly the spot where it is found. --> In a flagfile, a line beginning with # or // is a comment. --> Entirely blank lines _should_ be ignored. """ rest_of_args = argv new_argv = [] while rest_of_args: current_arg = rest_of_args[0] rest_of_args = rest_of_args[1:] if self._is_flag_file_directive(current_arg): # This handles the case of -(-)flagfile foo. In this case the # next arg really is part of this one. if current_arg == '--flagfile' or current_arg == '-flagfile': if not rest_of_args: raise _exceptions.IllegalFlagValueError( '--flagfile with no argument') flag_filename = os.path.expanduser(rest_of_args[0]) rest_of_args = rest_of_args[1:] else: # This handles the case of (-)-flagfile=foo. flag_filename = self._extract_filename(current_arg) new_argv.extend(self._get_flag_file_lines(flag_filename)) else: new_argv.append(current_arg) # Stop parsing after '--', like getopt and gnu_getopt. if current_arg == '--': break # Stop parsing after a non-flag, like getopt. if not current_arg.startswith('-'): if not force_gnu and not self.__dict__['__use_gnu_getopt']: break else: if ('=' not in current_arg and rest_of_args and not rest_of_args[0].startswith('-')): # If this is an occurrence of a legitimate --x y, skip the value # so that it won't be mistaken for a standalone arg. fl = self._flags() name = current_arg.lstrip('-') if name in fl and not fl[name].boolean: current_arg = rest_of_args[0] rest_of_args = rest_of_args[1:] new_argv.append(current_arg) if rest_of_args: new_argv.extend(rest_of_args) return new_argv
[ "def", "read_flags_from_files", "(", "self", ",", "argv", ",", "force_gnu", "=", "True", ")", ":", "rest_of_args", "=", "argv", "new_argv", "=", "[", "]", "while", "rest_of_args", ":", "current_arg", "=", "rest_of_args", "[", "0", "]", "rest_of_args", "=", "rest_of_args", "[", "1", ":", "]", "if", "self", ".", "_is_flag_file_directive", "(", "current_arg", ")", ":", "# This handles the case of -(-)flagfile foo. In this case the", "# next arg really is part of this one.", "if", "current_arg", "==", "'--flagfile'", "or", "current_arg", "==", "'-flagfile'", ":", "if", "not", "rest_of_args", ":", "raise", "_exceptions", ".", "IllegalFlagValueError", "(", "'--flagfile with no argument'", ")", "flag_filename", "=", "os", ".", "path", ".", "expanduser", "(", "rest_of_args", "[", "0", "]", ")", "rest_of_args", "=", "rest_of_args", "[", "1", ":", "]", "else", ":", "# This handles the case of (-)-flagfile=foo.", "flag_filename", "=", "self", ".", "_extract_filename", "(", "current_arg", ")", "new_argv", ".", "extend", "(", "self", ".", "_get_flag_file_lines", "(", "flag_filename", ")", ")", "else", ":", "new_argv", ".", "append", "(", "current_arg", ")", "# Stop parsing after '--', like getopt and gnu_getopt.", "if", "current_arg", "==", "'--'", ":", "break", "# Stop parsing after a non-flag, like getopt.", "if", "not", "current_arg", ".", "startswith", "(", "'-'", ")", ":", "if", "not", "force_gnu", "and", "not", "self", ".", "__dict__", "[", "'__use_gnu_getopt'", "]", ":", "break", "else", ":", "if", "(", "'='", "not", "in", "current_arg", "and", "rest_of_args", "and", "not", "rest_of_args", "[", "0", "]", ".", "startswith", "(", "'-'", ")", ")", ":", "# If this is an occurrence of a legitimate --x y, skip the value", "# so that it won't be mistaken for a standalone arg.", "fl", "=", "self", ".", "_flags", "(", ")", "name", "=", "current_arg", ".", "lstrip", "(", "'-'", ")", "if", "name", "in", "fl", "and", "not", "fl", "[", "name", "]", ".", "boolean", ":", "current_arg", "=", "rest_of_args", "[", "0", "]", "rest_of_args", "=", "rest_of_args", "[", "1", ":", "]", "new_argv", ".", "append", "(", "current_arg", ")", "if", "rest_of_args", ":", "new_argv", ".", "extend", "(", "rest_of_args", ")", "return", "new_argv" ]
Processes command line args, but also allow args to be read from file. Args: argv: [str], a list of strings, usually sys.argv[1:], which may contain one or more flagfile directives of the form --flagfile="./filename". Note that the name of the program (sys.argv[0]) should be omitted. force_gnu: bool, if False, --flagfile parsing obeys the FLAGS.is_gnu_getopt() value. If True, ignore the value and always follow gnu_getopt semantics. Returns: A new list which has the original list combined with what we read from any flagfile(s). Raises: IllegalFlagValueError: Raised when --flagfile is provided with no argument. This function is called by FLAGS(argv). It scans the input list for a flag that looks like: --flagfile=<somefile>. Then it opens <somefile>, reads all valid key and value pairs and inserts them into the input list in exactly the place where the --flagfile arg is found. Note that your application's flags are still defined the usual way using absl.flags DEFINE_flag() type functions. Notes (assuming we're getting a commandline of some sort as our input): --> For duplicate flags, the last one we hit should "win". --> Since flags that appear later win, a flagfile's settings can be "weak" if the --flagfile comes at the beginning of the argument sequence, and it can be "strong" if the --flagfile comes at the end. --> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile. It will be expanded in exactly the spot where it is found. --> In a flagfile, a line beginning with # or // is a comment. --> Entirely blank lines _should_ be ignored.
[ "Processes", "command", "line", "args", "but", "also", "allow", "args", "to", "be", "read", "from", "file", "." ]
python
train
43.185185
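Usage sketch for the record above: read_flags_from_files() only expands --flagfile directives, it does not parse or validate flags. The file name and flag values below are made up for illustration; absl-py must be installed.

import os
from absl import flags

FLAGS = flags.FLAGS

# Write a throwaway flagfile so the sketch is self-contained.
with open('example.cfg', 'w') as f:
    f.write('# comment lines and blank lines are skipped\n')
    f.write('--name=from_file\n')

argv = ['--flagfile=example.cfg', '--verbosity=0']
print(FLAGS.read_flags_from_files(argv))
# Expected: ['--name=from_file', '--verbosity=0'] -- the flagfile contents
# are spliced in exactly where the directive appeared.
os.remove('example.cfg')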
googleapis/google-cloud-python
datalabeling/google/cloud/datalabeling_v1beta1/gapic/data_labeling_service_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/datalabeling/google/cloud/datalabeling_v1beta1/gapic/data_labeling_service_client.py#L93-L99
def annotation_spec_set_path(cls, project, annotation_spec_set): """Return a fully-qualified annotation_spec_set string.""" return google.api_core.path_template.expand( "projects/{project}/annotationSpecSets/{annotation_spec_set}", project=project, annotation_spec_set=annotation_spec_set, )
[ "def", "annotation_spec_set_path", "(", "cls", ",", "project", ",", "annotation_spec_set", ")", ":", "return", "google", ".", "api_core", ".", "path_template", ".", "expand", "(", "\"projects/{project}/annotationSpecSets/{annotation_spec_set}\"", ",", "project", "=", "project", ",", "annotation_spec_set", "=", "annotation_spec_set", ",", ")" ]
Return a fully-qualified annotation_spec_set string.
[ "Return", "a", "fully", "-", "qualified", "annotation_spec_set", "string", "." ]
python
train
49.285714
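Since annotation_spec_set_path is a classmethod that only expands a path template, it can be called without credentials. The project and resource IDs below are hypothetical; the google-cloud-datalabeling package is assumed to be installed.

from google.cloud import datalabeling_v1beta1

client_cls = datalabeling_v1beta1.DataLabelingServiceClient
path = client_cls.annotation_spec_set_path('my-project', 'my-spec-set')
print(path)  # projects/my-project/annotationSpecSets/my-spec-set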
django-userena-ce/django-userena-ce
userena/backends.py
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/backends.py#L12-L48
def authenticate(self, request, identification, password=None, check_password=True): """ Authenticates a user through the combination email/username with password. :param request: The authenticate() method of authentication backends requires request as the first positional argument from Django 2.1. :param identification: A string containing the username or e-mail of the user that is trying to authenticate. :password: Optional string containing the password for the user. :param check_password: Boolean that defines if the password should be checked for this user. Always keep this ``True``. This is only used by userena at activation when a user opens a page with a secret hash. :return: The signed in :class:`User`. """ User = get_user_model() try: django.core.validators.validate_email(identification) try: user = User.objects.get(email__iexact=identification) except User.DoesNotExist: return None except django.core.validators.ValidationError: try: user = User.objects.get(username__iexact=identification) except User.DoesNotExist: return None if check_password: if user.check_password(password): return user return None else: return user
[ "def", "authenticate", "(", "self", ",", "request", ",", "identification", ",", "password", "=", "None", ",", "check_password", "=", "True", ")", ":", "User", "=", "get_user_model", "(", ")", "try", ":", "django", ".", "core", ".", "validators", ".", "validate_email", "(", "identification", ")", "try", ":", "user", "=", "User", ".", "objects", ".", "get", "(", "email__iexact", "=", "identification", ")", "except", "User", ".", "DoesNotExist", ":", "return", "None", "except", "django", ".", "core", ".", "validators", ".", "ValidationError", ":", "try", ":", "user", "=", "User", ".", "objects", ".", "get", "(", "username__iexact", "=", "identification", ")", "except", "User", ".", "DoesNotExist", ":", "return", "None", "if", "check_password", ":", "if", "user", ".", "check_password", "(", "password", ")", ":", "return", "user", "return", "None", "else", ":", "return", "user" ]
Authenticates a user through the combination email/username with password. :param request: The authenticate() method of authentication backends requires request as the first positional argument from Django 2.1. :param identification: A string containing the username or e-mail of the user that is trying to authenticate. :param password: Optional string containing the password for the user. :param check_password: Boolean that defines if the password should be checked for this user. Always keep this ``True``. This is only used by userena at activation when a user opens a page with a secret hash. :return: The signed in :class:`User`.
[ "Authenticates", "a", "user", "through", "the", "combination", "email", "/", "username", "with", "password", "." ]
python
train
38.513514
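A hedged sketch of how this backend is normally reached, via Django's authenticate() helper, in a project that lists userena's backend in AUTHENTICATION_BACKENDS; the credentials are made up and a configured Django settings module is assumed.

from django.contrib.auth import authenticate

# identification may be either a username or an e-mail address; the backend
# tries an e-mail lookup first and falls back to a username lookup.
user = authenticate(request=None,
                    identification='alice@example.com',
                    password='s3cret')
if user is None:
    print('invalid credentials')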
poppy-project/pypot
pypot/primitive/primitive.py
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/primitive/primitive.py#L116-L124
def start(self): """ Start or restart (the :meth:`~pypot.primitive.primitive.Primitive.stop` method will automatically be called) the primitive. """ if not self.robot._primitive_manager.running: raise RuntimeError('Cannot run a primitive when the sync is stopped!') StoppableThread.start(self) self.wait_to_start() logger.info("Primitive %s started.", self)
[ "def", "start", "(", "self", ")", ":", "if", "not", "self", ".", "robot", ".", "_primitive_manager", ".", "running", ":", "raise", "RuntimeError", "(", "'Cannot run a primitive when the sync is stopped!'", ")", "StoppableThread", ".", "start", "(", "self", ")", "self", ".", "wait_to_start", "(", ")", "logger", ".", "info", "(", "\"Primitive %s started.\"", ",", "self", ")" ]
Start or restart (the :meth:`~pypot.primitive.primitive.Primitive.stop` method will automatically be called) the primitive.
[ "Start", "or", "restart", "(", "the", ":", "meth", ":", "~pypot", ".", "primitive", ".", "primitive", ".", "Primitive", ".", "stop", "method", "will", "automatically", "be", "called", ")", "the", "primitive", "." ]
python
train
44.777778
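A hedged calling-pattern sketch for the record above; the Wave primitive and the robot object are hypothetical, and the robot's primitive manager (sync) must already be running or start() raises RuntimeError.

from pypot.primitive import Primitive

class Wave(Primitive):
    def run(self):
        # motor movements would go here; omitted in this sketch
        pass

wave = Wave(robot)   # robot: an already-started pypot Robot instance (assumed)
wave.start()         # blocks via wait_to_start() until the thread is up
wave.stop()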
thomasdelaet/python-velbus
velbus/messages/set_daylight_saving.py
https://github.com/thomasdelaet/python-velbus/blob/af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd/velbus/messages/set_daylight_saving.py#L42-L48
def to_json(self): """ :return: str """ json_dict = self.to_json_basic() json_dict['ds'] = self._ds return json.dumps(json_dict)
[ "def", "to_json", "(", "self", ")", ":", "json_dict", "=", "self", ".", "to_json_basic", "(", ")", "json_dict", "[", "'ds'", "]", "=", "self", ".", "_ds", "return", "json", ".", "dumps", "(", "json_dict", ")" ]
:return: str
[ ":", "return", ":", "str" ]
python
train
24.285714
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/controller/sqlitedb.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/controller/sqlitedb.py#L341-L353
def update_record(self, msg_id, rec): """Update the data in an existing record.""" query = "UPDATE %s SET "%self.table sets = [] keys = sorted(rec.keys()) values = [] for key in keys: sets.append('%s = ?'%key) values.append(rec[key]) query += ', '.join(sets) query += ' WHERE msg_id == ?' values.append(msg_id) self._db.execute(query, values)
[ "def", "update_record", "(", "self", ",", "msg_id", ",", "rec", ")", ":", "query", "=", "\"UPDATE %s SET \"", "%", "self", ".", "table", "sets", "=", "[", "]", "keys", "=", "sorted", "(", "rec", ".", "keys", "(", ")", ")", "values", "=", "[", "]", "for", "key", "in", "keys", ":", "sets", ".", "append", "(", "'%s = ?'", "%", "key", ")", "values", ".", "append", "(", "rec", "[", "key", "]", ")", "query", "+=", "', '", ".", "join", "(", "sets", ")", "query", "+=", "' WHERE msg_id == ?'", "values", ".", "append", "(", "msg_id", ")", "self", ".", "_db", ".", "execute", "(", "query", ",", "values", ")" ]
Update the data in an existing record.
[ "Update", "the", "data", "in", "an", "existing", "record", "." ]
python
test
33.384615
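To make the string formatting in the record above concrete, here is a standalone mirror of the query it builds (hypothetical table name, record, and msg_id; this is not the SQLiteDB class itself, just the same idiom).

rec = {'completed': '2014-01-01 12:00:00', 'status': 'ok'}
msg_id = 'abc123'
keys = sorted(rec.keys())
query = 'UPDATE results SET ' + ', '.join('%s = ?' % k for k in keys) + ' WHERE msg_id == ?'
values = [rec[k] for k in keys] + [msg_id]
print(query)   # UPDATE results SET completed = ?, status = ? WHERE msg_id == ?
print(values)  # ['2014-01-01 12:00:00', 'ok', 'abc123']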
saltstack/salt
salt/modules/nftables.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nftables.py#L948-L993
def flush(table='filter', chain='', family='ipv4'): ''' Flush the chain in the specified table, flush all chains in the specified table if chain is not specified. CLI Example: .. code-block:: bash salt '*' nftables.flush filter salt '*' nftables.flush filter input IPv6: salt '*' nftables.flush filter input family=ipv6 ''' ret = {'comment': 'Failed to flush rules from chain {0} in table {1}.'.format(chain, table), 'result': False} res = check_table(table, family=family) if not res['result']: return res nft_family = _NFTABLES_FAMILIES[family] if chain: res = check_chain(table, chain, family=family) if not res['result']: return res cmd = '{0} flush chain {1} {2} {3}'.\ format(_nftables_cmd(), nft_family, table, chain) comment = 'from chain {0} in table {1} in family {2}.'.\ format(chain, table, family) else: cmd = '{0} flush table {1} {2}'.\ format(_nftables_cmd(), nft_family, table) comment = 'from table {0} in family {1}.'.\ format(table, family) out = __salt__['cmd.run'](cmd, python_shell=False) if len(out) == 0: ret['result'] = True ret['comment'] = 'Flushed rules {0}'.format(comment) else: ret['comment'] = 'Failed to flush rules {0}'.format(comment) return ret
[ "def", "flush", "(", "table", "=", "'filter'", ",", "chain", "=", "''", ",", "family", "=", "'ipv4'", ")", ":", "ret", "=", "{", "'comment'", ":", "'Failed to flush rules from chain {0} in table {1}.'", ".", "format", "(", "chain", ",", "table", ")", ",", "'result'", ":", "False", "}", "res", "=", "check_table", "(", "table", ",", "family", "=", "family", ")", "if", "not", "res", "[", "'result'", "]", ":", "return", "res", "nft_family", "=", "_NFTABLES_FAMILIES", "[", "family", "]", "if", "chain", ":", "res", "=", "check_chain", "(", "table", ",", "chain", ",", "family", "=", "family", ")", "if", "not", "res", "[", "'result'", "]", ":", "return", "res", "cmd", "=", "'{0} flush chain {1} {2} {3}'", ".", "format", "(", "_nftables_cmd", "(", ")", ",", "nft_family", ",", "table", ",", "chain", ")", "comment", "=", "'from chain {0} in table {1} in family {2}.'", ".", "format", "(", "chain", ",", "table", ",", "family", ")", "else", ":", "cmd", "=", "'{0} flush table {1} {2}'", ".", "format", "(", "_nftables_cmd", "(", ")", ",", "nft_family", ",", "table", ")", "comment", "=", "'from table {0} in family {1}.'", ".", "format", "(", "table", ",", "family", ")", "out", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "if", "len", "(", "out", ")", "==", "0", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Flushed rules {0}'", ".", "format", "(", "comment", ")", "else", ":", "ret", "[", "'comment'", "]", "=", "'Failed to flush rules {0}'", ".", "format", "(", "comment", ")", "return", "ret" ]
Flush the chain in the specified table, flush all chains in the specified table if chain is not specified. CLI Example: .. code-block:: bash salt '*' nftables.flush filter salt '*' nftables.flush filter input IPv6: salt '*' nftables.flush filter input family=ipv6
[ "Flush", "the", "chain", "in", "the", "specified", "table", "flush", "all", "chains", "in", "the", "specified", "table", "if", "chain", "is", "not", "specified", "." ]
python
train
30.630435
tensorflow/probability
tensorflow_probability/python/distributions/gaussian_process.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/gaussian_process.py#L388-L413
def _get_index_points(self, index_points=None): """Return `index_points` if not None, else `self._index_points`. Args: index_points: if given, this is what is returned; else, `self._index_points` Returns: index_points: the given arg, if not None, else the class member `self._index_points`. Rases: ValueError: if `index_points` and `self._index_points` are both `None`. """ if self._index_points is None and index_points is None: raise ValueError( 'This GaussianProcess instance was not instantiated with a value for ' 'index_points. One must therefore be provided when calling sample, ' 'log_prob, and other such methods. In particular, one can\'t compute ' 'KL divergences to/from an instance of `GaussianProccess` with ' 'unspecified `index_points` directly. Instead, use the ' '`get_marginal_distribution` function, which takes `index_points` as ' 'an argument and returns a `Normal` or ' '`MultivariateNormalLinearOperator` instance, whose KL can be ' 'computed.') return index_points if index_points is not None else self._index_points
[ "def", "_get_index_points", "(", "self", ",", "index_points", "=", "None", ")", ":", "if", "self", ".", "_index_points", "is", "None", "and", "index_points", "is", "None", ":", "raise", "ValueError", "(", "'This GaussianProcess instance was not instantiated with a value for '", "'index_points. One must therefore be provided when calling sample, '", "'log_prob, and other such methods. In particular, one can\\'t compute '", "'KL divergences to/from an instance of `GaussianProccess` with '", "'unspecified `index_points` directly. Instead, use the '", "'`get_marginal_distribution` function, which takes `index_points` as '", "'an argument and returns a `Normal` or '", "'`MultivariateNormalLinearOperator` instance, whose KL can be '", "'computed.'", ")", "return", "index_points", "if", "index_points", "is", "not", "None", "else", "self", ".", "_index_points" ]
Return `index_points` if not None, else `self._index_points`. Args: index_points: if given, this is what is returned; else, `self._index_points` Returns: index_points: the given arg, if not None, else the class member `self._index_points`. Raises: ValueError: if `index_points` and `self._index_points` are both `None`.
[ "Return", "index_points", "if", "not", "None", "else", "self", ".", "_index_points", "." ]
python
test
45.115385
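The helper above is private to the distribution; the fallback rule it encodes can be illustrated with a tiny standalone mirror in plain Python (not the TensorFlow Probability class itself).

def get_index_points(stored_index_points, index_points=None):
    # Prefer the per-call argument, fall back to the constructor value,
    # and fail loudly if neither was ever provided.
    if stored_index_points is None and index_points is None:
        raise ValueError('index_points must be supplied at construction '
                         'or per call')
    return index_points if index_points is not None else stored_index_points

print(get_index_points(None, [[0.5]]))   # [[0.5]]
print(get_index_points([[0.1]]))         # [[0.1]]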
pymc-devs/pymc
pymc/distributions.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L3067-L3076
def extend_dirichlet(p): """ extend_dirichlet(p) Concatenates 1-sum(p) to the end of p and returns. """ if len(np.shape(p)) > 1: return np.hstack((p, np.atleast_2d(1. - np.sum(p)))) else: return np.hstack((p, 1. - np.sum(p)))
[ "def", "extend_dirichlet", "(", "p", ")", ":", "if", "len", "(", "np", ".", "shape", "(", "p", ")", ")", ">", "1", ":", "return", "np", ".", "hstack", "(", "(", "p", ",", "np", ".", "atleast_2d", "(", "1.", "-", "np", ".", "sum", "(", "p", ")", ")", ")", ")", "else", ":", "return", "np", ".", "hstack", "(", "(", "p", ",", "1.", "-", "np", ".", "sum", "(", "p", ")", ")", ")" ]
extend_dirichlet(p) Concatenates 1-sum(p) to the end of p and returns.
[ "extend_dirichlet", "(", "p", ")" ]
python
train
25.7
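A quick worked example of the helper above; it assumes the PyMC 2.x package from this record is installed and importable as pymc.

import numpy as np
from pymc.distributions import extend_dirichlet

p = np.array([0.2, 0.3])
print(extend_dirichlet(p))    # [0.2 0.3 0.5]

p2 = np.array([[0.2, 0.3]])   # a 2-d input keeps its 2-d shape
print(extend_dirichlet(p2))   # [[0.2 0.3 0.5]]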
instacart/ahab
examples/nathook.py
https://github.com/instacart/ahab/blob/da85dc6d89f5d0c49d3a26a25ea3710c7881b150/examples/nathook.py#L124-L139
def table(tab): """Access IPTables transactionally in a uniform way. Ensures all access is done without autocommit and that only the outer most task commits, and also ensures we refresh once and commit once. """ global open_tables if tab in open_tables: yield open_tables[tab] else: open_tables[tab] = iptc.Table(tab) open_tables[tab].refresh() open_tables[tab].autocommit = False yield open_tables[tab] open_tables[tab].commit() del open_tables[tab]
[ "def", "table", "(", "tab", ")", ":", "global", "open_tables", "if", "tab", "in", "open_tables", ":", "yield", "open_tables", "[", "tab", "]", "else", ":", "open_tables", "[", "tab", "]", "=", "iptc", ".", "Table", "(", "tab", ")", "open_tables", "[", "tab", "]", ".", "refresh", "(", ")", "open_tables", "[", "tab", "]", ".", "autocommit", "=", "False", "yield", "open_tables", "[", "tab", "]", "open_tables", "[", "tab", "]", ".", "commit", "(", ")", "del", "open_tables", "[", "tab", "]" ]
Access IPTables transactionally in a uniform way. Ensures all access is done without autocommit and that only the outermost task commits, and also ensures we refresh once and commit once.
[ "Access", "IPTables", "transactionally", "in", "a", "uniform", "way", "." ]
python
train
32.5
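A hedged usage sketch: it assumes the generator above is wrapped with contextlib.contextmanager (as in the example module), that python-iptables is installed and the example module is importable, and that the process has the privileges needed to read the NAT table; the chain iteration is purely illustrative.

from nathook import table   # the example module above (import path assumed)

with table('nat') as nat:
    # Everything inside the block shares a single refresh and a single commit.
    for chain in nat.chains:
        print(chain.name)
    with table('nat') as same:
        # Nested use re-enters the already-open table instead of re-opening it.
        assert same is nat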
shoebot/shoebot
shoebot/grammar/nodebox.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/grammar/nodebox.py#L620-L640
def text(self, txt, x, y, width=None, height=1000000, outline=False, draw=True, **kwargs): ''' Draws a string of text according to current font settings. :param txt: Text to output :param x: x-coordinate of the top left corner :param y: y-coordinate of the top left corner :param width: text width :param height: text height :param outline: If True draws outline text (defaults to False) :param draw: Set to False to inhibit immediate drawing (defaults to True) :return: Path object representing the text. ''' txt = self.Text(txt, x, y, width, height, outline=outline, ctx=None, **kwargs) if outline: path = txt.path if draw: path.draw() return path else: return txt
[ "def", "text", "(", "self", ",", "txt", ",", "x", ",", "y", ",", "width", "=", "None", ",", "height", "=", "1000000", ",", "outline", "=", "False", ",", "draw", "=", "True", ",", "*", "*", "kwargs", ")", ":", "txt", "=", "self", ".", "Text", "(", "txt", ",", "x", ",", "y", ",", "width", ",", "height", ",", "outline", "=", "outline", ",", "ctx", "=", "None", ",", "*", "*", "kwargs", ")", "if", "outline", ":", "path", "=", "txt", ".", "path", "if", "draw", ":", "path", ".", "draw", "(", ")", "return", "path", "else", ":", "return", "txt" ]
Draws a string of text according to current font settings. :param txt: Text to output :param x: x-coordinate of the top left corner :param y: y-coordinate of the top left corner :param width: text width :param height: text height :param outline: If True draws outline text (defaults to False) :param draw: Set to False to inhibit immediate drawing (defaults to True) :return: Path object representing the text.
[ "Draws", "a", "string", "of", "text", "according", "to", "current", "font", "settings", "." ]
python
valid
39.190476
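A hedged sketch of the call as it would appear inside a Shoebot script, where globals such as size(), font(), and text() are injected by the Shoebot runtime; the font name and coordinates are arbitrary.

size(300, 120)
font("Arial", 24)
text("Hello Shoebot", 20, 50)                  # drawn immediately
p = text("Outlined", 20, 90, outline=True)     # also returns the text as a path
# p can be transformed or drawn again later, e.g. p.draw()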
fabric-bolt/fabric-bolt
fabric_bolt/hosts/views.py
https://github.com/fabric-bolt/fabric-bolt/blob/0f434783026f1b9ce16a416fa496d76921fe49ca/fabric_bolt/hosts/views.py#L30-L36
def form_valid(self, form): """First call the parent's form valid then let the user know it worked.""" form_valid_from_parent = super(HostCreate, self).form_valid(form) messages.success(self.request, 'Host {} Successfully Created'.format(self.object)) return form_valid_from_parent
[ "def", "form_valid", "(", "self", ",", "form", ")", ":", "form_valid_from_parent", "=", "super", "(", "HostCreate", ",", "self", ")", ".", "form_valid", "(", "form", ")", "messages", ".", "success", "(", "self", ".", "request", ",", "'Host {} Successfully Created'", ".", "format", "(", "self", ".", "object", ")", ")", "return", "form_valid_from_parent" ]
First call the parent's form valid then let the user know it worked.
[ "First", "call", "the", "parent", "s", "form", "valid", "then", "let", "the", "user", "know", "it", "worked", "." ]
python
train
44.142857
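The record above is the common call-super-then-flash-a-message pattern; a hedged sketch of the same pattern for a generic CreateView follows (Widget is a hypothetical model and a configured Django project is assumed).

from django.contrib import messages
from django.views.generic import CreateView

class WidgetCreate(CreateView):
    model = Widget            # hypothetical model defined elsewhere
    fields = ['name']

    def form_valid(self, form):
        response = super(WidgetCreate, self).form_valid(form)
        messages.success(self.request, 'Widget {} Successfully Created'.format(self.object))
        return response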
wrongwaycn/ssdb-py
ssdb/client.py
https://github.com/wrongwaycn/ssdb-py/blob/ce7b1542f0faa06fe71a60c667fe15992af0f621/ssdb/client.py#L1427-L1451
def zrlist(self, name_start, name_end, limit=10): """ Return a list of the top ``limit`` zset's name between ``name_start`` and ``name_end`` in descending order .. note:: The range is (``name_start``, ``name_end``]. The ``name_start`` isn't in the range, but ``name_end`` is. :param string name_start: The lower bound(not included) of zset names to be returned, empty string ``''`` means +inf :param string name_end: The upper bound(included) of zset names to be returned, empty string ``''`` means -inf :param int limit: number of elements will be returned. :return: a list of zset's name :rtype: list >>> ssdb.zlist('zset_ ', 'zset_z', 10) ['zset_2', 'zset_1'] >>> ssdb.zlist('zset_ ', '', 3) ['zset_2', 'zset_1'] >>> ssdb.zlist('', 'aaa_not_exist', 10) [] """ limit = get_positive_integer('limit', limit) return self.execute_command('zrlist', name_start, name_end, limit)
[ "def", "zrlist", "(", "self", ",", "name_start", ",", "name_end", ",", "limit", "=", "10", ")", ":", "limit", "=", "get_positive_integer", "(", "'limit'", ",", "limit", ")", "return", "self", ".", "execute_command", "(", "'zrlist'", ",", "name_start", ",", "name_end", ",", "limit", ")" ]
Return a list of the top ``limit`` zset's name between ``name_start`` and ``name_end`` in descending order .. note:: The range is (``name_start``, ``name_end``]. The ``name_start`` isn't in the range, but ``name_end`` is. :param string name_start: The lower bound(not included) of zset names to be returned, empty string ``''`` means +inf :param string name_end: The upper bound(included) of zset names to be returned, empty string ``''`` means -inf :param int limit: number of elements will be returned. :return: a list of zset's name :rtype: list >>> ssdb.zlist('zset_ ', 'zset_z', 10) ['zset_2', 'zset_1'] >>> ssdb.zlist('zset_ ', '', 3) ['zset_2', 'zset_1'] >>> ssdb.zlist('', 'aaa_not_exist', 10) []
[ "Return", "a", "list", "of", "the", "top", "limit", "zset", "s", "name", "between", "name_start", "and", "name_end", "in", "descending", "order" ]
python
train
41.68
FujiMakoto/AgentML
agentml/common.py
https://github.com/FujiMakoto/AgentML/blob/c8cb64b460d876666bf29ea2c682189874c7c403/agentml/common.py#L143-L160
def bool_element(element, name, default=True): """ Returns the bool value of an element, or a default if it's not defined :param element: The XML Element object :type element: etree._Element :param name: The name of the element to evaluate :type name: str :param default: The default value to return if the element is not defined :type default: bool """ element_value = element.find(name) if element_value is not None: return element_value.text == 'true' return default
[ "def", "bool_element", "(", "element", ",", "name", ",", "default", "=", "True", ")", ":", "element_value", "=", "element", ".", "find", "(", "name", ")", "if", "element_value", "is", "not", "None", ":", "return", "element_value", ".", "text", "==", "'true'", "return", "default" ]
Returns the bool value of an element, or a default if it's not defined :param element: The XML Element object :type element: etree._Element :param name: The name of the element to evaluate :type name: str :param default: The default value to return if the element is not defined :type default: bool
[ "Returns", "the", "bool", "value", "of", "an", "element", "or", "a", "default", "if", "it", "s", "not", "defined", ":", "param", "element", ":", "The", "XML", "Element", "object", ":", "type", "element", ":", "etree", ".", "_Element" ]
python
train
28.611111
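A quick worked example of the helper above; it assumes AgentML and lxml are installed, and the XML snippet is made up.

from lxml import etree
from agentml.common import bool_element

element = etree.fromstring('<settings><debug>true</debug></settings>')
print(bool_element(element, 'debug'))            # True
print(bool_element(element, 'missing'))          # True  (default kept)
print(bool_element(element, 'missing', False))   # False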
saltstack/salt
salt/cloud/clouds/gce.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/gce.py#L1207-L1228
def show_hc(kwargs=None, call=None): ''' Show the details of an existing health check. CLI Example: .. code-block:: bash salt-cloud -f show_hc gce name=hc ''' if call != 'function': raise SaltCloudSystemExit( 'The show_hc function must be called with -f or --function.' ) if not kwargs or 'name' not in kwargs: log.error( 'Must specify name of health check.' ) return False conn = get_conn() return _expand_item(conn.ex_get_healthcheck(kwargs['name']))
[ "def", "show_hc", "(", "kwargs", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The show_hc function must be called with -f or --function.'", ")", "if", "not", "kwargs", "or", "'name'", "not", "in", "kwargs", ":", "log", ".", "error", "(", "'Must specify name of health check.'", ")", "return", "False", "conn", "=", "get_conn", "(", ")", "return", "_expand_item", "(", "conn", ".", "ex_get_healthcheck", "(", "kwargs", "[", "'name'", "]", ")", ")" ]
Show the details of an existing health check. CLI Example: .. code-block:: bash salt-cloud -f show_hc gce name=hc
[ "Show", "the", "details", "of", "an", "existing", "health", "check", "." ]
python
train
24.681818