Dataset columns:
  repo              stringlengths (7 to 54)
  path              stringlengths (4 to 192)
  url               stringlengths (87 to 284)
  code              stringlengths (78 to 104k)
  code_tokens       list
  docstring         stringlengths (1 to 46.9k)
  docstring_tokens  list
  language          stringclasses (1 value)
  partition         stringclasses (3 values)
geophysics-ubonn/reda
lib/reda/containers/ERT.py
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/ERT.py#L371-L388
def delete_measurements(self, row_or_rows): """Delete one or more measurements by index of the DataFrame. Resets the DataFrame index. Parameters ---------- row_or_rows : int or list of ints Row numbers (starting with zero) of the data DataFrame (ert.data) to delete Returns ------- None """ self.data.drop(self.data.index[row_or_rows], inplace=True) self.data = self.data.reset_index()
[ "def", "delete_measurements", "(", "self", ",", "row_or_rows", ")", ":", "self", ".", "data", ".", "drop", "(", "self", ".", "data", ".", "index", "[", "row_or_rows", "]", ",", "inplace", "=", "True", ")", "self", ".", "data", "=", "self", ".", "data", ".", "reset_index", "(", ")" ]
Delete one or more measurements by index of the DataFrame. Resets the DataFrame index. Parameters ---------- row_or_rows : int or list of ints Row numbers (starting with zero) of the data DataFrame (ert.data) to delete Returns ------- None
[ "Delete", "one", "or", "more", "measurements", "by", "index", "of", "the", "DataFrame", "." ]
python
train
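A short usage sketch for the record above; the container setup and the chosen row numbers are assumptions (importing measurements is omitted), only `delete_measurements` itself comes from the record.

```python
# Hedged sketch: assumes the reda package and a container whose .data
# DataFrame already holds imported measurements (import step not shown).
import reda

ert = reda.ERT()
# ... import measurements into ert.data here ...

# Remove the first and third rows; the DataFrame index is reset afterwards,
# so the remaining measurements are renumbered from zero.
ert.delete_measurements([0, 2])
```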
wearpants/fakesleep
fakesleep.py
https://github.com/wearpants/fakesleep/blob/b2b8f422a0838fff50143ec00c1bbb7974e92a83/fakesleep.py#L53-L59
def monkey_restore(): """restore real versions. Inverse of `monkey_patch`""" for k, v in originals.items(): setattr(time_mod, k, v) global epoch epoch = None
[ "def", "monkey_restore", "(", ")", ":", "for", "k", ",", "v", "in", "originals", ".", "items", "(", ")", ":", "setattr", "(", "time_mod", ",", "k", ",", "v", ")", "global", "epoch", "epoch", "=", "None" ]
restore real versions. Inverse of `monkey_patch`
[ "restore", "real", "versions", ".", "Inverse", "of", "monkey_patch" ]
python
train
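Since `monkey_restore` is the inverse of `monkey_patch` (referenced by the docstring above), a minimal patch/restore cycle might look like this sketch; the 60-second sleep is only there to show that the fake clock makes it return immediately.

```python
# Minimal sketch of the patch/restore cycle described by the docstring above.
import time

import fakesleep

fakesleep.monkey_patch()    # swap in fake time.time()/time.sleep()
time.sleep(60)              # returns instantly under the fake clock
fakesleep.monkey_restore()  # put the real functions back
```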
iotile/coretools
scripts/release.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/scripts/release.py#L76-L87
def check_version(component, expected_version): """Make sure the package version in setuptools matches what we expect it to be""" comp = comp_names[component] compath = os.path.realpath(os.path.abspath(comp.path)) sys.path.insert(0, compath) import version if version.version != expected_version: raise EnvironmentError("Version mismatch during release, expected={}, found={}".format(expected_version, version.version))
[ "def", "check_version", "(", "component", ",", "expected_version", ")", ":", "comp", "=", "comp_names", "[", "component", "]", "compath", "=", "os", ".", "path", ".", "realpath", "(", "os", ".", "path", ".", "abspath", "(", "comp", ".", "path", ")", ")", "sys", ".", "path", ".", "insert", "(", "0", ",", "compath", ")", "import", "version", "if", "version", ".", "version", "!=", "expected_version", ":", "raise", "EnvironmentError", "(", "\"Version mismatch during release, expected={}, found={}\"", ".", "format", "(", "expected_version", ",", "version", ".", "version", ")", ")" ]
Make sure the package version in setuptools matches what we expect it to be
[ "Make", "sure", "the", "package", "version", "in", "setuptools", "matches", "what", "we", "expect", "it", "to", "be" ]
python
train
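An illustrative call follows; the component name and version string are placeholders, since the real keys come from the release script's `comp_names` mapping.

```python
# Placeholder values; check_version() raises EnvironmentError when the
# version module found on the component's path disagrees with the argument.
check_version("iotile_core", "3.16.0")
```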
guaix-ucm/pyemir
emirdrp/processing/wavecal/median_slitlets_rectified.py
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/median_slitlets_rectified.py#L42-L167
def median_slitlets_rectified( input_image, mode=0, minimum_slitlet_width_mm=EMIR_MINIMUM_SLITLET_WIDTH_MM, maximum_slitlet_width_mm=EMIR_MAXIMUM_SLITLET_WIDTH_MM, debugplot=0 ): """Compute median spectrum for each slitlet. Parameters ---------- input_image : HDUList object Input 2D image. mode : int Indicate desired result: 0 : image with the same size as the input image, with the median spectrum of each slitlet spanning all the spectra of the corresponding slitlet 1 : image with 55 spectra, containing the median spectra of each slitlet 2 : single collapsed median spectrum, using exclusively the useful slitlets from the input image minimum_slitlet_width_mm : float Minimum slitlet width (mm) for a valid slitlet. maximum_slitlet_width_mm : float Maximum slitlet width (mm) for a valid slitlet. debugplot : int Determines whether intermediate computations and/or plots are displayed. The valid codes are defined in numina.array.display.pause_debugplot. Returns ------- image_median : HDUList object Output image. """ image_header = input_image[0].header image2d = input_image[0].data # check image dimensions naxis2_expected = EMIR_NBARS * EMIR_NPIXPERSLIT_RECTIFIED naxis2, naxis1 = image2d.shape if naxis2 != naxis2_expected: raise ValueError("NAXIS2={0} should be {1}".format( naxis2, naxis2_expected )) # check that the FITS file has been obtained with EMIR instrument = image_header['instrume'] if instrument != 'EMIR': raise ValueError("INSTRUME keyword is not 'EMIR'!") # initialize output image if mode == 0: image2d_median = np.zeros((naxis2, naxis1)) else: image2d_median = np.zeros((EMIR_NBARS, naxis1)) # main loop for i in range(EMIR_NBARS): ns1 = i * EMIR_NPIXPERSLIT_RECTIFIED + 1 ns2 = ns1 + EMIR_NPIXPERSLIT_RECTIFIED - 1 sp_median = np.median(image2d[(ns1-1):ns2, :], axis=0) if mode == 0: image2d_median[(ns1-1):ns2, :] = np.tile( sp_median, (EMIR_NPIXPERSLIT_RECTIFIED, 1) ) else: image2d_median[i] = np.copy(sp_median) if mode == 2: # get CSU configuration from FITS header csu_config = CsuConfiguration.define_from_header(image_header) # define wavelength calibration parameters crpix1 = image_header['crpix1'] crval1 = image_header['crval1'] cdelt1 = image_header['cdelt1'] # segregate slitlets list_useful_slitlets = csu_config.widths_in_range_mm( minwidth=minimum_slitlet_width_mm, maxwidth=maximum_slitlet_width_mm ) list_not_useful_slitlets = [i for i in list(range(1, EMIR_NBARS + 1)) if i not in list_useful_slitlets] if abs(debugplot) != 0: print('>>> list_useful_slitlets....:', list_useful_slitlets) print('>>> list_not_useful_slitlets:', list_not_useful_slitlets) # define mask from array data mask2d, borders = define_mask_borders(image2d_median, sought_value=0) if abs(debugplot) % 10 != 0: ximshow(mask2d.astype(int), z1z2=(-.2, 1.2), crpix1=crpix1, crval1=crval1, cdelt1=cdelt1, debugplot=debugplot) # update mask with unused slitlets for islitlet in list_not_useful_slitlets: mask2d[islitlet - 1, :] = np.array([True] * naxis1) if abs(debugplot) % 10 != 0: ximshow(mask2d.astype(int), z1z2=(-.2, 1.2), crpix1=crpix1, crval1=crval1, cdelt1=cdelt1, debugplot=debugplot) # useful image pixels image2d_masked = image2d_median * (1 - mask2d.astype(int)) if abs(debugplot) % 10 != 0: ximshow(image2d_masked, crpix1=crpix1, crval1=crval1, cdelt1=cdelt1, debugplot=debugplot) # masked image image2d_masked = np.ma.masked_array(image2d_median, mask=mask2d) # median spectrum image1d_median = np.ma.median(image2d_masked, axis=0).data image_median = fits.PrimaryHDU(data=image1d_median, header=image_header) else: 
image_median = fits.PrimaryHDU(data=image2d_median, header=image_header) return fits.HDUList([image_median])
[ "def", "median_slitlets_rectified", "(", "input_image", ",", "mode", "=", "0", ",", "minimum_slitlet_width_mm", "=", "EMIR_MINIMUM_SLITLET_WIDTH_MM", ",", "maximum_slitlet_width_mm", "=", "EMIR_MAXIMUM_SLITLET_WIDTH_MM", ",", "debugplot", "=", "0", ")", ":", "image_header", "=", "input_image", "[", "0", "]", ".", "header", "image2d", "=", "input_image", "[", "0", "]", ".", "data", "# check image dimensions", "naxis2_expected", "=", "EMIR_NBARS", "*", "EMIR_NPIXPERSLIT_RECTIFIED", "naxis2", ",", "naxis1", "=", "image2d", ".", "shape", "if", "naxis2", "!=", "naxis2_expected", ":", "raise", "ValueError", "(", "\"NAXIS2={0} should be {1}\"", ".", "format", "(", "naxis2", ",", "naxis2_expected", ")", ")", "# check that the FITS file has been obtained with EMIR", "instrument", "=", "image_header", "[", "'instrume'", "]", "if", "instrument", "!=", "'EMIR'", ":", "raise", "ValueError", "(", "\"INSTRUME keyword is not 'EMIR'!\"", ")", "# initialize output image", "if", "mode", "==", "0", ":", "image2d_median", "=", "np", ".", "zeros", "(", "(", "naxis2", ",", "naxis1", ")", ")", "else", ":", "image2d_median", "=", "np", ".", "zeros", "(", "(", "EMIR_NBARS", ",", "naxis1", ")", ")", "# main loop", "for", "i", "in", "range", "(", "EMIR_NBARS", ")", ":", "ns1", "=", "i", "*", "EMIR_NPIXPERSLIT_RECTIFIED", "+", "1", "ns2", "=", "ns1", "+", "EMIR_NPIXPERSLIT_RECTIFIED", "-", "1", "sp_median", "=", "np", ".", "median", "(", "image2d", "[", "(", "ns1", "-", "1", ")", ":", "ns2", ",", ":", "]", ",", "axis", "=", "0", ")", "if", "mode", "==", "0", ":", "image2d_median", "[", "(", "ns1", "-", "1", ")", ":", "ns2", ",", ":", "]", "=", "np", ".", "tile", "(", "sp_median", ",", "(", "EMIR_NPIXPERSLIT_RECTIFIED", ",", "1", ")", ")", "else", ":", "image2d_median", "[", "i", "]", "=", "np", ".", "copy", "(", "sp_median", ")", "if", "mode", "==", "2", ":", "# get CSU configuration from FITS header", "csu_config", "=", "CsuConfiguration", ".", "define_from_header", "(", "image_header", ")", "# define wavelength calibration parameters", "crpix1", "=", "image_header", "[", "'crpix1'", "]", "crval1", "=", "image_header", "[", "'crval1'", "]", "cdelt1", "=", "image_header", "[", "'cdelt1'", "]", "# segregate slitlets", "list_useful_slitlets", "=", "csu_config", ".", "widths_in_range_mm", "(", "minwidth", "=", "minimum_slitlet_width_mm", ",", "maxwidth", "=", "maximum_slitlet_width_mm", ")", "list_not_useful_slitlets", "=", "[", "i", "for", "i", "in", "list", "(", "range", "(", "1", ",", "EMIR_NBARS", "+", "1", ")", ")", "if", "i", "not", "in", "list_useful_slitlets", "]", "if", "abs", "(", "debugplot", ")", "!=", "0", ":", "print", "(", "'>>> list_useful_slitlets....:'", ",", "list_useful_slitlets", ")", "print", "(", "'>>> list_not_useful_slitlets:'", ",", "list_not_useful_slitlets", ")", "# define mask from array data", "mask2d", ",", "borders", "=", "define_mask_borders", "(", "image2d_median", ",", "sought_value", "=", "0", ")", "if", "abs", "(", "debugplot", ")", "%", "10", "!=", "0", ":", "ximshow", "(", "mask2d", ".", "astype", "(", "int", ")", ",", "z1z2", "=", "(", "-", ".2", ",", "1.2", ")", ",", "crpix1", "=", "crpix1", ",", "crval1", "=", "crval1", ",", "cdelt1", "=", "cdelt1", ",", "debugplot", "=", "debugplot", ")", "# update mask with unused slitlets", "for", "islitlet", "in", "list_not_useful_slitlets", ":", "mask2d", "[", "islitlet", "-", "1", ",", ":", "]", "=", "np", ".", "array", "(", "[", "True", "]", "*", "naxis1", ")", "if", "abs", "(", "debugplot", ")", "%", "10", "!=", "0", ":", "ximshow", 
"(", "mask2d", ".", "astype", "(", "int", ")", ",", "z1z2", "=", "(", "-", ".2", ",", "1.2", ")", ",", "crpix1", "=", "crpix1", ",", "crval1", "=", "crval1", ",", "cdelt1", "=", "cdelt1", ",", "debugplot", "=", "debugplot", ")", "# useful image pixels", "image2d_masked", "=", "image2d_median", "*", "(", "1", "-", "mask2d", ".", "astype", "(", "int", ")", ")", "if", "abs", "(", "debugplot", ")", "%", "10", "!=", "0", ":", "ximshow", "(", "image2d_masked", ",", "crpix1", "=", "crpix1", ",", "crval1", "=", "crval1", ",", "cdelt1", "=", "cdelt1", ",", "debugplot", "=", "debugplot", ")", "# masked image", "image2d_masked", "=", "np", ".", "ma", ".", "masked_array", "(", "image2d_median", ",", "mask", "=", "mask2d", ")", "# median spectrum", "image1d_median", "=", "np", ".", "ma", ".", "median", "(", "image2d_masked", ",", "axis", "=", "0", ")", ".", "data", "image_median", "=", "fits", ".", "PrimaryHDU", "(", "data", "=", "image1d_median", ",", "header", "=", "image_header", ")", "else", ":", "image_median", "=", "fits", ".", "PrimaryHDU", "(", "data", "=", "image2d_median", ",", "header", "=", "image_header", ")", "return", "fits", ".", "HDUList", "(", "[", "image_median", "]", ")" ]
Compute median spectrum for each slitlet. Parameters ---------- input_image : HDUList object Input 2D image. mode : int Indicate desired result: 0 : image with the same size as the input image, with the median spectrum of each slitlet spanning all the spectra of the corresponding slitlet 1 : image with 55 spectra, containing the median spectra of each slitlet 2 : single collapsed median spectrum, using exclusively the useful slitlets from the input image minimum_slitlet_width_mm : float Minimum slitlet width (mm) for a valid slitlet. maximum_slitlet_width_mm : float Maximum slitlet width (mm) for a valid slitlet. debugplot : int Determines whether intermediate computations and/or plots are displayed. The valid codes are defined in numina.array.display.pause_debugplot. Returns ------- image_median : HDUList object Output image.
[ "Compute", "median", "spectrum", "for", "each", "slitlet", "." ]
python
train
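For context, a call might look like this hedged sketch: the file names are placeholders, the input is assumed to already be a rectified EMIR image, and astropy is used to read and write the FITS files.

```python
# Hedged sketch; assumes a rectified EMIR image on disk with the expected
# NAXIS2 size and INSTRUME keyword. File names are placeholders.
from astropy.io import fits

from emirdrp.processing.wavecal.median_slitlets_rectified import \
    median_slitlets_rectified

with fits.open("rectified_image.fits") as hdul:
    result = median_slitlets_rectified(hdul, mode=2)   # collapsed median spectrum
    result.writeto("median_spectrum.fits", overwrite=True)
```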
ethereum/py-evm
eth/db/header.py
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/db/header.py#L120-L127
def get_canonical_block_header_by_number(self, block_number: BlockNumber) -> BlockHeader: """ Returns the block header with the given number in the canonical chain. Raises BlockNotFound if there's no block header with the given number in the canonical chain. """ return self._get_canonical_block_header_by_number(self.db, block_number)
[ "def", "get_canonical_block_header_by_number", "(", "self", ",", "block_number", ":", "BlockNumber", ")", "->", "BlockHeader", ":", "return", "self", ".", "_get_canonical_block_header_by_number", "(", "self", ".", "db", ",", "block_number", ")" ]
Returns the block header with the given number in the canonical chain. Raises BlockNotFound if there's no block header with the given number in the canonical chain.
[ "Returns", "the", "block", "header", "with", "the", "given", "number", "in", "the", "canonical", "chain", "." ]
python
train
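A hedged sketch of calling this through py-evm's HeaderDB; it assumes an in-memory AtomicDB backend and that the canonical chain's headers were persisted beforehand (setup not shown).

```python
# Sketch only: persisting headers into the database is omitted, and the
# lookup raises if block 0 is not in the canonical chain.
from eth.db.atomic import AtomicDB
from eth.db.header import HeaderDB

headerdb = HeaderDB(AtomicDB())
# ... persist canonical headers here ...
genesis_header = headerdb.get_canonical_block_header_by_number(0)
```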
biolink/ontobio
ontobio/sparql/skos.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sparql/skos.py#L51-L105
def process_rdfgraph(self, rg, ont=None): """ Transform a skos terminology expressed in an rdf graph into an Ontology object Arguments --------- rg: rdflib.Graph graph object Returns ------- Ontology """ # TODO: ontology metadata if ont is None: ont = Ontology() subjs = list(rg.subjects(RDF.type, SKOS.ConceptScheme)) if len(subjs) == 0: logging.warning("No ConceptScheme") else: ont.id = self._uri2id(subjs[0]) subset_map = {} for concept in rg.subjects(RDF.type, SKOS.Concept): for s in self._get_schemes(rg, concept): subset_map[self._uri2id(s)] = s for concept in sorted(list(rg.subjects(RDF.type, SKOS.Concept))): concept_uri = str(concept) id=self._uri2id(concept) logging.info("ADDING: {}".format(id)) ont.add_node(id, self._get_label(rg,concept)) for defn in rg.objects(concept, SKOS.definition): if (defn.language == self.lang): td = TextDefinition(id, escape_value(defn.value)) ont.add_text_definition(td) for s in rg.objects(concept, SKOS.broader): ont.add_parent(id, self._uri2id(s)) for s in rg.objects(concept, SKOS.related): ont.add_parent(id, self._uri2id(s), self._uri2id(SKOS.related)) for m in rg.objects(concept, SKOS.exactMatch): ont.add_xref(id, self._uri2id(m)) for m in rg.objects(concept, SKOS.altLabel): syn = Synonym(id, val=self._uri2id(m)) ont.add_synonym(syn) for s in self._get_schemes(rg,concept): ont.add_to_subset(id, self._uri2id(s)) return ont
[ "def", "process_rdfgraph", "(", "self", ",", "rg", ",", "ont", "=", "None", ")", ":", "# TODO: ontology metadata", "if", "ont", "is", "None", ":", "ont", "=", "Ontology", "(", ")", "subjs", "=", "list", "(", "rg", ".", "subjects", "(", "RDF", ".", "type", ",", "SKOS", ".", "ConceptScheme", ")", ")", "if", "len", "(", "subjs", ")", "==", "0", ":", "logging", ".", "warning", "(", "\"No ConceptScheme\"", ")", "else", ":", "ont", ".", "id", "=", "self", ".", "_uri2id", "(", "subjs", "[", "0", "]", ")", "subset_map", "=", "{", "}", "for", "concept", "in", "rg", ".", "subjects", "(", "RDF", ".", "type", ",", "SKOS", ".", "Concept", ")", ":", "for", "s", "in", "self", ".", "_get_schemes", "(", "rg", ",", "concept", ")", ":", "subset_map", "[", "self", ".", "_uri2id", "(", "s", ")", "]", "=", "s", "for", "concept", "in", "sorted", "(", "list", "(", "rg", ".", "subjects", "(", "RDF", ".", "type", ",", "SKOS", ".", "Concept", ")", ")", ")", ":", "concept_uri", "=", "str", "(", "concept", ")", "id", "=", "self", ".", "_uri2id", "(", "concept", ")", "logging", ".", "info", "(", "\"ADDING: {}\"", ".", "format", "(", "id", ")", ")", "ont", ".", "add_node", "(", "id", ",", "self", ".", "_get_label", "(", "rg", ",", "concept", ")", ")", "for", "defn", "in", "rg", ".", "objects", "(", "concept", ",", "SKOS", ".", "definition", ")", ":", "if", "(", "defn", ".", "language", "==", "self", ".", "lang", ")", ":", "td", "=", "TextDefinition", "(", "id", ",", "escape_value", "(", "defn", ".", "value", ")", ")", "ont", ".", "add_text_definition", "(", "td", ")", "for", "s", "in", "rg", ".", "objects", "(", "concept", ",", "SKOS", ".", "broader", ")", ":", "ont", ".", "add_parent", "(", "id", ",", "self", ".", "_uri2id", "(", "s", ")", ")", "for", "s", "in", "rg", ".", "objects", "(", "concept", ",", "SKOS", ".", "related", ")", ":", "ont", ".", "add_parent", "(", "id", ",", "self", ".", "_uri2id", "(", "s", ")", ",", "self", ".", "_uri2id", "(", "SKOS", ".", "related", ")", ")", "for", "m", "in", "rg", ".", "objects", "(", "concept", ",", "SKOS", ".", "exactMatch", ")", ":", "ont", ".", "add_xref", "(", "id", ",", "self", ".", "_uri2id", "(", "m", ")", ")", "for", "m", "in", "rg", ".", "objects", "(", "concept", ",", "SKOS", ".", "altLabel", ")", ":", "syn", "=", "Synonym", "(", "id", ",", "val", "=", "self", ".", "_uri2id", "(", "m", ")", ")", "ont", ".", "add_synonym", "(", "syn", ")", "for", "s", "in", "self", ".", "_get_schemes", "(", "rg", ",", "concept", ")", ":", "ont", ".", "add_to_subset", "(", "id", ",", "self", ".", "_uri2id", "(", "s", ")", ")", "return", "ont" ]
Transform a skos terminology expressed in an rdf graph into an Ontology object Arguments --------- rg: rdflib.Graph graph object Returns ------- Ontology
[ "Transform", "a", "skos", "terminology", "expressed", "in", "an", "rdf", "graph", "into", "an", "Ontology", "object" ]
python
train
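A sketch of feeding a SKOS vocabulary through this method; the turtle file name is a placeholder and `skos` stands in for an instance of the class that defines `process_rdfgraph` (its name is not shown in the record).

```python
# Hedged sketch: `skos` is an instance of the surrounding class, and the
# vocabulary file is a placeholder.
import rdflib

rg = rdflib.Graph()
rg.parse("skos_vocabulary.ttl", format="turtle")
ont = skos.process_rdfgraph(rg)   # returns an ontobio Ontology
```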
agile-geoscience/striplog
striplog/striplog.py
https://github.com/agile-geoscience/striplog/blob/8033b673a151f96c29802b43763e863519a3124c/striplog/striplog.py#L1902-L1939
def merge_neighbours(self, strict=True): """ Makes a new striplog in which matching neighbours (for which the components are the same) are unioned. That is, they are replaced by a new Interval with the same top as the uppermost and the same bottom as the lowermost. Args strict (bool): If True, then all of the components must match. If False, then only the primary must match. Returns: Striplog. A new striplog. TODO: Might need to be tweaked to deal with 'binary striplogs' if those aren't implemented with components. """ new_strip = [self[0].copy()] for lower in self[1:]: # Determine if touching. touching = new_strip[-1].touches(lower) # Decide if match. if strict: similar = new_strip[-1].components == lower.components else: similar = new_strip[-1].primary == lower.primary # Union if both criteria met. if touching and similar: new_strip[-1] = new_strip[-1].union(lower) else: new_strip.append(lower.copy()) return Striplog(new_strip)
[ "def", "merge_neighbours", "(", "self", ",", "strict", "=", "True", ")", ":", "new_strip", "=", "[", "self", "[", "0", "]", ".", "copy", "(", ")", "]", "for", "lower", "in", "self", "[", "1", ":", "]", ":", "# Determine if touching.", "touching", "=", "new_strip", "[", "-", "1", "]", ".", "touches", "(", "lower", ")", "# Decide if match.", "if", "strict", ":", "similar", "=", "new_strip", "[", "-", "1", "]", ".", "components", "==", "lower", ".", "components", "else", ":", "similar", "=", "new_strip", "[", "-", "1", "]", ".", "primary", "==", "lower", ".", "primary", "# Union if both criteria met.", "if", "touching", "and", "similar", ":", "new_strip", "[", "-", "1", "]", "=", "new_strip", "[", "-", "1", "]", ".", "union", "(", "lower", ")", "else", ":", "new_strip", ".", "append", "(", "lower", ".", "copy", "(", ")", ")", "return", "Striplog", "(", "new_strip", ")" ]
Makes a new striplog in which matching neighbours (for which the components are the same) are unioned. That is, they are replaced by a new Interval with the same top as the uppermost and the same bottom as the lowermost. Args strict (bool): If True, then all of the components must match. If False, then only the primary must match. Returns: Striplog. A new striplog. TODO: Might need to be tweaked to deal with 'binary striplogs' if those aren't implemented with components.
[ "Makes", "a", "new", "striplog", "in", "which", "matching", "neighbours", "(", "for", "which", "the", "components", "are", "the", "same", ")", "are", "unioned", ".", "That", "is", "they", "are", "replaced", "by", "a", "new", "Interval", "with", "the", "same", "top", "as", "the", "uppermost", "and", "the", "same", "bottom", "as", "the", "lowermost", "." ]
python
test
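In use this might look like the sketch below; loading the striplog from CSV is an assumption about the surrounding workflow, not something shown in the record.

```python
# Hedged sketch: assumes a CSV description of intervals; only
# merge_neighbours itself comes from the record above.
from striplog import Striplog

strip = Striplog.from_csv(filename="intervals.csv")
merged = strip.merge_neighbours(strict=False)  # union neighbours whose primary component matches
```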
tensorflow/tensorboard
tensorboard/plugins/hparams/hparams_plugin_loader.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/hparams_plugin_loader.py#L30-L46
def load(self, context): """Returns the plugin, if possible. Args: context: The TBContext flags. Returns: A HParamsPlugin instance or None if it couldn't be loaded. """ try: # pylint: disable=g-import-not-at-top,unused-import import tensorflow except ImportError: return # pylint: disable=g-import-not-at-top from tensorboard.plugins.hparams.hparams_plugin import HParamsPlugin return HParamsPlugin(context)
[ "def", "load", "(", "self", ",", "context", ")", ":", "try", ":", "# pylint: disable=g-import-not-at-top,unused-import", "import", "tensorflow", "except", "ImportError", ":", "return", "# pylint: disable=g-import-not-at-top", "from", "tensorboard", ".", "plugins", ".", "hparams", ".", "hparams_plugin", "import", "HParamsPlugin", "return", "HParamsPlugin", "(", "context", ")" ]
Returns the plugin, if possible. Args: context: The TBContext flags. Returns: A HParamsPlugin instance or None if it couldn't be loaded.
[ "Returns", "the", "plugin", "if", "possible", "." ]
python
train
JasonKessler/scattertext
scattertext/TermDocMatrixWithoutCategories.py
https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/TermDocMatrixWithoutCategories.py#L423-L431
def term_doc_lists(self): ''' Returns ------- dict ''' doc_ids = self._X.transpose().tolil().rows terms = self._term_idx_store.values() return dict(zip(terms, doc_ids))
[ "def", "term_doc_lists", "(", "self", ")", ":", "doc_ids", "=", "self", ".", "_X", ".", "transpose", "(", ")", ".", "tolil", "(", ")", ".", "rows", "terms", "=", "self", ".", "_term_idx_store", ".", "values", "(", ")", "return", "dict", "(", "zip", "(", "terms", ",", "doc_ids", ")", ")" ]
Returns ------- dict
[ "Returns", "-------", "dict" ]
python
train
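A small, hedged demonstration of what the return value looks like: the two-document corpus and column names are invented, and the printed dict is abbreviated.

```python
# Tiny illustrative corpus; column names and texts are made up.
import pandas as pd
import scattertext as st

df = pd.DataFrame({"category": ["a", "b"],
                   "text": ["the cat sat", "the dog ran"]})
corpus = st.CorpusFromPandas(df, category_col="category", text_col="text",
                             nlp=st.whitespace_nlp).build()
print(corpus.term_doc_lists())   # e.g. {'the': [0, 1], 'cat': [0], ...}
```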
klen/zeta-library
zetalibrary/scss/__init__.py
https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/scss/__init__.py#L1352-L1371
def _do_each(self, rule, p_selectors, p_parents, p_children, scope, media, c_lineno, c_property, c_codestr, code, name): """ Implements @each """ var, _, name = name.partition('in') name = self.calculate(name, rule[CONTEXT], rule[OPTIONS], rule) if name: name = ListValue(name) var = var.strip() var = self.do_glob_math( var, rule[CONTEXT], rule[OPTIONS], rule, True) for n, v in name.items(): v = to_str(v) rule[CODESTR] = c_codestr rule[CONTEXT][var] = v if not isinstance(n, int): rule[CONTEXT][n] = v self.manage_children( rule, p_selectors, p_parents, p_children, scope, media)
[ "def", "_do_each", "(", "self", ",", "rule", ",", "p_selectors", ",", "p_parents", ",", "p_children", ",", "scope", ",", "media", ",", "c_lineno", ",", "c_property", ",", "c_codestr", ",", "code", ",", "name", ")", ":", "var", ",", "_", ",", "name", "=", "name", ".", "partition", "(", "'in'", ")", "name", "=", "self", ".", "calculate", "(", "name", ",", "rule", "[", "CONTEXT", "]", ",", "rule", "[", "OPTIONS", "]", ",", "rule", ")", "if", "name", ":", "name", "=", "ListValue", "(", "name", ")", "var", "=", "var", ".", "strip", "(", ")", "var", "=", "self", ".", "do_glob_math", "(", "var", ",", "rule", "[", "CONTEXT", "]", ",", "rule", "[", "OPTIONS", "]", ",", "rule", ",", "True", ")", "for", "n", ",", "v", "in", "name", ".", "items", "(", ")", ":", "v", "=", "to_str", "(", "v", ")", "rule", "[", "CODESTR", "]", "=", "c_codestr", "rule", "[", "CONTEXT", "]", "[", "var", "]", "=", "v", "if", "not", "isinstance", "(", "n", ",", "int", ")", ":", "rule", "[", "CONTEXT", "]", "[", "n", "]", "=", "v", "self", ".", "manage_children", "(", "rule", ",", "p_selectors", ",", "p_parents", ",", "p_children", ",", "scope", ",", "media", ")" ]
Implements @each
[ "Implements" ]
python
train
spyder-ide/spyder
spyder/app/mainwindow.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/app/mainwindow.py#L2600-L2654
def render_issue(self, description='', traceback=''): """Render issue before sending it to Github""" # Get component versions versions = get_versions() # Get git revision for development version revision = '' if versions['revision']: revision = versions['revision'] # Make a description header in case no description is supplied if not description: description = "### What steps reproduce the problem?" # Make error section from traceback and add appropriate reminder header if traceback: error_section = ("### Traceback\n" "```python-traceback\n" "{}\n" "```".format(traceback)) else: error_section = '' issue_template = """\ ## Description {description} {error_section} ## Versions * Spyder version: {spyder_version} {commit} * Python version: {python_version} * Qt version: {qt_version} * {qt_api_name} version: {qt_api_version} * Operating System: {os_name} {os_version} ### Dependencies ``` {dependencies} ``` """.format(description=description, error_section=error_section, spyder_version=versions['spyder'], commit=revision, python_version=versions['python'], qt_version=versions['qt'], qt_api_name=versions['qt_api'], qt_api_version=versions['qt_api_ver'], os_name=versions['system'], os_version=versions['release'], dependencies=dependencies.status()) return issue_template
[ "def", "render_issue", "(", "self", ",", "description", "=", "''", ",", "traceback", "=", "''", ")", ":", "# Get component versions\r", "versions", "=", "get_versions", "(", ")", "# Get git revision for development version\r", "revision", "=", "''", "if", "versions", "[", "'revision'", "]", ":", "revision", "=", "versions", "[", "'revision'", "]", "# Make a description header in case no description is supplied\r", "if", "not", "description", ":", "description", "=", "\"### What steps reproduce the problem?\"", "# Make error section from traceback and add appropriate reminder header\r", "if", "traceback", ":", "error_section", "=", "(", "\"### Traceback\\n\"", "\"```python-traceback\\n\"", "\"{}\\n\"", "\"```\"", ".", "format", "(", "traceback", ")", ")", "else", ":", "error_section", "=", "''", "issue_template", "=", "\"\"\"\\\r\n## Description\r\n\r\n{description}\r\n\r\n{error_section}\r\n\r\n## Versions\r\n\r\n* Spyder version: {spyder_version} {commit}\r\n* Python version: {python_version}\r\n* Qt version: {qt_version}\r\n* {qt_api_name} version: {qt_api_version}\r\n* Operating System: {os_name} {os_version}\r\n\r\n### Dependencies\r\n\r\n```\r\n{dependencies}\r\n```\r\n\"\"\"", ".", "format", "(", "description", "=", "description", ",", "error_section", "=", "error_section", ",", "spyder_version", "=", "versions", "[", "'spyder'", "]", ",", "commit", "=", "revision", ",", "python_version", "=", "versions", "[", "'python'", "]", ",", "qt_version", "=", "versions", "[", "'qt'", "]", ",", "qt_api_name", "=", "versions", "[", "'qt_api'", "]", ",", "qt_api_version", "=", "versions", "[", "'qt_api_ver'", "]", ",", "os_name", "=", "versions", "[", "'system'", "]", ",", "os_version", "=", "versions", "[", "'release'", "]", ",", "dependencies", "=", "dependencies", ".", "status", "(", ")", ")", "return", "issue_template" ]
Render issue before sending it to Github
[ "Render", "issue", "before", "sending", "it", "to", "Github" ]
python
train
mrcagney/gtfstk
gtfstk/stops.py
https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/stops.py#L905-L961
def map_stops( feed: "Feed", stop_ids: List[str], stop_style: Dict = STOP_STYLE ): """ Return a Folium map showing the given stops. Parameters ---------- feed : Feed stop_ids : list IDs of trips in ``feed.stops`` stop_style: dictionary Folium CircleMarker parameters to use for styling stops. Returns ------- dictionary A Folium Map depicting the stops as CircleMarkers. Notes ------ - Requires Folium """ import folium as fl # Initialize map my_map = fl.Map(tiles="cartodbpositron") # Create a feature group for the stops and add it to the map group = fl.FeatureGroup(name="Stops") # Add stops to feature group stops = feed.stops.loc[lambda x: x.stop_id.isin(stop_ids)].fillna("n/a") for prop in stops.to_dict(orient="records"): # Add stop lon = prop["stop_lon"] lat = prop["stop_lat"] fl.CircleMarker( location=[lat, lon], popup=fl.Popup(hp.make_html(prop)), **stop_style, ).add_to(group) group.add_to(my_map) # Add layer control fl.LayerControl().add_to(my_map) # Fit map to stop bounds bounds = [ (stops.stop_lat.min(), stops.stop_lon.min()), (stops.stop_lat.max(), stops.stop_lon.max()), ] my_map.fit_bounds(bounds, padding=[1, 1]) return my_map
[ "def", "map_stops", "(", "feed", ":", "\"Feed\"", ",", "stop_ids", ":", "List", "[", "str", "]", ",", "stop_style", ":", "Dict", "=", "STOP_STYLE", ")", ":", "import", "folium", "as", "fl", "# Initialize map", "my_map", "=", "fl", ".", "Map", "(", "tiles", "=", "\"cartodbpositron\"", ")", "# Create a feature group for the stops and add it to the map", "group", "=", "fl", ".", "FeatureGroup", "(", "name", "=", "\"Stops\"", ")", "# Add stops to feature group", "stops", "=", "feed", ".", "stops", ".", "loc", "[", "lambda", "x", ":", "x", ".", "stop_id", ".", "isin", "(", "stop_ids", ")", "]", ".", "fillna", "(", "\"n/a\"", ")", "for", "prop", "in", "stops", ".", "to_dict", "(", "orient", "=", "\"records\"", ")", ":", "# Add stop", "lon", "=", "prop", "[", "\"stop_lon\"", "]", "lat", "=", "prop", "[", "\"stop_lat\"", "]", "fl", ".", "CircleMarker", "(", "location", "=", "[", "lat", ",", "lon", "]", ",", "popup", "=", "fl", ".", "Popup", "(", "hp", ".", "make_html", "(", "prop", ")", ")", ",", "*", "*", "stop_style", ",", ")", ".", "add_to", "(", "group", ")", "group", ".", "add_to", "(", "my_map", ")", "# Add layer control", "fl", ".", "LayerControl", "(", ")", ".", "add_to", "(", "my_map", ")", "# Fit map to stop bounds", "bounds", "=", "[", "(", "stops", ".", "stop_lat", ".", "min", "(", ")", ",", "stops", ".", "stop_lon", ".", "min", "(", ")", ")", ",", "(", "stops", ".", "stop_lat", ".", "max", "(", ")", ",", "stops", ".", "stop_lon", ".", "max", "(", ")", ")", ",", "]", "my_map", ".", "fit_bounds", "(", "bounds", ",", "padding", "=", "[", "1", ",", "1", "]", ")", "return", "my_map" ]
Return a Folium map showing the given stops. Parameters ---------- feed : Feed stop_ids : list IDs of trips in ``feed.stops`` stop_style: dictionary Folium CircleMarker parameters to use for styling stops. Returns ------- dictionary A Folium Map depicting the stops as CircleMarkers. Notes ------ - Requires Folium
[ "Return", "a", "Folium", "map", "showing", "the", "given", "stops", "." ]
python
train
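A hedged end-to-end sketch: reading the feed and the choice of stop IDs are assumptions about the surrounding workflow, and the GTFS path is a placeholder.

```python
# Sketch only; requires Folium and a GTFS zip on disk (path is a placeholder).
import gtfstk as gt
from gtfstk.stops import map_stops

feed = gt.read_gtfs("gtfs.zip", dist_units="km")
some_stops = feed.stops.stop_id.iloc[:20].tolist()
m = map_stops(feed, some_stops)
m.save("stops_map.html")
```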
krukas/Trionyx
trionyx/trionyx/apps.py
https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/apps.py#L47-L54
def auto_load_app_modules(self, modules): """Auto load app modules""" for app in apps.get_app_configs(): for module in modules: try: import_module('{}.{}'.format(app.module.__package__, module)) except ImportError: pass
[ "def", "auto_load_app_modules", "(", "self", ",", "modules", ")", ":", "for", "app", "in", "apps", ".", "get_app_configs", "(", ")", ":", "for", "module", "in", "modules", ":", "try", ":", "import_module", "(", "'{}.{}'", ".", "format", "(", "app", ".", "module", ".", "__package__", ",", "module", ")", ")", "except", "ImportError", ":", "pass" ]
Auto load app modules
[ "Auto", "load", "app", "modules" ]
python
train
AlecAivazis/graphql-over-kafka
nautilus/conventions/api.py
https://github.com/AlecAivazis/graphql-over-kafka/blob/70e2acef27a2f87355590be1a6ca60ce3ab4d09c/nautilus/conventions/api.py#L15-L25
def crud_mutation_name(action, model): """ This function returns the name of a mutation that performs the specified crud action on the given model service """ model_string = get_model_string(model) # make sure the mutation name is correctly camelcases model_string = model_string[0].upper() + model_string[1:] # return the mutation name return "{}{}".format(action, model_string)
[ "def", "crud_mutation_name", "(", "action", ",", "model", ")", ":", "model_string", "=", "get_model_string", "(", "model", ")", "# make sure the mutation name is correctly camelcases", "model_string", "=", "model_string", "[", "0", "]", ".", "upper", "(", ")", "+", "model_string", "[", "1", ":", "]", "# return the mutation name", "return", "\"{}{}\"", ".", "format", "(", "action", ",", "model_string", ")" ]
This function returns the name of a mutation that performs the specified crud action on the given model service
[ "This", "function", "returns", "the", "name", "of", "a", "mutation", "that", "performs", "the", "specified", "crud", "action", "on", "the", "given", "model", "service" ]
python
train
CartoDB/carto-python
carto/auth.py
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/auth.py#L128-L154
def send(self, relative_path, http_method, **requests_args): """ Makes an API-key-authorized request :param relative_path: URL path relative to self.base_url :param http_method: HTTP method :param requests_args: kwargs to be sent to requests :type relative_path: str :type http_method: str :type requests_args: kwargs :return: A request response object :raise: CartoException """ try: http_method, requests_args = self.prepare_send(http_method, **requests_args) response = super(APIKeyAuthClient, self).send(relative_path, http_method, **requests_args) except Exception as e: raise CartoException(e) if CartoRateLimitException.is_rate_limited(response): raise CartoRateLimitException(response) return response
[ "def", "send", "(", "self", ",", "relative_path", ",", "http_method", ",", "*", "*", "requests_args", ")", ":", "try", ":", "http_method", ",", "requests_args", "=", "self", ".", "prepare_send", "(", "http_method", ",", "*", "*", "requests_args", ")", "response", "=", "super", "(", "APIKeyAuthClient", ",", "self", ")", ".", "send", "(", "relative_path", ",", "http_method", ",", "*", "*", "requests_args", ")", "except", "Exception", "as", "e", ":", "raise", "CartoException", "(", "e", ")", "if", "CartoRateLimitException", ".", "is_rate_limited", "(", "response", ")", ":", "raise", "CartoRateLimitException", "(", "response", ")", "return", "response" ]
Makes an API-key-authorized request :param relative_path: URL path relative to self.base_url :param http_method: HTTP method :param requests_args: kwargs to be sent to requests :type relative_path: str :type http_method: str :type requests_args: kwargs :return: A request response object :raise: CartoException
[ "Makes", "an", "API", "-", "key", "-", "authorized", "request" ]
python
train
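A hedged request sketch; the username, API key and endpoint path are placeholders, and only `send` itself is taken from the record.

```python
# Placeholders throughout; any real call needs a valid CARTO account.
from carto.auth import APIKeyAuthClient

client = APIKeyAuthClient(base_url="https://YOUR_USER.carto.com/",
                          api_key="YOUR_API_KEY")
response = client.send("api/v1/map", "get")
print(response.status_code)
```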
wadda/gps3
examples/human.py
https://github.com/wadda/gps3/blob/91adcd7073b891b135b2a46d039ce2125cf09a09/examples/human.py#L39-L57
def add_args(): """Adds commandline arguments and formatted Help""" parser = argparse.ArgumentParser() parser.add_argument('-host', action='store', dest='host', default='127.0.0.1', help='DEFAULT "127.0.0.1"') parser.add_argument('-port', action='store', dest='port', default='2947', help='DEFAULT 2947', type=int) parser.add_argument('-json', dest='gpsd_protocol', const='json', action='store_const', default='json', help='DEFAULT JSON objects */') parser.add_argument('-device', dest='devicepath', action='store', help='alternate devicepath e.g.,"-device /dev/ttyUSB4"') # Infrequently used options parser.add_argument('-nmea', dest='gpsd_protocol', const='nmea', action='store_const', help='*/ output in NMEA */') # parser.add_argument('-rare', dest='gpsd_protocol', const='rare', action='store_const', help='*/ output of packets in hex */') # parser.add_argument('-raw', dest='gpsd_protocol', const='raw', action='store_const', help='*/ output of raw packets */') # parser.add_argument('-scaled', dest='gpsd_protocol', const='scaled', action='store_const', help='*/ scale output to floats */') # parser.add_argument('-timing', dest='gpsd_protocol', const='timing', action='store_const', help='*/ timing information */') # parser.add_argument('-split24', dest='gpsd_protocol', const='split24', action='store_const', help='*/ split AIS Type 24s */') # parser.add_argument('-pps', dest='gpsd_protocol', const='pps', action='store_const', help='*/ enable PPS JSON */') parser.add_argument('-v', '--version', action='version', version='Version: {}'.format(__version__)) cli_args = parser.parse_args() return cli_args
[ "def", "add_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "parser", ".", "add_argument", "(", "'-host'", ",", "action", "=", "'store'", ",", "dest", "=", "'host'", ",", "default", "=", "'127.0.0.1'", ",", "help", "=", "'DEFAULT \"127.0.0.1\"'", ")", "parser", ".", "add_argument", "(", "'-port'", ",", "action", "=", "'store'", ",", "dest", "=", "'port'", ",", "default", "=", "'2947'", ",", "help", "=", "'DEFAULT 2947'", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'-json'", ",", "dest", "=", "'gpsd_protocol'", ",", "const", "=", "'json'", ",", "action", "=", "'store_const'", ",", "default", "=", "'json'", ",", "help", "=", "'DEFAULT JSON objects */'", ")", "parser", ".", "add_argument", "(", "'-device'", ",", "dest", "=", "'devicepath'", ",", "action", "=", "'store'", ",", "help", "=", "'alternate devicepath e.g.,\"-device /dev/ttyUSB4\"'", ")", "# Infrequently used options", "parser", ".", "add_argument", "(", "'-nmea'", ",", "dest", "=", "'gpsd_protocol'", ",", "const", "=", "'nmea'", ",", "action", "=", "'store_const'", ",", "help", "=", "'*/ output in NMEA */'", ")", "# parser.add_argument('-rare', dest='gpsd_protocol', const='rare', action='store_const', help='*/ output of packets in hex */')", "# parser.add_argument('-raw', dest='gpsd_protocol', const='raw', action='store_const', help='*/ output of raw packets */')", "# parser.add_argument('-scaled', dest='gpsd_protocol', const='scaled', action='store_const', help='*/ scale output to floats */')", "# parser.add_argument('-timing', dest='gpsd_protocol', const='timing', action='store_const', help='*/ timing information */')", "# parser.add_argument('-split24', dest='gpsd_protocol', const='split24', action='store_const', help='*/ split AIS Type 24s */')", "# parser.add_argument('-pps', dest='gpsd_protocol', const='pps', action='store_const', help='*/ enable PPS JSON */')", "parser", ".", "add_argument", "(", "'-v'", ",", "'--version'", ",", "action", "=", "'version'", ",", "version", "=", "'Version: {}'", ".", "format", "(", "__version__", ")", ")", "cli_args", "=", "parser", ".", "parse_args", "(", ")", "return", "cli_args" ]
Adds commandline arguments and formatted Help
[ "Adds", "commandline", "arguments", "and", "formatted", "Help" ]
python
train
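The parsed namespace is then consumed elsewhere in the example script; a minimal consumption sketch, with attribute names mirroring the `dest=` values registered above:

```python
# Reads options from the command line; attribute names come from the dest=
# values in add_args() above.
args = add_args()
print(args.host, args.port, args.gpsd_protocol, args.devicepath)
```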
greenbender/pynntp
nntp/nntp.py
https://github.com/greenbender/pynntp/blob/991a76331cdf5d8f9dbf5b18f6e29adc80749a2f/nntp/nntp.py#L692-L708
def newnews(self, pattern, timestamp): """NEWNEWS command. Retrieves a list of message-ids for articles created since the specified timestamp for newsgroups with names that match the given pattern. See newnews_gen() for more details. See <http://tools.ietf.org/html/rfc3977#section-7.4> Args: pattern: Glob matching newsgroups of intrest. timestamp: Datetime object giving 'created since' datetime. Returns: A list of message-ids as given by newnews_gen() """ return [x for x in self.newnews_gen(pattern, timestamp)]
[ "def", "newnews", "(", "self", ",", "pattern", ",", "timestamp", ")", ":", "return", "[", "x", "for", "x", "in", "self", ".", "newnews_gen", "(", "pattern", ",", "timestamp", ")", "]" ]
NEWNEWS command. Retrieves a list of message-ids for articles created since the specified timestamp for newsgroups with names that match the given pattern. See newnews_gen() for more details. See <http://tools.ietf.org/html/rfc3977#section-7.4> Args: pattern: Glob matching newsgroups of intrest. timestamp: Datetime object giving 'created since' datetime. Returns: A list of message-ids as given by newnews_gen()
[ "NEWNEWS", "command", "." ]
python
test
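A hedged sketch of issuing NEWNEWS through pynntp; the host, group pattern and lookback window are placeholders, and the `NNTPClient` entry point is an assumption based on the package's usual interface rather than something shown in the record.

```python
# Placeholders for server and group pattern; requires a reachable NNTP server.
from datetime import datetime, timedelta

import nntp

client = nntp.NNTPClient("news.example.com", use_ssl=True)
since = datetime.utcnow() - timedelta(days=1)
message_ids = client.newnews("comp.lang.*", since)
```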
HazyResearch/fonduer
src/fonduer/utils/utils_udf.py
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/utils_udf.py#L224-L280
def drop_all_keys(session, key_table, candidate_classes): """Bulk drop annotation keys for all the candidate_classes in the table. Rather than directly dropping the keys, this removes the candidate_classes specified for the given keys only. If all candidate_classes are removed for a key, the key is dropped. :param key_table: The sqlalchemy class to insert into. :param candidate_classes: A list of candidate classes to drop. """ if not candidate_classes: return candidate_classes = set([c.__tablename__ for c in candidate_classes]) # Select all rows that contain ANY of the candidate_classes all_rows = ( session.query(key_table) .filter( key_table.candidate_classes.overlap(cast(candidate_classes, ARRAY(String))) ) .all() ) to_delete = set() to_update = [] # All candidate classes will be the same for all keys, so just look at one for row in all_rows: # Remove the selected candidate_classes. If empty, mark for deletion. row.candidate_classes = list( set(row.candidate_classes) - set(candidate_classes) ) if len(row.candidate_classes) == 0: to_delete.add(row.name) else: to_update.append( {"name": row.name, "candidate_classes": row.candidate_classes} ) # Perform all deletes if to_delete: query = session.query(key_table).filter(key_table.name.in_(to_delete)) query.delete(synchronize_session="fetch") # Perform all updates if to_update: for batch in _batch_postgres_query(key_table, to_update): stmt = insert(key_table.__table__) stmt = stmt.on_conflict_do_update( constraint=key_table.__table__.primary_key, set_={ "name": stmt.excluded.get("name"), "candidate_classes": stmt.excluded.get("candidate_classes"), }, ) session.execute(stmt, batch) session.commit()
[ "def", "drop_all_keys", "(", "session", ",", "key_table", ",", "candidate_classes", ")", ":", "if", "not", "candidate_classes", ":", "return", "candidate_classes", "=", "set", "(", "[", "c", ".", "__tablename__", "for", "c", "in", "candidate_classes", "]", ")", "# Select all rows that contain ANY of the candidate_classes", "all_rows", "=", "(", "session", ".", "query", "(", "key_table", ")", ".", "filter", "(", "key_table", ".", "candidate_classes", ".", "overlap", "(", "cast", "(", "candidate_classes", ",", "ARRAY", "(", "String", ")", ")", ")", ")", ".", "all", "(", ")", ")", "to_delete", "=", "set", "(", ")", "to_update", "=", "[", "]", "# All candidate classes will be the same for all keys, so just look at one", "for", "row", "in", "all_rows", ":", "# Remove the selected candidate_classes. If empty, mark for deletion.", "row", ".", "candidate_classes", "=", "list", "(", "set", "(", "row", ".", "candidate_classes", ")", "-", "set", "(", "candidate_classes", ")", ")", "if", "len", "(", "row", ".", "candidate_classes", ")", "==", "0", ":", "to_delete", ".", "add", "(", "row", ".", "name", ")", "else", ":", "to_update", ".", "append", "(", "{", "\"name\"", ":", "row", ".", "name", ",", "\"candidate_classes\"", ":", "row", ".", "candidate_classes", "}", ")", "# Perform all deletes", "if", "to_delete", ":", "query", "=", "session", ".", "query", "(", "key_table", ")", ".", "filter", "(", "key_table", ".", "name", ".", "in_", "(", "to_delete", ")", ")", "query", ".", "delete", "(", "synchronize_session", "=", "\"fetch\"", ")", "# Perform all updates", "if", "to_update", ":", "for", "batch", "in", "_batch_postgres_query", "(", "key_table", ",", "to_update", ")", ":", "stmt", "=", "insert", "(", "key_table", ".", "__table__", ")", "stmt", "=", "stmt", ".", "on_conflict_do_update", "(", "constraint", "=", "key_table", ".", "__table__", ".", "primary_key", ",", "set_", "=", "{", "\"name\"", ":", "stmt", ".", "excluded", ".", "get", "(", "\"name\"", ")", ",", "\"candidate_classes\"", ":", "stmt", ".", "excluded", ".", "get", "(", "\"candidate_classes\"", ")", ",", "}", ",", ")", "session", ".", "execute", "(", "stmt", ",", "batch", ")", "session", ".", "commit", "(", ")" ]
Bulk drop annotation keys for all the candidate_classes in the table. Rather than directly dropping the keys, this removes the candidate_classes specified for the given keys only. If all candidate_classes are removed for a key, the key is dropped. :param key_table: The sqlalchemy class to insert into. :param candidate_classes: A list of candidate classes to drop.
[ "Bulk", "drop", "annotation", "keys", "for", "all", "the", "candidate_classes", "in", "the", "table", "." ]
python
train
pyroscope/pyrocore
src/pyrocore/scripts/base.py
https://github.com/pyroscope/pyrocore/blob/89ad01346a570943d20311a0b488440975876612/src/pyrocore/scripts/base.py#L353-L371
def get_options(self): """ Get program options. """ super(ScriptBaseWithConfig, self).get_options() self.config_dir = os.path.abspath(os.path.expanduser(self.options.config_dir or os.environ.get('PYRO_CONFIG_DIR', None) or self.CONFIG_DIR_DEFAULT)) load_config.ConfigLoader(self.config_dir).load(self.OPTIONAL_CFG_FILES + self.options.config_file) if self.options.debug: config.debug = True for key_val in self.options.defines: try: key, val = key_val.split('=', 1) except ValueError, exc: raise error.UserError("Bad config override %r (%s)" % (key_val, exc)) else: setattr(config, key, load_config.validate(key, val))
[ "def", "get_options", "(", "self", ")", ":", "super", "(", "ScriptBaseWithConfig", ",", "self", ")", ".", "get_options", "(", ")", "self", ".", "config_dir", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "self", ".", "options", ".", "config_dir", "or", "os", ".", "environ", ".", "get", "(", "'PYRO_CONFIG_DIR'", ",", "None", ")", "or", "self", ".", "CONFIG_DIR_DEFAULT", ")", ")", "load_config", ".", "ConfigLoader", "(", "self", ".", "config_dir", ")", ".", "load", "(", "self", ".", "OPTIONAL_CFG_FILES", "+", "self", ".", "options", ".", "config_file", ")", "if", "self", ".", "options", ".", "debug", ":", "config", ".", "debug", "=", "True", "for", "key_val", "in", "self", ".", "options", ".", "defines", ":", "try", ":", "key", ",", "val", "=", "key_val", ".", "split", "(", "'='", ",", "1", ")", "except", "ValueError", ",", "exc", ":", "raise", "error", ".", "UserError", "(", "\"Bad config override %r (%s)\"", "%", "(", "key_val", ",", "exc", ")", ")", "else", ":", "setattr", "(", "config", ",", "key", ",", "load_config", ".", "validate", "(", "key", ",", "val", ")", ")" ]
Get program options.
[ "Get", "program", "options", "." ]
python
train
OSSOS/MOP
src/jjk/preproc/MOPplot.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/jjk/preproc/MOPplot.py#L43-L133
def load_abgfiles(dir=None): """Load the targets from a file""" import ephem,string if dir is None: import tkFileDialog try: dir=tkFileDialog.askdirectory() except: return if dir is None: return None from glob import glob files=glob(dir+"/*.abg") import os for f in files: (name,ext)=os.path.splitext(os.path.basename(f)) NAME=name.strip().upper() kbos[name]=f #print name aei=file(dir+"/"+name+".aei") lines=aei.readlines() aei.close() objInfoDict[name]="%6s %6s %6s\n" % ( string.center("a",6), string.center("e",6), string.center("i",6) ) try: (a,e,i,N,w,T) = lines[4].split() except: print lines[4] (a,e,i,N,w,T) = (0,0,0,0,0,0) objInfoDict[name]+="%6.2f %6.3f %6.2f\n" % (float(a),float(e),float(i)) s=lines[5][0:2] (a,e,i,N,w,T) = lines[5][2:-1].split() objInfoDict[name]+=" %6.3f %6.3f %6.3f\n" % (float(a),float(e),float(i)) abg = file(dir+"/"+name+".abg") lines = abg.readlines() abg.close() for line in lines: if not line[0:5] == "# Bar": continue objInfoDict[name]+=line[2:-1]+"\n" break res=file(dir+"/"+name+".res") lines=res.readlines() res.close() line=lines.pop() values=line.split() s = "[nobs: "+values[0]+" dt: "+values[1]+"]\n" objInfoDict[name]+=s mpc = file(dir+"/../mpc/"+name+".mpc") lines=mpc.readlines() mpc.close() last_date=0 for line in lines: if len(line)==0: continue if line[0]=="#": continue this_year=int(line[15:19]) this_month=int(line[20:22]) this_day = int(line[23:25]) this_date = this_year+this_month/12.0 +this_day/365.25 if last_date < this_date: last_date=this_date year=this_year day = this_day month=this_month mag={} for line in lines: try: mags=line[65:69].strip() if len(mags)==0: continue filter=line[70] if filter not in mag: mag[filter]=[] mag[filter].append(float(mags)) except: continue mags='' for filter in mag: magv=0 for m in mag[filter]: magv = magv+m/len(mag[filter]) mags = mags+ "%4.1f-%s " % ( magv , filter) if len(mags)==0: mags= "N/A" objInfoDict[name]+="MAG: "+mags+"\n" objInfoDict[name]+="Last obs: %s %s %s \n" % (year,month, day) doplot(kbos)
[ "def", "load_abgfiles", "(", "dir", "=", "None", ")", ":", "import", "ephem", ",", "string", "if", "dir", "is", "None", ":", "import", "tkFileDialog", "try", ":", "dir", "=", "tkFileDialog", ".", "askdirectory", "(", ")", "except", ":", "return", "if", "dir", "is", "None", ":", "return", "None", "from", "glob", "import", "glob", "files", "=", "glob", "(", "dir", "+", "\"/*.abg\"", ")", "import", "os", "for", "f", "in", "files", ":", "(", "name", ",", "ext", ")", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "f", ")", ")", "NAME", "=", "name", ".", "strip", "(", ")", ".", "upper", "(", ")", "kbos", "[", "name", "]", "=", "f", "#print name", "aei", "=", "file", "(", "dir", "+", "\"/\"", "+", "name", "+", "\".aei\"", ")", "lines", "=", "aei", ".", "readlines", "(", ")", "aei", ".", "close", "(", ")", "objInfoDict", "[", "name", "]", "=", "\"%6s %6s %6s\\n\"", "%", "(", "string", ".", "center", "(", "\"a\"", ",", "6", ")", ",", "string", ".", "center", "(", "\"e\"", ",", "6", ")", ",", "string", ".", "center", "(", "\"i\"", ",", "6", ")", ")", "try", ":", "(", "a", ",", "e", ",", "i", ",", "N", ",", "w", ",", "T", ")", "=", "lines", "[", "4", "]", ".", "split", "(", ")", "except", ":", "print", "lines", "[", "4", "]", "(", "a", ",", "e", ",", "i", ",", "N", ",", "w", ",", "T", ")", "=", "(", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ")", "objInfoDict", "[", "name", "]", "+=", "\"%6.2f %6.3f %6.2f\\n\"", "%", "(", "float", "(", "a", ")", ",", "float", "(", "e", ")", ",", "float", "(", "i", ")", ")", "s", "=", "lines", "[", "5", "]", "[", "0", ":", "2", "]", "(", "a", ",", "e", ",", "i", ",", "N", ",", "w", ",", "T", ")", "=", "lines", "[", "5", "]", "[", "2", ":", "-", "1", "]", ".", "split", "(", ")", "objInfoDict", "[", "name", "]", "+=", "\" %6.3f %6.3f %6.3f\\n\"", "%", "(", "float", "(", "a", ")", ",", "float", "(", "e", ")", ",", "float", "(", "i", ")", ")", "abg", "=", "file", "(", "dir", "+", "\"/\"", "+", "name", "+", "\".abg\"", ")", "lines", "=", "abg", ".", "readlines", "(", ")", "abg", ".", "close", "(", ")", "for", "line", "in", "lines", ":", "if", "not", "line", "[", "0", ":", "5", "]", "==", "\"# Bar\"", ":", "continue", "objInfoDict", "[", "name", "]", "+=", "line", "[", "2", ":", "-", "1", "]", "+", "\"\\n\"", "break", "res", "=", "file", "(", "dir", "+", "\"/\"", "+", "name", "+", "\".res\"", ")", "lines", "=", "res", ".", "readlines", "(", ")", "res", ".", "close", "(", ")", "line", "=", "lines", ".", "pop", "(", ")", "values", "=", "line", ".", "split", "(", ")", "s", "=", "\"[nobs: \"", "+", "values", "[", "0", "]", "+", "\" dt: \"", "+", "values", "[", "1", "]", "+", "\"]\\n\"", "objInfoDict", "[", "name", "]", "+=", "s", "mpc", "=", "file", "(", "dir", "+", "\"/../mpc/\"", "+", "name", "+", "\".mpc\"", ")", "lines", "=", "mpc", ".", "readlines", "(", ")", "mpc", ".", "close", "(", ")", "last_date", "=", "0", "for", "line", "in", "lines", ":", "if", "len", "(", "line", ")", "==", "0", ":", "continue", "if", "line", "[", "0", "]", "==", "\"#\"", ":", "continue", "this_year", "=", "int", "(", "line", "[", "15", ":", "19", "]", ")", "this_month", "=", "int", "(", "line", "[", "20", ":", "22", "]", ")", "this_day", "=", "int", "(", "line", "[", "23", ":", "25", "]", ")", "this_date", "=", "this_year", "+", "this_month", "/", "12.0", "+", "this_day", "/", "365.25", "if", "last_date", "<", "this_date", ":", "last_date", "=", "this_date", "year", "=", "this_year", "day", "=", "this_day", "month", "=", "this_month", "mag", 
"=", "{", "}", "for", "line", "in", "lines", ":", "try", ":", "mags", "=", "line", "[", "65", ":", "69", "]", ".", "strip", "(", ")", "if", "len", "(", "mags", ")", "==", "0", ":", "continue", "filter", "=", "line", "[", "70", "]", "if", "filter", "not", "in", "mag", ":", "mag", "[", "filter", "]", "=", "[", "]", "mag", "[", "filter", "]", ".", "append", "(", "float", "(", "mags", ")", ")", "except", ":", "continue", "mags", "=", "''", "for", "filter", "in", "mag", ":", "magv", "=", "0", "for", "m", "in", "mag", "[", "filter", "]", ":", "magv", "=", "magv", "+", "m", "/", "len", "(", "mag", "[", "filter", "]", ")", "mags", "=", "mags", "+", "\"%4.1f-%s \"", "%", "(", "magv", ",", "filter", ")", "if", "len", "(", "mags", ")", "==", "0", ":", "mags", "=", "\"N/A\"", "objInfoDict", "[", "name", "]", "+=", "\"MAG: \"", "+", "mags", "+", "\"\\n\"", "objInfoDict", "[", "name", "]", "+=", "\"Last obs: %s %s %s \\n\"", "%", "(", "year", ",", "month", ",", "day", ")", "doplot", "(", "kbos", ")" ]
Load the targets from a file
[ "Load", "the", "targets", "from", "a", "file" ]
python
train
devassistant/devassistant
devassistant/gui/run_window.py
https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/gui/run_window.py#L192-L212
def delete_event(self, widget, event, data=None): """ Event cancels the project creation """ if not self.close_win: if self.thread.isAlive(): dlg = self.gui_helper.create_message_dialog("Do you want to cancel project creation?", buttons=Gtk.ButtonsType.YES_NO) response = dlg.run() if response == Gtk.ResponseType.YES: if self.thread.isAlive(): self.info_label.set_label('<span color="#FFA500">Cancelling...</span>') self.dev_assistant_runner.stop() self.project_canceled = True else: self.info_label.set_label('<span color="#008000">Done</span>') self.allow_close_window() dlg.destroy() return True else: return False
[ "def", "delete_event", "(", "self", ",", "widget", ",", "event", ",", "data", "=", "None", ")", ":", "if", "not", "self", ".", "close_win", ":", "if", "self", ".", "thread", ".", "isAlive", "(", ")", ":", "dlg", "=", "self", ".", "gui_helper", ".", "create_message_dialog", "(", "\"Do you want to cancel project creation?\"", ",", "buttons", "=", "Gtk", ".", "ButtonsType", ".", "YES_NO", ")", "response", "=", "dlg", ".", "run", "(", ")", "if", "response", "==", "Gtk", ".", "ResponseType", ".", "YES", ":", "if", "self", ".", "thread", ".", "isAlive", "(", ")", ":", "self", ".", "info_label", ".", "set_label", "(", "'<span color=\"#FFA500\">Cancelling...</span>'", ")", "self", ".", "dev_assistant_runner", ".", "stop", "(", ")", "self", ".", "project_canceled", "=", "True", "else", ":", "self", ".", "info_label", ".", "set_label", "(", "'<span color=\"#008000\">Done</span>'", ")", "self", ".", "allow_close_window", "(", ")", "dlg", ".", "destroy", "(", ")", "return", "True", "else", ":", "return", "False" ]
Event cancels the project creation
[ "Event", "cancels", "the", "project", "creation" ]
python
train
mapnik/Cascadenik
cascadenik/compile.py
https://github.com/mapnik/Cascadenik/blob/82f66859340a31dfcb24af127274f262d4f3ad85/cascadenik/compile.py#L146-L169
def output_path(self, path_name): """ Modify a path so it fits expectations. Avoid returning relative paths that start with '../' and possibly return relative paths when output and cache directories match. """ # make sure it is a valid posix format path = to_posix(path_name) assert (path == path_name), "path_name passed to output_path must be in posix format" if posixpath.isabs(path): if self.output == self.cache: # worth seeing if an absolute path can be avoided path = posixpath.relpath(path, self.output) else: return posixpath.realpath(path) if path.startswith('../'): joined = posixpath.join(self.output, path) return posixpath.realpath(joined) return path
[ "def", "output_path", "(", "self", ",", "path_name", ")", ":", "# make sure it is a valid posix format", "path", "=", "to_posix", "(", "path_name", ")", "assert", "(", "path", "==", "path_name", ")", ",", "\"path_name passed to output_path must be in posix format\"", "if", "posixpath", ".", "isabs", "(", "path", ")", ":", "if", "self", ".", "output", "==", "self", ".", "cache", ":", "# worth seeing if an absolute path can be avoided", "path", "=", "posixpath", ".", "relpath", "(", "path", ",", "self", ".", "output", ")", "else", ":", "return", "posixpath", ".", "realpath", "(", "path", ")", "if", "path", ".", "startswith", "(", "'../'", ")", ":", "joined", "=", "posixpath", ".", "join", "(", "self", ".", "output", ",", "path", ")", "return", "posixpath", ".", "realpath", "(", "joined", ")", "return", "path" ]
Modify a path so it fits expectations. Avoid returning relative paths that start with '../' and possibly return relative paths when output and cache directories match.
[ "Modify", "a", "path", "so", "it", "fits", "expectations", ".", "Avoid", "returning", "relative", "paths", "that", "start", "with", "..", "/", "and", "possibly", "return", "relative", "paths", "when", "output", "and", "cache", "directories", "match", "." ]
python
train
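A minimal sketch (not part of Cascadenik) of the posixpath calls the output_path() record above relies on; the directory and file names are invented for illustration.

import posixpath

output = cache = '/srv/tiles'                 # equal dirs: a relative result is allowed
path = '/srv/tiles/styles/map.xml'
if posixpath.isabs(path) and output == cache:
    path = posixpath.relpath(path, output)
print(path)                                   # styles/map.xml

path = '../shared/icon.png'
if path.startswith('../'):
    path = posixpath.realpath(posixpath.join(output, path))
print(path)                                   # /srv/shared/icon.png on most systems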
dhermes/bezier
src/bezier/_helpers.py
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_helpers.py#L294-L384
def _simple_convex_hull(points): r"""Compute the convex hull for a set of points. .. _wikibooks: https://en.wikibooks.org/wiki/Algorithm_Implementation/\ Geometry/Convex_hull/Monotone_chain This uses Andrew's monotone chain convex hull algorithm and this code used a `wikibooks`_ implementation as motivation. tion. The code there is licensed CC BY-SA 3.0. .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Note that ``scipy.spatial.ConvexHull`` can do this as well (via Qhull), but that would require a hard dependency on ``scipy`` and that helper computes much more than we need. .. note:: This computes the convex hull in a "naive" way. It's expected that internal callers of this function will have a small number of points so ``n log n`` vs. ``n^2`` vs. ``n`` aren't that relevant. Args: points (numpy.ndarray): A ``2 x N`` array (``float64``) of points. Returns: numpy.ndarray: The ``2 x N`` array (``float64``) of ordered points in the polygonal convex hull. """ # NOTE: There is no corresponding "enable", but the disable only applies # in this lexical scope. # pylint: disable=too-many-branches if points.size == 0: return points # First, drop duplicates. unique_points = np.unique(points, axis=1) _, num_points = unique_points.shape if num_points < 2: return unique_points # Then sort the data in left-to-right order (and break ties by y-value). points = np.empty((2, num_points), order="F") for index, xy_val in enumerate( sorted(tuple(column) for column in unique_points.T) ): points[:, index] = xy_val # After sorting, if there are only 2 points, return. if num_points < 3: return points # Build lower hull lower = [0, 1] for index in six.moves.xrange(2, num_points): point2 = points[:, index] while len(lower) >= 2: point0 = points[:, lower[-2]] point1 = points[:, lower[-1]] if cross_product_compare(point0, point1, point2) > 0: break else: lower.pop() lower.append(index) # Build upper hull upper = [num_points - 1] for index in six.moves.xrange(num_points - 2, -1, -1): # Don't consider indices from the lower hull (other than the ends). if index > 0 and in_sorted(lower, index): continue point2 = points[:, index] while len(upper) >= 2: point0 = points[:, upper[-2]] point1 = points[:, upper[-1]] if cross_product_compare(point0, point1, point2) > 0: break else: upper.pop() upper.append(index) # **Both** corners are double counted. size_polygon = len(lower) + len(upper) - 2 polygon = np.empty((2, size_polygon), order="F") for index, column in enumerate(lower[:-1]): polygon[:, index] = points[:, column] index_start = len(lower) - 1 for index, column in enumerate(upper[:-1]): polygon[:, index + index_start] = points[:, column] return polygon
[ "def", "_simple_convex_hull", "(", "points", ")", ":", "# NOTE: There is no corresponding \"enable\", but the disable only applies", "# in this lexical scope.", "# pylint: disable=too-many-branches", "if", "points", ".", "size", "==", "0", ":", "return", "points", "# First, drop duplicates.", "unique_points", "=", "np", ".", "unique", "(", "points", ",", "axis", "=", "1", ")", "_", ",", "num_points", "=", "unique_points", ".", "shape", "if", "num_points", "<", "2", ":", "return", "unique_points", "# Then sort the data in left-to-right order (and break ties by y-value).", "points", "=", "np", ".", "empty", "(", "(", "2", ",", "num_points", ")", ",", "order", "=", "\"F\"", ")", "for", "index", ",", "xy_val", "in", "enumerate", "(", "sorted", "(", "tuple", "(", "column", ")", "for", "column", "in", "unique_points", ".", "T", ")", ")", ":", "points", "[", ":", ",", "index", "]", "=", "xy_val", "# After sorting, if there are only 2 points, return.", "if", "num_points", "<", "3", ":", "return", "points", "# Build lower hull", "lower", "=", "[", "0", ",", "1", "]", "for", "index", "in", "six", ".", "moves", ".", "xrange", "(", "2", ",", "num_points", ")", ":", "point2", "=", "points", "[", ":", ",", "index", "]", "while", "len", "(", "lower", ")", ">=", "2", ":", "point0", "=", "points", "[", ":", ",", "lower", "[", "-", "2", "]", "]", "point1", "=", "points", "[", ":", ",", "lower", "[", "-", "1", "]", "]", "if", "cross_product_compare", "(", "point0", ",", "point1", ",", "point2", ")", ">", "0", ":", "break", "else", ":", "lower", ".", "pop", "(", ")", "lower", ".", "append", "(", "index", ")", "# Build upper hull", "upper", "=", "[", "num_points", "-", "1", "]", "for", "index", "in", "six", ".", "moves", ".", "xrange", "(", "num_points", "-", "2", ",", "-", "1", ",", "-", "1", ")", ":", "# Don't consider indices from the lower hull (other than the ends).", "if", "index", ">", "0", "and", "in_sorted", "(", "lower", ",", "index", ")", ":", "continue", "point2", "=", "points", "[", ":", ",", "index", "]", "while", "len", "(", "upper", ")", ">=", "2", ":", "point0", "=", "points", "[", ":", ",", "upper", "[", "-", "2", "]", "]", "point1", "=", "points", "[", ":", ",", "upper", "[", "-", "1", "]", "]", "if", "cross_product_compare", "(", "point0", ",", "point1", ",", "point2", ")", ">", "0", ":", "break", "else", ":", "upper", ".", "pop", "(", ")", "upper", ".", "append", "(", "index", ")", "# **Both** corners are double counted.", "size_polygon", "=", "len", "(", "lower", ")", "+", "len", "(", "upper", ")", "-", "2", "polygon", "=", "np", ".", "empty", "(", "(", "2", ",", "size_polygon", ")", ",", "order", "=", "\"F\"", ")", "for", "index", ",", "column", "in", "enumerate", "(", "lower", "[", ":", "-", "1", "]", ")", ":", "polygon", "[", ":", ",", "index", "]", "=", "points", "[", ":", ",", "column", "]", "index_start", "=", "len", "(", "lower", ")", "-", "1", "for", "index", ",", "column", "in", "enumerate", "(", "upper", "[", ":", "-", "1", "]", ")", ":", "polygon", "[", ":", ",", "index", "+", "index_start", "]", "=", "points", "[", ":", ",", "column", "]", "return", "polygon" ]
r"""Compute the convex hull for a set of points. .. _wikibooks: https://en.wikibooks.org/wiki/Algorithm_Implementation/\ Geometry/Convex_hull/Monotone_chain This uses Andrew's monotone chain convex hull algorithm and this code used a `wikibooks`_ implementation as motivation. tion. The code there is licensed CC BY-SA 3.0. .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Note that ``scipy.spatial.ConvexHull`` can do this as well (via Qhull), but that would require a hard dependency on ``scipy`` and that helper computes much more than we need. .. note:: This computes the convex hull in a "naive" way. It's expected that internal callers of this function will have a small number of points so ``n log n`` vs. ``n^2`` vs. ``n`` aren't that relevant. Args: points (numpy.ndarray): A ``2 x N`` array (``float64``) of points. Returns: numpy.ndarray: The ``2 x N`` array (``float64``) of ordered points in the polygonal convex hull.
[ "r", "Compute", "the", "convex", "hull", "for", "a", "set", "of", "points", "." ]
python
train
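The docstring above describes Andrew's monotone chain algorithm. Below is a tiny standalone sketch of that algorithm on plain (x, y) tuples, for illustration only; it is not the bezier implementation, which works on 2 x N float64 arrays.

def cross(o, a, b):
    return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])

def monotone_chain(points):
    pts = sorted(set(points))
    if len(pts) <= 2:
        return pts
    lower, upper = [], []
    for p in pts:                              # build lower hull left to right
        while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:
            lower.pop()
        lower.append(p)
    for p in reversed(pts):                    # build upper hull right to left
        while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:
            upper.pop()
        upper.append(p)
    return lower[:-1] + upper[:-1]             # the shared endpoints appear once

print(monotone_chain([(0, 0), (1, 1), (2, 0), (1, 0.25), (0, 2), (2, 2)]))
# [(0, 0), (2, 0), (2, 2), (0, 2)]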
dcramer/django-ratings
djangoratings/fields.py
https://github.com/dcramer/django-ratings/blob/4d00dedc920a4e32d650dc12d5f480c51fc6216c/djangoratings/fields.py#L82-L88
def get_real_rating(self): """get_rating() Returns the unmodified average rating.""" if not (self.votes and self.score): return 0 return float(self.score)/self.votes
[ "def", "get_real_rating", "(", "self", ")", ":", "if", "not", "(", "self", ".", "votes", "and", "self", ".", "score", ")", ":", "return", "0", "return", "float", "(", "self", ".", "score", ")", "/", "self", ".", "votes" ]
get_rating() Returns the unmodified average rating.
[ "get_rating", "()", "Returns", "the", "unmodified", "average", "rating", "." ]
python
train
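A quick illustration of the average computed above (score divided by votes, guarded against zero):

score, votes = 27, 6
print(float(score) / votes if (votes and score) else 0)   # 4.5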
SiLab-Bonn/pyBAR
pybar/fei4/register.py
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4/register.py#L467-L493
def get_global_register_attributes(self, register_attribute, do_sort=True, **kwargs): """Calculating register numbers from register names. Usage: get_global_register_attributes("attribute_name", name = [regname_1, regname_2, ...], addresses = 2) Receives: attribute name to be returned, dictionaries (kwargs) of register attributes and values for making cuts Returns: list of attribute values that matches dictionaries of attributes """ # speed up of the most often used keyword name try: names = iterable(kwargs.pop('name')) except KeyError: register_attribute_list = [] else: register_attribute_list = [self.global_registers[reg][register_attribute] for reg in names] for keyword in kwargs.keys(): allowed_values = iterable(kwargs[keyword]) try: register_attribute_list.extend(map(itemgetter(register_attribute), filter(lambda global_register: set(iterable(global_register[keyword])).intersection(allowed_values), self.global_registers.itervalues()))) except AttributeError: pass if not register_attribute_list and filter(None, kwargs.itervalues()): raise ValueError('Global register attribute %s empty' % register_attribute) if do_sort: return sorted(set(flatten_iterable(register_attribute_list))) else: return flatten_iterable(register_attribute_list)
[ "def", "get_global_register_attributes", "(", "self", ",", "register_attribute", ",", "do_sort", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# speed up of the most often used keyword name\r", "try", ":", "names", "=", "iterable", "(", "kwargs", ".", "pop", "(", "'name'", ")", ")", "except", "KeyError", ":", "register_attribute_list", "=", "[", "]", "else", ":", "register_attribute_list", "=", "[", "self", ".", "global_registers", "[", "reg", "]", "[", "register_attribute", "]", "for", "reg", "in", "names", "]", "for", "keyword", "in", "kwargs", ".", "keys", "(", ")", ":", "allowed_values", "=", "iterable", "(", "kwargs", "[", "keyword", "]", ")", "try", ":", "register_attribute_list", ".", "extend", "(", "map", "(", "itemgetter", "(", "register_attribute", ")", ",", "filter", "(", "lambda", "global_register", ":", "set", "(", "iterable", "(", "global_register", "[", "keyword", "]", ")", ")", ".", "intersection", "(", "allowed_values", ")", ",", "self", ".", "global_registers", ".", "itervalues", "(", ")", ")", ")", ")", "except", "AttributeError", ":", "pass", "if", "not", "register_attribute_list", "and", "filter", "(", "None", ",", "kwargs", ".", "itervalues", "(", ")", ")", ":", "raise", "ValueError", "(", "'Global register attribute %s empty'", "%", "register_attribute", ")", "if", "do_sort", ":", "return", "sorted", "(", "set", "(", "flatten_iterable", "(", "register_attribute_list", ")", ")", ")", "else", ":", "return", "flatten_iterable", "(", "register_attribute_list", ")" ]
Calculating register numbers from register names. Usage: get_global_register_attributes("attribute_name", name = [regname_1, regname_2, ...], addresses = 2) Receives: attribute name to be returned, dictionaries (kwargs) of register attributes and values for making cuts Returns: list of attribute values that matches dictionaries of attributes
[ "Calculating", "register", "numbers", "from", "register", "names", ".", "Usage", ":", "get_global_register_attributes", "(", "attribute_name", "name", "=", "[", "regname_1", "regname_2", "...", "]", "addresses", "=", "2", ")", "Receives", ":", "attribute", "name", "to", "be", "returned", "dictionaries", "(", "kwargs", ")", "of", "register", "attributes", "and", "values", "for", "making", "cuts", "Returns", ":", "list", "of", "attribute", "values", "that", "matches", "dictionaries", "of", "attributes" ]
python
train
IBM/ibm-cos-sdk-python-s3transfer
ibm_s3transfer/aspera/futures.py
https://github.com/IBM/ibm-cos-sdk-python-s3transfer/blob/24ba53137213e26e6b8fc2c3ec1e8198d507d22b/ibm_s3transfer/aspera/futures.py#L634-L640
def _run_queued_callbacks(self): ''' run the init/queued callbacks when the transfer is initiated on Aspera ''' for callback in self._queued_callbacks: try: callback() except Exception as ex: logger.error("Exception: %s" % str(ex))
[ "def", "_run_queued_callbacks", "(", "self", ")", ":", "for", "callback", "in", "self", ".", "_queued_callbacks", ":", "try", ":", "callback", "(", ")", "except", "Exception", "as", "ex", ":", "logger", ".", "error", "(", "\"Exception: %s\"", "%", "str", "(", "ex", ")", ")" ]
run the init/queued callbacks when the transfer is initiated on Aspera
[ "run", "the", "init", "/", "queued", "callbacks", "when", "the", "transfer", "is", "initiated", "on", "Aspera" ]
python
train
CivicSpleen/ambry
ambry/etl/pipeline.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/etl/pipeline.py#L486-L496
def report_progress(self): """ This function can be called from a higher level to report progress. It is usually called from an alarm signal handler which is installed just before starting an operation: :return: Tuple: (process description, #records, #total records, #rate) """ from time import time # rows, rate = pl.sink.report_progress() return (self.i, round(float(self.i) / float(time() - self._start_time), 2))
[ "def", "report_progress", "(", "self", ")", ":", "from", "time", "import", "time", "# rows, rate = pl.sink.report_progress()", "return", "(", "self", ".", "i", ",", "round", "(", "float", "(", "self", ".", "i", ")", "/", "float", "(", "time", "(", ")", "-", "self", ".", "_start_time", ")", ",", "2", ")", ")" ]
This function can be called from a higher level to report progress. It is usually called from an alarm signal handler which is installed just before starting an operation: :return: Tuple: (process description, #records, #total records, #rate)
[ "This", "function", "can", "be", "called", "from", "a", "higher", "level", "to", "report", "progress", ".", "It", "is", "usually", "called", "from", "an", "alarm", "signal", "handler", "which", "is", "installed", "just", "before", "starting", "an", "operation", ":" ]
python
train
Esri/ArcREST
src/arcrest/ags/_gpobjects.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/ags/_gpobjects.py#L513-L525
def fromJSON(value): """loads the GP object from a JSON string """ j = json.loads(value) v = GPBoolean() if "defaultValue" in j: v.value = j['defaultValue'] else: v.value = j['value'] if 'paramName' in j: v.paramName = j['paramName'] elif 'name' in j: v.paramName = j['name'] return v
[ "def", "fromJSON", "(", "value", ")", ":", "j", "=", "json", ".", "loads", "(", "value", ")", "v", "=", "GPBoolean", "(", ")", "if", "\"defaultValue\"", "in", "j", ":", "v", ".", "value", "=", "j", "[", "'defaultValue'", "]", "else", ":", "v", ".", "value", "=", "j", "[", "'value'", "]", "if", "'paramName'", "in", "j", ":", "v", ".", "paramName", "=", "j", "[", "'paramName'", "]", "elif", "'name'", "in", "j", ":", "v", ".", "paramName", "=", "j", "[", "'name'", "]", "return", "v" ]
loads the GP object from a JSON string
[ "loads", "the", "GP", "object", "from", "a", "JSON", "string" ]
python
train
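A small sketch of the precedence applied above when decoding a GP parameter: 'defaultValue' wins over 'value' and 'paramName' over 'name'. The payload is fabricated and is not a real ArcGIS REST response.

import json

payload = json.loads('{"name": "overwrite", "paramName": "overwrite_output", "defaultValue": true}')
value = payload['defaultValue'] if 'defaultValue' in payload else payload['value']
name = payload['paramName'] if 'paramName' in payload else payload.get('name')
print(name, value)   # overwrite_output True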
hobson/pug-dj
pug/dj/crawler/views.py
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/crawler/views.py#L16-L24
def stop(self, spider_name=None): """Stop the named running spider, or the first spider found, if spider_name is None""" if spider_name is None: spider_name = self.spider_name else: self.spider_name = spider_name if self.spider_name is None: self.spider_name = self.list_running()[0].split(':')[-1] self.jsonrpc_call('crawler/engine', 'close_spider', self.spider_name)
[ "def", "stop", "(", "self", ",", "spider_name", "=", "None", ")", ":", "if", "spider_name", "is", "None", ":", "spider_name", "=", "self", ".", "spider_name", "else", ":", "self", ".", "spider_name", "=", "spider_name", "if", "self", ".", "spider_name", "is", "None", ":", "self", ".", "spider_name", "=", "self", ".", "list_running", "(", ")", "[", "0", "]", ".", "split", "(", "':'", ")", "[", "-", "1", "]", "self", ".", "jsonrpc_call", "(", "'crawler/engine'", ",", "'close_spider'", ",", "self", ".", "spider_name", ")" ]
Stop the named running spider, or the first spider found, if spider_name is None
[ "Stop", "the", "named", "running", "spider", "or", "the", "first", "spider", "found", "if", "spider_name", "is", "None" ]
python
train
ahtn/python-easyhid
easyhid/easyhid.py
https://github.com/ahtn/python-easyhid/blob/b89a60e5b378495b34c51ef11c5260bb43885780/easyhid/easyhid.py#L329-L350
def description(self): """ Get a string describing the HID descriptor. """ return \ """HIDDevice: {} | {:x}:{:x} | {} | {} | {} release_number: {} usage_page: {} usage: {} interface_number: {}\ """.format(self.path, self.vendor_id, self.product_id, self.manufacturer_string, self.product_string, self.serial_number, self.release_number, self.usage_page, self.usage, self.interface_number )
[ "def", "description", "(", "self", ")", ":", "return", "\"\"\"HIDDevice:\n {} | {:x}:{:x} | {} | {} | {}\n release_number: {}\n usage_page: {}\n usage: {}\n interface_number: {}\\\n\"\"\"", ".", "format", "(", "self", ".", "path", ",", "self", ".", "vendor_id", ",", "self", ".", "product_id", ",", "self", ".", "manufacturer_string", ",", "self", ".", "product_string", ",", "self", ".", "serial_number", ",", "self", ".", "release_number", ",", "self", ".", "usage_page", ",", "self", ".", "usage", ",", "self", ".", "interface_number", ")" ]
Get a string describing the HID descriptor.
[ "Get", "a", "string", "describing", "the", "HID", "descriptor", "." ]
python
train
dixudx/rtcclient
rtcclient/template.py
https://github.com/dixudx/rtcclient/blob/1721dd0b047478f5bdd6359b07a2c503cfafd86f/rtcclient/template.py#L196-L344
def getTemplate(self, copied_from, template_name=None, template_folder=None, keep=False, encoding="UTF-8"): """Get template from some to-be-copied :class:`rtcclient.workitem.Workitem` The resulting XML document is returned as a :class:`string`, but if `template_name` (a string value) is specified, it is written there instead. :param copied_from: the to-be-copied :class:`rtcclient.workitem.Workitem` id (integer or equivalent string) :param template_name: the template file name :param template_folder: the folder to store template file :param keep: (default is False) If `True`, some of below parameters (which may not be included in some customized :class:`rtcclient.workitem.Workitem` type ) will remain unchangeable with the to-be-copied :class:`rtcclient.workitem.Workitem`. Otherwise for `False`. * teamArea (Team Area) * ownedBy (Owned By) * plannedFor(Planned For) * severity(Severity) * priority(Priority) * filedAgainst(Filed Against) :param encoding: (default is "UTF-8") coding format :return: * a :class:`string` object: if `template_name` is not specified * write the template to file `template_name`: if `template_name` is specified """ try: if isinstance(copied_from, bool) or isinstance(copied_from, float): raise ValueError() if isinstance(copied_from, six.string_types): copied_from = int(copied_from) if not isinstance(copied_from, int): raise ValueError() except ValueError: err_msg = "Please input a valid workitem id you want to copy from" self.log.error(err_msg) raise exception.BadValue(err_msg) self.log.info("Fetch the template from <Workitem %s> with [keep]=%s", copied_from, keep) if template_folder is None: template_folder = self.searchpath # identify whether output to a file if template_name is not None: template_file_path = os.path.join(template_folder, template_name) output = open(template_file_path, "w") else: template_file_path = None output = None workitem_url = "/".join([self.url, "oslc/workitems/%s" % copied_from]) resp = self.get(workitem_url, verify=False, proxies=self.rtc_obj.proxies, headers=self.rtc_obj.headers) raw_data = xmltodict.parse(resp.content) # pre-adjust the template: # remove some attribute to avoid being overwritten, which will only be # generated when the workitem is created wk_raw_data = raw_data.get("oslc_cm:ChangeRequest") self._remove_long_fields(wk_raw_data) # Be cautious when you want to modify these fields # These fields have been tested as must-removed one remove_fields = ["@rdf:about", "dc:created", "dc:creator", "dc:identifier", "rtc_cm:contextId", "rtc_cm:comments", "rtc_cm:state", "dc:type", "rtc_cm:subscribers", "dc:modified", "rtc_cm:modifiedBy", "rtc_cm:resolved", "rtc_cm:resolvedBy", "rtc_cm:resolution", "rtc_cm:startDate", "rtc_cm:timeSpent", "rtc_cm:progressTracking", "rtc_cm:projectArea", "oslc_cm:relatedChangeManagement", "oslc_cm:trackedWorkItem", "oslc_cm:tracksWorkItem", "rtc_cm:timeSheet", "oslc_pl:schedule"] for remove_field in remove_fields: try: wk_raw_data.pop(remove_field) self.log.debug("Successfully remove field [%s] from the " "template originated from <Workitem %s>", remove_field, copied_from) except: self.log.warning("No field named [%s] in this template " "from <Workitem %s>", remove_field, copied_from) continue wk_raw_data["dc:description"] = "{{ description }}" wk_raw_data["dc:title"] = "{{ title }}" if keep: if template_file_path: self.log.info("Writing the template to file %s", template_file_path) return xmltodict.unparse(raw_data, output=output, encoding=encoding, pretty=True) replace_fields = [("rtc_cm:teamArea", "{{ 
teamArea }}"), ("rtc_cm:ownedBy", "{{ ownedBy }}"), ("rtc_cm:plannedFor", "{{ plannedFor }}"), ("rtc_cm:foundIn", "{{ foundIn }}"), ("oslc_cm:severity", "{{ severity }}"), ("oslc_cm:priority", "{{ priority }}"), ("rtc_cm:filedAgainst", "{{ filedAgainst }}")] for field in replace_fields: try: wk_raw_data[field[0]]["@rdf:resource"] = field[1] self.log.debug("Successfully replace field [%s] with [%s]", field[0], field[1]) except: self.log.warning("Cannot replace field [%s]", field[0]) continue if template_file_path: self.log.info("Writing the template to file %s", template_file_path) return xmltodict.unparse(raw_data, output=output, encoding=encoding, pretty=True)
[ "def", "getTemplate", "(", "self", ",", "copied_from", ",", "template_name", "=", "None", ",", "template_folder", "=", "None", ",", "keep", "=", "False", ",", "encoding", "=", "\"UTF-8\"", ")", ":", "try", ":", "if", "isinstance", "(", "copied_from", ",", "bool", ")", "or", "isinstance", "(", "copied_from", ",", "float", ")", ":", "raise", "ValueError", "(", ")", "if", "isinstance", "(", "copied_from", ",", "six", ".", "string_types", ")", ":", "copied_from", "=", "int", "(", "copied_from", ")", "if", "not", "isinstance", "(", "copied_from", ",", "int", ")", ":", "raise", "ValueError", "(", ")", "except", "ValueError", ":", "err_msg", "=", "\"Please input a valid workitem id you want to copy from\"", "self", ".", "log", ".", "error", "(", "err_msg", ")", "raise", "exception", ".", "BadValue", "(", "err_msg", ")", "self", ".", "log", ".", "info", "(", "\"Fetch the template from <Workitem %s> with [keep]=%s\"", ",", "copied_from", ",", "keep", ")", "if", "template_folder", "is", "None", ":", "template_folder", "=", "self", ".", "searchpath", "# identify whether output to a file", "if", "template_name", "is", "not", "None", ":", "template_file_path", "=", "os", ".", "path", ".", "join", "(", "template_folder", ",", "template_name", ")", "output", "=", "open", "(", "template_file_path", ",", "\"w\"", ")", "else", ":", "template_file_path", "=", "None", "output", "=", "None", "workitem_url", "=", "\"/\"", ".", "join", "(", "[", "self", ".", "url", ",", "\"oslc/workitems/%s\"", "%", "copied_from", "]", ")", "resp", "=", "self", ".", "get", "(", "workitem_url", ",", "verify", "=", "False", ",", "proxies", "=", "self", ".", "rtc_obj", ".", "proxies", ",", "headers", "=", "self", ".", "rtc_obj", ".", "headers", ")", "raw_data", "=", "xmltodict", ".", "parse", "(", "resp", ".", "content", ")", "# pre-adjust the template:", "# remove some attribute to avoid being overwritten, which will only be", "# generated when the workitem is created", "wk_raw_data", "=", "raw_data", ".", "get", "(", "\"oslc_cm:ChangeRequest\"", ")", "self", ".", "_remove_long_fields", "(", "wk_raw_data", ")", "# Be cautious when you want to modify these fields", "# These fields have been tested as must-removed one", "remove_fields", "=", "[", "\"@rdf:about\"", ",", "\"dc:created\"", ",", "\"dc:creator\"", ",", "\"dc:identifier\"", ",", "\"rtc_cm:contextId\"", ",", "\"rtc_cm:comments\"", ",", "\"rtc_cm:state\"", ",", "\"dc:type\"", ",", "\"rtc_cm:subscribers\"", ",", "\"dc:modified\"", ",", "\"rtc_cm:modifiedBy\"", ",", "\"rtc_cm:resolved\"", ",", "\"rtc_cm:resolvedBy\"", ",", "\"rtc_cm:resolution\"", ",", "\"rtc_cm:startDate\"", ",", "\"rtc_cm:timeSpent\"", ",", "\"rtc_cm:progressTracking\"", ",", "\"rtc_cm:projectArea\"", ",", "\"oslc_cm:relatedChangeManagement\"", ",", "\"oslc_cm:trackedWorkItem\"", ",", "\"oslc_cm:tracksWorkItem\"", ",", "\"rtc_cm:timeSheet\"", ",", "\"oslc_pl:schedule\"", "]", "for", "remove_field", "in", "remove_fields", ":", "try", ":", "wk_raw_data", ".", "pop", "(", "remove_field", ")", "self", ".", "log", ".", "debug", "(", "\"Successfully remove field [%s] from the \"", "\"template originated from <Workitem %s>\"", ",", "remove_field", ",", "copied_from", ")", "except", ":", "self", ".", "log", ".", "warning", "(", "\"No field named [%s] in this template \"", "\"from <Workitem %s>\"", ",", "remove_field", ",", "copied_from", ")", "continue", "wk_raw_data", "[", "\"dc:description\"", "]", "=", "\"{{ description }}\"", "wk_raw_data", "[", "\"dc:title\"", "]", "=", "\"{{ title }}\"", "if", "keep", ":", 
"if", "template_file_path", ":", "self", ".", "log", ".", "info", "(", "\"Writing the template to file %s\"", ",", "template_file_path", ")", "return", "xmltodict", ".", "unparse", "(", "raw_data", ",", "output", "=", "output", ",", "encoding", "=", "encoding", ",", "pretty", "=", "True", ")", "replace_fields", "=", "[", "(", "\"rtc_cm:teamArea\"", ",", "\"{{ teamArea }}\"", ")", ",", "(", "\"rtc_cm:ownedBy\"", ",", "\"{{ ownedBy }}\"", ")", ",", "(", "\"rtc_cm:plannedFor\"", ",", "\"{{ plannedFor }}\"", ")", ",", "(", "\"rtc_cm:foundIn\"", ",", "\"{{ foundIn }}\"", ")", ",", "(", "\"oslc_cm:severity\"", ",", "\"{{ severity }}\"", ")", ",", "(", "\"oslc_cm:priority\"", ",", "\"{{ priority }}\"", ")", ",", "(", "\"rtc_cm:filedAgainst\"", ",", "\"{{ filedAgainst }}\"", ")", "]", "for", "field", "in", "replace_fields", ":", "try", ":", "wk_raw_data", "[", "field", "[", "0", "]", "]", "[", "\"@rdf:resource\"", "]", "=", "field", "[", "1", "]", "self", ".", "log", ".", "debug", "(", "\"Successfully replace field [%s] with [%s]\"", ",", "field", "[", "0", "]", ",", "field", "[", "1", "]", ")", "except", ":", "self", ".", "log", ".", "warning", "(", "\"Cannot replace field [%s]\"", ",", "field", "[", "0", "]", ")", "continue", "if", "template_file_path", ":", "self", ".", "log", ".", "info", "(", "\"Writing the template to file %s\"", ",", "template_file_path", ")", "return", "xmltodict", ".", "unparse", "(", "raw_data", ",", "output", "=", "output", ",", "encoding", "=", "encoding", ",", "pretty", "=", "True", ")" ]
Get template from some to-be-copied :class:`rtcclient.workitem.Workitem` The resulting XML document is returned as a :class:`string`, but if `template_name` (a string value) is specified, it is written there instead. :param copied_from: the to-be-copied :class:`rtcclient.workitem.Workitem` id (integer or equivalent string) :param template_name: the template file name :param template_folder: the folder to store template file :param keep: (default is False) If `True`, some of below parameters (which may not be included in some customized :class:`rtcclient.workitem.Workitem` type ) will remain unchangeable with the to-be-copied :class:`rtcclient.workitem.Workitem`. Otherwise for `False`. * teamArea (Team Area) * ownedBy (Owned By) * plannedFor(Planned For) * severity(Severity) * priority(Priority) * filedAgainst(Filed Against) :param encoding: (default is "UTF-8") coding format :return: * a :class:`string` object: if `template_name` is not specified * write the template to file `template_name`: if `template_name` is specified
[ "Get", "template", "from", "some", "to", "-", "be", "-", "copied", ":", "class", ":", "rtcclient", ".", "workitem", ".", "Workitem" ]
python
train
uw-it-aca/uw-restclients-grad
uw_grad/petition.py
https://github.com/uw-it-aca/uw-restclients-grad/blob/ca06ed2f24f3683314a5690f6078e97d37fc8e52/uw_grad/petition.py#L20-L39
def _process_json(data): """ return a list of GradPetition objects. """ requests = [] for item in data: petition = GradPetition() petition.description = item.get('description') petition.submit_date = parse_datetime(item.get('submitDate')) petition.decision_date = parse_datetime(item.get('decisionDate')) if item.get('deptRecommend') and len(item.get('deptRecommend')): petition.dept_recommend = item.get('deptRecommend').lower() if item.get('gradSchoolDecision') and\ len(item.get('gradSchoolDecision')): petition.gradschool_decision =\ item.get('gradSchoolDecision').lower() requests.append(petition) return requests
[ "def", "_process_json", "(", "data", ")", ":", "requests", "=", "[", "]", "for", "item", "in", "data", ":", "petition", "=", "GradPetition", "(", ")", "petition", ".", "description", "=", "item", ".", "get", "(", "'description'", ")", "petition", ".", "submit_date", "=", "parse_datetime", "(", "item", ".", "get", "(", "'submitDate'", ")", ")", "petition", ".", "decision_date", "=", "parse_datetime", "(", "item", ".", "get", "(", "'decisionDate'", ")", ")", "if", "item", ".", "get", "(", "'deptRecommend'", ")", "and", "len", "(", "item", ".", "get", "(", "'deptRecommend'", ")", ")", ":", "petition", ".", "dept_recommend", "=", "item", ".", "get", "(", "'deptRecommend'", ")", ".", "lower", "(", ")", "if", "item", ".", "get", "(", "'gradSchoolDecision'", ")", "and", "len", "(", "item", ".", "get", "(", "'gradSchoolDecision'", ")", ")", ":", "petition", ".", "gradschool_decision", "=", "item", ".", "get", "(", "'gradSchoolDecision'", ")", ".", "lower", "(", ")", "requests", ".", "append", "(", "petition", ")", "return", "requests" ]
return a list of GradPetition objects.
[ "return", "a", "list", "of", "GradPetition", "objects", "." ]
python
train
sk-/git-lint
gitlint/git.py
https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/git.py#L109-L140
def modified_lines(filename, extra_data, commit=None): """Returns the lines that have been modifed for this file. Args: filename: the file to check. extra_data: is the extra_data returned by modified_files. Additionally, a value of None means that the file was not modified. commit: the complete sha1 (40 chars) of the commit. Note that specifying this value will only work (100%) when commit == last_commit (with respect to the currently checked out revision), otherwise, we could miss some lines. Returns: a list of lines that were modified, or None in case all lines are new. """ if extra_data is None: return [] if extra_data not in ('M ', ' M', 'MM'): return None if commit is None: commit = '0' * 40 commit = commit.encode('utf-8') # Split as bytes, as the output may have some non unicode characters. blame_lines = subprocess.check_output( ['git', 'blame', '--porcelain', filename]).split( os.linesep.encode('utf-8')) modified_line_numbers = utils.filter_lines( blame_lines, commit + br' (?P<line>\d+) (\d+)', groups=('line', )) return list(map(int, modified_line_numbers))
[ "def", "modified_lines", "(", "filename", ",", "extra_data", ",", "commit", "=", "None", ")", ":", "if", "extra_data", "is", "None", ":", "return", "[", "]", "if", "extra_data", "not", "in", "(", "'M '", ",", "' M'", ",", "'MM'", ")", ":", "return", "None", "if", "commit", "is", "None", ":", "commit", "=", "'0'", "*", "40", "commit", "=", "commit", ".", "encode", "(", "'utf-8'", ")", "# Split as bytes, as the output may have some non unicode characters.", "blame_lines", "=", "subprocess", ".", "check_output", "(", "[", "'git'", ",", "'blame'", ",", "'--porcelain'", ",", "filename", "]", ")", ".", "split", "(", "os", ".", "linesep", ".", "encode", "(", "'utf-8'", ")", ")", "modified_line_numbers", "=", "utils", ".", "filter_lines", "(", "blame_lines", ",", "commit", "+", "br' (?P<line>\\d+) (\\d+)'", ",", "groups", "=", "(", "'line'", ",", ")", ")", "return", "list", "(", "map", "(", "int", ",", "modified_line_numbers", ")", ")" ]
Returns the lines that have been modified for this file. Args: filename: the file to check. extra_data: is the extra_data returned by modified_files. Additionally, a value of None means that the file was not modified. commit: the complete sha1 (40 chars) of the commit. Note that specifying this value will only work (100%) when commit == last_commit (with respect to the currently checked out revision), otherwise, we could miss some lines. Returns: a list of lines that were modified, or None in case all lines are new.
[ "Returns", "the", "lines", "that", "have", "been", "modified", "for", "this", "file", "." ]
python
train
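A toy illustration of the parsing step above: `git blame --porcelain` starts each line group with a header of the form '<sha1> <original line> <final line> ...', and only headers carrying the commit of interest contribute line numbers. The blame output below is fabricated.

import re

commit = b'a' * 40                               # stand-in for a real 40-char sha1
blame_lines = [
    commit + b' 1 1 2',                          # header for the commit we care about
    b'\tfirst modified line',
    commit + b' 2 2',
    b'\tsecond modified line',
    b'0' * 40 + b' 3 3 1',                       # header for some other commit: ignored
]
pattern = re.compile(commit + br' (?P<line>\d+) (\d+)')
matches = (pattern.match(line) for line in blame_lines)
print([int(m.group('line')) for m in matches if m])   # [1, 2]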
thombashi/pathvalidate
pathvalidate/_file.py
https://github.com/thombashi/pathvalidate/blob/22d64038fb08c04aa9d0e8dd9fd1a955c1a9bfef/pathvalidate/_file.py#L477-L515
def validate_filepath(file_path, platform=None, min_len=1, max_len=None): """Verifying whether the ``file_path`` is a valid file path or not. Args: file_path (str): File path to validate. platform (str, optional): .. include:: platform.txt min_len (int, optional): Minimum length of the ``file_path``. The value must be greater or equal to one. Defaults to ``1``. max_len (int, optional): Maximum length of the ``file_path`` length. If the value is |None|, in the default, automatically determined by the ``platform``: - ``Linux``: 4096 - ``macOS``: 1024 - ``Windows``: 260 Raises: NullNameError: If the ``file_path`` is empty. InvalidCharError: If the ``file_path`` includes invalid char(s): |invalid_file_path_chars|. The following characters are also invalid for Windows platform: |invalid_win_file_path_chars| InvalidLengthError: If the ``file_path`` is longer than ``max_len`` characters. Example: :ref:`example-validate-file-path` See Also: `Naming Files, Paths, and Namespaces (Windows) <https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx>`__ """ FilePathSanitizer(platform=platform, min_len=min_len, max_len=max_len).validate(file_path)
[ "def", "validate_filepath", "(", "file_path", ",", "platform", "=", "None", ",", "min_len", "=", "1", ",", "max_len", "=", "None", ")", ":", "FilePathSanitizer", "(", "platform", "=", "platform", ",", "min_len", "=", "min_len", ",", "max_len", "=", "max_len", ")", ".", "validate", "(", "file_path", ")" ]
Verifying whether the ``file_path`` is a valid file path or not. Args: file_path (str): File path to validate. platform (str, optional): .. include:: platform.txt min_len (int, optional): Minimum length of the ``file_path``. The value must be greater or equal to one. Defaults to ``1``. max_len (int, optional): Maximum length of the ``file_path`` length. If the value is |None|, in the default, automatically determined by the ``platform``: - ``Linux``: 4096 - ``macOS``: 1024 - ``Windows``: 260 Raises: NullNameError: If the ``file_path`` is empty. InvalidCharError: If the ``file_path`` includes invalid char(s): |invalid_file_path_chars|. The following characters are also invalid for Windows platform: |invalid_win_file_path_chars| InvalidLengthError: If the ``file_path`` is longer than ``max_len`` characters. Example: :ref:`example-validate-file-path` See Also: `Naming Files, Paths, and Namespaces (Windows) <https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx>`__
[ "Verifying", "whether", "the", "file_path", "is", "a", "valid", "file", "path", "or", "not", "." ]
python
train
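A usage sketch following the signature and exception names given in the docstring above; newer pathvalidate releases may expose different exception classes, so the except clause is kept deliberately broad.

from pathvalidate import validate_filepath

try:
    validate_filepath('fi:l*e/p"a?t>h|.t<xt', platform='windows')
except Exception as exc:                     # e.g. InvalidCharError per the docstring
    print('rejected:', exc)

validate_filepath('project/data/output.txt')   # valid paths pass silently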
mobolic/facebook-sdk
facebook/__init__.py
https://github.com/mobolic/facebook-sdk/blob/65ff582e77f7ed68b6e9643a7490e5dee2a1031b/facebook/__init__.py#L218-L222
def delete_request(self, user_id, request_id): """Deletes the Request with the given ID for the given user.""" return self.request( "{0}_{1}".format(request_id, user_id), method="DELETE" )
[ "def", "delete_request", "(", "self", ",", "user_id", ",", "request_id", ")", ":", "return", "self", ".", "request", "(", "\"{0}_{1}\"", ".", "format", "(", "request_id", ",", "user_id", ")", ",", "method", "=", "\"DELETE\"", ")" ]
Deletes the Request with the given ID for the given user.
[ "Deletes", "the", "Request", "with", "the", "given", "ID", "for", "the", "given", "user", "." ]
python
train
m0n5t3r/gstats
examples/generate_traffic.py
https://github.com/m0n5t3r/gstats/blob/ae600d309ae8a159079fe1d6e6fa1c9097125f5b/examples/generate_traffic.py#L16-L24
def run(self): """ generate <nreq> requests taking a random amount of time between 0 and 0.5 seconds """ for i in xrange(self.nreq): req = '%s_%s' % (self.ident, i) pre_request(None, req) sleep(random() / 2) post_request(None, req)
[ "def", "run", "(", "self", ")", ":", "for", "i", "in", "xrange", "(", "self", ".", "nreq", ")", ":", "req", "=", "'%s_%s'", "%", "(", "self", ".", "ident", ",", "i", ")", "pre_request", "(", "None", ",", "req", ")", "sleep", "(", "random", "(", ")", "/", "2", ")", "post_request", "(", "None", ",", "req", ")" ]
generate <nreq> requests taking a random amount of time between 0 and 0.5 seconds
[ "generate", "<nreq", ">", "requests", "taking", "a", "random", "amount", "of", "time", "between", "0", "and", "0", ".", "5", "seconds" ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/controllers/state_editor/semantic_data_editor.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/state_editor/semantic_data_editor.py#L170-L174
def add_action_callback(self, key_value, modifier_mask, a_dict=False): """Callback method for add action""" if react_to_event(self.view, self.tree_view, event=(key_value, modifier_mask)) and self.active_entry_widget is None: self.on_add(None, a_dict) return True
[ "def", "add_action_callback", "(", "self", ",", "key_value", ",", "modifier_mask", ",", "a_dict", "=", "False", ")", ":", "if", "react_to_event", "(", "self", ".", "view", ",", "self", ".", "tree_view", ",", "event", "=", "(", "key_value", ",", "modifier_mask", ")", ")", "and", "self", ".", "active_entry_widget", "is", "None", ":", "self", ".", "on_add", "(", "None", ",", "a_dict", ")", "return", "True" ]
Callback method for add action
[ "Callback", "method", "for", "add", "action" ]
python
train
Devoxin/Lavalink.py
lavalink/WebSocket.py
https://github.com/Devoxin/Lavalink.py/blob/63f55c3d726d24c4cfd3674d3cd6aab6f5be110d/lavalink/WebSocket.py#L36-L73
async def connect(self): """ Establishes a connection to the Lavalink server. """ await self._lavalink.bot.wait_until_ready() if self._ws and self._ws.open: log.debug('WebSocket still open, closing...') await self._ws.close() user_id = self._lavalink.bot.user.id shard_count = self._lavalink.bot.shard_count or self._shards headers = { 'Authorization': self._password, 'Num-Shards': shard_count, 'User-Id': str(user_id) } log.debug('Preparing to connect to Lavalink') log.debug(' with URI: {}'.format(self._uri)) log.debug(' with headers: {}'.format(str(headers))) log.info('Connecting to Lavalink...') try: self._ws = await websockets.connect(self._uri, loop=self._loop, extra_headers=headers) except OSError as error: log.exception('Failed to connect to Lavalink: {}'.format(str(error))) else: log.info('Connected to Lavalink!') self._loop.create_task(self.listen()) version = self._ws.response_headers.get('Lavalink-Major-Version', 2) try: self._lavalink._server_version = int(version) except ValueError: self._lavalink._server_version = 2 log.info('Lavalink server version is {}'.format(version)) if self._queue: log.info('Replaying {} queued events...'.format(len(self._queue))) for task in self._queue: await self.send(**task)
[ "async", "def", "connect", "(", "self", ")", ":", "await", "self", ".", "_lavalink", ".", "bot", ".", "wait_until_ready", "(", ")", "if", "self", ".", "_ws", "and", "self", ".", "_ws", ".", "open", ":", "log", ".", "debug", "(", "'WebSocket still open, closing...'", ")", "await", "self", ".", "_ws", ".", "close", "(", ")", "user_id", "=", "self", ".", "_lavalink", ".", "bot", ".", "user", ".", "id", "shard_count", "=", "self", ".", "_lavalink", ".", "bot", ".", "shard_count", "or", "self", ".", "_shards", "headers", "=", "{", "'Authorization'", ":", "self", ".", "_password", ",", "'Num-Shards'", ":", "shard_count", ",", "'User-Id'", ":", "str", "(", "user_id", ")", "}", "log", ".", "debug", "(", "'Preparing to connect to Lavalink'", ")", "log", ".", "debug", "(", "' with URI: {}'", ".", "format", "(", "self", ".", "_uri", ")", ")", "log", ".", "debug", "(", "' with headers: {}'", ".", "format", "(", "str", "(", "headers", ")", ")", ")", "log", ".", "info", "(", "'Connecting to Lavalink...'", ")", "try", ":", "self", ".", "_ws", "=", "await", "websockets", ".", "connect", "(", "self", ".", "_uri", ",", "loop", "=", "self", ".", "_loop", ",", "extra_headers", "=", "headers", ")", "except", "OSError", "as", "error", ":", "log", ".", "exception", "(", "'Failed to connect to Lavalink: {}'", ".", "format", "(", "str", "(", "error", ")", ")", ")", "else", ":", "log", ".", "info", "(", "'Connected to Lavalink!'", ")", "self", ".", "_loop", ".", "create_task", "(", "self", ".", "listen", "(", ")", ")", "version", "=", "self", ".", "_ws", ".", "response_headers", ".", "get", "(", "'Lavalink-Major-Version'", ",", "2", ")", "try", ":", "self", ".", "_lavalink", ".", "_server_version", "=", "int", "(", "version", ")", "except", "ValueError", ":", "self", ".", "_lavalink", ".", "_server_version", "=", "2", "log", ".", "info", "(", "'Lavalink server version is {}'", ".", "format", "(", "version", ")", ")", "if", "self", ".", "_queue", ":", "log", ".", "info", "(", "'Replaying {} queued events...'", ".", "format", "(", "len", "(", "self", ".", "_queue", ")", ")", ")", "for", "task", "in", "self", ".", "_queue", ":", "await", "self", ".", "send", "(", "*", "*", "task", ")" ]
Establishes a connection to the Lavalink server.
[ "Establishes", "a", "connection", "to", "the", "Lavalink", "server", "." ]
python
valid
tensorflow/mesh
mesh_tensorflow/ops.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L225-L245
def tensor_dimension_to_mesh_axis(self, tensor_dimension, mesh_shape): """Mesh axis associated with tensor dimension (or None). Args: tensor_dimension: Dimension. mesh_shape: Shape. Returns: Integer or None. Raises: ValueError: If one Tensor dimension maps to two mesh dimensions. """ val = [i for i, mesh_dimension in enumerate(mesh_shape) if (tensor_dimension.name, mesh_dimension.name) in self._pairs] if len(val) > 1: raise ValueError( "Tensor dimension maps to multiple mesh dimensions" " tensor_dimension=%s mesh_shape=%s layout=%s" % (tensor_dimension, mesh_shape, self._pairs)) return val[0] if val else None
[ "def", "tensor_dimension_to_mesh_axis", "(", "self", ",", "tensor_dimension", ",", "mesh_shape", ")", ":", "val", "=", "[", "i", "for", "i", ",", "mesh_dimension", "in", "enumerate", "(", "mesh_shape", ")", "if", "(", "tensor_dimension", ".", "name", ",", "mesh_dimension", ".", "name", ")", "in", "self", ".", "_pairs", "]", "if", "len", "(", "val", ")", ">", "1", ":", "raise", "ValueError", "(", "\"Tensor dimension maps to multiple mesh dimensions\"", "\" tensor_dimension=%s mesh_shape=%s layout=%s\"", "%", "(", "tensor_dimension", ",", "mesh_shape", ",", "self", ".", "_pairs", ")", ")", "return", "val", "[", "0", "]", "if", "val", "else", "None" ]
Mesh axis associated with tensor dimension (or None). Args: tensor_dimension: Dimension. mesh_shape: Shape. Returns: Integer or None. Raises: ValueError: If one Tensor dimension maps to two mesh dimensions.
[ "Mesh", "axis", "associated", "with", "tensor", "dimension", "(", "or", "None", ")", "." ]
python
train
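A framework-free sketch of the lookup performed above, using plain strings instead of Mesh TensorFlow Dimension objects; the dimension names are invented.

layout_pairs = {('batch', 'rows'), ('heads', 'cols')}   # (tensor dim name, mesh dim name)
mesh_dims = ['rows', 'cols']                             # stand-in for mesh_shape

def tensor_dim_to_mesh_axis(tensor_dim):
    hits = [i for i, mesh_dim in enumerate(mesh_dims)
            if (tensor_dim, mesh_dim) in layout_pairs]
    if len(hits) > 1:
        raise ValueError('tensor dimension maps to multiple mesh dimensions')
    return hits[0] if hits else None

print(tensor_dim_to_mesh_axis('batch'))   # 0
print(tensor_dim_to_mesh_axis('vocab'))   # None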
datastax/python-driver
cassandra/cqlengine/models.py
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cqlengine/models.py#L448-L488
def _construct_instance(cls, values): """ method used to construct instances from query results this is where polymorphic deserialization occurs """ # we're going to take the values, which is from the DB as a dict # and translate that into our local fields # the db_map is a db_field -> model field map if cls._db_map: values = dict((cls._db_map.get(k, k), v) for k, v in values.items()) if cls._is_polymorphic: disc_key = values.get(cls._discriminator_column_name) if disc_key is None: raise PolymorphicModelException('discriminator value was not found in values') poly_base = cls if cls._is_polymorphic_base else cls._polymorphic_base klass = poly_base._get_model_by_discriminator_value(disc_key) if klass is None: poly_base._discover_polymorphic_submodels() klass = poly_base._get_model_by_discriminator_value(disc_key) if klass is None: raise PolymorphicModelException( 'unrecognized discriminator column {0} for class {1}'.format(disc_key, poly_base.__name__) ) if not issubclass(klass, cls): raise PolymorphicModelException( '{0} is not a subclass of {1}'.format(klass.__name__, cls.__name__) ) values = dict((k, v) for k, v in values.items() if k in klass._columns.keys()) else: klass = cls instance = klass(**values) instance._set_persisted(force=True) return instance
[ "def", "_construct_instance", "(", "cls", ",", "values", ")", ":", "# we're going to take the values, which is from the DB as a dict", "# and translate that into our local fields", "# the db_map is a db_field -> model field map", "if", "cls", ".", "_db_map", ":", "values", "=", "dict", "(", "(", "cls", ".", "_db_map", ".", "get", "(", "k", ",", "k", ")", ",", "v", ")", "for", "k", ",", "v", "in", "values", ".", "items", "(", ")", ")", "if", "cls", ".", "_is_polymorphic", ":", "disc_key", "=", "values", ".", "get", "(", "cls", ".", "_discriminator_column_name", ")", "if", "disc_key", "is", "None", ":", "raise", "PolymorphicModelException", "(", "'discriminator value was not found in values'", ")", "poly_base", "=", "cls", "if", "cls", ".", "_is_polymorphic_base", "else", "cls", ".", "_polymorphic_base", "klass", "=", "poly_base", ".", "_get_model_by_discriminator_value", "(", "disc_key", ")", "if", "klass", "is", "None", ":", "poly_base", ".", "_discover_polymorphic_submodels", "(", ")", "klass", "=", "poly_base", ".", "_get_model_by_discriminator_value", "(", "disc_key", ")", "if", "klass", "is", "None", ":", "raise", "PolymorphicModelException", "(", "'unrecognized discriminator column {0} for class {1}'", ".", "format", "(", "disc_key", ",", "poly_base", ".", "__name__", ")", ")", "if", "not", "issubclass", "(", "klass", ",", "cls", ")", ":", "raise", "PolymorphicModelException", "(", "'{0} is not a subclass of {1}'", ".", "format", "(", "klass", ".", "__name__", ",", "cls", ".", "__name__", ")", ")", "values", "=", "dict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "values", ".", "items", "(", ")", "if", "k", "in", "klass", ".", "_columns", ".", "keys", "(", ")", ")", "else", ":", "klass", "=", "cls", "instance", "=", "klass", "(", "*", "*", "values", ")", "instance", ".", "_set_persisted", "(", "force", "=", "True", ")", "return", "instance" ]
method used to construct instances from query results; this is where polymorphic deserialization occurs
[ "method", "used", "to", "construct", "instances", "from", "query", "results", "this", "is", "where", "polymorphic", "deserialization", "occurs" ]
python
train
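A much-simplified, framework-free sketch of the discriminator dispatch described above: a base class keeps a registry of discriminator values and each row is deserialized into whichever subclass matches. Class and field names are invented.

class Animal:
    _registry = {}

    def __init_subclass__(cls, disc=None, **kwargs):
        super().__init_subclass__(**kwargs)
        if disc is not None:
            Animal._registry[disc] = cls        # register the discriminator value

class Cat(Animal, disc='cat'):
    pass

class Dog(Animal, disc='dog'):
    pass

def construct_instance(values):
    klass = Animal._registry.get(values.pop('type'))
    if klass is None:
        raise ValueError('unrecognized discriminator value')
    instance = klass()
    instance.__dict__.update(values)            # remaining columns become attributes
    return instance

pet = construct_instance({'type': 'cat', 'name': 'Mia'})
print(type(pet).__name__, pet.name)             # Cat Mia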
inspirehep/refextract
refextract/references/text.py
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/text.py#L91-L142
def get_reference_lines(docbody, ref_sect_start_line, ref_sect_end_line, ref_sect_title, ref_line_marker_ptn, title_marker_same_line): """After the reference section of a document has been identified, and the first and last lines of the reference section have been recorded, this function is called to take the reference lines out of the document body. The document's reference lines are returned in a list of strings whereby each string is a reference line. Before this can be done however, the reference section is passed to another function that rebuilds any broken reference lines. @param docbody: (list) of strings - the entire document body. @param ref_sect_start_line: (integer) - the index in docbody of the first reference line. @param ref_sect_end_line: (integer) - the index in docbody of the last reference line. @param ref_sect_title: (string) - the title of the reference section (e.g. "References"). @param ref_line_marker_ptn: (string) - the patern used to match the marker for each reference line (e.g., could be used to match lines with markers of the form [1], [2], etc.) @param title_marker_same_line: (integer) - a flag to indicate whether or not the reference section title was on the same line as the first reference line's marker. @return: (list) of strings. Each string is a reference line, extracted from the document. """ start_idx = ref_sect_start_line if title_marker_same_line: # Title on same line as 1st ref- take title out! title_start = docbody[start_idx].find(ref_sect_title) if title_start != -1: # Set the first line with no title docbody[start_idx] = docbody[start_idx][title_start + len(ref_sect_title):] elif ref_sect_title is not None: # Set the start of the reference section to be after the title line start_idx += 1 if ref_sect_end_line is not None: ref_lines = docbody[start_idx:ref_sect_end_line + 1] else: ref_lines = docbody[start_idx:] if ref_sect_title: ref_lines = strip_footer(ref_lines, ref_sect_title) # Now rebuild reference lines: # (Go through each raw reference line, and format them into a set # of properly ordered lines based on markers) return rebuild_reference_lines(ref_lines, ref_line_marker_ptn)
[ "def", "get_reference_lines", "(", "docbody", ",", "ref_sect_start_line", ",", "ref_sect_end_line", ",", "ref_sect_title", ",", "ref_line_marker_ptn", ",", "title_marker_same_line", ")", ":", "start_idx", "=", "ref_sect_start_line", "if", "title_marker_same_line", ":", "# Title on same line as 1st ref- take title out!", "title_start", "=", "docbody", "[", "start_idx", "]", ".", "find", "(", "ref_sect_title", ")", "if", "title_start", "!=", "-", "1", ":", "# Set the first line with no title", "docbody", "[", "start_idx", "]", "=", "docbody", "[", "start_idx", "]", "[", "title_start", "+", "len", "(", "ref_sect_title", ")", ":", "]", "elif", "ref_sect_title", "is", "not", "None", ":", "# Set the start of the reference section to be after the title line", "start_idx", "+=", "1", "if", "ref_sect_end_line", "is", "not", "None", ":", "ref_lines", "=", "docbody", "[", "start_idx", ":", "ref_sect_end_line", "+", "1", "]", "else", ":", "ref_lines", "=", "docbody", "[", "start_idx", ":", "]", "if", "ref_sect_title", ":", "ref_lines", "=", "strip_footer", "(", "ref_lines", ",", "ref_sect_title", ")", "# Now rebuild reference lines:", "# (Go through each raw reference line, and format them into a set", "# of properly ordered lines based on markers)", "return", "rebuild_reference_lines", "(", "ref_lines", ",", "ref_line_marker_ptn", ")" ]
After the reference section of a document has been identified, and the first and last lines of the reference section have been recorded, this function is called to take the reference lines out of the document body. The document's reference lines are returned in a list of strings whereby each string is a reference line. Before this can be done however, the reference section is passed to another function that rebuilds any broken reference lines. @param docbody: (list) of strings - the entire document body. @param ref_sect_start_line: (integer) - the index in docbody of the first reference line. @param ref_sect_end_line: (integer) - the index in docbody of the last reference line. @param ref_sect_title: (string) - the title of the reference section (e.g. "References"). @param ref_line_marker_ptn: (string) - the patern used to match the marker for each reference line (e.g., could be used to match lines with markers of the form [1], [2], etc.) @param title_marker_same_line: (integer) - a flag to indicate whether or not the reference section title was on the same line as the first reference line's marker. @return: (list) of strings. Each string is a reference line, extracted from the document.
[ "After", "the", "reference", "section", "of", "a", "document", "has", "been", "identified", "and", "the", "first", "and", "last", "lines", "of", "the", "reference", "section", "have", "been", "recorded", "this", "function", "is", "called", "to", "take", "the", "reference", "lines", "out", "of", "the", "document", "body", ".", "The", "document", "s", "reference", "lines", "are", "returned", "in", "a", "list", "of", "strings", "whereby", "each", "string", "is", "a", "reference", "line", ".", "Before", "this", "can", "be", "done", "however", "the", "reference", "section", "is", "passed", "to", "another", "function", "that", "rebuilds", "any", "broken", "reference", "lines", "." ]
python
train
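A toy walk-through of the slicing behaviour described above, on a made-up document body: cut the section title out of (or skip) the first line, then slice up to the last reference line.

docbody = ['Intro text', 'References', '[1] A. Author, J. Phys. ...',
           '[2] B. Author, Nucl. Instrum. ...', 'Appendix A']
start, end, title = 1, 3, 'References'

if title in docbody[start] and docbody[start].strip() != title:
    # the title shares a line with the first reference marker: cut it out
    docbody[start] = docbody[start][docbody[start].find(title) + len(title):]
else:
    start += 1                                  # the title had its own line

print(docbody[start:end + 1])
# ['[1] A. Author, J. Phys. ...', '[2] B. Author, Nucl. Instrum. ...']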
wummel/patool
patoolib/programs/star.py
https://github.com/wummel/patool/blob/d7e64d9fd60faaa4b3f824bd97c43ce59b185c40/patoolib/programs/star.py#L26-L31
def list_tar (archive, compression, cmd, verbosity, interactive): """List a TAR archive.""" cmdlist = [cmd, '-n'] add_star_opts(cmdlist, compression, verbosity) cmdlist.append("file=%s" % archive) return cmdlist
[ "def", "list_tar", "(", "archive", ",", "compression", ",", "cmd", ",", "verbosity", ",", "interactive", ")", ":", "cmdlist", "=", "[", "cmd", ",", "'-n'", "]", "add_star_opts", "(", "cmdlist", ",", "compression", ",", "verbosity", ")", "cmdlist", ".", "append", "(", "\"file=%s\"", "%", "archive", ")", "return", "cmdlist" ]
List a TAR archive.
[ "List", "a", "TAR", "archive", "." ]
python
train
craffel/mir_eval
mir_eval/chord.py
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L759-L804
def thirds_inv(reference_labels, estimated_labels): """Score chords along root, third, & bass relationships. Examples -------- >>> (ref_intervals, ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab') >>> (est_intervals, ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab') >>> est_intervals, est_labels = mir_eval.util.adjust_intervals( ... est_intervals, est_labels, ref_intervals.min(), ... ref_intervals.max(), mir_eval.chord.NO_CHORD, ... mir_eval.chord.NO_CHORD) >>> (intervals, ... ref_labels, ... est_labels) = mir_eval.util.merge_labeled_intervals( ... ref_intervals, ref_labels, est_intervals, est_labels) >>> durations = mir_eval.util.intervals_to_durations(intervals) >>> comparisons = mir_eval.chord.thirds_inv(ref_labels, est_labels) >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations) Parameters ---------- reference_labels : list, len=n Reference chord labels to score against. estimated_labels : list, len=n Estimated chord labels to score against. Returns ------- scores : np.ndarray, shape=(n,), dtype=float Comparison scores, in [0.0, 1.0] """ validate(reference_labels, estimated_labels) ref_roots, ref_semitones, ref_bass = encode_many(reference_labels, False) est_roots, est_semitones, est_bass = encode_many(estimated_labels, False) eq_root = ref_roots == est_roots eq_bass = ref_bass == est_bass eq_third = ref_semitones[:, 3] == est_semitones[:, 3] comparison_scores = (eq_root * eq_third * eq_bass).astype(np.float) # Ignore 'X' chords comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0 return comparison_scores
[ "def", "thirds_inv", "(", "reference_labels", ",", "estimated_labels", ")", ":", "validate", "(", "reference_labels", ",", "estimated_labels", ")", "ref_roots", ",", "ref_semitones", ",", "ref_bass", "=", "encode_many", "(", "reference_labels", ",", "False", ")", "est_roots", ",", "est_semitones", ",", "est_bass", "=", "encode_many", "(", "estimated_labels", ",", "False", ")", "eq_root", "=", "ref_roots", "==", "est_roots", "eq_bass", "=", "ref_bass", "==", "est_bass", "eq_third", "=", "ref_semitones", "[", ":", ",", "3", "]", "==", "est_semitones", "[", ":", ",", "3", "]", "comparison_scores", "=", "(", "eq_root", "*", "eq_third", "*", "eq_bass", ")", ".", "astype", "(", "np", ".", "float", ")", "# Ignore 'X' chords", "comparison_scores", "[", "np", ".", "any", "(", "ref_semitones", "<", "0", ",", "axis", "=", "1", ")", "]", "=", "-", "1.0", "return", "comparison_scores" ]
Score chords along root, third, & bass relationships. Examples -------- >>> (ref_intervals, ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab') >>> (est_intervals, ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab') >>> est_intervals, est_labels = mir_eval.util.adjust_intervals( ... est_intervals, est_labels, ref_intervals.min(), ... ref_intervals.max(), mir_eval.chord.NO_CHORD, ... mir_eval.chord.NO_CHORD) >>> (intervals, ... ref_labels, ... est_labels) = mir_eval.util.merge_labeled_intervals( ... ref_intervals, ref_labels, est_intervals, est_labels) >>> durations = mir_eval.util.intervals_to_durations(intervals) >>> comparisons = mir_eval.chord.thirds_inv(ref_labels, est_labels) >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations) Parameters ---------- reference_labels : list, len=n Reference chord labels to score against. estimated_labels : list, len=n Estimated chord labels to score against. Returns ------- scores : np.ndarray, shape=(n,), dtype=float Comparison scores, in [0.0, 1.0]
[ "Score", "chords", "along", "root", "third", "&", "bass", "relationships", "." ]
python
train
geophysics-ubonn/reda
lib/reda/containers/sEIT.py
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L455-L493
def plot_pseudosections(self, column, filename=None, return_fig=False): """Create a multi-plot with one pseudosection for each frequency. Parameters ---------- column : string which column to plot filename : None|string output filename. If set to None, do not write to file. Default: None return_fig : bool if True, return the generated figure object. Default: False Returns ------- fig : None|matplotlib.Figure if return_fig is set to True, return the generated Figure object """ assert column in self.data.columns g = self.data.groupby('frequency') fig, axes = plt.subplots( 4, 2, figsize=(15 / 2.54, 20 / 2.54), sharex=True, sharey=True ) for ax, (key, item) in zip(axes.flat, g): fig, ax, cb = PS.plot_pseudosection_type2( item, ax=ax, column=column ) ax.set_title('f: {} Hz'.format(key)) fig.tight_layout() if filename is not None: fig.savefig(filename, dpi=300) if return_fig: return fig else: plt.close(fig)
[ "def", "plot_pseudosections", "(", "self", ",", "column", ",", "filename", "=", "None", ",", "return_fig", "=", "False", ")", ":", "assert", "column", "in", "self", ".", "data", ".", "columns", "g", "=", "self", ".", "data", ".", "groupby", "(", "'frequency'", ")", "fig", ",", "axes", "=", "plt", ".", "subplots", "(", "4", ",", "2", ",", "figsize", "=", "(", "15", "/", "2.54", ",", "20", "/", "2.54", ")", ",", "sharex", "=", "True", ",", "sharey", "=", "True", ")", "for", "ax", ",", "(", "key", ",", "item", ")", "in", "zip", "(", "axes", ".", "flat", ",", "g", ")", ":", "fig", ",", "ax", ",", "cb", "=", "PS", ".", "plot_pseudosection_type2", "(", "item", ",", "ax", "=", "ax", ",", "column", "=", "column", ")", "ax", ".", "set_title", "(", "'f: {} Hz'", ".", "format", "(", "key", ")", ")", "fig", ".", "tight_layout", "(", ")", "if", "filename", "is", "not", "None", ":", "fig", ".", "savefig", "(", "filename", ",", "dpi", "=", "300", ")", "if", "return_fig", ":", "return", "fig", "else", ":", "plt", ".", "close", "(", "fig", ")" ]
Create a multi-plot with one pseudosection for each frequency. Parameters ---------- column : string which column to plot filename : None|string output filename. If set to None, do not write to file. Default: None return_fig : bool if True, return the generated figure object. Default: False Returns ------- fig : None|matplotlib.Figure if return_fig is set to True, return the generated Figure object
[ "Create", "a", "multi", "-", "plot", "with", "one", "pseudosection", "for", "each", "frequency", "." ]
python
train
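Illustrative usage sketch for the plot_pseudosections() record above (not part of the original data): the container variable name `seit` and the column name 'r' are assumptions, chosen only to show how the method might be called on an sEIT container.
# Hypothetical sEIT container `seit`; 'r' stands in for whatever data column exists.
fig = seit.plot_pseudosections('r', filename='pseudosections.pdf', return_fig=True)
# With return_fig=True the figure is returned instead of being closed.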
stefankoegl/kdtree
kdtree.py
https://github.com/stefankoegl/kdtree/blob/587edc7056d7735177ad56a84ad5abccdea91693/kdtree.py#L644-L677
def visualize(tree, max_level=100, node_width=10, left_padding=5): """ Prints the tree to stdout """ height = min(max_level, tree.height()-1) max_width = pow(2, height) per_level = 1 in_level = 0 level = 0 for node in level_order(tree, include_all=True): if in_level == 0: print() print() print(' '*left_padding, end=' ') width = int(max_width*node_width/per_level) node_str = (str(node.data) if node else '').center(width) print(node_str, end=' ') in_level += 1 if in_level == per_level: in_level = 0 per_level *= 2 level += 1 if level > height: break print() print()
[ "def", "visualize", "(", "tree", ",", "max_level", "=", "100", ",", "node_width", "=", "10", ",", "left_padding", "=", "5", ")", ":", "height", "=", "min", "(", "max_level", ",", "tree", ".", "height", "(", ")", "-", "1", ")", "max_width", "=", "pow", "(", "2", ",", "height", ")", "per_level", "=", "1", "in_level", "=", "0", "level", "=", "0", "for", "node", "in", "level_order", "(", "tree", ",", "include_all", "=", "True", ")", ":", "if", "in_level", "==", "0", ":", "print", "(", ")", "print", "(", ")", "print", "(", "' '", "*", "left_padding", ",", "end", "=", "' '", ")", "width", "=", "int", "(", "max_width", "*", "node_width", "/", "per_level", ")", "node_str", "=", "(", "str", "(", "node", ".", "data", ")", "if", "node", "else", "''", ")", ".", "center", "(", "width", ")", "print", "(", "node_str", ",", "end", "=", "' '", ")", "in_level", "+=", "1", "if", "in_level", "==", "per_level", ":", "in_level", "=", "0", "per_level", "*=", "2", "level", "+=", "1", "if", "level", ">", "height", ":", "break", "print", "(", ")", "print", "(", ")" ]
Prints the tree to stdout
[ "Prints", "the", "tree", "to", "stdout" ]
python
train
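Illustrative usage sketch for the kdtree visualize() record above (not part of the original data); it assumes the module-level create() helper from the same library builds a tree from a point list.
import kdtree

# Build a small 2-d tree and print an ASCII rendering of its levels.
tree = kdtree.create([(2, 3), (5, 4), (9, 6), (4, 7), (8, 1), (7, 2)])
kdtree.visualize(tree)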
nicolargo/glances
glances/plugins/glances_ports.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_ports.py#L75-L98
def update(self): """Update the ports list.""" if self.input_method == 'local': # Only refresh: # * if there is not other scanning thread # * every refresh seconds (define in the configuration file) if self._thread is None: thread_is_running = False else: thread_is_running = self._thread.isAlive() if self.timer_ports.finished() and not thread_is_running: # Run ports scanner self._thread = ThreadScanner(self.stats) self._thread.start() # Restart timer if len(self.stats) > 0: self.timer_ports = Timer(self.stats[0]['refresh']) else: self.timer_ports = Timer(0) else: # Not available in SNMP mode pass return self.stats
[ "def", "update", "(", "self", ")", ":", "if", "self", ".", "input_method", "==", "'local'", ":", "# Only refresh:", "# * if there is not other scanning thread", "# * every refresh seconds (define in the configuration file)", "if", "self", ".", "_thread", "is", "None", ":", "thread_is_running", "=", "False", "else", ":", "thread_is_running", "=", "self", ".", "_thread", ".", "isAlive", "(", ")", "if", "self", ".", "timer_ports", ".", "finished", "(", ")", "and", "not", "thread_is_running", ":", "# Run ports scanner", "self", ".", "_thread", "=", "ThreadScanner", "(", "self", ".", "stats", ")", "self", ".", "_thread", ".", "start", "(", ")", "# Restart timer", "if", "len", "(", "self", ".", "stats", ")", ">", "0", ":", "self", ".", "timer_ports", "=", "Timer", "(", "self", ".", "stats", "[", "0", "]", "[", "'refresh'", "]", ")", "else", ":", "self", ".", "timer_ports", "=", "Timer", "(", "0", ")", "else", ":", "# Not available in SNMP mode", "pass", "return", "self", ".", "stats" ]
Update the ports list.
[ "Update", "the", "ports", "list", "." ]
python
train
data-8/datascience
datascience/util.py
https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/util.py#L203-L244
def minimize(f, start=None, smooth=False, log=None, array=False, **vargs): """Minimize a function f of one or more arguments. Args: f: A function that takes numbers and returns a number start: A starting value or list of starting values smooth: Whether to assume that f is smooth and use first-order info log: Logging function called on the result of optimization (e.g. print) vargs: Other named arguments passed to scipy.optimize.minimize Returns either: (a) the minimizing argument of a one-argument function (b) an array of minimizing arguments of a multi-argument function """ if start is None: assert not array, "Please pass starting values explicitly when array=True" arg_count = f.__code__.co_argcount assert arg_count > 0, "Please pass starting values explicitly for variadic functions" start = [0] * arg_count if not hasattr(start, '__len__'): start = [start] if array: objective = f else: @functools.wraps(f) def objective(args): return f(*args) if not smooth and 'method' not in vargs: vargs['method'] = 'Powell' result = optimize.minimize(objective, start, **vargs) if log is not None: log(result) if len(start) == 1: return result.x.item(0) else: return result.x
[ "def", "minimize", "(", "f", ",", "start", "=", "None", ",", "smooth", "=", "False", ",", "log", "=", "None", ",", "array", "=", "False", ",", "*", "*", "vargs", ")", ":", "if", "start", "is", "None", ":", "assert", "not", "array", ",", "\"Please pass starting values explicitly when array=True\"", "arg_count", "=", "f", ".", "__code__", ".", "co_argcount", "assert", "arg_count", ">", "0", ",", "\"Please pass starting values explicitly for variadic functions\"", "start", "=", "[", "0", "]", "*", "arg_count", "if", "not", "hasattr", "(", "start", ",", "'__len__'", ")", ":", "start", "=", "[", "start", "]", "if", "array", ":", "objective", "=", "f", "else", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "objective", "(", "args", ")", ":", "return", "f", "(", "*", "args", ")", "if", "not", "smooth", "and", "'method'", "not", "in", "vargs", ":", "vargs", "[", "'method'", "]", "=", "'Powell'", "result", "=", "optimize", ".", "minimize", "(", "objective", ",", "start", ",", "*", "*", "vargs", ")", "if", "log", "is", "not", "None", ":", "log", "(", "result", ")", "if", "len", "(", "start", ")", "==", "1", ":", "return", "result", ".", "x", ".", "item", "(", "0", ")", "else", ":", "return", "result", ".", "x" ]
Minimize a function f of one or more arguments. Args: f: A function that takes numbers and returns a number start: A starting value or list of starting values smooth: Whether to assume that f is smooth and use first-order info log: Logging function called on the result of optimization (e.g. print) vargs: Other named arguments passed to scipy.optimize.minimize Returns either: (a) the minimizing argument of a one-argument function (b) an array of minimizing arguments of a multi-argument function
[ "Minimize", "a", "function", "f", "of", "one", "or", "more", "arguments", "." ]
python
train
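Illustrative usage sketch for the minimize() record above (not part of the original data); it assumes the function is importable from the package top level.
from datascience import minimize

# One argument: returns the minimizing scalar (about 2.0 here).
best_x = minimize(lambda x: (x - 2) ** 2 + 1)

# Several arguments: returns an array of minimizing arguments.
best_xy = minimize(lambda x, y: (x - 1) ** 2 + (y + 3) ** 2)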
laysakura/relshell
relshell/daemon_shelloperator.py
https://github.com/laysakura/relshell/blob/9ca5c03a34c11cb763a4a75595f18bf4383aa8cc/relshell/daemon_shelloperator.py#L82-L118
def run(self, in_batches): """Run shell operator synchronously to eat `in_batches` :param in_batches: `tuple` of batches to process """ if len(in_batches) != len(self._batcmd.batch_to_file_s): BaseShellOperator._rm_process_input_tmpfiles(self._batcmd.batch_to_file_s) # [todo] - Removing tmpfiles can be easily forgot. Less lifetime for tmpfile. raise AttributeError('len(in_batches) == %d, while %d IN_BATCH* are specified in command below:%s$ %s' % (len(in_batches), len(self._batcmd.batch_to_file_s), os.linesep, self._batcmd.sh_cmd)) # prepare & start process (if necessary) BaseShellOperator._batches_to_tmpfile(self._in_record_sep, self._in_column_sep, in_batches, self._batcmd.batch_to_file_s) if self._process is None: self._process = BaseShellOperator._start_process( self._batcmd, self._cwd, self._env, non_blocking_stdout=True) # Begin thread to read from subprocess's stdout. # Without this thread, subprocess's output buffer becomes full and no one solves it. t_consumer = Thread(target=get_subprocess_output, args=(self._process.stdout, self._batch_done_output, self._subprocess_out_str)) t_consumer.start() # pass batch to subprocess BaseShellOperator._batch_to_stdin(self._process, self._in_record_sep, self._in_column_sep, in_batches, self._batcmd.batch_to_file_s) # pass batch-done indicator to subprocess self._process.stdin.write(self._batch_done_indicator) # get output from subprocess t_consumer.join() subprocess_out_str = self._subprocess_out_str[0] self._subprocess_out_str = [] out_batch = BaseShellOperator._out_str_to_batch(subprocess_out_str, self._out_recdef, self._out_col_patterns) return out_batch
[ "def", "run", "(", "self", ",", "in_batches", ")", ":", "if", "len", "(", "in_batches", ")", "!=", "len", "(", "self", ".", "_batcmd", ".", "batch_to_file_s", ")", ":", "BaseShellOperator", ".", "_rm_process_input_tmpfiles", "(", "self", ".", "_batcmd", ".", "batch_to_file_s", ")", "# [todo] - Removing tmpfiles can be easily forgot. Less lifetime for tmpfile.", "raise", "AttributeError", "(", "'len(in_batches) == %d, while %d IN_BATCH* are specified in command below:%s$ %s'", "%", "(", "len", "(", "in_batches", ")", ",", "len", "(", "self", ".", "_batcmd", ".", "batch_to_file_s", ")", ",", "os", ".", "linesep", ",", "self", ".", "_batcmd", ".", "sh_cmd", ")", ")", "# prepare & start process (if necessary)", "BaseShellOperator", ".", "_batches_to_tmpfile", "(", "self", ".", "_in_record_sep", ",", "self", ".", "_in_column_sep", ",", "in_batches", ",", "self", ".", "_batcmd", ".", "batch_to_file_s", ")", "if", "self", ".", "_process", "is", "None", ":", "self", ".", "_process", "=", "BaseShellOperator", ".", "_start_process", "(", "self", ".", "_batcmd", ",", "self", ".", "_cwd", ",", "self", ".", "_env", ",", "non_blocking_stdout", "=", "True", ")", "# Begin thread to read from subprocess's stdout.", "# Without this thread, subprocess's output buffer becomes full and no one solves it.", "t_consumer", "=", "Thread", "(", "target", "=", "get_subprocess_output", ",", "args", "=", "(", "self", ".", "_process", ".", "stdout", ",", "self", ".", "_batch_done_output", ",", "self", ".", "_subprocess_out_str", ")", ")", "t_consumer", ".", "start", "(", ")", "# pass batch to subprocess", "BaseShellOperator", ".", "_batch_to_stdin", "(", "self", ".", "_process", ",", "self", ".", "_in_record_sep", ",", "self", ".", "_in_column_sep", ",", "in_batches", ",", "self", ".", "_batcmd", ".", "batch_to_file_s", ")", "# pass batch-done indicator to subprocess", "self", ".", "_process", ".", "stdin", ".", "write", "(", "self", ".", "_batch_done_indicator", ")", "# get output from subprocess", "t_consumer", ".", "join", "(", ")", "subprocess_out_str", "=", "self", ".", "_subprocess_out_str", "[", "0", "]", "self", ".", "_subprocess_out_str", "=", "[", "]", "out_batch", "=", "BaseShellOperator", ".", "_out_str_to_batch", "(", "subprocess_out_str", ",", "self", ".", "_out_recdef", ",", "self", ".", "_out_col_patterns", ")", "return", "out_batch" ]
Run shell operator synchronously to eat `in_batches` :param in_batches: `tuple` of batches to process
[ "Run", "shell", "operator", "synchronously", "to", "eat", "in_batches" ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/controllers/state_machine_tree.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/state_machine_tree.py#L509-L528
def _handle_double_click(self, event): """ Double click with left mouse button focuses the state and toggles the collapse status""" if event.get_button()[1] == 1: # Left mouse button path_info = self.tree_view.get_path_at_pos(int(event.x), int(event.y)) if path_info: # Valid entry was clicked on path = path_info[0] iter = self.tree_store.get_iter(path) state_model = self.tree_store.get_value(iter, self.MODEL_STORAGE_ID) # Set focus to StateModel selection = self._selected_sm_model.selection selection.focus = state_model # Toggle collapse status if applicable for this kind of state if self.view.row_expanded(path): self.view.collapse_row(path) else: if isinstance(state_model, ContainerStateModel) or \ isinstance(state_model, LibraryStateModel) and self.show_content(state_model): self.view.expand_to_path(path)
[ "def", "_handle_double_click", "(", "self", ",", "event", ")", ":", "if", "event", ".", "get_button", "(", ")", "[", "1", "]", "==", "1", ":", "# Left mouse button", "path_info", "=", "self", ".", "tree_view", ".", "get_path_at_pos", "(", "int", "(", "event", ".", "x", ")", ",", "int", "(", "event", ".", "y", ")", ")", "if", "path_info", ":", "# Valid entry was clicked on", "path", "=", "path_info", "[", "0", "]", "iter", "=", "self", ".", "tree_store", ".", "get_iter", "(", "path", ")", "state_model", "=", "self", ".", "tree_store", ".", "get_value", "(", "iter", ",", "self", ".", "MODEL_STORAGE_ID", ")", "# Set focus to StateModel", "selection", "=", "self", ".", "_selected_sm_model", ".", "selection", "selection", ".", "focus", "=", "state_model", "# Toggle collapse status if applicable for this kind of state", "if", "self", ".", "view", ".", "row_expanded", "(", "path", ")", ":", "self", ".", "view", ".", "collapse_row", "(", "path", ")", "else", ":", "if", "isinstance", "(", "state_model", ",", "ContainerStateModel", ")", "or", "isinstance", "(", "state_model", ",", "LibraryStateModel", ")", "and", "self", ".", "show_content", "(", "state_model", ")", ":", "self", ".", "view", ".", "expand_to_path", "(", "path", ")" ]
Double click with left mouse button focuses the state and toggles the collapse status
[ "Double", "click", "with", "left", "mouse", "button", "focuses", "the", "state", "and", "toggles", "the", "collapse", "status" ]
python
train
scour-project/scour
scour/scour.py
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L831-L854
def unprotected_ids(doc, options): u"""Returns a list of unprotected IDs within the document doc.""" identifiedElements = findElementsWithId(doc.documentElement) if not (options.protect_ids_noninkscape or options.protect_ids_list or options.protect_ids_prefix): return identifiedElements if options.protect_ids_list: protect_ids_list = options.protect_ids_list.split(",") if options.protect_ids_prefix: protect_ids_prefixes = options.protect_ids_prefix.split(",") for id in list(identifiedElements): protected = False if options.protect_ids_noninkscape and not id[-1].isdigit(): protected = True if options.protect_ids_list and id in protect_ids_list: protected = True if options.protect_ids_prefix: for prefix in protect_ids_prefixes: if id.startswith(prefix): protected = True if protected: del identifiedElements[id] return identifiedElements
[ "def", "unprotected_ids", "(", "doc", ",", "options", ")", ":", "identifiedElements", "=", "findElementsWithId", "(", "doc", ".", "documentElement", ")", "if", "not", "(", "options", ".", "protect_ids_noninkscape", "or", "options", ".", "protect_ids_list", "or", "options", ".", "protect_ids_prefix", ")", ":", "return", "identifiedElements", "if", "options", ".", "protect_ids_list", ":", "protect_ids_list", "=", "options", ".", "protect_ids_list", ".", "split", "(", "\",\"", ")", "if", "options", ".", "protect_ids_prefix", ":", "protect_ids_prefixes", "=", "options", ".", "protect_ids_prefix", ".", "split", "(", "\",\"", ")", "for", "id", "in", "list", "(", "identifiedElements", ")", ":", "protected", "=", "False", "if", "options", ".", "protect_ids_noninkscape", "and", "not", "id", "[", "-", "1", "]", ".", "isdigit", "(", ")", ":", "protected", "=", "True", "if", "options", ".", "protect_ids_list", "and", "id", "in", "protect_ids_list", ":", "protected", "=", "True", "if", "options", ".", "protect_ids_prefix", ":", "for", "prefix", "in", "protect_ids_prefixes", ":", "if", "id", ".", "startswith", "(", "prefix", ")", ":", "protected", "=", "True", "if", "protected", ":", "del", "identifiedElements", "[", "id", "]", "return", "identifiedElements" ]
u"""Returns a list of unprotected IDs within the document doc.
[ "u", "Returns", "a", "list", "of", "unprotected", "IDs", "within", "the", "document", "doc", "." ]
python
train
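Illustrative usage sketch for the unprotected_ids() record above (not part of the original data); the options object is faked here with SimpleNamespace, whereas real callers pass scour's parsed option namespace.
from types import SimpleNamespace
from xml.dom import minidom
from scour.scour import unprotected_ids

doc = minidom.parseString(
    '<svg xmlns="http://www.w3.org/2000/svg">'
    '<rect id="keepMe_1"/><rect id="rect1234"/></svg>')
options = SimpleNamespace(protect_ids_noninkscape=False,
                          protect_ids_list=None,
                          protect_ids_prefix='keepMe')
ids = unprotected_ids(doc, options)  # 'rect1234' stays; 'keepMe_1' is protected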
dls-controls/annotypes
annotypes/_serializable.py
https://github.com/dls-controls/annotypes/blob/31ab68a0367bb70ebd9898e8b9fa9405423465bd/annotypes/_serializable.py#L125-L138
def to_dict(self, dict_cls=FrozenOrderedDict): # type: (Type[dict]) -> Dict[str, Any] """Create a dictionary representation of object attributes Returns: OrderedDict serialised version of self """ pairs = tuple((k, serialize_object(getattr(self, k), dict_cls)) for k in self.call_types) if self.typeid: d = dict_cls((("typeid", self.typeid),) + pairs) else: d = dict_cls(pairs) return d
[ "def", "to_dict", "(", "self", ",", "dict_cls", "=", "FrozenOrderedDict", ")", ":", "# type: (Type[dict]) -> Dict[str, Any]", "pairs", "=", "tuple", "(", "(", "k", ",", "serialize_object", "(", "getattr", "(", "self", ",", "k", ")", ",", "dict_cls", ")", ")", "for", "k", "in", "self", ".", "call_types", ")", "if", "self", ".", "typeid", ":", "d", "=", "dict_cls", "(", "(", "(", "\"typeid\"", ",", "self", ".", "typeid", ")", ",", ")", "+", "pairs", ")", "else", ":", "d", "=", "dict_cls", "(", "pairs", ")", "return", "d" ]
Create a dictionary representation of object attributes Returns: OrderedDict serialised version of self
[ "Create", "a", "dictionary", "representation", "of", "object", "attributes" ]
python
train
matthiask/django-cte-forest
cte_forest/models.py
https://github.com/matthiask/django-cte-forest/blob/7bff29d69eddfcf214e9cf61647c91d28655619c/cte_forest/models.py#L880-L910
def prepare_delete_monarchy(self, node, position=None, save=True): """ Prepares a given :class:`CTENode` `node` for deletion, by executing the :const:`DELETE_METHOD_MONARCHY` semantics. Descendant nodes, if present, will be moved; in this case the optional `position` can be a ``callable`` which is invoked prior to each move operation (see :meth:`move` for details). By default, after each move operation, sub-tree nodes which were moved will be saved through a call to :meth:`Model.save` unless `save` is ``False``. This method delegates move operations to :meth:`move`. :param node: the :class:`CTENode` to prepare for deletion. :param position: optionally, a ``callable`` to invoke prior to each move operation. :param save: flag indicating whether to save after each move operation, ``True`` by default. """ # We are going to iterate all children, even though the first child is # treated in a special way, because the query iterator may be custom, so # we will avoid using slicing children[0] and children[1:]. first = None for child in node.children.all(): if first is None: first = child first.move(node.parent, position, save) else: child.move(first, position, save)
[ "def", "prepare_delete_monarchy", "(", "self", ",", "node", ",", "position", "=", "None", ",", "save", "=", "True", ")", ":", "# We are going to iterate all children, even though the first child is", "# treated in a special way, because the query iterator may be custom, so", "# we will avoid using slicing children[0] and children[1:].", "first", "=", "None", "for", "child", "in", "node", ".", "children", ".", "all", "(", ")", ":", "if", "first", "is", "None", ":", "first", "=", "child", "first", ".", "move", "(", "node", ".", "parent", ",", "position", ",", "save", ")", "else", ":", "child", ".", "move", "(", "first", ",", "position", ",", "save", ")" ]
Prepares a given :class:`CTENode` `node` for deletion, by executing the :const:`DELETE_METHOD_MONARCHY` semantics. Descendant nodes, if present, will be moved; in this case the optional `position` can be a ``callable`` which is invoked prior to each move operation (see :meth:`move` for details). By default, after each move operation, sub-tree nodes which were moved will be saved through a call to :meth:`Model.save` unless `save` is ``False``. This method delegates move operations to :meth:`move`. :param node: the :class:`CTENode` to prepare for deletion. :param position: optionally, a ``callable`` to invoke prior to each move operation. :param save: flag indicating whether to save after each move operation, ``True`` by default.
[ "Prepares", "a", "given", ":", "class", ":", "CTENode", "node", "for", "deletion", "by", "executing", "the", ":", "const", ":", "DELETE_METHOD_MONARCHY", "semantics", ".", "Descendant", "nodes", "if", "present", "will", "be", "moved", ";", "in", "this", "case", "the", "optional", "position", "can", "be", "a", "callable", "which", "is", "invoked", "prior", "to", "each", "move", "operation", "(", "see", ":", "meth", ":", "move", "for", "details", ")", "." ]
python
train
adafruit/Adafruit_CircuitPython_framebuf
adafruit_framebuf.py
https://github.com/adafruit/Adafruit_CircuitPython_framebuf/blob/b9f62c4b71efa963150f9c5a0284b61c7add9d02/adafruit_framebuf.py#L249-L275
def line(self, x_0, y_0, x_1, y_1, color): # pylint: disable=too-many-arguments """Bresenham's line algorithm""" d_x = abs(x_1 - x_0) d_y = abs(y_1 - y_0) x, y = x_0, y_0 s_x = -1 if x_0 > x_1 else 1 s_y = -1 if y_0 > y_1 else 1 if d_x > d_y: err = d_x / 2.0 while x != x_1: self.pixel(x, y, color) err -= d_y if err < 0: y += s_y err += d_x x += s_x else: err = d_y / 2.0 while y != y_1: self.pixel(x, y, color) err -= d_x if err < 0: x += s_x err += d_y y += s_y self.pixel(x, y, color)
[ "def", "line", "(", "self", ",", "x_0", ",", "y_0", ",", "x_1", ",", "y_1", ",", "color", ")", ":", "# pylint: disable=too-many-arguments", "d_x", "=", "abs", "(", "x_1", "-", "x_0", ")", "d_y", "=", "abs", "(", "y_1", "-", "y_0", ")", "x", ",", "y", "=", "x_0", ",", "y_0", "s_x", "=", "-", "1", "if", "x_0", ">", "x_1", "else", "1", "s_y", "=", "-", "1", "if", "y_0", ">", "y_1", "else", "1", "if", "d_x", ">", "d_y", ":", "err", "=", "d_x", "/", "2.0", "while", "x", "!=", "x_1", ":", "self", ".", "pixel", "(", "x", ",", "y", ",", "color", ")", "err", "-=", "d_y", "if", "err", "<", "0", ":", "y", "+=", "s_y", "err", "+=", "d_x", "x", "+=", "s_x", "else", ":", "err", "=", "d_y", "/", "2.0", "while", "y", "!=", "y_1", ":", "self", ".", "pixel", "(", "x", ",", "y", ",", "color", ")", "err", "-=", "d_x", "if", "err", "<", "0", ":", "x", "+=", "s_x", "err", "+=", "d_y", "y", "+=", "s_y", "self", ".", "pixel", "(", "x", ",", "y", ",", "color", ")" ]
Bresenham's line algorithm
[ "Bresenham", "s", "line", "algorithm" ]
python
train
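Standalone sketch of the same Bresenham stepping as the record above (not part of the original data): it draws into a plain set of (x, y) pixels instead of a FrameBuffer, purely to illustrate the algorithm.
def bresenham(x0, y0, x1, y1):
    """Collect the integer pixels visited between (x0, y0) and (x1, y1)."""
    pixels = set()
    d_x, d_y = abs(x1 - x0), abs(y1 - y0)
    s_x = -1 if x0 > x1 else 1
    s_y = -1 if y0 > y1 else 1
    x, y = x0, y0
    if d_x > d_y:
        err = d_x / 2.0
        while x != x1:
            pixels.add((x, y))
            err -= d_y
            if err < 0:
                y += s_y
                err += d_x
            x += s_x
    else:
        err = d_y / 2.0
        while y != y1:
            pixels.add((x, y))
            err -= d_x
            if err < 0:
                x += s_x
                err += d_y
            y += s_y
    pixels.add((x, y))
    return pixels

assert (5, 2) in bresenham(0, 0, 10, 4)  # the ideal line passes near y = 0.4 * x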
openstack/proliantutils
proliantutils/redfish/resources/system/storage/array_controller.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/storage/array_controller.py#L151-L158
def array_controller_by_model(self, model): """Returns array controller instance by model :returns Instance of array controller """ for member in self.get_members(): if member.model == model: return member
[ "def", "array_controller_by_model", "(", "self", ",", "model", ")", ":", "for", "member", "in", "self", ".", "get_members", "(", ")", ":", "if", "member", ".", "model", "==", "model", ":", "return", "member" ]
Returns array controller instance by model :returns Instance of array controller
[ "Returns", "array", "controller", "instance", "by", "model" ]
python
train
vmware/pyvmomi
pyVmomi/VmomiSupport.py
https://github.com/vmware/pyvmomi/blob/3ffcb23bf77d757175c0d5216ba9a25345d824cd/pyVmomi/VmomiSupport.py#L379-L395
def explode(self, obj): """ Determine if the object should be exploded. """ if obj in self._done: return False result = False for item in self._explode: if hasattr(item, '_moId'): # If it has a _moId it is an instance if obj._moId == item._moId: result = True else: # If it does not have a _moId it is a template if obj.__class__.__name__ == item.__name__: result = True if result: self._done.add(obj) return result
[ "def", "explode", "(", "self", ",", "obj", ")", ":", "if", "obj", "in", "self", ".", "_done", ":", "return", "False", "result", "=", "False", "for", "item", "in", "self", ".", "_explode", ":", "if", "hasattr", "(", "item", ",", "'_moId'", ")", ":", "# If it has a _moId it is an instance", "if", "obj", ".", "_moId", "==", "item", ".", "_moId", ":", "result", "=", "True", "else", ":", "# If it does not have a _moId it is a template", "if", "obj", ".", "__class__", ".", "__name__", "==", "item", ".", "__name__", ":", "result", "=", "True", "if", "result", ":", "self", ".", "_done", ".", "add", "(", "obj", ")", "return", "result" ]
Determine if the object should be exploded.
[ "Determine", "if", "the", "object", "should", "be", "exploded", "." ]
python
train
quantumlib/Cirq
dev_tools/incremental_coverage.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/dev_tools/incremental_coverage.py#L115-L162
def get_incremental_uncovered_lines(abs_path: str, base_commit: str, actual_commit: Optional[str] ) -> List[Tuple[int, str, str]]: """ Uses git diff and the annotation files created by `pytest --cov-report annotate` to find touched but uncovered lines in the given file. Args: abs_path: The path of a file to look for uncovered lines in. base_commit: Old state to diff against. actual_commit: Current state. Use None to use local uncommitted files. Returns: A list of the indices, content, and reason-for-including of 'interesting' uncovered lines. An interesting uncovered line is one involved with the diff. """ # Deleted files don't have any lines that need to be covered. if not os.path.isfile(abs_path): return [] unified_diff_lines_str = shell_tools.output_of( 'git', 'diff', '--unified=0', base_commit, actual_commit, '--', abs_path) unified_diff_lines = [e for e in unified_diff_lines_str.split('\n') if e.strip()] touched_lines = diff_to_new_interesting_lines(unified_diff_lines) with open(abs_path, 'r') as actual_file: ignored_lines = determine_ignored_lines(actual_file.read()) cover_path = abs_path + ',cover' has_cover_file = os.path.isfile(cover_path) content_file = cover_path if has_cover_file else abs_path with open(content_file, 'r') as annotated_coverage_file: return [(i, fix_line_from_coverage_file(line), touched_lines[i]) for i, line in enumerate(annotated_coverage_file, start=1) if i in touched_lines and i not in ignored_lines if line_counts_as_uncovered(line, has_cover_file)]
[ "def", "get_incremental_uncovered_lines", "(", "abs_path", ":", "str", ",", "base_commit", ":", "str", ",", "actual_commit", ":", "Optional", "[", "str", "]", ")", "->", "List", "[", "Tuple", "[", "int", ",", "str", ",", "str", "]", "]", ":", "# Deleted files don't have any lines that need to be covered.", "if", "not", "os", ".", "path", ".", "isfile", "(", "abs_path", ")", ":", "return", "[", "]", "unified_diff_lines_str", "=", "shell_tools", ".", "output_of", "(", "'git'", ",", "'diff'", ",", "'--unified=0'", ",", "base_commit", ",", "actual_commit", ",", "'--'", ",", "abs_path", ")", "unified_diff_lines", "=", "[", "e", "for", "e", "in", "unified_diff_lines_str", ".", "split", "(", "'\\n'", ")", "if", "e", ".", "strip", "(", ")", "]", "touched_lines", "=", "diff_to_new_interesting_lines", "(", "unified_diff_lines", ")", "with", "open", "(", "abs_path", ",", "'r'", ")", "as", "actual_file", ":", "ignored_lines", "=", "determine_ignored_lines", "(", "actual_file", ".", "read", "(", ")", ")", "cover_path", "=", "abs_path", "+", "',cover'", "has_cover_file", "=", "os", ".", "path", ".", "isfile", "(", "cover_path", ")", "content_file", "=", "cover_path", "if", "has_cover_file", "else", "abs_path", "with", "open", "(", "content_file", ",", "'r'", ")", "as", "annotated_coverage_file", ":", "return", "[", "(", "i", ",", "fix_line_from_coverage_file", "(", "line", ")", ",", "touched_lines", "[", "i", "]", ")", "for", "i", ",", "line", "in", "enumerate", "(", "annotated_coverage_file", ",", "start", "=", "1", ")", "if", "i", "in", "touched_lines", "and", "i", "not", "in", "ignored_lines", "if", "line_counts_as_uncovered", "(", "line", ",", "has_cover_file", ")", "]" ]
Uses git diff and the annotation files created by `pytest --cov-report annotate` to find touched but uncovered lines in the given file. Args: abs_path: The path of a file to look for uncovered lines in. base_commit: Old state to diff against. actual_commit: Current state. Use None to use local uncommitted files. Returns: A list of the indices, content, and reason-for-including of 'interesting' uncovered lines. An interesting uncovered line is one involved with the diff.
[ "Uses", "git", "diff", "and", "the", "annotation", "files", "created", "by", "pytest", "--", "cov", "-", "report", "annotate", "to", "find", "touched", "but", "uncovered", "lines", "in", "the", "given", "file", "." ]
python
train
nerandell/cauldron
cauldron/redis_cache.py
https://github.com/nerandell/cauldron/blob/d363bac763781bb2da18debfa0fdd4be28288b92/cauldron/redis_cache.py#L9-L16
def connect(self, host, port, minsize=5, maxsize=10, loop=asyncio.get_event_loop()): """ Setup a connection pool :param host: Redis host :param port: Redis port :param loop: Event loop """ self._pool = yield from aioredis.create_pool((host, port), minsize=minsize, maxsize=maxsize, loop=loop)
[ "def", "connect", "(", "self", ",", "host", ",", "port", ",", "minsize", "=", "5", ",", "maxsize", "=", "10", ",", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", ")", ":", "self", ".", "_pool", "=", "yield", "from", "aioredis", ".", "create_pool", "(", "(", "host", ",", "port", ")", ",", "minsize", "=", "minsize", ",", "maxsize", "=", "maxsize", ",", "loop", "=", "loop", ")" ]
Setup a connection pool :param host: Redis host :param port: Redis port :param loop: Event loop
[ "Setup", "a", "connection", "pool", ":", "param", "host", ":", "Redis", "host", ":", "param", "port", ":", "Redis", "port", ":", "param", "loop", ":", "Event", "loop" ]
python
valid
pyamg/pyamg
pyamg/vis/vis_coarse.py
https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/vis/vis_coarse.py#L20-L147
def vis_aggregate_groups(Verts, E2V, Agg, mesh_type, output='vtk', fname='output.vtu'): """Coarse grid visualization of aggregate groups. Create .vtu files for use in Paraview or display with Matplotlib. Parameters ---------- Verts : {array} coordinate array (N x D) E2V : {array} element index array (Nel x Nelnodes) Agg : {csr_matrix} sparse matrix for the aggregate-vertex relationship (N x Nagg) mesh_type : {string} type of elements: vertex, tri, quad, tet, hex (all 3d) fname : {string, file object} file to be written, e.g. 'output.vtu' output : {string} 'vtk' or 'matplotlib' Returns ------- - Writes data to .vtu file for use in paraview (xml 0.1 format) or displays to screen using matplotlib Notes ----- - Works for both 2d and 3d elements. Element groupings are colored with data equal to 2.0 and stringy edges in the aggregate are colored with 3.0 Examples -------- >>> from pyamg.aggregation import standard_aggregation >>> from pyamg.vis.vis_coarse import vis_aggregate_groups >>> from pyamg.gallery import load_example >>> data = load_example('unit_square') >>> A = data['A'].tocsr() >>> V = data['vertices'] >>> E2V = data['elements'] >>> Agg = standard_aggregation(A)[0] >>> vis_aggregate_groups(Verts=V, E2V=E2V, Agg=Agg, mesh_type='tri', output='vtk', fname='output.vtu') >>> from pyamg.aggregation import standard_aggregation >>> from pyamg.vis.vis_coarse import vis_aggregate_groups >>> from pyamg.gallery import load_example >>> data = load_example('unit_cube') >>> A = data['A'].tocsr() >>> V = data['vertices'] >>> E2V = data['elements'] >>> Agg = standard_aggregation(A)[0] >>> vis_aggregate_groups(Verts=V, E2V=E2V, Agg=Agg, mesh_type='tet', output='vtk', fname='output.vtu') """ check_input(Verts=Verts, E2V=E2V, Agg=Agg, mesh_type=mesh_type) map_type_to_key = {'tri': 5, 'quad': 9, 'tet': 10, 'hex': 12} if mesh_type not in map_type_to_key: raise ValueError('unknown mesh_type=%s' % mesh_type) key = map_type_to_key[mesh_type] Agg = csr_matrix(Agg) # remove elements with dirichlet BCs if E2V.max() >= Agg.shape[0]: E2V = E2V[E2V.max(axis=1) < Agg.shape[0]] # 1 # # Find elements with all vertices in same aggregate # account for 0 rows. Mark them as solitary aggregates # TODO: (Luke) full_aggs is not defined, I think its just a mask # indicated with rows are not 0. 
if len(Agg.indices) != Agg.shape[0]: full_aggs = ((Agg.indptr[1:] - Agg.indptr[:-1]) == 0).nonzero()[0] new_aggs = np.array(Agg.sum(axis=1), dtype=int).ravel() new_aggs[full_aggs == 1] = Agg.indices # keep existing aggregate IDs new_aggs[full_aggs == 0] = Agg.shape[1] # fill in singletons maxID+1 ElementAggs = new_aggs[E2V] else: ElementAggs = Agg.indices[E2V] # 2 # # find all aggregates encompassing full elements # mask[i] == True if all vertices in element i belong to the same aggregate mask = np.where(abs(np.diff(ElementAggs)).max(axis=1) == 0)[0] # mask = (ElementAggs[:,:] == ElementAggs[:,0]).all(axis=1) E2V_a = E2V[mask, :] # elements where element is full Nel_a = E2V_a.shape[0] # 3 # # find edges of elements in the same aggregate (brute force) # construct vertex to vertex graph col = E2V.ravel() row = np.kron(np.arange(0, E2V.shape[0]), np.ones((E2V.shape[1],), dtype=int)) data = np.ones((len(col),)) if len(row) != len(col): raise ValueError('Problem constructing vertex-to-vertex map') V2V = coo_matrix((data, (row, col)), shape=(E2V.shape[0], E2V.max()+1)) V2V = V2V.T * V2V V2V = triu(V2V, 1).tocoo() # get all the edges edges = np.vstack((V2V.row, V2V.col)).T # all the edges in the same aggregate E2V_b = edges[Agg.indices[V2V.row] == Agg.indices[V2V.col]] Nel_b = E2V_b.shape[0] # 3.5 # # single node aggregates sums = np.array(Agg.sum(axis=0)).ravel() E2V_c = np.where(sums == 1)[0] Nel_c = len(E2V_c) # 4 # # now write out the elements and edges colors_a = 3*np.ones((Nel_a,)) # color triangles with threes colors_b = 2*np.ones((Nel_b,)) # color edges with twos colors_c = 1*np.ones((Nel_c,)) # color the vertices with ones Cells = {1: E2V_c, 3: E2V_b, key: E2V_a} cdata = {1: colors_c, 3: colors_b, key: colors_a} # make sure it's a tuple write_vtu(Verts=Verts, Cells=Cells, fname=fname, cdata=cdata)
[ "def", "vis_aggregate_groups", "(", "Verts", ",", "E2V", ",", "Agg", ",", "mesh_type", ",", "output", "=", "'vtk'", ",", "fname", "=", "'output.vtu'", ")", ":", "check_input", "(", "Verts", "=", "Verts", ",", "E2V", "=", "E2V", ",", "Agg", "=", "Agg", ",", "mesh_type", "=", "mesh_type", ")", "map_type_to_key", "=", "{", "'tri'", ":", "5", ",", "'quad'", ":", "9", ",", "'tet'", ":", "10", ",", "'hex'", ":", "12", "}", "if", "mesh_type", "not", "in", "map_type_to_key", ":", "raise", "ValueError", "(", "'unknown mesh_type=%s'", "%", "mesh_type", ")", "key", "=", "map_type_to_key", "[", "mesh_type", "]", "Agg", "=", "csr_matrix", "(", "Agg", ")", "# remove elements with dirichlet BCs", "if", "E2V", ".", "max", "(", ")", ">=", "Agg", ".", "shape", "[", "0", "]", ":", "E2V", "=", "E2V", "[", "E2V", ".", "max", "(", "axis", "=", "1", ")", "<", "Agg", ".", "shape", "[", "0", "]", "]", "# 1 #", "# Find elements with all vertices in same aggregate", "# account for 0 rows. Mark them as solitary aggregates", "# TODO: (Luke) full_aggs is not defined, I think its just a mask", "# indicated with rows are not 0.", "if", "len", "(", "Agg", ".", "indices", ")", "!=", "Agg", ".", "shape", "[", "0", "]", ":", "full_aggs", "=", "(", "(", "Agg", ".", "indptr", "[", "1", ":", "]", "-", "Agg", ".", "indptr", "[", ":", "-", "1", "]", ")", "==", "0", ")", ".", "nonzero", "(", ")", "[", "0", "]", "new_aggs", "=", "np", ".", "array", "(", "Agg", ".", "sum", "(", "axis", "=", "1", ")", ",", "dtype", "=", "int", ")", ".", "ravel", "(", ")", "new_aggs", "[", "full_aggs", "==", "1", "]", "=", "Agg", ".", "indices", "# keep existing aggregate IDs", "new_aggs", "[", "full_aggs", "==", "0", "]", "=", "Agg", ".", "shape", "[", "1", "]", "# fill in singletons maxID+1", "ElementAggs", "=", "new_aggs", "[", "E2V", "]", "else", ":", "ElementAggs", "=", "Agg", ".", "indices", "[", "E2V", "]", "# 2 #", "# find all aggregates encompassing full elements", "# mask[i] == True if all vertices in element i belong to the same aggregate", "mask", "=", "np", ".", "where", "(", "abs", "(", "np", ".", "diff", "(", "ElementAggs", ")", ")", ".", "max", "(", "axis", "=", "1", ")", "==", "0", ")", "[", "0", "]", "# mask = (ElementAggs[:,:] == ElementAggs[:,0]).all(axis=1)", "E2V_a", "=", "E2V", "[", "mask", ",", ":", "]", "# elements where element is full", "Nel_a", "=", "E2V_a", ".", "shape", "[", "0", "]", "# 3 #", "# find edges of elements in the same aggregate (brute force)", "# construct vertex to vertex graph", "col", "=", "E2V", ".", "ravel", "(", ")", "row", "=", "np", ".", "kron", "(", "np", ".", "arange", "(", "0", ",", "E2V", ".", "shape", "[", "0", "]", ")", ",", "np", ".", "ones", "(", "(", "E2V", ".", "shape", "[", "1", "]", ",", ")", ",", "dtype", "=", "int", ")", ")", "data", "=", "np", ".", "ones", "(", "(", "len", "(", "col", ")", ",", ")", ")", "if", "len", "(", "row", ")", "!=", "len", "(", "col", ")", ":", "raise", "ValueError", "(", "'Problem constructing vertex-to-vertex map'", ")", "V2V", "=", "coo_matrix", "(", "(", "data", ",", "(", "row", ",", "col", ")", ")", ",", "shape", "=", "(", "E2V", ".", "shape", "[", "0", "]", ",", "E2V", ".", "max", "(", ")", "+", "1", ")", ")", "V2V", "=", "V2V", ".", "T", "*", "V2V", "V2V", "=", "triu", "(", "V2V", ",", "1", ")", ".", "tocoo", "(", ")", "# get all the edges", "edges", "=", "np", ".", "vstack", "(", "(", "V2V", ".", "row", ",", "V2V", ".", "col", ")", ")", ".", "T", "# all the edges in the same aggregate", "E2V_b", "=", "edges", "[", "Agg", ".", "indices", "[", "V2V", ".", 
"row", "]", "==", "Agg", ".", "indices", "[", "V2V", ".", "col", "]", "]", "Nel_b", "=", "E2V_b", ".", "shape", "[", "0", "]", "# 3.5 #", "# single node aggregates", "sums", "=", "np", ".", "array", "(", "Agg", ".", "sum", "(", "axis", "=", "0", ")", ")", ".", "ravel", "(", ")", "E2V_c", "=", "np", ".", "where", "(", "sums", "==", "1", ")", "[", "0", "]", "Nel_c", "=", "len", "(", "E2V_c", ")", "# 4 #", "# now write out the elements and edges", "colors_a", "=", "3", "*", "np", ".", "ones", "(", "(", "Nel_a", ",", ")", ")", "# color triangles with threes", "colors_b", "=", "2", "*", "np", ".", "ones", "(", "(", "Nel_b", ",", ")", ")", "# color edges with twos", "colors_c", "=", "1", "*", "np", ".", "ones", "(", "(", "Nel_c", ",", ")", ")", "# color the vertices with ones", "Cells", "=", "{", "1", ":", "E2V_c", ",", "3", ":", "E2V_b", ",", "key", ":", "E2V_a", "}", "cdata", "=", "{", "1", ":", "colors_c", ",", "3", ":", "colors_b", ",", "key", ":", "colors_a", "}", "# make sure it's a tuple", "write_vtu", "(", "Verts", "=", "Verts", ",", "Cells", "=", "Cells", ",", "fname", "=", "fname", ",", "cdata", "=", "cdata", ")" ]
Coarse grid visualization of aggregate groups. Create .vtu files for use in Paraview or display with Matplotlib. Parameters ---------- Verts : {array} coordinate array (N x D) E2V : {array} element index array (Nel x Nelnodes) Agg : {csr_matrix} sparse matrix for the aggregate-vertex relationship (N x Nagg) mesh_type : {string} type of elements: vertex, tri, quad, tet, hex (all 3d) fname : {string, file object} file to be written, e.g. 'output.vtu' output : {string} 'vtk' or 'matplotlib' Returns ------- - Writes data to .vtu file for use in paraview (xml 0.1 format) or displays to screen using matplotlib Notes ----- - Works for both 2d and 3d elements. Element groupings are colored with data equal to 2.0 and stringy edges in the aggregate are colored with 3.0 Examples -------- >>> from pyamg.aggregation import standard_aggregation >>> from pyamg.vis.vis_coarse import vis_aggregate_groups >>> from pyamg.gallery import load_example >>> data = load_example('unit_square') >>> A = data['A'].tocsr() >>> V = data['vertices'] >>> E2V = data['elements'] >>> Agg = standard_aggregation(A)[0] >>> vis_aggregate_groups(Verts=V, E2V=E2V, Agg=Agg, mesh_type='tri', output='vtk', fname='output.vtu') >>> from pyamg.aggregation import standard_aggregation >>> from pyamg.vis.vis_coarse import vis_aggregate_groups >>> from pyamg.gallery import load_example >>> data = load_example('unit_cube') >>> A = data['A'].tocsr() >>> V = data['vertices'] >>> E2V = data['elements'] >>> Agg = standard_aggregation(A)[0] >>> vis_aggregate_groups(Verts=V, E2V=E2V, Agg=Agg, mesh_type='tet', output='vtk', fname='output.vtu')
[ "Coarse", "grid", "visualization", "of", "aggregate", "groups", "." ]
python
train
timknip/pycsg
csg/geom.py
https://github.com/timknip/pycsg/blob/b8f9710fd15c38dcc275d56a2108f604af38dcc8/csg/geom.py#L50-L52
def plus(self, a): """ Add. """ return Vector(self.x+a.x, self.y+a.y, self.z+a.z)
[ "def", "plus", "(", "self", ",", "a", ")", ":", "return", "Vector", "(", "self", ".", "x", "+", "a", ".", "x", ",", "self", ".", "y", "+", "a", ".", "y", ",", "self", ".", "z", "+", "a", ".", "z", ")" ]
Add.
[ "Add", "." ]
python
train
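Illustrative usage sketch for the Vector.plus() record above (not part of the original data); it assumes the geom.Vector constructor accepts x, y, z components directly.
from csg.geom import Vector

a = Vector(1.0, 2.0, 3.0)
b = Vector(4.0, 5.0, 6.0)
c = a.plus(b)  # component-wise sum: x=5.0, y=7.0, z=9.0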
google/grr
grr/server/grr_response_server/flow.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/flow.py#L560-L566
def Close(self): """Flushes the flow and all its requests to the data_store.""" self._CheckLeaseAndFlush() super(FlowBase, self).Close() # Writing the messages queued in the queue_manager of the runner always has # to be the last thing that happens or we will have a race condition. self.FlushMessages()
[ "def", "Close", "(", "self", ")", ":", "self", ".", "_CheckLeaseAndFlush", "(", ")", "super", "(", "FlowBase", ",", "self", ")", ".", "Close", "(", ")", "# Writing the messages queued in the queue_manager of the runner always has", "# to be the last thing that happens or we will have a race condition.", "self", ".", "FlushMessages", "(", ")" ]
Flushes the flow and all its requests to the data_store.
[ "Flushes", "the", "flow", "and", "all", "its", "requests", "to", "the", "data_store", "." ]
python
train
ihmeuw/vivarium
src/vivarium/interface/interactive.py
https://github.com/ihmeuw/vivarium/blob/c5f5d50f775c8bf337d3aae1ff7c57c025a8e258/src/vivarium/interface/interactive.py#L261-L286
def initialize_simulation_from_model_specification(model_specification_file: str) -> InteractiveContext: """Construct a simulation from a model specification file. The simulation context returned by this method still needs to be setup by calling its setup method. It is mostly useful for testing and debugging. Parameters ---------- model_specification_file The path to a model specification file. Returns ------- An initialized (but not set up) simulation context. """ model_specification = build_model_specification(model_specification_file) plugin_config = model_specification.plugins component_config = model_specification.components simulation_config = model_specification.configuration plugin_manager = PluginManager(plugin_config) component_config_parser = plugin_manager.get_plugin('component_configuration_parser') components = component_config_parser.get_components(component_config) return InteractiveContext(simulation_config, components, plugin_manager)
[ "def", "initialize_simulation_from_model_specification", "(", "model_specification_file", ":", "str", ")", "->", "InteractiveContext", ":", "model_specification", "=", "build_model_specification", "(", "model_specification_file", ")", "plugin_config", "=", "model_specification", ".", "plugins", "component_config", "=", "model_specification", ".", "components", "simulation_config", "=", "model_specification", ".", "configuration", "plugin_manager", "=", "PluginManager", "(", "plugin_config", ")", "component_config_parser", "=", "plugin_manager", ".", "get_plugin", "(", "'component_configuration_parser'", ")", "components", "=", "component_config_parser", ".", "get_components", "(", "component_config", ")", "return", "InteractiveContext", "(", "simulation_config", ",", "components", ",", "plugin_manager", ")" ]
Construct a simulation from a model specification file. The simulation context returned by this method still needs to be setup by calling its setup method. It is mostly useful for testing and debugging. Parameters ---------- model_specification_file The path to a model specification file. Returns ------- An initialized (but not set up) simulation context.
[ "Construct", "a", "simulation", "from", "a", "model", "specification", "file", "." ]
python
train
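Illustrative usage sketch for the record above (not part of the original data); the specification path is hypothetical, and the step() call assumes the usual interactive-context API.
# Build the context, then set it up before using it, as the docstring requires.
sim = initialize_simulation_from_model_specification('model_specification.yaml')
sim.setup()
sim.step()  # step()/run() are assumed interactive-context methods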
PredixDev/predixpy
predix/admin/cf/api.py
https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/admin/cf/api.py#L50-L66
def get(self, path): """ Generic GET with headers """ uri = self.config.get_target() + path headers = self._get_headers() logging.debug("URI=GET " + str(uri)) logging.debug("HEADERS=" + str(headers)) response = self.session.get(uri, headers=headers) if response.status_code == 200: return response.json() elif response.status_code == 401: raise predix.admin.cf.config.CloudFoundryLoginError('token invalid') else: response.raise_for_status()
[ "def", "get", "(", "self", ",", "path", ")", ":", "uri", "=", "self", ".", "config", ".", "get_target", "(", ")", "+", "path", "headers", "=", "self", ".", "_get_headers", "(", ")", "logging", ".", "debug", "(", "\"URI=GET \"", "+", "str", "(", "uri", ")", ")", "logging", ".", "debug", "(", "\"HEADERS=\"", "+", "str", "(", "headers", ")", ")", "response", "=", "self", ".", "session", ".", "get", "(", "uri", ",", "headers", "=", "headers", ")", "if", "response", ".", "status_code", "==", "200", ":", "return", "response", ".", "json", "(", ")", "elif", "response", ".", "status_code", "==", "401", ":", "raise", "predix", ".", "admin", ".", "cf", ".", "config", ".", "CloudFoundryLoginError", "(", "'token invalid'", ")", "else", ":", "response", ".", "raise_for_status", "(", ")" ]
Generic GET with headers
[ "Generic", "GET", "with", "headers" ]
python
train
jssimporter/python-jss
jss/jss_prefs.py
https://github.com/jssimporter/python-jss/blob/b95185d74e0c0531b0b563f280d4129e21d5fe5d/jss/jss_prefs.py#L134-L167
def parse_plist(self, preferences_file): """Try to reset preferences from preference_file.""" preferences_file = os.path.expanduser(preferences_file) # Try to open using FoundationPlist. If it's not available, # fall back to plistlib and hope it's not binary encoded. try: prefs = FoundationPlist.readPlist(preferences_file) except NameError: try: prefs = plistlib.readPlist(preferences_file) except ExpatError: # If we're on OSX, try to convert using another # tool. if is_osx(): subprocess.call(["plutil", "-convert", "xml1", preferences_file]) prefs = plistlib.readPlist(preferences_file) self.preferences_file = preferences_file self.user = prefs.get("jss_user") self.password = prefs.get("jss_pass") self.url = prefs.get("jss_url") if not all([self.user, self.password, self.url]): raise JSSPrefsMissingKeyError("Please provide all required " "preferences!") # Optional file repository array. Defaults to empty list. self.repos = [] for repo in prefs.get("repos", []): self.repos.append(dict(repo)) self.verify = prefs.get("verify", True) self.suppress_warnings = prefs.get("suppress_warnings", True)
[ "def", "parse_plist", "(", "self", ",", "preferences_file", ")", ":", "preferences_file", "=", "os", ".", "path", ".", "expanduser", "(", "preferences_file", ")", "# Try to open using FoundationPlist. If it's not available,", "# fall back to plistlib and hope it's not binary encoded.", "try", ":", "prefs", "=", "FoundationPlist", ".", "readPlist", "(", "preferences_file", ")", "except", "NameError", ":", "try", ":", "prefs", "=", "plistlib", ".", "readPlist", "(", "preferences_file", ")", "except", "ExpatError", ":", "# If we're on OSX, try to convert using another", "# tool.", "if", "is_osx", "(", ")", ":", "subprocess", ".", "call", "(", "[", "\"plutil\"", ",", "\"-convert\"", ",", "\"xml1\"", ",", "preferences_file", "]", ")", "prefs", "=", "plistlib", ".", "readPlist", "(", "preferences_file", ")", "self", ".", "preferences_file", "=", "preferences_file", "self", ".", "user", "=", "prefs", ".", "get", "(", "\"jss_user\"", ")", "self", ".", "password", "=", "prefs", ".", "get", "(", "\"jss_pass\"", ")", "self", ".", "url", "=", "prefs", ".", "get", "(", "\"jss_url\"", ")", "if", "not", "all", "(", "[", "self", ".", "user", ",", "self", ".", "password", ",", "self", ".", "url", "]", ")", ":", "raise", "JSSPrefsMissingKeyError", "(", "\"Please provide all required \"", "\"preferences!\"", ")", "# Optional file repository array. Defaults to empty list.", "self", ".", "repos", "=", "[", "]", "for", "repo", "in", "prefs", ".", "get", "(", "\"repos\"", ",", "[", "]", ")", ":", "self", ".", "repos", ".", "append", "(", "dict", "(", "repo", ")", ")", "self", ".", "verify", "=", "prefs", ".", "get", "(", "\"verify\"", ",", "True", ")", "self", ".", "suppress_warnings", "=", "prefs", ".", "get", "(", "\"suppress_warnings\"", ",", "True", ")" ]
Try to reset preferences from preference_file.
[ "Try", "to", "reset", "preferences", "from", "preference_file", "." ]
python
train
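Illustrative sketch of a minimal preferences file the parse_plist() record above would accept (not part of the original data); the key names come from the code itself, while the file name and values are invented.
import plistlib

prefs = {
    'jss_user': 'api_user',
    'jss_pass': 'secret',
    'jss_url': 'https://jss.example.com:8443',
    'repos': [],                # optional
    'verify': True,             # optional
    'suppress_warnings': True,  # optional
}
with open('com.example.python-jss.plist', 'wb') as handle:
    plistlib.dump(prefs, handle)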
pybel/pybel
src/pybel/manager/database_io.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/manager/database_io.py#L20-L38
def to_database(graph, manager: Optional[Manager] = None, store_parts: bool = True, use_tqdm: bool = False): """Store a graph in a database. :param BELGraph graph: A BEL graph :param store_parts: Should the graph be stored in the edge store? :return: If successful, returns the network object from the database. :rtype: Optional[Network] """ if manager is None: manager = Manager() try: return manager.insert_graph(graph, store_parts=store_parts, use_tqdm=use_tqdm) except (IntegrityError, OperationalError): manager.session.rollback() log.exception('Error storing graph') except Exception as e: manager.session.rollback() raise e
[ "def", "to_database", "(", "graph", ",", "manager", ":", "Optional", "[", "Manager", "]", "=", "None", ",", "store_parts", ":", "bool", "=", "True", ",", "use_tqdm", ":", "bool", "=", "False", ")", ":", "if", "manager", "is", "None", ":", "manager", "=", "Manager", "(", ")", "try", ":", "return", "manager", ".", "insert_graph", "(", "graph", ",", "store_parts", "=", "store_parts", ",", "use_tqdm", "=", "use_tqdm", ")", "except", "(", "IntegrityError", ",", "OperationalError", ")", ":", "manager", ".", "session", ".", "rollback", "(", ")", "log", ".", "exception", "(", "'Error storing graph'", ")", "except", "Exception", "as", "e", ":", "manager", ".", "session", ".", "rollback", "(", ")", "raise", "e" ]
Store a graph in a database. :param BELGraph graph: A BEL graph :param store_parts: Should the graph be stored in the edge store? :return: If successful, returns the network object from the database. :rtype: Optional[Network]
[ "Store", "a", "graph", "in", "a", "database", "." ]
python
train
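Illustrative usage sketch for the to_database() record above (not part of the original data); it assumes a default Manager configuration is available.
from pybel import BELGraph

graph = BELGraph(name='Example graph', version='1.0.0')
network = to_database(graph)  # Network instance on success, None if storage failed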
xeroc/python-graphenelib
grapheneapi/http.py
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/grapheneapi/http.py#L22-L38
def rpcexec(self, payload): """ Execute a call by sending the payload :param json payload: Payload data :raises ValueError: if the server does not respond in proper JSON format :raises HttpInvalidStatusCode: if the server returns a status code that is not 200 """ log.debug(json.dumps(payload)) query = requests.post(self.url, json=payload, proxies=self.proxies()) if query.status_code != 200: # pragma: no cover raise HttpInvalidStatusCode( "Status code returned: {}".format(query.status_code) ) return query.text
[ "def", "rpcexec", "(", "self", ",", "payload", ")", ":", "log", ".", "debug", "(", "json", ".", "dumps", "(", "payload", ")", ")", "query", "=", "requests", ".", "post", "(", "self", ".", "url", ",", "json", "=", "payload", ",", "proxies", "=", "self", ".", "proxies", "(", ")", ")", "if", "query", ".", "status_code", "!=", "200", ":", "# pragma: no cover", "raise", "HttpInvalidStatusCode", "(", "\"Status code returned: {}\"", ".", "format", "(", "query", ".", "status_code", ")", ")", "return", "query", ".", "text" ]
Execute a call by sending the payload :param json payload: Payload data :raises ValueError: if the server does not respond in proper JSON format :raises HttpInvalidStatusCode: if the server returns a status code that is not 200
[ "Execute", "a", "call", "by", "sending", "the", "payload" ]
python
valid
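Illustrative usage sketch for the rpcexec() record above (not part of the original data); the endpoint URL is invented and the JSON-RPC payload shape is an assumption based on typical graphene API calls.
from grapheneapi.http import Http

rpc = Http("https://api.example-node.invalid")
raw = rpc.rpcexec({
    "jsonrpc": "2.0",
    "method": "get_config",
    "params": [],
    "id": 1,
})  # returns the raw response text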
Karaage-Cluster/python-tldap
tldap/backend/fake_transactions.py
https://github.com/Karaage-Cluster/python-tldap/blob/61f1af74a3648cb6491e7eeb1ee2eb395d67bf59/tldap/backend/fake_transactions.py#L215-L294
def modify(self, dn: str, mod_list: dict) -> None: """ Modify a DN in the LDAP database; See ldap module. Doesn't return a result if transactions enabled. """ _debug("modify", self, dn, mod_list) # need to work out how to reverse changes in mod_list; result in revlist revlist = {} # get the current cached attributes result = self._cache_get_for_dn(dn) # find the how to reverse mod_list (for rollback) and put result in # revlist. Also simulate actions on cache. for mod_type, l in six.iteritems(mod_list): for mod_op, mod_vals in l: _debug("attribute:", mod_type) if mod_type in result: _debug("attribute cache:", result[mod_type]) else: _debug("attribute cache is empty") _debug("attribute modify:", (mod_op, mod_vals)) if mod_vals is not None: if not isinstance(mod_vals, list): mod_vals = [mod_vals] if mod_op == ldap3.MODIFY_ADD: # reverse of MODIFY_ADD is MODIFY_DELETE reverse = (ldap3.MODIFY_DELETE, mod_vals) elif mod_op == ldap3.MODIFY_DELETE and len(mod_vals) > 0: # Reverse of MODIFY_DELETE is MODIFY_ADD, but only if value # is given if mod_vals is None, this means all values where # deleted. reverse = (ldap3.MODIFY_ADD, mod_vals) elif mod_op == ldap3.MODIFY_DELETE \ or mod_op == ldap3.MODIFY_REPLACE: if mod_type in result: # If MODIFY_DELETE with no values or MODIFY_REPLACE # then we have to replace all attributes with cached # state reverse = ( ldap3.MODIFY_REPLACE, tldap.modlist.escape_list(result[mod_type]) ) else: # except if we have no cached state for this DN, in # which case we delete it. reverse = (ldap3.MODIFY_DELETE, []) else: raise RuntimeError("mod_op of %d not supported" % mod_op) reverse = [reverse] _debug("attribute reverse:", reverse) if mod_type in result: _debug("attribute cache:", result[mod_type]) else: _debug("attribute cache is empty") revlist[mod_type] = reverse _debug("--") _debug("mod_list:", mod_list) _debug("revlist:", revlist) _debug("--") # now the hard stuff is over, we get to the easy stuff def on_commit(obj): obj.modify(dn, mod_list) def on_rollback(obj): obj.modify(dn, revlist) return self._process(on_commit, on_rollback)
[ "def", "modify", "(", "self", ",", "dn", ":", "str", ",", "mod_list", ":", "dict", ")", "->", "None", ":", "_debug", "(", "\"modify\"", ",", "self", ",", "dn", ",", "mod_list", ")", "# need to work out how to reverse changes in mod_list; result in revlist", "revlist", "=", "{", "}", "# get the current cached attributes", "result", "=", "self", ".", "_cache_get_for_dn", "(", "dn", ")", "# find the how to reverse mod_list (for rollback) and put result in", "# revlist. Also simulate actions on cache.", "for", "mod_type", ",", "l", "in", "six", ".", "iteritems", "(", "mod_list", ")", ":", "for", "mod_op", ",", "mod_vals", "in", "l", ":", "_debug", "(", "\"attribute:\"", ",", "mod_type", ")", "if", "mod_type", "in", "result", ":", "_debug", "(", "\"attribute cache:\"", ",", "result", "[", "mod_type", "]", ")", "else", ":", "_debug", "(", "\"attribute cache is empty\"", ")", "_debug", "(", "\"attribute modify:\"", ",", "(", "mod_op", ",", "mod_vals", ")", ")", "if", "mod_vals", "is", "not", "None", ":", "if", "not", "isinstance", "(", "mod_vals", ",", "list", ")", ":", "mod_vals", "=", "[", "mod_vals", "]", "if", "mod_op", "==", "ldap3", ".", "MODIFY_ADD", ":", "# reverse of MODIFY_ADD is MODIFY_DELETE", "reverse", "=", "(", "ldap3", ".", "MODIFY_DELETE", ",", "mod_vals", ")", "elif", "mod_op", "==", "ldap3", ".", "MODIFY_DELETE", "and", "len", "(", "mod_vals", ")", ">", "0", ":", "# Reverse of MODIFY_DELETE is MODIFY_ADD, but only if value", "# is given if mod_vals is None, this means all values where", "# deleted.", "reverse", "=", "(", "ldap3", ".", "MODIFY_ADD", ",", "mod_vals", ")", "elif", "mod_op", "==", "ldap3", ".", "MODIFY_DELETE", "or", "mod_op", "==", "ldap3", ".", "MODIFY_REPLACE", ":", "if", "mod_type", "in", "result", ":", "# If MODIFY_DELETE with no values or MODIFY_REPLACE", "# then we have to replace all attributes with cached", "# state", "reverse", "=", "(", "ldap3", ".", "MODIFY_REPLACE", ",", "tldap", ".", "modlist", ".", "escape_list", "(", "result", "[", "mod_type", "]", ")", ")", "else", ":", "# except if we have no cached state for this DN, in", "# which case we delete it.", "reverse", "=", "(", "ldap3", ".", "MODIFY_DELETE", ",", "[", "]", ")", "else", ":", "raise", "RuntimeError", "(", "\"mod_op of %d not supported\"", "%", "mod_op", ")", "reverse", "=", "[", "reverse", "]", "_debug", "(", "\"attribute reverse:\"", ",", "reverse", ")", "if", "mod_type", "in", "result", ":", "_debug", "(", "\"attribute cache:\"", ",", "result", "[", "mod_type", "]", ")", "else", ":", "_debug", "(", "\"attribute cache is empty\"", ")", "revlist", "[", "mod_type", "]", "=", "reverse", "_debug", "(", "\"--\"", ")", "_debug", "(", "\"mod_list:\"", ",", "mod_list", ")", "_debug", "(", "\"revlist:\"", ",", "revlist", ")", "_debug", "(", "\"--\"", ")", "# now the hard stuff is over, we get to the easy stuff", "def", "on_commit", "(", "obj", ")", ":", "obj", ".", "modify", "(", "dn", ",", "mod_list", ")", "def", "on_rollback", "(", "obj", ")", ":", "obj", ".", "modify", "(", "dn", ",", "revlist", ")", "return", "self", ".", "_process", "(", "on_commit", ",", "on_rollback", ")" ]
Modify a DN in the LDAP database; see the ldap module. Doesn't return a result if transactions are enabled.
[ "Modify", "a", "DN", "in", "the", "LDAP", "database", ";", "See", "ldap", "module", ".", "Doesn", "t", "return", "a", "result", "if", "transactions", "enabled", "." ]
python
train
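The core of the rollback logic is computing, per attribute, the operation that undoes a change given the cached values. A small self-contained sketch of that idea, assuming only the ldap3 constants used above; the attribute values are illustrative.

from ldap3 import MODIFY_ADD, MODIFY_DELETE, MODIFY_REPLACE

def reverse_operation(mod_op, mod_vals, cached_vals):
    # Return the (operation, values) pair that undoes (mod_op, mod_vals),
    # given the values currently cached for the attribute.
    if mod_op == MODIFY_ADD:
        return (MODIFY_DELETE, mod_vals)
    if mod_op == MODIFY_DELETE and mod_vals:
        return (MODIFY_ADD, mod_vals)
    # MODIFY_REPLACE, or MODIFY_DELETE of every value: restore the cached state
    if cached_vals:
        return (MODIFY_REPLACE, list(cached_vals))
    return (MODIFY_DELETE, [])

print(reverse_operation(MODIFY_ADD, [b"new@example.org"], [b"old@example.org"]))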
BernardFW/bernard
src/bernard/platforms/facebook/platform.py
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L484-L505
async def _get_messenger_profile(self, page, fields: List[Text]): """ Fetch the value of specified fields in order to avoid setting the same field twice at the same value (since Facebook engineers are not able to make menus that keep on working if set again). """ params = { 'access_token': page['page_token'], 'fields': ','.join(fields), } get = self.session.get(PROFILE_ENDPOINT, params=params) async with get as r: await self._handle_fb_response(r) out = {} for data in (await r.json())['data']: out.update(data) return out
[ "async", "def", "_get_messenger_profile", "(", "self", ",", "page", ",", "fields", ":", "List", "[", "Text", "]", ")", ":", "params", "=", "{", "'access_token'", ":", "page", "[", "'page_token'", "]", ",", "'fields'", ":", "','", ".", "join", "(", "fields", ")", ",", "}", "get", "=", "self", ".", "session", ".", "get", "(", "PROFILE_ENDPOINT", ",", "params", "=", "params", ")", "async", "with", "get", "as", "r", ":", "await", "self", ".", "_handle_fb_response", "(", "r", ")", "out", "=", "{", "}", "for", "data", "in", "(", "await", "r", ".", "json", "(", ")", ")", "[", "'data'", "]", ":", "out", ".", "update", "(", "data", ")", "return", "out" ]
Fetch the value of specified fields in order to avoid setting the same field twice at the same value (since Facebook engineers are not able to make menus that keep on working if set again).
[ "Fetch", "the", "value", "of", "specified", "fields", "in", "order", "to", "avoid", "setting", "the", "same", "field", "twice", "at", "the", "same", "value", "(", "since", "Facebook", "engineers", "are", "not", "able", "to", "make", "menus", "that", "keep", "on", "working", "if", "set", "again", ")", "." ]
python
train
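A hedged sketch of the same fetch-and-merge pattern with aiohttp; the Graph API endpoint, page token, and field names are assumptions for illustration only.

import asyncio
import aiohttp

PROFILE_ENDPOINT = "https://graph.facebook.com/v2.6/me/messenger_profile"  # assumed URL

async def get_messenger_profile(page_token, fields):
    params = {"access_token": page_token, "fields": ",".join(fields)}
    async with aiohttp.ClientSession() as session:
        async with session.get(PROFILE_ENDPOINT, params=params) as r:
            r.raise_for_status()
            payload = await r.json()
    out = {}
    for entry in payload.get("data", []):
        out.update(entry)  # merge each field's current value into one dict
    return out

# asyncio.run(get_messenger_profile("PAGE_TOKEN", ["get_started", "persistent_menu"]))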
GoogleCloudPlatform/compute-image-packages
packages/python-google-compute-engine/google_compute_engine/accounts/accounts_daemon.py
https://github.com/GoogleCloudPlatform/compute-image-packages/blob/53ea8cd069fb4d9a1984d1c167e54c133033f8da/packages/python-google-compute-engine/google_compute_engine/accounts/accounts_daemon.py#L221-L230
def _RemoveUsers(self, remove_users): """Deprovision Linux user accounts that do not appear in account metadata. Args: remove_users: list, the username strings of the Linux accounts to remove. """ for username in remove_users: self.utils.RemoveUser(username) self.user_ssh_keys.pop(username, None) self.invalid_users -= set(remove_users)
[ "def", "_RemoveUsers", "(", "self", ",", "remove_users", ")", ":", "for", "username", "in", "remove_users", ":", "self", ".", "utils", ".", "RemoveUser", "(", "username", ")", "self", ".", "user_ssh_keys", ".", "pop", "(", "username", ",", "None", ")", "self", ".", "invalid_users", "-=", "set", "(", "remove_users", ")" ]
Deprovision Linux user accounts that do not appear in account metadata. Args: remove_users: list, the username strings of the Linux accounts to remove.
[ "Deprovision", "Linux", "user", "accounts", "that", "do", "not", "appear", "in", "account", "metadata", "." ]
python
train
jvamvas/rhymediscovery
rhymediscovery/celex.py
https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/celex.py#L44-L74
def init_perfect_ttable(words): """initialize (normalized) theta according to whether words rhyme""" d = read_celex() not_in_dict = 0 n = len(words) t_table = numpy.zeros((n, n + 1)) # initialize P(c|r) accordingly for r, w in enumerate(words): if w not in d: not_in_dict += 1 for c, v in enumerate(words): if c < r: t_table[r, c] = t_table[c, r] elif w in d and v in d: t_table[r, c] = int(is_rhyme(d, w, v)) + 0.001 # for backoff else: t_table[r, c] = random.random() t_table[r, n] = random.random() # no estimate for P(r|no history) print(not_in_dict, "of", n, " words are not in CELEX") # normalize for c in range(n + 1): tot = sum(t_table[:, c]) for r in range(n): t_table[r, c] = t_table[r, c] / tot return t_table
[ "def", "init_perfect_ttable", "(", "words", ")", ":", "d", "=", "read_celex", "(", ")", "not_in_dict", "=", "0", "n", "=", "len", "(", "words", ")", "t_table", "=", "numpy", ".", "zeros", "(", "(", "n", ",", "n", "+", "1", ")", ")", "# initialize P(c|r) accordingly", "for", "r", ",", "w", "in", "enumerate", "(", "words", ")", ":", "if", "w", "not", "in", "d", ":", "not_in_dict", "+=", "1", "for", "c", ",", "v", "in", "enumerate", "(", "words", ")", ":", "if", "c", "<", "r", ":", "t_table", "[", "r", ",", "c", "]", "=", "t_table", "[", "c", ",", "r", "]", "elif", "w", "in", "d", "and", "v", "in", "d", ":", "t_table", "[", "r", ",", "c", "]", "=", "int", "(", "is_rhyme", "(", "d", ",", "w", ",", "v", ")", ")", "+", "0.001", "# for backoff", "else", ":", "t_table", "[", "r", ",", "c", "]", "=", "random", ".", "random", "(", ")", "t_table", "[", "r", ",", "n", "]", "=", "random", ".", "random", "(", ")", "# no estimate for P(r|no history)", "print", "(", "not_in_dict", ",", "\"of\"", ",", "n", ",", "\" words are not in CELEX\"", ")", "# normalize", "for", "c", "in", "range", "(", "n", "+", "1", ")", ":", "tot", "=", "sum", "(", "t_table", "[", ":", ",", "c", "]", ")", "for", "r", "in", "range", "(", "n", ")", ":", "t_table", "[", "r", ",", "c", "]", "=", "t_table", "[", "r", ",", "c", "]", "/", "tot", "return", "t_table" ]
initialize (normalized) theta according to whether words rhyme
[ "initialize", "(", "normalized", ")", "theta", "according", "to", "whether", "words", "rhyme" ]
python
train
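The final normalization step can be written without the explicit Python loops; a small numpy sketch of the same column-wise normalization (shapes are illustrative).

import numpy as np

rng = np.random.default_rng(1)
t_table = rng.random((4, 5)).astype(np.float32)   # shape (n, n + 1) as above

# divide every column by its sum so each P(r | c) distribution sums to one
t_table /= t_table.sum(axis=0, keepdims=True)
print(t_table.sum(axis=0))   # -> all ones (up to float rounding)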
lsst-sqre/sqre-codekit
codekit/codetools.py
https://github.com/lsst-sqre/sqre-codekit/blob/98122404cd9065d4d1d570867fe518042669126c/codekit/codetools.py#L24-L91
def setup_logging(verbosity=0): """Configure python `logging`. This is required before the `debug()`, `info()`, etc. functions may be used. If any other `codekit.*` modules, which are not a "package", have been imported, and they have a `setup_logging()` function, that is called before `logging` is configured. This gives other modules a chance to configure their own logging. As an example, if `progressbar2` is being used, it needs to be configure a `sys.stderr` wrapper before `logging` is configured. Thus, some gymnastics are being done to delay `logging` setup while simultanously not requiring that `progressbar2` be imported unless it is actually being used. Parameters ---------- verbosity: int Logging / output verbosity level. 1 is useful for more purposes while 2+ is generaly TMI. """ import pkgutil import logging import codekit # https://packaging.python.org/guides/creating-and-discovering-plugins/#using-namespace-packages def iter_namespace(ns_pkg): # Specifying the second argument (prefix) to iter_modules makes the # returned name an absolute name instead of a relative one. This allows # import_module to work without having to do additional modification to # the name. return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + ".") # find codekit modules that are not a package codekit_mods = [name for finder, name, ispkg in iter_namespace(codekit) if ispkg is False] # filter out the current module # XXX `is not` doesn't work here but `!=` does... why??? codekit_mods = [m for m in codekit_mods if m != __name__] # filter out modules that have not been imported codekit_mods = [m for m in codekit_mods if m in sys.modules] # record funcs successfully called logging_funcs = [] for m in codekit_mods: try: lsetup = getattr(sys.modules[m], 'setup_logging') lsetup(verbosity=verbosity) logging_funcs.append(lsetup) except AttributeError: # ignore modules that do have a setup_logging() pass logging.basicConfig() # configure `logger` for the entire module global logger logger = logging.getLogger('codekit') if verbosity: logger.setLevel(logging.DEBUG) else: logger.setLevel(logging.INFO) [debug("{m}.{f}()".format(m=f.__module__, f=f.__name__)) for f in logging_funcs]
[ "def", "setup_logging", "(", "verbosity", "=", "0", ")", ":", "import", "pkgutil", "import", "logging", "import", "codekit", "# https://packaging.python.org/guides/creating-and-discovering-plugins/#using-namespace-packages", "def", "iter_namespace", "(", "ns_pkg", ")", ":", "# Specifying the second argument (prefix) to iter_modules makes the", "# returned name an absolute name instead of a relative one. This allows", "# import_module to work without having to do additional modification to", "# the name.", "return", "pkgutil", ".", "iter_modules", "(", "ns_pkg", ".", "__path__", ",", "ns_pkg", ".", "__name__", "+", "\".\"", ")", "# find codekit modules that are not a package", "codekit_mods", "=", "[", "name", "for", "finder", ",", "name", ",", "ispkg", "in", "iter_namespace", "(", "codekit", ")", "if", "ispkg", "is", "False", "]", "# filter out the current module", "# XXX `is not` doesn't work here but `!=` does... why???", "codekit_mods", "=", "[", "m", "for", "m", "in", "codekit_mods", "if", "m", "!=", "__name__", "]", "# filter out modules that have not been imported", "codekit_mods", "=", "[", "m", "for", "m", "in", "codekit_mods", "if", "m", "in", "sys", ".", "modules", "]", "# record funcs successfully called", "logging_funcs", "=", "[", "]", "for", "m", "in", "codekit_mods", ":", "try", ":", "lsetup", "=", "getattr", "(", "sys", ".", "modules", "[", "m", "]", ",", "'setup_logging'", ")", "lsetup", "(", "verbosity", "=", "verbosity", ")", "logging_funcs", ".", "append", "(", "lsetup", ")", "except", "AttributeError", ":", "# ignore modules that do have a setup_logging()", "pass", "logging", ".", "basicConfig", "(", ")", "# configure `logger` for the entire module", "global", "logger", "logger", "=", "logging", ".", "getLogger", "(", "'codekit'", ")", "if", "verbosity", ":", "logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "else", ":", "logger", ".", "setLevel", "(", "logging", ".", "INFO", ")", "[", "debug", "(", "\"{m}.{f}()\"", ".", "format", "(", "m", "=", "f", ".", "__module__", ",", "f", "=", "f", ".", "__name__", ")", ")", "for", "f", "in", "logging_funcs", "]" ]
Configure python `logging`. This is required before the `debug()`, `info()`, etc. functions may be used. If any other `codekit.*` modules, which are not a "package", have been imported, and they have a `setup_logging()` function, that is called before `logging` is configured. This gives other modules a chance to configure their own logging. As an example, if `progressbar2` is being used, it needs to configure a `sys.stderr` wrapper before `logging` is configured. Thus, some gymnastics are being done to delay `logging` setup while simultaneously not requiring that `progressbar2` be imported unless it is actually being used. Parameters ---------- verbosity: int Logging / output verbosity level. 1 is useful for most purposes while 2+ is generally TMI.
[ "Configure", "python", "logging", ".", "This", "is", "required", "before", "the", "debug", "()", "info", "()", "etc", ".", "functions", "may", "be", "used", "." ]
python
train
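The discovery trick — walking a namespace package for already-imported, non-package submodules and calling their setup_logging() hook — can be sketched on its own; the package name passed in is a placeholder.

import importlib
import pkgutil
import sys

def iter_namespace(ns_pkg):
    # prefixing with the package name yields absolute module names
    return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + ".")

def call_plugin_loggers(package_name, verbosity=0):
    pkg = importlib.import_module(package_name)    # e.g. "codekit" in the code above
    called = []
    for _, name, ispkg in iter_namespace(pkg):
        if ispkg or name not in sys.modules:
            continue                               # only plain, already-imported modules
        hook = getattr(sys.modules[name], "setup_logging", None)
        if hook is not None:
            hook(verbosity=verbosity)
            called.append(name)
    return called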
vxgmichel/aiostream
aiostream/stream/advanced.py
https://github.com/vxgmichel/aiostream/blob/43bdf04ab19108a3f1b5a472062e1392a26cbcf8/aiostream/stream/advanced.py#L116-L126
def flatten(source, task_limit=None): """Given an asynchronous sequence of sequences, generate the elements of the sequences as soon as they're received. The sequences are awaited concurrently, although it's possible to limit the amount of running sequences using the `task_limit` argument. Errors raised in the source or an element sequence are propagated. """ return base_combine.raw( source, task_limit=task_limit, switch=False, ordered=False)
[ "def", "flatten", "(", "source", ",", "task_limit", "=", "None", ")", ":", "return", "base_combine", ".", "raw", "(", "source", ",", "task_limit", "=", "task_limit", ",", "switch", "=", "False", ",", "ordered", "=", "False", ")" ]
Given an asynchronous sequence of sequences, generate the elements of the sequences as soon as they're received. The sequences are awaited concurrently, although it's possible to limit the amount of running sequences using the `task_limit` argument. Errors raised in the source or an element sequence are propagated.
[ "Given", "an", "asynchronous", "sequence", "of", "sequences", "generate", "the", "elements", "of", "the", "sequences", "as", "soon", "as", "they", "re", "received", "." ]
python
train
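A usage sketch, assuming the operator is reachable through the aiostream.stream namespace; the generators and the concurrency limit are illustrative.

import asyncio
from aiostream import stream

async def numbers(start):
    for i in range(start, start + 3):
        await asyncio.sleep(0.01)
        yield i

async def main():
    # an async sequence whose elements are themselves async sequences
    sources = stream.iterate([numbers(0), numbers(10)])
    # consume at most two inner sequences concurrently
    flat = stream.flatten(sources, task_limit=2)
    print(await stream.list(flat))

asyncio.run(main())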
ASKIDA/Selenium2LibraryExtension
src/Selenium2LibraryExtension/keywords/__init__.py
https://github.com/ASKIDA/Selenium2LibraryExtension/blob/5ca3fa776063c6046dff317cb2575e4772d7541f/src/Selenium2LibraryExtension/keywords/__init__.py#L332-L339
def element_focus_should_be_set(self, locator): """Verifies the element identified by `locator` has focus. | *Argument* | *Description* | *Example* | | locator | Selenium 2 element locator | id=my_id |""" self._info("Verifying element '%s' focus is set" % locator) self._check_element_focus(True, locator)
[ "def", "element_focus_should_be_set", "(", "self", ",", "locator", ")", ":", "self", ".", "_info", "(", "\"Verifying element '%s' focus is set\"", "%", "locator", ")", "self", ".", "_check_element_focus", "(", "True", ",", "locator", ")" ]
Verifies the element identified by `locator` has focus. | *Argument* | *Description* | *Example* | | locator | Selenium 2 element locator | id=my_id |
[ "Verifies", "the", "element", "identified", "by", "locator", "has", "focus", "." ]
python
train
google/grr
api_client/python/grr_api_client/root.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/api_client/python/grr_api_client/root.py#L144-L159
def CreateGrrUser(self, username=None, user_type=None, password=None): """Creates a new GRR user of a given type with a given username/password.""" if not username: raise ValueError("Username can't be empty.") args = user_management_pb2.ApiCreateGrrUserArgs(username=username) if user_type is not None: args.user_type = user_type if password is not None: args.password = password data = self._context.SendRequest("CreateGrrUser", args) return GrrUser(data=data, context=self._context)
[ "def", "CreateGrrUser", "(", "self", ",", "username", "=", "None", ",", "user_type", "=", "None", ",", "password", "=", "None", ")", ":", "if", "not", "username", ":", "raise", "ValueError", "(", "\"Username can't be empty.\"", ")", "args", "=", "user_management_pb2", ".", "ApiCreateGrrUserArgs", "(", "username", "=", "username", ")", "if", "user_type", "is", "not", "None", ":", "args", ".", "user_type", "=", "user_type", "if", "password", "is", "not", "None", ":", "args", ".", "password", "=", "password", "data", "=", "self", ".", "_context", ".", "SendRequest", "(", "\"CreateGrrUser\"", ",", "args", ")", "return", "GrrUser", "(", "data", "=", "data", ",", "context", "=", "self", ".", "_context", ")" ]
Creates a new GRR user of a given type with a given username/password.
[ "Creates", "a", "new", "GRR", "user", "of", "a", "given", "type", "with", "a", "given", "username", "/", "password", "." ]
python
train
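A possible client-side call, assuming an HTTP API endpoint and admin credentials; whether the admin surface is exposed as the client's root attribute may vary by GRR version, so treat the names below as assumptions.

from grr_api_client import api

grrapi = api.InitHttp(api_endpoint="http://localhost:8000",      # assumed endpoint
                      auth=("admin", "admin-password"))          # assumed credentials

# username is required; user_type and password stay unset unless given
user = grrapi.root.CreateGrrUser(username="analyst1", password="s3cret")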
Stewori/pytypes
pytypes/type_util.py
https://github.com/Stewori/pytypes/blob/b814d38709e84c0e0825caf8b721c20eb5a8ab3b/pytypes/type_util.py#L1104-L1145
def _issubclass_Mapping_covariant(subclass, superclass, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check): """Helper for _issubclass, a.k.a pytypes.issubtype. This subclass-check treats Mapping-values as covariant. """ if is_Generic(subclass): if subclass.__origin__ is None or not issubclass(subclass.__origin__, Mapping): return _issubclass_Generic(subclass, superclass, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) if superclass.__args__ is None: if not pytypes.check_unbound_types: raise TypeError("Attempted to check unbound mapping type(superclass): "+ str(superclass)) if pytypes.strict_unknown_check: # Nothing is subtype of unknown type return False super_args = (Any, Any) else: super_args = superclass.__args__ if subclass.__args__ is None: if not pytypes.check_unbound_types: raise TypeError("Attempted to check unbound mapping type(subclass): "+ str(subclass)) if pytypes.strict_unknown_check: # Nothing can subclass unknown type # For value type it would be okay if superclass had Any as value type, # as unknown type is subtype of Any. However, since key type is invariant # and also unknown, it cannot pass. return False sub_args = (Any, Any) else: sub_args = subclass.__args__ if not _issubclass(sub_args[0], super_args[0], bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check): return False if not _issubclass(sub_args[1], super_args[1], bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check): return False return True return issubclass(subclass, superclass)
[ "def", "_issubclass_Mapping_covariant", "(", "subclass", ",", "superclass", ",", "bound_Generic", ",", "bound_typevars", ",", "bound_typevars_readonly", ",", "follow_fwd_refs", ",", "_recursion_check", ")", ":", "if", "is_Generic", "(", "subclass", ")", ":", "if", "subclass", ".", "__origin__", "is", "None", "or", "not", "issubclass", "(", "subclass", ".", "__origin__", ",", "Mapping", ")", ":", "return", "_issubclass_Generic", "(", "subclass", ",", "superclass", ",", "bound_Generic", ",", "bound_typevars", ",", "bound_typevars_readonly", ",", "follow_fwd_refs", ",", "_recursion_check", ")", "if", "superclass", ".", "__args__", "is", "None", ":", "if", "not", "pytypes", ".", "check_unbound_types", ":", "raise", "TypeError", "(", "\"Attempted to check unbound mapping type(superclass): \"", "+", "str", "(", "superclass", ")", ")", "if", "pytypes", ".", "strict_unknown_check", ":", "# Nothing is subtype of unknown type", "return", "False", "super_args", "=", "(", "Any", ",", "Any", ")", "else", ":", "super_args", "=", "superclass", ".", "__args__", "if", "subclass", ".", "__args__", "is", "None", ":", "if", "not", "pytypes", ".", "check_unbound_types", ":", "raise", "TypeError", "(", "\"Attempted to check unbound mapping type(subclass): \"", "+", "str", "(", "subclass", ")", ")", "if", "pytypes", ".", "strict_unknown_check", ":", "# Nothing can subclass unknown type", "# For value type it would be okay if superclass had Any as value type,", "# as unknown type is subtype of Any. However, since key type is invariant", "# and also unknown, it cannot pass.", "return", "False", "sub_args", "=", "(", "Any", ",", "Any", ")", "else", ":", "sub_args", "=", "subclass", ".", "__args__", "if", "not", "_issubclass", "(", "sub_args", "[", "0", "]", ",", "super_args", "[", "0", "]", ",", "bound_Generic", ",", "bound_typevars", ",", "bound_typevars_readonly", ",", "follow_fwd_refs", ",", "_recursion_check", ")", ":", "return", "False", "if", "not", "_issubclass", "(", "sub_args", "[", "1", "]", ",", "super_args", "[", "1", "]", ",", "bound_Generic", ",", "bound_typevars", ",", "bound_typevars_readonly", ",", "follow_fwd_refs", ",", "_recursion_check", ")", ":", "return", "False", "return", "True", "return", "issubclass", "(", "subclass", ",", "superclass", ")" ]
Helper for _issubclass, a.k.a pytypes.issubtype. This subclass-check treats Mapping-values as covariant.
[ "Helper", "for", "_issubclass", "a", ".", "k", ".", "a", "pytypes", ".", "issubtype", ".", "This", "subclass", "-", "check", "treats", "Mapping", "-", "values", "as", "covariant", "." ]
python
train
sassoo/goldman
goldman/queryparams/page.py
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/queryparams/page.py#L100-L110
def prev(self): """ Generate query parameters for the prev page """ if self.total: if self.offset - self.limit - self.limit < 0: return self.first else: offset = self.offset - self.limit return {'page[offset]': offset, 'page[limit]': self.limit} else: return None
[ "def", "prev", "(", "self", ")", ":", "if", "self", ".", "total", ":", "if", "self", ".", "offset", "-", "self", ".", "limit", "-", "self", ".", "limit", "<", "0", ":", "return", "self", ".", "first", "else", ":", "offset", "=", "self", ".", "offset", "-", "self", ".", "limit", "return", "{", "'page[offset]'", ":", "offset", ",", "'page[limit]'", ":", "self", ".", "limit", "}", "else", ":", "return", "None" ]
Generate query parameters for the prev page
[ "Generate", "query", "parameters", "for", "the", "prev", "page" ]
python
train
basho/riak-python-client
riak/riak_object.py
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/riak_object.py#L319-L348
def delete(self, r=None, w=None, dw=None, pr=None, pw=None, timeout=None): """ Delete this object from Riak. :param r: R-value, wait for this many partitions to read object before performing the put :type r: integer :param w: W-value, wait for this many partitions to respond before returning to client. :type w: integer :param dw: DW-value, wait for this many partitions to confirm the write before returning to client. :type dw: integer :param pr: PR-value, require this many primary partitions to be available before performing the read that precedes the put :type pr: integer :param pw: PW-value, require this many primary partitions to be available before performing the put :type pw: integer :param timeout: a timeout value in milliseconds :type timeout: int :rtype: :class:`RiakObject` """ self.client.delete(self, r=r, w=w, dw=dw, pr=pr, pw=pw, timeout=timeout) self.clear() return self
[ "def", "delete", "(", "self", ",", "r", "=", "None", ",", "w", "=", "None", ",", "dw", "=", "None", ",", "pr", "=", "None", ",", "pw", "=", "None", ",", "timeout", "=", "None", ")", ":", "self", ".", "client", ".", "delete", "(", "self", ",", "r", "=", "r", ",", "w", "=", "w", ",", "dw", "=", "dw", ",", "pr", "=", "pr", ",", "pw", "=", "pw", ",", "timeout", "=", "timeout", ")", "self", ".", "clear", "(", ")", "return", "self" ]
Delete this object from Riak. :param r: R-value, wait for this many partitions to read object before performing the put :type r: integer :param w: W-value, wait for this many partitions to respond before returning to client. :type w: integer :param dw: DW-value, wait for this many partitions to confirm the write before returning to client. :type dw: integer :param pr: PR-value, require this many primary partitions to be available before performing the read that precedes the put :type pr: integer :param pw: PW-value, require this many primary partitions to be available before performing the put :type pw: integer :param timeout: a timeout value in milliseconds :type timeout: int :rtype: :class:`RiakObject`
[ "Delete", "this", "object", "from", "Riak", "." ]
python
train
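A usage sketch with the Python Riak client; the connection settings, bucket, and key names are illustrative.

import riak

client = riak.RiakClient(protocol="pbc", pb_port=8087)   # assumed connection settings
bucket = client.bucket("users")
obj = bucket.get("alice")

# require two write acknowledgements and one durable write before returning
obj.delete(w=2, dw=1)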
mongodb/mongo-python-driver
pymongo/message.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/message.py#L544-L553
def _insert_uncompressed(collection_name, docs, check_keys, safe, last_error_args, continue_on_error, opts): """Internal insert message helper.""" op_insert, max_bson_size = _insert( collection_name, docs, check_keys, continue_on_error, opts) rid, msg = __pack_message(2002, op_insert) if safe: rid, gle, _ = __last_error(collection_name, last_error_args) return rid, msg + gle, max_bson_size return rid, msg, max_bson_size
[ "def", "_insert_uncompressed", "(", "collection_name", ",", "docs", ",", "check_keys", ",", "safe", ",", "last_error_args", ",", "continue_on_error", ",", "opts", ")", ":", "op_insert", ",", "max_bson_size", "=", "_insert", "(", "collection_name", ",", "docs", ",", "check_keys", ",", "continue_on_error", ",", "opts", ")", "rid", ",", "msg", "=", "__pack_message", "(", "2002", ",", "op_insert", ")", "if", "safe", ":", "rid", ",", "gle", ",", "_", "=", "__last_error", "(", "collection_name", ",", "last_error_args", ")", "return", "rid", ",", "msg", "+", "gle", ",", "max_bson_size", "return", "rid", ",", "msg", ",", "max_bson_size" ]
Internal insert message helper.
[ "Internal", "insert", "message", "helper", "." ]
python
train
DataBiosphere/toil
src/toil/jobStores/googleJobStore.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/googleJobStore.py#L450-L476
def _uploadStream(self, fileName, update=False, encrypt=True): """ Yields a context manager that can be used to write to the bucket with a stream. See :class:`~toil.jobStores.utils.WritablePipe` for an example. Will throw assertion error if the file shouldn't be updated and yet exists. :param fileName: name of file to be inserted into bucket :type fileName: str :param update: whether or not the file is to be updated :type update: bool :param encrypt: whether or not the file is encrypted :type encrypt: bool :return: an instance of WritablePipe. :rtype: :class:`~toil.jobStores.utils.writablePipe` """ blob = self.bucket.blob(bytes(fileName), encryption_key=self.sseKey if encrypt else None) class UploadPipe(WritablePipe): def readFrom(self, readable): if not update: assert not blob.exists() blob.upload_from_file(readable) with UploadPipe() as writable: yield writable
[ "def", "_uploadStream", "(", "self", ",", "fileName", ",", "update", "=", "False", ",", "encrypt", "=", "True", ")", ":", "blob", "=", "self", ".", "bucket", ".", "blob", "(", "bytes", "(", "fileName", ")", ",", "encryption_key", "=", "self", ".", "sseKey", "if", "encrypt", "else", "None", ")", "class", "UploadPipe", "(", "WritablePipe", ")", ":", "def", "readFrom", "(", "self", ",", "readable", ")", ":", "if", "not", "update", ":", "assert", "not", "blob", ".", "exists", "(", ")", "blob", ".", "upload_from_file", "(", "readable", ")", "with", "UploadPipe", "(", ")", "as", "writable", ":", "yield", "writable" ]
Yields a context manager that can be used to write to the bucket with a stream. See :class:`~toil.jobStores.utils.WritablePipe` for an example. Will throw assertion error if the file shouldn't be updated and yet exists. :param fileName: name of file to be inserted into bucket :type fileName: str :param update: whether or not the file is to be updated :type update: bool :param encrypt: whether or not the file is encrypted :type encrypt: bool :return: an instance of WritablePipe. :rtype: :class:`~toil.jobStores.utils.writablePipe`
[ "Yields", "a", "context", "manager", "that", "can", "be", "used", "to", "write", "to", "the", "bucket", "with", "a", "stream", ".", "See", ":", "class", ":", "~toil", ".", "jobStores", ".", "utils", ".", "WritablePipe", "for", "an", "example", "." ]
python
train
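The underlying google-cloud-storage calls can be sketched without the WritablePipe plumbing; the bucket and object names are placeholders, and credentials are assumed to come from the environment.

import io
from google.cloud import storage

client = storage.Client()
bucket = client.bucket("my-example-bucket")          # placeholder bucket
blob = bucket.blob("files/example.bin")              # placeholder object name

payload = io.BytesIO(b"bytes produced by some writer")
if not blob.exists():                                # mirror the "no update" assertion
    blob.upload_from_file(payload)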
rstoneback/pysat
pysat/instruments/netcdf_pandas.py
https://github.com/rstoneback/pysat/blob/4ae1afd80e15e4449397d39dce8c3e969c32c422/pysat/instruments/netcdf_pandas.py#L49-L104
def load(fnames, tag=None, sat_id=None, **kwargs): """Loads data using pysat.utils.load_netcdf4 . This routine is called as needed by pysat. It is not intended for direct user interaction. Parameters ---------- fnames : array-like iterable of filename strings, full path, to data files to be loaded. This input is nominally provided by pysat itself. tag : string tag name used to identify particular data set to be loaded. This input is nominally provided by pysat itself. sat_id : string Satellite ID used to identify particular data set to be loaded. This input is nominally provided by pysat itself. **kwargs : extra keywords Passthrough for additional keyword arguments specified when instantiating an Instrument object. These additional keywords are passed through to this routine by pysat. Returns ------- data, metadata Data and Metadata are formatted for pysat. Data is a pandas DataFrame while metadata is a pysat.Meta instance. Note ---- Any additional keyword arguments passed to pysat.Instrument upon instantiation are passed along to this routine and through to the load_netcdf4 call. Examples -------- :: inst = pysat.Instrument('sport', 'ivm') inst.load(2019,1) # create quick Instrument object for a new, random netCDF4 file # define filename template string to identify files # this is normally done by instrument code, but in this case # there is no built in pysat instrument support # presumes files are named default_2019-01-01.NC format_str = 'default_{year:04d}-{month:02d}-{day:02d}.NC' inst = pysat.Instrument('netcdf', 'pandas', custom_kwarg='test' data_path='./', format_str=format_str) inst.load(2019,1) """ return pysat.utils.load_netcdf4(fnames, **kwargs)
[ "def", "load", "(", "fnames", ",", "tag", "=", "None", ",", "sat_id", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "pysat", ".", "utils", ".", "load_netcdf4", "(", "fnames", ",", "*", "*", "kwargs", ")" ]
Loads data using pysat.utils.load_netcdf4 . This routine is called as needed by pysat. It is not intended for direct user interaction. Parameters ---------- fnames : array-like iterable of filename strings, full path, to data files to be loaded. This input is nominally provided by pysat itself. tag : string tag name used to identify particular data set to be loaded. This input is nominally provided by pysat itself. sat_id : string Satellite ID used to identify particular data set to be loaded. This input is nominally provided by pysat itself. **kwargs : extra keywords Passthrough for additional keyword arguments specified when instantiating an Instrument object. These additional keywords are passed through to this routine by pysat. Returns ------- data, metadata Data and Metadata are formatted for pysat. Data is a pandas DataFrame while metadata is a pysat.Meta instance. Note ---- Any additional keyword arguments passed to pysat.Instrument upon instantiation are passed along to this routine and through to the load_netcdf4 call. Examples -------- :: inst = pysat.Instrument('sport', 'ivm') inst.load(2019,1) # create quick Instrument object for a new, random netCDF4 file # define filename template string to identify files # this is normally done by instrument code, but in this case # there is no built in pysat instrument support # presumes files are named default_2019-01-01.NC format_str = 'default_{year:04d}-{month:02d}-{day:02d}.NC' inst = pysat.Instrument('netcdf', 'pandas', custom_kwarg='test' data_path='./', format_str=format_str) inst.load(2019,1)
[ "Loads", "data", "using", "pysat", ".", "utils", ".", "load_netcdf4", "." ]
python
train
saltstack/salt
salt/modules/poudriere.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/poudriere.py#L175-L204
def create_jail(name, arch, version="9.0-RELEASE"): ''' Creates a new poudriere jail if one does not exist *NOTE* creating a new jail will take some time the master is not hanging CLI Example: .. code-block:: bash salt '*' poudriere.create_jail 90amd64 amd64 ''' # Config file must be on system to create a poudriere jail _check_config_exists() # Check if the jail is there if is_jail(name): return '{0} already exists'.format(name) cmd = 'poudriere jails -c -j {0} -v {1} -a {2}'.format(name, version, arch) __salt__['cmd.run'](cmd) # Make jail pkgng aware make_pkgng_aware(name) # Make sure the jail was created if is_jail(name): return 'Created jail {0}'.format(name) return 'Issue creating jail {0}'.format(name)
[ "def", "create_jail", "(", "name", ",", "arch", ",", "version", "=", "\"9.0-RELEASE\"", ")", ":", "# Config file must be on system to create a poudriere jail", "_check_config_exists", "(", ")", "# Check if the jail is there", "if", "is_jail", "(", "name", ")", ":", "return", "'{0} already exists'", ".", "format", "(", "name", ")", "cmd", "=", "'poudriere jails -c -j {0} -v {1} -a {2}'", ".", "format", "(", "name", ",", "version", ",", "arch", ")", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ")", "# Make jail pkgng aware", "make_pkgng_aware", "(", "name", ")", "# Make sure the jail was created", "if", "is_jail", "(", "name", ")", ":", "return", "'Created jail {0}'", ".", "format", "(", "name", ")", "return", "'Issue creating jail {0}'", ".", "format", "(", "name", ")" ]
Creates a new poudriere jail if one does not exist *NOTE* creating a new jail will take some time the master is not hanging CLI Example: .. code-block:: bash salt '*' poudriere.create_jail 90amd64 amd64
[ "Creates", "a", "new", "poudriere", "jail", "if", "one", "does", "not", "exist" ]
python
train
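Besides the CLI shown in the docstring, the module can be driven from Python through Salt's local client; the minion target pattern and jail parameters below are illustrative.

import salt.client

local = salt.client.LocalClient()
result = local.cmd("buildhost*",                      # assumed minion target
                   "poudriere.create_jail",
                   ["111amd64", "amd64"],
                   kwarg={"version": "11.1-RELEASE"})
print(result)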
nikhilkumarsingh/gnewsclient
gnewsclient/gnewsclient.py
https://github.com/nikhilkumarsingh/gnewsclient/blob/65422f1dee9408f1b51ae6a2ee08ae478432e1d5/gnewsclient/gnewsclient.py#L24-L33
def get_config(self): """ function to get current configuration """ config = { 'location': self.location, 'language': self.language, 'topic': self.topic, } return config
[ "def", "get_config", "(", "self", ")", ":", "config", "=", "{", "'location'", ":", "self", ".", "location", ",", "'language'", ":", "self", ".", "language", ",", "'topic'", ":", "self", ".", "topic", ",", "}", "return", "config" ]
function to get current configuration
[ "function", "to", "get", "current", "configuration" ]
python
train
nteract/papermill
papermill/parameterize.py
https://github.com/nteract/papermill/blob/7423a303f3fa22ec6d03edf5fd9700d659b5a6fa/papermill/parameterize.py#L55-L106
def parameterize_notebook(nb, parameters, report_mode=False): """Assigned parameters into the appropriate place in the input notebook Parameters ---------- nb : NotebookNode Executable notebook object parameters : dict Arbitrary keyword arguments to pass as notebook parameters report_mode : bool, optional Flag to set report mode """ # Load from a file if 'parameters' is a string. if isinstance(parameters, six.string_types): parameters = read_yaml_file(parameters) # Copy the nb object to avoid polluting the input nb = copy.deepcopy(nb) kernel_name = nb.metadata.kernelspec.name language = nb.metadata.kernelspec.language # Generate parameter content based on the kernel_name param_content = translate_parameters(kernel_name, language, parameters) newcell = nbformat.v4.new_code_cell(source=param_content) newcell.metadata['tags'] = ['injected-parameters'] if report_mode: newcell.metadata['jupyter'] = newcell.get('jupyter', {}) newcell.metadata['jupyter']['source_hidden'] = True param_cell_index = _find_first_tagged_cell_index(nb, 'parameters') injected_cell_index = _find_first_tagged_cell_index(nb, 'injected-parameters') if injected_cell_index >= 0: # Replace the injected cell with a new version before = nb.cells[:injected_cell_index] after = nb.cells[injected_cell_index + 1 :] elif param_cell_index >= 0: # Add an injected cell after the parameter cell before = nb.cells[: param_cell_index + 1] after = nb.cells[param_cell_index + 1 :] else: # Inject to the top of the notebook logger.warning("Input notebook does not contain a cell with tag 'parameters'") before = [] after = nb.cells nb.cells = before + [newcell] + after nb.metadata.papermill['parameters'] = parameters return nb
[ "def", "parameterize_notebook", "(", "nb", ",", "parameters", ",", "report_mode", "=", "False", ")", ":", "# Load from a file if 'parameters' is a string.", "if", "isinstance", "(", "parameters", ",", "six", ".", "string_types", ")", ":", "parameters", "=", "read_yaml_file", "(", "parameters", ")", "# Copy the nb object to avoid polluting the input", "nb", "=", "copy", ".", "deepcopy", "(", "nb", ")", "kernel_name", "=", "nb", ".", "metadata", ".", "kernelspec", ".", "name", "language", "=", "nb", ".", "metadata", ".", "kernelspec", ".", "language", "# Generate parameter content based on the kernel_name", "param_content", "=", "translate_parameters", "(", "kernel_name", ",", "language", ",", "parameters", ")", "newcell", "=", "nbformat", ".", "v4", ".", "new_code_cell", "(", "source", "=", "param_content", ")", "newcell", ".", "metadata", "[", "'tags'", "]", "=", "[", "'injected-parameters'", "]", "if", "report_mode", ":", "newcell", ".", "metadata", "[", "'jupyter'", "]", "=", "newcell", ".", "get", "(", "'jupyter'", ",", "{", "}", ")", "newcell", ".", "metadata", "[", "'jupyter'", "]", "[", "'source_hidden'", "]", "=", "True", "param_cell_index", "=", "_find_first_tagged_cell_index", "(", "nb", ",", "'parameters'", ")", "injected_cell_index", "=", "_find_first_tagged_cell_index", "(", "nb", ",", "'injected-parameters'", ")", "if", "injected_cell_index", ">=", "0", ":", "# Replace the injected cell with a new version", "before", "=", "nb", ".", "cells", "[", ":", "injected_cell_index", "]", "after", "=", "nb", ".", "cells", "[", "injected_cell_index", "+", "1", ":", "]", "elif", "param_cell_index", ">=", "0", ":", "# Add an injected cell after the parameter cell", "before", "=", "nb", ".", "cells", "[", ":", "param_cell_index", "+", "1", "]", "after", "=", "nb", ".", "cells", "[", "param_cell_index", "+", "1", ":", "]", "else", ":", "# Inject to the top of the notebook", "logger", ".", "warning", "(", "\"Input notebook does not contain a cell with tag 'parameters'\"", ")", "before", "=", "[", "]", "after", "=", "nb", ".", "cells", "nb", ".", "cells", "=", "before", "+", "[", "newcell", "]", "+", "after", "nb", ".", "metadata", ".", "papermill", "[", "'parameters'", "]", "=", "parameters", "return", "nb" ]
Assigned parameters into the appropriate place in the input notebook Parameters ---------- nb : NotebookNode Executable notebook object parameters : dict Arbitrary keyword arguments to pass as notebook parameters report_mode : bool, optional Flag to set report mode
[ "Assigned", "parameters", "into", "the", "appropriate", "place", "in", "the", "input", "notebook" ]
python
train
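A small end-to-end sketch of injecting parameters into an in-memory notebook; the kernelspec and the empty papermill metadata entry are set by hand because this bypasses papermill's normal I/O layer, and the import path is taken from this record (other versions may differ).

import nbformat
from papermill.parameterize import parameterize_notebook

nb = nbformat.v4.new_notebook()
nb.metadata["kernelspec"] = nbformat.NotebookNode(
    name="python3", language="python", display_name="Python 3")
nb.metadata["papermill"] = {}            # normally added when papermill loads the notebook

cell = nbformat.v4.new_code_cell("alpha = 1\nbeta = 'x'")
cell.metadata["tags"] = ["parameters"]
nb.cells.append(cell)

nb = parameterize_notebook(nb, {"alpha": 42, "beta": "hello"})
print(nb.cells[1].source)                # the injected-parameters cell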
dpkp/kafka-python
kafka/coordinator/base.py
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/coordinator/base.py#L297-L321
def poll_heartbeat(self): """ Check the status of the heartbeat thread (if it is active) and indicate the liveness of the client. This must be called periodically after joining with :meth:`.ensure_active_group` to ensure that the member stays in the group. If an interval of time longer than the provided rebalance timeout (max_poll_interval_ms) expires without calling this method, then the client will proactively leave the group. Raises: RuntimeError for unexpected errors raised from the heartbeat thread """ with self._lock: if self._heartbeat_thread is not None: if self._heartbeat_thread.failed: # set the heartbeat thread to None and raise an exception. # If the user catches it, the next call to ensure_active_group() # will spawn a new heartbeat thread. cause = self._heartbeat_thread.failed self._heartbeat_thread = None raise cause # pylint: disable-msg=raising-bad-type # Awake the heartbeat thread if needed if self.heartbeat.should_heartbeat(): self._lock.notify() self.heartbeat.poll()
[ "def", "poll_heartbeat", "(", "self", ")", ":", "with", "self", ".", "_lock", ":", "if", "self", ".", "_heartbeat_thread", "is", "not", "None", ":", "if", "self", ".", "_heartbeat_thread", ".", "failed", ":", "# set the heartbeat thread to None and raise an exception.", "# If the user catches it, the next call to ensure_active_group()", "# will spawn a new heartbeat thread.", "cause", "=", "self", ".", "_heartbeat_thread", ".", "failed", "self", ".", "_heartbeat_thread", "=", "None", "raise", "cause", "# pylint: disable-msg=raising-bad-type", "# Awake the heartbeat thread if needed", "if", "self", ".", "heartbeat", ".", "should_heartbeat", "(", ")", ":", "self", ".", "_lock", ".", "notify", "(", ")", "self", ".", "heartbeat", ".", "poll", "(", ")" ]
Check the status of the heartbeat thread (if it is active) and indicate the liveness of the client. This must be called periodically after joining with :meth:`.ensure_active_group` to ensure that the member stays in the group. If an interval of time longer than the provided rebalance timeout (max_poll_interval_ms) expires without calling this method, then the client will proactively leave the group. Raises: RuntimeError for unexpected errors raised from the heartbeat thread
[ "Check", "the", "status", "of", "the", "heartbeat", "thread", "(", "if", "it", "is", "active", ")", "and", "indicate", "the", "liveness", "of", "the", "client", ".", "This", "must", "be", "called", "periodically", "after", "joining", "with", ":", "meth", ":", ".", "ensure_active_group", "to", "ensure", "that", "the", "member", "stays", "in", "the", "group", ".", "If", "an", "interval", "of", "time", "longer", "than", "the", "provided", "rebalance", "timeout", "(", "max_poll_interval_ms", ")", "expires", "without", "calling", "this", "method", "then", "the", "client", "will", "proactively", "leave", "the", "group", "." ]
python
train
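From the application's point of view the contract is simply "keep calling poll() often enough"; a consumer-side sketch with an explicit rebalance timeout (broker address, topic, and group are placeholders).

from kafka import KafkaConsumer

consumer = KafkaConsumer("my-topic",
                         bootstrap_servers="localhost:9092",
                         group_id="my-group",
                         max_poll_interval_ms=300000)   # the rebalance timeout discussed above

for message in consumer:
    # per-record work must stay well under max_poll_interval_ms, otherwise the
    # coordinator treats the member as stalled and it proactively leaves the group
    print(message.topic, message.offset, message.value)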
ojarva/python-sshpubkeys
sshpubkeys/keys.py
https://github.com/ojarva/python-sshpubkeys/blob/86dc1ab27ce82dcc091ce127416cc3ee219e9bec/sshpubkeys/keys.py#L163-L166
def hash_sha512(self): """Calculates sha512 fingerprint.""" fp_plain = hashlib.sha512(self._decoded_key).digest() return (b"SHA512:" + base64.b64encode(fp_plain).replace(b"=", b"")).decode("utf-8")
[ "def", "hash_sha512", "(", "self", ")", ":", "fp_plain", "=", "hashlib", ".", "sha512", "(", "self", ".", "_decoded_key", ")", ".", "digest", "(", ")", "return", "(", "b\"SHA512:\"", "+", "base64", ".", "b64encode", "(", "fp_plain", ")", ".", "replace", "(", "b\"=\"", ",", "b\"\"", ")", ")", ".", "decode", "(", "\"utf-8\"", ")" ]
Calculates sha512 fingerprint.
[ "Calculates", "sha512", "fingerprint", "." ]
python
test
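A usage sketch; the key string is a truncated placeholder and must be a full public-key line for parse() to succeed.

from sshpubkeys import SSHKey

key = SSHKey("ssh-rsa AAAAB3NzaC1yc2EAAA... user@host")   # placeholder key data
key.parse()                                               # raises on malformed keys
print(key.hash_sha512())                                  # e.g. "SHA512:..."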
brainiak/brainiak
brainiak/fcma/voxelselector.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/voxelselector.py#L284-L329
def _correlation_computation(self, task): """Use BLAS API to do correlation computation (matrix multiplication). Parameters ---------- task: tuple (start_voxel_id, num_processed_voxels) depicting the voxels assigned to compute Returns ------- corr: 3D array in shape [num_processed_voxels, num_epochs, num_voxels] the correlation values of all subjects in all epochs for the assigned values, in row-major corr[i, e, s + j] = corr[j, e, s + i] """ time1 = time.time() s = task[0] nEpochs = len(self.raw_data) logger.debug( 'start to compute the correlation: #epochs: %d, ' '#processed voxels: %d, #total voxels to compute against: %d' % (nEpochs, task[1], self.num_voxels2) ) corr = np.zeros((task[1], nEpochs, self.num_voxels2), np.float32, order='C') count = 0 for i in range(len(self.raw_data)): mat = self.raw_data[i] mat2 = self.raw_data2[i] if self.raw_data2 is not None else mat no_trans = 'N' trans = 'T' blas.compute_self_corr_for_voxel_sel(no_trans, trans, self.num_voxels2, task[1], mat.shape[0], 1.0, mat2, self.num_voxels2, s, mat, self.num_voxels, 0.0, corr, self.num_voxels2 * nEpochs, count) count += 1 time2 = time.time() logger.debug( 'correlation computation for %d voxels, takes %.2f s' % (task[1], (time2 - time1)) ) return corr
[ "def", "_correlation_computation", "(", "self", ",", "task", ")", ":", "time1", "=", "time", ".", "time", "(", ")", "s", "=", "task", "[", "0", "]", "nEpochs", "=", "len", "(", "self", ".", "raw_data", ")", "logger", ".", "debug", "(", "'start to compute the correlation: #epochs: %d, '", "'#processed voxels: %d, #total voxels to compute against: %d'", "%", "(", "nEpochs", ",", "task", "[", "1", "]", ",", "self", ".", "num_voxels2", ")", ")", "corr", "=", "np", ".", "zeros", "(", "(", "task", "[", "1", "]", ",", "nEpochs", ",", "self", ".", "num_voxels2", ")", ",", "np", ".", "float32", ",", "order", "=", "'C'", ")", "count", "=", "0", "for", "i", "in", "range", "(", "len", "(", "self", ".", "raw_data", ")", ")", ":", "mat", "=", "self", ".", "raw_data", "[", "i", "]", "mat2", "=", "self", ".", "raw_data2", "[", "i", "]", "if", "self", ".", "raw_data2", "is", "not", "None", "else", "mat", "no_trans", "=", "'N'", "trans", "=", "'T'", "blas", ".", "compute_self_corr_for_voxel_sel", "(", "no_trans", ",", "trans", ",", "self", ".", "num_voxels2", ",", "task", "[", "1", "]", ",", "mat", ".", "shape", "[", "0", "]", ",", "1.0", ",", "mat2", ",", "self", ".", "num_voxels2", ",", "s", ",", "mat", ",", "self", ".", "num_voxels", ",", "0.0", ",", "corr", ",", "self", ".", "num_voxels2", "*", "nEpochs", ",", "count", ")", "count", "+=", "1", "time2", "=", "time", ".", "time", "(", ")", "logger", ".", "debug", "(", "'correlation computation for %d voxels, takes %.2f s'", "%", "(", "task", "[", "1", "]", ",", "(", "time2", "-", "time1", ")", ")", ")", "return", "corr" ]
Use BLAS API to do correlation computation (matrix multiplication). Parameters ---------- task: tuple (start_voxel_id, num_processed_voxels) depicting the voxels assigned to compute Returns ------- corr: 3D array in shape [num_processed_voxels, num_epochs, num_voxels] the correlation values of all subjects in all epochs for the assigned values, in row-major corr[i, e, s + j] = corr[j, e, s + i]
[ "Use", "BLAS", "API", "to", "do", "correlation", "computation", "(", "matrix", "multiplication", ")", "." ]
python
train
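The essence of the computation — Pearson correlation expressed as a matrix multiplication over normalized time series — in plain numpy, without the multi-subject layout or the optimized BLAS wrapper; shapes are illustrative.

import numpy as np

rng = np.random.default_rng(0)
n_tr, n_voxels = 40, 6
data = rng.standard_normal((n_tr, n_voxels)).astype(np.float32)

# z-score each voxel's time series, then one matmul yields the voxel-by-voxel
# correlation matrix (symmetric, ones on the diagonal)
z = (data - data.mean(axis=0)) / data.std(axis=0)
corr = z.T @ z / n_tr
print(corr.shape, np.allclose(np.diag(corr), 1.0, atol=1e-5))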
janpipek/physt
physt/binnings.py
https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/binnings.py#L972-L1007
def ideal_bin_count(data, method: str = "default") -> int: """A theoretically ideal bin count. Parameters ---------- data: array_likes Data to work on. Most methods don't use this. method: str Name of the method to apply, available values: - default (~sturges) - sqrt - sturges - doane - rice See https://en.wikipedia.org/wiki/Histogram for the description """ n = data.size if n < 1: return 1 if method == "default": if n <= 32: return 7 else: return ideal_bin_count(data, "sturges") elif method == "sqrt": return int(np.ceil(np.sqrt(n))) elif method == "sturges": return int(np.ceil(np.log2(n)) + 1) elif method == "doane": if n < 3: return 1 from scipy.stats import skew sigma = np.sqrt(6 * (n-2) / (n + 1) * (n + 3)) return int(np.ceil(1 + np.log2(n) + np.log2(1 + np.abs(skew(data)) / sigma))) elif method == "rice": return int(np.ceil(2 * np.power(n, 1 / 3)))
[ "def", "ideal_bin_count", "(", "data", ",", "method", ":", "str", "=", "\"default\"", ")", "->", "int", ":", "n", "=", "data", ".", "size", "if", "n", "<", "1", ":", "return", "1", "if", "method", "==", "\"default\"", ":", "if", "n", "<=", "32", ":", "return", "7", "else", ":", "return", "ideal_bin_count", "(", "data", ",", "\"sturges\"", ")", "elif", "method", "==", "\"sqrt\"", ":", "return", "int", "(", "np", ".", "ceil", "(", "np", ".", "sqrt", "(", "n", ")", ")", ")", "elif", "method", "==", "\"sturges\"", ":", "return", "int", "(", "np", ".", "ceil", "(", "np", ".", "log2", "(", "n", ")", ")", "+", "1", ")", "elif", "method", "==", "\"doane\"", ":", "if", "n", "<", "3", ":", "return", "1", "from", "scipy", ".", "stats", "import", "skew", "sigma", "=", "np", ".", "sqrt", "(", "6", "*", "(", "n", "-", "2", ")", "/", "(", "n", "+", "1", ")", "*", "(", "n", "+", "3", ")", ")", "return", "int", "(", "np", ".", "ceil", "(", "1", "+", "np", ".", "log2", "(", "n", ")", "+", "np", ".", "log2", "(", "1", "+", "np", ".", "abs", "(", "skew", "(", "data", ")", ")", "/", "sigma", ")", ")", ")", "elif", "method", "==", "\"rice\"", ":", "return", "int", "(", "np", ".", "ceil", "(", "2", "*", "np", ".", "power", "(", "n", ",", "1", "/", "3", ")", ")", ")" ]
A theoretically ideal bin count. Parameters ---------- data: array_likes Data to work on. Most methods don't use this. method: str Name of the method to apply, available values: - default (~sturges) - sqrt - sturges - doane - rice See https://en.wikipedia.org/wiki/Histogram for the description
[ "A", "theoretically", "ideal", "bin", "count", "." ]
python
train
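A quick comparison of the rules on the same sample; the import path follows this record.

import numpy as np
from physt.binnings import ideal_bin_count

data = np.random.normal(size=500)
for method in ("default", "sqrt", "sturges", "doane", "rice"):
    print(method, ideal_bin_count(data, method=method))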
bukun/TorCMS
torcms/handlers/filter_handler.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/filter_handler.py#L110-L159
def echo_html(self, url_str): ''' Show the HTML ''' logger.info('info echo html: {0}'.format(url_str)) condition = self.gen_redis_kw() url_arr = self.parse_url(url_str) sig = url_arr[0] num = (len(url_arr) - 2) // 2 catinfo = MCategory.get_by_uid(sig) if catinfo.pid == '0000': condition['def_cat_pid'] = sig else: condition['def_cat_uid'] = sig fenye_num = 1 for idx in range(num): ckey = url_arr[idx * 2 + 2] tval = url_arr[idx * 2 + 3] if tval == '0': continue if ckey == 'fenye': # 分页参数。单独处理。 fenye_num = int(tval) continue else: cval = tval ckey = 'tag_' + ckey condition[ckey] = cval if url_arr[1] == 'con': infos = MPost.query_list_pager(condition, fenye_num, kind=catinfo.kind) self.echo_html_list_str(sig, infos) elif url_arr[1] == 'num': allinfos = MPost.query_under_condition(condition, kind=catinfo.kind) self.write( tornado.escape.xhtml_unescape( echo_html_fenye_str( allinfos.count(), fenye_num ) ) )
[ "def", "echo_html", "(", "self", ",", "url_str", ")", ":", "logger", ".", "info", "(", "'info echo html: {0}'", ".", "format", "(", "url_str", ")", ")", "condition", "=", "self", ".", "gen_redis_kw", "(", ")", "url_arr", "=", "self", ".", "parse_url", "(", "url_str", ")", "sig", "=", "url_arr", "[", "0", "]", "num", "=", "(", "len", "(", "url_arr", ")", "-", "2", ")", "//", "2", "catinfo", "=", "MCategory", ".", "get_by_uid", "(", "sig", ")", "if", "catinfo", ".", "pid", "==", "'0000'", ":", "condition", "[", "'def_cat_pid'", "]", "=", "sig", "else", ":", "condition", "[", "'def_cat_uid'", "]", "=", "sig", "fenye_num", "=", "1", "for", "idx", "in", "range", "(", "num", ")", ":", "ckey", "=", "url_arr", "[", "idx", "*", "2", "+", "2", "]", "tval", "=", "url_arr", "[", "idx", "*", "2", "+", "3", "]", "if", "tval", "==", "'0'", ":", "continue", "if", "ckey", "==", "'fenye'", ":", "# 分页参数。单独处理。", "fenye_num", "=", "int", "(", "tval", ")", "continue", "else", ":", "cval", "=", "tval", "ckey", "=", "'tag_'", "+", "ckey", "condition", "[", "ckey", "]", "=", "cval", "if", "url_arr", "[", "1", "]", "==", "'con'", ":", "infos", "=", "MPost", ".", "query_list_pager", "(", "condition", ",", "fenye_num", ",", "kind", "=", "catinfo", ".", "kind", ")", "self", ".", "echo_html_list_str", "(", "sig", ",", "infos", ")", "elif", "url_arr", "[", "1", "]", "==", "'num'", ":", "allinfos", "=", "MPost", ".", "query_under_condition", "(", "condition", ",", "kind", "=", "catinfo", ".", "kind", ")", "self", ".", "write", "(", "tornado", ".", "escape", ".", "xhtml_unescape", "(", "echo_html_fenye_str", "(", "allinfos", ".", "count", "(", ")", ",", "fenye_num", ")", ")", ")" ]
Show the HTML
[ "Show", "the", "HTML" ]
python
train
saltstack/salt
salt/client/ssh/ssh_py_shim.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/ssh_py_shim.py#L159-L172
def unpack_thin(thin_path): ''' Unpack the Salt thin archive. ''' tfile = tarfile.TarFile.gzopen(thin_path) old_umask = os.umask(0o077) # pylint: disable=blacklisted-function tfile.extractall(path=OPTIONS.saltdir) tfile.close() os.umask(old_umask) # pylint: disable=blacklisted-function try: os.unlink(thin_path) except OSError: pass reset_time(OPTIONS.saltdir)
[ "def", "unpack_thin", "(", "thin_path", ")", ":", "tfile", "=", "tarfile", ".", "TarFile", ".", "gzopen", "(", "thin_path", ")", "old_umask", "=", "os", ".", "umask", "(", "0o077", ")", "# pylint: disable=blacklisted-function", "tfile", ".", "extractall", "(", "path", "=", "OPTIONS", ".", "saltdir", ")", "tfile", ".", "close", "(", ")", "os", ".", "umask", "(", "old_umask", ")", "# pylint: disable=blacklisted-function", "try", ":", "os", ".", "unlink", "(", "thin_path", ")", "except", "OSError", ":", "pass", "reset_time", "(", "OPTIONS", ".", "saltdir", ")" ]
Unpack the Salt thin archive.
[ "Unpack", "the", "Salt", "thin", "archive", "." ]
python
train
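The same extraction pattern as a standalone helper, without the shim's OPTIONS global or the mtime reset; paths are supplied by the caller.

import os
import tarfile

def unpack_archive(archive_path, dest_dir):
    # extract a gzip-compressed tar under a restrictive umask, then remove it
    old_umask = os.umask(0o077)
    try:
        with tarfile.open(archive_path, mode="r:gz") as tfile:
            tfile.extractall(path=dest_dir)
    finally:
        os.umask(old_umask)
    try:
        os.unlink(archive_path)
    except OSError:
        pass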
soravux/scoop
examples/shared_example.py
https://github.com/soravux/scoop/blob/d391dfa62f47e49d48328ee9cf08aa114256fd33/examples/shared_example.py#L26-L35
def myFunc(parameter): """This function will be executed on the remote host even if it was not available at launch.""" print('Hello World from {0}!'.format(scoop.worker)) # It is possible to get a constant anywhere print(shared.getConst('myVar')[2]) # Parameters are handled as usual return parameter + 1
[ "def", "myFunc", "(", "parameter", ")", ":", "print", "(", "'Hello World from {0}!'", ".", "format", "(", "scoop", ".", "worker", ")", ")", "# It is possible to get a constant anywhere", "print", "(", "shared", ".", "getConst", "(", "'myVar'", ")", "[", "2", "]", ")", "# Parameters are handled as usual", "return", "parameter", "+", "1" ]
This function will be executed on the remote host even if it was not available at launch.
[ "This", "function", "will", "be", "executed", "on", "the", "remote", "host", "even", "if", "it", "was", "not", "available", "at", "launch", "." ]
python
train
jantman/awslimitchecker
awslimitchecker/limit.py
https://github.com/jantman/awslimitchecker/blob/e50197f70f3d0abcc5cfc7fde6336f548b790e34/awslimitchecker/limit.py#L323-L353
def set_threshold_override(self, warn_percent=None, warn_count=None, crit_percent=None, crit_count=None): """ Override the default warning and critical thresholds used to evaluate this limit's usage. Theresholds can be specified as a percentage of the limit, or as a usage count, or both. **Note:** The percent thresholds (``warn_percent`` and ``crit_percent``) have default values that are set globally for awslimitchecker, unlike the count thresholds. When setting threshold overrides to quiet or suppress alerts for a limit, you **must** set the percent thresholds. If you only set overrides for the ``count`` thresholds, the percent thresholds will continue to be evaluated at their awslimitchecker-wide default, and likely prevent alerts from being suppressed. see :py:meth:`~.check_thresholds` for further information on threshold evaluation. :param warn_percent: new warning threshold, percentage used :type warn_percent: int :param warn_count: new warning threshold, actual count/number :type warn_count: int :param crit_percent: new critical threshold, percentage used :type crit_percent: int :param crit_count: new critical threshold, actual count/number :type crit_count: int """ self.warn_percent = warn_percent self.warn_count = warn_count self.crit_percent = crit_percent self.crit_count = crit_count
[ "def", "set_threshold_override", "(", "self", ",", "warn_percent", "=", "None", ",", "warn_count", "=", "None", ",", "crit_percent", "=", "None", ",", "crit_count", "=", "None", ")", ":", "self", ".", "warn_percent", "=", "warn_percent", "self", ".", "warn_count", "=", "warn_count", "self", ".", "crit_percent", "=", "crit_percent", "self", ".", "crit_count", "=", "crit_count" ]
Override the default warning and critical thresholds used to evaluate this limit's usage. Thresholds can be specified as a percentage of the limit, or as a usage count, or both. **Note:** The percent thresholds (``warn_percent`` and ``crit_percent``) have default values that are set globally for awslimitchecker, unlike the count thresholds. When setting threshold overrides to quiet or suppress alerts for a limit, you **must** set the percent thresholds. If you only set overrides for the ``count`` thresholds, the percent thresholds will continue to be evaluated at their awslimitchecker-wide default, and likely prevent alerts from being suppressed. See :py:meth:`~.check_thresholds` for further information on threshold evaluation. :param warn_percent: new warning threshold, percentage used :type warn_percent: int :param warn_count: new warning threshold, actual count/number :type warn_count: int :param crit_percent: new critical threshold, percentage used :type crit_percent: int :param crit_count: new critical threshold, actual count/number :type crit_count: int
[ "Override", "the", "default", "warning", "and", "critical", "thresholds", "used", "to", "evaluate", "this", "limit", "s", "usage", ".", "Theresholds", "can", "be", "specified", "as", "a", "percentage", "of", "the", "limit", "or", "as", "a", "usage", "count", "or", "both", "." ]
python
train
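A hedged usage sketch for set_threshold_override; it assumes the usual AwsLimitChecker entry point (get_limits()) and an example service/limit name, chosen only to show how percent and count overrides are combined.

from awslimitchecker.checker import AwsLimitChecker

checker = AwsLimitChecker()
limits = checker.get_limits()  # nested dict: service name -> limit name -> AwsLimit
limit = limits['EC2']['Running On-Demand EC2 instances']  # limit name assumed

# Warn at 50% usage or 80 instances; go critical at 75% usage or 95 instances.
# Both percent thresholds are set explicitly, as the docstring above advises.
limit.set_threshold_override(
    warn_percent=50,
    warn_count=80,
    crit_percent=75,
    crit_count=95,
)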
VingtCinq/python-mailchimp
mailchimp3/entities/campaignfeedback.py
https://github.com/VingtCinq/python-mailchimp/blob/1b472f1b64fdde974732ac4b7ed48908bb707260/mailchimp3/entities/campaignfeedback.py#L28-L51
def create(self, campaign_id, data, **queryparams): """ Add feedback on a specific campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { "message": string* } :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = [] """ self.campaign_id = campaign_id if 'message' not in data: raise KeyError('The campaign feedback must have a message') response = self._mc_client._post(url=self._build_path(campaign_id, 'feedback'), data=data, **queryparams) if response is not None: self.feedback_id = response['feedback_id'] else: self.feedback_id = None return response
[ "def", "create", "(", "self", ",", "campaign_id", ",", "data", ",", "*", "*", "queryparams", ")", ":", "self", ".", "campaign_id", "=", "campaign_id", "if", "'message'", "not", "in", "data", ":", "raise", "KeyError", "(", "'The campaign feedback must have a message'", ")", "response", "=", "self", ".", "_mc_client", ".", "_post", "(", "url", "=", "self", ".", "_build_path", "(", "campaign_id", ",", "'feedback'", ")", ",", "data", "=", "data", ",", "*", "*", "queryparams", ")", "if", "response", "is", "not", "None", ":", "self", ".", "feedback_id", "=", "response", "[", "'feedback_id'", "]", "else", ":", "self", ".", "feedback_id", "=", "None", "return", "response" ]
Add feedback on a specific campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { "message": string* } :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []
[ "Add", "feedback", "on", "a", "specific", "campaign", "." ]
python
valid
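A hedged usage sketch for the feedback endpoint above; the client attribute path (client.campaigns.feedback) and all identifiers below are assumptions or placeholders.

from mailchimp3 import MailChimp

# Placeholder credentials and campaign id.
client = MailChimp(mc_api='your-api-key-usXX', mc_user='your-username')

response = client.campaigns.feedback.create(
    campaign_id='abc123def4',
    data={'message': 'Please double-check the subject line before sending.'},
)
# On success the id of the new comment is stored on the entity, per the code above.
print(client.campaigns.feedback.feedback_id)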
pyca/pyopenssl
src/OpenSSL/SSL.py
https://github.com/pyca/pyopenssl/blob/1fbe064c50fd030948141d7d630673761525b0d0/src/OpenSSL/SSL.py#L861-L870
def _check_env_vars_set(self, dir_env_var, file_env_var): """ Check to see if the default cert dir/file environment vars are present. :return: bool """ return ( os.environ.get(file_env_var) is not None or os.environ.get(dir_env_var) is not None )
[ "def", "_check_env_vars_set", "(", "self", ",", "dir_env_var", ",", "file_env_var", ")", ":", "return", "(", "os", ".", "environ", ".", "get", "(", "file_env_var", ")", "is", "not", "None", "or", "os", ".", "environ", ".", "get", "(", "dir_env_var", ")", "is", "not", "None", ")" ]
Check to see if the default cert dir/file environment vars are present. :return: bool
[ "Check", "to", "see", "if", "the", "default", "cert", "dir", "/", "file", "environment", "vars", "are", "present", "." ]
python
test
LuminosoInsight/python-ftfy
ftfy/fixes.py
https://github.com/LuminosoInsight/python-ftfy/blob/476acc6ad270bffe07f97d4f7cf2139acdc69633/ftfy/fixes.py#L582-L600
def restore_byte_a0(byts): """ Some mojibake has been additionally altered by a process that said "hmm, byte A0, that's basically a space!" and replaced it with an ASCII space. When the A0 is part of a sequence that we intend to decode as UTF-8, changing byte A0 to 20 would make it fail to decode. This process finds sequences that would convincingly decode as UTF-8 if byte 20 were changed to A0, and puts back the A0. For the purpose of deciding whether this is a good idea, this step gets a cost of twice the number of bytes that are changed. This is used as a step within `fix_encoding`. """ def replacement(match): "The function to apply when this regex matches." return match.group(0).replace(b'\x20', b'\xa0') return ALTERED_UTF8_RE.sub(replacement, byts)
[ "def", "restore_byte_a0", "(", "byts", ")", ":", "def", "replacement", "(", "match", ")", ":", "\"The function to apply when this regex matches.\"", "return", "match", ".", "group", "(", "0", ")", ".", "replace", "(", "b'\\x20'", ",", "b'\\xa0'", ")", "return", "ALTERED_UTF8_RE", ".", "sub", "(", "replacement", ",", "byts", ")" ]
Some mojibake has been additionally altered by a process that said "hmm, byte A0, that's basically a space!" and replaced it with an ASCII space. When the A0 is part of a sequence that we intend to decode as UTF-8, changing byte A0 to 20 would make it fail to decode. This process finds sequences that would convincingly decode as UTF-8 if byte 20 were changed to A0, and puts back the A0. For the purpose of deciding whether this is a good idea, this step gets a cost of twice the number of bytes that are changed. This is used as a step within `fix_encoding`.
[ "Some", "mojibake", "has", "been", "additionally", "altered", "by", "a", "process", "that", "said", "hmm", "byte", "A0", "that", "s", "basically", "a", "space!", "and", "replaced", "it", "with", "an", "ASCII", "space", ".", "When", "the", "A0", "is", "part", "of", "a", "sequence", "that", "we", "intend", "to", "decode", "as", "UTF", "-", "8", "changing", "byte", "A0", "to", "20", "would", "make", "it", "fail", "to", "decode", "." ]
python
train
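A hedged illustration of restore_byte_a0: b'\xc3\xa0' is the UTF-8 encoding of 'à', so a byte string in which that 0xA0 was flattened to an ASCII space is the kind of sequence the function is meant to repair. The exact behaviour depends on ftfy's internal ALTERED_UTF8_RE, so the output below is stated as an expectation, not a guarantee.

from ftfy.fixes import restore_byte_a0

# 'voilà' encoded as UTF-8 is b'voil\xc3\xa0'; here the 0xa0 has been replaced
# by an ASCII space (0x20), which breaks UTF-8 decoding.
damaged = b'voil\xc3 '
repaired = restore_byte_a0(damaged)
print(repaired)  # expected: b'voil\xc3\xa0', which decodes to 'voilà'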
pixelogik/NearPy
nearpy/hashes/permutation/permutation.py
https://github.com/pixelogik/NearPy/blob/1b534b864d320d875508e95cd2b76b6d8c07a90b/nearpy/hashes/permutation/permutation.py#L40-L65
def build_permuted_index( self, lshash, buckets, num_permutation, beam_size, num_neighbour): """ Build a permutedIndex and store it into the dict self.permutedIndexs. lshash: the binary lshash object (nearpy.hashes.lshash). buckets: the buckets object corresponding to lshash. It's a dict object which can get from nearpy.storage.buckets[lshash.hash_name] num_permutation: the number of sorted randomly-permuted bucket key lists (SRPBKL). beam_size: beam size, details please refer to __init__() in nearpy.hashes.permutation.PermutedIndex num_neighbour: the number of neighbour bucket keys needed to return in self.get_neighbour_keys(). """ # Init a PermutedIndex pi = PermutedIndex( lshash, buckets, num_permutation, beam_size, num_neighbour) # get hash_name hash_name = lshash.hash_name self.permutedIndexs[hash_name] = pi
[ "def", "build_permuted_index", "(", "self", ",", "lshash", ",", "buckets", ",", "num_permutation", ",", "beam_size", ",", "num_neighbour", ")", ":", "# Init a PermutedIndex", "pi", "=", "PermutedIndex", "(", "lshash", ",", "buckets", ",", "num_permutation", ",", "beam_size", ",", "num_neighbour", ")", "# get hash_name", "hash_name", "=", "lshash", ".", "hash_name", "self", ".", "permutedIndexs", "[", "hash_name", "]", "=", "pi" ]
Build a permutedIndex and store it into the dict self.permutedIndexs. lshash: the binary lshash object (nearpy.hashes.lshash). buckets: the buckets object corresponding to lshash. It's a dict object which can get from nearpy.storage.buckets[lshash.hash_name] num_permutation: the number of sorted randomly-permuted bucket key lists (SRPBKL). beam_size: beam size, details please refer to __init__() in nearpy.hashes.permutation.PermutedIndex num_neighbour: the number of neighbour bucket keys needed to return in self.get_neighbour_keys().
[ "Build", "a", "permutedIndex", "and", "store", "it", "into", "the", "dict", "self", ".", "permutedIndexs", ".", "lshash", ":", "the", "binary", "lshash", "object", "(", "nearpy", ".", "hashes", ".", "lshash", ")", ".", "buckets", ":", "the", "buckets", "object", "corresponding", "to", "lshash", ".", "It", "s", "a", "dict", "object", "which", "can", "get", "from", "nearpy", ".", "storage", ".", "buckets", "[", "lshash", ".", "hash_name", "]", "num_permutation", ":", "the", "number", "of", "sorted", "randomly", "-", "permuted", "bucket", "key", "lists", "(", "SRPBKL", ")", ".", "beam_size", ":", "beam", "size", "details", "please", "refer", "to", "__init__", "()", "in", "nearpy", ".", "hashes", ".", "permutation", ".", "PermutedIndex", "num_neighbour", ":", "the", "number", "of", "neighbour", "bucket", "keys", "needed", "to", "return", "in", "self", ".", "get_neighbour_keys", "()", "." ]
python
train
housecanary/hc-api-python
housecanary/utilities.py
https://github.com/housecanary/hc-api-python/blob/2bb9e2208b34e8617575de45934357ee33b8531c/housecanary/utilities.py#L6-L26
def get_readable_time_string(seconds): """Returns human readable string from number of seconds""" seconds = int(seconds) minutes = seconds // 60 seconds = seconds % 60 hours = minutes // 60 minutes = minutes % 60 days = hours // 24 hours = hours % 24 result = "" if days > 0: result += "%d %s " % (days, "Day" if (days == 1) else "Days") if hours > 0: result += "%d %s " % (hours, "Hour" if (hours == 1) else "Hours") if minutes > 0: result += "%d %s " % (minutes, "Minute" if (minutes == 1) else "Minutes") if seconds > 0: result += "%d %s " % (seconds, "Second" if (seconds == 1) else "Seconds") return result.strip()
[ "def", "get_readable_time_string", "(", "seconds", ")", ":", "seconds", "=", "int", "(", "seconds", ")", "minutes", "=", "seconds", "//", "60", "seconds", "=", "seconds", "%", "60", "hours", "=", "minutes", "//", "60", "minutes", "=", "minutes", "%", "60", "days", "=", "hours", "//", "24", "hours", "=", "hours", "%", "24", "result", "=", "\"\"", "if", "days", ">", "0", ":", "result", "+=", "\"%d %s \"", "%", "(", "days", ",", "\"Day\"", "if", "(", "days", "==", "1", ")", "else", "\"Days\"", ")", "if", "hours", ">", "0", ":", "result", "+=", "\"%d %s \"", "%", "(", "hours", ",", "\"Hour\"", "if", "(", "hours", "==", "1", ")", "else", "\"Hours\"", ")", "if", "minutes", ">", "0", ":", "result", "+=", "\"%d %s \"", "%", "(", "minutes", ",", "\"Minute\"", "if", "(", "minutes", "==", "1", ")", "else", "\"Minutes\"", ")", "if", "seconds", ">", "0", ":", "result", "+=", "\"%d %s \"", "%", "(", "seconds", ",", "\"Second\"", "if", "(", "seconds", "==", "1", ")", "else", "\"Seconds\"", ")", "return", "result", ".", "strip", "(", ")" ]
Returns human readable string from number of seconds
[ "Returns", "human", "readable", "string", "from", "number", "of", "seconds" ]
python
train
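A usage sketch grounded directly in the code above; the import path follows the record's module path.

from housecanary.utilities import get_readable_time_string

print(get_readable_time_string(90061))  # "1 Day 1 Hour 1 Minute 1 Second"
print(get_readable_time_string(7200))   # "2 Hours"
print(get_readable_time_string(0))      # "" (nothing accumulates for zero seconds)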
LLNL/certipy
certipy/certipy.py
https://github.com/LLNL/certipy/blob/8705a8ba32655e12021d2893cf1c3c98c697edd7/certipy/certipy.py#L414-L428
def remove_files(self, common_name, delete_dir=False): """Delete files and record associated with this common name""" record = self.remove_record(common_name) if delete_dir: delete_dirs = [] if 'files' in record: key_containing_dir = os.path.dirname(record['files']['key']) delete_dirs.append(key_containing_dir) cert_containing_dir = os.path.dirname(record['files']['cert']) if key_containing_dir != cert_containing_dir: delete_dirs.append(cert_containing_dir) for d in delete_dirs: shutil.rmtree(d) return record
[ "def", "remove_files", "(", "self", ",", "common_name", ",", "delete_dir", "=", "False", ")", ":", "record", "=", "self", ".", "remove_record", "(", "common_name", ")", "if", "delete_dir", ":", "delete_dirs", "=", "[", "]", "if", "'files'", "in", "record", ":", "key_containing_dir", "=", "os", ".", "path", ".", "dirname", "(", "record", "[", "'files'", "]", "[", "'key'", "]", ")", "delete_dirs", ".", "append", "(", "key_containing_dir", ")", "cert_containing_dir", "=", "os", ".", "path", ".", "dirname", "(", "record", "[", "'files'", "]", "[", "'cert'", "]", ")", "if", "key_containing_dir", "!=", "cert_containing_dir", ":", "delete_dirs", ".", "append", "(", "cert_containing_dir", ")", "for", "d", "in", "delete_dirs", ":", "shutil", ".", "rmtree", "(", "d", ")", "return", "record" ]
Delete files and record associated with this common name
[ "Delete", "files", "and", "record", "associated", "with", "this", "common", "name" ]
python
train
fermiPy/fermipy
fermipy/jobs/file_archive.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/file_archive.py#L386-L397
def copy_to_scratch(file_mapping, dry_run=True): """Copy input files to scratch area """ for key, value in file_mapping.items(): if not os.path.exists(key): continue if dry_run: print ("copy %s %s" % (key, value)) else: print ("copy %s %s" % (key, value)) copyfile(key, value) return file_mapping
[ "def", "copy_to_scratch", "(", "file_mapping", ",", "dry_run", "=", "True", ")", ":", "for", "key", ",", "value", "in", "file_mapping", ".", "items", "(", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "key", ")", ":", "continue", "if", "dry_run", ":", "print", "(", "\"copy %s %s\"", "%", "(", "key", ",", "value", ")", ")", "else", ":", "print", "(", "\"copy %s %s\"", "%", "(", "key", ",", "value", ")", ")", "copyfile", "(", "key", ",", "value", ")", "return", "file_mapping" ]
Copy input files to scratch area
[ "Copy", "input", "files", "to", "scratch", "area" ]
python
train
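A hedged usage sketch for copy_to_scratch; the file names and scratch paths are placeholders. As written, keys that do not exist on disk are skipped silently, and dry_run=True prints the same "copy src dst" message without copying.

from fermipy.jobs.file_archive import copy_to_scratch

# Hypothetical mapping of local input files to scratch-area destinations.
file_mapping = {
    'ccube.fits': '/scratch/user/job_001/ccube.fits',
    'srcmap.fits': '/scratch/user/job_001/srcmap.fits',
}

copy_to_scratch(file_mapping, dry_run=True)   # only prints what would be copied
copy_to_scratch(file_mapping, dry_run=False)  # prints and performs the copies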
valohai/valohai-yaml
valohai_yaml/commands.py
https://github.com/valohai/valohai-yaml/blob/3d2e92381633d84cdba039f6905df34c9633a2e1/valohai_yaml/commands.py#L44-L83
def build_command(command, parameter_map): """ Build command line(s) using the given parameter map. Even if passed a single `command`, this function will return a list of shell commands. It is the caller's responsibility to concatenate them, likely using the semicolon or double ampersands. :param command: The command to interpolate params into. :type command: str|list[str] :param parameter_map: A ParameterMap object containing parameter knowledge. :type parameter_map: valohai_yaml.objs.parameter_map.ParameterMap :return: list of commands :rtype: list[str] """ if isinstance(parameter_map, list): # Partially emulate old (pre-0.7) API for this function. parameter_map = LegacyParameterMap(parameter_map) out_commands = [] for command in listify(command): # Only attempt formatting if the string smells like it should be formatted. # This allows the user to include shell syntax in the commands, if required. # (There's still naturally the chance for false-positives, so guard against # those value errors and warn about them.) if interpolable_re.search(command): try: command = interpolable_re.sub( lambda match: _replace_interpolation(parameter_map, match), command, ) except ValueError as exc: # pragma: no cover warnings.warn( 'failed to interpolate into %r: %s' % (command, exc), CommandInterpolationWarning ) out_commands.append(command.strip()) return out_commands
[ "def", "build_command", "(", "command", ",", "parameter_map", ")", ":", "if", "isinstance", "(", "parameter_map", ",", "list", ")", ":", "# Partially emulate old (pre-0.7) API for this function.", "parameter_map", "=", "LegacyParameterMap", "(", "parameter_map", ")", "out_commands", "=", "[", "]", "for", "command", "in", "listify", "(", "command", ")", ":", "# Only attempt formatting if the string smells like it should be formatted.", "# This allows the user to include shell syntax in the commands, if required.", "# (There's still naturally the chance for false-positives, so guard against", "# those value errors and warn about them.)", "if", "interpolable_re", ".", "search", "(", "command", ")", ":", "try", ":", "command", "=", "interpolable_re", ".", "sub", "(", "lambda", "match", ":", "_replace_interpolation", "(", "parameter_map", ",", "match", ")", ",", "command", ",", ")", "except", "ValueError", "as", "exc", ":", "# pragma: no cover", "warnings", ".", "warn", "(", "'failed to interpolate into %r: %s'", "%", "(", "command", ",", "exc", ")", ",", "CommandInterpolationWarning", ")", "out_commands", ".", "append", "(", "command", ".", "strip", "(", ")", ")", "return", "out_commands" ]
Build command line(s) using the given parameter map. Even if passed a single `command`, this function will return a list of shell commands. It is the caller's responsibility to concatenate them, likely using the semicolon or double ampersands. :param command: The command to interpolate params into. :type command: str|list[str] :param parameter_map: A ParameterMap object containing parameter knowledge. :type parameter_map: valohai_yaml.objs.parameter_map.ParameterMap :return: list of commands :rtype: list[str]
[ "Build", "command", "line", "(", "s", ")", "using", "the", "given", "parameter", "map", "." ]
python
train
seung-lab/cloud-volume
cloudvolume/lib.py
https://github.com/seung-lab/cloud-volume/blob/d2fd4500333f1bc3cd3e3919a8b649cec5d8e214/cloudvolume/lib.py#L206-L212
def divisors(n): """Generate the divisors of n""" for i in range(1, int(math.sqrt(n) + 1)): if n % i == 0: yield i if i*i != n: yield n / i
[ "def", "divisors", "(", "n", ")", ":", "for", "i", "in", "range", "(", "1", ",", "int", "(", "math", ".", "sqrt", "(", "n", ")", "+", "1", ")", ")", ":", "if", "n", "%", "i", "==", "0", ":", "yield", "i", "if", "i", "*", "i", "!=", "n", ":", "yield", "n", "/", "i" ]
Generate the divisors of n
[ "Generate", "the", "divisors", "of", "n" ]
python
train
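A usage sketch grounded in the generator above. Note that the mirrored divisor is produced with true division (n / i), so under Python 3 those values come back as floats; n // i would keep everything integral.

from cloudvolume.lib import divisors

print(sorted(divisors(36)))
# [1, 2, 3, 4, 6, 9.0, 12.0, 18.0, 36.0]
# The mirrored divisors (9, 12, 18, 36) appear as floats because of `n / i`.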
kytos/python-openflow
pyof/foundation/network_types.py
https://github.com/kytos/python-openflow/blob/4f2d0d08ab28e102ed88fe57a4ee17729f1e1bb7/pyof/foundation/network_types.py#L640-L649
def value(self): """Return sub type and sub value as binary data. Returns: :class:`~pyof.foundation.basic_types.BinaryData`: BinaryData calculated. """ binary = UBInt8(self.sub_type).pack() + self.sub_value.pack() return BinaryData(binary)
[ "def", "value", "(", "self", ")", ":", "binary", "=", "UBInt8", "(", "self", ".", "sub_type", ")", ".", "pack", "(", ")", "+", "self", ".", "sub_value", ".", "pack", "(", ")", "return", "BinaryData", "(", "binary", ")" ]
Return sub type and sub value as binary data. Returns: :class:`~pyof.foundation.basic_types.BinaryData`: BinaryData calculated.
[ "Return", "sub", "type", "and", "sub", "value", "as", "binary", "data", "." ]
python
train