Dataset schema (each record below repeats these fields in this order):
  repo              string (7 to 54 chars)
  path              string (4 to 192 chars)
  url               string (87 to 284 chars)
  code              string (78 to 104k chars)
  code_tokens       list
  docstring         string (1 to 46.9k chars)
  docstring_tokens  list
  language          string (1 distinct value)
  partition         string (3 distinct values)
aganezov/bg
bg/grimm.py
https://github.com/aganezov/bg/blob/1ec758193441e49e7b34e0da09571480f4c24455/bg/grimm.py#L342-L360
def get_blocks_in_grimm_from_breakpoint_graph(bg): """ :param bg: a breakpoint graph, that contains all the information :type bg: ``bg.breakpoint_graph.BreakpointGraph`` :return: list of strings, which represent genomes present in breakpoint graph as orders of blocks and is compatible with GRIMM format """ result = [] genomes = bg.get_overall_set_of_colors() for genome in genomes: genome_graph = bg.get_genome_graph(color=genome) genome_blocks_orders = genome_graph.get_blocks_order() blocks_orders = genome_blocks_orders[genome] if len(blocks_orders) > 0: result.append(">{genome_name}".format(genome_name=genome.name)) for chr_type, blocks_order in blocks_orders: string = " ".join(value if sign == "+" else sign + value for sign, value in blocks_order) string += " {chr_type}".format(chr_type=chr_type) result.append(string) return result
[ "def", "get_blocks_in_grimm_from_breakpoint_graph", "(", "bg", ")", ":", "result", "=", "[", "]", "genomes", "=", "bg", ".", "get_overall_set_of_colors", "(", ")", "for", "genome", "in", "genomes", ":", "genome_graph", "=", "bg", ".", "get_genome_graph", "(", "color", "=", "genome", ")", "genome_blocks_orders", "=", "genome_graph", ".", "get_blocks_order", "(", ")", "blocks_orders", "=", "genome_blocks_orders", "[", "genome", "]", "if", "len", "(", "blocks_orders", ")", ">", "0", ":", "result", ".", "append", "(", "\">{genome_name}\"", ".", "format", "(", "genome_name", "=", "genome", ".", "name", ")", ")", "for", "chr_type", ",", "blocks_order", "in", "blocks_orders", ":", "string", "=", "\" \"", ".", "join", "(", "value", "if", "sign", "==", "\"+\"", "else", "sign", "+", "value", "for", "sign", ",", "value", "in", "blocks_order", ")", "string", "+=", "\" {chr_type}\"", ".", "format", "(", "chr_type", "=", "chr_type", ")", "result", ".", "append", "(", "string", ")", "return", "result" ]
:param bg: a breakpoint graph, that contains all the information :type bg: ``bg.breakpoint_graph.BreakpointGraph`` :return: list of strings, which represent genomes present in breakpoint graph as orders of blocks and is compatible with GRIMM format
[ ":", "param", "bg", ":", "a", "breakpoint", "graph", "that", "contians", "all", "the", "information", ":", "type", "bg", ":", "bg", ".", "breakpoint_graph", ".", "BreakpointGraph", ":", "return", ":", "list", "of", "strings", "which", "represent", "genomes", "present", "in", "breakpoint", "graph", "as", "orders", "of", "blocks", "and", "is", "compatible", "with", "GRIMM", "format" ]
python
train
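The block-order formatting in get_blocks_in_grimm_from_breakpoint_graph reduces to joining signed block names and appending a chromosome-type marker. A minimal sketch of just that step, with made-up block data instead of the bg API (the "$"/"@" markers follow GRIMM's convention for linear/circular chromosomes):

```python
# Hypothetical (sign, block_name) pairs plus a chromosome-type marker.
blocks_order = [("+", "1"), ("-", "2"), ("+", "3")]
chr_type = "$"  # "$" = linear chromosome, "@" = circular, as in GRIMM files

# Same join logic as in the function: "+" signs are dropped, "-" signs are kept.
line = " ".join(value if sign == "+" else sign + value for sign, value in blocks_order)
line += " {chr_type}".format(chr_type=chr_type)
print(line)  # -> 1 -2 3 $
```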
uw-it-aca/django-saferecipient-email-backend
saferecipient/__init__.py
https://github.com/uw-it-aca/django-saferecipient-email-backend/blob/8af20dece5a668d6bcad5dea75cc60871a4bd9fa/saferecipient/__init__.py#L56-L61
def _is_whitelisted(self, email): """Check if an email is in the whitelist. If there's no whitelist, it's assumed it's not whitelisted.""" return hasattr(settings, "SAFE_EMAIL_WHITELIST") and \ any(re.match(m, email) for m in settings.SAFE_EMAIL_WHITELIST)
[ "def", "_is_whitelisted", "(", "self", ",", "email", ")", ":", "return", "hasattr", "(", "settings", ",", "\"SAFE_EMAIL_WHITELIST\"", ")", "and", "any", "(", "re", ".", "match", "(", "m", ",", "email", ")", "for", "m", "in", "settings", ".", "SAFE_EMAIL_WHITELIST", ")" ]
Check if an email is in the whitelist. If there's no whitelist, it's assumed it's not whitelisted.
[ "Check", "if", "an", "email", "is", "in", "the", "whitelist", ".", "If", "there", "s", "no", "whitelist", "it", "s", "assumed", "it", "s", "not", "whitelisted", "." ]
python
train
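The whitelist check above is easy to exercise outside Django. A self-contained sketch with a hypothetical settings stand-in (the real backend reads django.conf.settings.SAFE_EMAIL_WHITELIST):

```python
import re

class FakeSettings:
    # Hypothetical whitelist of regular expressions matched against addresses.
    SAFE_EMAIL_WHITELIST = [r".*@example\.com$", r"^admin@corp\.org$"]

settings = FakeSettings()

def is_whitelisted(email):
    """Mirror of _is_whitelisted: if there is no whitelist, nothing is whitelisted."""
    return hasattr(settings, "SAFE_EMAIL_WHITELIST") and \
        any(re.match(m, email) for m in settings.SAFE_EMAIL_WHITELIST)

print(is_whitelisted("user@example.com"))    # True
print(is_whitelisted("user@elsewhere.net"))  # False
```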
quantumlib/Cirq
cirq/linalg/diagonalize.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/linalg/diagonalize.py#L153-L230
def bidiagonalize_real_matrix_pair_with_symmetric_products( mat1: np.ndarray, mat2: np.ndarray, *, rtol: float = 1e-5, atol: float = 1e-8, check_preconditions: bool = True) -> Tuple[np.ndarray, np.ndarray]: """Finds orthogonal matrices that diagonalize both mat1 and mat2. Requires mat1 and mat2 to be real. Requires mat1.T @ mat2 to be symmetric. Requires mat1 @ mat2.T to be symmetric. Args: mat1: One of the real matrices. mat2: The other real matrix. rtol: Relative numeric error threshold. atol: Absolute numeric error threshold. check_preconditions: If set, verifies that the inputs are real, and that mat1.T @ mat2 and mat1 @ mat2.T are both symmetric. Defaults to set. Returns: A tuple (L, R) of two orthogonal matrices, such that both L @ mat1 @ R and L @ mat2 @ R are diagonal matrices. Raises: ValueError: Matrices don't meet preconditions (e.g. not real). """ if check_preconditions: if np.any(np.imag(mat1) != 0): raise ValueError('mat1 must be real.') if np.any(np.imag(mat2) != 0): raise ValueError('mat2 must be real.') if not predicates.is_hermitian(mat1.dot(mat2.T), rtol=rtol, atol=atol): raise ValueError('mat1 @ mat2.T must be symmetric.') if not predicates.is_hermitian(mat1.T.dot(mat2), rtol=rtol, atol=atol): raise ValueError('mat1.T @ mat2 must be symmetric.') # Use SVD to bi-diagonalize the first matrix. base_left, base_diag, base_right = _svd_handling_empty(np.real(mat1)) base_diag = np.diag(base_diag) # Determine where we switch between diagonalization-fixup strategies. dim = base_diag.shape[0] rank = dim while rank > 0 and tolerance.all_near_zero(base_diag[rank - 1, rank - 1], atol=atol): rank -= 1 base_diag = base_diag[:rank, :rank] # Try diagonalizing the second matrix with the same factors as the first. semi_corrected = base_left.T.dot(np.real(mat2)).dot(base_right.T) # Fix up the part of the second matrix's diagonalization that's matched # against non-zero diagonal entries in the first matrix's diagonalization # by performing simultaneous diagonalization. overlap = semi_corrected[:rank, :rank] overlap_adjust = diagonalize_real_symmetric_and_sorted_diagonal_matrices( overlap, base_diag, rtol=rtol, atol=atol, check_preconditions=check_preconditions) # Fix up the part of the second matrix's diagonalization that's matched # against zeros in the first matrix's diagonalization by performing an SVD. extra = semi_corrected[rank:, rank:] extra_left_adjust, _, extra_right_adjust = _svd_handling_empty(extra) # Merge the fixup factors into the initial diagonalization. left_adjust = combinators.block_diag(overlap_adjust, extra_left_adjust) right_adjust = combinators.block_diag(overlap_adjust.T, extra_right_adjust) left = left_adjust.T.dot(base_left.T) right = base_right.T.dot(right_adjust.T) return left, right
[ "def", "bidiagonalize_real_matrix_pair_with_symmetric_products", "(", "mat1", ":", "np", ".", "ndarray", ",", "mat2", ":", "np", ".", "ndarray", ",", "*", ",", "rtol", ":", "float", "=", "1e-5", ",", "atol", ":", "float", "=", "1e-8", ",", "check_preconditions", ":", "bool", "=", "True", ")", "->", "Tuple", "[", "np", ".", "ndarray", ",", "np", ".", "ndarray", "]", ":", "if", "check_preconditions", ":", "if", "np", ".", "any", "(", "np", ".", "imag", "(", "mat1", ")", "!=", "0", ")", ":", "raise", "ValueError", "(", "'mat1 must be real.'", ")", "if", "np", ".", "any", "(", "np", ".", "imag", "(", "mat2", ")", "!=", "0", ")", ":", "raise", "ValueError", "(", "'mat2 must be real.'", ")", "if", "not", "predicates", ".", "is_hermitian", "(", "mat1", ".", "dot", "(", "mat2", ".", "T", ")", ",", "rtol", "=", "rtol", ",", "atol", "=", "atol", ")", ":", "raise", "ValueError", "(", "'mat1 @ mat2.T must be symmetric.'", ")", "if", "not", "predicates", ".", "is_hermitian", "(", "mat1", ".", "T", ".", "dot", "(", "mat2", ")", ",", "rtol", "=", "rtol", ",", "atol", "=", "atol", ")", ":", "raise", "ValueError", "(", "'mat1.T @ mat2 must be symmetric.'", ")", "# Use SVD to bi-diagonalize the first matrix.", "base_left", ",", "base_diag", ",", "base_right", "=", "_svd_handling_empty", "(", "np", ".", "real", "(", "mat1", ")", ")", "base_diag", "=", "np", ".", "diag", "(", "base_diag", ")", "# Determine where we switch between diagonalization-fixup strategies.", "dim", "=", "base_diag", ".", "shape", "[", "0", "]", "rank", "=", "dim", "while", "rank", ">", "0", "and", "tolerance", ".", "all_near_zero", "(", "base_diag", "[", "rank", "-", "1", ",", "rank", "-", "1", "]", ",", "atol", "=", "atol", ")", ":", "rank", "-=", "1", "base_diag", "=", "base_diag", "[", ":", "rank", ",", ":", "rank", "]", "# Try diagonalizing the second matrix with the same factors as the first.", "semi_corrected", "=", "base_left", ".", "T", ".", "dot", "(", "np", ".", "real", "(", "mat2", ")", ")", ".", "dot", "(", "base_right", ".", "T", ")", "# Fix up the part of the second matrix's diagonalization that's matched", "# against non-zero diagonal entries in the first matrix's diagonalization", "# by performing simultaneous diagonalization.", "overlap", "=", "semi_corrected", "[", ":", "rank", ",", ":", "rank", "]", "overlap_adjust", "=", "diagonalize_real_symmetric_and_sorted_diagonal_matrices", "(", "overlap", ",", "base_diag", ",", "rtol", "=", "rtol", ",", "atol", "=", "atol", ",", "check_preconditions", "=", "check_preconditions", ")", "# Fix up the part of the second matrix's diagonalization that's matched", "# against zeros in the first matrix's diagonalization by performing an SVD.", "extra", "=", "semi_corrected", "[", "rank", ":", ",", "rank", ":", "]", "extra_left_adjust", ",", "_", ",", "extra_right_adjust", "=", "_svd_handling_empty", "(", "extra", ")", "# Merge the fixup factors into the initial diagonalization.", "left_adjust", "=", "combinators", ".", "block_diag", "(", "overlap_adjust", ",", "extra_left_adjust", ")", "right_adjust", "=", "combinators", ".", "block_diag", "(", "overlap_adjust", ".", "T", ",", "extra_right_adjust", ")", "left", "=", "left_adjust", ".", "T", ".", "dot", "(", "base_left", ".", "T", ")", "right", "=", "base_right", ".", "T", ".", "dot", "(", "right_adjust", ".", "T", ")", "return", "left", ",", "right" ]
Finds orthogonal matrices that diagonalize both mat1 and mat2. Requires mat1 and mat2 to be real. Requires mat1.T @ mat2 to be symmetric. Requires mat1 @ mat2.T to be symmetric. Args: mat1: One of the real matrices. mat2: The other real matrix. rtol: Relative numeric error threshold. atol: Absolute numeric error threshold. check_preconditions: If set, verifies that the inputs are real, and that mat1.T @ mat2 and mat1 @ mat2.T are both symmetric. Defaults to set. Returns: A tuple (L, R) of two orthogonal matrices, such that both L @ mat1 @ R and L @ mat2 @ R are diagonal matrices. Raises: ValueError: Matrices don't meet preconditions (e.g. not real).
[ "Finds", "orthogonal", "matrices", "that", "diagonalize", "both", "mat1", "and", "mat2", "." ]
python
train
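A small usage sketch for bidiagonalize_real_matrix_pair_with_symmetric_products. Choosing mat1 = I makes both symmetry preconditions collapse to "mat2 is symmetric"; the import path is assumed to be the public cirq.linalg namespace rather than confirmed from this snippet:

```python
import numpy as np
from cirq.linalg import bidiagonalize_real_matrix_pair_with_symmetric_products

# With mat1 = I, mat1.T @ mat2 and mat1 @ mat2.T both equal mat2,
# so any real symmetric mat2 satisfies the preconditions.
mat1 = np.eye(3)
mat2 = np.array([[2., 1., 0.],
                 [1., 3., 0.],
                 [0., 0., 1.]])

left, right = bidiagonalize_real_matrix_pair_with_symmetric_products(mat1, mat2)
print(np.round(left @ mat1 @ right, 6))  # diagonal
print(np.round(left @ mat2 @ right, 6))  # diagonal
```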
dshean/pygeotools
pygeotools/lib/filtlib.py
https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/filtlib.py#L341-L380
def median_fltr_skimage(dem, radius=3, erode=1, origmask=False): """ Older skimage.filter.median_filter This smooths, removes noise and fills in nodata areas with median of valid pixels! Effectively an inpainting routine """ #Note, ndimage doesn't properly handle ma - convert to nan dem = malib.checkma(dem) dem = dem.astype(np.float64) #Mask islands if erode > 0: print("Eroding islands smaller than %s pixels" % (erode * 2)) dem = malib.mask_islands(dem, iterations=erode) print("Applying median filter with radius %s" % radius) #Note: this funcitonality was present in scikit-image 0.9.3 import skimage.filter dem_filt_med = skimage.filter.median_filter(dem, radius, mask=~dem.mask) #Starting in version 0.10.0, this is the new filter #This is the new filter, but only supports uint8 or unit16 #import skimage.filters #import skimage.morphology #dem_filt_med = skimage.filters.rank.median(dem, disk(radius), mask=~dem.mask) #dem_filt_med = skimage.filters.median(dem, skimage.morphology.disk(radius), mask=~dem.mask) #Now mask all nans #skimage assigns the minimum value as nodata #CHECK THIS, seems pretty hacky #Also, looks like some valid values are masked at this stage, even though they should be above min ndv = np.min(dem_filt_med) #ndv = dem_filt_med.min() + 0.001 out = np.ma.masked_less_equal(dem_filt_med, ndv) #Should probably replace the ndv with original ndv out.set_fill_value(dem.fill_value) if origmask: print("Applying original mask") #Allow filling of interior holes, but use original outer edge #maskfill = malib.maskfill(dem, iterations=radius) maskfill = malib.maskfill(dem) #dem_filt_gauss = np.ma.array(dem_filt_gauss, mask=dem.mask, fill_value=dem.fill_value) out = np.ma.array(out, mask=maskfill, fill_value=dem.fill_value) return out
[ "def", "median_fltr_skimage", "(", "dem", ",", "radius", "=", "3", ",", "erode", "=", "1", ",", "origmask", "=", "False", ")", ":", "#Note, ndimage doesn't properly handle ma - convert to nan", "dem", "=", "malib", ".", "checkma", "(", "dem", ")", "dem", "=", "dem", ".", "astype", "(", "np", ".", "float64", ")", "#Mask islands", "if", "erode", ">", "0", ":", "print", "(", "\"Eroding islands smaller than %s pixels\"", "%", "(", "erode", "*", "2", ")", ")", "dem", "=", "malib", ".", "mask_islands", "(", "dem", ",", "iterations", "=", "erode", ")", "print", "(", "\"Applying median filter with radius %s\"", "%", "radius", ")", "#Note: this funcitonality was present in scikit-image 0.9.3", "import", "skimage", ".", "filter", "dem_filt_med", "=", "skimage", ".", "filter", ".", "median_filter", "(", "dem", ",", "radius", ",", "mask", "=", "~", "dem", ".", "mask", ")", "#Starting in version 0.10.0, this is the new filter", "#This is the new filter, but only supports uint8 or unit16", "#import skimage.filters", "#import skimage.morphology ", "#dem_filt_med = skimage.filters.rank.median(dem, disk(radius), mask=~dem.mask)", "#dem_filt_med = skimage.filters.median(dem, skimage.morphology.disk(radius), mask=~dem.mask)", "#Now mask all nans", "#skimage assigns the minimum value as nodata", "#CHECK THIS, seems pretty hacky", "#Also, looks like some valid values are masked at this stage, even though they should be above min", "ndv", "=", "np", ".", "min", "(", "dem_filt_med", ")", "#ndv = dem_filt_med.min() + 0.001", "out", "=", "np", ".", "ma", ".", "masked_less_equal", "(", "dem_filt_med", ",", "ndv", ")", "#Should probably replace the ndv with original ndv", "out", ".", "set_fill_value", "(", "dem", ".", "fill_value", ")", "if", "origmask", ":", "print", "(", "\"Applying original mask\"", ")", "#Allow filling of interior holes, but use original outer edge", "#maskfill = malib.maskfill(dem, iterations=radius)", "maskfill", "=", "malib", ".", "maskfill", "(", "dem", ")", "#dem_filt_gauss = np.ma.array(dem_filt_gauss, mask=dem.mask, fill_value=dem.fill_value)", "out", "=", "np", ".", "ma", ".", "array", "(", "out", ",", "mask", "=", "maskfill", ",", "fill_value", "=", "dem", ".", "fill_value", ")", "return", "out" ]
Older skimage.filter.median_filter This smooths, removes noise and fills in nodata areas with median of valid pixels! Effectively an inpainting routine
[ "Older", "skimage", ".", "filter", ".", "median_filter" ]
python
train
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L850-L871
def _sanity_check_files(item, files): """Ensure input files correspond with supported approaches. Handles BAM, fastqs, plus split fastqs. """ msg = None file_types = set([("bam" if x.endswith(".bam") else "fastq") for x in files if x]) if len(file_types) > 1: msg = "Found multiple file types (BAM and fastq)" file_type = file_types.pop() if file_type == "bam": if len(files) != 1: msg = "Expect a single BAM file input as input" elif file_type == "fastq": if len(files) not in [1, 2] and item["analysis"].lower() != "scrna-seq": pair_types = set([len(xs) for xs in fastq.combine_pairs(files)]) if len(pair_types) != 1 or pair_types.pop() not in [1, 2]: msg = "Expect either 1 (single end) or 2 (paired end) fastq inputs" if len(files) == 2 and files[0] == files[1]: msg = "Expect both fastq files to not be the same" if msg: raise ValueError("%s for %s: %s" % (msg, item.get("description", ""), files))
[ "def", "_sanity_check_files", "(", "item", ",", "files", ")", ":", "msg", "=", "None", "file_types", "=", "set", "(", "[", "(", "\"bam\"", "if", "x", ".", "endswith", "(", "\".bam\"", ")", "else", "\"fastq\"", ")", "for", "x", "in", "files", "if", "x", "]", ")", "if", "len", "(", "file_types", ")", ">", "1", ":", "msg", "=", "\"Found multiple file types (BAM and fastq)\"", "file_type", "=", "file_types", ".", "pop", "(", ")", "if", "file_type", "==", "\"bam\"", ":", "if", "len", "(", "files", ")", "!=", "1", ":", "msg", "=", "\"Expect a single BAM file input as input\"", "elif", "file_type", "==", "\"fastq\"", ":", "if", "len", "(", "files", ")", "not", "in", "[", "1", ",", "2", "]", "and", "item", "[", "\"analysis\"", "]", ".", "lower", "(", ")", "!=", "\"scrna-seq\"", ":", "pair_types", "=", "set", "(", "[", "len", "(", "xs", ")", "for", "xs", "in", "fastq", ".", "combine_pairs", "(", "files", ")", "]", ")", "if", "len", "(", "pair_types", ")", "!=", "1", "or", "pair_types", ".", "pop", "(", ")", "not", "in", "[", "1", ",", "2", "]", ":", "msg", "=", "\"Expect either 1 (single end) or 2 (paired end) fastq inputs\"", "if", "len", "(", "files", ")", "==", "2", "and", "files", "[", "0", "]", "==", "files", "[", "1", "]", ":", "msg", "=", "\"Expect both fastq files to not be the same\"", "if", "msg", ":", "raise", "ValueError", "(", "\"%s for %s: %s\"", "%", "(", "msg", ",", "item", ".", "get", "(", "\"description\"", ",", "\"\"", ")", ",", "files", ")", ")" ]
Ensure input files correspond with supported approaches. Handles BAM, fastqs, plus split fastqs.
[ "Ensure", "input", "files", "correspond", "with", "supported", "approaches", "." ]
python
train
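The heart of _sanity_check_files is classifying the inputs as BAM versus fastq and rejecting mixtures. A standalone sketch of just that classification, without the bcbio-specific pairing rules:

```python
def classify_inputs(files):
    """Return 'bam' or 'fastq'; raise if the two types are mixed (mirrors the check above)."""
    file_types = {("bam" if f.endswith(".bam") else "fastq") for f in files if f}
    if len(file_types) > 1:
        raise ValueError("Found multiple file types (BAM and fastq)")
    return file_types.pop()

print(classify_inputs(["sample.bam"]))                  # bam
print(classify_inputs(["r1.fastq.gz", "r2.fastq.gz"]))  # fastq
```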
adrienverge/yamllint
yamllint/rules/common.py
https://github.com/adrienverge/yamllint/blob/fec2c2fba736cabf6bee6b5eeb905cab0dc820f6/yamllint/rules/common.py#L51-L58
def get_line_indent(token): """Finds the indent of the line the token starts in.""" start = token.start_mark.buffer.rfind('\n', 0, token.start_mark.pointer) + 1 content = start while token.start_mark.buffer[content] == ' ': content += 1 return content - start
[ "def", "get_line_indent", "(", "token", ")", ":", "start", "=", "token", ".", "start_mark", ".", "buffer", ".", "rfind", "(", "'\\n'", ",", "0", ",", "token", ".", "start_mark", ".", "pointer", ")", "+", "1", "content", "=", "start", "while", "token", ".", "start_mark", ".", "buffer", "[", "content", "]", "==", "' '", ":", "content", "+=", "1", "return", "content", "-", "start" ]
Finds the indent of the line the token starts in.
[ "Finds", "the", "indent", "of", "the", "line", "the", "token", "starts", "in", "." ]
python
train
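get_line_indent only needs the token's underlying buffer and character offset. A minimal standalone version over a plain string, with a hypothetical offset, shows the same arithmetic:

```python
def line_indent(buffer, pointer):
    """Number of leading spaces on the line containing position `pointer`."""
    start = buffer.rfind('\n', 0, pointer) + 1
    content = start
    while buffer[content] == ' ':
        content += 1
    return content - start

buf = "key:\n    nested: value\n"
# Offset of the 'n' in "nested", standing in for token.start_mark.pointer.
print(line_indent(buf, buf.index("nested")))  # -> 4
```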
note35/sinon
sinon/lib/stub.py
https://github.com/note35/sinon/blob/f1d551b679b393d64d926a8a279320904c38d0f5/sinon/lib/stub.py#L89-L99
def __get_call_count(self, args, kwargs, args_list, kwargs_list): """ Args: args: tuple, the arguments input by the user kwargs: dictionary, the keyword arguments input by the user args_list: list, the tuples of args from all the times this stub was called kwargs_list: list, the dictionaries of kwargs from all the times this stub was called Returns: integer, the number of times this combination of args/kwargs has been called """ return len(self.__get_matching_indices(args, kwargs, args_list, kwargs_list))
[ "def", "__get_call_count", "(", "self", ",", "args", ",", "kwargs", ",", "args_list", ",", "kwargs_list", ")", ":", "return", "len", "(", "self", ".", "__get_matching_indices", "(", "args", ",", "kwargs", ",", "args_list", ",", "kwargs_list", ")", ")" ]
Args: args: tuple, the arguments input by the user kwargs: dictionary, the keyword arguments input by the user args_list: list, the tuples of args from all the times this stub was called kwargs_list: list, the dictionaries of kwargs from all the times this stub was called Returns: integer, the number of times this combination of args/kwargs has been called
[ "Args", ":", "args", ":", "tuple", "the", "arguments", "inputed", "by", "the", "user", "kwargs", ":", "dictionary", "the", "keyword", "arguments", "inputed", "by", "the", "user", "args_list", ":", "list", "the", "tuples", "of", "args", "from", "all", "the", "times", "this", "stub", "was", "called", "kwargs_list", ":", "list", "the", "dictionaries", "of", "kwargs", "from", "all", "the", "times", "this", "stub", "was", "called", "Returns", ":", "integer", "the", "number", "of", "times", "this", "combination", "of", "args", "/", "kwargs", "has", "been", "called" ]
python
train
mitsei/dlkit
dlkit/handcar/learning/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/learning/sessions.py#L3754-L3773
def get_objective_bank_form_for_create(self, objective_bank_record_types=None): """Gets the objective bank form for creating new objective banks. A new form should be requested for each create transaction. arg: objectiveBankRecordTypes (osid.type.Type): array of objective bank record types return: (osid.learning.ObjectiveBankForm) - the objective bank form raise: NullArgument - objectiveBankRecordTypes is null raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - unable to get form for requested record types. compliance: mandatory - This method must be implemented. """ if objective_bank_record_types is None: pass # Still need to deal with the record_types argument objective_bank_form = objects.ObjectiveBankForm() self._forms[objective_bank_form.get_id().get_identifier()] = not CREATED return objective_bank_form
[ "def", "get_objective_bank_form_for_create", "(", "self", ",", "objective_bank_record_types", "=", "None", ")", ":", "if", "objective_bank_record_types", "is", "None", ":", "pass", "# Still need to deal with the record_types argument", "objective_bank_form", "=", "objects", ".", "ObjectiveBankForm", "(", ")", "self", ".", "_forms", "[", "objective_bank_form", ".", "get_id", "(", ")", ".", "get_identifier", "(", ")", "]", "=", "not", "CREATED", "return", "objective_bank_form" ]
Gets the objective bank form for creating new objective banks. A new form should be requested for each create transaction. arg: objectiveBankRecordTypes (osid.type.Type): array of objective bank record types return: (osid.learning.ObjectiveBankForm) - the objective bank form raise: NullArgument - objectiveBankRecordTypes is null raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - unable to get form for requested record types. compliance: mandatory - This method must be implemented.
[ "Gets", "the", "objective", "bank", "form", "for", "creating", "new", "objective", "banks", ".", "A", "new", "form", "should", "be", "requested", "for", "each", "create", "transaction", ".", "arg", ":", "objectiveBankRecordTypes", "(", "osid", ".", "type", ".", "Type", ")", ":", "array", "of", "objective", "bank", "record", "types", "return", ":", "(", "osid", ".", "learning", ".", "ObjectiveBankForm", ")", "-", "the", "objective", "bank", "form", "raise", ":", "NullArgument", "-", "objectiveBankRecordTypes", "is", "null", "raise", ":", "OperationFailed", "-", "unable", "to", "complete", "request", "raise", ":", "PermissionDenied", "-", "authorization", "failure", "raise", ":", "Unsupported", "-", "unable", "to", "get", "form", "for", "requested", "record", "types", ".", "compliance", ":", "mandatory", "-", "This", "method", "must", "be", "implemented", "." ]
python
train
google/grr
grr/core/grr_response_core/lib/rdfvalues/client.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/rdfvalues/client.py#L534-L572
def FromCurrentSystem(cls): """Fill a Uname from the currently running platform.""" uname = platform.uname() fqdn = socket.getfqdn() system = uname[0] architecture, _ = platform.architecture() if system == "Windows": service_pack = platform.win32_ver()[2] kernel = uname[3] # 5.1.2600 release = uname[2] # XP, 2000, 7 version = uname[3] + service_pack # 5.1.2600 SP3, 6.1.7601 SP1 elif system == "Darwin": kernel = uname[2] # 12.2.0 release = "OSX" # OSX version = platform.mac_ver()[0] # 10.8.2 elif system == "Linux": kernel = uname[2] # 3.2.5 release = platform.linux_distribution()[0] # Ubuntu version = platform.linux_distribution()[1] # 12.04 # Emulate PEP 425 naming conventions - e.g. cp27-cp27mu-linux_x86_64. if pep425tags: pep425tag = "%s%s-%s-%s" % ( pep425tags.get_abbr_impl(), pep425tags.get_impl_ver(), str(pep425tags.get_abi_tag()).lower(), pep425tags.get_platform()) else: # For example: windows_7_amd64 pep425tag = "%s_%s_%s" % (system, release, architecture) return cls( system=system, architecture=architecture, release=release, version=version, machine=uname[4], # x86, x86_64 kernel=kernel, fqdn=fqdn, pep425tag=pep425tag, )
[ "def", "FromCurrentSystem", "(", "cls", ")", ":", "uname", "=", "platform", ".", "uname", "(", ")", "fqdn", "=", "socket", ".", "getfqdn", "(", ")", "system", "=", "uname", "[", "0", "]", "architecture", ",", "_", "=", "platform", ".", "architecture", "(", ")", "if", "system", "==", "\"Windows\"", ":", "service_pack", "=", "platform", ".", "win32_ver", "(", ")", "[", "2", "]", "kernel", "=", "uname", "[", "3", "]", "# 5.1.2600", "release", "=", "uname", "[", "2", "]", "# XP, 2000, 7", "version", "=", "uname", "[", "3", "]", "+", "service_pack", "# 5.1.2600 SP3, 6.1.7601 SP1", "elif", "system", "==", "\"Darwin\"", ":", "kernel", "=", "uname", "[", "2", "]", "# 12.2.0", "release", "=", "\"OSX\"", "# OSX", "version", "=", "platform", ".", "mac_ver", "(", ")", "[", "0", "]", "# 10.8.2", "elif", "system", "==", "\"Linux\"", ":", "kernel", "=", "uname", "[", "2", "]", "# 3.2.5", "release", "=", "platform", ".", "linux_distribution", "(", ")", "[", "0", "]", "# Ubuntu", "version", "=", "platform", ".", "linux_distribution", "(", ")", "[", "1", "]", "# 12.04", "# Emulate PEP 425 naming conventions - e.g. cp27-cp27mu-linux_x86_64.", "if", "pep425tags", ":", "pep425tag", "=", "\"%s%s-%s-%s\"", "%", "(", "pep425tags", ".", "get_abbr_impl", "(", ")", ",", "pep425tags", ".", "get_impl_ver", "(", ")", ",", "str", "(", "pep425tags", ".", "get_abi_tag", "(", ")", ")", ".", "lower", "(", ")", ",", "pep425tags", ".", "get_platform", "(", ")", ")", "else", ":", "# For example: windows_7_amd64", "pep425tag", "=", "\"%s_%s_%s\"", "%", "(", "system", ",", "release", ",", "architecture", ")", "return", "cls", "(", "system", "=", "system", ",", "architecture", "=", "architecture", ",", "release", "=", "release", ",", "version", "=", "version", ",", "machine", "=", "uname", "[", "4", "]", ",", "# x86, x86_64", "kernel", "=", "kernel", ",", "fqdn", "=", "fqdn", ",", "pep425tag", "=", "pep425tag", ",", ")" ]
Fill a Uname from the currently running platform.
[ "Fill", "a", "Uname", "from", "the", "currently", "running", "platform", "." ]
python
train
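FromCurrentSystem is built entirely on the standard library. A trimmed-down sketch of the same probing; note that platform.linux_distribution(), which the original relies on for the Linux branch, was removed in Python 3.8, so it is left out here:

```python
import platform
import socket

uname = platform.uname()
info = {
    "system": uname.system,        # e.g. "Linux", "Windows", "Darwin"
    "kernel": uname.release,       # e.g. "5.15.0-91-generic"
    "machine": uname.machine,      # e.g. "x86_64"
    "fqdn": socket.getfqdn(),
    "architecture": platform.architecture()[0],  # e.g. "64bit"
}
if info["system"] == "Darwin":
    info["version"] = platform.mac_ver()[0]      # e.g. "13.4.1"
elif info["system"] == "Windows":
    info["version"] = platform.win32_ver()[1]    # e.g. "10.0.19045"
print(info)
```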
googledatalab/pydatalab
solutionbox/ml_workbench/tensorflow/trainer/feature_transforms.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/tensorflow/trainer/feature_transforms.py#L503-L531
def csv_header_and_defaults(features, schema, stats, keep_target): """Gets csv header and default lists.""" target_name = get_target_name(features) if keep_target and not target_name: raise ValueError('Cannot find target transform') csv_header = [] record_defaults = [] for col in schema: if not keep_target and col['name'] == target_name: continue # Note that numerical key columns do not have a stats entry, hence the use # of get(col['name'], {}) csv_header.append(col['name']) if col['type'].lower() == INTEGER_SCHEMA: dtype = tf.int64 default = int(stats['column_stats'].get(col['name'], {}).get('mean', 0)) elif col['type'].lower() == FLOAT_SCHEMA: dtype = tf.float32 default = float(stats['column_stats'].get(col['name'], {}).get('mean', 0.0)) else: dtype = tf.string default = '' record_defaults.append(tf.constant([default], dtype=dtype)) return csv_header, record_defaults
[ "def", "csv_header_and_defaults", "(", "features", ",", "schema", ",", "stats", ",", "keep_target", ")", ":", "target_name", "=", "get_target_name", "(", "features", ")", "if", "keep_target", "and", "not", "target_name", ":", "raise", "ValueError", "(", "'Cannot find target transform'", ")", "csv_header", "=", "[", "]", "record_defaults", "=", "[", "]", "for", "col", "in", "schema", ":", "if", "not", "keep_target", "and", "col", "[", "'name'", "]", "==", "target_name", ":", "continue", "# Note that numerical key columns do not have a stats entry, hence the use", "# of get(col['name'], {})", "csv_header", ".", "append", "(", "col", "[", "'name'", "]", ")", "if", "col", "[", "'type'", "]", ".", "lower", "(", ")", "==", "INTEGER_SCHEMA", ":", "dtype", "=", "tf", ".", "int64", "default", "=", "int", "(", "stats", "[", "'column_stats'", "]", ".", "get", "(", "col", "[", "'name'", "]", ",", "{", "}", ")", ".", "get", "(", "'mean'", ",", "0", ")", ")", "elif", "col", "[", "'type'", "]", ".", "lower", "(", ")", "==", "FLOAT_SCHEMA", ":", "dtype", "=", "tf", ".", "float32", "default", "=", "float", "(", "stats", "[", "'column_stats'", "]", ".", "get", "(", "col", "[", "'name'", "]", ",", "{", "}", ")", ".", "get", "(", "'mean'", ",", "0.0", ")", ")", "else", ":", "dtype", "=", "tf", ".", "string", "default", "=", "''", "record_defaults", ".", "append", "(", "tf", ".", "constant", "(", "[", "default", "]", ",", "dtype", "=", "dtype", ")", ")", "return", "csv_header", ",", "record_defaults" ]
Gets csv header and default lists.
[ "Gets", "csv", "header", "and", "default", "lists", "." ]
python
train
SpriteLink/NIPAP
nipap-www/nipapwww/controllers/version.py
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/nipap-www/nipapwww/controllers/version.py#L14-L25
def index(self): """ Display NIPAP version info """ c.pynipap_version = pynipap.__version__ try: c.nipapd_version = pynipap.nipapd_version() except: c.nipapd_version = 'unknown' c.nipap_db_version = pynipap.nipap_db_version() return render('/version.html')
[ "def", "index", "(", "self", ")", ":", "c", ".", "pynipap_version", "=", "pynipap", ".", "__version__", "try", ":", "c", ".", "nipapd_version", "=", "pynipap", ".", "nipapd_version", "(", ")", "except", ":", "c", ".", "nipapd_version", "=", "'unknown'", "c", ".", "nipap_db_version", "=", "pynipap", ".", "nipap_db_version", "(", ")", "return", "render", "(", "'/version.html'", ")" ]
Display NIPAP version info
[ "Display", "NIPAP", "version", "info" ]
python
train
TheHive-Project/TheHive4py
thehive4py/api.py
https://github.com/TheHive-Project/TheHive4py/blob/35762bbd50d8376943268464326b59c752d6241b/thehive4py/api.py#L145-L164
def update_case_task(self, task): """ :Updates TheHive Task :param case: The task to update. The task's `id` determines which Task to update. :return: """ req = self.url + "/api/case/task/{}".format(task.id) # Choose which attributes to send update_keys = [ 'title', 'description', 'status', 'order', 'user', 'owner', 'flag', 'endDate' ] data = {k: v for k, v in task.__dict__.items() if k in update_keys} try: return requests.patch(req, headers={'Content-Type': 'application/json'}, json=data, proxies=self.proxies, auth=self.auth, verify=self.cert) except requests.exceptions.RequestException as e: raise CaseTaskException("Case task update error: {}".format(e))
[ "def", "update_case_task", "(", "self", ",", "task", ")", ":", "req", "=", "self", ".", "url", "+", "\"/api/case/task/{}\"", ".", "format", "(", "task", ".", "id", ")", "# Choose which attributes to send", "update_keys", "=", "[", "'title'", ",", "'description'", ",", "'status'", ",", "'order'", ",", "'user'", ",", "'owner'", ",", "'flag'", ",", "'endDate'", "]", "data", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "task", ".", "__dict__", ".", "items", "(", ")", "if", "k", "in", "update_keys", "}", "try", ":", "return", "requests", ".", "patch", "(", "req", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", ",", "json", "=", "data", ",", "proxies", "=", "self", ".", "proxies", ",", "auth", "=", "self", ".", "auth", ",", "verify", "=", "self", ".", "cert", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "raise", "CaseTaskException", "(", "\"Case task update error: {}\"", ".", "format", "(", "e", ")", ")" ]
:Updates TheHive Task :param case: The task to update. The task's `id` determines which Task to update. :return:
[ ":", "Updates", "TheHive", "Task", ":", "param", "case", ":", "The", "task", "to", "update", ".", "The", "task", "s", "id", "determines", "which", "Task", "to", "update", ".", ":", "return", ":" ]
python
train
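update_case_task boils down to filtering an object's attributes through a whitelist and PATCHing the result as JSON. A sketch with a dummy task and the HTTP call left as a comment (the URL shown is a placeholder, not TheHive's real base URL):

```python
class DummyTask:
    """Stand-in for a thehive4py task; only the attribute names matter here."""
    def __init__(self):
        self.id = "task-id"            # placeholder identifier
        self.title = "Triage alerts"
        self.status = "InProgress"
        self._internal_cache = {}      # not in update_keys, so it is filtered out

task = DummyTask()
update_keys = ['title', 'description', 'status', 'order', 'user', 'owner', 'flag', 'endDate']
data = {k: v for k, v in task.__dict__.items() if k in update_keys}
print(data)  # {'title': 'Triage alerts', 'status': 'InProgress'}

# The real client then issues something like:
# requests.patch("https://thehive.example/api/case/task/" + task.id, json=data, ...)
```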
andreikop/qutepart
qutepart/__init__.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/__init__.py#L1266-L1285
def _currentLineExtraSelections(self): """QTextEdit.ExtraSelection, which highlights the current line """ if self._currentLineColor is None: return [] def makeSelection(cursor): selection = QTextEdit.ExtraSelection() selection.format.setBackground(self._currentLineColor) selection.format.setProperty(QTextFormat.FullWidthSelection, True) cursor.clearSelection() selection.cursor = cursor return selection rectangularSelectionCursors = self._rectangularSelection.cursors() if rectangularSelectionCursors: return [makeSelection(cursor) \ for cursor in rectangularSelectionCursors] else: return [makeSelection(self.textCursor())]
[ "def", "_currentLineExtraSelections", "(", "self", ")", ":", "if", "self", ".", "_currentLineColor", "is", "None", ":", "return", "[", "]", "def", "makeSelection", "(", "cursor", ")", ":", "selection", "=", "QTextEdit", ".", "ExtraSelection", "(", ")", "selection", ".", "format", ".", "setBackground", "(", "self", ".", "_currentLineColor", ")", "selection", ".", "format", ".", "setProperty", "(", "QTextFormat", ".", "FullWidthSelection", ",", "True", ")", "cursor", ".", "clearSelection", "(", ")", "selection", ".", "cursor", "=", "cursor", "return", "selection", "rectangularSelectionCursors", "=", "self", ".", "_rectangularSelection", ".", "cursors", "(", ")", "if", "rectangularSelectionCursors", ":", "return", "[", "makeSelection", "(", "cursor", ")", "for", "cursor", "in", "rectangularSelectionCursors", "]", "else", ":", "return", "[", "makeSelection", "(", "self", ".", "textCursor", "(", ")", ")", "]" ]
QTextEdit.ExtraSelection, which highlights the current line
[ "QTextEdit", ".", "ExtraSelection", "which", "highlightes", "current", "line" ]
python
train
ARMmbed/icetea
icetea_lib/tools/GenericProcess.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/tools/GenericProcess.py#L132-L164
def _read_select_poll(poll): """ Read PIPEs using select.poll() method Available on Linux and some Unixes """ npipes = len(NonBlockingStreamReader._streams) for stream_descr in NonBlockingStreamReader._streams: if not stream_descr.has_error: poll.register(stream_descr.stream, select.POLLIN | select.POLLERR | select.POLLHUP | select.POLLNVAL) while NonBlockingStreamReader._run_flag: for (file_descr, event) in poll.poll(500): if event == select.POLLIN: NonBlockingStreamReader._read_fd(file_descr) else: # Because event != select.POLLIN, the pipe is closed # but we still want to read all bytes while NonBlockingStreamReader._read_fd(file_descr) != 0: pass # Dut died, signal the processing thread so it notices that no lines coming in stream_descr = NonBlockingStreamReader._get_sd(file_descr) if stream_descr is None: return # PIPE closed but DUT already disappeared stream_descr.has_error = True if stream_descr.callback is not None: stream_descr.callback() return # Force poll object to reregister only alive descriptors # Check if new pipes added, don't need mutext just for reading the size # If we will not get it right now, we will at next time if npipes != len(NonBlockingStreamReader._streams): return
[ "def", "_read_select_poll", "(", "poll", ")", ":", "npipes", "=", "len", "(", "NonBlockingStreamReader", ".", "_streams", ")", "for", "stream_descr", "in", "NonBlockingStreamReader", ".", "_streams", ":", "if", "not", "stream_descr", ".", "has_error", ":", "poll", ".", "register", "(", "stream_descr", ".", "stream", ",", "select", ".", "POLLIN", "|", "select", ".", "POLLERR", "|", "select", ".", "POLLHUP", "|", "select", ".", "POLLNVAL", ")", "while", "NonBlockingStreamReader", ".", "_run_flag", ":", "for", "(", "file_descr", ",", "event", ")", "in", "poll", ".", "poll", "(", "500", ")", ":", "if", "event", "==", "select", ".", "POLLIN", ":", "NonBlockingStreamReader", ".", "_read_fd", "(", "file_descr", ")", "else", ":", "# Because event != select.POLLIN, the pipe is closed", "# but we still want to read all bytes", "while", "NonBlockingStreamReader", ".", "_read_fd", "(", "file_descr", ")", "!=", "0", ":", "pass", "# Dut died, signal the processing thread so it notices that no lines coming in", "stream_descr", "=", "NonBlockingStreamReader", ".", "_get_sd", "(", "file_descr", ")", "if", "stream_descr", "is", "None", ":", "return", "# PIPE closed but DUT already disappeared", "stream_descr", ".", "has_error", "=", "True", "if", "stream_descr", ".", "callback", "is", "not", "None", ":", "stream_descr", ".", "callback", "(", ")", "return", "# Force poll object to reregister only alive descriptors", "# Check if new pipes added, don't need mutext just for reading the size", "# If we will not get it right now, we will at next time", "if", "npipes", "!=", "len", "(", "NonBlockingStreamReader", ".", "_streams", ")", ":", "return" ]
Read PIPEs using select.poll() method Available on Linux and some Unixes
[ "Read", "PIPEs", "using", "select", ".", "poll", "()", "method", "Available", "on", "Linux", "and", "some", "Unixes" ]
python
train
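A trimmed-down illustration of the select.poll() pattern the reader loop is built on: register a descriptor, poll with a timeout, read on POLLIN, and treat an empty read or hangup as end-of-stream. This uses a throwaway os.pipe() as the watched stream and is Linux/Unix only:

```python
import os
import select

r_fd, w_fd = os.pipe()        # hypothetical stream to watch
os.write(w_fd, b"hello\n")
os.close(w_fd)                # closing the writer will eventually surface POLLHUP

poller = select.poll()
poller.register(r_fd, select.POLLIN | select.POLLERR | select.POLLHUP | select.POLLNVAL)

done = False
while not done:
    for fd, event in poller.poll(500):          # timeout in milliseconds
        data = os.read(fd, 1024) if event & select.POLLIN else b""
        if data:
            print(data)                         # b'hello\n'
        else:
            # No data left and/or POLLHUP: the writer is gone, stop polling.
            poller.unregister(fd)
            done = True
os.close(r_fd)
```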
log2timeline/plaso
plaso/parsers/interface.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/interface.py#L170-L187
def RegisterPlugin(cls, plugin_class): """Registers a plugin class. The plugin classes are identified based on their lower case name. Args: plugin_class (type): class of the plugin. Raises: KeyError: if plugin class is already set for the corresponding name. """ plugin_name = plugin_class.NAME.lower() if plugin_name in cls._plugin_classes: raise KeyError(( 'Plugin class already set for name: {0:s}.').format( plugin_class.NAME)) cls._plugin_classes[plugin_name] = plugin_class
[ "def", "RegisterPlugin", "(", "cls", ",", "plugin_class", ")", ":", "plugin_name", "=", "plugin_class", ".", "NAME", ".", "lower", "(", ")", "if", "plugin_name", "in", "cls", ".", "_plugin_classes", ":", "raise", "KeyError", "(", "(", "'Plugin class already set for name: {0:s}.'", ")", ".", "format", "(", "plugin_class", ".", "NAME", ")", ")", "cls", ".", "_plugin_classes", "[", "plugin_name", "]", "=", "plugin_class" ]
Registers a plugin class. The plugin classes are identified based on their lower case name. Args: plugin_class (type): class of the plugin. Raises: KeyError: if plugin class is already set for the corresponding name.
[ "Registers", "a", "plugin", "class", "." ]
python
train
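The registration scheme is a plain class-level dict keyed by the lower-cased plugin name, with a KeyError guard against double registration. A self-contained sketch of the same pattern:

```python
class ParserRegistry:
    """Minimal registry mirroring the RegisterPlugin behaviour above."""
    _plugin_classes = {}

    @classmethod
    def RegisterPlugin(cls, plugin_class):
        plugin_name = plugin_class.NAME.lower()
        if plugin_name in cls._plugin_classes:
            raise KeyError(
                'Plugin class already set for name: {0:s}.'.format(plugin_class.NAME))
        cls._plugin_classes[plugin_name] = plugin_class

class SyslogPlugin:  # hypothetical plugin
    NAME = 'Syslog'

ParserRegistry.RegisterPlugin(SyslogPlugin)
print(ParserRegistry._plugin_classes)  # {'syslog': <class '__main__.SyslogPlugin'>}
# Registering SyslogPlugin a second time raises KeyError.
```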
storax/upme
src/upme/main.py
https://github.com/storax/upme/blob/41c2d91f922691e31ff940f33b755d2cb64dfef8/src/upme/main.py#L8-L29
def get_required(dist): """Return a set with all distributions that are required of dist This also includes subdependencies and the given distribution. :param dist: the distribution to query. Can also be the name of the distribution :type dist: :class:`pkg_resources.Distribution` | str :returns: a list of distributions that are required including the given one :rtype: set of :class:`pkg_resources.Distribution` :raises: class:`pkg_resources.DistributionNotFound` """ d = pkg_resources.get_distribution(dist) reqs = set(d.requires()) allds = set([d]) while reqs: newreqs = set([]) for r in reqs: dr = pkg_resources.get_distribution(r) allds.add(dr) newreqs = newreqs & set(dr.requires()) reqs = newreqs - reqs return allds
[ "def", "get_required", "(", "dist", ")", ":", "d", "=", "pkg_resources", ".", "get_distribution", "(", "dist", ")", "reqs", "=", "set", "(", "d", ".", "requires", "(", ")", ")", "allds", "=", "set", "(", "[", "d", "]", ")", "while", "reqs", ":", "newreqs", "=", "set", "(", "[", "]", ")", "for", "r", "in", "reqs", ":", "dr", "=", "pkg_resources", ".", "get_distribution", "(", "r", ")", "allds", ".", "add", "(", "dr", ")", "newreqs", "=", "newreqs", "&", "set", "(", "dr", ".", "requires", "(", ")", ")", "reqs", "=", "newreqs", "-", "reqs", "return", "allds" ]
Return a set with all distributions that are required of dist This also includes subdependencies and the given distribution. :param dist: the distribution to query. Can also be the name of the distribution :type dist: :class:`pkg_resources.Distribution` | str :returns: a list of distributions that are required including the given one :rtype: set of :class:`pkg_resources.Distribution` :raises: class:`pkg_resources.DistributionNotFound`
[ "Return", "a", "set", "with", "all", "distributions", "that", "are", "required", "of", "dist" ]
python
train
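get_required walks requirements with pkg_resources, but the `newreqs & set(dr.requires())` intersection step looks like it can drop sub-dependencies. A sketch of a transitive dependency walk over the same pkg_resources API, using a plain worklist and set union instead (not upme's exact algorithm):

```python
import pkg_resources

def get_required_transitive(dist_name):
    """All distributions required by dist_name, including itself (sketch, not upme's code)."""
    root = pkg_resources.get_distribution(dist_name)
    seen = {root}
    queue = list(root.requires())
    while queue:
        req = queue.pop()
        dist = pkg_resources.get_distribution(req)
        if dist not in seen:
            seen.add(dist)
            queue.extend(dist.requires())
    return seen

# Example with any installed distribution name:
for dist in get_required_transitive("requests"):
    print(dist.project_name, dist.version)
```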
senaite/senaite.core
bika/lims/browser/analysisrequest/reject_samples.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/analysisrequest/reject_samples.py#L147-L161
def get_samples_data(self): """Returns a list of Samples data (dictionary) """ for obj in self.get_samples_from_request(): yield { "obj": obj, "id": api.get_id(obj), "uid": api.get_uid(obj), "title": api.get_title(obj), "path": api.get_path(obj), "url": api.get_url(obj), "sample_type": obj.getSampleTypeTitle(), "client_title": obj.getClientTitle(), "date": ulocalized_time(obj.created(), long_format=True), }
[ "def", "get_samples_data", "(", "self", ")", ":", "for", "obj", "in", "self", ".", "get_samples_from_request", "(", ")", ":", "yield", "{", "\"obj\"", ":", "obj", ",", "\"id\"", ":", "api", ".", "get_id", "(", "obj", ")", ",", "\"uid\"", ":", "api", ".", "get_uid", "(", "obj", ")", ",", "\"title\"", ":", "api", ".", "get_title", "(", "obj", ")", ",", "\"path\"", ":", "api", ".", "get_path", "(", "obj", ")", ",", "\"url\"", ":", "api", ".", "get_url", "(", "obj", ")", ",", "\"sample_type\"", ":", "obj", ".", "getSampleTypeTitle", "(", ")", ",", "\"client_title\"", ":", "obj", ".", "getClientTitle", "(", ")", ",", "\"date\"", ":", "ulocalized_time", "(", "obj", ".", "created", "(", ")", ",", "long_format", "=", "True", ")", ",", "}" ]
Returns a list of Samples data (dictionary)
[ "Returns", "a", "list", "of", "Samples", "data", "(", "dictionary", ")" ]
python
train
StackStorm/pybind
pybind/slxos/v17r_1_01a/interface/port_channel/logical_interface/port_channel/pc_cmd_container_dummy/service_instance_vlan_cmds_dummy_container/get_untagged_vlan_dummy/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/interface/port_channel/logical_interface/port_channel/pc_cmd_container_dummy/service_instance_vlan_cmds_dummy_container/get_untagged_vlan_dummy/__init__.py#L92-L113
def _set_untagged(self, v, load=False): """ Setter method for untagged, mapped from YANG variable /interface/port_channel/logical_interface/port_channel/pc_cmd_container_dummy/service_instance_vlan_cmds_dummy_container/get_untagged_vlan_dummy/untagged (container) If this variable is read-only (config: false) in the source YANG file, then _set_untagged is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_untagged() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=untagged.untagged, is_container='container', presence=False, yang_name="untagged", rest_name="untagged", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure untagged VLAN on this logical interface', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-lif', defining_module='brocade-lif', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """untagged must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=untagged.untagged, is_container='container', presence=False, yang_name="untagged", rest_name="untagged", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure untagged VLAN on this logical interface', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-lif', defining_module='brocade-lif', yang_type='container', is_config=True)""", }) self.__untagged = t if hasattr(self, '_set'): self._set()
[ "def", "_set_untagged", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "untagged", ".", "untagged", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"untagged\"", ",", "rest_name", "=", "\"untagged\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Configure untagged VLAN on this logical interface'", ",", "u'cli-incomplete-command'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-lif'", ",", "defining_module", "=", "'brocade-lif'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"untagged must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=untagged.untagged, is_container='container', presence=False, yang_name=\"untagged\", rest_name=\"untagged\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure untagged VLAN on this logical interface', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-lif', defining_module='brocade-lif', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__untagged", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for untagged, mapped from YANG variable /interface/port_channel/logical_interface/port_channel/pc_cmd_container_dummy/service_instance_vlan_cmds_dummy_container/get_untagged_vlan_dummy/untagged (container) If this variable is read-only (config: false) in the source YANG file, then _set_untagged is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_untagged() directly.
[ "Setter", "method", "for", "untagged", "mapped", "from", "YANG", "variable", "/", "interface", "/", "port_channel", "/", "logical_interface", "/", "port_channel", "/", "pc_cmd_container_dummy", "/", "service_instance_vlan_cmds_dummy_container", "/", "get_untagged_vlan_dummy", "/", "untagged", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_untagged", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_untagged", "()", "directly", "." ]
python
train
annoviko/pyclustering
pyclustering/core/som_wrapper.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/core/som_wrapper.py#L73-L95
def som_load(som_pointer, weights, award, capture_objects): """! @brief Load dump of the network to SOM. @details Initialize SOM using existed weights, amount of captured objects by each neuron, captured objects by each neuron. Initialization is not performed if weights are empty. @param[in] som_pointer (POINTER): pointer to object of self-organized map. @param[in] weights (list): weights that should assigned to neurons. @param[in] awards (list): amount of captured objects by each neuron. @param[in] capture_objects (list): captured objects by each neuron. """ if len(weights) == 0: return ccore = ccore_library.get() package_weights = package_builder(weights, c_double).create() package_award = package_builder(award, c_size_t).create() package_capture_objects = package_builder(capture_objects, c_size_t).create() ccore.som_load(som_pointer, package_weights, package_award, package_capture_objects)
[ "def", "som_load", "(", "som_pointer", ",", "weights", ",", "award", ",", "capture_objects", ")", ":", "if", "len", "(", "weights", ")", "==", "0", ":", "return", "ccore", "=", "ccore_library", ".", "get", "(", ")", "package_weights", "=", "package_builder", "(", "weights", ",", "c_double", ")", ".", "create", "(", ")", "package_award", "=", "package_builder", "(", "award", ",", "c_size_t", ")", ".", "create", "(", ")", "package_capture_objects", "=", "package_builder", "(", "capture_objects", ",", "c_size_t", ")", ".", "create", "(", ")", "ccore", ".", "som_load", "(", "som_pointer", ",", "package_weights", ",", "package_award", ",", "package_capture_objects", ")" ]
! @brief Load dump of the network to SOM. @details Initialize SOM using existed weights, amount of captured objects by each neuron, captured objects by each neuron. Initialization is not performed if weights are empty. @param[in] som_pointer (POINTER): pointer to object of self-organized map. @param[in] weights (list): weights that should assigned to neurons. @param[in] awards (list): amount of captured objects by each neuron. @param[in] capture_objects (list): captured objects by each neuron.
[ "!" ]
python
valid
tcalmant/ipopo
pelix/internals/registry.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/internals/registry.py#L1090-L1115
def __sort_registry(self, svc_ref): # type: (ServiceReference) -> None """ Sorts the registry, after the update of the sort key of given service reference :param svc_ref: A service reference with a modified sort key """ with self.__svc_lock: if svc_ref not in self.__svc_registry: raise BundleException("Unknown service: {0}".format(svc_ref)) # Remove current references for spec in svc_ref.get_property(OBJECTCLASS): # Use bisect to remove the reference (faster) spec_refs = self.__svc_specs[spec] idx = bisect.bisect_left(spec_refs, svc_ref) del spec_refs[idx] # ... use the new sort key svc_ref.update_sort_key() for spec in svc_ref.get_property(OBJECTCLASS): # ... and insert it again spec_refs = self.__svc_specs[spec] bisect.insort_left(spec_refs, svc_ref)
[ "def", "__sort_registry", "(", "self", ",", "svc_ref", ")", ":", "# type: (ServiceReference) -> None", "with", "self", ".", "__svc_lock", ":", "if", "svc_ref", "not", "in", "self", ".", "__svc_registry", ":", "raise", "BundleException", "(", "\"Unknown service: {0}\"", ".", "format", "(", "svc_ref", ")", ")", "# Remove current references", "for", "spec", "in", "svc_ref", ".", "get_property", "(", "OBJECTCLASS", ")", ":", "# Use bisect to remove the reference (faster)", "spec_refs", "=", "self", ".", "__svc_specs", "[", "spec", "]", "idx", "=", "bisect", ".", "bisect_left", "(", "spec_refs", ",", "svc_ref", ")", "del", "spec_refs", "[", "idx", "]", "# ... use the new sort key", "svc_ref", ".", "update_sort_key", "(", ")", "for", "spec", "in", "svc_ref", ".", "get_property", "(", "OBJECTCLASS", ")", ":", "# ... and insert it again", "spec_refs", "=", "self", ".", "__svc_specs", "[", "spec", "]", "bisect", ".", "insort_left", "(", "spec_refs", ",", "svc_ref", ")" ]
Sorts the registry, after the update of the sort key of given service reference :param svc_ref: A service reference with a modified sort key
[ "Sorts", "the", "registry", "after", "the", "update", "of", "the", "sort", "key", "of", "given", "service", "reference" ]
python
train
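The re-sort trick in __sort_registry (bisect out the entry at its old position, update its key, then insort it back) applies to any sorted list. A small standalone sketch with (sort_key, name) tuples standing in for service references:

```python
import bisect

# A sorted list of (sort_key, name) pairs, standing in for service references.
refs = [(1, "svc-a"), (3, "svc-b"), (7, "svc-c")]

# "svc-b" changes its sort key from 3 to 9: remove it at its old position...
idx = bisect.bisect_left(refs, (3, "svc-b"))
del refs[idx]

# ...then insert it again under the new key, keeping the list sorted throughout.
bisect.insort_left(refs, (9, "svc-b"))
print(refs)  # [(1, 'svc-a'), (7, 'svc-c'), (9, 'svc-b')]
```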
daviddrysdale/python-phonenumbers
python/phonenumbers/carrier.py
https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/carrier.py#L53-L79
def name_for_valid_number(numobj, lang, script=None, region=None): """Returns a carrier name for the given PhoneNumber object, in the language provided. The carrier name is the one the number was originally allocated to, however if the country supports mobile number portability the number might not belong to the returned carrier anymore. If no mapping is found an empty string is returned. This method assumes the validity of the number passed in has already been checked, and that the number is suitable for carrier lookup. We consider mobile and pager numbers possible candidates for carrier lookup. Arguments: numobj -- The PhoneNumber object for which we want to get a carrier name. lang -- A 2-letter lowercase ISO 639-1 language code for the language in which the description should be returned (e.g. "en") script -- A 4-letter titlecase (first letter uppercase, rest lowercase) ISO script code as defined in ISO 15924, separated by an underscore (e.g. "Hant") region -- A 2-letter uppercase ISO 3166-1 country code (e.g. "GB") Returns a carrier name in the given language code, for the given phone number, or an empty string if no description is available. """ return _prefix_description_for_number(CARRIER_DATA, CARRIER_LONGEST_PREFIX, numobj, lang, script, region)
[ "def", "name_for_valid_number", "(", "numobj", ",", "lang", ",", "script", "=", "None", ",", "region", "=", "None", ")", ":", "return", "_prefix_description_for_number", "(", "CARRIER_DATA", ",", "CARRIER_LONGEST_PREFIX", ",", "numobj", ",", "lang", ",", "script", ",", "region", ")" ]
Returns a carrier name for the given PhoneNumber object, in the language provided. The carrier name is the one the number was originally allocated to, however if the country supports mobile number portability the number might not belong to the returned carrier anymore. If no mapping is found an empty string is returned. This method assumes the validity of the number passed in has already been checked, and that the number is suitable for carrier lookup. We consider mobile and pager numbers possible candidates for carrier lookup. Arguments: numobj -- The PhoneNumber object for which we want to get a carrier name. lang -- A 2-letter lowercase ISO 639-1 language code for the language in which the description should be returned (e.g. "en") script -- A 4-letter titlecase (first letter uppercase, rest lowercase) ISO script code as defined in ISO 15924, separated by an underscore (e.g. "Hant") region -- A 2-letter uppercase ISO 3166-1 country code (e.g. "GB") Returns a carrier name in the given language code, for the given phone number, or an empty string if no description is available.
[ "Returns", "a", "carrier", "name", "for", "the", "given", "PhoneNumber", "object", "in", "the", "language", "provided", "." ]
python
train
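Typical use of name_for_valid_number goes through the carrier module on an already-parsed number. A short sketch; the printed name depends on the metadata bundled with the installed phonenumbers version and may well be an empty string:

```python
import phonenumbers
from phonenumbers import carrier

# A mobile number is the kind of input carrier lookup is meant for.
numobj = phonenumbers.parse("+919820012345", None)
print(carrier.name_for_valid_number(numobj, "en"))

# If the number's validity has not been checked yet, carrier.name_for_number()
# is the safer entry point, since it performs that check first.
print(carrier.name_for_number(numobj, "en"))
```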
mamrhein/identifiers
identifiers/banking.py
https://github.com/mamrhein/identifiers/blob/93ab2609e461faff245d1f582411bf831b428eef/identifiers/banking.py#L74-L78
def elements(self): """Return the BIC's Party Prefix, Country Code, Party Suffix and Branch Code as a tuple.""" return (self.party_prefix, self.country_code, self.party_suffix, self.branch_code)
[ "def", "elements", "(", "self", ")", ":", "return", "(", "self", ".", "party_prefix", ",", "self", ".", "country_code", ",", "self", ".", "party_suffix", ",", "self", ".", "branch_code", ")" ]
Return the BIC's Party Prefix, Country Code, Party Suffix and Branch Code as a tuple.
[ "Return", "the", "BIC", "s", "Party", "Prefix", "Country", "Code", "Party", "Suffix", "and", "Branch", "Code", "as", "a", "tuple", "." ]
python
train
last-partizan/pytils
pytils/translit.py
https://github.com/last-partizan/pytils/blob/1c570a32b15e564bc68587b8207e32d464e61d08/pytils/translit.py#L190-L217
def slugify(in_string): """ Prepare string for slug (i.e. URL or file/dir name) @param in_string: input string @type in_string: C{basestring} @return: slug-string @rtype: C{str} @raise ValueError: if in_string is C{str}, but it isn't ascii """ try: u_in_string = six.text_type(in_string).lower() except UnicodeDecodeError: raise ValueError("We expects when in_string is str type," + \ "it is an ascii, but now it isn't. Use unicode " + \ "in this case.") # convert & to "and" u_in_string = re.sub('\&amp\;|\&', ' and ', u_in_string) # replace spaces by hyphen u_in_string = re.sub('[-\s]+', '-', u_in_string) # remove symbols that not in alphabet u_in_string = u''.join([symb for symb in u_in_string if symb in ALPHABET]) # translify it out_string = translify(u_in_string) # remove non-alpha return re.sub('[^\w\s-]', '', out_string).strip().lower()
[ "def", "slugify", "(", "in_string", ")", ":", "try", ":", "u_in_string", "=", "six", ".", "text_type", "(", "in_string", ")", ".", "lower", "(", ")", "except", "UnicodeDecodeError", ":", "raise", "ValueError", "(", "\"We expects when in_string is str type,\"", "+", "\"it is an ascii, but now it isn't. Use unicode \"", "+", "\"in this case.\"", ")", "# convert & to \"and\"", "u_in_string", "=", "re", ".", "sub", "(", "'\\&amp\\;|\\&'", ",", "' and '", ",", "u_in_string", ")", "# replace spaces by hyphen", "u_in_string", "=", "re", ".", "sub", "(", "'[-\\s]+'", ",", "'-'", ",", "u_in_string", ")", "# remove symbols that not in alphabet", "u_in_string", "=", "u''", ".", "join", "(", "[", "symb", "for", "symb", "in", "u_in_string", "if", "symb", "in", "ALPHABET", "]", ")", "# translify it", "out_string", "=", "translify", "(", "u_in_string", ")", "# remove non-alpha", "return", "re", ".", "sub", "(", "'[^\\w\\s-]'", ",", "''", ",", "out_string", ")", ".", "strip", "(", ")", ".", "lower", "(", ")" ]
Prepare string for slug (i.e. URL or file/dir name) @param in_string: input string @type in_string: C{basestring} @return: slug-string @rtype: C{str} @raise ValueError: if in_string is C{str}, but it isn't ascii
[ "Prepare", "string", "for", "slug", "(", "i", ".", "e", ".", "URL", "or", "file", "/", "dir", "name", ")" ]
python
train
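A short usage sketch of the slugify helper above; the Cyrillic input string is just an illustration.

# -*- coding: utf-8 -*-
from pytils import translit

# Transliterate a Russian phrase and turn it into a URL/file-name friendly slug.
print(translit.slugify(u"Пример и текст"))   # expected to look like "primer-i-tekst"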
mbj4668/pyang
pyang/plugins/sample-xml-skeleton.py
https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/plugins/sample-xml-skeleton.py#L199-L220
def sample_element(self, node, parent, module, path): """Create element under `parent`. Declare new namespace if necessary. """ if path is None: return parent, module, None elif path == []: # GO ON pass else: if node.arg == path[0]: path = path[1:] else: return parent, module, None res = etree.SubElement(parent, node.arg) mm = node.main_module() if mm != module: res.attrib["xmlns"] = self.ns_uri[mm] module = mm return res, module, path
[ "def", "sample_element", "(", "self", ",", "node", ",", "parent", ",", "module", ",", "path", ")", ":", "if", "path", "is", "None", ":", "return", "parent", ",", "module", ",", "None", "elif", "path", "==", "[", "]", ":", "# GO ON", "pass", "else", ":", "if", "node", ".", "arg", "==", "path", "[", "0", "]", ":", "path", "=", "path", "[", "1", ":", "]", "else", ":", "return", "parent", ",", "module", ",", "None", "res", "=", "etree", ".", "SubElement", "(", "parent", ",", "node", ".", "arg", ")", "mm", "=", "node", ".", "main_module", "(", ")", "if", "mm", "!=", "module", ":", "res", ".", "attrib", "[", "\"xmlns\"", "]", "=", "self", ".", "ns_uri", "[", "mm", "]", "module", "=", "mm", "return", "res", ",", "module", ",", "path" ]
Create element under `parent`. Declare new namespace if necessary.
[ "Create", "element", "under", "parent", "." ]
python
train
3DLIRIOUS/MeshLabXML
meshlabxml/transform.py
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/transform.py#L198-L220
def rotate(script, axis='z', angle=0.0): """An alternative rotate implementation that uses a geometric function. This is more accurate than the built-in version.""" angle = math.radians(angle) if axis.lower() == 'x': vert_function(script, x_func='x', y_func='y*cos({angle})-z*sin({angle})'.format(angle=angle), z_func='y*sin({angle})+z*cos({angle})'.format(angle=angle)) elif axis.lower() == 'y': vert_function(script, x_func='z*sin({angle})+x*cos({angle})'.format(angle=angle), y_func='y', z_func='z*cos({angle})-x*sin({angle})'.format(angle=angle)) elif axis.lower() == 'z': vert_function(script, x_func='x*cos({angle})-y*sin({angle})'.format(angle=angle), y_func='x*sin({angle})+y*cos({angle})'.format(angle=angle), z_func='z') else: print('Axis name is not valid; exiting ...') sys.exit(1) return None
[ "def", "rotate", "(", "script", ",", "axis", "=", "'z'", ",", "angle", "=", "0.0", ")", ":", "angle", "=", "math", ".", "radians", "(", "angle", ")", "if", "axis", ".", "lower", "(", ")", "==", "'x'", ":", "vert_function", "(", "script", ",", "x_func", "=", "'x'", ",", "y_func", "=", "'y*cos({angle})-z*sin({angle})'", ".", "format", "(", "angle", "=", "angle", ")", ",", "z_func", "=", "'y*sin({angle})+z*cos({angle})'", ".", "format", "(", "angle", "=", "angle", ")", ")", "elif", "axis", ".", "lower", "(", ")", "==", "'y'", ":", "vert_function", "(", "script", ",", "x_func", "=", "'z*sin({angle})+x*cos({angle})'", ".", "format", "(", "angle", "=", "angle", ")", ",", "y_func", "=", "'y'", ",", "z_func", "=", "'z*cos({angle})-x*sin({angle})'", ".", "format", "(", "angle", "=", "angle", ")", ")", "elif", "axis", ".", "lower", "(", ")", "==", "'z'", ":", "vert_function", "(", "script", ",", "x_func", "=", "'x*cos({angle})-y*sin({angle})'", ".", "format", "(", "angle", "=", "angle", ")", ",", "y_func", "=", "'x*sin({angle})+y*cos({angle})'", ".", "format", "(", "angle", "=", "angle", ")", ",", "z_func", "=", "'z'", ")", "else", ":", "print", "(", "'Axis name is not valid; exiting ...'", ")", "sys", ".", "exit", "(", "1", ")", "return", "None" ]
An alternative rotate implementation that uses a geometric function. This is more accurate than the built-in version.
[ "An", "alternative", "rotate", "implementation", "that", "uses", "a", "geometric", "function", ".", "This", "is", "more", "accurate", "than", "the", "built", "-", "in", "version", "." ]
python
test
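A hedged sketch of using the geometric rotate filter above inside a MeshLabXML filter script; the FilterScript arguments and the file names are assumptions based on the project's examples, not part of this record.

import meshlabxml as mlx

# Build a filter script that rotates the mesh 90 degrees about the y axis, then run it.
script = mlx.FilterScript(file_in='input.ply', file_out='rotated.ply')   # hypothetical file names
mlx.transform.rotate(script, axis='y', angle=90)
script.run_script()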
daknuett/py_register_machine2
engine_tools/output/gpu_alike/rendering.py
https://github.com/daknuett/py_register_machine2/blob/599c53cd7576297d0d7a53344ed5d9aa98acc751/engine_tools/output/gpu_alike/rendering.py#L65-L71
def interrupt(self): """ Invoked on a write operation into the IR of the RendererDevice. """ if(self.device.read(9) & 0x01): self.handle_request() self.device.clear_IR()
[ "def", "interrupt", "(", "self", ")", ":", "if", "(", "self", ".", "device", ".", "read", "(", "9", ")", "&", "0x01", ")", ":", "self", ".", "handle_request", "(", ")", "self", ".", "device", ".", "clear_IR", "(", ")" ]
Invoked on a write operation into the IR of the RendererDevice.
[ "Invoked", "on", "a", "write", "operation", "into", "the", "IR", "of", "the", "RendererDevice", "." ]
python
train
glitchassassin/lackey
lackey/RegionMatching.py
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1112-L1132
def getCol(self, column, numberColumns=None):
    """ Returns the specified column of the region (if the raster is set)

    If numberColumns is provided, uses that instead of the raster
    """
    column = int(column)
    if self._raster[0] == 0 or self._raster[1] == 0:
        return self
    if numberColumns is None or numberColumns < 1 or numberColumns > 9:
        numberColumns = self._raster[1]
    columnWidth = self.w / numberColumns
    if column < 0:
        # If column is negative, count backwards from the end
        column = numberColumns + column
        if column < 0:
            # Bad column index, return last column
            return Region(self.x+self.w-columnWidth, self.y, columnWidth, self.h)
    elif column > numberColumns:
        # Bad column index, return first column
        return Region(self.x, self.y, columnWidth, self.h)
    return Region(self.x + (column * columnWidth), self.y, columnWidth, self.h)
[ "def", "getCol", "(", "self", ",", "column", ",", "numberColumns", "=", "None", ")", ":", "column", "=", "int", "(", "column", ")", "if", "self", ".", "_raster", "[", "0", "]", "==", "0", "or", "self", ".", "_raster", "[", "1", "]", "==", "0", ":", "return", "self", "if", "numberColumns", "is", "None", "or", "numberColumns", "<", "1", "or", "numberColumns", ">", "9", ":", "numberColumns", "=", "self", ".", "_raster", "[", "1", "]", "columnWidth", "=", "self", ".", "w", "/", "numberColumns", "if", "column", "<", "0", ":", "# If column is negative, count backwards from the end", "column", "=", "numberColumns", "-", "column", "if", "column", "<", "0", ":", "# Bad column index, return last column", "return", "Region", "(", "self", ".", "x", "+", "self", ".", "w", "-", "columnWidth", ",", "self", ".", "y", ",", "columnWidth", ",", "self", ".", "h", ")", "elif", "column", ">", "numberColumns", ":", "# Bad column index, return first column", "return", "Region", "(", "self", ".", "x", ",", "self", ".", "y", ",", "columnWidth", ",", "self", ".", "h", ")", "return", "Region", "(", "self", ".", "x", "+", "(", "column", "*", "columnWidth", ")", ",", "self", ".", "y", ",", "columnWidth", ",", "self", ".", "h", ")" ]
Returns the specified column of the region (if the raster is set) If numberColumns is provided, uses that instead of the raster
[ "Returns", "the", "specified", "column", "of", "the", "region", "(", "if", "the", "raster", "is", "set", ")" ]
python
train
atlassian-api/atlassian-python-api
atlassian/confluence.py
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/confluence.py#L374-L384
def set_page_label(self, page_id, label): """ Set a label on the page :param page_id: content_id format :param label: label to add :return: """ url = 'rest/api/content/{page_id}/label'.format(page_id=page_id) data = {'prefix': 'global', 'name': label} return self.post(path=url, data=data)
[ "def", "set_page_label", "(", "self", ",", "page_id", ",", "label", ")", ":", "url", "=", "'rest/api/content/{page_id}/label'", ".", "format", "(", "page_id", "=", "page_id", ")", "data", "=", "{", "'prefix'", ":", "'global'", ",", "'name'", ":", "label", "}", "return", "self", ".", "post", "(", "path", "=", "url", ",", "data", "=", "data", ")" ]
Set a label on the page :param page_id: content_id format :param label: label to add :return:
[ "Set", "a", "label", "on", "the", "page", ":", "param", "page_id", ":", "content_id", "format", ":", "param", "label", ":", "label", "to", "add", ":", "return", ":" ]
python
train
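A usage sketch for the label call above; the Confluence URL, credentials and page id are placeholders.

from atlassian import Confluence

confluence = Confluence(
    url='https://confluence.example.com',   # placeholder instance
    username='bot-user',
    password='secret')

# Attach a global label to an existing page (the id is made up).
confluence.set_page_label(page_id=123456, label='reviewed')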
RRZE-HPC/kerncraft
kerncraft/kerncraft.py
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kerncraft.py#L189-L202
def check_arguments(args, parser): """Check arguments passed by user that are not checked by argparse itself.""" if args.asm_block not in ['auto', 'manual']: try: args.asm_block = int(args.asm_block) except ValueError: parser.error('--asm-block can only be "auto", "manual" or an integer') # Set default unit depending on performance model requested if not args.unit: if 'Roofline' in args.pmodel or 'RooflineIACA' in args.pmodel: args.unit = 'FLOP/s' else: args.unit = 'cy/CL'
[ "def", "check_arguments", "(", "args", ",", "parser", ")", ":", "if", "args", ".", "asm_block", "not", "in", "[", "'auto'", ",", "'manual'", "]", ":", "try", ":", "args", ".", "asm_block", "=", "int", "(", "args", ".", "asm_block", ")", "except", "ValueError", ":", "parser", ".", "error", "(", "'--asm-block can only be \"auto\", \"manual\" or an integer'", ")", "# Set default unit depending on performance model requested", "if", "not", "args", ".", "unit", ":", "if", "'Roofline'", "in", "args", ".", "pmodel", "or", "'RooflineIACA'", "in", "args", ".", "pmodel", ":", "args", ".", "unit", "=", "'FLOP/s'", "else", ":", "args", ".", "unit", "=", "'cy/CL'" ]
Check arguments passed by user that are not checked by argparse itself.
[ "Check", "arguments", "passed", "by", "user", "that", "are", "not", "checked", "by", "argparse", "itself", "." ]
python
test
Josef-Friedrich/phrydy
phrydy/mediafile.py
https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L947-L966
def serialize(self, image): """Return an APIC frame populated with data from ``image``. """ assert isinstance(image, Image) frame = mutagen.id3.Frames[self.key]() frame.data = image.data frame.mime = image.mime_type frame.desc = image.desc or u'' # For compatibility with OS X/iTunes prefer latin-1 if possible. # See issue #899 try: frame.desc.encode("latin-1") except UnicodeEncodeError: frame.encoding = mutagen.id3.Encoding.UTF16 else: frame.encoding = mutagen.id3.Encoding.LATIN1 frame.type = image.type_index return frame
[ "def", "serialize", "(", "self", ",", "image", ")", ":", "assert", "isinstance", "(", "image", ",", "Image", ")", "frame", "=", "mutagen", ".", "id3", ".", "Frames", "[", "self", ".", "key", "]", "(", ")", "frame", ".", "data", "=", "image", ".", "data", "frame", ".", "mime", "=", "image", ".", "mime_type", "frame", ".", "desc", "=", "image", ".", "desc", "or", "u''", "# For compatibility with OS X/iTunes prefer latin-1 if possible.", "# See issue #899", "try", ":", "frame", ".", "desc", ".", "encode", "(", "\"latin-1\"", ")", "except", "UnicodeEncodeError", ":", "frame", ".", "encoding", "=", "mutagen", ".", "id3", ".", "Encoding", ".", "UTF16", "else", ":", "frame", ".", "encoding", "=", "mutagen", ".", "id3", ".", "Encoding", ".", "LATIN1", "frame", ".", "type", "=", "image", ".", "type_index", "return", "frame" ]
Return an APIC frame populated with data from ``image``.
[ "Return", "an", "APIC", "frame", "populated", "with", "data", "from", "image", "." ]
python
train
yunpian/yunpian-python-sdk
yunpian_python_sdk/model/result.py
https://github.com/yunpian/yunpian-python-sdk/blob/405a1196ec83fdf29ff454f74ef036974be11970/yunpian_python_sdk/model/result.py#L53-L58
def detail(self, detail=None, ret_r=False): '''code's detail''' if detail or ret_r: self._detail = detail return self return self._detail
[ "def", "detail", "(", "self", ",", "detail", "=", "None", ",", "ret_r", "=", "False", ")", ":", "if", "detail", "or", "ret_r", ":", "self", ".", "_detail", "=", "detail", "return", "self", "return", "self", ".", "_detail" ]
code's detail
[ "code", "s", "detail" ]
python
train
python/core-workflow
cherry_picker/cherry_picker/cherry_picker.py
https://github.com/python/core-workflow/blob/b93c76195f6db382cfcefee334380fb4c68d4e21/cherry_picker/cherry_picker/cherry_picker.py#L656-L668
def validate_sha(sha): """ Validate that a hexdigest sha is a valid commit in the repo raises ValueError if the sha does not reference a commit within the repo """ cmd = ["git", "log", "-r", sha] try: subprocess.check_output(cmd, stderr=subprocess.STDOUT) except subprocess.SubprocessError: raise ValueError( f"The sha listed in the branch name, {sha}, is not present in the repository" )
[ "def", "validate_sha", "(", "sha", ")", ":", "cmd", "=", "[", "\"git\"", ",", "\"log\"", ",", "\"-r\"", ",", "sha", "]", "try", ":", "subprocess", ".", "check_output", "(", "cmd", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "except", "subprocess", ".", "SubprocessError", ":", "raise", "ValueError", "(", "f\"The sha listed in the branch name, {sha}, is not present in the repository\"", ")" ]
Validate that a hexdigest sha is a valid commit in the repo raises ValueError if the sha does not reference a commit within the repo
[ "Validate", "that", "a", "hexdigest", "sha", "is", "a", "valid", "commit", "in", "the", "repo" ]
python
train
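A small behavioural sketch for validate_sha above, assuming it runs inside a git checkout; both sha values are made up.

# A sha that `git log` can resolve passes silently (returns None) ...
validate_sha("1a2b3c4d5e6f")                                   # hypothetical existing commit

# ... while an unknown sha raises ValueError with an explanatory message.
try:
    validate_sha("ffffffffffffffffffffffffffffffffffffffff")
except ValueError as exc:
    print(exc)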
dixudx/rtcclient
rtcclient/client.py
https://github.com/dixudx/rtcclient/blob/1721dd0b047478f5bdd6359b07a2c503cfafd86f/rtcclient/client.py#L1143-L1151
def _checkMissingParamsFromWorkitem(self, copied_from, keep=False, **kwargs): """Check the missing parameters for rendering directly from the copied workitem """ parameters = self.listFieldsFromWorkitem(copied_from, keep=keep) self._findMissingParams(parameters, **kwargs)
[ "def", "_checkMissingParamsFromWorkitem", "(", "self", ",", "copied_from", ",", "keep", "=", "False", ",", "*", "*", "kwargs", ")", ":", "parameters", "=", "self", ".", "listFieldsFromWorkitem", "(", "copied_from", ",", "keep", "=", "keep", ")", "self", ".", "_findMissingParams", "(", "parameters", ",", "*", "*", "kwargs", ")" ]
Check the missing parameters for rendering directly from the copied workitem
[ "Check", "the", "missing", "parameters", "for", "rendering", "directly", "from", "the", "copied", "workitem" ]
python
train
ANTsX/ANTsPy
ants/utils/quantile.py
https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/utils/quantile.py#L227-L292
def bandpass_filter_matrix( matrix, tr=1, lowf=0.01, highf=0.1, order = 3):
    """
    Bandpass filter the input time series image

    ANTsR function: `frequencyFilterfMRI`

    Arguments
    ---------
    matrix: input time series matrix (one column per signal)

    tr: sampling time interval (inverse of sampling rate)

    lowf: low frequency cutoff

    highf: high frequency cutoff

    order: order of the butterworth filter run using `filtfilt`

    Returns
    -------
    filtered matrix

    Example
    -------
    >>> import numpy as np
    >>> import ants
    >>> import matplotlib.pyplot as plt
    >>> brainSignal = np.random.randn( 400, 1000 )
    >>> tr = 1
    >>> filtered = ants.bandpass_filter_matrix( brainSignal, tr = tr )
    >>> nsamples = brainSignal.shape[0]
    >>> t = np.linspace(0, tr*nsamples, nsamples, endpoint=False)
    >>> k = 20
    >>> plt.plot(t, brainSignal[:,k], label='Noisy signal')
    >>> plt.plot(t, filtered[:,k], label='Filtered signal')
    >>> plt.xlabel('time (seconds)')
    >>> plt.grid(True)
    >>> plt.axis('tight')
    >>> plt.legend(loc='upper left')
    >>> plt.show()
    """
    from scipy.signal import butter, filtfilt

    def butter_bandpass(lowcut, highcut, fs, order ):
        nyq = 0.5 * fs
        low = lowcut / nyq
        high = highcut / nyq
        b, a = butter(order, [low, high], btype='band')
        return b, a

    def butter_bandpass_filter(data, lowcut, highcut, fs, order ):
        b, a = butter_bandpass(lowcut, highcut, fs, order=order)
        y = filtfilt(b, a, data)
        return y

    fs = 1/tr   # sampling rate based on tr
    nsamples = matrix.shape[0]
    ncolumns = matrix.shape[1]
    matrixOut = matrix.copy()
    for k in range( ncolumns ):
        matrixOut[:,k] = butter_bandpass_filter( matrix[:,k], lowf, highf, fs, order=order )
    return matrixOut
[ "def", "bandpass_filter_matrix", "(", "matrix", ",", "tr", "=", "1", ",", "lowf", "=", "0.01", ",", "highf", "=", "0.1", ",", "order", "=", "3", ")", ":", "from", "scipy", ".", "signal", "import", "butter", ",", "filtfilt", "def", "butter_bandpass", "(", "lowcut", ",", "highcut", ",", "fs", ",", "order", ")", ":", "nyq", "=", "0.5", "*", "fs", "low", "=", "lowcut", "/", "nyq", "high", "=", "highcut", "/", "nyq", "b", ",", "a", "=", "butter", "(", "order", ",", "[", "low", ",", "high", "]", ",", "btype", "=", "'band'", ")", "return", "b", ",", "a", "def", "butter_bandpass_filter", "(", "data", ",", "lowcut", ",", "highcut", ",", "fs", ",", "order", ")", ":", "b", ",", "a", "=", "butter_bandpass", "(", "lowcut", ",", "highcut", ",", "fs", ",", "order", "=", "order", ")", "y", "=", "filtfilt", "(", "b", ",", "a", ",", "data", ")", "return", "y", "fs", "=", "1", "/", "tr", "# sampling rate based on tr", "nsamples", "=", "matrix", ".", "shape", "[", "0", "]", "ncolumns", "=", "matrix", ".", "shape", "[", "1", "]", "matrixOut", "=", "matrix", ".", "copy", "(", ")", "for", "k", "in", "range", "(", "ncolumns", ")", ":", "matrixOut", "[", ":", ",", "k", "]", "=", "butter_bandpass_filter", "(", "matrix", "[", ":", ",", "k", "]", ",", "lowf", ",", "highf", ",", "fs", ",", "order", "=", "order", ")", "return", "matrixOut" ]
Bandpass filter the input time series image

ANTsR function: `frequencyFilterfMRI`

Arguments
---------
matrix: input time series matrix (one column per signal)

tr: sampling time interval (inverse of sampling rate)

lowf: low frequency cutoff

highf: high frequency cutoff

order: order of the butterworth filter run using `filtfilt`

Returns
-------
filtered matrix

Example
-------
>>> import numpy as np
>>> import ants
>>> import matplotlib.pyplot as plt
>>> brainSignal = np.random.randn( 400, 1000 )
>>> tr = 1
>>> filtered = ants.bandpass_filter_matrix( brainSignal, tr = tr )
>>> nsamples = brainSignal.shape[0]
>>> t = np.linspace(0, tr*nsamples, nsamples, endpoint=False)
>>> k = 20
>>> plt.plot(t, brainSignal[:,k], label='Noisy signal')
>>> plt.plot(t, filtered[:,k], label='Filtered signal')
>>> plt.xlabel('time (seconds)')
>>> plt.grid(True)
>>> plt.axis('tight')
>>> plt.legend(loc='upper left')
>>> plt.show()
[ "Bandpass", "filter", "the", "input", "time", "series", "image" ]
python
train
DataONEorg/d1_python
gmn/src/d1_gmn/app/views/util.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/views/util.py#L179-L196
def parse_and_normalize_url_date(date_str):
    """Parse an ISO 8601 date-time with optional timezone.

    - Return as datetime with timezone adjusted to UTC.
    - Return naive date-time set to UTC.
    """
    if date_str is None:
        return None
    try:
        return d1_common.date_time.dt_from_iso8601_str(date_str)
    except d1_common.date_time.iso8601.ParseError as e:
        raise d1_common.types.exceptions.InvalidRequest(
            0,
            'Invalid date format for URL parameter. date="{}" error="{}"'.format(
                date_str, str(e)
            ),
        )
[ "def", "parse_and_normalize_url_date", "(", "date_str", ")", ":", "if", "date_str", "is", "None", ":", "return", "None", "try", ":", "return", "d1_common", ".", "date_time", ".", "dt_from_iso8601_str", "(", "date_str", ")", "except", "d1_common", ".", "date_time", ".", "iso8601", ".", "ParseError", "as", "e", ":", "raise", "d1_common", ".", "types", ".", "exceptions", ".", "InvalidRequest", "(", "0", ",", "'Invalid date format for URL parameter. date=\"{}\" error=\"{}\"'", ".", "format", "(", "date_str", ",", "str", "(", "e", ")", ")", ",", ")" ]
Parse an ISO 8601 date-time with optional timezone.

- Return as datetime with timezone adjusted to UTC.
- Return naive date-time set to UTC.
[ "Parse", "a", "ISO", "8601", "date", "-", "time", "with", "optional", "timezone", "." ]
python
train
tensorflow/cleverhans
cleverhans/experimental/certification/nn.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/experimental/certification/nn.py#L161-L226
def load_network_from_checkpoint(checkpoint, model_json, input_shape=None): """Function to read the weights from checkpoint based on json description. Args: checkpoint: tensorflow checkpoint with trained model to verify model_json: path of json file with model description of the network list of dictionary items for each layer containing 'type', 'weight_var', 'bias_var' and 'is_transpose' 'type'is one of {'ff', 'ff_relu' or 'conv'}; 'weight_var' is the name of tf variable for weights of layer i; 'bias_var' is the name of tf variable for bias of layer i; 'is_transpose' is set to True if the weights have to be transposed as per convention Note that last layer is always feedforward net_weights: list of numpy matrices of weights of each layer convention: x[i+1] = W[i] x[i] net_biases: list of numpy arrays of biases of each layer net_layer_types: type of each layer ['ff' or 'ff_relu' or 'ff_conv' or 'ff_conv_relu'] 'ff': Simple feedforward layer with no activations 'ff_relu': Simple feedforward layer with ReLU activations 'ff_conv': Convolution layer with no activation 'ff_conv_relu': Convolution layer with ReLU activation Raises: ValueError: If layer_types are invalid or variable names not found in checkpoint """ # Load checkpoint reader = tf.train.load_checkpoint(checkpoint) variable_map = reader.get_variable_to_shape_map() checkpoint_variable_names = variable_map.keys() # Parse JSON file for names with tf.gfile.Open(model_json) as f: list_model_var = json.load(f) net_layer_types = [] net_weights = [] net_biases = [] cnn_params = [] # Checking validity of the input and adding to list for layer_model_var in list_model_var: if layer_model_var['type'] not in {'ff', 'ff_relu', 'conv'}: raise ValueError('Invalid layer type in description') if (layer_model_var['weight_var'] not in checkpoint_variable_names or layer_model_var['bias_var'] not in checkpoint_variable_names): raise ValueError('Variable names not found in checkpoint') net_layer_types.append(layer_model_var['type']) layer_weight = reader.get_tensor(layer_model_var['weight_var']) layer_bias = reader.get_tensor(layer_model_var['bias_var']) # TODO(aditirag): is there a way to automatically check when to transpose # We want weights W such that x^{i+1} = W^i x^i + b^i # Can think of a hack involving matching shapes but if shapes are equal # it can be ambiguous if layer_model_var['type'] in {'ff', 'ff_relu'}: layer_weight = np.transpose(layer_weight) cnn_params.append(None) if layer_model_var['type'] in {'conv'}: if 'stride' not in layer_model_var or 'padding' not in layer_model_var: raise ValueError('Please define stride and padding for conv layers.') cnn_params.append({'stride': layer_model_var['stride'], 'padding': layer_model_var['padding']}) net_weights.append(layer_weight) net_biases.append(np.reshape(layer_bias, (np.size(layer_bias), 1))) return NeuralNetwork(net_weights, net_biases, net_layer_types, input_shape, cnn_params)
[ "def", "load_network_from_checkpoint", "(", "checkpoint", ",", "model_json", ",", "input_shape", "=", "None", ")", ":", "# Load checkpoint", "reader", "=", "tf", ".", "train", ".", "load_checkpoint", "(", "checkpoint", ")", "variable_map", "=", "reader", ".", "get_variable_to_shape_map", "(", ")", "checkpoint_variable_names", "=", "variable_map", ".", "keys", "(", ")", "# Parse JSON file for names", "with", "tf", ".", "gfile", ".", "Open", "(", "model_json", ")", "as", "f", ":", "list_model_var", "=", "json", ".", "load", "(", "f", ")", "net_layer_types", "=", "[", "]", "net_weights", "=", "[", "]", "net_biases", "=", "[", "]", "cnn_params", "=", "[", "]", "# Checking validity of the input and adding to list", "for", "layer_model_var", "in", "list_model_var", ":", "if", "layer_model_var", "[", "'type'", "]", "not", "in", "{", "'ff'", ",", "'ff_relu'", ",", "'conv'", "}", ":", "raise", "ValueError", "(", "'Invalid layer type in description'", ")", "if", "(", "layer_model_var", "[", "'weight_var'", "]", "not", "in", "checkpoint_variable_names", "or", "layer_model_var", "[", "'bias_var'", "]", "not", "in", "checkpoint_variable_names", ")", ":", "raise", "ValueError", "(", "'Variable names not found in checkpoint'", ")", "net_layer_types", ".", "append", "(", "layer_model_var", "[", "'type'", "]", ")", "layer_weight", "=", "reader", ".", "get_tensor", "(", "layer_model_var", "[", "'weight_var'", "]", ")", "layer_bias", "=", "reader", ".", "get_tensor", "(", "layer_model_var", "[", "'bias_var'", "]", ")", "# TODO(aditirag): is there a way to automatically check when to transpose", "# We want weights W such that x^{i+1} = W^i x^i + b^i", "# Can think of a hack involving matching shapes but if shapes are equal", "# it can be ambiguous", "if", "layer_model_var", "[", "'type'", "]", "in", "{", "'ff'", ",", "'ff_relu'", "}", ":", "layer_weight", "=", "np", ".", "transpose", "(", "layer_weight", ")", "cnn_params", ".", "append", "(", "None", ")", "if", "layer_model_var", "[", "'type'", "]", "in", "{", "'conv'", "}", ":", "if", "'stride'", "not", "in", "layer_model_var", "or", "'padding'", "not", "in", "layer_model_var", ":", "raise", "ValueError", "(", "'Please define stride and padding for conv layers.'", ")", "cnn_params", ".", "append", "(", "{", "'stride'", ":", "layer_model_var", "[", "'stride'", "]", ",", "'padding'", ":", "layer_model_var", "[", "'padding'", "]", "}", ")", "net_weights", ".", "append", "(", "layer_weight", ")", "net_biases", ".", "append", "(", "np", ".", "reshape", "(", "layer_bias", ",", "(", "np", ".", "size", "(", "layer_bias", ")", ",", "1", ")", ")", ")", "return", "NeuralNetwork", "(", "net_weights", ",", "net_biases", ",", "net_layer_types", ",", "input_shape", ",", "cnn_params", ")" ]
Function to read the weights from checkpoint based on json description. Args: checkpoint: tensorflow checkpoint with trained model to verify model_json: path of json file with model description of the network list of dictionary items for each layer containing 'type', 'weight_var', 'bias_var' and 'is_transpose' 'type'is one of {'ff', 'ff_relu' or 'conv'}; 'weight_var' is the name of tf variable for weights of layer i; 'bias_var' is the name of tf variable for bias of layer i; 'is_transpose' is set to True if the weights have to be transposed as per convention Note that last layer is always feedforward net_weights: list of numpy matrices of weights of each layer convention: x[i+1] = W[i] x[i] net_biases: list of numpy arrays of biases of each layer net_layer_types: type of each layer ['ff' or 'ff_relu' or 'ff_conv' or 'ff_conv_relu'] 'ff': Simple feedforward layer with no activations 'ff_relu': Simple feedforward layer with ReLU activations 'ff_conv': Convolution layer with no activation 'ff_conv_relu': Convolution layer with ReLU activation Raises: ValueError: If layer_types are invalid or variable names not found in checkpoint
[ "Function", "to", "read", "the", "weights", "from", "checkpoint", "based", "on", "json", "description", "." ]
python
train
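An illustrative model_json payload for load_network_from_checkpoint above, matching the keys the function checks for ('type', 'weight_var', 'bias_var', plus 'stride'/'padding' for conv layers); the variable names and file paths are assumptions, not values from a real checkpoint.

import json

layers = [
    {"type": "conv", "weight_var": "conv1/kernel", "bias_var": "conv1/bias",
     "stride": 2, "padding": "SAME"},
    {"type": "ff_relu", "weight_var": "dense1/kernel", "bias_var": "dense1/bias"},
    {"type": "ff", "weight_var": "logits/kernel", "bias_var": "logits/bias"},
]
with open("model.json", "w") as f:
    json.dump(layers, f)

# net = load_network_from_checkpoint("model.ckpt", "model.json", input_shape=(28, 28, 1))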
kevinconway/venvctrl
venvctrl/venv/command.py
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/command.py#L73-L77
def python(self, cmd): """Execute a python script using the virtual environment python.""" python_bin = self.cmd_path('python') cmd = '{0} {1}'.format(python_bin, cmd) return self._execute(cmd)
[ "def", "python", "(", "self", ",", "cmd", ")", ":", "python_bin", "=", "self", ".", "cmd_path", "(", "'python'", ")", "cmd", "=", "'{0} {1}'", ".", "format", "(", "python_bin", ",", "cmd", ")", "return", "self", ".", "_execute", "(", "cmd", ")" ]
Execute a python script using the virtual environment python.
[ "Execute", "a", "python", "script", "using", "the", "virtual", "environment", "python", "." ]
python
train
joferkington/mpldatacursor
mpldatacursor/pick_info.py
https://github.com/joferkington/mpldatacursor/blob/7dabc589ed02c35ac5d89de5931f91e0323aa795/mpldatacursor/pick_info.py#L61-L83
def image_props(event): """ Get information for a pick event on an ``AxesImage`` artist. Returns a dict of "i" & "j" index values of the image for the point clicked, and "z": the (uninterpolated) value of the image at i,j. Parameters ----------- event : PickEvent The pick event to process Returns -------- props : dict A dict with keys: z, i, j """ x, y = event.mouseevent.xdata, event.mouseevent.ydata i, j = _coords2index(event.artist, x, y) z = event.artist.get_array()[i,j] if z.size > 1: # Override default numpy formatting for this specific case. Bad idea? z = ', '.join('{:0.3g}'.format(item) for item in z) return dict(z=z, i=i, j=j)
[ "def", "image_props", "(", "event", ")", ":", "x", ",", "y", "=", "event", ".", "mouseevent", ".", "xdata", ",", "event", ".", "mouseevent", ".", "ydata", "i", ",", "j", "=", "_coords2index", "(", "event", ".", "artist", ",", "x", ",", "y", ")", "z", "=", "event", ".", "artist", ".", "get_array", "(", ")", "[", "i", ",", "j", "]", "if", "z", ".", "size", ">", "1", ":", "# Override default numpy formatting for this specific case. Bad idea?", "z", "=", "', '", ".", "join", "(", "'{:0.3g}'", ".", "format", "(", "item", ")", "for", "item", "in", "z", ")", "return", "dict", "(", "z", "=", "z", ",", "i", "=", "i", ",", "j", "=", "j", ")" ]
Get information for a pick event on an ``AxesImage`` artist. Returns a dict of "i" & "j" index values of the image for the point clicked, and "z": the (uninterpolated) value of the image at i,j. Parameters ----------- event : PickEvent The pick event to process Returns -------- props : dict A dict with keys: z, i, j
[ "Get", "information", "for", "a", "pick", "event", "on", "an", "AxesImage", "artist", ".", "Returns", "a", "dict", "of", "i", "&", "j", "index", "values", "of", "the", "image", "for", "the", "point", "clicked", "and", "z", ":", "the", "(", "uninterpolated", ")", "value", "of", "the", "image", "at", "i", "j", "." ]
python
train
gunthercox/mathparse
mathparse/mathwords.py
https://github.com/gunthercox/mathparse/blob/8aadd40e7238febbfea19b29023583720e374ac0/mathparse/mathwords.py#L411-L423
def words_for_language(language_code): """ Return the math words for a language code. The language_code should be an ISO 639-2 language code. https://www.loc.gov/standards/iso639-2/php/code_list.php """ word_groups = word_groups_for_language(language_code) words = [] for group in word_groups: words.extend(word_groups[group].keys()) return words
[ "def", "words_for_language", "(", "language_code", ")", ":", "word_groups", "=", "word_groups_for_language", "(", "language_code", ")", "words", "=", "[", "]", "for", "group", "in", "word_groups", ":", "words", ".", "extend", "(", "word_groups", "[", "group", "]", ".", "keys", "(", ")", ")", "return", "words" ]
Return the math words for a language code. The language_code should be an ISO 639-2 language code. https://www.loc.gov/standards/iso639-2/php/code_list.php
[ "Return", "the", "math", "words", "for", "a", "language", "code", ".", "The", "language_code", "should", "be", "an", "ISO", "639", "-", "2", "language", "code", ".", "https", ":", "//", "www", ".", "loc", ".", "gov", "/", "standards", "/", "iso639", "-", "2", "/", "php", "/", "code_list", ".", "php" ]
python
train
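A brief usage sketch of words_for_language above; the upper-case 'ENG' code follows mathparse's ISO 639-2 convention and should be checked against the installed version.

from mathparse import mathwords

# Collect every math-related word mathparse knows for English.
words = mathwords.words_for_language('ENG')
print(len(words), sorted(words)[:5])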
openstack/pyghmi
pyghmi/ipmi/console.py
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/console.py#L192-L197
def _got_cons_input(self, handle): """Callback for handle events detected by ipmi session """ self._addpendingdata(handle.read()) if not self.awaitingack: self._sendpendingoutput()
[ "def", "_got_cons_input", "(", "self", ",", "handle", ")", ":", "self", ".", "_addpendingdata", "(", "handle", ".", "read", "(", ")", ")", "if", "not", "self", ".", "awaitingack", ":", "self", ".", "_sendpendingoutput", "(", ")" ]
Callback for handle events detected by ipmi session
[ "Callback", "for", "handle", "events", "detected", "by", "ipmi", "session" ]
python
train
SpriteLink/NIPAP
nipap/nipap/backend.py
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/nipap/nipap/backend.py#L1130-L1139
def _db_upgrade(self, db_name):
    """ Upgrade nipap database schema
    """

    current_db_version = self._get_db_version()

    self._execute(db_schema.functions)
    for i in range(current_db_version, nipap.__db_version__):
        self._logger.info("Upgrading DB schema: %s to %s", i, i + 1)
        upgrade_sql = db_schema.upgrade[i-1] # 0 count on array
        self._execute(upgrade_sql % (db_name))
    self._execute(db_schema.triggers)
[ "def", "_db_upgrade", "(", "self", ",", "db_name", ")", ":", "current_db_version", "=", "self", ".", "_get_db_version", "(", ")", "self", ".", "_execute", "(", "db_schema", ".", "functions", ")", "for", "i", "in", "range", "(", "current_db_version", ",", "nipap", ".", "__db_version__", ")", ":", "self", ".", "_logger", ".", "info", "(", "\"Upgrading DB schema:\"", ",", "i", ",", "\"to\"", ",", "i", "+", "1", ")", "upgrade_sql", "=", "db_schema", ".", "upgrade", "[", "i", "-", "1", "]", "# 0 count on array", "self", ".", "_execute", "(", "upgrade_sql", "%", "(", "db_name", ")", ")", "self", ".", "_execute", "(", "db_schema", ".", "triggers", ")" ]
Upgrade nipap database schema
[ "Upgrade", "nipap", "database", "schema" ]
python
train
msmbuilder/msmbuilder
msmbuilder/utils/param_sweep.py
https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/utils/param_sweep.py#L13-L51
def param_sweep(model, sequences, param_grid, n_jobs=1, verbose=0):
    """Fit a series of models over a range of parameters.

    Parameters
    ----------
    model : msmbuilder.BaseEstimator
        An *instance* of an estimator to be used
        to fit data.
    sequences : list of array-like
        List of sequences, or a single sequence. Each
        sequence should be a 1D iterable of state
        labels. Labels can be integers, strings, or
        other orderable objects.
    param_grid : dict or sklearn.grid_search.ParameterGrid
        Parameter grid to specify models to fit. See
        sklearn.grid_search.ParameterGrid for an explanation
    n_jobs : int, optional
        Number of jobs to run in parallel using joblib.Parallel

    Returns
    -------
    models : list
        List of models fit to the data according to
        param_grid
    """

    if isinstance(param_grid, dict):
        param_grid = ParameterGrid(param_grid)
    elif not isinstance(param_grid, ParameterGrid):
        raise ValueError("param_grid must be a dict or ParameterGrid instance")

    # iterable with (model, sequence) as items
    iter_args = ((clone(model).set_params(**params), sequences)
                 for params in param_grid)

    models = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(_param_sweep_helper)(args) for args in iter_args)

    return models
[ "def", "param_sweep", "(", "model", ",", "sequences", ",", "param_grid", ",", "n_jobs", "=", "1", ",", "verbose", "=", "0", ")", ":", "if", "isinstance", "(", "param_grid", ",", "dict", ")", ":", "param_grid", "=", "ParameterGrid", "(", "param_grid", ")", "elif", "not", "isinstance", "(", "param_grid", ",", "ParameterGrid", ")", ":", "raise", "ValueError", "(", "\"param_grid must be a dict or ParamaterGrid instance\"", ")", "# iterable with (model, sequence) as items", "iter_args", "=", "(", "(", "clone", "(", "model", ")", ".", "set_params", "(", "*", "*", "params", ")", ",", "sequences", ")", "for", "params", "in", "param_grid", ")", "models", "=", "Parallel", "(", "n_jobs", "=", "n_jobs", ",", "verbose", "=", "verbose", ")", "(", "delayed", "(", "_param_sweep_helper", ")", "(", "args", ")", "for", "args", "in", "iter_args", ")", "return", "models" ]
Fit a series of models over a range of parameters. Parameters ---------- model : msmbuilder.BaseEstimator An *instance* of an estimator to be used to fit data. sequences : list of array-like List of sequences, or a single sequence. Each sequence should be a 1D iterable of state labels. Labels can be integers, strings, or other orderable objects. param_grid : dict or sklearn.grid_search.ParameterGrid Parameter grid to specify models to fit. See sklearn.grid_search.ParameterGrid for an explanation n_jobs : int, optional Number of jobs to run in parallel using joblib.Parallel Returns ------- models : list List of models fit to the data according to param_grid
[ "Fit", "a", "series", "of", "models", "over", "a", "range", "of", "parameters", "." ]
python
train
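A hedged sketch of sweeping Markov state model lag times with param_sweep above; the import path, the MarkovStateModel usage and the toy state sequence are assumptions for illustration.

from msmbuilder.msm import MarkovStateModel
from msmbuilder.utils.param_sweep import param_sweep

sequences = [[0, 0, 1, 2, 1, 0, 2, 2, 1, 0]]       # one toy discrete trajectory
models = param_sweep(MarkovStateModel(), sequences, {'lag_time': [1, 2]}, n_jobs=1)
print([m.lag_time for m in models])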
openstack/horizon
horizon/utils/units.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/utils/units.py#L40-L53
def is_larger(unit_1, unit_2): """Returns a boolean indicating whether unit_1 is larger than unit_2. E.g: >>> is_larger('KB', 'B') True >>> is_larger('min', 'day') False """ unit_1 = functions.value_for_key(INFORMATION_UNITS, unit_1) unit_2 = functions.value_for_key(INFORMATION_UNITS, unit_2) return ureg.parse_expression(unit_1) > ureg.parse_expression(unit_2)
[ "def", "is_larger", "(", "unit_1", ",", "unit_2", ")", ":", "unit_1", "=", "functions", ".", "value_for_key", "(", "INFORMATION_UNITS", ",", "unit_1", ")", "unit_2", "=", "functions", ".", "value_for_key", "(", "INFORMATION_UNITS", ",", "unit_2", ")", "return", "ureg", ".", "parse_expression", "(", "unit_1", ")", ">", "ureg", ".", "parse_expression", "(", "unit_2", ")" ]
Returns a boolean indicating whether unit_1 is larger than unit_2. E.g: >>> is_larger('KB', 'B') True >>> is_larger('min', 'day') False
[ "Returns", "a", "boolean", "indicating", "whether", "unit_1", "is", "larger", "than", "unit_2", "." ]
python
train
cggh/scikit-allel
allel/compat.py
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/compat.py#L51-L66
def memoryview_safe(x): """Make array safe to run in a Cython memoryview-based kernel. These kernels typically break down with the error ``ValueError: buffer source array is read-only`` when running in dask distributed. See Also -------- https://github.com/dask/distributed/issues/1978 https://github.com/cggh/scikit-allel/issues/206 """ if not x.flags.writeable: if not x.flags.owndata: x = x.copy(order='A') x.setflags(write=True) return x
[ "def", "memoryview_safe", "(", "x", ")", ":", "if", "not", "x", ".", "flags", ".", "writeable", ":", "if", "not", "x", ".", "flags", ".", "owndata", ":", "x", "=", "x", ".", "copy", "(", "order", "=", "'A'", ")", "x", ".", "setflags", "(", "write", "=", "True", ")", "return", "x" ]
Make array safe to run in a Cython memoryview-based kernel. These kernels typically break down with the error ``ValueError: buffer source array is read-only`` when running in dask distributed. See Also -------- https://github.com/dask/distributed/issues/1978 https://github.com/cggh/scikit-allel/issues/206
[ "Make", "array", "safe", "to", "run", "in", "a", "Cython", "memoryview", "-", "based", "kernel", ".", "These", "kernels", "typically", "break", "down", "with", "the", "error", "ValueError", ":", "buffer", "source", "array", "is", "read", "-", "only", "when", "running", "in", "dask", "distributed", "." ]
python
train
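A small sketch of the read-only-buffer case memoryview_safe above guards against; numpy is assumed to be available.

import numpy as np

x = np.arange(10)
x.setflags(write=False)        # simulate the read-only array dask distributed can hand back
y = memoryview_safe(x)
print(y.flags.writeable)       # True, so it is safe for a Cython memoryview kernel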
CitrineInformatics/pypif-sdk
pypif_sdk/interop/mdf.py
https://github.com/CitrineInformatics/pypif-sdk/blob/8b01d10d9a1426d5eef12e4b2f31c4657aa0fe59/pypif_sdk/interop/mdf.py#L11-L56
def query_to_mdf_records(query=None, dataset_id=None, mdf_acl=None):
    """Evaluate a query and return a list of MDF records

    If a dataset_id is specified but there is no query, a simple whole dataset
    query is formed for the user
    """
    if not query and not dataset_id:
        raise ValueError("Either query or dataset_id must be specified")
    if query and dataset_id:
        raise ValueError("Both query and dataset_id were specified; pick one or the other.")

    if not query:
        query = PifSystemReturningQuery(
            query=DataQuery(
                dataset=DatasetQuery(
                    id=Filter(equal=dataset_id)
                )
            ),
            size=10000  # Don't pull down all the results by default
        )

    client = get_client()

    if not mdf_acl:
        raise ValueError('Access controls (mdf_acl) must be specified. Use ["public"] for public access')

    pif_result = client.pif_search(query)
    if len(pif_result.hits) == 0:
        return []

    example_uid = pif_result.hits[0].system.uid
    dataset_query = DatasetReturningQuery(
        query=DataQuery(
            system=PifSystemQuery(
                uid=Filter(equal=example_uid)
            )
        ),
        size=1  # we only expect one dataset to hit
    )
    dataset_result = client.dataset_search(dataset_query)

    records = []
    for hit in pif_result.hits:
        records.append(pif_to_mdf_record(hit.system, dataset_result.hits[0], mdf_acl))

    return records
[ "def", "query_to_mdf_records", "(", "query", "=", "None", ",", "dataset_id", "=", "None", ",", "mdf_acl", "=", "None", ")", ":", "if", "not", "query", "and", "not", "dataset_id", ":", "raise", "ValueError", "(", "\"Either query or dataset_id must be specified\"", ")", "if", "query", "and", "dataset_id", ":", "raise", "ValueError", "(", "\"Both query and dataset_id were specified; pick one or the other.\"", ")", "if", "not", "query", ":", "query", "=", "PifSystemReturningQuery", "(", "query", "=", "DataQuery", "(", "dataset", "=", "DatasetQuery", "(", "id", "=", "Filter", "(", "equal", "=", "dataset_id", ")", ")", ")", ",", "size", "=", "10000", "# Don't pull down all the results by default", ")", "client", "=", "get_client", "(", ")", "if", "not", "mdf_acl", ":", "raise", "ValueError", "(", "'Access controls (mdf_acl) must be specified. Use [\"public\"] for public access'", ")", "pif_result", "=", "client", ".", "pif_search", "(", "query", ")", "if", "len", "(", "pif_result", ".", "hits", ")", "==", "0", ":", "return", "[", "]", "example_uid", "=", "pif_result", ".", "hits", "[", "0", "]", ".", "system", ".", "uid", "dataset_query", "=", "DatasetReturningQuery", "(", "query", "=", "DataQuery", "(", "system", "=", "PifSystemQuery", "(", "uid", "=", "Filter", "(", "equal", "=", "example_uid", ")", ")", ")", ",", "size", "=", "1", "# we only expect one dataset to hit", ")", "dataset_result", "=", "client", ".", "dataset_search", "(", "dataset_query", ")", "records", "=", "[", "]", "for", "hit", "in", "pif_result", ".", "hits", ":", "records", ".", "append", "(", "pif_to_mdf_record", "(", "hit", ".", "system", ",", "dataset_result", ".", "hits", "[", "0", "]", ",", "mdf_acl", ")", ")", "return", "records" ]
Evaluate a query and return a list of MDF records

If a dataset_id is specified but there is no query, a simple whole dataset
query is formed for the user
[ "Evaluate", "a", "query", "and", "return", "a", "list", "of", "MDF", "records" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_preprovision.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_preprovision.py#L26-L38
def preprovision_rbridge_id_wwn(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") preprovision = ET.SubElement(config, "preprovision", xmlns="urn:brocade.com:mgmt:brocade-preprovision") rbridge_id = ET.SubElement(preprovision, "rbridge-id") rbridge_id_key = ET.SubElement(rbridge_id, "rbridge-id") rbridge_id_key.text = kwargs.pop('rbridge_id') wwn = ET.SubElement(rbridge_id, "wwn") wwn.text = kwargs.pop('wwn') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "preprovision_rbridge_id_wwn", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "preprovision", "=", "ET", ".", "SubElement", "(", "config", ",", "\"preprovision\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-preprovision\"", ")", "rbridge_id", "=", "ET", ".", "SubElement", "(", "preprovision", ",", "\"rbridge-id\"", ")", "rbridge_id_key", "=", "ET", ".", "SubElement", "(", "rbridge_id", ",", "\"rbridge-id\"", ")", "rbridge_id_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'rbridge_id'", ")", "wwn", "=", "ET", ".", "SubElement", "(", "rbridge_id", ",", "\"wwn\"", ")", "wwn", ".", "text", "=", "kwargs", ".", "pop", "(", "'wwn'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
odlgroup/odl
odl/solvers/functional/functional.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/functional/functional.py#L1070-L1104
def convex_conj(self): r"""Convex conjugate functional of the functional. Notes ----- Given a functional :math:`f`, the convex conjugate of a linearly perturbed version :math:`f(x) + <y, x>` is given by a translation of the convex conjugate of :math:`f`: .. math:: (f + \langle y, \cdot \rangle)^* (x^*) = f^*(x^* - y). For reference on the identity used, see `[KP2015]`_. Moreover, the convex conjugate of :math:`f + c` is by definition .. math:: (f + c)^* (x^*) = f^*(x^*) - c. References ---------- [KP2015] Komodakis, N, and Pesquet, J-C. *Playing with Duality: An overview of recent primal-dual approaches for solving large-scale optimization problems*. IEEE Signal Processing Magazine, 32.6 (2015), pp 31--54. .. _[KP2015]: https://arxiv.org/abs/1406.5429 """ if self.quadratic_coeff == 0: cconj = self.functional.convex_conj.translated(self.linear_term) if self.constant != 0: cconj = cconj - self.constant return cconj else: return super(FunctionalQuadraticPerturb, self).convex_conj
[ "def", "convex_conj", "(", "self", ")", ":", "if", "self", ".", "quadratic_coeff", "==", "0", ":", "cconj", "=", "self", ".", "functional", ".", "convex_conj", ".", "translated", "(", "self", ".", "linear_term", ")", "if", "self", ".", "constant", "!=", "0", ":", "cconj", "=", "cconj", "-", "self", ".", "constant", "return", "cconj", "else", ":", "return", "super", "(", "FunctionalQuadraticPerturb", ",", "self", ")", ".", "convex_conj" ]
r"""Convex conjugate functional of the functional. Notes ----- Given a functional :math:`f`, the convex conjugate of a linearly perturbed version :math:`f(x) + <y, x>` is given by a translation of the convex conjugate of :math:`f`: .. math:: (f + \langle y, \cdot \rangle)^* (x^*) = f^*(x^* - y). For reference on the identity used, see `[KP2015]`_. Moreover, the convex conjugate of :math:`f + c` is by definition .. math:: (f + c)^* (x^*) = f^*(x^*) - c. References ---------- [KP2015] Komodakis, N, and Pesquet, J-C. *Playing with Duality: An overview of recent primal-dual approaches for solving large-scale optimization problems*. IEEE Signal Processing Magazine, 32.6 (2015), pp 31--54. .. _[KP2015]: https://arxiv.org/abs/1406.5429
[ "r", "Convex", "conjugate", "functional", "of", "the", "functional", "." ]
python
train
titusjan/argos
argos/repo/registry.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/repo/registry.py#L129-L136
def getFileDialogFilter(self): """ Returns a filter that can be used in open file dialogs, for example: 'All files (*);;Txt (*.txt;*.text);;netCDF(*.nc;*.nc4)' """ filters = [] for regRti in self.items: filters.append(regRti.getFileDialogFilter()) return ';;'.join(filters)
[ "def", "getFileDialogFilter", "(", "self", ")", ":", "filters", "=", "[", "]", "for", "regRti", "in", "self", ".", "items", ":", "filters", ".", "append", "(", "regRti", ".", "getFileDialogFilter", "(", ")", ")", "return", "';;'", ".", "join", "(", "filters", ")" ]
Returns a filter that can be used in open file dialogs, for example: 'All files (*);;Txt (*.txt;*.text);;netCDF(*.nc;*.nc4)'
[ "Returns", "a", "filter", "that", "can", "be", "used", "in", "open", "file", "dialogs", "for", "example", ":", "All", "files", "(", "*", ")", ";;", "Txt", "(", "*", ".", "txt", ";", "*", ".", "text", ")", ";;", "netCDF", "(", "*", ".", "nc", ";", "*", ".", "nc4", ")" ]
python
train
pymc-devs/pymc
pymc/StepMethods.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L97-L130
def assign_method(stochastic, scale=None, verbose=-1):
    """
    Returns a step method instance to handle a
    variable. If several methods have the same competence, it picks one
    arbitrarily (using set.pop()).
    """

    # Retrieve set of best candidates
    best_candidates = pick_best_methods(stochastic)

    # Randomly grab an appropriate method
    method = best_candidates.pop()

    failure_header = """Failed attempting to automatically assign step method class %s
to stochastic variable %s. Try setting %s's competence method to return 0
and manually assigning it when appropriate. See the user guide.

Error message: """ % (method.__name__, stochastic.__name__, method.__name__)

    try:
        if scale:
            out = method(stochastic, scale=scale, verbose=verbose)
        else:
            out = method(stochastic, verbose=verbose)
    except:
        a, b, c = sys.exc_info()
        try:
            args = list(b.args)
        except AttributeError:
            args = []
        args.append(failure_header)
        b.args = args
        six.reraise(a, b, c)
    return out
[ "def", "assign_method", "(", "stochastic", ",", "scale", "=", "None", ",", "verbose", "=", "-", "1", ")", ":", "# Retrieve set of best candidates", "best_candidates", "=", "pick_best_methods", "(", "stochastic", ")", "# Randomly grab and appropriate method", "method", "=", "best_candidates", ".", "pop", "(", ")", "failure_header", "=", "\"\"\"Failed attempting to automatically assign step method class %s\nto stochastic variable %s. Try setting %s's competence method to return 0\nand manually assigning it when appropriate. See the user guide.\n\nError message: \"\"\"", "%", "(", "method", ".", "__name__", ",", "stochastic", ".", "__name__", ",", "method", ".", "__name__", ")", "try", ":", "if", "scale", ":", "out", "=", "method", "(", "stochastic", ",", "scale", "=", "scale", ",", "verbose", "=", "verbose", ")", "else", ":", "out", "=", "method", "(", "stochastic", ",", "verbose", "=", "verbose", ")", "except", ":", "a", ",", "b", ",", "c", "=", "sys", ".", "exc_info", "(", ")", "try", ":", "args", "=", "list", "(", "b", ".", "args", ")", "except", "AttributeError", ":", "args", "=", "[", "]", "args", ".", "append", "(", "failure_header", ")", "b", ".", "args", "=", "args", "six", ".", "reraise", "(", "a", ",", "b", ",", "c", ")", "return", "out" ]
Returns a step method instance to handle a variable. If several methods have the same competence, it picks one arbitrarily (using set.pop()).
[ "Returns", "a", "step", "method", "instance", "to", "handle", "a", "variable", ".", "If", "several", "methods", "have", "the", "same", "competence", "it", "picks", "one", "arbitrarily", "(", "using", "set", ".", "pop", "()", ")", "." ]
python
train
MultipedRobotics/pyxl320
pyxl320/utils.py
https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/pyxl320/utils.py#L22-L38
def listSerialPorts(): """ http://pyserial.readthedocs.io/en/latest/shortintro.html This calls the command line tool from pyserial to list the available serial ports. """ cmd = 'python -m serial.tools.list_ports' err, ret = commands.getstatusoutput(cmd) if not err: r = ret.split('\n') ret = [] for line in r: if line.find('/dev/') >= 0: line = line.replace(' ', '') ret.append(line) return err, ret
[ "def", "listSerialPorts", "(", ")", ":", "cmd", "=", "'python -m serial.tools.list_ports'", "err", ",", "ret", "=", "commands", ".", "getstatusoutput", "(", "cmd", ")", "if", "not", "err", ":", "r", "=", "ret", ".", "split", "(", "'\\n'", ")", "ret", "=", "[", "]", "for", "line", "in", "r", ":", "if", "line", ".", "find", "(", "'/dev/'", ")", ">=", "0", ":", "line", "=", "line", ".", "replace", "(", "' '", ",", "''", ")", "ret", ".", "append", "(", "line", ")", "return", "err", ",", "ret" ]
http://pyserial.readthedocs.io/en/latest/shortintro.html This calls the command line tool from pyserial to list the available serial ports.
[ "http", ":", "//", "pyserial", ".", "readthedocs", ".", "io", "/", "en", "/", "latest", "/", "shortintro", ".", "html" ]
python
train
django-crispy-forms/django-crispy-forms
crispy_forms/layout_slice.py
https://github.com/django-crispy-forms/django-crispy-forms/blob/cd476927a756133c667c199bb12120f877bf6b7e/crispy_forms/layout_slice.py#L118-L144
def map(self, function): """ Iterates over layout objects pointed in `self.slice` executing `function` on them It passes `function` last layout object """ if isinstance(self.slice, slice): for i in range(*self.slice.indices(len(self.layout.fields))): function(self.layout.fields[i]) elif isinstance(self.slice, list): # A list of pointers Ex: [[[0, 0], 'div'], [[0, 2, 3], 'field_name']] for pointer in self.slice: position = pointer[0] layout_object = self.layout.fields[position[0]] for i in position[1:]: previous_layout_object = layout_object layout_object = layout_object.fields[i] # If update_attrs is applied to a string, we call to its wrapping layout object if ( function.__name__ == 'update_attrs' and isinstance(layout_object, string_types) ): function(previous_layout_object) else: function(layout_object)
[ "def", "map", "(", "self", ",", "function", ")", ":", "if", "isinstance", "(", "self", ".", "slice", ",", "slice", ")", ":", "for", "i", "in", "range", "(", "*", "self", ".", "slice", ".", "indices", "(", "len", "(", "self", ".", "layout", ".", "fields", ")", ")", ")", ":", "function", "(", "self", ".", "layout", ".", "fields", "[", "i", "]", ")", "elif", "isinstance", "(", "self", ".", "slice", ",", "list", ")", ":", "# A list of pointers Ex: [[[0, 0], 'div'], [[0, 2, 3], 'field_name']]", "for", "pointer", "in", "self", ".", "slice", ":", "position", "=", "pointer", "[", "0", "]", "layout_object", "=", "self", ".", "layout", ".", "fields", "[", "position", "[", "0", "]", "]", "for", "i", "in", "position", "[", "1", ":", "]", ":", "previous_layout_object", "=", "layout_object", "layout_object", "=", "layout_object", ".", "fields", "[", "i", "]", "# If update_attrs is applied to a string, we call to its wrapping layout object", "if", "(", "function", ".", "__name__", "==", "'update_attrs'", "and", "isinstance", "(", "layout_object", ",", "string_types", ")", ")", ":", "function", "(", "previous_layout_object", ")", "else", ":", "function", "(", "layout_object", ")" ]
Iterates over layout objects pointed in `self.slice` executing `function` on them It passes `function` last layout object
[ "Iterates", "over", "layout", "objects", "pointed", "in", "self", ".", "slice", "executing", "function", "on", "them", "It", "passes", "function", "last", "layout", "object" ]
python
train
coleifer/walrus
walrus/containers.py
https://github.com/coleifer/walrus/blob/82bf15a6613487b5b5fefeb488f186d7e0106547/walrus/containers.py#L477-L489
def interstore(self, dest, *others): """ Store the intersection of the current set and one or more others in a new key. :param dest: the name of the key to store intersection :param others: One or more :py:class:`Set` instances :returns: A :py:class:`Set` referencing ``dest``. """ keys = [self.key] keys.extend([other.key for other in others]) self.database.sinterstore(dest, keys) return self.database.Set(dest)
[ "def", "interstore", "(", "self", ",", "dest", ",", "*", "others", ")", ":", "keys", "=", "[", "self", ".", "key", "]", "keys", ".", "extend", "(", "[", "other", ".", "key", "for", "other", "in", "others", "]", ")", "self", ".", "database", ".", "sinterstore", "(", "dest", ",", "keys", ")", "return", "self", ".", "database", ".", "Set", "(", "dest", ")" ]
Store the intersection of the current set and one or more others in a new key. :param dest: the name of the key to store intersection :param others: One or more :py:class:`Set` instances :returns: A :py:class:`Set` referencing ``dest``.
[ "Store", "the", "intersection", "of", "the", "current", "set", "and", "one", "or", "more", "others", "in", "a", "new", "key", "." ]
python
train
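A usage sketch for interstore above, assuming a Redis server on localhost; the key names and members are arbitrary.

from walrus import Database

db = Database()                          # defaults to localhost:6379
warm = db.Set('colors:warm')
primary = db.Set('colors:primary')
warm.add('red', 'orange', 'yellow')
primary.add('red', 'yellow', 'blue')

# Store the intersection under a new key and get a Set handle back.
both = warm.interstore('colors:both', primary)
print(both.members())                    # {b'red', b'yellow'} on a default connection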
Julian/jsonschema
jsonschema/validators.py
https://github.com/Julian/jsonschema/blob/a72332004cdc3ba456de7918bc32059822b2f69a/jsonschema/validators.py#L712-L730
def resolving(self, ref): """ Resolve the given ``ref`` and enter its resolution scope. Exits the scope on exit of this context manager. Arguments: ref (str): The reference to resolve """ url, resolved = self.resolve(ref) self.push_scope(url) try: yield resolved finally: self.pop_scope()
[ "def", "resolving", "(", "self", ",", "ref", ")", ":", "url", ",", "resolved", "=", "self", ".", "resolve", "(", "ref", ")", "self", ".", "push_scope", "(", "url", ")", "try", ":", "yield", "resolved", "finally", ":", "self", ".", "pop_scope", "(", ")" ]
Resolve the given ``ref`` and enter its resolution scope. Exits the scope on exit of this context manager. Arguments: ref (str): The reference to resolve
[ "Resolve", "the", "given", "ref", "and", "enter", "its", "resolution", "scope", "." ]
python
train
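The `resolving` context manager pushes the resolved scope on entry and pops it again on exit. A minimal usage sketch with `RefResolver`; the schema and the `#/definitions/name` reference are illustrative:

from jsonschema import RefResolver

schema = {
    "definitions": {"name": {"type": "string"}},
    "properties": {"first": {"$ref": "#/definitions/name"}},
}
resolver = RefResolver.from_schema(schema)

# The resolved subschema is available inside the with-block; the
# resolution scope is pushed on entry and popped automatically on exit.
with resolver.resolving("#/definitions/name") as resolved:
    print(resolved)  # {'type': 'string'}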
ChrisCummins/labm8
__init__.py
https://github.com/ChrisCummins/labm8/blob/dd10d67a757aefb180cb508f86696f99440c94f5/__init__.py#L26-L34
def exit(status=0): """ Terminate the program with the given status code. """ if status == 0: lab.io.printf(lab.io.Colours.GREEN, "Done.") else: lab.io.printf(lab.io.Colours.RED, "Error {0}".format(status)) sys.exit(status)
[ "def", "exit", "(", "status", "=", "0", ")", ":", "if", "status", "==", "0", ":", "lab", ".", "io", ".", "printf", "(", "lab", ".", "io", ".", "Colours", ".", "GREEN", ",", "\"Done.\"", ")", "else", ":", "lab", ".", "io", ".", "printf", "(", "lab", ".", "io", ".", "Colours", ".", "RED", ",", "\"Error {0}\"", ".", "format", "(", "status", ")", ")", "sys", ".", "exit", "(", "status", ")" ]
Terminate the program with the given status code.
[ "Terminate", "the", "program", "with", "the", "given", "status", "code", "." ]
python
train
ModisWorks/modis
modis/discord_modis/modules/music/_musicplayer.py
https://github.com/ModisWorks/modis/blob/1f1225c9841835ec1d1831fc196306527567db8b/modis/discord_modis/modules/music/_musicplayer.py#L1149-L1160
async def setup_streamer(self): """Sets up basic defaults for the streamer""" self.streamer.volume = self.volume / 100 self.streamer.start() self.pause_time = None self.vclient_starttime = self.vclient.loop.time() # Cache next song self.logger.debug("Caching next song") dl_thread = threading.Thread(target=self.download_next_song_cache) dl_thread.start()
[ "async", "def", "setup_streamer", "(", "self", ")", ":", "self", ".", "streamer", ".", "volume", "=", "self", ".", "volume", "/", "100", "self", ".", "streamer", ".", "start", "(", ")", "self", ".", "pause_time", "=", "None", "self", ".", "vclient_starttime", "=", "self", ".", "vclient", ".", "loop", ".", "time", "(", ")", "# Cache next song", "self", ".", "logger", ".", "debug", "(", "\"Caching next song\"", ")", "dl_thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "download_next_song_cache", ")", "dl_thread", ".", "start", "(", ")" ]
Sets up basic defaults for the streamer
[ "Sets", "up", "basic", "defaults", "for", "the", "streamer" ]
python
train
mobinrg/rpi_spark_drives
JMRPiSpark/Drives/Display/SSD1306.py
https://github.com/mobinrg/rpi_spark_drives/blob/e1602d8268a5ef48e9e0a8b37de89e0233f946ea/JMRPiSpark/Drives/Display/SSD1306.py#L225-L241
def clear(self, fill = 0x00): """! \~english Clear buffer data and fill color into buffer @param fill: a color value, it will fill into buffer.<br> The SSD1306 only chosen two colors: <br> 0 (0x0): black <br> 1 (0x1): white <br> \~chinese 清除缓冲区数据并在缓冲区中填充颜色 @param fill: 一个颜色值,它会填充到缓冲区中 <br>                      SSD1306只能选择两种颜色: <br>                         0(0x0):黑色 <br>                         1(0x1):白色 <br> """ self._buffer = [ fill ] * ( self.width * self._mem_pages )
[ "def", "clear", "(", "self", ",", "fill", "=", "0x00", ")", ":", "self", ".", "_buffer", "=", "[", "fill", "]", "*", "(", "self", ".", "width", "*", "self", ".", "_mem_pages", ")" ]
! \~english Clear buffer data and fill color into buffer @param fill: a color value, it will fill into buffer.<br> The SSD1306 only chosen two colors: <br> 0 (0x0): black <br> 1 (0x1): white <br> \~chinese 清除缓冲区数据并在缓冲区中填充颜色 @param fill: 一个颜色值,它会填充到缓冲区中 <br>                      SSD1306只能选择两种颜色: <br>                         0(0x0):黑色 <br>                         1(0x1):白色 <br>
[ "!", "\\", "~english", "Clear", "buffer", "data", "and", "fill", "color", "into", "buffer", "@param", "fill", ":", "a", "color", "value", "it", "will", "fill", "into", "buffer", ".", "<br", ">", "The", "SSD1306", "only", "chosen", "two", "colors", ":", "<br", ">", "0", "(", "0x0", ")", ":", "black", "<br", ">", "1", "(", "0x1", ")", ":", "white", "<br", ">" ]
python
train
trailofbits/manticore
manticore/native/cpu/bitwise.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/bitwise.py#L29-L46
def GetNBits(value, nbits): """ Get the first `nbits` from `value`. :param value: Source value from which to extract :type value: int or long or BitVec :param int nbits: How many bits to extract :return: Low `nbits` bits of `value`. :rtype int or long or BitVec """ # NOP if sizes are the same if isinstance(value, int): return Operators.EXTRACT(value, 0, nbits) elif isinstance(value, BitVec): if value.size < nbits: return Operators.ZEXTEND(value, nbits) else: return Operators.EXTRACT(value, 0, nbits)
[ "def", "GetNBits", "(", "value", ",", "nbits", ")", ":", "# NOP if sizes are the same", "if", "isinstance", "(", "value", ",", "int", ")", ":", "return", "Operators", ".", "EXTRACT", "(", "value", ",", "0", ",", "nbits", ")", "elif", "isinstance", "(", "value", ",", "BitVec", ")", ":", "if", "value", ".", "size", "<", "nbits", ":", "return", "Operators", ".", "ZEXTEND", "(", "value", ",", "nbits", ")", "else", ":", "return", "Operators", ".", "EXTRACT", "(", "value", ",", "0", ",", "nbits", ")" ]
Get the first `nbits` from `value`. :param value: Source value from which to extract :type value: int or long or BitVec :param int nbits: How many bits to extract :return: Low `nbits` bits of `value`. :rtype int or long or BitVec
[ "Get", "the", "first", "nbits", "from", "value", "." ]
python
valid
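For concrete integers, keeping the low `nbits` bits is a plain mask-and-truncate. A standalone sketch of the same idea without the symbolic `Operators` layer (hypothetical helper, not part of manticore):

def get_n_bits(value, nbits):
    # Keep only the low `nbits` bits of a non-negative integer,
    # equivalent to Operators.EXTRACT(value, 0, nbits) on concrete values.
    return value & ((1 << nbits) - 1)

assert get_n_bits(0xFF, 4) == 0x0F
assert get_n_bits(0b10110, 3) == 0b110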
iotile/coretools
iotilegateway/iotilegateway/device.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilegateway/iotilegateway/device.py#L214-L243
async def connect(self, conn_id, connection_string): """Connect to a device. See :meth:`AbstractDeviceAdapter.connect`. """ if connection_string.startswith('device/'): adapter_id, local_conn = self._find_best_adapter(connection_string, conn_id) translate_conn = True elif connection_string.startswith('adapter/'): adapter_str, _, local_conn = connection_string[8:].partition('/') adapter_id = int(adapter_str) translate_conn = False else: raise DeviceAdapterError(conn_id, 'connect', 'invalid connection string format') if self.adapters[adapter_id].can_connect() is False: raise DeviceAdapterError(conn_id, 'connect', 'chosen adapter cannot handle another connection') # Make sure to set up the connection information before # so there are no races with events coming soon after connect. self._setup_connection(conn_id, local_conn) self._track_property(conn_id, 'adapter', adapter_id) self._track_property(conn_id, 'translate', translate_conn) try: await self.adapters[adapter_id].connect(conn_id, local_conn) except: self._teardown_connection(conn_id) raise
[ "async", "def", "connect", "(", "self", ",", "conn_id", ",", "connection_string", ")", ":", "if", "connection_string", ".", "startswith", "(", "'device/'", ")", ":", "adapter_id", ",", "local_conn", "=", "self", ".", "_find_best_adapter", "(", "connection_string", ",", "conn_id", ")", "translate_conn", "=", "True", "elif", "connection_string", ".", "startswith", "(", "'adapter/'", ")", ":", "adapter_str", ",", "_", ",", "local_conn", "=", "connection_string", "[", "8", ":", "]", ".", "partition", "(", "'/'", ")", "adapter_id", "=", "int", "(", "adapter_str", ")", "translate_conn", "=", "False", "else", ":", "raise", "DeviceAdapterError", "(", "conn_id", ",", "'connect'", ",", "'invalid connection string format'", ")", "if", "self", ".", "adapters", "[", "adapter_id", "]", ".", "can_connect", "(", ")", "is", "False", ":", "raise", "DeviceAdapterError", "(", "conn_id", ",", "'connect'", ",", "'chosen adapter cannot handle another connection'", ")", "# Make sure to set up the connection information before", "# so there are no races with events coming soon after connect.", "self", ".", "_setup_connection", "(", "conn_id", ",", "local_conn", ")", "self", ".", "_track_property", "(", "conn_id", ",", "'adapter'", ",", "adapter_id", ")", "self", ".", "_track_property", "(", "conn_id", ",", "'translate'", ",", "translate_conn", ")", "try", ":", "await", "self", ".", "adapters", "[", "adapter_id", "]", ".", "connect", "(", "conn_id", ",", "local_conn", ")", "except", ":", "self", ".", "_teardown_connection", "(", "conn_id", ")", "raise" ]
Connect to a device. See :meth:`AbstractDeviceAdapter.connect`.
[ "Connect", "to", "a", "device", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xnodewidget/xnodewidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xnodewidget/xnodewidget.py#L129-L143
def centerOnItems(self, items = None): """ Centers on the given items, if no items are supplied, then all items will be centered on. :param items | [<QGraphicsItem>, ..] """ if not items: rect = self.scene().visibleItemsBoundingRect() if not rect.width(): rect = self.scene().sceneRect() self.centerOn(rect.center()) else: self.centerOn(self.scene().calculateBoundingRect(items).center())
[ "def", "centerOnItems", "(", "self", ",", "items", "=", "None", ")", ":", "if", "not", "items", ":", "rect", "=", "self", ".", "scene", "(", ")", ".", "visibleItemsBoundingRect", "(", ")", "if", "not", "rect", ".", "width", "(", ")", ":", "rect", "=", "self", ".", "scene", "(", ")", ".", "sceneRect", "(", ")", "self", ".", "centerOn", "(", "rect", ".", "center", "(", ")", ")", "else", ":", "self", ".", "centerOn", "(", "self", ".", "scene", "(", ")", ".", "calculateBoundingRect", "(", "items", ")", ".", "center", "(", ")", ")" ]
Centers on the given items, if no items are supplied, then all items will be centered on. :param items | [<QGraphicsItem>, ..]
[ "Centers", "on", "the", "given", "items", "if", "no", "items", "are", "supplied", "then", "all", "items", "will", "be", "centered", "on", ".", ":", "param", "items", "|", "[", "<QGraphicsItem", ">", "..", "]" ]
python
train
blackecho/Deep-Learning-TensorFlow
yadlt/core/supervised_model.py
https://github.com/blackecho/Deep-Learning-TensorFlow/blob/ddeb1f2848da7b7bee166ad2152b4afc46bb2086/yadlt/core/supervised_model.py#L29-L72
def fit(self, train_X, train_Y, val_X=None, val_Y=None, graph=None): """Fit the model to the data. Parameters ---------- train_X : array_like, shape (n_samples, n_features) Training data. train_Y : array_like, shape (n_samples, n_classes) Training labels. val_X : array_like, shape (N, n_features) optional, (default = None). Validation data. val_Y : array_like, shape (N, n_classes) optional, (default = None). Validation labels. graph : tf.Graph, optional (default = None) Tensorflow Graph object. Returns ------- """ if len(train_Y.shape) != 1: num_classes = train_Y.shape[1] else: raise Exception("Please convert the labels with one-hot encoding.") g = graph if graph is not None else self.tf_graph with g.as_default(): # Build model self.build_model(train_X.shape[1], num_classes) with tf.Session() as self.tf_session: # Initialize tf stuff summary_objs = tf_utils.init_tf_ops(self.tf_session) self.tf_merged_summaries = summary_objs[0] self.tf_summary_writer = summary_objs[1] self.tf_saver = summary_objs[2] # Train model self._train_model(train_X, train_Y, val_X, val_Y) # Save model self.tf_saver.save(self.tf_session, self.model_path)
[ "def", "fit", "(", "self", ",", "train_X", ",", "train_Y", ",", "val_X", "=", "None", ",", "val_Y", "=", "None", ",", "graph", "=", "None", ")", ":", "if", "len", "(", "train_Y", ".", "shape", ")", "!=", "1", ":", "num_classes", "=", "train_Y", ".", "shape", "[", "1", "]", "else", ":", "raise", "Exception", "(", "\"Please convert the labels with one-hot encoding.\"", ")", "g", "=", "graph", "if", "graph", "is", "not", "None", "else", "self", ".", "tf_graph", "with", "g", ".", "as_default", "(", ")", ":", "# Build model", "self", ".", "build_model", "(", "train_X", ".", "shape", "[", "1", "]", ",", "num_classes", ")", "with", "tf", ".", "Session", "(", ")", "as", "self", ".", "tf_session", ":", "# Initialize tf stuff", "summary_objs", "=", "tf_utils", ".", "init_tf_ops", "(", "self", ".", "tf_session", ")", "self", ".", "tf_merged_summaries", "=", "summary_objs", "[", "0", "]", "self", ".", "tf_summary_writer", "=", "summary_objs", "[", "1", "]", "self", ".", "tf_saver", "=", "summary_objs", "[", "2", "]", "# Train model", "self", ".", "_train_model", "(", "train_X", ",", "train_Y", ",", "val_X", ",", "val_Y", ")", "# Save model", "self", ".", "tf_saver", ".", "save", "(", "self", ".", "tf_session", ",", "self", ".", "model_path", ")" ]
Fit the model to the data. Parameters ---------- train_X : array_like, shape (n_samples, n_features) Training data. train_Y : array_like, shape (n_samples, n_classes) Training labels. val_X : array_like, shape (N, n_features) optional, (default = None). Validation data. val_Y : array_like, shape (N, n_classes) optional, (default = None). Validation labels. graph : tf.Graph, optional (default = None) Tensorflow Graph object. Returns -------
[ "Fit", "the", "model", "to", "the", "data", "." ]
python
train
acorg/dark-matter
dark/local_align.py
https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/local_align.py#L201-L242
def _cigarString(self, output): """ Return a cigar string of aligned sequences. @param output: a C{tup} of strings (align1, align, align2) @return: a C{str} containing the cigar string. Eg with input: 'GGCCCGCA' and 'GG-CTGCA', return 2=1D1=1X3= """ cigar = [] count = 0 align1 = output[0] align2 = output[2] for nt1, nt2 in zip(align1, align2): if nt1 == nt2: cigar.append('=') elif nt1 == '-': cigar.append('I') elif nt2 == '-': cigar.append('D') else: cigar.append('X') # Initially create a list of characters, # eg ['=', '=', 'D', '=', 'X', '=', '=', '='] cigar.append('*') # Append an arbitrary character to ensure parsing below functions cigarString = '' previousCharacter = '' count = 0 first = True for character in cigar: if first: previousCharacter = character count += 1 first = False else: if character == previousCharacter: count += 1 else: cigarString += (str(count) + str(previousCharacter)) count = 1 previousCharacter = character return cigarString
[ "def", "_cigarString", "(", "self", ",", "output", ")", ":", "cigar", "=", "[", "]", "count", "=", "0", "align1", "=", "output", "[", "0", "]", "align2", "=", "output", "[", "2", "]", "for", "nt1", ",", "nt2", "in", "zip", "(", "align1", ",", "align2", ")", ":", "if", "nt1", "==", "nt2", ":", "cigar", ".", "append", "(", "'='", ")", "elif", "nt1", "==", "'-'", ":", "cigar", ".", "append", "(", "'I'", ")", "elif", "nt2", "==", "'-'", ":", "cigar", ".", "append", "(", "'D'", ")", "else", ":", "cigar", ".", "append", "(", "'X'", ")", "# Initially create a list of characters,", "# eg ['=', '=', 'D', '=', 'X', '=', '=', '=']", "cigar", ".", "append", "(", "'*'", ")", "# Append an arbitrary character to ensure parsing below functions", "cigarString", "=", "''", "previousCharacter", "=", "''", "count", "=", "0", "first", "=", "True", "for", "character", "in", "cigar", ":", "if", "first", ":", "previousCharacter", "=", "character", "count", "+=", "1", "first", "=", "False", "else", ":", "if", "character", "==", "previousCharacter", ":", "count", "+=", "1", "else", ":", "cigarString", "+=", "(", "str", "(", "count", ")", "+", "str", "(", "previousCharacter", ")", ")", "count", "=", "1", "previousCharacter", "=", "character", "return", "cigarString" ]
Return a cigar string of aligned sequences. @param output: a C{tup} of strings (align1, align, align2) @return: a C{str} containing the cigar string. Eg with input: 'GGCCCGCA' and 'GG-CTGCA', return 2=1D1=1X3=
[ "Return", "a", "cigar", "string", "of", "aligned", "sequences", "." ]
python
train
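The run-length compression at the end of `_cigarString` can also be expressed with `itertools.groupby`. A standalone sketch of the same CIGAR construction, checked against the example from the docstring (hypothetical helper, not part of dark-matter):

from itertools import groupby

def cigar(align1, align2):
    # Classify each aligned column, then run-length encode the result.
    ops = []
    for nt1, nt2 in zip(align1, align2):
        if nt1 == nt2:
            ops.append('=')
        elif nt1 == '-':
            ops.append('I')
        elif nt2 == '-':
            ops.append('D')
        else:
            ops.append('X')
    return ''.join('%d%s' % (len(list(group)), op) for op, group in groupby(ops))

assert cigar('GGCCCGCA', 'GG-CTGCA') == '2=1D1=1X3='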
avirshup/DockerMake
dockermake/step.py
https://github.com/avirshup/DockerMake/blob/2173199904f086353ef539ea578788b99f6fea0a/dockermake/step.py#L85-L181
def build(self, client, pull=False, usecache=True): """ Drives an individual build step. Build steps are separated by build_directory. If a build has zero one or less build_directories, it will be built in a single step. Args: client (docker.Client): docker client object that will build the image pull (bool): whether to pull dependent layers from remote repositories usecache (bool): whether to use cached layers or rebuild from scratch """ print(colored(' Building step', 'blue'), colored(self.imagename, 'blue', attrs=['bold']), colored('defined in', 'blue'), colored(self.sourcefile, 'blue', attrs=['bold'])) if self.build_first and not self.build_first.built: self.build_external_dockerfile(client, self.build_first) if self.bust_cache: usecache = False if not usecache: cprint(' Build cache disabled - this image will be rebuilt from scratch', 'yellow') dockerfile = u'\n'.join(self.dockerfile_lines) kwargs = dict(tag=self.buildname, pull=pull, nocache=not usecache, decode=True, rm=True, buildargs=self.buildargs, squash=self.squash) if usecache: utils.set_build_cachefrom(self.cache_from, kwargs, client) if self.build_dir is not None: tempdir = self.write_dockerfile(dockerfile) context_path = os.path.abspath(os.path.expanduser(self.build_dir)) kwargs.update(fileobj=None, dockerfile=os.path.join(DOCKER_TMPDIR, 'Dockerfile')) print(colored(' Build context:', 'blue'), colored(os.path.relpath(context_path), 'blue', attrs=['bold'])) if not self.custom_exclude: kwargs.update(path=context_path) else: print(colored(' Custom .dockerignore from:', 'blue'), colored(os.path.relpath(self.ignoredefs_file), 'blue', attrs=['bold'])) # AMV - this is a brittle call to an apparently "private' docker sdk method context = docker.utils.tar(self.build_dir, exclude=self.custom_exclude, dockerfile=(os.path.join(DOCKER_TMPDIR, 'Dockerfile'), dockerfile), gzip=False) kwargs.update(fileobj=context, custom_context=True) else: if sys.version_info.major == 2: fileobj = StringIO(dockerfile) else: fileobj = BytesIO(dockerfile.encode('utf-8')) kwargs.update(fileobj=fileobj, path=None, dockerfile=None) tempdir = None # start the build stream = client.api.build(**kwargs) try: utils.stream_docker_logs(stream, self.buildname) except docker.errors.APIError as e: if self.squash and not client.version().get('Experimental', False): raise errors.ExperimentalDaemonRequiredError( 'Docker error message:\n ' + str(e) + '\n\nUsing `squash` and/or `secret_files` requires a docker' " daemon with experimental features enabled. See\n" " https://github.com/docker/docker-ce/blob/master/components/cli/" "experimental/README.md") else: raise errors.BuildError(dockerfile, str(e), kwargs) except ValueError as e: raise errors.BuildError(dockerfile, str(e), kwargs) if self.squash and not self.bust_cache: self._resolve_squash_cache(client) # remove the temporary dockerfile if tempdir is not None: os.unlink(os.path.join(tempdir, 'Dockerfile')) os.rmdir(tempdir)
[ "def", "build", "(", "self", ",", "client", ",", "pull", "=", "False", ",", "usecache", "=", "True", ")", ":", "print", "(", "colored", "(", "' Building step'", ",", "'blue'", ")", ",", "colored", "(", "self", ".", "imagename", ",", "'blue'", ",", "attrs", "=", "[", "'bold'", "]", ")", ",", "colored", "(", "'defined in'", ",", "'blue'", ")", ",", "colored", "(", "self", ".", "sourcefile", ",", "'blue'", ",", "attrs", "=", "[", "'bold'", "]", ")", ")", "if", "self", ".", "build_first", "and", "not", "self", ".", "build_first", ".", "built", ":", "self", ".", "build_external_dockerfile", "(", "client", ",", "self", ".", "build_first", ")", "if", "self", ".", "bust_cache", ":", "usecache", "=", "False", "if", "not", "usecache", ":", "cprint", "(", "' Build cache disabled - this image will be rebuilt from scratch'", ",", "'yellow'", ")", "dockerfile", "=", "u'\\n'", ".", "join", "(", "self", ".", "dockerfile_lines", ")", "kwargs", "=", "dict", "(", "tag", "=", "self", ".", "buildname", ",", "pull", "=", "pull", ",", "nocache", "=", "not", "usecache", ",", "decode", "=", "True", ",", "rm", "=", "True", ",", "buildargs", "=", "self", ".", "buildargs", ",", "squash", "=", "self", ".", "squash", ")", "if", "usecache", ":", "utils", ".", "set_build_cachefrom", "(", "self", ".", "cache_from", ",", "kwargs", ",", "client", ")", "if", "self", ".", "build_dir", "is", "not", "None", ":", "tempdir", "=", "self", ".", "write_dockerfile", "(", "dockerfile", ")", "context_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "self", ".", "build_dir", ")", ")", "kwargs", ".", "update", "(", "fileobj", "=", "None", ",", "dockerfile", "=", "os", ".", "path", ".", "join", "(", "DOCKER_TMPDIR", ",", "'Dockerfile'", ")", ")", "print", "(", "colored", "(", "' Build context:'", ",", "'blue'", ")", ",", "colored", "(", "os", ".", "path", ".", "relpath", "(", "context_path", ")", ",", "'blue'", ",", "attrs", "=", "[", "'bold'", "]", ")", ")", "if", "not", "self", ".", "custom_exclude", ":", "kwargs", ".", "update", "(", "path", "=", "context_path", ")", "else", ":", "print", "(", "colored", "(", "' Custom .dockerignore from:'", ",", "'blue'", ")", ",", "colored", "(", "os", ".", "path", ".", "relpath", "(", "self", ".", "ignoredefs_file", ")", ",", "'blue'", ",", "attrs", "=", "[", "'bold'", "]", ")", ")", "# AMV - this is a brittle call to an apparently \"private' docker sdk method", "context", "=", "docker", ".", "utils", ".", "tar", "(", "self", ".", "build_dir", ",", "exclude", "=", "self", ".", "custom_exclude", ",", "dockerfile", "=", "(", "os", ".", "path", ".", "join", "(", "DOCKER_TMPDIR", ",", "'Dockerfile'", ")", ",", "dockerfile", ")", ",", "gzip", "=", "False", ")", "kwargs", ".", "update", "(", "fileobj", "=", "context", ",", "custom_context", "=", "True", ")", "else", ":", "if", "sys", ".", "version_info", ".", "major", "==", "2", ":", "fileobj", "=", "StringIO", "(", "dockerfile", ")", "else", ":", "fileobj", "=", "BytesIO", "(", "dockerfile", ".", "encode", "(", "'utf-8'", ")", ")", "kwargs", ".", "update", "(", "fileobj", "=", "fileobj", ",", "path", "=", "None", ",", "dockerfile", "=", "None", ")", "tempdir", "=", "None", "# start the build", "stream", "=", "client", ".", "api", ".", "build", "(", "*", "*", "kwargs", ")", "try", ":", "utils", ".", "stream_docker_logs", "(", "stream", ",", "self", ".", "buildname", ")", "except", "docker", ".", "errors", ".", "APIError", "as", "e", ":", "if", "self", ".", "squash", "and", "not", "client", ".", "version", 
"(", ")", ".", "get", "(", "'Experimental'", ",", "False", ")", ":", "raise", "errors", ".", "ExperimentalDaemonRequiredError", "(", "'Docker error message:\\n '", "+", "str", "(", "e", ")", "+", "'\\n\\nUsing `squash` and/or `secret_files` requires a docker'", "\" daemon with experimental features enabled. See\\n\"", "\" https://github.com/docker/docker-ce/blob/master/components/cli/\"", "\"experimental/README.md\"", ")", "else", ":", "raise", "errors", ".", "BuildError", "(", "dockerfile", ",", "str", "(", "e", ")", ",", "kwargs", ")", "except", "ValueError", "as", "e", ":", "raise", "errors", ".", "BuildError", "(", "dockerfile", ",", "str", "(", "e", ")", ",", "kwargs", ")", "if", "self", ".", "squash", "and", "not", "self", ".", "bust_cache", ":", "self", ".", "_resolve_squash_cache", "(", "client", ")", "# remove the temporary dockerfile", "if", "tempdir", "is", "not", "None", ":", "os", ".", "unlink", "(", "os", ".", "path", ".", "join", "(", "tempdir", ",", "'Dockerfile'", ")", ")", "os", ".", "rmdir", "(", "tempdir", ")" ]
Drives an individual build step. Build steps are separated by build_directory.
If a build has one or fewer build_directories, it will be built in a single
step.

Args:
    client (docker.Client): docker client object that will build the image
    pull (bool): whether to pull dependent layers from remote repositories
    usecache (bool): whether to use cached layers or rebuild from scratch
[ "Drives", "an", "individual", "build", "step", ".", "Build", "steps", "are", "separated", "by", "build_directory", ".", "If", "a", "build", "has", "zero", "one", "or", "less", "build_directories", "it", "will", "be", "built", "in", "a", "single", "step", "." ]
python
train
Hypex/hyppy
hyppy/func.py
https://github.com/Hypex/hyppy/blob/a425619c2a102b0e598fd6cac8aa0f6b766f542d/hyppy/func.py#L19-L40
def fleet_ttb(unit_type, quantity, factories, is_techno=False, is_dict=False, stasis_enabled=False): """ Calculate the time taken to construct a given fleet """ unit_weights = { UNIT_SCOUT: 1, UNIT_DESTROYER: 13, UNIT_BOMBER: 10, UNIT_CRUISER: 85, UNIT_STARBASE: 1, } govt_weight = 80 if is_dict else 100 prod_weight = 85 if is_techno else 100 weighted_qty = unit_weights[unit_type] * quantity ttb = (weighted_qty * govt_weight * prod_weight) * (2 * factories) # TTB is 66% longer with stasis enabled return ttb + (ttb * 0.66) if stasis_enabled else ttb
[ "def", "fleet_ttb", "(", "unit_type", ",", "quantity", ",", "factories", ",", "is_techno", "=", "False", ",", "is_dict", "=", "False", ",", "stasis_enabled", "=", "False", ")", ":", "unit_weights", "=", "{", "UNIT_SCOUT", ":", "1", ",", "UNIT_DESTROYER", ":", "13", ",", "UNIT_BOMBER", ":", "10", ",", "UNIT_CRUISER", ":", "85", ",", "UNIT_STARBASE", ":", "1", ",", "}", "govt_weight", "=", "80", "if", "is_dict", "else", "100", "prod_weight", "=", "85", "if", "is_techno", "else", "100", "weighted_qty", "=", "unit_weights", "[", "unit_type", "]", "*", "quantity", "ttb", "=", "(", "weighted_qty", "*", "govt_weight", "*", "prod_weight", ")", "*", "(", "2", "*", "factories", ")", "# TTB is 66% longer with stasis enabled", "return", "ttb", "+", "(", "ttb", "*", "0.66", ")", "if", "stasis_enabled", "else", "ttb" ]
Calculate the time taken to construct a given fleet
[ "Calculate", "the", "time", "taken", "to", "construct", "a", "given", "fleet" ]
python
train
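The formula reduces to weighted quantity x government modifier x production modifier x (2 x factories), with a 66% surcharge when stasis is enabled. A quick worked sketch with illustrative numbers (the weights mirror the table in the function):

# 10 destroyers (unit weight 13), non-technocracy, non-dictatorship, 4 factories
weighted_qty = 13 * 10                        # 130
ttb = (weighted_qty * 100 * 100) * (2 * 4)    # 10,400,000
ttb_with_stasis = ttb + ttb * 0.66            # 66% longer with stasis enabled
print(ttb, ttb_with_stasis)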
binux/pyspider
pyspider/fetcher/tornado_fetcher.py
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/fetcher/tornado_fetcher.py#L780-L787
def quit(self): '''Quit fetcher''' self._running = False self._quit = True self.ioloop.add_callback(self.ioloop.stop) if hasattr(self, 'xmlrpc_server'): self.xmlrpc_ioloop.add_callback(self.xmlrpc_server.stop) self.xmlrpc_ioloop.add_callback(self.xmlrpc_ioloop.stop)
[ "def", "quit", "(", "self", ")", ":", "self", ".", "_running", "=", "False", "self", ".", "_quit", "=", "True", "self", ".", "ioloop", ".", "add_callback", "(", "self", ".", "ioloop", ".", "stop", ")", "if", "hasattr", "(", "self", ",", "'xmlrpc_server'", ")", ":", "self", ".", "xmlrpc_ioloop", ".", "add_callback", "(", "self", ".", "xmlrpc_server", ".", "stop", ")", "self", ".", "xmlrpc_ioloop", ".", "add_callback", "(", "self", ".", "xmlrpc_ioloop", ".", "stop", ")" ]
Quit fetcher
[ "Quit", "fetcher" ]
python
train
astroML/gatspy
gatspy/datasets/rrlyrae.py
https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/datasets/rrlyrae.py#L175-L184
def get_metadata(self, lcid): """Get the parameters derived from the fit for the given id. This is table 2 of Sesar 2010 """ if self._metadata is None: self._metadata = fetch_rrlyrae_lc_params() i = np.where(self._metadata['id'] == lcid)[0] if len(i) == 0: raise ValueError("invalid lcid: {0}".format(lcid)) return self._metadata[i[0]]
[ "def", "get_metadata", "(", "self", ",", "lcid", ")", ":", "if", "self", ".", "_metadata", "is", "None", ":", "self", ".", "_metadata", "=", "fetch_rrlyrae_lc_params", "(", ")", "i", "=", "np", ".", "where", "(", "self", ".", "_metadata", "[", "'id'", "]", "==", "lcid", ")", "[", "0", "]", "if", "len", "(", "i", ")", "==", "0", ":", "raise", "ValueError", "(", "\"invalid lcid: {0}\"", ".", "format", "(", "lcid", ")", ")", "return", "self", ".", "_metadata", "[", "i", "[", "0", "]", "]" ]
Get the parameters derived from the fit for the given id. This is table 2 of Sesar 2010
[ "Get", "the", "parameters", "derived", "from", "the", "fit", "for", "the", "given", "id", ".", "This", "is", "table", "2", "of", "Sesar", "2010" ]
python
train
apache/spark
python/pyspark/mllib/clustering.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/clustering.py#L856-L865
def setRandomCenters(self, dim, weight, seed): """ Set the initial centres to be random samples from a gaussian population with constant weights. """ rng = random.RandomState(seed) clusterCenters = rng.randn(self._k, dim) clusterWeights = tile(weight, self._k) self._model = StreamingKMeansModel(clusterCenters, clusterWeights) return self
[ "def", "setRandomCenters", "(", "self", ",", "dim", ",", "weight", ",", "seed", ")", ":", "rng", "=", "random", ".", "RandomState", "(", "seed", ")", "clusterCenters", "=", "rng", ".", "randn", "(", "self", ".", "_k", ",", "dim", ")", "clusterWeights", "=", "tile", "(", "weight", ",", "self", ".", "_k", ")", "self", ".", "_model", "=", "StreamingKMeansModel", "(", "clusterCenters", ",", "clusterWeights", ")", "return", "self" ]
Set the initial centres to be random samples from a gaussian population with constant weights.
[ "Set", "the", "initial", "centres", "to", "be", "random", "samples", "from", "a", "gaussian", "population", "with", "constant", "weights", "." ]
python
train
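The random initialisation itself is plain NumPy: k centers drawn from a standard normal in `dim` dimensions, each carrying the same weight. A minimal sketch of just the sampling step (the values are illustrative):

from numpy import random, tile

k, dim, weight, seed = 3, 2, 1.0, 42
rng = random.RandomState(seed)
clusterCenters = rng.randn(k, dim)   # k centers, dim features each
clusterWeights = tile(weight, k)     # array([1., 1., 1.])
print(clusterCenters.shape, clusterWeights)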
pbrisk/timewave
timewave/engine.py
https://github.com/pbrisk/timewave/blob/cf641391d1607a424042724c8b990d43ee270ef6/timewave/engine.py#L280-L288
def finalize(self): """ finalize simulation for consumer """ # todo sort self.result by path_num if self.result: self.result = sorted(self.result, key=lambda x: x[0]) p, r = map(list, zip(*self.result)) self.result = r
[ "def", "finalize", "(", "self", ")", ":", "# todo sort self.result by path_num", "if", "self", ".", "result", ":", "self", ".", "result", "=", "sorted", "(", "self", ".", "result", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", "p", ",", "r", "=", "map", "(", "list", ",", "zip", "(", "*", "self", ".", "result", ")", ")", "self", ".", "result", "=", "r" ]
finalize simulation for consumer
[ "finalize", "simulation", "for", "consumer" ]
python
train
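The sort-then-unzip pattern used in `finalize` (order results by path number, then drop the path numbers) can be seen in isolation with a toy result list:

result = [(2, 'b'), (0, 'a'), (1, 'c')]       # (path_num, value) pairs
result = sorted(result, key=lambda x: x[0])   # order by path number
p, r = map(list, zip(*result))                # unzip into two lists
print(p)  # [0, 1, 2]
print(r)  # ['a', 'b', 'c']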
PyCQA/pylint
pylint/utils/ast_walker.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/utils/ast_walker.py#L58-L80
def walk(self, astroid): """call visit events of astroid checkers for the given node, recurse on its children, then leave events. """ cid = astroid.__class__.__name__.lower() # Detect if the node is a new name for a deprecated alias. # In this case, favour the methods for the deprecated # alias if any, in order to maintain backwards # compatibility. visit_events = self.visit_events.get(cid, ()) leave_events = self.leave_events.get(cid, ()) if astroid.is_statement: self.nbstatements += 1 # generate events for this node on each checker for cb in visit_events or (): cb(astroid) # recurse on children for child in astroid.get_children(): self.walk(child) for cb in leave_events or (): cb(astroid)
[ "def", "walk", "(", "self", ",", "astroid", ")", ":", "cid", "=", "astroid", ".", "__class__", ".", "__name__", ".", "lower", "(", ")", "# Detect if the node is a new name for a deprecated alias.", "# In this case, favour the methods for the deprecated", "# alias if any, in order to maintain backwards", "# compatibility.", "visit_events", "=", "self", ".", "visit_events", ".", "get", "(", "cid", ",", "(", ")", ")", "leave_events", "=", "self", ".", "leave_events", ".", "get", "(", "cid", ",", "(", ")", ")", "if", "astroid", ".", "is_statement", ":", "self", ".", "nbstatements", "+=", "1", "# generate events for this node on each checker", "for", "cb", "in", "visit_events", "or", "(", ")", ":", "cb", "(", "astroid", ")", "# recurse on children", "for", "child", "in", "astroid", ".", "get_children", "(", ")", ":", "self", ".", "walk", "(", "child", ")", "for", "cb", "in", "leave_events", "or", "(", ")", ":", "cb", "(", "astroid", ")" ]
call visit events of astroid checkers for the given node, recurse on its children, then leave events.
[ "call", "visit", "events", "of", "astroid", "checkers", "for", "the", "given", "node", "recurse", "on", "its", "children", "then", "leave", "events", "." ]
python
test
rh-marketingops/dwm
dwm/dwmmain.py
https://github.com/rh-marketingops/dwm/blob/66c7d18db857afbe5d574478ceaaad6159ae7469/dwm/dwmmain.py#L291-L425
def _derive(self, record, hist=None): """ Derivation filters like 'deriveValue' to replace given input values from one or more fields. In case 'copyValue' copy value to the target field from given an input value from one field. 'deriveRegex' replace given an input value from one field, derive target field value using regular expressions. If 'deriveIncludes' applies then given an input value from one field, derive target field based on at least one of the following: includes strings, excludes strings, starts with string, ends with string :param dict record: dictionary of values to validate :param dict hist: existing input of history values """ def check_derive_options(option, derive_set_config): """ Check derive option is exist into options list and return relevant flag. :param str option: drive options value :param list derive_set_config: options list :return boolean: True or False based on option exist into options list """ return option in derive_set_config hist_obj = {} if hist is None: hist = {} for field in record: field_val_new = field_val = record[field] if field in self.fields: for derive_set in self.fields[field]['derive']: check_match = False derive_set_config = derive_set if set.issubset(set(derive_set_config['fieldSet']), record.keys()): # sorting here to ensure sub document match from # query derive_input = {val: record[val] for val in derive_set_config['fieldSet']} if derive_set_config['type'] == 'deriveValue': overwrite_flag = check_derive_options( 'overwrite', derive_set_config["options"]) blank_if_no_match_flag = check_derive_options( 'blankIfNoMatch', derive_set_config["options"]) field_val_new, hist_obj, check_match = \ DeriveDataLookup( fieldName=field, db=self.mongo, deriveInput=derive_input, overwrite=overwrite_flag, fieldVal=record[field], histObj=hist, blankIfNoMatch=blank_if_no_match_flag) elif derive_set_config['type'] == 'copyValue': overwrite_flag = check_derive_options( 'overwrite', derive_set_config["options"]) field_val_new, hist_obj, check_match = \ DeriveDataCopyValue( fieldName=field, deriveInput=derive_input, overwrite=overwrite_flag, fieldVal=record[field], histObj=hist) elif derive_set_config['type'] == 'deriveRegex': overwrite_flag = check_derive_options( 'overwrite', derive_set_config["options"]) blank_if_no_match_flag = check_derive_options( 'blankIfNoMatch', derive_set_config["options"]) field_val_new, hist_obj, check_match = \ DeriveDataRegex( fieldName=field, db=self.mongo, deriveInput=derive_input, overwrite=overwrite_flag, fieldVal=record[field], histObj=hist, blankIfNoMatch=blank_if_no_match_flag) elif derive_set_config['type'] == 'deriveIncludes': overwrite_flag = check_derive_options( 'overwrite', derive_set_config["options"]) blank_if_no_match_flag = check_derive_options( 'blankIfNoMatch', derive_set_config["options"]) field_val_new, hist_obj, check_match = \ IncludesLookup( fieldVal=record[field], lookupType='deriveIncludes', deriveFieldName= \ derive_set_config['fieldSet'][0], deriveInput=derive_input, db=self.mongo, fieldName=field, histObj=hist, overwrite=overwrite_flag, blankIfNoMatch=blank_if_no_match_flag) if check_match or field_val_new != field_val: record[field] = field_val_new break return record, hist_obj
[ "def", "_derive", "(", "self", ",", "record", ",", "hist", "=", "None", ")", ":", "def", "check_derive_options", "(", "option", ",", "derive_set_config", ")", ":", "\"\"\"\n Check derive option is exist into options list and return relevant\n flag.\n :param str option: drive options value\n :param list derive_set_config: options list\n :return boolean: True or False based on option exist into options\n list\n \"\"\"", "return", "option", "in", "derive_set_config", "hist_obj", "=", "{", "}", "if", "hist", "is", "None", ":", "hist", "=", "{", "}", "for", "field", "in", "record", ":", "field_val_new", "=", "field_val", "=", "record", "[", "field", "]", "if", "field", "in", "self", ".", "fields", ":", "for", "derive_set", "in", "self", ".", "fields", "[", "field", "]", "[", "'derive'", "]", ":", "check_match", "=", "False", "derive_set_config", "=", "derive_set", "if", "set", ".", "issubset", "(", "set", "(", "derive_set_config", "[", "'fieldSet'", "]", ")", ",", "record", ".", "keys", "(", ")", ")", ":", "# sorting here to ensure sub document match from", "# query", "derive_input", "=", "{", "val", ":", "record", "[", "val", "]", "for", "val", "in", "derive_set_config", "[", "'fieldSet'", "]", "}", "if", "derive_set_config", "[", "'type'", "]", "==", "'deriveValue'", ":", "overwrite_flag", "=", "check_derive_options", "(", "'overwrite'", ",", "derive_set_config", "[", "\"options\"", "]", ")", "blank_if_no_match_flag", "=", "check_derive_options", "(", "'blankIfNoMatch'", ",", "derive_set_config", "[", "\"options\"", "]", ")", "field_val_new", ",", "hist_obj", ",", "check_match", "=", "DeriveDataLookup", "(", "fieldName", "=", "field", ",", "db", "=", "self", ".", "mongo", ",", "deriveInput", "=", "derive_input", ",", "overwrite", "=", "overwrite_flag", ",", "fieldVal", "=", "record", "[", "field", "]", ",", "histObj", "=", "hist", ",", "blankIfNoMatch", "=", "blank_if_no_match_flag", ")", "elif", "derive_set_config", "[", "'type'", "]", "==", "'copyValue'", ":", "overwrite_flag", "=", "check_derive_options", "(", "'overwrite'", ",", "derive_set_config", "[", "\"options\"", "]", ")", "field_val_new", ",", "hist_obj", ",", "check_match", "=", "DeriveDataCopyValue", "(", "fieldName", "=", "field", ",", "deriveInput", "=", "derive_input", ",", "overwrite", "=", "overwrite_flag", ",", "fieldVal", "=", "record", "[", "field", "]", ",", "histObj", "=", "hist", ")", "elif", "derive_set_config", "[", "'type'", "]", "==", "'deriveRegex'", ":", "overwrite_flag", "=", "check_derive_options", "(", "'overwrite'", ",", "derive_set_config", "[", "\"options\"", "]", ")", "blank_if_no_match_flag", "=", "check_derive_options", "(", "'blankIfNoMatch'", ",", "derive_set_config", "[", "\"options\"", "]", ")", "field_val_new", ",", "hist_obj", ",", "check_match", "=", "DeriveDataRegex", "(", "fieldName", "=", "field", ",", "db", "=", "self", ".", "mongo", ",", "deriveInput", "=", "derive_input", ",", "overwrite", "=", "overwrite_flag", ",", "fieldVal", "=", "record", "[", "field", "]", ",", "histObj", "=", "hist", ",", "blankIfNoMatch", "=", "blank_if_no_match_flag", ")", "elif", "derive_set_config", "[", "'type'", "]", "==", "'deriveIncludes'", ":", "overwrite_flag", "=", "check_derive_options", "(", "'overwrite'", ",", "derive_set_config", "[", "\"options\"", "]", ")", "blank_if_no_match_flag", "=", "check_derive_options", "(", "'blankIfNoMatch'", ",", "derive_set_config", "[", "\"options\"", "]", ")", "field_val_new", ",", "hist_obj", ",", "check_match", "=", "IncludesLookup", "(", "fieldVal", "=", "record", 
"[", "field", "]", ",", "lookupType", "=", "'deriveIncludes'", ",", "deriveFieldName", "=", "derive_set_config", "[", "'fieldSet'", "]", "[", "0", "]", ",", "deriveInput", "=", "derive_input", ",", "db", "=", "self", ".", "mongo", ",", "fieldName", "=", "field", ",", "histObj", "=", "hist", ",", "overwrite", "=", "overwrite_flag", ",", "blankIfNoMatch", "=", "blank_if_no_match_flag", ")", "if", "check_match", "or", "field_val_new", "!=", "field_val", ":", "record", "[", "field", "]", "=", "field_val_new", "break", "return", "record", ",", "hist_obj" ]
Derivation filters like 'deriveValue' to replace given input values from one or more fields. In case 'copyValue' copy value to the target field from given an input value from one field. 'deriveRegex' replace given an input value from one field, derive target field value using regular expressions. If 'deriveIncludes' applies then given an input value from one field, derive target field based on at least one of the following: includes strings, excludes strings, starts with string, ends with string :param dict record: dictionary of values to validate :param dict hist: existing input of history values
[ "Derivation", "filters", "like", "deriveValue", "to", "replace", "given", "input", "values", "from", "one", "or", "more", "fields", ".", "In", "case", "copyValue", "copy", "value", "to", "the", "target", "field", "from", "given", "an", "input", "value", "from", "one", "field", ".", "deriveRegex", "replace", "given", "an", "input", "value", "from", "one", "field", "derive", "target", "field", "value", "using", "regular", "expressions", ".", "If", "deriveIncludes", "applies", "then", "given", "an", "input", "value", "from", "one", "field", "derive", "target", "field", "based", "on", "at", "least", "one", "of", "the", "following", ":", "includes", "strings", "excludes", "strings", "starts", "with", "string", "ends", "with", "string" ]
python
train
horazont/aioxmpp
aioxmpp/pep/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/pep/service.py#L193-L249
def publish(self, node, data, *, id_=None, access_model=None): """ Publish an item `data` in the PubSub node `node` on the PEP service associated with the user's JID. :param node: The PubSub node to publish to. :param data: The item to publish. :type data: An XSO representing the paylaod. :param id_: The id the published item shall have. :param access_model: The access model to enforce on the node. Defaults to not enforcing any access model. :returns: The PubSub id of the published item or :data:`None` if it is unknown. :raises RuntimeError: if PEP is not supported. :raises RuntimeError: if `access_model` is set and `publish_options` is not supported by the server If no `id_` is given it is generated by the server (and may be returned). `access_model` defines a pre-condition on the access model used for the `node`. The valid values depend on the service; commonly useful ``"presence"`` (the default for PEP; allows access to anyone who can receive the presence) and ``"whitelist"`` (allows access only to a whitelist (which defaults to the own account only)). """ publish_options = None def autocreate_publish_options(): nonlocal publish_options if publish_options is None: publish_options = aioxmpp.forms.Data( aioxmpp.forms.DataType.SUBMIT ) publish_options.fields.append( aioxmpp.forms.Field( type_=aioxmpp.forms.FieldType.HIDDEN, var="FORM_TYPE", values=[ "http://jabber.org/protocol/pubsub#publish-options" ] ) ) return publish_options if access_model is not None: autocreate_publish_options() publish_options.fields.append(aioxmpp.forms.Field( var="pubsub#access_model", values=[access_model], )) yield from self._check_for_pep() return (yield from self._pubsub.publish( None, node, data, id_=id_, publish_options=publish_options ))
[ "def", "publish", "(", "self", ",", "node", ",", "data", ",", "*", ",", "id_", "=", "None", ",", "access_model", "=", "None", ")", ":", "publish_options", "=", "None", "def", "autocreate_publish_options", "(", ")", ":", "nonlocal", "publish_options", "if", "publish_options", "is", "None", ":", "publish_options", "=", "aioxmpp", ".", "forms", ".", "Data", "(", "aioxmpp", ".", "forms", ".", "DataType", ".", "SUBMIT", ")", "publish_options", ".", "fields", ".", "append", "(", "aioxmpp", ".", "forms", ".", "Field", "(", "type_", "=", "aioxmpp", ".", "forms", ".", "FieldType", ".", "HIDDEN", ",", "var", "=", "\"FORM_TYPE\"", ",", "values", "=", "[", "\"http://jabber.org/protocol/pubsub#publish-options\"", "]", ")", ")", "return", "publish_options", "if", "access_model", "is", "not", "None", ":", "autocreate_publish_options", "(", ")", "publish_options", ".", "fields", ".", "append", "(", "aioxmpp", ".", "forms", ".", "Field", "(", "var", "=", "\"pubsub#access_model\"", ",", "values", "=", "[", "access_model", "]", ",", ")", ")", "yield", "from", "self", ".", "_check_for_pep", "(", ")", "return", "(", "yield", "from", "self", ".", "_pubsub", ".", "publish", "(", "None", ",", "node", ",", "data", ",", "id_", "=", "id_", ",", "publish_options", "=", "publish_options", ")", ")" ]
Publish an item `data` in the PubSub node `node` on the PEP service associated with the user's JID. :param node: The PubSub node to publish to. :param data: The item to publish. :type data: An XSO representing the paylaod. :param id_: The id the published item shall have. :param access_model: The access model to enforce on the node. Defaults to not enforcing any access model. :returns: The PubSub id of the published item or :data:`None` if it is unknown. :raises RuntimeError: if PEP is not supported. :raises RuntimeError: if `access_model` is set and `publish_options` is not supported by the server If no `id_` is given it is generated by the server (and may be returned). `access_model` defines a pre-condition on the access model used for the `node`. The valid values depend on the service; commonly useful ``"presence"`` (the default for PEP; allows access to anyone who can receive the presence) and ``"whitelist"`` (allows access only to a whitelist (which defaults to the own account only)).
[ "Publish", "an", "item", "data", "in", "the", "PubSub", "node", "node", "on", "the", "PEP", "service", "associated", "with", "the", "user", "s", "JID", "." ]
python
train
StanfordVL/robosuite
robosuite/wrappers/ik_wrapper.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/wrappers/ik_wrapper.py#L53-L59
def set_robot_joint_positions(self, positions): """ Overrides the function to set the joint positions directly, since we need to notify the IK controller of the change. """ self.env.set_robot_joint_positions(positions) self.controller.sync_state()
[ "def", "set_robot_joint_positions", "(", "self", ",", "positions", ")", ":", "self", ".", "env", ".", "set_robot_joint_positions", "(", "positions", ")", "self", ".", "controller", ".", "sync_state", "(", ")" ]
Overrides the function to set the joint positions directly, since we need to notify the IK controller of the change.
[ "Overrides", "the", "function", "to", "set", "the", "joint", "positions", "directly", "since", "we", "need", "to", "notify", "the", "IK", "controller", "of", "the", "change", "." ]
python
train
log2timeline/dfvfs
dfvfs/resolver/cache.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/resolver/cache.py#L195-L210
def RemoveObject(self, identifier): """Removes a cached object based on the identifier. This method ignores the cache value reference count. Args: identifier (str): VFS object identifier. Raises: KeyError: if the VFS object is not found in the cache. """ if identifier not in self._values: raise KeyError('Missing cached object for identifier: {0:s}'.format( identifier)) del self._values[identifier]
[ "def", "RemoveObject", "(", "self", ",", "identifier", ")", ":", "if", "identifier", "not", "in", "self", ".", "_values", ":", "raise", "KeyError", "(", "'Missing cached object for identifier: {0:s}'", ".", "format", "(", "identifier", ")", ")", "del", "self", ".", "_values", "[", "identifier", "]" ]
Removes a cached object based on the identifier. This method ignores the cache value reference count. Args: identifier (str): VFS object identifier. Raises: KeyError: if the VFS object is not found in the cache.
[ "Removes", "a", "cached", "object", "based", "on", "the", "identifier", "." ]
python
train
ucfopen/canvasapi
canvasapi/canvas.py
https://github.com/ucfopen/canvasapi/blob/319064b5fc97ba54250af683eb98723ef3f76cf8/canvasapi/canvas.py#L450-L468
def get_group_category(self, category): """ Get a single group category. :calls: `GET /api/v1/group_categories/:group_category_id \ <https://canvas.instructure.com/doc/api/group_categories.html#method.group_categories.show>`_ :param category: The object or ID of the category. :type category: :class:`canvasapi.group.GroupCategory` or int :rtype: :class:`canvasapi.group.GroupCategory` """ category_id = obj_or_id(category, "category", (GroupCategory,)) response = self.__requester.request( 'GET', 'group_categories/{}'.format(category_id) ) return GroupCategory(self.__requester, response.json())
[ "def", "get_group_category", "(", "self", ",", "category", ")", ":", "category_id", "=", "obj_or_id", "(", "category", ",", "\"category\"", ",", "(", "GroupCategory", ",", ")", ")", "response", "=", "self", ".", "__requester", ".", "request", "(", "'GET'", ",", "'group_categories/{}'", ".", "format", "(", "category_id", ")", ")", "return", "GroupCategory", "(", "self", ".", "__requester", ",", "response", ".", "json", "(", ")", ")" ]
Get a single group category. :calls: `GET /api/v1/group_categories/:group_category_id \ <https://canvas.instructure.com/doc/api/group_categories.html#method.group_categories.show>`_ :param category: The object or ID of the category. :type category: :class:`canvasapi.group.GroupCategory` or int :rtype: :class:`canvasapi.group.GroupCategory`
[ "Get", "a", "single", "group", "category", "." ]
python
train
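Typical usage goes through a `Canvas` instance; the URL, token, and category id below are placeholders, not real credentials:

from canvasapi import Canvas

canvas = Canvas("https://example.instructure.com", "API_KEY")  # placeholder base URL and token
category = canvas.get_group_category(1234)  # accepts an id or a GroupCategory object
print(category)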
secdev/scapy
scapy/layers/dns.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/dns.py#L550-L599
def RRlist2bitmap(lst): """ Encode a list of integers representing Resource Records to a bitmap field used in the NSEC Resource Record. """ # RFC 4034, 4.1.2. The Type Bit Maps Field import math bitmap = b"" lst = [abs(x) for x in sorted(set(lst)) if x <= 65535] # number of window blocks max_window_blocks = int(math.ceil(lst[-1] / 256.)) min_window_blocks = int(math.floor(lst[0] / 256.)) if min_window_blocks == max_window_blocks: max_window_blocks += 1 for wb in range(min_window_blocks, max_window_blocks + 1): # First, filter out RR not encoded in the current window block # i.e. keep everything between 256*wb <= 256*(wb+1) rrlist = sorted(x for x in lst if 256 * wb <= x < 256 * (wb + 1)) if not rrlist: continue # Compute the number of bytes used to store the bitmap if rrlist[-1] == 0: # only one element in the list bytes_count = 1 else: max = rrlist[-1] - 256 * wb bytes_count = int(math.ceil(max // 8)) + 1 # use at least 1 byte if bytes_count > 32: # Don't encode more than 256 bits / values bytes_count = 32 bitmap += struct.pack("BB", wb, bytes_count) # Generate the bitmap # The idea is to remove out of range Resource Records with these steps # 1. rescale to fit into 8 bits # 2. x gives the bit position ; compute the corresponding value # 3. sum everything bitmap += b"".join( struct.pack( b"B", sum(2 ** (7 - (x - 256 * wb) + (tmp * 8)) for x in rrlist if 256 * wb + 8 * tmp <= x < 256 * wb + 8 * tmp + 8), ) for tmp in range(bytes_count) ) return bitmap
[ "def", "RRlist2bitmap", "(", "lst", ")", ":", "# RFC 4034, 4.1.2. The Type Bit Maps Field", "import", "math", "bitmap", "=", "b\"\"", "lst", "=", "[", "abs", "(", "x", ")", "for", "x", "in", "sorted", "(", "set", "(", "lst", ")", ")", "if", "x", "<=", "65535", "]", "# number of window blocks", "max_window_blocks", "=", "int", "(", "math", ".", "ceil", "(", "lst", "[", "-", "1", "]", "/", "256.", ")", ")", "min_window_blocks", "=", "int", "(", "math", ".", "floor", "(", "lst", "[", "0", "]", "/", "256.", ")", ")", "if", "min_window_blocks", "==", "max_window_blocks", ":", "max_window_blocks", "+=", "1", "for", "wb", "in", "range", "(", "min_window_blocks", ",", "max_window_blocks", "+", "1", ")", ":", "# First, filter out RR not encoded in the current window block", "# i.e. keep everything between 256*wb <= 256*(wb+1)", "rrlist", "=", "sorted", "(", "x", "for", "x", "in", "lst", "if", "256", "*", "wb", "<=", "x", "<", "256", "*", "(", "wb", "+", "1", ")", ")", "if", "not", "rrlist", ":", "continue", "# Compute the number of bytes used to store the bitmap", "if", "rrlist", "[", "-", "1", "]", "==", "0", ":", "# only one element in the list", "bytes_count", "=", "1", "else", ":", "max", "=", "rrlist", "[", "-", "1", "]", "-", "256", "*", "wb", "bytes_count", "=", "int", "(", "math", ".", "ceil", "(", "max", "//", "8", ")", ")", "+", "1", "# use at least 1 byte", "if", "bytes_count", ">", "32", ":", "# Don't encode more than 256 bits / values", "bytes_count", "=", "32", "bitmap", "+=", "struct", ".", "pack", "(", "\"BB\"", ",", "wb", ",", "bytes_count", ")", "# Generate the bitmap", "# The idea is to remove out of range Resource Records with these steps", "# 1. rescale to fit into 8 bits", "# 2. x gives the bit position ; compute the corresponding value", "# 3. sum everything", "bitmap", "+=", "b\"\"", ".", "join", "(", "struct", ".", "pack", "(", "b\"B\"", ",", "sum", "(", "2", "**", "(", "7", "-", "(", "x", "-", "256", "*", "wb", ")", "+", "(", "tmp", "*", "8", ")", ")", "for", "x", "in", "rrlist", "if", "256", "*", "wb", "+", "8", "*", "tmp", "<=", "x", "<", "256", "*", "wb", "+", "8", "*", "tmp", "+", "8", ")", ",", ")", "for", "tmp", "in", "range", "(", "bytes_count", ")", ")", "return", "bitmap" ]
Encode a list of integers representing Resource Records to a bitmap field used in the NSEC Resource Record.
[ "Encode", "a", "list", "of", "integers", "representing", "Resource", "Records", "to", "a", "bitmap", "field", "used", "in", "the", "NSEC", "Resource", "Record", "." ]
python
train
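Each window block in the resulting bitmap is laid out as (window, length, bitmap bytes), where bit b of byte i (most-significant bit first) marks RR type 256*window + 8*i + b. A small decoder sketch for that layout, handy for checking that the encoder round-trips (hypothetical helper, not part of scapy):

import struct

def bitmap_to_rrlist(bitmap):
    """Decode an NSEC type-bitmap field back into a sorted list of RR type numbers."""
    types = []
    offset = 0
    while offset < len(bitmap):
        window, length = struct.unpack("BB", bitmap[offset:offset + 2])
        offset += 2
        for i, byte in enumerate(bitmap[offset:offset + length]):
            for bit in range(8):
                if byte & (0x80 >> bit):
                    types.append(256 * window + 8 * i + bit)
        offset += length
    return types

# e.g. bitmap_to_rrlist(RRlist2bitmap([1, 2, 15, 46, 47])) should return the input list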
onnx/onnx
onnx/external_data_helper.py
https://github.com/onnx/onnx/blob/2f7dc10f03a072526d94b6820cedbf2a1ec5a2c4/onnx/external_data_helper.py#L174-L181
def _get_attribute_tensors(onnx_model_proto): # type: (ModelProto) -> Iterable[TensorProto] """Create an iterator of tensors from node attributes of an ONNX model.""" for node in onnx_model_proto.graph.node: for attribute in node.attribute: if attribute.HasField("t"): yield attribute.t for tensor in attribute.tensors: yield tensor
[ "def", "_get_attribute_tensors", "(", "onnx_model_proto", ")", ":", "# type: (ModelProto) -> Iterable[TensorProto]", "for", "node", "in", "onnx_model_proto", ".", "graph", ".", "node", ":", "for", "attribute", "in", "node", ".", "attribute", ":", "if", "attribute", ".", "HasField", "(", "\"t\"", ")", ":", "yield", "attribute", ".", "t", "for", "tensor", "in", "attribute", ".", "tensors", ":", "yield", "tensor" ]
Create an iterator of tensors from node attributes of an ONNX model.
[ "Create", "an", "iterator", "of", "tensors", "from", "node", "attributes", "of", "an", "ONNX", "model", "." ]
python
train
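A typical use of this helper is scanning a loaded model for tensors embedded in node attributes, for example when externalising weights. A minimal sketch; the model path is illustrative:

import onnx
from onnx.external_data_helper import _get_attribute_tensors

model = onnx.load("model.onnx")  # illustrative path to an existing ONNX model
for tensor in _get_attribute_tensors(model):
    # Each yielded object is a TensorProto stored inside a node attribute.
    print(tensor.name, list(tensor.dims))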
toomore/grs
grs/twseno.py
https://github.com/toomore/grs/blob/a1285cb57878284a886952968be9e31fbfa595dd/grs/twseno.py#L55-L64
def __industry_code(self): ''' import industry_code ''' csv_path = os.path.join(os.path.dirname(__file__), self.industry_code_files) with open(csv_path) as csv_file: csv_data = csv.reader(csv_file) result = {} for i in csv_data: result[i[0]] = i[1].decode('utf-8') return result
[ "def", "__industry_code", "(", "self", ")", ":", "csv_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "self", ".", "industry_code_files", ")", "with", "open", "(", "csv_path", ")", "as", "csv_file", ":", "csv_data", "=", "csv", ".", "reader", "(", "csv_file", ")", "result", "=", "{", "}", "for", "i", "in", "csv_data", ":", "result", "[", "i", "[", "0", "]", "]", "=", "i", "[", "1", "]", ".", "decode", "(", "'utf-8'", ")", "return", "result" ]
import industry_code
[ "import", "industry_code" ]
python
train
bitesofcode/projexui
projexui/widgets/xorbtreewidget/xorbtreewidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbtreewidget/xorbtreewidget.py#L2179-L2210
def setTableType(self, tableType): """ Defines the table class type that this tree will be displaying. :param table | <subclass of orb.Table> """ if tableType == self._tableType: return # clear all the information blocked = self.signalsBlocked() self.blockAllSignals(True) # only clear if necessary if self._tableType: self.clearAll() # update the table type data self._tableType = tableType if tableType: self._tableTypeName = tableType.__name__ else: self._tableTypeName = '' self.initializeColumns() self.blockAllSignals(blocked) if not self.signalsBlocked(): self.tableTypeChanged.emit() self.recordsChanged.emit()
[ "def", "setTableType", "(", "self", ",", "tableType", ")", ":", "if", "tableType", "==", "self", ".", "_tableType", ":", "return", "# clear all the information\r", "blocked", "=", "self", ".", "signalsBlocked", "(", ")", "self", ".", "blockAllSignals", "(", "True", ")", "# only clear if necessary\r", "if", "self", ".", "_tableType", ":", "self", ".", "clearAll", "(", ")", "# update the table type data\r", "self", ".", "_tableType", "=", "tableType", "if", "tableType", ":", "self", ".", "_tableTypeName", "=", "tableType", ".", "__name__", "else", ":", "self", ".", "_tableTypeName", "=", "''", "self", ".", "initializeColumns", "(", ")", "self", ".", "blockAllSignals", "(", "blocked", ")", "if", "not", "self", ".", "signalsBlocked", "(", ")", ":", "self", ".", "tableTypeChanged", ".", "emit", "(", ")", "self", ".", "recordsChanged", ".", "emit", "(", ")" ]
Defines the table class type that this tree will be displaying. :param table | <subclass of orb.Table>
[ "Defines", "the", "table", "class", "type", "that", "this", "tree", "will", "be", "displaying", ".", ":", "param", "table", "|", "<subclass", "of", "orb", ".", "Table", ">" ]
python
train
apple/turicreate
src/unity/python/turicreate/util/_cloudpickle.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/_cloudpickle.py#L1134-L1141
def _rehydrate_skeleton_class(skeleton_class, class_dict): """Put attributes from `class_dict` back on `skeleton_class`. See CloudPickler.save_dynamic_class for more info. """ for attrname, attr in class_dict.items(): setattr(skeleton_class, attrname, attr) return skeleton_class
[ "def", "_rehydrate_skeleton_class", "(", "skeleton_class", ",", "class_dict", ")", ":", "for", "attrname", ",", "attr", "in", "class_dict", ".", "items", "(", ")", ":", "setattr", "(", "skeleton_class", ",", "attrname", ",", "attr", ")", "return", "skeleton_class" ]
Put attributes from `class_dict` back on `skeleton_class`. See CloudPickler.save_dynamic_class for more info.
[ "Put", "attributes", "from", "class_dict", "back", "on", "skeleton_class", "." ]
python
train
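The cloudpickle helper above simply re-attaches a captured class dictionary to an empty skeleton class. A self-contained sketch of the same idea; the class and attribute names here are made up for illustration:

def rehydrate_skeleton_class(skeleton_class, class_dict):
    # Copy every captured attribute back onto the bare class object.
    for attrname, attr in class_dict.items():
        setattr(skeleton_class, attrname, attr)
    return skeleton_class

Skeleton = type("Point", (), {})                                # empty skeleton class
Point = rehydrate_skeleton_class(Skeleton, {"dims": 2, "origin": (0, 0)})
print(Point.dims, Point.origin)                                 # -> 2 (0, 0)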
Crunch-io/crunch-cube
src/cr/cube/crunch_cube.py
https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/crunch_cube.py#L525-L613
def proportions( self, axis=None, weighted=True, include_transforms_for_dims=None, include_mr_cat=False, prune=False, ): """Return percentage values for cube as `numpy.ndarray`. This function calculates the proportions across the selected axis of a crunch cube. For most variable types, it means the value divided by the margin value. For a multiple-response variable, the value is divided by the sum of selected and non-selected slices. *axis* (int): base axis of proportions calculation. If no axis is provided, calculations are done across the entire table. *weighted* (bool): Specifies weighted or non-weighted proportions. *include_transforms_for_dims* (list): Also include headings and subtotals transformations for the provided dimensions. If the dimensions have the transformations, they'll be included in the resulting numpy array. If the dimensions don't have the transformations, nothing will happen (the result will be the same as if the argument weren't provided). *include_transforms_for_dims* (list): Include headers and subtotals (H&S) across various dimensions. The dimensions are provided as list elements. For example: "include_transforms_for_dims=[0, 1]" instructs the CrunchCube to return H&S for both rows and columns (if it's a 2D cube). *include_mr_cat* (bool): Include MR categories. *prune* (bool): Instructs the CrunchCube to prune empty rows/cols. Emptiness is determined by the state of the margin (if it's either 0 or nan at certain index). If it is, the corresponding row/col is not included in the result. Example 1:: >>> cube = CrunchCube(fixt_cat_x_cat) np.array([ [5, 2], [5, 3], ]) >>> cube.proportions() np.array([ [0.3333333, 0.1333333], [0.3333333, 0.2000000], ]) Example 2:: >>> cube = CrunchCube(fixt_cat_x_cat) np.array([ [5, 2], [5, 3], ]) >>> cube.proportions(axis=0) np.array([ [0.5, 0.4], [0.5, 0.6], ]) """ # Calculate numerator from table (include all H&S dimensions). table = self._measure(weighted).raw_cube_array num = self._apply_subtotals( self._apply_missings(table), include_transforms_for_dims ) proportions = num / self._denominator( weighted, include_transforms_for_dims, axis ) if not include_mr_cat: proportions = self._drop_mr_cat_dims(proportions) # Apply correct mask (based on the as_array shape) arr = self.as_array( prune=prune, include_transforms_for_dims=include_transforms_for_dims ) if isinstance(arr, np.ma.core.MaskedArray): proportions = np.ma.masked_array(proportions, arr.mask) return proportions
[ "def", "proportions", "(", "self", ",", "axis", "=", "None", ",", "weighted", "=", "True", ",", "include_transforms_for_dims", "=", "None", ",", "include_mr_cat", "=", "False", ",", "prune", "=", "False", ",", ")", ":", "# Calculate numerator from table (include all H&S dimensions).", "table", "=", "self", ".", "_measure", "(", "weighted", ")", ".", "raw_cube_array", "num", "=", "self", ".", "_apply_subtotals", "(", "self", ".", "_apply_missings", "(", "table", ")", ",", "include_transforms_for_dims", ")", "proportions", "=", "num", "/", "self", ".", "_denominator", "(", "weighted", ",", "include_transforms_for_dims", ",", "axis", ")", "if", "not", "include_mr_cat", ":", "proportions", "=", "self", ".", "_drop_mr_cat_dims", "(", "proportions", ")", "# Apply correct mask (based on the as_array shape)", "arr", "=", "self", ".", "as_array", "(", "prune", "=", "prune", ",", "include_transforms_for_dims", "=", "include_transforms_for_dims", ")", "if", "isinstance", "(", "arr", ",", "np", ".", "ma", ".", "core", ".", "MaskedArray", ")", ":", "proportions", "=", "np", ".", "ma", ".", "masked_array", "(", "proportions", ",", "arr", ".", "mask", ")", "return", "proportions" ]
Return percentage values for cube as `numpy.ndarray`. This function calculates the proportions across the selected axis of a crunch cube. For most variable types, it means the value divided by the margin value. For a multiple-response variable, the value is divided by the sum of selected and non-selected slices. *axis* (int): base axis of proportions calculation. If no axis is provided, calculations are done across the entire table. *weighted* (bool): Specifies weighted or non-weighted proportions. *include_transforms_for_dims* (list): Also include headings and subtotals transformations for the provided dimensions. If the dimensions have the transformations, they'll be included in the resulting numpy array. If the dimensions don't have the transformations, nothing will happen (the result will be the same as if the argument weren't provided). *include_transforms_for_dims* (list): Include headers and subtotals (H&S) across various dimensions. The dimensions are provided as list elements. For example: "include_transforms_for_dims=[0, 1]" instructs the CrunchCube to return H&S for both rows and columns (if it's a 2D cube). *include_mr_cat* (bool): Include MR categories. *prune* (bool): Instructs the CrunchCube to prune empty rows/cols. Emptiness is determined by the state of the margin (if it's either 0 or nan at certain index). If it is, the corresponding row/col is not included in the result. Example 1:: >>> cube = CrunchCube(fixt_cat_x_cat) np.array([ [5, 2], [5, 3], ]) >>> cube.proportions() np.array([ [0.3333333, 0.1333333], [0.3333333, 0.2000000], ]) Example 2:: >>> cube = CrunchCube(fixt_cat_x_cat) np.array([ [5, 2], [5, 3], ]) >>> cube.proportions(axis=0) np.array([ [0.5, 0.4], [0.5, 0.6], ])
[ "Return", "percentage", "values", "for", "cube", "as", "numpy", ".", "ndarray", "." ]
python
train
dwwkelly/note
note/server.py
https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/server.py#L48-L71
def Handle_Events(self, events): """ Handle events from poll() :events: A list of tuples form zmq.poll() :type events: list :returns: None """ for e in events: sock = e[0] event_type = e[1] if event_type == zmq.POLLIN: msg = sock.recv() reply = self.Handle_Receive(msg) sock.send(reply) elif event_type == zmq.POLLOUT: pass # FIXME -- handle this correctly elif event_type == zmq.POLLERR: pass # FIXME -- handle this correctly else: pass
[ "def", "Handle_Events", "(", "self", ",", "events", ")", ":", "for", "e", "in", "events", ":", "sock", "=", "e", "[", "0", "]", "event_type", "=", "e", "[", "1", "]", "if", "event_type", "==", "zmq", ".", "POLLIN", ":", "msg", "=", "sock", ".", "recv", "(", ")", "reply", "=", "self", ".", "Handle_Receive", "(", "msg", ")", "sock", ".", "send", "(", "reply", ")", "elif", "event_type", "==", "zmq", ".", "POLLOUT", ":", "pass", "# FIXME -- handle this correctly", "elif", "event_type", "==", "zmq", ".", "POLLERR", ":", "pass", "# FIXME -- handle this correctly", "else", ":", "pass" ]
Handle events from poll() :events: A list of tuples form zmq.poll() :type events: list :returns: None
[ "Handle", "events", "from", "poll", "()" ]
python
train
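The note server record documents a dispatch loop over zmq.poll() results. A stand-alone sketch of the same pattern, assuming pyzmq is installed; the endpoint and the echo reply stand in for the original Handle_Receive handler:

import zmq

context = zmq.Context()
sock = context.socket(zmq.REP)
sock.bind("tcp://127.0.0.1:5555")            # hypothetical endpoint

poller = zmq.Poller()
poller.register(sock, zmq.POLLIN)

while True:
    events = poller.poll(timeout=1000)       # list of (socket, event_type) tuples
    for s, event_type in events:
        if event_type == zmq.POLLIN:
            msg = s.recv()
            s.send(b"echo: " + msg)          # placeholder for the real reply logic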
bcbio/bcbio-nextgen
bcbio/pipeline/datadict.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/datadict.py#L223-L235
def get_input_sequence_files(data, default=None): """ returns the input sequencing files, these can be single or paired FASTQ files or BAM files """ if "files" not in data or data.get("files") is None: file1, file2 = None, None elif len(data["files"]) == 2: file1, file2 = data["files"] else: assert len(data["files"]) == 1, data["files"] file1, file2 = data["files"][0], None return file1, file2
[ "def", "get_input_sequence_files", "(", "data", ",", "default", "=", "None", ")", ":", "if", "\"files\"", "not", "in", "data", "or", "data", ".", "get", "(", "\"files\"", ")", "is", "None", ":", "file1", ",", "file2", "=", "None", ",", "None", "elif", "len", "(", "data", "[", "\"files\"", "]", ")", "==", "2", ":", "file1", ",", "file2", "=", "data", "[", "\"files\"", "]", "else", ":", "assert", "len", "(", "data", "[", "\"files\"", "]", ")", "==", "1", ",", "data", "[", "\"files\"", "]", "file1", ",", "file2", "=", "data", "[", "\"files\"", "]", "[", "0", "]", ",", "None", "return", "file1", ",", "file2" ]
returns the input sequencing files, these can be single or paired FASTQ files or BAM files
[ "returns", "the", "input", "sequencing", "files", "these", "can", "be", "single", "or", "paired", "FASTQ", "files", "or", "BAM", "files" ]
python
train
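The bcbio helper above unpacks one or two input sequence files from a sample dictionary, returning None for the second file when the input is single-end or BAM. A short usage sketch with hypothetical file names:

def get_input_sequence_files(data, default=None):
    # Returns (file1, file2); file2 is None for single-file input.
    if "files" not in data or data.get("files") is None:
        file1, file2 = None, None
    elif len(data["files"]) == 2:
        file1, file2 = data["files"]
    else:
        assert len(data["files"]) == 1, data["files"]
        file1, file2 = data["files"][0], None
    return file1, file2

print(get_input_sequence_files({"files": ["sample_R1.fastq.gz", "sample_R2.fastq.gz"]}))
print(get_input_sequence_files({"files": ["sample.bam"]}))
print(get_input_sequence_files({}))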
asifpy/django-crudbuilder
crudbuilder/views.py
https://github.com/asifpy/django-crudbuilder/blob/9de1c6fa555086673dd7ccc351d4b771c6192489/crudbuilder/views.py#L85-L111
def generate_list_view(self): """Generate class based view for ListView""" name = model_class_form(self.model + 'ListView') list_args = dict( model=self.get_model_class, context_object_name=plural(self.model), template_name=self.get_template('list'), table_class=self.get_actual_table(), context_table_name='table_objects', crud=self.crud, permissions=self.view_permission('list'), permission_required=self.check_permission_required, login_required=self.check_login_required, table_pagination={'per_page': self.tables2_pagination or 10}, custom_queryset=self.custom_queryset, custom_context=self.custom_context, custom_postfix_url=self.custom_postfix_url ) list_class = type( name, (BaseListViewMixin, SingleTableView), list_args ) self.classes[name] = list_class return list_class
[ "def", "generate_list_view", "(", "self", ")", ":", "name", "=", "model_class_form", "(", "self", ".", "model", "+", "'ListView'", ")", "list_args", "=", "dict", "(", "model", "=", "self", ".", "get_model_class", ",", "context_object_name", "=", "plural", "(", "self", ".", "model", ")", ",", "template_name", "=", "self", ".", "get_template", "(", "'list'", ")", ",", "table_class", "=", "self", ".", "get_actual_table", "(", ")", ",", "context_table_name", "=", "'table_objects'", ",", "crud", "=", "self", ".", "crud", ",", "permissions", "=", "self", ".", "view_permission", "(", "'list'", ")", ",", "permission_required", "=", "self", ".", "check_permission_required", ",", "login_required", "=", "self", ".", "check_login_required", ",", "table_pagination", "=", "{", "'per_page'", ":", "self", ".", "tables2_pagination", "or", "10", "}", ",", "custom_queryset", "=", "self", ".", "custom_queryset", ",", "custom_context", "=", "self", ".", "custom_context", ",", "custom_postfix_url", "=", "self", ".", "custom_postfix_url", ")", "list_class", "=", "type", "(", "name", ",", "(", "BaseListViewMixin", ",", "SingleTableView", ")", ",", "list_args", ")", "self", ".", "classes", "[", "name", "]", "=", "list_class", "return", "list_class" ]
Generate class based view for ListView
[ "Generate", "class", "based", "view", "for", "ListView" ]
python
train
cgrok/cr-async
crasync/core.py
https://github.com/cgrok/cr-async/blob/f65a968e54704168706d137d1ba662f55f8ab852/crasync/core.py#L82-L91
async def get_profile(self, *tags): '''Get a profile object using tag(s)''' url = '{0.BASE}/profile/{1}'.format(self, ','.join(tags)) data = await self.request(url) if isinstance(data, list): return [Profile(self, c) for c in data] else: return Profile(self, data)
[ "async", "def", "get_profile", "(", "self", ",", "*", "tags", ")", ":", "url", "=", "'{0.BASE}/profile/{1}'", ".", "format", "(", "self", ",", "','", ".", "join", "(", "tags", ")", ")", "data", "=", "await", "self", ".", "request", "(", "url", ")", "if", "isinstance", "(", "data", ",", "list", ")", ":", "return", "[", "Profile", "(", "self", ",", "c", ")", "for", "c", "in", "data", "]", "else", ":", "return", "Profile", "(", "self", ",", "data", ")" ]
Get a profile object using tag(s)
[ "Get", "a", "profile", "object", "using", "tag", "(", "s", ")" ]
python
train
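The crasync record documents an awaitable that returns a single Profile for one tag and a list for several. A usage sketch under the assumption that the library exposes this coroutine on a Client class with a no-argument constructor; both the class name and the tags below are assumptions, not confirmed by the record:

import asyncio
import crasync

async def main():
    client = crasync.Client()                        # assumed constructor
    one = await client.get_profile("2PP")            # single tag -> Profile
    many = await client.get_profile("2PP", "8QQ")    # several tags -> list of Profile
    print(one, many)

asyncio.get_event_loop().run_until_complete(main())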
SMTG-UCL/sumo
sumo/plotting/__init__.py
https://github.com/SMTG-UCL/sumo/blob/47aec6bbfa033a624435a65bd4edabd18bfb437f/sumo/plotting/__init__.py#L145-L157
def power_tick(val, pos, times_sign=r'\times'): """Custom power ticker function. """ if val == 0: return r'$\mathregular{0}$' elif val < 0: exponent = int(np.log10(-val)) else: exponent = int(np.log10(val)) coeff = val / 10**exponent return r'$\mathregular{{{:.1f} {} 10^{:2d}}}$'.format(coeff, times_sign, exponent)
[ "def", "power_tick", "(", "val", ",", "pos", ",", "times_sign", "=", "r'\\times'", ")", ":", "if", "val", "==", "0", ":", "return", "r'$\\mathregular{0}$'", "elif", "val", "<", "0", ":", "exponent", "=", "int", "(", "np", ".", "log10", "(", "-", "val", ")", ")", "else", ":", "exponent", "=", "int", "(", "np", ".", "log10", "(", "val", ")", ")", "coeff", "=", "val", "/", "10", "**", "exponent", "return", "r'$\\mathregular{{{:.1f} {} 10^{:2d}}}$'", ".", "format", "(", "coeff", ",", "times_sign", ",", "exponent", ")" ]
Custom power ticker function.
[ "Custom", "power", "ticker", "function", "." ]
python
train
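The sumo record above defines a tick formatter that renders values as coefficient times a power of ten. A hedged sketch of wiring such a function into a matplotlib axis via FuncFormatter; the plotted data and output path are placeholders:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter

def power_tick(val, pos, times_sign=r'\times'):
    # Render a tick value as coefficient x 10^exponent in LaTeX math text.
    if val == 0:
        return r'$\mathregular{0}$'
    elif val < 0:
        exponent = int(np.log10(-val))
    else:
        exponent = int(np.log10(val))
    coeff = val / 10**exponent
    return r'$\mathregular{{{:.1f} {} 10^{:2d}}}$'.format(coeff, times_sign, exponent)

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 5e4, 2e5])
ax.yaxis.set_major_formatter(FuncFormatter(power_tick))
plt.savefig("power_ticks.png")   # hypothetical output file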
saltstack/salt
salt/modules/keystone.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/keystone.py#L232-L255
def ec2_credentials_delete(user_id=None, name=None, access_key=None, profile=None, **connection_args): ''' Delete EC2-compatible credentials CLI Examples: .. code-block:: bash salt '*' keystone.ec2_credentials_delete \ 860f8c2c38ca4fab989f9bc56a061a64 access_key=5f66d2f24f604b8bb9cd28886106f442 salt '*' keystone.ec2_credentials_delete name=admin \ access_key=5f66d2f24f604b8bb9cd28886106f442 ''' kstone = auth(profile, **connection_args) if name: user_id = user_get(name=name, profile=None, **connection_args)[name]['id'] if not user_id: return {'Error': 'Could not resolve User ID'} kstone.ec2.delete(user_id, access_key) return 'ec2 key "{0}" deleted under user id "{1}"'.format(access_key, user_id)
[ "def", "ec2_credentials_delete", "(", "user_id", "=", "None", ",", "name", "=", "None", ",", "access_key", "=", "None", ",", "profile", "=", "None", ",", "*", "*", "connection_args", ")", ":", "kstone", "=", "auth", "(", "profile", ",", "*", "*", "connection_args", ")", "if", "name", ":", "user_id", "=", "user_get", "(", "name", "=", "name", ",", "profile", "=", "None", ",", "*", "*", "connection_args", ")", "[", "name", "]", "[", "'id'", "]", "if", "not", "user_id", ":", "return", "{", "'Error'", ":", "'Could not resolve User ID'", "}", "kstone", ".", "ec2", ".", "delete", "(", "user_id", ",", "access_key", ")", "return", "'ec2 key \"{0}\" deleted under user id \"{1}\"'", ".", "format", "(", "access_key", ",", "user_id", ")" ]
Delete EC2-compatible credentials CLI Examples: .. code-block:: bash salt '*' keystone.ec2_credentials_delete \ 860f8c2c38ca4fab989f9bc56a061a64 access_key=5f66d2f24f604b8bb9cd28886106f442 salt '*' keystone.ec2_credentials_delete name=admin \ access_key=5f66d2f24f604b8bb9cd28886106f442
[ "Delete", "EC2", "-", "compatible", "credentials" ]
python
train
ckan/losser
losser/cli.py
https://github.com/ckan/losser/blob/fd0832d9fa93cabe9ce9a9153dc923f2cf39cb5f/losser/cli.py#L153-L211
def make_parser(add_help=True, exclude_args=None): """Return an argparse.ArgumentParser object with losser's arguments. Other projects can call this to get an ArgumentParser with losser's command line interface to use as a parent parser for their own parser. For example:: parent_parser = losser.cli.make_parser( add_help=False, exclude_args=["-i"]) parser = argparse.ArgumentParser( description="Export datasets from a CKAN site to JSON or CSV.", parents=[parent_parser]) parser.add_argument(... :param add_help: Whether or not to add losser's help text to the parser. Pass add_help=False if you want to use your own help text in a child parser. :type add_help: bool :param exclude_args: List of losser command-line arguments to exclude, use this to exclude any default losser arguments that you don't want in your own command. For example: exclude_args=["-i", "--max-length"]. :type exclude_args: list of strings """ if exclude_args is None: exclude_args = [] parser = argparse.ArgumentParser(add_help=add_help) parser.description = ("Filter, transform and export a list of JSON " "objects on stdin to JSON or CSV on stdout") if "--columns" not in exclude_args: parser.add_argument( "--columns", dest="columns_file", help="the JSON file specifying the columns to be output", ) if ("-i" not in exclude_args) and ("--input" not in exclude_args): parser.add_argument( "-i", "--input", help="read input from the given file instead of from stdin", dest='input_data', # Because input is a Python builtin. ) if ("-c" not in exclude_args) and ("--column" not in exclude_args): parser.add_argument("-c", "--column", action=ColumnsAction) if "--pattern" not in exclude_args: parser.add_argument("--pattern", action=ColumnsAction, nargs='+') if "--max-length" not in exclude_args: parser.add_argument("--max-length", action=ColumnsAction) if "--strip" not in exclude_args: parser.add_argument("--strip", nargs="?", action=ColumnsAction) if "--deduplicate" not in exclude_args: parser.add_argument("--deduplicate", nargs='?', action=ColumnsAction) if "--case-sensitive" not in exclude_args: parser.add_argument( "--case-sensitive", nargs='?', action=ColumnsAction) if "--unique" not in exclude_args: parser.add_argument("--unique", nargs="?", action=ColumnsAction) if ("-p" not in exclude_args) and ("--pretty" not in exclude_args): parser.add_argument("-p", "--pretty", action="store_true") return parser
[ "def", "make_parser", "(", "add_help", "=", "True", ",", "exclude_args", "=", "None", ")", ":", "if", "exclude_args", "is", "None", ":", "exclude_args", "=", "[", "]", "parser", "=", "argparse", ".", "ArgumentParser", "(", "add_help", "=", "add_help", ")", "parser", ".", "description", "=", "(", "\"Filter, transform and export a list of JSON \"", "\"objects on stdin to JSON or CSV on stdout\"", ")", "if", "\"--columns\"", "not", "in", "exclude_args", ":", "parser", ".", "add_argument", "(", "\"--columns\"", ",", "dest", "=", "\"columns_file\"", ",", "help", "=", "\"the JSON file specifying the columns to be output\"", ",", ")", "if", "(", "\"-i\"", "not", "in", "exclude_args", ")", "and", "(", "\"--input\"", "not", "in", "exclude_args", ")", ":", "parser", ".", "add_argument", "(", "\"-i\"", ",", "\"--input\"", ",", "help", "=", "\"read input from the given file instead of from stdin\"", ",", "dest", "=", "'input_data'", ",", "# Because input is a Python builtin.", ")", "if", "(", "\"-c\"", "not", "in", "exclude_args", ")", "and", "(", "\"--column\"", "not", "in", "exclude_args", ")", ":", "parser", ".", "add_argument", "(", "\"-c\"", ",", "\"--column\"", ",", "action", "=", "ColumnsAction", ")", "if", "\"--pattern\"", "not", "in", "exclude_args", ":", "parser", ".", "add_argument", "(", "\"--pattern\"", ",", "action", "=", "ColumnsAction", ",", "nargs", "=", "'+'", ")", "if", "\"--max-length\"", "not", "in", "exclude_args", ":", "parser", ".", "add_argument", "(", "\"--max-length\"", ",", "action", "=", "ColumnsAction", ")", "if", "\"--strip\"", "not", "in", "exclude_args", ":", "parser", ".", "add_argument", "(", "\"--strip\"", ",", "nargs", "=", "\"?\"", ",", "action", "=", "ColumnsAction", ")", "if", "\"--deduplicate\"", "not", "in", "exclude_args", ":", "parser", ".", "add_argument", "(", "\"--deduplicate\"", ",", "nargs", "=", "'?'", ",", "action", "=", "ColumnsAction", ")", "if", "\"--case-sensitive\"", "not", "in", "exclude_args", ":", "parser", ".", "add_argument", "(", "\"--case-sensitive\"", ",", "nargs", "=", "'?'", ",", "action", "=", "ColumnsAction", ")", "if", "\"--unique\"", "not", "in", "exclude_args", ":", "parser", ".", "add_argument", "(", "\"--unique\"", ",", "nargs", "=", "\"?\"", ",", "action", "=", "ColumnsAction", ")", "if", "(", "\"-p\"", "not", "in", "exclude_args", ")", "and", "(", "\"--pretty\"", "not", "in", "exclude_args", ")", ":", "parser", ".", "add_argument", "(", "\"-p\"", ",", "\"--pretty\"", ",", "action", "=", "\"store_true\"", ")", "return", "parser" ]
Return an argparse.ArgumentParser object with losser's arguments. Other projects can call this to get an ArgumentParser with losser's command line interface to use as a parent parser for their own parser. For example:: parent_parser = losser.cli.make_parser( add_help=False, exclude_args=["-i"]) parser = argparse.ArgumentParser( description="Export datasets from a CKAN site to JSON or CSV.", parents=[parent_parser]) parser.add_argument(... :param add_help: Whether or not to add losser's help text to the parser. Pass add_help=False if you want to use your own help text in a child parser. :type add_help: bool :param exclude_args: List of losser command-line arguments to exclude, use this to exclude any default losser arguments that you don't want in your own command. For example: exclude_args=["-i", "--max-length"]. :type exclude_args: list of strings
[ "Return", "an", "argparse", ".", "ArgumentParser", "object", "with", "losser", "s", "arguments", "." ]
python
train
Gandi/gandi.cli
gandi/cli/commands/disk.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/commands/disk.py#L355-L402
def migrate(gandi, resource, force, background): """ Migrate a disk to another datacenter. """ # check it's not attached source_info = gandi.disk.info(resource) if source_info['vms_id']: click.echo('Cannot start the migration: disk %s is attached. ' 'Please detach the disk before starting the migration.' % resource) return disk_datacenter = source_info['datacenter_id'] dc_choices = gandi.datacenter.list_migration_choice(disk_datacenter) if not dc_choices: click.echo('No datacenter is available for migration') return elif len(dc_choices) == 1: # use the only one available datacenter_id = dc_choices[0]['id'] else: choice_list = [dc['dc_code'] for dc in dc_choices] dc_choice = click.Choice(choice_list) dc_chosen = click.prompt('Select a datacenter [%s]' % '|'.join(choice_list), # noqa type=dc_choice, show_default=True) datacenter_id = [dc['id'] for dc in dc_choices if dc['dc_code'] == dc_chosen][0] if not force: proceed = click.confirm('Are you sure you want to migrate disk %s ?' % resource) if not proceed: return datacenters = gandi.datacenter.list() dc_from = [dc['dc_code'] for dc in datacenters if dc['id'] == disk_datacenter][0] dc_to = [dc['dc_code'] for dc in datacenters if dc['id'] == datacenter_id][0] migration_msg = ('* Starting the migration of disk %s from datacenter %s ' 'to %s' % (resource, dc_from, dc_to)) gandi.echo(migration_msg) output_keys = ['id', 'type', 'step'] oper = gandi.disk.migrate(resource, datacenter_id, background) if background: output_generic(gandi, oper, output_keys) return oper
[ "def", "migrate", "(", "gandi", ",", "resource", ",", "force", ",", "background", ")", ":", "# check it's not attached", "source_info", "=", "gandi", ".", "disk", ".", "info", "(", "resource", ")", "if", "source_info", "[", "'vms_id'", "]", ":", "click", ".", "echo", "(", "'Cannot start the migration: disk %s is attached. '", "'Please detach the disk before starting the migration.'", "%", "resource", ")", "return", "disk_datacenter", "=", "source_info", "[", "'datacenter_id'", "]", "dc_choices", "=", "gandi", ".", "datacenter", ".", "list_migration_choice", "(", "disk_datacenter", ")", "if", "not", "dc_choices", ":", "click", ".", "echo", "(", "'No datacenter is available for migration'", ")", "return", "elif", "len", "(", "dc_choices", ")", "==", "1", ":", "# use the only one available", "datacenter_id", "=", "dc_choices", "[", "0", "]", "[", "'id'", "]", "else", ":", "choice_list", "=", "[", "dc", "[", "'dc_code'", "]", "for", "dc", "in", "dc_choices", "]", "dc_choice", "=", "click", ".", "Choice", "(", "choice_list", ")", "dc_chosen", "=", "click", ".", "prompt", "(", "'Select a datacenter [%s]'", "%", "'|'", ".", "join", "(", "choice_list", ")", ",", "# noqa", "type", "=", "dc_choice", ",", "show_default", "=", "True", ")", "datacenter_id", "=", "[", "dc", "[", "'id'", "]", "for", "dc", "in", "dc_choices", "if", "dc", "[", "'dc_code'", "]", "==", "dc_chosen", "]", "[", "0", "]", "if", "not", "force", ":", "proceed", "=", "click", ".", "confirm", "(", "'Are you sure you want to migrate disk %s ?'", "%", "resource", ")", "if", "not", "proceed", ":", "return", "datacenters", "=", "gandi", ".", "datacenter", ".", "list", "(", ")", "dc_from", "=", "[", "dc", "[", "'dc_code'", "]", "for", "dc", "in", "datacenters", "if", "dc", "[", "'id'", "]", "==", "disk_datacenter", "]", "[", "0", "]", "dc_to", "=", "[", "dc", "[", "'dc_code'", "]", "for", "dc", "in", "datacenters", "if", "dc", "[", "'id'", "]", "==", "datacenter_id", "]", "[", "0", "]", "migration_msg", "=", "(", "'* Starting the migration of disk %s from datacenter %s '", "'to %s'", "%", "(", "resource", ",", "dc_from", ",", "dc_to", ")", ")", "gandi", ".", "echo", "(", "migration_msg", ")", "output_keys", "=", "[", "'id'", ",", "'type'", ",", "'step'", "]", "oper", "=", "gandi", ".", "disk", ".", "migrate", "(", "resource", ",", "datacenter_id", ",", "background", ")", "if", "background", ":", "output_generic", "(", "gandi", ",", "oper", ",", "output_keys", ")", "return", "oper" ]
Migrate a disk to another datacenter.
[ "Migrate", "a", "disk", "to", "another", "datacenter", "." ]
python
train
yyuu/botornado
boto/__init__.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/__init__.py#L250-L262
def connect_rds(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): """ :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.rds.RDSConnection` :return: A connection to RDS """ from boto.rds import RDSConnection return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
[ "def", "connect_rds", "(", "aws_access_key_id", "=", "None", ",", "aws_secret_access_key", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", "boto", ".", "rds", "import", "RDSConnection", "return", "RDSConnection", "(", "aws_access_key_id", ",", "aws_secret_access_key", ",", "*", "*", "kwargs", ")" ]
:type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.rds.RDSConnection` :return: A connection to RDS
[ ":", "type", "aws_access_key_id", ":", "string", ":", "param", "aws_access_key_id", ":", "Your", "AWS", "Access", "Key", "ID" ]
python
train
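The botornado record documents a thin convenience wrapper around boto's RDSConnection. A minimal usage sketch against the boto 2 API; the credentials are placeholders and would normally come from the environment or boto's config file:

import boto

# Placeholder credentials -- substitute real keys or rely on boto configuration.
conn = boto.connect_rds(aws_access_key_id="AKIA...",
                        aws_secret_access_key="SECRET...")
for instance in conn.get_all_dbinstances():
    print(instance.id)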
klen/aioauth-client
aioauth_client.py
https://github.com/klen/aioauth-client/blob/54f58249496c26965adb4f752f2b24cfe18d0084/aioauth_client.py#L472-L480
def user_parse(data): """Parse information from the provider.""" user_ = data.get('user', {}) yield 'id', data.get('user_nsid') or user_.get('id') yield 'username', user_.get('username', {}).get('_content') first_name, _, last_name = data.get( 'fullname', {}).get('_content', '').partition(' ') yield 'first_name', first_name yield 'last_name', last_name
[ "def", "user_parse", "(", "data", ")", ":", "user_", "=", "data", ".", "get", "(", "'user'", ",", "{", "}", ")", "yield", "'id'", ",", "data", ".", "get", "(", "'user_nsid'", ")", "or", "user_", ".", "get", "(", "'id'", ")", "yield", "'username'", ",", "user_", ".", "get", "(", "'username'", ",", "{", "}", ")", ".", "get", "(", "'_content'", ")", "first_name", ",", "_", ",", "last_name", "=", "data", ".", "get", "(", "'fullname'", ",", "{", "}", ")", ".", "get", "(", "'_content'", ",", "''", ")", ".", "partition", "(", "' '", ")", "yield", "'first_name'", ",", "first_name", "yield", "'last_name'", ",", "last_name" ]
Parse information from the provider.
[ "Parse", "information", "from", "the", "provider", "." ]
python
train
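The aioauth-client record above is a generator that yields (key, value) pairs parsed from a Flickr-style user payload, so consuming it with dict() gives a flat user record. A self-contained sketch of the same pattern with a made-up payload:

def user_parse(data):
    # Yield normalised (field, value) pairs from the provider payload.
    user_ = data.get('user', {})
    yield 'id', data.get('user_nsid') or user_.get('id')
    yield 'username', user_.get('username', {}).get('_content')
    first_name, _, last_name = data.get(
        'fullname', {}).get('_content', '').partition(' ')
    yield 'first_name', first_name
    yield 'last_name', last_name

payload = {                                    # illustrative payload only
    'user_nsid': '12345@N00',
    'user': {'username': {'_content': 'jane_doe'}},
    'fullname': {'_content': 'Jane Doe'},
}
print(dict(user_parse(payload)))
# -> {'id': '12345@N00', 'username': 'jane_doe', 'first_name': 'Jane', 'last_name': 'Doe'}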
cimm-kzn/CGRtools
CGRtools/containers/molecule.py
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/molecule.py#L112-L125
def substructure(self, atoms, meta=False, as_view=True): """ create substructure containing atoms from nbunch list :param atoms: list of atoms numbers of substructure :param meta: if True metadata will be copied to substructure :param as_view: If True, the returned graph-view provides a read-only view of the original structure scaffold without actually copying any data. """ s = super().substructure(atoms, meta, as_view) if as_view: s.check_valence = s.explicify_hydrogens = s.implicify_hydrogens = s.reset_query_marks = frozen s.standardize = s.aromatize = frozen return s
[ "def", "substructure", "(", "self", ",", "atoms", ",", "meta", "=", "False", ",", "as_view", "=", "True", ")", ":", "s", "=", "super", "(", ")", ".", "substructure", "(", "atoms", ",", "meta", ",", "as_view", ")", "if", "as_view", ":", "s", ".", "check_valence", "=", "s", ".", "explicify_hydrogens", "=", "s", ".", "implicify_hydrogens", "=", "s", ".", "reset_query_marks", "=", "frozen", "s", ".", "standardize", "=", "s", ".", "aromatize", "=", "frozen", "return", "s" ]
create substructure containing atoms from nbunch list :param atoms: list of atoms numbers of substructure :param meta: if True metadata will be copied to substructure :param as_view: If True, the returned graph-view provides a read-only view of the original structure scaffold without actually copying any data.
[ "create", "substructure", "containing", "atoms", "from", "nbunch", "list" ]
python
train
siznax/wptools
wptools/category.py
https://github.com/siznax/wptools/blob/100eaea585c34aa9ad87a9eda8982bb4898f6ec9/wptools/category.py#L92-L102
def _query(self, action, qobj): """ Form query to enumerate category """ title = self.params.get('title') pageid = self.params.get('pageid') if action == 'random': return qobj.random(namespace=14) elif action == 'category': return qobj.category(title, pageid, self._continue_params())
[ "def", "_query", "(", "self", ",", "action", ",", "qobj", ")", ":", "title", "=", "self", ".", "params", ".", "get", "(", "'title'", ")", "pageid", "=", "self", ".", "params", ".", "get", "(", "'pageid'", ")", "if", "action", "==", "'random'", ":", "return", "qobj", ".", "random", "(", "namespace", "=", "14", ")", "elif", "action", "==", "'category'", ":", "return", "qobj", ".", "category", "(", "title", ",", "pageid", ",", "self", ".", "_continue_params", "(", ")", ")" ]
Form query to enumerate category
[ "Form", "query", "to", "enumerate", "category" ]
python
train
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/virtual_target.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/virtual_target.py#L532-L584
def __adjust_name(self, specified_name): """Given the target name specified in constructor, returns the name which should be really used, by looking at the <tag> properties. The tag properties come in two flavour: - <tag>value, - <tag>@rule-name In the first case, value is just added to name In the second case, the specified rule is called with specified name, target type and properties and should return the new name. If not <tag> property is specified, or the rule specified by <tag> returns nothing, returns the result of calling virtual-target.add-suffix""" assert isinstance(specified_name, basestring) if self.action_: ps = self.action_.properties() else: ps = property_set.empty() # FIXME: I'm not sure how this is used, need to check with # Rene to figure out how to implement #~ We add ourselves to the properties so that any tag rule can get #~ more direct information about the target than just that available #~ through the properties. This is useful in implementing #~ name changes based on the sources of the target. For example to #~ make unique names of object files based on the source file. #~ --grafik #ps = property_set.create(ps.raw() + ["<target>%s" % "XXXX"]) #ps = [ property-set.create [ $(ps).raw ] <target>$(__name__) ] ; tag = ps.get("<tag>") if tag: if len(tag) > 1: get_manager().errors()( """<tag>@rulename is present but is not the only <tag> feature""") tag = tag[0] if callable(tag): self.name_ = tag(specified_name, self.type_, ps) else: if not tag[0] == '@': self.manager_.errors()("""The value of the <tag> feature must be '@rule-nane'""") exported_ps = b2.util.value_to_jam(ps, methods=True) self.name_ = b2.util.call_jam_function( tag[1:], specified_name, self.type_, exported_ps) if self.name_: self.name_ = self.name_[0] # If there's no tag or the tag rule returned nothing. if not tag or not self.name_: self.name_ = add_prefix_and_suffix(specified_name, self.type_, ps)
[ "def", "__adjust_name", "(", "self", ",", "specified_name", ")", ":", "assert", "isinstance", "(", "specified_name", ",", "basestring", ")", "if", "self", ".", "action_", ":", "ps", "=", "self", ".", "action_", ".", "properties", "(", ")", "else", ":", "ps", "=", "property_set", ".", "empty", "(", ")", "# FIXME: I'm not sure how this is used, need to check with", "# Rene to figure out how to implement", "#~ We add ourselves to the properties so that any tag rule can get", "#~ more direct information about the target than just that available", "#~ through the properties. This is useful in implementing", "#~ name changes based on the sources of the target. For example to", "#~ make unique names of object files based on the source file.", "#~ --grafik", "#ps = property_set.create(ps.raw() + [\"<target>%s\" % \"XXXX\"])", "#ps = [ property-set.create [ $(ps).raw ] <target>$(__name__) ] ;", "tag", "=", "ps", ".", "get", "(", "\"<tag>\"", ")", "if", "tag", ":", "if", "len", "(", "tag", ")", ">", "1", ":", "get_manager", "(", ")", ".", "errors", "(", ")", "(", "\"\"\"<tag>@rulename is present but is not the only <tag> feature\"\"\"", ")", "tag", "=", "tag", "[", "0", "]", "if", "callable", "(", "tag", ")", ":", "self", ".", "name_", "=", "tag", "(", "specified_name", ",", "self", ".", "type_", ",", "ps", ")", "else", ":", "if", "not", "tag", "[", "0", "]", "==", "'@'", ":", "self", ".", "manager_", ".", "errors", "(", ")", "(", "\"\"\"The value of the <tag> feature must be '@rule-nane'\"\"\"", ")", "exported_ps", "=", "b2", ".", "util", ".", "value_to_jam", "(", "ps", ",", "methods", "=", "True", ")", "self", ".", "name_", "=", "b2", ".", "util", ".", "call_jam_function", "(", "tag", "[", "1", ":", "]", ",", "specified_name", ",", "self", ".", "type_", ",", "exported_ps", ")", "if", "self", ".", "name_", ":", "self", ".", "name_", "=", "self", ".", "name_", "[", "0", "]", "# If there's no tag or the tag rule returned nothing.", "if", "not", "tag", "or", "not", "self", ".", "name_", ":", "self", ".", "name_", "=", "add_prefix_and_suffix", "(", "specified_name", ",", "self", ".", "type_", ",", "ps", ")" ]
Given the target name specified in constructor, returns the name which should be really used, by looking at the <tag> properties. The tag properties come in two flavour: - <tag>value, - <tag>@rule-name In the first case, value is just added to name In the second case, the specified rule is called with specified name, target type and properties and should return the new name. If not <tag> property is specified, or the rule specified by <tag> returns nothing, returns the result of calling virtual-target.add-suffix
[ "Given", "the", "target", "name", "specified", "in", "constructor", "returns", "the", "name", "which", "should", "be", "really", "used", "by", "looking", "at", "the", "<tag", ">", "properties", ".", "The", "tag", "properties", "come", "in", "two", "flavour", ":", "-", "<tag", ">", "value", "-", "<tag", ">" ]
python
train
learningequality/iceqube
src/iceqube/common/utils.py
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/common/utils.py#L134-L146
def main_loop(self, timeout=None): """ Check if self.trigger_event is set. If it is, then run our function. If not, return early. :param timeout: How long to wait for a trigger event. Defaults to 0. :return: """ if self.trigger_event.wait(timeout): try: self.func() except Exception as e: self.logger.warning("Got an exception running {func}: {e}".format(func=self.func, e=str(e))) finally: self.trigger_event.clear()
[ "def", "main_loop", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "self", ".", "trigger_event", ".", "wait", "(", "timeout", ")", ":", "try", ":", "self", ".", "func", "(", ")", "except", "Exception", "as", "e", ":", "self", ".", "logger", ".", "warning", "(", "\"Got an exception running {func}: {e}\"", ".", "format", "(", "func", "=", "self", ".", "func", ",", "e", "=", "str", "(", "e", ")", ")", ")", "finally", ":", "self", ".", "trigger_event", ".", "clear", "(", ")" ]
Check if self.trigger_event is set. If it is, then run our function. If not, return early. :param timeout: How long to wait for a trigger event. Defaults to 0. :return:
[ "Check", "if", "self", ".", "trigger_event", "is", "set", ".", "If", "it", "is", "then", "run", "our", "function", ".", "If", "not", "return", "early", ".", ":", "param", "timeout", ":", "How", "long", "to", "wait", "for", "a", "trigger", "event", ".", "Defaults", "to", "0", ".", ":", "return", ":" ]
python
train
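The iceqube record documents a wait-and-run loop driven by a threading.Event trigger: wait for the trigger, run the callable, swallow and report errors, then clear the trigger. A stripped-down re-implementation of that pattern outside the original class; the class name and logging are illustrative:

import threading

class EventLoopWorker(object):
    # Minimal stand-in for the documented pattern.
    def __init__(self, func):
        self.func = func
        self.trigger_event = threading.Event()

    def main_loop(self, timeout=None):
        if self.trigger_event.wait(timeout):
            try:
                self.func()
            except Exception as e:
                print("Got an exception running {}: {}".format(self.func, e))
            finally:
                self.trigger_event.clear()

worker = EventLoopWorker(lambda: print("triggered"))
worker.trigger_event.set()
worker.main_loop(timeout=0.1)    # prints "triggered", then clears the event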
jhshi/wltrace
wltrace/utils.py
https://github.com/jhshi/wltrace/blob/4c8441162f7cddd47375da2effc52c95b97dc81d/wltrace/utils.py#L23-L43
def calc_padding(fmt, align): """Calculate how many padding bytes needed for ``fmt`` to be aligned to ``align``. Args: fmt (str): :mod:`struct` format. align (int): alignment (2, 4, 8, etc.) Returns: str: padding format (e.g., various number of 'x'). >>> calc_padding('b', 2) 'x' >>> calc_padding('b', 3) 'xx' """ remain = struct.calcsize(fmt) % align if remain == 0: return "" return 'x' * (align - remain)
[ "def", "calc_padding", "(", "fmt", ",", "align", ")", ":", "remain", "=", "struct", ".", "calcsize", "(", "fmt", ")", "%", "align", "if", "remain", "==", "0", ":", "return", "\"\"", "return", "'x'", "*", "(", "align", "-", "remain", ")" ]
Calculate how many padding bytes needed for ``fmt`` to be aligned to ``align``. Args: fmt (str): :mod:`struct` format. align (int): alignment (2, 4, 8, etc.) Returns: str: padding format (e.g., various number of 'x'). >>> calc_padding('b', 2) 'x' >>> calc_padding('b', 3) 'xx'
[ "Calculate", "how", "many", "padding", "bytes", "needed", "for", "fmt", "to", "be", "aligned", "to", "align", "." ]
python
train
twilio/twilio-python
twilio/rest/api/v2010/account/usage/record/all_time.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/usage/record/all_time.py#L185-L194
def get_instance(self, payload): """ Build an instance of AllTimeInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeInstance :rtype: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeInstance """ return AllTimeInstance(self._version, payload, account_sid=self._solution['account_sid'], )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "AllTimeInstance", "(", "self", ".", "_version", ",", "payload", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", ")" ]
Build an instance of AllTimeInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeInstance :rtype: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeInstance
[ "Build", "an", "instance", "of", "AllTimeInstance" ]
python
train
brocade/pynos
pynos/versions/base/yang/brocade_sflow.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/yang/brocade_sflow.py#L61-L70
def sflow_polling_interval(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") sflow = ET.SubElement(config, "sflow", xmlns="urn:brocade.com:mgmt:brocade-sflow") polling_interval = ET.SubElement(sflow, "polling-interval") polling_interval.text = kwargs.pop('polling_interval') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "sflow_polling_interval", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "sflow", "=", "ET", ".", "SubElement", "(", "config", ",", "\"sflow\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-sflow\"", ")", "polling_interval", "=", "ET", ".", "SubElement", "(", "sflow", ",", "\"polling-interval\"", ")", "polling_interval", ".", "text", "=", "kwargs", ".", "pop", "(", "'polling_interval'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
bitesofcode/projexui
projexui/wizards/xscaffoldwizard/xscaffoldwizard.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/wizards/xscaffoldwizard/xscaffoldwizard.py#L166-L203
def validatePage(self): """ Validates the page against the scaffold information, setting the values along the way. """ widgets = self.propertyWidgetMap() failed = '' for prop, widget in widgets.items(): val, success = projexui.widgetValue(widget) if success: # ensure we have the required values if not val and not (prop.type == 'bool' and val is False): if prop.default: val = prop.default elif prop.required: msg = '{0} is a required value'.format(prop.label) failed = msg break # ensure the values match the required expression elif prop.regex and not re.match(prop.regex, nativestring(val)): msg = '{0} needs to be in the format {1}'.format(prop.label, prop.regex) failed = msg break prop.value = val else: msg = 'Failed to get a proper value for {0}'.format(prop.label) failed = msg break if failed: QtGui.QMessageBox.warning(None, 'Properties Failed', failed) return False return True
[ "def", "validatePage", "(", "self", ")", ":", "widgets", "=", "self", ".", "propertyWidgetMap", "(", ")", "failed", "=", "''", "for", "prop", ",", "widget", "in", "widgets", ".", "items", "(", ")", ":", "val", ",", "success", "=", "projexui", ".", "widgetValue", "(", "widget", ")", "if", "success", ":", "# ensure we have the required values\r", "if", "not", "val", "and", "not", "(", "prop", ".", "type", "==", "'bool'", "and", "val", "is", "False", ")", ":", "if", "prop", ".", "default", ":", "val", "=", "prop", ".", "default", "elif", "prop", ".", "required", ":", "msg", "=", "'{0} is a required value'", ".", "format", "(", "prop", ".", "label", ")", "failed", "=", "msg", "break", "# ensure the values match the required expression\r", "elif", "prop", ".", "regex", "and", "not", "re", ".", "match", "(", "prop", ".", "regex", ",", "nativestring", "(", "val", ")", ")", ":", "msg", "=", "'{0} needs to be in the format {1}'", ".", "format", "(", "prop", ".", "label", ",", "prop", ".", "regex", ")", "failed", "=", "msg", "break", "prop", ".", "value", "=", "val", "else", ":", "msg", "=", "'Failed to get a proper value for {0}'", ".", "format", "(", "prop", ".", "label", ")", "failed", "=", "msg", "break", "if", "failed", ":", "QtGui", ".", "QMessageBox", ".", "warning", "(", "None", ",", "'Properties Failed'", ",", "failed", ")", "return", "False", "return", "True" ]
Validates the page against the scaffold information, setting the values along the way.
[ "Validates", "the", "page", "against", "the", "scaffold", "information", "setting", "the", "values", "along", "the", "way", "." ]
python
train
CalebBell/fluids
fluids/units.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/units.py#L116-L134
def check_args_order(func): '''Reads a numpydoc function and compares the Parameters and Other Parameters with the input arguments of the actual function signature. Raises an exception if not correctly defined. >>> check_args_order(fluids.core.Reynolds) ''' argspec = inspect.getargspec(func) parsed_data = parse_numpydoc_variables_units(func) # compare the parsed arguments with those actually defined parsed_units = parsed_data['Parameters']['units'] parsed_parameters = parsed_data['Parameters']['vars'] if 'Other Parameters' in parsed_data: parsed_parameters += parsed_data['Other Parameters']['vars'] parsed_units += parsed_data['Other Parameters']['units'] if argspec.args != parsed_parameters: # pragma: no cover raise Exception('Function %s signature is not the same as the documentation' ' signature = %s; documentation = %s' %(func.__name__, argspec.args, parsed_parameters))
[ "def", "check_args_order", "(", "func", ")", ":", "argspec", "=", "inspect", ".", "getargspec", "(", "func", ")", "parsed_data", "=", "parse_numpydoc_variables_units", "(", "func", ")", "# compare the parsed arguments with those actually defined", "parsed_units", "=", "parsed_data", "[", "'Parameters'", "]", "[", "'units'", "]", "parsed_parameters", "=", "parsed_data", "[", "'Parameters'", "]", "[", "'vars'", "]", "if", "'Other Parameters'", "in", "parsed_data", ":", "parsed_parameters", "+=", "parsed_data", "[", "'Other Parameters'", "]", "[", "'vars'", "]", "parsed_units", "+=", "parsed_data", "[", "'Other Parameters'", "]", "[", "'units'", "]", "if", "argspec", ".", "args", "!=", "parsed_parameters", ":", "# pragma: no cover", "raise", "Exception", "(", "'Function %s signature is not the same as the documentation'", "' signature = %s; documentation = %s'", "%", "(", "func", ".", "__name__", ",", "argspec", ".", "args", ",", "parsed_parameters", ")", ")" ]
Reads a numpydoc function and compares the Parameters and Other Parameters with the input arguments of the actual function signature. Raises an exception if not correctly defined. >>> check_args_order(fluids.core.Reynolds)
[ "Reads", "a", "numpydoc", "function", "and", "compares", "the", "Parameters", "and", "Other", "Parameters", "with", "the", "input", "arguments", "of", "the", "actual", "function", "signature", ".", "Raises", "an", "exception", "if", "not", "correctly", "defined", ".", ">>>", "check_args_order", "(", "fluids", ".", "core", ".", "Reynolds", ")" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_ras_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_ras_ext.py#L66-L79
def show_raslog_output_show_all_raslog_raslog_entries_index(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_raslog = ET.Element("show_raslog") config = show_raslog output = ET.SubElement(show_raslog, "output") show_all_raslog = ET.SubElement(output, "show-all-raslog") raslog_entries = ET.SubElement(show_all_raslog, "raslog-entries") index = ET.SubElement(raslog_entries, "index") index.text = kwargs.pop('index') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "show_raslog_output_show_all_raslog_raslog_entries_index", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "show_raslog", "=", "ET", ".", "Element", "(", "\"show_raslog\"", ")", "config", "=", "show_raslog", "output", "=", "ET", ".", "SubElement", "(", "show_raslog", ",", "\"output\"", ")", "show_all_raslog", "=", "ET", ".", "SubElement", "(", "output", ",", "\"show-all-raslog\"", ")", "raslog_entries", "=", "ET", ".", "SubElement", "(", "show_all_raslog", ",", "\"raslog-entries\"", ")", "index", "=", "ET", ".", "SubElement", "(", "raslog_entries", ",", "\"index\"", ")", "index", ".", "text", "=", "kwargs", ".", "pop", "(", "'index'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
ansible/tower-cli
tower_cli/resources/workflow.py
https://github.com/ansible/tower-cli/blob/a2b151fed93c47725018d3034848cb3a1814bed7/tower_cli/resources/workflow.py#L266-L306
def schema(self, wfjt, node_network=None): """ Convert YAML/JSON content into workflow node objects if node_network param is given. If not, print a YAML representation of the node network. =====API DOCS===== Convert YAML/JSON content into workflow node objects if ``node_network`` param is given. If not, print a YAML representation of the node network. :param wfjt: Primary key or name of the workflow job template to run schema against. :type wfjt: str :param node_network: JSON- or YAML-formatted string representing the topology of the workflow job template be updated to. :type node_network: str :returns: The latest topology (possibly after modification) of the workflow job template. :rtype: dict =====API DOCS===== """ existing_network = self._get_schema(wfjt) if not isinstance(existing_network, list): existing_network = [] if node_network is None: if settings.format == 'human': settings.format = 'yaml' return existing_network if hasattr(node_network, 'read'): node_network = node_network.read() node_network = string_to_dict( node_network, allow_kv=False, require_dict=False) if not isinstance(node_network, list): node_network = [] _update_workflow([TreeNode(x, wfjt, include_id=True) for x in existing_network], [TreeNode(x, wfjt) for x in node_network]) if settings.format == 'human': settings.format = 'yaml' return self._get_schema(wfjt)
[ "def", "schema", "(", "self", ",", "wfjt", ",", "node_network", "=", "None", ")", ":", "existing_network", "=", "self", ".", "_get_schema", "(", "wfjt", ")", "if", "not", "isinstance", "(", "existing_network", ",", "list", ")", ":", "existing_network", "=", "[", "]", "if", "node_network", "is", "None", ":", "if", "settings", ".", "format", "==", "'human'", ":", "settings", ".", "format", "=", "'yaml'", "return", "existing_network", "if", "hasattr", "(", "node_network", ",", "'read'", ")", ":", "node_network", "=", "node_network", ".", "read", "(", ")", "node_network", "=", "string_to_dict", "(", "node_network", ",", "allow_kv", "=", "False", ",", "require_dict", "=", "False", ")", "if", "not", "isinstance", "(", "node_network", ",", "list", ")", ":", "node_network", "=", "[", "]", "_update_workflow", "(", "[", "TreeNode", "(", "x", ",", "wfjt", ",", "include_id", "=", "True", ")", "for", "x", "in", "existing_network", "]", ",", "[", "TreeNode", "(", "x", ",", "wfjt", ")", "for", "x", "in", "node_network", "]", ")", "if", "settings", ".", "format", "==", "'human'", ":", "settings", ".", "format", "=", "'yaml'", "return", "self", ".", "_get_schema", "(", "wfjt", ")" ]
Convert YAML/JSON content into workflow node objects if node_network param is given. If not, print a YAML representation of the node network. =====API DOCS===== Convert YAML/JSON content into workflow node objects if ``node_network`` param is given. If not, print a YAML representation of the node network. :param wfjt: Primary key or name of the workflow job template to run schema against. :type wfjt: str :param node_network: JSON- or YAML-formatted string representing the topology of the workflow job template be updated to. :type node_network: str :returns: The latest topology (possibly after modification) of the workflow job template. :rtype: dict =====API DOCS=====
[ "Convert", "YAML", "/", "JSON", "content", "into", "workflow", "node", "objects", "if", "node_network", "param", "is", "given", ".", "If", "not", "print", "a", "YAML", "representation", "of", "the", "node", "network", "." ]
python
valid