Columns:
- text: string, lengths 89 to 104k
- code_tokens: list
- avg_line_len: float64, 7.91 to 980
- score: float64, 0 to 630

text:

def _find_and_cache_best_function(self, dispatch_type):
    """Finds the best implementation of this function given a type.

    This function caches the result, and uses locking for thread safety.

    Returns:
        Implementing function, in below order of preference:

        1. Explicitly registered implementations (through
           multimethod.implement) for types that 'dispatch_type' either is
           or inherits from directly.
        2. Explicitly registered implementations accepting an abstract type
           (interface) in which dispatch_type participates (through
           abstract_type.register() or the convenience methods).
        3. Default behavior of the multimethod function. This will usually
           raise a NotImplementedError, by convention.

    Raises:
        TypeError: If two implementing functions are registered for
            different abstract types, and 'dispatch_type' participates in
            both, and no order of preference was specified using
            prefer_type.
    """
    result = self._dispatch_table.get(dispatch_type)
    if result:
        return result

    # The with statement ensures the lock is always released.
    with self._write_lock:
        try:
            dispatch_mro = dispatch_type.mro()
        except TypeError:
            # Not every type has an MRO.
            dispatch_mro = ()

        best_match = None
        result_type = None
        for candidate_type, candidate_func in self.implementations:
            if not issubclass(dispatch_type, candidate_type):
                # Skip implementations that are obviously unrelated.
                continue

            try:
                # The candidate implementation may be for a type that's
                # actually in the MRO, or it may be for an abstract type.
                match = dispatch_mro.index(candidate_type)
            except ValueError:
                # This means we have an implementation for an abstract
                # type, which ranks below all concrete types.
                match = None

            if best_match is None:
                if result and match is None:
                    # Already have a result, and no order of preference.
                    # This is probably because the type is a member of two
                    # abstract types and we have separate implementations
                    # for those two abstract types.
                    if self._preferred(candidate_type, over=result_type):
                        result = candidate_func
                        result_type = candidate_type
                    elif self._preferred(result_type, over=candidate_type):
                        # No need to update anything.
                        pass
                    else:
                        raise TypeError(
                            "Two candidate implementations found for "
                            "multimethod function %s (dispatch type %s) "
                            "and neither is preferred." %
                            (self.func_name, dispatch_type))
                else:
                    result = candidate_func
                    result_type = candidate_type
                    best_match = match

            if (match or 0) < (best_match or 0):
                result = candidate_func
                result_type = candidate_type
                best_match = match

        self._dispatch_table[dispatch_type] = result
        return result
[ "def", "_find_and_cache_best_function", "(", "self", ",", "dispatch_type", ")", ":", "result", "=", "self", ".", "_dispatch_table", ".", "get", "(", "dispatch_type", ")", "if", "result", ":", "return", "result", "# The outer try ensures the lock is always released.", "with", "self", ".", "_write_lock", ":", "try", ":", "dispatch_mro", "=", "dispatch_type", ".", "mro", "(", ")", "except", "TypeError", ":", "# Not every type has an MRO.", "dispatch_mro", "=", "(", ")", "best_match", "=", "None", "result_type", "=", "None", "for", "candidate_type", ",", "candidate_func", "in", "self", ".", "implementations", ":", "if", "not", "issubclass", "(", "dispatch_type", ",", "candidate_type", ")", ":", "# Skip implementations that are obviously unrelated.", "continue", "try", ":", "# The candidate implementation may be for a type that's", "# actually in the MRO, or it may be for an abstract type.", "match", "=", "dispatch_mro", ".", "index", "(", "candidate_type", ")", "except", "ValueError", ":", "# This means we have an implementation for an abstract", "# type, which ranks below all concrete types.", "match", "=", "None", "if", "best_match", "is", "None", ":", "if", "result", "and", "match", "is", "None", ":", "# Already have a result, and no order of preference.", "# This is probably because the type is a member of two", "# abstract types and we have separate implementations", "# for those two abstract types.", "if", "self", ".", "_preferred", "(", "candidate_type", ",", "over", "=", "result_type", ")", ":", "result", "=", "candidate_func", "result_type", "=", "candidate_type", "elif", "self", ".", "_preferred", "(", "result_type", ",", "over", "=", "candidate_type", ")", ":", "# No need to update anything.", "pass", "else", ":", "raise", "TypeError", "(", "\"Two candidate implementations found for \"", "\"multimethod function %s (dispatch type %s) \"", "\"and neither is preferred.\"", "%", "(", "self", ".", "func_name", ",", "dispatch_type", ")", ")", "else", ":", "result", "=", "candidate_func", "result_type", "=", "candidate_type", "best_match", "=", "match", "if", "(", "match", "or", "0", ")", "<", "(", "best_match", "or", "0", ")", ":", "result", "=", "candidate_func", "result_type", "=", "candidate_type", "best_match", "=", "match", "self", ".", "_dispatch_table", "[", "dispatch_type", "]", "=", "result", "return", "result" ]
avg_line_len: 44.902439
score: 22.195122

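The ranking logic above keys off dispatch_type.mro(): a candidate whose type sits earlier in the MRO (a lower index) is more specific and wins, while candidates that only match via an abstract type rank below every concrete match. A minimal self-contained sketch of that MRO-index ranking, with a plain dict standing in for the registration machinery (the names here are illustrative, not the library's API):

class Animal:
    pass

class Dog(Animal):
    pass

# Hypothetical registry: one handler per registered type.
implementations = {
    Animal: lambda: "generic animal handler",
    Dog: lambda: "dog-specific handler",
}

def best_function(dispatch_type):
    best, best_rank = None, None
    mro = dispatch_type.mro()
    for candidate_type, func in implementations.items():
        if not issubclass(dispatch_type, candidate_type):
            continue
        rank = mro.index(candidate_type)  # lower index = more specific
        if best_rank is None or rank < best_rank:
            best, best_rank = func, rank
    return best

print(best_function(Dog)())     # dog-specific handler
print(best_function(Animal)())  # generic animal handler
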
text:

def recall(self, mode='all'):
    """Recalls from the file specified by :attr:`~SR850.filename`.

    :param mode: Specifies the recall mode.

        ======= ==================================================
        Value   Description
        ======= ==================================================
        'all'   Recalls the active display's data trace, the trace
                definition and the instrument state.
        'state' Recalls the instrument state.
        ======= ==================================================

    """
    if mode == 'all':
        self._write('RDAT')
    elif mode == 'state':
        self._write('RSET')
    else:
        raise ValueError('Invalid recall mode.')
[ "def", "recall", "(", "self", ",", "mode", "=", "'all'", ")", ":", "if", "mode", "==", "'all'", ":", "self", ".", "_write", "(", "'RDAT'", ")", "elif", "mode", "==", "'state'", ":", "self", ".", "_write", "(", "'RSET'", ")", "else", ":", "raise", "ValueError", "(", "'Invalid recall mode.'", ")" ]
avg_line_len: 38.05
score: 19

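Since recall() is just a mode-to-command lookup, the same behavior can be written table-driven, which keeps validation in one place if more recall modes appear. A sketch of that alternative as a drop-in method (assumes the same SR850 class and its _write helper shown above):

_RECALL_COMMANDS = {'all': 'RDAT', 'state': 'RSET'}

def recall(self, mode='all'):
    # Dict lookup replaces the if/elif chain; unknown modes still
    # raise ValueError.
    try:
        self._write(_RECALL_COMMANDS[mode])
    except KeyError:
        raise ValueError('Invalid recall mode.')
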
text:

def read_tags(filename):
    """Reads values of "magic tags" defined in the given Python file.

    :param filename: Python filename to read the tags from
    :return: Dictionary of tags
    """
    with open(filename) as f:
        ast_tree = ast.parse(f.read(), filename)

    res = {}
    for node in ast.walk(ast_tree):
        if type(node) is not ast.Assign:
            continue

        target = node.targets[0]
        if type(target) is not ast.Name:
            continue
        if not (target.id.startswith('__') and target.id.endswith('__')):
            continue

        name = target.id[2:-2]
        res[name] = ast.literal_eval(node.value)

    return res
[ "def", "read_tags", "(", "filename", ")", ":", "with", "open", "(", "filename", ")", "as", "f", ":", "ast_tree", "=", "ast", ".", "parse", "(", "f", ".", "read", "(", ")", ",", "filename", ")", "res", "=", "{", "}", "for", "node", "in", "ast", ".", "walk", "(", "ast_tree", ")", ":", "if", "type", "(", "node", ")", "is", "not", "ast", ".", "Assign", ":", "continue", "target", "=", "node", ".", "targets", "[", "0", "]", "if", "type", "(", "target", ")", "is", "not", "ast", ".", "Name", ":", "continue", "if", "not", "(", "target", ".", "id", ".", "startswith", "(", "'__'", ")", "and", "target", ".", "id", ".", "endswith", "(", "'__'", ")", ")", ":", "continue", "name", "=", "target", ".", "id", "[", "2", ":", "-", "2", "]", "res", "[", "name", "]", "=", "ast", ".", "literal_eval", "(", "node", ".", "value", ")", "return", "res" ]
avg_line_len: 27.083333
score: 18.333333

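Because read_tags() walks the AST instead of importing the module, dunder tags like __version__ can be read without executing the file. A short usage sketch (requires import ast for read_tags itself; the file contents are made up):

import ast
import tempfile

# Write a tiny module with "magic tags" and read them back.
with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as f:
    f.write("__version__ = '1.0'\n__author__ = 'someone'\nx = 42\n")
    path = f.name

print(read_tags(path))  # {'version': '1.0', 'author': 'someone'}; x is skipped
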
text:

def _wrap(func, msg):
    """This is a decorator which can be used to mark functions as
    deprecated. It will result in a warning being emitted when the
    function is used."""
    @functools.wraps(func)
    def new_func(*args, **kwargs):
        warnings.simplefilter('always', DeprecationWarning)  # turn off filter
        warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
        warnings.simplefilter('default', DeprecationWarning)  # reset filter
        return func(*args, **kwargs)
    return new_func
[ "def", "_wrap", "(", "func", ",", "msg", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "new_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "simplefilter", "(", "'always'", ",", "DeprecationWarning", ")", "# turn off filter", "warnings", ".", "warn", "(", "msg", ",", "category", "=", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "warnings", ".", "simplefilter", "(", "'default'", ",", "DeprecationWarning", ")", "# reset filter", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "new_func" ]
avg_line_len: 43.25
score: 19.333333

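Because _wrap() switches the filter to 'always' before warning, the message fires on every call rather than once per call site. A usage sketch (the wrapped function and message are illustrative):

import functools
import warnings

def old_api():
    return 42

# Replace the function with its deprecation-warning wrapper.
old_api = _wrap(old_api, 'old_api() is deprecated; use new_api() instead')

with warnings.catch_warnings(record=True) as caught:
    old_api()
    print(caught[0].category.__name__, caught[0].message)
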
text:

def update_variable(self, variable, value) -> None:
    """Assign the given value(s) to the given target or base variable.

    If the assignment fails, |ChangeItem.update_variable| raises an
    error like the following:

    >>> from hydpy.core.examples import prepare_full_example_2
    >>> hp, pub, TestIO = prepare_full_example_2()
    >>> item = SetItem('alpha', 'hland_v1', 'control.alpha', 0)
    >>> item.collect_variables(pub.selections)
    >>> item.update_variables()    # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    TypeError: When trying to update a target variable of SetItem `alpha` \
with the value(s) `None`, the following error occurred: While trying to set \
the value(s) of variable `alpha` of element `...`, the following error \
occurred: The given value `None` cannot be converted to type `float`.
    """
    try:
        variable(value)
    except BaseException:
        objecttools.augment_excmessage(
            f'When trying to update a target variable of '
            f'{objecttools.classname(self)} `{self.name}` '
            f'with the value(s) `{value}`')
[ "def", "update_variable", "(", "self", ",", "variable", ",", "value", ")", "->", "None", ":", "try", ":", "variable", "(", "value", ")", "except", "BaseException", ":", "objecttools", ".", "augment_excmessage", "(", "f'When trying to update a target variable of '", "f'{objecttools.classname(self)} `{self.name}` '", "f'with the value(s) `{value}`'", ")" ]
avg_line_len: 46.76
score: 20

text:

def bg_compensate(img, sigma, splinepoints, scale):
    '''Reads file, subtracts background. Returns [compensated image, background].'''
    from PIL import Image
    import pylab
    from matplotlib.image import pil_to_array
    from centrosome.filter import canny
    import matplotlib

    img = Image.open(img)
    if img.mode == 'I;16':
        # 16-bit image
        # deal with the endianness explicitly... I'm not sure
        # why PIL doesn't get this right.
        imgdata = np.fromstring(img.tostring(), np.uint8)
        imgdata.shape = (int(imgdata.shape[0] / 2), 2)
        imgdata = imgdata.astype(np.uint16)
        hi, lo = (0, 1) if img.tag.prefix == 'MM' else (1, 0)
        imgdata = imgdata[:, hi] * 256 + imgdata[:, lo]
        img_size = list(img.size)
        img_size.reverse()
        new_img = imgdata.reshape(img_size)
        # The magic # for maximum sample value is 281
        if 281 in img.tag:
            img = new_img.astype(np.float32) / img.tag[281][0]
        elif np.max(new_img) < 4096:
            img = new_img.astype(np.float32) / 4095.
        else:
            img = new_img.astype(np.float32) / 65535.
    else:
        img = pil_to_array(img)

    pylab.subplot(1, 3, 1).imshow(img, cmap=matplotlib.cm.Greys_r)
    pylab.show()

    if len(img.shape) > 2:
        raise ValueError('Image must be grayscale')

    ## Create mask that will fix problem when image has black areas outside of well
    edges = canny(img, np.ones(img.shape, bool), 2, .1, .3)
    ci = np.cumsum(edges, 0)
    cj = np.cumsum(edges, 1)
    i, j = np.mgrid[0:img.shape[0], 0:img.shape[1]]
    mask = ci > 0
    mask = mask & (cj > 0)
    mask[1:, :] &= (ci[0:-1, :] < ci[-1, j[0:-1, :]])
    mask[:, 1:] &= (cj[:, 0:-1] < cj[i[:, 0:-1], -1])

    import time
    t0 = time.clock()
    bg = backgr(img, mask, MODE_AUTO, sigma, splinepoints=splinepoints, scale=scale)
    print("Executed in %f sec" % (time.clock() - t0))
    bg[~mask] = img[~mask]

    pylab.subplot(1, 3, 2).imshow(img - bg, cmap=matplotlib.cm.Greys_r)
    pylab.subplot(1, 3, 3).imshow(bg, cmap=matplotlib.cm.Greys_r)
    pylab.show()
[ "def", "bg_compensate", "(", "img", ",", "sigma", ",", "splinepoints", ",", "scale", ")", ":", "from", "PIL", "import", "Image", "import", "pylab", "from", "matplotlib", ".", "image", "import", "pil_to_array", "from", "centrosome", ".", "filter", "import", "canny", "import", "matplotlib", "img", "=", "Image", ".", "open", "(", "img", ")", "if", "img", ".", "mode", "==", "'I;16'", ":", "# 16-bit image", "# deal with the endianness explicitly... I'm not sure", "# why PIL doesn't get this right.", "imgdata", "=", "np", ".", "fromstring", "(", "img", ".", "tostring", "(", ")", ",", "np", ".", "uint8", ")", "imgdata", ".", "shape", "=", "(", "int", "(", "imgdata", ".", "shape", "[", "0", "]", "/", "2", ")", ",", "2", ")", "imgdata", "=", "imgdata", ".", "astype", "(", "np", ".", "uint16", ")", "hi", ",", "lo", "=", "(", "0", ",", "1", ")", "if", "img", ".", "tag", ".", "prefix", "==", "'MM'", "else", "(", "1", ",", "0", ")", "imgdata", "=", "imgdata", "[", ":", ",", "hi", "]", "*", "256", "+", "imgdata", "[", ":", ",", "lo", "]", "img_size", "=", "list", "(", "img", ".", "size", ")", "img_size", ".", "reverse", "(", ")", "new_img", "=", "imgdata", ".", "reshape", "(", "img_size", ")", "# The magic # for maximum sample value is 281", "if", "281", "in", "img", ".", "tag", ":", "img", "=", "new_img", ".", "astype", "(", "np", ".", "float32", ")", "/", "img", ".", "tag", "[", "281", "]", "[", "0", "]", "elif", "np", ".", "max", "(", "new_img", ")", "<", "4096", ":", "img", "=", "new_img", ".", "astype", "(", "np", ".", "float32", ")", "/", "4095.", "else", ":", "img", "=", "new_img", ".", "astype", "(", "np", ".", "float32", ")", "/", "65535.", "else", ":", "img", "=", "pil_to_array", "(", "img", ")", "pylab", ".", "subplot", "(", "1", ",", "3", ",", "1", ")", ".", "imshow", "(", "img", ",", "cmap", "=", "matplotlib", ".", "cm", ".", "Greys_r", ")", "pylab", ".", "show", "(", ")", "if", "len", "(", "img", ".", "shape", ")", ">", "2", ":", "raise", "ValueError", "(", "'Image must be grayscale'", ")", "## Create mask that will fix problem when image has black areas outside of well", "edges", "=", "canny", "(", "img", ",", "np", ".", "ones", "(", "img", ".", "shape", ",", "bool", ")", ",", "2", ",", ".1", ",", ".3", ")", "ci", "=", "np", ".", "cumsum", "(", "edges", ",", "0", ")", "cj", "=", "np", ".", "cumsum", "(", "edges", ",", "1", ")", "i", ",", "j", "=", "np", ".", "mgrid", "[", "0", ":", "img", ".", "shape", "[", "0", "]", ",", "0", ":", "img", ".", "shape", "[", "1", "]", "]", "mask", "=", "ci", ">", "0", "mask", "=", "mask", "&", "(", "cj", ">", "0", ")", "mask", "[", "1", ":", ",", ":", "]", "&=", "(", "ci", "[", "0", ":", "-", "1", ",", ":", "]", "<", "ci", "[", "-", "1", ",", "j", "[", "0", ":", "-", "1", ",", ":", "]", "]", ")", "mask", "[", ":", ",", "1", ":", "]", "&=", "(", "cj", "[", ":", ",", "0", ":", "-", "1", "]", "<", "cj", "[", "i", "[", ":", ",", "0", ":", "-", "1", "]", ",", "-", "1", "]", ")", "import", "time", "t0", "=", "time", ".", "clock", "(", ")", "bg", "=", "backgr", "(", "img", ",", "mask", ",", "MODE_AUTO", ",", "sigma", ",", "splinepoints", "=", "splinepoints", ",", "scale", "=", "scale", ")", "print", "(", "\"Executed in %f sec\"", "%", "(", "time", ".", "clock", "(", ")", "-", "t0", ")", ")", "bg", "[", "~", "mask", "]", "=", "img", "[", "~", "mask", "]", "pylab", ".", "subplot", "(", "1", ",", "3", ",", "2", ")", ".", "imshow", "(", "img", "-", "bg", ",", "cmap", "=", "matplotlib", ".", "cm", ".", "Greys_r", ")", "pylab", ".", "subplot", "(", "1", ",", "3", 
",", "3", ")", ".", "imshow", "(", "bg", ",", "cmap", "=", "matplotlib", ".", "cm", ".", "Greys_r", ")", "pylab", ".", "show", "(", ")" ]
avg_line_len: 35.982456
score: 19.175439

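The 'I;16' branch rebuilds 16-bit samples from raw byte pairs, picking the high byte based on the TIFF byte-order prefix ('MM' means big-endian). A self-contained sketch of that reconstruction on synthetic bytes (note that np.fromstring and img.tostring in the function above are long-deprecated NumPy/PIL calls; np.frombuffer is the modern equivalent):

import numpy as np

# Two 16-bit samples, 0x0102 and 0x0304, laid out big-endian ('MM').
raw = np.frombuffer(bytes([0x01, 0x02, 0x03, 0x04]), np.uint8)
pairs = raw.reshape(-1, 2).astype(np.uint16)
hi, lo = (0, 1)  # big-endian: high byte first
samples = pairs[:, hi] * 256 + pairs[:, lo]
print(samples)  # [258 772]
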
text:

def infer(self, pattern=False):
    """https://github.com/frictionlessdata/datapackage-py#package
    """

    # Files
    if pattern:

        # No base path
        if not self.__base_path:
            message = 'Base path is required for pattern infer'
            raise exceptions.DataPackageException(message)

        # Add resources
        options = {'recursive': True} if '**' in pattern else {}
        for path in glob.glob(os.path.join(self.__base_path, pattern), **options):
            self.add_resource({'path': os.path.relpath(path, self.__base_path)})

    # Resources
    for index, resource in enumerate(self.resources):
        descriptor = resource.infer()
        self.__current_descriptor['resources'][index] = descriptor
        self.__build()

    # Profile
    if self.__next_descriptor['profile'] == config.DEFAULT_DATA_PACKAGE_PROFILE:
        if self.resources and all(map(lambda resource: resource.tabular, self.resources)):
            self.__current_descriptor['profile'] = 'tabular-data-package'
            self.__build()

    return self.__current_descriptor
[ "def", "infer", "(", "self", ",", "pattern", "=", "False", ")", ":", "# Files", "if", "pattern", ":", "# No base path", "if", "not", "self", ".", "__base_path", ":", "message", "=", "'Base path is required for pattern infer'", "raise", "exceptions", ".", "DataPackageException", "(", "message", ")", "# Add resources", "options", "=", "{", "'recursive'", ":", "True", "}", "if", "'**'", "in", "pattern", "else", "{", "}", "for", "path", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "__base_path", ",", "pattern", ")", ",", "*", "*", "options", ")", ":", "self", ".", "add_resource", "(", "{", "'path'", ":", "os", ".", "path", ".", "relpath", "(", "path", ",", "self", ".", "__base_path", ")", "}", ")", "# Resources", "for", "index", ",", "resource", "in", "enumerate", "(", "self", ".", "resources", ")", ":", "descriptor", "=", "resource", ".", "infer", "(", ")", "self", ".", "__current_descriptor", "[", "'resources'", "]", "[", "index", "]", "=", "descriptor", "self", ".", "__build", "(", ")", "# Profile", "if", "self", ".", "__next_descriptor", "[", "'profile'", "]", "==", "config", ".", "DEFAULT_DATA_PACKAGE_PROFILE", ":", "if", "self", ".", "resources", "and", "all", "(", "map", "(", "lambda", "resource", ":", "resource", ".", "tabular", ",", "self", ".", "resources", ")", ")", ":", "self", ".", "__current_descriptor", "[", "'profile'", "]", "=", "'tabular-data-package'", "self", ".", "__build", "(", ")", "return", "self", ".", "__current_descriptor" ]
avg_line_len: 38.533333
score: 24.8

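The pattern branch turns on glob's recursive mode only when the pattern contains '**', because '**' matches across directory levels only with recursive=True. The rule in isolation (function name is illustrative):

import glob
import os

def matching_paths(base_path, pattern):
    # '**' needs recursive=True to descend into subdirectories.
    options = {'recursive': True} if '**' in pattern else {}
    return glob.glob(os.path.join(base_path, pattern), **options)

# matching_paths('data', '**/*.csv') walks every subdirectory;
# matching_paths('data', '*.csv') stays at the top level.
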
text:

def build_fptree(self, transactions, root_value,
                 root_count, frequent, headers):
    """
    Build the FP tree and return the root node.
    """
    root = FPNode(root_value, root_count, None)

    for transaction in transactions:
        sorted_items = [x for x in transaction if x in frequent]
        sorted_items.sort(key=lambda x: frequent[x], reverse=True)
        if len(sorted_items) > 0:
            self.insert_tree(sorted_items, root, headers)

    return root
[ "def", "build_fptree", "(", "self", ",", "transactions", ",", "root_value", ",", "root_count", ",", "frequent", ",", "headers", ")", ":", "root", "=", "FPNode", "(", "root_value", ",", "root_count", ",", "None", ")", "for", "transaction", "in", "transactions", ":", "sorted_items", "=", "[", "x", "for", "x", "in", "transaction", "if", "x", "in", "frequent", "]", "sorted_items", ".", "sort", "(", "key", "=", "lambda", "x", ":", "frequent", "[", "x", "]", ",", "reverse", "=", "True", ")", "if", "len", "(", "sorted_items", ")", ">", "0", ":", "self", ".", "insert_tree", "(", "sorted_items", ",", "root", ",", "headers", ")", "return", "root" ]
avg_line_len: 37.071429
score: 16.071429

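Sorting each transaction by descending support before insertion is what makes common prefixes share nodes in the FP-tree. The filter-and-sort step in isolation (the support counts are made up):

# Hypothetical support counts from a first counting pass.
frequent = {'bread': 5, 'milk': 4, 'eggs': 2}

transaction = ['eggs', 'beer', 'bread', 'milk']  # 'beer' is below threshold

# Keep frequent items only, most frequent first, as build_fptree does.
sorted_items = [x for x in transaction if x in frequent]
sorted_items.sort(key=lambda x: frequent[x], reverse=True)
print(sorted_items)  # ['bread', 'milk', 'eggs']
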
text:

def _process_genotype_backgrounds(self, limit=None):
    """
    This table provides a mapping of genotypes to background genotypes.
    Note that the background_id is also a genotype_id.

    Makes these triples:
    <ZFIN:genotype_id> GENO:has_reference_part <ZFIN:background_id>
    <ZFIN:background_id> a GENO:genomic_background
    <ZFIN:background_id> in_taxon <taxon_id>
    <taxon_id> a class

    :param limit:
    :return:
    """
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    LOG.info("Processing genotype backgrounds")
    line_counter = 0
    raw = '/'.join((self.rawdir, self.files['backgrounds']['file']))
    geno = Genotype(graph)

    # Add the taxon as a class
    taxon_id = self.globaltt['Danio rerio']
    model.addClassToGraph(taxon_id, None)

    with open(raw, 'r', encoding="iso-8859-1") as csvfile:
        filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        for row in filereader:
            line_counter += 1

            # Genotype_ID	Genotype_Name	Background	Background_Name
            (genotype_id, genotype_name, background_id, unused) = row

            if self.test_mode and genotype_id not in self.test_ids['genotype']:
                continue

            genotype_id = 'ZFIN:' + genotype_id.strip()
            background_id = 'ZFIN:' + background_id.strip()

            # store this in the hash for later lookup
            # when building fish genotypes
            self.genotype_backgrounds[genotype_id] = background_id

            # add the background into the graph,
            # in case we haven't seen it before
            geno.addGenomicBackground(background_id, None)

            # hang the taxon from the background
            geno.addTaxon(taxon_id, background_id)

            # add the intrinsic genotype to the graph
            # we DO NOT ADD THE LABEL here
            # as it doesn't include the background
            geno.addGenotype(genotype_id, None, self.globaltt['intrinsic_genotype'])

            # Add background to the intrinsic genotype
            geno.addGenomicBackgroundToGenotype(background_id, genotype_id)

            if not self.test_mode and limit is not None and line_counter > limit:
                break

    LOG.info("Done with genotype backgrounds")
    return
[ "def", "_process_genotype_backgrounds", "(", "self", ",", "limit", "=", "None", ")", ":", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "LOG", ".", "info", "(", "\"Processing genotype backgrounds\"", ")", "line_counter", "=", "0", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'backgrounds'", "]", "[", "'file'", "]", ")", ")", "geno", "=", "Genotype", "(", "graph", ")", "# Add the taxon as a class", "taxon_id", "=", "self", ".", "globaltt", "[", "'Danio rerio'", "]", "model", ".", "addClassToGraph", "(", "taxon_id", ",", "None", ")", "with", "open", "(", "raw", ",", "'r'", ",", "encoding", "=", "\"iso-8859-1\"", ")", "as", "csvfile", ":", "filereader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "row", "in", "filereader", ":", "line_counter", "+=", "1", "# Genotype_ID \tGenotype_Name \tBackground \tBackground_Name", "(", "genotype_id", ",", "genotype_name", ",", "background_id", ",", "unused", ")", "=", "row", "if", "self", ".", "test_mode", "and", "genotype_id", "not", "in", "self", ".", "test_ids", "[", "'genotype'", "]", ":", "continue", "genotype_id", "=", "'ZFIN:'", "+", "genotype_id", ".", "strip", "(", ")", "background_id", "=", "'ZFIN:'", "+", "background_id", ".", "strip", "(", ")", "# store this in the hash for later lookup", "# when building fish genotypes", "self", ".", "genotype_backgrounds", "[", "genotype_id", "]", "=", "background_id", "# add the background into the graph,", "# in case we haven't seen it before", "geno", ".", "addGenomicBackground", "(", "background_id", ",", "None", ")", "# hang the taxon from the background", "geno", ".", "addTaxon", "(", "taxon_id", ",", "background_id", ")", "# add the intrinsic genotype to the graph", "# we DO NOT ADD THE LABEL here", "# as it doesn't include the background", "geno", ".", "addGenotype", "(", "genotype_id", ",", "None", ",", "self", ".", "globaltt", "[", "'intrinsic_genotype'", "]", ")", "# Add background to the intrinsic genotype", "geno", ".", "addGenomicBackgroundToGenotype", "(", "background_id", ",", "genotype_id", ")", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "break", "LOG", ".", "info", "(", "\"Done with genotype backgrounds\"", ")", "return" ]
avg_line_len: 37.69697
score: 22.69697

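Each line of the tab-separated backgrounds file is unpacked positionally and the bare IDs are prefixed into ZFIN CURIEs. The parsing pattern on synthetic rows (io.StringIO stands in for the real file; the IDs are made up):

import csv
import io

# Fake rows in the Genotype_ID / Genotype_Name / Background / Background_Name layout.
data = io.StringIO('G1\tname one\tB9\tbg one\nG2\tname two\tB9\tbg one\n')

backgrounds = {}
for genotype_id, genotype_name, background_id, unused in csv.reader(data, delimiter='\t'):
    backgrounds['ZFIN:' + genotype_id.strip()] = 'ZFIN:' + background_id.strip()

print(backgrounds)  # {'ZFIN:G1': 'ZFIN:B9', 'ZFIN:G2': 'ZFIN:B9'}
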
text:

def tmatrix_cov(C, row=None):
    r"""Covariance tensor for the non-reversible transition matrix ensemble.

    Normally the covariance tensor cov(p_ij, p_kl) would carry four indices
    (i,j,k,l). In the non-reversible case rows are independent so that
    cov(p_ij, p_kl)=0 for i not equal to k. Therefore the function will only
    return cov(p_ij, p_ik).

    Parameters
    ----------
    C : (M, M) ndarray
        Count matrix
    row : int (optional)
        If row is given return covariance matrix for specified row only

    Returns
    -------
    cov : (M, M, M) ndarray
        Covariance tensor
    """
    if row is None:
        alpha = C + 1.0  # Dirichlet parameters
        alpha0 = alpha.sum(axis=1)  # Sum of parameters (per row)
        norm = alpha0 ** 2 * (alpha0 + 1.0)

        # Non-normalized covariance tensor
        Z = -alpha[:, :, np.newaxis] * alpha[:, np.newaxis, :]

        # Correct diagonal
        ind = np.diag_indices(C.shape[0])
        Z[:, ind[0], ind[1]] += alpha0[:, np.newaxis] * alpha

        # Covariance matrix
        cov = Z / norm[:, np.newaxis, np.newaxis]
        return cov
    else:
        alpha = C[row, :] + 1.0
        return dirichlet_covariance(alpha)
[ "def", "tmatrix_cov", "(", "C", ",", "row", "=", "None", ")", ":", "if", "row", "is", "None", ":", "alpha", "=", "C", "+", "1.0", "# Dirichlet parameters", "alpha0", "=", "alpha", ".", "sum", "(", "axis", "=", "1", ")", "# Sum of paramters (per row)", "norm", "=", "alpha0", "**", "2", "*", "(", "alpha0", "+", "1.0", ")", "\"\"\"Non-normalized covariance tensor\"\"\"", "Z", "=", "-", "alpha", "[", ":", ",", ":", ",", "np", ".", "newaxis", "]", "*", "alpha", "[", ":", ",", "np", ".", "newaxis", ",", ":", "]", "\"\"\"Correct-diagonal\"\"\"", "ind", "=", "np", ".", "diag_indices", "(", "C", ".", "shape", "[", "0", "]", ")", "Z", "[", ":", ",", "ind", "[", "0", "]", ",", "ind", "[", "1", "]", "]", "+=", "alpha0", "[", ":", ",", "np", ".", "newaxis", "]", "*", "alpha", "\"\"\"Covariance matrix\"\"\"", "cov", "=", "Z", "/", "norm", "[", ":", ",", "np", ".", "newaxis", ",", "np", ".", "newaxis", "]", "return", "cov", "else", ":", "alpha", "=", "C", "[", "row", ",", ":", "]", "+", "1.0", "return", "dirichlet_covariance", "(", "alpha", ")" ]
avg_line_len: 27.697674
score: 22.325581

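Each row i is modeled as Dirichlet-distributed with parameters alpha_ij = c_ij + 1, for which cov(p_ij, p_ik) = (delta_jk * alpha_ij * alpha_i0 - alpha_ij * alpha_ik) / (alpha_i0**2 * (alpha_i0 + 1)); the code builds exactly that numerator (Z plus the diagonal correction) and denominator (norm). A quick numerical sanity check against the known Dirichlet variance (assumes tmatrix_cov above and numpy as np are in scope):

import numpy as np

C = np.array([[5., 2., 1.],
              [0., 3., 4.],
              [2., 2., 2.]])
alpha = C + 1.0
alpha0 = alpha.sum(axis=1)

cov = tmatrix_cov(C)

# Diagonal must equal the Dirichlet variance alpha_j*(alpha0-alpha_j)/(alpha0^2*(alpha0+1)).
var = alpha * (alpha0[:, None] - alpha) / (alpha0 ** 2 * (alpha0 + 1.0))[:, None]
assert np.allclose(np.diagonal(cov, axis1=1, axis2=2), var)

# Each row's covariances sum to zero because sum_j p_ij = 1.
assert np.allclose(cov.sum(axis=2), 0.0)
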
text:

def transforms(self):
    """Return an array of arrays of column transforms.

    The return value is a list of lists, with each list being a segment of
    column transformations, and each segment having one entry per column.
    """
    tr = []
    for c in self.columns:
        tr.append(c.expanded_transform)

    return six.moves.zip_longest(*tr)
[ "def", "transforms", "(", "self", ")", ":", "tr", "=", "[", "]", "for", "c", "in", "self", ".", "columns", ":", "tr", ".", "append", "(", "c", ".", "expanded_transform", ")", "return", "six", ".", "moves", ".", "zip_longest", "(", "*", "tr", ")" ]
avg_line_len: 28.923077
score: 22.692308

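zip_longest(*tr) transposes the per-column transform lists into per-segment tuples, padding shorter columns with None. A standalone illustration (the transform names are synthetic; six.moves.zip_longest resolves to itertools.zip_longest on Python 3):

from itertools import zip_longest

# One list of transforms per column; columns may have different depths.
per_column = [['strip', 'upper'],
              ['strip'],
              ['strip', 'lower', 'hash']]

# Transposed: one tuple per segment, one entry per column, padded with None.
for segment in zip_longest(*per_column):
    print(segment)
# ('strip', 'strip', 'strip')
# ('upper', None, 'lower')
# (None, None, 'hash')
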
text:

def Initialize(self):
    """Open the delegate object."""
    if "r" in self.mode:
        delegate = self.Get(self.Schema.DELEGATE)
        if delegate:
            self.delegate = aff4.FACTORY.Open(
                delegate, mode=self.mode, token=self.token, age=self.age_policy)
[ "def", "Initialize", "(", "self", ")", ":", "if", "\"r\"", "in", "self", ".", "mode", ":", "delegate", "=", "self", ".", "Get", "(", "self", ".", "Schema", ".", "DELEGATE", ")", "if", "delegate", ":", "self", ".", "delegate", "=", "aff4", ".", "FACTORY", ".", "Open", "(", "delegate", ",", "mode", "=", "self", ".", "mode", ",", "token", "=", "self", ".", "token", ",", "age", "=", "self", ".", "age_policy", ")" ]
avg_line_len: 37.571429
score: 14.571429

text:

def vm_present(name, vmconfig, config=None):
    '''
    Ensure vm is present on the computenode

    name : string
        hostname of vm
    vmconfig : dict
        options to set for the vm
    config : dict
        fine grain control over vm_present

    .. note::

        The following configuration properties can be toggled in the config parameter.

        - kvm_reboot (true) - reboots of kvm zones if needed for a config update
        - auto_import (false) - automatic importing of missing images
        - auto_lx_vars (true) - copy kernel_version and docker:* variables from image
        - reprovision (false) - reprovision on image_uuid changes
        - enforce_tags (true) - false = add tags only, true = add, update, and remove tags
        - enforce_routes (true) - false = add routes only, true = add, update, and remove routes
        - enforce_internal_metadata (true) - false = add metadata only, true = add, update, and remove metadata
        - enforce_customer_metadata (true) - false = add metadata only, true = add, update, and remove metadata

    .. note::

        State ID is used as hostname. Hostnames must be unique.

    .. note::

        If hostname is provided in vmconfig this will take precedence over the State ID.
        This allows multiple states to be applied to the same vm.

    .. note::

        The following instances should have a unique ID:

        - nic : mac
        - filesystem : target
        - disk : path or diskN for zvols

        e.g. disk0 will be the first disk added, disk1 the 2nd, ...

    .. versionchanged:: 2019.2.0

        Added support for docker image uuids, added auto_lx_vars configuration,
        documented some missing configuration options.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # config defaults
    state_config = config if config else {}
    config = {
        'kvm_reboot': True,
        'auto_import': False,
        'auto_lx_vars': True,
        'reprovision': False,
        'enforce_tags': True,
        'enforce_routes': True,
        'enforce_internal_metadata': True,
        'enforce_customer_metadata': True,
    }
    config.update(state_config)
    log.debug('smartos.vm_present::%s::config - %s', name, config)

    # map special vmconfig parameters
    #  collections have set/remove handlers
    #  instances have add/update/remove handlers and a unique id
    vmconfig_type = {
        'collection': [
            'tags',
            'customer_metadata',
            'internal_metadata',
            'routes'
        ],
        'instance': {
            'nics': 'mac',
            'disks': 'path',
            'filesystems': 'target'
        },
        'create_only': [
            'filesystems'
        ]
    }
    vmconfig_docker_keep = [
        'docker:id',
        'docker:restartcount',
    ]
    vmconfig_docker_array = [
        'docker:env',
        'docker:cmd',
        'docker:entrypoint',
    ]

    # parse vmconfig
    vmconfig = _parse_vmconfig(vmconfig, vmconfig_type['instance'])
    log.debug('smartos.vm_present::%s::vmconfig - %s', name, vmconfig)

    # set hostname if needed
    if 'hostname' not in vmconfig:
        vmconfig['hostname'] = name

    # prepare image_uuid
    if 'image_uuid' in vmconfig:
        # NOTE: lookup uuid from docker uuid (normal uuids are passed through unmodified)
        #       we must do this again if we end up importing a missing image later!
        docker_uuid = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
        vmconfig['image_uuid'] = docker_uuid if docker_uuid else vmconfig['image_uuid']

        # NOTE: import image (if missing and allowed)
        if vmconfig['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](vmconfig['image_uuid'])
                    vmconfig['image_uuid'] = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
                    if vmconfig['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(vmconfig['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(vmconfig['image_uuid'])

    # prepare disk.*.image_uuid
    for disk in vmconfig['disks'] if 'disks' in vmconfig else []:
        if 'image_uuid' in disk and disk['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](disk['image_uuid'])
                    if disk['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(disk['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(disk['image_uuid'])

    # docker json-array handling
    if 'internal_metadata' in vmconfig:
        for var in vmconfig_docker_array:
            if var not in vmconfig['internal_metadata']:
                continue
            if isinstance(vmconfig['internal_metadata'][var], list):
                vmconfig['internal_metadata'][var] = json.dumps(
                    vmconfig['internal_metadata'][var]
                )

    # copy lx variables
    if vmconfig['brand'] == 'lx' and config['auto_lx_vars']:
        # NOTE: we can only copy the lx vars after the image has been imported
        vmconfig = _copy_lx_vars(vmconfig)

    # quick abort if things look wrong
    # NOTE: use explicit check for false, otherwise None also matches!
    if ret['result'] is False:
        return ret

    # check if vm exists
    if vmconfig['hostname'] in __salt__['vmadm.list'](order='hostname'):
        # update vm
        ret['result'] = True

        # expand vmconfig
        vmconfig = {
            'state': vmconfig,
            'current': __salt__['vmadm.get'](vmconfig['hostname'], key='hostname'),
            'changed': {},
            'reprovision_uuid': None
        }

        # prepare reprovision
        if 'image_uuid' in vmconfig['state']:
            vmconfig['reprovision_uuid'] = vmconfig['state']['image_uuid']
            vmconfig['state']['image_uuid'] = vmconfig['current']['image_uuid']

        # disks need some special care
        if 'disks' in vmconfig['state']:
            new_disks = []
            for disk in vmconfig['state']['disks']:
                path = False
                if 'disks' in vmconfig['current']:
                    for cdisk in vmconfig['current']['disks']:
                        if cdisk['path'].endswith(disk['path']):
                            path = cdisk['path']
                            break
                if not path:
                    del disk['path']
                else:
                    disk['path'] = path
                new_disks.append(disk)
            vmconfig['state']['disks'] = new_disks

        # process properties
        for prop in vmconfig['state']:
            # skip special vmconfig_types
            if prop in vmconfig_type['instance'] or \
               prop in vmconfig_type['collection'] or \
               prop in vmconfig_type['create_only']:
                continue

            # skip unchanged properties
            if prop in vmconfig['current']:
                if isinstance(vmconfig['current'][prop], (list)) or isinstance(vmconfig['current'][prop], (dict)):
                    if vmconfig['current'][prop] == vmconfig['state'][prop]:
                        continue
                else:
                    if "{0}".format(vmconfig['current'][prop]) == "{0}".format(vmconfig['state'][prop]):
                        continue

            # add property to changeset
            vmconfig['changed'][prop] = vmconfig['state'][prop]

        # process collections
        for collection in vmconfig_type['collection']:
            # skip create only collections
            if collection in vmconfig_type['create_only']:
                continue

            # enforcement
            enforce = config['enforce_{0}'.format(collection)]
            log.debug('smartos.vm_present::enforce_%s = %s', collection, enforce)

            # dockerinit handling
            if collection == 'internal_metadata' and vmconfig['state'].get('docker', False):
                if 'internal_metadata' not in vmconfig['state']:
                    vmconfig['state']['internal_metadata'] = {}

                # preserve some docker specific metadata (added and needed by dockerinit)
                for var in vmconfig_docker_keep:
                    val = vmconfig['current'].get(collection, {}).get(var, None)
                    if val is not None:
                        vmconfig['state']['internal_metadata'][var] = val

            # process add and update for collection
            if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                for prop in vmconfig['state'][collection]:
                    # skip unchanged properties
                    if prop in vmconfig['current'][collection] and \
                       vmconfig['current'][collection][prop] == vmconfig['state'][collection][prop]:
                        continue

                    # skip update if not enforcing
                    if not enforce and prop in vmconfig['current'][collection]:
                        continue

                    # create set_ dict
                    if 'set_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['set_{0}'.format(collection)] = {}

                    # add property to changeset
                    vmconfig['changed']['set_{0}'.format(collection)][prop] = vmconfig['state'][collection][prop]

            # process remove for collection
            if enforce and collection in vmconfig['current'] and vmconfig['current'][collection] is not None:
                for prop in vmconfig['current'][collection]:
                    # skip if exists in state
                    if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                        if prop in vmconfig['state'][collection]:
                            continue

                    # create remove_ array
                    if 'remove_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['remove_{0}'.format(collection)] = []

                    # remove property
                    vmconfig['changed']['remove_{0}'.format(collection)].append(prop)

        # process instances
        for instance in vmconfig_type['instance']:
            # skip create only instances
            if instance in vmconfig_type['create_only']:
                continue

            # add or update instances
            if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                for state_cfg in vmconfig['state'][instance]:
                    add_instance = True

                    # find instance with matching ids
                    for current_cfg in vmconfig['current'][instance]:
                        if vmconfig_type['instance'][instance] not in state_cfg:
                            continue

                        if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                            # ids have matched, disable add instance
                            add_instance = False

                            changed = _get_instance_changes(current_cfg, state_cfg)
                            update_cfg = {}

                            # handle changes
                            for prop in changed:
                                update_cfg[prop] = state_cfg[prop]

                            # handle new properties
                            for prop in state_cfg:
                                # skip empty props like ips, options,..
                                if isinstance(state_cfg[prop], (list)) and not state_cfg[prop]:
                                    continue

                                if prop not in current_cfg:
                                    update_cfg[prop] = state_cfg[prop]

                            # update instance
                            if update_cfg:
                                # create update_ array
                                if 'update_{0}'.format(instance) not in vmconfig['changed']:
                                    vmconfig['changed']['update_{0}'.format(instance)] = []

                                update_cfg[vmconfig_type['instance'][instance]] = state_cfg[vmconfig_type['instance'][instance]]
                                vmconfig['changed']['update_{0}'.format(instance)].append(update_cfg)

                    if add_instance:
                        # create add_ array
                        if 'add_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['add_{0}'.format(instance)] = []

                        # add instance
                        vmconfig['changed']['add_{0}'.format(instance)].append(state_cfg)

            # remove instances
            if instance in vmconfig['current'] and vmconfig['current'][instance] is not None:
                for current_cfg in vmconfig['current'][instance]:
                    remove_instance = True

                    # find instance with matching ids
                    if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                        for state_cfg in vmconfig['state'][instance]:
                            if vmconfig_type['instance'][instance] not in state_cfg:
                                continue

                            if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                                # keep instance if matched
                                remove_instance = False

                    if remove_instance:
                        # create remove_ array
                        if 'remove_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['remove_{0}'.format(instance)] = []

                        # remove instance
                        vmconfig['changed']['remove_{0}'.format(instance)].append(
                            current_cfg[vmconfig_type['instance'][instance]]
                        )

        # update vm if we have pending changes
        kvm_needs_start = False
        if not __opts__['test'] and vmconfig['changed']:
            # stop kvm if disk updates and kvm_reboot
            if vmconfig['current']['brand'] == 'kvm' and config['kvm_reboot']:
                if 'add_disks' in vmconfig['changed'] or \
                   'update_disks' in vmconfig['changed'] or \
                   'remove_disks' in vmconfig['changed']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        kvm_needs_start = True
                        __salt__['vmadm.stop'](vm=vmconfig['state']['hostname'], key='hostname')

            # do update
            rret = __salt__['vmadm.update'](vm=vmconfig['state']['hostname'], key='hostname', **vmconfig['changed'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['result'] = False
                ret['comment'] = "{0}".format(rret['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']

        if ret['result']:
            if __opts__['test']:
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']

            if vmconfig['state']['hostname'] in ret['changes'] and ret['changes'][vmconfig['state']['hostname']]:
                ret['comment'] = 'vm {0} updated'.format(vmconfig['state']['hostname'])
                if config['kvm_reboot'] and vmconfig['current']['brand'] == 'kvm' and not __opts__['test']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        __salt__['vmadm.reboot'](vm=vmconfig['state']['hostname'], key='hostname')
                if kvm_needs_start:
                    __salt__['vmadm.start'](vm=vmconfig['state']['hostname'], key='hostname')
            else:
                ret['changes'] = {}
                ret['comment'] = 'vm {0} is up to date'.format(vmconfig['state']['hostname'])

            # reprovision (if required and allowed)
            if 'image_uuid' in vmconfig['current'] and vmconfig['reprovision_uuid'] != vmconfig['current']['image_uuid']:
                if config['reprovision']:
                    rret = __salt__['vmadm.reprovision'](
                        vm=vmconfig['state']['hostname'],
                        key='hostname',
                        image=vmconfig['reprovision_uuid']
                    )
                    if not isinstance(rret, (bool)) and 'Error' in rret:
                        ret['result'] = False
                        ret['comment'] = 'vm {0} updated, reprovision failed'.format(
                            vmconfig['state']['hostname']
                        )
                    else:
                        ret['comment'] = 'vm {0} updated and reprovisioned'.format(vmconfig['state']['hostname'])
                        if vmconfig['state']['hostname'] not in ret['changes']:
                            ret['changes'][vmconfig['state']['hostname']] = {}
                        ret['changes'][vmconfig['state']['hostname']]['image_uuid'] = vmconfig['reprovision_uuid']
                else:
                    log.warning('smartos.vm_present::%s::reprovision - '
                                'image_uuid in state does not match current, '
                                'reprovision not allowed',
                                name)
        else:
            ret['comment'] = 'vm {0} failed to be updated'.format(vmconfig['state']['hostname'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['comment'] = "{0}".format(rret['Error'])
    else:
        # check required image installed
        ret['result'] = True

        # disks need some special care
        if 'disks' in vmconfig:
            new_disks = []
            for disk in vmconfig['disks']:
                if 'path' in disk:
                    del disk['path']
                new_disks.append(disk)
            vmconfig['disks'] = new_disks

        # create vm
        if ret['result']:
            uuid = __salt__['vmadm.create'](**vmconfig) if not __opts__['test'] else True
            if not isinstance(uuid, (bool)) and 'Error' in uuid:
                ret['result'] = False
                ret['comment'] = "{0}".format(uuid['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['hostname']] = vmconfig
                ret['comment'] = 'vm {0} created'.format(vmconfig['hostname'])

    return ret
[ "def", "vm_present", "(", "name", ",", "vmconfig", ",", "config", "=", "None", ")", ":", "name", "=", "name", ".", "lower", "(", ")", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", "}", "# config defaults", "state_config", "=", "config", "if", "config", "else", "{", "}", "config", "=", "{", "'kvm_reboot'", ":", "True", ",", "'auto_import'", ":", "False", ",", "'auto_lx_vars'", ":", "True", ",", "'reprovision'", ":", "False", ",", "'enforce_tags'", ":", "True", ",", "'enforce_routes'", ":", "True", ",", "'enforce_internal_metadata'", ":", "True", ",", "'enforce_customer_metadata'", ":", "True", ",", "}", "config", ".", "update", "(", "state_config", ")", "log", ".", "debug", "(", "'smartos.vm_present::%s::config - %s'", ",", "name", ",", "config", ")", "# map special vmconfig parameters", "# collections have set/remove handlers", "# instances have add/update/remove handlers and a unique id", "vmconfig_type", "=", "{", "'collection'", ":", "[", "'tags'", ",", "'customer_metadata'", ",", "'internal_metadata'", ",", "'routes'", "]", ",", "'instance'", ":", "{", "'nics'", ":", "'mac'", ",", "'disks'", ":", "'path'", ",", "'filesystems'", ":", "'target'", "}", ",", "'create_only'", ":", "[", "'filesystems'", "]", "}", "vmconfig_docker_keep", "=", "[", "'docker:id'", ",", "'docker:restartcount'", ",", "]", "vmconfig_docker_array", "=", "[", "'docker:env'", ",", "'docker:cmd'", ",", "'docker:entrypoint'", ",", "]", "# parse vmconfig", "vmconfig", "=", "_parse_vmconfig", "(", "vmconfig", ",", "vmconfig_type", "[", "'instance'", "]", ")", "log", ".", "debug", "(", "'smartos.vm_present::%s::vmconfig - %s'", ",", "name", ",", "vmconfig", ")", "# set hostname if needed", "if", "'hostname'", "not", "in", "vmconfig", ":", "vmconfig", "[", "'hostname'", "]", "=", "name", "# prepare image_uuid", "if", "'image_uuid'", "in", "vmconfig", ":", "# NOTE: lookup uuid from docker uuid (normal uuid's are passed throuhg unmodified)", "# we must do this again if we end up importing a missing image later!", "docker_uuid", "=", "__salt__", "[", "'imgadm.docker_to_uuid'", "]", "(", "vmconfig", "[", "'image_uuid'", "]", ")", "vmconfig", "[", "'image_uuid'", "]", "=", "docker_uuid", "if", "docker_uuid", "else", "vmconfig", "[", "'image_uuid'", "]", "# NOTE: import image (if missing and allowed)", "if", "vmconfig", "[", "'image_uuid'", "]", "not", "in", "__salt__", "[", "'imgadm.list'", "]", "(", ")", ":", "if", "config", "[", "'auto_import'", "]", ":", "if", "not", "__opts__", "[", "'test'", "]", ":", "res", "=", "__salt__", "[", "'imgadm.import'", "]", "(", "vmconfig", "[", "'image_uuid'", "]", ")", "vmconfig", "[", "'image_uuid'", "]", "=", "__salt__", "[", "'imgadm.docker_to_uuid'", "]", "(", "vmconfig", "[", "'image_uuid'", "]", ")", "if", "vmconfig", "[", "'image_uuid'", "]", "not", "in", "res", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'failed to import image {0}'", ".", "format", "(", "vmconfig", "[", "'image_uuid'", "]", ")", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'image {0} not installed'", ".", "format", "(", "vmconfig", "[", "'image_uuid'", "]", ")", "# prepare disk.*.image_uuid", "for", "disk", "in", "vmconfig", "[", "'disks'", "]", "if", "'disks'", "in", "vmconfig", "else", "[", "]", ":", "if", "'image_uuid'", "in", "disk", "and", "disk", "[", "'image_uuid'", "]", "not", "in", "__salt__", "[", "'imgadm.list'", "]", "(", ")", ":", 
"if", "config", "[", "'auto_import'", "]", ":", "if", "not", "__opts__", "[", "'test'", "]", ":", "res", "=", "__salt__", "[", "'imgadm.import'", "]", "(", "disk", "[", "'image_uuid'", "]", ")", "if", "disk", "[", "'image_uuid'", "]", "not", "in", "res", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'failed to import image {0}'", ".", "format", "(", "disk", "[", "'image_uuid'", "]", ")", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'image {0} not installed'", ".", "format", "(", "disk", "[", "'image_uuid'", "]", ")", "# docker json-array handling", "if", "'internal_metadata'", "in", "vmconfig", ":", "for", "var", "in", "vmconfig_docker_array", ":", "if", "var", "not", "in", "vmconfig", "[", "'internal_metadata'", "]", ":", "continue", "if", "isinstance", "(", "vmconfig", "[", "'internal_metadata'", "]", "[", "var", "]", ",", "list", ")", ":", "vmconfig", "[", "'internal_metadata'", "]", "[", "var", "]", "=", "json", ".", "dumps", "(", "vmconfig", "[", "'internal_metadata'", "]", "[", "var", "]", ")", "# copy lx variables", "if", "vmconfig", "[", "'brand'", "]", "==", "'lx'", "and", "config", "[", "'auto_lx_vars'", "]", ":", "# NOTE: we can only copy the lx vars after the image has bene imported", "vmconfig", "=", "_copy_lx_vars", "(", "vmconfig", ")", "# quick abort if things look wrong", "# NOTE: use explicit check for false, otherwise None also matches!", "if", "ret", "[", "'result'", "]", "is", "False", ":", "return", "ret", "# check if vm exists", "if", "vmconfig", "[", "'hostname'", "]", "in", "__salt__", "[", "'vmadm.list'", "]", "(", "order", "=", "'hostname'", ")", ":", "# update vm", "ret", "[", "'result'", "]", "=", "True", "# expand vmconfig", "vmconfig", "=", "{", "'state'", ":", "vmconfig", ",", "'current'", ":", "__salt__", "[", "'vmadm.get'", "]", "(", "vmconfig", "[", "'hostname'", "]", ",", "key", "=", "'hostname'", ")", ",", "'changed'", ":", "{", "}", ",", "'reprovision_uuid'", ":", "None", "}", "# prepare reprovision", "if", "'image_uuid'", "in", "vmconfig", "[", "'state'", "]", ":", "vmconfig", "[", "'reprovision_uuid'", "]", "=", "vmconfig", "[", "'state'", "]", "[", "'image_uuid'", "]", "vmconfig", "[", "'state'", "]", "[", "'image_uuid'", "]", "=", "vmconfig", "[", "'current'", "]", "[", "'image_uuid'", "]", "# disks need some special care", "if", "'disks'", "in", "vmconfig", "[", "'state'", "]", ":", "new_disks", "=", "[", "]", "for", "disk", "in", "vmconfig", "[", "'state'", "]", "[", "'disks'", "]", ":", "path", "=", "False", "if", "'disks'", "in", "vmconfig", "[", "'current'", "]", ":", "for", "cdisk", "in", "vmconfig", "[", "'current'", "]", "[", "'disks'", "]", ":", "if", "cdisk", "[", "'path'", "]", ".", "endswith", "(", "disk", "[", "'path'", "]", ")", ":", "path", "=", "cdisk", "[", "'path'", "]", "break", "if", "not", "path", ":", "del", "disk", "[", "'path'", "]", "else", ":", "disk", "[", "'path'", "]", "=", "path", "new_disks", ".", "append", "(", "disk", ")", "vmconfig", "[", "'state'", "]", "[", "'disks'", "]", "=", "new_disks", "# process properties", "for", "prop", "in", "vmconfig", "[", "'state'", "]", ":", "# skip special vmconfig_types", "if", "prop", "in", "vmconfig_type", "[", "'instance'", "]", "or", "prop", "in", "vmconfig_type", "[", "'collection'", "]", "or", "prop", "in", "vmconfig_type", "[", "'create_only'", "]", ":", "continue", "# skip unchanged properties", "if", "prop", "in", "vmconfig", "[", "'current'", "]", ":", "if", "isinstance", "(", 
"vmconfig", "[", "'current'", "]", "[", "prop", "]", ",", "(", "list", ")", ")", "or", "isinstance", "(", "vmconfig", "[", "'current'", "]", "[", "prop", "]", ",", "(", "dict", ")", ")", ":", "if", "vmconfig", "[", "'current'", "]", "[", "prop", "]", "==", "vmconfig", "[", "'state'", "]", "[", "prop", "]", ":", "continue", "else", ":", "if", "\"{0}\"", ".", "format", "(", "vmconfig", "[", "'current'", "]", "[", "prop", "]", ")", "==", "\"{0}\"", ".", "format", "(", "vmconfig", "[", "'state'", "]", "[", "prop", "]", ")", ":", "continue", "# add property to changeset", "vmconfig", "[", "'changed'", "]", "[", "prop", "]", "=", "vmconfig", "[", "'state'", "]", "[", "prop", "]", "# process collections", "for", "collection", "in", "vmconfig_type", "[", "'collection'", "]", ":", "# skip create only collections", "if", "collection", "in", "vmconfig_type", "[", "'create_only'", "]", ":", "continue", "# enforcement", "enforce", "=", "config", "[", "'enforce_{0}'", ".", "format", "(", "collection", ")", "]", "log", ".", "debug", "(", "'smartos.vm_present::enforce_%s = %s'", ",", "collection", ",", "enforce", ")", "# dockerinit handling", "if", "collection", "==", "'internal_metadata'", "and", "vmconfig", "[", "'state'", "]", ".", "get", "(", "'docker'", ",", "False", ")", ":", "if", "'internal_metadata'", "not", "in", "vmconfig", "[", "'state'", "]", ":", "vmconfig", "[", "'state'", "]", "[", "'internal_metadata'", "]", "=", "{", "}", "# preserve some docker specific metadata (added and needed by dockerinit)", "for", "var", "in", "vmconfig_docker_keep", ":", "val", "=", "vmconfig", "[", "'current'", "]", ".", "get", "(", "collection", ",", "{", "}", ")", ".", "get", "(", "var", ",", "None", ")", "if", "val", "is", "not", "None", ":", "vmconfig", "[", "'state'", "]", "[", "'internal_metadata'", "]", "[", "var", "]", "=", "val", "# process add and update for collection", "if", "collection", "in", "vmconfig", "[", "'state'", "]", "and", "vmconfig", "[", "'state'", "]", "[", "collection", "]", "is", "not", "None", ":", "for", "prop", "in", "vmconfig", "[", "'state'", "]", "[", "collection", "]", ":", "# skip unchanged properties", "if", "prop", "in", "vmconfig", "[", "'current'", "]", "[", "collection", "]", "and", "vmconfig", "[", "'current'", "]", "[", "collection", "]", "[", "prop", "]", "==", "vmconfig", "[", "'state'", "]", "[", "collection", "]", "[", "prop", "]", ":", "continue", "# skip update if not enforcing", "if", "not", "enforce", "and", "prop", "in", "vmconfig", "[", "'current'", "]", "[", "collection", "]", ":", "continue", "# create set_ dict", "if", "'set_{0}'", ".", "format", "(", "collection", ")", "not", "in", "vmconfig", "[", "'changed'", "]", ":", "vmconfig", "[", "'changed'", "]", "[", "'set_{0}'", ".", "format", "(", "collection", ")", "]", "=", "{", "}", "# add property to changeset", "vmconfig", "[", "'changed'", "]", "[", "'set_{0}'", ".", "format", "(", "collection", ")", "]", "[", "prop", "]", "=", "vmconfig", "[", "'state'", "]", "[", "collection", "]", "[", "prop", "]", "# process remove for collection", "if", "enforce", "and", "collection", "in", "vmconfig", "[", "'current'", "]", "and", "vmconfig", "[", "'current'", "]", "[", "collection", "]", "is", "not", "None", ":", "for", "prop", "in", "vmconfig", "[", "'current'", "]", "[", "collection", "]", ":", "# skip if exists in state", "if", "collection", "in", "vmconfig", "[", "'state'", "]", "and", "vmconfig", "[", "'state'", "]", "[", "collection", "]", "is", "not", "None", ":", "if", "prop", "in", "vmconfig", "[", 
"'state'", "]", "[", "collection", "]", ":", "continue", "# create remove_ array", "if", "'remove_{0}'", ".", "format", "(", "collection", ")", "not", "in", "vmconfig", "[", "'changed'", "]", ":", "vmconfig", "[", "'changed'", "]", "[", "'remove_{0}'", ".", "format", "(", "collection", ")", "]", "=", "[", "]", "# remove property", "vmconfig", "[", "'changed'", "]", "[", "'remove_{0}'", ".", "format", "(", "collection", ")", "]", ".", "append", "(", "prop", ")", "# process instances", "for", "instance", "in", "vmconfig_type", "[", "'instance'", "]", ":", "# skip create only instances", "if", "instance", "in", "vmconfig_type", "[", "'create_only'", "]", ":", "continue", "# add or update instances", "if", "instance", "in", "vmconfig", "[", "'state'", "]", "and", "vmconfig", "[", "'state'", "]", "[", "instance", "]", "is", "not", "None", ":", "for", "state_cfg", "in", "vmconfig", "[", "'state'", "]", "[", "instance", "]", ":", "add_instance", "=", "True", "# find instance with matching ids", "for", "current_cfg", "in", "vmconfig", "[", "'current'", "]", "[", "instance", "]", ":", "if", "vmconfig_type", "[", "'instance'", "]", "[", "instance", "]", "not", "in", "state_cfg", ":", "continue", "if", "state_cfg", "[", "vmconfig_type", "[", "'instance'", "]", "[", "instance", "]", "]", "==", "current_cfg", "[", "vmconfig_type", "[", "'instance'", "]", "[", "instance", "]", "]", ":", "# ids have matched, disable add instance", "add_instance", "=", "False", "changed", "=", "_get_instance_changes", "(", "current_cfg", ",", "state_cfg", ")", "update_cfg", "=", "{", "}", "# handle changes", "for", "prop", "in", "changed", ":", "update_cfg", "[", "prop", "]", "=", "state_cfg", "[", "prop", "]", "# handle new properties", "for", "prop", "in", "state_cfg", ":", "# skip empty props like ips, options,..", "if", "isinstance", "(", "state_cfg", "[", "prop", "]", ",", "(", "list", ")", ")", "and", "not", "state_cfg", "[", "prop", "]", ":", "continue", "if", "prop", "not", "in", "current_cfg", ":", "update_cfg", "[", "prop", "]", "=", "state_cfg", "[", "prop", "]", "# update instance", "if", "update_cfg", ":", "# create update_ array", "if", "'update_{0}'", ".", "format", "(", "instance", ")", "not", "in", "vmconfig", "[", "'changed'", "]", ":", "vmconfig", "[", "'changed'", "]", "[", "'update_{0}'", ".", "format", "(", "instance", ")", "]", "=", "[", "]", "update_cfg", "[", "vmconfig_type", "[", "'instance'", "]", "[", "instance", "]", "]", "=", "state_cfg", "[", "vmconfig_type", "[", "'instance'", "]", "[", "instance", "]", "]", "vmconfig", "[", "'changed'", "]", "[", "'update_{0}'", ".", "format", "(", "instance", ")", "]", ".", "append", "(", "update_cfg", ")", "if", "add_instance", ":", "# create add_ array", "if", "'add_{0}'", ".", "format", "(", "instance", ")", "not", "in", "vmconfig", "[", "'changed'", "]", ":", "vmconfig", "[", "'changed'", "]", "[", "'add_{0}'", ".", "format", "(", "instance", ")", "]", "=", "[", "]", "# add instance", "vmconfig", "[", "'changed'", "]", "[", "'add_{0}'", ".", "format", "(", "instance", ")", "]", ".", "append", "(", "state_cfg", ")", "# remove instances", "if", "instance", "in", "vmconfig", "[", "'current'", "]", "and", "vmconfig", "[", "'current'", "]", "[", "instance", "]", "is", "not", "None", ":", "for", "current_cfg", "in", "vmconfig", "[", "'current'", "]", "[", "instance", "]", ":", "remove_instance", "=", "True", "# find instance with matching ids", "if", "instance", "in", "vmconfig", "[", "'state'", "]", "and", "vmconfig", "[", "'state'", "]", "[", "instance", 
"]", "is", "not", "None", ":", "for", "state_cfg", "in", "vmconfig", "[", "'state'", "]", "[", "instance", "]", ":", "if", "vmconfig_type", "[", "'instance'", "]", "[", "instance", "]", "not", "in", "state_cfg", ":", "continue", "if", "state_cfg", "[", "vmconfig_type", "[", "'instance'", "]", "[", "instance", "]", "]", "==", "current_cfg", "[", "vmconfig_type", "[", "'instance'", "]", "[", "instance", "]", "]", ":", "# keep instance if matched", "remove_instance", "=", "False", "if", "remove_instance", ":", "# create remove_ array", "if", "'remove_{0}'", ".", "format", "(", "instance", ")", "not", "in", "vmconfig", "[", "'changed'", "]", ":", "vmconfig", "[", "'changed'", "]", "[", "'remove_{0}'", ".", "format", "(", "instance", ")", "]", "=", "[", "]", "# remove instance", "vmconfig", "[", "'changed'", "]", "[", "'remove_{0}'", ".", "format", "(", "instance", ")", "]", ".", "append", "(", "current_cfg", "[", "vmconfig_type", "[", "'instance'", "]", "[", "instance", "]", "]", ")", "# update vm if we have pending changes", "kvm_needs_start", "=", "False", "if", "not", "__opts__", "[", "'test'", "]", "and", "vmconfig", "[", "'changed'", "]", ":", "# stop kvm if disk updates and kvm_reboot", "if", "vmconfig", "[", "'current'", "]", "[", "'brand'", "]", "==", "'kvm'", "and", "config", "[", "'kvm_reboot'", "]", ":", "if", "'add_disks'", "in", "vmconfig", "[", "'changed'", "]", "or", "'update_disks'", "in", "vmconfig", "[", "'changed'", "]", "or", "'remove_disks'", "in", "vmconfig", "[", "'changed'", "]", ":", "if", "vmconfig", "[", "'state'", "]", "[", "'hostname'", "]", "in", "__salt__", "[", "'vmadm.list'", "]", "(", "order", "=", "'hostname'", ",", "search", "=", "'state=running'", ")", ":", "kvm_needs_start", "=", "True", "__salt__", "[", "'vmadm.stop'", "]", "(", "vm", "=", "vmconfig", "[", "'state'", "]", "[", "'hostname'", "]", ",", "key", "=", "'hostname'", ")", "# do update", "rret", "=", "__salt__", "[", "'vmadm.update'", "]", "(", "vm", "=", "vmconfig", "[", "'state'", "]", "[", "'hostname'", "]", ",", "key", "=", "'hostname'", ",", "*", "*", "vmconfig", "[", "'changed'", "]", ")", "if", "not", "isinstance", "(", "rret", ",", "(", "bool", ")", ")", "and", "'Error'", "in", "rret", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "\"{0}\"", ".", "format", "(", "rret", "[", "'Error'", "]", ")", "else", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'changes'", "]", "[", "vmconfig", "[", "'state'", "]", "[", "'hostname'", "]", "]", "=", "vmconfig", "[", "'changed'", "]", "if", "ret", "[", "'result'", "]", ":", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'changes'", "]", "[", "vmconfig", "[", "'state'", "]", "[", "'hostname'", "]", "]", "=", "vmconfig", "[", "'changed'", "]", "if", "vmconfig", "[", "'state'", "]", "[", "'hostname'", "]", "in", "ret", "[", "'changes'", "]", "and", "ret", "[", "'changes'", "]", "[", "vmconfig", "[", "'state'", "]", "[", "'hostname'", "]", "]", ":", "ret", "[", "'comment'", "]", "=", "'vm {0} updated'", ".", "format", "(", "vmconfig", "[", "'state'", "]", "[", "'hostname'", "]", ")", "if", "config", "[", "'kvm_reboot'", "]", "and", "vmconfig", "[", "'current'", "]", "[", "'brand'", "]", "==", "'kvm'", "and", "not", "__opts__", "[", "'test'", "]", ":", "if", "vmconfig", "[", "'state'", "]", "[", "'hostname'", "]", "in", "__salt__", "[", "'vmadm.list'", "]", "(", "order", "=", "'hostname'", ",", "search", "=", "'state=running'", ")", ":", "__salt__", "[", "'vmadm.reboot'", "]", "(", "vm", "=", 
"vmconfig", "[", "'state'", "]", "[", "'hostname'", "]", ",", "key", "=", "'hostname'", ")", "if", "kvm_needs_start", ":", "__salt__", "[", "'vmadm.start'", "]", "(", "vm", "=", "vmconfig", "[", "'state'", "]", "[", "'hostname'", "]", ",", "key", "=", "'hostname'", ")", "else", ":", "ret", "[", "'changes'", "]", "=", "{", "}", "ret", "[", "'comment'", "]", "=", "'vm {0} is up to date'", ".", "format", "(", "vmconfig", "[", "'state'", "]", "[", "'hostname'", "]", ")", "# reprovision (if required and allowed)", "if", "'image_uuid'", "in", "vmconfig", "[", "'current'", "]", "and", "vmconfig", "[", "'reprovision_uuid'", "]", "!=", "vmconfig", "[", "'current'", "]", "[", "'image_uuid'", "]", ":", "if", "config", "[", "'reprovision'", "]", ":", "rret", "=", "__salt__", "[", "'vmadm.reprovision'", "]", "(", "vm", "=", "vmconfig", "[", "'state'", "]", "[", "'hostname'", "]", ",", "key", "=", "'hostname'", ",", "image", "=", "vmconfig", "[", "'reprovision_uuid'", "]", ")", "if", "not", "isinstance", "(", "rret", ",", "(", "bool", ")", ")", "and", "'Error'", "in", "rret", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'vm {0} updated, reprovision failed'", ".", "format", "(", "vmconfig", "[", "'state'", "]", "[", "'hostname'", "]", ")", "else", ":", "ret", "[", "'comment'", "]", "=", "'vm {0} updated and reprovisioned'", ".", "format", "(", "vmconfig", "[", "'state'", "]", "[", "'hostname'", "]", ")", "if", "vmconfig", "[", "'state'", "]", "[", "'hostname'", "]", "not", "in", "ret", "[", "'changes'", "]", ":", "ret", "[", "'changes'", "]", "[", "vmconfig", "[", "'state'", "]", "[", "'hostname'", "]", "]", "=", "{", "}", "ret", "[", "'changes'", "]", "[", "vmconfig", "[", "'state'", "]", "[", "'hostname'", "]", "]", "[", "'image_uuid'", "]", "=", "vmconfig", "[", "'reprovision_uuid'", "]", "else", ":", "log", ".", "warning", "(", "'smartos.vm_present::%s::reprovision - '", "'image_uuid in state does not match current, '", "'reprovision not allowed'", ",", "name", ")", "else", ":", "ret", "[", "'comment'", "]", "=", "'vm {0} failed to be updated'", ".", "format", "(", "vmconfig", "[", "'state'", "]", "[", "'hostname'", "]", ")", "if", "not", "isinstance", "(", "rret", ",", "(", "bool", ")", ")", "and", "'Error'", "in", "rret", ":", "ret", "[", "'comment'", "]", "=", "\"{0}\"", ".", "format", "(", "rret", "[", "'Error'", "]", ")", "else", ":", "# check required image installed", "ret", "[", "'result'", "]", "=", "True", "# disks need some special care", "if", "'disks'", "in", "vmconfig", ":", "new_disks", "=", "[", "]", "for", "disk", "in", "vmconfig", "[", "'disks'", "]", ":", "if", "'path'", "in", "disk", ":", "del", "disk", "[", "'path'", "]", "new_disks", ".", "append", "(", "disk", ")", "vmconfig", "[", "'disks'", "]", "=", "new_disks", "# create vm", "if", "ret", "[", "'result'", "]", ":", "uuid", "=", "__salt__", "[", "'vmadm.create'", "]", "(", "*", "*", "vmconfig", ")", "if", "not", "__opts__", "[", "'test'", "]", "else", "True", "if", "not", "isinstance", "(", "uuid", ",", "(", "bool", ")", ")", "and", "'Error'", "in", "uuid", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "\"{0}\"", ".", "format", "(", "uuid", "[", "'Error'", "]", ")", "else", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'changes'", "]", "[", "vmconfig", "[", "'hostname'", "]", "]", "=", "vmconfig", "ret", "[", "'comment'", "]", "=", "'vm {0} created'", ".", "format", "(", "vmconfig", "[", "'hostname'", "]", ")", "return", "ret" ]
43.979215
25.692841
def __find_args_separator(self, decl_string, start_pos):
        """implementation details"""
        bracket_depth = 0
        for index, ch in enumerate(decl_string[start_pos:]):
            if ch not in (self.__begin, self.__end, self.__separator):
                continue  # only the bracket characters and the separator matter
            elif self.__separator == ch:
                if not bracket_depth:
                    return index + start_pos
            elif self.__begin == ch:
                bracket_depth += 1
            elif not bracket_depth:
                return index + start_pos
            else:
                bracket_depth -= 1
        return -1
[ "def", "__find_args_separator", "(", "self", ",", "decl_string", ",", "start_pos", ")", ":", "bracket_depth", "=", "0", "for", "index", ",", "ch", "in", "enumerate", "(", "decl_string", "[", "start_pos", ":", "]", ")", ":", "if", "ch", "not", "in", "(", "self", ".", "__begin", ",", "self", ".", "__end", ",", "self", ".", "__separator", ")", ":", "continue", "# I am interested only in < and >", "elif", "self", ".", "__separator", "==", "ch", ":", "if", "not", "bracket_depth", ":", "return", "index", "+", "start_pos", "elif", "self", ".", "__begin", "==", "ch", ":", "bracket_depth", "+=", "1", "elif", "not", "bracket_depth", ":", "return", "index", "+", "start_pos", "else", ":", "bracket_depth", "-=", "1", "return", "-", "1" ]
40
10.875
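The bracket-depth scan above is easiest to see on a concrete string; this is a self-contained sketch with '<', '>' and ',' standing in for the instance's begin/end/separator attributes:

def find_args_separator(decl_string, start_pos, begin='<', end='>', separator=','):
    # scan from start_pos, tracking template-bracket depth; a separator
    # (or a closing bracket) at depth zero ends the current argument
    bracket_depth = 0
    for index, ch in enumerate(decl_string[start_pos:]):
        if ch not in (begin, end, separator):
            continue
        elif separator == ch:
            if not bracket_depth:
                return index + start_pos
        elif begin == ch:
            bracket_depth += 1
        elif not bracket_depth:
            return index + start_pos
        else:
            bracket_depth -= 1
    return -1

print(find_args_separator("map<int,long>,vector<int>", 0))  # 13: the top-level comma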
def _nt_on_change(self, key, value, isNew): """NetworkTables global listener callback""" self._send_update({"k": key, "v": value, "n": isNew})
[ "def", "_nt_on_change", "(", "self", ",", "key", ",", "value", ",", "isNew", ")", ":", "self", ".", "_send_update", "(", "{", "\"k\"", ":", "key", ",", "\"v\"", ":", "value", ",", "\"n\"", ":", "isNew", "}", ")" ]
52
8
def phoncontent(self, cls='current', correctionhandling=CorrectionHandling.CURRENT):
        """See :meth:`AbstractElement.phoncontent`"""
        if cls == 'original':
            correctionhandling = CorrectionHandling.ORIGINAL  # backward compatibility
        if correctionhandling in (CorrectionHandling.CURRENT, CorrectionHandling.EITHER):
            for e in self:
                if isinstance(e, (New, Current)):
                    return e.phoncontent(cls, correctionhandling)
        if correctionhandling in (CorrectionHandling.ORIGINAL, CorrectionHandling.EITHER):
            for e in self:
                if isinstance(e, Original):
                    return e.phoncontent(cls, correctionhandling)
        raise NoSuchPhon
[ "def", "phoncontent", "(", "self", ",", "cls", "=", "'current'", ",", "correctionhandling", "=", "CorrectionHandling", ".", "CURRENT", ")", ":", "if", "cls", "==", "'original'", ":", "correctionhandling", "=", "CorrectionHandling", ".", "ORIGINAL", "#backward compatibility", "if", "correctionhandling", "in", "(", "CorrectionHandling", ".", "CURRENT", ",", "CorrectionHandling", ".", "EITHER", ")", ":", "for", "e", "in", "self", ":", "if", "isinstance", "(", "e", ",", "New", ")", "or", "isinstance", "(", "e", ",", "Current", ")", ":", "return", "e", ".", "phoncontent", "(", "cls", ",", "correctionhandling", ")", "if", "correctionhandling", "in", "(", "CorrectionHandling", ".", "ORIGINAL", ",", "CorrectionHandling", ".", "EITHER", ")", ":", "for", "e", "in", "self", ":", "if", "isinstance", "(", "e", ",", "Original", ")", ":", "return", "e", ".", "phoncontent", "(", "cls", ",", "correctionhandling", ")", "raise", "NoSuchPhon" ]
60.916667
27.166667
def _get_pydot(self): """Return pydot package. Load pydot, if necessary.""" if self.pydot: return self.pydot self.pydot = __import__("pydot") return self.pydot
[ "def", "_get_pydot", "(", "self", ")", ":", "if", "self", ".", "pydot", ":", "return", "self", ".", "pydot", "self", ".", "pydot", "=", "__import__", "(", "\"pydot\"", ")", "return", "self", ".", "pydot" ]
33
10.5
def user(name, id='', user='', priv='', password='', status='active'):
    '''
    Ensures that a user is configured on the device. Because the password on
    the device cannot be verified, this is a forced operation.

    .. versionadded:: 2019.2.0

    name: The name of the module function to execute.

    id(int): The user ID slot on the device.

    user(str): The username of the user.

    priv(str): The privilege level of the user.

    password(str): The password of the user.

    status(str): The status of the user. Can be either active or inactive.

    SLS Example:

    .. code-block:: yaml

        user_configuration:
          cimc.user:
            - id: 11
            - user: foo
            - priv: admin
            - password: mypassword
            - status: active

    '''
    ret = _default_ret(name)

    conf = None  # guard against a NameError when no entry matches the id below

    user_conf = __salt__['cimc.get_users']()

    try:
        for entry in user_conf['outConfigs']['aaaUser']:
            if entry['id'] == str(id):
                conf = entry

        if not conf:
            ret['result'] = False
            ret['comment'] = "Unable to find requested user id on device. Please verify id is valid."
            return ret

        updates = __salt__['cimc.set_user'](str(id), user, password, priv, status)

        if 'outConfig' in updates:
            ret['changes']['before'] = conf
            ret['changes']['after'] = updates['outConfig']['aaaUser']
            ret['comment'] = "User settings modified."
        else:
            ret['result'] = False
            ret['comment'] = "Error setting user configuration."
            return ret

    except Exception as err:
        ret['result'] = False
        ret['comment'] = "Error setting user configuration."
        log.error(err)
        return ret

    ret['result'] = True

    return ret
[ "def", "user", "(", "name", ",", "id", "=", "''", ",", "user", "=", "''", ",", "priv", "=", "''", ",", "password", "=", "''", ",", "status", "=", "'active'", ")", ":", "ret", "=", "_default_ret", "(", "name", ")", "user_conf", "=", "__salt__", "[", "'cimc.get_users'", "]", "(", ")", "try", ":", "for", "entry", "in", "user_conf", "[", "'outConfigs'", "]", "[", "'aaaUser'", "]", ":", "if", "entry", "[", "'id'", "]", "==", "str", "(", "id", ")", ":", "conf", "=", "entry", "if", "not", "conf", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "\"Unable to find requested user id on device. Please verify id is valid.\"", "return", "ret", "updates", "=", "__salt__", "[", "'cimc.set_user'", "]", "(", "str", "(", "id", ")", ",", "user", ",", "password", ",", "priv", ",", "status", ")", "if", "'outConfig'", "in", "updates", ":", "ret", "[", "'changes'", "]", "[", "'before'", "]", "=", "conf", "ret", "[", "'changes'", "]", "[", "'after'", "]", "=", "updates", "[", "'outConfig'", "]", "[", "'aaaUser'", "]", "ret", "[", "'comment'", "]", "=", "\"User settings modified.\"", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "\"Error setting user configuration.\"", "return", "ret", "except", "Exception", "as", "err", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "\"Error setting user configuration.\"", "log", ".", "error", "(", "err", ")", "return", "ret", "ret", "[", "'result'", "]", "=", "True", "return", "ret" ]
26.149254
24.507463
def get_right_geo_fhs(self, dsid, fhs): """Find the right geographical file handlers for given dataset ID *dsid*.""" ds_info = self.ids[dsid] req_geo, rem_geo = self._get_req_rem_geo(ds_info) desired, other = split_desired_other(fhs, req_geo, rem_geo) if desired: try: ds_info['dataset_groups'].remove(rem_geo) except ValueError: pass return desired else: return other
[ "def", "get_right_geo_fhs", "(", "self", ",", "dsid", ",", "fhs", ")", ":", "ds_info", "=", "self", ".", "ids", "[", "dsid", "]", "req_geo", ",", "rem_geo", "=", "self", ".", "_get_req_rem_geo", "(", "ds_info", ")", "desired", ",", "other", "=", "split_desired_other", "(", "fhs", ",", "req_geo", ",", "rem_geo", ")", "if", "desired", ":", "try", ":", "ds_info", "[", "'dataset_groups'", "]", ".", "remove", "(", "rem_geo", ")", "except", "ValueError", ":", "pass", "return", "desired", "else", ":", "return", "other" ]
37.230769
15.538462
def _get_bundles_by_type(self, type): """Get a dictionary of bundles for requested type. Args: type: 'javascript' or 'css' """ bundles = {} bundle_definitions = self.config.get(type) if bundle_definitions is None: return bundles # bundle name: common for bundle_name, paths in bundle_definitions.items(): bundle_files = [] # path: static/js/vendor/*.js for path in paths: # pattern: /tmp/static/js/vendor/*.js pattern = abspath = os.path.join(self.basedir, path) # assetdir: /tmp/static/js/vendor # assetdir contents: # - /tmp/static/js/vendor/t1.js # - /tmp/static/js/vendor/t2.js # - /tmp/static/js/vendor/index.html assetdir = os.path.dirname(abspath) # expanded_fnames after filtering using the pattern: # - /tmp/static/js/vendor/t1.js # - /tmp/static/js/vendor/t2.js fnames = [os.path.join(assetdir, fname) for fname in os.listdir(assetdir)] expanded_fnames = fnmatch.filter(fnames, pattern) bundle_files.extend(sorted(expanded_fnames)) bundles[bundle_name] = bundle_files return bundles
[ "def", "_get_bundles_by_type", "(", "self", ",", "type", ")", ":", "bundles", "=", "{", "}", "bundle_definitions", "=", "self", ".", "config", ".", "get", "(", "type", ")", "if", "bundle_definitions", "is", "None", ":", "return", "bundles", "# bundle name: common", "for", "bundle_name", ",", "paths", "in", "bundle_definitions", ".", "items", "(", ")", ":", "bundle_files", "=", "[", "]", "# path: static/js/vendor/*.js", "for", "path", "in", "paths", ":", "# pattern: /tmp/static/js/vendor/*.js", "pattern", "=", "abspath", "=", "os", ".", "path", ".", "join", "(", "self", ".", "basedir", ",", "path", ")", "# assetdir: /tmp/static/js/vendor", "# assetdir contents:", "# - /tmp/static/js/vendor/t1.js", "# - /tmp/static/js/vendor/t2.js", "# - /tmp/static/js/vendor/index.html", "assetdir", "=", "os", ".", "path", ".", "dirname", "(", "abspath", ")", "# expanded_fnames after filtering using the pattern:", "# - /tmp/static/js/vendor/t1.js", "# - /tmp/static/js/vendor/t2.js", "fnames", "=", "[", "os", ".", "path", ".", "join", "(", "assetdir", ",", "fname", ")", "for", "fname", "in", "os", ".", "listdir", "(", "assetdir", ")", "]", "expanded_fnames", "=", "fnmatch", ".", "filter", "(", "fnames", ",", "pattern", ")", "bundle_files", ".", "extend", "(", "sorted", "(", "expanded_fnames", ")", ")", "bundles", "[", "bundle_name", "]", "=", "bundle_files", "return", "bundles" ]
41.242424
13.757576
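The expansion step can be exercised in isolation; this sketch fakes the directory listing rather than touching the filesystem, so the paths are hypothetical:

import fnmatch
import os

pattern = "/tmp/static/js/vendor/*.js"
assetdir = os.path.dirname(pattern)
fnames = [os.path.join(assetdir, f) for f in ("t1.js", "t2.js", "index.html")]
print(sorted(fnmatch.filter(fnames, pattern)))
# ['/tmp/static/js/vendor/t1.js', '/tmp/static/js/vendor/t2.js']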
def visit_Module(self, node): """ Visit the whole module and add all import at the top level. >> import numpy.linalg Becomes >> import numpy """ node.body = [k for k in (self.visit(n) for n in node.body) if k] imports = [ast.Import([ast.alias(i, mangle(i))]) for i in self.imports] node.body = imports + node.body ast.fix_missing_locations(node) return node
[ "def", "visit_Module", "(", "self", ",", "node", ")", ":", "node", ".", "body", "=", "[", "k", "for", "k", "in", "(", "self", ".", "visit", "(", "n", ")", "for", "n", "in", "node", ".", "body", ")", "if", "k", "]", "imports", "=", "[", "ast", ".", "Import", "(", "[", "ast", ".", "alias", "(", "i", ",", "mangle", "(", "i", ")", ")", "]", ")", "for", "i", "in", "self", ".", "imports", "]", "node", ".", "body", "=", "imports", "+", "node", ".", "body", "ast", ".", "fix_missing_locations", "(", "node", ")", "return", "node" ]
27.125
21.5
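A minimal sketch of the same hoisting idea on a freshly parsed module. It prepends a plain `import numpy` with no name mangling, so it illustrates the mechanism rather than reproducing the transform above:

import ast

tree = ast.parse("x = 1")
imports = [ast.Import(names=[ast.alias(name="numpy", asname=None)])]
tree.body = imports + tree.body
ast.fix_missing_locations(tree)  # new nodes need line/col info
compile(tree, "<demo>", "exec")  # raises if the rewritten AST is malformed
print(ast.unparse(tree))         # Python 3.9+: "import numpy\nx = 1"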
def split_package(package):
    """
    Split a package file name into name, version, arch and build tag.
    """
    name = ver = arch = build = ""
    split = package.split("-")
    if len(split) > 2:
        build = split[-1]
        # keep at most the first two characters of the build tag,
        # and only the second one if it is a digit
        build_a, build_b = build[:1], ""
        if build[1:2].isdigit():
            build_b = build[1:2]
        build = build_a + build_b
        arch = split[-2]
        ver = split[-3]
        name = "-".join(split[:-3])
    return [name, ver, arch, build]
[ "def", "split_package", "(", "package", ")", ":", "name", "=", "ver", "=", "arch", "=", "build", "=", "[", "]", "split", "=", "package", ".", "split", "(", "\"-\"", ")", "if", "len", "(", "split", ")", ">", "2", ":", "build", "=", "split", "[", "-", "1", "]", "build_a", ",", "build_b", "=", "\"\"", ",", "\"\"", "build_a", "=", "build", "[", ":", "1", "]", "if", "build", "[", "1", ":", "2", "]", ".", "isdigit", "(", ")", ":", "build_b", "=", "build", "[", "1", ":", "2", "]", "build", "=", "build_a", "+", "build_b", "arch", "=", "split", "[", "-", "2", "]", "ver", "=", "split", "[", "-", "3", "]", "name", "=", "\"-\"", ".", "join", "(", "split", "[", ":", "-", "3", "]", ")", "return", "[", "name", ",", "ver", ",", "arch", ",", "build", "]" ]
26.833333
9.5
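A quick trace of the naming convention the split assumes (Slackware-style name-version-arch-build, where the name itself may contain dashes); the build-tag trimming is left out here:

package = "mozilla-firefox-91.0-x86_64-1"
parts = package.split("-")
print(["-".join(parts[:-3]), parts[-3], parts[-2], parts[-1]])
# ['mozilla-firefox', '91.0', 'x86_64', '1']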
def remap_link_target(path, absolute=False): """ remap a link target to a static URL if it's prefixed with @ """ if path.startswith('@'): # static resource return static_url(path[1:], absolute=absolute) if absolute: # absolute-ify whatever the URL is return urllib.parse.urljoin(flask.request.url, path) return path
[ "def", "remap_link_target", "(", "path", ",", "absolute", "=", "False", ")", ":", "if", "path", ".", "startswith", "(", "'@'", ")", ":", "# static resource", "return", "static_url", "(", "path", "[", "1", ":", "]", ",", "absolute", "=", "absolute", ")", "if", "absolute", ":", "# absolute-ify whatever the URL is", "return", "urllib", ".", "parse", ".", "urljoin", "(", "flask", ".", "request", ".", "url", ",", "path", ")", "return", "path" ]
32.272727
17.818182
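The two remapping rules, sketched without Flask; the '/static/' prefix stands in for static_url() and the base URL for flask.request.url, both hypothetical:

import urllib.parse

def demo_remap(path, base="https://example.com/page/", absolute=False):
    if path.startswith('@'):
        return "/static/" + path[1:]             # stand-in for static_url()
    if absolute:
        return urllib.parse.urljoin(base, path)  # resolve against the request URL
    return path

print(demo_remap("@css/site.css"))                # /static/css/site.css
print(demo_remap("../img/a.png", absolute=True))  # https://example.com/img/a.png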
def lab_office(self, column=None, value=None, **kwargs):
        """Abbreviations, names, and locations of laboratories and offices."""
        return self._resolve_call('GIC_LAB_OFFICE', column, value, **kwargs)
[ "def", "lab_office", "(", "self", ",", "column", "=", "None", ",", "value", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_resolve_call", "(", "'GIC_LAB_OFFICE'", ",", "column", ",", "value", ",", "*", "*", "kwargs", ")" ]
69.666667
17.333333
def run(self, params): """Main run method for PyMOC tool. Takes a list of command line arguments to process. Each operation is performed on a current "running" MOC object. """ self.params = list(reversed(params)) if not self.params: self.help() return while self.params: p = self.params.pop() if p in self.command: # If we got a known command, execute it. self.command[p](self) elif os.path.exists(p): # If we were given the name of an existing file, read it. self.read_moc(p) else: # Otherwise raise an error. raise CommandError('file or command {0} not found'.format(p))
[ "def", "run", "(", "self", ",", "params", ")", ":", "self", ".", "params", "=", "list", "(", "reversed", "(", "params", ")", ")", "if", "not", "self", ".", "params", ":", "self", ".", "help", "(", ")", "return", "while", "self", ".", "params", ":", "p", "=", "self", ".", "params", ".", "pop", "(", ")", "if", "p", "in", "self", ".", "command", ":", "# If we got a known command, execute it.", "self", ".", "command", "[", "p", "]", "(", "self", ")", "elif", "os", ".", "path", ".", "exists", "(", "p", ")", ":", "# If we were given the name of an existing file, read it.", "self", ".", "read_moc", "(", "p", ")", "else", ":", "# Otherwise raise an error.", "raise", "CommandError", "(", "'file or command {0} not found'", ".", "format", "(", "p", ")", ")" ]
27.034483
21.206897
def is_waiting_for_input(self):
        """
        Whether the flow can advance only after more user input, i.e. it is
        waiting for something that is neither a SwitchOnValue fork nor a
        base-type value.
        :return: bool
        """
        return self.waiting_for and \
               not isinstance(self.waiting_for, forking.SwitchOnValue) and \
               not is_base_type(self.waiting_for)
[ "def", "is_waiting_for_input", "(", "self", ")", ":", "return", "self", ".", "waiting_for", "and", "not", "isinstance", "(", "self", ".", "waiting_for", ",", "forking", ".", "SwitchOnValue", ")", "and", "not", "is_base_type", "(", "self", ".", "waiting_for", ")" ]
33.25
10.75
def explain_prediction_df(estimator, doc, **kwargs): # type: (...) -> pd.DataFrame """ Explain prediction and export explanation to ``pandas.DataFrame`` All keyword arguments are passed to :func:`eli5.explain_prediction`. Weights of all features are exported by default. """ kwargs = _set_defaults(kwargs) return format_as_dataframe( eli5.explain_prediction(estimator, doc, **kwargs))
[ "def", "explain_prediction_df", "(", "estimator", ",", "doc", ",", "*", "*", "kwargs", ")", ":", "# type: (...) -> pd.DataFrame", "kwargs", "=", "_set_defaults", "(", "kwargs", ")", "return", "format_as_dataframe", "(", "eli5", ".", "explain_prediction", "(", "estimator", ",", "doc", ",", "*", "*", "kwargs", ")", ")" ]
45.777778
10.666667
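A hedged usage sketch: clf is assumed to be a fitted scikit-learn estimator and x a single feature vector (both hypothetical); keyword arguments such as top are forwarded to eli5.explain_prediction, and the resulting columns vary by estimator:

import eli5

df = eli5.explain_prediction_df(clf, x, top=10)
print(df.head())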
def convert_site_dm3_table_intensity(sites_df):
    """
    Convert MagIC site headers to short/readable headers for a
    figure (used by ipmag.sites_extract)
    Intensity data only.

    Parameters
    ----------
    sites_df : pandas DataFrame
        sites information

    Returns
    ---------
    int_df : pandas DataFrame
       intensity site data with easily readable headers
    """
    # now for the intensities
    has_vadms, has_vdms = False, False
    if 'int_abs' not in sites_df:
        sites_df['int_abs'] = None
    if 'int_n_samples' not in sites_df:
        sites_df['int_n_samples'] = None
    int_df = sites_df.copy().dropna(subset=['int_abs'])
    int_df['int_n_samples'] = int_df['int_n_samples'].values.astype('int')
    if len(int_df) > 0:
        int_df['int_abs_uT'] = 1e6*int_df.int_abs.values  # convert to uT
        int_df['int_abs_sigma_uT'] = 1e6 * \
            int_df.int_abs_sigma.values  # convert to uT
        int_df['int_abs_uT'] = int_df['int_abs_uT'].values.astype('int')
        int_df['int_abs_sigma_uT'] = int_df['int_abs_sigma_uT'].values.astype(
            'int')
        int_df['int_abs_sigma_perc'] = int_df['int_abs_sigma_perc'].values.astype(
            'int')
        IntCols = ["Site", "N", "B", "B sigma", "sigma (%)"]
        if 'vadm' in int_df.columns:
            test_vadm = int_df.dropna(subset=['vadm'])
            if len(test_vadm) > 0:
                has_vadms = True
        if 'vdm' in int_df.columns:
            test_vdm = int_df.dropna(subset=['vdm'])
            if len(test_vdm) > 0:
                has_vdms = True
        if has_vadms:
            IntCols.append("VADM")
            IntCols.append("VADM sigma")
        if has_vdms:
            IntCols.append("VDM")
            IntCols.append("VDM sigma")
        if not has_vadms and not has_vdms:
            int_df = int_df[['site', 'int_n_samples',
                             'int_abs_uT', 'int_abs_sigma_uT', 'int_abs_sigma_perc']]
        if has_vadms and not has_vdms:
            int_df.sort_values(by=['site', 'vadm'], ascending=False, inplace=True)
            int_df.drop_duplicates(subset=['int_abs_uT', 'site'], inplace=True)
            int_df['vadm_ZAm2'] = 1e-21*int_df.vadm.values
            int_df['vadm_sigma_ZAm2'] = 1e-21*int_df.vadm_sigma.values
            # select the column that was actually created above
            # ('vadm_sigma_ZAm2', not 'vadm_ZAm2_sigma')
            int_df = int_df[['site', 'int_n_samples', 'int_abs_uT', 'int_abs_sigma_uT',
                             'int_abs_sigma_perc', 'vadm_ZAm2', 'vadm_sigma_ZAm2']]
        if not has_vadms and has_vdms:
            int_df.sort_values(by=['site', 'vdm'], ascending=False, inplace=True)
            int_df.drop_duplicates(subset=['int_abs_uT', 'site'], inplace=True)
            # .values is an attribute, not a method: no call parentheses
            int_df['vdm_ZAm2'] = 1e-21*int_df.vdm.values
            int_df['vdm_sigma_ZAm2'] = 1e-21*int_df.vdm_sigma.values
            int_df = int_df[['site', 'int_n_samples', 'int_abs_uT', 'int_abs_sigma_uT',
                             'int_abs_sigma_perc', 'vdm_ZAm2', 'vdm_sigma_ZAm2']]
        if has_vadms and has_vdms:
            int_df.sort_values(by=['site', 'vadm'], ascending=False, inplace=True)
            int_df.drop_duplicates(subset=['int_abs_uT', 'site'], inplace=True)
            int_df['vadm_ZAm2'] = 1e-21*int_df.vadm.values
            int_df['vadm_sigma_ZAm2'] = 1e-21*int_df.vadm_sigma.values
            int_df['vdm_ZAm2'] = 1e-21*int_df.vdm.values
            int_df['vdm_sigma_ZAm2'] = 1e-21*int_df.vdm_sigma.values
            int_df = int_df[['site', 'int_n_samples', 'int_abs_uT', 'int_abs_sigma_uT',
                             'int_abs_sigma_perc', 'vadm_ZAm2', 'vadm_sigma_ZAm2',
                             'vdm_ZAm2', 'vdm_sigma_ZAm2']]
        int_df.columns = IntCols
        int_df.sort_values(by=['Site'], inplace=True, ascending=True)
        int_df.fillna(value='', inplace=True)
    return int_df
[ "def", "convert_site_dm3_table_intensity", "(", "sites_df", ")", ":", "# now for the intensities", "has_vadms", ",", "has_vdms", "=", "False", ",", "False", "if", "'int_abs'", "not", "in", "sites_df", ":", "sites_df", "[", "'int_abs'", "]", "=", "None", "if", "'int_n_samples'", "not", "in", "sites_df", ":", "sites_df", "[", "'int_n_samples'", "]", "=", "None", "int_df", "=", "sites_df", ".", "copy", "(", ")", ".", "dropna", "(", "subset", "=", "[", "'int_abs'", "]", ")", "int_df", "[", "'int_n_samples'", "]", "=", "int_df", "[", "'int_n_samples'", "]", ".", "values", ".", "astype", "(", "'int'", ")", "if", "len", "(", "int_df", ")", ">", "0", ":", "int_df", "[", "'int_abs_uT'", "]", "=", "1e6", "*", "int_df", ".", "int_abs", ".", "values", "# convert to uT", "int_df", "[", "'int_abs_sigma_uT'", "]", "=", "1e6", "*", "int_df", ".", "int_abs_sigma", ".", "values", "# convert to uT", "int_df", "[", "'int_abs_uT'", "]", "=", "int_df", "[", "'int_abs_uT'", "]", ".", "values", ".", "astype", "(", "'int'", ")", "int_df", "[", "'int_abs_sigma_uT'", "]", "=", "int_df", "[", "'int_abs_sigma_uT'", "]", ".", "values", ".", "astype", "(", "'int'", ")", "int_df", "[", "'int_abs_sigma_perc'", "]", "=", "int_df", "[", "'int_abs_sigma_perc'", "]", ".", "values", ".", "astype", "(", "'int'", ")", "IntCols", "=", "[", "\"Site\"", ",", "\"N\"", ",", "\"B\"", ",", "\"B sigma\"", ",", "\"sigma (%)\"", "]", "if", "'vadm'", "in", "int_df", ".", "columns", ":", "test_vadm", "=", "int_df", ".", "dropna", "(", "subset", "=", "[", "'vadm'", "]", ")", "if", "len", "(", "test_vadm", ")", ">", "0", ":", "has_vadms", "=", "True", "if", "'vdm'", "in", "int_df", ".", "columns", ":", "test_vdm", "=", "int_df", ".", "dropna", "(", "subset", "=", "[", "'vdm'", "]", ")", "if", "len", "(", "test_vdm", ")", ">", "0", ":", "has_vdms", "=", "True", "if", "has_vadms", ":", "IntCols", ".", "append", "(", "\"VADM\"", ")", "IntCols", ".", "append", "(", "\"VADM sigma\"", ")", "if", "has_vdms", ":", "IntCols", ".", "append", "(", "\"VDM\"", ")", "IntCols", ".", "append", "(", "\"VDM sigma\"", ")", "if", "not", "has_vadms", "and", "not", "has_vdms", ":", "int_df", "=", "int_df", "[", "[", "'site'", ",", "'int_n_samples'", ",", "'int_abs_uT'", ",", "'int_abs_sigma_uT'", ",", "'int_abs_sigma_perc'", "]", "]", "if", "has_vadms", "and", "not", "has_vdms", ":", "int_df", ".", "sort_values", "(", "by", "=", "[", "'site'", ",", "'vadm'", "]", ",", "ascending", "=", "False", ",", "inplace", "=", "True", ")", "int_df", ".", "drop_duplicates", "(", "subset", "=", "[", "'int_abs_uT'", ",", "'site'", "]", ",", "inplace", "=", "True", ")", "int_df", "[", "'vadm_ZAm2'", "]", "=", "1e-21", "*", "int_df", ".", "vadm", ".", "values", "int_df", "[", "'vadm_sigma_ZAm2'", "]", "=", "1e-21", "*", "int_df", ".", "vadm_sigma", ".", "values", "int_df", "=", "int_df", "[", "[", "'site'", ",", "'int_n_samples'", ",", "'int_abs_uT'", ",", "'int_abs_sigma_uT'", ",", "'int_abs_sigma_perc'", ",", "'vadm_ZAm2'", ",", "'vadm_ZAm2_sigma'", "]", "]", "if", "not", "has_vadms", "and", "has_vdms", ":", "int_df", ".", "sort_values", "(", "by", "=", "[", "'site'", ",", "'vdm'", "]", ",", "ascending", "=", "False", ",", "inplace", "=", "True", ")", "int_df", ".", "drop_duplicates", "(", "subset", "=", "[", "'int_abs_uT'", ",", "'site'", "]", ",", "inplace", "=", "True", ")", "int_df", "[", "'vdm_ZAm2'", "]", "=", "1e-21", "*", "int_df", ".", "vdm", ".", "values", "(", ")", "int_df", "[", "'vdm_sigma_ZAm2'", "]", "=", "1e-21", "*", "int_df", ".", "vdm_sigma", ".", "values", 
"(", ")", "int_df", "=", "int_df", "[", "[", "'site'", ",", "'int_n_samples'", ",", "'int_abs_uT'", ",", "'int_abs_sigma_uT'", ",", "'int_abs_sigma_perc'", ",", "'vdm_ZAm2'", ",", "'vdm_ZAm2_sigma'", "]", "]", "if", "has_vadms", "and", "has_vdms", ":", "int_df", ".", "sort_values", "(", "by", "=", "[", "'site'", ",", "'vadm'", "]", ",", "ascending", "=", "False", ",", "inplace", "=", "True", ")", "int_df", ".", "drop_duplicates", "(", "subset", "=", "[", "'int_abs_uT'", ",", "'site'", "]", ",", "inplace", "=", "True", ")", "int_df", "[", "'vadm_ZAm2'", "]", "=", "1e-21", "*", "int_df", ".", "vadm", ".", "values", "int_df", "[", "'vadm_sigma_ZAm2'", "]", "=", "1e-21", "*", "int_df", ".", "vadm_sigma", ".", "values", "int_df", "[", "'vdm_ZAm2'", "]", "=", "1e-21", "*", "int_df", ".", "vdm", ".", "values", "int_df", "[", "'vdm_sigma_ZAm2'", "]", "=", "1e-21", "*", "int_df", ".", "vdm_sigma", ".", "values", "int_df", "=", "int_df", "[", "[", "'site'", ",", "'int_n_samples'", ",", "'int_abs_uT'", ",", "'int_abs_sigma_uT'", ",", "'int_abs_sigma_perc'", ",", "'vadm_ZAm2'", ",", "'vadm_sigma_ZAm2'", ",", "'vdm_ZAm2'", ",", "'vdm_sigma_ZAm2'", "]", "]", "int_df", ".", "columns", "=", "IntCols", "int_df", ".", "sort_values", "(", "by", "=", "[", "'Site'", "]", ",", "inplace", "=", "True", ",", "ascending", "=", "True", ")", "int_df", ".", "fillna", "(", "value", "=", "''", ",", "inplace", "=", "True", ")", "return", "int_df" ]
44.313953
20.430233
def dump_TDDFT_data_in_GW_run(self, TDDFT_dump=True):
        """
        :param TDDFT_dump: boolean
        :return: set the do_tddft variable to 1 in cell.in
        """
        if TDDFT_dump:
            self.BSE_TDDFT_options.update(do_bse=0, do_tddft=1)
        else:
            self.BSE_TDDFT_options.update(do_bse=0, do_tddft=0)
[ "def", "dump_TDDFT_data_in_GW_run", "(", "self", ",", "TDDFT_dump", "=", "True", ")", ":", "if", "TDDFT_dump", "==", "True", ":", "self", ".", "BSE_TDDFT_options", ".", "update", "(", "do_bse", "=", "0", ",", "do_tddft", "=", "1", ")", "else", ":", "self", ".", "BSE_TDDFT_options", ".", "update", "(", "do_bse", "=", "0", ",", "do_tddft", "=", "0", ")" ]
37.444444
13.666667
def totz(when, tz=None): """ Return a date, time, or datetime converted to a datetime in the given timezone. If when is a datetime and has no timezone it is assumed to be local time. Date and time objects are also assumed to be UTC. The tz value defaults to UTC. Raise TypeError if when cannot be converted to a datetime. """ if when is None: return None when = to_datetime(when) if when.tzinfo is None: when = when.replace(tzinfo=localtz) return when.astimezone(tz or utc)
[ "def", "totz", "(", "when", ",", "tz", "=", "None", ")", ":", "if", "when", "is", "None", ":", "return", "None", "when", "=", "to_datetime", "(", "when", ")", "if", "when", ".", "tzinfo", "is", "None", ":", "when", "=", "when", ".", "replace", "(", "tzinfo", "=", "localtz", ")", "return", "when", ".", "astimezone", "(", "tz", "or", "utc", ")" ]
39.769231
21.769231
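The heavy lifting is datetime.astimezone; a self-contained check with stdlib fixed-offset zones:

from datetime import datetime, timezone, timedelta

utc = timezone.utc
est = timezone(timedelta(hours=-5))
when = datetime(2020, 1, 1, 12, 0, tzinfo=utc)
print(when.astimezone(est))  # 2020-01-01 07:00:00-05:00 -- same instant, new zone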
def visitShapeExprDecl(self, ctx: ShExDocParser.ShapeExprDeclContext): """ shapeExprDecl: shapeExprLabel (shapeExpression | KW_EXTERNAL) """ label = self.context.shapeexprlabel_to_IRI(ctx.shapeExprLabel()) if self.context.schema.shapes is None: self.context.schema.shapes = [] if ctx.KW_EXTERNAL(): shape = ShapeExternal(id=label) else: shexpr = ShexShapeExpressionParser(self.context, label) shexpr.visit(ctx.shapeExpression()) shape = shexpr.expr self.context.schema.shapes.append(shape)
[ "def", "visitShapeExprDecl", "(", "self", ",", "ctx", ":", "ShExDocParser", ".", "ShapeExprDeclContext", ")", ":", "label", "=", "self", ".", "context", ".", "shapeexprlabel_to_IRI", "(", "ctx", ".", "shapeExprLabel", "(", ")", ")", "if", "self", ".", "context", ".", "schema", ".", "shapes", "is", "None", ":", "self", ".", "context", ".", "schema", ".", "shapes", "=", "[", "]", "if", "ctx", ".", "KW_EXTERNAL", "(", ")", ":", "shape", "=", "ShapeExternal", "(", "id", "=", "label", ")", "else", ":", "shexpr", "=", "ShexShapeExpressionParser", "(", "self", ".", "context", ",", "label", ")", "shexpr", ".", "visit", "(", "ctx", ".", "shapeExpression", "(", ")", ")", "shape", "=", "shexpr", ".", "expr", "self", ".", "context", ".", "schema", ".", "shapes", ".", "append", "(", "shape", ")" ]
48.833333
13.583333
def title(self):
        """
        The title of the course.

        If no entry is found in the configuration's namemap, a new entry is
        created with name = $STUD.IP_NAME + $SEMESTER_NAME
        """
        name = c.namemap_lookup(self.id)
        if name is None:
            name = self._title + " " + client.get_semester_title(self)
            c.namemap_set(self.id, name)
        return secure_filename(name)
[ "def", "title", "(", "self", ")", ":", "name", "=", "c", ".", "namemap_lookup", "(", "self", ".", "id", ")", "if", "name", "is", "None", ":", "name", "=", "self", ".", "_title", "+", "\" \"", "+", "client", ".", "get_semester_title", "(", "self", ")", "c", ".", "namemap_set", "(", "self", ".", "id", ",", "name", ")", "return", "secure_filename", "(", "name", ")" ]
44.555556
20.777778
def get_constant_state(self): """Read state that was written in "first_part" mode. Returns: a structure """ ret = self.constant_states[self.next_constant_state] self.next_constant_state += 1 return ret
[ "def", "get_constant_state", "(", "self", ")", ":", "ret", "=", "self", ".", "constant_states", "[", "self", ".", "next_constant_state", "]", "self", ".", "next_constant_state", "+=", "1", "return", "ret" ]
24.888889
16.777778
def _stream_annotation(file_name, pb_dir):
    """
    Stream an entire remote annotation file from physiobank

    Parameters
    ----------
    file_name : str
        The name of the annotation file to be read.
    pb_dir : str
        The physiobank directory where the annotation file is located.

    """
    # Full url of annotation file
    url = posixpath.join(config.db_index_url, pb_dir, file_name)

    # Get the content
    response = requests.get(url)
    # Raise HTTPError if invalid url
    response.raise_for_status()

    # Convert to numpy array (np.frombuffer replaces the deprecated np.fromstring)
    ann_data = np.frombuffer(response.content, dtype=np.dtype('<u1'))

    return ann_data
[ "def", "_stream_annotation", "(", "file_name", ",", "pb_dir", ")", ":", "# Full url of annotation file", "url", "=", "posixpath", ".", "join", "(", "config", ".", "db_index_url", ",", "pb_dir", ",", "file_name", ")", "# Get the content", "response", "=", "requests", ".", "get", "(", "url", ")", "# Raise HTTPError if invalid url", "response", ".", "raise_for_status", "(", ")", "# Convert to numpy array", "ann_data", "=", "np", ".", "fromstring", "(", "response", ".", "content", ",", "dtype", "=", "np", ".", "dtype", "(", "'<u1'", ")", ")", "return", "ann_data" ]
26.333333
20.5
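The byte-to-array step on its own, using the np.frombuffer spelling adopted above:

import numpy as np

payload = bytes([0, 1, 255])
print(np.frombuffer(payload, dtype=np.dtype('<u1')))  # [  0   1 255]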
def retarget_with_change_points(song, cp_times, duration): """Create a composition of a song of a given duration that reaches music change points at specified times. This is still under construction. It might not work as well with more than 2 ``cp_times`` at the moment. Here's an example of retargeting music to be 40 seconds long and hit a change point at the 10 and 30 second marks:: song = Song("instrumental_music.wav") composition, change_points =\ retarget.retarget_with_change_points(song, [10, 30], 40) composition.export(filename="retargeted_instrumental_music.") :param song: Song to retarget :type song: :py:class:`radiotool.composer.Song` :param cp_times: Times to reach change points (in seconds) :type cp_times: list of floats :param duration: Target length of retargeted music (in seconds) :type duration: float :returns: Composition of retargeted song and list of locations of change points in the retargeted composition :rtype: (:py:class:`radiotool.composer.Composition`, list) """ analysis = song.analysis beat_length = analysis[BEAT_DUR_KEY] beats = np.array(analysis["beats"]) # find change points cps = np.array(novelty(song, nchangepoints=4)) cp_times = np.array(cp_times) # mark change points in original music def music_labels(t): # find beat closest to t closest_beat_idx = np.argmin(np.abs(beats - t)) closest_beat = beats[closest_beat_idx] closest_cp = cps[np.argmin(np.abs(cps - closest_beat))] if np.argmin(np.abs(beats - closest_cp)) == closest_beat_idx: return "cp" else: return "noncp" # mark where we want change points in the output music # (a few beats of slack to improve the quality of the end result) def out_labels(t): if np.min(np.abs(cp_times - t)) < 1.5 * beat_length: return "cp" return "noncp" m_labels = [music_labels(i) for i in np.arange(0, song.duration_in_seconds, beat_length)] o_labels = [out_labels(i) for i in np.arange(0, duration, beat_length)] constraints = [ rt_constraints.TimbrePitchConstraint( context=0, timbre_weight=1.0, chroma_weight=1.0), rt_constraints.EnergyConstraint(penalty=.5), rt_constraints.MinimumLoopConstraint(8), rt_constraints.NoveltyConstraint(m_labels, o_labels, 1.0) ] comp, info = retarget( [song], duration, constraints=[constraints], fade_in_len=None, fade_out_len=None) final_cp_locations = [beat_length * i for i, label in enumerate(info['result_labels']) if label == 'cp'] return comp, final_cp_locations
[ "def", "retarget_with_change_points", "(", "song", ",", "cp_times", ",", "duration", ")", ":", "analysis", "=", "song", ".", "analysis", "beat_length", "=", "analysis", "[", "BEAT_DUR_KEY", "]", "beats", "=", "np", ".", "array", "(", "analysis", "[", "\"beats\"", "]", ")", "# find change points", "cps", "=", "np", ".", "array", "(", "novelty", "(", "song", ",", "nchangepoints", "=", "4", ")", ")", "cp_times", "=", "np", ".", "array", "(", "cp_times", ")", "# mark change points in original music", "def", "music_labels", "(", "t", ")", ":", "# find beat closest to t", "closest_beat_idx", "=", "np", ".", "argmin", "(", "np", ".", "abs", "(", "beats", "-", "t", ")", ")", "closest_beat", "=", "beats", "[", "closest_beat_idx", "]", "closest_cp", "=", "cps", "[", "np", ".", "argmin", "(", "np", ".", "abs", "(", "cps", "-", "closest_beat", ")", ")", "]", "if", "np", ".", "argmin", "(", "np", ".", "abs", "(", "beats", "-", "closest_cp", ")", ")", "==", "closest_beat_idx", ":", "return", "\"cp\"", "else", ":", "return", "\"noncp\"", "# mark where we want change points in the output music", "# (a few beats of slack to improve the quality of the end result)", "def", "out_labels", "(", "t", ")", ":", "if", "np", ".", "min", "(", "np", ".", "abs", "(", "cp_times", "-", "t", ")", ")", "<", "1.5", "*", "beat_length", ":", "return", "\"cp\"", "return", "\"noncp\"", "m_labels", "=", "[", "music_labels", "(", "i", ")", "for", "i", "in", "np", ".", "arange", "(", "0", ",", "song", ".", "duration_in_seconds", ",", "beat_length", ")", "]", "o_labels", "=", "[", "out_labels", "(", "i", ")", "for", "i", "in", "np", ".", "arange", "(", "0", ",", "duration", ",", "beat_length", ")", "]", "constraints", "=", "[", "rt_constraints", ".", "TimbrePitchConstraint", "(", "context", "=", "0", ",", "timbre_weight", "=", "1.0", ",", "chroma_weight", "=", "1.0", ")", ",", "rt_constraints", ".", "EnergyConstraint", "(", "penalty", "=", ".5", ")", ",", "rt_constraints", ".", "MinimumLoopConstraint", "(", "8", ")", ",", "rt_constraints", ".", "NoveltyConstraint", "(", "m_labels", ",", "o_labels", ",", "1.0", ")", "]", "comp", ",", "info", "=", "retarget", "(", "[", "song", "]", ",", "duration", ",", "constraints", "=", "[", "constraints", "]", ",", "fade_in_len", "=", "None", ",", "fade_out_len", "=", "None", ")", "final_cp_locations", "=", "[", "beat_length", "*", "i", "for", "i", ",", "label", "in", "enumerate", "(", "info", "[", "'result_labels'", "]", ")", "if", "label", "==", "'cp'", "]", "return", "comp", ",", "final_cp_locations" ]
37.506849
19.575342
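The nearest-beat lookup inside music_labels, in miniature:

import numpy as np

beats = np.array([0.0, 0.5, 1.0, 1.5])
t = 1.2
print(beats[np.argmin(np.abs(beats - t))])  # 1.0 -- the beat closest to t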
def serialize(obj, **options): ''' Serialize Python data to YAML. :param obj: the data structure to serialize :param options: options given to lower yaml module. ''' options.setdefault('Dumper', Dumper) try: response = yaml.dump(obj, **options) if response.endswith('\n...\n'): return response[:-5] if response.endswith('\n'): return response[:-1] return response except Exception as error: log.exception('Error encountered while serializing') raise SerializationError(error)
[ "def", "serialize", "(", "obj", ",", "*", "*", "options", ")", ":", "options", ".", "setdefault", "(", "'Dumper'", ",", "Dumper", ")", "try", ":", "response", "=", "yaml", ".", "dump", "(", "obj", ",", "*", "*", "options", ")", "if", "response", ".", "endswith", "(", "'\\n...\\n'", ")", ":", "return", "response", "[", ":", "-", "5", "]", "if", "response", ".", "endswith", "(", "'\\n'", ")", ":", "return", "response", "[", ":", "-", "1", "]", "return", "response", "except", "Exception", "as", "error", ":", "log", ".", "exception", "(", "'Error encountered while serializing'", ")", "raise", "SerializationError", "(", "error", ")" ]
29.631579
15.210526
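Why both endswith checks exist: PyYAML terminates a bare scalar with a document-end marker and everything else with a plain newline:

import yaml

print(repr(yaml.dump("scalar")))  # 'scalar\n...\n' -- trailing '\n...\n'
print(repr(yaml.dump({"a": 1})))  # 'a: 1\n'        -- trailing '\n'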
def parse_qc(self, qc_file):
        """
        Parse phantompeakqualtools (spp) QC table and return quality metrics.

        :param str qc_file: Path to phantompeakqualtools output file, which
            contains sample quality measurements.
        """
        import pandas as pd
        series = pd.Series(dtype="object")
        try:
            with open(qc_file) as handle:
                line = handle.readlines()[0].strip().split("\t")  # list of strings per line
            series["NSC"] = line[-3]
            series["RSC"] = line[-2]
            series["qualityTag"] = line[-1]
        except (IOError, IndexError):
            # missing or malformed QC file: return the (possibly empty) series
            pass
        return series
[ "def", "parse_qc", "(", "self", ",", "qc_file", ")", ":", "import", "pandas", "as", "pd", "series", "=", "pd", ".", "Series", "(", ")", "try", ":", "with", "open", "(", "qc_file", ")", "as", "handle", ":", "line", "=", "handle", ".", "readlines", "(", ")", "[", "0", "]", ".", "strip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "# list of strings per line", "series", "[", "\"NSC\"", "]", "=", "line", "[", "-", "3", "]", "series", "[", "\"RSC\"", "]", "=", "line", "[", "-", "2", "]", "series", "[", "\"qualityTag\"", "]", "=", "line", "[", "-", "1", "]", "except", ":", "pass", "return", "series" ]
34.333333
17.666667
def get_user_data(self, subid, params=None): ''' /v1/server/get_user_data GET - account Retrieves the (base64 encoded) user-data for this subscription. Link: https://www.vultr.com/api/#server_get_user_data ''' params = update_params(params, {'SUBID': subid}) return self.request('/v1/server/get_user_data', params, 'GET')
[ "def", "get_user_data", "(", "self", ",", "subid", ",", "params", "=", "None", ")", ":", "params", "=", "update_params", "(", "params", ",", "{", "'SUBID'", ":", "subid", "}", ")", "return", "self", ".", "request", "(", "'/v1/server/get_user_data'", ",", "params", ",", "'GET'", ")" ]
41.111111
21.555556
def get_color_map(name): """ Return a colormap as (redmap, greenmap, bluemap) Arguments: name -- the name of the colormap. If unrecognized, will default to 'jet'. """ redmap = [0] greenmap = [0] bluemap = [0] if name == 'white': # essentially a noop redmap = [0, 1] greenmap = [0, 1] bluemap = [0, 1] elif name == 'simple': redmap = [0, 1, 1, 1] greenmap = [0, 0, 1, 1] bluemap = [0, 0, 0, 1] elif name == 'hot': redmap = [0, 0.03968253968253968, 0.07936507936507936, 0.119047619047619, 0.1587301587301587, 0.1984126984126984, 0.2380952380952381, 0.2777777777777778, 0.3174603174603174, 0.3571428571428571, 0.3968253968253968, 0.4365079365079365, 0.4761904761904762, 0.5158730158730158, 0.5555555555555556, 0.5952380952380952, 0.6349206349206349, 0.6746031746031745, 0.7142857142857142, 0.753968253968254, 0.7936507936507936, 0.8333333333333333, 0.873015873015873, 0.9126984126984127, 0.9523809523809523, 0.992063492063492, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] greenmap = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03174603174603163, 0.0714285714285714, 0.1111111111111112, 0.1507936507936507, 0.1904761904761905, 0.23015873015873, 0.2698412698412698, 0.3095238095238093, 0.3492063492063491, 0.3888888888888888, 0.4285714285714284, 0.4682539682539679, 0.5079365079365079, 0.5476190476190477, 0.5873015873015872, 0.6269841269841268, 0.6666666666666665, 0.7063492063492065, 0.746031746031746, 0.7857142857142856, 0.8253968253968254, 0.8650793650793651, 0.9047619047619047, 0.9444444444444442, 0.984126984126984, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] bluemap = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04761904761904745, 0.1269841269841265, 0.2063492063492056, 0.2857142857142856, 0.3650793650793656, 0.4444444444444446, 0.5238095238095237, 0.6031746031746028, 0.6825396825396828, 0.7619047619047619, 0.8412698412698409, 0.92063492063492, 1] elif name == 'rainbow': redmap = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9365079365079367, 0.8571428571428572, 0.7777777777777777, 0.6984126984126986, 0.6190476190476191, 0.53968253968254, 0.4603174603174605, 0.3809523809523814, 0.3015873015873018, 0.2222222222222223, 0.1428571428571432, 0.06349206349206415, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03174603174603208, 0.08465608465608465, 0.1375661375661377, 0.1904761904761907, 0.2433862433862437, 0.2962962962962963, 0.3492063492063493, 0.4021164021164023, 0.4550264550264553, 0.5079365079365079, 0.5608465608465609, 0.6137566137566139, 0.666666666666667] greenmap = [0, 0.03968253968253968, 0.07936507936507936, 0.119047619047619, 0.1587301587301587, 0.1984126984126984, 0.2380952380952381, 0.2777777777777778, 0.3174603174603174, 0.3571428571428571, 0.3968253968253968, 0.4365079365079365, 0.4761904761904762, 0.5158730158730158, 0.5555555555555556, 0.5952380952380952, 0.6349206349206349, 0.6746031746031745, 0.7142857142857142, 0.753968253968254, 0.7936507936507936, 0.8333333333333333, 0.873015873015873, 0.9126984126984127, 0.9523809523809523, 0.992063492063492, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9841269841269842, 0.9047619047619047, 0.8253968253968256, 0.7460317460317465, 0.666666666666667, 0.587301587301587, 0.5079365079365079, 0.4285714285714288, 0.3492063492063493, 0.2698412698412698, 0.1904761904761907, 0.1111111111111116, 0.03174603174603208, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0] bluemap = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01587301587301582, 0.09523809523809534, 0.1746031746031744, 0.2539682539682535, 0.333333333333333, 0.412698412698413, 0.4920634920634921, 0.5714285714285712, 0.6507936507936507, 0.7301587301587302, 0.8095238095238093, 0.8888888888888884, 0.9682539682539679, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] elif name == 'winter': greenmap = [0, 1] bluemap = [1, 0.5] else: if name != 'jet': print('Warning: colormap "%s" not supported. Using jet instead.' % name) redmap = [0, 0, 0, 0, 0.5, 1, 1, 1, 0.5] greenmap = [0, 0, 0.5, 1, 1, 1, 0.5, 0, 0] bluemap = [0.5, 1, 1, 1, 0.5, 0, 0, 0, 0] return 255.0 * np.array(redmap), 255.0 * np.array(greenmap), 255.0 * np.array(bluemap)
[ "def", "get_color_map", "(", "name", ")", ":", "redmap", "=", "[", "0", "]", "greenmap", "=", "[", "0", "]", "bluemap", "=", "[", "0", "]", "if", "name", "==", "'white'", ":", "# essentially a noop", "redmap", "=", "[", "0", ",", "1", "]", "greenmap", "=", "[", "0", ",", "1", "]", "bluemap", "=", "[", "0", ",", "1", "]", "elif", "name", "==", "'simple'", ":", "redmap", "=", "[", "0", ",", "1", ",", "1", ",", "1", "]", "greenmap", "=", "[", "0", ",", "0", ",", "1", ",", "1", "]", "bluemap", "=", "[", "0", ",", "0", ",", "0", ",", "1", "]", "elif", "name", "==", "'hot'", ":", "redmap", "=", "[", "0", ",", "0.03968253968253968", ",", "0.07936507936507936", ",", "0.119047619047619", ",", "0.1587301587301587", ",", "0.1984126984126984", ",", "0.2380952380952381", ",", "0.2777777777777778", ",", "0.3174603174603174", ",", "0.3571428571428571", ",", "0.3968253968253968", ",", "0.4365079365079365", ",", "0.4761904761904762", ",", "0.5158730158730158", ",", "0.5555555555555556", ",", "0.5952380952380952", ",", "0.6349206349206349", ",", "0.6746031746031745", ",", "0.7142857142857142", ",", "0.753968253968254", ",", "0.7936507936507936", ",", "0.8333333333333333", ",", "0.873015873015873", ",", "0.9126984126984127", ",", "0.9523809523809523", ",", "0.992063492063492", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", "]", "greenmap", "=", "[", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0.03174603174603163", ",", "0.0714285714285714", ",", "0.1111111111111112", ",", "0.1507936507936507", ",", "0.1904761904761905", ",", "0.23015873015873", ",", "0.2698412698412698", ",", "0.3095238095238093", ",", "0.3492063492063491", ",", "0.3888888888888888", ",", "0.4285714285714284", ",", "0.4682539682539679", ",", "0.5079365079365079", ",", "0.5476190476190477", ",", "0.5873015873015872", ",", "0.6269841269841268", ",", "0.6666666666666665", ",", "0.7063492063492065", ",", "0.746031746031746", ",", "0.7857142857142856", ",", "0.8253968253968254", ",", "0.8650793650793651", ",", "0.9047619047619047", ",", "0.9444444444444442", ",", "0.984126984126984", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", "]", "bluemap", "=", "[", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0.04761904761904745", ",", "0.1269841269841265", ",", "0.2063492063492056", ",", "0.2857142857142856", ",", "0.3650793650793656", ",", "0.4444444444444446", ",", "0.5238095238095237", ",", "0.6031746031746028", ",", "0.6825396825396828", ",", "0.7619047619047619", ",", 
"0.8412698412698409", ",", "0.92063492063492", ",", "1", "]", "elif", "name", "==", "'rainbow'", ":", "redmap", "=", "[", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "0.9365079365079367", ",", "0.8571428571428572", ",", "0.7777777777777777", ",", "0.6984126984126986", ",", "0.6190476190476191", ",", "0.53968253968254", ",", "0.4603174603174605", ",", "0.3809523809523814", ",", "0.3015873015873018", ",", "0.2222222222222223", ",", "0.1428571428571432", ",", "0.06349206349206415", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0.03174603174603208", ",", "0.08465608465608465", ",", "0.1375661375661377", ",", "0.1904761904761907", ",", "0.2433862433862437", ",", "0.2962962962962963", ",", "0.3492063492063493", ",", "0.4021164021164023", ",", "0.4550264550264553", ",", "0.5079365079365079", ",", "0.5608465608465609", ",", "0.6137566137566139", ",", "0.666666666666667", "]", "greenmap", "=", "[", "0", ",", "0.03968253968253968", ",", "0.07936507936507936", ",", "0.119047619047619", ",", "0.1587301587301587", ",", "0.1984126984126984", ",", "0.2380952380952381", ",", "0.2777777777777778", ",", "0.3174603174603174", ",", "0.3571428571428571", ",", "0.3968253968253968", ",", "0.4365079365079365", ",", "0.4761904761904762", ",", "0.5158730158730158", ",", "0.5555555555555556", ",", "0.5952380952380952", ",", "0.6349206349206349", ",", "0.6746031746031745", ",", "0.7142857142857142", ",", "0.753968253968254", ",", "0.7936507936507936", ",", "0.8333333333333333", ",", "0.873015873015873", ",", "0.9126984126984127", ",", "0.9523809523809523", ",", "0.992063492063492", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "0.9841269841269842", ",", "0.9047619047619047", ",", "0.8253968253968256", ",", "0.7460317460317465", ",", "0.666666666666667", ",", "0.587301587301587", ",", "0.5079365079365079", ",", "0.4285714285714288", ",", "0.3492063492063493", ",", "0.2698412698412698", ",", "0.1904761904761907", ",", "0.1111111111111116", ",", "0.03174603174603208", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", "]", "bluemap", "=", "[", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0.01587301587301582", ",", "0.09523809523809534", ",", "0.1746031746031744", ",", "0.2539682539682535", ",", "0.333333333333333", ",", "0.412698412698413", ",", "0.4920634920634921", ",", "0.5714285714285712", ",", "0.6507936507936507", ",", "0.7301587301587302", ",", "0.8095238095238093", ",", "0.8888888888888884", ",", "0.9682539682539679", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", "]", "elif", "name", "==", "'winter'", ":", "greenmap", "=", "[", "0", ",", "1", "]", "bluemap", "=", "[", "1", ",", "0.5", "]", "else", ":", "if", "name", "!=", "'jet'", ":", "print", 
"(", "'Warning: colormap \"%s\" not supported. Using jet instead.'", "%", "name", ")", "redmap", "=", "[", "0", ",", "0", ",", "0", ",", "0", ",", "0.5", ",", "1", ",", "1", ",", "1", ",", "0.5", "]", "greenmap", "=", "[", "0", ",", "0", ",", "0.5", ",", "1", ",", "1", ",", "1", ",", "0.5", ",", "0", ",", "0", "]", "bluemap", "=", "[", "0.5", ",", "1", ",", "1", ",", "1", ",", "0.5", ",", "0", ",", "0", ",", "0", ",", "0", "]", "return", "255.0", "*", "np", ".", "array", "(", "redmap", ")", ",", "255.0", "*", "np", ".", "array", "(", "greenmap", ")", ",", "255.0", "*", "np", ".", "array", "(", "bluemap", ")" ]
110.904762
89.190476
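One way to apply the returned control points to data: interpolate each channel over normalized values. Evenly spaced control-point positions are an assumption of this sketch, and get_color_map is assumed to be in scope:

import numpy as np

red, green, blue = get_color_map('jet')
gray = np.linspace(0.0, 1.0, 5)  # normalized intensities
rgb = np.stack(
    [np.interp(gray, np.linspace(0.0, 1.0, len(ch)), ch) for ch in (red, green, blue)],
    axis=-1,
)
print(rgb.shape)  # (5, 3), channel values in [0, 255]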
def trigger_arbitrary_job(repo_name, builder, revision, auth,
                          files=None, dry_run=False, extra_properties=None):
    """ Request buildapi to trigger a job for us.

    We return the request or None if dry_run is True.

    Raises BuildapiAuthError if credentials are invalid and BuildapiDown if
    buildapi answers with a 503.
    """
    assert len(revision) == 40, \
        'We do not accept revisions shorter than 40 chars'

    url = _builders_api_url(repo_name, builder, revision)
    payload = _payload(repo_name, revision, files, extra_properties)

    if dry_run:
        LOG.info("Dry-run: We were going to request a job for '{}'".format(builder))
        LOG.info("         with this payload: {}".format(str(payload)))
        LOG.info("         with these files: {}".format(files))
        return None

    # NOTE: A good response returns json with request_id as one of the keys
    req = requests.post(
        url,
        headers={'Accept': 'application/json'},
        data=payload,
        auth=auth,
        timeout=TCP_TIMEOUT,
    )
    if req.status_code == 401:
        raise BuildapiAuthError("Your credentials were invalid. Please try again.")
    elif req.status_code == 503:
        raise BuildapiDown("Please file a bug {}".format(url))

    try:
        req.json()
        return req
    except ValueError:
        LOG.info('repo: {}, builder: {}, revision: {}'.format(repo_name, builder, revision))
        LOG.error("We did not get info from %s (status code: %s)" % (url, req.status_code))
        return None
[ "def", "trigger_arbitrary_job", "(", "repo_name", ",", "builder", ",", "revision", ",", "auth", ",", "files", "=", "None", ",", "dry_run", "=", "False", ",", "extra_properties", "=", "None", ")", ":", "assert", "len", "(", "revision", ")", "==", "40", ",", "'We do not accept revisions shorter than 40 chars'", "url", "=", "_builders_api_url", "(", "repo_name", ",", "builder", ",", "revision", ")", "payload", "=", "_payload", "(", "repo_name", ",", "revision", ",", "files", ",", "extra_properties", ")", "if", "dry_run", ":", "LOG", ".", "info", "(", "\"Dry-run: We were going to request a job for '{}'\"", ".", "format", "(", "builder", ")", ")", "LOG", ".", "info", "(", "\" with this payload: {}\"", ".", "format", "(", "str", "(", "payload", ")", ")", ")", "LOG", ".", "info", "(", "\" with these files: {}\"", ".", "format", "(", "files", ")", ")", "return", "None", "# NOTE: A good response returns json with request_id as one of the keys", "req", "=", "requests", ".", "post", "(", "url", ",", "headers", "=", "{", "'Accept'", ":", "'application/json'", "}", ",", "data", "=", "payload", ",", "auth", "=", "auth", ",", "timeout", "=", "TCP_TIMEOUT", ",", ")", "if", "req", ".", "status_code", "==", "401", ":", "raise", "BuildapiAuthError", "(", "\"Your credentials were invalid. Please try again.\"", ")", "elif", "req", ".", "status_code", "==", "503", ":", "raise", "BuildapiDown", "(", "\"Please file a bug {}\"", ".", "format", "(", "url", ")", ")", "try", ":", "req", ".", "json", "(", ")", "return", "req", "except", "ValueError", ":", "LOG", ".", "info", "(", "'repo: {}, builder: {}, revision: {}'", ".", "format", "(", "repo_name", ",", "builder", ",", "revision", ")", ")", "LOG", ".", "error", "(", "\"We did not get info from %s (status code: %s)\"", "%", "(", "url", ",", "req", ".", "status_code", ")", ")", "return", "None" ]
36.04878
24.878049
def workspace_backup_undo(ctx): """ Restore the last backup """ backup_manager = WorkspaceBackupManager(Workspace(ctx.resolver, directory=ctx.directory, mets_basename=ctx.mets_basename, automatic_backup=ctx.automatic_backup)) backup_manager.undo()
[ "def", "workspace_backup_undo", "(", "ctx", ")", ":", "backup_manager", "=", "WorkspaceBackupManager", "(", "Workspace", "(", "ctx", ".", "resolver", ",", "directory", "=", "ctx", ".", "directory", ",", "mets_basename", "=", "ctx", ".", "mets_basename", ",", "automatic_backup", "=", "ctx", ".", "automatic_backup", ")", ")", "backup_manager", ".", "undo", "(", ")" ]
43.666667
27
def agent_key_info_from_key_id(key_id): """Find a matching key in the ssh-agent. @param key_id {str} Either a private ssh key fingerprint, e.g. 'b3:f0:a1:6c:18:3b:42:63:fd:6e:57:42:74:17:d4:bc', or the path to an ssh private key file (like ssh's IdentityFile config option). @return {dict} with these keys: - type: "agent" - agent_key: paramiko AgentKey - fingerprint: key fingerprint - algorithm: "rsa-sha1" Currently don't support DSA agent signing. """ # Need the fingerprint of the key we're using for signing. If it # is a path to a priv key, then we need to load it. if not FINGERPRINT_RE.match(key_id): ssh_key = load_ssh_key(key_id, True) fingerprint = ssh_key["fingerprint"] else: fingerprint = key_id # Look for a matching fingerprint in the ssh-agent keys. keys = Agent().get_keys() for key in keys: raw_key = key.blob # The MD5 fingerprint functions return the hexdigest without the hash # algorithm prefix ("MD5:"), and the SHA256 functions return the # fingerprint with the prefix ("SHA256:"). Ideally we'd want to # normalize these, but more importantly we don't want to break backwards # compatibility for either the SHA or MD5 users. md5_fp = fingerprint_from_raw_ssh_pub_key(raw_key) sha_fp = sha256_fingerprint_from_raw_ssh_pub_key(raw_key) if (sha_fp == fingerprint or md5_fp == fingerprint or "MD5:" + md5_fp == fingerprint): # Canonicalize it to the md5 fingerprint. md5_fingerprint = md5_fp break else: raise MantaError('no ssh-agent key with fingerprint "%s"' % fingerprint) return { "type": "agent", "agent_key": key, "fingerprint": md5_fingerprint, "algorithm": ALGO_FROM_SSH_KEY_TYPE[key.name] }
[ "def", "agent_key_info_from_key_id", "(", "key_id", ")", ":", "# Need the fingerprint of the key we're using for signing. If it", "# is a path to a priv key, then we need to load it.", "if", "not", "FINGERPRINT_RE", ".", "match", "(", "key_id", ")", ":", "ssh_key", "=", "load_ssh_key", "(", "key_id", ",", "True", ")", "fingerprint", "=", "ssh_key", "[", "\"fingerprint\"", "]", "else", ":", "fingerprint", "=", "key_id", "# Look for a matching fingerprint in the ssh-agent keys.", "keys", "=", "Agent", "(", ")", ".", "get_keys", "(", ")", "for", "key", "in", "keys", ":", "raw_key", "=", "key", ".", "blob", "# The MD5 fingerprint functions return the hexdigest without the hash", "# algorithm prefix (\"MD5:\"), and the SHA256 functions return the", "# fingerprint with the prefix (\"SHA256:\"). Ideally we'd want to", "# normalize these, but more importantly we don't want to break backwards", "# compatibility for either the SHA or MD5 users.", "md5_fp", "=", "fingerprint_from_raw_ssh_pub_key", "(", "raw_key", ")", "sha_fp", "=", "sha256_fingerprint_from_raw_ssh_pub_key", "(", "raw_key", ")", "if", "(", "sha_fp", "==", "fingerprint", "or", "md5_fp", "==", "fingerprint", "or", "\"MD5:\"", "+", "md5_fp", "==", "fingerprint", ")", ":", "# Canonicalize it to the md5 fingerprint.", "md5_fingerprint", "=", "md5_fp", "break", "else", ":", "raise", "MantaError", "(", "'no ssh-agent key with fingerprint \"%s\"'", "%", "fingerprint", ")", "return", "{", "\"type\"", ":", "\"agent\"", ",", "\"agent_key\"", ":", "key", ",", "\"fingerprint\"", ":", "md5_fingerprint", ",", "\"algorithm\"", ":", "ALGO_FROM_SSH_KEY_TYPE", "[", "key", ".", "name", "]", "}" ]
37.372549
19.862745
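A self-contained sketch of the fingerprint normalization the comments above describe: MD5 fingerprints are colon-separated hex (sometimes carrying an "MD5:" prefix), while SHA256 fingerprints are unpadded base64 with a "SHA256:" prefix. The helper names here are illustrative, not manta's real API.

import base64
import hashlib

def md5_fingerprint(raw_key):
    # colon-separated hex, the classic OpenSSH MD5 format
    digest = hashlib.md5(raw_key).hexdigest()
    return ":".join(digest[i:i + 2] for i in range(0, len(digest), 2))

def sha256_fingerprint(raw_key):
    # unpadded base64, as printed by newer OpenSSH releases
    return "SHA256:" + base64.b64encode(hashlib.sha256(raw_key).digest()).decode().rstrip("=")

def fingerprint_matches(raw_key, wanted):
    md5_fp = md5_fingerprint(raw_key)
    return wanted in (md5_fp, "MD5:" + md5_fp, sha256_fingerprint(raw_key))

assert fingerprint_matches(b"blob", md5_fingerprint(b"blob"))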
def summaries(self): """Yield (name, (value, value, ...)) for each summary in the file.""" length = self.summary_length step = self.summary_step for record_number, n_summaries, summary_data in self.summary_records(): name_data = self.read_record(record_number + 1) for i in range(0, int(n_summaries) * step, step): j = self.summary_control_struct.size + i name = name_data[i:i+step].strip() data = summary_data[j:j+length] values = self.summary_struct.unpack(data) yield name, values
[ "def", "summaries", "(", "self", ")", ":", "length", "=", "self", ".", "summary_length", "step", "=", "self", ".", "summary_step", "for", "record_number", ",", "n_summaries", ",", "summary_data", "in", "self", ".", "summary_records", "(", ")", ":", "name_data", "=", "self", ".", "read_record", "(", "record_number", "+", "1", ")", "for", "i", "in", "range", "(", "0", ",", "int", "(", "n_summaries", ")", "*", "step", ",", "step", ")", ":", "j", "=", "self", ".", "summary_control_struct", ".", "size", "+", "i", "name", "=", "name_data", "[", "i", ":", "i", "+", "step", "]", ".", "strip", "(", ")", "data", "=", "summary_data", "[", "j", ":", "j", "+", "length", "]", "values", "=", "self", ".", "summary_struct", ".", "unpack", "(", "data", ")", "yield", "name", ",", "values" ]
50.666667
13.916667
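The fixed-stride slicing in summaries() can be exercised in isolation. The struct layouts below are assumptions (the real formats live on the reader object); the offset arithmetic mirrors the method: names sit at stride `step`, values at `length`-sized slots after a control header.

import struct

summary_struct = struct.Struct("<2f")  # assumed record layout: two little-endian floats
control_struct = struct.Struct("<2i")  # assumed control header: two int32s
step = 8                               # name stride
length = summary_struct.size

name_data = b"ALPHA   BETA    "
summary_data = (control_struct.pack(2, 0)
                + summary_struct.pack(1.0, 2.0)
                + summary_struct.pack(3.0, 4.0))

for i in range(0, 2 * step, step):
    j = control_struct.size + i        # values start after the control header
    name = name_data[i:i + step].strip()
    values = summary_struct.unpack(summary_data[j:j + length])
    print(name, values)                # b'ALPHA' (1.0, 2.0), then b'BETA' (3.0, 4.0)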
def setup_TreeRegression(self, covariation=True): """Instantiate a TreeRegression object and set its tip_value and branch_value functions to defaults that are sensible for treetime instances. Parameters ---------- covariation : bool, optional account for phylogenetic covariation Returns ------- TreeRegression a TreeRegression instance with self.tree attached as tree. """ from .treeregression import TreeRegression tip_value = lambda x:np.mean(x.raw_date_constraint) if (x.is_terminal() and (x.bad_branch is False)) else None branch_value = lambda x:x.mutation_length if covariation: om = self.one_mutation branch_variance = lambda x:((x.clock_length if hasattr(x,'clock_length') else x.mutation_length) +(self.tip_slack**2*om if x.is_terminal() else 0.0))*om else: branch_variance = lambda x:1.0 if x.is_terminal() else 0.0 Treg = TreeRegression(self.tree, tip_value=tip_value, branch_value=branch_value, branch_variance=branch_variance) Treg.valid_confidence = covariation return Treg
[ "def", "setup_TreeRegression", "(", "self", ",", "covariation", "=", "True", ")", ":", "from", ".", "treeregression", "import", "TreeRegression", "tip_value", "=", "lambda", "x", ":", "np", ".", "mean", "(", "x", ".", "raw_date_constraint", ")", "if", "(", "x", ".", "is_terminal", "(", ")", "and", "(", "x", ".", "bad_branch", "is", "False", ")", ")", "else", "None", "branch_value", "=", "lambda", "x", ":", "x", ".", "mutation_length", "if", "covariation", ":", "om", "=", "self", ".", "one_mutation", "branch_variance", "=", "lambda", "x", ":", "(", "(", "x", ".", "clock_length", "if", "hasattr", "(", "x", ",", "'clock_length'", ")", "else", "x", ".", "mutation_length", ")", "+", "(", "self", ".", "tip_slack", "**", "2", "*", "om", "if", "x", ".", "is_terminal", "(", ")", "else", "0.0", ")", ")", "*", "om", "else", ":", "branch_variance", "=", "lambda", "x", ":", "1.0", "if", "x", ".", "is_terminal", "(", ")", "else", "0.0", "Treg", "=", "TreeRegression", "(", "self", ".", "tree", ",", "tip_value", "=", "tip_value", ",", "branch_value", "=", "branch_value", ",", "branch_variance", "=", "branch_variance", ")", "Treg", ".", "valid_confidence", "=", "covariation", "return", "Treg" ]
45.481481
24.333333
def _from_pointer(pointer, incref): """Wrap an existing :c:type:`cairo_scaled_font_t *` cdata pointer. :type incref: bool :param incref: Whether to increase the :ref:`reference count <refcounting>` now. :return: A new :class:`ScaledFont` instance. """ if pointer == ffi.NULL: raise ValueError('Null pointer') if incref: cairo.cairo_scaled_font_reference(pointer) self = object.__new__(ScaledFont) ScaledFont._init_pointer(self, pointer) return self
[ "def", "_from_pointer", "(", "pointer", ",", "incref", ")", ":", "if", "pointer", "==", "ffi", ".", "NULL", ":", "raise", "ValueError", "(", "'Null pointer'", ")", "if", "incref", ":", "cairo", ".", "cairo_scaled_font_reference", "(", "pointer", ")", "self", "=", "object", ".", "__new__", "(", "ScaledFont", ")", "ScaledFont", ".", "_init_pointer", "(", "self", ",", "pointer", ")", "return", "self" ]
34.25
15.0625
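The incref flag above distinguishes borrowed from owned C pointers. A runnable, library-free sketch of the same pattern, with a refcounted Handle standing in for the cairo object:

class Handle:
    def __init__(self):
        self.refcount = 1              # owned by whoever created it

class Wrapper:
    @classmethod
    def from_pointer(cls, pointer, incref):
        if pointer is None:            # the real code compares against ffi.NULL
            raise ValueError('Null pointer')
        if incref:                     # borrowed pointer: take our own reference
            pointer.refcount += 1
        self = object.__new__(cls)     # bypass __init__, as ScaledFont does
        self._pointer = pointer
        return self

h = Handle()
w = Wrapper.from_pointer(h, incref=True)
assert h.refcount == 2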
def pcapname(dev): """Get the device pcap name by device name or Scapy NetworkInterface """ if isinstance(dev, NetworkInterface): if dev.is_invalid(): return None return dev.pcap_name try: return IFACES.dev_from_name(dev).pcap_name except ValueError: return IFACES.dev_from_pcapname(dev).pcap_name
[ "def", "pcapname", "(", "dev", ")", ":", "if", "isinstance", "(", "dev", ",", "NetworkInterface", ")", ":", "if", "dev", ".", "is_invalid", "(", ")", ":", "return", "None", "return", "dev", ".", "pcap_name", "try", ":", "return", "IFACES", ".", "dev_from_name", "(", "dev", ")", ".", "pcap_name", "except", "ValueError", ":", "return", "IFACES", ".", "dev_from_pcapname", "(", "dev", ")", ".", "pcap_name" ]
29.25
14.833333
def arcs(self): """Get information about the arcs available in the code. Returns a sorted list of line number pairs. Line numbers have been normalized to the first line of multiline statements. """ all_arcs = [] for l1, l2 in self.byte_parser._all_arcs(): fl1 = self.first_line(l1) fl2 = self.first_line(l2) if fl1 != fl2: all_arcs.append((fl1, fl2)) return sorted(all_arcs)
[ "def", "arcs", "(", "self", ")", ":", "all_arcs", "=", "[", "]", "for", "l1", ",", "l2", "in", "self", ".", "byte_parser", ".", "_all_arcs", "(", ")", ":", "fl1", "=", "self", ".", "first_line", "(", "l1", ")", "fl2", "=", "self", ".", "first_line", "(", "l2", ")", "if", "fl1", "!=", "fl2", ":", "all_arcs", ".", "append", "(", "(", "fl1", ",", "fl2", ")", ")", "return", "sorted", "(", "all_arcs", ")" ]
33.714286
15.928571
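The normalization step in arcs() maps both endpoints of an executed arc to the first line of their statement and drops arcs that collapse within a single statement. A small sketch with an assumed multiline map:

first_line = {2: 1}                     # assumed: line 2 continues the statement on line 1

def norm(line):
    return first_line.get(line, line)

raw_arcs = [(1, 2), (2, 3), (3, 1)]
all_arcs = []
for l1, l2 in raw_arcs:
    fl1, fl2 = norm(l1), norm(l2)
    if fl1 != fl2:                      # skip arcs inside one statement
        all_arcs.append((fl1, fl2))
print(sorted(all_arcs))                 # [(1, 3), (3, 1)]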
def process(self, config, grains): ''' Process the configured beacons The config must be a list and looks like this in yaml .. code-block:: yaml beacons: inotify: - files: - /etc/fstab: {} - /var/cache/foo: {} ''' ret = [] b_config = copy.deepcopy(config) if 'enabled' in b_config and not b_config['enabled']: return for mod in config: if mod == 'enabled': continue # Convert beacons that are lists to a dict to make processing easier current_beacon_config = None if isinstance(config[mod], list): current_beacon_config = {} list(map(current_beacon_config.update, config[mod])) elif isinstance(config[mod], dict): current_beacon_config = config[mod] if 'enabled' in current_beacon_config: if not current_beacon_config['enabled']: log.trace('Beacon %s disabled', mod) continue else: # remove 'enabled' item before processing the beacon if isinstance(config[mod], dict): del config[mod]['enabled'] else: self._remove_list_item(config[mod], 'enabled') log.trace('Beacon processing: %s', mod) beacon_name = None if self._determine_beacon_config(current_beacon_config, 'beacon_module'): beacon_name = current_beacon_config['beacon_module'] else: beacon_name = mod fun_str = '{0}.beacon'.format(beacon_name) validate_str = '{0}.validate'.format(beacon_name) if fun_str in self.beacons: runonce = self._determine_beacon_config(current_beacon_config, 'run_once') interval = self._determine_beacon_config(current_beacon_config, 'interval') if interval: b_config = self._trim_config(b_config, mod, 'interval') if not self._process_interval(mod, interval): log.trace('Skipping beacon %s. Interval not reached.', mod) continue if self._determine_beacon_config(current_beacon_config, 'disable_during_state_run'): log.trace('Evaluating if beacon %s should be skipped due to a state run.', mod) b_config = self._trim_config(b_config, mod, 'disable_during_state_run') is_running = False running_jobs = salt.utils.minion.running(self.opts) for job in running_jobs: if re.match('state.*', job['fun']): is_running = True if is_running: close_str = '{0}.close'.format(beacon_name) if close_str in self.beacons: log.info('Closing beacon %s. State run in progress.', mod) self.beacons[close_str](b_config[mod]) else: log.info('Skipping beacon %s. State run in progress.', mod) continue # Update __grains__ on the beacon self.beacons[fun_str].__globals__['__grains__'] = grains # Run the validate function if it's available, # otherwise there is a warning about it being missing if validate_str in self.beacons: valid, vcomment = self.beacons[validate_str](b_config[mod]) if not valid: log.info('Beacon %s configuration invalid, ' 'not running.\n%s', mod, vcomment) continue # Fire the beacon! raw = self.beacons[fun_str](b_config[mod]) for data in raw: tag = 'salt/beacon/{0}/{1}/'.format(self.opts['id'], mod) if 'tag' in data: tag += data.pop('tag') if 'id' not in data: data['id'] = self.opts['id'] ret.append({'tag': tag, 'data': data, 'beacon_name': beacon_name}) if runonce: self.disable_beacon(mod) else: log.warning('Unable to process beacon %s', mod) return ret
[ "def", "process", "(", "self", ",", "config", ",", "grains", ")", ":", "ret", "=", "[", "]", "b_config", "=", "copy", ".", "deepcopy", "(", "config", ")", "if", "'enabled'", "in", "b_config", "and", "not", "b_config", "[", "'enabled'", "]", ":", "return", "for", "mod", "in", "config", ":", "if", "mod", "==", "'enabled'", ":", "continue", "# Convert beacons that are lists to a dict to make processing easier", "current_beacon_config", "=", "None", "if", "isinstance", "(", "config", "[", "mod", "]", ",", "list", ")", ":", "current_beacon_config", "=", "{", "}", "list", "(", "map", "(", "current_beacon_config", ".", "update", ",", "config", "[", "mod", "]", ")", ")", "elif", "isinstance", "(", "config", "[", "mod", "]", ",", "dict", ")", ":", "current_beacon_config", "=", "config", "[", "mod", "]", "if", "'enabled'", "in", "current_beacon_config", ":", "if", "not", "current_beacon_config", "[", "'enabled'", "]", ":", "log", ".", "trace", "(", "'Beacon %s disabled'", ",", "mod", ")", "continue", "else", ":", "# remove 'enabled' item before processing the beacon", "if", "isinstance", "(", "config", "[", "mod", "]", ",", "dict", ")", ":", "del", "config", "[", "mod", "]", "[", "'enabled'", "]", "else", ":", "self", ".", "_remove_list_item", "(", "config", "[", "mod", "]", ",", "'enabled'", ")", "log", ".", "trace", "(", "'Beacon processing: %s'", ",", "mod", ")", "beacon_name", "=", "None", "if", "self", ".", "_determine_beacon_config", "(", "current_beacon_config", ",", "'beacon_module'", ")", ":", "beacon_name", "=", "current_beacon_config", "[", "'beacon_module'", "]", "else", ":", "beacon_name", "=", "mod", "fun_str", "=", "'{0}.beacon'", ".", "format", "(", "beacon_name", ")", "validate_str", "=", "'{0}.validate'", ".", "format", "(", "beacon_name", ")", "if", "fun_str", "in", "self", ".", "beacons", ":", "runonce", "=", "self", ".", "_determine_beacon_config", "(", "current_beacon_config", ",", "'run_once'", ")", "interval", "=", "self", ".", "_determine_beacon_config", "(", "current_beacon_config", ",", "'interval'", ")", "if", "interval", ":", "b_config", "=", "self", ".", "_trim_config", "(", "b_config", ",", "mod", ",", "'interval'", ")", "if", "not", "self", ".", "_process_interval", "(", "mod", ",", "interval", ")", ":", "log", ".", "trace", "(", "'Skipping beacon %s. Interval not reached.'", ",", "mod", ")", "continue", "if", "self", ".", "_determine_beacon_config", "(", "current_beacon_config", ",", "'disable_during_state_run'", ")", ":", "log", ".", "trace", "(", "'Evaluting if beacon %s should be skipped due to a state run.'", ",", "mod", ")", "b_config", "=", "self", ".", "_trim_config", "(", "b_config", ",", "mod", ",", "'disable_during_state_run'", ")", "is_running", "=", "False", "running_jobs", "=", "salt", ".", "utils", ".", "minion", ".", "running", "(", "self", ".", "opts", ")", "for", "job", "in", "running_jobs", ":", "if", "re", ".", "match", "(", "'state.*'", ",", "job", "[", "'fun'", "]", ")", ":", "is_running", "=", "True", "if", "is_running", ":", "close_str", "=", "'{0}.close'", ".", "format", "(", "beacon_name", ")", "if", "close_str", "in", "self", ".", "beacons", ":", "log", ".", "info", "(", "'Closing beacon %s. State run in progress.'", ",", "mod", ")", "self", ".", "beacons", "[", "close_str", "]", "(", "b_config", "[", "mod", "]", ")", "else", ":", "log", ".", "info", "(", "'Skipping beacon %s. 
State run in progress.'", ",", "mod", ")", "continue", "# Update __grains__ on the beacon", "self", ".", "beacons", "[", "fun_str", "]", ".", "__globals__", "[", "'__grains__'", "]", "=", "grains", "# Run the validate function if it's available,", "# otherwise there is a warning about it being missing", "if", "validate_str", "in", "self", ".", "beacons", ":", "valid", ",", "vcomment", "=", "self", ".", "beacons", "[", "validate_str", "]", "(", "b_config", "[", "mod", "]", ")", "if", "not", "valid", ":", "log", ".", "info", "(", "'Beacon %s configuration invalid, '", "'not running.\\n%s'", ",", "mod", ",", "vcomment", ")", "continue", "# Fire the beacon!", "raw", "=", "self", ".", "beacons", "[", "fun_str", "]", "(", "b_config", "[", "mod", "]", ")", "for", "data", "in", "raw", ":", "tag", "=", "'salt/beacon/{0}/{1}/'", ".", "format", "(", "self", ".", "opts", "[", "'id'", "]", ",", "mod", ")", "if", "'tag'", "in", "data", ":", "tag", "+=", "data", ".", "pop", "(", "'tag'", ")", "if", "'id'", "not", "in", "data", ":", "data", "[", "'id'", "]", "=", "self", ".", "opts", "[", "'id'", "]", "ret", ".", "append", "(", "{", "'tag'", ":", "tag", ",", "'data'", ":", "data", ",", "'beacon_name'", ":", "beacon_name", "}", ")", "if", "runonce", ":", "self", ".", "disable_beacon", "(", "mod", ")", "else", ":", "log", ".", "warning", "(", "'Unable to process beacon %s'", ",", "mod", ")", "return", "ret" ]
45.247525
20.376238
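The list-to-dict normalization used in process() is worth seeing in isolation: a beacon configured as a list of single-key dicts is merged into one dict so lookups like 'enabled' work uniformly.

config = [{'files': ['/etc/fstab']}, {'interval': 5}, {'enabled': True}]

current_beacon_config = {}
list(map(current_beacon_config.update, config))  # same merge trick as process()
assert current_beacon_config == {'files': ['/etc/fstab'], 'interval': 5, 'enabled': True}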
def _HasOOOWrite(self, path): """Returns whether the path has had an out-of-order write.""" # Check the sizes of each path before the current one. size = tf.io.gfile.stat(path).length old_size = self._finalized_sizes.get(path, None) if size != old_size: if old_size is None: logger.error('File %s created after file %s even though it\'s ' 'lexicographically earlier', path, self._path) else: logger.error('File %s updated even though the current file is %s', path, self._path) return True else: return False
[ "def", "_HasOOOWrite", "(", "self", ",", "path", ")", ":", "# Check the sizes of each path before the current one.", "size", "=", "tf", ".", "io", ".", "gfile", ".", "stat", "(", "path", ")", ".", "length", "old_size", "=", "self", ".", "_finalized_sizes", ".", "get", "(", "path", ",", "None", ")", "if", "size", "!=", "old_size", ":", "if", "old_size", "is", "None", ":", "logger", ".", "error", "(", "'File %s created after file %s even though it\\'s '", "'lexicographically earlier'", ",", "path", ",", "self", ".", "_path", ")", "else", ":", "logger", ".", "error", "(", "'File %s updated even though the current file is %s'", ",", "path", ",", "self", ".", "_path", ")", "return", "True", "else", ":", "return", "False" ]
40.466667
18.266667
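The size-comparison rule in _HasOOOWrite reduces to a few lines; a runnable sketch with a hand-rolled finalized-size map in place of the real bookkeeping:

finalized_sizes = {'events.1': 100}     # sizes recorded when later files were opened

def has_ooo_write(path, size):
    old_size = finalized_sizes.get(path)
    # a mismatch means the file grew after finalization (size differs) or
    # appeared after a lexicographically later file (no recorded size)
    return size != old_size

assert not has_ooo_write('events.1', 100)
assert has_ooo_write('events.1', 120)   # updated after being finalized
assert has_ooo_write('events.0', 50)    # created late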
def pformat(self, consumed_capacity=None): """ Pretty format for insertion into table pformat """ consumed_capacity = consumed_capacity or {} lines = [] parts = ["GLOBAL", self.index_type, "INDEX", self.name] if self.status != "ACTIVE": parts.insert(0, "[%s]" % self.status) lines.append(" ".join(parts)) lines.append(" items: {0:,} ({1:,} bytes)".format(self.item_count, self.size)) read = "Read: " + format_throughput( self.read_throughput, consumed_capacity.get("read") ) write = "Write: " + format_throughput( self.write_throughput, consumed_capacity.get("write") ) lines.append(" " + read + " " + write) lines.append(" " + self.hash_key.schema) if self.range_key is not None: lines.append(" " + self.range_key.schema) if self.includes is not None: keys = "[%s]" % ", ".join(("'%s'" % i for i in self.includes)) lines.append(" Projection: %s" % keys) return "\n".join(lines)
[ "def", "pformat", "(", "self", ",", "consumed_capacity", "=", "None", ")", ":", "consumed_capacity", "=", "consumed_capacity", "or", "{", "}", "lines", "=", "[", "]", "parts", "=", "[", "\"GLOBAL\"", ",", "self", ".", "index_type", ",", "\"INDEX\"", ",", "self", ".", "name", "]", "if", "self", ".", "status", "!=", "\"ACTIVE\"", ":", "parts", ".", "insert", "(", "0", ",", "\"[%s]\"", "%", "self", ".", "status", ")", "lines", ".", "append", "(", "\" \"", ".", "join", "(", "parts", ")", ")", "lines", ".", "append", "(", "\" items: {0:,} ({1:,} bytes)\"", ".", "format", "(", "self", ".", "item_count", ",", "self", ".", "size", ")", ")", "read", "=", "\"Read: \"", "+", "format_throughput", "(", "self", ".", "read_throughput", ",", "consumed_capacity", ".", "get", "(", "\"read\"", ")", ")", "write", "=", "\"Write: \"", "+", "format_throughput", "(", "self", ".", "write_throughput", ",", "consumed_capacity", ".", "get", "(", "\"write\"", ")", ")", "lines", ".", "append", "(", "\" \"", "+", "read", "+", "\" \"", "+", "write", ")", "lines", ".", "append", "(", "\" \"", "+", "self", ".", "hash_key", ".", "schema", ")", "if", "self", ".", "range_key", "is", "not", "None", ":", "lines", ".", "append", "(", "\" \"", "+", "self", ".", "range_key", ".", "schema", ")", "if", "self", ".", "includes", "is", "not", "None", ":", "keys", "=", "\"[%s]\"", "%", "\", \"", ".", "join", "(", "(", "\"'%s'\"", "%", "i", "for", "i", "in", "self", ".", "includes", ")", ")", "lines", ".", "append", "(", "\" Projection: %s\"", "%", "keys", ")", "return", "\"\\n\"", ".", "join", "(", "lines", ")" ]
44.25
15.5
def _input_as_multifile(self, data): """For use with the -profile option. This input handler expects data to be a tuple containing two filenames. Index 0 will be set to -in1 and index 1 to -in2 """ if data: try: filename1, filename2 = data except: raise ValueError("Expected two filenames") self.Parameters['-in'].off() self.Parameters['-in1'].on(filename1) self.Parameters['-in2'].on(filename2) return ''
[ "def", "_input_as_multifile", "(", "self", ",", "data", ")", ":", "if", "data", ":", "try", ":", "filename1", ",", "filename2", "=", "data", "except", ":", "raise", "ValueError", ",", "\"Expected two filenames\"", "self", ".", "Parameters", "[", "'-in'", "]", ".", "off", "(", ")", "self", ".", "Parameters", "[", "'-in1'", "]", ".", "on", "(", "filename1", ")", "self", ".", "Parameters", "[", "'-in2'", "]", ".", "on", "(", "filename2", ")", "return", "''" ]
33.1875
16.8125
def get_xy(self, yidx, xidx=0): """ Return stored data for the given indices for plot :param yidx: the indices of the y-axis variables (1-indexed) :param xidx: the index of the x-axis variable :return: the x-axis data as a list, and the y-axis data as a list of lists """ assert isinstance(xidx, int) if isinstance(yidx, int): yidx = [yidx] t_vars = self.concat_t_vars() xdata = t_vars[:, xidx] ydata = t_vars[:, yidx] return xdata.tolist(), ydata.transpose().tolist()
[ "def", "get_xy", "(", "self", ",", "yidx", ",", "xidx", "=", "0", ")", ":", "assert", "isinstance", "(", "xidx", ",", "int", ")", "if", "isinstance", "(", "yidx", ",", "int", ")", ":", "yidx", "=", "[", "yidx", "]", "t_vars", "=", "self", ".", "concat_t_vars", "(", ")", "xdata", "=", "t_vars", "[", ":", ",", "xidx", "]", "ydata", "=", "t_vars", "[", ":", ",", "yidx", "]", "return", "xdata", ".", "tolist", "(", ")", ",", "ydata", ".", "transpose", "(", ")", ".", "tolist", "(", ")" ]
27.944444
17.277778
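The column selection in get_xy, shown with plain numpy: x is a single column, y may be several, and the transpose turns each y variable into its own list.

import numpy as np

t_vars = np.array([[0.0, 1.0, 2.0],
                   [0.1, 1.5, 2.5]])
xidx, yidx = 0, [1, 2]
xdata = t_vars[:, xidx]
ydata = t_vars[:, yidx]
print(xdata.tolist())              # [0.0, 0.1]
print(ydata.transpose().tolist())  # [[1.0, 1.5], [2.0, 2.5]]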
def average(self, projection=None): """ Takes the average of elements in the sequence >>> seq([1, 2]).average() 1.5 >>> seq([('a', 1), ('b', 2)]).average(lambda x: x[1]) 1.5 :param projection: function to project on the sequence before taking the average :return: average of elements in the sequence """ length = self.size() if projection: return sum(self.map(projection)) / length else: return sum(self) / length
[ "def", "average", "(", "self", ",", "projection", "=", "None", ")", ":", "length", "=", "self", ".", "size", "(", ")", "if", "projection", ":", "return", "sum", "(", "self", ".", "map", "(", "projection", ")", ")", "/", "length", "else", ":", "return", "sum", "(", "self", ")", "/", "length" ]
29.882353
19.294118
def get_tag_names(self): """Returns the set of tag names present in the XML.""" root = etree.fromstring(self.xml_full_text.encode('utf-8')) return self.get_children_tag_names(root)
[ "def", "get_tag_names", "(", "self", ")", ":", "root", "=", "etree", ".", "fromstring", "(", "self", ".", "xml_full_text", ".", "encode", "(", "'utf-8'", ")", ")", "return", "self", ".", "get_children_tag_names", "(", "root", ")" ]
50.25
12.75
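A stdlib stand-in for the tag extraction above, using xml.etree instead of lxml's etree; iterating a parsed element yields its direct children, whose tags are collected as a set.

import xml.etree.ElementTree as etree

root = etree.fromstring('<doc><title/><body/><body/></doc>'.encode('utf-8'))
print(sorted({child.tag for child in root}))  # ['body', 'title']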
def fileChunkIter(file_object, file_chunk_size=65536): """ Return an iterator over a file-like object that yields fixed-size chunks :param file_object: a file-like object :param file_chunk_size: maximum size of each chunk """ while True: chunk = file_object.read(file_chunk_size) if chunk: yield chunk else: break
[ "def", "fileChunkIter", "(", "file_object", ",", "file_chunk_size", "=", "65536", ")", ":", "while", "True", ":", "chunk", "=", "file_object", ".", "read", "(", "file_chunk_size", ")", "if", "chunk", ":", "yield", "chunk", "else", ":", "break" ]
28.230769
17.153846
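The same chunked read expressed with the two-argument iter() idiom; the b'' sentinel stops the loop exactly where the generator above breaks.

import io
from functools import partial

file_object = io.BytesIO(b'x' * 150000)
for chunk in iter(partial(file_object.read, 65536), b''):
    print(len(chunk))   # 65536, 65536, 18928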
def get_matching_session(self, session, db_session, timediff=5): """ Tries to match a session with its counterpart. For a bait session it will try to match it with honeypot sessions and the other way around. :param session: session object which will be used as the base for the query. :param timediff: +/- allowed time difference between a session and a potential matching session. """ db_session = db_session min_datetime = session.timestamp - timedelta(seconds=timediff) max_datetime = session.timestamp + timedelta(seconds=timediff) # default return value match = None classification = db_session.query(Classification).filter( Classification.type == 'pending').one() # get all sessions that match basic properties. sessions = db_session.query(Session).options(joinedload(Session.authentication)) \ .filter(Session.protocol == session.protocol) \ .filter(Session.honeypot == session.honeypot) \ .filter(Session.timestamp >= min_datetime) \ .filter(Session.timestamp <= max_datetime) \ .filter(Session.id != session.id) \ .filter(Session.classification == classification) # identify the correct session by comparing authentication. # this could properly also be done using some fancy ORM/SQL construct. for potential_match in sessions: if potential_match.discriminator == session.discriminator: continue assert potential_match.id != session.id for honey_auth in session.authentication: for session_auth in potential_match.authentication: if session_auth.username == honey_auth.username and \ session_auth.password == honey_auth.password and \ session_auth.successful == honey_auth.successful: assert potential_match.id != session.id match = potential_match break return match
[ "def", "get_matching_session", "(", "self", ",", "session", ",", "db_session", ",", "timediff", "=", "5", ")", ":", "db_session", "=", "db_session", "min_datetime", "=", "session", ".", "timestamp", "-", "timedelta", "(", "seconds", "=", "timediff", ")", "max_datetime", "=", "session", ".", "timestamp", "+", "timedelta", "(", "seconds", "=", "timediff", ")", "# default return value", "match", "=", "None", "classification", "=", "db_session", ".", "query", "(", "Classification", ")", ".", "filter", "(", "Classification", ".", "type", "==", "'pending'", ")", ".", "one", "(", ")", "# get all sessions that match basic properties.", "sessions", "=", "db_session", ".", "query", "(", "Session", ")", ".", "options", "(", "joinedload", "(", "Session", ".", "authentication", ")", ")", ".", "filter", "(", "Session", ".", "protocol", "==", "session", ".", "protocol", ")", ".", "filter", "(", "Session", ".", "honeypot", "==", "session", ".", "honeypot", ")", ".", "filter", "(", "Session", ".", "timestamp", ">=", "min_datetime", ")", ".", "filter", "(", "Session", ".", "timestamp", "<=", "max_datetime", ")", ".", "filter", "(", "Session", ".", "id", "!=", "session", ".", "id", ")", ".", "filter", "(", "Session", ".", "classification", "==", "classification", ")", "# identify the correct session by comparing authentication.", "# this could properly also be done using some fancy ORM/SQL construct.", "for", "potential_match", "in", "sessions", ":", "if", "potential_match", ".", "discriminator", "==", "session", ".", "discriminator", ":", "continue", "assert", "potential_match", ".", "id", "!=", "session", ".", "id", "for", "honey_auth", "in", "session", ".", "authentication", ":", "for", "session_auth", "in", "potential_match", ".", "authentication", ":", "if", "session_auth", ".", "username", "==", "honey_auth", ".", "username", "and", "session_auth", ".", "password", "==", "honey_auth", ".", "password", "and", "session_auth", ".", "successful", "==", "honey_auth", ".", "successful", ":", "assert", "potential_match", ".", "id", "!=", "session", ".", "id", "match", "=", "potential_match", "break", "return", "match" ]
51.875
23.975
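The matching rule above without the ORM machinery: two sessions pair up when they fall inside the time window and share a (username, password, successful) triple. Plain dicts stand in for the SQLAlchemy models.

from datetime import datetime, timedelta

def is_match(session, candidate, timediff=5):
    delta = abs((candidate['timestamp'] - session['timestamp']).total_seconds())
    if delta > timediff:
        return False
    theirs = {(a['username'], a['password'], a['successful'])
              for a in candidate['authentication']}
    return any((a['username'], a['password'], a['successful']) in theirs
               for a in session['authentication'])

now = datetime.now()
bait = {'timestamp': now,
        'authentication': [{'username': 'root', 'password': 'toor', 'successful': True}]}
honey = {'timestamp': now + timedelta(seconds=3),
         'authentication': list(bait['authentication'])}
assert is_match(bait, honey)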
def calc_model_cosine(self, decimate=None, mode='err'): """ Calculates the cosine of the residuals with the model. Parameters ---------- decimate : Int or None, optional Decimate the residuals by `decimate` pixels. If None, no decimation is used. Valid only with mode='svd'. Default is None mode : {'svd', 'err'} Which mode to use; see Notes section. Default is 'err'. Returns ------- abs_cos : numpy.float64 The absolute value of the model cosine. Notes ----- The model cosine is defined in terms of the geometric view of curve-fitting, as a model manifold embedded in a high-dimensional space. The model cosine is the cosine of the residuals vector with its projection on the tangent space: :math:`cos(phi) = |P^T r|/|r|` where :math:`P^T` is the projection operator onto the model manifold and :math:`r` the residuals. This can be calculated two ways: By calculating the projection operator P directly with SVD (mode=`svd`), or by using the expected error if the model were linear to calculate a model sine first (mode=`err`). Since the SVD of a large matrix is slow, mode=`err` is faster. `decimate` allows for every nth pixel only to be counted in the SVD matrix of J for speed. While this is n x faster, it is considerably less accurate, so the default is no decimation. """ #we calculate the model cosine only in the data space of the #sampled indices if mode == 'err': expected_error = self.find_expected_error(delta_params='perfect', adjust=False) derr = self.error - expected_error residuals_err = lambda r: np.dot(r,r).sum() current_partial_error = residuals_err(self.calc_residuals()) expected_partial_error = current_partial_error - derr model_sine_2 = expected_partial_error / current_partial_error abs_cos = np.sqrt(1 - model_sine_2) else: #superclass is fine abs_cos = super(self.__class__, self).calc_model_cosine(decimate= decimate, mode=mode) return abs_cos
[ "def", "calc_model_cosine", "(", "self", ",", "decimate", "=", "None", ",", "mode", "=", "'err'", ")", ":", "#we calculate the model cosine only in the data space of the", "#sampled indices", "if", "mode", "==", "'err'", ":", "expected_error", "=", "self", ".", "find_expected_error", "(", "delta_params", "=", "'perfect'", ",", "adjust", "=", "False", ")", "derr", "=", "self", ".", "error", "-", "expected_error", "residuals_err", "=", "lambda", "r", ":", "np", ".", "dot", "(", "r", ",", "r", ")", ".", "sum", "(", ")", "current_partial_error", "=", "residuals_err", "(", "self", ".", "calc_residuals", "(", ")", ")", "expected_partial_error", "=", "current_partial_error", "-", "derr", "model_sine_2", "=", "expected_partial_error", "/", "current_partial_error", "abs_cos", "=", "np", ".", "sqrt", "(", "1", "-", "model_sine_2", ")", "else", ":", "#superclass is fine", "abs_cos", "=", "super", "(", "self", ".", "__class__", ",", "self", ")", ".", "calc_model_cosine", "(", "decimate", "=", "decimate", ",", "mode", "=", "mode", ")", "return", "abs_cos" ]
45.294118
23.45098
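The err-mode arithmetic above in isolation: with r.r as the squared residual norm, sin^2(phi) is the ratio of expected to current partial error and the cosine follows. The numbers below are made up for illustration.

import numpy as np

residuals = np.array([1.0, -1.0, 1.0, -1.0])
current_partial_error = np.dot(residuals, residuals)    # |r|^2 = 4.0
derr = 3.0                       # assumed error drop for a perfect linear step
expected_partial_error = current_partial_error - derr   # 1.0
model_sine_2 = expected_partial_error / current_partial_error
abs_cos = np.sqrt(1 - model_sine_2)
print(abs_cos)                   # sqrt(0.75) ~= 0.866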
def brew_query_parts(self): """ Make columns, group_bys, filters, havings """ columns, group_bys, filters, havings = [], [], set(), set() for ingredient in self.ingredients(): if ingredient.query_columns: columns.extend(ingredient.query_columns) if ingredient.group_by: group_bys.extend(ingredient.group_by) if ingredient.filters: filters.update(ingredient.filters) if ingredient.havings: havings.update(ingredient.havings) return { 'columns': columns, 'group_bys': group_bys, 'filters': filters, 'havings': havings, }
[ "def", "brew_query_parts", "(", "self", ")", ":", "columns", ",", "group_bys", ",", "filters", ",", "havings", "=", "[", "]", ",", "[", "]", ",", "set", "(", ")", ",", "set", "(", ")", "for", "ingredient", "in", "self", ".", "ingredients", "(", ")", ":", "if", "ingredient", ".", "query_columns", ":", "columns", ".", "extend", "(", "ingredient", ".", "query_columns", ")", "if", "ingredient", ".", "group_by", ":", "group_bys", ".", "extend", "(", "ingredient", ".", "group_by", ")", "if", "ingredient", ".", "filters", ":", "filters", ".", "update", "(", "ingredient", ".", "filters", ")", "if", "ingredient", ".", "havings", ":", "havings", ".", "update", "(", "ingredient", ".", "havings", ")", "return", "{", "'columns'", ":", "columns", ",", "'group_bys'", ":", "group_bys", ",", "'filters'", ":", "filters", ",", "'havings'", ":", "havings", ",", "}" ]
35.4
11.9
def load_installed_plugins(self): """ :rtype: list of Plugin """ result = [] plugin_dirs = [d for d in os.listdir(self.plugin_path) if os.path.isdir(os.path.join(self.plugin_path, d))] settings = constants.SETTINGS for d in plugin_dirs: if d == "__pycache__": continue try: class_module = self.load_plugin(d) plugin = class_module() plugin.plugin_path = os.path.join(self.plugin_path, plugin.name) plugin.load_description() plugin.enabled = settings.value(plugin.name, type=bool) if plugin.name in settings.allKeys() else False result.append(plugin) except ImportError as e: logger.warning("Could not load plugin {0} ({1})".format(d, e)) continue return result
[ "def", "load_installed_plugins", "(", "self", ")", ":", "result", "=", "[", "]", "plugin_dirs", "=", "[", "d", "for", "d", "in", "os", ".", "listdir", "(", "self", ".", "plugin_path", ")", "if", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "self", ".", "plugin_path", ",", "d", ")", ")", "]", "settings", "=", "constants", ".", "SETTINGS", "for", "d", "in", "plugin_dirs", ":", "if", "d", "==", "\"__pycache__\"", ":", "continue", "try", ":", "class_module", "=", "self", ".", "load_plugin", "(", "d", ")", "plugin", "=", "class_module", "(", ")", "plugin", ".", "plugin_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "plugin_path", ",", "plugin", ".", "name", ")", "plugin", ".", "load_description", "(", ")", "plugin", ".", "enabled", "=", "settings", ".", "value", "(", "plugin", ".", "name", ",", "type", "=", "bool", ")", "if", "plugin", ".", "name", "in", "settings", ".", "allKeys", "(", ")", "else", "False", "result", ".", "append", "(", "plugin", ")", "except", "ImportError", "as", "e", ":", "logger", ".", "warning", "(", "\"Could not load plugin {0} ({1})\"", ".", "format", "(", "d", ",", "e", ")", ")", "continue", "return", "result" ]
41.428571
21.619048
def get_headline(self, name): """Get the stored headline for a service. Args: name (string): The name of the service to get the headline from. Returns: ServiceMessage: the headline or None if no headline has been set """ return self._loop.run_coroutine(self._client.get_headline(name))
[ "def", "get_headline", "(", "self", ",", "name", ")", ":", "return", "self", ".", "_loop", ".", "run_coroutine", "(", "self", ".", "_client", ".", "get_headline", "(", "name", ")", ")" ]
30.363636
25.636364
def get_min_row_num(mention): """Return the lowest row number that a Mention occupies. :param mention: The Mention to evaluate. If a candidate is given, default to its first Mention. :rtype: integer or None """ span = _to_span(mention) if span.sentence.is_tabular(): return span.sentence.cell.row_start else: return None
[ "def", "get_min_row_num", "(", "mention", ")", ":", "span", "=", "_to_span", "(", "mention", ")", "if", "span", ".", "sentence", ".", "is_tabular", "(", ")", ":", "return", "span", ".", "sentence", ".", "cell", ".", "row_start", "else", ":", "return", "None" ]
30.166667
15.416667
def list_workers(config, *, filter_by_queues=None): """ Return a list of all available workers. Args: config (Config): Reference to the configuration object from which the settings are retrieved. filter_by_queues (list): Restrict the returned workers to workers that listen to at least one of the queue names in this list. Returns: list: A list of WorkerStats objects. """ celery_app = create_app(config) worker_stats = celery_app.control.inspect().stats() queue_stats = celery_app.control.inspect().active_queues() if worker_stats is None: return [] workers = [] for name, w_stat in worker_stats.items(): queues = [QueueStats.from_celery(q_stat) for q_stat in queue_stats[name]] add_worker = filter_by_queues is None if not add_worker: for queue in queues: if queue.name in filter_by_queues: add_worker = True break if add_worker: workers.append(WorkerStats.from_celery(name, w_stat, queues)) return workers
[ "def", "list_workers", "(", "config", ",", "*", ",", "filter_by_queues", "=", "None", ")", ":", "celery_app", "=", "create_app", "(", "config", ")", "worker_stats", "=", "celery_app", ".", "control", ".", "inspect", "(", ")", ".", "stats", "(", ")", "queue_stats", "=", "celery_app", ".", "control", ".", "inspect", "(", ")", ".", "active_queues", "(", ")", "if", "worker_stats", "is", "None", ":", "return", "[", "]", "workers", "=", "[", "]", "for", "name", ",", "w_stat", "in", "worker_stats", ".", "items", "(", ")", ":", "queues", "=", "[", "QueueStats", ".", "from_celery", "(", "q_stat", ")", "for", "q_stat", "in", "queue_stats", "[", "name", "]", "]", "add_worker", "=", "filter_by_queues", "is", "None", "if", "not", "add_worker", ":", "for", "queue", "in", "queues", ":", "if", "queue", ".", "name", "in", "filter_by_queues", ":", "add_worker", "=", "True", "break", "if", "add_worker", ":", "workers", ".", "append", "(", "WorkerStats", ".", "from_celery", "(", "name", ",", "w_stat", ",", "queues", ")", ")", "return", "workers" ]
32.176471
21.647059
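The queue filter in list_workers, reduced to plain dicts: a worker is kept when any of its queues appears in filter_by_queues, or unconditionally when the filter is None.

workers = {'w1': ['celery', 'io'], 'w2': ['cpu']}

def filter_workers(filter_by_queues=None):
    return [name for name, queues in workers.items()
            if filter_by_queues is None
            or any(q in filter_by_queues for q in queues)]

assert filter_workers() == ['w1', 'w2']
assert filter_workers(['cpu']) == ['w2']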
def downloaded(name, version=None, pkgs=None, fromrepo=None, ignore_epoch=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that the package is downloaded, and that it is the correct version (if specified). Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` :param str name: The name of the package to be downloaded. This parameter is ignored if "pkgs" is used. Additionally, please note that this option can only be used to download packages from a software repository. :param str version: Download a specific version of a package. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.downloaded: - version: 2:7.4.160-1.el7 An **ignore_epoch** argument has been added which causes the epoch to be disregarded when the state checks to see if the desired version was installed. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.downloaded: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 :param bool resolve_capabilities: Turn on resolving capabilities. This allows one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: yaml zsh: pkg.downloaded: - version: 5.0.5-4.63 - fromrepo: "myrepository" ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_downloaded' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.downloaded state is not available on ' \ 'this platform' return ret if not pkgs and isinstance(pkgs, list): ret['result'] = True ret['comment'] = 'No packages to download provided' return ret # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. if name and not pkgs: if version: pkgs = [{name: version}] version = None else: pkgs = [name] # It doesn't make sense here to receive 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. if 'downloadonly' in kwargs: del kwargs['downloadonly'] pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs) # Only downloading not yet downloaded packages targets = _find_download_targets(name, version, pkgs, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, dict): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following packages would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, pkgs=pkgs, version=version, downloadonly=True, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception.
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = 'An error was encountered while downloading ' \ 'package(s): {0}'.format(exc) return ret new_pkgs = __salt__['pkg.list_downloaded'](**kwargs) ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch) if failed: summary = ', '.join([_get_desired_pkg(x, targets) for x in failed]) ret['result'] = False ret['comment'] = 'The following packages failed to ' \ 'download: {0}'.format(summary) if not ret['changes'] and not ret['comment']: ret['result'] = True ret['comment'] = 'Packages are already downloaded: ' \ '{0}'.format(', '.join(targets)) return ret
[ "def", "downloaded", "(", "name", ",", "version", "=", "None", ",", "pkgs", "=", "None", ",", "fromrepo", "=", "None", ",", "ignore_epoch", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", "}", "if", "'pkg.list_downloaded'", "not", "in", "__salt__", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'The pkg.downloaded state is not available on '", "'this platform'", "return", "ret", "if", "not", "pkgs", "and", "isinstance", "(", "pkgs", ",", "list", ")", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'No packages to download provided'", "return", "ret", "# If just a name (and optionally a version) is passed, just pack them into", "# the pkgs argument.", "if", "name", "and", "not", "pkgs", ":", "if", "version", ":", "pkgs", "=", "[", "{", "name", ":", "version", "}", "]", "version", "=", "None", "else", ":", "pkgs", "=", "[", "name", "]", "# It doesn't make sense here to received 'downloadonly' as kwargs", "# as we're explicitly passing 'downloadonly=True' to execution module.", "if", "'downloadonly'", "in", "kwargs", ":", "del", "kwargs", "[", "'downloadonly'", "]", "pkgs", ",", "_refresh", "=", "_resolve_capabilities", "(", "pkgs", ",", "*", "*", "kwargs", ")", "# Only downloading not yet downloaded packages", "targets", "=", "_find_download_targets", "(", "name", ",", "version", ",", "pkgs", ",", "fromrepo", "=", "fromrepo", ",", "ignore_epoch", "=", "ignore_epoch", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "targets", ",", "dict", ")", "and", "'result'", "in", "targets", ":", "return", "targets", "elif", "not", "isinstance", "(", "targets", ",", "dict", ")", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'An error was encountered while checking targets: '", "'{0}'", ".", "format", "(", "targets", ")", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "summary", "=", "', '", ".", "join", "(", "targets", ")", "ret", "[", "'comment'", "]", "=", "'The following packages would be '", "'downloaded: {0}'", ".", "format", "(", "summary", ")", "return", "ret", "try", ":", "pkg_ret", "=", "__salt__", "[", "'pkg.install'", "]", "(", "name", "=", "name", ",", "pkgs", "=", "pkgs", ",", "version", "=", "version", ",", "downloadonly", "=", "True", ",", "fromrepo", "=", "fromrepo", ",", "ignore_epoch", "=", "ignore_epoch", ",", "*", "*", "kwargs", ")", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'changes'", "]", ".", "update", "(", "pkg_ret", ")", "except", "CommandExecutionError", "as", "exc", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "False", "}", "if", "exc", ".", "info", ":", "# Get information for state return from the exception.", "ret", "[", "'changes'", "]", "=", "exc", ".", "info", ".", "get", "(", "'changes'", ",", "{", "}", ")", "ret", "[", "'comment'", "]", "=", "exc", ".", "strerror_without_changes", "else", ":", "ret", "[", "'changes'", "]", "=", "{", "}", "ret", "[", "'comment'", "]", "=", "'An error was encountered while downloading '", "'package(s): {0}'", ".", "format", "(", "exc", ")", "return", "ret", "new_pkgs", "=", "__salt__", "[", "'pkg.list_downloaded'", "]", "(", "*", "*", "kwargs", ")", "ok", ",", "failed", "=", "_verify_install", "(", "targets", ",", "new_pkgs", ",", "ignore_epoch", "=", "ignore_epoch", ")", "if", "failed", ":", "summary", "=", "', '", ".", "join", "(", "[", 
"_get_desired_pkg", "(", "x", ",", "targets", ")", "for", "x", "in", "failed", "]", ")", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'The following packages failed to '", "'download: {0}'", ".", "format", "(", "summary", ")", "if", "not", "ret", "[", "'changes'", "]", "and", "not", "ret", "[", "'comment'", "]", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Packages are already downloaded: '", "'{0}'", ".", "format", "(", "', '", ".", "join", "(", "targets", ")", ")", "return", "ret" ]
34.897436
21.641026
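The name/version packing rule near the top of downloaded(), isolated: a bare name plus an optional version is folded into the pkgs list format the rest of the state expects.

def pack(name, version=None, pkgs=None):
    if name and not pkgs:
        pkgs = [{name: version}] if version else [name]
    return pkgs

assert pack('zsh', '5.0.5-4.63') == [{'zsh': '5.0.5-4.63'}]
assert pack('zsh') == ['zsh']
assert pack('zsh', pkgs=['unzip']) == ['unzip']   # explicit pkgs wins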
def interpolate_nearest(self, xi, yi, zdata): """ Nearest-neighbour interpolation. Calls nearnds to find the index of the closest neighbours to xi,yi Parameters ---------- xi : float / array of floats, shape (l,) x coordinates on the Cartesian plane yi : float / array of floats, shape (l,) y coordinates on the Cartesian plane zdata : array of floats, shape (n,) value at each mesh point Returns ------- zi : float / array of floats, shape (l,) nearest-neighbour interpolated value(s) of (xi,yi) """ if zdata.size != self.npoints: raise ValueError('zdata should be same size as mesh') zdata = self._shuffle_field(zdata) ist = np.ones_like(xi, dtype=np.int32) ist, dist = _tripack.nearnds(xi, yi, ist, self._x, self._y, self.lst, self.lptr, self.lend) return zdata[ist - 1]
[ "def", "interpolate_nearest", "(", "self", ",", "xi", ",", "yi", ",", "zdata", ")", ":", "if", "zdata", ".", "size", "!=", "self", ".", "npoints", ":", "raise", "ValueError", "(", "'zdata should be same size as mesh'", ")", "zdata", "=", "self", ".", "_shuffle_field", "(", "zdata", ")", "ist", "=", "np", ".", "ones_like", "(", "xi", ",", "dtype", "=", "np", ".", "int32", ")", "ist", ",", "dist", "=", "_tripack", ".", "nearnds", "(", "xi", ",", "yi", ",", "ist", ",", "self", ".", "_x", ",", "self", ".", "_y", ",", "self", ".", "lst", ",", "self", ".", "lptr", ",", "self", ".", "lend", ")", "return", "zdata", "[", "ist", "-", "1", "]" ]
34.846154
17.615385
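An analogous nearest-neighbour lookup with scipy's cKDTree rather than the Fortran nearnds routine; the triangulation-based search itself is not reproduced, only the index-then-select step.

import numpy as np
from scipy.spatial import cKDTree

x = np.array([0.0, 1.0, 0.0, 1.0])
y = np.array([0.0, 0.0, 1.0, 1.0])
zdata = np.array([10.0, 20.0, 30.0, 40.0])

tree = cKDTree(np.column_stack([x, y]))
_, idx = tree.query(np.array([[0.9, 0.1]]))  # index of the closest mesh node
print(zdata[idx])                            # [20.]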
def compute_stop_stats( feed: "Feed", dates: List[str], stop_ids: Optional[List[str]] = None, headway_start_time: str = "07:00:00", headway_end_time: str = "19:00:00", *, split_directions: bool = False, ) -> DataFrame: """ Compute stats for all stops for the given dates. Optionally, restrict to the stop IDs given. Parameters ---------- feed : Feed dates : string or list A YYYYMMDD date string or list thereof indicating the date(s) for which to compute stats stop_ids : list Optional list of stop IDs to restrict stats to headway_start_time : string HH:MM:SS time string indicating the start time for computing headway stats headway_end_time : string HH:MM:SS time string indicating the end time for computing headway stats split_directions : boolean If ``True``, then separate the stop stats by direction (0 or 1) of the trips visiting the stops; otherwise aggregate trips visiting from both directions Returns ------- DataFrame Columns are - ``'date'`` - ``'stop_id'`` - ``'direction_id'``: present if and only if ``split_directions`` - ``'num_routes'``: number of routes visiting the stop (in the given direction) on the date - ``'num_trips'``: number of trips visiting the stop (in the given direction) on the date - ``'max_headway'``: maximum of the durations (in minutes) between trip departures at the stop between ``headway_start_time`` and ``headway_end_time`` on the date - ``'min_headway'``: minimum of the durations (in minutes) mentioned above - ``'mean_headway'``: mean of the durations (in minutes) mentioned above - ``'start_time'``: earliest departure time of a trip from this stop on the date - ``'end_time'``: latest departure time of a trip from this stop on the date Dates with no trip activity will have null stats. Exclude dates that lie outside of the Feed's date range. If all the dates given lie outside of the Feed's date range, then return an empty DataFrame. Notes ----- - Assume the following feed attributes are not ``None``: * ``feed.stop_times`` * Those used in :func:`.trips.get_trips` - Raise a ValueError if ``split_directions`` and no non-NaN direction ID values present """ dates = feed.restrict_dates(dates) if not dates: return pd.DataFrame() # Restrict stop times to stop IDs if specified if stop_ids is not None: stop_times_subset = feed.stop_times.loc[ lambda x: x["stop_id"].isin(stop_ids) ].copy() else: stop_times_subset = feed.stop_times.copy() activity = feed.compute_trip_activity(dates) # Collect stats for each date, memoizing stats by trip ID sequence # to avoid unnecessary recomputations.
# Store in dictionary of the form # trip ID sequence -> # [stats DataFrame, date list that stats apply] stats_and_dates_by_ids = {} cols = [ "stop_id", "num_routes", "num_trips", "max_headway", "min_headway", "mean_headway", "start_time", "end_time", ] if split_directions: cols.append("direction_id") null_stats = pd.DataFrame({c: np.nan for c in cols}, index=[0]) for date in dates: ids = tuple(activity.loc[activity[date] > 0, "trip_id"]) if ids in stats_and_dates_by_ids: # Append date to date list stats_and_dates_by_ids[ids][1].append(date) elif not ids: # Null stats stats_and_dates_by_ids[ids] = [null_stats, [date]] else: # Compute stats t = feed.trips trips = t[t["trip_id"].isin(ids)].copy() stats = compute_stop_stats_base( stop_times_subset, trips, split_directions=split_directions, headway_start_time=headway_start_time, headway_end_time=headway_end_time, ) # Remember stats stats_and_dates_by_ids[ids] = [stats, [date]] # Assemble stats into DataFrame frames = [] for stats, dates_ in stats_and_dates_by_ids.values(): for date in dates_: f = stats.copy() f["date"] = date frames.append(f) f = ( pd.concat(frames) .sort_values(["date", "stop_id"]) .reset_index(drop=True) ) return f
[ "def", "compute_stop_stats", "(", "feed", ":", "\"Feed\"", ",", "dates", ":", "List", "[", "str", "]", ",", "stop_ids", ":", "Optional", "[", "List", "[", "str", "]", "]", "=", "None", ",", "headway_start_time", ":", "str", "=", "\"07:00:00\"", ",", "headway_end_time", ":", "str", "=", "\"19:00:00\"", ",", "*", ",", "split_directions", ":", "bool", "=", "False", ",", ")", "->", "DataFrame", ":", "dates", "=", "feed", ".", "restrict_dates", "(", "dates", ")", "if", "not", "dates", ":", "return", "pd", ".", "DataFrame", "(", ")", "# Restrict stop times to stop IDs if specified", "if", "stop_ids", "is", "not", "None", ":", "stop_times_subset", "=", "feed", ".", "stop_times", ".", "loc", "[", "lambda", "x", ":", "x", "[", "\"stop_id\"", "]", ".", "isin", "(", "stop_ids", ")", "]", ".", "copy", "(", ")", "else", ":", "stop_times_subset", "=", "feed", ".", "stop_times", ".", "copy", "(", ")", "activity", "=", "feed", ".", "compute_trip_activity", "(", "dates", ")", "# Collect stats for each date, memoizing stats by trip ID sequence", "# to avoid unnecessary recomputations.", "# Store in dictionary of the form", "# trip ID sequence ->", "# [stats DataFarme, date list that stats apply]", "stats_and_dates_by_ids", "=", "{", "}", "cols", "=", "[", "\"stop_id\"", ",", "\"num_routes\"", ",", "\"num_trips\"", ",", "\"max_headway\"", ",", "\"min_headway\"", ",", "\"mean_headway\"", ",", "\"start_time\"", ",", "\"end_time\"", ",", "]", "if", "split_directions", ":", "cols", ".", "append", "(", "\"direction_id\"", ")", "null_stats", "=", "pd", ".", "DataFrame", "(", "{", "c", ":", "np", ".", "nan", "for", "c", "in", "cols", "}", ",", "index", "=", "[", "0", "]", ")", "for", "date", "in", "dates", ":", "ids", "=", "tuple", "(", "activity", ".", "loc", "[", "activity", "[", "date", "]", ">", "0", ",", "\"trip_id\"", "]", ")", "if", "ids", "in", "stats_and_dates_by_ids", ":", "# Append date to date list", "stats_and_dates_by_ids", "[", "ids", "]", "[", "1", "]", ".", "append", "(", "date", ")", "elif", "not", "ids", ":", "# Null stats", "stats_and_dates_by_ids", "[", "ids", "]", "=", "[", "null_stats", ",", "[", "date", "]", "]", "else", ":", "# Compute stats", "t", "=", "feed", ".", "trips", "trips", "=", "t", "[", "t", "[", "\"trip_id\"", "]", ".", "isin", "(", "ids", ")", "]", ".", "copy", "(", ")", "stats", "=", "compute_stop_stats_base", "(", "stop_times_subset", ",", "trips", ",", "split_directions", "=", "split_directions", ",", "headway_start_time", "=", "headway_start_time", ",", "headway_end_time", "=", "headway_end_time", ",", ")", "# Remember stats", "stats_and_dates_by_ids", "[", "ids", "]", "=", "[", "stats", ",", "[", "date", "]", "]", "# Assemble stats into DataFrame", "frames", "=", "[", "]", "for", "stats", ",", "dates_", "in", "stats_and_dates_by_ids", ".", "values", "(", ")", ":", "for", "date", "in", "dates_", ":", "f", "=", "stats", ".", "copy", "(", ")", "f", "[", "\"date\"", "]", "=", "date", "frames", ".", "append", "(", "f", ")", "f", "=", "(", "pd", ".", "concat", "(", "frames", ")", ".", "sort_values", "(", "[", "\"date\"", ",", "\"stop_id\"", "]", ")", ".", "reset_index", "(", "drop", "=", "True", ")", ")", "return", "f" ]
32.414286
19.242857
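The memoization pattern above in miniature: stats are cached per tuple of active trip IDs, so service days with identical schedules are computed only once.

cache = {}

def stats_for(trip_ids):
    key = tuple(trip_ids)
    if key not in cache:
        cache[key] = {'num_trips': len(key)}  # stand-in for the real stats frame
    return cache[key]

stats_for(['t1', 't2'])
stats_for(['t1', 't2'])   # cache hit: same schedule, no recomputation
assert len(cache) == 1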
def _set_user(self, v, load=False): """ Setter method for user, mapped from YANG variable /snmp_server/user (list) If this variable is read-only (config: false) in the source YANG file, then _set_user is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_user() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("username",user.user, yang_name="user", rest_name="user", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='username', extensions={u'tailf-common': {u'info': u'Holds username, groupname (admin | user), auth\nand priv attributes associated with SNMP username', u'cli-suppress-mode': None, u'sort-priority': u'23', u'cli-suppress-show-match': None, u'cli-suppress-list-no': None, u'callpoint': u'snmpuser', u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'cli-full-no': None}}), is_container='list', yang_name="user", rest_name="user", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Holds username, groupname (admin | user), auth\nand priv attributes associated with SNMP username', u'cli-suppress-mode': None, u'sort-priority': u'23', u'cli-suppress-show-match': None, u'cli-suppress-list-no': None, u'callpoint': u'snmpuser', u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """user must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("username",user.user, yang_name="user", rest_name="user", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='username', extensions={u'tailf-common': {u'info': u'Holds username, groupname (admin | user), auth\nand priv attributes associated with SNMP username', u'cli-suppress-mode': None, u'sort-priority': u'23', u'cli-suppress-show-match': None, u'cli-suppress-list-no': None, u'callpoint': u'snmpuser', u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'cli-full-no': None}}), is_container='list', yang_name="user", rest_name="user", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Holds username, groupname (admin | user), auth\nand priv attributes associated with SNMP username', u'cli-suppress-mode': None, u'sort-priority': u'23', u'cli-suppress-show-match': None, u'cli-suppress-list-no': None, u'callpoint': u'snmpuser', u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='list', is_config=True)""", }) self.__user = t if hasattr(self, '_set'): self._set()
[ "def", "_set_user", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"username\"", ",", "user", ".", "user", ",", "yang_name", "=", "\"user\"", ",", "rest_name", "=", "\"user\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'username'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Holds username, groupname (admin | user), auth\\nand priv attributes associated with SNMP username'", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'sort-priority'", ":", "u'23'", ",", "u'cli-suppress-show-match'", ":", "None", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'callpoint'", ":", "u'snmpuser'", ",", "u'cli-compact-syntax'", ":", "None", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'cli-full-no'", ":", "None", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"user\"", ",", "rest_name", "=", "\"user\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Holds username, groupname (admin | user), auth\\nand priv attributes associated with SNMP username'", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'sort-priority'", ":", "u'23'", ",", "u'cli-suppress-show-match'", ":", "None", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'callpoint'", ":", "u'snmpuser'", ",", "u'cli-compact-syntax'", ":", "None", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'cli-full-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-snmp'", ",", "defining_module", "=", "'brocade-snmp'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"user must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"username\",user.user, yang_name=\"user\", rest_name=\"user\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='username', extensions={u'tailf-common': {u'info': u'Holds username, groupname (admin | user), auth\\nand priv attributes associated with SNMP username', u'cli-suppress-mode': None, u'sort-priority': u'23', u'cli-suppress-show-match': None, u'cli-suppress-list-no': None, u'callpoint': u'snmpuser', u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'cli-full-no': None}}), is_container='list', yang_name=\"user\", rest_name=\"user\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Holds username, groupname (admin | user), auth\\nand priv attributes associated with SNMP username', u'cli-suppress-mode': None, u'sort-priority': u'23', u'cli-suppress-show-match': None, u'cli-suppress-list-no': None, u'callpoint': u'snmpuser', u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', 
yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__user", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
142.772727
69.090909
def evaluate(self, m): """Search for comments.""" g = m.groupdict() if g["strings"]: self.line_num += g['strings'].count('\n') elif g["code"]: self.line_num += g["code"].count('\n') else: if g['block']: self.evaluate_block(g) elif self.stylesheets != CSS: if g['start'] is None: self.evaluate_inline_tail(g) else: self.evaluate_inline(g) self.line_num += g['comments'].count('\n')
[ "def", "evaluate", "(", "self", ",", "m", ")", ":", "g", "=", "m", ".", "groupdict", "(", ")", "if", "g", "[", "\"strings\"", "]", ":", "self", ".", "line_num", "+=", "g", "[", "'strings'", "]", ".", "count", "(", "'\\n'", ")", "elif", "g", "[", "\"code\"", "]", ":", "self", ".", "line_num", "+=", "g", "[", "\"code\"", "]", ".", "count", "(", "'\\n'", ")", "else", ":", "if", "g", "[", "'block'", "]", ":", "self", ".", "evaluate_block", "(", "g", ")", "elif", "self", ".", "stylesheets", "!=", "CSS", ":", "if", "g", "[", "'start'", "]", "is", "None", ":", "self", ".", "evaluate_inline_tail", "(", "g", ")", "else", ":", "self", ".", "evaluate_inline", "(", "g", ")", "self", ".", "line_num", "+=", "g", "[", "'comments'", "]", ".", "count", "(", "'\\n'", ")" ]
32.529412
12.882353
def _sample_template(sample, out_dir):
    """R code to get QC for one sample"""
    bam_fn = dd.get_work_bam(sample)
    genome = dd.get_genome_build(sample)
    if genome in supported:
        # peaks_files is a mapping, so the fallback must be a dict,
        # not a list, for the chained .get("main") to be safe.
        peaks = sample.get("peaks_files", {}).get("main")
        if peaks:
            # No trailing comma before the closing paren, so the
            # generated R is syntactically valid.
            r_code = ("library(ChIPQC);\n"
                      "sample = ChIPQCsample(\"{bam_fn}\","
                      "\"{peaks}\", "
                      "annotation = \"{genome}\""
                      ");\n"
                      "ChIPQCreport(sample);\n")
            r_code_fn = os.path.join(out_dir, "chipqc.r")
            with open(r_code_fn, 'w') as inh:
                inh.write(r_code.format(**locals()))
            return r_code_fn
[ "def", "_sample_template", "(", "sample", ",", "out_dir", ")", ":", "bam_fn", "=", "dd", ".", "get_work_bam", "(", "sample", ")", "genome", "=", "dd", ".", "get_genome_build", "(", "sample", ")", "if", "genome", "in", "supported", ":", "peaks", "=", "sample", ".", "get", "(", "\"peaks_files\"", ",", "[", "]", ")", ".", "get", "(", "\"main\"", ")", "if", "peaks", ":", "r_code", "=", "(", "\"library(ChIPQC);\\n\"", "\"sample = ChIPQCsample(\\\"{bam_fn}\\\",\"", "\"\\\"{peaks}\\\", \"", "\"annotation = \\\"{genome}\\\",\"", "\");\\n\"", "\"ChIPQCreport(sample);\\n\"", ")", "r_code_fn", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"chipqc.r\"", ")", "with", "open", "(", "r_code_fn", ",", "'w'", ")", "as", "inh", ":", "inh", ".", "write", "(", "r_code", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ")", "return", "r_code_fn" ]
41.294118
9.352941
def check_precondition(self, key, value):
    '''
    Override to check for timeout
    '''
    timeout = float(value)
    curr_time = self.get_current_time()
    return curr_time > timeout
[ "def", "check_precondition", "(", "self", ",", "key", ",", "value", ")", ":", "timeout", "=", "float", "(", "value", ")", "curr_time", "=", "self", ".", "get_current_time", "(", ")", "if", "curr_time", ">", "timeout", ":", "return", "True", "return", "False" ]
27.444444
13.444444
def cublasDestroy(handle): """ Release CUBLAS resources. Releases hardware resources used by CUBLAS. Parameters ---------- handle : void_p CUBLAS context. """ status = _libcublas.cublasDestroy_v2(ctypes.c_void_p(handle)) cublasCheckStatus(status)
[ "def", "cublasDestroy", "(", "handle", ")", ":", "status", "=", "_libcublas", ".", "cublasDestroy_v2", "(", "ctypes", ".", "c_void_p", "(", "handle", ")", ")", "cublasCheckStatus", "(", "status", ")" ]
19.2
20.666667
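A minimal lifecycle sketch for the wrapper above; it assumes the companion cublasCreate wrapper from the same module, which returns a new context handle:

    # Hedged usage sketch: cublasCreate is assumed to be the matching
    # constructor wrapper in this module.
    handle = cublasCreate()
    try:
        pass  # ... issue CUBLAS calls against `handle` here ...
    finally:
        cublasDestroy(handle)  # always release the hardware resources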
def GetCustomJsonFieldMapping(message_type, python_name=None, json_name=None): """Return the appropriate remapping for the given field, or None.""" return _FetchRemapping(message_type, 'field', python_name=python_name, json_name=json_name, mappings=_JSON_FIELD_MAPPINGS)
[ "def", "GetCustomJsonFieldMapping", "(", "message_type", ",", "python_name", "=", "None", ",", "json_name", "=", "None", ")", ":", "return", "_FetchRemapping", "(", "message_type", ",", "'field'", ",", "python_name", "=", "python_name", ",", "json_name", "=", "json_name", ",", "mappings", "=", "_JSON_FIELD_MAPPINGS", ")" ]
65.6
19.2
def _check_equals_spacing(self, tokens, i): """Check the spacing of a single equals sign.""" if self._has_valid_type_annotation(tokens, i): self._check_space(tokens, i, (_MUST, _MUST)) elif self._inside_brackets("(") or self._inside_brackets("lambda"): self._check_space(tokens, i, (_MUST_NOT, _MUST_NOT)) else: self._check_space(tokens, i, (_MUST, _MUST))
[ "def", "_check_equals_spacing", "(", "self", ",", "tokens", ",", "i", ")", ":", "if", "self", ".", "_has_valid_type_annotation", "(", "tokens", ",", "i", ")", ":", "self", ".", "_check_space", "(", "tokens", ",", "i", ",", "(", "_MUST", ",", "_MUST", ")", ")", "elif", "self", ".", "_inside_brackets", "(", "\"(\"", ")", "or", "self", ".", "_inside_brackets", "(", "\"lambda\"", ")", ":", "self", ".", "_check_space", "(", "tokens", ",", "i", ",", "(", "_MUST_NOT", ",", "_MUST_NOT", ")", ")", "else", ":", "self", ".", "_check_space", "(", "tokens", ",", "i", ",", "(", "_MUST", ",", "_MUST", ")", ")" ]
52.125
16.875
def is_notifying(cls, user_or_email, instance): """Check if the watch created by notify exists.""" return super(InstanceEvent, cls).is_notifying(user_or_email, object_id=instance.pk)
[ "def", "is_notifying", "(", "cls", ",", "user_or_email", ",", "instance", ")", ":", "return", "super", "(", "InstanceEvent", ",", "cls", ")", ".", "is_notifying", "(", "user_or_email", ",", "object_id", "=", "instance", ".", "pk", ")" ]
62.25
17.75
def search(self, tid, seller_nick):
    '''taobao.logistics.trace.search: logistics tracking query.

    Looks up logistics transit information for a Taobao order ID, e.g.
    "arrived at the Hangzhou distribution hub at 2010-8-10 15:23:00".'''
    # The request must target the API named in the docstring, not the
    # unrelated address-add endpoint.
    request = TOPRequest('taobao.logistics.trace.search')
    request['tid'] = tid
    request['seller_nick'] = seller_nick
    self.create(self.execute(request))
    return self
[ "def", "search", "(", "self", ",", "tid", ",", "seller_nick", ")", ":", "request", "=", "TOPRequest", "(", "'taobao.logistics.address.add'", ")", "request", "[", "'tid'", "]", "=", "tid", "request", "[", "'seller_nick'", "]", "=", "seller_nick", "self", ".", "create", "(", "self", ".", "execute", "(", "request", ")", ")", "return", "self" ]
37.777778
13.333333
def create_figures(n, *fig_args, **fig_kwargs): '''Create multiple figures. Args and Kwargs are passed to `matplotlib.figure.Figure`. This routine is provided in order to avoid usage of pyplot which is stateful and not thread safe. As drawing routines in tf-matplotlib are called from py-funcs in their respective thread, avoid usage of pyplot where possible. ''' return [create_figure(*fig_args, **fig_kwargs) for _ in range(n)]
[ "def", "create_figures", "(", "n", ",", "*", "fig_args", ",", "*", "*", "fig_kwargs", ")", ":", "return", "[", "create_figure", "(", "*", "fig_args", ",", "*", "*", "fig_kwargs", ")", "for", "_", "in", "range", "(", "n", ")", "]" ]
41.545455
25
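A short usage sketch; fig_args and fig_kwargs are assumed to be forwarded unchanged to matplotlib.figure.Figure:

    # Create three independent figures without touching pyplot state.
    figs = create_figures(3, figsize=(4, 3), dpi=96)
    for fig in figs:
        ax = fig.add_subplot(111)  # each Figure is fully self-contained
        ax.plot([0, 1], [0, 1])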
def load_data_file_to_net(self, filename): ''' Load Clustergrammer's dat format (saved as JSON). ''' inst_dat = self.load_json_to_dict(filename) load_data.load_data_to_net(self, inst_dat)
[ "def", "load_data_file_to_net", "(", "self", ",", "filename", ")", ":", "inst_dat", "=", "self", ".", "load_json_to_dict", "(", "filename", ")", "load_data", ".", "load_data_to_net", "(", "self", ",", "inst_dat", ")" ]
33.666667
15.666667
def all_blocks(self):
    '''Return per-block status rows for the overview TAL table, which
    expects the columns:

        ID   observations   processing status   discoveries   m_r 40%
    '''
    status = OrderedDict.fromkeys(parameters.BLOCKS.keys())
    status['13AE'] = ['discovery complete', '50', '24.05']
    status['13AO'] = ['discovery complete', '36', '24.40']
    status['13BL'] = ['discovery complete', '79', '24.48']
    status['14BH'] = ['discovery running', '-', '-']
    status['15AP'] = ['discovery running', '-', '-']
    status['15AM'] = ['discovery running', '-', '-']
    bks = []
    for block in status.iterkeys():
        bk = [block, self.num_block_images(block)]
        # if set in the .fromkeys(), doesn't give a unique list
        if status[block] is not None:
            bk = bk + status[block]
        else:
            bk = bk + ['awaiting triplets', '-', '-']
        bks.append(bk)
    retval = {'blocks': bks, 'status': status}
    return retval
[ "def", "all_blocks", "(", "self", ")", ":", "status", "=", "OrderedDict", ".", "fromkeys", "(", "parameters", ".", "BLOCKS", ".", "keys", "(", ")", ")", "status", "[", "'13AE'", "]", "=", "[", "'discovery complete'", ",", "'50'", ",", "'24.05'", "]", "status", "[", "'13AO'", "]", "=", "[", "'discovery complete'", ",", "'36'", ",", "'24.40'", "]", "status", "[", "'13BL'", "]", "=", "[", "'discovery complete'", ",", "'79'", ",", "'24.48'", "]", "status", "[", "'14BH'", "]", "=", "[", "'discovery running'", ",", "'-'", ",", "'-'", "]", "status", "[", "'15AP'", "]", "=", "[", "'discovery running'", ",", "'-'", ",", "'-'", "]", "status", "[", "'15AM'", "]", "=", "[", "'discovery running'", ",", "'-'", ",", "'-'", "]", "bks", "=", "[", "]", "for", "block", "in", "status", ".", "iterkeys", "(", ")", ":", "bk", "=", "[", "block", ",", "self", ".", "num_block_images", "(", "block", ")", "]", "# if set in the .fromkeys(), doesn't give a unique list", "if", "status", "[", "block", "]", "is", "not", "None", ":", "bk", "=", "bk", "+", "status", "[", "block", "]", "else", ":", "bk", "=", "bk", "+", "[", "'awaiting triplets'", ",", "'-'", ",", "'-'", "]", "bks", ".", "append", "(", "bk", ")", "retval", "=", "{", "'blocks'", ":", "bks", ",", "'status'", ":", "status", "}", "return", "retval" ]
39.08
22.28
def _can_be_double(x):
    """
    Return whether the array can be safely converted to double.

    That happens when the dtype is a float the same size as a double or
    narrower, or when it is an integer that can be safely converted to
    double (i.e. the round-trip conversion works).
    """
    return ((np.issubdtype(x.dtype, np.floating) and
             x.dtype.itemsize <= np.dtype(float).itemsize) or
            (np.issubdtype(x.dtype, np.signedinteger) and
             np.can_cast(x, float)))
[ "def", "_can_be_double", "(", "x", ")", ":", "return", "(", "(", "np", ".", "issubdtype", "(", "x", ".", "dtype", ",", "np", ".", "floating", ")", "and", "x", ".", "dtype", ".", "itemsize", "<=", "np", ".", "dtype", "(", "float", ")", ".", "itemsize", ")", "or", "(", "np", ".", "issubdtype", "(", "x", ".", "dtype", ",", "np", ".", "signedinteger", ")", "and", "np", ".", "can_cast", "(", "x", ",", "float", ")", ")", ")" ]
37.538462
18.461538
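Illustrative checks following the two rules above (a sketch, not part of the module):

    import numpy as np

    # float32 is narrower than a double, so it qualifies.
    assert _can_be_double(np.zeros(3, dtype=np.float32))
    # int32 values round-trip through float64 safely.
    assert _can_be_double(np.arange(3, dtype=np.int32))
    # A long double may be wider than float64 and would then be rejected,
    # though on platforms where longdouble == float64 it still qualifies.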
def create(cls, name, servers=None, time_range='yesterday',
           all_logs=False, filter_for_delete=None, comment=None, **kwargs):
    """
    Create a new delete log task. Provide True to all_logs to delete
    all log types. Otherwise provide kwargs to specify each log by
    type of interest.

    :param str name: name for this task
    :param servers: servers whose logs should be deleted. Servers must
        be instances of management servers or log servers. If no value
        is provided, all management and log servers are included.
    :type servers: list(ManagementServer or LogServer)
    :param str time_range: specify a time range for the deletion. Valid
        options are 'yesterday', 'last_full_week_sun_sat',
        'last_full_week_mon_sun', 'last_full_month' (default 'yesterday')
    :param FilterExpression filter_for_delete: optional filter for
        deleting. (default: FilterExpression('Match All'))
    :param bool all_logs: if True, all log types will be deleted. If this
        is True, kwargs are ignored (default: False)
    :param kwargs: see :func:`~log_target_types` for keyword arguments
        and default values.
    :raises ElementNotFound: specified servers were not found
    :raises CreateElementFailed: failure to create the task
    :return: the task
    :rtype: DeleteLogTask
    """
    if not servers:
        servers = [svr.href for svr in ManagementServer.objects.all()]
        servers.extend([svr.href for svr in LogServer.objects.all()])
    else:
        servers = [svr.href for svr in servers]

    filter_for_delete = filter_for_delete.href if filter_for_delete else \
        FilterExpression('Match All').href

    json = {
        'name': name,
        'resources': servers,
        'time_limit_type': time_range,
        'start_time': 0,
        'end_time': 0,
        'file_format': 'unknown',
        'filter_for_delete': filter_for_delete,
        'comment': comment}

    json.update(**log_target_types(all_logs, **kwargs))

    return ElementCreator(cls, json)
[ "def", "create", "(", "cls", ",", "name", ",", "servers", "=", "None", ",", "time_range", "=", "'yesterday'", ",", "all_logs", "=", "False", ",", "filter_for_delete", "=", "None", ",", "comment", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "servers", ":", "servers", "=", "[", "svr", ".", "href", "for", "svr", "in", "ManagementServer", ".", "objects", ".", "all", "(", ")", "]", "servers", ".", "extend", "(", "[", "svr", ".", "href", "for", "svr", "in", "LogServer", ".", "objects", ".", "all", "(", ")", "]", ")", "else", ":", "servers", "=", "[", "svr", ".", "href", "for", "svr", "in", "servers", "]", "filter_for_delete", "=", "filter_for_delete", ".", "href", "if", "filter_for_delete", "else", "FilterExpression", "(", "'Match All'", ")", ".", "href", "json", "=", "{", "'name'", ":", "name", ",", "'resources'", ":", "servers", ",", "'time_limit_type'", ":", "time_range", ",", "'start_time'", ":", "0", ",", "'end_time'", ":", "0", ",", "'file_format'", ":", "'unknown'", ",", "'filter_for_delete'", ":", "filter_for_delete", ",", "'comment'", ":", "comment", "}", "json", ".", "update", "(", "*", "*", "log_target_types", "(", "all_logs", ",", "*", "*", "kwargs", ")", ")", "return", "ElementCreator", "(", "cls", ",", "json", ")" ]
45.041667
20.125
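A hedged usage sketch (the task name and comment are illustrative):

    # Delete all log types from every server for the last full month.
    task = DeleteLogTask.create(
        name='monthly_log_purge',
        time_range='last_full_month',
        all_logs=True,
        comment='scheduled cleanup')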
def camel_to_underscore(string): """Convert camelcase to lowercase and underscore. Recipe from http://stackoverflow.com/a/1176023 Args: string (str): The string to convert. Returns: str: The converted string. """ string = FIRST_CAP_RE.sub(r'\1_\2', string) return ALL_CAP_RE.sub(r'\1_\2', string).lower()
[ "def", "camel_to_underscore", "(", "string", ")", ":", "string", "=", "FIRST_CAP_RE", ".", "sub", "(", "r'\\1_\\2'", ",", "string", ")", "return", "ALL_CAP_RE", ".", "sub", "(", "r'\\1_\\2'", ",", "string", ")", ".", "lower", "(", ")" ]
26.076923
17.307692
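The two module-level regexes are not shown above; in the cited recipe they are defined as follows, and the conversion then behaves as in the comments (an assumption drawn from the recipe, not verbatim from this module):

    import re

    # Presumed definitions, from the Stack Overflow recipe cited above.
    FIRST_CAP_RE = re.compile('(.)([A-Z][a-z]+)')
    ALL_CAP_RE = re.compile('([a-z0-9])([A-Z])')

    camel_to_underscore('CamelCaseString')   # -> 'camel_case_string'
    camel_to_underscore('HTTPResponseCode')  # -> 'http_response_code'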
def lookup_character_keycode(self, character): """ Looks up the keysym for the character then returns the keycode mapping for that keysym. """ keysym = self.string_to_keysym.get(character, 0) if keysym == 0: keysym = self.string_to_keysym.get(KEYSYMS[character], 0) return self.display.keysym_to_keycode(keysym)
[ "def", "lookup_character_keycode", "(", "self", ",", "character", ")", ":", "keysym", "=", "self", ".", "string_to_keysym", ".", "get", "(", "character", ",", "0", ")", "if", "keysym", "==", "0", ":", "keysym", "=", "self", ".", "string_to_keysym", ".", "get", "(", "KEYSYMS", "[", "character", "]", ",", "0", ")", "return", "self", ".", "display", ".", "keysym_to_keycode", "(", "keysym", ")" ]
41.222222
15
def run_preassembly(stmts_in, **kwargs):
    """Run preassembly on a list of statements.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to preassemble.
    return_toplevel : Optional[bool]
        If True, only the top-level statements are returned. If False,
        all statements are returned irrespective of level of specificity.
        Default: True
    poolsize : Optional[int]
        The number of worker processes to use to parallelize the
        comparisons performed by the function. If None (default), no
        parallelization is performed. NOTE: Parallelization is only
        available on Python 3.4 and above.
    size_cutoff : Optional[int]
        Groups with size_cutoff or more statements are sent to worker
        processes, while smaller groups are compared in the parent
        process. Default value is 100. Not relevant when parallelization
        is not used.
    belief_scorer : Optional[indra.belief.BeliefScorer]
        Instance of BeliefScorer class to use in calculating Statement
        probabilities. If None is provided (default), then the default
        scorer is used.
    hierarchies : Optional[dict]
        Dict of hierarchy managers to use for preassembly
    flatten_evidence : Optional[bool]
        If True, evidences are collected and flattened via
        supports/supported_by links. Default: False
    flatten_evidence_collect_from : Optional[str]
        String indicating whether to collect and flatten evidence from the
        `supports` attribute of each statement or the `supported_by`
        attribute. If not set, defaults to 'supported_by'. Only relevant
        when flatten_evidence is True.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.
    save_unique : Optional[str]
        The name of a pickle file to save the unique statements into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of preassembled top-level statements.
    """
    dump_pkl_unique = kwargs.get('save_unique')
    belief_scorer = kwargs.get('belief_scorer')
    use_hierarchies = kwargs['hierarchies'] if 'hierarchies' in kwargs else \
        hierarchies
    be = BeliefEngine(scorer=belief_scorer)
    # Use the resolved hierarchies so a 'hierarchies' kwarg actually
    # overrides the module-level default.
    pa = Preassembler(use_hierarchies, stmts_in)

    run_preassembly_duplicate(pa, be, save=dump_pkl_unique)

    dump_pkl = kwargs.get('save')
    return_toplevel = kwargs.get('return_toplevel', True)
    poolsize = kwargs.get('poolsize', None)
    size_cutoff = kwargs.get('size_cutoff', 100)
    options = {'save': dump_pkl, 'return_toplevel': return_toplevel,
               'poolsize': poolsize, 'size_cutoff': size_cutoff,
               'flatten_evidence': kwargs.get('flatten_evidence', False),
               'flatten_evidence_collect_from':
                   kwargs.get('flatten_evidence_collect_from',
                              'supported_by')
               }
    stmts_out = run_preassembly_related(pa, be, **options)
    return stmts_out
[ "def", "run_preassembly", "(", "stmts_in", ",", "*", "*", "kwargs", ")", ":", "dump_pkl_unique", "=", "kwargs", ".", "get", "(", "'save_unique'", ")", "belief_scorer", "=", "kwargs", ".", "get", "(", "'belief_scorer'", ")", "use_hierarchies", "=", "kwargs", "[", "'hierarchies'", "]", "if", "'hierarchies'", "in", "kwargs", "else", "hierarchies", "be", "=", "BeliefEngine", "(", "scorer", "=", "belief_scorer", ")", "pa", "=", "Preassembler", "(", "hierarchies", ",", "stmts_in", ")", "run_preassembly_duplicate", "(", "pa", ",", "be", ",", "save", "=", "dump_pkl_unique", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "return_toplevel", "=", "kwargs", ".", "get", "(", "'return_toplevel'", ",", "True", ")", "poolsize", "=", "kwargs", ".", "get", "(", "'poolsize'", ",", "None", ")", "size_cutoff", "=", "kwargs", ".", "get", "(", "'size_cutoff'", ",", "100", ")", "options", "=", "{", "'save'", ":", "dump_pkl", ",", "'return_toplevel'", ":", "return_toplevel", ",", "'poolsize'", ":", "poolsize", ",", "'size_cutoff'", ":", "size_cutoff", ",", "'flatten_evidence'", ":", "kwargs", ".", "get", "(", "'flatten_evidence'", ",", "False", ")", ",", "'flatten_evidence_collect_from'", ":", "kwargs", ".", "get", "(", "'flatten_evidence_collect_from'", ",", "'supported_by'", ")", "}", "stmts_out", "=", "run_preassembly_related", "(", "pa", ",", "be", ",", "*", "*", "options", ")", "return", "stmts_out" ]
45.415385
19.261538
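A hedged usage sketch over an existing list of INDRA statements:

    # `stmts` is assumed to be a list of indra.statements.Statement objects.
    top_level = run_preassembly(stmts,
                                return_toplevel=True,
                                flatten_evidence=True,
                                save='preassembled_stmts.pkl')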
def _get_mean(self, sites, rup, dists, imt, stddev_types): """ See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ # Zhao et al. 2006 - Vs30 + Rrup mean_zh06, stds1 = super().get_mean_and_stddevs(sites, rup, dists, imt, stddev_types) # # Atkinson and Macias (2009) - Rrup gmpe = AtkinsonMacias2009() mean_am09, stds2 = gmpe.get_mean_and_stddevs(sites, rup, dists, imt, stddev_types) # # Abrahamson et al. (2015) - Rrup + vs30 + backarc gmpe = AbrahamsonEtAl2015SInter() mean_ab15, stds3 = gmpe.get_mean_and_stddevs(sites, rup, dists, imt, stddev_types) # # Ghofrani and Atkinson (2014) - Rrup + vs30 gmpe = GhofraniAtkinson2014() mean_ga14, stds4 = gmpe.get_mean_and_stddevs(sites, rup, dists, imt, stddev_types) # Computing adjusted mean and stds cff = self.SITE_COEFFS[imt] mean_adj = (np.log(np.exp(mean_zh06)*cff['mf'])*0.1 + mean_am09*0.5 + mean_ab15*0.2 + np.log(np.exp(mean_ga14)*cff['mf'])*0.2) return mean_adj
[ "def", "_get_mean", "(", "self", ",", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", ":", "# Zhao et al. 2006 - Vs30 + Rrup", "mean_zh06", ",", "stds1", "=", "super", "(", ")", ".", "get_mean_and_stddevs", "(", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", "#", "# Atkinson and Macias (2009) - Rrup", "gmpe", "=", "AtkinsonMacias2009", "(", ")", "mean_am09", ",", "stds2", "=", "gmpe", ".", "get_mean_and_stddevs", "(", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", "#", "# Abrahamson et al. (2015) - Rrup + vs30 + backarc", "gmpe", "=", "AbrahamsonEtAl2015SInter", "(", ")", "mean_ab15", ",", "stds3", "=", "gmpe", ".", "get_mean_and_stddevs", "(", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", "#", "# Ghofrani and Atkinson (2014) - Rrup + vs30", "gmpe", "=", "GhofraniAtkinson2014", "(", ")", "mean_ga14", ",", "stds4", "=", "gmpe", ".", "get_mean_and_stddevs", "(", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", "# Computing adjusted mean and stds", "cff", "=", "self", ".", "SITE_COEFFS", "[", "imt", "]", "mean_adj", "=", "(", "np", ".", "log", "(", "np", ".", "exp", "(", "mean_zh06", ")", "*", "cff", "[", "'mf'", "]", ")", "*", "0.1", "+", "mean_am09", "*", "0.5", "+", "mean_ab15", "*", "0.2", "+", "np", ".", "log", "(", "np", ".", "exp", "(", "mean_ga14", ")", "*", "cff", "[", "'mf'", "]", ")", "*", "0.2", ")", "return", "mean_adj" ]
46.8
17.2
def renew_token(self):
    """Convenience method: renew Vault token"""
    url = _url_joiner(self._vault_url, 'v1/auth/token/renew-self')
    # Vault's renew-self endpoint is a write operation and expects POST.
    resp = requests.post(url, headers=self._headers)
    resp.raise_for_status()
    data = resp.json()
    if data.get('errors'):
        raise VaultException(u'Error renewing Vault token: {}'.format(data['errors']))
    return data
[ "def", "renew_token", "(", "self", ")", ":", "url", "=", "_url_joiner", "(", "self", ".", "_vault_url", ",", "'v1/auth/token/renew-self'", ")", "resp", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "self", ".", "_headers", ")", "resp", ".", "raise_for_status", "(", ")", "data", "=", "resp", ".", "json", "(", ")", "if", "data", ".", "get", "(", "'errors'", ")", ":", "raise", "VaultException", "(", "u'Error renewing Vault token: {}'", ".", "format", "(", "data", "[", "'errors'", "]", ")", ")", "return", "data" ]
43.777778
18.555556
def cache_clean(path=None, runas=None, env=None, force=False): ''' Clean cached NPM packages. If no path for a specific package is provided the entire cache will be cleared. path The cache subpath to delete, or None to clear the entire cache runas The user to run NPM with env Environment variables to set when invoking npm. Uses the same ``env`` format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution function. force Force cleaning of cache. Required for npm@5 and greater .. versionadded:: 2016.11.6 CLI Example: .. code-block:: bash salt '*' npm.cache_clean force=True ''' env = env or {} if runas: uid = salt.utils.user.get_uid(runas) if uid: env.update({'SUDO_UID': uid, 'SUDO_USER': ''}) cmd = ['npm', 'cache', 'clean'] if path: cmd.append(path) if force is True: cmd.append('--force') cmd = ' '.join(cmd) result = __salt__['cmd.run_all']( cmd, cwd=None, runas=runas, env=env, python_shell=True, ignore_retcode=True) if result['retcode'] != 0: log.error(result['stderr']) return False return True
[ "def", "cache_clean", "(", "path", "=", "None", ",", "runas", "=", "None", ",", "env", "=", "None", ",", "force", "=", "False", ")", ":", "env", "=", "env", "or", "{", "}", "if", "runas", ":", "uid", "=", "salt", ".", "utils", ".", "user", ".", "get_uid", "(", "runas", ")", "if", "uid", ":", "env", ".", "update", "(", "{", "'SUDO_UID'", ":", "uid", ",", "'SUDO_USER'", ":", "''", "}", ")", "cmd", "=", "[", "'npm'", ",", "'cache'", ",", "'clean'", "]", "if", "path", ":", "cmd", ".", "append", "(", "path", ")", "if", "force", "is", "True", ":", "cmd", ".", "append", "(", "'--force'", ")", "cmd", "=", "' '", ".", "join", "(", "cmd", ")", "result", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "cwd", "=", "None", ",", "runas", "=", "runas", ",", "env", "=", "env", ",", "python_shell", "=", "True", ",", "ignore_retcode", "=", "True", ")", "if", "result", "[", "'retcode'", "]", "!=", "0", ":", "log", ".", "error", "(", "result", "[", "'stderr'", "]", ")", "return", "False", "return", "True" ]
23.8
26.64
def create_order_line_item(cls, order_line_item, **kwargs): """Create OrderLineItem Create a new OrderLineItem This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_order_line_item(order_line_item, async=True) >>> result = thread.get() :param async bool :param OrderLineItem order_line_item: Attributes of orderLineItem to create (required) :return: OrderLineItem If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_order_line_item_with_http_info(order_line_item, **kwargs) else: (data) = cls._create_order_line_item_with_http_info(order_line_item, **kwargs) return data
[ "def", "create_order_line_item", "(", "cls", ",", "order_line_item", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_create_order_line_item_with_http_info", "(", "order_line_item", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_create_order_line_item_with_http_info", "(", "order_line_item", ",", "*", "*", "kwargs", ")", "return", "data" ]
43.666667
21.761905
def rebalance(self, weight, child, base=np.nan, update=True):
    """
    Rebalance a child to a given weight.

    This is a helper method to simplify code logic. This method is used
    when we want to set the weight of a particular child to a given
    amount. It is similar to allocate, but it calculates the appropriate
    allocation based on the current weight.

    Args:
        * weight (float): The target weight. Usually between -1.0 and 1.0.
        * child (str): child to allocate to - specified by name.
        * base (float): If specified, this is the base amount all weight
            delta calculations will be based on. This is useful when we
            determine a set of weights and want to rebalance each child
            given these new weights. However, as we iterate through each
            child and call this method, the base (which is by default the
            current value) will change. Therefore, we can set this base to
            the original value before the iteration to ensure the proper
            allocations are made.
        * update (bool): Force update?

    """
    # if weight is 0 - we want to close child
    if weight == 0:
        if child in self.children:
            return self.close(child)
        else:
            return

    # if no base specified use self's value
    if np.isnan(base):
        base = self.value

    # else make sure we have child
    if child not in self.children:
        c = SecurityBase(child)
        c.setup(self._universe)
        # update child to bring up to speed
        c.update(self.now)
        self._add_child(c)

    # allocate to child
    # figure out weight delta
    c = self.children[child]
    delta = weight - c.weight
    c.allocate(delta * base)
[ "def", "rebalance", "(", "self", ",", "weight", ",", "child", ",", "base", "=", "np", ".", "nan", ",", "update", "=", "True", ")", ":", "# if weight is 0 - we want to close child", "if", "weight", "==", "0", ":", "if", "child", "in", "self", ".", "children", ":", "return", "self", ".", "close", "(", "child", ")", "else", ":", "return", "# if no base specified use self's value", "if", "np", ".", "isnan", "(", "base", ")", ":", "base", "=", "self", ".", "value", "# else make sure we have child", "if", "child", "not", "in", "self", ".", "children", ":", "c", "=", "SecurityBase", "(", "child", ")", "c", ".", "setup", "(", "self", ".", "_universe", ")", "# update child to bring up to speed", "c", ".", "update", "(", "self", ".", "now", ")", "self", ".", "_add_child", "(", "c", ")", "# allocate to child", "# figure out weight delta", "c", "=", "self", ".", "children", "[", "child", "]", "delta", "=", "weight", "-", "c", ".", "weight", "c", ".", "allocate", "(", "delta", "*", "base", ")" ]
39.765957
19.553191
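A usage sketch for the base keyword described above (the parent node and target weights are illustrative):

    # Freeze the base value first so every call uses the same denominator.
    base_value = parent.value              # `parent`: an assumed strategy node
    targets = {'AAPL': 0.6, 'MSFT': 0.4}   # illustrative target weights
    for name, weight in targets.items():
        parent.rebalance(weight, name, base=base_value)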
def enhance_pubmed_annotations(pubmed: Mapping[str, Any]) -> Mapping[str, Any]: """Enhance pubmed namespace IDs Add additional entity and annotation types to annotations Use preferred id for namespaces as needed Add strings from Title, Abstract matching Pubtator BioConcept spans NOTE - basically duplicated code with bel_api:api.services.pubmed Args: pubmed Returns: pubmed object """ text = pubmed["title"] + pubmed["abstract"] annotations = {} for nsarg in pubmed["annotations"]: url = f'{config["bel_api"]["servers"]["api_url"]}/terms/{url_path_param_quoting(nsarg)}' log.info(f"URL: {url}") r = get_url(url) log.info(f"Result: {r}") new_nsarg = "" if r and r.status_code == 200: term = r.json() new_nsarg = bel_utils.convert_nsarg(term["id"], decanonicalize=True) pubmed["annotations"][nsarg]["name"] = term["name"] pubmed["annotations"][nsarg]["label"] = term["label"] pubmed["annotations"][nsarg]["entity_types"] = list( set( pubmed["annotations"][nsarg]["entity_types"] + term.get("entity_types", []) ) ) pubmed["annotations"][nsarg]["annotation_types"] = list( set( pubmed["annotations"][nsarg]["annotation_types"] + term.get("annotation_types", []) ) ) if new_nsarg != nsarg: annotations[new_nsarg] = copy.deepcopy(pubmed["annotations"][nsarg]) else: annotations[nsarg] = copy.deepcopy(pubmed["annotations"][nsarg]) for nsarg in annotations: for idx, span in enumerate(annotations[nsarg]["spans"]): string = text[span["begin"] - 1 : span["end"] - 1] annotations[nsarg]["spans"][idx]["text"] = string pubmed["annotations"] = copy.deepcopy(annotations) return pubmed
[ "def", "enhance_pubmed_annotations", "(", "pubmed", ":", "Mapping", "[", "str", ",", "Any", "]", ")", "->", "Mapping", "[", "str", ",", "Any", "]", ":", "text", "=", "pubmed", "[", "\"title\"", "]", "+", "pubmed", "[", "\"abstract\"", "]", "annotations", "=", "{", "}", "for", "nsarg", "in", "pubmed", "[", "\"annotations\"", "]", ":", "url", "=", "f'{config[\"bel_api\"][\"servers\"][\"api_url\"]}/terms/{url_path_param_quoting(nsarg)}'", "log", ".", "info", "(", "f\"URL: {url}\"", ")", "r", "=", "get_url", "(", "url", ")", "log", ".", "info", "(", "f\"Result: {r}\"", ")", "new_nsarg", "=", "\"\"", "if", "r", "and", "r", ".", "status_code", "==", "200", ":", "term", "=", "r", ".", "json", "(", ")", "new_nsarg", "=", "bel_utils", ".", "convert_nsarg", "(", "term", "[", "\"id\"", "]", ",", "decanonicalize", "=", "True", ")", "pubmed", "[", "\"annotations\"", "]", "[", "nsarg", "]", "[", "\"name\"", "]", "=", "term", "[", "\"name\"", "]", "pubmed", "[", "\"annotations\"", "]", "[", "nsarg", "]", "[", "\"label\"", "]", "=", "term", "[", "\"label\"", "]", "pubmed", "[", "\"annotations\"", "]", "[", "nsarg", "]", "[", "\"entity_types\"", "]", "=", "list", "(", "set", "(", "pubmed", "[", "\"annotations\"", "]", "[", "nsarg", "]", "[", "\"entity_types\"", "]", "+", "term", ".", "get", "(", "\"entity_types\"", ",", "[", "]", ")", ")", ")", "pubmed", "[", "\"annotations\"", "]", "[", "nsarg", "]", "[", "\"annotation_types\"", "]", "=", "list", "(", "set", "(", "pubmed", "[", "\"annotations\"", "]", "[", "nsarg", "]", "[", "\"annotation_types\"", "]", "+", "term", ".", "get", "(", "\"annotation_types\"", ",", "[", "]", ")", ")", ")", "if", "new_nsarg", "!=", "nsarg", ":", "annotations", "[", "new_nsarg", "]", "=", "copy", ".", "deepcopy", "(", "pubmed", "[", "\"annotations\"", "]", "[", "nsarg", "]", ")", "else", ":", "annotations", "[", "nsarg", "]", "=", "copy", ".", "deepcopy", "(", "pubmed", "[", "\"annotations\"", "]", "[", "nsarg", "]", ")", "for", "nsarg", "in", "annotations", ":", "for", "idx", ",", "span", "in", "enumerate", "(", "annotations", "[", "nsarg", "]", "[", "\"spans\"", "]", ")", ":", "string", "=", "text", "[", "span", "[", "\"begin\"", "]", "-", "1", ":", "span", "[", "\"end\"", "]", "-", "1", "]", "annotations", "[", "nsarg", "]", "[", "\"spans\"", "]", "[", "idx", "]", "[", "\"text\"", "]", "=", "string", "pubmed", "[", "\"annotations\"", "]", "=", "copy", ".", "deepcopy", "(", "annotations", ")", "return", "pubmed" ]
33.810345
24.87931
def _handle_get(self, request_data): """ An OCSP GET request contains the DER-in-base64 encoded OCSP request in the HTTP request URL. """ der = base64.b64decode(request_data) ocsp_request = self._parse_ocsp_request(der) return self._build_http_response(ocsp_request)
[ "def", "_handle_get", "(", "self", ",", "request_data", ")", ":", "der", "=", "base64", ".", "b64decode", "(", "request_data", ")", "ocsp_request", "=", "self", ".", "_parse_ocsp_request", "(", "der", ")", "return", "self", ".", "_build_http_response", "(", "ocsp_request", ")" ]
39.375
11.375
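For context, a client forms such a GET request by appending the base64-encoded DER to the responder URL (per RFC 6960; der_request is an assumed, already-encoded OCSPRequest):

    import base64
    from urllib.parse import quote

    # The handler above base64-decodes this path segment back to DER.
    url = 'http://ocsp.example.com/' + quote(base64.b64encode(der_request))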
def set_vocabulary(self, peer_id, from_dialogue=None, update=False):
    """
    Fetches the vocabulary via get_vocabulary() and makes it the
    active one.
    """
    self.tokens_array = self.get_vocabulary(
        peer_id,
        from_dialogue,
        update
    )
    self.create_base()
[ "def", "set_vocabulary", "(", "self", ",", "peer_id", ",", "from_dialogue", "=", "None", ",", "update", "=", "False", ")", ":", "self", ".", "tokens_array", "=", "self", ".", "get_vocabulary", "(", "peer_id", ",", "from_dialogue", ",", "update", ")", "self", ".", "create_base", "(", ")" ]
28.363636
19.272727
def AddFilterOptions(self, argument_group):
    """Adds the filter options to the argument group.

    Args:
      argument_group (argparse._ArgumentGroup): argparse argument group.
    """
    names = ['artifact_filters', 'date_filters', 'filter_file']
    helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
        argument_group, names=names)

    argument_group.add_argument(
        '-x', '--extensions', dest='extensions_string', action='store',
        type=str, metavar='EXTENSIONS', help=(
            'Filter on file name extensions. This option accepts multiple '
            'comma separated values e.g. "csv,docx,pst".'))

    argument_group.add_argument(
        '--names', dest='names_string', action='store',
        type=str, metavar='NAMES', help=(
            'Filter on file names. This option accepts a comma separated '
            'string denoting all file names, e.g. --names '
            '"NTUSER.DAT,UsrClass.dat".'))

    argument_group.add_argument(
        '--signatures', dest='signature_identifiers', action='store',
        type=str, metavar='IDENTIFIERS', help=(
            'Filter on file format signature identifiers. This option '
            'accepts multiple comma separated values e.g. "esedb,lnk". '
            'Use "list" to show an overview of the supported file format '
            'signatures.'))
[ "def", "AddFilterOptions", "(", "self", ",", "argument_group", ")", ":", "names", "=", "[", "'artifact_filters'", ",", "'date_filters'", ",", "'filter_file'", "]", "helpers_manager", ".", "ArgumentHelperManager", ".", "AddCommandLineArguments", "(", "argument_group", ",", "names", "=", "names", ")", "argument_group", ".", "add_argument", "(", "'-x'", ",", "'--extensions'", ",", "dest", "=", "'extensions_string'", ",", "action", "=", "'store'", ",", "type", "=", "str", ",", "metavar", "=", "'EXTENSIONS'", ",", "help", "=", "(", "'Filter on file name extensions. This option accepts multiple '", "'multiple comma separated values e.g. \"csv,docx,pst\".'", ")", ")", "argument_group", ".", "add_argument", "(", "'--names'", ",", "dest", "=", "'names_string'", ",", "action", "=", "'store'", ",", "type", "=", "str", ",", "metavar", "=", "'NAMES'", ",", "help", "=", "(", "'Filter on file names. This option accepts a comma separated '", "'string denoting all file names, e.g. -x '", "'\"NTUSER.DAT,UsrClass.dat\".'", ")", ")", "argument_group", ".", "add_argument", "(", "'--signatures'", ",", "dest", "=", "'signature_identifiers'", ",", "action", "=", "'store'", ",", "type", "=", "str", ",", "metavar", "=", "'IDENTIFIERS'", ",", "help", "=", "(", "'Filter on file format signature identifiers. This option '", "'accepts multiple comma separated values e.g. \"esedb,lnk\". '", "'Use \"list\" to show an overview of the supported file format '", "'signatures.'", ")", ")" ]
44.4
20.533333
def update_normals(self, normals): """ Update the triangle normals. """ normals = np.array(normals, dtype=np.float32) self._vbo_n.set_data(normals)
[ "def", "update_normals", "(", "self", ",", "normals", ")", ":", "normals", "=", "np", ".", "array", "(", "normals", ",", "dtype", "=", "np", ".", "float32", ")", "self", ".", "_vbo_n", ".", "set_data", "(", "normals", ")" ]
26
9.428571
def stop(self):
    """
    trigger clean up by hand, needs to be done when not using
    context management via 'with' statement

        - will terminate loop process
        - show a last progress -> see the full 100% on exit
        - releases terminal reservation
    """
    super(Progress, self).stop()
    terminal.terminal_unreserve(progress_obj=self, verbose=self.verbose)

    if self.show_on_exit:
        if not isinstance(self.pipe_handler, PipeToPrint):
            myout = inMemoryBuffer()
            stdout = sys.stdout
            sys.stdout = myout
            self._show_stat()
            self.pipe_handler(myout.getvalue())
            sys.stdout = stdout
        else:
            self._show_stat()
            print()
    self.show_on_exit = False
[ "def", "stop", "(", "self", ")", ":", "super", "(", "Progress", ",", "self", ")", ".", "stop", "(", ")", "terminal", ".", "terminal_unreserve", "(", "progress_obj", "=", "self", ",", "verbose", "=", "self", ".", "verbose", ")", "if", "self", ".", "show_on_exit", ":", "if", "not", "isinstance", "(", "self", ".", "pipe_handler", ",", "PipeToPrint", ")", ":", "myout", "=", "inMemoryBuffer", "(", ")", "stdout", "=", "sys", ".", "stdout", "sys", ".", "stdout", "=", "myout", "self", ".", "_show_stat", "(", ")", "self", ".", "pipe_handler", "(", "myout", ".", "getvalue", "(", ")", ")", "sys", ".", "stdout", "=", "stdout", "else", ":", "self", ".", "_show_stat", "(", ")", "print", "(", ")", "self", ".", "show_on_exit", "=", "False" ]
35.375
13.541667
def copy(tree, source_filename): """ Copy file in tree, show a progress bar during operations, and return the sha1 sum of copied file. """ #_, ext = os.path.splitext(source_filename) filehash = sha1() with printer.progress(os.path.getsize(source_filename)) as update: with open(source_filename, 'rb') as fsource: with NamedTemporaryFile(dir=os.path.join(tree, '.kolekto', 'movies'), delete=False) as fdestination: # Copy the source into the temporary destination: while True: buf = fsource.read(10 * 1024) if not buf: break filehash.update(buf) fdestination.write(buf) update(len(buf)) # Rename the file to its final name or raise an error if # the file already exists: dest = os.path.join(tree, '.kolekto', 'movies', filehash.hexdigest()) if os.path.exists(dest): raise IOError('This file already exists in tree (%s)' % filehash.hexdigest()) else: os.rename(fdestination.name, dest) return filehash.hexdigest()
[ "def", "copy", "(", "tree", ",", "source_filename", ")", ":", "#_, ext = os.path.splitext(source_filename)", "filehash", "=", "sha1", "(", ")", "with", "printer", ".", "progress", "(", "os", ".", "path", ".", "getsize", "(", "source_filename", ")", ")", "as", "update", ":", "with", "open", "(", "source_filename", ",", "'rb'", ")", "as", "fsource", ":", "with", "NamedTemporaryFile", "(", "dir", "=", "os", ".", "path", ".", "join", "(", "tree", ",", "'.kolekto'", ",", "'movies'", ")", ",", "delete", "=", "False", ")", "as", "fdestination", ":", "# Copy the source into the temporary destination:", "while", "True", ":", "buf", "=", "fsource", ".", "read", "(", "10", "*", "1024", ")", "if", "not", "buf", ":", "break", "filehash", ".", "update", "(", "buf", ")", "fdestination", ".", "write", "(", "buf", ")", "update", "(", "len", "(", "buf", ")", ")", "# Rename the file to its final name or raise an error if", "# the file already exists:", "dest", "=", "os", ".", "path", ".", "join", "(", "tree", ",", "'.kolekto'", ",", "'movies'", ",", "filehash", ".", "hexdigest", "(", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "dest", ")", ":", "raise", "IOError", "(", "'This file already exists in tree (%s)'", "%", "filehash", ".", "hexdigest", "(", ")", ")", "else", ":", "os", ".", "rename", "(", "fdestination", ".", "name", ",", "dest", ")", "return", "filehash", ".", "hexdigest", "(", ")" ]
48.6
16.28
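A usage sketch (paths are illustrative):

    # Copies the file into <tree>/.kolekto/movies/<sha1> and returns the hash.
    movie_hash = copy('/var/lib/kolekto', '/home/user/downloads/movie.mkv')
    print(movie_hash)  # a 40-character hexadecimal SHA-1 digest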
def fromDataset(datatype, value, metadata=None, tmap=None): """ Return a representation of dataset argument as an instance of the class corresponding to its datatype """ if tmap is None: tmap = typemap return tmap[datatype.upper()].fromDataset(value, metadata=metadata)
[ "def", "fromDataset", "(", "datatype", ",", "value", ",", "metadata", "=", "None", ",", "tmap", "=", "None", ")", ":", "if", "tmap", "is", "None", ":", "tmap", "=", "typemap", "return", "tmap", "[", "datatype", ".", "upper", "(", ")", "]", ".", "fromDataset", "(", "value", ",", "metadata", "=", "metadata", ")" ]
36.222222
18.222222
def latlon(location, throttle=0.5, center=True, round_digits=2):
    '''Look up the latitude/longitude coordinates of a given location using
    the Google Maps API. The result is cached to avoid redundant API
    requests.

    throttle: send at most one request in this many seconds
    center: return the center of the region; if False, returns the region
        (lat1, lon1, lat2, lon2)
    round_digits: round coordinates to this many digits
    '''
    global last_read
    if isinstance(location, list):
        return map(lambda x: latlon(x, throttle=throttle, center=center,
                                    round_digits=round_digits), location)
    if location in _latlons:
        result = _latlons[location]
        if center:
            lat1, lon1, lat2, lon2 = result
            result = (lat1+lat2)/2, (lon1+lon2)/2
        return tuple([round(n, round_digits) for n in result])
    while time.time() - last_read < throttle:
        pass
    last_read = time.time()
    try:
        url = "http://maps.google.com/maps/api/geocode/json?address=%s&sensor=false" % location.replace(' ', '+')
        data = json.loads(urllib.urlopen(url).read())
        if data['status'] == 'OVER_QUERY_LIMIT':
            raise QueryLimitError('Google Maps API query limit exceeded. (Use the throttle keyword to control the request rate.)')
        try:
            bounds = data['results'][0]['geometry']['bounds']
            result1 = bounds['northeast']
            lat1, lon1 = result1['lat'], result1['lng']
            result2 = bounds['southwest']
            lat2, lon2 = result2['lat'], result2['lng']
        except KeyError:
            bounds = data['results'][0]['geometry']['location']
            lat1 = bounds['lat']
            lon1 = bounds['lng']
            lat2 = lat1
            lon2 = lon1
        except IndexError:
            raise NoResultError('No result was found for location %s' % location)
        _latlons[location] = (lat1, lon1, lat2, lon2)
        save_cache()
        if center:
            return round((lat1+lat2)/2, round_digits), round((lon1+lon2)/2, round_digits)
        else:
            return tuple([round(n, round_digits) for n in (lat1, lon1, lat2, lon2)])
    except Exception:
        raise
[ "def", "latlon", "(", "location", ",", "throttle", "=", "0.5", ",", "center", "=", "True", ",", "round_digits", "=", "2", ")", ":", "global", "last_read", "if", "isinstance", "(", "location", ",", "list", ")", ":", "return", "map", "(", "lambda", "x", ":", "latlon", "(", "x", ",", "throttle", "=", "throttle", ",", "center", "=", "center", ",", "round_digits", "=", "round_digits", ")", ",", "location", ")", "if", "location", "in", "_latlons", ":", "result", "=", "_latlons", "[", "location", "]", "if", "center", ":", "lat1", ",", "lon1", ",", "lat2", ",", "lon2", "=", "result", "result", "=", "(", "lat1", "+", "lat2", ")", "/", "2", ",", "(", "lon1", "+", "lon2", ")", "/", "2", "return", "tuple", "(", "[", "round", "(", "n", ",", "round_digits", ")", "for", "n", "in", "result", "]", ")", "while", "time", ".", "time", "(", ")", "-", "last_read", "<", "throttle", ":", "pass", "last_read", "=", "time", ".", "time", "(", ")", "try", ":", "url", "=", "\"http://maps.google.com/maps/api/geocode/json?address=%s&sensor=false\"", "%", "location", ".", "replace", "(", "' '", ",", "'+'", ")", "data", "=", "json", ".", "loads", "(", "urllib", ".", "urlopen", "(", "url", ")", ".", "read", "(", ")", ")", "if", "data", "[", "'status'", "]", "==", "'OVER_QUERY_LIMIT'", ":", "raise", "QueryLimitError", "(", "'Google Maps API query limit exceeded. (Use the throttle keyword to control the request rate.'", ")", "try", ":", "bounds", "=", "data", "[", "'results'", "]", "[", "0", "]", "[", "'geometry'", "]", "[", "'bounds'", "]", "result1", "=", "bounds", "[", "'northeast'", "]", "lat1", ",", "lon1", "=", "result1", "[", "'lat'", "]", ",", "result1", "[", "'lng'", "]", "result2", "=", "bounds", "[", "'southwest'", "]", "lat2", ",", "lon2", "=", "result2", "[", "'lat'", "]", ",", "result2", "[", "'lng'", "]", "except", "KeyError", ":", "bounds", "=", "data", "[", "'results'", "]", "[", "0", "]", "[", "'geometry'", "]", "[", "'location'", "]", "lat1", "=", "bounds", "[", "'lat'", "]", "lon1", "=", "bounds", "[", "'lng'", "]", "lat2", "=", "lat1", "lon2", "=", "lon1", "except", "IndexError", ":", "raise", "NoResultError", "(", "'No result was found for location %s'", "%", "location", ")", "_latlons", "[", "location", "]", "=", "(", "lat1", ",", "lon1", ",", "lat2", ",", "lon2", ")", "save_cache", "(", ")", "if", "center", ":", "return", "round", "(", "(", "lat1", "+", "lat2", ")", "/", "2", ",", "round_digits", ")", ",", "round", "(", "(", "lon1", "+", "lon2", ")", "/", "2", ",", "round_digits", ")", "else", ":", "return", "tuple", "(", "[", "round", "(", "n", ",", "round_digits", ")", "for", "n", "in", "(", "lat1", ",", "lon1", ",", "lat2", ",", "lon2", ")", "]", ")", "except", "Exception", "as", "e", ":", "raise", "return", "None" ]
38.824561
25.245614
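A hedged usage sketch (no real coordinates asserted):

    # Center of a single location, rounded to 2 digits by default.
    lat, lon = latlon('Hangzhou, China')

    # Bounding box instead of the center:
    lat1, lon1, lat2, lon2 = latlon('Hangzhou, China', center=False)

    # Lists are mapped element-wise; note the function targets Python 2,
    # where map() returns a list.
    coords = latlon(['Paris', 'Berlin'], throttle=1.0)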
def add_edge(self, ind_node, dep_node): """ Add an edge (dependency) between the specified nodes. Args: ind_node (str): The independent node to add an edge to. dep_node (str): The dependent node that has a dependency on the ind_node. Raises: KeyError: Either the ind_node, or dep_node do not exist. DAGValidationError: Raised if the resulting graph is invalid. """ graph = self.graph if ind_node not in graph: raise KeyError('independent node %s does not exist' % ind_node) if dep_node not in graph: raise KeyError('dependent node %s does not exist' % dep_node) test_graph = deepcopy(graph) test_graph[ind_node].add(dep_node) test_dag = DAG() test_dag.graph = test_graph is_valid, message = test_dag.validate() if is_valid: graph[ind_node].add(dep_node) else: raise DAGValidationError(message)
[ "def", "add_edge", "(", "self", ",", "ind_node", ",", "dep_node", ")", ":", "graph", "=", "self", ".", "graph", "if", "ind_node", "not", "in", "graph", ":", "raise", "KeyError", "(", "'independent node %s does not exist'", "%", "ind_node", ")", "if", "dep_node", "not", "in", "graph", ":", "raise", "KeyError", "(", "'dependent node %s does not exist'", "%", "dep_node", ")", "test_graph", "=", "deepcopy", "(", "graph", ")", "test_graph", "[", "ind_node", "]", ".", "add", "(", "dep_node", ")", "test_dag", "=", "DAG", "(", ")", "test_dag", ".", "graph", "=", "test_graph", "is_valid", ",", "message", "=", "test_dag", ".", "validate", "(", ")", "if", "is_valid", ":", "graph", "[", "ind_node", "]", ".", "add", "(", "dep_node", ")", "else", ":", "raise", "DAGValidationError", "(", "message", ")" ]
38.692308
17
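A usage sketch showing the cycle check (add_node is an assumed companion method for registering nodes):

    dag = DAG()
    dag.add_node('a')
    dag.add_node('b')
    dag.add_edge('a', 'b')      # fine: a -> b
    try:
        dag.add_edge('b', 'a')  # would close a cycle
    except DAGValidationError as exc:
        print('rejected:', exc)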
def parse_file(self, sg_file=None, data=None): """Parse a sensor graph file into an AST describing the file. This function builds the statements list for this parser. If you pass ``sg_file``, it will be interpreted as the path to a file to parse. If you pass ``data`` it will be directly interpreted as the string to parse. """ if sg_file is not None and data is not None: raise ArgumentError("You must pass either a path to an sgf file or the sgf contents but not both") if sg_file is None and data is None: raise ArgumentError("You must pass either a path to an sgf file or the sgf contents, neither passed") if sg_file is not None: try: with open(sg_file, "r") as inf: data = inf.read() except IOError: raise ArgumentError("Could not read sensor graph file", path=sg_file) # convert tabs to spaces so our line numbers match correctly data = data.replace(u'\t', u' ') lang = get_language() result = lang.parseString(data) for statement in result: parsed = self.parse_statement(statement, orig_contents=data) self.statements.append(parsed)
[ "def", "parse_file", "(", "self", ",", "sg_file", "=", "None", ",", "data", "=", "None", ")", ":", "if", "sg_file", "is", "not", "None", "and", "data", "is", "not", "None", ":", "raise", "ArgumentError", "(", "\"You must pass either a path to an sgf file or the sgf contents but not both\"", ")", "if", "sg_file", "is", "None", "and", "data", "is", "None", ":", "raise", "ArgumentError", "(", "\"You must pass either a path to an sgf file or the sgf contents, neither passed\"", ")", "if", "sg_file", "is", "not", "None", ":", "try", ":", "with", "open", "(", "sg_file", ",", "\"r\"", ")", "as", "inf", ":", "data", "=", "inf", ".", "read", "(", ")", "except", "IOError", ":", "raise", "ArgumentError", "(", "\"Could not read sensor graph file\"", ",", "path", "=", "sg_file", ")", "# convert tabs to spaces so our line numbers match correctly", "data", "=", "data", ".", "replace", "(", "u'\\t'", ",", "u' '", ")", "lang", "=", "get_language", "(", ")", "result", "=", "lang", ".", "parseString", "(", "data", ")", "for", "statement", "in", "result", ":", "parsed", "=", "self", ".", "parse_statement", "(", "statement", ",", "orig_contents", "=", "data", ")", "self", ".", "statements", ".", "append", "(", "parsed", ")" ]
40.548387
24.096774
def info(cwd, targets=None, user=None, username=None, password=None, fmt='str'):
    '''
    Display the Subversion information from the checkout.

    cwd
        The path to the Subversion repository

    targets : None
        files, directories, and URLs to pass to the command as arguments
        svn uses '.' by default

    user : None
        Run svn as a user other than what the minion runs as

    username : None
        Connect to the Subversion server as another user

    password : None
        Connect to the Subversion server with this password

        .. versionadded:: 0.17.0

    fmt : str
        How to format the output from info. (str, xml, list, dict)

    CLI Example:

    .. code-block:: bash

        salt '*' svn.info /path/to/svn/repo
    '''
    opts = list()
    if fmt == 'xml':
        opts.append('--xml')
    if targets:
        opts += salt.utils.args.shlex_split(targets)
    infos = _run_svn('info', cwd, user, username, password, opts)

    if fmt in ('str', 'xml'):
        return infos

    info_list = []
    for infosplit in infos.split('\n\n'):
        info_list.append(_INI_RE.findall(infosplit))

    if fmt == 'list':
        return info_list
    if fmt == 'dict':
        return [dict(tmp) for tmp in info_list]
[ "def", "info", "(", "cwd", ",", "targets", "=", "None", ",", "user", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ",", "fmt", "=", "'str'", ")", ":", "opts", "=", "list", "(", ")", "if", "fmt", "==", "'xml'", ":", "opts", ".", "append", "(", "'--xml'", ")", "if", "targets", ":", "opts", "+=", "salt", ".", "utils", ".", "args", ".", "shlex_split", "(", "targets", ")", "infos", "=", "_run_svn", "(", "'info'", ",", "cwd", ",", "user", ",", "username", ",", "password", ",", "opts", ")", "if", "fmt", "in", "(", "'str'", ",", "'xml'", ")", ":", "return", "infos", "info_list", "=", "[", "]", "for", "infosplit", "in", "infos", ".", "split", "(", "'\\n\\n'", ")", ":", "info_list", ".", "append", "(", "_INI_RE", ".", "findall", "(", "infosplit", ")", ")", "if", "fmt", "==", "'list'", ":", "return", "info_list", "if", "fmt", "==", "'dict'", ":", "return", "[", "dict", "(", "tmp", ")", "for", "tmp", "in", "info_list", "]" ]
22.909091
23.236364
def _set_config_mode(self, v, load=False): """ Setter method for config_mode, mapped from YANG variable /interface/fc_port/config_mode (interface-fc-config-mode-type) If this variable is read-only (config: false) in the source YANG file, then _set_config_mode is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_config_mode() directly. YANG Description: Port Mode Configuration """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'auto': {'value': 1}, u'nport': {'value': 3}, u'eport': {'value': 2}, u'fport': {'value': 4}},), default=unicode("auto"), is_leaf=True, yang_name="config-mode", rest_name="config-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure port as F/E/N/Auto', u'cli-incomplete-no': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='interface-fc-config-mode-type', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """config_mode must be of a type compatible with interface-fc-config-mode-type""", 'defined-type': "brocade-interface:interface-fc-config-mode-type", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'auto': {'value': 1}, u'nport': {'value': 3}, u'eport': {'value': 2}, u'fport': {'value': 4}},), default=unicode("auto"), is_leaf=True, yang_name="config-mode", rest_name="config-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure port as F/E/N/Auto', u'cli-incomplete-no': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='interface-fc-config-mode-type', is_config=True)""", }) self.__config_mode = t if hasattr(self, '_set'): self._set()
[ "def", "_set_config_mode", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "RestrictedClassType", "(", "base_type", "=", "unicode", ",", "restriction_type", "=", "\"dict_key\"", ",", "restriction_arg", "=", "{", "u'auto'", ":", "{", "'value'", ":", "1", "}", ",", "u'nport'", ":", "{", "'value'", ":", "3", "}", ",", "u'eport'", ":", "{", "'value'", ":", "2", "}", ",", "u'fport'", ":", "{", "'value'", ":", "4", "}", "}", ",", ")", ",", "default", "=", "unicode", "(", "\"auto\"", ")", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"config-mode\"", ",", "rest_name", "=", "\"config-mode\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Configure port as F/E/N/Auto'", ",", "u'cli-incomplete-no'", ":", "None", ",", "u'cli-suppress-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-interface'", ",", "defining_module", "=", "'brocade-interface'", ",", "yang_type", "=", "'interface-fc-config-mode-type'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"config_mode must be of a type compatible with interface-fc-config-mode-type\"\"\"", ",", "'defined-type'", ":", "\"brocade-interface:interface-fc-config-mode-type\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'auto': {'value': 1}, u'nport': {'value': 3}, u'eport': {'value': 2}, u'fport': {'value': 4}},), default=unicode(\"auto\"), is_leaf=True, yang_name=\"config-mode\", rest_name=\"config-mode\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure port as F/E/N/Auto', u'cli-incomplete-no': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='interface-fc-config-mode-type', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__config_mode", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
98.416667
48.208333