Dataset columns:
  text          string  (lengths 89 to 104k)
  code_tokens   list
  avg_line_len  float64 (7.91 to 980)
  score         float64 (0 to 630)

Each row below lists, in order: the text field (a source-code snippet), the code_tokens field (its token list), then the avg_line_len and score values.
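For reference, the avg_line_len column can be recomputed from the text column. The sketch below is only an assumption about how such a per-snippet statistic is typically defined (mean characters per line); the exact formula behind this dataset's avg_line_len and score columns is not documented here.

def avg_line_len(text: str) -> float:
    # Assumed definition: mean number of characters per line,
    # counting characters without the trailing newline.
    lines = text.splitlines() or [""]
    return sum(len(line) for line in lines) / len(lines)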
def writeToFile(self, f, packed=True):
    """
    Write serialized object to file.

    :param f: output file
    :param packed: If true, will pack contents.
    """
    # Get capnproto schema from instance
    schema = self.getSchema()

    # Construct new message, otherwise refered to as `proto`
    proto = schema.new_message()

    # Populate message w/ `write()` instance method
    self.write(proto)

    # Finally, write to file
    if packed:
        proto.write_packed(f)
    else:
        proto.write(f)
[ "def", "writeToFile", "(", "self", ",", "f", ",", "packed", "=", "True", ")", ":", "# Get capnproto schema from instance", "schema", "=", "self", ".", "getSchema", "(", ")", "# Construct new message, otherwise refered to as `proto`", "proto", "=", "schema", ".", "new_message", "(", ")", "# Populate message w/ `write()` instance method", "self", ".", "write", "(", "proto", ")", "# Finally, write to file", "if", "packed", ":", "proto", ".", "write_packed", "(", "f", ")", "else", ":", "proto", ".", "write", "(", "f", ")" ]
avg_line_len: 23.380952
score: 17.095238
def rgbmap_cb(self, rgbmap, channel):
    """
    This method is called when the RGBMap is changed.  We update
    the ColorBar to match.
    """
    if not self.gui_up:
        return
    fitsimage = channel.fitsimage
    if fitsimage != self.fv.getfocus_fitsimage():
        return False
    self.change_cbar(self.fv, channel)
[ "def", "rgbmap_cb", "(", "self", ",", "rgbmap", ",", "channel", ")", ":", "if", "not", "self", ".", "gui_up", ":", "return", "fitsimage", "=", "channel", ".", "fitsimage", "if", "fitsimage", "!=", "self", ".", "fv", ".", "getfocus_fitsimage", "(", ")", ":", "return", "False", "self", ".", "change_cbar", "(", "self", ".", "fv", ",", "channel", ")" ]
avg_line_len: 32.545455
score: 10
def convert_cityscapes_instance_only(
        data_dir, out_dir):
    """Convert from cityscapes format to COCO instance seg format - polygons"""
    sets = [
        'gtFine_val',
        'gtFine_train',
        'gtFine_test',
        # 'gtCoarse_train',
        # 'gtCoarse_val',
        # 'gtCoarse_train_extra'
    ]
    ann_dirs = [
        'gtFine_trainvaltest/gtFine/val',
        'gtFine_trainvaltest/gtFine/train',
        'gtFine_trainvaltest/gtFine/test',
        # 'gtCoarse/train',
        # 'gtCoarse/train_extra',
        # 'gtCoarse/val'
    ]
    json_name = 'instancesonly_filtered_%s.json'
    ends_in = '%s_polygons.json'
    img_id = 0
    ann_id = 0
    cat_id = 1
    category_dict = {}

    category_instancesonly = [
        'person',
        'rider',
        'car',
        'truck',
        'bus',
        'train',
        'motorcycle',
        'bicycle',
    ]

    for data_set, ann_dir in zip(sets, ann_dirs):
        print('Starting %s' % data_set)
        ann_dict = {}
        images = []
        annotations = []
        ann_dir = os.path.join(data_dir, ann_dir)
        for root, _, files in os.walk(ann_dir):
            for filename in files:
                if filename.endswith(ends_in % data_set.split('_')[0]):
                    if len(images) % 50 == 0:
                        print("Processed %s images, %s annotations" % (
                            len(images), len(annotations)))
                    json_ann = json.load(open(os.path.join(root, filename)))
                    image = {}
                    image['id'] = img_id
                    img_id += 1

                    image['width'] = json_ann['imgWidth']
                    image['height'] = json_ann['imgHeight']
                    image['file_name'] = filename[:-len(
                        ends_in % data_set.split('_')[0])] + 'leftImg8bit.png'
                    image['seg_file_name'] = filename[:-len(
                        ends_in % data_set.split('_')[0])] + \
                        '%s_instanceIds.png' % data_set.split('_')[0]
                    images.append(image)

                    fullname = os.path.join(root, image['seg_file_name'])
                    objects = cs.instances2dict_with_polygons(
                        [fullname], verbose=False)[fullname]

                    for object_cls in objects:
                        if object_cls not in category_instancesonly:
                            continue  # skip non-instance categories

                        for obj in objects[object_cls]:
                            if obj['contours'] == []:
                                print('Warning: empty contours.')
                                continue  # skip non-instance categories

                            len_p = [len(p) for p in obj['contours']]
                            if min(len_p) <= 4:
                                print('Warning: invalid contours.')
                                continue  # skip non-instance categories

                            ann = {}
                            ann['id'] = ann_id
                            ann_id += 1
                            ann['image_id'] = image['id']
                            ann['segmentation'] = obj['contours']

                            if object_cls not in category_dict:
                                category_dict[object_cls] = cat_id
                                cat_id += 1
                            ann['category_id'] = category_dict[object_cls]
                            ann['iscrowd'] = 0
                            ann['area'] = obj['pixelCount']
                            ann['bbox'] = bboxs_util.xyxy_to_xywh(
                                segms_util.polys_to_boxes(
                                    [ann['segmentation']])).tolist()[0]

                            annotations.append(ann)

        ann_dict['images'] = images
        categories = [{"id": category_dict[name], "name": name}
                      for name in category_dict]
        ann_dict['categories'] = categories
        ann_dict['annotations'] = annotations
        print("Num categories: %s" % len(categories))
        print("Num images: %s" % len(images))
        print("Num annotations: %s" % len(annotations))
        with open(os.path.join(out_dir, json_name % data_set), 'w') as outfile:
            outfile.write(json.dumps(ann_dict))
[ "def", "convert_cityscapes_instance_only", "(", "data_dir", ",", "out_dir", ")", ":", "sets", "=", "[", "'gtFine_val'", ",", "'gtFine_train'", ",", "'gtFine_test'", ",", "# 'gtCoarse_train',", "# 'gtCoarse_val',", "# 'gtCoarse_train_extra'", "]", "ann_dirs", "=", "[", "'gtFine_trainvaltest/gtFine/val'", ",", "'gtFine_trainvaltest/gtFine/train'", ",", "'gtFine_trainvaltest/gtFine/test'", ",", "# 'gtCoarse/train',", "# 'gtCoarse/train_extra',", "# 'gtCoarse/val'", "]", "json_name", "=", "'instancesonly_filtered_%s.json'", "ends_in", "=", "'%s_polygons.json'", "img_id", "=", "0", "ann_id", "=", "0", "cat_id", "=", "1", "category_dict", "=", "{", "}", "category_instancesonly", "=", "[", "'person'", ",", "'rider'", ",", "'car'", ",", "'truck'", ",", "'bus'", ",", "'train'", ",", "'motorcycle'", ",", "'bicycle'", ",", "]", "for", "data_set", ",", "ann_dir", "in", "zip", "(", "sets", ",", "ann_dirs", ")", ":", "print", "(", "'Starting %s'", "%", "data_set", ")", "ann_dict", "=", "{", "}", "images", "=", "[", "]", "annotations", "=", "[", "]", "ann_dir", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "ann_dir", ")", "for", "root", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "ann_dir", ")", ":", "for", "filename", "in", "files", ":", "if", "filename", ".", "endswith", "(", "ends_in", "%", "data_set", ".", "split", "(", "'_'", ")", "[", "0", "]", ")", ":", "if", "len", "(", "images", ")", "%", "50", "==", "0", ":", "print", "(", "\"Processed %s images, %s annotations\"", "%", "(", "len", "(", "images", ")", ",", "len", "(", "annotations", ")", ")", ")", "json_ann", "=", "json", ".", "load", "(", "open", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", ")", "image", "=", "{", "}", "image", "[", "'id'", "]", "=", "img_id", "img_id", "+=", "1", "image", "[", "'width'", "]", "=", "json_ann", "[", "'imgWidth'", "]", "image", "[", "'height'", "]", "=", "json_ann", "[", "'imgHeight'", "]", "image", "[", "'file_name'", "]", "=", "filename", "[", ":", "-", "len", "(", "ends_in", "%", "data_set", ".", "split", "(", "'_'", ")", "[", "0", "]", ")", "]", "+", "'leftImg8bit.png'", "image", "[", "'seg_file_name'", "]", "=", "filename", "[", ":", "-", "len", "(", "ends_in", "%", "data_set", ".", "split", "(", "'_'", ")", "[", "0", "]", ")", "]", "+", "'%s_instanceIds.png'", "%", "data_set", ".", "split", "(", "'_'", ")", "[", "0", "]", "images", ".", "append", "(", "image", ")", "fullname", "=", "os", ".", "path", ".", "join", "(", "root", ",", "image", "[", "'seg_file_name'", "]", ")", "objects", "=", "cs", ".", "instances2dict_with_polygons", "(", "[", "fullname", "]", ",", "verbose", "=", "False", ")", "[", "fullname", "]", "for", "object_cls", "in", "objects", ":", "if", "object_cls", "not", "in", "category_instancesonly", ":", "continue", "# skip non-instance categories", "for", "obj", "in", "objects", "[", "object_cls", "]", ":", "if", "obj", "[", "'contours'", "]", "==", "[", "]", ":", "print", "(", "'Warning: empty contours.'", ")", "continue", "# skip non-instance categories", "len_p", "=", "[", "len", "(", "p", ")", "for", "p", "in", "obj", "[", "'contours'", "]", "]", "if", "min", "(", "len_p", ")", "<=", "4", ":", "print", "(", "'Warning: invalid contours.'", ")", "continue", "# skip non-instance categories", "ann", "=", "{", "}", "ann", "[", "'id'", "]", "=", "ann_id", "ann_id", "+=", "1", "ann", "[", "'image_id'", "]", "=", "image", "[", "'id'", "]", "ann", "[", "'segmentation'", "]", "=", "obj", "[", "'contours'", "]", "if", "object_cls", "not", 
"in", "category_dict", ":", "category_dict", "[", "object_cls", "]", "=", "cat_id", "cat_id", "+=", "1", "ann", "[", "'category_id'", "]", "=", "category_dict", "[", "object_cls", "]", "ann", "[", "'iscrowd'", "]", "=", "0", "ann", "[", "'area'", "]", "=", "obj", "[", "'pixelCount'", "]", "ann", "[", "'bbox'", "]", "=", "bboxs_util", ".", "xyxy_to_xywh", "(", "segms_util", ".", "polys_to_boxes", "(", "[", "ann", "[", "'segmentation'", "]", "]", ")", ")", ".", "tolist", "(", ")", "[", "0", "]", "annotations", ".", "append", "(", "ann", ")", "ann_dict", "[", "'images'", "]", "=", "images", "categories", "=", "[", "{", "\"id\"", ":", "category_dict", "[", "name", "]", ",", "\"name\"", ":", "name", "}", "for", "name", "in", "category_dict", "]", "ann_dict", "[", "'categories'", "]", "=", "categories", "ann_dict", "[", "'annotations'", "]", "=", "annotations", "print", "(", "\"Num categories: %s\"", "%", "len", "(", "categories", ")", ")", "print", "(", "\"Num images: %s\"", "%", "len", "(", "images", ")", ")", "print", "(", "\"Num annotations: %s\"", "%", "len", "(", "annotations", ")", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "out_dir", ",", "json_name", "%", "data_set", ")", ",", "'w'", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "json", ".", "dumps", "(", "ann_dict", ")", ")" ]
avg_line_len: 38.45045
score: 20.027027
def play_human(env):
    """
    Play the environment using keyboard as a human.

    Args:
        env (gym.Env): the initialized gym environment to play

    Returns:
        None

    """
    # play the game and catch a potential keyboard interrupt
    try:
        play(env, fps=env.metadata['video.frames_per_second'])
    except KeyboardInterrupt:
        pass
    # reset and close the environment
    env.close()
[ "def", "play_human", "(", "env", ")", ":", "# play the game and catch a potential keyboard interrupt", "try", ":", "play", "(", "env", ",", "fps", "=", "env", ".", "metadata", "[", "'video.frames_per_second'", "]", ")", "except", "KeyboardInterrupt", ":", "pass", "# reset and close the environment", "env", ".", "close", "(", ")" ]
avg_line_len: 22.388889
score: 22.277778
def __set_bp(self, aProcess):
    """
    Writes a breakpoint instruction at the target address.

    @type  aProcess: L{Process}
    @param aProcess: Process object.
    """
    address = self.get_address()
    self.__previousValue = aProcess.read(address, len(self.bpInstruction))
    if self.__previousValue == self.bpInstruction:
        msg = "Possible overlapping code breakpoints at %s"
        msg = msg % HexDump.address(address)
        warnings.warn(msg, BreakpointWarning)
    aProcess.write(address, self.bpInstruction)
[ "def", "__set_bp", "(", "self", ",", "aProcess", ")", ":", "address", "=", "self", ".", "get_address", "(", ")", "self", ".", "__previousValue", "=", "aProcess", ".", "read", "(", "address", ",", "len", "(", "self", ".", "bpInstruction", ")", ")", "if", "self", ".", "__previousValue", "==", "self", ".", "bpInstruction", ":", "msg", "=", "\"Possible overlapping code breakpoints at %s\"", "msg", "=", "msg", "%", "HexDump", ".", "address", "(", "address", ")", "warnings", ".", "warn", "(", "msg", ",", "BreakpointWarning", ")", "aProcess", ".", "write", "(", "address", ",", "self", ".", "bpInstruction", ")" ]
avg_line_len: 40.5
score: 13.214286
def windows_k_distinct(x, k):
    """Find all largest windows containing exactly k distinct elements

    :param x: list or string
    :param k: positive integer
    :yields: largest intervals [i, j) with len(set(x[i:j])) == k
    :complexity: `O(|x|)`
    """
    dist, i, j = 0, 0, 0           # dist = |{x[i], ..., x[j-1]}|
    occ = {xi: 0 for xi in x}      # number of occurrences in x[i:j]
    while j < len(x):
        while dist == k:           # move start of interval
            occ[x[i]] -= 1         # update counters
            if occ[x[i]] == 0:
                dist -= 1
            i += 1
        while j < len(x) and (dist < k or occ[x[j]]):
            if occ[x[j]] == 0:     # update counters
                dist += 1
            occ[x[j]] += 1
            j += 1                 # move end of interval
        if dist == k:
            yield (i, j)
[ "def", "windows_k_distinct", "(", "x", ",", "k", ")", ":", "dist", ",", "i", ",", "j", "=", "0", ",", "0", ",", "0", "# dist = |{x[i], ..., x[j-1]}|", "occ", "=", "{", "xi", ":", "0", "for", "xi", "in", "x", "}", "# number of occurrences in x[i:j]", "while", "j", "<", "len", "(", "x", ")", ":", "while", "dist", "==", "k", ":", "# move start of interval", "occ", "[", "x", "[", "i", "]", "]", "-=", "1", "# update counters", "if", "occ", "[", "x", "[", "i", "]", "]", "==", "0", ":", "dist", "-=", "1", "i", "+=", "1", "while", "j", "<", "len", "(", "x", ")", "and", "(", "dist", "<", "k", "or", "occ", "[", "x", "[", "j", "]", "]", ")", ":", "if", "occ", "[", "x", "[", "j", "]", "]", "==", "0", ":", "# update counters", "dist", "+=", "1", "occ", "[", "x", "[", "j", "]", "]", "+=", "1", "j", "+=", "1", "# move end of interval", "if", "dist", "==", "k", ":", "yield", "(", "i", ",", "j", ")" ]
avg_line_len: 38.217391
score: 17.304348
def __get_bundle(self, io_handler, bundle_id):
    """
    Retrieves the Bundle object with the given bundle ID.
    Writes errors through the I/O handler if any.

    :param io_handler: I/O Handler
    :param bundle_id: String or integer bundle ID
    :return: The Bundle object matching the given ID, None if not found
    """
    try:
        bundle_id = int(bundle_id)
        return self._context.get_bundle(bundle_id)
    except (TypeError, ValueError):
        io_handler.write_line("Invalid bundle ID: {0}", bundle_id)
    except constants.BundleException:
        io_handler.write_line("Unknown bundle: {0}", bundle_id)
[ "def", "__get_bundle", "(", "self", ",", "io_handler", ",", "bundle_id", ")", ":", "try", ":", "bundle_id", "=", "int", "(", "bundle_id", ")", "return", "self", ".", "_context", ".", "get_bundle", "(", "bundle_id", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "io_handler", ".", "write_line", "(", "\"Invalid bundle ID: {0}\"", ",", "bundle_id", ")", "except", "constants", ".", "BundleException", ":", "io_handler", ".", "write_line", "(", "\"Unknown bundle: {0}\"", ",", "bundle_id", ")" ]
avg_line_len: 41.8125
score: 14.6875
def root_item_selected(self, item):
    """Root item has been selected: expanding it and collapsing others"""
    if self.show_all_files:
        return
    for root_item in self.get_top_level_items():
        if root_item is item:
            self.expandItem(root_item)
        else:
            self.collapseItem(root_item)
[ "def", "root_item_selected", "(", "self", ",", "item", ")", ":", "if", "self", ".", "show_all_files", ":", "return", "for", "root_item", "in", "self", ".", "get_top_level_items", "(", ")", ":", "if", "root_item", "is", "item", ":", "self", ".", "expandItem", "(", "root_item", ")", "else", ":", "self", ".", "collapseItem", "(", "root_item", ")" ]
avg_line_len: 39.666667
score: 9
def reportPrettyData(root, worker, job, job_types, options):
    """ print the important bits out.
    """
    out_str = "Batch System: %s\n" % root.batch_system
    out_str += ("Default Cores: %s Default Memory: %s\n"
                "Max Cores: %s\n" % (
                    reportNumber(get(root, "default_cores"), options),
                    reportMemory(get(root, "default_memory"), options, isBytes=True),
                    reportNumber(get(root, "max_cores"), options),
                ))
    out_str += ("Total Clock: %s Total Runtime: %s\n" % (
        reportTime(get(root, "total_clock"), options),
        reportTime(get(root, "total_run_time"), options),
    ))
    job_types = sortJobs(job_types, options)
    columnWidths = computeColumnWidths(job_types, worker, job, options)
    out_str += "Worker\n"
    out_str += sprintTag("worker", worker, options, columnWidths=columnWidths)
    out_str += "Job\n"
    out_str += sprintTag("job", job, options, columnWidths=columnWidths)
    for t in job_types:
        out_str += " %s\n" % t.name
        out_str += sprintTag(t.name, t, options, columnWidths=columnWidths)
    return out_str
[ "def", "reportPrettyData", "(", "root", ",", "worker", ",", "job", ",", "job_types", ",", "options", ")", ":", "out_str", "=", "\"Batch System: %s\\n\"", "%", "root", ".", "batch_system", "out_str", "+=", "(", "\"Default Cores: %s Default Memory: %s\\n\"", "\"Max Cores: %s\\n\"", "%", "(", "reportNumber", "(", "get", "(", "root", ",", "\"default_cores\"", ")", ",", "options", ")", ",", "reportMemory", "(", "get", "(", "root", ",", "\"default_memory\"", ")", ",", "options", ",", "isBytes", "=", "True", ")", ",", "reportNumber", "(", "get", "(", "root", ",", "\"max_cores\"", ")", ",", "options", ")", ",", ")", ")", "out_str", "+=", "(", "\"Total Clock: %s Total Runtime: %s\\n\"", "%", "(", "reportTime", "(", "get", "(", "root", ",", "\"total_clock\"", ")", ",", "options", ")", ",", "reportTime", "(", "get", "(", "root", ",", "\"total_run_time\"", ")", ",", "options", ")", ",", ")", ")", "job_types", "=", "sortJobs", "(", "job_types", ",", "options", ")", "columnWidths", "=", "computeColumnWidths", "(", "job_types", ",", "worker", ",", "job", ",", "options", ")", "out_str", "+=", "\"Worker\\n\"", "out_str", "+=", "sprintTag", "(", "\"worker\"", ",", "worker", ",", "options", ",", "columnWidths", "=", "columnWidths", ")", "out_str", "+=", "\"Job\\n\"", "out_str", "+=", "sprintTag", "(", "\"job\"", ",", "job", ",", "options", ",", "columnWidths", "=", "columnWidths", ")", "for", "t", "in", "job_types", ":", "out_str", "+=", "\" %s\\n\"", "%", "t", ".", "name", "out_str", "+=", "sprintTag", "(", "t", ".", "name", ",", "t", ",", "options", ",", "columnWidths", "=", "columnWidths", ")", "return", "out_str" ]
avg_line_len: 45.375
score: 18.541667
def advance(self):
    """Carry out one iteration of Arnoldi."""
    if self.iter >= self.maxiter:
        raise ArgumentError('Maximum number of iterations reached.')
    if self.invariant:
        raise ArgumentError('Krylov subspace was found to be invariant '
                            'in the previous iteration.')

    N = self.V.shape[0]
    k = self.iter

    # the matrix-vector multiplication
    Av = self.A * self.V[:, [k]]

    if self.ortho == 'house':
        # Householder
        for j in range(k+1):
            Av[j:] = self.houses[j].apply(Av[j:])
            Av[j] *= numpy.conj(self.houses[j].alpha)
        if k+1 < N:
            house = House(Av[k+1:])
            self.houses.append(house)
            Av[k+1:] = house.apply(Av[k+1:]) * numpy.conj(house.alpha)
            self.H[:k+2, [k]] = Av[:k+2]
        else:
            self.H[:k+1, [k]] = Av[:k+1]
        # next line is safe due to the multiplications with alpha
        self.H[k+1, k] = numpy.abs(self.H[k+1, k])
        if self.H[k+1, k] / numpy.linalg.norm(self.H[:k+2, :k+1], 2)\
                <= 1e-14:
            self.invariant = True
        else:
            vnew = numpy.zeros((N, 1), dtype=self.dtype)
            vnew[k+1] = 1
            for j in range(k+1, -1, -1):
                vnew[j:] = self.houses[j].apply(vnew[j:])
            self.V[:, [k+1]] = vnew * self.houses[-1].alpha
    else:
        # determine vectors for orthogonalization
        start = 0

        # Lanczos?
        if self.ortho == 'lanczos':
            start = k
            if k > 0:
                self.H[k-1, k] = self.H[k, k-1]
                if self.M is not None \
                        and not isinstance(self.M, IdentityLinearOperator):
                    Av -= self.H[k, k-1] * self.P[:, [k-1]]
                else:
                    Av -= self.H[k, k-1] * self.V[:, [k-1]]

        # (double) modified Gram-Schmidt
        for reortho in range(self.reorthos+1):
            # orthogonalize
            for j in range(start, k+1):
                alpha = inner(self.V[:, [j]], Av, ip_B=self.ip_B)[0, 0]
                if self.ortho == 'lanczos':
                    # check if alpha is real
                    if abs(alpha.imag) > 1e-10:
                        warnings.warn(
                            'Iter {0}: abs(alpha.imag) = {1} > 1e-10. '
                            'Is your operator self-adjoint in the '
                            'provided inner product?'
                            .format(self.iter, abs(alpha.imag)))
                    alpha = alpha.real
                self.H[j, k] += alpha
                if self.M is not None:
                    Av -= alpha * self.P[:, [j]]
                else:
                    Av -= alpha * self.V[:, [j]]
        if self.M is not None:
            MAv = self.M * Av
            self.H[k+1, k] = norm(Av, MAv, ip_B=self.ip_B)
        else:
            self.H[k+1, k] = norm(Av, ip_B=self.ip_B)

        if self.H[k+1, k] / numpy.linalg.norm(self.H[:k+2, :k+1], 2)\
                <= 1e-14:
            self.invariant = True
        else:
            if self.M is not None:
                self.P[:, [k+1]] = Av / self.H[k+1, k]
                self.V[:, [k+1]] = MAv / self.H[k+1, k]
            else:
                self.V[:, [k+1]] = Av / self.H[k+1, k]

    # increase iteration counter
    self.iter += 1
[ "def", "advance", "(", "self", ")", ":", "if", "self", ".", "iter", ">=", "self", ".", "maxiter", ":", "raise", "ArgumentError", "(", "'Maximum number of iterations reached.'", ")", "if", "self", ".", "invariant", ":", "raise", "ArgumentError", "(", "'Krylov subspace was found to be invariant '", "'in the previous iteration.'", ")", "N", "=", "self", ".", "V", ".", "shape", "[", "0", "]", "k", "=", "self", ".", "iter", "# the matrix-vector multiplication", "Av", "=", "self", ".", "A", "*", "self", ".", "V", "[", ":", ",", "[", "k", "]", "]", "if", "self", ".", "ortho", "==", "'house'", ":", "# Householder", "for", "j", "in", "range", "(", "k", "+", "1", ")", ":", "Av", "[", "j", ":", "]", "=", "self", ".", "houses", "[", "j", "]", ".", "apply", "(", "Av", "[", "j", ":", "]", ")", "Av", "[", "j", "]", "*=", "numpy", ".", "conj", "(", "self", ".", "houses", "[", "j", "]", ".", "alpha", ")", "if", "k", "+", "1", "<", "N", ":", "house", "=", "House", "(", "Av", "[", "k", "+", "1", ":", "]", ")", "self", ".", "houses", ".", "append", "(", "house", ")", "Av", "[", "k", "+", "1", ":", "]", "=", "house", ".", "apply", "(", "Av", "[", "k", "+", "1", ":", "]", ")", "*", "numpy", ".", "conj", "(", "house", ".", "alpha", ")", "self", ".", "H", "[", ":", "k", "+", "2", ",", "[", "k", "]", "]", "=", "Av", "[", ":", "k", "+", "2", "]", "else", ":", "self", ".", "H", "[", ":", "k", "+", "1", ",", "[", "k", "]", "]", "=", "Av", "[", ":", "k", "+", "1", "]", "# next line is safe due to the multiplications with alpha", "self", ".", "H", "[", "k", "+", "1", ",", "k", "]", "=", "numpy", ".", "abs", "(", "self", ".", "H", "[", "k", "+", "1", ",", "k", "]", ")", "if", "self", ".", "H", "[", "k", "+", "1", ",", "k", "]", "/", "numpy", ".", "linalg", ".", "norm", "(", "self", ".", "H", "[", ":", "k", "+", "2", ",", ":", "k", "+", "1", "]", ",", "2", ")", "<=", "1e-14", ":", "self", ".", "invariant", "=", "True", "else", ":", "vnew", "=", "numpy", ".", "zeros", "(", "(", "N", ",", "1", ")", ",", "dtype", "=", "self", ".", "dtype", ")", "vnew", "[", "k", "+", "1", "]", "=", "1", "for", "j", "in", "range", "(", "k", "+", "1", ",", "-", "1", ",", "-", "1", ")", ":", "vnew", "[", "j", ":", "]", "=", "self", ".", "houses", "[", "j", "]", ".", "apply", "(", "vnew", "[", "j", ":", "]", ")", "self", ".", "V", "[", ":", ",", "[", "k", "+", "1", "]", "]", "=", "vnew", "*", "self", ".", "houses", "[", "-", "1", "]", ".", "alpha", "else", ":", "# determine vectors for orthogonalization", "start", "=", "0", "# Lanczos?", "if", "self", ".", "ortho", "==", "'lanczos'", ":", "start", "=", "k", "if", "k", ">", "0", ":", "self", ".", "H", "[", "k", "-", "1", ",", "k", "]", "=", "self", ".", "H", "[", "k", ",", "k", "-", "1", "]", "if", "self", ".", "M", "is", "not", "None", "and", "not", "isinstance", "(", "self", ".", "M", ",", "IdentityLinearOperator", ")", ":", "Av", "-=", "self", ".", "H", "[", "k", ",", "k", "-", "1", "]", "*", "self", ".", "P", "[", ":", ",", "[", "k", "-", "1", "]", "]", "else", ":", "Av", "-=", "self", ".", "H", "[", "k", ",", "k", "-", "1", "]", "*", "self", ".", "V", "[", ":", ",", "[", "k", "-", "1", "]", "]", "# (double) modified Gram-Schmidt", "for", "reortho", "in", "range", "(", "self", ".", "reorthos", "+", "1", ")", ":", "# orthogonalize", "for", "j", "in", "range", "(", "start", ",", "k", "+", "1", ")", ":", "alpha", "=", "inner", "(", "self", ".", "V", "[", ":", ",", "[", "j", "]", "]", ",", "Av", ",", "ip_B", "=", "self", ".", "ip_B", ")", "[", "0", ",", "0", "]", "if", "self", ".", "ortho", 
"==", "'lanczos'", ":", "# check if alpha is real", "if", "abs", "(", "alpha", ".", "imag", ")", ">", "1e-10", ":", "warnings", ".", "warn", "(", "'Iter {0}: abs(alpha.imag) = {1} > 1e-10. '", "'Is your operator self-adjoint in the '", "'provided inner product?'", ".", "format", "(", "self", ".", "iter", ",", "abs", "(", "alpha", ".", "imag", ")", ")", ")", "alpha", "=", "alpha", ".", "real", "self", ".", "H", "[", "j", ",", "k", "]", "+=", "alpha", "if", "self", ".", "M", "is", "not", "None", ":", "Av", "-=", "alpha", "*", "self", ".", "P", "[", ":", ",", "[", "j", "]", "]", "else", ":", "Av", "-=", "alpha", "*", "self", ".", "V", "[", ":", ",", "[", "j", "]", "]", "if", "self", ".", "M", "is", "not", "None", ":", "MAv", "=", "self", ".", "M", "*", "Av", "self", ".", "H", "[", "k", "+", "1", ",", "k", "]", "=", "norm", "(", "Av", ",", "MAv", ",", "ip_B", "=", "self", ".", "ip_B", ")", "else", ":", "self", ".", "H", "[", "k", "+", "1", ",", "k", "]", "=", "norm", "(", "Av", ",", "ip_B", "=", "self", ".", "ip_B", ")", "if", "self", ".", "H", "[", "k", "+", "1", ",", "k", "]", "/", "numpy", ".", "linalg", ".", "norm", "(", "self", ".", "H", "[", ":", "k", "+", "2", ",", ":", "k", "+", "1", "]", ",", "2", ")", "<=", "1e-14", ":", "self", ".", "invariant", "=", "True", "else", ":", "if", "self", ".", "M", "is", "not", "None", ":", "self", ".", "P", "[", ":", ",", "[", "k", "+", "1", "]", "]", "=", "Av", "/", "self", ".", "H", "[", "k", "+", "1", ",", "k", "]", "self", ".", "V", "[", ":", ",", "[", "k", "+", "1", "]", "]", "=", "MAv", "/", "self", ".", "H", "[", "k", "+", "1", ",", "k", "]", "else", ":", "self", ".", "V", "[", ":", ",", "[", "k", "+", "1", "]", "]", "=", "Av", "/", "self", ".", "H", "[", "k", "+", "1", ",", "k", "]", "# increase iteration counter", "self", ".", "iter", "+=", "1" ]
avg_line_len: 41.045455
score: 16.488636
def create_thread(cls, session, conversation, thread, imported=False):
    """Create a conversation thread.

    Please note that threads cannot be added to conversations with 100
    threads (or more), if attempted the API will respond with HTTP 412.

    Args:
        conversation (helpscout.models.Conversation): The conversation
            that the thread is being added to.
        session (requests.sessions.Session): Authenticated session.
        thread (helpscout.models.Thread): The thread to be created.
        imported (bool, optional): The ``imported`` request parameter
            enables conversations to be created for historical purposes
            (i.e. if moving from a different platform, you can import
            your history). When ``imported`` is set to ``True``, no
            outgoing emails or notifications will be generated.

    Returns:
        helpscout.models.Conversation: Conversation including newly
            created thread.
    """
    return super(Conversations, cls).create(
        session,
        thread,
        endpoint_override='/conversations/%s.json' % conversation.id,
        imported=imported,
    )
[ "def", "create_thread", "(", "cls", ",", "session", ",", "conversation", ",", "thread", ",", "imported", "=", "False", ")", ":", "return", "super", "(", "Conversations", ",", "cls", ")", ".", "create", "(", "session", ",", "thread", ",", "endpoint_override", "=", "'/conversations/%s.json'", "%", "conversation", ".", "id", ",", "imported", "=", "imported", ",", ")" ]
avg_line_len: 44.814815
score: 25.888889
def check(self):
    """
    Compare the :func:`os.stat` for the pam_env style environmnt file
    `path` with the previous result `old_st`, which may be :data:`None`
    if the previous stat attempt failed. Reload its contents if the file
    has changed or appeared since last attempt.

    :returns:
        New :func:`os.stat` result. The new call to :func:`reload_env`
        should pass it as the value of `old_st`.
    """
    st = self._stat()
    if self._st == st:
        return
    self._st = st
    self._remove_existing()
    if st is None:
        LOG.debug('%r: file has disappeared', self)
    else:
        self._on_file_changed()
[ "def", "check", "(", "self", ")", ":", "st", "=", "self", ".", "_stat", "(", ")", "if", "self", ".", "_st", "==", "st", ":", "return", "self", ".", "_st", "=", "st", "self", ".", "_remove_existing", "(", ")", "if", "st", "is", "None", ":", "LOG", ".", "debug", "(", "'%r: file has disappeared'", ",", "self", ")", "else", ":", "self", ".", "_on_file_changed", "(", ")" ]
avg_line_len: 31.909091
score: 21.454545
def stop(self):
    """
    Stop this instance.

    :return: None
    """
    instance_status = Instance.InstanceStatus(status='Terminated')
    xml_content = instance_status.serialize()

    headers = {'Content-Type': 'application/xml'}
    self._client.put(self.resource(), xml_content, headers=headers)
[ "def", "stop", "(", "self", ")", ":", "instance_status", "=", "Instance", ".", "InstanceStatus", "(", "status", "=", "'Terminated'", ")", "xml_content", "=", "instance_status", ".", "serialize", "(", ")", "headers", "=", "{", "'Content-Type'", ":", "'application/xml'", "}", "self", ".", "_client", ".", "put", "(", "self", ".", "resource", "(", ")", ",", "xml_content", ",", "headers", "=", "headers", ")" ]
avg_line_len: 27.333333
score: 21.666667
def _ParsePathSpecification(
    self, knowledge_base, searcher, file_system, path_specification,
    path_separator):
  """Parses a file system for a preprocessing attribute.

  Args:
    knowledge_base (KnowledgeBase): to fill with preprocessing information.
    searcher (dfvfs.FileSystemSearcher): file system searcher to preprocess
        the file system.
    file_system (dfvfs.FileSystem): file system to be preprocessed.
    path_specification (dfvfs.PathSpec): path specification that contains
        the artifact value data.
    path_separator (str): path segment separator.

  Raises:
    PreProcessFail: if the preprocessing fails.
  """
  try:
    file_entry = searcher.GetFileEntryByPathSpec(path_specification)
  except IOError as exception:
    relative_path = searcher.GetRelativePath(path_specification)
    if path_separator != file_system.PATH_SEPARATOR:
      relative_path_segments = file_system.SplitPath(relative_path)
      relative_path = '{0:s}{1:s}'.format(
          path_separator, path_separator.join(relative_path_segments))

    raise errors.PreProcessFail((
        'Unable to retrieve file entry: {0:s} with error: '
        '{1!s}').format(relative_path, exception))

  if file_entry:
    self._ParseFileEntry(knowledge_base, file_entry)
[ "def", "_ParsePathSpecification", "(", "self", ",", "knowledge_base", ",", "searcher", ",", "file_system", ",", "path_specification", ",", "path_separator", ")", ":", "try", ":", "file_entry", "=", "searcher", ".", "GetFileEntryByPathSpec", "(", "path_specification", ")", "except", "IOError", "as", "exception", ":", "relative_path", "=", "searcher", ".", "GetRelativePath", "(", "path_specification", ")", "if", "path_separator", "!=", "file_system", ".", "PATH_SEPARATOR", ":", "relative_path_segments", "=", "file_system", ".", "SplitPath", "(", "relative_path", ")", "relative_path", "=", "'{0:s}{1:s}'", ".", "format", "(", "path_separator", ",", "path_separator", ".", "join", "(", "relative_path_segments", ")", ")", "raise", "errors", ".", "PreProcessFail", "(", "(", "'Unable to retrieve file entry: {0:s} with error: '", "'{1!s}'", ")", ".", "format", "(", "relative_path", ",", "exception", ")", ")", "if", "file_entry", ":", "self", ".", "_ParseFileEntry", "(", "knowledge_base", ",", "file_entry", ")" ]
avg_line_len: 40.5625
score: 22.09375
def export(self, id, exclude_captures=False):  # pylint: disable=invalid-name,redefined-builtin
    """Export a result.

    :param id: Result ID as an int.
    :param exclude_captures: If bool `True`, don't export capture files
    :rtype: tuple `(io.BytesIO, 'filename')`
    """
    return self.service.export(self.base, id, params={'exclude_captures': exclude_captures})
[ "def", "export", "(", "self", ",", "id", ",", "exclude_captures", "=", "False", ")", ":", "# pylint: disable=invalid-name,redefined-builtin", "return", "self", ".", "service", ".", "export", "(", "self", ".", "base", ",", "id", ",", "params", "=", "{", "'exclude_captures'", ":", "exclude_captures", "}", ")" ]
avg_line_len: 48.75
score: 24.25
def prop2b(gm, pvinit, dt):
    """
    Given a central mass and the state of massless body at time t_0,
    this routine determines the state as predicted by a two-body force
    model at time t_0 + dt.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/prop2b_c.html

    :param gm: Gravity of the central mass.
    :type gm: float
    :param pvinit: Initial state from which to propagate a state.
    :type pvinit: 6-Element Array of floats
    :param dt: Time offset from initial state to propagate to.
    :type dt: float
    :return: The propagated state.
    :rtype: 6-Element Array of floats
    """
    gm = ctypes.c_double(gm)
    pvinit = stypes.toDoubleVector(pvinit)
    dt = ctypes.c_double(dt)
    pvprop = stypes.emptyDoubleVector(6)
    libspice.prop2b_c(gm, pvinit, dt, pvprop)
    return stypes.cVectorToPython(pvprop)
[ "def", "prop2b", "(", "gm", ",", "pvinit", ",", "dt", ")", ":", "gm", "=", "ctypes", ".", "c_double", "(", "gm", ")", "pvinit", "=", "stypes", ".", "toDoubleVector", "(", "pvinit", ")", "dt", "=", "ctypes", ".", "c_double", "(", "dt", ")", "pvprop", "=", "stypes", ".", "emptyDoubleVector", "(", "6", ")", "libspice", ".", "prop2b_c", "(", "gm", ",", "pvinit", ",", "dt", ",", "pvprop", ")", "return", "stypes", ".", "cVectorToPython", "(", "pvprop", ")" ]
avg_line_len: 35.869565
score: 13.956522
def add_eval(self, agent, e, fr=None):
    """Add or change agent's evaluation of the artifact with given framing
    information.

    :param agent: Name of the agent which did the evaluation.
    :param float e: Evaluation for the artifact.
    :param object fr: Framing information for the evaluation.
    """
    self._evals[agent.name] = e
    self._framings[agent.name] = fr
[ "def", "add_eval", "(", "self", ",", "agent", ",", "e", ",", "fr", "=", "None", ")", ":", "self", ".", "_evals", "[", "agent", ".", "name", "]", "=", "e", "self", ".", "_framings", "[", "agent", ".", "name", "]", "=", "fr" ]
avg_line_len: 40.3
score: 13
def count_sequences(infile):
    '''Returns the number of sequences in a file'''
    seq_reader = sequences.file_reader(infile)
    n = 0
    for seq in seq_reader:
        n += 1
    return n
[ "def", "count_sequences", "(", "infile", ")", ":", "seq_reader", "=", "sequences", ".", "file_reader", "(", "infile", ")", "n", "=", "0", "for", "seq", "in", "seq_reader", ":", "n", "+=", "1", "return", "n" ]
avg_line_len: 26.571429
score: 18.285714
def add_minrmsd_to_ref(self, ref, ref_frame=0, atom_indices=None, precentered=False):
    r"""
    Adds the minimum root-mean-square-deviation (minrmsd) with respect to a reference structure
    to the feature list.

    Parameters
    ----------
    ref:
        Reference structure for computing the minrmsd. Can be of two types:

            1. :py:obj:`mdtraj.Trajectory` object
            2. filename for mdtraj to load. In this case, only the
               :py:obj:`ref_frame` of that file will be used.

    ref_frame: integer, default=0
        Reference frame of the filename specified in :py:obj:`ref`.
        This parameter has no effect if :py:obj:`ref` is not a filename.

    atom_indices: array_like, default=None
        Atoms that will be used for:

            1. aligning the target and reference geometries.
            2. computing rmsd after the alignment.

        If left to None, all atoms of :py:obj:`ref` will be used.

    precentered: bool, default=False
        Use this boolean at your own risk to let mdtraj know that the target
        conformations are already centered at the origin, i.e., their
        (uniformly weighted) center of mass lies at the origin.
        This will speed up the computation of the rmsd.
    """
    from .misc import MinRmsdFeature
    f = MinRmsdFeature(ref, ref_frame=ref_frame, atom_indices=atom_indices,
                       topology=self.topology, precentered=precentered)
    self.__add_feature(f)
[ "def", "add_minrmsd_to_ref", "(", "self", ",", "ref", ",", "ref_frame", "=", "0", ",", "atom_indices", "=", "None", ",", "precentered", "=", "False", ")", ":", "from", ".", "misc", "import", "MinRmsdFeature", "f", "=", "MinRmsdFeature", "(", "ref", ",", "ref_frame", "=", "ref_frame", ",", "atom_indices", "=", "atom_indices", ",", "topology", "=", "self", ".", "topology", ",", "precentered", "=", "precentered", ")", "self", ".", "__add_feature", "(", "f", ")" ]
avg_line_len: 47.28125
score: 29.4375
def searchsorted(arr, N, x):
    """N is length of arr
    """
    L = 0
    R = N-1
    done = False
    m = (L+R)//2
    while not done:
        if arr[m] < x:
            L = m + 1
        elif arr[m] > x:
            R = m - 1
        elif arr[m] == x:
            done = True
        m = (L+R)//2
        if L>R:
            done = True
    return L
[ "def", "searchsorted", "(", "arr", ",", "N", ",", "x", ")", ":", "L", "=", "0", "R", "=", "N", "-", "1", "done", "=", "False", "m", "=", "(", "L", "+", "R", ")", "//", "2", "while", "not", "done", ":", "if", "arr", "[", "m", "]", "<", "x", ":", "L", "=", "m", "+", "1", "elif", "arr", "[", "m", "]", ">", "x", ":", "R", "=", "m", "-", "1", "elif", "arr", "[", "m", "]", "==", "x", ":", "done", "=", "True", "m", "=", "(", "L", "+", "R", ")", "//", "2", "if", "L", ">", "R", ":", "done", "=", "True", "return", "L" ]
avg_line_len: 18.722222
score: 18.611111
def default_file_encoder():
    """
    Get default encoder cwr file
    :return:
    """
    config = CWRConfiguration()

    field_configs = config.load_field_config('table')
    field_configs.update(config.load_field_config('common'))

    field_values = CWRTables()

    for entry in field_configs.values():
        if 'source' in entry:
            values_id = entry['source']
            entry['values'] = field_values.get_data(values_id)

    record_configs = config.load_record_config('common')

    return CwrFileEncoder(record_configs, field_configs)
[ "def", "default_file_encoder", "(", ")", ":", "config", "=", "CWRConfiguration", "(", ")", "field_configs", "=", "config", ".", "load_field_config", "(", "'table'", ")", "field_configs", ".", "update", "(", "config", ".", "load_field_config", "(", "'common'", ")", ")", "field_values", "=", "CWRTables", "(", ")", "for", "entry", "in", "field_configs", ".", "values", "(", ")", ":", "if", "'source'", "in", "entry", ":", "values_id", "=", "entry", "[", "'source'", "]", "entry", "[", "'values'", "]", "=", "field_values", ".", "get_data", "(", "values_id", ")", "record_configs", "=", "config", ".", "load_record_config", "(", "'common'", ")", "return", "CwrFileEncoder", "(", "record_configs", ",", "field_configs", ")" ]
avg_line_len: 30.055556
score: 15.944444
def attribute_value(self, doc: Document, attribute_name: str):
    """
    Access data using attribute name rather than the numeric indices

    Returns: the value for the attribute

    """
    return doc.cdr_document.get(self.header_translation_table[attribute_name])
[ "def", "attribute_value", "(", "self", ",", "doc", ":", "Document", ",", "attribute_name", ":", "str", ")", ":", "return", "doc", ".", "cdr_document", ".", "get", "(", "self", ".", "header_translation_table", "[", "attribute_name", "]", ")" ]
avg_line_len: 35.25
score: 22.5
def generate_access_token_from_authorization_code(request, client):
  """ Generates a new AccessToken from a request with an authorization code.

  Read the specification: http://tools.ietf.org/html/rfc6749#section-4.1.3
  """
  authorization_code_value = request.POST.get('code')
  if not authorization_code_value:
    raise InvalidRequest('no "code" provided')

  try:
    authorization_code = AuthorizationCode.objects.get(
        value=authorization_code_value, client=client)
  except AuthorizationCode.DoesNotExist:
    raise InvalidGrant(
        '"{}" is not a valid "code"'.format(authorization_code_value))

  if authorization_code.is_expired():
    if authorization_code.invalidated:
      for access_token in authorization_code.access_tokens.all():
        access_token.invalidate()
    raise InvalidGrant('provided "code" is expired')

  # From http://tools.ietf.org/html/rfc6749#section-4.1.3:
  #
  #     redirect_uri
  #         REQUIRED, if the "redirect_uri" parameter was included in the
  #         authorization request as described in Section 4.1.1, and their
  #         values MUST be identical.
  #
  # and later,
  #
  #     The authorization server MUST:
  #
  #     [ ... snip ... ]
  #
  #     o  ensure that the "redirect_uri" parameter is present if the
  #        "redirect_uri" parameter was included in the initial authorization
  #        request as described in Section 4.1.1, and if included ensure that
  #        their values are identical.
  #
  # The 'redirect_uri' attribute of an AuthorizationCode will only be set if
  # the value was included as a parameter during the related authorization
  # request.
  if (authorization_code.redirect_uri and
      authorization_code.redirect_uri != request.POST.get('redirect_uri')):
    raise InvalidRequest('"redirect_uri" value must match the value from '
                         'the authorization code request')

  new_access_token = AccessToken.objects.create(
      user=authorization_code.user,
      client=authorization_code.client)
  new_access_token.scopes = authorization_code.scopes.all()
  new_access_token.authorization_code = authorization_code
  new_access_token.save()

  # Mark this token as expired so that any future requests with the same token
  # can be handled with the correct behavior. From
  # http://tools.ietf.org/html/rfc6749#section-4.1.2 :
  #     The client MUST NOT use the authorization code more than once.
  authorization_code.invalidate()

  return new_access_token
[ "def", "generate_access_token_from_authorization_code", "(", "request", ",", "client", ")", ":", "authorization_code_value", "=", "request", ".", "POST", ".", "get", "(", "'code'", ")", "if", "not", "authorization_code_value", ":", "raise", "InvalidRequest", "(", "'no \"code\" provided'", ")", "try", ":", "authorization_code", "=", "AuthorizationCode", ".", "objects", ".", "get", "(", "value", "=", "authorization_code_value", ",", "client", "=", "client", ")", "except", "AuthorizationCode", ".", "DoesNotExist", ":", "raise", "InvalidGrant", "(", "'\"{}\" is not a valid \"code\"'", ".", "format", "(", "authorization_code_value", ")", ")", "if", "authorization_code", ".", "is_expired", "(", ")", ":", "if", "authorization_code", ".", "invalidated", ":", "for", "access_token", "in", "authorization_code", ".", "access_tokens", ".", "all", "(", ")", ":", "access_token", ".", "invalidate", "(", ")", "raise", "InvalidGrant", "(", "'provided \"code\" is expired'", ")", "# From http://tools.ietf.org/html/rfc6749#section-4.1.3:", "#", "# redirect_uri", "# REQUIRED, if the \"redirect_uri\" parameter was included in the", "# authorization request as described in Section 4.1.1, and their", "# values MUST be identical.", "#", "# and later,", "#", "# The authorization server MUST:", "#", "# [ ... snip ... ]", "#", "# o ensure that the \"redirect_uri\" parameter is present if the", "# \"redirect_uri\" parameter was included in the initial authorization", "# request as described in Section 4.1.1, and if included ensure that", "# their values are identical.", "#", "# The 'redirect_uri' attribute of an AuthorizationCode will only be set if", "# the value was included as a parameter during the related authorization", "# request.", "if", "(", "authorization_code", ".", "redirect_uri", "and", "authorization_code", ".", "redirect_uri", "!=", "request", ".", "POST", ".", "get", "(", "'redirect_uri'", ")", ")", ":", "raise", "InvalidRequest", "(", "'\"redirect_uri\" value must match the value from '", "'the authorization code request'", ")", "new_access_token", "=", "AccessToken", ".", "objects", ".", "create", "(", "user", "=", "authorization_code", ".", "user", ",", "client", "=", "authorization_code", ".", "client", ")", "new_access_token", ".", "scopes", "=", "authorization_code", ".", "scopes", ".", "all", "(", ")", "new_access_token", ".", "authorization_code", "=", "authorization_code", "new_access_token", ".", "save", "(", ")", "# Mark this token as expired so that any future requests with the same token", "# can be handled with the correct behavior. From", "# http://tools.ietf.org/html/rfc6749#section-4.1.2 :", "# The client MUST NOT use the authorization code more than once.", "authorization_code", ".", "invalidate", "(", ")", "return", "new_access_token" ]
avg_line_len: 37.476923
score: 22.384615
def _writeMzmlIndexList(xmlWriter, spectrumIndexList, chromatogramIndexList):
    """ #TODO: docstring

    :param xmlWriter: #TODO: docstring
    :param spectrumIndexList: #TODO: docstring
    :param chromatogramIndexList: #TODO: docstring
    """
    counts = 0
    if spectrumIndexList:
        counts += 1
    if chromatogramIndexList:
        counts += 1
    if counts == 0:
        return None

    #Create indexList node
    xmlIndexList = xmlWriter.element('indexList', {'count': str(counts)})
    xmlIndexList.__enter__()
    xmlWriter.write('\n')

    _writeIndexListElement(xmlWriter, 'spectrum', spectrumIndexList)
    _writeIndexListElement(xmlWriter, 'chromatogram', chromatogramIndexList)

    #Close indexList node
    xmlIndexList.__exit__(None, None, None)
    xmlWriter.write('\n')
[ "def", "_writeMzmlIndexList", "(", "xmlWriter", ",", "spectrumIndexList", ",", "chromatogramIndexList", ")", ":", "counts", "=", "0", "if", "spectrumIndexList", ":", "counts", "+=", "1", "if", "chromatogramIndexList", ":", "counts", "+=", "1", "if", "counts", "==", "0", ":", "return", "None", "#Create indexList node", "xmlIndexList", "=", "xmlWriter", ".", "element", "(", "'indexList'", ",", "{", "'count'", ":", "str", "(", "counts", ")", "}", ")", "xmlIndexList", ".", "__enter__", "(", ")", "xmlWriter", ".", "write", "(", "'\\n'", ")", "_writeIndexListElement", "(", "xmlWriter", ",", "'spectrum'", ",", "spectrumIndexList", ")", "_writeIndexListElement", "(", "xmlWriter", ",", "'chromatogram'", ",", "chromatogramIndexList", ")", "#Close indexList node", "xmlIndexList", ".", "__exit__", "(", "None", ",", "None", ",", "None", ")", "xmlWriter", ".", "write", "(", "'\\n'", ")" ]
avg_line_len: 31
score: 19.28
def update_tabs_text(self):
    """Update the text from the tabs."""
    # This is needed to prevent that hanged consoles make reference
    # to an index that doesn't exist. See issue 4881
    try:
        for index, fname in enumerate(self.filenames):
            client = self.clients[index]
            if fname:
                self.rename_client_tab(client,
                                       self.disambiguate_fname(fname))
            else:
                self.rename_client_tab(client, None)
    except IndexError:
        pass
[ "def", "update_tabs_text", "(", "self", ")", ":", "# This is needed to prevent that hanged consoles make reference\r", "# to an index that doesn't exist. See issue 4881\r", "try", ":", "for", "index", ",", "fname", "in", "enumerate", "(", "self", ".", "filenames", ")", ":", "client", "=", "self", ".", "clients", "[", "index", "]", "if", "fname", ":", "self", ".", "rename_client_tab", "(", "client", ",", "self", ".", "disambiguate_fname", "(", "fname", ")", ")", "else", ":", "self", ".", "rename_client_tab", "(", "client", ",", "None", ")", "except", "IndexError", ":", "pass" ]
avg_line_len: 42.357143
score: 17.428571
def remove(self, auto_confirm=False, verbose=False):
    """Remove paths in ``self.paths`` with confirmation (unless
    ``auto_confirm`` is True)."""

    if not self.paths:
        logger.info(
            "Can't uninstall '%s'. No files were found to uninstall.",
            self.dist.project_name,
        )
        return

    dist_name_version = (
        self.dist.project_name + "-" + self.dist.version
    )
    logger.info('Uninstalling %s:', dist_name_version)

    with indent_log():
        if auto_confirm or self._allowed_to_proceed(verbose):
            moved = self._moved_paths

            for_rename = compress_for_rename(self.paths)

            for path in sorted(compact(for_rename)):
                moved.stash(path)
                logger.debug('Removing file or directory %s', path)

            for pth in self.pth.values():
                pth.remove()

            logger.info('Successfully uninstalled %s', dist_name_version)
[ "def", "remove", "(", "self", ",", "auto_confirm", "=", "False", ",", "verbose", "=", "False", ")", ":", "if", "not", "self", ".", "paths", ":", "logger", ".", "info", "(", "\"Can't uninstall '%s'. No files were found to uninstall.\"", ",", "self", ".", "dist", ".", "project_name", ",", ")", "return", "dist_name_version", "=", "(", "self", ".", "dist", ".", "project_name", "+", "\"-\"", "+", "self", ".", "dist", ".", "version", ")", "logger", ".", "info", "(", "'Uninstalling %s:'", ",", "dist_name_version", ")", "with", "indent_log", "(", ")", ":", "if", "auto_confirm", "or", "self", ".", "_allowed_to_proceed", "(", "verbose", ")", ":", "moved", "=", "self", ".", "_moved_paths", "for_rename", "=", "compress_for_rename", "(", "self", ".", "paths", ")", "for", "path", "in", "sorted", "(", "compact", "(", "for_rename", ")", ")", ":", "moved", ".", "stash", "(", "path", ")", "logger", ".", "debug", "(", "'Removing file or directory %s'", ",", "path", ")", "for", "pth", "in", "self", ".", "pth", ".", "values", "(", ")", ":", "pth", ".", "remove", "(", ")", "logger", ".", "info", "(", "'Successfully uninstalled %s'", ",", "dist_name_version", ")" ]
avg_line_len: 33.866667
score: 21.533333
def terminate(self):
    """Delete all files created by this index, invalidating `self`. Use with care."""
    try:
        self.id2sims.terminate()
    except:
        pass
    import glob
    for fname in glob.glob(self.fname + '*'):
        try:
            os.remove(fname)
            logger.info("deleted %s" % fname)
        except Exception, e:
            logger.warning("failed to delete %s: %s" % (fname, e))
    for val in self.__dict__.keys():
        try:
            delattr(self, val)
        except:
            pass
[ "def", "terminate", "(", "self", ")", ":", "try", ":", "self", ".", "id2sims", ".", "terminate", "(", ")", "except", ":", "pass", "import", "glob", "for", "fname", "in", "glob", ".", "glob", "(", "self", ".", "fname", "+", "'*'", ")", ":", "try", ":", "os", ".", "remove", "(", "fname", ")", "logger", ".", "info", "(", "\"deleted %s\"", "%", "fname", ")", "except", "Exception", ",", "e", ":", "logger", ".", "warning", "(", "\"failed to delete %s: %s\"", "%", "(", "fname", ",", "e", ")", ")", "for", "val", "in", "self", ".", "__dict__", ".", "keys", "(", ")", ":", "try", ":", "delattr", "(", "self", ",", "val", ")", "except", ":", "pass" ]
avg_line_len: 32.444444
score: 15.611111
def compose_object(self, file_list, destination_file, content_type):
  """COMPOSE multiple objects together.

  Using the given list of files, calls the put object with the compose flag.
  This call merges all the files into the destination file.

  Args:
    file_list: list of dicts with the file name.
    destination_file: Path to the destination file.
    content_type: Content type for the destination file.
  """

  xml_setting_list = ['<ComposeRequest>']

  for meta_data in file_list:
    xml_setting_list.append('<Component>')
    for key, val in meta_data.iteritems():
      xml_setting_list.append('<%s>%s</%s>' % (key, val, key))
    xml_setting_list.append('</Component>')
  xml_setting_list.append('</ComposeRequest>')
  xml = ''.join(xml_setting_list)

  if content_type is not None:
    headers = {'Content-Type': content_type}
  else:
    headers = None
  status, resp_headers, content = self.put_object(
      api_utils._quote_filename(destination_file) + '?compose',
      payload=xml,
      headers=headers)
  errors.check_status(status, [200], destination_file, resp_headers,
                      body=content)
[ "def", "compose_object", "(", "self", ",", "file_list", ",", "destination_file", ",", "content_type", ")", ":", "xml_setting_list", "=", "[", "'<ComposeRequest>'", "]", "for", "meta_data", "in", "file_list", ":", "xml_setting_list", ".", "append", "(", "'<Component>'", ")", "for", "key", ",", "val", "in", "meta_data", ".", "iteritems", "(", ")", ":", "xml_setting_list", ".", "append", "(", "'<%s>%s</%s>'", "%", "(", "key", ",", "val", ",", "key", ")", ")", "xml_setting_list", ".", "append", "(", "'</Component>'", ")", "xml_setting_list", ".", "append", "(", "'</ComposeRequest>'", ")", "xml", "=", "''", ".", "join", "(", "xml_setting_list", ")", "if", "content_type", "is", "not", "None", ":", "headers", "=", "{", "'Content-Type'", ":", "content_type", "}", "else", ":", "headers", "=", "None", "status", ",", "resp_headers", ",", "content", "=", "self", ".", "put_object", "(", "api_utils", ".", "_quote_filename", "(", "destination_file", ")", "+", "'?compose'", ",", "payload", "=", "xml", ",", "headers", "=", "headers", ")", "errors", ".", "check_status", "(", "status", ",", "[", "200", "]", ",", "destination_file", ",", "resp_headers", ",", "body", "=", "content", ")" ]
avg_line_len: 36.0625
score: 18.5
def worker_main(self):
    """
    The main function of for the mux process: setup the Mitogen broker
    thread and ansible_mitogen services, then sleep waiting for the socket
    connected to the parent to be closed (indicating the parent has died).
    """
    self._setup_master()
    self._setup_services()
    try:
        # Let the parent know our listening socket is ready.
        mitogen.core.io_op(self.child_sock.send, b('1'))
        # Block until the socket is closed, which happens on parent exit.
        mitogen.core.io_op(self.child_sock.recv, 1)
    finally:
        self.broker.shutdown()
        self.broker.join()

        # Test frameworks living somewhere higher on the stack of the
        # original parent process may try to catch sys.exit(), so do a C
        # level exit instead.
        os._exit(0)
[ "def", "worker_main", "(", "self", ")", ":", "self", ".", "_setup_master", "(", ")", "self", ".", "_setup_services", "(", ")", "try", ":", "# Let the parent know our listening socket is ready.", "mitogen", ".", "core", ".", "io_op", "(", "self", ".", "child_sock", ".", "send", ",", "b", "(", "'1'", ")", ")", "# Block until the socket is closed, which happens on parent exit.", "mitogen", ".", "core", ".", "io_op", "(", "self", ".", "child_sock", ".", "recv", ",", "1", ")", "finally", ":", "self", ".", "broker", ".", "shutdown", "(", ")", "self", ".", "broker", ".", "join", "(", ")", "# Test frameworks living somewhere higher on the stack of the", "# original parent process may try to catch sys.exit(), so do a C", "# level exit instead.", "os", ".", "_exit", "(", "0", ")" ]
avg_line_len: 40.227273
score: 22.136364
def superclasses(self, inherited=False):
    """Iterate over the superclasses of the class.

    This function is the Python equivalent
    of the CLIPS class-superclasses command.

    """
    data = clips.data.DataObject(self._env)

    lib.EnvClassSuperclasses(
        self._env, self._cls, data.byref, int(inherited))

    for klass in classes(self._env, data.value):
        yield klass
[ "def", "superclasses", "(", "self", ",", "inherited", "=", "False", ")", ":", "data", "=", "clips", ".", "data", ".", "DataObject", "(", "self", ".", "_env", ")", "lib", ".", "EnvClassSuperclasses", "(", "self", ".", "_env", ",", "self", ".", "_cls", ",", "data", ".", "byref", ",", "int", "(", "inherited", ")", ")", "for", "klass", "in", "classes", "(", "self", ".", "_env", ",", "data", ".", "value", ")", ":", "yield", "klass" ]
avg_line_len: 29.642857
score: 17
def tokenize_akkadian_words(line):
    """
    Operates on a single line of text, returns all words in the line as a
    tuple in a list.

    input: "1. isz-pur-ram a-na"
    output: [("isz-pur-ram", "akkadian"), ("a-na", "akkadian")]

    :param: line: text string
    :return: list of tuples: (word, language)
    """
    beginning_underscore = "_[^_]+(?!_)$"
    # only match a string if it has a beginning underscore anywhere
    ending_underscore = "^(?<!_)[^_]+_"
    # only match a string if it has an ending underscore anywhere
    two_underscores = "_[^_]+_"
    # only match a string if it has two underscores

    words = line.split()
    # split the line on spaces ignoring the first split (which is the
    # line number)

    language = "akkadian"
    output_words = []
    for word in words:
        if re.search(two_underscores, word):
            # If the string has two underscores in it then the word is
            # in Sumerian while the neighboring words are in Akkadian.
            output_words.append((word, "sumerian"))
        elif re.search(beginning_underscore, word):
            # If the word has an initial underscore somewhere
            # but no other underscores than we're starting a block
            # of Sumerian.
            language = "sumerian"
            output_words.append((word, language))
        elif re.search(ending_underscore, word):
            # If the word has an ending underscore somewhere
            # but not other underscores than we're ending a block
            # of Sumerian.
            output_words.append((word, language))
            language = "akkadian"
        else:
            # If there are no underscore than we are continuing
            # whatever language we're currently in.
            output_words.append((word, language))
    return output_words
[ "def", "tokenize_akkadian_words", "(", "line", ")", ":", "beginning_underscore", "=", "\"_[^_]+(?!_)$\"", "# only match a string if it has a beginning underscore anywhere", "ending_underscore", "=", "\"^(?<!_)[^_]+_\"", "# only match a string if it has an ending underscore anywhere", "two_underscores", "=", "\"_[^_]+_\"", "# only match a string if it has two underscores", "words", "=", "line", ".", "split", "(", ")", "# split the line on spaces ignoring the first split (which is the", "# line number)", "language", "=", "\"akkadian\"", "output_words", "=", "[", "]", "for", "word", "in", "words", ":", "if", "re", ".", "search", "(", "two_underscores", ",", "word", ")", ":", "# If the string has two underscores in it then the word is", "# in Sumerian while the neighboring words are in Akkadian.", "output_words", ".", "append", "(", "(", "word", ",", "\"sumerian\"", ")", ")", "elif", "re", ".", "search", "(", "beginning_underscore", ",", "word", ")", ":", "# If the word has an initial underscore somewhere", "# but no other underscores than we're starting a block", "# of Sumerian.", "language", "=", "\"sumerian\"", "output_words", ".", "append", "(", "(", "word", ",", "language", ")", ")", "elif", "re", ".", "search", "(", "ending_underscore", ",", "word", ")", ":", "# If the word has an ending underscore somewhere", "# but not other underscores than we're ending a block", "# of Sumerian.", "output_words", ".", "append", "(", "(", "word", ",", "language", ")", ")", "language", "=", "\"akkadian\"", "else", ":", "# If there are no underscore than we are continuing", "# whatever language we're currently in.", "output_words", ".", "append", "(", "(", "word", ",", "language", ")", ")", "return", "output_words" ]
avg_line_len: 39.644444
score: 16.711111
def start(self, container, instances=None, map_name=None, **kwargs):
    """
    Starts instances for a container configuration.

    :param container: Container name.
    :type container: unicode | str
    :param instances: Instance names to start. If not specified, will start all instances as specified in the
     configuration (or just one default instance).
    :param map_name: Container map name. Optional - if not provided the default map is used.
    :type map_name: unicode | str
    :type instances: collections.Iterable[unicode | str | NoneType]
    :param kwargs: Additional kwargs. If multiple actions are resulting from this, they will only be applied to
      the main container start.
    :return: Return values of started containers.
    :rtype: list[dockermap.map.runner.ActionOutput]
    """
    return self.run_actions('start', container, instances=instances, map_name=map_name, **kwargs)
[ "def", "start", "(", "self", ",", "container", ",", "instances", "=", "None", ",", "map_name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "run_actions", "(", "'start'", ",", "container", ",", "instances", "=", "instances", ",", "map_name", "=", "map_name", ",", "*", "*", "kwargs", ")" ]
56.117647
25.411765
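A hedged usage sketch for the start() method above; the client object, container name, instance name, and map name below are assumptions for illustration, not taken from the source (the :rtype: suggests a dockermap client class).

# Hypothetical: `client` is an instance of the class exposing start(),
# configured with a container map named 'main' containing a 'web' container.
results = client.start('web', instances=['instance1'], map_name='main')
for output in results:
    print(output)  # each item is a dockermap.map.runner.ActionOutput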
def json_files_serializer(objs, status=None):
    """JSON Files Serializer.

    :param objs: A list of :class:`invenio_files_rest.models.ObjectVersion`
        instances.
    :param status: A HTTP Status. (Default: ``None``)
    :returns: A Flask response with JSON data.
    :rtype: :py:class:`flask.Response`.
    """
    files = [file_serializer(obj) for obj in objs]
    return make_response(json.dumps(files), status)
[ "def", "json_files_serializer", "(", "objs", ",", "status", "=", "None", ")", ":", "files", "=", "[", "file_serializer", "(", "obj", ")", "for", "obj", "in", "objs", "]", "return", "make_response", "(", "json", ".", "dumps", "(", "files", ")", ",", "status", ")" ]
37.454545
12.909091
def to_serializable_value(self): """ Run through all fields of the object and parse the values :return: :rtype: dict """ return { name: field.to_serializable_value() for name, field in self.value.__dict__.items() if isinstance(field, Field) and self.value }
[ "def", "to_serializable_value", "(", "self", ")", ":", "return", "{", "name", ":", "field", ".", "to_serializable_value", "(", ")", "for", "name", ",", "field", "in", "self", ".", "value", ".", "__dict__", ".", "items", "(", ")", "if", "isinstance", "(", "field", ",", "Field", ")", "and", "self", ".", "value", "}" ]
28.25
17.583333
def get_alert_log(self, current=0, minimum=0, maximum=100, header="", action_key=None): """Get the alert log.""" return self.get_alert(current=current, minimum=minimum, maximum=maximum, header=header, action_key=action_key, log=True)
[ "def", "get_alert_log", "(", "self", ",", "current", "=", "0", ",", "minimum", "=", "0", ",", "maximum", "=", "100", ",", "header", "=", "\"\"", ",", "action_key", "=", "None", ")", ":", "return", "self", ".", "get_alert", "(", "current", "=", "current", ",", "minimum", "=", "minimum", ",", "maximum", "=", "maximum", ",", "header", "=", "header", ",", "action_key", "=", "action_key", ",", "log", "=", "True", ")" ]
38.230769
6.384615
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None.

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need to turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
[ "def", "get_new_service_instance_stub", "(", "service_instance", ",", "path", ",", "ns", "=", "None", ",", "version", "=", "None", ")", ":", "# For python 2.7.9 and later, the default SSL context has more strict", "# connection handshaking rule. We may need turn off the hostname checking", "# and the client side cert verification.", "context", "=", "None", "if", "sys", ".", "version_info", "[", ":", "3", "]", ">", "(", "2", ",", "7", ",", "8", ")", ":", "context", "=", "ssl", ".", "create_default_context", "(", ")", "context", ".", "check_hostname", "=", "False", "context", ".", "verify_mode", "=", "ssl", ".", "CERT_NONE", "stub", "=", "service_instance", ".", "_stub", "hostname", "=", "stub", ".", "host", ".", "split", "(", "':'", ")", "[", "0", "]", "session_cookie", "=", "stub", ".", "cookie", ".", "split", "(", "'\"'", ")", "[", "1", "]", "VmomiSupport", ".", "GetRequestContext", "(", ")", "[", "'vcSessionCookie'", "]", "=", "session_cookie", "new_stub", "=", "SoapStubAdapter", "(", "host", "=", "hostname", ",", "ns", "=", "ns", ",", "path", "=", "path", ",", "version", "=", "version", ",", "poolSize", "=", "0", ",", "sslContext", "=", "context", ")", "new_stub", ".", "cookie", "=", "stub", ".", "cookie", "return", "new_stub" ]
32.414634
16.902439
def drop_table(self, table): """ Drop a table from the MyDB context. ## Arguments * `table` (str): The name of the table to drop. """ job_id = self.submit("DROP TABLE %s"%table, context="MYDB") status = self.monitor(job_id) if status[0] != 5: raise Exception("Couldn't drop table %s"%table)
[ "def", "drop_table", "(", "self", ",", "table", ")", ":", "job_id", "=", "self", ".", "submit", "(", "\"DROP TABLE %s\"", "%", "table", ",", "context", "=", "\"MYDB\"", ")", "status", "=", "self", ".", "monitor", "(", "job_id", ")", "if", "status", "[", "0", "]", "!=", "5", ":", "raise", "Exception", "(", "\"Couldn't drop table %s\"", "%", "table", ")" ]
27.461538
17.923077
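A short usage sketch for drop_table(); the client object and table name are hypothetical, and the class is assumed to also provide the submit() and monitor() methods used in the body above.

# Hypothetical: `casjobs` is an instance of the class that defines drop_table().
casjobs.drop_table("my_temp_table")  # raises Exception if the MyDB job does not finish with status 5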
def label(self): "Label inherited from items" if self._label: return self._label else: if len(self): label = get_ndmapping_label(self, 'label') return '' if label is None else label else: return ''
[ "def", "label", "(", "self", ")", ":", "if", "self", ".", "_label", ":", "return", "self", ".", "_label", "else", ":", "if", "len", "(", "self", ")", ":", "label", "=", "get_ndmapping_label", "(", "self", ",", "'label'", ")", "return", "''", "if", "label", "is", "None", "else", "label", "else", ":", "return", "''" ]
29.6
16.6
def get_block_height(self, is_full: bool = False) -> int or dict:
    """
    This interface is used to get the decimal block height in the current network.

    Return:
        the decimal total height of blocks in the current network.
    """
    response = self.get_block_count(is_full=True)
    response['result'] -= 1
    if is_full:
        return response
    return response['result']
[ "def", "get_block_height", "(", "self", ",", "is_full", ":", "bool", "=", "False", ")", "->", "int", "or", "dict", ":", "response", "=", "self", ".", "get_block_count", "(", "is_full", "=", "True", ")", "response", "[", "'result'", "]", "-=", "1", "if", "is_full", ":", "return", "response", "return", "response", "[", "'result'", "]" ]
34.416667
18.416667
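A usage sketch, assuming `client` is an instance of the RPC client class that defines get_block_height() and get_block_count() above.

height = client.get_block_height()             # plain integer block height
full = client.get_block_height(is_full=True)   # full response dict, 'result' already decremented
print(height, full['result'])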
def td_waveform_to_fd_waveform(waveform, out=None, length=None,
                               buffer_length=100):
    """ Convert a time domain into a frequency domain waveform by FFT.
    As a waveform is assumed to "wrap" in the time domain one must be
    careful to ensure the waveform goes to 0 at both "boundaries". To
    ensure this is done correctly the waveform must have the epoch set such
    that the merger time is at t=0 and the length of the waveform should be
    shorter than the desired length of the FrequencySeries (times 2 - 1)
    so that zeroes can be suitably pre- and post-pended before FFTing.
    If given, out is a memory array to be used as the output of the FFT.
    If not given memory is allocated internally.
    If present the length of the returned FrequencySeries is determined
    from the length of out. If out is not given the length can be provided
    explicitly, or it will be chosen as the nearest power of 2. If choosing
    length explicitly the waveform length + buffer_length is used when
    choosing the nearest binary number so that some zero padding is always
    added.
    """
    # Figure out lengths and set out if needed
    if out is None:
        if length is None:
            N = pnutils.nearest_larger_binary_number(len(waveform) + \
                                                     buffer_length)
            n = int(N//2) + 1
        else:
            n = length
            N = (n-1)*2
        out = zeros(n, dtype=complex_same_precision_as(waveform))
    else:
        n = len(out)
        N = (n-1)*2
    delta_f = 1. / (N * waveform.delta_t)

    # total duration of the waveform
    tmplt_length = len(waveform) * waveform.delta_t
    if len(waveform) > N:
        err_msg = "The time domain template is longer than the intended "
        err_msg += "duration in the frequency domain. This situation is "
        err_msg += "not supported in this function. Please shorten the "
        err_msg += "waveform appropriately before calling this function or "
        err_msg += "increase the allowed waveform length. "
        err_msg += "Waveform length (in samples): {}".format(len(waveform))
        err_msg += ". Intended length: {}.".format(N)
        raise ValueError(err_msg)

    # for IMR templates the zero of time is at max amplitude (merger)
    # thus the start time is minus the duration of the template from
    # lower frequency cutoff to merger, i.e. minus the 'chirp time'
    tChirp = - float( waveform.start_time )  # conversion from LIGOTimeGPS
    waveform.resize(N)
    k_zero = int(waveform.start_time / waveform.delta_t)
    waveform.roll(k_zero)
    htilde = FrequencySeries(out, delta_f=delta_f, copy=False)
    fft(waveform.astype(real_same_precision_as(htilde)), htilde)
    htilde.length_in_time = tmplt_length
    htilde.chirp_length = tChirp
    return htilde
[ "def", "td_waveform_to_fd_waveform", "(", "waveform", ",", "out", "=", "None", ",", "length", "=", "None", ",", "buffer_length", "=", "100", ")", ":", "# Figure out lengths and set out if needed", "if", "out", "is", "None", ":", "if", "length", "is", "None", ":", "N", "=", "pnutils", ".", "nearest_larger_binary_number", "(", "len", "(", "waveform", ")", "+", "buffer_length", ")", "n", "=", "int", "(", "N", "//", "2", ")", "+", "1", "else", ":", "n", "=", "length", "N", "=", "(", "n", "-", "1", ")", "*", "2", "out", "=", "zeros", "(", "n", ",", "dtype", "=", "complex_same_precision_as", "(", "waveform", ")", ")", "else", ":", "n", "=", "len", "(", "out", ")", "N", "=", "(", "n", "-", "1", ")", "*", "2", "delta_f", "=", "1.", "/", "(", "N", "*", "waveform", ".", "delta_t", ")", "# total duration of the waveform", "tmplt_length", "=", "len", "(", "waveform", ")", "*", "waveform", ".", "delta_t", "if", "len", "(", "waveform", ")", ">", "N", ":", "err_msg", "=", "\"The time domain template is longer than the intended \"", "err_msg", "+=", "\"duration in the frequency domain. This situation is \"", "err_msg", "+=", "\"not supported in this function. Please shorten the \"", "err_msg", "+=", "\"waveform appropriately before calling this function or \"", "err_msg", "+=", "\"increase the allowed waveform length. \"", "err_msg", "+=", "\"Waveform length (in samples): {}\"", ".", "format", "(", "len", "(", "waveform", ")", ")", "err_msg", "+=", "\". Intended length: {}.\"", ".", "format", "(", "N", ")", "raise", "ValueError", "(", "err_msg", ")", "# for IMR templates the zero of time is at max amplitude (merger)", "# thus the start time is minus the duration of the template from", "# lower frequency cutoff to merger, i.e. minus the 'chirp time'", "tChirp", "=", "-", "float", "(", "waveform", ".", "start_time", ")", "# conversion from LIGOTimeGPS", "waveform", ".", "resize", "(", "N", ")", "k_zero", "=", "int", "(", "waveform", ".", "start_time", "/", "waveform", ".", "delta_t", ")", "waveform", ".", "roll", "(", "k_zero", ")", "htilde", "=", "FrequencySeries", "(", "out", ",", "delta_f", "=", "delta_f", ",", "copy", "=", "False", ")", "fft", "(", "waveform", ".", "astype", "(", "real_same_precision_as", "(", "htilde", ")", ")", ",", "htilde", ")", "htilde", ".", "length_in_time", "=", "tmplt_length", "htilde", ".", "chirp_length", "=", "tChirp", "return", "htilde" ]
50.946429
23
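A hedged sketch of how this helper might be driven from PyCBC, which the names (FrequencySeries, pnutils, LIGOTimeGPS) suggest it comes from; the approximant, masses, and sample rate are arbitrary example values, and PyCBC's get_td_waveform is assumed to be available.

from pycbc.waveform import get_td_waveform

# get_td_waveform places the merger at t=0, which is the epoch convention
# td_waveform_to_fd_waveform expects.
hp, _ = get_td_waveform(approximant="SEOBNRv4", mass1=30, mass2=30,
                        delta_t=1.0 / 4096, f_lower=30)
htilde = td_waveform_to_fd_waveform(hp, buffer_length=100)
print(htilde.delta_f, htilde.length_in_time)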
def _get_local_files(self, path): """Returns a dictionary of all the files under a path.""" if not path: raise ValueError("No path specified") files = defaultdict(lambda: None) path_len = len(path) + 1 for root, dirs, filenames in os.walk(path): for name in filenames: full_path = join(root, name) files[full_path[path_len:]] = compute_md5(full_path) return files
[ "def", "_get_local_files", "(", "self", ",", "path", ")", ":", "if", "not", "path", ":", "raise", "ValueError", "(", "\"No path specified\"", ")", "files", "=", "defaultdict", "(", "lambda", ":", "None", ")", "path_len", "=", "len", "(", "path", ")", "+", "1", "for", "root", ",", "dirs", ",", "filenames", "in", "os", ".", "walk", "(", "path", ")", ":", "for", "name", "in", "filenames", ":", "full_path", "=", "join", "(", "root", ",", "name", ")", "files", "[", "full_path", "[", "path_len", ":", "]", "]", "=", "compute_md5", "(", "full_path", ")", "return", "files" ]
41.636364
10.454545
def show_keyword_help(cur, arg): """ Call the built-in "show <command>", to display help for an SQL keyword. :param cur: cursor :param arg: string :return: list """ keyword = arg.strip('"').strip("'") query = "help '{0}'".format(keyword) log.debug(query) cur.execute(query) if cur.description and cur.rowcount > 0: headers = [x[0] for x in cur.description] return [(None, cur.fetchall(), headers, '')] else: return [(None, None, None, 'No help found for {0}.'.format(keyword))]
[ "def", "show_keyword_help", "(", "cur", ",", "arg", ")", ":", "keyword", "=", "arg", ".", "strip", "(", "'\"'", ")", ".", "strip", "(", "\"'\"", ")", "query", "=", "\"help '{0}'\"", ".", "format", "(", "keyword", ")", "log", ".", "debug", "(", "query", ")", "cur", ".", "execute", "(", "query", ")", "if", "cur", ".", "description", "and", "cur", ".", "rowcount", ">", "0", ":", "headers", "=", "[", "x", "[", "0", "]", "for", "x", "in", "cur", ".", "description", "]", "return", "[", "(", "None", ",", "cur", ".", "fetchall", "(", ")", ",", "headers", ",", "''", ")", "]", "else", ":", "return", "[", "(", "None", ",", "None", ",", "None", ",", "'No help found for {0}.'", ".", "format", "(", "keyword", ")", ")", "]" ]
33.375
14.625
def pathFromHere_walk(self, astr_startPath = '/'): """ Return a list of paths from "here" in the stree, using the internal cd() to walk the path space. :return: a list of paths from "here" """ self.l_lwd = [] self.treeWalk(startPath = astr_startPath, f=self.lwd) return self.l_lwd
[ "def", "pathFromHere_walk", "(", "self", ",", "astr_startPath", "=", "'/'", ")", ":", "self", ".", "l_lwd", "=", "[", "]", "self", ".", "treeWalk", "(", "startPath", "=", "astr_startPath", ",", "f", "=", "self", ".", "lwd", ")", "return", "self", ".", "l_lwd" ]
33.545455
16.818182
def _init_append(self): """ Initializes file on 'a' mode. """ if self._content_length: # Adjust size if content length specified with _handle_azure_exception(): self._resize( content_length=self._content_length, **self._client_kwargs) self._reset_head() # Make initial seek position to current end of file self._seek = self._size
[ "def", "_init_append", "(", "self", ")", ":", "if", "self", ".", "_content_length", ":", "# Adjust size if content length specified", "with", "_handle_azure_exception", "(", ")", ":", "self", ".", "_resize", "(", "content_length", "=", "self", ".", "_content_length", ",", "*", "*", "self", ".", "_client_kwargs", ")", "self", ".", "_reset_head", "(", ")", "# Make initial seek position to current end of file", "self", ".", "_seek", "=", "self", ".", "_size" ]
34
12.923077
def get_bounds(locations, lonlat=False): """ Computes the bounds of the object in the form [[lat_min, lon_min], [lat_max, lon_max]] """ bounds = [[None, None], [None, None]] for point in iter_coords(locations): bounds = [ [ none_min(bounds[0][0], point[0]), none_min(bounds[0][1], point[1]), ], [ none_max(bounds[1][0], point[0]), none_max(bounds[1][1], point[1]), ], ] if lonlat: bounds = _locations_mirror(bounds) return bounds
[ "def", "get_bounds", "(", "locations", ",", "lonlat", "=", "False", ")", ":", "bounds", "=", "[", "[", "None", ",", "None", "]", ",", "[", "None", ",", "None", "]", "]", "for", "point", "in", "iter_coords", "(", "locations", ")", ":", "bounds", "=", "[", "[", "none_min", "(", "bounds", "[", "0", "]", "[", "0", "]", ",", "point", "[", "0", "]", ")", ",", "none_min", "(", "bounds", "[", "0", "]", "[", "1", "]", ",", "point", "[", "1", "]", ")", ",", "]", ",", "[", "none_max", "(", "bounds", "[", "1", "]", "[", "0", "]", ",", "point", "[", "0", "]", ")", ",", "none_max", "(", "bounds", "[", "1", "]", "[", "1", "]", ",", "point", "[", "1", "]", ")", ",", "]", ",", "]", "if", "lonlat", ":", "bounds", "=", "_locations_mirror", "(", "bounds", ")", "return", "bounds" ]
27.52381
14.285714
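A small usage example for get_bounds(); the coordinates are made up, and the helper functions (iter_coords, none_min, none_max) are assumed to behave as their names suggest.

locations = [[45.5, -122.6], [47.6, -122.3], [40.7, -74.0]]
bounds = get_bounds(locations)
# -> [[40.7, -122.6], [47.6, -74.0]]  (south-west corner, north-east corner)
print(bounds)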
def is_iso8601(instance: str): """Validates ISO8601 format""" if not isinstance(instance, str): return True return ISO8601.match(instance) is not None
[ "def", "is_iso8601", "(", "instance", ":", "str", ")", ":", "if", "not", "isinstance", "(", "instance", ",", "str", ")", ":", "return", "True", "return", "ISO8601", ".", "match", "(", "instance", ")", "is", "not", "None" ]
33.2
8
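A self-contained sketch of the same check; the module-level ISO8601 pattern is not shown in the source, so the simplified regex below is a hypothetical stand-in, not the library's actual pattern.

import re

# Hypothetical, simplified ISO 8601 pattern for illustration only.
ISO8601 = re.compile(
    r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(?:\.\d+)?(?:Z|[+-]\d{2}:\d{2})?$")

def is_iso8601(instance: str):
    """Validates ISO8601 format"""
    if not isinstance(instance, str):
        return True
    return ISO8601.match(instance) is not None

print(is_iso8601("2021-03-01T12:30:00Z"))   # True
print(is_iso8601("not-a-timestamp"))        # False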
def restart(self, container, instances=None, map_name=None, **kwargs):
    """
    Restarts instances for a container configuration.

    :param container: Container name.
    :type container: unicode | str
    :param instances: Instance names to restart. If not specified, will restart all instances as specified in the
     configuration (or just one default instance).
    :type instances: collections.Iterable[unicode | str | NoneType]
    :param map_name: Container map name. Optional - if not provided the default map is used.
    :type map_name: unicode | str
    :param kwargs: Additional kwargs. If multiple actions are resulting from this, they will only be applied to
      the main container restart.
    :return: Return values of restarted containers.
    :rtype: list[dockermap.map.runner.ActionOutput]
    """
    return self.run_actions('restart', container, instances=instances, map_name=map_name, **kwargs)
[ "def", "restart", "(", "self", ",", "container", ",", "instances", "=", "None", ",", "map_name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "run_actions", "(", "'restart'", ",", "container", ",", "instances", "=", "instances", ",", "map_name", "=", "map_name", ",", "*", "*", "kwargs", ")" ]
56.764706
25.823529
def _merge_parameters(self, other_trajectory, remove_duplicates=False, trial_parameter_name=None, ignore_data=()): """Merges parameters from the other trajectory into the current one. The explored parameters in the current trajectory are directly enlarged (in RAM), no storage service is needed here. Later on in `f_merge` the storage service will be requested to store the enlarge parameters to disk. Note explored parameters are always enlarged. Unexplored parameters might become new explored parameters if they differ in their default values in the current and the other trajectory, respectively. :return: A tuple with two elements: 1. Dictionary of run index mappings from old trajectroy to the new one. 2. List of names of parameters that were altered. """ if trial_parameter_name: if remove_duplicates: self._logger.warning('You have given a trial parameter and you want to ' 'remove_items duplicates. There cannot be any duplicates ' 'when adding trials, I will not look for duplicates.') remove_duplicates = False # Dictionary containing full parameter names as keys # and pairs of parameters from both trajectories as values. # Parameters kept in this dictionary are marked for merging and will be enlarged # with ranges and values of corresponding parameters in the other trajectory params_to_change = {} if trial_parameter_name: # We want to merge a trial parameter # First make some sanity checks my_trial_parameter = self.f_get(trial_parameter_name) other_trial_parameter = other_trajectory.f_get(trial_parameter_name) if not isinstance(my_trial_parameter, BaseParameter): raise TypeError('Your trial_parameter `%s` does not evaluate to a real parameter' ' in the trajectory' % trial_parameter_name) # Extract the ranges of both trial parameters if my_trial_parameter.f_has_range(): my_trial_list = my_trial_parameter.f_get_range(copy=False) else: # If we only have a single trial, we need to make a range of length 1 # This is probably a very exceptional case my_trial_list = [my_trial_parameter.f_get()] if other_trial_parameter.f_has_range(): other_trial_list = other_trial_parameter.f_get_range(copy=False) else: other_trial_list = [other_trial_parameter.f_get()] # Make sanity checks if both ranges contain all numbers from 0 to T1 # for the current trajectory and 0 to T2 for the other trajectory mytrialset = set(my_trial_list) mymaxtrial_T1 = max(mytrialset) # maximum trial index in current trajectory aka T1 if mytrialset != set(range(mymaxtrial_T1 + 1)): raise TypeError('In order to specify a trial parameter, this parameter must ' 'contain integers from 0 to %d, but it in fact it ' 'contains `%s`.' % (mymaxtrial_T1, str(mytrialset))) othertrialset = set(other_trial_list) othermaxtrial_T2 = max(othertrialset) # maximum trial index in other trajectory aka T2 if othertrialset != set(range(othermaxtrial_T2 + 1)): raise TypeError('In order to specify a trial parameter, this parameter must ' 'contain integers from 0 to %d, but it infact it contains `%s` ' 'in the other trajectory.' 
% (othermaxtrial_T2, str(othertrialset))) # If the trial parameter's name was just given in parts we update it here # to the full name trial_parameter_name = my_trial_parameter.v_full_name # If we had the very exceptional case, that our trial parameter was not explored, # aka we only had 1 trial, we have to add it to the explored parameters if not trial_parameter_name in self._explored_parameters: self._explored_parameters[trial_parameter_name] = my_trial_parameter # We need to mark the trial parameter for merging params_to_change[trial_parameter_name] = (my_trial_parameter, other_trial_parameter) # Dictionary containing all parameters of the other trajectory, we will iterate through it # to spot parameters that need to be enlarge or become new explored parameters params_to_merge = other_trajectory._parameters.copy() params_to_merge.update(other_trajectory._derived_parameters) for ignore in ignore_data: if ignore in params_to_merge: del params_to_merge[ignore] run_name_dummys = set([f(-1) for f in other_trajectory._wildcard_functions.values()]) # Iterate through all parameters of the other trajectory # and check which differ from the parameters of the current trajectory for key in params_to_merge: other_param = params_to_merge[key] # We don't need to merge anything based on wildcards split_key = key.split('.') if any(x in other_trajectory._reversed_wildcards for x in split_key): continue my_param = self.f_get(key) if not my_param._values_of_same_type(my_param.f_get(), other_param.f_get()): raise TypeError('The parameters with name `%s` are not of the same type, cannot ' 'merge trajectory.' % key) # We have taken care about the trial parameter before, it is already # marked for merging if my_param.v_full_name == trial_parameter_name: continue # If a parameter was explored in one of the trajectories or two unexplored # parameters differ, we need to mark them for merge if (my_param.f_has_range() or other_param.f_has_range() or not my_param._equal_values(my_param.f_get(), other_param.f_get())): # If two unexplored parameters differ, that means they differ in every run, # accordingly we do not need to check for duplicate runs anymore params_to_change[key] = (my_param, other_param) if not my_param.f_has_range() and not other_param.f_has_range(): remove_duplicates = False # Check if we use all runs or remove duplicates: used_runs = {} for idx in range(len(other_trajectory)): used_runs[idx] = idx if remove_duplicates: # We need to compare all parameter combinations in the current trajectory # to all parameter combinations in the other trajectory to spot duplicate points. # Quadratic Complexity! for irun in range(len(other_trajectory)): for jrun in range(len(self)): change = True # Check all marked parameters for my_param, other_param in params_to_change.values(): if other_param.f_has_range(): other_param._set_parameter_access(irun) if my_param.f_has_range(): my_param._set_parameter_access(jrun) val1 = my_param.f_get() val2 = other_param.f_get() # If only one parameter differs, the parameter space point differs # and we can skip the rest of the parameters if not my_param._equal_values(val1, val2): change = False break # If we found one parameter space point in the current trajectory # that matches the ith point in the other, we do not need the ith # point. 
We can also skip comparing to the rest of the points in the # current trajectory if change: del used_runs[irun] break # Restore changed default values for my_param, other_param in params_to_change.values(): other_param._restore_default() my_param._restore_default() # Merge parameters into the current trajectory adding_length = len(used_runs) starting_length = len(self) if adding_length == 0: return used_runs, [] count = 0 for key in sorted(used_runs.keys()): used_runs[key] = starting_length + count count += 1 for my_param, other_param in params_to_change.values(): fullname = my_param.v_full_name # We need new ranges to enlarge all parameters marked for merging if fullname == trial_parameter_name: # The trial parameter now has to cover the range 0 to T1+T2+1 other_range = [x + mymaxtrial_T1 + 1 for x in other_trial_list] else: # In case we do not use all runs we need to filter the ranges of the # parameters of the other trajectory if other_param.f_has_range(): other_range = (x for jdx, x in enumerate(other_param.f_get_range(copy=False)) if jdx in used_runs) else: other_range = (other_param.f_get() for _ in range(adding_length)) # If a parameter in the current trajectory was marked for merging but was not # explored before, we need to explore it first, simply by creating the range of # the current trajectory's length containing only it's default value if not my_param.f_has_range(): my_param.f_unlock() my_param._explore((my_param.f_get() for _ in range(len(self)))) # After determining the new range extension `other_range`, # expand the parameters my_param.f_unlock() my_param._expand(other_range) if not fullname in self._explored_parameters: self._explored_parameters[fullname] = my_param return used_runs, list(params_to_change.keys())
[ "def", "_merge_parameters", "(", "self", ",", "other_trajectory", ",", "remove_duplicates", "=", "False", ",", "trial_parameter_name", "=", "None", ",", "ignore_data", "=", "(", ")", ")", ":", "if", "trial_parameter_name", ":", "if", "remove_duplicates", ":", "self", ".", "_logger", ".", "warning", "(", "'You have given a trial parameter and you want to '", "'remove_items duplicates. There cannot be any duplicates '", "'when adding trials, I will not look for duplicates.'", ")", "remove_duplicates", "=", "False", "# Dictionary containing full parameter names as keys", "# and pairs of parameters from both trajectories as values.", "# Parameters kept in this dictionary are marked for merging and will be enlarged", "# with ranges and values of corresponding parameters in the other trajectory", "params_to_change", "=", "{", "}", "if", "trial_parameter_name", ":", "# We want to merge a trial parameter", "# First make some sanity checks", "my_trial_parameter", "=", "self", ".", "f_get", "(", "trial_parameter_name", ")", "other_trial_parameter", "=", "other_trajectory", ".", "f_get", "(", "trial_parameter_name", ")", "if", "not", "isinstance", "(", "my_trial_parameter", ",", "BaseParameter", ")", ":", "raise", "TypeError", "(", "'Your trial_parameter `%s` does not evaluate to a real parameter'", "' in the trajectory'", "%", "trial_parameter_name", ")", "# Extract the ranges of both trial parameters", "if", "my_trial_parameter", ".", "f_has_range", "(", ")", ":", "my_trial_list", "=", "my_trial_parameter", ".", "f_get_range", "(", "copy", "=", "False", ")", "else", ":", "# If we only have a single trial, we need to make a range of length 1", "# This is probably a very exceptional case", "my_trial_list", "=", "[", "my_trial_parameter", ".", "f_get", "(", ")", "]", "if", "other_trial_parameter", ".", "f_has_range", "(", ")", ":", "other_trial_list", "=", "other_trial_parameter", ".", "f_get_range", "(", "copy", "=", "False", ")", "else", ":", "other_trial_list", "=", "[", "other_trial_parameter", ".", "f_get", "(", ")", "]", "# Make sanity checks if both ranges contain all numbers from 0 to T1", "# for the current trajectory and 0 to T2 for the other trajectory", "mytrialset", "=", "set", "(", "my_trial_list", ")", "mymaxtrial_T1", "=", "max", "(", "mytrialset", ")", "# maximum trial index in current trajectory aka T1", "if", "mytrialset", "!=", "set", "(", "range", "(", "mymaxtrial_T1", "+", "1", ")", ")", ":", "raise", "TypeError", "(", "'In order to specify a trial parameter, this parameter must '", "'contain integers from 0 to %d, but it in fact it '", "'contains `%s`.'", "%", "(", "mymaxtrial_T1", ",", "str", "(", "mytrialset", ")", ")", ")", "othertrialset", "=", "set", "(", "other_trial_list", ")", "othermaxtrial_T2", "=", "max", "(", "othertrialset", ")", "# maximum trial index in other trajectory aka T2", "if", "othertrialset", "!=", "set", "(", "range", "(", "othermaxtrial_T2", "+", "1", ")", ")", ":", "raise", "TypeError", "(", "'In order to specify a trial parameter, this parameter must '", "'contain integers from 0 to %d, but it infact it contains `%s` '", "'in the other trajectory.'", "%", "(", "othermaxtrial_T2", ",", "str", "(", "othertrialset", ")", ")", ")", "# If the trial parameter's name was just given in parts we update it here", "# to the full name", "trial_parameter_name", "=", "my_trial_parameter", ".", "v_full_name", "# If we had the very exceptional case, that our trial parameter was not explored,", "# aka we only had 1 trial, we have to add it to the explored 
parameters", "if", "not", "trial_parameter_name", "in", "self", ".", "_explored_parameters", ":", "self", ".", "_explored_parameters", "[", "trial_parameter_name", "]", "=", "my_trial_parameter", "# We need to mark the trial parameter for merging", "params_to_change", "[", "trial_parameter_name", "]", "=", "(", "my_trial_parameter", ",", "other_trial_parameter", ")", "# Dictionary containing all parameters of the other trajectory, we will iterate through it", "# to spot parameters that need to be enlarge or become new explored parameters", "params_to_merge", "=", "other_trajectory", ".", "_parameters", ".", "copy", "(", ")", "params_to_merge", ".", "update", "(", "other_trajectory", ".", "_derived_parameters", ")", "for", "ignore", "in", "ignore_data", ":", "if", "ignore", "in", "params_to_merge", ":", "del", "params_to_merge", "[", "ignore", "]", "run_name_dummys", "=", "set", "(", "[", "f", "(", "-", "1", ")", "for", "f", "in", "other_trajectory", ".", "_wildcard_functions", ".", "values", "(", ")", "]", ")", "# Iterate through all parameters of the other trajectory", "# and check which differ from the parameters of the current trajectory", "for", "key", "in", "params_to_merge", ":", "other_param", "=", "params_to_merge", "[", "key", "]", "# We don't need to merge anything based on wildcards", "split_key", "=", "key", ".", "split", "(", "'.'", ")", "if", "any", "(", "x", "in", "other_trajectory", ".", "_reversed_wildcards", "for", "x", "in", "split_key", ")", ":", "continue", "my_param", "=", "self", ".", "f_get", "(", "key", ")", "if", "not", "my_param", ".", "_values_of_same_type", "(", "my_param", ".", "f_get", "(", ")", ",", "other_param", ".", "f_get", "(", ")", ")", ":", "raise", "TypeError", "(", "'The parameters with name `%s` are not of the same type, cannot '", "'merge trajectory.'", "%", "key", ")", "# We have taken care about the trial parameter before, it is already", "# marked for merging", "if", "my_param", ".", "v_full_name", "==", "trial_parameter_name", ":", "continue", "# If a parameter was explored in one of the trajectories or two unexplored", "# parameters differ, we need to mark them for merge", "if", "(", "my_param", ".", "f_has_range", "(", ")", "or", "other_param", ".", "f_has_range", "(", ")", "or", "not", "my_param", ".", "_equal_values", "(", "my_param", ".", "f_get", "(", ")", ",", "other_param", ".", "f_get", "(", ")", ")", ")", ":", "# If two unexplored parameters differ, that means they differ in every run,", "# accordingly we do not need to check for duplicate runs anymore", "params_to_change", "[", "key", "]", "=", "(", "my_param", ",", "other_param", ")", "if", "not", "my_param", ".", "f_has_range", "(", ")", "and", "not", "other_param", ".", "f_has_range", "(", ")", ":", "remove_duplicates", "=", "False", "# Check if we use all runs or remove duplicates:", "used_runs", "=", "{", "}", "for", "idx", "in", "range", "(", "len", "(", "other_trajectory", ")", ")", ":", "used_runs", "[", "idx", "]", "=", "idx", "if", "remove_duplicates", ":", "# We need to compare all parameter combinations in the current trajectory", "# to all parameter combinations in the other trajectory to spot duplicate points.", "# Quadratic Complexity!", "for", "irun", "in", "range", "(", "len", "(", "other_trajectory", ")", ")", ":", "for", "jrun", "in", "range", "(", "len", "(", "self", ")", ")", ":", "change", "=", "True", "# Check all marked parameters", "for", "my_param", ",", "other_param", "in", "params_to_change", ".", "values", "(", ")", ":", "if", "other_param", ".", 
"f_has_range", "(", ")", ":", "other_param", ".", "_set_parameter_access", "(", "irun", ")", "if", "my_param", ".", "f_has_range", "(", ")", ":", "my_param", ".", "_set_parameter_access", "(", "jrun", ")", "val1", "=", "my_param", ".", "f_get", "(", ")", "val2", "=", "other_param", ".", "f_get", "(", ")", "# If only one parameter differs, the parameter space point differs", "# and we can skip the rest of the parameters", "if", "not", "my_param", ".", "_equal_values", "(", "val1", ",", "val2", ")", ":", "change", "=", "False", "break", "# If we found one parameter space point in the current trajectory", "# that matches the ith point in the other, we do not need the ith", "# point. We can also skip comparing to the rest of the points in the", "# current trajectory", "if", "change", ":", "del", "used_runs", "[", "irun", "]", "break", "# Restore changed default values", "for", "my_param", ",", "other_param", "in", "params_to_change", ".", "values", "(", ")", ":", "other_param", ".", "_restore_default", "(", ")", "my_param", ".", "_restore_default", "(", ")", "# Merge parameters into the current trajectory", "adding_length", "=", "len", "(", "used_runs", ")", "starting_length", "=", "len", "(", "self", ")", "if", "adding_length", "==", "0", ":", "return", "used_runs", ",", "[", "]", "count", "=", "0", "for", "key", "in", "sorted", "(", "used_runs", ".", "keys", "(", ")", ")", ":", "used_runs", "[", "key", "]", "=", "starting_length", "+", "count", "count", "+=", "1", "for", "my_param", ",", "other_param", "in", "params_to_change", ".", "values", "(", ")", ":", "fullname", "=", "my_param", ".", "v_full_name", "# We need new ranges to enlarge all parameters marked for merging", "if", "fullname", "==", "trial_parameter_name", ":", "# The trial parameter now has to cover the range 0 to T1+T2+1", "other_range", "=", "[", "x", "+", "mymaxtrial_T1", "+", "1", "for", "x", "in", "other_trial_list", "]", "else", ":", "# In case we do not use all runs we need to filter the ranges of the", "# parameters of the other trajectory", "if", "other_param", ".", "f_has_range", "(", ")", ":", "other_range", "=", "(", "x", "for", "jdx", ",", "x", "in", "enumerate", "(", "other_param", ".", "f_get_range", "(", "copy", "=", "False", ")", ")", "if", "jdx", "in", "used_runs", ")", "else", ":", "other_range", "=", "(", "other_param", ".", "f_get", "(", ")", "for", "_", "in", "range", "(", "adding_length", ")", ")", "# If a parameter in the current trajectory was marked for merging but was not", "# explored before, we need to explore it first, simply by creating the range of", "# the current trajectory's length containing only it's default value", "if", "not", "my_param", ".", "f_has_range", "(", ")", ":", "my_param", ".", "f_unlock", "(", ")", "my_param", ".", "_explore", "(", "(", "my_param", ".", "f_get", "(", ")", "for", "_", "in", "range", "(", "len", "(", "self", ")", ")", ")", ")", "# After determining the new range extension `other_range`,", "# expand the parameters", "my_param", ".", "f_unlock", "(", ")", "my_param", ".", "_expand", "(", "other_range", ")", "if", "not", "fullname", "in", "self", ".", "_explored_parameters", ":", "self", ".", "_explored_parameters", "[", "fullname", "]", "=", "my_param", "return", "used_runs", ",", "list", "(", "params_to_change", ".", "keys", "(", ")", ")" ]
48.293578
27.720183
def pre_disconnect(self, sid, namespace): """Put the client in the to-be-disconnected list. This allows the client data structures to be present while the disconnect handler is invoked, but still recognize the fact that the client is soon going away. """ if namespace not in self.pending_disconnect: self.pending_disconnect[namespace] = [] self.pending_disconnect[namespace].append(sid)
[ "def", "pre_disconnect", "(", "self", ",", "sid", ",", "namespace", ")", ":", "if", "namespace", "not", "in", "self", ".", "pending_disconnect", ":", "self", ".", "pending_disconnect", "[", "namespace", "]", "=", "[", "]", "self", ".", "pending_disconnect", "[", "namespace", "]", ".", "append", "(", "sid", ")" ]
44.6
15
def profiler(self): """Creates a dictionary from the profile scheme(s)""" # Initialise variables profiledata = defaultdict(make_dict) profileset = set() # supplementalset = '' genedict = {} # Find all the unique profiles to use with a set for sample in self.metadata: if sample[self.analysistype].profile != 'NA': profileset.add(sample[self.analysistype].profile[0]) # if self.analysistype == 'rmlst': # supplementalset = sample[self.analysistype].supplementalprofile # Extract the profiles for each set for sequenceprofile in profileset: # Clear the list of genes genelist = [] for sample in self.metadata: if sequenceprofile == sample[self.analysistype].profile[0]: # genelist = [os.path.split(x)[1].split('.')[0] for x in sample[self.analysistype].alleles] genelist = sample[self.analysistype].allelenames try: # Open the sequence profile file as a dictionary profile = DictReader(open(sequenceprofile), dialect='excel-tab') # Revert to standard comma separated values except KeyError: # Open the sequence profile file as a dictionary profile = DictReader(open(sequenceprofile)) # Iterate through the rows for row in profile: # Iterate through the genes for gene in genelist: # Add the sequence profile, and type, the gene name and the allele number to the dictionary try: profiledata[sequenceprofile][row['ST']][gene] = row[gene] except KeyError: try: profiledata[sequenceprofile][row['rST']][gene] = row[gene] except KeyError: raise # # Load the supplemental profile definitions # if self.analysistype == 'rmlst': # supplementalprofile = DictReader(open(supplementalset), dialect='excel-tab') # # Do the same with the supplemental profile # for row in supplementalprofile: # # Iterate through the genes # for gene in genelist: # # Add the sequence profile, and type, the gene name and the allele number to the dictionary # profiledata[sequenceprofile][row['rST']][gene] = row[gene] # Add the gene list to a dictionary genedict[sequenceprofile] = sorted(genelist) # Add the profile data, and gene list to each sample for sample in self.metadata: if sample.general.bestassemblyfile != 'NA': if sequenceprofile == sample[self.analysistype].profile[0]: # Populate the metadata with the profile data sample[self.analysistype].profiledata = profiledata[sample[self.analysistype].profile[0]] # Add the allele directory to a list of directories used in this analysis self.allelefolders.add(sample[self.analysistype].alleledir) dotter()
[ "def", "profiler", "(", "self", ")", ":", "# Initialise variables", "profiledata", "=", "defaultdict", "(", "make_dict", ")", "profileset", "=", "set", "(", ")", "# supplementalset = ''", "genedict", "=", "{", "}", "# Find all the unique profiles to use with a set", "for", "sample", "in", "self", ".", "metadata", ":", "if", "sample", "[", "self", ".", "analysistype", "]", ".", "profile", "!=", "'NA'", ":", "profileset", ".", "add", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "profile", "[", "0", "]", ")", "# if self.analysistype == 'rmlst':", "# supplementalset = sample[self.analysistype].supplementalprofile", "# Extract the profiles for each set", "for", "sequenceprofile", "in", "profileset", ":", "# Clear the list of genes", "genelist", "=", "[", "]", "for", "sample", "in", "self", ".", "metadata", ":", "if", "sequenceprofile", "==", "sample", "[", "self", ".", "analysistype", "]", ".", "profile", "[", "0", "]", ":", "# genelist = [os.path.split(x)[1].split('.')[0] for x in sample[self.analysistype].alleles]", "genelist", "=", "sample", "[", "self", ".", "analysistype", "]", ".", "allelenames", "try", ":", "# Open the sequence profile file as a dictionary", "profile", "=", "DictReader", "(", "open", "(", "sequenceprofile", ")", ",", "dialect", "=", "'excel-tab'", ")", "# Revert to standard comma separated values", "except", "KeyError", ":", "# Open the sequence profile file as a dictionary", "profile", "=", "DictReader", "(", "open", "(", "sequenceprofile", ")", ")", "# Iterate through the rows", "for", "row", "in", "profile", ":", "# Iterate through the genes", "for", "gene", "in", "genelist", ":", "# Add the sequence profile, and type, the gene name and the allele number to the dictionary", "try", ":", "profiledata", "[", "sequenceprofile", "]", "[", "row", "[", "'ST'", "]", "]", "[", "gene", "]", "=", "row", "[", "gene", "]", "except", "KeyError", ":", "try", ":", "profiledata", "[", "sequenceprofile", "]", "[", "row", "[", "'rST'", "]", "]", "[", "gene", "]", "=", "row", "[", "gene", "]", "except", "KeyError", ":", "raise", "# # Load the supplemental profile definitions", "# if self.analysistype == 'rmlst':", "# supplementalprofile = DictReader(open(supplementalset), dialect='excel-tab')", "# # Do the same with the supplemental profile", "# for row in supplementalprofile:", "# # Iterate through the genes", "# for gene in genelist:", "# # Add the sequence profile, and type, the gene name and the allele number to the dictionary", "# profiledata[sequenceprofile][row['rST']][gene] = row[gene]", "# Add the gene list to a dictionary", "genedict", "[", "sequenceprofile", "]", "=", "sorted", "(", "genelist", ")", "# Add the profile data, and gene list to each sample", "for", "sample", "in", "self", ".", "metadata", ":", "if", "sample", ".", "general", ".", "bestassemblyfile", "!=", "'NA'", ":", "if", "sequenceprofile", "==", "sample", "[", "self", ".", "analysistype", "]", ".", "profile", "[", "0", "]", ":", "# Populate the metadata with the profile data", "sample", "[", "self", ".", "analysistype", "]", ".", "profiledata", "=", "profiledata", "[", "sample", "[", "self", ".", "analysistype", "]", ".", "profile", "[", "0", "]", "]", "# Add the allele directory to a list of directories used in this analysis", "self", ".", "allelefolders", ".", "add", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "alleledir", ")", "dotter", "(", ")" ]
54.377049
21.672131
def _get_all_constants():
    """
    Get list of all uppercase, non-private globals (doesn't start with ``_``).

    Returns:
        list: Uppercase names defined in `globals()` (variables from this \
              module).
    """
    return [
        key for key in globals().keys()
        if all([
            not key.startswith("_"),          # publicly accessible
            key.upper() == key,               # uppercase
            type(globals()[key]) in _ALLOWED  # and with type from _ALLOWED
        ])
    ]
[ "def", "_get_all_constants", "(", ")", ":", "return", "[", "key", "for", "key", "in", "globals", "(", ")", ".", "keys", "(", ")", "if", "all", "(", "[", "not", "key", ".", "startswith", "(", "\"_\"", ")", ",", "# publicly accesible", "key", ".", "upper", "(", ")", "==", "key", ",", "# uppercase", "type", "(", "globals", "(", ")", "[", "key", "]", ")", "in", "_ALLOWED", "# and with type from _ALLOWED", "]", ")", "]" ]
31.625
23.125
def validate_relation_data(self, sentry_unit, relation, expected): """Validate actual relation data based on expected relation data.""" actual = sentry_unit.relation(relation[0], relation[1]) return self._validate_dict_data(expected, actual)
[ "def", "validate_relation_data", "(", "self", ",", "sentry_unit", ",", "relation", ",", "expected", ")", ":", "actual", "=", "sentry_unit", ".", "relation", "(", "relation", "[", "0", "]", ",", "relation", "[", "1", "]", ")", "return", "self", ".", "_validate_dict_data", "(", "expected", ",", "actual", ")" ]
65.5
16.5
def register_add_user_command(self, add_user_func):
    """
    Add the add-user command to the parser and call add_user_func(project_name, user_full_name, auth_role)
    when chosen.
    :param add_user_func: func Called when this option is chosen: add_user_func(project_name, user_full_name, auth_role).
    """
    description = "Gives user permission to access a remote project."
    add_user_parser = self.subparsers.add_parser('add-user', description=description)
    add_project_name_or_id_arg(add_user_parser, help_text_suffix="add a user to")
    user_or_email = add_user_parser.add_mutually_exclusive_group(required=True)
    add_user_arg(user_or_email)
    add_email_arg(user_or_email)
    _add_auth_role_arg(add_user_parser, default_permissions='project_admin')
    add_user_parser.set_defaults(func=add_user_func)
[ "def", "register_add_user_command", "(", "self", ",", "add_user_func", ")", ":", "description", "=", "\"Gives user permission to access a remote project.\"", "add_user_parser", "=", "self", ".", "subparsers", ".", "add_parser", "(", "'add-user'", ",", "description", "=", "description", ")", "add_project_name_or_id_arg", "(", "add_user_parser", ",", "help_text_suffix", "=", "\"add a user to\"", ")", "user_or_email", "=", "add_user_parser", ".", "add_mutually_exclusive_group", "(", "required", "=", "True", ")", "add_user_arg", "(", "user_or_email", ")", "add_email_arg", "(", "user_or_email", ")", "_add_auth_role_arg", "(", "add_user_parser", ",", "default_permissions", "=", "'project_admin'", ")", "add_user_parser", ".", "set_defaults", "(", "func", "=", "add_user_func", ")" ]
61.642857
29.928571
def string_to_identity(identity_str): """Parse string into Identity dictionary.""" m = _identity_regexp.match(identity_str) result = m.groupdict() log.debug('parsed identity: %s', result) return {k: v for k, v in result.items() if v}
[ "def", "string_to_identity", "(", "identity_str", ")", ":", "m", "=", "_identity_regexp", ".", "match", "(", "identity_str", ")", "result", "=", "m", ".", "groupdict", "(", ")", "log", ".", "debug", "(", "'parsed identity: %s'", ",", "result", ")", "return", "{", "k", ":", "v", "for", "k", ",", "v", "in", "result", ".", "items", "(", ")", "if", "v", "}" ]
41.333333
5.666667
def _addPolylineToElements(self): """Creates a new Polyline element that will be used for future movement/drawing. The old one (if filled) will be stored on the movement stack. """ if (len(self._pointsOfPolyline) > 1): s = '' for point in self._pointsOfPolyline: s += str(point) + ' '#str(point.x) + ',' + str(point.y) + ' ' p = Polyline(s) p.set_style('fill:' + self.fill + '; stroke:' + self.stroke + '; stroke-width:' + self.strokeWidth) self._svgElements.append(p) self._pointsOfPolyline = [] self._pointsOfPolyline.append(Vector(self._position.x, self._position.y))
[ "def", "_addPolylineToElements", "(", "self", ")", ":", "if", "(", "len", "(", "self", ".", "_pointsOfPolyline", ")", ">", "1", ")", ":", "s", "=", "''", "for", "point", "in", "self", ".", "_pointsOfPolyline", ":", "s", "+=", "str", "(", "point", ")", "+", "' '", "#str(point.x) + ',' + str(point.y) + ' '", "p", "=", "Polyline", "(", "s", ")", "p", ".", "set_style", "(", "'fill:'", "+", "self", ".", "fill", "+", "'; stroke:'", "+", "self", ".", "stroke", "+", "'; stroke-width:'", "+", "self", ".", "strokeWidth", ")", "self", ".", "_svgElements", ".", "append", "(", "p", ")", "self", ".", "_pointsOfPolyline", "=", "[", "]", "self", ".", "_pointsOfPolyline", ".", "append", "(", "Vector", "(", "self", ".", "_position", ".", "x", ",", "self", ".", "_position", ".", "y", ")", ")" ]
52.615385
18.461538
def decode(self, X, lengths=None, algorithm=None): """Find most likely state sequence corresponding to ``X``. Parameters ---------- X : array-like, shape (n_samples, n_features) Feature matrix of individual samples. lengths : array-like of integers, shape (n_sequences, ), optional Lengths of the individual sequences in ``X``. The sum of these should be ``n_samples``. algorithm : string Decoder algorithm. Must be one of "viterbi" or "map". If not given, :attr:`decoder` is used. Returns ------- logprob : float Log probability of the produced state sequence. state_sequence : array, shape (n_samples, ) Labels for each sample from ``X`` obtained via a given decoder ``algorithm``. See Also -------- score_samples : Compute the log probability under the model and posteriors. score : Compute the log probability under the model. """ check_is_fitted(self, "startprob_") self._check() algorithm = algorithm or self.algorithm if algorithm not in DECODER_ALGORITHMS: raise ValueError("Unknown decoder {!r}".format(algorithm)) decoder = { "viterbi": self._decode_viterbi, "map": self._decode_map }[algorithm] X = check_array(X) n_samples = X.shape[0] logprob = 0 state_sequence = np.empty(n_samples, dtype=int) for i, j in iter_from_X_lengths(X, lengths): # XXX decoder works on a single sample at a time! logprobij, state_sequenceij = decoder(X[i:j]) logprob += logprobij state_sequence[i:j] = state_sequenceij return logprob, state_sequence
[ "def", "decode", "(", "self", ",", "X", ",", "lengths", "=", "None", ",", "algorithm", "=", "None", ")", ":", "check_is_fitted", "(", "self", ",", "\"startprob_\"", ")", "self", ".", "_check", "(", ")", "algorithm", "=", "algorithm", "or", "self", ".", "algorithm", "if", "algorithm", "not", "in", "DECODER_ALGORITHMS", ":", "raise", "ValueError", "(", "\"Unknown decoder {!r}\"", ".", "format", "(", "algorithm", ")", ")", "decoder", "=", "{", "\"viterbi\"", ":", "self", ".", "_decode_viterbi", ",", "\"map\"", ":", "self", ".", "_decode_map", "}", "[", "algorithm", "]", "X", "=", "check_array", "(", "X", ")", "n_samples", "=", "X", ".", "shape", "[", "0", "]", "logprob", "=", "0", "state_sequence", "=", "np", ".", "empty", "(", "n_samples", ",", "dtype", "=", "int", ")", "for", "i", ",", "j", "in", "iter_from_X_lengths", "(", "X", ",", "lengths", ")", ":", "# XXX decoder works on a single sample at a time!", "logprobij", ",", "state_sequenceij", "=", "decoder", "(", "X", "[", "i", ":", "j", "]", ")", "logprob", "+=", "logprobij", "state_sequence", "[", "i", ":", "j", "]", "=", "state_sequenceij", "return", "logprob", ",", "state_sequence" ]
33.407407
19.981481
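This decode() follows the hmmlearn base-HMM API (check_is_fitted, iter_from_X_lengths), so a hedged usage sketch with a Gaussian HMM might look like the following; the data and hyperparameters are arbitrary.

import numpy as np
from hmmlearn import hmm

X = np.random.randn(300, 2)   # 300 samples, 2 features
lengths = [100, 100, 100]     # three independent sequences

model = hmm.GaussianHMM(n_components=3, n_iter=20)
model.fit(X, lengths)
logprob, states = model.decode(X, lengths=lengths, algorithm="viterbi")
print(logprob, states[:10])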
def pop(self): """ Pop the frame at the top of the stack. @return: The popped frame, else None. @rtype: L{Frame} """ if len(self.stack): popped = self.stack.pop() #log.debug('pop: (%s)\n%s', Repr(popped), Repr(self.stack)) return popped else: #log.debug('stack empty, not-popped') pass return None
[ "def", "pop", "(", "self", ")", ":", "if", "len", "(", "self", ".", "stack", ")", ":", "popped", "=", "self", ".", "stack", ".", "pop", "(", ")", "#log.debug('pop: (%s)\\n%s', Repr(popped), Repr(self.stack))", "return", "popped", "else", ":", "#log.debug('stack empty, not-popped')", "pass", "return", "None" ]
29.571429
13.571429
def _get_warped_mean(self, mean, std, pred_init=None, deg_gauss_hermite=20): """ Calculate the warped mean by using Gauss-Hermite quadrature. """ gh_samples, gh_weights = np.polynomial.hermite.hermgauss(deg_gauss_hermite) gh_samples = gh_samples[:, None] gh_weights = gh_weights[None, :] return gh_weights.dot(self._get_warped_term(mean, std, gh_samples)) / np.sqrt(np.pi)
[ "def", "_get_warped_mean", "(", "self", ",", "mean", ",", "std", ",", "pred_init", "=", "None", ",", "deg_gauss_hermite", "=", "20", ")", ":", "gh_samples", ",", "gh_weights", "=", "np", ".", "polynomial", ".", "hermite", ".", "hermgauss", "(", "deg_gauss_hermite", ")", "gh_samples", "=", "gh_samples", "[", ":", ",", "None", "]", "gh_weights", "=", "gh_weights", "[", "None", ",", ":", "]", "return", "gh_weights", ".", "dot", "(", "self", ".", "_get_warped_term", "(", "mean", ",", "std", ",", "gh_samples", ")", ")", "/", "np", ".", "sqrt", "(", "np", ".", "pi", ")" ]
52.625
19.875
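The quadrature trick used above can be checked in isolation: for x ~ N(mu, sigma^2), E[f(x)] is approximated by (1/sqrt(pi)) * sum_i w_i * f(mu + sqrt(2)*sigma*x_i), where x_i, w_i are Gauss-Hermite nodes and weights. A small standalone check (not the library code itself):

import numpy as np

mu, sigma = 1.0, 0.5
nodes, weights = np.polynomial.hermite.hermgauss(20)

# Approximate E[x**2] for x ~ N(mu, sigma**2); the exact value is mu**2 + sigma**2.
f = lambda x: x ** 2
approx = weights.dot(f(mu + np.sqrt(2.0) * sigma * nodes)) / np.sqrt(np.pi)
print(approx, mu ** 2 + sigma ** 2)   # both ~1.25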
def export_model(self, format, file_name=None): """Save the assembled model in a modeling formalism other than PySB. For more details on exporting PySB models, see http://pysb.readthedocs.io/en/latest/modules/export/index.html Parameters ---------- format : str The format to export into, for instance "kappa", "bngl", "sbml", "matlab", "mathematica", "potterswheel". See http://pysb.readthedocs.io/en/latest/modules/export/index.html for a list of supported formats. In addition to the formats supported by PySB itself, this method also provides "sbgn" output. file_name : Optional[str] An optional file name to save the exported model into. Returns ------- exp_str : str or object The exported model string or object """ # Handle SBGN as special case if format == 'sbgn': exp_str = export_sbgn(self.model) elif format == 'kappa_im': # NOTE: this export is not a str, rather a graph object return export_kappa_im(self.model, file_name) elif format == 'kappa_cm': # NOTE: this export is not a str, rather a graph object return export_kappa_cm(self.model, file_name) else: try: exp_str = pysb.export.export(self.model, format) except KeyError: logging.error('Unknown export format: %s' % format) return None if file_name: with open(file_name, 'wb') as fh: fh.write(exp_str.encode('utf-8')) return exp_str
[ "def", "export_model", "(", "self", ",", "format", ",", "file_name", "=", "None", ")", ":", "# Handle SBGN as special case", "if", "format", "==", "'sbgn'", ":", "exp_str", "=", "export_sbgn", "(", "self", ".", "model", ")", "elif", "format", "==", "'kappa_im'", ":", "# NOTE: this export is not a str, rather a graph object", "return", "export_kappa_im", "(", "self", ".", "model", ",", "file_name", ")", "elif", "format", "==", "'kappa_cm'", ":", "# NOTE: this export is not a str, rather a graph object", "return", "export_kappa_cm", "(", "self", ".", "model", ",", "file_name", ")", "else", ":", "try", ":", "exp_str", "=", "pysb", ".", "export", ".", "export", "(", "self", ".", "model", ",", "format", ")", "except", "KeyError", ":", "logging", ".", "error", "(", "'Unknown export format: %s'", "%", "format", ")", "return", "None", "if", "file_name", ":", "with", "open", "(", "file_name", ",", "'wb'", ")", "as", "fh", ":", "fh", ".", "write", "(", "exp_str", ".", "encode", "(", "'utf-8'", ")", ")", "return", "exp_str" ]
37.840909
20
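A hedged usage sketch for export_model(); `assembler` is a hypothetical instance of the class above with a PySB model already built in assembler.model, and the format behavior in the comments follows the branches shown in the function body.

# Hypothetical: `assembler` already holds a built PySB model in assembler.model.
sbml_str = assembler.export_model('sbml', file_name='model.sbml')    # writes the file and returns the SBML string
kappa_cm = assembler.export_model('kappa_cm', 'contact_map')         # returns a graph object (kappa_cm branch above)
unknown = assembler.export_model('no-such-format')                   # logs an error and returns None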
def prepare_parameters(self, multi_row_parameters): """ Attribute sql parameters with meta data for a prepared statement. Make some basic checks that at least the number of parameters is correct. :param multi_row_parameters: A list/tuple containing list/tuples of parameters (for multiple rows) :returns: A generator producing parameters attributed with meta data for one sql statement (a row) at a time """ self._multi_row_parameters = multi_row_parameters self._num_rows = len(multi_row_parameters) self._iter_row_count = 0 return self
[ "def", "prepare_parameters", "(", "self", ",", "multi_row_parameters", ")", ":", "self", ".", "_multi_row_parameters", "=", "multi_row_parameters", "self", ".", "_num_rows", "=", "len", "(", "multi_row_parameters", ")", "self", ".", "_iter_row_count", "=", "0", "return", "self" ]
60
25
def cli(wio, send):
    '''
    Sends a UDP command to the wio device.

    \b
    DOES:
        Support "VERSION", "SCAN", "Blank?", "DEBUG", "ENDEBUG: 1", "ENDEBUG: 0"
        "APCFG: AP\\tPWDs\\tTOKENs\\tSNs\\tSERVER_Domains\\tXSERVER_Domain\\t\\r\\n",
        Note:
            1. Ensure your device is in Configure Mode.
            2. Change your computer network to Wio's AP.

    \b
    EXAMPLE:
        wio udp --send [command], send UDP command
    '''
    command = send
    click.echo("UDP command: {}".format(command))
    result = udp.common_send(command)
    if result is None:
        return debug_error()
    else:
        click.echo(result)
[ "def", "cli", "(", "wio", ",", "send", ")", ":", "command", "=", "send", "click", ".", "echo", "(", "\"UDP command: {}\"", ".", "format", "(", "command", ")", ")", "result", "=", "udp", ".", "common_send", "(", "command", ")", "if", "result", "is", "None", ":", "return", "debug_error", "(", ")", "else", ":", "click", ".", "echo", "(", "result", ")" ]
27.173913
23.782609
def _options_browser(cfg, ret_config, defaults, virtualname, options): """ Iterator generating all duples ```option name -> value``` @see :func:`get_returner_options` """ for option in options: # default place for the option in the config value = _fetch_option(cfg, ret_config, virtualname, options[option]) if value: yield option, value continue # Attribute not found, check for a default value if defaults: if option in defaults: log.info('Using default for %s %s', virtualname, option) yield option, defaults[option] continue # fallback (implicit else for all ifs) continue
[ "def", "_options_browser", "(", "cfg", ",", "ret_config", ",", "defaults", ",", "virtualname", ",", "options", ")", ":", "for", "option", "in", "options", ":", "# default place for the option in the config", "value", "=", "_fetch_option", "(", "cfg", ",", "ret_config", ",", "virtualname", ",", "options", "[", "option", "]", ")", "if", "value", ":", "yield", "option", ",", "value", "continue", "# Attribute not found, check for a default value", "if", "defaults", ":", "if", "option", "in", "defaults", ":", "log", ".", "info", "(", "'Using default for %s %s'", ",", "virtualname", ",", "option", ")", "yield", "option", ",", "defaults", "[", "option", "]", "continue", "# fallback (implicit else for all ifs)", "continue" ]
28.72
21.36
def lookup(self, nick): """Looks for the most recent paste by a given nick. Returns the uid or None""" query = dict(nick=nick) order = [('time', pymongo.DESCENDING)] recs = self.db.pastes.find(query).sort(order).limit(1) try: return next(recs)['uid'] except StopIteration: pass
[ "def", "lookup", "(", "self", ",", "nick", ")", ":", "query", "=", "dict", "(", "nick", "=", "nick", ")", "order", "=", "[", "(", "'time'", ",", "pymongo", ".", "DESCENDING", ")", "]", "recs", "=", "self", ".", "db", ".", "pastes", ".", "find", "(", "query", ")", ".", "sort", "(", "order", ")", ".", "limit", "(", "1", ")", "try", ":", "return", "next", "(", "recs", ")", "[", "'uid'", "]", "except", "StopIteration", ":", "pass" ]
34.8
12.1
def ignore_import_warnings_for_related_fields(orig_method, self, node): """ Replaces the leave_module method on the VariablesChecker class to prevent unused-import warnings which are caused by the ForeignKey and OneToOneField transformations. By replacing the nodes in the AST with their type rather than the django field, imports of the form 'from django.db.models import OneToOneField' raise an unused-import warning """ consumer = self._to_consume[0] # pylint: disable=W0212 # we can disable this warning ('Access to a protected member _to_consume of a client class') # as it's not actually a client class, but rather, this method is being monkey patched # onto the class and so the access is valid new_things = {} iterat = consumer.to_consume.items if PY3 else consumer.to_consume.iteritems for name, stmts in iterat(): if isinstance(stmts[0], ImportFrom): if any([n[0] in ('ForeignKey', 'OneToOneField') for n in stmts[0].names]): continue new_things[name] = stmts consumer._atomic = ScopeConsumer(new_things, consumer.consumed, consumer.scope_type) # pylint: disable=W0212 self._to_consume = [consumer] # pylint: disable=W0212 return orig_method(self, node)
[ "def", "ignore_import_warnings_for_related_fields", "(", "orig_method", ",", "self", ",", "node", ")", ":", "consumer", "=", "self", ".", "_to_consume", "[", "0", "]", "# pylint: disable=W0212", "# we can disable this warning ('Access to a protected member _to_consume of a client class')", "# as it's not actually a client class, but rather, this method is being monkey patched", "# onto the class and so the access is valid", "new_things", "=", "{", "}", "iterat", "=", "consumer", ".", "to_consume", ".", "items", "if", "PY3", "else", "consumer", ".", "to_consume", ".", "iteritems", "for", "name", ",", "stmts", "in", "iterat", "(", ")", ":", "if", "isinstance", "(", "stmts", "[", "0", "]", ",", "ImportFrom", ")", ":", "if", "any", "(", "[", "n", "[", "0", "]", "in", "(", "'ForeignKey'", ",", "'OneToOneField'", ")", "for", "n", "in", "stmts", "[", "0", "]", ".", "names", "]", ")", ":", "continue", "new_things", "[", "name", "]", "=", "stmts", "consumer", ".", "_atomic", "=", "ScopeConsumer", "(", "new_things", ",", "consumer", ".", "consumed", ",", "consumer", ".", "scope_type", ")", "# pylint: disable=W0212", "self", ".", "_to_consume", "=", "[", "consumer", "]", "# pylint: disable=W0212", "return", "orig_method", "(", "self", ",", "node", ")" ]
46.666667
27.481481
def pulse_train(time, start, duration, repeat_time, end): """ Implements vensim's PULSE TRAIN function In range [-inf, start) returns 0 In range [start + n * repeat_time, start + n * repeat_time + duration) return 1 In range [start + n * repeat_time + duration, start + (n+1) * repeat_time) return 0 """ t = time() if start <= t < end: return 1 if (t - start) % repeat_time < duration else 0 else: return 0
[ "def", "pulse_train", "(", "time", ",", "start", ",", "duration", ",", "repeat_time", ",", "end", ")", ":", "t", "=", "time", "(", ")", "if", "start", "<=", "t", "<", "end", ":", "return", "1", "if", "(", "t", "-", "start", ")", "%", "repeat_time", "<", "duration", "else", "0", "else", ":", "return", "0" ]
37
22.583333
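A quick self-contained check of the PULSE TRAIN semantics described in the docstring above; the function is restated verbatim and the pulse parameters (start=5, width=2, period=10, end=30) are made up for illustration.

def pulse_train(time, start, duration, repeat_time, end):
    t = time()
    if start <= t < end:
        return 1 if (t - start) % repeat_time < duration else 0
    else:
        return 0

for t in (4, 5, 6, 7, 14, 15, 16, 30):
    # pulses of width 2 starting at t=5, repeating every 10, switched off at t=30
    print(t, pulse_train(lambda: t, start=5, duration=2, repeat_time=10, end=30))
# prints 0, 1, 1, 0, 0, 1, 1, 0 for the times above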
def get_storage_info(self, human=False): """ Get storage info :param bool human: whether return human-readable size :return: total and used storage :rtype: dict """ res = self._req_get_storage_info() if human: res['total'] = humanize.naturalsize(res['total'], binary=True) res['used'] = humanize.naturalsize(res['used'], binary=True) return res
[ "def", "get_storage_info", "(", "self", ",", "human", "=", "False", ")", ":", "res", "=", "self", ".", "_req_get_storage_info", "(", ")", "if", "human", ":", "res", "[", "'total'", "]", "=", "humanize", ".", "naturalsize", "(", "res", "[", "'total'", "]", ",", "binary", "=", "True", ")", "res", "[", "'used'", "]", "=", "humanize", ".", "naturalsize", "(", "res", "[", "'used'", "]", ",", "binary", "=", "True", ")", "return", "res" ]
30.642857
17.928571
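The human-readable branch above relies on humanize.naturalsize with binary units; a minimal sketch with made-up byte counts (the real method obtains them from _req_get_storage_info):

import humanize

res = {'total': 16 * 1024 ** 3, 'used': 5 * 1024 ** 3}  # hypothetical byte counts
print(humanize.naturalsize(res['total'], binary=True))  # roughly '16.0 GiB'
print(humanize.naturalsize(res['used'], binary=True))   # roughly '5.0 GiB'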
def delete_view(self, query_criteria=None, uid='_all_users'): ''' a method to delete a view associated with a user design doc :param query_criteria: [optional] dictionary with valid jsonmodel query criteria :param uid: [optional] string with uid of design document to update :return: integer with status of operation an example of how to construct the query_criteria argument: query_criteria = { '.path.to.number': { 'min_value': 4.5 }, '.path.to.string': { 'discrete_values': [ 'pond', 'lake', 'stream', 'brook' ] } } NOTE: only fields specified in the document schema at class initialization can be used as fields in query_criteria. otherwise, an error will be thrown. uid is automatically added to all document schemas at initialization NOTE: the full list of all criteria are found in the reference page for the jsonmodel module as well as the query-rules.json file included in the module. http://collectiveacuity.github.io/jsonModel/reference/#query-criteria NOTE: if a query_criteria is not specified, then the entire user design doc is removed otherwise, the existing design document is updated. ''' # https://developer.couchbase.com/documentation/mobile/1.5/references/sync-gateway/admin-rest-api/index.html#/query/delete__db___design__ddoc_ title = '%s.delete_view' % self.__class__.__name__ # validate inputs input_fields = { 'uid': uid } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # validate inputs if query_criteria: if not self.model: raise ValueError('%s(query_criteria={...}) requires a document_schema.' % title) self.model.query(query_criteria) else: query_criteria = {} if uid != '_all_users' and self.public: raise ValueError('%s(uid="%s") user ids are not applicable for a public bucket.' % (title, uid)) # handle deleting user design doc if not query_criteria: url = self.bucket_url + '/_design/%s' % uid response = requests.delete(url) # catch missing args elif not uid: raise IndexError('%s requires either a uid or query_criteria argument.' % title) # handle removing a view from a design doc else: # determine hash of query criteria import hashlib import json hashed_criteria = hashlib.md5(json.dumps(query_criteria, sort_keys=True).encode('utf-8')).hexdigest() # determine design document to update url = self.bucket_url + '/_design/%s' % uid # remove view from design document and update response = requests.get(url) if response.status_code in (200, 201): design_details = response.json() design_details['views'] = self._clean_views(design_details['views']) if hashed_criteria in design_details['views'].keys(): del design_details['views'][hashed_criteria] if design_details['views']: response = requests.put(url, json=design_details) else: response = requests.delete(url) return response.status_code
[ "def", "delete_view", "(", "self", ",", "query_criteria", "=", "None", ",", "uid", "=", "'_all_users'", ")", ":", "# https://developer.couchbase.com/documentation/mobile/1.5/references/sync-gateway/admin-rest-api/index.html#/query/delete__db___design__ddoc_", "title", "=", "'%s.delete_view'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs", "input_fields", "=", "{", "'uid'", ":", "uid", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "if", "value", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# validate inputs", "if", "query_criteria", ":", "if", "not", "self", ".", "model", ":", "raise", "ValueError", "(", "'%s(query_criteria={...} requires a document_schema.'", "%", "title", ")", "self", ".", "model", ".", "query", "(", "query_criteria", ")", "else", ":", "query_criteria", "=", "{", "}", "if", "uid", "!=", "'_all_users'", "and", "self", ".", "public", ":", "raise", "ValueError", "(", "'%s(uid=\"%s\") user ids are not applicable for a public bucket. % title'", ")", "# handle deleting user design doc", "if", "not", "query_criteria", ":", "url", "=", "self", ".", "bucket_url", "+", "'/_design/%s'", "%", "uid", "response", "=", "requests", ".", "delete", "(", "url", ")", "# catch missing args", "elif", "not", "uid", ":", "raise", "IndexError", "(", "'%s requires either a uid or query_criteria argument.'", "%", "title", ")", "# handle removing a view from a design doc", "else", ":", "# determine hash of query criteria", "import", "hashlib", "import", "json", "hashed_criteria", "=", "hashlib", ".", "md5", "(", "json", ".", "dumps", "(", "query_criteria", ",", "sort_key", "=", "True", ")", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")", "# determine design document to update", "url", "=", "self", ".", "bucket_url", "+", "'/_design/%s'", "%", "uid", "# remove view from design document and update", "response", "=", "requests", ".", "get", "(", "url", ")", "if", "response", ".", "status_code", "in", "(", "200", ",", "201", ")", ":", "design_details", "=", "response", ".", "json", "(", ")", "design_details", "[", "'views'", "]", "=", "self", ".", "_clean_views", "(", "design_details", "[", "'views'", "]", ")", "if", "hashed_criteria", "in", "design_details", "[", "'views'", "]", ".", "keys", "(", ")", ":", "del", "design_details", "[", "'views'", "]", "[", "hashed_criteria", "]", "if", "design_details", "[", "'views'", "]", ":", "response", "=", "requests", ".", "put", "(", "url", ",", "json", "=", "design_details", ")", "else", ":", "response", "=", "requests", ".", "delete", "(", "url", ")", "return", "response", ".", "status_code" ]
40.94382
26.247191
def calc_frip(self, input_bam, input_bed, threads=4): """ Calculate fraction of reads in peaks. A file with a pool of sequencing reads and a file with peak call regions define the operation that will be performed. Thread count for samtools can be specified as well. :param str input_bam: sequencing reads file :param str input_bed: file with called peak regions :param int threads: number of threads samtools may use :return float: fraction of reads in peaks defined in given peaks file """ cmd = self.simple_frip(input_bam, input_bed, threads) return subprocess.check_output(cmd, shell=True)
[ "def", "calc_frip", "(", "self", ",", "input_bam", ",", "input_bed", ",", "threads", "=", "4", ")", ":", "cmd", "=", "self", ".", "simple_frip", "(", "input_bam", ",", "input_bed", ",", "threads", ")", "return", "subprocess", ".", "check_output", "(", "cmd", ".", "split", "(", "\" \"", ")", ",", "shell", "=", "True", ")" ]
46
20.533333
def is_new_namespace_preorder( self, namespace_id_hash, lastblock=None ): """ Given a namespace preorder hash, determine whether or not is is unseen before. """ if lastblock is None: lastblock = self.lastblock preorder = namedb_get_namespace_preorder( self.db, namespace_id_hash, lastblock ) if preorder is not None: return False else: return True
[ "def", "is_new_namespace_preorder", "(", "self", ",", "namespace_id_hash", ",", "lastblock", "=", "None", ")", ":", "if", "lastblock", "is", "None", ":", "lastblock", "=", "self", ".", "lastblock", "preorder", "=", "namedb_get_namespace_preorder", "(", "self", ".", "db", ",", "namespace_id_hash", ",", "lastblock", ")", "if", "preorder", "is", "not", "None", ":", "return", "False", "else", ":", "return", "True" ]
35.916667
20.583333
def _items(self): """Extract a list of (key, value) pairs, suitable for our __init__.""" for name in self.declarations: yield name, self.declarations[name] for subkey, value in self.contexts[name].items(): yield self.join(name, subkey), value
[ "def", "_items", "(", "self", ")", ":", "for", "name", "in", "self", ".", "declarations", ":", "yield", "name", ",", "self", ".", "declarations", "[", "name", "]", "for", "subkey", ",", "value", "in", "self", ".", "contexts", "[", "name", "]", ".", "items", "(", ")", ":", "yield", "self", ".", "join", "(", "name", ",", "subkey", ")", ",", "value" ]
48.833333
10.833333
def syslog(server, enable=True, host=None, admin_username=None, admin_password=None, module=None): ''' Configure syslog remote logging, by default syslog will automatically be enabled if a server is specified. However, if you want to disable syslog you will need to specify a server followed by False CLI Example: .. code-block:: bash salt dell dracr.syslog [SYSLOG IP] [ENABLE/DISABLE] salt dell dracr.syslog 0.0.0.0 False ''' if enable and __execute_cmd('config -g cfgRemoteHosts -o ' 'cfgRhostsSyslogEnable 1', host=host, admin_username=admin_username, admin_password=admin_password, module=None): return __execute_cmd('config -g cfgRemoteHosts -o ' 'cfgRhostsSyslogServer1 {0}'.format(server), host=host, admin_username=admin_username, admin_password=admin_password, module=module) return __execute_cmd('config -g cfgRemoteHosts -o cfgRhostsSyslogEnable 0', host=host, admin_username=admin_username, admin_password=admin_password, module=module)
[ "def", "syslog", "(", "server", ",", "enable", "=", "True", ",", "host", "=", "None", ",", "admin_username", "=", "None", ",", "admin_password", "=", "None", ",", "module", "=", "None", ")", ":", "if", "enable", "and", "__execute_cmd", "(", "'config -g cfgRemoteHosts -o '", "'cfgRhostsSyslogEnable 1'", ",", "host", "=", "host", ",", "admin_username", "=", "admin_username", ",", "admin_password", "=", "admin_password", ",", "module", "=", "None", ")", ":", "return", "__execute_cmd", "(", "'config -g cfgRemoteHosts -o '", "'cfgRhostsSyslogServer1 {0}'", ".", "format", "(", "server", ")", ",", "host", "=", "host", ",", "admin_username", "=", "admin_username", ",", "admin_password", "=", "admin_password", ",", "module", "=", "module", ")", "return", "__execute_cmd", "(", "'config -g cfgRemoteHosts -o cfgRhostsSyslogEnable 0'", ",", "host", "=", "host", ",", "admin_username", "=", "admin_username", ",", "admin_password", "=", "admin_password", ",", "module", "=", "module", ")" ]
43.6875
20.75
def profile_detail(request, username, template_name=userena_settings.USERENA_PROFILE_DETAIL_TEMPLATE, extra_context=None, **kwargs): """ Detailed view of an user. :param username: String of the username of which the profile should be viewed. :param template_name: String representing the template name that should be used to display the profile. :param extra_context: Dictionary of variables which should be supplied to the template. The ``profile`` key is always the current profile. **Context** ``profile`` Instance of the currently viewed ``Profile``. """ user = get_object_or_404(get_user_model(), username__iexact=username) profile = get_user_profile(user=user) if not profile.can_view_profile(request.user): raise PermissionDenied if not extra_context: extra_context = dict() extra_context['profile'] = profile extra_context['hide_email'] = userena_settings.USERENA_HIDE_EMAIL return ExtraContextTemplateView.as_view(template_name=template_name, extra_context=extra_context)(request)
[ "def", "profile_detail", "(", "request", ",", "username", ",", "template_name", "=", "userena_settings", ".", "USERENA_PROFILE_DETAIL_TEMPLATE", ",", "extra_context", "=", "None", ",", "*", "*", "kwargs", ")", ":", "user", "=", "get_object_or_404", "(", "get_user_model", "(", ")", ",", "username__iexact", "=", "username", ")", "profile", "=", "get_user_profile", "(", "user", "=", "user", ")", "if", "not", "profile", ".", "can_view_profile", "(", "request", ".", "user", ")", ":", "raise", "PermissionDenied", "if", "not", "extra_context", ":", "extra_context", "=", "dict", "(", ")", "extra_context", "[", "'profile'", "]", "=", "profile", "extra_context", "[", "'hide_email'", "]", "=", "userena_settings", ".", "USERENA_HIDE_EMAIL", "return", "ExtraContextTemplateView", ".", "as_view", "(", "template_name", "=", "template_name", ",", "extra_context", "=", "extra_context", ")", "(", "request", ")" ]
35.375
21.9375
def from_string(string): """Return the mnemonic represented by the given string. """ mnemonics = { # Arithmetic Instructions "add": ReilMnemonic.ADD, "sub": ReilMnemonic.SUB, "mul": ReilMnemonic.MUL, "div": ReilMnemonic.DIV, "mod": ReilMnemonic.MOD, "bsh": ReilMnemonic.BSH, # Bitwise Instructions "and": ReilMnemonic.AND, "or": ReilMnemonic.OR, "xor": ReilMnemonic.XOR, # Data Transfer Instructions "ldm": ReilMnemonic.LDM, "stm": ReilMnemonic.STM, "str": ReilMnemonic.STR, # Conditional Instructions "bisz": ReilMnemonic.BISZ, "jcc": ReilMnemonic.JCC, # Other Instructions "unkn": ReilMnemonic.UNKN, "undef": ReilMnemonic.UNDEF, "nop": ReilMnemonic.NOP, # Added Instructions "sext": ReilMnemonic.SEXT, "sdiv": ReilMnemonic.SDIV, "smod": ReilMnemonic.SMOD, "smul": ReilMnemonic.SMUL, } return mnemonics[string]
[ "def", "from_string", "(", "string", ")", ":", "mnemonics", "=", "{", "# Arithmetic Instructions", "\"add\"", ":", "ReilMnemonic", ".", "ADD", ",", "\"sub\"", ":", "ReilMnemonic", ".", "SUB", ",", "\"mul\"", ":", "ReilMnemonic", ".", "MUL", ",", "\"div\"", ":", "ReilMnemonic", ".", "DIV", ",", "\"mod\"", ":", "ReilMnemonic", ".", "MOD", ",", "\"bsh\"", ":", "ReilMnemonic", ".", "BSH", ",", "# Bitwise Instructions", "\"and\"", ":", "ReilMnemonic", ".", "AND", ",", "\"or\"", ":", "ReilMnemonic", ".", "OR", ",", "\"xor\"", ":", "ReilMnemonic", ".", "XOR", ",", "# Data Transfer Instructions", "\"ldm\"", ":", "ReilMnemonic", ".", "LDM", ",", "\"stm\"", ":", "ReilMnemonic", ".", "STM", ",", "\"str\"", ":", "ReilMnemonic", ".", "STR", ",", "# Conditional Instructions", "\"bisz\"", ":", "ReilMnemonic", ".", "BISZ", ",", "\"jcc\"", ":", "ReilMnemonic", ".", "JCC", ",", "# Other Instructions", "\"unkn\"", ":", "ReilMnemonic", ".", "UNKN", ",", "\"undef\"", ":", "ReilMnemonic", ".", "UNDEF", ",", "\"nop\"", ":", "ReilMnemonic", ".", "NOP", ",", "# Added Instructions", "\"sext\"", ":", "ReilMnemonic", ".", "SEXT", ",", "\"sdiv\"", ":", "ReilMnemonic", ".", "SDIV", ",", "\"smod\"", ":", "ReilMnemonic", ".", "SMOD", ",", "\"smul\"", ":", "ReilMnemonic", ".", "SMUL", ",", "}", "return", "mnemonics", "[", "string", "]" ]
29.435897
10.410256
def sweepCrossValidation(self): """ sweepCrossValidation() will go through each of the crossvalidation input/targets. The crossValidationCorpus is a list of dictionaries of input/targets referenced by layername. Example: ({"input": [0.0, 0.1], "output": [1.0]}, {"input": [0.5, 0.9], "output": [0.0]}) """ # get learning value and then turn it off oldLearning = self.learning self.learning = 0 tssError = 0.0; totalCorrect = 0; totalCount = 0; totalPCorrect = {} self._cv = True # in cross validation if self.autoCrossValidation: for i in range(len(self.inputs)): set = self.getDataCrossValidation(i) self._sweeping = 1 (error, correct, total, pcorrect) = self.step( **set ) self._sweeping = 0 if self.crossValidationReportLayers != []: (error, correct, total, pcorrect) = self.getError( *self.crossValidationReportLayers ) tssError += error totalCorrect += correct totalCount += total sumMerge(totalPCorrect, pcorrect) else: for set in self.crossValidationCorpus: self._sweeping = 1 (error, correct, total, pcorrect) = self.step( **set ) self._sweeping = 0 if self.crossValidationReportLayers != []: (error, correct, total, pcorrect) = self.getError( *self.crossValidationReportLayers ) tssError += error totalCorrect += correct totalCount += total sumMerge(totalPCorrect, pcorrect) self.learning = oldLearning self._cv = False return (tssError, totalCorrect, totalCount, totalPCorrect)
[ "def", "sweepCrossValidation", "(", "self", ")", ":", "# get learning value and then turn it off", "oldLearning", "=", "self", ".", "learning", "self", ".", "learning", "=", "0", "tssError", "=", "0.0", "totalCorrect", "=", "0", "totalCount", "=", "0", "totalPCorrect", "=", "{", "}", "self", ".", "_cv", "=", "True", "# in cross validation", "if", "self", ".", "autoCrossValidation", ":", "for", "i", "in", "range", "(", "len", "(", "self", ".", "inputs", ")", ")", ":", "set", "=", "self", ".", "getDataCrossValidation", "(", "i", ")", "self", ".", "_sweeping", "=", "1", "(", "error", ",", "correct", ",", "total", ",", "pcorrect", ")", "=", "self", ".", "step", "(", "*", "*", "set", ")", "self", ".", "_sweeping", "=", "0", "if", "self", ".", "crossValidationReportLayers", "!=", "[", "]", ":", "(", "error", ",", "correct", ",", "total", ",", "pcorrect", ")", "=", "self", ".", "getError", "(", "*", "self", ".", "crossValidationReportLayers", ")", "tssError", "+=", "error", "totalCorrect", "+=", "correct", "totalCount", "+=", "total", "sumMerge", "(", "totalPCorrect", ",", "pcorrect", ")", "else", ":", "for", "set", "in", "self", ".", "crossValidationCorpus", ":", "self", ".", "_sweeping", "=", "1", "(", "error", ",", "correct", ",", "total", ",", "pcorrect", ")", "=", "self", ".", "step", "(", "*", "*", "set", ")", "self", ".", "_sweeping", "=", "0", "if", "self", ".", "crossValidationReportLayers", "!=", "[", "]", ":", "(", "error", ",", "correct", ",", "total", ",", "pcorrect", ")", "=", "self", ".", "getError", "(", "*", "self", ".", "crossValidationReportLayers", ")", "tssError", "+=", "error", "totalCorrect", "+=", "correct", "totalCount", "+=", "total", "sumMerge", "(", "totalPCorrect", ",", "pcorrect", ")", "self", ".", "learning", "=", "oldLearning", "self", ".", "_cv", "=", "False", "return", "(", "tssError", ",", "totalCorrect", ",", "totalCount", ",", "totalPCorrect", ")" ]
47.736842
16.578947
def spanning_2d_grid(length): """ Generate a square lattice with auxiliary nodes for spanning detection Parameters ---------- length : int Number of nodes in one dimension, excluding the auxiliary nodes. Returns ------- networkx.Graph A square lattice graph with auxiliary nodes for spanning cluster detection See Also -------- sample_states : spanning cluster detection """ ret = nx.grid_2d_graph(length + 2, length) for i in range(length): # side 0 ret.node[(0, i)]['span'] = 0 ret[(0, i)][(1, i)]['span'] = 0 # side 1 ret.node[(length + 1, i)]['span'] = 1 ret[(length + 1, i)][(length, i)]['span'] = 1 return ret
[ "def", "spanning_2d_grid", "(", "length", ")", ":", "ret", "=", "nx", ".", "grid_2d_graph", "(", "length", "+", "2", ",", "length", ")", "for", "i", "in", "range", "(", "length", ")", ":", "# side 0", "ret", ".", "node", "[", "(", "0", ",", "i", ")", "]", "[", "'span'", "]", "=", "0", "ret", "[", "(", "0", ",", "i", ")", "]", "[", "(", "1", ",", "i", ")", "]", "[", "'span'", "]", "=", "0", "# side 1", "ret", ".", "node", "[", "(", "length", "+", "1", ",", "i", ")", "]", "[", "'span'", "]", "=", "1", "ret", "[", "(", "length", "+", "1", ",", "i", ")", "]", "[", "(", "length", ",", "i", ")", "]", "[", "'span'", "]", "=", "1", "return", "ret" ]
20.571429
24.685714
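The snippet above uses the NetworkX 1.x Graph.node accessor; a hedged sketch of the same construction against the 2.x API (node names and the 'span' attribute follow the original, the grid size is made up):

import networkx as nx

length = 3
g = nx.grid_2d_graph(length + 2, length)
for i in range(length):
    # auxiliary column on side 0
    g.nodes[(0, i)]['span'] = 0
    g[(0, i)][(1, i)]['span'] = 0
    # auxiliary column on side 1
    g.nodes[(length + 1, i)]['span'] = 1
    g[(length + 1, i)][(length, i)]['span'] = 1

print([n for n, d in g.nodes(data=True) if 'span' in d])  # the two auxiliary columns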
def _weight_by_hue(self): """ Returns a list of (hue, ranges, total weight, normalized total weight)-tuples. ColorTheme is made up of (color, range, weight) tuples. For consistency with XML-output in the old Prism format (i.e. <color>s made up of <shade>s) we need a group weight per different hue. The same is true for the swatch() draw method. Hues are grouped as a single unit (e.g. dark red, intense red, weak red) after which the dimensions (rows/columns) are determined. """ grouped = {} weights = [] for clr, rng, weight in self.ranges: h = clr.nearest_hue(primary=False) if h in grouped: ranges, total_weight = grouped[h] ranges.append((clr, rng, weight)) total_weight += weight grouped[h] = (ranges, total_weight) else: grouped[h] = ([(clr, rng, weight)], weight) # Calculate the normalized (0.0-1.0) weight for each hue, # and transform the dictionary to a list. s = 1.0 * sum([w for r, w in grouped.values()]) grouped = [(grouped[h][1], grouped[h][1] / s, h, grouped[h][0]) for h in grouped] grouped.sort() grouped.reverse() return grouped
[ "def", "_weight_by_hue", "(", "self", ")", ":", "grouped", "=", "{", "}", "weights", "=", "[", "]", "for", "clr", ",", "rng", ",", "weight", "in", "self", ".", "ranges", ":", "h", "=", "clr", ".", "nearest_hue", "(", "primary", "=", "False", ")", "if", "grouped", ".", "has_key", "(", "h", ")", ":", "ranges", ",", "total_weight", "=", "grouped", "[", "h", "]", "ranges", ".", "append", "(", "(", "clr", ",", "rng", ",", "weight", ")", ")", "total_weight", "+=", "weight", "grouped", "[", "h", "]", "=", "(", "ranges", ",", "total_weight", ")", "else", ":", "grouped", "[", "h", "]", "=", "(", "[", "(", "clr", ",", "rng", ",", "weight", ")", "]", ",", "weight", ")", "# Calculate the normalized (0.0-1.0) weight for each hue,", "# and transform the dictionary to a list.", "s", "=", "1.0", "*", "sum", "(", "[", "w", "for", "r", ",", "w", "in", "grouped", ".", "values", "(", ")", "]", ")", "grouped", "=", "[", "(", "grouped", "[", "h", "]", "[", "1", "]", ",", "grouped", "[", "h", "]", "[", "1", "]", "/", "s", ",", "h", ",", "grouped", "[", "h", "]", "[", "0", "]", ")", "for", "h", "in", "grouped", "]", "grouped", ".", "sort", "(", ")", "grouped", ".", "reverse", "(", ")", "return", "grouped" ]
39.606061
19.787879
def run(self): """Run find_route_functions_taint_args on each CFG.""" function_cfgs = list() for _ in self.cfg_list: function_cfgs.extend(self.find_route_functions_taint_args()) self.cfg_list.extend(function_cfgs)
[ "def", "run", "(", "self", ")", ":", "function_cfgs", "=", "list", "(", ")", "for", "_", "in", "self", ".", "cfg_list", ":", "function_cfgs", ".", "extend", "(", "self", ".", "find_route_functions_taint_args", "(", ")", ")", "self", ".", "cfg_list", ".", "extend", "(", "function_cfgs", ")" ]
42
13.333333
def list(self, prefix='', delimiter='', filter_function=None, max_results=1, previous_key=''): ''' a method to list keys in the collection :param prefix: string with prefix value to filter results :param delimiter: string with value results must not contain (after prefix) :param filter_function: (positional arguments) function used to filter results :param max_results: integer with maximum number of results to return :param previous_key: string with key in collection to begin search after :return: list of key strings NOTE: each key string can be divided into one or more segments based upon the / characters which occur in the key string as well as its file extension type. if the key string represents a file path, then each directory in the path, the file name and the file extension are all separate indexed values. eg. lab/unittests/1473719695.2165067.json is indexed: [ 'lab', 'unittests', '1473719695.2165067', '.json' ] it is possible to filter the records in the collection according to one or more of these path segments using a filter_function. NOTE: the filter_function must be able to accept an array of positional arguments and return a value that can evaluate to true or false. while searching the records, list produces an array of strings which represent the directory structure in relative path of each key string. if a filter_function is provided, this list of strings is fed to the filter function. if the function evaluates this input and returns a true value the file will be included in the list results. ''' title = '%s.list' % self.__class__.__name__ # validate input input_fields = { 'prefix': prefix, 'delimiter': delimiter, 'max_results': max_results, 'record_key': previous_key } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # construct default response results_list = [] # handle filter function filter if filter_function: # validate filter function try: path_segments = [ 'lab', 'unittests', '1473719695.2165067', '.json' ] filter_function(*path_segments) except: err_msg = '%s(filter_function=%s)' % (title, filter_function.__class__.__name__) raise TypeError('%s must accept positional arguments.' % err_msg) # construct keyword arguments list_kwargs = { 'bucket_name': self.bucket_name, 'prefix': prefix, 'delimiter': delimiter } # determine starting key starting_key = '1' if previous_key: previous_kwargs = {} previous_kwargs.update(**list_kwargs) previous_kwargs['max_results'] = 1 previous_kwargs['starting_key'] = previous_key search_list, next_key = self.s3.list_records(**previous_kwargs) list_kwargs['starting_key'] = next_key # iterate filter over collection import os while starting_key: search_list, starting_key = self.s3.list_records(**list_kwargs) for record in search_list: record_key = record['key'] path_segments = record_key.split(os.sep) if filter_function(*path_segments): results_list.append(record_key) if len(results_list) == max_results: return results_list # handle other filters else: # construct keyword arguments list_kwargs = { 'bucket_name': self.bucket_name, 'prefix': prefix, 'delimiter': delimiter, 'max_results': max_results } # determine starting key if previous_key: previous_kwargs = {} previous_kwargs.update(**list_kwargs) previous_kwargs['max_results'] = 1 previous_kwargs['starting_key'] = previous_key search_list, starting_key = self.s3.list_records(**previous_kwargs) list_kwargs['starting_key'] = starting_key # retrieve results search_list, starting_key = self.s3.list_records(**list_kwargs) # construct result list for record in search_list: results_list.append(record['key']) return results_list
[ "def", "list", "(", "self", ",", "prefix", "=", "''", ",", "delimiter", "=", "''", ",", "filter_function", "=", "None", ",", "max_results", "=", "1", ",", "previous_key", "=", "''", ")", ":", "title", "=", "'%s.list'", "%", "self", ".", "__class__", ".", "__name__", "# validate input", "input_fields", "=", "{", "'prefix'", ":", "prefix", ",", "'delimiter'", ":", "delimiter", ",", "'max_results'", ":", "max_results", ",", "'record_key'", ":", "previous_key", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "if", "value", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# construct default response", "results_list", "=", "[", "]", "# handle filter function filter", "if", "filter_function", ":", "# validate filter function", "try", ":", "path_segments", "=", "[", "'lab'", ",", "'unittests'", ",", "'1473719695.2165067'", ",", "'.json'", "]", "filter_function", "(", "*", "path_segments", ")", "except", ":", "err_msg", "=", "'%s(filter_function=%s)'", "%", "(", "title", ",", "filter_function", ".", "__class__", ".", "__name__", ")", "raise", "TypeError", "(", "'%s must accept positional arguments.'", "%", "err_msg", ")", "# construct keyword arguments", "list_kwargs", "=", "{", "'bucket_name'", ":", "self", ".", "bucket_name", ",", "'prefix'", ":", "prefix", ",", "'delimiter'", ":", "delimiter", "}", "# determine starting key", "starting_key", "=", "'1'", "if", "previous_key", ":", "previous_kwargs", "=", "{", "}", "previous_kwargs", ".", "update", "(", "*", "*", "list_kwargs", ")", "previous_kwargs", "[", "'max_results'", "]", "=", "1", "previous_kwargs", "[", "'starting_key'", "]", "=", "previous_key", "search_list", ",", "next_key", "=", "self", ".", "s3", ".", "list_records", "(", "*", "*", "list_kwargs", ")", "list_kwargs", "[", "'starting_key'", "]", "=", "next_key", "# iterate filter over collection", "import", "os", "while", "starting_key", ":", "search_list", ",", "starting_key", "=", "self", ".", "s3", ".", "list_records", "(", "*", "*", "list_kwargs", ")", "for", "record", "in", "search_list", ":", "record_key", "=", "record", "[", "'key'", "]", "path_segments", "=", "record_key", ".", "split", "(", "os", ".", "sep", ")", "if", "filter_function", "(", "*", "path_segments", ")", ":", "results_list", ".", "append", "(", "record_key", ")", "if", "len", "(", "results_list", ")", "==", "max_results", ":", "return", "results_list", "# handle other filters", "else", ":", "# construct keyword arguments", "list_kwargs", "=", "{", "'bucket_name'", ":", "self", ".", "bucket_name", ",", "'prefix'", ":", "prefix", ",", "'delimiter'", ":", "delimiter", ",", "'max_results'", ":", "max_results", "}", "# determine starting key", "if", "previous_key", ":", "previous_kwargs", "=", "{", "}", "previous_kwargs", ".", "update", "(", "*", "*", "list_kwargs", ")", "previous_kwargs", "[", "'max_results'", "]", "=", "1", "previous_kwargs", "[", "'starting_key'", "]", "=", "previous_key", "search_list", ",", "starting_key", "=", "self", ".", "s3", ".", "list_records", "(", "*", "*", "list_kwargs", ")", "list_kwargs", "[", "'starting_key'", "]", "=", "starting_key", "# retrieve results ", "search_list", ",", "starting_key", "=", "self", ".", "s3", ".", "list_records", "(", "*", "*", "list_kwargs", ")", "# construct result list", "for", "record", "in", "search_list", ":", "results_list", ".", "append", "(", 
"record", "[", "'key'", "]", ")", "return", "results_list" ]
41.848739
22.436975
def read_dependencies(filename): """Read in the dependencies from the virtualenv requirements file. """ dependencies = [] filepath = os.path.join('requirements', filename) with open(filepath, 'r') as stream: for line in stream: package = line.strip().split('#')[0].strip() if package and package.split(' ')[0] != '-r': dependencies.append(package) return dependencies
[ "def", "read_dependencies", "(", "filename", ")", ":", "dependencies", "=", "[", "]", "filepath", "=", "os", ".", "path", ".", "join", "(", "'requirements'", ",", "filename", ")", "with", "open", "(", "filepath", ",", "'r'", ")", "as", "stream", ":", "for", "line", "in", "stream", ":", "package", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "'#'", ")", "[", "0", "]", ".", "strip", "(", ")", "if", "package", "and", "package", ".", "split", "(", "' '", ")", "[", "0", "]", "!=", "'-r'", ":", "dependencies", ".", "append", "(", "package", ")", "return", "dependencies" ]
35.75
12.333333
def inv_std_norm_cdf(x): """ Inverse cumulative standard Gaussian distribution Based on Winitzki, S. (2008) """ z = 2*x -1 ln1z2 = np.log(1-z**2) a = 8*(np.pi -3)/(3*np.pi*(4-np.pi)) b = 2/(np.pi * a) + ln1z2/2 inv_erf = np.sign(z) * np.sqrt( np.sqrt(b**2 - ln1z2/a) - b ) return np.sqrt(2) * inv_erf
[ "def", "inv_std_norm_cdf", "(", "x", ")", ":", "z", "=", "2", "*", "x", "-", "1", "ln1z2", "=", "np", ".", "log", "(", "1", "-", "z", "**", "2", ")", "a", "=", "8", "*", "(", "np", ".", "pi", "-", "3", ")", "/", "(", "3", "*", "np", ".", "pi", "*", "(", "4", "-", "np", ".", "pi", ")", ")", "b", "=", "2", "/", "(", "np", ".", "pi", "*", "a", ")", "+", "ln1z2", "/", "2", "inv_erf", "=", "np", ".", "sign", "(", "z", ")", "*", "np", ".", "sqrt", "(", "np", ".", "sqrt", "(", "b", "**", "2", "-", "ln1z2", "/", "a", ")", "-", "b", ")", "return", "np", ".", "sqrt", "(", "2", ")", "*", "inv_erf" ]
30
10.909091
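A quick numeric sanity check of the Winitzki approximation above, restated verbatim; the reference quantiles are standard values rather than anything from the source:

import numpy as np

def inv_std_norm_cdf(x):
    z = 2*x - 1
    ln1z2 = np.log(1 - z**2)
    a = 8*(np.pi - 3)/(3*np.pi*(4 - np.pi))
    b = 2/(np.pi*a) + ln1z2/2
    inv_erf = np.sign(z)*np.sqrt(np.sqrt(b**2 - ln1z2/a) - b)
    return np.sqrt(2)*inv_erf

print(inv_std_norm_cdf(0.5))    # 0.0
print(inv_std_norm_cdf(0.975))  # close to 1.96 (exact value is ~1.95996)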
def get_font_glyph_data(font): """Return information for each glyph in a font""" from fontbakery.constants import (PlatformID, WindowsEncodingID) font_data = [] try: subtable = font['cmap'].getcmap(PlatformID.WINDOWS, WindowsEncodingID.UNICODE_BMP) if not subtable: # Well... Give it a chance here... # It may be using a different Encoding_ID value subtable = font['cmap'].tables[0] cmap = subtable.cmap except: return None cmap_reversed = dict(zip(cmap.values(), cmap.keys())) for glyph_name in font.getGlyphSet().keys(): if glyph_name in cmap_reversed: uni_glyph = cmap_reversed[glyph_name] contours = glyph_contour_count(font, glyph_name) font_data.append({ 'unicode': uni_glyph, 'name': glyph_name, 'contours': {contours} }) return font_data
[ "def", "get_font_glyph_data", "(", "font", ")", ":", "from", "fontbakery", ".", "constants", "import", "(", "PlatformID", ",", "WindowsEncodingID", ")", "font_data", "=", "[", "]", "try", ":", "subtable", "=", "font", "[", "'cmap'", "]", ".", "getcmap", "(", "PlatformID", ".", "WINDOWS", ",", "WindowsEncodingID", ".", "UNICODE_BMP", ")", "if", "not", "subtable", ":", "# Well... Give it a chance here...", "# It may be using a different Encoding_ID value", "subtable", "=", "font", "[", "'cmap'", "]", ".", "tables", "[", "0", "]", "cmap", "=", "subtable", ".", "cmap", "except", ":", "return", "None", "cmap_reversed", "=", "dict", "(", "zip", "(", "cmap", ".", "values", "(", ")", ",", "cmap", ".", "keys", "(", ")", ")", ")", "for", "glyph_name", "in", "font", ".", "getGlyphSet", "(", ")", ".", "keys", "(", ")", ":", "if", "glyph_name", "in", "cmap_reversed", ":", "uni_glyph", "=", "cmap_reversed", "[", "glyph_name", "]", "contours", "=", "glyph_contour_count", "(", "font", ",", "glyph_name", ")", "font_data", ".", "append", "(", "{", "'unicode'", ":", "uni_glyph", ",", "'name'", ":", "glyph_name", ",", "'contours'", ":", "{", "contours", "}", "}", ")", "return", "font_data" ]
33.2
17.366667
async def install_mediaroom_protocol(responses_callback, box_ip=None): """Install an asyncio protocol to process NOTIFY messages.""" from . import version _LOGGER.debug(version) loop = asyncio.get_event_loop() mediaroom_protocol = MediaroomProtocol(responses_callback, box_ip) sock = create_socket() await loop.create_datagram_endpoint(lambda: mediaroom_protocol, sock=sock) return mediaroom_protocol
[ "async", "def", "install_mediaroom_protocol", "(", "responses_callback", ",", "box_ip", "=", "None", ")", ":", "from", ".", "import", "version", "_LOGGER", ".", "debug", "(", "version", ")", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "mediaroom_protocol", "=", "MediaroomProtocol", "(", "responses_callback", ",", "box_ip", ")", "sock", "=", "create_socket", "(", ")", "await", "loop", ".", "create_datagram_endpoint", "(", "lambda", ":", "mediaroom_protocol", ",", "sock", "=", "sock", ")", "return", "mediaroom_protocol" ]
30.285714
25.5
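The helper above boils down to binding an asyncio DatagramProtocol onto a pre-built socket; a hedged, self-contained sketch of that pattern (the protocol class and port handling here are stand-ins, not the real MediaroomProtocol or create_socket):

import asyncio
import socket

class NotifyProtocol(asyncio.DatagramProtocol):
    def datagram_received(self, data, addr):
        print(addr, data[:60])

async def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(('0.0.0.0', 0))  # any free port, purely for the sketch
    loop = asyncio.get_running_loop()
    transport, protocol = await loop.create_datagram_endpoint(NotifyProtocol, sock=sock)
    transport.close()

asyncio.run(main())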
def add_object(self, object): """Add object to db session. Only for session-centric object-database mappers.""" if object.id is None: object.get_id() self.db.engine.save(object)
[ "def", "add_object", "(", "self", ",", "object", ")", ":", "if", "object", ".", "id", "is", "None", ":", "object", ".", "get_id", "(", ")", "self", ".", "db", ".", "engine", ".", "save", "(", "object", ")" ]
41.8
8
def dist_abs(self, src, tar): """Return the bag distance between two strings. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison Returns ------- int Bag distance Examples -------- >>> cmp = Bag() >>> cmp.dist_abs('cat', 'hat') 1 >>> cmp.dist_abs('Niall', 'Neil') 2 >>> cmp.dist_abs('aluminum', 'Catalan') 5 >>> cmp.dist_abs('ATCG', 'TAGC') 0 >>> cmp.dist_abs('abcdefg', 'hijklm') 7 >>> cmp.dist_abs('abcdefg', 'hijklmno') 8 """ if tar == src: return 0 elif not src: return len(tar) elif not tar: return len(src) src_bag = Counter(src) tar_bag = Counter(tar) return max( sum((src_bag - tar_bag).values()), sum((tar_bag - src_bag).values()), )
[ "def", "dist_abs", "(", "self", ",", "src", ",", "tar", ")", ":", "if", "tar", "==", "src", ":", "return", "0", "elif", "not", "src", ":", "return", "len", "(", "tar", ")", "elif", "not", "tar", ":", "return", "len", "(", "src", ")", "src_bag", "=", "Counter", "(", "src", ")", "tar_bag", "=", "Counter", "(", "tar", ")", "return", "max", "(", "sum", "(", "(", "src_bag", "-", "tar_bag", ")", ".", "values", "(", ")", ")", ",", "sum", "(", "(", "tar_bag", "-", "src_bag", ")", ".", "values", "(", ")", ")", ",", ")" ]
22.111111
19
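The bag distance above reduces to two multiset (Counter) subtractions; a standalone restatement without the class wrapper, reproducing one of the docstring's own examples:

from collections import Counter

def bag_distance(src, tar):
    src_bag, tar_bag = Counter(src), Counter(tar)
    return max(sum((src_bag - tar_bag).values()),
               sum((tar_bag - src_bag).values()))

print(bag_distance('Niall', 'Neil'))  # 2, matching the docstring example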
def convert_type(d, intype, outtype, convert_list=True, in_place=True): """ convert all values of one type to another Parameters ---------- d : dict intype : type_class outtype : type_class convert_list : bool whether to convert instances inside lists and tuples in_place : bool if True, applies conversions to original dict, else returns copy Examples -------- >>> from pprint import pprint >>> d = {'a':'1','b':'2'} >>> pprint(convert_type(d,str,float)) {'a': 1.0, 'b': 2.0} >>> d = {'a':['1','2']} >>> pprint(convert_type(d,str,float)) {'a': [1.0, 2.0]} >>> d = {'a':[('1','2'),[3,4]]} >>> pprint(convert_type(d,str,float)) {'a': [(1.0, 2.0), [3, 4]]} """ if not in_place: out_dict = copy.deepcopy(d) else: out_dict = d def _convert(obj): if isinstance(obj, intype): try: obj = outtype(obj) except Exception: pass elif isinstance(obj, list) and convert_list: obj = _traverse_iter(obj) elif isinstance(obj, tuple) and convert_list: obj = tuple(_traverse_iter(obj)) return obj def _traverse_dict(dic): for key in dic.keys(): if is_dict_like(dic[key]): _traverse_dict(dic[key]) else: dic[key] = _convert(dic[key]) def _traverse_iter(iter): new_iter = [] for key in iter: if is_dict_like(key): _traverse_dict(key) new_iter.append(key) else: new_iter.append(_convert(key)) return new_iter if is_dict_like(out_dict): _traverse_dict(out_dict) else: _convert(out_dict) return out_dict
[ "def", "convert_type", "(", "d", ",", "intype", ",", "outtype", ",", "convert_list", "=", "True", ",", "in_place", "=", "True", ")", ":", "if", "not", "in_place", ":", "out_dict", "=", "copy", ".", "deepcopy", "(", "d", ")", "else", ":", "out_dict", "=", "d", "def", "_convert", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "intype", ")", ":", "try", ":", "obj", "=", "outtype", "(", "obj", ")", "except", "Exception", ":", "pass", "elif", "isinstance", "(", "obj", ",", "list", ")", "and", "convert_list", ":", "obj", "=", "_traverse_iter", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "tuple", ")", "and", "convert_list", ":", "obj", "=", "tuple", "(", "_traverse_iter", "(", "obj", ")", ")", "return", "obj", "def", "_traverse_dict", "(", "dic", ")", ":", "for", "key", "in", "dic", ".", "keys", "(", ")", ":", "if", "is_dict_like", "(", "dic", "[", "key", "]", ")", ":", "_traverse_dict", "(", "dic", "[", "key", "]", ")", "else", ":", "dic", "[", "key", "]", "=", "_convert", "(", "dic", "[", "key", "]", ")", "def", "_traverse_iter", "(", "iter", ")", ":", "new_iter", "=", "[", "]", "for", "key", "in", "iter", ":", "if", "is_dict_like", "(", "key", ")", ":", "_traverse_dict", "(", "key", ")", "new_iter", ".", "append", "(", "key", ")", "else", ":", "new_iter", ".", "append", "(", "_convert", "(", "key", ")", ")", "return", "new_iter", "if", "is_dict_like", "(", "out_dict", ")", ":", "_traverse_dict", "(", "out_dict", ")", "else", ":", "_convert", "(", "out_dict", ")", "return", "out_dict" ]
24.068493
19.054795
def plot(self, title='TimeMoc', view=(None, None)): """ Plot the TimeMoc in a time window. This method uses interactive matplotlib. The user can move its mouse through the plot to see the time (at the mouse position). Parameters ---------- title : str, optional The title of the plot. Set to 'TimeMoc' by default. view : (`~astropy.time.Time`, `~astropy.time.Time`), optional Define the view window in which the observations are plotted. Set to (None, None) by default (i.e. all the observation time window is rendered). """ from matplotlib.colors import LinearSegmentedColormap import matplotlib.pyplot as plt if self._interval_set.empty(): print('Nothing to print. This TimeMoc object is empty.') return plot_order = 15 if self.max_order > plot_order: plotted_moc = self.degrade_to_order(plot_order) else: plotted_moc = self min_jd = plotted_moc.min_time.jd if not view[0] else view[0].jd max_jd = plotted_moc.max_time.jd if not view[1] else view[1].jd if max_jd < min_jd: raise ValueError("Invalid selection: max_jd = {0} must be > to min_jd = {1}".format(max_jd, min_jd)) fig1 = plt.figure(figsize=(9.5, 5)) ax = fig1.add_subplot(111) ax.set_xlabel('iso') ax.get_yaxis().set_visible(False) size = 2000 delta = (max_jd - min_jd) / size min_jd_time = min_jd ax.set_xticks([0, size]) ax.set_xticklabels(Time([min_jd_time, max_jd], format='jd', scale='tdb').iso, rotation=70) y = np.zeros(size) for (s_time_us, e_time_us) in plotted_moc._interval_set._intervals: s_index = int((s_time_us / TimeMOC.DAY_MICRO_SEC - min_jd_time) / delta) e_index = int((e_time_us / TimeMOC.DAY_MICRO_SEC - min_jd_time) / delta) y[s_index:(e_index+1)] = 1.0 # hack in case of full time mocs. if np.all(y): y[0] = 0 z = np.tile(y, (int(size//10), 1)) plt.title(title) color_map = LinearSegmentedColormap.from_list('w2r', ['#fffff0', '#aa0000']) color_map.set_under('w') color_map.set_bad('gray') plt.imshow(z, interpolation='bilinear', cmap=color_map) def on_mouse_motion(event): for txt in ax.texts: txt.set_visible(False) text = ax.text(0, 0, "", va="bottom", ha="left") time = Time(event.xdata * delta + min_jd_time, format='jd', scale='tdb') tx = '{0}'.format(time.iso) text.set_position((event.xdata - 50, 700)) text.set_rotation(70) text.set_text(tx) cid = fig1.canvas.mpl_connect('motion_notify_event', on_mouse_motion) plt.show()
[ "def", "plot", "(", "self", ",", "title", "=", "'TimeMoc'", ",", "view", "=", "(", "None", ",", "None", ")", ")", ":", "from", "matplotlib", ".", "colors", "import", "LinearSegmentedColormap", "import", "matplotlib", ".", "pyplot", "as", "plt", "if", "self", ".", "_interval_set", ".", "empty", "(", ")", ":", "print", "(", "'Nothing to print. This TimeMoc object is empty.'", ")", "return", "plot_order", "=", "15", "if", "self", ".", "max_order", ">", "plot_order", ":", "plotted_moc", "=", "self", ".", "degrade_to_order", "(", "plot_order", ")", "else", ":", "plotted_moc", "=", "self", "min_jd", "=", "plotted_moc", ".", "min_time", ".", "jd", "if", "not", "view", "[", "0", "]", "else", "view", "[", "0", "]", ".", "jd", "max_jd", "=", "plotted_moc", ".", "max_time", ".", "jd", "if", "not", "view", "[", "1", "]", "else", "view", "[", "1", "]", ".", "jd", "if", "max_jd", "<", "min_jd", ":", "raise", "ValueError", "(", "\"Invalid selection: max_jd = {0} must be > to min_jd = {1}\"", ".", "format", "(", "max_jd", ",", "min_jd", ")", ")", "fig1", "=", "plt", ".", "figure", "(", "figsize", "=", "(", "9.5", ",", "5", ")", ")", "ax", "=", "fig1", ".", "add_subplot", "(", "111", ")", "ax", ".", "set_xlabel", "(", "'iso'", ")", "ax", ".", "get_yaxis", "(", ")", ".", "set_visible", "(", "False", ")", "size", "=", "2000", "delta", "=", "(", "max_jd", "-", "min_jd", ")", "/", "size", "min_jd_time", "=", "min_jd", "ax", ".", "set_xticks", "(", "[", "0", ",", "size", "]", ")", "ax", ".", "set_xticklabels", "(", "Time", "(", "[", "min_jd_time", ",", "max_jd", "]", ",", "format", "=", "'jd'", ",", "scale", "=", "'tdb'", ")", ".", "iso", ",", "rotation", "=", "70", ")", "y", "=", "np", ".", "zeros", "(", "size", ")", "for", "(", "s_time_us", ",", "e_time_us", ")", "in", "plotted_moc", ".", "_interval_set", ".", "_intervals", ":", "s_index", "=", "int", "(", "(", "s_time_us", "/", "TimeMOC", ".", "DAY_MICRO_SEC", "-", "min_jd_time", ")", "/", "delta", ")", "e_index", "=", "int", "(", "(", "e_time_us", "/", "TimeMOC", ".", "DAY_MICRO_SEC", "-", "min_jd_time", ")", "/", "delta", ")", "y", "[", "s_index", ":", "(", "e_index", "+", "1", ")", "]", "=", "1.0", "# hack in case of full time mocs.", "if", "np", ".", "all", "(", "y", ")", ":", "y", "[", "0", "]", "=", "0", "z", "=", "np", ".", "tile", "(", "y", ",", "(", "int", "(", "size", "//", "10", ")", ",", "1", ")", ")", "plt", ".", "title", "(", "title", ")", "color_map", "=", "LinearSegmentedColormap", ".", "from_list", "(", "'w2r'", ",", "[", "'#fffff0'", ",", "'#aa0000'", "]", ")", "color_map", ".", "set_under", "(", "'w'", ")", "color_map", ".", "set_bad", "(", "'gray'", ")", "plt", ".", "imshow", "(", "z", ",", "interpolation", "=", "'bilinear'", ",", "cmap", "=", "color_map", ")", "def", "on_mouse_motion", "(", "event", ")", ":", "for", "txt", "in", "ax", ".", "texts", ":", "txt", ".", "set_visible", "(", "False", ")", "text", "=", "ax", ".", "text", "(", "0", ",", "0", ",", "\"\"", ",", "va", "=", "\"bottom\"", ",", "ha", "=", "\"left\"", ")", "time", "=", "Time", "(", "event", ".", "xdata", "*", "delta", "+", "min_jd_time", ",", "format", "=", "'jd'", ",", "scale", "=", "'tdb'", ")", "tx", "=", "'{0}'", ".", "format", "(", "time", ".", "iso", ")", "text", ".", "set_position", "(", "(", "event", ".", "xdata", "-", "50", ",", "700", ")", ")", "text", ".", "set_rotation", "(", "70", ")", "text", ".", "set_text", "(", "tx", ")", "cid", "=", "fig1", ".", "canvas", ".", "mpl_connect", "(", "'motion_notify_event'", ",", "on_mouse_motion", ")", "plt", ".", 
"show", "(", ")" ]
33.571429
24.5
def from_custom_template(cls, searchpath, name): """ Factory function for creating a subclass of ``Styler`` with a custom template and Jinja environment. Parameters ---------- searchpath : str or list Path or paths of directories containing the templates name : str Name of your custom template to use for rendering Returns ------- MyStyler : subclass of Styler Has the correct ``env`` and ``template`` class attributes set. """ loader = ChoiceLoader([ FileSystemLoader(searchpath), cls.loader, ]) class MyStyler(cls): env = Environment(loader=loader) template = env.get_template(name) return MyStyler
[ "def", "from_custom_template", "(", "cls", ",", "searchpath", ",", "name", ")", ":", "loader", "=", "ChoiceLoader", "(", "[", "FileSystemLoader", "(", "searchpath", ")", ",", "cls", ".", "loader", ",", "]", ")", "class", "MyStyler", "(", "cls", ")", ":", "env", "=", "Environment", "(", "loader", "=", "loader", ")", "template", "=", "env", ".", "get_template", "(", "name", ")", "return", "MyStyler" ]
29
18.703704
def _extents(self): """A (left, top, width, height) tuple describing range extents. Note this is normalized to accommodate the various orderings of the corner cells provided on construction, which may be in any of four configurations such as (top-left, bottom-right), (bottom-left, top-right), etc. """ def start_and_size(idx, other_idx): """Return beginning and length of range based on two indexes.""" return min(idx, other_idx), abs(idx - other_idx) + 1 tc, other_tc = self._tc, self._other_tc left, width = start_and_size(tc.col_idx, other_tc.col_idx) top, height = start_and_size(tc.row_idx, other_tc.row_idx) return left, top, width, height
[ "def", "_extents", "(", "self", ")", ":", "def", "start_and_size", "(", "idx", ",", "other_idx", ")", ":", "\"\"\"Return beginning and length of range based on two indexes.\"\"\"", "return", "min", "(", "idx", ",", "other_idx", ")", ",", "abs", "(", "idx", "-", "other_idx", ")", "+", "1", "tc", ",", "other_tc", "=", "self", ".", "_tc", ",", "self", ".", "_other_tc", "left", ",", "width", "=", "start_and_size", "(", "tc", ".", "col_idx", ",", "other_tc", ".", "col_idx", ")", "top", ",", "height", "=", "start_and_size", "(", "tc", ".", "row_idx", ",", "other_tc", ".", "row_idx", ")", "return", "left", ",", "top", ",", "width", ",", "height" ]
41.388889
19.722222
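The corner-order normalization above is easiest to see with the helper in isolation; the indices here are made up:

def start_and_size(idx, other_idx):
    """Return beginning and length of range based on two indexes."""
    return min(idx, other_idx), abs(idx - other_idx) + 1

# (top-left, bottom-right) and (bottom-right, top-left) corners give identical extents
print(start_and_size(2, 5))  # (2, 4)
print(start_and_size(5, 2))  # (2, 4)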
def get_root_path(obj): """ Get file path for object and returns its dirname """ try: filename = os.path.abspath(obj.__globals__['__file__']) except (KeyError, AttributeError): if getattr(obj, '__wrapped__', None): # decorator package has been used in view return get_root_path(obj.__wrapped__) filename = inspect.getfile(obj) return os.path.dirname(filename)
[ "def", "get_root_path", "(", "obj", ")", ":", "try", ":", "filename", "=", "os", ".", "path", ".", "abspath", "(", "obj", ".", "__globals__", "[", "'__file__'", "]", ")", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "if", "getattr", "(", "obj", ",", "'__wrapped__'", ",", "None", ")", ":", "# decorator package has been used in view", "return", "get_root_path", "(", "obj", ".", "__wrapped__", ")", "filename", "=", "inspect", ".", "getfile", "(", "obj", ")", "return", "os", ".", "path", ".", "dirname", "(", "filename", ")" ]
35
9.833333
def rename(self, node): """ Translate a rename node into SQLQuery. :param node: a treebrd node :return: a SQLQuery object for the tree rooted at node """ child_object = self.translate(node.child) from_block = '({child}) AS {name}({attributes})'.format( child=child_object.to_sql(), name=node.name, attributes=', '.join(node.attributes.names)) return self.query(str(node.attributes), from_block=from_block)
[ "def", "rename", "(", "self", ",", "node", ")", ":", "child_object", "=", "self", ".", "translate", "(", "node", ".", "child", ")", "from_block", "=", "'({child}) AS {name}({attributes})'", ".", "format", "(", "child", "=", "child_object", ".", "to_sql", "(", ")", ",", "name", "=", "node", ".", "name", ",", "attributes", "=", "', '", ".", "join", "(", "node", ".", "attributes", ".", "names", ")", ")", "return", "self", ".", "query", "(", "str", "(", "node", ".", "attributes", ")", ",", "from_block", "=", "from_block", ")" ]
43.909091
13.181818
def get_collection(self, url): """ Pages through an object collection from the bitbucket API. Returns an iterator that lazily goes through all the 'values' of all the pages in the collection. """ url = self.BASE_API2 + url while url is not None: response = self.get_data(url) for value in response['values']: yield value url = response.get('next', None)
[ "def", "get_collection", "(", "self", ",", "url", ")", ":", "url", "=", "self", ".", "BASE_API2", "+", "url", "while", "url", "is", "not", "None", ":", "response", "=", "self", ".", "get_data", "(", "url", ")", "for", "value", "in", "response", "[", "'values'", "]", ":", "yield", "value", "url", "=", "response", ".", "get", "(", "'next'", ",", "None", ")" ]
43.6
7.7
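The generator above is a plain cursor-walk over 'values'/'next' pages; a hedged sketch with a stubbed fetcher standing in for the Bitbucket API call (page contents are made up):

def get_collection(get_data, url):
    while url is not None:
        response = get_data(url)
        for value in response['values']:
            yield value
        url = response.get('next', None)

pages = {
    'page1': {'values': [1, 2], 'next': 'page2'},
    'page2': {'values': [3]},  # no 'next' key ends the walk
}
print(list(get_collection(pages.get, 'page1')))  # [1, 2, 3]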
def _TypecheckDecorator(subject=None, **kwargs): """Dispatches type checks based on what the subject is. Functions or methods are annotated directly. If this method is called with keyword arguments only, return a decorator. """ if subject is None: return _TypecheckDecoratorFactory(kwargs) elif inspect.isfunction(subject) or inspect.ismethod(subject): return _TypecheckFunction(subject, {}, 2, None) else: raise TypeError()
[ "def", "_TypecheckDecorator", "(", "subject", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "subject", "is", "None", ":", "return", "_TypecheckDecoratorFactory", "(", "kwargs", ")", "elif", "inspect", ".", "isfunction", "(", "subject", ")", "or", "inspect", ".", "ismethod", "(", "subject", ")", ":", "return", "_TypecheckFunction", "(", "subject", ",", "{", "}", ",", "2", ",", "None", ")", "else", ":", "raise", "TypeError", "(", ")" ]
36.666667
16.666667
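The dispatch above lets one name work both as a bare decorator and as a parameterized decorator factory; a self-contained sketch of that pattern with trivial stand-ins for the _Typecheck* helpers:

import inspect

def typecheck(subject=None, **kwargs):
    if subject is None:
        # used as @typecheck(...): return a decorator that re-enters with the function
        return lambda func: typecheck(func, **kwargs)
    elif inspect.isfunction(subject) or inspect.ismethod(subject):
        # used as @typecheck: annotate the function directly
        subject.checked = True
        return subject
    else:
        raise TypeError()

@typecheck
def f(x):
    return x

@typecheck(strict=True)
def g(x):
    return x

print(f.checked, g.checked)  # True True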
def predict(self, dataset, output_type='cluster_id', verbose=True): """ Return predicted cluster label for instances in the new 'dataset'. K-means predictions are made by assigning each new instance to the closest cluster center. Parameters ---------- dataset : SFrame Dataset of new observations. Must include the features used for model training; additional columns are ignored. output_type : {'cluster_id', 'distance'}, optional Form of the prediction. 'cluster_id' (the default) returns the cluster label assigned to each input instance, while 'distance' returns the Euclidean distance between the instance and its assigned cluster's center. verbose : bool, optional If True, print progress updates to the screen. Returns ------- out : SArray Model predictions. Depending on the specified `output_type`, either the assigned cluster label or the distance of each point to its closest cluster center. The order of the predictions is the same as order of the input data rows. See Also -------- create Examples -------- >>> sf = turicreate.SFrame({ ... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162, ... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020], ... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305, ... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]}) ... >>> model = turicreate.kmeans.create(sf, num_clusters=3) ... >>> sf_new = turicreate.SFrame({'x1': [-5.6584, -1.0167, -9.6181], ... 'x2': [-6.3803, -3.7937, -1.1022]}) >>> clusters = model.predict(sf_new, output_type='cluster_id') >>> print clusters [1, 0, 1] """ ## Validate the input dataset. _tkutl._raise_error_if_not_sframe(dataset, "dataset") _tkutl._raise_error_if_sframe_empty(dataset, "dataset") ## Validate the output type. if not isinstance(output_type, str): raise TypeError("The 'output_type' parameter must be a string.") if not output_type in ('cluster_id', 'distance'): raise ValueError("The 'output_type' parameter must be either " + "'cluster_label' or 'distance'.") ## Get model features. ref_features = self.features sf_features = _tkutl._toolkits_select_columns(dataset, ref_features) ## Compute predictions. opts = {'model': self.__proxy__, 'model_name': self.__name__, 'dataset': sf_features} with _QuietProgress(verbose): result = _tc.extensions._kmeans.predict(opts) sf_result = result['predictions'] if output_type == 'distance': return sf_result['distance'] else: return sf_result['cluster_id']
[ "def", "predict", "(", "self", ",", "dataset", ",", "output_type", "=", "'cluster_id'", ",", "verbose", "=", "True", ")", ":", "## Validate the input dataset.", "_tkutl", ".", "_raise_error_if_not_sframe", "(", "dataset", ",", "\"dataset\"", ")", "_tkutl", ".", "_raise_error_if_sframe_empty", "(", "dataset", ",", "\"dataset\"", ")", "## Validate the output type.", "if", "not", "isinstance", "(", "output_type", ",", "str", ")", ":", "raise", "TypeError", "(", "\"The 'output_type' parameter must be a string.\"", ")", "if", "not", "output_type", "in", "(", "'cluster_id'", ",", "'distance'", ")", ":", "raise", "ValueError", "(", "\"The 'output_type' parameter must be either \"", "+", "\"'cluster_label' or 'distance'.\"", ")", "## Get model features.", "ref_features", "=", "self", ".", "features", "sf_features", "=", "_tkutl", ".", "_toolkits_select_columns", "(", "dataset", ",", "ref_features", ")", "## Compute predictions.", "opts", "=", "{", "'model'", ":", "self", ".", "__proxy__", ",", "'model_name'", ":", "self", ".", "__name__", ",", "'dataset'", ":", "sf_features", "}", "with", "_QuietProgress", "(", "verbose", ")", ":", "result", "=", "_tc", ".", "extensions", ".", "_kmeans", ".", "predict", "(", "opts", ")", "sf_result", "=", "result", "[", "'predictions'", "]", "if", "output_type", "==", "'distance'", ":", "return", "sf_result", "[", "'distance'", "]", "else", ":", "return", "sf_result", "[", "'cluster_id'", "]" ]
38.0375
23.5875
def clear(self):
    """Clear current state."""
    # Adapted from http://stackoverflow.com/a/13103617/1198772
    for i in reversed(list(range(self.extra_keywords_layout.count()))):
        self.extra_keywords_layout.itemAt(i).widget().setParent(None)
    self.widgets_dict = OrderedDict()
[ "def", "clear", "(", "self", ")", ":", "# Adapted from http://stackoverflow.com/a/13103617/1198772", "for", "i", "in", "reversed", "(", "list", "(", "range", "(", "self", ".", "extra_keywords_layout", ".", "count", "(", ")", ")", ")", ")", ":", "self", ".", "extra_keywords_layout", ".", "itemAt", "(", "i", ")", ".", "widget", "(", ")", ".", "setParent", "(", "None", ")", "self", ".", "widgets_dict", "=", "OrderedDict", "(", ")" ]
50.833333
19.833333
def _override_runner(runner):
    """
    Context manager that monkey-patches the `bonobo.run` function with our current command logic.

    :param runner: the callable that will handle the `run()` logic.
    """
    import bonobo

    _get_argument_parser = bonobo.util.environ.get_argument_parser
    _run = bonobo.run

    try:
        # Original get_argument_parser would create or update an argument parser with environment options, but here we
        # already had them parsed so let's patch with something that creates an empty one instead.
        def get_argument_parser(parser=None):
            return parser or argparse.ArgumentParser()

        bonobo.util.environ.get_argument_parser = get_argument_parser
        bonobo.run = runner

        yield runner
    finally:
        # Restore our saved values.
        bonobo.util.environ.get_argument_parser = _get_argument_parser
        bonobo.run = _run
[ "def", "_override_runner", "(", "runner", ")", ":", "import", "bonobo", "_get_argument_parser", "=", "bonobo", ".", "util", ".", "environ", ".", "get_argument_parser", "_run", "=", "bonobo", ".", "run", "try", ":", "# Original get_argument_parser would create or update an argument parser with environment options, but here we", "# already had them parsed so let's patch with something that creates an empty one instead.", "def", "get_argument_parser", "(", "parser", "=", "None", ")", ":", "return", "parser", "or", "argparse", ".", "ArgumentParser", "(", ")", "bonobo", ".", "util", ".", "environ", ".", "get_argument_parser", "=", "get_argument_parser", "bonobo", ".", "run", "=", "runner", "yield", "runner", "finally", ":", "# Restore our saved values.", "bonobo", ".", "util", ".", "environ", ".", "get_argument_parser", "=", "_get_argument_parser", "bonobo", ".", "run", "=", "_run" ]
37
26.916667
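A sketch of how the helper above might be used, assuming it is wrapped with `contextlib.contextmanager` (which the `yield` in its body suggests); the `my_runner` callable is a placeholder:

# Hypothetical runner; stands in for bonobo.run while the patch is active.
def my_runner(graph, *args, **kwargs):
    print('dry run of', graph)

# Assumes _override_runner is decorated with contextlib.contextmanager.
with _override_runner(my_runner):
    import bonobo
    assert bonobo.run is my_runner  # patched inside the block
# After the block exits, bonobo.run and get_argument_parser are restored.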
def post(self, request, bot_id, format=None):
    """
    Add a new state
    ---
    serializer: StateSerializer
    responseMessages:
        - code: 401
          message: Not authenticated
        - code: 400
          message: Not valid request
    """
    return super(StateList, self).post(request, bot_id, format)
[ "def", "post", "(", "self", ",", "request", ",", "bot_id", ",", "format", "=", "None", ")", ":", "return", "super", "(", "StateList", ",", "self", ")", ".", "post", "(", "request", ",", "bot_id", ",", "format", ")" ]
29.5
11
def redispatch(obj, device_type, session_prep=True):
    """Dynamically change Netmiko object's class to proper class.

    Generally used with the terminal_server device_type when you need to
    redispatch after interacting with a terminal server.
    """
    new_class = ssh_dispatcher(device_type)
    obj.device_type = device_type
    obj.__class__ = new_class
    if session_prep:
        obj._try_session_preparation()
[ "def", "redispatch", "(", "obj", ",", "device_type", ",", "session_prep", "=", "True", ")", ":", "new_class", "=", "ssh_dispatcher", "(", "device_type", ")", "obj", ".", "device_type", "=", "device_type", "obj", ".", "__class__", "=", "new_class", "if", "session_prep", ":", "obj", ".", "_try_session_preparation", "(", ")" ]
37.181818
15.181818
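A short sketch of the terminal-server flow described in the docstring above; the host, credentials, and target device_type are placeholders, and it assumes Netmiko's top-level exports:

from netmiko import ConnectHandler, redispatch

# Connect to the terminal server first (placeholder host/credentials).
net_connect = ConnectHandler(
    device_type='terminal_server',
    host='ts.example.com',
    username='admin',
    password='secret',
)

# ... interact with the terminal server to reach the end device ...

# Swap the object's class to the real platform and run session preparation.
redispatch(net_connect, device_type='cisco_ios')
output = net_connect.send_command('show version')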
def INIT(self):
    """INIT state.

    [:rfc:`2131#section-4.4.1`]::

        The client SHOULD wait a random time between one and ten seconds
        to desynchronize the use of DHCP at startup

    .. todo::
        - The initial delay is implemented, but probably is not in other
          implementations. Check what other implementations do.
    """
    # NOTE: in case INIT is reached from other state, initialize attributes
    # reset all variables.
    logger.debug('In state: INIT')
    if self.current_state is not STATE_PREINIT:
        self.reset()
    self.current_state = STATE_INIT
    # NOTE: see previous TODO, maybe this is not needed.
    if self.delay_selecting:
        if self.delay_before_selecting is None:
            delay_before_selecting = gen_delay_selecting()
        else:
            delay_before_selecting = self.delay_before_selecting
    else:
        delay_before_selecting = 0
    self.set_timeout(self.current_state,
                     self.timeout_delay_before_selecting,
                     delay_before_selecting)
    if self.timeout_select is not None:
        self.set_timeout(STATE_SELECTING,
                         self.timeout_selecting,
                         self.timeout_select)
[ "def", "INIT", "(", "self", ")", ":", "# NOTE: in case INIT is reached from other state, initialize attributes", "# reset all variables.", "logger", ".", "debug", "(", "'In state: INIT'", ")", "if", "self", ".", "current_state", "is", "not", "STATE_PREINIT", ":", "self", ".", "reset", "(", ")", "self", ".", "current_state", "=", "STATE_INIT", "# NOTE: see previous TODO, maybe this is not needed.", "if", "self", ".", "delay_selecting", ":", "if", "self", ".", "delay_before_selecting", "is", "None", ":", "delay_before_selecting", "=", "gen_delay_selecting", "(", ")", "else", ":", "delay_before_selecting", "=", "self", ".", "delay_before_selecting", "else", ":", "delay_before_selecting", "=", "0", "self", ".", "set_timeout", "(", "self", ".", "current_state", ",", "self", ".", "timeout_delay_before_selecting", ",", "delay_before_selecting", ")", "if", "self", ".", "timeout_select", "is", "not", "None", ":", "self", ".", "set_timeout", "(", "STATE_SELECTING", ",", "self", ".", "timeout_selecting", ",", "self", ".", "timeout_select", ")" ]
39.939394
17.121212
def update(self, data):
    """Updates the object information based on live data, if there were any changes made. Any changes will be
    automatically applied to the object, but will not be automatically persisted. You must manually call
    `db.session.add(ami)` on the object.

    Args:
        data (bunch): Data fetched from AWS API

    Returns:
        True if there were any changes to the object, else False
    """
    updated = self.set_property('description', data.description)
    updated |= self.set_property('state', data.state)

    tags = {x['Key']: x['Value'] for x in data.tags or {}}
    existing_tags = {x.key: x for x in self.tags}

    # Check for new tags
    for key, value in list(tags.items()):
        updated |= self.set_tag(key, value)

    # Check for updated or removed tags
    for key in list(existing_tags.keys()):
        if key not in tags:
            updated |= self.delete_tag(key)

    return updated
[ "def", "update", "(", "self", ",", "data", ")", ":", "updated", "=", "self", ".", "set_property", "(", "'description'", ",", "data", ".", "description", ")", "updated", "|=", "self", ".", "set_property", "(", "'state'", ",", "data", ".", "state", ")", "tags", "=", "{", "x", "[", "'Key'", "]", ":", "x", "[", "'Value'", "]", "for", "x", "in", "data", ".", "tags", "or", "{", "}", "}", "existing_tags", "=", "{", "x", ".", "key", ":", "x", "for", "x", "in", "self", ".", "tags", "}", "# Check for new tags", "for", "key", ",", "value", "in", "list", "(", "tags", ".", "items", "(", ")", ")", ":", "updated", "|=", "self", ".", "set_tag", "(", "key", ",", "value", ")", "# Check for updated or removed tags", "for", "key", "in", "list", "(", "existing_tags", ".", "keys", "(", ")", ")", ":", "if", "key", "not", "in", "tags", ":", "updated", "|=", "self", ".", "delete_tag", "(", "key", ")", "return", "updated" ]
36.888889
20.962963
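A sketch of the calling pattern implied by the docstring above; the `db`, `ami`, and `data` names are placeholders for the surrounding application's SQLAlchemy handle, resource object, and live AWS data:

# Hypothetical caller: `ami` is an instance of the class defining update(),
# `data` is the object returned by the AWS API, `db` is the app's database handle.
if ami.update(data):
    # Changes were applied to the object but not persisted, so stage them
    # on the session explicitly, as the docstring requires.
    db.session.add(ami)
db.session.commit()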
def get_gene_leaves(graph) -> Iterable[BaseEntity]:
    """Iterate over all genes that have only one connection: a transcription edge to their RNA.

    :param pybel.BELGraph graph: A BEL graph
    """
    for node in get_nodes_by_function(graph, GENE):
        if graph.in_degree(node) != 0:
            continue

        if graph.out_degree(node) != 1:
            continue

        _, _, d = list(graph.out_edges(node, data=True))[0]

        if d[RELATION] == TRANSCRIBED_TO:
            yield node
[ "def", "get_gene_leaves", "(", "graph", ")", "->", "Iterable", "[", "BaseEntity", "]", ":", "for", "node", "in", "get_nodes_by_function", "(", "graph", ",", "GENE", ")", ":", "if", "graph", ".", "in_degree", "(", "node", ")", "!=", "0", ":", "continue", "if", "graph", ".", "out_degree", "(", "node", ")", "!=", "1", ":", "continue", "_", ",", "_", ",", "d", "=", "list", "(", "graph", ".", "out_edges", "(", "node", ",", "data", "=", "True", ")", ")", "[", "0", "]", "if", "d", "[", "RELATION", "]", "==", "TRANSCRIBED_TO", ":", "yield", "node" ]
30.375
16.6875
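A small sketch of consuming the generator above, assuming a populated `pybel.BELGraph`; the graph-loading call and file path are placeholders:

import pybel

# Placeholder path; any pickled BEL graph would do.
graph = pybel.from_pickle('example.gpickle')

# Collect gene nodes whose only edge is a TRANSCRIBED_TO relation.
gene_leaves = list(get_gene_leaves(graph))
print(len(gene_leaves), 'gene leaves found')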