content: string, lengths 35 to 762k
sha1: string, length 40
id: int64, range 0 to 3.66M
import pickle

import numpy as np


def evaluate_single_model(
        model_path, model_index, save_preds_to_db, save_prefix,
        metrics, k_values, X, y, labeled_indices):
    """
    Evaluate a single model with provided model specifications and data.

    Arguments:
        - model_path: path to load the model
        - model_index: index for the model
        - save_preds_to_db: whether or not to save predictions to database
        - save_prefix: string prefix for any tables created
        - metrics: a list of metrics to use
        - k_values: k-values used for computing the metrics
        - X: feature array
        - y: label array
        - labeled_indices: indices of rows that have labels

    Returns:
        - model_index: index for the model
        - model_results: an (M x K) array of model results, for each metric,
          at each k-value
    """
    # Load saved model
    with open(model_path, 'rb') as file:
        model = pickle.load(file)

    # Get predictions
    pred_table_name = f'{save_prefix}_model_{model_index}' if save_preds_to_db else None
    y_preds, probs = get_predictions(model, X, k_values=k_values, pred_table_name=pred_table_name)

    # Filter labels
    y_preds_filtered = y_preds[labeled_indices]
    y_filtered = y.to_numpy(copy=True)[labeled_indices]

    # Calculate metrics for each k value
    model_results = np.zeros((len(metrics), len(k_values)))
    for i, metric in enumerate(metrics):
        for j in range(len(k_values)):
            model_results[i, j] = metric(y_filtered, y_preds_filtered[:, j])

    return model_index, model_results
311589284c46d19e04cd04fd36056e1b53c4bb52
3,655,600
import argparse


def parse_args() -> argparse.Namespace:
    """
    Parser for cli arguments.

    Returns:
        A Namespace containing all parsed data
    """
    # The parser itself
    parser = argparse.ArgumentParser(add_help=False)
    parser.description = "Evaluates single choice sheets"

    # Groups for ordering arguments in help command
    grp_req_excl = parser.add_argument_group("required arguments, mutually exclusive")
    grp_req = parser.add_argument_group("required arguments")
    grp_opt = parser.add_argument_group("optional arguments")

    #########################
    ##### Required Args #####
    #########################

    # Input path - either a URL or a path to a local file
    io_grp = grp_req_excl.add_mutually_exclusive_group(required=True)
    io_grp.add_argument("-u", "--url", dest="url",
                        help="URL to the image or pdf to be evaluated.")
    io_grp.add_argument("-f", "--file", dest="file",
                        help="path to the image or pdf to be evaluated.")

    # Required arg for the number of answers per question
    grp_req.add_argument("-n", "--num", dest="num", required=True,
                         type=_arg_int_pos, help="number of answers per question")

    #########################
    ##### Optional Args #####
    #########################

    # Help message. Added manually so it is shown under optional
    grp_opt.add_argument("-h", "--help", action="help",
                         help="show this help message and exit")
    # Path to store the result picture to
    grp_opt.add_argument("-i", "--iout", dest="iout",
                         help="path for the output picture to be stored.")
    # Path to store the result list to
    grp_opt.add_argument("-d", "--dout", dest="dout",
                         help="path for the output data to be stored.")
    # Path to compare results generated by the program with data stored in a file
    grp_opt.add_argument("-c", "--compare", dest="comp",
                         help="compares the calculated result to a given result")
    # Plotting all steps
    grp_opt.add_argument("-p", "--plot", dest="plot", action="store_true",
                         help="plots every single step")

    return parser.parse_args()
0e65705a421734c7c1deda55cc70a9ff9c7bbde3
3,655,601
import numpy as np


def sigmoid(x: np.ndarray, derivative: bool = False) -> np.ndarray:
    """
    The sigmoid function, given by 1/(1+exp(-x)), where x is a number or
    np vector. If derivative is True, the derivative of the sigmoid
    function is applied instead.

    Examples:
    >>> sigmoid(0)
    0.5
    >>> abs(sigmoid(np.array([100, 30, 10])) - 1) < 0.001
    array([ True,  True,  True])
    >>> abs(sigmoid(-100) - 0) < 0.001
    True
    """
    if derivative:
        return sigmoid(x) * (1 - sigmoid(x))
    return 1 / (1 + np.exp(-x))
7a80b978a9dd8503ba6ec56ce11a5ee9c0564fdb
3,655,602
def create_hierarchy(
    num_samples,
    bundle_size,
    directory_sizes=None,
    root=".",
    start_sample_id=0,
    start_bundle_id=0,
    address="",
    n_digits=1,
):
    """
    SampleIndex Hierarchy Factory method. Wraps
    create_hierarchy_from_max_sample, which is a max_sample-based API,
    not a num_samples-based API like this method.

    :param num_samples: The total number of samples.
    :param bundle_size: The max number of samples a bundle file is responsible for.
    :param directory_sizes: The number of samples each directory is responsible
        for - a list, one value for each level in the directory hierarchy.
    :param root: The root path of this index. Defaults to ".".
    :param start_sample_id: The start of the sample count. Defaults to 0.
    :param n_digits: The number of digits to pad the directories with
    """
    if directory_sizes is None:
        directory_sizes = []
    return create_hierarchy_from_max_sample(
        num_samples + start_sample_id,
        bundle_size,
        directory_sizes=directory_sizes,
        root=root,
        start_bundle_id=start_bundle_id,
        min_sample=start_sample_id,
        address=address,
        n_digits=n_digits,
    )
6d41b995d664eec2c9d6454abfc485c2c4202220
3,655,603
import numpy as np


def eval_sysu(distmat, q_pids, g_pids, q_camids, g_camids, max_rank=20):
    """Evaluation with SYSU metric.
    Key: for each query identity, its gallery images from the same camera view are discarded.
    "Following the original setting in the dataset"
    """
    num_q, num_g = distmat.shape
    if num_g < max_rank:
        max_rank = num_g
        print("Note: number of gallery samples is quite small, got {}".format(num_g))

    indices = np.argsort(distmat, axis=1)
    pred_label = g_pids[indices]
    matches = (g_pids[indices] == q_pids[:, np.newaxis]).astype(np.int32)

    # compute cmc curve for each query
    new_all_cmc = []
    all_cmc = []
    all_AP = []
    all_INP = []
    num_valid_q = 0.  # number of valid query
    for q_idx in range(num_q):
        # get query pid and camid
        q_pid = q_pids[q_idx]
        q_camid = q_camids[q_idx]

        # remove gallery samples that have the same pid and camid with query
        order = indices[q_idx]
        remove = (q_camid == 3) & (g_camids[order] == 2)
        keep = np.invert(remove)

        # compute cmc curve
        # the cmc calculation is different from standard protocol
        # we follow the protocol of the author's released code
        new_cmc = pred_label[q_idx][keep]
        new_index = np.unique(new_cmc, return_index=True)[1]
        new_cmc = [new_cmc[index] for index in sorted(new_index)]
        new_match = (new_cmc == q_pid).astype(np.int32)
        new_cmc = new_match.cumsum()
        new_all_cmc.append(new_cmc[:max_rank])

        orig_cmc = matches[q_idx][keep]  # binary vector, positions with value 1 are correct matches
        if not np.any(orig_cmc):
            # this condition is true when query identity does not appear in gallery
            continue

        cmc = orig_cmc.cumsum()

        # compute mINP
        # reference: Deep Learning for Person Re-identification: A Survey and Outlook
        pos_idx = np.where(orig_cmc == 1)
        pos_max_idx = np.max(pos_idx)
        inp = cmc[pos_max_idx] / (pos_max_idx + 1.0)
        all_INP.append(inp)

        cmc[cmc > 1] = 1
        all_cmc.append(cmc[:max_rank])
        num_valid_q += 1.

        # compute average precision
        # reference: https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Average_precision
        num_rel = orig_cmc.sum()
        tmp_cmc = orig_cmc.cumsum()
        tmp_cmc = [x / (i + 1.) for i, x in enumerate(tmp_cmc)]
        tmp_cmc = np.asarray(tmp_cmc) * orig_cmc
        AP = tmp_cmc.sum() / num_rel
        all_AP.append(AP)

    assert num_valid_q > 0, "Error: all query identities do not appear in gallery"

    all_cmc = np.asarray(all_cmc).astype(np.float32)
    all_cmc = all_cmc.sum(0) / num_valid_q  # standard CMC

    new_all_cmc = np.asarray(new_all_cmc).astype(np.float32)
    new_all_cmc = new_all_cmc.sum(0) / num_valid_q
    mAP = np.mean(all_AP)
    mINP = np.mean(all_INP)
    return new_all_cmc, mAP, mINP
ccf61aa9f91e95cebfd63855aea366cb50de8887
3,655,604
import getpass


def espa_login() -> str:
    """
    Get ESPA password using command-line input

    :return: the ESPA password string
    """
    return getpass.getpass("Enter ESPA password: ")
3ba61567d23ba3771effd6f0aa1a4ac504467378
3,655,605
import numpy as np


def row_up1_array(row, col):
    """This function establishes an array that contains the index of the row
    above each entry."""
    up1_array = np.zeros((row, col), dtype=np.uint8)
    for i in range(row):
        up1_array[i, :] = np.ones(col, dtype=np.uint8) * ((i - 1) % row)
    return up1_array
19cf1e3ceb9fe174c5cc3c6ba2c336fc58412037
3,655,606
from math import gcd


def lcm(a, b):
    """Return lowest common multiple."""
    return a * b // gcd(a, b)
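A quick usage sketch: because lcm takes exactly two arguments, folding it over a list (here with functools.reduce, an illustration rather than part of the original snippet) extends it to several values.

from functools import reduce

assert lcm(4, 6) == 12
# Fold pairwise lcm across a list to get the LCM of several numbers.
assert reduce(lcm, [4, 6, 10]) == 60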
27a7d5af9001015a0aff459af274a45921d2bc94
3,655,607
from typing import Callable

import cv2
import numpy as np


def chl_mean_hsl(weights: np.ndarray) -> Callable[[np.ndarray], np.ndarray]:
    """
    Return a function that calculates the channel-wise weighted average of
    the input picture in the HSL color space.
    """
    return lambda img: np.average(
        cv2.cvtColor(img, cv2.COLOR_BGR2HLS), axis=(0, 1), weights=weights
    )
b5e337fb3bee18762e31aef3d666906975305b4b
3,655,608
def cosine_mrl_option(labels, predicts):
    """For a minibatch of image and sentence embeddings, computes the pairwise contrastive loss"""
    # batch_size, double_n_emd = tensor.shape(predicts)
    # res = tensor.split(predicts, [double_n_emd/2, double_n_emd/2], 2, axis=-1)
    img = l2norm(labels)
    text = l2norm(predicts)
    scores = tensor.dot(img, text.T)
    diagonal = scores.diagonal()

    mrl_margin = 0.3
    loss_max_violation = True

    # caption retrieval (1 + neg - pos)
    cost_s = tensor.maximum(0, mrl_margin + scores - diagonal.reshape((-1, 1)))
    # clear diagonals
    cost_s = fill_diagonal(cost_s, 0)

    # img retrieval
    cost_im = tensor.maximum(0, mrl_margin + scores - diagonal)
    cost_im = fill_diagonal(cost_im, 0)

    if loss_max_violation:
        if cost_s:
            cost_s = tensor.max(cost_s, axis=1)
        if cost_im:
            cost_im = tensor.max(cost_im, axis=0)

    loss = cost_s.mean() + cost_im.mean()
    return loss
e103b1b0075438270e79913bb59b1117da09b51f
3,655,609
def escape_cdata(cdata):
    """Escape a string for an XML CDATA section"""
    return cdata.replace(']]>', ']]>]]&gt;<![CDATA[')
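A minimal sketch of how the escaped text would be embedded; the surrounding script element is illustrative only.

body = escape_cdata('if (a[0]]]> b) {}')
xml = '<script><![CDATA[' + body + ']]></script>'
# The embedded ']]>' can no longer terminate the CDATA section early:
# it is split across a close/reopen of the section.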
c38b934b4c357e8c15fd1f3942f84ca3aaab4ee1
3,655,610
import inspect
import pprint


def _collect_data_for_docstring(func, annotation):
    """
    Collect data to be printed in docstring. The data is collected from
    custom annotation (dictionary passed as a parameter for the decorator)
    and standard Python annotations for the parameters (if any). Data from
    custom annotation always overrides Python parameter annotations.

    Parameters
    ----------
    func: callable
        Reference to the function.
    annotation: dict
        Custom annotation.

    Returns
    -------
    Dictionary of the collected parameters
    """
    signature = inspect.signature(func)
    parameters = signature.parameters
    return_annotation = signature.return_annotation

    doc_params = dict()
    # Description of the function
    doc_params["description"] = annotation.get("description", "")
    # Flag that tells if the function is a generator. Title for returning
    # values for a generator is 'Yields' and for regular functions it is 'Returns'
    doc_params["is_generator"] = inspect.isgeneratorfunction(func)

    doc_params["parameters"] = {}
    if parameters:  # The function may have no parameters
        # We will print names of ALL parameters from the signature
        for p_name, p in parameters.items():
            # Select description, annotation and types from available sources.
            # Annotation (parameter of the wrapper) always overrides Python annotation.
            doc_params["parameters"][p_name] = {}

            kind = p.kind.name
            kind = kind.lower().replace("_", " ")
            doc_params["parameters"][p_name]["kind"] = kind

            desc, an, plans, devices, enums = "", "", {}, {}, {}
            if ("parameters" in annotation) and (p_name in annotation["parameters"]):
                p_an = annotation["parameters"][p_name]
                desc = p_an.get("description", "")
                if "annotation" in p_an:
                    an = p_an["annotation"]
                    # Ignore annotation if it is an empty string. Lists of plans
                    # and devices make no sense, so don't include them.
                    if an:
                        # Now save the lists of plans and devices if any
                        plans = p_an.get("plans", {})
                        devices = p_an.get("devices", {})
                        enums = p_an.get("enums", {})

            if not an and parameters[p_name].annotation != inspect.Parameter.empty:
                an = str(parameters[p_name].annotation)

            doc_params["parameters"][p_name]["annotation"] = _convert_annotation_to_type(an)
            doc_params["parameters"][p_name]["description"] = desc
            doc_params["parameters"][p_name]["plans"] = plans
            doc_params["parameters"][p_name]["devices"] = devices
            doc_params["parameters"][p_name]["enums"] = enums

            if p.default != inspect.Parameter.empty:
                # Print will print strings in quotes (desired behavior)
                v_default = pprint.pformat(p.default)
            else:
                v_default = None
            # If 'v_default' is None, it is not specified, so it should not be printed
            # in the docstring at all
            doc_params["parameters"][p_name]["default"] = v_default

    # Print return value annotation and description. Again the annotation from
    # custom annotation overrides Python annotation.
    doc_params["returns"] = {}
    desc, an = "", ""
    if "returns" in annotation or (return_annotation != inspect.Parameter.empty):
        if "returns" in annotation:
            desc = annotation["returns"].get("description", "")
            an = annotation["returns"].get("annotation", "")
        if not an:
            if return_annotation != inspect.Signature.empty:
                an = str(return_annotation)
    doc_params["returns"]["description"] = desc
    if doc_params["is_generator"]:
        an = _extract_yield_type(an)
    doc_params["returns"]["annotation"] = _convert_annotation_to_type(an)

    return doc_params
32a7ac62506dfc04157c613fa781b3d740a95451
3,655,611
def _strip_unbalanced_punctuation(text, is_open_char, is_close_char):
    """Remove unbalanced punctuation (e.g parentheses or quotes) from text.

    Removes each opening punctuation character for which it can't find a
    corresponding closing character, and vice versa. It can only handle one
    type of punctuation (e.g. it could strip quotes or parentheses but not
    both). It takes functions (is_open_char, is_close_char), instead of the
    characters themselves, so that we can determine from nearby characters
    whether a straight quote is an opening or closing quote.

    Args:
        text (string): the text to fix
        is_open_char: a function that accepts the text and an index,
            and returns true if the character at that index is
            an opening punctuation mark.
        is_close_char: same as is_open_char for closing punctuation mark.

    Returns:
        The text with unmatched punctuation removed.
    """
    # lists of unmatched opening and closing characters
    opening_chars = []
    unmatched_closing_chars = []
    for idx, c in enumerate(text):
        if is_open_char(text, idx):
            opening_chars.append(idx)
        elif is_close_char(text, idx):
            if opening_chars:
                # this matches a character we found earlier
                opening_chars.pop()
            else:
                # this doesn't match any opening character
                unmatched_closing_chars.append(idx)

    char_indices = [i for (i, _) in enumerate(text)
                    if not (i in opening_chars or i in unmatched_closing_chars)]
    stripped_text = "".join([text[i] for i in char_indices])
    return stripped_text
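A short usage sketch with single-character predicates; these lambdas are illustrative stand-ins for the smarter context-aware predicates the docstring alludes to.

is_open = lambda text, i: text[i] == '('
is_close = lambda text, i: text[i] == ')'

print(_strip_unbalanced_punctuation("a (b (c) d", is_open, is_close))
# -> 'a b (c) d': the unmatched '(' is dropped, the balanced pair survives.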
db4b8f201e7b01922e6c06086594a8b73677e2a2
3,655,612
def read(fin, alphabet=None):
    """Read and parse a fasta file.

    Args:
        fin -- A stream or file to read
        alphabet -- The expected alphabet of the data, if given

    Returns:
        SeqList -- A list of sequences

    Raises:
        ValueError -- If the file is unparsable
    """
    seqs = [s for s in iterseq(fin, alphabet)]
    name = None
    if hasattr(fin, "name"):
        name = fin.name
    return SeqList(seqs, name=name)
1ff492ac533a318605569f94ef66036c847b21d5
3,655,613
def get_min_max_value(dfg):
    """
    Gets min and max value assigned to edges in DFG graph

    Parameters
    -----------
    dfg
        Directly follows graph

    Returns
    -----------
    min_value
        Minimum value in directly follows graph
    max_value
        Maximum value in directly follows graph
    """
    min_value = 9999999999
    max_value = -1

    for edge in dfg:
        if dfg[edge] < min_value:
            min_value = dfg[edge]
        if dfg[edge] > max_value:
            max_value = dfg[edge]

    return min_value, max_value
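A small worked example, assuming the DFG is a plain dict mapping (activity, activity) edges to frequencies, which is what the iteration pattern implies.

dfg = {("register", "check"): 12, ("check", "decide"): 7, ("decide", "pay"): 3}
assert get_min_max_value(dfg) == (3, 12)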
17a98350f4e13ec51e72d4357e142ad661e57f54
3,655,614
import os


def process_post(category_id, post_details, user):
    """Check topic is present in Discourse. If it exists then post,
    otherwise create a new topic for the category.
    """
    error = False
    error_message = ''

    # DISCOURSE_DEV_POST_SUFFIX is used to differentiate the same target name
    # from different dev systems in Discourse. It is not intended to be used
    # for production when there is a dedicated Discourse.
    post_details['title'] = post_details['title'] + settings.DISCOURSE_DEV_POST_SUFFIX

    try:
        topic = DiscourseTopic.objects.get(topic_title=post_details['title'])
        topic_id = topic.discourse_topic_id
        if post_details['content'] == '':
            # No content - Return the URL for the topic
            post_url = os.path.join(settings.DISCOURSE_HOST, 't', str(topic_id))
        else:
            # Create post for topic
            error, error_message, null_id, post_url = create_post(user, post_details, topic_id=topic_id)
    except DiscourseTopic.DoesNotExist:
        # Create Topic for Category
        error, error_message, topic_id, post_url = create_post(user, post_details, category_id=category_id)
        if not error:
            DiscourseTopic.objects.create(topic_title=post_details['title'],
                                          author=user,
                                          discourse_topic_id=topic_id)

    return error, error_message, topic_id, post_url
6634d9f3c302e85c5cb0504cc4474364191a14a8
3,655,615
def vgg_fcn(num_classes=1000, pretrained=False, batch_norm=False, **kwargs):
    """VGG 16-layer model (configuration "D")

    Args:
        num_classes (int): the number of classes in the dataset
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        batch_norm (bool): if you want to introduce batch normalization
    """
    if pretrained:
        kwargs['init_weights'] = True
    model = VGG(make_layers(cfg['D'], batch_norm=batch_norm), num_classes, **kwargs)
    if pretrained:
        # loading weights
        # NOTE: the URLs point at the vgg19 checkpoints even though cfg['D']
        # is the 16-layer configuration; strict=False tolerates the mismatch.
        if batch_norm:
            pretrained_weights = model_zoo.load_url(model_urls['vgg19_bn'])
        else:
            pretrained_weights = model_zoo.load_url(model_urls['vgg19'])
        model.load_state_dict(pretrained_weights, strict=False)
    return model
73c1e80e0ffc6aff670394d1b1ec5e2b7d21cf06
3,655,616
from typing import Union

import cv2
import matplotlib.pyplot as plt
import numpy as np
import numpy.typing as npt
from matplotlib.colors import Colormap


def apply_heatmap(
    frame: npt.NDArray[np.uint8],
    cmap: Union[str, Colormap] = "Pastel1",
    normalize: bool = True,
) -> npt.NDArray[np.uint8]:
    """Apply heatmap to an input BGR image.

    Args:
        frame (npt.NDArray[np.uint8])         : Input image (BGR).
        cmap (Union[str, Colormap], optional) : An identifier for color maps. Defaults to ``"Pastel1"``.
        normalize (bool, optional)            : Whether to perform :func:`min-max normalization <veditor.utils.image_utils.min_max_normalization>`. Defaults to ``True``.

    Returns:
        npt.NDArray[np.uint8]: The heatmapped BGR image.

    .. plot::
        :class: popup-img

        >>> import cv2
        >>> import matplotlib.pyplot as plt
        >>> from veditor.utils import cv2plot, SampleData, apply_heatmap
        >>> frame = cv2.imread(SampleData().IMAGE_PATH)
        >>> colormaps = ["Pastel1", "Set1", "tab10", "hsv", "bwr", "Reds"]
        >>> num_methods = len(colormaps)
        >>> ncols = 3; nrows = num_methods//ncols
        >>> fig, axes = plt.subplots(ncols=ncols, nrows=nrows, figsize=(6 * ncols, 4 * nrows))
        >>> for i,cmap in enumerate(colormaps):
        ...     ax = cv2plot(apply_heatmap(frame, cmap=cmap), ax=axes[i%2][i//2])
        ...     ax.set_title(cmap)
        >>> fig.show()
    """
    cmap = plt.get_cmap(cmap)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    if normalize:
        gray = min_max_normalization(gray)
    gray = gray.astype(float) / 255.0
    frame = cv2.cvtColor((255 * cmap(gray)).astype(np.uint8)[:, :, :3], cv2.COLOR_RGB2BGR)
    return frame
6965420744ab27c7d5d2ebf10c518105d9ba8191
3,655,617
import time


def fmt_time(timestamp):
    """Return ISO formatted time from seconds from epoch."""
    if timestamp:
        return time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(timestamp))
    else:
        return '-'
c87f1da7b6a3b1b8d8daf7d85a2b0746be58133b
3,655,618
from itertools import islice


def lislice(iterable, *args):
    """
    (iterable, stop) or (iterable, start, stop[, step])

    >>> lislice('ABCDEFG', 2)
    ['A', 'B']
    >>> lislice('ABCDEFG', 2, 4)
    ['C', 'D']
    >>> lislice('ABCDEFG', 2, None)
    ['C', 'D', 'E', 'F', 'G']
    >>> lislice('ABCDEFG', 0, None, 2)
    ['A', 'C', 'E', 'G']
    """
    return list(islice(iterable, *args))
6c7eb26a9ab5cb913c17f77c2a64929cfc7ebb06
3,655,619
def calculate_transition_cost(number_objs: int, target_storage_class: str) -> float:
    """
    Calculates the cost of transitioning data from one class to another

    Args:
        number_objs: the number of objects that are added on a monthly basis
        target_storage_class: the storage class the objects will reside in
            after they are transitioned

    Returns:
        float, the cost of the transition
    """
    target_storage_class_data = data[target_storage_class]
    transition_cost = (
        number_objs / target_storage_class_data["items_per_transition_chunk"]
    ) * target_storage_class_data["transition_cost"]
    return transition_cost
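A usage sketch; the module-level data pricing table is not shown in the snippet, so the one below is a hypothetical stand-in with the two keys the function reads.

# Hypothetical pricing table in the shape the function expects.
data = {
    "GLACIER": {"items_per_transition_chunk": 1000, "transition_cost": 0.05},
}

# 10,000 objects -> 10 transition chunks at $0.05 each.
print(calculate_transition_cost(10_000, "GLACIER"))  # 0.5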
01ec7d3e7149dadc020ab6f82033a178366c6ebf
3,655,620
import sys


def _exceptionwarning(ui):
    """Produce a warning message for the current active exception"""

    # For compatibility checking, we discard the portion of the hg
    # version after the + on the assumption that if a "normal
    # user" is running a build with a + in it the packager
    # probably built from fairly close to a tag and anyone with a
    # 'make local' copy of hg (where the version number can be out
    # of date) will be clueful enough to notice the implausible
    # version number and try updating.
    ct = util.versiontuple(n=2)
    worst = None, ct, b'', b''
    if ui.config(b'ui', b'supportcontact') is None:
        for name, mod in extensions.extensions():
            # 'testedwith' should be bytes, but not all extensions are ported
            # to py3 and we don't want UnicodeException because of that.
            testedwith = stringutil.forcebytestr(
                getattr(mod, 'testedwith', b'')
            )
            version = extensions.moduleversion(mod)
            report = getattr(mod, 'buglink', _(b'the extension author.'))
            if not testedwith.strip():
                # We found an untested extension. It's likely the culprit.
                worst = name, b'unknown', report, version
                break

            # Never blame on extensions bundled with Mercurial.
            if extensions.ismoduleinternal(mod):
                continue

            tested = [util.versiontuple(t, 2) for t in testedwith.split()]
            if ct in tested:
                continue

            lower = [t for t in tested if t < ct]
            nearest = max(lower or tested)
            if worst[0] is None or nearest < worst[1]:
                worst = name, nearest, report, version
    if worst[0] is not None:
        name, testedwith, report, version = worst
        if not isinstance(testedwith, (bytes, str)):
            testedwith = b'.'.join(
                [stringutil.forcebytestr(c) for c in testedwith]
            )
        extver = version or _(b"(version N/A)")
        warning = _(
            b'** Unknown exception encountered with '
            b'possibly-broken third-party extension "%s" %s\n'
            b'** which supports versions %s of Mercurial.\n'
            b'** Please disable "%s" and try your action again.\n'
            b'** If that fixes the bug please report it to %s\n'
        ) % (name, extver, testedwith, name, stringutil.forcebytestr(report))
    else:
        bugtracker = ui.config(b'ui', b'supportcontact')
        if bugtracker is None:
            bugtracker = _(b"https://mercurial-scm.org/wiki/BugTracker")
        warning = (
            _(
                b"** unknown exception encountered, "
                b"please report by visiting\n** "
            )
            + bugtracker
            + b'\n'
        )
    sysversion = pycompat.sysbytes(sys.version).replace(b'\n', b'')

    def ext_with_ver(x):
        ext = x[0]
        ver = extensions.moduleversion(x[1])
        if ver:
            ext += b' ' + ver
        return ext

    warning += (
        (_(b"** Python %s\n") % sysversion)
        + (_(b"** Mercurial Distributed SCM (version %s)\n") % util.version())
        + (
            _(b"** Extensions loaded: %s\n")
            % b", ".join(
                [ext_with_ver(x) for x in sorted(extensions.extensions())]
            )
        )
    )
    return warning
585e9db21957fdc52bd9d5d80bb977ccc326b6d8
3,655,621
def get_covid():
    """This module sends off a covid notification. You can't get covid from this."""
    covid_data = covid_handler()
    covid_content = Markup("Date: " + str(covid_data["date"]) + ",<br/>Country: " + str(
        covid_data["areaName"]) + ",<br/>New Cases: " + str(
        covid_data["newCasesByPublishDate"]) + ",<br/>Total Cases: " + str(
        covid_data["cumCasesByPublishDate"]))
    # The above formats the covid data, ready to send it off as a notification
    covid_notification = {"title": "Covid Cases", "content": covid_content}
    return covid_notification
0c6e4c8e5df7b7e13212eabe46f8a72a7874fde5
3,655,622
def send_songogram(your_name, artist_first_name, artist_last_name, song_name, number_to_call):
    """
    Function for sending a Songogram.

    :param your_name: string containing the name of the person sending the Songogram.
    :param artist_first_name: string containing the musician's first name.
    :param artist_last_name: string containing the musician's last name.
    :param song_name: string containing the song name.
    :param number_to_call: string of the telephone number to send a Songogram to.
    """
    try:
        lyrics = scrape_lyrics(artist_first_name, artist_last_name, song_name)
        make_call(number_to_call, lyrics, your_name)
        send_text(song_name, artist_first_name + ' ' + artist_last_name, number_to_call, your_name)
        return {'status': 201}
    except Exception:
        return {'status': 400, 'error': 'Bad Request', 'message': 'Unable to process request'}
84e67f7b8b185817596f0fd0173e4cc989616687
3,655,623
def segm_and_cat(sersic_2d_image):
    """fixture for segmentation and catalog"""
    image_mean, image_median, image_stddev = sigma_clipped_stats(sersic_2d_image, sigma=3)

    # Detection threshold
    threshold = image_stddev * 3

    # Define smoothing kernel
    kernel_size = 3
    fwhm = 3

    # Min source size (area)
    npixels = 4 ** 2

    return make_catalog(
        sersic_2d_image,
        threshold=threshold,
        deblend=True,
        kernel_size=kernel_size,
        fwhm=fwhm,
        npixels=npixels,
        contrast=0.00,
        plot=False,
    )
2a6018f7b4c2a1aea946b6744840bd2216352002
3,655,624
from typing import Tuple


def break_word_by_trailing_integer(pname_fid: str) -> Tuple[str, str]:
    """
    Splits a word from its trailing integer.

    Parameters
    ----------
    pname_fid : str
        the DVPRELx term (e.g., A(11), NSM(5))

    Returns
    -------
    word : str
        the leading letter portion
    num : str
        the trailing integer, as a string

    Examples
    --------
    >>> break_word_by_trailing_integer('T11')
    ('T', '11')
    >>> break_word_by_trailing_integer('THETA11')
    ('THETA', '11')
    """
    nums = []
    i = 0
    for i, letter in enumerate(reversed(pname_fid)):
        if letter.isdigit():
            nums.append(letter)
        else:
            break

    num = ''.join(nums[::-1])
    if not num:
        msg = ("pname_fid=%r does not follow the form 'T1', 'T11', 'THETA42' "
               "(letters and a number)" % pname_fid)
        raise SyntaxError(msg)
    word = pname_fid[:-i]
    assert len(word) + len(num) == len(pname_fid), 'word=%r num=%r pname_fid=%r' % (word, num, pname_fid)
    return word, num
e9b9c85b4225269c94918ce1cc2e746d3c74aa5c
3,655,625
import argparse
import os

import numpy as np
import tensorflow as tf


def parse_args():
    """
    Parse command line arguments.

    :return dict: dictionary of parameters
    """
    argparser = argparse.ArgumentParser()

    # training data
    argparser.add_argument('--trainfiles', nargs='*', default=['./data/valid.tfrecords'],
                           help='Data file(s) for training (tfrecord).')
    argparser.add_argument('--testfiles', nargs='*', default=['./data/valid.tfrecords'],
                           help='Data file(s) for validation or evaluation (tfrecord).')

    # input configuration
    argparser.add_argument('--map_pixel_in_meters', type=float, default=0.02,
                           help='The width (and height) of a pixel of the map in meters. '
                                'Defaults to 0.02 for House3D data.')
    argparser.add_argument('--init_particles_distr', type=str, default='tracking',
                           help='Distribution of initial particles. Possible values: tracking / one-room.')
    argparser.add_argument('--init_particles_std', nargs='*', default=["0.3", "0.523599"],
                           help='Standard deviations for generated initial particles for tracking distribution. '
                                'Values: translation std (meters), rotation std (radians)')
    argparser.add_argument('--trajlen', type=int, default=24,
                           help='Length of trajectories.')

    # PF configuration
    argparser.add_argument('--num_particles', type=int, default=30,
                           help='Number of particles in Particle Filter.')
    argparser.add_argument('--transition_std', nargs='*', default=["0.0", "0.0"],
                           help='Standard deviations for transition model. '
                                'Values: translation std (meters), rotation std (radians)')
    argparser.add_argument('--resample', type=str, default='false',
                           help='Resample particles in Particle Filter. Possible values: true / false.')
    argparser.add_argument('--alpha_resample_ratio', type=float, default=1.0,
                           help='Trade-off parameter for soft-resampling in PF-net. '
                                'Only effective if resample == true. Assumes values 0.0 < alpha <= 1.0. '
                                'Alpha equal to 1.0 corresponds to hard-resampling.')

    # training configuration
    argparser.add_argument('--batch_size', type=int, default=24,
                           help='Minibatch size for training.')
    argparser.add_argument('--learningrate', type=float, default=0.0025,
                           help='Initial learning rate for training.')
    argparser.add_argument('--epochs', type=int, default=1,
                           help='Number of epochs for training.')
    argparser.add_argument('--load', type=str, default='',
                           help='Load a previously trained model from a checkpoint file.')
    argparser.add_argument('--seed', type=int, default=42,
                           help='Fix the random seed of numpy and tensorflow.')
    argparser.add_argument('--logpath', type=str, default='./log/',
                           help='Specify path for logs.')
    argparser.add_argument('--gpu_num', type=int, default=0,
                           help='use gpu no. to train')

    params = argparser.parse_args()

    # convert multi-input fields to numpy arrays
    params.transition_std = np.array(params.transition_std, np.float32)
    params.init_particles_std = np.array(params.init_particles_std, np.float32)

    # build initial covariance matrix of particles, in pixels and radians
    particle_std = params.init_particles_std.copy()
    particle_std[0] = particle_std[0] / params.map_pixel_in_meters  # convert meters to pixels
    particle_std2 = np.square(particle_std)  # variance
    params.init_particles_cov = np.diag(particle_std2[(0, 0, 1),])

    # in pixels & radians
    params.transition_std = np.array(
        [params.transition_std[0] / params.map_pixel_in_meters, params.transition_std[1]],
        np.float32)

    # fix seed
    np.random.seed(params.seed)
    tf.random.set_seed(params.seed)

    # use RNN as stateful/non-stateful
    params.stateful = False
    params.return_state = True

    # HACK: hardcoded fix padding for map
    params.global_map_size = (4000, 4000, 1)
    params.window_scaler = 8.0

    # filter out info and warning messages
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    # convert boolean fields
    if params.resample not in ['false', 'true']:
        raise ValueError("resample must be 'true' or 'false'")
    params.resample = (params.resample == 'true')

    gpus = tf.config.experimental.list_physical_devices('GPU')
    assert params.gpu_num < len(gpus)
    if gpus:
        # restrict TF to only use the selected GPU
        try:
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            tf.config.experimental.set_visible_devices(gpus[params.gpu_num], 'GPU')
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        except RuntimeError as e:
            # visible devices must be set before GPUs have been initialized
            print(e)

    return params
12afdc5cfa3acf033d529c6be9765642995946c7
3,655,626
import tensorflow as tf


def preprocess_data(image, label, is_training):
    """CIFAR data preprocessing"""
    image = tf.image.convert_image_dtype(image, tf.float32)

    if is_training:
        crop_padding = 4
        image = tf.pad(image, [[crop_padding, crop_padding],
                               [crop_padding, crop_padding], [0, 0]], 'REFLECT')
        image = tf.image.random_crop(image, [32, 32, 3])
        image = tf.image.random_flip_left_right(image)

        if FLAGS.distort_color:
            image = color_distortion(image, s=1.0)
    else:
        image = tf.image.resize_with_crop_or_pad(image, 32, 32)  # central crop
    return image, label
642f384fbf1aa2f884e64de2edf264890317b258
3,655,627
def load_Counties():
    """
    Use load_country() instead of this function
    """
    # Get data
    # Load data using Pandas
    dfd = {
        'positive': reread_csv(csv_data_file_Global['confirmed_US']),
        'death': reread_csv(csv_data_file_Global['deaths_US']),
    }
    return dfd
098d08f3720b6c6148c51000e6e1512d382adeaf
3,655,628
import parmed as pmd


def extract_parmtop_residue_with_name(filename, resname):
    """
    Extract residue name and atom name/type mapping from input parmtop.

    Parameters
    ----------
    filename: Path
        Filename of the input parmtop.
    resname: str
        Name of the residue to extract.

    Returns
    -------
    tuple
        A dict (key = residue name, value = atom name to type mapping (dict))
        and the loaded topology.
    """
    res_top = pmd.load_file(str(filename))
    extracted_residues = {}
    for atom in res_top:
        # extract only the requested residues
        if atom.residue.name != resname:
            continue
        if atom.residue.name not in extracted_residues:
            extracted_residues[atom.residue.name] = {}
        extracted_residues[atom.residue.name][atom.name] = atom.type
    return extracted_residues, res_top
b2fc9ebc44e0ecd2f33108e7878e5f37d4eead2f
3,655,629
from typing import Iterator
from typing import Any


def issetiterator(object: Iterator[Any]) -> bool:
    """Returns True or False based on whether the given object is a set iterator.

    Parameters
    ----------
    object: Any
        The object to see if it's a set iterator.

    Returns
    -------
    bool
        Whether the given object is a set iterator.
    """
    if not isiterable(object):
        return False
    return isinstance(object, SetIteratorType)
07ecdcc72c62c4ce3d5fb91181cd1bc785d6cb4d
3,655,630
def temporal_discretization(Y, method='ups_downs', kwargs={}):
    """This function acts as a switcher for temporal discretizations and wraps
    all the functions which carry out discretization over time of time-series.

    Parameters
    ----------
    Y : array_like, shape (N, M)
        the signal of each element of the system. M time-series.
    method: str
        method used for performing temporal discretization.
    kwargs: dict
        required arguments for the method chosen.

    Returns
    -------
    Yt : array_like, shape (N, M)
        discretized activation matrix.

    TODO
    ----
    More than one descriptor.
    """
    if method == 'ups_downs':
        Yt = ups_downs_temporal_discretization_matrix(Y, **kwargs)
    else:
        # Fail loudly rather than returning an undefined value
        raise ValueError("Unknown discretization method: %s" % method)

    return Yt
98393c838eed156947ccec931477b2011f5951b9
3,655,631
from mpl_toolkits.mplot3d import Axes3D


def plotModeScatter(pc, xMode=0, yMode=1, zMode=None, pointLabels=None,
                    nTailLabels=3, classes=None):
    """
    Scatter plot mode projections for up to 3 different modes.

    pointLabels is a list of strings corresponding to each shape.
    nTailLabels defines the number of points that are labelled at the tails
    of the distributions; it can be 'all' to label all points. Point labels
    are for 2D plots only.
    """
    xWeights = pc.projectedWeights[xMode]
    yWeights = pc.projectedWeights[yMode]
    colourMap = mpl.cm.gray
    if classes is None:
        c = 'r'
    else:
        c = classes

    if zMode is None:
        fig = plot.figure()
        ax = fig.add_subplot(111)
        plt = ax.scatter(xWeights, yWeights, c=c, marker='o', cmap=colourMap)
        ax.set_title('Scatter: Mode %d vs Mode %d' % (xMode, yMode))
        ax.set_xlabel('Mode %d' % (xMode))
        ax.set_ylabel('Mode %d' % (yMode))

        if pointLabels is not None:
            if nTailLabels == 'all':
                for label, x, y in zip(pointLabels, xWeights, yWeights):
                    plot.annotate(label, xy=(x, y), xytext=(-5, 5),
                                  textcoords='offset points', ha='right', va='bottom',
                                  bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
                                  arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
            elif isinstance(nTailLabels, int):
                # sort weights
                xSortedArgs = scipy.argsort(xWeights)
                ySortedArgs = scipy.argsort(yWeights)

                # label x tails
                for i in xSortedArgs[:nTailLabels]:
                    plot.annotate(pointLabels[i], xy=(xWeights[i], yWeights[i]),
                                  xytext=(-5, 5), textcoords='offset points',
                                  ha='right', va='bottom',
                                  bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
                                  arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
                for i in xSortedArgs[-nTailLabels:]:
                    plot.annotate(pointLabels[i], xy=(xWeights[i], yWeights[i]),
                                  xytext=(-5, 5), textcoords='offset points',
                                  ha='right', va='bottom',
                                  bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
                                  arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))

                # label y tails
                for i in ySortedArgs[:nTailLabels]:
                    plot.annotate(pointLabels[i], xy=(xWeights[i], yWeights[i]),
                                  xytext=(-5, 5), textcoords='offset points',
                                  ha='right', va='bottom',
                                  bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
                                  arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
                for i in ySortedArgs[-nTailLabels:]:
                    plot.annotate(pointLabels[i], xy=(xWeights[i], yWeights[i]),
                                  xytext=(-5, 5), textcoords='offset points',
                                  ha='right', va='bottom',
                                  bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
                                  arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
            else:
                raise ValueError("nTailLabels must be 'all' or an integer")

        plot.show()
    else:
        fig = plot.figure()
        zWeights = pc.projectedWeights[zMode]
        ax = fig.add_subplot(111, projection='3d')
        plt = ax.scatter(xWeights, yWeights, zWeights, c=c, marker='o', cmap=colourMap)
        ax.set_title('3D Scatter')
        ax.set_xlabel('Mode %d' % (xMode))
        ax.set_ylabel('Mode %d' % (yMode))
        ax.set_zlabel('Mode %d' % (zMode))
        plot.show()

    return fig, plt
72bc671d9d4fc0fc8df26965fd4d24d91ab51b72
3,655,632
import re


def cron_worker(request):
    """Parse JSON/request arguments and start ingest for a single date export"""
    request_json = request.get_json(silent=True)
    request_args = request.args

    if request_json and 'image' in request_json:
        image_id = request_json['image']
    elif request_args and 'image' in request_args:
        image_id = request_args['image']
    else:
        abort(400, description='"image" parameter not set')

    # TODO: Add additional image ID format checking
    if not re.match(r'L[TEC]0[4578]_\d{6}_\d{8}', image_id.split('/')[-1], re.I):
        abort(400, description=f'Image ID {image_id} could not be parsed')
    elif not re.match(r'LANDSAT/L[TEC]0[4578]/C0[12]/T1\w+', image_id, re.I):
        abort(400, description=f'Image ID {image_id} could not be parsed')

    if request_json and 'overwrite' in request_json:
        overwrite_flag = request_json['overwrite']
    elif request_args and 'overwrite' in request_args:
        overwrite_flag = request_args['overwrite']
    else:
        overwrite_flag = 'true'
    if overwrite_flag.lower() in ['true', 't']:
        overwrite_flag = True
    elif overwrite_flag.lower() in ['false', 'f']:
        overwrite_flag = False
    else:
        abort(400, description=f'overwrite="{overwrite_flag}" could not be parsed')

    response = tcorr_gridded_asset_ingest(
        image_id=image_id, gee_key_file=GEE_KEY_FILE, overwrite_flag=overwrite_flag)

    return Response(response, mimetype='text/plain')
e4fa2bc92ec85d70e8c2ca7967d792686220204b
3,655,633
import math

import numpy as np


def calculatetm(seq):
    """ Calculate Tm of a target candidate, nearest neighbor model """
    NNlist = chopseq(seq, 2, 1)
    NNtable = ['AA', 'AC', 'AG', 'AT', 'CA', 'CC', 'CG', 'CT',
               'GA', 'GC', 'GG', 'GT', 'TA', 'TC', 'TG', 'TT']
    NNendtable = ['A', 'C', 'G', 'T']
    NNcount = np.zeros(16)
    NNend = np.zeros(4)
    for c, NN in enumerate(NNtable):
        NNcount[c] = NNlist.count(NN)
    for c, NN in enumerate(NNendtable):
        NNend[c] = seq[0].count(NN)

    # numbers below from Sugimoto et al. NAR (1996)
    NNEnthalpy = np.array([-8.0, -9.4, -6.6, -5.6, -8.2, -10.9, -11.8, -6.6,
                           -8.8, -10.5, -10.9, -9.4, -6.6, -8.8, -8.2, -8.0])
    NNEntropy = np.array([-21.9, -25.5, -16.4, -15.2, -21.0, -28.4, -29.0, -16.4,
                          -23.5, -26.4, -28.4, -25.5, -18.4, -23.5, -21.0, -21.9])
    NNendEnthalpy = np.array([.6, .6, .6, .6])
    NNendEntropy = np.array([-9.0, -9.0, -9.0, -9.0])

    sumEnthalpy = np.sum(np.multiply(NNcount, NNEnthalpy)) + np.sum(np.multiply(NNend, NNendEnthalpy))
    sumEntropy = np.sum(np.multiply(NNcount, NNEntropy)) + np.sum(np.multiply(NNend, NNendEntropy))

    Tm = (sumEnthalpy * 1000) / (sumEntropy + (1.9872 * math.log(1e-7))) - 273.15  # oligo concentration: 1e-7 M
    sumSalt = 0.075 + (3.795 * 0.01 ** 0.5)  # monovalent: 0.075 M, bivalent: 0.01 M
    Tm += 16.6 * math.log10(sumSalt)  # salt correction
    Tm -= 0.72 * 20  # formamide correction
    return Tm
f53c1aa09cd335d603c721fa9922d85e2de0f612
3,655,634
def share_article_to_group(user, base_list_id, article_id, group_id, target_list_id):
    """
    @api {post} /user/list/:id/article/:id/share/group/:id/list/:id Share an article to a group list.
    @apiName Share an article into a group list.
    @apiGroup Share

    @apiUse AuthorizationTokenHeader

    @apiUse UnauthorizedAccessError
    @apiUse ResourceDoesNotExist
    """
    app.logger.info('User {} Access {}'.format(user, request.full_path))
    result = MongoUtil.share_article_to_group_list(user, base_list_id, article_id, group_id, target_list_id)
    if isinstance(result, str):
        app.logger.debug(result)
        return ResponseUtil.error_response(result)

    app.logger.info('User {} share article {} to group {}'.format(user, article_id, group_id))
    return jsonify(msg='Success')
7074c43b9c51fb7959a4e835d3bff493b72980cc
3,655,635
def get_data_shape(X_train, X_test, X_val=None):
    """
    Creates, updates and returns data_dict containing metadata of the dataset
    """
    # Creates data_dict
    data_dict = {}

    # Updates data_dict with length of training, test, validation sets
    train_len = len(X_train)
    test_len = len(X_test)
    data_dict.update({'train_len': train_len, 'test_len': test_len})
    if X_val is not None:
        val_len = len(X_val)
        data_dict.update({'val_len': val_len})

    # Updates number of dimensions of data
    no_of_dim = X_train.ndim
    data_dict.update({'no_of_dim': no_of_dim})

    # Updates number of features (, number of channels, width, height)
    if no_of_dim == 2:
        no_of_features = X_train.shape[1]
        data_dict.update({'no_of_features': no_of_features})
    elif no_of_dim == 3:
        channels = X_train.shape[1]
        features_per_c = X_train.shape[2]
        no_of_features = channels * features_per_c
        data_dict.update({'no_of_features': no_of_features,
                          'channels': channels,
                          'features_per_c': features_per_c})
    elif no_of_dim == 4:
        channels = X_train.shape[1]
        height = X_train.shape[2]
        width = X_train.shape[3]
        features_per_c = height * width
        no_of_features = channels * features_per_c
        data_dict.update({'height': height,
                          'width': width,
                          'channels': channels,
                          'features_per_c': features_per_c,
                          'no_of_features': no_of_features})

    return data_dict
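A quick sketch of the 4-D (NCHW) branch:

import numpy as np

X_train = np.zeros((100, 3, 32, 32))  # NCHW image batch
X_test = np.zeros((20, 3, 32, 32))
info = get_data_shape(X_train, X_test)
# {'train_len': 100, 'test_len': 20, 'no_of_dim': 4, 'height': 32,
#  'width': 32, 'channels': 3, 'features_per_c': 1024, 'no_of_features': 3072}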
231a334b625d0bfe6aa6e63b79de2b2226b8e684
3,655,636
def setupAnnotations(context):
    """
    Set up the annotations if they haven't been set up already. The rest of
    the functions in here assume that this has already been set up.
    """
    annotations = IAnnotations(context)
    if FAVBY not in annotations:
        annotations[FAVBY] = PersistentList()

    return annotations
f427c8619452d7143a56d4b881422d01a90ba666
3,655,637
def _get_media(media_types):
    """Helper method to map the media types."""
    get_mapped_media = (lambda x: maps.VIRTUAL_MEDIA_TYPES_MAP[x]
                        if x in maps.VIRTUAL_MEDIA_TYPES_MAP else None)
    return list(map(get_mapped_media, media_types))
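A minimal sketch, assuming a maps.VIRTUAL_MEDIA_TYPES_MAP dict like the hypothetical one below; unknown types map to None rather than raising.

class maps:  # hypothetical stand-in for the real module
    VIRTUAL_MEDIA_TYPES_MAP = {"cd": "CD", "floppy": "Floppy"}

print(_get_media(["cd", "usb"]))  # ['CD', None]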
4dbbcf87c717fca2e1890a5258df023ebbca31c5
3,655,638
import ctypes


def get_int_property(device_t, property):
    """
    Search the given device for the specified integer property

    @param device_t Device to search
    @param property Property name to search for.

    @return Integer value of the property, or None if not found.
    """
    key = cf.CFStringCreateWithCString(
        kCFAllocatorDefault,
        property.encode("mac_roman"),
        kCFStringEncodingMacRoman
    )

    CFContainer = iokit.IORegistryEntryCreateCFProperty(
        device_t,
        key,
        kCFAllocatorDefault,
        0
    )

    number = ctypes.c_uint16()
    if CFContainer:
        output = cf.CFNumberGetValue(CFContainer, 2, ctypes.byref(number))
        return number.value
75bc08117bb838e8070d3ea4d5134dfbeec9576c
3,655,639
def _get_unique_barcode_ids(pb_index, isoseq_mode=False):
    """
    Get a list of sorted, unique fw/rev barcode indices from an index object.
    """
    bc_sel = (pb_index.bcForward != -1) & (pb_index.bcReverse != -1)
    bcFw = pb_index.bcForward[bc_sel]
    bcRev = pb_index.bcReverse[bc_sel]
    bc_ids = sorted(list(set(zip(bcFw, bcRev))))
    if isoseq_mode:
        bc_ids = sorted(list(set([tuple(sorted(bc)) for bc in bc_ids])))
    return bc_ids
bdfb386d26415a7b3f9f16661d83a38a63958ad0
3,655,640
def clean_logs(test_yaml, args):
    """Remove the test log files on each test host.

    Args:
        test_yaml (str): yaml file containing host names
        args (argparse.Namespace): command line arguments for this program
    """
    # Use the default server yaml and then the test yaml to update the default
    # DAOS log file locations. This should simulate how the test defines which
    # log files it will use when it is run.
    log_files = get_log_files(test_yaml, get_log_files(BASE_LOG_FILE_YAML))
    host_list = get_hosts_from_yaml(test_yaml, args)
    command = "sudo rm -fr {}".format(" ".join(log_files.values()))
    print("Cleaning logs on {}".format(host_list))
    if not spawn_commands(host_list, command):
        print("Error cleaning logs, aborting")
        return False

    return True
229f34615dc9a6f7ab9c484b9585151814656a77
3,655,641
import numpy as np


def call_posterior_haplotypes(posteriors, threshold=0.01):
    """Call haplotype alleles for VCF output from a population
    of genotype posterior distributions.

    Parameters
    ----------
    posteriors : list, PosteriorGenotypeDistribution
        A list of individual genotype posteriors.
    threshold : float
        Minimum required posterior probability of occurrence
        within any individual for a haplotype to be included.

    Returns
    -------
    haplotypes : ndarray, int, shape (n_haplotypes, n_base)
        VCF sorted haplotype arrays.
    """
    # maps of bytes to arrays and bytes to sum probs
    haplotype_arrays = {}
    haplotype_values = {}

    # iterate through genotype posteriors
    for post in posteriors:
        # include haps based on probability of occurrence
        haps, probs = post.allele_occurrence()
        _, weights = post.allele_frequencies(dosage=True)
        idx = probs >= threshold

        # order haps based on weighted prob
        haps = haps[idx]
        weights = weights[idx]

        for h, w in zip(haps, weights):
            b = h.tobytes()
            if b not in haplotype_arrays:
                haplotype_arrays[b] = h
                haplotype_values[b] = 0
            haplotype_values[b] += w

    # remove reference allele if present
    refbytes = None
    for b, h in haplotype_arrays.items():
        if np.all(h == 0):
            # ref allele
            refbytes = b
    if refbytes is not None:
        haplotype_arrays.pop(refbytes)
        haplotype_values.pop(refbytes)

    # combine all called haplotypes into array
    n_alleles = len(haplotype_arrays) + 1
    n_base = posteriors[0].genotypes.shape[-1]
    haplotypes = np.full((n_alleles, n_base), -1, np.int8)
    values = np.full(n_alleles, -1, float)
    for i, (b, h) in enumerate(haplotype_arrays.items()):
        p = haplotype_values[b]
        haplotypes[i] = h
        values[i] = p
    haplotypes[-1][:] = 0  # ref allele
    values[-1] = values.max() + 1
    order = np.flip(np.argsort(values))
    return haplotypes[order]
46c26eb38c693d979ea4234af606b3b07ad1e75e
3,655,642
import numpy as np


def to_linprog(x, y, xy_dist) -> LinProg:
    """
    Parameters
    ----------
    x : ndarray
        1-dimensional array of weights
    y : ndarray
        1-dimensional array of weights
    xy_dist : ndarray
        2-dimensional array containing distances between x and y density
        coordinates

    Returns
    -------
    LinProg

    This was sometimes flaking out when called with single-precision matrices
    because of numerical instability in the scipy _presolve step when
    eliminating redundant constraints, so ensure sufficient precision.

    TODO: use sparse A_eq, A_ub matrices
    """
    # constant used in scipy.optimize._remove_redundancy
    tol = 1e-8
    assert np.abs(x.sum() - y.sum()) < tol, "x and y must be close to avoid instability"
    assert xy_dist.shape[0] == x.shape[0]
    assert xy_dist.shape[1] == y.shape[0]
    x_dim = x.shape[0]
    y_dim = y.shape[0]
    c = xy_dist.flatten()
    A_eq = []
    b_eq = []
    for i in range(x_dim):
        constraint = np.zeros(xy_dist.shape)
        constraint[i] = 1.0
        A_eq.append(constraint.flatten())
        b_eq.append(x[i])
    for i in range(y_dim):
        constraint = np.zeros(xy_dist.shape)
        constraint[:, i] = 1.0
        A_eq.append(constraint.flatten())
        b_eq.append(y[i])
    A_ub = np.diag(-np.ones(x_dim * y_dim))
    b_ub = np.zeros(x_dim * y_dim)
    return LinProg(c=c, A_ub=A_ub, b_ub=b_ub, A_eq=np.array(A_eq), b_eq=np.array(b_eq))
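A usage sketch, assuming LinProg is a simple namedtuple-style container with the five fields the return statement populates; the result feeds straight into scipy.optimize.linprog.

import numpy as np
from scipy.optimize import linprog

# Two tiny densities with equal mass and a 2x2 cost matrix.
x = np.array([0.5, 0.5])
y = np.array([0.25, 0.75])
xy_dist = np.array([[0.0, 1.0], [1.0, 0.0]])

lp = to_linprog(x, y, xy_dist)
res = linprog(lp.c, A_ub=lp.A_ub, b_ub=lp.b_ub, A_eq=lp.A_eq, b_eq=lp.b_eq)
print(res.fun)  # 0.25: a quarter of the total mass moves at unit cost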
e792e6b701216cb232dcb823c6d32386d87e3be8
3,655,643
def get_discorded_labels():
    """
    Get videos with citizen discorded labels
    Partial labels will only be set by citizens
    """
    return get_video_labels(discorded_labels)
6f3cbaf09b43956d14d9abf5cf4e77734c152d2f
3,655,644
def set_common_tags(span: object, result: object):
    """Function used to set a series of common tags to a span object"""
    if not isinstance(result, dict):
        return span
    for key, val in result.items():
        if key.lower() in common_tags:
            span.set_tag(key, val)
    return span
365230fb6a69b94684aeac25d14fa4275c1549f8
3,655,645
import time


def local_timezone():
    """
    Returns:
        (str): Name of current local timezone
    """
    try:
        return time.tzname[0]
    except (IndexError, TypeError):
        return ""
c97c11582b27d8aa0205555535616d6ea11775b9
3,655,646
import getpass


def ask_credentials():
    """Interactive function asking the user for ASF credentials

    :return: tuple of username and password
    :rtype: tuple
    """
    # ASF/NASA Earthdata account details (asked at execution time)
    print(
        " If you do not have an ASF/NASA Earthdata user account"
        " go to: https://search.asf.alaska.edu/ and register"
    )
    uname = input(" Your ASF/NASA Earthdata Username:")
    pword = getpass.getpass(" Your ASF/NASA Earthdata Password:")

    return uname, pword
a601a460b3aeddf9939f3acf267e58fdaf9ed7cd
3,655,647
def lab2lch(lab):
    """CIE-LAB to CIE-LCH color space conversion.

    LCH is the cylindrical representation of the LAB (Cartesian) colorspace.

    Parameters
    ----------
    lab : array_like
        The N-D image in CIE-LAB format. The last (``N+1``-th) dimension must
        have at least 3 elements, corresponding to the ``L``, ``a``, and ``b``
        color channels. Subsequent elements are copied.

    Returns
    -------
    out : ndarray
        The image in LCH format, in a N-D array with same shape as input `lab`.

    Raises
    ------
    ValueError
        If `lab` does not have at least 3 color channels (i.e. l, a, b).

    Notes
    -----
    The Hue is expressed as an angle between ``(0, 2*pi)``.

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.color import rgb2lab, lab2lch
    >>> img = data.astronaut()
    >>> img_lab = rgb2lab(img)
    >>> img_lch = lab2lch(img_lab)
    """
    lch = _prepare_lab_array(lab)
    a, b = lch[..., 1], lch[..., 2]
    lch[..., 1], lch[..., 2] = _cart2polar_2pi(a, b)
    return lch
711d23d452413d738af162ac5b9e3f34c1a4eab6
3,655,648
def get_valid_principal_commitments(principal_id=None, consumer_id=None):
    """
    Returns the list of valid commitments for the specified principal (org or actor).
    If the optional consumer_id (actor) is supplied, the list is filtered by consumer_id.
    """
    log.debug("Finding commitments for principal: %s", principal_id)
    if principal_id is None:
        return None

    try:
        gov_controller = bootstrap.container_instance.governance_controller
        commitments, _ = gov_controller.rr.find_objects(
            principal_id, PRED.hasCommitment, RT.Commitment, id_only=False)
        if not commitments:
            return None

        cur_time = get_ion_ts_millis()
        commitment_list = [
            com for com in commitments
            if (consumer_id is None or com.consumer == consumer_id) and
               (int(com.expiration) == 0 or
                (int(com.expiration) > 0 and cur_time < int(com.expiration)))
        ]
        if commitment_list:
            return commitment_list

    except Exception:
        log.exception("Could not determine actor resource commitments")

    return None
2e86f491acb583f349aec16c613552068b01de6f
3,655,649
import numpy as np


def callattice(twotheta, energy_kev=17.794, hkl=(1, 0, 0)):
    """
    Calculate the cubic lattice parameter, a, from a reflection two-theta

    :param twotheta: Bragg angle, deg
    :param energy_kev: energy in keV
    :param hkl: reflection (cubic only)
    :return: float, lattice constant
    """
    qmag = calqmag(twotheta, energy_kev)
    dspace = q2dspace(qmag)
    return dspace * np.sqrt(np.sum(np.square(hkl)))
2718e4c44e08f5038ff4119cf477775ed9f3a678
3,655,650
def reset_password(
    *,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_active_user),
    background_tasks: BackgroundTasks,
):
    """reset current user password"""
    email = current_user.email
    # send confirm email
    if settings.EMAILS_ENABLED and email:
        confirm_token = create_access_token(
            subject=email, expires_delta=timedelta(settings.EMAIL_CONFIRM_TOKEN_EXPIRE)
        )
        background_tasks.add_task(
            send_reset_password_email, email_to=email, token=confirm_token
        )
    return {"msg": "Password reset email sent"}
1f292188b3927c26eb41634acb7fb99e398e94b6
3,655,651
def rule_valid_histone_target(attr):
    """
    {
        "applies": ["ChIP-Seq", "experiment_target_histone"],
        "description": "'experiment_target_histone' attributes must be 'NA' only for ChIP-Seq Input"
    }
    """
    histone = attr.get('experiment_target_histone', [''])[0]
    if attr.get('experiment_type', [""])[0].lower() in ['ChIP-Seq Input'.lower()]:
        return histone == 'NA'
    else:
        return histone != 'NA'
0a10f09c6b9e50cf01583d0c803e5112629e503b
3,655,652
def extend(curve: CustomCurve, deg):
    """Returns curve over the deg-th relative extension"""
    E = curve.EC
    q = curve.q
    K = curve.field
    if q % 2 != 0:
        R = K["x"]
        pol = R.irreducible_element(deg)
        Fext = GF(q ** deg, name="z", modulus=pol)
        return E.base_extend(Fext)

    charac = K.characteristic()
    R = GF(charac)["x"]
    ext_deg = q ** deg
    pol = R.irreducible_element(deg * ZZ(log(q, charac)))
    Kext = GF(ext_deg, name="ex", modulus=pol)
    gKext = Kext.gen()

    h = gKext ** ((ext_deg - 1) // (q - 1))
    assert charac ** (h.minpoly().degree()) == q
    H = GF(q, name="h", modulus=h.minpoly())
    inclusion = H.hom([h])

    new_coefficients = [
        inclusion(stupid_coerce_K_to_L(a, K, H)) for a in E.a_invariants()
    ]
    EE = EllipticCurve(Kext, new_coefficients)
    return EE
8d750b40d91d10d6b51c75765e2083300d7dccf6
3,655,653
import numpy as np
import tensorflow as tf


def flatten3D(inputs: tf.Tensor) -> tf.Tensor:
    """
    Flatten the given ``inputs`` tensor to 3 dimensions.

    :param inputs: >=3d tensor to be flattened
    :return: 3d flattened tensor
    """
    shape = inputs.get_shape().as_list()
    if len(shape) == 3:
        return inputs
    assert len(shape) > 3
    return tf.reshape(inputs, [tf.shape(inputs)[0], tf.shape(inputs)[1],
                               np.prod(inputs.get_shape().as_list()[2:])])
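A shape-level sketch of the behavior (runs eagerly in TF 2):

x = tf.zeros([2, 5, 4, 3])
y = flatten3D(x)
print(y.shape)  # (2, 5, 12): all trailing dimensions are collapsed into one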
11c9c7f7ab955594401468c64323f8f3a52dbe81
3,655,654
import mmcv


def get_classes(dataset):
    """Get class names of a dataset."""
    alias2name = {}
    for name, aliases in dataset_aliases.items():
        for alias in aliases:
            alias2name[alias] = name

    if mmcv.is_str(dataset):
        if dataset in alias2name:
            labels = eval(alias2name[dataset] + '_classes()')
        else:
            raise ValueError('Unrecognized dataset: {}'.format(dataset))
    else:
        raise TypeError('dataset must be a str, but got {}'.format(type(dataset)))
    return labels
d307793a85deef3be239d7dbff746c7c9643dc1b
3,655,655
def split_exclude_string(people):
    """
    Split a given text of persons' names who want to be excluded,
    with a comma separating each name, e.g. ``Konrad, Titipat``
    """
    people = people.replace('Mentor: ', '').replace('Lab-mates: ', '').replace('\r\n', ',').replace(';', ',')
    people_list = people.split(',')
    return [p.strip() for p in people_list if p.strip() != '']
5748a52039548175923f53384474f40ac8fb5e38
3,655,656
from datetime import datetime


def now(tz=DEFAULT_TZ):
    """
    Get the current datetime.

    :param tz: The preferred time-zone, defaults to DEFAULT_TZ
    :type tz: TzInfo (or similar pytz time-zone)
    :return: A time-zone aware datetime set to now
    :rtype: datetime
    """
    return datetime.now(tz=tz)
1dcdd78898b726576f69f01cb9f4bfe3aeaef29d
3,655,657
def peek_with_kwargs(init, args=[], permissive=False):
    """
    Make datatypes passing keyworded arguments to the constructor.

    This is a factory function; returns the actual `peek` routine.

    Arguments:
        init (callable): type constructor.
        args (iterable): arguments NOT to be keyworded; order does matter.
        permissive (bool): missing positional arguments are set to None
            (*new in 0.8.5*).

    Returns:
        callable: deserializer (`peek` routine).

    All the peeked attributes that are not referenced in `args`
    are passed to `init` as keyworded arguments.
    """
    if permissive:
        def try_peek(store, attr, container, _stack=None):
            try:
                return store.peek(attr, container, _stack=_stack)
            except KeyError:
                return None

        def peek(store, container, _stack=None):
            return init(
                *[try_peek(store, attr, container, _stack) for attr in args],
                **dict([(attr, store.peek(attr, container, _stack=_stack))
                        for attr in container if attr not in args]))
    else:
        def peek(store, container, _stack=None):
            return init(
                *[store.peek(attr, container, _stack=_stack) for attr in args],
                **dict([(attr, store.peek(attr, container, _stack=_stack))
                        for attr in container if attr not in args]))
    return peek
d06df21ab439da1cacb52befa6c619f1efa23d1a
3,655,658
def idc_asset_manage(request, aid=None, action=None):
    """
    Manage IDC
    """
    if request.user.has_perms(['asset.view_asset', 'asset.edit_asset']):
        page_name = ''
        if aid:
            idc_list = get_object_or_404(IdcAsset, pk=aid)
            if action == 'edit':
                page_name = 'Edit IDC data center'
            if action == 'delete':
                idc_list.delete()
                return redirect('idc_asset_list')
        else:
            idc_list = IdcAsset()
            action = 'add'
            page_name = 'Add IDC data center'

        if request.method == 'POST':
            form = IdcAssetForm(request.POST, instance=idc_list)
            if form.is_valid():
                if action == 'add':
                    form.save()
                    return redirect('idc_asset_list')
                if action == 'edit':
                    form.save()
                    return redirect('idc_asset_list')
        else:
            form = IdcAssetForm(instance=idc_list)
        return render(request, 'asset_idc_manage.html',
                      {"form": form, "page_name": page_name, "action": action})
    else:
        raise Http404
7fbf1729c87e9e9921f19cf5cba2810879958848
3,655,659
import os
import json
import time


def set_justspeaklasttime(speackdata):
    """
    Store the time of the last pause for the given speak entry
    """
    data_file_path = os.getcwd() + '/just_speack_data.json'
    if not os.path.exists(data_file_path):
        with open(data_file_path, 'w', encoding='UTF-8') as data_file:
            data_file.write(json.dumps({}))

    with open(data_file_path, 'r', encoding='UTF-8') as data_file:
        justspeackdata = json.loads(data_file.read())

    justspeackdata[speackdata]['pause_last'] = time.time()

    with open(data_file_path, 'w', encoding='UTF-8') as data_file:
        data_file.write(json.dumps(justspeackdata))

    return True
2420ef414eff4bb9b2ec36d055152cd1ce3afdf3
3,655,660
import pandas as pd


def get_detected_column_types(df):
    """
    Get data type of each column ('DATETIME', 'NUMERIC' or 'STRING')

    Parameters:
        df (df): pandas dataframe

    Returns:
        df (df): dataframe with all datatypes converted
    """
    assert isinstance(df, pd.DataFrame), 'Parameter must be DataFrame'

    for c in df.columns:
        # Convert column to string
        col_data = df[c].map(str)
        col_data = col_data.replace("NaT", None)
        col_data = col_data.replace("NaN", None)

        # Check NULL column
        if df[c].isnull().values.all():
            continue

        # Check DATETIME
        try:
            # Check if it's able to convert column to datetime;
            # if the column is already datetime, then skip the conversion
            if 'datetime' in str(col_data.dtype):
                continue
            df[c] = pd.to_datetime(col_data)
            continue
        except ValueError:
            pass

        # Check NUMERIC
        try:
            # Drop NaN rows
            series = df[c].dropna()

            # if the column is already int or float, then skip the conversion
            if 'int' in str(col_data.dtype) or 'float' in str(col_data.dtype):
                continue

            # Check if it can be converted to numeric
            df[c] = pd.to_numeric(series)
        except ValueError:
            pass

    return df
23647127d0e5a125e06fb1932e74ba5f9c885ded
3,655,661
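A quick demonstration of the detection on a small frame, assuming pandas is installed; the string column survives both conversion attempts unchanged:

import pandas as pd

df = pd.DataFrame({
    'when': ['2021-01-01', '2021-06-15'],   # parses as DATETIME
    'amount': ['1', '2.5'],                 # parses as NUMERIC
    'name': ['a', 'b'],                     # stays a STRING (object) column
})
df = get_detected_column_types(df)
print(df.dtypes)  # when: datetime64[ns], amount: float64, name: object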
def distance(coords): """Calculates the distance of a path between multiple points Arguments: coords -- List of coordinates, e.g. [(0,0), (1,1)] Returns: Total distance as a float """ distance = 0 for p1, p2 in zip(coords[:-1], coords[1:]): distance += ((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2) ** 0.5 return distance
9c6088b740f42b839d4aa482c276fe4cc5dc8114
3,655,662
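A quick sanity check on a 3-4-5 right triangle:

print(distance([(0, 0), (3, 0), (3, 4)]))  # two legs: 3.0 + 4.0 = 7.0
print(distance([(0, 0), (3, 4)]))          # hypotenuse only: 5.0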
from random import randint


def roll_dice(dicenum, dicetype, modifier=None, conditional=None, return_tuple=False):
    """
    This is a standard dice roller.

    Args:
     dicenum (int): Number of dice to roll (the result to be added).
     dicetype (int): Number of sides of the dice to be rolled.
     modifier (tuple): A tuple `(operator, value)`, where operator is
        one of `"+"`, `"-"`, `"/"` or `"*"`. The result of the dice
        roll(s) will be modified by this value.
     conditional (tuple): A tuple `(conditional, value)`, where
        conditional is one of `"=="`,`"<"`,`">"`,`">="`,`"<="` or `"!="`.
        This allows the roller to directly return a result depending
        on if the conditional was passed or not.
     return_tuple (bool): Return a tuple with all individual roll
        results or not.

    Returns:
        roll_result (int): The result of the roll + modifiers. This is the
            default return.
        condition_result (bool): A True/False value returned if `conditional`
            is set but not `return_tuple`. This effectively hides the result
            of the roll.
        full_result (tuple): If `return_tuple` is `True`, instead return
            a tuple `(result, outcome, diff, rolls)`. Here,
            `result` is the normal result of the roll + modifiers.
            `outcome` and `diff` are the boolean result of the roll and
            the absolute difference to the `conditional` input; both will
            be `None` if `conditional` is not set.
            `rolls` is itself a tuple holding all the individual rolls in
            the case of multiple die-rolls.

    Raises:
        TypeError if non-supported modifiers or conditionals are given.

    Notes:
        All input numbers are converted to integers.

    Examples:
        print roll_dice(2, 6) # 2d6
        <<< 7
        print roll_dice(1, 100, ('+', 5)) # 1d100 + 5
        <<< 34
        print roll_dice(1, 20, conditional=('<', 10)) # let's say we roll 3
        <<< True
        print roll_dice(3, 10, return_tuple=True)
        <<< (11, None, None, (2, 5, 4))
        print roll_dice(2, 20, ('-', 2), conditional=('>=', 10), return_tuple=True)
        <<< (8, False, 2, (4, 6)) # roll was 4 + 6 - 2 = 8

    """
    dicenum = int(dicenum)
    dicetype = int(dicetype)

    # roll all dice, remembering each roll
    rolls = tuple([randint(1, dicetype) for roll in range(dicenum)])
    result = sum(rolls)

    if modifier:
        # make sure to check types well before eval
        mod, modvalue = modifier
        if mod not in ('+', '-', '*', '/'):
            raise TypeError("Non-supported dice modifier: %s" % mod)
        modvalue = int(modvalue)  # for safety
        result = eval("%s %s %s" % (result, mod, modvalue))
    outcome, diff = None, None
    if conditional:
        # make sure to check types well before eval
        cond, condvalue = conditional
        if cond not in ('>', '<', '>=', '<=', '!=', '=='):
            raise TypeError("Non-supported dice result conditional: %s" % conditional)
        condvalue = int(condvalue)  # for safety
        outcome = eval("%s %s %s" % (result, cond, condvalue))  # True/False
        diff = abs(result - condvalue)
    if return_tuple:
        return result, outcome, diff, rolls
    else:
        if conditional:
            return outcome
        else:
            return result
acbc97e4b7720129788c8c5d5d9a1d51936d9dc1
3,655,663
import math def build_central_hierarchical_histogram_computation( lower_bound: float, upper_bound: float, num_bins: int, arity: int = 2, max_records_per_user: int = 1, epsilon: float = 1, delta: float = 1e-5, secure_sum: bool = False): """Create the tff federated computation for central hierarchical histogram aggregation. Args: lower_bound: A `float` specifying the lower bound of the data range. upper_bound: A `float` specifying the upper bound of the data range. num_bins: The integer number of bins to compute. arity: The branching factor of the tree. Defaults to 2. max_records_per_user: The maximum number of records each user is allowed to contribute. Defaults to 1. epsilon: Differential privacy parameter. Defaults to 1. delta: Differential privacy parameter. Defaults to 1e-5. secure_sum: A boolean deciding whether to use secure aggregation. Defaults to `False`. Returns: A tff.federated_computation function to perform central tree aggregation. """ if upper_bound < lower_bound: raise ValueError(f'upper_bound: {upper_bound} is smaller than ' f'lower_bound: {lower_bound}.') if num_bins <= 0: raise ValueError(f'num_bins: {num_bins} smaller or equal to zero.') if arity < 2: raise ValueError(f'Arity should be at least 2.' f'arity={arity} is given.') if max_records_per_user < 1: raise ValueError(f'Maximum records per user should be at least 1. ' f'max_records_per_user={max_records_per_user} is given.') if epsilon < 0 or delta < 0 or delta > 1: raise ValueError(f'Privacy parameters in wrong range: ' f'(epsilon, delta): ({epsilon}, {delta})') if epsilon == 0.: stddev = 0. else: stddev = max_records_per_user * _find_noise_multiplier( epsilon, delta, steps=math.ceil(math.log(num_bins, arity))) central_tree_aggregation_factory = hierarchical_histogram_factory.create_central_hierarchical_histogram_factory( stddev, arity, max_records_per_user, secure_sum=secure_sum) return _build_hierarchical_histogram_computation( lower_bound, upper_bound, num_bins, central_tree_aggregation_factory)
fcd35bc2df5f61174d00079638dda0c04c1490ff
3,655,664
def initialise_halo_params():
    """Initialise the basic parameters needed to simulate a forming Dark matter halo.

    Args:
        None

    Returns:
        G: gravitational constant.
        epsilon: softening parameter.
        limit: width of the simulated universe.
        radius: simulated radius of each particle
        (for proper handling of boundary conditions).
        num_pos_particles: number of positive mass particles.
        num_neg_particles: number of negative mass particles.
        chunks_value: dask chunks value.
        time_steps: number of time steps to simulate.
    """
    G = 1.0
    epsilon = 0.07
    limit = 80000
    radius = 4
    num_pos_particles = 5000
    num_neg_particles = 45000
    # Integer chunk size for dask arrays (a float here can trip dask's
    # chunk normalisation).
    chunks_value = (num_pos_particles + num_neg_particles) // 5
    time_steps = 1000
    return G, epsilon, limit, radius, num_pos_particles, num_neg_particles, chunks_value, time_steps
ee3311fd17a40e8658f11d2ddf98d0ff8eb27a6d
3,655,665
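The tuple is meant to be unpacked in a single call at simulation start; a usage sketch:

(G, epsilon, limit, radius,
 num_pos, num_neg, chunks_value, time_steps) = initialise_halo_params()
total_particles = num_pos + num_neg  # 50000 particles in total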
# Targets the TensorFlow 1.x queue-based input pipeline;
# read_image_from_disk is defined elsewhere in this module.
import tensorflow as tf
from tensorflow.python.framework import ops


def read_data(image_paths, label_list, image_size, batch_size, max_nrof_epochs, num_threads, shuffle, random_flip,
              random_brightness, random_contrast):
    """
    Creates Tensorflow Queue to batch load images. Applies transformations to images as they are loaded.
    :param image_paths: image paths to load
    :param label_list: class labels for image paths
    :param image_size: size to resize images to
    :param batch_size: num of images to load in batch
    :param max_nrof_epochs: total number of epochs to read through image list
    :param num_threads: num threads to use
    :param shuffle: Shuffle images
    :param random_flip: Random Flip image
    :param random_brightness: Apply random brightness transform to image
    :param random_contrast: Apply random contrast transform to image
    :return: images and labels of batch_size
    """
    images = ops.convert_to_tensor(image_paths, dtype=tf.string)
    labels = ops.convert_to_tensor(label_list, dtype=tf.int32)

    # Makes an input queue
    input_queue = tf.train.slice_input_producer((images, labels),
                                                num_epochs=max_nrof_epochs, shuffle=shuffle)

    images_labels = []
    imgs = []
    lbls = []
    for _ in range(num_threads):
        image, label = read_image_from_disk(filename_to_label_tuple=input_queue)
        image = tf.random_crop(image, size=[image_size, image_size, 3])
        image.set_shape((image_size, image_size, 3))
        image = tf.image.per_image_standardization(image)

        if random_flip:
            image = tf.image.random_flip_left_right(image)

        if random_brightness:
            image = tf.image.random_brightness(image, max_delta=0.3)

        if random_contrast:
            image = tf.image.random_contrast(image, lower=0.2, upper=1.8)

        imgs.append(image)
        lbls.append(label)
        images_labels.append([image, label])

    image_batch, label_batch = tf.train.batch_join(images_labels,
                                                   batch_size=batch_size,
                                                   capacity=4 * num_threads,
                                                   enqueue_many=False,
                                                   allow_smaller_final_batch=True)
    return image_batch, label_batch
2bbb7f1be38764634e198f83b82fafb730ec3afa
3,655,666
import numpy as np


def reorder_matrix(m, d):
    """
    Reorder a similarity matrix: put species in the same cluster together.

    INPUT:
        m - similarity matrix
        d - medoid dictionary: {medoid: [list of species indexes in cluster]}
    OUTPUT:
        m with rows in the new order
        new_order - order of species indexes, grouped per cluster
    """
    new_order = []
    for med_class in d.values():
        new_order.append(med_class)
    return m[np.concatenate(new_order), :], new_order
5d203ec6f61afe869008fa6749d18946f128ac87
3,655,667
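A small worked example; note that only the rows are permuted, columns keep their original order:

import numpy as np

m = np.arange(16).reshape(4, 4)    # stand-in for a similarity matrix
d = {0: [0, 2], 1: [1, 3]}         # two clusters, given by row indexes
m2, order = reorder_matrix(m, d)
print(order)      # [[0, 2], [1, 3]]
print(m2[:, 0])   # [ 0  8  4 12] -- rows now grouped by cluster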
import subprocess
from tempfile import NamedTemporaryFile


def extract_sound(video_filename):
    """Given the name of a video, extract the sound to a .wav file,
    and return the filename of the new file."""
    # Generate a filename for the temporary audio file; the file itself is
    # deleted when the context manager exits, leaving only a fresh name
    # for ffmpeg to create.
    with NamedTemporaryFile(suffix='.wav') as tf:
        wave_filename = tf.name
    # Extract the sound from the video using ffmpeg
    subprocess.run(['ffmpeg', '-i', video_filename, '-vn', wave_filename],
                   check=True,
                   stderr=subprocess.DEVNULL,
                   stdout=subprocess.DEVNULL)
    return wave_filename
25551239d084ee7240a341394bb20b44a150c907
3,655,668
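Usage sketch, assuming ffmpeg is on the PATH; 'clip.mp4' is a hypothetical input file:

wav_path = extract_sound('clip.mp4')
print(wav_path)  # e.g. /tmp/tmpab12cd.wav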
import networkx as nx
from rdkit import Chem
from rdkit.Chem.Crippen import MolLogP
# calculateScore is RDKit's contrib synthetic-accessibility scorer
# (sascorer.py); the exact import path depends on how it is installed.
from sascorer import calculateScore


def reward_penalized_log_p(mol):
    """
    Reward that consists of log p penalized by SA and # long cycles,
    as described in (Kusner et al. 2017). Scores are normalized based on the
    statistics of 250k_rndm_zinc_drugs_clean.smi dataset
    :param mol: rdkit mol object
    :return: float
    """
    # normalization constants, statistics from 250k_rndm_zinc_drugs_clean.smi
    logP_mean = 2.4570953396190123
    logP_std = 1.434324401111988
    SA_mean = -3.0525811293166134
    SA_std = 0.8335207024513095
    cycle_mean = -0.0485696876403053
    cycle_std = 0.2860212110245455

    log_p = MolLogP(mol)
    SA = -calculateScore(mol)

    # cycle score: penalize rings larger than 6 atoms
    cycle_list = nx.cycle_basis(nx.Graph(
        Chem.rdmolops.GetAdjacencyMatrix(mol)))
    if len(cycle_list) == 0:
        cycle_length = 0
    else:
        cycle_length = max([len(j) for j in cycle_list])
    if cycle_length <= 6:
        cycle_length = 0
    else:
        cycle_length = cycle_length - 6
    cycle_score = -cycle_length

    normalized_log_p = (log_p - logP_mean) / logP_std
    normalized_SA = (SA - SA_mean) / SA_std
    normalized_cycle = (cycle_score - cycle_mean) / cycle_std

    return normalized_log_p + normalized_SA + normalized_cycle
e3e5ebfabf31e4980dc6f3b6c998a08444ce9851
3,655,669
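A sketch of scoring a molecule, assuming RDKit and the contrib SA scorer are importable; benzene's single 6-ring incurs no cycle penalty:

from rdkit import Chem

mol = Chem.MolFromSmiles('c1ccccc1')  # benzene
print(reward_penalized_log_p(mol))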
import h5py


def loadmat(filename, variable_names=None):
    """
    load mat file from h5py files
    :param filename: mat filename
    :param variable_names: list of variable names that should be loaded
    :return: dictionary of loaded data
    """
    data = {}
    matfile = h5py.File(filename, 'r')
    if variable_names is None:
        for key in matfile.keys():
            data.update({key: matfile[key][()]})
    else:
        for key in variable_names:
            if key not in matfile.keys():
                raise RuntimeError('Variable: "' + key + '" is not in file: ' + filename)
            data.update({key: matfile[key][()]})
    return data
3b9183968fba56d57c705bce0ec440c630cc0031
3,655,670
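Usage sketch against a hypothetical MATLAB v7.3 file with variables 'X' and 'y':

data = loadmat('results.mat', variable_names=['X', 'y'])
print(data['X'].shape)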
import zenhan


def date_start_search(line):
    """Detect the start date of an event and return it as a str."""
    # full-width space
    zen_space = '　'
    # full-width zero
    zen_zero = '0'
    nichi = '日'   # kanji marking the day in the date string
    dollar = '$'
    # Work around single-digit days by replacing full-width spaces with zeros
    line = line.replace(zen_space, zen_zero)
    index = line.find(nichi)
    # Correct misplacement using the relative positions of the day marker
    # and the weekday marker
    index_first_dollar = line.find(dollar, index + 1)
    if index + 1 != index_first_dollar:
        index = index_first_dollar
    # e.g. 1 -> 01
    # if line[index - 1] == zen_space:
    #     line[index - 1] = zen_zero
    return zenhan.z2h(line[index - 2:index])
f89e332a2a0031acdf6fa443ea9752e528674b32
3,655,671
# ModelSubstitute, CrossEntropy, jacobian_graph, jacobian_augmentation,
# train, batch_eval and to_categorical are expected to come from the
# surrounding CleverHans-based project.
import numpy as np
from six.moves import xrange


def train_sub1(sess, x, y, bbox_preds, x_sub, y_sub, nb_classes,
               nb_epochs_s, batch_size, learning_rate, data_aug, lmbda,
               aug_batch_size, rng, img_rows=48, img_cols=48, nchannels=3):
    """
    This function creates the substitute by alternatively augmenting the
    training data and training the substitute.
    :param sess: TF session
    :param x: input TF placeholder
    :param y: output TF placeholder
    :param bbox_preds: output of black-box model predictions
    :param x_sub: initial substitute training data
    :param y_sub: initial substitute training labels
    :param nb_classes: number of output classes
    :param nb_epochs_s: number of epochs to train substitute model
    :param batch_size: size of training batches
    :param learning_rate: learning rate for training
    :param data_aug: number of times substitute training data is augmented
    :param lmbda: lambda from arxiv.org/abs/1602.02697
    :param aug_batch_size: batch size used for the Jacobian augmentation
    :param rng: numpy.random.RandomState instance
    :param img_rows: input row dimension
    :param img_cols: input column dimension
    :param nchannels: number of input channels
    :return: the substitute model and its predictions
    """
    # Define TF model graph (for the black-box model)
    model_sub = ModelSubstitute('model_s', nb_classes)
    preds_sub = model_sub.get_logits(x)
    loss_sub = CrossEntropy(model_sub, smoothing=0)

    print("Defined TensorFlow model graph for the substitute.")

    # Define the Jacobian symbolically using TensorFlow
    grads = jacobian_graph(preds_sub, x, nb_classes)

    # Train the substitute and augment dataset alternatively
    for rho in xrange(data_aug):
        print("Substitute training epoch #" + str(rho))
        train_params = {
            'nb_epochs': nb_epochs_s,
            'batch_size': batch_size,
            'learning_rate': learning_rate
        }
        train(sess, loss_sub, x, y, x_sub,
              to_categorical(y_sub, nb_classes),
              init_all=False, args=train_params, rng=rng)

        # If we are not at last substitute training iteration, augment dataset
        if rho < data_aug - 1:
            print("Augmenting substitute training data.")
            # Perform the Jacobian augmentation
            lmbda_coef = 2 * int(int(rho / 3) != 0) - 1
            x_sub = jacobian_augmentation(sess, x, x_sub, y_sub, grads,
                                          lmbda_coef * lmbda, aug_batch_size)

            print("Labeling substitute training data.")
            # Label the newly generated synthetic points using the black-box
            y_sub = np.hstack([y_sub, y_sub])
            x_sub_prev = x_sub[int(len(x_sub) / 2):]
            tmp = batch_eval(sess, [x], [bbox_preds], [x_sub_prev],
                             batch_size=batch_size)
            bbox_val = tmp[0]

            # Note here that we take the argmax because the adversary
            # only has access to the label (not the probabilities) output
            # by the black-box model
            y_sub[int(len(x_sub) / 2):] = np.argmax(bbox_val, axis=1)

    return model_sub, preds_sub
a5433f78c60f6beec14a6d4fd414d45dc8c65999
3,655,672
from math import floor


def divideArray(array, factor):
    """Split the array into #factor subarrays; each subarray has the same
    number of elements, except for the last one."""
    factor = min(factor, len(array))
    length = floor(len(array) * 1.0 / factor)
    res = []
    for i in range(factor - 1):
        res = res + list([array[i * length:(i + 1) * length]])
    return list(res + list([array[length * (factor - 1):]]))
d94441e6036e78f9b541b9d170d03681740c81d3
3,655,673
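The last subarray absorbs the remainder when the length is not divisible by the factor:

print(divideArray([1, 2, 3, 4, 5, 6, 7], 3))
# [[1, 2], [3, 4], [5, 6, 7]]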
def argMax(scores):
    """
    Returns the key with the highest value.
    """
    if len(scores) == 0:
        return None
    # Materialise items() so it is indexable (it is a view in Python 3).
    items = list(scores.items())
    values = [x[1] for x in items]
    maxIndex = values.index(max(values))
    return items[maxIndex][0]
9310988a0f8aa1279882d060ade7febdc102b0c5
3,655,674
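A quick check of both paths:

print(argMax({'a': 1, 'b': 3, 'c': 2}))  # 'b'
print(argMax({}))                        # None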
def rotateright(arr, k) -> list:
    """
    Rotate the array right side k number of times.
    """
    if not arr:
        return arr
    # Normalise k so rotations larger than the array length wrap around.
    k = k % len(arr)
    if k == 0:
        return arr
    # The last k elements move to the front; assign via a slice to keep
    # the rotation in place.
    arr[:] = arr[-k:] + arr[:-k]
    return arr
7d303f5b57cb10a1a28f5c78ffa848d2a9cb593f
3,655,675
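Rotating right by k moves the last k elements to the front:

print(rotateright([1, 2, 3, 4, 5], 2))  # [4, 5, 1, 2, 3]
print(rotateright([1, 2, 3], 5))        # k wraps around: [2, 3, 1]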
import os

from django.core.exceptions import ValidationError


def get_video_chunk_path(instance, filename):
    """
    Get path to store a video chunk.
    The path will be of the format: project_id/chunks/chunk_no.mp4
    """
    # Both identifiers must be present to build a valid path.
    if (not instance.project_id) or (not instance.chunk_no):
        raise ValidationError('Invalid Project ID')
    return os.path.join(str(instance.project_id) + '/chunks/' +
                        str(instance.chunk_no) + '.mp4')
c917529699a2bafdf9124eca81b23c5971a97a50
3,655,676
def get_ratio(numerator, denominator): """Get ratio from numerator and denominator.""" return ( 0 if not denominator else round(float(numerator or 0) / float(denominator), 2) )
e51a860292d54d2e44909ad878d0b1d8e66c37c2
3,655,677
import io

import cv2
import numpy as np
from flask import Flask, request, send_file


def create_app():
    """ Create a Flask application for face alignment

    Returns:
        flask.Flask -> Flask application
    """
    app = Flask(__name__)
    # setup_model() and rotate_bound() are helpers defined elsewhere
    # in this project.
    model = setup_model()
    app.config.from_mapping(MODEL=model)

    @app.route("/", methods=["GET"])
    def howto():
        instruction = (
            "Send POST request to /align to fix face orientation in input image"
            "\nex."
            "\n\tcurl -X POST -F 'image=@/path/to/face.jpg' --output output.jpg localhost:5000/align"
        )
        return instruction

    @app.route("/align", methods=["POST"])
    def align():
        data = request.files["image"]
        img_str = data.read()
        nparr = np.frombuffer(img_str, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)

        faces = model.detect(img)
        if len(faces) == 0:
            return "No face found. Try again", 400
        elif len(faces) > 1:
            return "Too many faces found. Try again", 400
        else:
            face = faces[0]
            rotated_image = rotate_bound(img, face.angle)

            # Encode image
            is_completed, buf = cv2.imencode(".jpg", rotated_image)
            if not is_completed:
                return "Unexpected encoding error. Try again", 400

            byte_buffer = io.BytesIO(buf.tobytes())

        return send_file(
            byte_buffer,
            "image/jpeg",
            as_attachment=True,
            attachment_filename="output.jpg",
        )

    return app
d9a5d59f64dc9227949bbe73065d18bcc8142b9d
3,655,678
from torch import Tensor


def grad_clip(x: Tensor) -> Tensor:
    """
    Clips too big and too small gradients.

    Example::

        grad = grad_clip(grad)

    Args:
        x(:obj:`Tensor`): Gradient with too large or small values

    Returns:
        :obj:`Tensor`: Clipped Gradient
    """
    x[x > 5] = 5
    x[x < -5] = -5
    return x
5c07c4432fda16d06bda8569aca34cbbaf45b076
3,655,679
import numpy as np


def unfold_kernel(kernel):
    """
    In pytorch format, kernel is stored as [out_channel, in_channel, height, width]
    Unfold kernel into a 2-dimension weights: [in_channel * height * width, out_channel]

    :param kernel: numpy ndarray
    :return: unfolded 2-D weight matrix
    """
    k_shape = kernel.shape
    weight = np.zeros([k_shape[1] * k_shape[2] * k_shape[3], k_shape[0]])
    for i in range(k_shape[0]):
        weight[:, i] = np.reshape(kernel[i, :, :, :], [-1])
    return weight
7106ead9b4953024731d918fb3c356b056bca156
3,655,680
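Shape check: a kernel of 8 output channels, 3 input channels and 5x5 spatial extent unfolds to a (75, 8) weight matrix:

import numpy as np

kernel = np.random.randn(8, 3, 5, 5)  # [out_ch, in_ch, h, w]
weight = unfold_kernel(kernel)
print(weight.shape)  # (75, 8) == (in_ch * h * w, out_ch)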
import polyline


def _parse_polyline_locations(locations, max_n_locations):
    """Parse and validate locations in Google polyline format.

    The "locations" argument of the query should be a string of ascii
    characters above 63.

    Args:
        locations: The location query string.
        max_n_locations: The max allowable number of locations, to keep query
            times reasonable.

    Returns:
        lats: List of latitude floats.
        lons: List of longitude floats.

    Raises:
        ClientError: If too many locations are given, or if the location
            string can't be parsed.
    """

    # The Google maps API prefixes their polylines with 'enc:'.
    if locations and locations.startswith("enc:"):
        locations = locations[4:]

    try:
        latlons = polyline.decode(locations)
    except Exception as e:
        msg = "Unable to parse locations as polyline."
        raise ClientError(msg) from e

    # The polyline decodes to a list of (lat, lon) tuples.
    lats = [p[0] for p in latlons]
    lons = [p[1] for p in latlons]

    # Check number.
    n_locations = len(lats)
    if n_locations > max_n_locations:
        msg = f"Too many locations provided ({n_locations}), the limit is {max_n_locations}."
        raise ClientError(msg)

    return lats, lons
3ebff7a35c86bad5986ee87c194dd9128936abb0
3,655,681
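Round-trip sketch using the polyline package itself to build a valid input:

import polyline

encoded = polyline.encode([(38.5, -120.2), (40.7, -120.95)])
lats, lons = _parse_polyline_locations(encoded, max_n_locations=100)
print(lats, lons)  # [38.5, 40.7] [-120.2, -120.95]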
def dense(data, weight, bias=None, out_dtype=None): """The default implementation of dense in topi. Parameters ---------- data : tvm.Tensor 2-D with shape [batch, in_dim] weight : tvm.Tensor 2-D with shape [out_dim, in_dim] bias : tvm.Tensor, optional 1-D with shape [out_dim] out_dtype : str The output type. This is used for mixed precision. Returns ------- output : tvm.Tensor 2-D with shape [batch, out_dim] """ assert len(data.shape) == 2 and len(weight.shape) == 2, \ "only support 2-dim dense" if bias is not None: assert len(bias.shape) == 1 if out_dtype is None: out_dtype = data.dtype batch, in_dim = data.shape out_dim, _ = weight.shape k = tvm.reduce_axis((0, in_dim), name='k') matmul = tvm.compute((batch, out_dim), \ lambda i, j: tvm.sum(data[i, k].astype(out_dtype) * \ weight[j, k].astype(out_dtype), axis=k), \ name='T_dense', tag='dense') if bias is not None: matmul = tvm.compute((batch, out_dim), \ lambda i, j: matmul[i, j] + bias[j].astype(out_dtype), \ tag=tag.BROADCAST) return matmul
ac5550f901d1a7c94fee4b8e65fa9957d4b2ff78
3,655,682
import inspect
import warnings
from dataclasses import MISSING, _MISSING_TYPE
from enum import Enum
from typing import Any, OrderedDict, Type, TypeVar, Union

T = TypeVar("T")

# `field` is assumed to be the surrounding library's dataclasses-style field
# wrapper, which accepts `choices` and the encoding/decoding hooks used below.


def choice(*choices: T, default: Union[T, _MISSING_TYPE] = MISSING, **kwargs: Any) -> T:
    """Makes a field which can be chosen from the set of choices from the
    command-line.

    Returns a regular `dataclasses.field()`, but with metadata which indicates
    the allowed values.

    (New:) If `choices` is a dictionary, then passing the 'key' will result in
    the corresponding value being used. The values may be objects, for example.
    Similarly for Enum types: passing an Enum type will use the member names as
    the command-line choices and resolve them to the corresponding members.

    Args:
        default (T, optional): The default value of the field. Defaults to
            dataclasses.MISSING, in which case the command-line argument is
            required.

    Raises:
        ValueError: If the default value isn't part of the given choices.

    Returns:
        T: the result of the usual `dataclasses.field()` function (a
        dataclass field/attribute).
    """
    assert len(choices) > 0, "Choice requires at least one positional argument!"

    if len(choices) == 1:
        choices = choices[0]
        if inspect.isclass(choices) and issubclass(choices, Enum):
            # If given an enum, construct a mapping from names to values.
            choice_enum: Type[Enum] = choices
            choices = OrderedDict((e.name, e) for e in choice_enum)
            if default is not MISSING and not isinstance(default, choice_enum):
                if default in choices:
                    warnings.warn(
                        UserWarning(
                            f"Setting default={default} could perhaps be ambiguous "
                            f"(enum names vs enum values). Consider using the enum "
                            f"value {choices[default]} instead."
                        )
                    )
                    default = choices[default]
                else:
                    raise ValueError(
                        f"'default' arg should be of type {choice_enum}, but got {default}"
                    )

    if isinstance(choices, dict):
        # if the choices is a dict, the options are the keys
        # save the info about the choice_dict in the field metadata.
        metadata = kwargs.setdefault("metadata", {})
        choice_dict = choices
        # save the choice_dict in metadata so that we can recover the values in postprocessing.
        metadata["choice_dict"] = choice_dict
        choices = list(choice_dict.keys())

        # TODO: If the choice dict is given, then add encoding/decoding functions that just
        # get/set the right key.
        def _encoding_fn(value: Any) -> str:
            """Custom encoding function that will simply represent the value as
            the key in the dict rather than the value itself.
            """
            if value in choice_dict.keys():
                return value
            elif value in choice_dict.values():
                return [k for k, v in choice_dict.items() if v == value][0]
            return value

        kwargs.setdefault("encoding_fn", _encoding_fn)

        def _decoding_fn(value: Any) -> Any:
            """Custom decoding function that will retrieve the value from the
            stored key in the dictionary.
            """
            return choice_dict.get(value, value)

        kwargs.setdefault("decoding_fn", _decoding_fn)

    return field(default=default, choices=choices, **kwargs)
1316b6541d4c9dd0b03ddbbdbb41eee906c12aa1
3,655,683
def modulelink(module, baseurl=''): """Hyperlink to a module, either locally or on python.org""" if module+'.py' not in local_files: baseurl = 'http://www.python.org/doc/current/lib/module-' return link(baseurl+module+'.html', module)
b907d013b25570d062d49314bbbab637aeb4ffec
3,655,684
import inspect
from typing import Callable, Optional

import wrapt


def add_reference(
    *, short_purpose: str, reference: Optional[str] = None, doi: Optional[str] = None
) -> Callable:
    """Decorator to link a reference to a function or method.

    Acts as a marker in code where particular algorithms/data/... originate.
    General execution of code silently passes these markers, but remembers how
    and where they were called. Which markers were passed in a particular
    program run can be recalled with `print(BIBLIOGRAPHY)`.

    One and only one method for providing the reference is allowed.

    Args:
        short_purpose (str): Identify the thing being referenced.
        reference (Optional, str): The reference itself, as a plain text string.
        doi (Optional, str): DOI of the reference.

    Returns:
        The decorated function.
    """
    if reference and doi:
        raise ValueError("Only one method for providing the reference is allowed.")
    elif reference:
        ref = reference
    elif doi:
        ref = doi if "doi.org" in doi else f"https://doi.org/{doi}"
    else:
        raise ValueError("No reference information provided!")

    @wrapt.decorator(enabled=lambda: BIBLIOGRAPHY.track_references)
    def wrapper(wrapped, instance, args, kwargs):
        source = inspect.getsourcefile(wrapped)
        line = inspect.getsourcelines(wrapped)[1]
        identifier = f"{source}:{line}"

        if identifier in BIBLIOGRAPHY and ref in BIBLIOGRAPHY[identifier].references:
            return wrapped(*args, **kwargs)

        if identifier not in BIBLIOGRAPHY:
            BIBLIOGRAPHY[identifier] = FunctionReference(
                wrapped.__name__, line, source, [], []
            )

        BIBLIOGRAPHY[identifier].short_purpose.append(short_purpose)
        BIBLIOGRAPHY[identifier].references.append(ref)

        return wrapped(*args, **kwargs)

    return wrapper
8e1a4c6425213779edabdb0879eacbb44d4e479a
3,655,685
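Usage sketch; tracking relies on the module's BIBLIOGRAPHY registry being enabled, and the DOI below is only illustrative:

@add_reference(short_purpose="FFT algorithm",
               doi="10.1090/S0025-5718-1965-0178586-1")  # illustrative DOI
def transform(signal):
    return signal  # placeholder body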
import logging
import subprocess


def get_capital_ptd_act():
    """Get capital project-to-date actuals from the shared drive."""
    logging.info('Retrieving latest CIP project to date')

    command = "smbclient //ad.sannet.gov/dfs " \
        + "--user={adname}%{adpass} -W ad -c " \
        + "'prompt OFF;" \
        + " cd \"FMGT-Shared/Shared/BUDGET/" \
        + "Open Data/Open Data Portal/" \
        + "Shared with Performance and Analytics/" \
        + "Actuals/Capital/P-T-D/\";" \
        + " lcd \"/data/temp/\";" \
        + " mget FY*ACTUALS.xlsx;'"

    command = command.format(adname=conf['alb_sannet_user'],
                             adpass=conf['alb_sannet_pass'],
                             temp_dir=conf['temp_data_dir'])

    logging.info(command)

    try:
        p = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)
        return p
    except subprocess.CalledProcessError as e:
        return e.output
6d6c56da0216a063fd2b7b1f3f30c2a3c390f713
3,655,686
import numpy as np


def eval_curvature(poly, x_vals):
    """
    This function returns a vector with the curvature of the path defined by
    `poly`, evaluated on the distance vector `x_vals`
    """
    # https://en.wikipedia.org/wiki/Curvature#Local_expressions
    def curvature(x):
        a = abs(2 * poly[1] + 6 * poly[0] * x) / (1 + (3 * poly[0] * x**2 + 2 * poly[1] * x + poly[2])**2)**(1.5)
        return a

    return np.vectorize(curvature)(x_vals)
0e0e04b7c49b0cdfaa0658df23816d61ac19141c
3,655,687
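A sanity check against the closed form: for the parabola y = 0.5 * x**2 (poly = [0, 0.5, 0, 0]) the curvature at x = 0 is exactly 1:

import numpy as np

poly = [0.0, 0.5, 0.0, 0.0]
print(eval_curvature(poly, np.array([0.0, 1.0])))
# [1.0, 0.3536...] since curvature(1) = 1 / (1 + 1)**1.5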
import subprocess


def calculate_folder_size(path, _type="mb") -> float:
    """Return the size of the given path in MB, or in bytes if requested."""
    p1 = subprocess.Popen(["du", "-sb", path], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(["awk", "{print $1}"], stdin=p1.stdout, stdout=subprocess.PIPE)
    p1.stdout.close()  # type: ignore
    byte_size = float(p2.communicate()[0].decode("utf-8").strip())
    if _type == "bytes":
        return byte_size
    else:
        return byte_to_mb(byte_size)
cad7441e4f5a5f4c95eaecd6b2a5df77989b3737
3,655,688
def templates(): """Return all of the templates and settings.""" return settings
6cf1c151f2e0798e1b26002c29db898bcd3c42cf
3,655,689
def get_semitones(interval_tuplet): """ Takes an interval tuplet of the form returned by get_interval() Returns an int representing the semitones within the interval. """ return mintervals.semitones_from_shorthand(interval_tuplet[0]) + 12*interval_tuplet[1]
179f3894da3607b4fd4aa7915ec5e9c38fcdc592
3,655,690
import numpy

import cupy
# `eigsh` and `_augmented_orthnormal_cols` are the CuPy-internal helpers
# this function relies on (cupyx.scipy.sparse.linalg).


def svds(a, k=6, *, ncv=None, tol=0, which='LM', maxiter=None,
         return_singular_vectors=True):
    """Finds the largest ``k`` singular values/vectors for a sparse matrix.

    Args:
        a (cupy.ndarray or cupyx.scipy.sparse.csr_matrix): A real or complex
            array with dimension ``(m, n)``
        k (int): The number of singular values/vectors to compute. Must be
            ``1 <= k < min(m, n)``.
        ncv (int): The number of Lanczos vectors generated. Must be
            ``k + 1 < ncv < min(m, n)``. If ``None``, default value is used.
        tol (float): Tolerance for singular values. If ``0``, machine
            precision is used.
        which (str): Only 'LM' is supported. 'LM': finds ``k`` largest
            singular values.
        maxiter (int): Maximum number of Lanczos update iterations.
            If ``None``, default value is used.
        return_singular_vectors (bool): If ``True``, returns singular vectors
            in addition to singular values.

    Returns:
        tuple:
            If ``return_singular_vectors`` is ``True``, it returns ``u``,
            ``s`` and ``vt`` where ``u`` is left singular vectors, ``s`` is
            singular values and ``vt`` is right singular vectors.
            Otherwise, it returns only ``s``.

    .. seealso:: :func:`scipy.sparse.linalg.svds`

    .. note::
        This is a naive implementation using cupyx.scipy.sparse.linalg.eigsh
        as an eigensolver on ``a.H @ a`` or ``a @ a.H``.

    """
    if a.ndim != 2:
        raise ValueError('expected 2D (shape: {})'.format(a.shape))
    if a.dtype.char not in 'fdFD':
        raise TypeError('unsupported dtype (actual: {})'.format(a.dtype))
    m, n = a.shape
    if k <= 0:
        raise ValueError('k must be greater than 0 (actual: {})'.format(k))
    if k >= min(m, n):
        raise ValueError('k must be smaller than min(m, n) (actual: {})'
                         ''.format(k))

    aH = a.conj().T
    if m >= n:
        aa = aH @ a
    else:
        aa = a @ aH

    if return_singular_vectors:
        w, x = eigsh(aa, k=k, which=which, ncv=ncv, maxiter=maxiter,
                     tol=tol, return_eigenvectors=True)
    else:
        w = eigsh(aa, k=k, which=which, ncv=ncv, maxiter=maxiter, tol=tol,
                  return_eigenvectors=False)

    w = cupy.maximum(w, 0)
    t = w.dtype.char.lower()
    factor = {'f': 1e3, 'd': 1e6}
    cond = factor[t] * numpy.finfo(t).eps
    cutoff = cond * cupy.max(w)
    above_cutoff = (w > cutoff)
    n_large = above_cutoff.sum()
    s = cupy.zeros_like(w)
    s[:n_large] = cupy.sqrt(w[above_cutoff])
    if not return_singular_vectors:
        return s

    x = x[:, above_cutoff]
    if m >= n:
        v = x
        u = a @ v / s[:n_large]
    else:
        u = x
        v = aH @ u / s[:n_large]

    u = _augmented_orthnormal_cols(u, k - n_large)
    v = _augmented_orthnormal_cols(v, k - n_large)

    return u, s, v.conj().T
9a96fc2fbca100a53ba81f609a58fc0934b5c524
3,655,691
from flask import Flask
from pymongo import ASCENDING

# create_mongo_client and get_conf are helpers defined elsewhere
# in this project.


def register_mongodb(app: Flask) -> Flask:
    """Instantiates database and initializes collections."""
    config = app.config

    # Instantiate PyMongo client
    mongo = create_mongo_client(app=app, config=config)

    # Add database
    db = mongo.db[get_conf(config, "database", "name")]

    # Add database collection for '/service-info'
    collection_service_info = mongo.db["service-info"]

    # Add database collection for '/data_objects'
    collection_data_objects = mongo.db["data_objects"]
    collection_data_objects.create_index([("id", ASCENDING)], unique=True, sparse=True)

    # Add database to app config
    config["database"]["drs_db"] = collection_data_objects
    config["database"]["service_info"] = collection_service_info
    app.config = config

    return app
6b5bd3f5694470b3ba7dbbf94bacb9beb8ee55cd
3,655,692
def look(table, limit=0, vrepr=None, index_header=None, style=None, truncate=None, width=None): """ Format a portion of the table as text for inspection in an interactive session. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar'], ... ['a', 1], ... ['b', 2]] >>> etl.look(table1) +-----+-----+ | foo | bar | +=====+=====+ | 'a' | 1 | +-----+-----+ | 'b' | 2 | +-----+-----+ >>> # alternative formatting styles ... etl.look(table1, style='simple') === === foo bar === === 'a' 1 'b' 2 === === >>> etl.look(table1, style='minimal') foo bar 'a' 1 'b' 2 >>> # any irregularities in the length of header and/or data ... # rows will appear as blank cells ... table2 = [['foo', 'bar'], ... ['a'], ... ['b', 2, True]] >>> etl.look(table2) +-----+-----+------+ | foo | bar | | +=====+=====+======+ | 'a' | | | +-----+-----+------+ | 'b' | 2 | True | +-----+-----+------+ Three alternative presentation styles are available: 'grid', 'simple' and 'minimal', where 'grid' is the default. A different style can be specified using the `style` keyword argument. The default style can also be changed by setting ``petl.config.look_style``. """ # determine defaults if limit == 0: limit = config.look_limit if vrepr is None: vrepr = config.look_vrepr if index_header is None: index_header = config.look_index_header if style is None: style = config.look_style if width is None: width = config.look_width return Look(table, limit=limit, vrepr=vrepr, index_header=index_header, style=style, truncate=truncate, width=width)
356d6fb1f0afe0f8812e460b8ee3b13f7c4ded4b
3,655,693
import ujson


def refresh_devices(config, cache_path):
    """Refresh devices from the configuration received"""
    global DEBUG, m_devices, Device_Cache
    if DEBUG:
        print("DEBUG: Refreshing device database")
    print_progress("Refresh devices")
    try:
        m_devices = config['devices']
    except KeyError:
        print("ERROR: No device found in config!!")
        return None
    else:
        try:
            filep = open(cache_path, 'w')
        except OSError:
            print("ERROR: Cannot write to device cache %s" % cache_path)
        else:
            filep.write(ujson.dumps(m_devices))
            filep.close()
            if DEBUG:
                print("DEBUG: Written device DB to cache")
    return m_devices
650b174689777dc5da6f10b2d8b0715432541f9c
3,655,694
def warn_vars_naming_style(messages, line, style):
    """ Check whether variables and function arguments fit the naming rule."""
    naming_style_name = style.Get('CHECK_VAR_NAMING_STYLE')
    if not naming_style_name:
        return

    def is_expr(uwl):
        return (uwl.tokens
                and _find_parent(uwl.first.node, None, [syms.expr_stmt]))

    def is_assignment(uwl):
        return (is_expr(uwl)
                and next(filter(lambda t: t.is_name, uwl.tokens), None))

    def get_lhs_tokens(uwl):
        root = _find_parent(uwl.first.node, None, [syms.expr_stmt])
        lvalues = _FindLValues(root).lvalues

        for tok in uwl.tokens:
            if tok.name == 'EQUAL':
                break
            if tok.is_name and id(tok.node) in lvalues:
                chain = lvalues[id(tok.node)]
                if (len(chain) == 1
                        or (len(chain) == 2 and chain[0] == 'self')):
                    yield tok

    def iter_token_range(first, last):
        while True:
            yield first
            if first is last:
                break
            first = first.next_token

    def iter_parameters(paramlist):
        for item in paramlist:
            tokens = iter_token_range(item.first_token, item.last_token)
            tokens = filter(lambda t: t.name in {'NAME', 'STAR'}, tokens)

            first = next(tokens, None)
            if first is None:
                # This is possible when a comment is added to a function
                # argument (in some cases, when there is a trailing comma):
                #
                #   def fn(arg1,
                #          arg2, #comment
                #          arg3,
                #          ):
                #       pass
                #
                assert item.first_token.name == 'COMMENT'
                continue

            if first.name == 'STAR':
                yield next(tokens, first)
            yield first

    def get_func_args(uwl):
        for tok in uwl.tokens:
            if not tok.parameters:
                continue
            yield from iter_parameters(tok.parameters)

    if is_assignment(line):
        tokens = get_lhs_tokens(line)
    elif line.tokens and line.is_func_definition:
        tokens = get_func_args(line)
    else:
        return

    naming_style = REGEXPS['varname'][naming_style_name]

    for tok in tokens:
        # Explicitly allow UPPER CASE names, because constants should be
        # named this way regardless of the naming style.
        if not (tok.value == 'self'
                or tok.value.isupper()
                or naming_style.match(tok.value)):
            messages.add(tok, line.AsCode(), Warnings.VAR_NAMING_STYLE,
                         variable=tok.value)
4b8d4cf72395d66ea80f5fbd364cdd47973bb332
3,655,695
import json
from warnings import warn

from jsonschema import FormatChecker

# `validator` (a jsonschema validator class) and `SchemaWarning` are defined
# elsewhere in this module.


def validate_schema(path, data, schema):
    """
    Warns and returns the number of errors relating to JSON Schema validation.

    Uses the `jsonschema <https://python-jsonschema.readthedocs.io/>`__ module.

    :param path: the path of the file being validated (currently unused)
    :param data: the data to validate
    :param object schema: the metaschema against which to validate
    :returns: the number of errors
    :rtype: int
    """
    errors = 0

    for error in validator(schema, format_checker=FormatChecker()).iter_errors(data):
        errors += 1
        # absolute_schema_path may contain integer indices, so stringify first.
        warn(f"{json.dumps(error.instance, indent=2)}\n{error.message} "
             f"({'/'.join(map(str, error.absolute_schema_path))})\n", SchemaWarning)

    return errors
abd6a2a05021586da41fd597eb4137d706c08b41
3,655,696
import os def class_dict(base_module, node): """class_dict(base_module, node) -> dict Returns the class dictionary for the module represented by node and with base class base_module""" class_dict_ = {} def update_dict(name, callable_): if class_dict_.has_key(name): class_dict_[name] = callable_(class_dict_[name]) elif hasattr(base_module, name): class_dict_[name] = callable_(getattr(base_module, name)) else: class_dict_[name] = callable_(None) def guarded_SimpleScalarTree_wrap_compute(old_compute): # This builds the scalar tree and makes it cacheable def compute(self): self.is_cacheable = lambda *args, **kwargs: True old_compute(self) self.vtkInstance.BuildTree() return compute def guarded_SetFileName_wrap_compute(old_compute): # This checks for the presence of file in VTK readers def compute(self): # Skips the check if it's a vtkImageReader or vtkPLOT3DReader, because # it has other ways of specifying files, like SetFilePrefix for # multiple files skip = [vtk.vtkBYUReader, vtk.vtkImageReader, vtk.vtkDICOMImageReader, vtk.vtkTIFFReader] # vtkPLOT3DReader does not exist from version 6.0.0 v = vtk.vtkVersion() version = [v.GetVTKMajorVersion(), v.GetVTKMinorVersion(), v.GetVTKBuildVersion()] if version < [6, 0, 0]: skip.append(vtk.vtkPLOT3DReader) if any(issubclass(self.vtkClass, x) for x in skip): old_compute(self) return if self.has_input('SetFileName'): name = self.get_input('SetFileName') elif self.has_input('SetFile'): name = self.get_input('SetFile').name else: raise ModuleError(self, 'Missing filename') if not os.path.isfile(name): raise ModuleError(self, 'File does not exist') old_compute(self) return compute def compute_SetDiffuseColorWidget(old_compute): if old_compute != None: return old_compute def call_SetDiffuseColorWidget(self, color): self.vtkInstance.SetDiffuseColor(color.tuple) return call_SetDiffuseColorWidget def compute_SetAmbientColorWidget(old_compute): if old_compute != None: return old_compute def call_SetAmbientColorWidget(self, color): self.vtkInstance.SetAmbientColor(color.tuple) return call_SetAmbientColorWidget def compute_SetSpecularColorWidget(old_compute): if old_compute != None: return old_compute def call_SetSpecularColorWidget(self, color): self.vtkInstance.SetSpecularColor(color.tuple) return call_SetSpecularColorWidget def compute_SetColorWidget(old_compute): if old_compute != None: return old_compute def call_SetColorWidget(self, color): self.vtkInstance.SetColor(color.tuple) return call_SetColorWidget def compute_SetEdgeColorWidget(old_compute): if old_compute != None: return old_compute def call_SetEdgeColorWidget(self, color): self.vtkInstance.SetEdgeColor(color.tuple) return call_SetEdgeColorWidget def compute_SetBackgroundWidget(old_compute): if old_compute != None: return old_compute def call_SetBackgroundWidget(self, color): self.vtkInstance.SetBackground(color.tuple) return call_SetBackgroundWidget def compute_SetBackground2Widget(old_compute): if old_compute != None: return old_compute def call_SetBackground2Widget(self, color): self.vtkInstance.SetBackground2(color.tuple) return call_SetBackground2Widget def compute_SetVTKCell(old_compute): if old_compute != None: return old_compute def call_SetRenderWindow(self, cellObj): if cellObj.cellWidget: self.vtkInstance.SetRenderWindow(cellObj.cellWidget.mRenWin) return call_SetRenderWindow def compute_SetTransferFunction(old_compute): # This sets the transfer function if old_compute != None: return old_compute def call_SetTransferFunction(self, tf): tf.set_on_vtk_volume_property(self.vtkInstance) 
return call_SetTransferFunction def compute_SetPointData(old_compute): if old_compute != None: return old_compute def call_SetPointData(self, pd): self.vtkInstance.GetPointData().ShallowCopy(pd) return call_SetPointData def compute_SetCellData(old_compute): if old_compute != None: return old_compute def call_SetCellData(self, cd): self.vtkInstance.GetCellData().ShallowCopy(cd) return call_SetCellData def compute_SetPointIds(old_compute): if old_compute != None: return old_compute def call_SetPointIds(self, point_ids): self.vtkInstance.GetPointIds().SetNumberOfIds(point_ids.GetNumberOfIds()) for i in xrange(point_ids.GetNumberOfIds()): self.vtkInstance.GetPointIds().SetId(i, point_ids.GetId(i)) return call_SetPointIds def compute_CopyImportString(old_compute): if old_compute != None: return old_compute def call_CopyImportVoidPointer(self, pointer): self.vtkInstance.CopyImportVoidPointer(pointer, len(pointer)) return call_CopyImportVoidPointer def guarded_Writer_wrap_compute(old_compute): # The behavior for vtkWriter subclasses is to call Write() # If the user sets a name, we will create a file with that name # If not, we will create a temporary file from the file pool def compute(self): old_compute(self) fn = self.vtkInstance.GetFileName() if not fn: o = self.interpreter.filePool.create_file(suffix='.vtk') self.vtkInstance.SetFileName(o.name) else: o = PathObject(fn) self.vtkInstance.Write() self.set_output('file', o) return compute for var in dir(node.klass): # Everyone that has a Set.*FileName should have a Set.*File port too if set_file_name_pattern.match(var): def get_compute_SetFile(method_name): def compute_SetFile(old_compute): if old_compute != None: return old_compute def call_SetFile(self, file_obj): getattr(self.vtkInstance, method_name)(file_obj.name) return call_SetFile return compute_SetFile update_dict('_special_input_function_' + var[:-4], get_compute_SetFile(var)) if hasattr(node.klass, 'SetFileName'): # ... BUT we only want to check existence of filenames on # readers. VTK is nice enough to be consistent with names, but # this is brittle.. 
if node.klass.__name__.endswith('Reader'): if not node.klass.__name__.endswith('TiffReader'): update_dict('compute', guarded_SetFileName_wrap_compute) if hasattr(node.klass, 'SetRenderWindow'): update_dict('_special_input_function_SetVTKCell', compute_SetVTKCell) #color gui wrapping if hasattr(node.klass, 'SetDiffuseColor'): update_dict('_special_input_function_SetDiffuseColorWidget', compute_SetDiffuseColorWidget) if hasattr(node.klass, 'SetAmbientColor'): update_dict('_special_input_function_SetAmbientColorWidget', compute_SetAmbientColorWidget) if hasattr(node.klass, 'SetSpecularColor'): update_dict('_special_input_function_SetSpecularColorWidget', compute_SetSpecularColorWidget) if hasattr(node.klass, 'SetEdgeColor'): update_dict('_special_input_function_SetEdgeColorWidget', compute_SetEdgeColorWidget) if hasattr(node.klass, 'SetColor'): update_dict('_special_input_function_SetColorWidget', compute_SetColorWidget) if (issubclass(node.klass, vtk.vtkRenderer) and hasattr(node.klass, 'SetBackground')): update_dict('_special_input_function_SetBackgroundWidget', compute_SetBackgroundWidget) if (issubclass(node.klass, vtk.vtkRenderer) and hasattr(node.klass, 'SetBackground2')): update_dict('_special_input_function_SetBackground2Widget', compute_SetBackground2Widget) if issubclass(node.klass, vtk.vtkWriter): update_dict('compute', guarded_Writer_wrap_compute) if issubclass(node.klass, vtk.vtkScalarTree): update_dict('compute', guarded_SimpleScalarTree_wrap_compute) if issubclass(node.klass, vtk.vtkVolumeProperty): update_dict('_special_input_function_SetTransferFunction', compute_SetTransferFunction) if issubclass(node.klass, vtk.vtkDataSet): update_dict('_special_input_function_SetPointData', compute_SetPointData) update_dict('_special_input_function_SetCellData', compute_SetCellData) if issubclass(node.klass, vtk.vtkCell): update_dict('_special_input_function_SetPointIds', compute_SetPointIds) if issubclass(node.klass, vtk.vtkImageImport): update_dict('_special_input_function_CopyImportString', compute_CopyImportString) return class_dict_
96a156b4eb5ac3342131f41d85070f7cfe1aea53
3,655,697
def get_playlist_decreasing_popularity():
    """Return playlists ordered by decreasing popularity (play count)."""
    # Order explicitly so the result actually matches the promised ordering.
    all_ = PlaylistPopularityPrefixed.objects.order_by('-played')
    results = [{"playlist_name": obj.playlist_name, "popularity": obj.played}
               for obj in all_]
    return results
45c8bb79af32cba58282910d1841611bc7f42d84
3,655,698
from typing import Any


def validate_numeric_scalar(var: Any) -> Any:
    """Evaluates whether an argument is a single numeric value.

    Args:
        var: the input argument to validate

    Returns:
        var: the value if it passes validation

    Raises:
        AssertionError: `var` was not numeric.
    """
    assert isinstance(var, (int, float)), "Argument must be single numeric value"
    return var
4db95a31021fd6c8ab0c31d9077a12fa5edd580b
3,655,699