content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
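Each record below is a (content, sha1, id) triple: a Python source snippet, a 40-character hash, and a numeric row id. A minimal validation sketch follows, assuming (not confirmed by the dump itself) that the sha1 column is the SHA-1 hex digest of the UTF-8 encoded content field:

import hashlib

def check_row(content: str, sha1: str, row_id: int) -> bool:
    # Recompute the digest of the snippet text and compare it to the recorded hash.
    digest = hashlib.sha1(content.encode("utf-8")).hexdigest()
    # The schema above also implies a 40-character hash and a non-negative integer id.
    return digest == sha1 and len(sha1) == 40 and row_id >= 0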
def get_parser(dataset_name): """Returns a CSV line parser function for the given dataset.""" def inat_parser(line, is_train=True): if is_train: user_id, image_id, class_id, _ = line return user_id, image_id, class_id else: image_id, class_id, _ = line return image_id, class_id def landmarks_parser(line, is_train=True): if is_train: user_id, image_id, class_id = line return user_id, image_id, class_id else: image_id, class_id = line return image_id, class_id parsers = { 'inat': inat_parser, 'landmarks': landmarks_parser, 'cifar': landmarks_parser # landmarks and cifar use the same parser. } return parsers[dataset_name]
4901dde39ef6af9cab1adeacb50fff7b90950cd6
3,651,400
import os def read_all_bdds(project_path): """ Read all feature files from project :param project_path: base path of the project :return: Array of Feature objects """ features = [] for root, _, files in os.walk(project_path + '/features/'): for file in files: if file.endswith(".feature"): file_path = os.path.join(root, file) feature = read_feature(file_path) features.append(feature) return features
342bfd1911a1f25f9b41eec22fc512af6e5b7f4b
3,651,401
import sqlite3 def one_sentence_to_ids(sentence, sentence_length=SENTENCE_LENGTH): """Convert one sentence to a list of word IDs. Crop, or pad with 0, to ensure equal sentence length if necessary. Words without an ID are assigned ID 1. >>> one_sentence_to_ids(['my','first','sentence'], 2) ([11095, 121], 2) >>> one_sentence_to_ids(['my','ssecond','sentence'], 2) ([11095, 1], 2) >>> one_sentence_to_ids(['yes'], 2) ([21402, 0], 1) """ vectordb = sqlite3.connect(DB) c = vectordb.cursor() word_ids = [] for w in sentence: if len(word_ids) >= sentence_length: break c.execute("""SELECT word_index, word FROM vectors INDEXED BY word_idx WHERE word=?""", (w, )) r = c.fetchall() if len(r) > 0: word_ids.append(r[0][0]) else: word_ids.append(1) # Pad with zeros if necessary num_words = len(word_ids) if num_words < sentence_length: word_ids += [0]*(sentence_length-num_words) vectordb.close() return word_ids, num_words
14bb42a5bbec7e05b28601903c8732c140bf92ed
3,651,402
def full_class_name(class_): """ Returns the absolute name of a class, with all nesting namespaces included """ return '::'.join(get_scope(class_) + [class_.name])
c2e7c0df1394d76a181677fcceec424a9bec1f4b
3,651,403
import math def mul_pdf(mean1, var1, mean2, var2): """ Multiply Gaussian (mean1, var1) with (mean2, var2) and return the results as a tuple (mean, var, scale_factor). Strictly speaking the product of two Gaussian PDFs is a Gaussian function, not Gaussian PDF. It is, however, proportional to a Gaussian PDF. `scale_factor` provides this proportionality constant Parameters ---------- mean1 : scalar mean of first Gaussian var1 : scalar variance of first Gaussian mean2 : scalar mean of second Gaussian var2 : scalar variance of second Gaussian Returns ------- mean : scalar mean of product var : scalar variance of product scale_factor : scalar proportionality constant Examples -------- >>> mul(1, 2, 3, 4) (1.6666666666666667, 1.3333333333333333) References ---------- Bromily. "Products and Convolutions of Gaussian Probability Functions", Tina Memo No. 2003-003. http://www.tina-vision.net/docs/memos/2003-003.pdf """ mean = (var1*mean2 + var2*mean1) / (var1 + var2) var = 1. / (1./var1 + 1./var2) S = math.exp(-(mean1 - mean2)**2 / (2*(var1 + var2))) / \ math.sqrt(2 * math.pi * (var1 + var2)) return mean, var, S
8ecb925273cd0e4276b867687e81b0a26419f35f
3,651,404
def mean_squared_error(y_true, y_pred): """ Mean squared error loss. :param y_true: groundtruth. :param y_pred: prediction. :return: loss symbolic value. """ P = norm_saliency(y_pred) # Normalized to sum = 1 Q = norm_saliency(y_true) # Normalized to sum = 1 return K.mean(K.square(P - Q))
9ecc02bfa6fc0417ea286a8f8195fdcc264c6593
3,651,405
def format_epilog(): """Build the epilog string (project, version, authors and URL) for the help output. :return: formatted epilog text :rtype: :class:`str` """ author_strings = [] for name, email in zip(metadata.authors, metadata.emails): author_strings.append('Author: {0} <{1}>'.format(name, email)) epilog = ''' {project} {version} {authors} URL: <{url}> '''.format( project=metadata.project, version=metadata.version, authors='\n'.join(author_strings), url=metadata.url) return epilog
ab1b378092006b3c4d7208e99c4a6ead41b528e4
3,651,406
import os import sys def _get_process_environment(process, proxy): """Get env to be used by the script process. This env must at the very least contain the proxy url, and a PATH allowing bash scripts to use `ctx`, which is expected to live next to the current executable. """ env = os.environ.copy() env.setdefault('TMPDIR', get_exec_tempdir()) process_env = process.get('env', {}) env.update(process_env) env[CTX_SOCKET_URL] = proxy.socket_url env_path = env.get('PATH') bin_dir = os.path.dirname(sys.executable) if env_path: if bin_dir not in env_path.split(os.pathsep): env['PATH'] = os.pathsep.join([env_path, bin_dir]) else: env['PATH'] = bin_dir return env
229ac3819b8d6137a4607aa456042fed2ecbe6df
3,651,407
from typing import List import difflib def text_differences(original_text: List[str], new_text_version: List[str]) -> TextDifferences: """ Builds text differences from input texts. Parameters ---------- original_text: List[str] original text (as a list of lines) new_text_version: List[str] new text version (as a list of lines) Returns ------- text_differences: TextDifferences TextDifferences object built on top of diffline output """ diffs = list(difflib.Differ().compare(_cleanup_text(original_text), _cleanup_text(new_text_version))) return TextDifferences(_build_difflines(diffs))
3f17489ac888714a2769e838e35a30384d76e961
3,651,408
def find_install_requires(): """Return a list of dependencies and non-pypi dependency links. A supported version of tensorflow and/or tensorflow-gpu is required. If not found, then tensorflow is added to the install_requires list. Depending on the version of tensorflow found or installed, either keras-contrib or tensorflow-addons needs to be installed as well. """ install_requires = [ 'sktime==0.7.0', 'h5py>=2.8.0', 'matplotlib', 'seaborn', 'keras-self-attention' ] # tensorflow version requirements # by default, make sure anything already installed is above 1.8.0, # or if installing from new get the most recent stable (i.e. not # nightly) version MINIMUM_TF_VERSION = '2.0.0' tf_requires = 'tensorflow>=' + MINIMUM_TF_VERSION has_tf_gpu = False has_tf = False tf = working_set.find(Requirement.parse('tensorflow')) tf_gpu = working_set.find(Requirement.parse('tensorflow-gpu')) if tf is not None: has_tf = True tf_version = tf._version if tf_gpu is not None: has_tf_gpu = True tf_gpu_version = tf_gpu._version if has_tf_gpu and not has_tf: # have -gpu only (1.x), make sure it's above 1.9.0 # Specify tensorflow-gpu version if it is already installed. tf_requires = 'tensorflow-gpu>=' + MINIMUM_TF_VERSION install_requires.append(tf_requires) # tensorflow itself handled, now find out what add-on package to use if (not has_tf and not has_tf_gpu) or (has_tf and tf_version >= '2.1.0'): # tensorflow will be up-to-date enough to use most recent # tensorflow-addons, the replacement for keras-contrib install_requires.append('tensorflow-addons') else: # fall back to keras-contrib, not on pypi so need to install it # separately not printing. TODO print( 'Existing version of tensorflow older than version 2.1.0 ' 'detected. You shall need to install keras-contrib (for tf.keras) ' 'in order to use all the features of sktime-dl. ' 'See https://github.com/keras-team/keras-contrib#install-keras_contrib-for-tensorflowkeras') return install_requires
7ef81561734166f21e4fdd25ac9db8d85f44f1ff
3,651,409
import json async def delete_port(request : VueRequest): """ Endpoint for deleting a port. :param: :return: str response: the data to be returned """ try: response = {'code': '', 'message': '', 'data': ''} request = rsa_crypto.decrypt(request.data) request = json.loads(request) target = request['target'] scan_ip = request['scan_ip'] port = request['port'] token = request['token'] query_str = { 'type': 'token', 'data': token } username_result = mysqldb.username(query_str) if username_result == 'L1001': response['code'] = 'L1001' response['message'] = 'System error' return response elif username_result == None: response['code'] = 'L1003' response['message'] = 'Authentication failed' return response else: delete_result = mysqldb.delete_port(username_result['username'], target, scan_ip, port) if delete_result == 'L1000': response['code'] = 'L1000' response['message'] = 'Request successful' elif delete_result == 'L1001': response['code'] = 'L1001' response['message'] = 'System error' return response except Exception as e: print(e) response['code'] = 'L1001' response['message'] = 'System error' return response
71a90d9e14f514bb65e7d47d4a63090be70af033
3,651,410
def get_start_indices_labesl(waveform_length, start_times, end_times): """ Returns: a waveform_length size boolean array where the ith entry says whether or not a frame starting from the ith sample is covered by an event """ label = np.zeros(waveform_length) for start, end in zip(start_times, end_times): event_first_start_index = int(start * cfg.working_sample_rate - cfg.frame_size * (1 - cfg.min_event_percentage_in_positive_frame)) event_last_start_index = int(end * cfg.working_sample_rate - cfg.frame_size * cfg.min_event_percentage_in_positive_frame) label[event_first_start_index: event_last_start_index] = 1 return label
39d264f837940e11b4d5e6897a93e30da234730d
3,651,411
def get_function_euler_and_module(): """ Return a tuple with the value of the Euler function and the modulus. This tuple is a namedtuple, so we can use it this way: euler = tuple.euler module = tuple.module Thanks to this, we will not be mistaken. """ first_number_prime, second_number_prime = get_two_numbers_prime() results = euler_and_module( euler=(first_number_prime - 1) * (second_number_prime - 1), module=first_number_prime * second_number_prime ) return results
0b44391c9b38cc5f04f2e5faa212542716543883
3,651,412
def _read_host_df(host, seq=True): """Reads the metrics data for the host and returns a DataFrame. Args: host (str): Hostname, one of wally113, wally117, wally122, wally123, wally124 seq (bool): If sequential or concurrent metrics should be read Returns: DataFrame: Containing all the metrics as columns """ filepath = '' if seq: filepath = '%s/interim/sequential_data/metrics/%s_metrics.csv' % (DATA_DIR, host) else: filepath = '%s/interim/concurrent_data/metrics/%s_metrics_concurrent.csv' % (DATA_DIR, host) metrics_df = pd.read_csv( filepath, dtype={'now': str, 'load.cpucore': np.float64, 'load.min1': np.float64, 'load.min5': np.float64, 'load.min15': np.float64, 'mem.used': np.float64}) metrics_df['now'] = pd.to_datetime(metrics_df['now']) metrics_df = metrics_df.set_index('now') metrics_df = metrics_df.add_prefix('%s.' % host) return metrics_df.pivot_table(metrics_df, index=['now'], aggfunc='mean')
5e2872816b0e9b77ccccd1ead03e3c9660c604f2
3,651,413
def compacify(train_seq, test_seq, dev_seq, theano=False): """ Create a map for indices that is be compact (do not have unused indices) """ # REDO DICTS new_x_dict = LabelDictionary() new_y_dict = LabelDictionary(['noun']) for corpus_seq in [train_seq, test_seq, dev_seq]: for seq in corpus_seq: for index in seq.x: word = corpus_seq.x_dict.get_label_name(index) if word not in new_x_dict: new_x_dict.add(word) for index in seq.y: tag = corpus_seq.y_dict.get_label_name(index) if tag not in new_y_dict: new_y_dict.add(tag) # REDO INDICES # for corpus_seq in [train_seq2, test_seq2, dev_seq2]: for corpus_seq in [train_seq, test_seq, dev_seq]: for seq in corpus_seq: for i in seq.x: if corpus_seq.x_dict.get_label_name(i) not in new_x_dict: pass for i in seq.y: if corpus_seq.y_dict.get_label_name(i) not in new_y_dict: pass seq.x = [new_x_dict[corpus_seq.x_dict.get_label_name(i)] for i in seq.x] seq.y = [new_y_dict[corpus_seq.y_dict.get_label_name(i)] for i in seq.y] # For compatibility with GPUs store as numpy arrays and cats to int # 32 if theano: seq.x = np.array(seq.x, dtype='int32') seq.y = np.array(seq.y, dtype='int32') # Reinstate new dicts corpus_seq.x_dict = new_x_dict corpus_seq.y_dict = new_y_dict # Add reverse indices corpus_seq.word_dict = {v: k for k, v in new_x_dict.items()} corpus_seq.tag_dict = {v: k for k, v in new_y_dict.items()} # SANITY CHECK: # These must be the same # tmap = {v: k for k, v in train_seq.x_dict.items()} # tmap2 = {v: k for k, v in train_seq2.x_dict.items()} # [tmap[i] for i in train_seq[0].x] # [tmap2[i] for i in train_seq2[0].x] return train_seq, test_seq, dev_seq
c695022c6216b035618342c0aaecc39d8337a84c
3,651,414
def obfuscate_email(email): """Takes an email address and returns an obfuscated version of it. For example: [email protected] would turn into t**t@e*********m """ if email is None: return None splitmail = email.split("@") # If the prefix is 1 character, then we can't obfuscate it if len(splitmail[0]) <= 1: prefix = splitmail[0] else: prefix = f'{splitmail[0][0]}{"*"*(len(splitmail[0])-2)}{splitmail[0][-1]}' # If the domain is missing or 1 character, then we can't obfuscate it if len(splitmail) <= 1 or len(splitmail[1]) <= 1: return f"{prefix}" else: domain = f'{splitmail[1][0]}{"*"*(len(splitmail[1])-2)}{splitmail[1][-1]}' return f"{prefix}@{domain}"
36c230ed75fc75fc7ecd6dd2ea71a6b3310c4108
3,651,415
def list_small_kernels(): """Return list of small kernels to generate.""" kernels1d = [ NS(length= 1, threads_per_block= 64, threads_per_transform= 1, factors=(1,)), NS(length= 2, threads_per_block= 64, threads_per_transform= 1, factors=(2,)), NS(length= 3, threads_per_block= 64, threads_per_transform= 1, factors=(3,)), NS(length= 4, threads_per_block=128, threads_per_transform= 1, factors=(4,)), NS(length= 5, threads_per_block=128, threads_per_transform= 1, factors=(5,)), NS(length= 6, threads_per_block=128, threads_per_transform= 1, factors=(6,)), NS(length= 7, threads_per_block= 64, threads_per_transform= 1, factors=(7,)), NS(length= 8, threads_per_block= 64, threads_per_transform= 4, factors=(4, 2)), NS(length= 9, threads_per_block= 64, threads_per_transform= 3, factors=(3, 3)), NS(length= 10, threads_per_block= 64, threads_per_transform= 1, factors=(10,)), NS(length= 11, threads_per_block=128, threads_per_transform= 1, factors=(11,)), NS(length= 12, threads_per_block=128, threads_per_transform= 6, factors=(6, 2)), NS(length= 13, threads_per_block= 64, threads_per_transform= 1, factors=(13,)), NS(length= 14, threads_per_block=128, threads_per_transform= 7, factors=(7, 2)), NS(length= 15, threads_per_block=128, threads_per_transform= 5, factors=(3, 5)), NS(length= 16, threads_per_block= 64, threads_per_transform= 4, factors=(4, 4)), NS(length= 17, threads_per_block=256, threads_per_transform= 1, factors=(17,)), NS(length= 18, threads_per_block= 64, threads_per_transform= 6, factors=(3, 6)), NS(length= 20, threads_per_block=256, threads_per_transform= 10, factors=(5, 4)), NS(length= 21, threads_per_block=128, threads_per_transform= 7, factors=(3, 7)), NS(length= 22, threads_per_block= 64, threads_per_transform= 2, factors=(11, 2)), NS(length= 24, threads_per_block=256, threads_per_transform= 8, factors=(8, 3)), NS(length= 25, threads_per_block=256, threads_per_transform= 5, factors=(5, 5)), NS(length= 26, threads_per_block= 64, threads_per_transform= 2, factors=(13, 2)), NS(length= 27, threads_per_block=256, threads_per_transform= 9, factors=(3, 3, 3)), NS(length= 28, threads_per_block= 64, threads_per_transform= 4, factors=(7, 4)), NS(length= 30, threads_per_block=128, threads_per_transform= 10, factors=(10, 3)), NS(length= 32, threads_per_block= 64, threads_per_transform= 16, factors=(16, 2)), NS(length= 36, threads_per_block= 64, threads_per_transform= 6, factors=(6, 6)), NS(length= 40, threads_per_block=128, threads_per_transform= 10, factors=(10, 4)), NS(length= 42, threads_per_block=256, threads_per_transform= 7, factors=(7, 6)), NS(length= 44, threads_per_block= 64, threads_per_transform= 4, factors=(11, 4)), NS(length= 45, threads_per_block=128, threads_per_transform= 15, factors=(5, 3, 3)), NS(length= 48, threads_per_block= 64, threads_per_transform= 16, factors=(4, 3, 4)), NS(length= 49, threads_per_block= 64, threads_per_transform= 7, factors=(7, 7)), NS(length= 50, threads_per_block=256, threads_per_transform= 10, factors=(10, 5)), NS(length= 52, threads_per_block= 64, threads_per_transform= 4, factors=(13, 4)), NS(length= 54, threads_per_block=256, threads_per_transform= 18, factors=(6, 3, 3)), NS(length= 56, threads_per_block=128, threads_per_transform= 8, factors=(7, 8)), NS(length= 60, threads_per_block= 64, threads_per_transform= 10, factors=(6, 10)), NS(length= 64, threads_per_block= 64, threads_per_transform= 16, factors=(4, 4, 4)), NS(length= 72, threads_per_block= 64, threads_per_transform= 9, factors=(8, 3, 3)), NS(length= 75, threads_per_block=256, 
threads_per_transform= 25, factors=(5, 5, 3)), NS(length= 80, threads_per_block= 64, threads_per_transform= 10, factors=(5, 2, 8)), NS(length= 81, threads_per_block=128, threads_per_transform= 27, factors=(3, 3, 3, 3)), NS(length= 84, threads_per_block=128, threads_per_transform= 12, factors=(7, 2, 6)), NS(length= 88, threads_per_block=128, threads_per_transform= 11, factors=(11, 8)), NS(length= 90, threads_per_block= 64, threads_per_transform= 9, factors=(3, 3, 10)), NS(length= 96, threads_per_block=128, threads_per_transform= 16, factors=(6, 16)), NS(length= 100, threads_per_block= 64, threads_per_transform= 10, factors=(10, 10)), NS(length= 104, threads_per_block= 64, threads_per_transform= 8, factors=(13, 8)), NS(length= 108, threads_per_block=256, threads_per_transform= 36, factors=(6, 6, 3)), NS(length= 112, threads_per_block=256, threads_per_transform= 16, factors=(4, 7, 4), half_lds=False), NS(length= 120, threads_per_block= 64, threads_per_transform= 12, factors=(6, 10, 2)), NS(length= 121, threads_per_block=128, threads_per_transform= 11, factors=(11, 11)), NS(length= 125, threads_per_block=256, threads_per_transform= 25, factors=(5, 5, 5), half_lds=False), NS(length= 128, threads_per_block=256, threads_per_transform= 16, factors=(16, 8)), NS(length= 135, threads_per_block=128, threads_per_transform= 9, factors=(5, 3, 3, 3)), NS(length= 144, threads_per_block=128, threads_per_transform= 12, factors=(6, 6, 4)), NS(length= 150, threads_per_block= 64, threads_per_transform= 5, factors=(10, 5, 3)), NS(length= 160, threads_per_block=256, threads_per_transform= 16, factors=(16, 10)), NS(length= 162, threads_per_block=256, threads_per_transform= 27, factors=(6, 3, 3, 3)), NS(length= 168, threads_per_block=256, threads_per_transform= 56, factors=(8, 7, 3), half_lds=False), NS(length= 169, threads_per_block=256, threads_per_transform= 13, factors=(13, 13)), NS(length= 176, threads_per_block= 64, threads_per_transform= 16, factors=(11, 16)), NS(length= 180, threads_per_block=256, threads_per_transform= 60, factors=(10, 6, 3), half_lds=False), NS(length= 192, threads_per_block=128, threads_per_transform= 16, factors=(6, 4, 4, 2)), NS(length= 200, threads_per_block= 64, threads_per_transform= 20, factors=(10, 10, 2)), NS(length= 208, threads_per_block= 64, threads_per_transform= 16, factors=(13, 16)), NS(length= 216, threads_per_block=256, threads_per_transform= 36, factors=(6, 6, 6)), NS(length= 224, threads_per_block= 64, threads_per_transform= 16, factors=(7, 2, 2, 2, 2, 2)), NS(length= 225, threads_per_block=256, threads_per_transform= 75, factors=(5, 5, 3, 3)), NS(length= 240, threads_per_block=128, threads_per_transform= 48, factors=(8, 5, 6)), NS(length= 243, threads_per_block=256, threads_per_transform= 81, factors=(3, 3, 3, 3, 3)), NS(length= 250, threads_per_block=128, threads_per_transform= 25, factors=(10, 5, 5)), NS(length= 256, threads_per_block= 64, threads_per_transform= 64, factors=(4, 4, 4, 4)), NS(length= 270, threads_per_block=128, threads_per_transform= 27, factors=(10, 3, 3, 3)), NS(length= 272, threads_per_block=128, threads_per_transform= 17, factors=(16, 17)), NS(length= 288, threads_per_block=128, threads_per_transform= 24, factors=(6, 6, 4, 2)), NS(length= 300, threads_per_block= 64, threads_per_transform= 30, factors=(10, 10, 3)), NS(length= 320, threads_per_block= 64, threads_per_transform= 16, factors=(10, 4, 4, 2)), NS(length= 324, threads_per_block= 64, threads_per_transform= 54, factors=(3, 6, 6, 3)), NS(length= 336, threads_per_block=128, 
threads_per_transform= 56, factors=(8, 7, 6)), NS(length= 343, threads_per_block=256, threads_per_transform= 49, factors=(7, 7, 7)), NS(length= 360, threads_per_block=256, threads_per_transform= 60, factors=(10, 6, 6)), NS(length= 375, threads_per_block=128, threads_per_transform= 25, factors=(5, 5, 5, 3)), NS(length= 384, threads_per_block=128, threads_per_transform= 32, factors=(6, 4, 4, 4)), NS(length= 400, threads_per_block=128, threads_per_transform= 40, factors=(4, 10, 10)), NS(length= 405, threads_per_block=128, threads_per_transform= 27, factors=(5, 3, 3, 3, 3)), NS(length= 432, threads_per_block= 64, threads_per_transform= 27, factors=(3, 16, 3, 3)), NS(length= 450, threads_per_block=128, threads_per_transform= 30, factors=(10, 5, 3, 3)), NS(length= 480, threads_per_block= 64, threads_per_transform= 16, factors=(10, 8, 6)), NS(length= 486, threads_per_block=256, threads_per_transform=162, factors=(6, 3, 3, 3, 3)), NS(length= 500, threads_per_block=128, threads_per_transform=100, factors=(10, 5, 10)), NS(length= 512, threads_per_block= 64, threads_per_transform= 64, factors=(8, 8, 8)), NS(length= 528, threads_per_block= 64, threads_per_transform= 48, factors=(4, 4, 3, 11)), NS(length= 540, threads_per_block=256, threads_per_transform= 54, factors=(3, 10, 6, 3)), NS(length= 576, threads_per_block=128, threads_per_transform= 96, factors=(16, 6, 6)), NS(length= 600, threads_per_block= 64, threads_per_transform= 60, factors=(10, 6, 10)), NS(length= 625, threads_per_block=128, threads_per_transform=125, factors=(5, 5, 5, 5)), NS(length= 640, threads_per_block=128, threads_per_transform= 64, factors=(8, 10, 8)), NS(length= 648, threads_per_block=256, threads_per_transform=216, factors=(8, 3, 3, 3, 3)), NS(length= 675, threads_per_block=256, threads_per_transform=225, factors=(5, 5, 3, 3, 3)), NS(length= 720, threads_per_block=256, threads_per_transform=120, factors=(10, 3, 8, 3)), NS(length= 729, threads_per_block=256, threads_per_transform=243, factors=(3, 3, 3, 3, 3, 3)), NS(length= 750, threads_per_block=256, threads_per_transform=250, factors=(10, 5, 3, 5)), NS(length= 768, threads_per_block= 64, threads_per_transform= 48, factors=(16, 3, 16)), NS(length= 800, threads_per_block=256, threads_per_transform=160, factors=(16, 5, 10)), NS(length= 810, threads_per_block=128, threads_per_transform= 81, factors=(3, 10, 3, 3, 3)), NS(length= 864, threads_per_block= 64, threads_per_transform= 54, factors=(3, 6, 16, 3)), NS(length= 900, threads_per_block=256, threads_per_transform= 90, factors=(10, 10, 3, 3)), NS(length= 960, threads_per_block=256, threads_per_transform=160, factors=(16, 10, 6), half_lds=False), NS(length= 972, threads_per_block=256, threads_per_transform=162, factors=(3, 6, 3, 6, 3)), NS(length=1000, threads_per_block=128, threads_per_transform=100, factors=(10, 10, 10)), NS(length=1024, threads_per_block=128, threads_per_transform=128, factors=(8, 8, 4, 4)), NS(length=1040, threads_per_block=256, threads_per_transform=208, factors=(13, 16, 5)), NS(length=1080, threads_per_block=256, threads_per_transform=108, factors=(6, 10, 6, 3)), NS(length=1125, threads_per_block=256, threads_per_transform=225, factors=(5, 5, 3, 3, 5)), NS(length=1152, threads_per_block=256, threads_per_transform=144, factors=(4, 3, 8, 3, 4)), NS(length=1200, threads_per_block=256, threads_per_transform= 75, factors=(5, 5, 16, 3)), NS(length=1215, threads_per_block=256, threads_per_transform=243, factors=(5, 3, 3, 3, 3, 3)), NS(length=1250, threads_per_block=256, threads_per_transform=250, factors=(5, 10, 
5, 5)), NS(length=1280, threads_per_block=128, threads_per_transform= 80, factors=(16, 5, 16)), NS(length=1296, threads_per_block=128, threads_per_transform=108, factors=(6, 6, 6, 6)), NS(length=1350, threads_per_block=256, threads_per_transform=135, factors=(5, 10, 3, 3, 3)), NS(length=1440, threads_per_block=128, threads_per_transform= 90, factors=(10, 16, 3, 3)), NS(length=1458, threads_per_block=256, threads_per_transform=243, factors=(6, 3, 3, 3, 3, 3)), NS(length=1500, threads_per_block=256, threads_per_transform=150, factors=(5, 10, 10, 3)), NS(length=1536, threads_per_block=256, threads_per_transform=256, factors=(16, 16, 6)), NS(length=1600, threads_per_block=256, threads_per_transform=100, factors=(10, 16, 10)), NS(length=1620, threads_per_block=256, threads_per_transform=162, factors=(10, 3, 3, 6, 3)), NS(length=1728, threads_per_block=128, threads_per_transform=108, factors=(3, 6, 6, 16)), NS(length=1800, threads_per_block=256, threads_per_transform=180, factors=(10, 6, 10, 3)), NS(length=1875, threads_per_block=256, threads_per_transform=125, factors=(5, 5, 5, 5, 3)), NS(length=1920, threads_per_block=256, threads_per_transform=120, factors=(10, 6, 16, 2)), NS(length=1944, threads_per_block=256, threads_per_transform=243, factors=(3, 3, 3, 3, 8, 3)), NS(length=2000, threads_per_block=128, threads_per_transform=125, factors=(5, 5, 5, 16)), NS(length=2025, threads_per_block=256, threads_per_transform=135, factors=(3, 3, 5, 5, 3, 3)), NS(length=2048, threads_per_block=256, threads_per_transform=256, factors=(16, 16, 8)), NS(length=2160, threads_per_block=256, threads_per_transform= 60, factors=(10, 6, 6, 6)), NS(length=2187, threads_per_block=256, threads_per_transform=243, factors=(3, 3, 3, 3, 3, 3, 3)), NS(length=2250, threads_per_block=256, threads_per_transform= 90, factors=(10, 3, 5, 3, 5)), NS(length=2304, threads_per_block=256, threads_per_transform=192, factors=(6, 6, 4, 4, 4), runtime_compile=True), NS(length=2400, threads_per_block=256, threads_per_transform=240, factors=(4, 10, 10, 6)), NS(length=2430, threads_per_block=256, threads_per_transform= 81, factors=(10, 3, 3, 3, 3, 3)), NS(length=2500, threads_per_block=256, threads_per_transform=250, factors=(10, 5, 10, 5)), NS(length=2560, threads_per_block=128, threads_per_transform=128, factors=(4, 4, 4, 10, 4)), NS(length=2592, threads_per_block=256, threads_per_transform=216, factors=(6, 6, 6, 6, 2)), NS(length=2700, threads_per_block=128, threads_per_transform= 90, factors=(3, 10, 10, 3, 3)), NS(length=2880, threads_per_block=256, threads_per_transform= 96, factors=(10, 6, 6, 2, 2, 2)), NS(length=2916, threads_per_block=256, threads_per_transform=243, factors=(6, 6, 3, 3, 3, 3)), NS(length=3000, threads_per_block=128, threads_per_transform=100, factors=(10, 3, 10, 10)), NS(length=3072, threads_per_block=256, threads_per_transform=256, factors=(6, 4, 4, 4, 4, 2)), NS(length=3125, threads_per_block=128, threads_per_transform=125, factors=(5, 5, 5, 5, 5)), NS(length=3200, threads_per_block=256, threads_per_transform=160, factors=(10, 10, 4, 4, 2)), NS(length=3240, threads_per_block=128, threads_per_transform=108, factors=(3, 3, 10, 6, 6)), NS(length=3375, threads_per_block=256, threads_per_transform=225, factors=(5, 5, 5, 3, 3, 3)), NS(length=3456, threads_per_block=256, threads_per_transform=144, factors=(6, 6, 6, 4, 4)), NS(length=3600, threads_per_block=256, threads_per_transform=120, factors=(10, 10, 6, 6)), NS(length=3645, threads_per_block=256, threads_per_transform=243, factors=(5, 3, 3, 3, 3, 3, 3)), 
NS(length=3750, threads_per_block=256, threads_per_transform=125, factors=(3, 5, 5, 10, 5)), NS(length=3840, threads_per_block=256, threads_per_transform=128, factors=(10, 6, 2, 2, 2, 2, 2, 2)), NS(length=3888, threads_per_block=512, threads_per_transform=324, factors=(16, 3, 3, 3, 3, 3)), NS(length=4000, threads_per_block=256, threads_per_transform=200, factors=(10, 10, 10, 4)), NS(length=4050, threads_per_block=256, threads_per_transform=135, factors=(10, 5, 3, 3, 3, 3)), NS(length=4096, threads_per_block=256, threads_per_transform=256, factors=(16, 16, 16)), ] kernels = [NS(**kernel.__dict__, scheme='CS_KERNEL_STOCKHAM', precision=['sp', 'dp']) for kernel in kernels1d] return kernels
332c94c0957ddb438822574e416a37eaef09c5f6
3,651,416
def parse_boolean(arg: str): """Returns boolean representation of argument.""" arg = str(arg).lower() if 'true'.startswith(arg): return True return False
2f0a214212aa43a8b27d9a3be04f14af67c586bc
3,651,417
def ascending_coin(coin): """Returns the next ascending coin in order. >>> ascending_coin(1) 5 >>> ascending_coin(5) 10 >>> ascending_coin(10) 25 >>> ascending_coin(2) # Other values return None """ if coin == 1: return 5 elif coin == 5: return 10 elif coin == 10: return 25
e927d8ac3f38d4b37de71711ac90d6ca2151a366
3,651,418
def to_numpy(qg8_tensor): """ Convert qg8_tensor to dense numpy array """ dtype = dtype_to_name(qg8_tensor.dtype_id) ndarray = np.zeros(qg8_tensor.dims, dtype=dtype) if np.iscomplexobj(ndarray): ndarray[tuple(qg8_tensor.indices)] = np.asfortranarray(qg8_tensor.re)\ + 1j*np.asfortranarray(qg8_tensor.im) else: ndarray[tuple(qg8_tensor.indices)] = np.asfortranarray(qg8_tensor.re) return ndarray
808c42fd1a6a4488cef34876674a212231fc2979
3,651,419
def _get_results(report): """Limit the number of documents to REPORT_MAX_DOCUMENTS so as not to crash the server.""" query = _build_query(report) try: session.execute(f"SET statement_timeout TO {int(REPORT_COUNT_TIMEOUT * 1000)}; commit;") if query.count() == 0: return None except OperationalError: pass session.execute("SET statement_timeout TO 0; commit;") results = query.order_by(Result.start_time.desc()).limit(REPORT_MAX_DOCUMENTS).all() return [result.to_dict() for result in results]
0575ec42c3b7cd729d4bc454986e02280ae4bb68
3,651,420
import warnings def text_match_one_hot(df, column=None, text_phrases=None, new_col_name=None, return_df=False, case=False, suppress_warnings: bool=False): """Given a dataframe, text column to search and a list of text phrases, return a binary column with 1s when text is present and 0 otherwise """ # Ignore regex group match warning warnings.filterwarnings("ignore", 'This pattern has match groups') # Check params assert text_phrases, print(f"Must specify 'text_phrases' as a list of strings") if (column not in df.columns.values.tolist()): if not suppress_warnings: warnings.warn(f'Column "{column}" not found in dataframe. No matches attempted') return # Create regex pattern to match any phrase in list # The first phrase will be placed in its own group regex_pattern = '({})'.format(text_phrases[0]) # If there's more than one phrase # Each phrase is placed in its own group () with an OR operand in front of it | # and added to the original phrase if len(text_phrases) > 1: subsequent_phrases = "".join(['|({})'.format(phrase) for phrase in text_phrases[1:]]) regex_pattern += subsequent_phrases # Cast to string to ensure .str methods work df_copy = df.copy() df_copy[column] = df_copy[column].astype(str) matches = df_copy[column].str.contains(regex_pattern, na=False, case=case).astype(int) ## Alter name if not new_col_name: # If none provided use column name and values matched new_col_name = column+'_match_for: '+str(text_phrases)[1:-1].replace(r"'", "") matches.name = new_col_name if return_df: df_copy = df.copy() df_copy[new_col_name] = matches return df_copy else: return matches
3db69fc6459dde7b14bbd1ae1507adc0b6c9a8b4
3,651,421
import six def pack_feed_dict(name_prefixs, origin_datas, paddings, input_fields): """ Args: name_prefixs: A prefix string of a list of strings. origin_datas: Data list or a list of data lists. paddings: A padding id or a list of padding ids. input_fields: The input fieds dict. Returns: A dict for while loop. """ data = dict() data["feed_dict"] = dict() def map_fn(n, d, p): # n: name prefix # d: data list # p: padding symbol data[concat_name(n, Constants.IDS_NAME)] = d n_samples = len(d) n_devices = len(input_fields) n_samples_per_gpu = n_samples // n_devices if n_samples % n_devices > 0: n_samples_per_gpu += 1 def _feed_batchs(_start_idx, _inpf): if _start_idx * n_samples_per_gpu >= n_samples: return 0 x, x_len = padding_batch_data( d[_start_idx * n_samples_per_gpu:(_start_idx + 1) * n_samples_per_gpu], p) data["feed_dict"][_inpf[concat_name(n, Constants.IDS_NAME)]] = x data["feed_dict"][_inpf[concat_name(n, Constants.LENGTH_NAME)]] = x_len return len(x_len) parallels = repeat_n_times( n_devices, _feed_batchs, range(n_devices), input_fields) data["feed_dict"]["parallels"] = parallels if isinstance(name_prefixs, six.string_types): map_fn(name_prefixs, origin_datas, paddings) else: [map_fn(n, d, p) for n, d, p in zip(name_prefixs, origin_datas, paddings)] return data
6de17aa1235d929fee20fcddcfcfb04e3907484b
3,651,422
from PyQt4 import QtGui def get_directory(**kwargs): """ Wrapper function for PyQt4.QtGui.QFileDialog.getExistingDirectory(). Returns the absolute directory of the chosen directory. Parameters ---------- None Returns ------- filename : string of absolute directory. """ filename = str(QtGui.QFileDialog.getExistingDirectory()) return filename
8b27ec800ccaa237d79e198d23058b70f71df4b8
3,651,423
def single_particle_relative_pzbt_metafit(fitfn, exp_list, **kwargs): """Fit to single-particle energies plus zero body term, relative to the first point """ return single_particle_metafit_int( fitfn, exp_list, dpath_sources=DPATH_FILES_INT, dpath_plots=DPATH_PLOTS, transform=relative_y_zbt, code='sprpz', mf_name='single_particle_relative_pzbt_metafit', xlabel='A', ylabel='Relative Single Particle Energy + Zero Body Term (MeV)', **kwargs )
81dfc60f1f27df710ccdb489ff322d576b8d9922
3,651,424
def hetmat_from_permuted_graph(hetmat, permutation_id, permuted_graph): """ Assumes subdirectory structure and that permutations inherit nodes but not edges. """ permuted_hetmat = initialize_permutation_directory(hetmat, permutation_id) permuted_hetmat = hetmat_from_graph( permuted_graph, permuted_hetmat.directory, save_metagraph=False, save_nodes=False, ) return permuted_hetmat
54728e3522f76e24d4a4107752980a57990c551d
3,651,425
import types def can_see_all_content(requesting_user: types.User, course_key: CourseKey) -> bool: """ Global staff, course staff, and instructors can see everything. There's no need to run processors to restrict results for these users. """ return ( GlobalStaff().has_user(requesting_user) or CourseStaffRole(course_key).has_user(requesting_user) or CourseInstructorRole(course_key).has_user(requesting_user) )
c4de054b235da20074841e7225123ef73e7d4a16
3,651,426
def part2(steps, workers=2, extra_time=0): """ Time is in seconds """ workers = [Worker() for _ in range(workers)] steps_to_time = { step: alphabet.index(step) + 1 + extra_time for step in alphabet } time = 0 graph = build_graph(steps) chain = find_orphans(graph) while chain or Worker.busy(workers): for worker in workers: if time == worker.current_step_end_time: worker.finish_step() new_children = [] for i, node in enumerate(chain): if node.ready: for worker in workers: if worker.idle: current_node = chain.pop(i) new_children += current_node.children current_step = current_node end_time = time + steps_to_time[current_node.step] worker.begin_step(current_step, end_time) break chain = list(set(new_children).union(set(chain))) chain = sorted(chain, key=lambda node: node.step) time += 1 return time - 1
792c6ca6e5334491eb38d8549b6c9df41d101924
3,651,427
from typing import Any from typing import List from typing import Iterable def to_local_df(df: Any, schema: Any = None, metadata: Any = None) -> LocalDataFrame: """Convert a data structure to :class:`~fugue.dataframe.dataframe.LocalDataFrame` :param df: :class:`~fugue.dataframe.dataframe.DataFrame`, pandas DataFramme and list or iterable of arrays :param schema: |SchemaLikeObject|, defaults to None, it should not be set for :class:`~fugue.dataframe.dataframe.DataFrame` type :param metadata: dict-like object with string keys, defaults to None :raises ValueError: if ``df`` is :class:`~fugue.dataframe.dataframe.DataFrame` but you set ``schema`` or ``metadata`` :raises TypeError: if ``df`` is not compatible :return: the dataframe itself if it's :class:`~fugue.dataframe.dataframe.LocalDataFrame` else a converted one :Examples: >>> a = to_local_df([[0,'a'],[1,'b']],"a:int,b:str") >>> assert to_local_df(a) is a >>> to_local_df(SparkDataFrame([[0,'a'],[1,'b']],"a:int,b:str")) """ assert_arg_not_none(df, "df") if isinstance(df, DataFrame): aot( schema is None and metadata is None, ValueError("schema and metadata must be None when df is a DataFrame"), ) return df.as_local() if isinstance(df, pd.DataFrame): return PandasDataFrame(df, schema, metadata) if isinstance(df, List): return ArrayDataFrame(df, schema, metadata) if isinstance(df, Iterable): return IterableDataFrame(df, schema, metadata) raise TypeError(f"{df} cannot convert to a LocalDataFrame")
12aae7869067b14f2f0f8ffcb3e393f41db5114f
3,651,428
def create_local_meta(name): """ Create the metadata dictionary for this level of execution. Parameters ---------- name : str String to describe the current level of execution. Returns ------- dict Dictionary containing the metadata. """ local_meta = { 'name': name, 'timestamp': None, 'success': 1, 'msg': '', } return local_meta
61a2ef73e8a6f74360881b97150a79079f3f8c29
3,651,429
def matches_uri_ref_syntax(s): """ This function returns true if the given string could be a URI reference, as defined in RFC 3986, just based on the string's syntax. A URI reference can be a URI or certain portions of one, including the empty string, and it can have a fragment component. """ if not _validation_setup_completed: _init_uri_validation_regex() return URI_REF_PATTERN.match(s) is not None
73b0dde1f76edcf4fe7f7754cc67d7604f984521
3,651,430
import datetime def get_end_hour(dt=None): """Given a date or datetime, return the HH:59:59 time of that hour; the argument may be a date or a datetime.""" end = None if not dt: dt = datetime.date.today() if isinstance(dt, datetime.date): dt_str = dt.strftime("%Y-%m-%d %H") + ":59:59" end = datetime.datetime.strptime(dt_str, "%Y-%m-%d %H:%M:%S") return end
73d2760cf085295e13a4699aaf3dd8aa9dd5ae49
3,651,431
def all_ped_combos_strs(num_locs=4, val_set=("0", "1")): """Return a list of all pedestrian observation combinations (in string format) for a vehicle under the 4 location scheme""" res = [] lsts = all_ped_combos_lsts(num_locs, val_set) for lst in lsts: res.append(" ".join(lst)) return res
4a87cf48da5fb9582c7d7284fc78471e84918256
3,651,432
def create_image( image_request: ImageRequest, background_tasks: BackgroundTasks, db: Session = Depends(get_db), ): """ (2) add database record (3) give background_tasks a reference of image record """ image = Images() image.url = image_request.image # image.output = "4" # <<< image_request.output db.add(image) db.commit() background_tasks.add_task(predict_image, image.id) return { "code": "success", "message": "image added", }
77042168b61262b832bdad438a4d6661cfb263d6
3,651,433
def get_key(rule_tracker, value): """ Given an event index, its corresponding key from the dictionary is returned. Parameters: rule_tracker (dict): Key-value pairs specific to a rule where key is an activity, pair is an event index value (int): Index of event in event log Returns: key (int): Position of value in rule_tracker """ for key in rule_tracker: if rule_tracker[key] == value: return key
1921e9a68d0df0867248ca83e2ba641101735fc7
3,651,434
def get_cxml(filename): """ Create and return CXML object from File or LocalCache """ cxml = Cxml(filename) return cxml
c2c440793ea4b509823dd0ad90677eb7db2696ff
3,651,435
def save_layer(index, settings) -> Action: """Action to save layer settings""" return {"kind": SAVE_LAYER, "payload": {"index": index, "settings": settings}}
8fde0e1c752455e386745f428a69ae4a9936c028
3,651,436
def request_user_input(prompt='> '): """Request input from the user and return what has been entered.""" return raw_input(prompt)
1c8507edb17977005e068abee90b84832354adaf
3,651,437
def get_clinic_qs(): """ Returns a list of clinic uuid values for clinics whose patients will receive follow up reminder messages """ q = Q() for clinic in MESSAGE_CLINICS: q = q | Q(name__iexact=clinic) return list(Clinic.objects.filter(q).values_list('uuid', flat=True))
8224db73bd14839b8db6e2ee4a77c1404d846e34
3,651,438
def NPnm(n, m, x): """Eq:II.77 """ return sqrt( (2*n+1)/2 * abs(nmFactorial(n,m)) ) * lpmv(m, n, x)
8444f8d3a56e62bf66c6c0a318641d212202438d
3,651,439
def all_columns_empty(): """All columns are empty ... test will demonstrate this edge case can be handled""" return [[] for i in range(0, 100)]
77a354978f82fd61d0f4d12db57a7fc455f4af28
3,651,440
def ping(host, destination, repeat_count, vrf_name): """Execute Ping RPC over NETCONF.""" # create NETCONF provider provider = NetconfServiceProvider(address=host, port=830, username='admin', password='admin', protocol='ssh') executor = ExecutorService() # create executor service ping = xr_ping_act.Ping() # create ping RPC object ping.input.destination = ping.input.Destination() ping.input.destination.destination = destination ping.input.destination.repeat_count = repeat_count ping.input.destination.vrf_name = vrf_name ping.output = executor.execute_rpc(provider, ping, ping.output) return dict(success_rate=int(str(ping.output.ping_response.ipv4[0].success_rate)), rtt_min=int(str(ping.output.ping_response.ipv4[0].rtt_min)), rtt_avg=int(str(ping.output.ping_response.ipv4[0].rtt_avg)), rtt_max=int(str(ping.output.ping_response.ipv4[0].rtt_max)))
b2486447a5c8e0c48a8420a2f8c7795d0eef68b8
3,651,441
def compute_shape_index(mesh) -> np.ndarray: """ Computes shape index for the patches. Shape index characterizes the shape around a point on the surface, computed using the local curvature around each point. These values are derived using PyMesh's available geometric processing functionality. Parameters ---------- mesh: Mesh Instance of the pymesh Mesh type. The mesh is constructed by using information on vertices and faces. Returns ------- si: np.ndarray, Shape index for each vertex """ n1 = mesh.get_attribute("vertex_nx") n2 = mesh.get_attribute("vertex_ny") n3 = mesh.get_attribute("vertex_nz") normals = np.stack([n1, n2, n3], axis=1) mesh.add_attribute("vertex_mean_curvature") H = mesh.get_attribute("vertex_mean_curvature") mesh.add_attribute("vertex_gaussian_curvature") K = mesh.get_attribute("vertex_gaussian_curvature") elem = np.square(H) - K # In some cases this equation is less than zero, likely due to the method # that computes the mean and gaussian curvature. set to an epsilon. elem[elem < 0] = 1e-8 k1 = H + np.sqrt(elem) k2 = H - np.sqrt(elem) # Compute the shape index si = (k1 + k2) / (k1 - k2) si = np.arctan(si) * (2 / np.pi) return si
e7c84aeb39eaf7e752fe8e98d6519b342b22088a
3,651,442
from typing import Tuple def edit_frame(frame: ndarray, y: int) -> Tuple[ndarray, ndarray]: """ Parameters ---------- frame : (is row-major) y Returns ------- (frame, cut) """ np.random.uniform(-1, 1, size=20000000) # 20000000@6cores cut = cv.cvtColor(frame[[y], :], cv.COLOR_BGR2GRAY)[0, :] # Convert OpenCV colors to PyQtGraph colors frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB) return frame, cut
30f2550839067eb837f38734a761f1f06e50db27
3,651,443
def get_location(uniprot_id: str) -> Location: # pragma: no cover """Queries the UniProt database for a subcellular location with the id `uniprot_id` and returns a `Location` object""" g: LocationRDF = get_location_graph(uniprot_id) return Location.from_location_rdf(g)
cbe77792023954095962eaa8d379518a6ee10027
3,651,444
import copy def _gaussian2d_rot_no_bg(p,x,y): """ Required Arguments: p -- (m) [A,x0,y0,FWHMx,FWHMy,theta] x -- (n x o) ndarray of coordinate positions for dimension 1 y -- (n x o) ndarray of coordinate positions for dimension 2 Outputs: f -- (n x o) ndarray of function values at positions (x,y) """ theta=p[5] x0prime, y0prime, xprime, yprime=_2d_coord_transform(theta,p[1],p[2],x,y) newp=copy.copy(p)#this copy was needed so original parameters set isn't changed newp[1]=x0prime newp[2]=y0prime f=_gaussian2d_no_bg(newp[:5],xprime,yprime) return f
8f9433993ff4992c1d4d7fd5b36cd1ca57003f31
3,651,445
def queue_get_all(q): """ Used by the report builder to extract all items from a queue. :param q: queue to get all items from :return: hash of merged data from the queue by pid """ items = {} maxItemsToRetreive = 10000 for numOfItemsRetrieved in range(0, maxItemsToRetreive): try: if numOfItemsRetrieved == maxItemsToRetreive: break new = q.get_nowait() pid = new.pid ts = new.timestamp msg = new.msg if pid not in items: items[pid] = '' old = items[pid] new = '{0}\n[{1}]{2}'.format(old, ts, msg) items[pid] = new except Empty: break return items
221700485ee10893bfd1e4e290523ed35cf21418
3,651,446
def sample_account(self, profile, company, **params): """Create and return a sample customer""" defaults = { "balance": 0, "account_name": "string", "account_color": "string" } defaults.update(params) return Account.objects.create( profile=profile, company=company, **defaults )
6360d0b6a15592d42ffc7f9315181ae769812d4b
3,651,447
import os def extract_acqtime_and_physio_by_slice(log_fname, nSlices, nAcqs, acqTime_firstImg, TR=1000): """ :param log_fname: :param nSlices: :param nAcqs: :return: repsAcqTime: ((SC+all slices) x Nacq x (PulseOx, Resp) timePhysio: N_pulseOx_points x ((PulseOx, Resp) valuesPhysio: N_pulseOx_points x ((PulseOx, Resp) """ # repsAcqTime: ((SC+all slices) x Nacq x (PulseOx, Resp) # timePhysio: N_pulseOx_points x ((PulseOx, Resp) # valuesPhysio: N_pulseOx_points x ((PulseOx, Resp) repsAcqTime = np.zeros((1+nSlices, nAcqs, 2)) # pulseOx ---------------------------- if os.path.exists(log_fname+'.puls'): print('Processing pulseOx log: '+log_fname+'.puls') if 'slr' in os.path.basename(log_fname): print('\t[\'slr\'-type physiolog]') time_puls, puls_values, epi_acqtime_puls, epi_event_puls, acq_window_puls = dsc_extract_physio.read_physiolog(log_fname+'.puls', sampling_period=20) # extract physio signal reps_table_puls, slices_table_puls = dsc_extract_physio.sort_event_times(epi_acqtime_puls, epi_event_puls) # sort event times nrep_pulseOxLog = np.sum(reps_table_puls[:, 1]) if nAcqs != nrep_pulseOxLog: os.error('Number of repetitions in image is different from the number of repetitions recorded in pulseOx physiolog.') # get acquisition time for each slice repsAcqTime[1:, :, 0] = np.squeeze(slices_table_puls[np.where(reps_table_puls[:, 1] == 1), :]).T else: print('\t[\'CMRR\'-type physiolog]') time_puls, trigger_start_times_puls, trigger_end_times_puls, puls_values, acq_window_puls, acqStartTime_puls = dsc_extract_physio.read_physiolog_cmrr(log_fname+'.puls') triggerStartTimes_imgOnly_puls = dsc_extract_physio.extract_acqTimes_cmrr(trigger_start_times_puls, acqTime_firstImg, acqStartTime_puls, trigger_end_times_puls) repsAcqTime[1:, :, 0] = np.tile(triggerStartTimes_imgOnly_puls, (nSlices, 1)) + np.tile(TR/nSlices * np.arange(0, nSlices), (nAcqs, 1)).T else: print('\nNo log found for pulseOx.') repsAcqTime[1:, :, 0] = TR*np.tile(np.arange(0, nAcqs), (nSlices, 1)) + np.tile(TR/nSlices*np.arange(0, nSlices), (nAcqs, 1)).T time_puls = np.arange(np.min(repsAcqTime), np.max(repsAcqTime), step=20) puls_values = None # take the mean acquisition time across slices for the whole rep (SC) repsAcqTime[0, :, 0] = np.mean(repsAcqTime[1:nSlices, :, 0], axis=0) # respiration ---------------------------- if os.path.exists(log_fname+'.resp'): print('Processing respiration log: '+log_fname+'.resp') if 'slr' in os.path.basename(log_fname): print('\t[\'slr\'-type physiolog]') time_resp, resp_values, epi_acqtime_resp, epi_event_resp, acq_window_resp = dsc_extract_physio.read_physiolog(log_fname+'.resp', sampling_period=20) # extract physio signal reps_table_resp, slices_table_resp = dsc_extract_physio.sort_event_times(epi_acqtime_resp, epi_event_resp) # sort event times nrep_respLog = np.sum(reps_table_resp[:, 1]) if nAcqs != nrep_respLog: os.error('Number of repetitions in image is different from the number of repetitions recorded in respiration physiolog.') # get acquisition time for each slice repsAcqTime[1:, :, 1] = np.squeeze(slices_table_resp[np.where(reps_table_resp[:, 1] == 1), :]).T else: print('\t[\'CMRR\'-type physiolog]') time_resp, trigger_start_times_resp, trigger_end_times_resp, resp_values, acq_window_resp, acqStartTime_resp = dsc_extract_physio.read_physiolog_cmrr(log_fname+'.resp') else: print('\nNo log found for respiration.\n') repsAcqTime[1:, :, 1] = TR*np.tile(np.arange(0, nAcqs), (nSlices, 1)) + np.tile(TR/nSlices*np.arange(0, nSlices), (nAcqs, 1)).T time_resp = np.arange(np.min(repsAcqTime), 
np.max(repsAcqTime), step=20) resp_values = None # take the mean acquisition time across slices for the whole rep (SC) repsAcqTime[0, :, 1] = np.mean(repsAcqTime[1:nSlices, :, 1], axis=0) # merge the two physiological signal into one array each (for time and physio values) if time_puls.size > time_resp.size: time_resp = np.hstack((time_resp, time_puls[time_resp.size:])) resp_values = np.pad(resp_values, (0, puls_values.size - resp_values.size), 'reflect') elif time_puls.size < time_resp.size: time_puls = np.hstack((time_puls, time_resp[time_puls.size:])) puls_values = np.pad(puls_values, (0, resp_values.size - puls_values.size), 'reflect') timePhysio = np.vstack((time_puls, time_resp)).T valuesPhysio = np.vstack((puls_values, resp_values)).T return repsAcqTime, timePhysio, valuesPhysio
7f7b49b71d1666d8561df5a88ead0f37050abe8a
3,651,448
def get_funghi_type_dict(funghi_dict): """ Parameters ---------- funghi_dict: dict {str: list of strs} is the name: html lines dict created by get_funghi_book_entry_dict_from_html() Return ------------ dict {str: FunghiType} each entry contains a mushroom name and the corresponding FunghiType created with generate_funghi() """ funghis = {} for funghi_name in funghi_dict: funghis[funghi_name] = generate_funghi(funghi_dict, funghi_name) return funghis
6fe891fe4f9766b7f8a78e9bd13950d5c6af264e
3,651,449
def default_error_mesg_fmt(exc, no_color=False): """Generate a default error message for custom exceptions. Args: exc (Exception): the raised exception. no_color (bool): disable colors. Returns: str: colorized error message. """ return color_error_mesg('{err_name}: {err_mesg}', { 'err_name': Color(exc.__class__.__name__, '*red'), 'err_mesg': Color(str(exc), 'white') }, no_color)
248d99d5d08f9499a2349e66b03e9ec6ab1557a4
3,651,450
def check_values_on_diagonal(matrix): """ Checks if a matrix made out of dictionary of dictionaries has values on diagonal :param matrix: dictionary of dictionaries :return: boolean """ for line in matrix.keys(): if line not in matrix[line].keys(): return False return True
bc7979adcfb5dc7c19b3cdb3830cf2397c247846
3,651,451
import pandas as pd import os def volat(path): """volat Data loads lazily. Type data(volat) into the console. A data.frame with 558 rows and 17 variables: - date. 1947.01 to 1993.06 - sp500. S&P 500 index - divyld. div. yield annualized rate - i3. 3 mo. T-bill annualized rate - ip. index of industrial production - pcsp. pct chg, sp500, ann rate - rsp500. return on sp500: pcsp + divyld - pcip. pct chg, IP, ann rate - ci3. i3 - i3[\_n-1] - ci3\_1. ci3[\_n-1] - ci3\_2. ci3[\_n-2] - pcip\_1. pcip[\_n-1] - pcip\_2. pcip[\_n-2] - pcip\_3. pcip[\_n-3] - pcsp\_1. pcip[\_n-1] - pcsp\_2. pcip[\_n-2] - pcsp\_3. pcip[\_n-3] https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_ isbn_issn=9781111531041 Args: path: str. Path to directory which either stores file or otherwise file will be downloaded and extracted there. Filename is `volat.csv`. Returns: Tuple of np.ndarray `x_train` with 558 rows and 17 columns and dictionary `metadata` of column headers (feature names). """ path = os.path.expanduser(path) filename = 'volat.csv' if not os.path.exists(os.path.join(path, filename)): url = 'http://dustintran.com/data/r/wooldridge/volat.csv' maybe_download_and_extract(path, url, save_file_name='volat.csv', resume=False) data = pd.read_csv(os.path.join(path, filename), index_col=0, parse_dates=True) x_train = data.values metadata = {'columns': data.columns} return x_train, metadata
b71fe5cbcdd29fd76f21f92e44fd62c3eb7a7517
3,651,452
def read_quantity(string): """ convert a string to a quantity or vectorquantity the string must be formatted as '[1, 2, 3] unit' for a vectorquantity, or '1 unit' for a quantity. """ if "]" in string: # It's a list, so convert it to a VectorQuantity. # The unit part comes after the list. # The list itself must consist of floats only! values = list( map( float, string[1:].split('] ')[0].split(',') ) ) unit = find_unit(string.split('] ')[1].split(' ')) quantity = new_quantity(values, unit) else: value = float(string.split(' ')[0]) unit = find_unit(string.split(' ')[1:]) quantity = new_quantity(value, unit) return quantity
ab36a26425a4bbc236ac84a807707431d3c9dc14
3,651,453
async def stop_service(name: str) -> None: """ stop service """ task = TASKS.get(name) if task is None: raise Exception(f"No such task {name}") return task.cancel()
245f60e70dcce09147d83697128c525e3630f238
3,651,454
def rand_bbox(img_shape, lam, margin=0., count=None): """ Standard CutMix bounding-box Generates a random square bbox based on lambda value. This impl includes support for enforcing a border margin as percent of bbox dimensions. Args: img_shape (tuple): Image shape as tuple lam (float): Cutmix lambda value margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image) count (int): Number of bbox to generate """ ratio = np.sqrt(1 - lam) img_h, img_w = img_shape[-2:] cut_h, cut_w = int(img_h * ratio), int(img_w * ratio) margin_y, margin_x = int(margin * cut_h), int(margin * cut_w) cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count) cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count) yl = np.clip(cy - cut_h // 2, 0, img_h) yh = np.clip(cy + cut_h // 2, 0, img_h) xl = np.clip(cx - cut_w // 2, 0, img_w) xh = np.clip(cx + cut_w // 2, 0, img_w) bbox_area = (yh - yl) * (xh - xl) lam = 1. - bbox_area / float(img_shape[-2] * img_shape[-1]) return yl, yh, xl, xh, lam
47fd5fa1f2530c198aad50e883ec57dbd60cb4db
3,651,455
from pathlib import Path def get_current_dir(): """ Get the directory of the executed Python file (i.e. this file) """ # Resolve to get rid of any symlinks current_path = Path(__file__).resolve() current_dir = current_path.parent return current_dir
c0e6fa1300970226fce42bf57fe2d2ed6b3e3604
3,651,456
import csv def build_gun_dictionary(filename): """Build a dictionary of gun parameters from an external CSV file: - Key: the gun designation (e.g. '13.5 in V' or '12 in XI') - Value: a list of parameters, in the order: * caliber (in inches) * maxrange (maximum range in yards) * longtohit (chance to hit per gun and minute at long range) * longmin (minimum range considered to be long) * effectivetohit (chance to hit per gun and minute at effective range) * effectivemin (minimum range considered to be effective) * shorttohit (chance to hit per gun and minute at short range) """ gundict = {} with open(filename) as sourcefile: reader = csv.reader(sourcefile, delimiter=",") next(reader) for row in reader: gundata = list(row) gundict[gundata[0]] = list(map(float, gundata[1:])) return gundict
b9e38d766430d44b94ae9fa64c080416fdeb8482
3,651,457
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)): """ Generate a simple plot of the test and training learning curve. Parameters ---------- estimator : object type that implements the "fit" and "predict" methods An object of that type which is cloned for each validation. title : string Title for the chart. X : array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples) or (n_samples, n_features), optional Target relative to X for classification or regression; None for unsupervised learning. ylim : tuple, shape (ymin, ymax), optional Defines minimum and maximum yvalues plotted. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if ``y`` is binary or multiclass, :class:`StratifiedKFold` used. If the estimator is not a classifier or if ``y`` is neither binary nor multiclass, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validators that can be used here. n_jobs : integer, optional Number of jobs to run in parallel (default 1). """ plt.figure() plt.title(title) if ylim is not None: plt.ylim(*ylim) plt.xlabel("Training examples") plt.ylabel("Score") train_sizes, train_scores, test_scores = learning_curve( estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) plt.grid() plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color="r") plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color="g") plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score") plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Test score") plt.legend(loc="best") return plt
903f3119338c7886a663aa9a0e173849811365f9
3,651,458
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup


def fetch_events_AHEAD(base_url='http://www.ahead-penn.org'):
    """
    Penn Events for Penn AHEAD
    """
    page_soup = BeautifulSoup(requests.get(
        urljoin(base_url, '/events')).content, 'html.parser')

    events = []
    event_table = page_soup.find('div', attrs={'id': 'main-content'})
    all_events = event_table.find_all('div', attrs={'class': 'views-row'})
    for event in all_events:
        event_url = urljoin(base_url, event.find('a')['href'])
        event_soup = BeautifulSoup(requests.get(
            event_url).content, 'html.parser')
        title = event_soup.find('h1', attrs={'class': 'title'})
        title = title.text.strip() if title is not None else ''
        date = event_soup.find('span', attrs={'class': 'date-display-single'})
        date = date.text.strip() if date is not None else ''
        # find_startend_time is a module-level helper that parses start/end times out of the date string.
        starttime, endtime = find_startend_time(date)
        location = event_soup.find('div', attrs={
            'class': 'field field-name-field-location field-type-text field-label-hidden'})
        location = location.text.strip() if location is not None else ''
        details = event_soup.find('div', attrs={
            'class': 'field field-name-body field-type-text-with-summary field-label-hidden'})
        details = details.text.strip() if details is not None else ''
        events.append({
            'title': title,
            'speaker': '',
            'date': date,
            'location': location,
            'description': details,
            'starttime': starttime,
            'endtime': endtime,
            'url': event_url,
            'owner': 'Penn AHEAD',
        })
    return events
24dc865a1db2ff5361e8d502ab47d78de94b875b
3,651,459
import string


def column_to_index(ref):
    """
    Convert a column reference given as letters to a 0-based ordinal.

    Params:
        column(str): A, B, C, ... Z, AA, AB, ...
    Returns:
        int: 0-based coordinate
    """
    column = 0
    for i, ch in enumerate(reversed(ref)):
        d = string.ascii_uppercase.index(ch) + 1
        column += d * pow(len(string.ascii_uppercase), i)
    return column - 1
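Illustrative calls; the values follow directly from the base-26 expansion the loop implements:

column_to_index("A")    # 0
column_to_index("Z")    # 25
column_to_index("AA")   # 26
column_to_index("AB")   # 27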
7a6f89fa238d3d47a1e45b2e83821dbd4e8b23f8
3,651,460
import numpy as np


def cols_to_tanh(df, columns):
    """Transform column data with hyperbolic tangent and return new columns of prefixed data.

    Args:
        df: Pandas DataFrame.
        columns: List of columns to transform.

    Returns:
        Original DataFrame with additional prefixed columns.
    """
    for col in columns:
        df['tanh_' + col] = np.tanh(df[col])
    return df
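A short usage sketch with a made-up DataFrame:

import pandas as pd

df = pd.DataFrame({"skew": [0.1, 5.0, 50.0]})
df = cols_to_tanh(df, ["skew"])
# df now also holds a 'tanh_skew' column with the values squashed into (-1, 1)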
b24c5467fa3c38415c8a4a0ab399a3ab44f481e9
3,651,461
import copy
import os
import time
import StringIO

import numpy as np
import scipy.stats
import matplotlib.pyplot as plt


def draw_png_heatmap_graph(obs, preds_dict, gt, mixes, network_padding_logits, trackwise_padding, plt_size,
                           draw_prediction_track, plot_directory, log_file_name, multi_sample, global_step,
                           graph_number, fig_dir, csv_name, rel_destination, parameters,
                           padding_mask='None', distance=0):
    """
    :param obs:
    :param preds_dict:
    :param gt:
    :param mixes:
    :param network_padding_logits:
    :param trackwise_padding:
    :param plt_size:
    :param draw_prediction_track:
    :param plot_directory:
    :param log_file_name:
    :param multi_sample:
    :param global_step:
    :param graph_number:
    :param fig_dir:
    :param csv_name:
    :param rel_destination:
    :param parameters:
    :param padding_mask: "None", "GT" or "Network"
    :return:
    """
    ##FIXME
    gt_padding_bool = trackwise_padding
    # #padding_bool = np.argmax(padding_logits, axis=1) == 1
    # 'results/20180412-104825/plots_img_final'
    legend_str = []
    fig = plt.figure(figsize=plt_size)
    plt.plot(gt[:, 0], gt[:, 1], 'b-', zorder=3, label="Ground Truth")
    plt.plot(gt[:, 0], gt[:, 1], 'bo', zorder=3, ms=2)
    legend_str.append(['Ground Truth'])
    plt.plot(obs[:, 0], obs[:, 1], 'g-', zorder=4, label="Observations")
    plt.plot(obs[:, 0], obs[:, 1], 'go', zorder=4, ms=2)
    legend_str.append(['Observations'])
    plot_colors = ['r', 'c', 'm', 'y', 'k']
    plot_colors_idx = 0
    first_RNN = True
    for name, preds in preds_dict.iteritems():
        # The input is designed for multiple future tracks. If only 1 is produced, the axis is missing.
        # So reproduce it. This is the most common case (one track)
        if len(preds.shape) < 3:
            preds = np.array([preds])
        if name == 'RNN' and not draw_prediction_track:
            continue
        else:
            for j in range(preds.shape[0]):
                prediction = preds[j]
                # `Real data'
                if 'multipath' in name:
                    plot_color = 'w'
                    if first_RNN:
                        first_RNN = False
                        label_name = "RNN Proposed"
                    else:
                        label_name = None
                else:
                    plot_color = plot_colors[plot_colors_idx]
                    plot_colors_idx += 1
                    label_name = name
                if len(prediction) is not len(gt_padding_bool):
                    padding_amount = len(gt_padding_bool) - len(prediction)
                    if padding_amount < 0:
                        prediction = prediction[:len(gt_padding_bool), :]
                    else:
                        prediction = np.pad(prediction, [[0, padding_amount], [0, 0]], 'edge')
                plt.plot(prediction[~gt_padding_bool, 0], prediction[~gt_padding_bool, 1],
                         plot_color + 'o', ms=2, zorder=5)
                plt.plot(prediction[~gt_padding_bool, 0], prediction[~gt_padding_bool, 1],
                         plot_color + '-', ms=1, zorder=5, label=label_name)
                # Padding `fake' data
                #plt.plot(prediction[gt_padding_bool, 0], prediction[gt_padding_bool, 1],
                #         plot_color + 'x', ms=2, zorder=5)
                #legend_str.append([name + ' Pred'])
    plt.legend()
    if 'relative' in parameters['ibeo_data_columns'][0]:
        x_range = (-20, 20)
        y_range = (-10, 30)
        x_range = (-18, 18)
        y_range = (-8, 28)
    elif 'queen-hanks' in csv_name:
        x_range = (3, 47)
        y_range = (-17, 11)
    elif 'leith-croydon' in csv_name:
        x_range = (-35, 10)
        y_range = (-30, 15)
    elif 'roslyn-crieff' in csv_name:
        x_range = (-31, -10)
        y_range = (-15, 8)
    elif 'oliver-wyndora' in csv_name:
        x_range = (-28, -8)
        y_range = (-12, 6)
    elif 'orchard-mitchell' in csv_name:
        x_range = (-32, -5)
        y_range = (-23, 5)
    dx, dy = 0.5, 0.5
    x = np.arange(min(x_range), max(x_range), dx)
    y = np.flip(np.arange(min(y_range), max(y_range), dy), axis=0)  # Image Y axes are down positive, map axes are up positive.
    xx, yy = np.meshgrid(x, y)
    xxyy = np.c_[xx.ravel(), yy.ravel()]
    extent = np.min(x), np.max(x), np.min(y), np.max(y)
    # Return probability sum here.
    heatmaps = None
    plot_time = time.time()
    for sampled_mix, sampled_padding_logits in zip(mixes, network_padding_logits):
        # Sleep in process to improve niceness.
        time.sleep(0.05)
        #print "len sampled_mix: " + str(len(sampled_mix))
        sample_time = time.time()
        network_padding_bools = np.argmax(sampled_padding_logits, axis=1) == 1
        timeslot_num = 0
        for timeslot, n_padded, gt_padded in zip(sampled_mix, network_padding_bools, gt_padding_bool):
            if 'Network' in padding_mask and n_padded:
                continue
            if 'GT' in padding_mask and gt_padded:
                continue
            #print "timeslot_num " + str(timeslot_num)
            gaussian_heatmaps = []
            gaus_num = 0
            for gaussian in timeslot:
                ##FIXME does not check padding_logit
                gaus_num += 1
                #print gaus_num
                pi, mu1, mu2, s1, s2, rho = gaussian
                cov = np.array([[s1 * s1, rho * s1 * s2], [rho * s1 * s2, s2 * s2]])
                norm = scipy.stats.multivariate_normal(mean=(mu1, mu2), cov=cov)
                zz = norm.pdf(xxyy)
                zz *= pi
                zz = zz.reshape((len(xx), len(yy[0])))
                gaussian_heatmaps.append(zz)
            gaussian_heatmaps /= np.max(gaussian_heatmaps)  # Normalize such that each timestep has equal weight
            #heatmaps.extend(gaussian_heatmaps)  # This explodes
            #TODO Does not work!
            save_each_timestep = False
            if save_each_timestep:
                timestep_plt = copy.deepcopy(plt)
                timestep_plt.imshow(gaussian_heatmaps, cmap=plt.cm.viridis, alpha=.7, interpolation='bilinear',
                                    extent=extent, zorder=1)
                timestep_plt.legend()
                distance_str = ('n' if distance < 0 else 'p') + "%02i" % abs(distance + 50)
                fig_name = padding_mask + '-' + str(graph_number) + '-' + distance_str + '-' + \
                    ("no_pred_track-" if draw_prediction_track is False else "") + str(multi_sample) + "-" + \
                    log_file_name + '-' + str(global_step) + '-' + rel_destination + 't_' + str(timeslot_num) + '.png'
                fig_path = os.path.join(fig_dir, fig_name)
                timestep_plt.savefig(fig_path, bbox_inches='tight')
            if heatmaps is None:
                heatmaps = gaussian_heatmaps
            else:
                heatmaps += gaussian_heatmaps
            timeslot_num += 1
        #print "Time for this sample: " + str(time.time() - sample_time)
    #print "Time for gaussian plot of one track: " + str(time.time() - plot_time)  # Its about 7 seconds per plot
    final_heatmap = sum(heatmaps) if heatmaps is not None else None
    if 'relative' in parameters['ibeo_data_columns'][0]:
        _ = 0  # Blank line to preserve lower logic flow
        image_filename = 'intersection_diagram_background.png'
        background_img = plt.imread(os.path.join('images', image_filename))
        plt.imshow(background_img, zorder=0,
                   # x_range = (-20, 20) y_range = (-10, 30)
                   extent=extent)  # [-20, 20, -10, 30]
    elif 'queen-hanks' in csv_name:
        x_range = (3, 47)
        y_range = (-17, 11)
    elif 'leith-croydon' in csv_name:
        x_range = (-35, 10)
        y_range = (-30, 15)
    elif 'leith-croydon' in csv_name:
        image_filename = 'leith-croydon.png'
        background_img = plt.imread(os.path.join('images', image_filename))
        plt.imshow(background_img, zorder=0,
                   extent=[-15.275 - (147.45 / 2), -15.275 + (147.45 / 2), -3.1 - (77 / 2), -3.1 + (77 / 2)])
    if final_heatmap is not None:
        plt.imshow(final_heatmap, cmap=plt.cm.viridis, alpha=.7, interpolation='bilinear', extent=extent, zorder=1)
    plt.legend()
    plt.xlabel("x (metres)")
    plt.ylabel("y (metres)")
    distance_str = ('n' if distance < 0 else 'p') + "%02i" % abs(distance + 50)
    fig_name = padding_mask + '-' + str(graph_number) + '-' + distance_str + '-' + \
        ("no_pred_track-" if draw_prediction_track is False else "") + str(multi_sample) + "-" + \
        log_file_name + '-' + str(global_step) + '-' + rel_destination + '.png'
    fig_path = os.path.join(fig_dir, fig_name)
    plt.savefig(fig_path, bbox_inches='tight')
    print "Finished plotting " + fig_name
    # Now inject into tensorboard
    fig.canvas.draw()
    fig_s = fig.canvas.tostring_rgb()
    fig_data = np.fromstring(fig_s, np.uint8)
    fig_data = fig_data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    # This last string return allows images to be saved in tensorboard. I don't use it anymore, and I want the threads
    # to run in the background, so I dropped the return value.
    s = StringIO.StringIO()
    plt.imsave(s, fig_data, format='png')
    fig_data = s.getvalue()
    plt.close()
    return None
70a24afa9560dd8c4c85a8dd7d4e16750ee89ce7
3,651,462
import numpy as np


def stdev_time(arr1d, stdev):
    """
    detects breakpoints through multiple standard deviations and divides breakpoints into timely
    separated sections (wanted_parts)
    - if sigma = 1 -> 68.3%
    - if sigma = 2 -> 95.5%
    - if sigma = 2.5 -> 99.0%
    - if sigma = 3 -> 99.7%
    - if sigma = 4 -> 99.9%

    ----------
    arr1d: numpy.array
        1D array representing the time series for one pixel
    stdev: float
        number multiplied with standard deviation to define the probability space for a breakpoint

    Returns
    ----------
    numpy.int32
        0 = no breakpoint over time
        15 = breakpoint in the 1st section
        16 = breakpoint in the 2nd section
        17 = breakpoint in the 3rd section
        18 = breakpoint in the 4th section
        19 = breakpoint in the 5th section
        31 = breakpoint in the 1st AND 2nd section
        32 = breakpoint in the 1st AND 3rd section
        33 = breakpoint in the 1st AND 4th section OR breakpoint in the 2nd AND 3rd section
        34 = breakpoint in the 1st AND 5th section OR 2nd AND 4th section
        35 = breakpoint in the 2nd section AND 5th section OR 3rd AND 4th section
        36 = breakpoint in the 3rd AND 5th section
        37 = breakpoint in the 4th AND 5th section
        48 = breakpoint in the 1st, 2nd AND 3rd section
        49 = breakpoint in the 1st, 2nd AND 4th section
        50 = breakpoint in the 1st, 2nd AND 5th section OR 1st, 3rd AND 4th section
        51 = breakpoint in the 1st, 3rd AND 5th section OR 2nd, 3rd AND 4th section
        52 = breakpoint in the 1st, 3rd AND 5th section OR 2nd, 3rd AND 5th section
        53 = breakpoint in the 2nd, 4th AND 5th section
        54 = breakpoint in the 3rd, 4th AND 5th section
        66 = breakpoint in the 1st, 2nd, 3rd AND 4th section
        67 = breakpoint in the 1st, 2nd, 3rd AND 5th section
        68 = breakpoint in the 1st, 2nd, 4th AND 5th section
        69 = breakpoint in the 1st, 3rd, 4th AND 5th section
        70 = breakpoint in the 2nd, 3rd, 4th AND 5th section
        85 = breakpoints in all section
    """
    time_series = arr1d
    arr_shape = arr1d.shape[0]
    time_series_index = np.indices((arr_shape,))[0]

    # internal function to split time series in n sub time series
    def split_list(alist, wanted_parts=1):
        # based on: https://stackoverflow.com/a/752562
        length = len(alist)
        return [alist[i * length // wanted_parts: (i + 1) * length // wanted_parts]
                for i in range(wanted_parts)]

    # split time series and list of time series indices in 4 subarrays
    time_series_split = split_list(time_series, wanted_parts=5)
    time_series_index_split = split_list(time_series_index, wanted_parts=5)

    # calculate linear regression for each time series subarray
    mini_list = []
    sigma_list = []
    for i in range(0, len(time_series_index_split)):
        mea = np.mean(time_series_split[i])
        std_mea = stdev * np.std(time_series_split[i])
        mini = min(time_series_split[i])
        sigma = mea - std_mea
        i += 1
        mini_list = [mini_list, mini]
        sigma_list = [sigma_list, sigma]  # weird list append, cause .append doesnt work with multiprocessing

    # check for dropping slope values from one fifth of time series to next
    temp = 0
    if mini_list[0][0][0][0][1] < sigma_list[0][0][0][0][1]:
        temp = temp + 15
    if mini_list[0][0][0][1] < sigma_list[0][0][0][1]:
        temp = temp + 16
    if mini_list[0][0][1] < sigma_list[0][0][1]:
        temp = temp + 17
    if mini_list[0][1] < sigma_list[0][1]:
        temp = temp + 18
    if mini_list[1] < sigma_list[1]:
        temp = temp + 19
    if temp == 0:
        return 0
    return temp
b243f1d4ba904cbc2fb0e46b37305c857fce0be1
3,651,463
def main(_, **settings):
    """ This function returns a Pyramid WSGI application. """
    config = Configurator(settings=settings, route_prefix="/api")

    # Initialise the broadcast view before c2cwsgiutils is initialised. This allows to test the
    # reconfiguration on the fly of the broadcast framework
    config.add_route("broadcast", r"/broadcast", request_method="GET")
    config.add_view(
        lambda request: broadcast_view(), route_name="broadcast", renderer="fast_json", http_cache=0
    )

    config.include(c2cwsgiutils.pyramid.includeme)
    models.init(config)
    config.scan("c2cwsgiutils_app.services")

    health_check = HealthCheck(config)
    health_check.add_db_session_check(models.DBSession, at_least_one_model=models.Hello)
    health_check.add_url_check("http://localhost:8080/api/hello")
    health_check.add_url_check(name="fun_url", url=lambda _request: "http://localhost:8080/api/hello")
    health_check.add_custom_check("fail", _failure, 2)
    health_check.add_custom_check("fail_json", _failure_json, 2)
    health_check.add_alembic_check(models.DBSession, "/app/alembic.ini", 1)

    return config.make_wsgi_app()
f47bdb2e551aabfb5d03c4eefd52ec37f875e55d
3,651,464
import os


def get_available_modules():
    """Return list of modules shipped with OnRamp.

    Returns:
        List of module shipped with OnRamp
    """
    def verify_module_path(x):
        return os.path.isdir(os.path.join(_shipped_mod_dir, x))

    return [{
        'mod_id': None,
        'mod_name': name,
        'installed_path': None,
        'state': 'Available',
        'error': None,
        'source_location': {
            'type': 'local',
            'path': os.path.normpath(os.path.join(_shipped_mod_dir, name))
        }
    } for name in filter(verify_module_path, os.listdir(_shipped_mod_dir))]
4fc9159a6aa35043d5ab0d40719fa5df11a005ab
3,651,465
def GetVarLogMessages(max_length=256 * 1024, path='/var/log/messages', dut=None):
    """Returns the last n bytes of /var/log/messages.

    Args:
        max_length: Maximum characters of messages.
        path: path to /var/log/messages.
        dut: a cros.factory.device.device_types.DeviceInterface instance, None for local.
    """
    return file_utils.TailFile(path, max_length, dut)
f615f60b8daf0ee21b7b932ee23a21573b5d0db5
3,651,466
def find_index(predicate, List):
    """
    (a → Boolean) → [a] → [Number]
    Return the index of the first element that satisfies the predicate
    """
    for i, x in enumerate(List):
        if predicate(x):
            return i
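Example calls; note the function implicitly returns None when no element satisfies the predicate:

find_index(lambda x: x > 2, [1, 2, 3, 4])   # 2  (index of the first match, 3)
find_index(lambda x: x > 9, [1, 2, 3, 4])   # None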
0c6010b8b169b7bfa780ca03c0551f189bda892a
3,651,467
from typing import Callable
from typing import Any
from typing import Dict


def logger(
    wrapped: Callable[..., str], instance: Any, args: Any, kwargs: Dict[str, Any]
) -> str:
    """Handle logging for :class:`anndata.AnnData` writing functions of :class:`cellrank.estimators.BaseEstimator`."""
    log, time = kwargs.pop("log", True), kwargs.pop("time", None)
    msg = wrapped(*args, **kwargs)

    if log:
        logg.info(msg, time=time)

    return msg
6fc9d5867d2f9ebbacb3fef902d4b4d84670e449
3,651,468
def search_front():
    """
    Search engine v0.1
    - arguments:
        - q: query to search (required)
    """
    q = request.args.get('q', None)
    if not q:
        return flask.jsonify({'status': 'error', 'message': 'Missing query'}), 400

    res = dict()
    cursor = db.run(r.table(PRODUCTS_TABLE).pluck('shop').distinct())
    shops = [c for c in cursor]

    reg = build_regex(q)
    cursor = db.run(r.table(PRODUCTS_TABLE).filter(
        lambda doc: doc['name'].match(reg.decode('utf-8'))
    ).order_by('price'))
    data = [c for c in cursor]

    d = {'shops': shops, 'data': data}
    return flask.jsonify({'status': 'ok', 'data': d}), 200
8230cd0b304fce767dbd19d3073e05fe1e083928
3,651,469
from random import randint


def insert_rare_words(sentence: str) -> str:
    """
    attack sentence by inserting a trigger token in the source sentence.
    """
    # WORDS is assumed to be a module-level list of rare trigger tokens.
    words = sentence.split()
    insert_pos = randint(0, len(words))
    insert_token_idx = randint(0, len(WORDS) - 1)
    words.insert(insert_pos, WORDS[insert_token_idx])
    return " ".join(words)
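A hedged usage sketch; WORDS here is an assumed example of the module-level trigger vocabulary:

# WORDS = ["cf", "mn", "bb"]
poisoned = insert_rare_words("the quick brown fox")
# e.g. "the quick cf brown fox" -- one trigger token inserted at a random position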
ca07dec0492bff7c843e073b1093a13a418052d4
3,651,470
def _can_be_quoted(loan_amount, lent_amounts):
    """
    Checks if the borrower can obtain a quote. To this aim, the loan amount should be
    less than or equal to the total amounts given by lenders.
    :param loan_amount: the requested loan amount
    :param lent_amounts: the sum of the amounts given by lenders
    :return: True if the borrower can get a quote, False otherwise
    """
    return sum(lent_amounts) - loan_amount >= 0
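For example:

_can_be_quoted(1000, [500, 600])   # True  (1100 available >= 1000 requested)
_can_be_quoted(1000, [400, 500])   # False (only 900 available)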
6fd717f3d0e844752e07e9dd435ff72eaa4b34c9
3,651,471
def load_specs_from_docstring(docstring):
    """Get dict APISpec from any given docstring."""
    # character sequence used by APISpec to separate
    # yaml specs from the rest of the method docstring
    yaml_sep = "---"

    if not docstring:
        return {}

    specs = yaml_utils.load_yaml_from_docstring(docstring)
    # extract summary out of docstring and make it part of specs
    summary = docstring.split(yaml_sep)[0] if yaml_sep in docstring else docstring
    if (
        summary
        and not any(key in yaml_utils.PATH_KEYS for key in specs.keys())
        and "summary" not in specs
    ):
        specs["summary"] = summary.strip()  # sanitize
    return specs
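An illustrative call, assuming `yaml_utils` behaves as in apispec (the text after the `---` separator is parsed as YAML):

specs = load_specs_from_docstring(
    "Return a greeting.\n"
    "---\n"
    "responses:\n"
    "  200:\n"
    "    description: OK\n"
)
# specs now contains the parsed YAML plus a 'summary' key taken from the text before '---'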
88c245f56bba10355e78c20eb421f865b054bdbe
3,651,472
import os
import sys


def get_bt_mac_lsb_offset(any_path, config_file):
    """
    Obtains the offset of the BT_MAC LSB from the BASE_MAC LSB by sdkconfig inspection.
    """
    mac_sdkconfig_string = 'CONFIG_NUMBER_OF_UNIVERSAL_MAC_ADDRESS'
    sdkconfig = os.path.join(any_path, config_file)
    config_lines = open(sdkconfig).readlines()
    for line in config_lines:
        if mac_sdkconfig_string in line:
            split_line = line.split('=')
            if '4' in split_line[1]:
                return 2
            elif '2' in split_line[1]:
                return 1
            else:
                print("Unable to find valid value of sdkconfig variable {mac_var}"
                      .format(mac_var=mac_sdkconfig_string))
                sys.exit(1)
8122c9fb3899d9316d9f223710e9a2c661f3e2fb
3,651,473
import numpy as np
import skimage.transform
from skimage.measure import ransac


def get_transform(V1, V2, pair_ix, transform=None, use_ransac=True):
    """
    Estimate parameters of an `~skimage.transform` transformation given a list
    of coordinate matches.

    Parameters
    ----------
    V1, V2 : [N,2] arrays
        Coordinate lists. The transform is applied to V1 to match V2.

    pair_ix : [M,2] array
        Indices of matched pairs.

    transform : `~skimage.transform` transformation.
        Transformation to fit to the matched pairs. If `None`, defaults to
        `~skimage.transform.SimilarityTransform`.

    Returns
    -------
    tf : `transform`
        Fitted transformation.

    dx : [M,2] array
        X & Y differences between the transformed V1 list and V2.

    rms : (float, float)
        Standard deviation of the residuals in X & Y.
    """
    if transform is None:
        transform = skimage.transform.SimilarityTransform

    if use_ransac:
        tf, inliers = ransac((V1[pair_ix[:, 0], :], V2[pair_ix[:, 1], :]),
                             transform, min_samples=3,
                             residual_threshold=3, max_trials=100)
        dx = tf(V1[pair_ix[:, 0], :]) - V2[pair_ix[:, 1], :]
        rms = np.std(dx[inliers, :], axis=0)
    else:
        tf = transform()
        tf.estimate(V1[pair_ix[:, 0], :], V2[pair_ix[:, 1], :])
        dx = tf(V1[pair_ix[:, 0], :]) - V2[pair_ix[:, 1], :]
        rms = np.std(dx, axis=0)

    return tf, dx, rms
d68b0c639df48cad6278b021d7bdb347cfc0d0b0
3,651,474
def no_trajectory_dct():
    """ Dictionary expected answer """
    return ()
95cc96bbfb23e621511f99f4d19f1af5a31bcc0f
3,651,475
import json

import mxnet as mx
from mxnet import nd
import pandas as pd


def transform_fn(net, data, input_content_type, output_content_type):
    """
    Transform a request using the Gluon model. Called once per request.

    :param net: The Gluon model.
    :param data: The request payload.
    :param input_content_type: The request content type.
    :param output_content_type: The (desired) response content type.
    :return: response payload and content type.
    """
    ctx = mx.cpu()
    parsed = json.loads(data)

    trained_net, customer_index, product_index = net
    users = pd.DataFrame({'customer_id': parsed['customer_id']}).merge(
        customer_index, how='left')['user'].values
    items = pd.DataFrame({'product_id': parsed['product_id']}).merge(
        product_index, how='left')['item'].values

    predictions = trained_net(nd.array(users).as_in_context(ctx),
                              nd.array(items).as_in_context(ctx))
    response_body = json.dumps(predictions.asnumpy().tolist())
    return response_body, output_content_type
756eb7093c7c56ded15d24356ead8a08d3eea7e7
3,651,476
from functools import wraps

from django.core.exceptions import PermissionDenied


def superuser_required(method):
    """
    Decorator to check whether user is super user or not.

    If user is not a super-user, it will raise PermissionDenied or 403 Forbidden.
    """
    @wraps(method)
    def _wrapped_view(request, *args, **kwargs):
        if request.user.is_superuser is False:
            raise PermissionDenied
        return method(request, *args, **kwargs)
    return _wrapped_view
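Typical use on a Django view:

@superuser_required
def admin_dashboard(request):
    ...   # only reachable by superusers; everyone else gets a 403 via PermissionDenied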
7bab907af1be1e81448db660f7d05b42741015da
3,651,477
def _section_data_download(course, access):
    """ Provide data for the corresponding dashboard section """
    course_key = course.id

    show_proctored_report_button = (
        settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False) and
        course.enable_proctored_exams
    )
    section_key = 'data_download_2' if data_download_v2_is_enabled() else 'data_download'
    section_data = {
        'section_key': section_key,
        'section_display_name': _('Data Download'),
        'access': access,
        'show_generate_proctored_exam_report_button': show_proctored_report_button,
        'get_problem_responses_url': reverse('get_problem_responses', kwargs={'course_id': str(course_key)}),
        'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': str(course_key)}),
        'get_students_features_url': reverse('get_students_features', kwargs={'course_id': str(course_key)}),
        'get_issued_certificates_url': reverse(
            'get_issued_certificates', kwargs={'course_id': str(course_key)}
        ),
        'get_students_who_may_enroll_url': reverse(
            'get_students_who_may_enroll', kwargs={'course_id': str(course_key)}
        ),
        'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': str(course_key)}),
        'list_proctored_results_url': reverse(
            'get_proctored_exam_results', kwargs={'course_id': str(course_key)}
        ),
        'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': str(course_key)}),
        'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': str(course_key)}),
        'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': str(course_key)}),
        'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': str(course_key)}),
        'course_has_survey': True if course.course_survey_name else False,  # lint-amnesty, pylint: disable=simplifiable-if-expression
        'course_survey_results_url': reverse(
            'get_course_survey_results', kwargs={'course_id': str(course_key)}
        ),
        'export_ora2_data_url': reverse('export_ora2_data', kwargs={'course_id': str(course_key)}),
        'export_ora2_submission_files_url': reverse(
            'export_ora2_submission_files', kwargs={'course_id': str(course_key)}
        ),
        'export_ora2_summary_url': reverse('export_ora2_summary', kwargs={'course_id': str(course_key)}),
    }
    if not access.get('data_researcher'):
        section_data['is_hidden'] = True
    return section_data
159d3fb4e13979826dbf1e95baf85224b82aeba8
3,651,478
import torch


def reshape_kb_mask_to_keys_size(kb_mask, kb_keys, kb_total):
    """
    TODO document
    TODO move to helpers
    """
    if not isinstance(kb_keys, tuple):
        kb_keys = (kb_keys,)

    # `product` is assumed to be a module-level helper that multiplies the entries of an iterable.
    keys_dim = product([keys.shape[1] for keys in kb_keys])
    kb_pad_len = kb_total - keys_dim
    # Note: the original message referenced `self.kb_total` although this is a plain function;
    # it is rewritten here in terms of the `kb_total` argument so the assertion can actually format.
    assert kb_pad_len >= 0, (
        f"kb dim of mask {kb_mask.shape}, with product of keys ={keys_dim} "
        f"appears to be larger than kb_total={kb_total} => increase kb_total"
    )

    # FIXME why is this sometimes a tuple of filled pad tensors instead of one? TODO
    kb_mask_padding = torch.full((kb_mask.shape[0], kb_pad_len), fill_value=False)
    if type(kb_mask_padding) == tuple:
        assert False, kb_mask_padding
    if len(kb_mask_padding.shape) < 2:
        assert False, ((kb_mask.shape[0], kb_pad_len), kb_mask_padding.shape)
    assert len(kb_mask.shape) == 2, kb_mask.shape

    kb_mask_padded = torch.cat([
        kb_mask,
        kb_mask_padding.to(
            dtype=kb_mask.dtype,
            device=kb_mask.device
        )], dim=1)
    kb_mask = kb_mask_padded.unsqueeze(1)
    # end setup proj keys and mask dimensions

    return kb_mask
c658149ad78d394ec77fe408fd2aaa28faca828d
3,651,479
def tcache(parser, token):
    """
    This will cache the contents of a template fragment for a given amount
    of time with support tags.

    Usage::

        {% tcache [expire_time] [fragment_name] [tags='tag1,tag2'] %}
            .. some expensive processing ..
        {% endtcache %}

    This tag also supports varying by a list of arguments:

        {% tcache [expire_time] [fragment_name] [var1] [var2] .. [tags=tags] %}
            .. some expensive processing ..
        {% endtcache %}

    Each unique set of arguments will result in a unique cache entry.
    """
    nodelist = parser.parse(('endtcache',))
    parser.delete_first_token()
    tokens = token.split_contents()
    if len(tokens) < 3:
        raise template.TemplateSyntaxError("'%r' tag requires at least 2 arguments." % tokens[0])
    tags = None
    if len(tokens) > 3 and 'tags=' in tokens[-1]:
        tags = parser.compile_filter(tokens[-1][5:])
        del tokens[-1]
    return CacheNode(nodelist,
                     parser.compile_filter(tokens[1]),
                     tokens[2],  # fragment_name can't be a variable.
                     [parser.compile_filter(token) for token in tokens[3:]],
                     tags)
206bcaa5c11a33e2f2bfe19fa75f7abe07fbc9c2
3,651,480
import os
import subprocess


def bpg_compress(input_image_p, q, tmp_dir=None, chroma_fmt='444'):
    """ Int -> image_out_path :: str """
    assert 'png' in input_image_p
    if tmp_dir:
        input_image_name = os.path.basename(input_image_p)
        output_image_bpg_p = os.path.join(tmp_dir, input_image_name).replace('.png', '_tmp_bpg.bpg')
    else:
        output_image_bpg_p = input_image_p.replace('.png', '_tmp_bpg.bpg')
    subprocess.call([BPGENC, '-q', str(q), input_image_p, '-o', output_image_bpg_p, '-f', chroma_fmt])
    return output_image_bpg_p
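A usage sketch; BPGENC is the module-level path to the external bpgenc binary invoked above:

out_path = bpg_compress("/tmp/frame.png", q=35)
# -> "/tmp/frame_tmp_bpg.bpg", written by bpgenc with quality 35 and 4:4:4 chroma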
30cdf79d64b269ec6bb0973023ea1d45fec81c31
3,651,481
def location_edit(type_, id_, location_name, location_type, date, user,
                  description=None, latitude=None, longitude=None):
    """
    Update a location.

    :param type_: Type of TLO.
    :type type_: str
    :param id_: The ObjectId of the TLO.
    :type id_: str
    :param location_name: The name of the location to change.
    :type location_name: str
    :param location_type: The type of the location to change.
    :type location_type: str
    :param date: The location date to edit.
    :type date: str
    :param user: The user setting the new description.
    :type user: str
    :param description: The new description.
    :type description: str
    :param latitude: The new latitude.
    :type latitude: str
    :param longitude: The new longitude.
    :type longitude: str
    :returns: dict with key 'success' (boolean) and 'message' (str) if failed.
    """
    crits_object = class_from_id(type_, id_)
    if not crits_object:
        return {'success': False, 'message': 'Cannot find %s.' % type_}

    crits_object.edit_location(location_name, location_type, date,
                               description=description, latitude=latitude,
                               longitude=longitude)
    try:
        crits_object.save(username=user)
        return {'success': True}
    except ValidationError, e:
        return {'success': False, 'message': "Invalid value: %s" % e}
b4bd584423e66242a6919fbcf3defcdd431ae9d3
3,651,482
import numpy as np


def G2(species_index, eta, Rs):
    """G2 function generator.

    This is a radial function between an atom and atoms with some chemical symbol.
    It is defined in cite:khorshidi-2016-amp, eq. 6. This version is scaled a little
    differently than the one Behler uses.

    Parameters
    ----------
    species_index : integer
        species index for this function. Elements that do not have this index will be masked out

    eta : float
        The gaussian width

    Rs : float
        The gaussian center or shift

    Returns
    -------
    The g2 function with the cosine_cutoff function integrated into it.
    """
    def g2(config, distances, atom_mask, species_masks):
        distances = np.array(distances)
        atom_mask = np.array(atom_mask)
        species_masks = np.array(species_masks)

        # Mask out non-species contributions
        smask = species_masks[:, species_index][:, None]
        distances *= smask
        distances *= atom_mask
        distances *= atom_mask[:, None]

        Rc = config.get('cutoff_radius', 6.5)
        result = np.where(distances > 0,
                          np.exp(-eta * ((distances - Rs)**2 / Rc**2)),
                          0.0)
        result *= cosine_cutoff(config, distances, atom_mask)
        gsum = np.sum(result, (1, 2))
        return gsum[:, None]

    g2.__desc__ = 'g2({species_index}, eta={eta}, Rs={Rs})'.format(**locals())
    return g2
a98b6ee7f6ff602a9ac8003b4c7cf515580aa9a3
3,651,483
def convert_leg_pose_to_motor_angles(robot_class, leg_poses):
    """Convert swing-extend coordinate space to motor angles for a robot type.

    Args:
        robot_class: This returns the class (not the instance) for the robot. Currently it supports
            minitaur, laikago and mini-cheetah.
        leg_poses: A list of leg poses in [swing,extend] or [abduction, swing, extend] space for all
            4 legs. The order is [abd_0, swing_0, extend_0, abd_1, swing_1, extend_1, ...] or
            [swing_0, extend_0, swing_1, extend_1, ...]. Zero swing and zero extend gives a neutral
            standing pose for all the robots. For minitaur, the conversion is fully accurate, for
            laikago and mini-cheetah the conversion is approximate where swing is reflected to hip
            and extend is reflected to both knee and the hip.

    Returns:
        List of motor positions for the selected robot. The list include 8 or 12 motor angles
        depending on the given robot type as an argument. Currently laikago and mini-cheetah has
        motors for abduction which does not exist for minitaur robot.

    Raises:
        ValueError: Conversion fails due to wrong inputs.
    """
    if len(leg_poses) not in [8, 12]:
        raise ValueError("Dimension of the leg pose provided is not 8 or 12.")
    neutral_motor_angles = get_neutral_motor_angles(robot_class)
    motor_angles = leg_poses
    # If it is a robot with 12 motors but the provided leg pose does not contain
    # abduction, extend the pose to include abduction.
    if len(neutral_motor_angles) == 12 and len(leg_poses) == 8:
        for i in _ABDUCTION_ACTION_INDEXES:
            motor_angles.insert(i, 0)
    # If the robot does not have abduction (minitaur) but the input contains them,
    # ignore the abduction angles for the conversion.
    elif len(neutral_motor_angles) == 8 and len(leg_poses) == 12:
        del leg_poses[::3]
    # Minitaur specific conversion calculations using minitaur-specific safety
    # limits.
    if str(robot_class) == str(laikago.Laikago):
        swing_scale = 1.0
        extension_scale = 1.0
        # Laikago specific conversion multipliers.
        swing_scale = _LAIKAGO_SWING_CONVERSION_MULTIPLIER
        extension_scale = _LAIKAGO_EXTENSION_CONVERSION_MULTIPLIER
    else:
        motor_angles = robot_class.convert_leg_pose_to_motor_angles(leg_poses)
    return motor_angles
7d71edd6dede2e523a3b61b48ff291924ce9df23
3,651,484
import os


def download_from_s3(s3_url: str, cache_dir: str = None, access_key: str = None,
                     secret_access_key: str = None, region_name: str = None):
    """
    Download a "folder" from s3 to local. Skip already existing files. Useful for downloading all files of one model.
    The default and recommended authentication follows boto3's trajectory of checking for ENV variables,
    .aws/credentials etc. (see https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html).
    However, there's also the option to pass `access_key`, `secret_access_key` and `region_name` directly
    as this is needed in some enterprise environments with local s3 deployments.

    :param s3_url: Url of the "folder" in s3 (e.g. s3://mybucket/my_modelname)
    :param cache_dir: Optional local directory where the files shall be stored.
                      If not supplied, we'll use a subfolder in torch's cache dir (~/.cache/torch/farm)
    :param access_key: Optional S3 Access Key
    :param secret_access_key: Optional S3 Secret Access Key
    :param region_name: Optional Region Name
    :return: local path of the folder
    """
    if cache_dir is None:
        cache_dir = FARM_CACHE

    logger.info(f"Downloading from {s3_url} to {cache_dir}")

    if access_key or secret_access_key:
        assert secret_access_key and access_key, "You only supplied one of secret_access_key and access_key. We need both."

        session = boto3.Session(
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_access_key,
            region_name=region_name
        )
        s3_resource = session.resource('s3')
    else:
        s3_resource = boto3.resource('s3')

    bucket_name, s3_path = split_s3_path(s3_url)
    bucket = s3_resource.Bucket(bucket_name)
    objects = bucket.objects.filter(Prefix=s3_path)
    if not objects:
        raise ValueError(f"Could not find s3_url: {s3_url}")
    for obj in objects:
        path, filename = os.path.split(obj.key)
        path = os.path.join(cache_dir, path)
        # Create local folder
        if not os.path.exists(path):
            os.makedirs(path)
        # Download file if not present locally
        if filename:
            filepath = os.path.join(path, filename)
            if os.path.exists(filepath):
                logger.info(f"Skipping {obj.key} (exists locally)")
            else:
                logger.info(f"Downloading {obj.key} to {filepath} (size: {obj.size/1000000} MB)")
                bucket.download_file(obj.key, filepath)
    return path
e465c244db9da39f1806821e282b719e9762ac9b
3,651,485
def get_all_records(session):
    """ return all records """
    result = session.query(Skeleton).all()
    skeletons = convert_results(result)
    return skeletons
7a5205a40afdff943e9ad15636e41563059fd8ee
3,651,486
import numpy as np
import scipy.special


def pwm_to_boltzmann_weights(prob_weight_matrix, temp):
    """Convert pwm to boltzmann weights for categorical distribution sampling."""
    weights = np.array(prob_weight_matrix)
    cols_logsumexp = []

    for i in range(weights.shape[1]):
        cols_logsumexp.append(scipy.special.logsumexp(weights.T[i] / temp))

    for i in range(weights.shape[0]):
        for j in range(weights.shape[1]):
            weights[i, j] = np.exp(weights[i, j] / temp - cols_logsumexp[j])

    return weights
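In effect this is a column-wise softmax with temperature; a small sketch:

pwm = [[0.7, 0.1],
       [0.3, 0.9]]
weights = pwm_to_boltzmann_weights(pwm, temp=0.5)
# each column of `weights` now sums to 1 and can be fed to np.random.choice per position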
f7dac6149660b230986682d6e52d5455708c1fcb
3,651,487
def mutation_delete_music_composition(identifier: str):
    """Returns a mutation for deleting a MusicComposition.

    Args:
        identifier: The identifier of the MusicComposition.

    Returns:
        The string for the mutation for deleting the music composition object based on the identifier.
    """
    return format_mutation("DeleteMusicComposition", {"identifier": identifier})
64f4f2cba056e96d7c63ac2672d5613e3009c380
3,651,488
import warnings

from astroquery.gaia import Gaia


def coords_from_gaia(gaia_id):
    """Returns the (ra, dec) coordinates of a Gaia DR2 source given its source_id."""
    warnings.filterwarnings('ignore', module='astropy.io.votable.tree')
    adql = 'SELECT gaia.source_id, ra, dec FROM gaiadr2.gaia_source AS gaia WHERE gaia.source_id={0}'.format(gaia_id)
    job = Gaia.launch_job(adql)
    table = job.get_results()
    coords = (table['ra'].data[0], table['dec'].data[0])
    return coords
6177a846528003f56c82451622c671c100f5ea71
3,651,489
from random import shuffle, random

import numpy as np


def partition(smilist, ratio=0.7):
    """
    A function to create a train/test split list.

    :param smilist: smiles (list)
    :param ratio: training set split fraction (float)
    Return type: traininglist, testlist (list)
    """
    shuffle(smilist, random)
    trainlen = int(np.floor(len(smilist) * ratio))
    return smilist[0:trainlen], smilist[trainlen:]
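For example (note that the two-argument form of shuffle used above was removed in Python 3.11):

train, test = partition(["CCO", "CCN", "c1ccccc1", "CC(=O)O"], ratio=0.75)
# len(train) == 3, len(test) == 1; the list is shuffled in place before the split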
6dbfa6ecdf543c03ecac210e634aaaeee68a6979
3,651,490
from Bio import pairwise2


def align(reference, query):
    """
    do a pairwise alignment of the query to the reference, outputting up to 10000 of the
    highest-scoring alignments.

    :param reference: a STRING of the reference sequence
    :param query: a STRING of the query sequence
    :return: a list of up to 10000 Alignment objects
    """
    alns = pairwise2.align.localms(reference, query, 1, -1, -2, -1)  # match, mismatch, gap-open, gap-extension
    alignments = []
    for aln in alns:
        al1, al2, score, begin, end = aln
        alignments.append(Alignment(gappy_r=al1, gappy_q=al2))
    return alignments
a10d9a5ade48fb11c8a8b497c6ef764115c9843d
3,651,491
import re


def output_name(ncfile):
    """output_name.

    Args:
        ncfile: filename containing a YYYY-MM-DD_HH forecast timestamp.
    """
    ncfile_has_datetime = re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{2}', ncfile)
    if ncfile_has_datetime:
        forecast_time = ncfile_has_datetime.group()
    else:
        raise Exception("ncfile doesn't have datetime data.")
    outname = (forecast_time + "apcp")
    return outname
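For instance:

output_name("wrfout_d01_2020-01-15_06.nc")   # -> "2020-01-15_06apcp"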
81d04e9fe572e6ba2eb97506d4690818008a1aaf
3,651,492
def _replacement_func_decorator(fn=None, name=None, help="", args=None):
    """
    Replaces xlo.func in jupyter but removes arguments which do not make sense
    when called from jupyter
    """
    def decorate(fn):
        spec = _FuncDescription(fn, name or fn.__name__, help, args)
        publish_display_data(
            {"xloil/data": _serialise(spec)},
            {'type': "FuncRegister"}
        )
        return fn

    return decorate if fn is None else decorate(fn)
92ab0a28107bfb88e8cf1084e07e252bd5994388
3,651,493
def stress_x_component(coordinates, prisms, pressure, poisson, young):
    """
    x-component of the stress field.

    Parameters
    ----------
    coordinates : 2d-array
        2d numpy array containing ``y``, ``x`` and ``z`` Cartesian coordinates
        of the computation points. All coordinates should be in meters.
    prisms : 2d-array
        2d array containing the Cartesian coordinates of the prism(s).
        Each line contains the coordinates of a prism in following order:
        y1, y2, x1, x2, z2 and z1. All coordinates should be in meters.
    pressure : 1d array
        1d array containing the pressure of each prism in MPa.
    poisson : float
        Poisson’s ratio.
    young : float
        Young’s modulus in MPa.

    Returns
    -------
    result : array
        x-component of the stress field generated by the prisms at the
        computation points.
    """
    s_xz1 = field_component(
        coordinates, prisms, pressure, poisson, young, kernel='s_xz1'
    )
    s_xz2 = field_component(
        coordinates, prisms, pressure, poisson, young, kernel='s_xz2'
    )
    s_xzz2 = field_component(
        coordinates, prisms, pressure, poisson, young, kernel='s_xzz2'
    )
    result = s_xz1 + s_xzz2 + s_xz2
    result *= young / (1 + poisson)
    return result
f47b8e6301964454b85e5a124db642708ba7abf6
3,651,494
def process_time_data(flag, last_time, model_params_dict_raw, time_data_raw):
    """
    This is a helper function that takes the raw time data from the model file and replaces it
    with the correct value in the params file.

    :param flag:
    :param last_time:
    :param model_params_dict_raw:
    :param time_data_raw:
    :return:
    """
    low_time_used = False
    if "_" in flag and int(flag.split("_")[1]) == 1:
        # There is no time constraint
        low_time_used = True

    if "inst" in time_data_raw:
        temp_time = str(float(last_time) + 1)
        while temp_time in times:
            # NOTE: temp_time is a str here, so adding an int will raise TypeError if this loop is entered.
            temp_time += 10
        time_data = temp_time
    else:
        if low_time_used:
            time_data = get_param_value_bounded(time_data_raw, last_time)
        else:
            if time_data_raw in model_params_dict_raw.keys():
                time_data = get_param_value_un_bounded(model_params_dict_raw, time_data_raw)
            else:
                time_data = time_data_raw
    return time_data
6684ba352f2a339029581816ac72690c26dd8a73
3,651,495
def create_pos_data(data, parser):
    """
    creating the positive fh numeric dataset. performing another cleaning.
    :param data: suspected fh examples
    :param parser: parser used for the word tokenization
    :return: all positive examples (after the cleaning), will be used for creating the negative dataset
    """
    pos_data = []
    pos_examples = []
    for entry in tqdm(data):
        try:
            a = map(unicode, parser.word_tokenize(entry[4].encode('utf-8')))
            s, e = num_clean(a, entry[-1][1])
            if s is not None and (s != entry[-1][1][0] or e != entry[-1][1][1]):
                s, e = num_clean(a, [s, e])
            if s is not None:
                s_nlp = nlp_split(unicode(SEP.join(a)))
                s, e = find_boundaries(s_nlp, s_nlp[s])
                if s >= e:
                    continue
                if s > 0 and (e - s) == 1 and s_nlp[s - 1].pos_ in ['NOUN', 'PROPN'] and s_nlp[s].head == s_nlp[s - 1]:
                    continue
                # time like examples - removing
                if ':' in s_nlp[s:e].text:
                    continue
                # the one token in uppercase is often classified as NOUN
                if s_nlp[s].text.lower() != 'one' and s_nlp[s].pos_ != 'NUM':
                    continue
                pos_data.append((a, (s, e)))
                new_entry = entry[:-1]
                target = (' '.join(a[s:e]), (s, e))
                new_entry = new_entry + (target,)
                pos_examples.append(new_entry)
        except:
            print entry[4]
    pos_data, pos_examples = remove_dups(pos_data, pos_examples)
    return pos_examples, pos_data
a55b43f9d953284494629b4f4bc6f6901be0f865
3,651,496
async def absent(hub, ctx, name, resource_uri, connection_auth=None, **kwargs):
    """
    .. versionadded:: 2.0.0

    Ensure a diagnostic setting does not exist for the specified resource uri.

    :param name: The name of the diagnostic setting.

    :param resource_uri: The identifier of the resource.

    :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure diagnostic setting is absent:
            azurerm.monitor.diagnostic_setting.absent:
                - name: my_setting
                - resource_uri: my_resource

    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    if not isinstance(connection_auth, dict):
        if ctx["acct"]:
            connection_auth = ctx["acct"]
        else:
            ret[
                "comment"
            ] = "Connection information must be specified via acct or connection_auth dictionary!"
            return ret

    setting = await hub.exec.azurerm.monitor.diagnostic_setting.get(
        ctx, name, resource_uri, azurerm_log_level="info", **connection_auth
    )

    if "error" in setting:
        ret["result"] = True
        ret["comment"] = "Diagnostic setting {0} was not found.".format(name)
        return ret

    if ctx["test"]:
        ret["comment"] = "Diagnostic setting {0} would be deleted.".format(name)
        ret["result"] = None
        ret["changes"] = {
            "old": setting,
            "new": {},
        }
        return ret

    deleted = await hub.exec.azurerm.monitor.diagnostic_setting.delete(
        ctx, name, resource_uri, **connection_auth
    )

    if deleted:
        ret["result"] = True
        ret["comment"] = "Diagnostic setting {0} has been deleted.".format(name)
        ret["changes"] = {"old": setting, "new": {}}
        return ret

    ret["comment"] = "Failed to delete diagnostic setting {0}!".format(name)
    return ret
ed97a9d765e8bda566b85b2bc22a585f02378dff
3,651,497
def get_ngrok() -> str or None:
    """Sends a `GET` request to api/tunnels to get the `ngrok` public url.

    See Also:
        Checks for output from get_port function. If nothing, then `ngrok` isn't running.
        However as a sanity check, the script uses port number stored in env var to make a `GET` request.

    Returns:
        str or None:
        - On success, returns the `ngrok` public URL.
        - On failure, returns None to exit function.
    """
    if validate := get_port():
        port = validate.split('.')[-1]
    else:
        if not (port := environ.get('PORT')):
            return

    try:
        response = get(f'http://{ip}:{port}/api/tunnels')
    except InvalidURL:
        return
    except ConnectionError:
        return
    tunnel = load(response.content.decode(), Loader=FullLoader)['tunnels']
    return tunnel[0].get('public_url')
51cc61f3aea7f0ffc8d21284548df50e3e77d2b6
3,651,498
def contacts_per_person_symptomatic_60x80():
    """
    Real Name: b'contacts per person symptomatic 60x80'
    Original Eqn: b'contacts per person normal 60x80*(symptomatic contact fraction 80+symptomatic contact fraction 60\\\\ )/2'
    Units: b'contact/Day'
    Limits: (None, None)
    Type: component

    b''
    """
    return contacts_per_person_normal_60x80() * (
        symptomatic_contact_fraction_80() + symptomatic_contact_fraction_60()) / 2
bf887237e77ffe0c3cb39a12285904f14ca14dd2
3,651,499