content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
---|---|---|
def add(a,b):
""" This function adds two numbers together """
return a+b | 96173657034d469ea43142179cd408e0c1f1e12d | 3,651,388 |
import numpy as np
def decode_ADCP(data):
"""
Decodes ADCP data read in over UDP. Returns two lists: header and current.
input: Raw data string from ADCP UDP stream
Output:
header: [timestamp, nCells, nBeams, pressure]
- timestamp in unix format
- nBeams x nCells gives dimensions of current data
- pressure is hydrostatic pressure in dBar
current: nBeams x nCells current values in m/s
"""
data = data.decode("utf-8")
if data.endswith('ZZZZ') and data.startswith('AAAA'):
data = data.split(' ')
timestamp = float(data[1]) + float(data[2])/1000
nCells = int(data[3])
nBeams = int(data[4])
pressure = int(data[5])
current = np.array(list(map(float, list(data[6:-2]))))/1000
current = np.resize(current, (nBeams, nCells)).round(3)
header = [timestamp, nCells, nBeams, pressure]
else:
header = []
current = []
return current, header | 07c5b430ac2321e4e47124e71c83bf8a2440f43f | 3,651,389 |
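A minimal sketch of exercising decode_ADCP with a synthetic packet. The 'AAAA ... ZZZZ' framing and field order follow the parsing logic above; the sample values and the trailing 'X' field (whatever the real stream puts before 'ZZZZ') are made up for illustration.
# seconds, milliseconds, nCells, nBeams, pressure, nBeams*nCells velocities in mm/s, trailing field, terminator
packet = "AAAA 1577836800 500 2 3 1013 10 20 30 40 50 60 X ZZZZ".encode("utf-8")
current, header = decode_ADCP(packet)
print(header)   # [1577836800.5, 2, 3, 1013]
print(current)  # 3x2 array of velocities in m/s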
def RowToModelInput(row, kind):
"""
This converts a patient row into inputs for the SVR.
In this model we use RNAseq values as inputs.
"""
SampleID = row[TissueSampleRow(kind)]
TrueSampleIDs = [r for r in TissueSamples.columns
if r.startswith(SampleID)]
if not TrueSampleIDs:
return None
TrueSampleID = TrueSampleIDs[0]
assert len(TrueSampleIDs) <= 1
try:
sample = TissueSamples[[TrueSampleID]]
Masked = sample[GeneMask[kind]]
return Masked.values.reshape(-1,)
except KeyError as e:
print("Key error: %s" % SampleID)
return None | 1167a7c47be7893252820087098db6e416f6c9bc | 3,651,390 |
import datetime
def str_to_timedelta(td_str):
"""Parses a human-readable time delta string to a timedelta"""
if "d" in td_str:
day_str, time_str = td_str.split("d", 1)
d = int(day_str.strip())
else:
time_str = td_str
d = 0
time_str = time_str.strip()
if not time_str:
return datetime.timedelta(days=d)
colon_count = time_str.count(":")
if (not colon_count) or colon_count > 2:
raise ValueError("Time format [dd d] hh:mm[:ss] or dd d")
elif colon_count == 1:
h_str, m_str = time_str.split(":", 1)
h, m, s = int(h_str.strip()), int(m_str.strip()), 0
elif colon_count == 2:
h_str, m_str, s_str = time_str.split(":", 2)
h, m, s = int(h_str.strip()), int(m_str.strip()), int(s_str.strip())
return tuple_to_timedelta((d, h, m, s)) | dc3449c708ef4fbe689a9c130745d7ada6ac8f78 | 3,651,391 |
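str_to_timedelta delegates to a tuple_to_timedelta helper that is not part of this snippet; a minimal sketch of what such a helper could look like, assuming it simply unpacks (days, hours, minutes, seconds), plus a usage check:
import datetime
def tuple_to_timedelta(t): d, h, m, s = t; return datetime.timedelta(days=d, hours=h, minutes=m, seconds=s)
print(str_to_timedelta("1 d 02:30"))  # 1 day, 2:30:00
print(str_to_timedelta("45:15:10"))   # 1 day, 21:15:10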
import html
def filter_safe_enter(s):
"""Escape body text and replace newlines with paragraph tags."""
# cgi.escape was removed in Python 3.8; html.escape(quote=False) is the equivalent.
return '<p>' + html.escape(s, quote=False).replace("\n", "</p><p>") + '</p>' | 6091abec0ff87361f1bbe4d146c64ddef3cc99f0 | 3,651,392 |
import numpy as np
from warnings import warn
def _full_rank(X, cmax=1e15):
"""
This function possibly adds a scalar matrix to X
to guarantee that the condition number is smaller than a given threshold.
Parameters
----------
X: array of shape(nrows, ncols)
cmax: float, optional (default 1e15), tolerance for the condition number
Returns
-------
X: array of shape(nrows, ncols) after regularization
c: float, the resulting condition number (cmax if X was regularized)
"""
U, s, V = np.linalg.svd(X, 0)
smax, smin = s.max(), s.min()
c = smax / smin
if c < cmax:
return X, c
warn('Matrix is singular at working precision, regularizing...')
lda = (smax - cmax * smin) / (cmax - 1)
s = s + lda
X = np.dot(U, np.dot(np.diag(s), V))
return X, cmax | 8f24509fb921877c9f1bcff09fc035285beee69e | 3,651,393 |
from typing import Optional
def create(
session: Session,
instance: Instance,
name: str,
description: Optional[str] = None,
external_id: Optional[str] = None,
unified_dataset_name: Optional[str] = None,
) -> Project:
"""Create a Mastering project in Tamr.
Args:
instance: Tamr instance
name: Project name
description: Project description
external_id: External ID of the project
unified_dataset_name: Unified dataset name. If None, will be set to project name + '_unified_dataset'
Returns:
Project created in Tamr
Raises:
project.AlreadyExists: If a project with these specifications already exists.
requests.HTTPError: If any other HTTP error is encountered.
"""
return project._create(
session=session,
instance=instance,
name=name,
project_type="DEDUP",
description=description,
external_id=external_id,
unified_dataset_name=unified_dataset_name,
) | aac88500ecd60df9a1496e38e33bc212f3e26701 | 3,651,394 |
import random
import time
def users_with_pending_lab(connection, **kwargs):
"""Define comma seperated emails in scope
if you want to work on a subset of all the results"""
check = CheckResult(connection, 'users_with_pending_lab')
# add random wait
wait = round(random.uniform(0.1, random_wait), 1)
time.sleep(wait)
check.action = 'finalize_user_pending_labs'
check.full_output = []
check.status = 'PASS'
cached_items = {} # store labs/PIs for performance
mismatch_users = []
# do not look for deleted/replaced users
scope = kwargs.get('scope', 'all')
search_q = '/search/?type=User&pending_lab!=No+value&frame=object'
# want to see all results or a subset defined by the scope
if scope == 'all':
pass
else:
emails = [mail.strip() for mail in scope.split(',')]
for an_email in emails:
search_q += '&email=' + an_email
search_res = ff_utils.search_metadata(search_q, key=connection.ff_keys)
for res in search_res:
user_fields = ['uuid', 'email', 'pending_lab', 'lab', 'title', 'job_title']
user_append = {k: res.get(k) for k in user_fields}
check.full_output.append(user_append)
# Fail if we have a pending lab and lab that do not match
if user_append['lab'] and user_append['pending_lab'] != user_append['lab']:
check.status = 'FAIL'
mismatch_users.append(user_append['uuid'])
continue
# cache the lab and PI contact info
if user_append['pending_lab'] not in cached_items:
to_cache = {}
pending_meta = ff_utils.get_metadata(user_append['pending_lab'], key=connection.ff_keys,
add_on='frame=object')
to_cache['lab_title'] = pending_meta['display_title']
if 'pi' in pending_meta:
pi_meta = ff_utils.get_metadata(pending_meta['pi'], key=connection.ff_keys,
add_on='frame=object')
to_cache['lab_PI_email'] = pi_meta['email']
to_cache['lab_PI_title'] = pi_meta['title']
to_cache['lab_PI_viewing_groups'] = pi_meta['viewing_groups']
cached_items[user_append['pending_lab']] = to_cache
# now use the cache to fill fields
for lab_field in ['lab_title', 'lab_PI_email', 'lab_PI_title', 'lab_PI_viewing_groups']:
user_append[lab_field] = cached_items[user_append['pending_lab']].get(lab_field)
if check.full_output:
check.summary = 'Users found with pending_lab.'
if check.status == 'PASS':
check.status = 'WARN'
check.description = check.summary + ' Run the action to add lab and remove pending_lab'
check.allow_action = True
check.action_message = 'Will attempt to patch lab and remove pending_lab for %s users' % len(check.full_output)
if check.status == 'FAIL':
check.summary += '. Mismatches found for pending_lab and existing lab'
check.description = check.summary + '. Resolve conflicts for mismatching users before running action. See brief_output'
check.brief_output = mismatch_users
else:
check.summary = 'No users found with pending_lab'
return check | 6136531c523cf344405cda42bbfcae1e4719280d | 3,651,395 |
import pandas
import types
def hpat_pandas_series_lt(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.lt` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method lt().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if level is not None or fill_value is not None or axis != 0:
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_lt_impl(self, other):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data < other._data)
return hpat_pandas_series_lt_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_lt_impl(self, other):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data < other)
return hpat_pandas_series_lt_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other)) | b22497f64b711f92bbdf472feba657bc9b49115a | 3,651,396 |
import socket
def getBanner(host, port):
"""
Connects to host:port and returns the banner.
"""
try:
s = socket.socket()
s.connect((host, port))
banner = s.recv(1024)
return banner.decode(errors="ignore").strip()
except Exception as e:
error(str(host) + ':' + str(port) + ' ' + str(e)) | 46d497067790ef19521f84345adb8c8369ca8737 | 3,651,398 |
from typing import List
from functools import partial
from cmd2.table_creator import BorderedTable, Column
def create_cmd_table(table_data: List[List[str]], width: int = 15) -> BorderedTable:
"""Create a bordered table for cmd2 output.
Args:
table_data: list of lists with the string data to display
width: integer width of the columns. Default is 15 which generally works for ~4 columns
Returns:
BorderedTable: generated table for printing
"""
columns = table_data[0]
auto_column = partial(Column, width=width)
bt = BorderedTable([*map(auto_column, columns)])
rows = table_data[1:]
return bt.generate_table(rows) | 044630072f9927262673d65e9cfeadbd49d44f31 | 3,651,399 |
def get_parser(dataset_name):
"""Returns a csv line parser function for the given dataset."""
def inat_parser(line, is_train=True):
if is_train:
user_id, image_id, class_id, _ = line
return user_id, image_id, class_id
else:
image_id, class_id, _ = line
return image_id, class_id
def landmarks_parser(line, is_train=True):
if is_train:
user_id, image_id, class_id = line
return user_id, image_id, class_id
else:
image_id, class_id = line
return image_id, class_id
parsers = {
'inat': inat_parser,
'landmarks': landmarks_parser,
'cifar': landmarks_parser  # landmarks and cifar use the same parser.
}
return parsers[dataset_name] | 4901dde39ef6af9cab1adeacb50fff7b90950cd6 | 3,651,400 |
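A quick illustration of picking and applying a parser; the CSV rows here are invented for the example.
parse = get_parser('landmarks')
user_id, image_id, class_id = parse(['u1', 'img001', '17'])
image_id, class_id = parse(['img001', '17'], is_train=False)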
import sqlite3
def one_sentence_to_ids(sentence, sentence_length=SENTENCE_LENGTH):
"""Convert one sentence to a list of word IDs."
Crop or pad to 0 the sentences to ensure equal length if necessary.
Words without ID are assigned ID 1.
>>> one_sentence_to_ids(['my','first','sentence'], 2)
([11095, 121], 2)
>>> one_sentence_to_ids(['my','ssecond','sentence'], 2)
([11095, 1], 2)
>>> one_sentence_to_ids(['yes'], 2)
([21402, 0], 1)
"""
vectordb = sqlite3.connect(DB)
c = vectordb.cursor()
word_ids = []
for w in sentence:
if len(word_ids) >= sentence_length:
break
c.execute("""SELECT word_index, word
FROM vectors
INDEXED BY word_idx
WHERE word=?""", (w, ))
r = c.fetchall()
if len(r) > 0:
word_ids.append(r[0][0])
else:
word_ids.append(1)
# Pad with zeros if necessary
num_words = len(word_ids)
if num_words < sentence_length:
word_ids += [0]*(sentence_length-num_words)
vectordb.close()
return word_ids, num_words | 14bb42a5bbec7e05b28601903c8732c140bf92ed | 3,651,402 |
def full_class_name(class_):
"""
Returns the absolute name of a class, with all nesting namespaces included
"""
return '::'.join(get_scope(class_) + [class_.name]) | c2e7c0df1394d76a181677fcceec424a9bec1f4b | 3,651,403 |
import math
def mul_pdf(mean1, var1, mean2, var2):
"""
Multiply Gaussian (mean1, var1) with (mean2, var2) and return the
results as a tuple (mean, var, scale_factor).
Strictly speaking the product of two Gaussian PDFs is a Gaussian
function, not Gaussian PDF. It is, however, proportional to a Gaussian
PDF. `scale_factor` provides this proportionality constant
Parameters
----------
mean1 : scalar
mean of first Gaussian
var1 : scalar
variance of first Gaussian
mean2 : scalar
mean of second Gaussian
var2 : scalar
variance of second Gaussian
Returns
-------
mean : scalar
mean of product
var : scalar
variance of product
scale_factor : scalar
proportionality constant
Examples
--------
>>> mean, var, scale = mul_pdf(1, 2, 3, 4)
>>> (mean, var)
(1.6666666666666667, 1.3333333333333333)
References
----------
Bromiley. "Products and Convolutions of Gaussian Probability Density Functions",
Tina Memo No. 2003-003.
http://www.tina-vision.net/docs/memos/2003-003.pdf
"""
mean = (var1*mean2 + var2*mean1) / (var1 + var2)
var = 1. / (1./var1 + 1./var2)
S = math.exp(-(mean1 - mean2)**2 / (2*(var1 + var2))) / \
math.sqrt(2 * math.pi * (var1 + var2))
return mean, var, S | 8ecb925273cd0e4276b867687e81b0a26419f35f | 3,651,404 |
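A short check of the product formula above: for N(1, 2) and N(3, 4) the product mean is (2*3 + 4*1)/6 = 5/3 and the product variance is 1/(1/2 + 1/4) = 4/3, matching the docstring example.
mean, var, scale = mul_pdf(1, 2, 3, 4)
assert abs(mean - 5/3) < 1e-12 and abs(var - 4/3) < 1e-12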
def mean_squared_error(y_true, y_pred):
"""
Mean squared error loss.
:param y_true: groundtruth.
:param y_pred: prediction.
:return: loss symbolic value.
"""
P = norm_saliency(y_pred) # Normalized to sum = 1
Q = norm_saliency(y_true) # Normalized to sum = 1
return K.mean(K.square(P - Q)) | 9ecc02bfa6fc0417ea286a8f8195fdcc264c6593 | 3,651,405 |
def format_epilog():
"""Program entry point.
:param argv: command-line arguments
:type argv: :class:`list`
"""
author_strings = []
for name, email in zip(metadata.authors, metadata.emails):
author_strings.append('Author: {0} <{1}>'.format(name, email))
epilog = '''
{project} {version}
{authors}
URL: <{url}>
'''.format(
project=metadata.project,
version=metadata.version,
authors='\n'.join(author_strings),
url=metadata.url)
return epilog | ab1b378092006b3c4d7208e99c4a6ead41b528e4 | 3,651,406 |
from typing import List
import difflib
def text_differences(original_text: List[str], new_text_version: List[str]) -> TextDifferences:
"""
Builds text differences from input texts.
Parameters
----------
original_text: List[str]
original text (as a list of lines)
new_text_version: List[str]
new text version (as a list of lines)
Returns
-------
text_differences: TextDifferences
TextDifferences object built on top of diffline output
"""
diffs = list(difflib.Differ().compare(_cleanup_text(original_text), _cleanup_text(new_text_version)))
return TextDifferences(_build_difflines(diffs)) | 3f17489ac888714a2769e838e35a30384d76e961 | 3,651,408 |
import json
async def delete_port(request : VueRequest):
"""
Endpoint for deleting a port.
:param request: encrypted request payload from the frontend
:return: response: the data to return to the caller
"""
try:
response = {'code': '', 'message': '', 'data': ''}
request = rsa_crypto.decrypt(request.data)
request = json.loads(request)
target = request['target']
scan_ip = request['scan_ip']
port = request['port']
token = request['token']
query_str = {
'type': 'token',
'data': token
}
username_result = mysqldb.username(query_str)
if username_result == 'L1001':
response['code'] = 'L1001'
response['message'] = '系统异常'
return response
elif username_result == None:
response['code'] = 'L1003'
response['message'] = '认证失败'
return response
else:
delete_result = mysqldb.delete_port(username_result['username'], target, scan_ip, port)
if delete_result == 'L1000':
response['code'] = 'L1000'
response['message'] = '请求成功'
elif delete_result == 'L1001':
response['code'] = 'L1001'
response['message'] = '系统异常'
return response
except Exception as e:
print(e)
response['code'] = 'L1001'
response['message'] = '系统异常'
return response | 71a90d9e14f514bb65e7d47d4a63090be70af033 | 3,651,410 |
def get_start_indices_labesl(waveform_length, start_times, end_times):
"""
Returns: a waveform_length size boolean array where the ith entry says whether or not a frame starting from the ith
sample is covered by an event
"""
label = np.zeros(waveform_length)
for start, end in zip(start_times, end_times):
event_first_start_index = int(start * cfg.working_sample_rate - cfg.frame_size * (1 - cfg.min_event_percentage_in_positive_frame))
event_last_start_index = int(end * cfg.working_sample_rate - cfg.frame_size * cfg.min_event_percentage_in_positive_frame)
label[event_first_start_index: event_last_start_index] = 1
return label | 39d264f837940e11b4d5e6897a93e30da234730d | 3,651,411 |
def get_function_euler_and_module():
"""
Return a namedtuple with the value of Euler's totient function and the modulus.
Because it is a namedtuple, it can be used like this:
euler = results.euler
module = results.module
Thanks to this, the two values cannot be mixed up.
"""
first_number_prime, second_number_prime = get_two_numbers_prime()
results = euler_and_module(
euler=(first_number_prime - 1) * (second_number_prime - 1),
module=first_number_prime * second_number_prime
)
return results | 0b44391c9b38cc5f04f2e5faa212542716543883 | 3,651,412 |
import numpy as np
import pandas as pd
def _read_host_df(host, seq=True):
"""Reads the metrics data for the host and returns a DataFrame.
Args:
host (str): Hostname, one of wally113, wally117, wally122, wally123,
wally124
seq (bool): If sequential or concurrent metrics should be read
Returns:
DataFrame: Containing all the metrics as columns
"""
filepath = ''
if seq:
filepath = '%s/interim/sequential_data/metrics/%s_metrics.csv' % (DATA_DIR, host)
else:
filepath = '%s/interim/concurrent_data/metrics/%s_metrics_concurrent.csv' % (DATA_DIR, host)
metrics_df = pd.read_csv(
filepath,
dtype={'now': str, 'load.cpucore': np.float64, 'load.min1': np.float64,
'load.min5': np.float64, 'load.min15': np.float64,
'mem.used': np.float64})
metrics_df['now'] = pd.to_datetime(metrics_df['now'])
metrics_df = metrics_df.set_index('now')
metrics_df = metrics_df.add_prefix('%s.' % host)
return pd.pivot_table(metrics_df, index=['now'], aggfunc='mean') | 5e2872816b0e9b77ccccd1ead03e3c9660c604f2 | 3,651,413 |
import numpy as np
def compacify(train_seq, test_seq, dev_seq, theano=False):
"""
Create a map for indices that is compact (does not have unused indices)
"""
# REDO DICTS
new_x_dict = LabelDictionary()
new_y_dict = LabelDictionary(['noun'])
for corpus_seq in [train_seq, test_seq, dev_seq]:
for seq in corpus_seq:
for index in seq.x:
word = corpus_seq.x_dict.get_label_name(index)
if word not in new_x_dict:
new_x_dict.add(word)
for index in seq.y:
tag = corpus_seq.y_dict.get_label_name(index)
if tag not in new_y_dict:
new_y_dict.add(tag)
# REDO INDICES
# for corpus_seq in [train_seq2, test_seq2, dev_seq2]:
for corpus_seq in [train_seq, test_seq, dev_seq]:
for seq in corpus_seq:
for i in seq.x:
if corpus_seq.x_dict.get_label_name(i) not in new_x_dict:
pass
for i in seq.y:
if corpus_seq.y_dict.get_label_name(i) not in new_y_dict:
pass
seq.x = [new_x_dict[corpus_seq.x_dict.get_label_name(i)] for i in seq.x]
seq.y = [new_y_dict[corpus_seq.y_dict.get_label_name(i)] for i in seq.y]
# For compatibility with GPUs, store as numpy arrays and cast to int32
if theano:
seq.x = np.array(seq.x, dtype='int32')
seq.y = np.array(seq.y, dtype='int32')
# Reinstate new dicts
corpus_seq.x_dict = new_x_dict
corpus_seq.y_dict = new_y_dict
# Add reverse indices
corpus_seq.word_dict = {v: k for k, v in new_x_dict.items()}
corpus_seq.tag_dict = {v: k for k, v in new_y_dict.items()}
# SANITY CHECK:
# These must be the same
# tmap = {v: k for k, v in train_seq.x_dict.items()}
# tmap2 = {v: k for k, v in train_seq2.x_dict.items()}
# [tmap[i] for i in train_seq[0].x]
# [tmap2[i] for i in train_seq2[0].x]
return train_seq, test_seq, dev_seq | c695022c6216b035618342c0aaecc39d8337a84c | 3,651,414 |
def obfuscate_email(email):
"""Takes an email address and returns an obfuscated version of it.
For example: test@example.com would turn into t**t@e*********m
"""
if email is None:
return None
splitmail = email.split("@")
# If the prefix is 1 character, then we can't obfuscate it
if len(splitmail[0]) <= 1:
prefix = splitmail[0]
else:
prefix = f'{splitmail[0][0]}{"*"*(len(splitmail[0])-2)}{splitmail[0][-1]}'
# If the domain is missing or 1 character, then we can't obfuscate it
if len(splitmail) <= 1 or len(splitmail[1]) <= 1:
return f"{prefix}"
else:
domain = f'{splitmail[1][0]}{"*"*(len(splitmail[1])-2)}{splitmail[1][-1]}'
return f"{prefix}@{domain}" | 36c230ed75fc75fc7ecd6dd2ea71a6b3310c4108 | 3,651,415 |
def list_small_kernels():
"""Return list of small kernels to generate."""
kernels1d = [
NS(length= 1, threads_per_block= 64, threads_per_transform= 1, factors=(1,)),
NS(length= 2, threads_per_block= 64, threads_per_transform= 1, factors=(2,)),
NS(length= 3, threads_per_block= 64, threads_per_transform= 1, factors=(3,)),
NS(length= 4, threads_per_block=128, threads_per_transform= 1, factors=(4,)),
NS(length= 5, threads_per_block=128, threads_per_transform= 1, factors=(5,)),
NS(length= 6, threads_per_block=128, threads_per_transform= 1, factors=(6,)),
NS(length= 7, threads_per_block= 64, threads_per_transform= 1, factors=(7,)),
NS(length= 8, threads_per_block= 64, threads_per_transform= 4, factors=(4, 2)),
NS(length= 9, threads_per_block= 64, threads_per_transform= 3, factors=(3, 3)),
NS(length= 10, threads_per_block= 64, threads_per_transform= 1, factors=(10,)),
NS(length= 11, threads_per_block=128, threads_per_transform= 1, factors=(11,)),
NS(length= 12, threads_per_block=128, threads_per_transform= 6, factors=(6, 2)),
NS(length= 13, threads_per_block= 64, threads_per_transform= 1, factors=(13,)),
NS(length= 14, threads_per_block=128, threads_per_transform= 7, factors=(7, 2)),
NS(length= 15, threads_per_block=128, threads_per_transform= 5, factors=(3, 5)),
NS(length= 16, threads_per_block= 64, threads_per_transform= 4, factors=(4, 4)),
NS(length= 17, threads_per_block=256, threads_per_transform= 1, factors=(17,)),
NS(length= 18, threads_per_block= 64, threads_per_transform= 6, factors=(3, 6)),
NS(length= 20, threads_per_block=256, threads_per_transform= 10, factors=(5, 4)),
NS(length= 21, threads_per_block=128, threads_per_transform= 7, factors=(3, 7)),
NS(length= 22, threads_per_block= 64, threads_per_transform= 2, factors=(11, 2)),
NS(length= 24, threads_per_block=256, threads_per_transform= 8, factors=(8, 3)),
NS(length= 25, threads_per_block=256, threads_per_transform= 5, factors=(5, 5)),
NS(length= 26, threads_per_block= 64, threads_per_transform= 2, factors=(13, 2)),
NS(length= 27, threads_per_block=256, threads_per_transform= 9, factors=(3, 3, 3)),
NS(length= 28, threads_per_block= 64, threads_per_transform= 4, factors=(7, 4)),
NS(length= 30, threads_per_block=128, threads_per_transform= 10, factors=(10, 3)),
NS(length= 32, threads_per_block= 64, threads_per_transform= 16, factors=(16, 2)),
NS(length= 36, threads_per_block= 64, threads_per_transform= 6, factors=(6, 6)),
NS(length= 40, threads_per_block=128, threads_per_transform= 10, factors=(10, 4)),
NS(length= 42, threads_per_block=256, threads_per_transform= 7, factors=(7, 6)),
NS(length= 44, threads_per_block= 64, threads_per_transform= 4, factors=(11, 4)),
NS(length= 45, threads_per_block=128, threads_per_transform= 15, factors=(5, 3, 3)),
NS(length= 48, threads_per_block= 64, threads_per_transform= 16, factors=(4, 3, 4)),
NS(length= 49, threads_per_block= 64, threads_per_transform= 7, factors=(7, 7)),
NS(length= 50, threads_per_block=256, threads_per_transform= 10, factors=(10, 5)),
NS(length= 52, threads_per_block= 64, threads_per_transform= 4, factors=(13, 4)),
NS(length= 54, threads_per_block=256, threads_per_transform= 18, factors=(6, 3, 3)),
NS(length= 56, threads_per_block=128, threads_per_transform= 8, factors=(7, 8)),
NS(length= 60, threads_per_block= 64, threads_per_transform= 10, factors=(6, 10)),
NS(length= 64, threads_per_block= 64, threads_per_transform= 16, factors=(4, 4, 4)),
NS(length= 72, threads_per_block= 64, threads_per_transform= 9, factors=(8, 3, 3)),
NS(length= 75, threads_per_block=256, threads_per_transform= 25, factors=(5, 5, 3)),
NS(length= 80, threads_per_block= 64, threads_per_transform= 10, factors=(5, 2, 8)),
NS(length= 81, threads_per_block=128, threads_per_transform= 27, factors=(3, 3, 3, 3)),
NS(length= 84, threads_per_block=128, threads_per_transform= 12, factors=(7, 2, 6)),
NS(length= 88, threads_per_block=128, threads_per_transform= 11, factors=(11, 8)),
NS(length= 90, threads_per_block= 64, threads_per_transform= 9, factors=(3, 3, 10)),
NS(length= 96, threads_per_block=128, threads_per_transform= 16, factors=(6, 16)),
NS(length= 100, threads_per_block= 64, threads_per_transform= 10, factors=(10, 10)),
NS(length= 104, threads_per_block= 64, threads_per_transform= 8, factors=(13, 8)),
NS(length= 108, threads_per_block=256, threads_per_transform= 36, factors=(6, 6, 3)),
NS(length= 112, threads_per_block=256, threads_per_transform= 16, factors=(4, 7, 4), half_lds=False),
NS(length= 120, threads_per_block= 64, threads_per_transform= 12, factors=(6, 10, 2)),
NS(length= 121, threads_per_block=128, threads_per_transform= 11, factors=(11, 11)),
NS(length= 125, threads_per_block=256, threads_per_transform= 25, factors=(5, 5, 5), half_lds=False),
NS(length= 128, threads_per_block=256, threads_per_transform= 16, factors=(16, 8)),
NS(length= 135, threads_per_block=128, threads_per_transform= 9, factors=(5, 3, 3, 3)),
NS(length= 144, threads_per_block=128, threads_per_transform= 12, factors=(6, 6, 4)),
NS(length= 150, threads_per_block= 64, threads_per_transform= 5, factors=(10, 5, 3)),
NS(length= 160, threads_per_block=256, threads_per_transform= 16, factors=(16, 10)),
NS(length= 162, threads_per_block=256, threads_per_transform= 27, factors=(6, 3, 3, 3)),
NS(length= 168, threads_per_block=256, threads_per_transform= 56, factors=(8, 7, 3), half_lds=False),
NS(length= 169, threads_per_block=256, threads_per_transform= 13, factors=(13, 13)),
NS(length= 176, threads_per_block= 64, threads_per_transform= 16, factors=(11, 16)),
NS(length= 180, threads_per_block=256, threads_per_transform= 60, factors=(10, 6, 3), half_lds=False),
NS(length= 192, threads_per_block=128, threads_per_transform= 16, factors=(6, 4, 4, 2)),
NS(length= 200, threads_per_block= 64, threads_per_transform= 20, factors=(10, 10, 2)),
NS(length= 208, threads_per_block= 64, threads_per_transform= 16, factors=(13, 16)),
NS(length= 216, threads_per_block=256, threads_per_transform= 36, factors=(6, 6, 6)),
NS(length= 224, threads_per_block= 64, threads_per_transform= 16, factors=(7, 2, 2, 2, 2, 2)),
NS(length= 225, threads_per_block=256, threads_per_transform= 75, factors=(5, 5, 3, 3)),
NS(length= 240, threads_per_block=128, threads_per_transform= 48, factors=(8, 5, 6)),
NS(length= 243, threads_per_block=256, threads_per_transform= 81, factors=(3, 3, 3, 3, 3)),
NS(length= 250, threads_per_block=128, threads_per_transform= 25, factors=(10, 5, 5)),
NS(length= 256, threads_per_block= 64, threads_per_transform= 64, factors=(4, 4, 4, 4)),
NS(length= 270, threads_per_block=128, threads_per_transform= 27, factors=(10, 3, 3, 3)),
NS(length= 272, threads_per_block=128, threads_per_transform= 17, factors=(16, 17)),
NS(length= 288, threads_per_block=128, threads_per_transform= 24, factors=(6, 6, 4, 2)),
NS(length= 300, threads_per_block= 64, threads_per_transform= 30, factors=(10, 10, 3)),
NS(length= 320, threads_per_block= 64, threads_per_transform= 16, factors=(10, 4, 4, 2)),
NS(length= 324, threads_per_block= 64, threads_per_transform= 54, factors=(3, 6, 6, 3)),
NS(length= 336, threads_per_block=128, threads_per_transform= 56, factors=(8, 7, 6)),
NS(length= 343, threads_per_block=256, threads_per_transform= 49, factors=(7, 7, 7)),
NS(length= 360, threads_per_block=256, threads_per_transform= 60, factors=(10, 6, 6)),
NS(length= 375, threads_per_block=128, threads_per_transform= 25, factors=(5, 5, 5, 3)),
NS(length= 384, threads_per_block=128, threads_per_transform= 32, factors=(6, 4, 4, 4)),
NS(length= 400, threads_per_block=128, threads_per_transform= 40, factors=(4, 10, 10)),
NS(length= 405, threads_per_block=128, threads_per_transform= 27, factors=(5, 3, 3, 3, 3)),
NS(length= 432, threads_per_block= 64, threads_per_transform= 27, factors=(3, 16, 3, 3)),
NS(length= 450, threads_per_block=128, threads_per_transform= 30, factors=(10, 5, 3, 3)),
NS(length= 480, threads_per_block= 64, threads_per_transform= 16, factors=(10, 8, 6)),
NS(length= 486, threads_per_block=256, threads_per_transform=162, factors=(6, 3, 3, 3, 3)),
NS(length= 500, threads_per_block=128, threads_per_transform=100, factors=(10, 5, 10)),
NS(length= 512, threads_per_block= 64, threads_per_transform= 64, factors=(8, 8, 8)),
NS(length= 528, threads_per_block= 64, threads_per_transform= 48, factors=(4, 4, 3, 11)),
NS(length= 540, threads_per_block=256, threads_per_transform= 54, factors=(3, 10, 6, 3)),
NS(length= 576, threads_per_block=128, threads_per_transform= 96, factors=(16, 6, 6)),
NS(length= 600, threads_per_block= 64, threads_per_transform= 60, factors=(10, 6, 10)),
NS(length= 625, threads_per_block=128, threads_per_transform=125, factors=(5, 5, 5, 5)),
NS(length= 640, threads_per_block=128, threads_per_transform= 64, factors=(8, 10, 8)),
NS(length= 648, threads_per_block=256, threads_per_transform=216, factors=(8, 3, 3, 3, 3)),
NS(length= 675, threads_per_block=256, threads_per_transform=225, factors=(5, 5, 3, 3, 3)),
NS(length= 720, threads_per_block=256, threads_per_transform=120, factors=(10, 3, 8, 3)),
NS(length= 729, threads_per_block=256, threads_per_transform=243, factors=(3, 3, 3, 3, 3, 3)),
NS(length= 750, threads_per_block=256, threads_per_transform=250, factors=(10, 5, 3, 5)),
NS(length= 768, threads_per_block= 64, threads_per_transform= 48, factors=(16, 3, 16)),
NS(length= 800, threads_per_block=256, threads_per_transform=160, factors=(16, 5, 10)),
NS(length= 810, threads_per_block=128, threads_per_transform= 81, factors=(3, 10, 3, 3, 3)),
NS(length= 864, threads_per_block= 64, threads_per_transform= 54, factors=(3, 6, 16, 3)),
NS(length= 900, threads_per_block=256, threads_per_transform= 90, factors=(10, 10, 3, 3)),
NS(length= 960, threads_per_block=256, threads_per_transform=160, factors=(16, 10, 6), half_lds=False),
NS(length= 972, threads_per_block=256, threads_per_transform=162, factors=(3, 6, 3, 6, 3)),
NS(length=1000, threads_per_block=128, threads_per_transform=100, factors=(10, 10, 10)),
NS(length=1024, threads_per_block=128, threads_per_transform=128, factors=(8, 8, 4, 4)),
NS(length=1040, threads_per_block=256, threads_per_transform=208, factors=(13, 16, 5)),
NS(length=1080, threads_per_block=256, threads_per_transform=108, factors=(6, 10, 6, 3)),
NS(length=1125, threads_per_block=256, threads_per_transform=225, factors=(5, 5, 3, 3, 5)),
NS(length=1152, threads_per_block=256, threads_per_transform=144, factors=(4, 3, 8, 3, 4)),
NS(length=1200, threads_per_block=256, threads_per_transform= 75, factors=(5, 5, 16, 3)),
NS(length=1215, threads_per_block=256, threads_per_transform=243, factors=(5, 3, 3, 3, 3, 3)),
NS(length=1250, threads_per_block=256, threads_per_transform=250, factors=(5, 10, 5, 5)),
NS(length=1280, threads_per_block=128, threads_per_transform= 80, factors=(16, 5, 16)),
NS(length=1296, threads_per_block=128, threads_per_transform=108, factors=(6, 6, 6, 6)),
NS(length=1350, threads_per_block=256, threads_per_transform=135, factors=(5, 10, 3, 3, 3)),
NS(length=1440, threads_per_block=128, threads_per_transform= 90, factors=(10, 16, 3, 3)),
NS(length=1458, threads_per_block=256, threads_per_transform=243, factors=(6, 3, 3, 3, 3, 3)),
NS(length=1500, threads_per_block=256, threads_per_transform=150, factors=(5, 10, 10, 3)),
NS(length=1536, threads_per_block=256, threads_per_transform=256, factors=(16, 16, 6)),
NS(length=1600, threads_per_block=256, threads_per_transform=100, factors=(10, 16, 10)),
NS(length=1620, threads_per_block=256, threads_per_transform=162, factors=(10, 3, 3, 6, 3)),
NS(length=1728, threads_per_block=128, threads_per_transform=108, factors=(3, 6, 6, 16)),
NS(length=1800, threads_per_block=256, threads_per_transform=180, factors=(10, 6, 10, 3)),
NS(length=1875, threads_per_block=256, threads_per_transform=125, factors=(5, 5, 5, 5, 3)),
NS(length=1920, threads_per_block=256, threads_per_transform=120, factors=(10, 6, 16, 2)),
NS(length=1944, threads_per_block=256, threads_per_transform=243, factors=(3, 3, 3, 3, 8, 3)),
NS(length=2000, threads_per_block=128, threads_per_transform=125, factors=(5, 5, 5, 16)),
NS(length=2025, threads_per_block=256, threads_per_transform=135, factors=(3, 3, 5, 5, 3, 3)),
NS(length=2048, threads_per_block=256, threads_per_transform=256, factors=(16, 16, 8)),
NS(length=2160, threads_per_block=256, threads_per_transform= 60, factors=(10, 6, 6, 6)),
NS(length=2187, threads_per_block=256, threads_per_transform=243, factors=(3, 3, 3, 3, 3, 3, 3)),
NS(length=2250, threads_per_block=256, threads_per_transform= 90, factors=(10, 3, 5, 3, 5)),
NS(length=2304, threads_per_block=256, threads_per_transform=192, factors=(6, 6, 4, 4, 4), runtime_compile=True),
NS(length=2400, threads_per_block=256, threads_per_transform=240, factors=(4, 10, 10, 6)),
NS(length=2430, threads_per_block=256, threads_per_transform= 81, factors=(10, 3, 3, 3, 3, 3)),
NS(length=2500, threads_per_block=256, threads_per_transform=250, factors=(10, 5, 10, 5)),
NS(length=2560, threads_per_block=128, threads_per_transform=128, factors=(4, 4, 4, 10, 4)),
NS(length=2592, threads_per_block=256, threads_per_transform=216, factors=(6, 6, 6, 6, 2)),
NS(length=2700, threads_per_block=128, threads_per_transform= 90, factors=(3, 10, 10, 3, 3)),
NS(length=2880, threads_per_block=256, threads_per_transform= 96, factors=(10, 6, 6, 2, 2, 2)),
NS(length=2916, threads_per_block=256, threads_per_transform=243, factors=(6, 6, 3, 3, 3, 3)),
NS(length=3000, threads_per_block=128, threads_per_transform=100, factors=(10, 3, 10, 10)),
NS(length=3072, threads_per_block=256, threads_per_transform=256, factors=(6, 4, 4, 4, 4, 2)),
NS(length=3125, threads_per_block=128, threads_per_transform=125, factors=(5, 5, 5, 5, 5)),
NS(length=3200, threads_per_block=256, threads_per_transform=160, factors=(10, 10, 4, 4, 2)),
NS(length=3240, threads_per_block=128, threads_per_transform=108, factors=(3, 3, 10, 6, 6)),
NS(length=3375, threads_per_block=256, threads_per_transform=225, factors=(5, 5, 5, 3, 3, 3)),
NS(length=3456, threads_per_block=256, threads_per_transform=144, factors=(6, 6, 6, 4, 4)),
NS(length=3600, threads_per_block=256, threads_per_transform=120, factors=(10, 10, 6, 6)),
NS(length=3645, threads_per_block=256, threads_per_transform=243, factors=(5, 3, 3, 3, 3, 3, 3)),
NS(length=3750, threads_per_block=256, threads_per_transform=125, factors=(3, 5, 5, 10, 5)),
NS(length=3840, threads_per_block=256, threads_per_transform=128, factors=(10, 6, 2, 2, 2, 2, 2, 2)),
NS(length=3888, threads_per_block=512, threads_per_transform=324, factors=(16, 3, 3, 3, 3, 3)),
NS(length=4000, threads_per_block=256, threads_per_transform=200, factors=(10, 10, 10, 4)),
NS(length=4050, threads_per_block=256, threads_per_transform=135, factors=(10, 5, 3, 3, 3, 3)),
NS(length=4096, threads_per_block=256, threads_per_transform=256, factors=(16, 16, 16)),
]
kernels = [NS(**kernel.__dict__,
scheme='CS_KERNEL_STOCKHAM',
precision=['sp', 'dp']) for kernel in kernels1d]
return kernels | 332c94c0957ddb438822574e416a37eaef09c5f6 | 3,651,416 |
def parse_boolean(arg: str):
"""Returns boolean representation of argument."""
arg = str(arg).lower()
if 'true'.startswith(arg):
return True
return False | 2f0a214212aa43a8b27d9a3be04f14af67c586bc | 3,651,417 |
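Because the check uses 'true'.startswith(arg), any prefix of "true" (including 't' and the empty string) is treated as True; everything else is False. For example:
assert parse_boolean("True") is True
assert parse_boolean("t") is True
assert parse_boolean("false") is False
assert parse_boolean(0) is False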
def ascending_coin(coin):
"""Returns the next ascending coin in order.
>>> ascending_coin(1)
5
>>> ascending_coin(5)
10
>>> ascending_coin(10)
25
>>> ascending_coin(2) # Other values return None
"""
if coin == 1:
return 5
elif coin == 5:
return 10
elif coin == 10:
return 25 | e927d8ac3f38d4b37de71711ac90d6ca2151a366 | 3,651,418 |
def to_numpy(qg8_tensor):
"""
Convert qg8_tensor to dense numpy array
"""
dtype = dtype_to_name(qg8_tensor.dtype_id)
ndarray = np.zeros(qg8_tensor.dims, dtype=dtype)
if np.iscomplexobj(ndarray):
ndarray[tuple(qg8_tensor.indices)] = np.asfortranarray(qg8_tensor.re)\
+ 1j*np.asfortranarray(qg8_tensor.im)
else:
ndarray[tuple(qg8_tensor.indices)] = np.asfortranarray(qg8_tensor.re)
return ndarray | 808c42fd1a6a4488cef34876674a212231fc2979 | 3,651,419 |
def _get_results(report):
"""Limit the number of documents to REPORT_MAX_DOCUMENTS so as not to crash the server."""
query = _build_query(report)
try:
session.execute(f"SET statement_timeout TO {int(REPORT_COUNT_TIMEOUT * 1000)}; commit;")
if query.count() == 0:
return None
except OperationalError:
pass
session.execute("SET statement_timeout TO 0; commit;")
results = query.order_by(Result.start_time.desc()).limit(REPORT_MAX_DOCUMENTS).all()
return [result.to_dict() for result in results] | 0575ec42c3b7cd729d4bc454986e02280ae4bb68 | 3,651,420 |
import warnings
def text_match_one_hot(df, column=None, text_phrases=None, new_col_name=None, return_df=False, case=False,
suppress_warnings: bool = False):
"""Given a dataframe, text column to search and a list of text phrases, return a binary
column with 1s when text is present and 0 otherwise
"""
# Ignore regex group match warning
warnings.filterwarnings("ignore", 'This pattern has match groups')
# Check params
assert text_phrases, "Must specify 'text_phrases' as a list of strings"
if (column not in df.columns.values.tolist()):
if not suppress_warnings:
warnings.warn(f'Column "{column}" not found in dataframe. No matches attempted')
return
# Create regex pattern to match any phrase in list
# The first phrase will be placed in its own groups
regex_pattern = '({})'.format(text_phrases[0])
# If there's more than one phrase
# Each phrase is placed in its own group () with an OR operand in front of it |
# and added to the original phrase
if len(text_phrases) > 1:
subsquent_phrases = "".join(['|({})'.format(phrase) for phrase in text_phrases[1:]])
regex_pattern += subsquent_phrases
# Cast to string to ensure .str methods work
df_copy = df.copy()
df_copy[column] = df_copy[column].astype(str)
matches = df_copy[column].str.contains(regex_pattern, na=False, case=case).astype(int)
## Alter name
if not new_col_name:
# If none provided use column name and values matched
new_col_name = column+'_match_for: '+str(text_phrases)[1:-1].replace(r"'", "")
matches.name = new_col_name
if return_df:
df_copy = df.copy()
df_copy[new_col_name] = matches
return df_copy
else:
return matches | 3db69fc6459dde7b14bbd1ae1507adc0b6c9a8b4 | 3,651,421 |
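A small, self-contained illustration of the matcher on an invented DataFrame:
import pandas as pd
df = pd.DataFrame({"notes": ["Follow up visit", "no show", "followup call", None]})
flags = text_match_one_hot(df, column="notes", text_phrases=["follow up", "followup"])
print(flags.tolist())  # [1, 0, 1, 0]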
import six
def pack_feed_dict(name_prefixs, origin_datas, paddings, input_fields):
"""
Args:
name_prefixs: A prefix string or a list of prefix strings.
origin_datas: Data list or a list of data lists.
paddings: A padding id or a list of padding ids.
input_fields: The input fields dict.
Returns: A dict for while loop.
"""
data = dict()
data["feed_dict"] = dict()
def map_fn(n, d, p):
# n: name prefix
# d: data list
# p: padding symbol
data[concat_name(n, Constants.IDS_NAME)] = d
n_samples = len(d)
n_devices = len(input_fields)
n_samples_per_gpu = n_samples // n_devices
if n_samples % n_devices > 0:
n_samples_per_gpu += 1
def _feed_batchs(_start_idx, _inpf):
if _start_idx * n_samples_per_gpu >= n_samples:
return 0
x, x_len = padding_batch_data(
d[_start_idx * n_samples_per_gpu:(_start_idx + 1) * n_samples_per_gpu], p)
data["feed_dict"][_inpf[concat_name(n, Constants.IDS_NAME)]] = x
data["feed_dict"][_inpf[concat_name(n, Constants.LENGTH_NAME)]] = x_len
return len(x_len)
parallels = repeat_n_times(
n_devices, _feed_batchs,
range(n_devices), input_fields)
data["feed_dict"]["parallels"] = parallels
if isinstance(name_prefixs, six.string_types):
map_fn(name_prefixs, origin_datas, paddings)
else:
[map_fn(n, d, p) for n, d, p in zip(name_prefixs, origin_datas, paddings)]
return data | 6de17aa1235d929fee20fcddcfcfb04e3907484b | 3,651,422 |
from PyQt4 import QtGui
def get_directory(**kwargs):
"""
Wrapper function for PyQt4.QtGui.QFileDialog.getExistingDirectory().
Returns the absolute directory of the chosen directory.
Parameters
----------
None
Returns
-------
filename : string of absolute directory.
"""
filename = str(QtGui.QFileDialog.getExistingDirectory())
return filename | 8b27ec800ccaa237d79e198d23058b70f71df4b8 | 3,651,423 |
def single_particle_relative_pzbt_metafit(fitfn, exp_list, **kwargs):
"""Fit to single-particle energies plus zero body term, relative to the
first point
"""
return single_particle_metafit_int(
fitfn, exp_list,
dpath_sources=DPATH_FILES_INT, dpath_plots=DPATH_PLOTS,
transform=relative_y_zbt,
code='sprpz', mf_name='single_particle_relative_pzbt_metafit',
xlabel='A',
ylabel='Relative Single Particle Energy + Zero Body Term (MeV)',
**kwargs
) | 81dfc60f1f27df710ccdb489ff322d576b8d9922 | 3,651,424 |
def hetmat_from_permuted_graph(hetmat, permutation_id, permuted_graph):
"""
Assumes subdirectory structure and that permutations inherit nodes but not
edges.
"""
permuted_hetmat = initialize_permutation_directory(hetmat, permutation_id)
permuted_hetmat = hetmat_from_graph(
permuted_graph,
permuted_hetmat.directory,
save_metagraph=False,
save_nodes=False,
)
return permuted_hetmat | 54728e3522f76e24d4a4107752980a57990c551d | 3,651,425 |
import types
def can_see_all_content(requesting_user: types.User, course_key: CourseKey) -> bool:
"""
Global staff, course staff, and instructors can see everything.
There's no need to run processors to restrict results for these users.
"""
return (
GlobalStaff().has_user(requesting_user) or
CourseStaffRole(course_key).has_user(requesting_user) or
CourseInstructorRole(course_key).has_user(requesting_user)
) | c4de054b235da20074841e7225123ef73e7d4a16 | 3,651,426 |
def part2(steps, workers=2, extra_time=0):
""" Time is in seconds """
workers = [Worker() for _ in range(workers)]
steps_to_time = {
step: alphabet.index(step) + 1 + extra_time
for step in alphabet
}
time = 0
graph = build_graph(steps)
chain = find_orphans(graph)
while chain or Worker.busy(workers):
for worker in workers:
if time == worker.current_step_end_time:
worker.finish_step()
new_children = []
for i, node in enumerate(chain):
if node.ready:
for worker in workers:
if worker.idle:
current_node = chain.pop(i)
new_children += current_node.children
current_step = current_node
end_time = time + steps_to_time[current_node.step]
worker.begin_step(current_step, end_time)
break
chain = list(set(new_children).union(set(chain)))
chain = sorted(chain, key=lambda node: node.step)
time += 1
return time - 1 | 792c6ca6e5334491eb38d8549b6c9df41d101924 | 3,651,427 |
from typing import Any
from typing import List
from typing import Iterable
def to_local_df(df: Any, schema: Any = None, metadata: Any = None) -> LocalDataFrame:
"""Convert a data structure to :class:`~fugue.dataframe.dataframe.LocalDataFrame`
:param df: :class:`~fugue.dataframe.dataframe.DataFrame`, pandas DataFramme and
list or iterable of arrays
:param schema: |SchemaLikeObject|, defaults to None, it should not be set for
:class:`~fugue.dataframe.dataframe.DataFrame` type
:param metadata: dict-like object with string keys, defaults to None
:raises ValueError: if ``df`` is :class:`~fugue.dataframe.dataframe.DataFrame`
but you set ``schema`` or ``metadata``
:raises TypeError: if ``df`` is not compatible
:return: the dataframe itself if it's
:class:`~fugue.dataframe.dataframe.LocalDataFrame` else a converted one
:Examples:
>>> a = to_local_df([[0,'a'],[1,'b']],"a:int,b:str")
>>> assert to_local_df(a) is a
>>> to_local_df(SparkDataFrame([[0,'a'],[1,'b']],"a:int,b:str"))
"""
assert_arg_not_none(df, "df")
if isinstance(df, DataFrame):
aot(
schema is None and metadata is None,
ValueError("schema and metadata must be None when df is a DataFrame"),
)
return df.as_local()
if isinstance(df, pd.DataFrame):
return PandasDataFrame(df, schema, metadata)
if isinstance(df, List):
return ArrayDataFrame(df, schema, metadata)
if isinstance(df, Iterable):
return IterableDataFrame(df, schema, metadata)
raise TypeError(f"{df} cannot convert to a LocalDataFrame") | 12aae7869067b14f2f0f8ffcb3e393f41db5114f | 3,651,428 |
def create_local_meta(name):
"""
Create the metadata dictionary for this level of execution.
Parameters
----------
name : str
String to describe the current level of execution.
Returns
-------
dict
Dictionary containing the metadata.
"""
local_meta = {
'name': name,
'timestamp': None,
'success': 1,
'msg': '',
}
return local_meta | 61a2ef73e8a6f74360881b97150a79079f3f8c29 | 3,651,429 |
def matches_uri_ref_syntax(s):
"""
This function returns true if the given string could be a URI reference,
as defined in RFC 3986, just based on the string's syntax.
A URI reference can be a URI or certain portions of one, including the
empty string, and it can have a fragment component.
"""
if not _validation_setup_completed:
_init_uri_validation_regex()
return URI_REF_PATTERN.match(s) is not None | 73b0dde1f76edcf4fe7f7754cc67d7604f984521 | 3,651,430 |
import datetime
def get_end_hour(dt=None):
"""Given a date or datetime, return the :59:59 time of that hour; the argument may be a date or a datetime."""
end = None
if not dt:
dt = datetime.date.today()
if isinstance(dt, datetime.date):
dt_str = dt.strftime("%Y-%m-%d %H") + ":59:59"
end = datetime.datetime.strptime(dt_str, "%Y-%m-%d %H:%M:%S")
return end | 73d2760cf085295e13a4699aaf3dd8aa9dd5ae49 | 3,651,431 |
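A quick usage sketch showing the behaviour for both date and datetime inputs:
import datetime
print(get_end_hour(datetime.date(2024, 1, 2)))              # 2024-01-02 00:59:59
print(get_end_hour(datetime.datetime(2024, 1, 2, 15, 20)))  # 2024-01-02 15:59:59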
def all_ped_combos_strs(num_locs=4, val_set=("0", "1")):
"""Return a list of all pedestrian observation combinations (in string format) for a vehicle under the 4 location scheme"""
res = []
lsts = all_ped_combos_lsts(num_locs, val_set)
for lst in lsts:
res.append(" ".join(lst))
return res | 4a87cf48da5fb9582c7d7284fc78471e84918256 | 3,651,432 |
def create_image(
image_request: ImageRequest,
background_tasks: BackgroundTasks,
db: Session = Depends(get_db),
):
"""
(2) add database record
(3) give background_tasks a reference of image record
"""
image = Images()
image.url = image_request.image
# image.output = "4" # <<< image_request.output
db.add(image)
db.commit()
background_tasks.add_task(predict_image, image.id)
return {
"code": "success",
"message": "image added",
} | 77042168b61262b832bdad438a4d6661cfb263d6 | 3,651,433 |
def get_key(rule_tracker, value):
"""
Given an event index, its corresponding key from the dictionary is returned.
Parameters:
rule_tracker (dict): Key-value pairs specific to a rule where key is an activity, pair is an event index
value (int): Index of event in event log
Returns:
key (int): Position of value in rule_tracker
"""
for key in rule_tracker:
if rule_tracker[key] == value:
return key | 1921e9a68d0df0867248ca83e2ba641101735fc7 | 3,651,434 |
def get_cxml(filename):
""" Create and return CXML object from File or LocalCache """
cxml = Cxml(filename)
return cxml | c2c440793ea4b509823dd0ad90677eb7db2696ff | 3,651,435 |
def save_layer(index, settings) -> Action:
"""Action to save layer settings"""
return {"kind": SAVE_LAYER, "payload": {"index": index, "settings": settings}} | 8fde0e1c752455e386745f428a69ae4a9936c028 | 3,651,436 |
def request_user_input(prompt='> '):
"""Request input from the user and return what has been entered."""
return input(prompt) | 1c8507edb17977005e068abee90b84832354adaf | 3,651,437 |
def get_clinic_qs():
""" Returns a list of clinic uuid values for clinics whose patients
will receive follow up reminder messages
"""
q = Q()
for clinic in MESSAGE_CLINICS:
q = q | Q(name__iexact=clinic)
return list(Clinic.objects.filter(q).values_list('uuid', flat=True)) | 8224db73bd14839b8db6e2ee4a77c1404d846e34 | 3,651,438 |
from math import sqrt
from scipy.special import lpmv
def NPnm(n, m, x):
"""Eq:II.77 """
return sqrt( (2*n+1)/2 * abs(nmFactorial(n,m)) ) * lpmv(m, n, x) | 8444f8d3a56e62bf66c6c0a318641d212202438d | 3,651,439 |
def all_columns_empty():
"""All columns are empty ... test will demoonstrate this edge case can be handled"""
return [[] for i in range(0, 100)] | 77a354978f82fd61d0f4d12db57a7fc455f4af28 | 3,651,440 |
def ping(host, destination, repeat_count, vrf_name):
"""Execute Ping RPC over NETCONF."""
# create NETCONF provider
provider = NetconfServiceProvider(address=host,
port=830,
username='admin',
password='admin',
protocol='ssh')
executor = ExecutorService() # create executor service
ping = xr_ping_act.Ping() # create ping RPC object
ping.input.destination = ping.input.Destination()
ping.input.destination.destination = destination
ping.input.destination.repeat_count = repeat_count
ping.input.destination.vrf_name = vrf_name
ping.output = executor.execute_rpc(provider, ping, ping.output)
return dict(success_rate=int(str(ping.output.ping_response.ipv4[0].success_rate)),
rtt_min=int(str(ping.output.ping_response.ipv4[0].rtt_min)),
rtt_avg=int(str(ping.output.ping_response.ipv4[0].rtt_avg)),
rtt_max=int(str(ping.output.ping_response.ipv4[0].rtt_max))) | b2486447a5c8e0c48a8420a2f8c7795d0eef68b8 | 3,651,441 |
import numpy as np
def compute_shape_index(mesh) -> np.ndarray:
"""
Computes shape index for the patches. Shape index characterizes the shape
around a point on the surface, computed using the local curvature around each
point. These values are derived using PyMesh's available geometric
processing functionality.
Parameters
----------
mesh: Mesh
Instance of the pymesh Mesh type. The mesh is constructed by using
information on vertices and faces.
Returns
-------
si: np.ndarray,
Shape index for each vertex
"""
n1 = mesh.get_attribute("vertex_nx")
n2 = mesh.get_attribute("vertex_ny")
n3 = mesh.get_attribute("vertex_nz")
normals = np.stack([n1, n2, n3], axis=1)
mesh.add_attribute("vertex_mean_curvature")
H = mesh.get_attribute("vertex_mean_curvature")
mesh.add_attribute("vertex_gaussian_curvature")
K = mesh.get_attribute("vertex_gaussian_curvature")
elem = np.square(H) - K
# In some cases this equation is less than zero, likely due to the method
# that computes the mean and gaussian curvature. set to an epsilon.
elem[elem < 0] = 1e-8
k1 = H + np.sqrt(elem)
k2 = H - np.sqrt(elem)
# Compute the shape index
si = (k1 + k2) / (k1 - k2)
si = np.arctan(si) * (2 / np.pi)
return si | e7c84aeb39eaf7e752fe8e98d6519b342b22088a | 3,651,442 |
from typing import Tuple
import cv2 as cv
import numpy as np
from numpy import ndarray
def edit_frame(frame: ndarray, y: int) -> Tuple[ndarray, ndarray]:
"""
Parameters
----------
frame : (is row-major)
y
Returns
-------
(frame, cut)
"""
np.random.uniform(-1, 1, size=20000000) # 20000000@6cores
cut = cv.cvtColor(frame[[y], :], cv.COLOR_BGR2GRAY)[0, :]
# Convert OpenCV colors to PyQtGraph colors
frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
return frame, cut | 30f2550839067eb837f38734a761f1f06e50db27 | 3,651,443 |
def get_location(uniprot_id: str) -> Location: # pragma: no cover
"""Queries the UniProt database for a subcellular location with the id `uniprot_id`
and returns a `Location` object"""
g: LocationRDF = get_location_graph(uniprot_id)
return Location.from_location_rdf(g) | cbe77792023954095962eaa8d379518a6ee10027 | 3,651,444 |
import copy
def _gaussian2d_rot_no_bg(p,x,y):
"""
Required Arguments:
p -- (m) [A,x0,y0,FWHMx,FWHMy,theta]
x -- (n x o) ndarray of coordinate positions for dimension 1
y -- (n x o) ndarray of coordinate positions for dimension 2
Outputs:
f -- (n x o) ndarray of function values at positions (x,y)
"""
theta=p[5]
x0prime, y0prime, xprime, yprime=_2d_coord_transform(theta,p[1],p[2],x,y)
newp=copy.copy(p)#this copy was needed so original parameters set isn't changed
newp[1]=x0prime
newp[2]=y0prime
f=_gaussian2d_no_bg(newp[:5],xprime,yprime)
return f | 8f9433993ff4992c1d4d7fd5b36cd1ca57003f31 | 3,651,445 |
from queue import Empty
def queue_get_all(q):
"""
Used by the report builder to extract all items from a queue and merge them by pid.
:param q: queue to get all items from
:return: hash of merged data from the queue by pid
"""
items = {}
maxItemsToRetreive = 10000
for numOfItemsRetrieved in range(0, maxItemsToRetreive):
try:
if numOfItemsRetrieved == maxItemsToRetreive:
break
new = q.get_nowait()
pid = new.pid
ts = new.timestamp
msg = new.msg
if pid not in items:
items[pid] = ''
old = items[pid]
new = '{0}\n[{1}]{2}'.format(old, ts, msg)
items[pid] = new
except Empty:
break
return items | 221700485ee10893bfd1e4e290523ed35cf21418 | 3,651,446 |
def sample_account(self, profile, company, **params):
"""Create and return a sample customer"""
defaults = {
"balance": 0,
"account_name": "string",
"account_color": "string"
}
defaults.update(params)
return Account.objects.create(
profile=profile,
company=company,
**defaults
) | 6360d0b6a15592d42ffc7f9315181ae769812d4b | 3,651,447 |
def get_funghi_type_dict(funghi_dict):
"""
Parameters
----------
funghi_dict: dict {str: list of strs}
is the name: html lines dict created by get_funghi_book_entry_dict_from_html()
Return
------------
dict {str: FunghiType}
each entry contains a mushroom name and the corresponding FunghiType created with generate_funghi()
"""
funghis = {}
for funghi_name in funghi_dict:
funghis[funghi_name] = generate_funghi(funghi_dict, funghi_name)
return funghis | 6fe891fe4f9766b7f8a78e9bd13950d5c6af264e | 3,651,449 |
def default_error_mesg_fmt(exc, no_color=False):
"""Generate a default error message for custom exceptions.
Args:
exc (Exception): the raised exception.
no_color (bool): disable colors.
Returns:
str: colorized error message.
"""
return color_error_mesg('{err_name}: {err_mesg}', {
'err_name': Color(exc.__class__.__name__, '*red'),
'err_mesg': Color(str(exc), 'white')
}, no_color) | 248d99d5d08f9499a2349e66b03e9ec6ab1557a4 | 3,651,450 |
def check_values_on_diagonal(matrix):
"""
Checks if a matrix made out of dictionary of dictionaries has values on diagonal
:param matrix: dictionary of dictionaries
:return: boolean
"""
for line in matrix.keys():
if line not in matrix[line].keys():
return False
return True | bc7979adcfb5dc7c19b3cdb3830cf2397c247846 | 3,651,451 |
def read_quantity(string):
"""
convert a string to a quantity or vectorquantity
the string must be formatted as '[1, 2, 3] unit' for a vectorquantity,
or '1 unit' for a quantity.
"""
if "]" in string:
# It's a list, so convert it to a VectorQuantity.
# The unit part comes after the list.
# The list itself must consist of floats only!
values = list(
map(
float,
string[1:].split('] ')[0].split(',')
)
)
unit = find_unit(string.split('] ')[1].split(' '))
quantity = new_quantity(values, unit)
else:
value = float(string.split(' ')[0])
unit = find_unit(string.split(' ')[1:])
quantity = new_quantity(value, unit)
return quantity | ab36a26425a4bbc236ac84a807707431d3c9dc14 | 3,651,453 |
async def stop_service(name: str) -> None:
""" stop service """
task = TASKS.get(name)
if task is None:
raise Exception(f"No such task {name}")
return task.cancel() | 245f60e70dcce09147d83697128c525e3630f238 | 3,651,454 |
import numpy as np
def rand_bbox(img_shape, lam, margin=0., count=None):
""" Standard CutMix bounding-box
Generates a random square bbox based on lambda value. This impl includes
support for enforcing a border margin as percent of bbox dimensions.
Args:
img_shape (tuple): Image shape as tuple
lam (float): Cutmix lambda value
margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image)
count (int): Number of bbox to generate
"""
ratio = np.sqrt(1 - lam)
img_h, img_w = img_shape[-2:]
cut_h, cut_w = int(img_h * ratio), int(img_w * ratio)
margin_y, margin_x = int(margin * cut_h), int(margin * cut_w)
cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count)
cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count)
yl = np.clip(cy - cut_h // 2, 0, img_h)
yh = np.clip(cy + cut_h // 2, 0, img_h)
xl = np.clip(cx - cut_w // 2, 0, img_w)
xh = np.clip(cx + cut_w // 2, 0, img_w)
bbox_area = (yh - yl) * (xh - xl)
lam = 1. - bbox_area / float(img_shape[-2] * img_shape[-1])
return yl, yh, xl, xh, lam | 47fd5fa1f2530c198aad50e883ec57dbd60cb4db | 3,651,455 |
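For instance, cutting a box covering roughly 1 - lam of a 224x224 image (the exact corners vary per call because the centre is random):
import numpy as np
np.random.seed(0)
yl, yh, xl, xh, lam = rand_bbox((3, 224, 224), lam=0.7)
print((yl, yh, xl, xh), round(float(lam), 3))  # box corners and the corrected lambda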
from pathlib import Path
def get_current_dir():
"""
Get the directory of the executed Python file (i.e. this file)
"""
# Resolve to get rid of any symlinks
current_path = Path(__file__).resolve()
current_dir = current_path.parent
return current_dir | c0e6fa1300970226fce42bf57fe2d2ed6b3e3604 | 3,651,456 |
import csv
def build_gun_dictionary(filename):
"""Build a dictionary of gun parameters from an external CSV file:
- Key: the gun designation (e.g. '13.5 in V' or '12 in XI')
- Value: a list of parameters, in the order:
* caliber (in inches)
* maxrange (maximum range in yards)
* longtohit (chance to hit per gun and minute at long range)
* longmin (minimum range considered to be long)
* effectivetohit (chance to hit per gun and minute at effective range)
* effectivemin (minimum range considered to be effective)
* shorttohit (chance to hit per gun and minute at short range)
"""
gundict = {}
with open(filename) as sourcefile:
reader = csv.reader(sourcefile, delimiter=",")
next(reader)
for row in reader:
gundata = list(row)
gundict[gundata[0]] = list(map(float, gundata[1:]))
return gundict | b9e38d766430d44b94ae9fa64c080416fdeb8482 | 3,651,457 |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
        Defines minimum and maximum y values plotted.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` is used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Test score")
plt.legend(loc="best")
return plt | 903f3119338c7886a663aa9a0e173849811365f9 | 3,651,458 |
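# Example usage (sketch): plot a learning curve for a simple classifier on the scikit-learn digits data.
from sklearn.datasets import load_digits
from sklearn.naive_bayes import GaussianNB
digits = load_digits()
plot_learning_curve(GaussianNB(), "Learning Curves (Naive Bayes)",
                    digits.data, digits.target, ylim=(0.7, 1.01), cv=5).show()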
from urllib.parse import urljoin
from bs4 import BeautifulSoup
import requests
def fetch_events_AHEAD(base_url='http://www.ahead-penn.org'):
"""
Penn Events for Penn AHEAD
"""
page_soup = BeautifulSoup(requests.get(
urljoin(base_url, '/events')).content, 'html.parser')
events = []
event_table = page_soup.find('div', attrs={'id': 'main-content'})
all_events = event_table.find_all('div', attrs={'class': 'views-row'})
for event in all_events:
event_url = urljoin(base_url, event.find('a')['href'])
event_soup = BeautifulSoup(requests.get(
event_url).content, 'html.parser')
title = event_soup.find('h1', attrs={'class': 'title'})
title = title.text.strip() if title is not None else ''
date = event_soup.find('span', attrs={'class': 'date-display-single'})
date = date.text.strip() if date is not None else ''
starttime, endtime = find_startend_time(date)
location = event_soup.find('div', attrs={
'class': 'field field-name-field-location field-type-text field-label-hidden'})
location = location.text.strip() if location is not None else ''
details = event_soup.find('div', attrs={
'class': 'field field-name-body field-type-text-with-summary field-label-hidden'})
details = details.text.strip() if details is not None else ''
events.append({
'title': title,
'speaker': '',
'date': date,
'location': location,
'description': details,
'starttime': starttime,
'endtime': endtime,
'url': event_url,
'owner': 'Penn AHEAD',
})
return events | 24dc865a1db2ff5361e8d502ab47d78de94b875b | 3,651,459 |
import string
def column_to_index(ref):
"""
    Convert a spreadsheet column letter reference to a 0-based column index.
    Params:
        ref(str): A, B, C, ... Z, AA, AB, ...
    Returns:
        int: 0-based column index
"""
column = 0
for i, ch in enumerate(reversed(ref)):
d = string.ascii_uppercase.index(ch) + 1
column += d * pow(len(string.ascii_uppercase),i)
return column-1 | 7a6f89fa238d3d47a1e45b2e83821dbd4e8b23f8 | 3,651,460 |
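# Quick checks: 'A' -> 0, 'Z' -> 25, 'AA' -> 26, 'AZ' -> 51.
assert column_to_index('A') == 0
assert column_to_index('Z') == 25
assert column_to_index('AA') == 26
assert column_to_index('AZ') == 51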
import numpy as np
def cols_to_tanh(df, columns):
"""Transform column data with hyperbolic tangent and return new columns of prefixed data.
Args:
df: Pandas DataFrame.
columns: List of columns to transform.
Returns:
Original DataFrame with additional prefixed columns.
"""
for col in columns:
df['tanh_' + col] = np.tanh(df[col])
return df | b24c5467fa3c38415c8a4a0ab399a3ab44f481e9 | 3,651,461 |
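# Example usage (sketch): add tanh-transformed copies of two columns to a small DataFrame.
import pandas as pd
df = pd.DataFrame({'a': [0.0, 1.0, 2.0], 'b': [3.0, 4.0, 5.0]})
df = cols_to_tanh(df, ['a', 'b'])
print(df[['tanh_a', 'tanh_b']])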
import numpy as np
def stdev_time(arr1d, stdev):
"""
    Detects breakpoints using multiples of the standard deviation and divides the time series into
    temporally separated sections (wanted_parts).
- if sigma = 1 -> 68.3%
- if sigma = 2 -> 95.5%
- if sigma = 2.5 -> 99.0%
- if sigma = 3 -> 99.7%
- if sigma = 4 -> 99.9%
    Parameters
    ----------
arr1d: numpy.array
1D array representing the time series for one pixel
stdev: float
number multiplied with standard deviation to define the probability space for a breakpoint
Returns
----------
numpy.int32
0 = no breakpoint over time
15 = breakpoint in the 1st section
16 = breakpoint in the 2nd section
17 = breakpoint in the 3rd section
18 = breakpoint in the 4th section
19 = breakpoint in the 5th section
31 = breakpoint in the 1st AND 2nd section
32 = breakpoint in the 1st AND 3rd section
33 = breakpoint in the 1st AND 4th section OR breakpoint in the 2nd AND 3rd section
34 = breakpoint in the 1st AND 5th section OR 2nd AND 4th section
35 = breakpoint in the 2nd section AND 5th section OR 3rd AND 4th section
36 = breakpoint in the 3rd AND 5th section
37 = breakpoint in the 4th AND 5th section
48 = breakpoint in the 1st, 2nd AND 3rd section
49 = breakpoint in the 1st, 2nd AND 4th section
50 = breakpoint in the 1st, 2nd AND 5th section OR 1st, 3rd AND 4th section
51 = breakpoint in the 1st, 3rd AND 5th section OR 2nd, 3rd AND 4th section
    52 = breakpoint in the 1st, 4th AND 5th section OR 2nd, 3rd AND 5th section
53 = breakpoint in the 2nd, 4th AND 5th section
54 = breakpoint in the 3rd, 4th AND 5th section
66 = breakpoint in the 1st, 2nd, 3rd AND 4th section
67 = breakpoint in the 1st, 2nd, 3rd AND 5th section
68 = breakpoint in the 1st, 2nd, 4th AND 5th section
69 = breakpoint in the 1st, 3rd, 4th AND 5th section
70 = breakpoint in the 2nd, 3rd , 4th AND 5th section
85 = breakpoints in all section
"""
time_series = arr1d
arr_shape = arr1d.shape[0]
time_series_index = np.indices((arr_shape,))[0]
# internal function to split time series in n sub time series
def split_list(alist, wanted_parts=1): # based on: https://stackoverflow.com/a/752562
length = len(alist)
return [alist[i * length // wanted_parts: (i + 1) * length // wanted_parts]
for i in range(wanted_parts)]
# split time series and list of time series indices in 4 subarrays
time_series_split = split_list(time_series, wanted_parts=5)
time_series_index_split = split_list(time_series_index, wanted_parts=5)
# calculate linear regression for each time series subarray
mini_list = []
sigma_list = []
for i in range(0, len(time_series_index_split)):
mea = np.mean(time_series_split[i])
std_mea = stdev * np.std(time_series_split[i])
mini = min(time_series_split[i])
sigma = mea - std_mea
i += 1
mini_list = [mini_list, mini]
        sigma_list = [sigma_list, sigma]  # nested-list "append", because list.append does not work here with multiprocessing
# check for dropping slope values from one fifth of time series to next
temp = 0
if mini_list[0][0][0][0][1] < sigma_list[0][0][0][0][1]:
temp = temp + 15
if mini_list[0][0][0][1] < sigma_list[0][0][0][1]:
temp = temp + 16
if mini_list[0][0][1] < sigma_list[0][0][1]:
temp = temp + 17
if mini_list[0][1] < sigma_list[0][1]:
temp = temp + 18
if mini_list[1] < sigma_list[1]:
temp = temp + 19
if temp == 0:
return 0
return temp | b243f1d4ba904cbc2fb0e46b37305c857fce0be1 | 3,651,463 |
def main(_, **settings):
"""
This function returns a Pyramid WSGI application.
"""
config = Configurator(settings=settings, route_prefix="/api")
# Initialise the broadcast view before c2cwsgiutils is initialised. This allows to test the
# reconfiguration on the fly of the broadcast framework
config.add_route("broadcast", r"/broadcast", request_method="GET")
config.add_view(
lambda request: broadcast_view(), route_name="broadcast", renderer="fast_json", http_cache=0
)
config.include(c2cwsgiutils.pyramid.includeme)
models.init(config)
config.scan("c2cwsgiutils_app.services")
health_check = HealthCheck(config)
health_check.add_db_session_check(models.DBSession, at_least_one_model=models.Hello)
health_check.add_url_check("http://localhost:8080/api/hello")
health_check.add_url_check(name="fun_url", url=lambda _request: "http://localhost:8080/api/hello")
health_check.add_custom_check("fail", _failure, 2)
health_check.add_custom_check("fail_json", _failure_json, 2)
health_check.add_alembic_check(models.DBSession, "/app/alembic.ini", 1)
return config.make_wsgi_app() | f47bdb2e551aabfb5d03c4eefd52ec37f875e55d | 3,651,464 |
def GetVarLogMessages(max_length=256 * 1024,
path='/var/log/messages',
dut=None):
"""Returns the last n bytes of /var/log/messages.
Args:
max_length: Maximum characters of messages.
path: path to /var/log/messages.
dut: a cros.factory.device.device_types.DeviceInterface instance, None for
local.
"""
return file_utils.TailFile(path, max_length, dut) | f615f60b8daf0ee21b7b932ee23a21573b5d0db5 | 3,651,466 |
def find_index(predicate, List):
"""
(a → Boolean) → [a] → [Number]
    Return the index of the first element that satisfies the
    predicate, or None if no element matches
"""
for i, x in enumerate(List):
if predicate(x):
return i | 0c6010b8b169b7bfa780ca03c0551f189bda892a | 3,651,467 |
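# Example usage: index of the first even number; None when nothing matches.
assert find_index(lambda x: x % 2 == 0, [1, 3, 4, 5]) == 2
assert find_index(lambda x: x > 10, [1, 3, 4, 5]) is None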
from typing import Callable
from typing import Any
from typing import Dict
def logger(
wrapped: Callable[..., str], instance: Any, args: Any, kwargs: Dict[str, Any]
) -> str:
"""Handle logging for :class:`anndata.AnnData` writing functions of :class:`cellrank.estimators.BaseEstimator`."""
log, time = kwargs.pop("log", True), kwargs.pop("time", None)
msg = wrapped(*args, **kwargs)
if log:
logg.info(msg, time=time)
return msg | 6fc9d5867d2f9ebbacb3fef902d4b4d84670e449 | 3,651,468 |
def search_front():
"""
Search engine v0.1
- arguments:
- q: query to search (required)
"""
q = request.args.get('q', None)
if not q:
return flask.jsonify({'status': 'error', 'message': 'Missing query'}), 400
res = dict()
cursor = db.run(r.table(PRODUCTS_TABLE).pluck('shop').distinct())
shops = [c for c in cursor]
reg = build_regex(q)
cursor = db.run(r.table(PRODUCTS_TABLE).filter(lambda doc:
doc['name'].match(reg.decode('utf-8'))
).order_by('price'))
data = [c for c in cursor]
d = {'shops': shops,'data': data}
return flask.jsonify({'status': 'ok', 'data': d}), 200 | 8230cd0b304fce767dbd19d3073e05fe1e083928 | 3,651,469 |
from random import randint
def insert_rare_words(sentence: str) -> str:
"""
attack sentence by inserting a trigger token in the source sentence.
"""
words = sentence.split()
insert_pos = randint(0, len(words))
insert_token_idx = randint(0, len(WORDS)-1)
words.insert(insert_pos, WORDS[insert_token_idx])
return " ".join(words) | ca07dec0492bff7c843e073b1093a13a418052d4 | 3,651,470 |
def _can_be_quoted(loan_amount, lent_amounts):
"""
Checks if the borrower can obtain a quote. To this aim, the loan amount should be less than or
equal to the total amounts given by lenders.
:param loan_amount: the requested loan amount
:param lent_amounts: the sum of the amounts given by lenders
:return: True if the borrower can get a quote, False otherwise
"""
    return sum(lent_amounts) - loan_amount >= 0 | 6fd717f3d0e844752e07e9dd435ff72eaa4b34c9 | 3,651,471
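# Example usage: a loan of 1000 can be quoted only when lenders offer at least 1000 in total.
assert _can_be_quoted(1000, [480, 520])
assert not _can_be_quoted(1000, [400, 500])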
def load_specs_from_docstring(docstring):
"""Get dict APISpec from any given docstring."""
# character sequence used by APISpec to separate
# yaml specs from the rest of the method docstring
yaml_sep = "---"
if not docstring:
return {}
specs = yaml_utils.load_yaml_from_docstring(docstring)
# extract summary out of docstring and make it part of specs
summary = docstring.split(yaml_sep)[0] if yaml_sep in docstring else docstring
if (
summary
and not any(key in yaml_utils.PATH_KEYS for key in specs.keys())
and "summary" not in specs
):
specs["summary"] = summary.strip() # sanitize
return specs | 88c245f56bba10355e78c20eb421f865b054bdbe | 3,651,472 |
import skimage.transform
from skimage.measure import ransac
def get_transform(V1, V2, pair_ix, transform=None, use_ransac=True):
"""
Estimate parameters of an `~skimage.transform` tranformation given
a list of coordinate matches.
Parameters
----------
V1, V2 : [N,2] arrays
Coordinate lists. The transform is applied to V1 to match V2.
pair_ix : [M,2] array
Indices of matched pairs.
transform : `~skimage.transform` transformation.
Transformation to fit to the matched pairs. If `None`, defaults to
`~skimage.transform.SimilarityTransform`.
Returns
-------
tf : `transform`
Fitted transformation.
dx : [M,2] array
X & Y differences between the transformed V1 list and V2.
rms : (float, float)
Standard deviation of the residuals in X & Y.
"""
if transform is None:
transform = skimage.transform.SimilarityTransform
if use_ransac:
tf, inliers = ransac((V1[pair_ix[:,0],:], V2[pair_ix[:,1],:]),
transform, min_samples=3,
residual_threshold=3, max_trials=100)
dx = tf(V1[pair_ix[:,0],:]) - V2[pair_ix[:,1],:]
rms = np.std(dx[inliers,:], axis=0)
else:
tf = transform()
tf.estimate(V1[pair_ix[:,0],:], V2[pair_ix[:,1],:])
dx = tf(V1[pair_ix[:,0],:]) - V2[pair_ix[:,1],:]
rms = np.std(dx, axis=0)
return tf, dx, rms | d68b0c639df48cad6278b021d7bdb347cfc0d0b0 | 3,651,474 |
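# Example usage (sketch): recover a pure shift between two synthetic coordinate lists.
import numpy as np
rng = np.random.default_rng(0)
V1 = rng.uniform(0, 1000, size=(50, 2))
V2 = V1 + np.array([5.0, -3.0])                       # known translation
pair_ix = np.stack([np.arange(50), np.arange(50)], axis=1)
tf, dx, rms = get_transform(V1, V2, pair_ix, use_ransac=False)
print(tf.translation, rms)                            # translation ~ (5, -3), rms ~ 0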
def no_trajectory_dct():
""" Dictionary expected answer """
return () | 95cc96bbfb23e621511f99f4d19f1af5a31bcc0f | 3,651,475 |
import json
import mxnet as mx
from mxnet import nd
import pandas as pd
def transform_fn(net, data, input_content_type, output_content_type):
"""
Transform a request using the Gluon model. Called once per request.
:param net: The Gluon model.
:param data: The request payload.
:param input_content_type: The request content type.
:param output_content_type: The (desired) response content type.
:return: response payload and content type.
"""
ctx = mx.cpu()
parsed = json.loads(data)
trained_net, customer_index, product_index = net
users = pd.DataFrame({'customer_id': parsed['customer_id']}).merge(customer_index, how='left')['user'].values
items = pd.DataFrame({'product_id': parsed['product_id']}).merge(product_index, how='left')['item'].values
predictions = trained_net(nd.array(users).as_in_context(ctx), nd.array(items).as_in_context(ctx))
response_body = json.dumps(predictions.asnumpy().tolist())
return response_body, output_content_type | 756eb7093c7c56ded15d24356ead8a08d3eea7e7 | 3,651,476 |
from functools import wraps
from django.core.exceptions import PermissionDenied
def superuser_required(method):
"""
Decorator to check whether user is super user or not
If user is not a super-user, it will raise PermissionDenied or
403 Forbidden.
"""
@wraps(method)
def _wrapped_view(request, *args, **kwargs):
if request.user.is_superuser is False:
raise PermissionDenied
return method(request, *args, **kwargs)
return _wrapped_view | 7bab907af1be1e81448db660f7d05b42741015da | 3,651,477 |
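# Example usage (sketch): protect a Django view so only superusers may call it;
# `admin_dashboard` is an illustrative view name, not part of the original code.
from django.http import HttpResponse
@superuser_required
def admin_dashboard(request):
    return HttpResponse("superusers only")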
def _section_data_download(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
show_proctored_report_button = (
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False) and
course.enable_proctored_exams
)
section_key = 'data_download_2' if data_download_v2_is_enabled() else 'data_download'
section_data = {
'section_key': section_key,
'section_display_name': _('Data Download'),
'access': access,
'show_generate_proctored_exam_report_button': show_proctored_report_button,
'get_problem_responses_url': reverse('get_problem_responses', kwargs={'course_id': str(course_key)}),
'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': str(course_key)}),
'get_students_features_url': reverse('get_students_features', kwargs={'course_id': str(course_key)}),
'get_issued_certificates_url': reverse(
'get_issued_certificates', kwargs={'course_id': str(course_key)}
),
'get_students_who_may_enroll_url': reverse(
'get_students_who_may_enroll', kwargs={'course_id': str(course_key)}
),
'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': str(course_key)}),
'list_proctored_results_url': reverse(
'get_proctored_exam_results', kwargs={'course_id': str(course_key)}
),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': str(course_key)}),
'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': str(course_key)}),
'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': str(course_key)}),
'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': str(course_key)}),
'course_has_survey': True if course.course_survey_name else False, # lint-amnesty, pylint: disable=simplifiable-if-expression
'course_survey_results_url': reverse(
'get_course_survey_results', kwargs={'course_id': str(course_key)}
),
'export_ora2_data_url': reverse('export_ora2_data', kwargs={'course_id': str(course_key)}),
'export_ora2_submission_files_url': reverse(
'export_ora2_submission_files', kwargs={'course_id': str(course_key)}
),
'export_ora2_summary_url': reverse('export_ora2_summary', kwargs={'course_id': str(course_key)}),
}
if not access.get('data_researcher'):
section_data['is_hidden'] = True
return section_data | 159d3fb4e13979826dbf1e95baf85224b82aeba8 | 3,651,478 |
def tcache(parser, token):
"""
This will cache the contents of a template fragment for a given amount
    of time, with support for cache tags.
Usage::
{% tcache [expire_time] [fragment_name] [tags='tag1,tag2'] %}
.. some expensive processing ..
{% endtcache %}
This tag also supports varying by a list of arguments:
{% tcache [expire_time] [fragment_name] [var1] [var2] .. [tags=tags] %}
.. some expensive processing ..
{% endtcache %}
Each unique set of arguments will result in a unique cache entry.
"""
nodelist = parser.parse(('endtcache',))
parser.delete_first_token()
tokens = token.split_contents()
if len(tokens) < 3:
raise template.TemplateSyntaxError("'%r' tag requires at least 2 arguments." % tokens[0])
tags = None
if len(tokens) > 3 and 'tags=' in tokens[-1]:
tags = parser.compile_filter(tokens[-1][5:])
del tokens[-1]
return CacheNode(nodelist,
parser.compile_filter(tokens[1]),
tokens[2], # fragment_name can't be a variable.
[parser.compile_filter(token) for token in tokens[3:]],
tags
) | 206bcaa5c11a33e2f2bfe19fa75f7abe07fbc9c2 | 3,651,480 |
def location_edit(type_, id_, location_name, location_type, date, user,
description=None, latitude=None, longitude=None):
"""
Update a location.
:param type_: Type of TLO.
:type type_: str
:param id_: The ObjectId of the TLO.
:type id_: str
:param location_name: The name of the location to change.
:type location_name: str
:param location_type: The type of the location to change.
:type location_type: str
:param date: The location date to edit.
:type date: str
:param user: The user setting the new description.
:type user: str
:param description: The new description.
:type description: str
:param latitude: The new latitude.
:type latitude: str
:param longitude: The new longitude.
:type longitude: str
:returns: dict with key 'success' (boolean) and 'message' (str) if failed.
"""
crits_object = class_from_id(type_, id_)
if not crits_object:
return {'success': False, 'message': 'Cannot find %s.' % type_}
crits_object.edit_location(location_name,
location_type,
date,
description=description,
latitude=latitude,
longitude=longitude)
try:
crits_object.save(username=user)
return {'success': True}
    except ValidationError as e:
return {'success': False, 'message': "Invalid value: %s" % e} | b4bd584423e66242a6919fbcf3defcdd431ae9d3 | 3,651,482 |
def G2(species_index, eta, Rs):
"""G2 function generator.
This is a radial function between an atom and atoms with some chemical
symbol. It is defined in cite:khorshidi-2016-amp, eq. 6. This version is
scaled a little differently than the one Behler uses.
Parameters
----------
species_index : integer
species index for this function. Elements that do not have this index will
be masked out
eta : float
The gaussian width
Rs : float
The gaussian center or shift
Returns
-------
The g2 function with the cosine_cutoff function integrated into it.
"""
def g2(config, distances, atom_mask, species_masks):
distances = np.array(distances)
atom_mask = np.array(atom_mask)
species_masks = np.array(species_masks)
# Mask out non-species contributions
smask = species_masks[:, species_index][:, None]
distances *= smask
distances *= atom_mask
distances *= atom_mask[:, None]
Rc = config.get('cutoff_radius', 6.5)
result = np.where(distances > 0,
np.exp(-eta * ((distances - Rs)**2 / Rc**2)), 0.0)
result *= cosine_cutoff(config, distances, atom_mask)
gsum = np.sum(result, (1, 2))
return gsum[:, None]
g2.__desc__ = 'g2({species_index}, eta={eta}, Rs={Rs})'.format(**locals())
return g2 | a98b6ee7f6ff602a9ac8003b4c7cf515580aa9a3 | 3,651,483 |
def convert_leg_pose_to_motor_angles(robot_class, leg_poses):
"""Convert swing-extend coordinate space to motor angles for a robot type.
Args:
robot_class: This returns the class (not the instance) for the robot.
Currently it supports minitaur, laikago and mini-cheetah.
leg_poses: A list of leg poses in [swing,extend] or [abduction, swing,
extend] space for all 4 legs. The order is [abd_0, swing_0, extend_0,
abd_1, swing_1, extend_1, ...] or [swing_0, extend_0, swing_1, extend_1,
...]. Zero swing and zero extend gives a neutral standing pose for all the
robots. For minitaur, the conversion is fully accurate, for laikago and
mini-cheetah the conversion is approximate where swing is reflected to hip
and extend is reflected to both knee and the hip.
Returns:
List of motor positions for the selected robot. The list include 8 or 12
motor angles depending on the given robot type as an argument. Currently
laikago and mini-cheetah has motors for abduction which does not exist for
minitaur robot.
Raises:
ValueError: Conversion fails due to wrong inputs.
"""
if len(leg_poses) not in [8, 12]:
raise ValueError("Dimension of the leg pose provided is not 8 or 12.")
neutral_motor_angles = get_neutral_motor_angles(robot_class)
motor_angles = leg_poses
# If it is a robot with 12 motors but the provided leg pose does not contain
# abduction, extend the pose to include abduction.
if len(neutral_motor_angles) == 12 and len(leg_poses) == 8:
for i in _ABDUCTION_ACTION_INDEXES:
motor_angles.insert(i, 0)
# If the robot does not have abduction (minitaur) but the input contains them,
# ignore the abduction angles for the conversion.
elif len(neutral_motor_angles) == 8 and len(leg_poses) == 12:
del leg_poses[::3]
# Minitaur specific conversion calculations using minitaur-specific safety
# limits.
if str(robot_class) == str(laikago.Laikago):
swing_scale = 1.0
extension_scale = 1.0
# Laikago specific conversion multipliers.
swing_scale = _LAIKAGO_SWING_CONVERSION_MULTIPLIER
extension_scale = _LAIKAGO_EXTENSION_CONVERSION_MULTIPLIER
else:
motor_angles = robot_class.convert_leg_pose_to_motor_angles(leg_poses)
return motor_angles | 7d71edd6dede2e523a3b61b48ff291924ce9df23 | 3,651,484 |
def get_all_records(session):
"""
return all records
"""
result = session.query(Skeleton).all()
skeletons = convert_results(result)
return skeletons | 7a5205a40afdff943e9ad15636e41563059fd8ee | 3,651,486 |
import numpy as np
import scipy.special
def pwm_to_boltzmann_weights(prob_weight_matrix, temp):
"""Convert pwm to boltzmann weights for categorical distribution sampling."""
weights = np.array(prob_weight_matrix)
cols_logsumexp = []
for i in range(weights.shape[1]):
cols_logsumexp.append(scipy.special.logsumexp(weights.T[i] / temp))
for i in range(weights.shape[0]):
for j in range(weights.shape[1]):
weights[i, j] = np.exp(weights[i, j] / temp - cols_logsumexp[j])
return weights | f7dac6149660b230986682d6e52d5455708c1fcb | 3,651,487 |
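# Example usage (sketch): sharpen a small 4 x 2 position weight matrix with a low temperature;
# each column of the result sums to 1 and can be sampled as a categorical distribution.
import numpy as np
pwm = np.array([[0.7, 0.1],
                [0.1, 0.6],
                [0.1, 0.2],
                [0.1, 0.1]])
weights = pwm_to_boltzmann_weights(pwm, temp=0.5)
print(weights.sum(axis=0))  # ~ [1.0, 1.0]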
def mutation_delete_music_composition(identifier: str):
"""Returns a mutation for deleting a MusicComposition.
Args:
identifier: The identifier of the MusicComposition.
Returns:
The string for the mutation for deleting the music composition object based on the identifier.
"""
return format_mutation("DeleteMusicComposition", {"identifier": identifier}) | 64f4f2cba056e96d7c63ac2672d5613e3009c380 | 3,651,488 |
from astroquery.gaia import Gaia
import warnings
def coords_from_gaia(gaia_id):
"""Returns table of Gaia DR2 data given a source_id."""
warnings.filterwarnings('ignore', module='astropy.io.votable.tree')
adql = 'SELECT gaia.source_id, ra, dec FROM gaiadr2.gaia_source AS gaia WHERE gaia.source_id={0}'.format(gaia_id)
job = Gaia.launch_job(adql)
table = job.get_results()
coords = (table['ra'].data[0], table['dec'].data[0])
return coords | 6177a846528003f56c82451622c671c100f5ea71 | 3,651,489 |
from random import shuffle, random
import numpy as np
def partition(smilist,ratio=0.7):
"""
    Shuffle a list of SMILES and split it into training and test lists.
    :param smilist: smiles (list)
    :param ratio: training set fraction (float)
Return type: traininglist, testlist (list)
"""
    shuffle(smilist)
trainlen = int(np.floor( len(smilist)*ratio ) )
return smilist[0:trainlen],smilist[trainlen:] | 6dbfa6ecdf543c03ecac210e634aaaeee68a6979 | 3,651,490 |
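# Example usage (sketch): 70/30 train/test split of a small SMILES list (shuffled in place).
smiles = ['CCO', 'CCN', 'c1ccccc1', 'CC(=O)O', 'CCCC', 'CO', 'CCC', 'C1CCCCC1', 'CC', 'C']
train, test = partition(smiles, ratio=0.7)
print(len(train), len(test))  # 7 3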
from Bio import pairwise2
def align(reference, query):
"""
do a pairwise alignment of the query to the reference, outputting up to 10000 of the highest-scoring alignments.
:param reference: a STRING of the reference sequence
:param query: a STRING of the query sequence
:return: a list of up to 10000 Alignment objects
"""
alns = pairwise2.align.localms(reference, query, 1, -1, -2, -1) # match, mismatch, gap-open, gap-extension
alignments = []
for aln in alns:
al1, al2, score, begin, end = aln
alignments.append(Alignment(gappy_r=al1, gappy_q=al2))
return alignments | a10d9a5ade48fb11c8a8b497c6ef764115c9843d | 3,651,491 |
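# Example usage (sketch): align a short query against a reference; the Alignment result class is
# assumed to be defined elsewhere in the original module.
alignments = align("ACGTACGT", "ACGTCGT")
print(len(alignments))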
import re
def output_name(ncfile):
"""output_name.
Args:
ncfile:
"""
ncfile_has_datetime = re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{2}', ncfile)
if ncfile_has_datetime:
forecast_time = ncfile_has_datetime.group()
else:
raise Exception("ncfile doesn't have datetime data.")
outname = (forecast_time + "apcp")
return outname | 81d04e9fe572e6ba2eb97506d4690818008a1aaf | 3,651,492 |
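# Example usage (sketch; the filename below is illustrative only):
print(output_name("wrfout_d01_2021-06-15_12:00:00.nc"))  # -> "2021-06-15_12apcp"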
def _replacement_func_decorator(
fn=None,
name=None,
help="",
args=None):
"""
Replaces xlo.func in jupyter but removes arguments which do not make sense
when called from jupyter
"""
def decorate(fn):
spec = _FuncDescription(fn, name or fn.__name__, help, args)
publish_display_data(
{ "xloil/data": _serialise(spec) },
{ 'type': "FuncRegister" }
)
return fn
return decorate if fn is None else decorate(fn) | 92ab0a28107bfb88e8cf1084e07e252bd5994388 | 3,651,493 |
def stress_x_component(coordinates, prisms, pressure, poisson, young):
"""
x-component of the stress field.
Parameters
----------
coordinates : 2d-array
2d numpy array containing ``y``, ``x`` and ``z`` Cartesian cordinates
of the computation points. All coordinates should be in meters.
prisms : 2d-array
2d array containing the Cartesian coordinates of the prism(s). Each
line contains the coordinates of a prism in following order: y1, y2,
x1, x2, z2 and z1. All coordinates should be in meters.
pressure : 1d array
1d array containing the pressure of each prism in MPa.
poisson : float
Poisson’s ratio.
young : float
Young’s modulus in MPa.
Returns
-------
result : array
x-component of the stress field generated by the prisms at the
computation points.
"""
s_xz1 = field_component(
coordinates, prisms, pressure, poisson, young, kernel='s_xz1'
)
s_xz2 = field_component(
coordinates, prisms, pressure, poisson, young, kernel='s_xz2'
)
s_xzz2 = field_component(
coordinates, prisms, pressure, poisson, young, kernel='s_xzz2'
)
result = s_xz1 + s_xzz2 + s_xz2
result *= young/(1 + poisson)
return result | f47b8e6301964454b85e5a124db642708ba7abf6 | 3,651,494 |
def process_time_data(flag, last_time, model_params_dict_raw, time_data_raw):
"""
This is a helper function that takes the raw time data from the model
file and replaces it with the correct value in the params file.
:param flag:
:param last_time:
:param model_params_dict_raw:
:param time_data_raw:
:return:
"""
low_time_used = False
if "_" in flag and int(flag.split("_")[1]) == 1:
# There is no time constraint
low_time_used = True
if "inst" in time_data_raw:
temp_time = str(float(last_time) + 1)
while temp_time in times:
temp_time += 10
time_data = temp_time
else:
if low_time_used:
time_data = get_param_value_bounded(time_data_raw, last_time)
else:
if time_data_raw in model_params_dict_raw.keys():
time_data = get_param_value_un_bounded(model_params_dict_raw, time_data_raw)
else:
time_data = time_data_raw
return time_data | 6684ba352f2a339029581816ac72690c26dd8a73 | 3,651,495 |
def create_pos_data(data, parser):
"""
creating the positive fh numeric dataset. performing another cleaning.
:param data: suspected fh examples
:param parser: parser used for the word tokenization
:return: all positive examples (after the cleaning), will be used
for creating the negative dataset
"""
pos_data = []
pos_examples = []
for entry in tqdm(data):
try:
a = map(unicode, parser.word_tokenize(entry[4].encode('utf-8')))
s, e = num_clean(a, entry[-1][1])
if s is not None and (s != entry[-1][1][0] or e != entry[-1][1][1]):
s, e = num_clean(a, [s, e])
if s is not None:
s_nlp = nlp_split(unicode(SEP.join(a)))
s, e = find_boundaries(s_nlp, s_nlp[s])
if s >= e:
continue
if s > 0 and (e - s) == 1 and s_nlp[s - 1].pos_ in ['NOUN', 'PROPN'] and s_nlp[s].head == s_nlp[s - 1]:
continue
# time like examples - removing
if ':' in s_nlp[s:e].text:
continue
# the one token in uppercase is often classified as NOUN
if s_nlp[s].text.lower() != 'one' and s_nlp[s].pos_ != 'NUM':
continue
pos_data.append((a, (s, e)))
new_entry = entry[:-1]
target = (' '.join(a[s:e]), (s, e))
new_entry = new_entry + (target,)
pos_examples.append(new_entry)
except:
            print(entry[4])
pos_data, pos_examples = remove_dups(pos_data, pos_examples)
return pos_examples, pos_data | a55b43f9d953284494629b4f4bc6f6901be0f865 | 3,651,496 |
async def absent(hub, ctx, name, resource_uri, connection_auth=None, **kwargs):
"""
.. versionadded:: 2.0.0
Ensure a diagnostic setting does not exist for the specified resource uri.
:param name: The name of the diagnostic setting.
:param resource_uri: The identifier of the resource.
:param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure diagnostic setting is absent:
azurerm.monitor.diagnostic_setting.absent:
- name: my_setting
- resource_uri: my_resource
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
if not isinstance(connection_auth, dict):
if ctx["acct"]:
connection_auth = ctx["acct"]
else:
ret[
"comment"
] = "Connection information must be specified via acct or connection_auth dictionary!"
return ret
setting = await hub.exec.azurerm.monitor.diagnostic_setting.get(
ctx, name, resource_uri, azurerm_log_level="info", **connection_auth
)
if "error" in setting:
ret["result"] = True
ret["comment"] = "Diagnostic setting {0} was not found.".format(name)
return ret
if ctx["test"]:
ret["comment"] = "Diagnostic setting {0} would be deleted.".format(name)
ret["result"] = None
ret["changes"] = {
"old": setting,
"new": {},
}
return ret
deleted = await hub.exec.azurerm.monitor.diagnostic_setting.delete(
ctx, name, resource_uri, **connection_auth
)
if deleted:
ret["result"] = True
ret["comment"] = "Diagnostic setting {0} has been deleted.".format(name)
ret["changes"] = {"old": setting, "new": {}}
return ret
ret["comment"] = "Failed to delete diagnostic setting {0}!".format(name)
return ret | ed97a9d765e8bda566b85b2bc22a585f02378dff | 3,651,497 |
from os import environ
from requests import get
from requests.exceptions import ConnectionError, InvalidURL
from yaml import FullLoader, load
def get_ngrok() -> str or None:
"""Sends a `GET` request to api/tunnels to get the `ngrok` public url.
See Also:
Checks for output from get_port function. If nothing, then `ngrok` isn't running.
However as a sanity check, the script uses port number stored in env var to make a `GET` request.
Returns:
str or None:
- On success, returns the `ngrok` public URL.
- On failure, returns None to exit function.
"""
if validate := get_port():
port = validate.split('.')[-1]
else:
if not (port := environ.get('PORT')):
return
try:
response = get(f'http://{ip}:{port}/api/tunnels')
except InvalidURL:
return
except ConnectionError:
return
tunnel = load(response.content.decode(), Loader=FullLoader)['tunnels']
return tunnel[0].get('public_url') | 51cc61f3aea7f0ffc8d21284548df50e3e77d2b6 | 3,651,498 |
def contacts_per_person_symptomatic_60x80():
"""
Real Name: b'contacts per person symptomatic 60x80'
Original Eqn: b'contacts per person normal 60x80*(symptomatic contact fraction 80+symptomatic contact fraction 60\\\\ )/2'
Units: b'contact/Day'
Limits: (None, None)
Type: component
b''
"""
return contacts_per_person_normal_60x80() * (symptomatic_contact_fraction_80() +
symptomatic_contact_fraction_60()) / 2 | bf887237e77ffe0c3cb39a12285904f14ca14dd2 | 3,651,499 |