content (strings, 35 to 762k chars) | sha1 (strings, 40 chars) | id (int64, 0 to 3.66M)
---|---|---|
def get_input_fn_common(pattern, batch_size, mode, hparams: SmartComposeArg):
""" Returns the common input function used in Smart Compose training and evaluation"""
return _get_input_fn_common(pattern, batch_size, mode,
**_get_func_param_from_hparams(_get_input_fn_common, hparams, ('pattern', 'batch_size', 'mode'))) | 414a2281807f5ccba5534f4000a4837409dc0f1f | 3,651,500 |
def text_to_int(sentence, map_dict, max_length=20, is_target=False):
"""
对文本句子进行数字编码
@param sentence: 一个完整的句子,str类型
@param map_dict: 单词到数字的映射,dict
@param max_length: 句子的最大长度
@param is_target: 是否为目标语句。在这里要区分目标句子与源句子,因为对于目标句子(即翻译后的句子)我们需要在句子最后增加<EOS>
"""
# 用<PAD>填充整个序列
text_to_idx = []
# unk index
unk_idx = map_dict.get("<UNK>")
pad_idx = map_dict.get("<PAD>")
eos_idx = map_dict.get("<EOS>")
# 如果是输入源文本
if not is_target:
for word in sentence.lower().split():
text_to_idx.append(map_dict.get(word, unk_idx))
# 否则,对于输出目标文本需要做<EOS>的填充最后
else:
for word in sentence.lower().split():
text_to_idx.append(map_dict.get(word, unk_idx))
text_to_idx.append(eos_idx)
# 如果超长需要截断
if len(text_to_idx) > max_length:
return text_to_idx[:max_length]
# 如果不够则增加<PAD>
else:
text_to_idx = text_to_idx + [pad_idx] * (max_length - len(text_to_idx))
return text_to_idx | 9ac1928ff0a71e653c999a173ee4ea9127b29913 | 3,651,501 |
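A minimal usage sketch (not part of the original record), assuming the text_to_int function above is in scope and using a made-up map_dict:
map_dict = {"<PAD>": 0, "<UNK>": 1, "<EOS>": 2, "i": 3, "like": 4, "tea": 5}
print(text_to_int("I like coffee", map_dict, max_length=6))
# [3, 4, 1, 0, 0, 0] -- "coffee" is unknown, the rest is padding
print(text_to_int("I like tea", map_dict, max_length=6, is_target=True))
# [3, 4, 5, 2, 0, 0] -- target sentences get <EOS> before padding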
from datetime import datetime
from pandas import DataFrame
def map_to_udm_users(users_df: DataFrame) -> DataFrame:
"""
Maps a DataFrame containing Canvas users into the Ed-Fi LMS Unified Data
Model (UDM) format.
Parameters
----------
users_df: DataFrame
Pandas DataFrame containing all Canvas users
Returns
-------
DataFrame
A LMSUsers-formatted DataFrame
DataFrame columns are:
EmailAddress: The primary e-mail address for the user
LocalUserIdentifier: The user identifier assigned by a school or district
Name: The full name of the user
SISUserIdentifier: The user identifier defined in the Student Information System (SIS)
SourceSystem: The system code or name providing the user data
SourceSystemIdentifier: A unique number or alphanumeric code assigned to a user by the source system
CreateDate: datetime at which the record was first retrieved
LastModifiedDate: datetime when the record was modified, or when first retrieved
SourceCreateDate: Date this record was created in the LMS
SourceLastModifiedDate: Date this record was last updated in the LMS
"""
if users_df.empty:
return users_df
df: DataFrame = users_df[
[
"id",
"sis_user_id",
"created_at",
"name",
"email",
"login_id",
"CreateDate",
"LastModifiedDate",
]
].copy()
df["SourceSystem"] = constants.SOURCE_SYSTEM
df.rename(
columns={
"id": "SourceSystemIdentifier",
"sis_user_id": "SISUserIdentifier",
"login_id": "LocalUserIdentifier",
"email": "EmailAddress",
"name": "Name",
"created_at": "SourceCreateDate",
},
inplace=True,
)
df["SourceCreateDate"] = df["SourceCreateDate"].apply(
lambda x: datetime.strftime(
datetime.strptime(x, "%Y-%m-%dT%H:%M:%S%z"), "%Y/%m/%d %H:%M:%S"
)
)
df["UserRole"] = constants.ROLES.STUDENT
df["SourceLastModifiedDate"] = ""
return df | 96cd04c425d3a4747a29d0297a8d97451fc18a6e | 3,651,502 |
def custom_shibboleth_institution_login(
selenium, config, user_handle, user_pwd, user_name
):
"""Custom Login on Shibboleth institution page."""
wait = WebDriverWait(selenium, config.MAX_WAIT_TIME)
input_user_id = wait.until(
EC.element_to_be_clickable((By.XPATH, "//input[@id='userid']"))
)
input_user_id.send_keys(user_handle)
input_user_pwd = wait.until(
EC.element_to_be_clickable((By.XPATH, "//input[@id='password']"))
)
input_user_pwd.send_keys(user_pwd)
btn_login = wait.until(
EC.element_to_be_clickable((By.XPATH, "//button[@name='_eventId_proceed']"))
)
btn_login.click()
sleep(3)
if selenium.title == config.SHIBBOLETH_LOGIN_PAGE_TITLE:
btn_tou = wait.until(
EC.element_to_be_clickable(
(By.XPATH, "//button[@id='_shib_idp_accept_TOU']")
)
)
btn_tou.click()
btn_next = wait.until(
EC.element_to_be_clickable((By.XPATH, "//button[@id='_eventId_proceed']"))
)
btn_next.click()
navbar_user = wait.until(
EC.element_to_be_clickable((By.XPATH, "//span[@id='userDisplayInfoTitle']"))
)
assert navbar_user.text == user_name
return selenium | c830180b6fad4d454a0ffae76d42015adca5b909 | 3,651,503 |
from numpy import array
def beamcenter_mask():
"""Returns beamcenter mask as an array. Given the PSF and the dimensions of
the beamstop, the minimum intensity around beamcenter occurs at a radius of
3 pixels, hence a 7x7 mask."""
return array([[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
[0,0,1,1,1,0,0],
[0,0,1,1,1,0,0],
[0,0,1,1,1,0,0],
[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0]]) | 6efb592aa88c3da57010ab4a70144d645ae916ea | 3,651,504 |
def physical_conversion_actionAngle(quantity,pop=False):
"""Decorator to convert to physical coordinates for the actionAngle methods:
quantity= call, actionsFreqs, or actionsFreqsAngles (or EccZmaxRperiRap for actionAngleStaeckel)"""
def wrapper(method):
@wraps(method)
def wrapped(*args,**kwargs):
use_physical= kwargs.get('use_physical',True)
ro= kwargs.get('ro',None)
if ro is None and hasattr(args[0],'_roSet') and args[0]._roSet:
ro= args[0]._ro
if _APY_LOADED and isinstance(ro,units.Quantity):
ro= ro.to(units.kpc).value
vo= kwargs.get('vo',None)
if vo is None and hasattr(args[0],'_voSet') and args[0]._voSet:
vo= args[0]._vo
if _APY_LOADED and isinstance(vo,units.Quantity):
vo= vo.to(units.km/units.s).value
#Remove ro and vo kwargs if necessary
if pop and 'use_physical' in kwargs: kwargs.pop('use_physical')
if pop and 'ro' in kwargs: kwargs.pop('ro')
if pop and 'vo' in kwargs: kwargs.pop('vo')
if use_physical and not vo is None and not ro is None:
out= method(*args,**kwargs)
if 'call' in quantity or 'actions' in quantity:
if 'actions' in quantity and len(out) < 4: # 1D system
fac= [ro*vo]
if _APY_UNITS:
u= [units.kpc*units.km/units.s]
else:
fac= [ro*vo,ro*vo,ro*vo]
if _APY_UNITS:
u= [units.kpc*units.km/units.s,
units.kpc*units.km/units.s,
units.kpc*units.km/units.s]
if 'Freqs' in quantity:
FreqsFac= freq_in_Gyr(vo,ro)
if len(out) < 4: # 1D system
fac.append(FreqsFac)
if _APY_UNITS:
Freqsu= units.Gyr**-1.
u.append(Freqsu)
else:
fac.extend([FreqsFac,FreqsFac,FreqsFac])
if _APY_UNITS:
Freqsu= units.Gyr**-1.
u.extend([Freqsu,Freqsu,Freqsu])
if 'Angles' in quantity:
if len(out) < 4: # 1D system
fac.append(1.)
if _APY_UNITS:
Freqsu= units.Gyr**-1.
u.append(units.rad)
else:
fac.extend([1.,1.,1.])
if _APY_UNITS:
Freqsu= units.Gyr**-1.
u.extend([units.rad,units.rad,units.rad])
if 'EccZmaxRperiRap' in quantity:
fac= [1.,ro,ro,ro]
if _APY_UNITS:
u= [1.,
units.kpc,
units.kpc,
units.kpc]
if _APY_UNITS:
newOut= ()
try:
for ii in range(len(out)):
newOut= newOut+(units.Quantity(out[ii]*fac[ii],
unit=u[ii]),)
except TypeError: # happens if out = scalar
newOut= units.Quantity(out*fac[0],unit=u[0])
else:
newOut= ()
try:
for ii in range(len(out)):
newOut= newOut+(out[ii]*fac[ii],)
except TypeError: # happens if out = scalar
newOut= out*fac[0]
return newOut
else:
return method(*args,**kwargs)
return wrapped
return wrapper | 36501fc563a1de71320b205ef1795ea369cc578a | 3,651,505 |
import functools
import click
def pass_api_client(function):
"""Create API client form API key and pass it to subcommand.
:param function: Subcommand that returns a result from the API.
:type function: callable
:returns: Wrapped function that prints subcommand results
:rtype: callable
"""
@functools.wraps(function)
def wrapper(*args, **kwargs):
context = click.get_current_context()
api_key = context.params.get("api_key")
offering = context.params.get("offering")
config = load_config()
if api_key is None:
if not config["api_key"]:
prog_name = context.parent.info_name
click.echo(
"\nError: API key not found.\n\n"
"To fix this problem, please use any of the following methods "
"(in order of precedence):\n"
"- Pass it using the -k/--api-key option.\n"
"- Set it in the GREYNOISE_API_KEY environment variable.\n"
"- Run {!r} to save it to the configuration file.\n".format(
"{} setup".format(prog_name)
)
)
context.exit(-1)
api_key = config["api_key"]
if offering is None:
if not config["offering"]:
offering = "enterprise"
else:
offering = config["offering"]
api_client = GreyNoise(
api_key=api_key,
offering=offering,
timeout=config["timeout"],
integration_name="cli",
)
return function(api_client, *args, **kwargs)
return wrapper | af806b8420cfb50b00ed313c5ae35ac847059af4 | 3,651,506 |
import torch
def vecs_Xg_ig(x):
""" Vi = vec(dg/dxi * inv(g)), where g = exp(x)
(== [Ad(exp(x))] * vecs_ig_Xg(x))
"""
t = x.view(-1, 3).norm(p=2, dim=1).view(-1, 1, 1)
X = mat(x)
S = X.bmm(X)
#B = x.view(-1,3,1).bmm(x.view(-1,1,3)) # B = x*x'
I = torch.eye(3).to(X)
#V = sinc1(t)*eye(3) + sinc2(t)*X + sinc3(t)*B
#V = eye(3) + sinc2(t)*X + sinc3(t)*S
V = I + sinc2(t)*X + sinc3(t)*S
return V.view(*(x.size()[0:-1]), 3, 3) | dcd7276fbb1aa59128f7c321b36e561e3f90f3f2 | 3,651,507 |
def wide_factorial(x):
"""factorial returns x! = x * x-1 * x-2 * ...,
Args:
x: bytes to evaluate as an integer
Returns:
bytes representing the integer that is the result of the factorial applied on the argument passed
"""
return If(
BitLen(x) == Int(1), x, BytesMul(x, wide_factorial(BytesMinus(x, Itob(Int(1)))))
) | c6a7b01ec5f140c6bcfad45ae78879c210dd1f33 | 3,651,508 |
import pathlib
def spring_outpath(filepath: pathlib.Path) -> pathlib.Path:
"""Build a spring path based on a fastq file path"""
LOG.info("Create spring path from %s", filepath)
file_name = filepath.name
file_parent = filepath.parent
splitted = file_name.split("_")
spring_base = pathlib.Path("_".join(splitted[:-2]))
spring_path = pathlib.Path(file_parent).joinpath(spring_base).with_suffix(".spring")
LOG.info("Creates spring path %s", spring_path)
return spring_path | dfe9d7d0fb592c8bdbf8f2074e9316e8e1e7fc31 | 3,651,509 |
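A hedged usage sketch (not in the original record); it assumes a module-level LOG logger, which the snippet references but does not define, and uses a made-up fastq path:
import logging
import pathlib
LOG = logging.getLogger(__name__)
fastq = pathlib.Path("/data/sample_ACGT_L001_R1_001.fastq.gz")
print(spring_outpath(fastq))  # /data/sample_ACGT_L001.spring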
import sys
def attach_tfidf_weights(storage, vocab, tf_arr):
"""Appends tf-idf weights to each word """
wordlist = vocab
storage_weighted = []
for i in range(len(storage)):
sys.stdout.write(str(i)+",")
sys.stdout.flush()
docweights = []
stor_list = storage[i].split()
for word in stor_list:
words = [word,0]
for j in range(len(wordlist)):
if (wordlist[j] == word):
words[1] = tf_arr[i][j]
docweights.append(words)
storage_weighted.append(docweights)
return storage_weighted | 5fe52dd87d091860dc3a7482a72860abbb2b49dd | 3,651,510 |
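An illustrative call (not part of the original record) with tiny made-up inputs, assuming attach_tfidf_weights above is in scope:
docs = ["cat dog"]
vocab = ["cat", "dog"]
tfidf = [[0.5, 0.25]]
print(attach_tfidf_weights(docs, vocab, tfidf))
# writes "0," to stdout while working, then prints [[['cat', 0.5], ['dog', 0.25]]]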
def expanded_bb( final_points):
"""computation of coordinates and distance"""
left, right = final_points
left_x, left_y = left
right_x, right_y = right
base_center_x = (left_x+right_x)/2
base_center_y = (left_y+right_y)/2
dist_base = abs(complex(left_x, left_y)-complex(right_x, right_y ) )
return (int(base_center_x), int(base_center_y) ), dist_base | c033130b0d43ccf9cea3e075305cf464f958c62f | 3,651,511 |
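A small worked example (not in the original record), assuming expanded_bb above is in scope:
center, dist = expanded_bb(((0, 0), (6, 8)))
print(center, dist)  # (3, 4) 10.0 -- midpoint of the two points and their Euclidean distance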
def gen_file_get_url(token, filename):
"""
Generate httpserver file url.
Format: http://<domain:port>/files/<token>/<filename>
"""
return '%s/files/%s/%s' % (get_httpserver_root(), token, urlquote(filename)) | 5e8f3367d5872457edc5a8808c3aabb57a8a2748 | 3,651,512 |
def count_items():
"""
Get number of items in the DB
Per the AWS documentation:
DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#DynamoDB.Client.describe_table
"""
return dynamo_client.alerts_table.item_count | ac580e172ef2571a4a154af4460cdc1598832ab7 | 3,651,513 |
def extract_uris(data):
"""Convert a text/uri-list to a python list of (still escaped) URIs"""
lines = data.split('\r\n')
out = []
for l in lines:
if l == chr(0):
continue # (gmc adds a '\0' line)
if l and l[0] != '#':
out.append(l)
return out | 9f6ce28ecf94e07e03afca9852dd9952ed2a2488 | 3,651,514 |
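An illustrative call (not part of the original record), assuming extract_uris above is in scope:
data = "# comment line\r\nfile:///home/user/a.txt\r\nhttp://example.com/b%20c.txt"
print(extract_uris(data))
# ['file:///home/user/a.txt', 'http://example.com/b%20c.txt'] -- the '#' line is dropped, escapes are kept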
def createConnection(ps, graph, e, q, maxIter):
"""
Try to build a path along a transition from a given configuration
"""
for i in range(maxIter):
q_rand = shootConfig(ps.robot, q, i)
res, q1, err = graph.generateTargetConfig(e, q, q_rand)
if not res:
continue
res, p, msg = ps.directPath(q, q1, True)
if not res:
continue
ps.addConfigToRoadmap(q1)
ps.addEdgeToRoadmap(q, q1, p, True)
print("Success (i={0})".format(i))
return p, q1
print("Failed (maxIter={0})".format(maxIter))
return None, None | 62d9c3a3bb5e90cfba5df86d9dbbab5cd3f7a8ea | 3,651,515 |
def delete_division_item(ids, my_divname):
"""
Given division id, delete from db
Return deleted division entry.
"""
settings = utils.load_json_definition_file(SETTINGS_FILE)
success, dubconn = utils.open_monitoring_db(settings['dbhost'],
settings['dbuser'],
settings['dbpass'],
settings['db_db'])
if success:
division = format_division(div_id=int(ids.div), name=my_divname)
query = """
DELETE FROM divisions
WHERE divid=%s
"""
app.logger.debug("Got a delete query of: %s ", query)
cursor = dubconn.cursor()
try:
cursor.execute(query, (division['ID'],))
except Exception as err:
app.logger.error("mysql exception: [%d]: %s", err.args[0],
err.args[1])
app.logger.error("generated by: %s", query)
success = 0
dubconn.commit()
cursor.close()
dubconn.close()
return division | a0425a274a2d73bd846a0f215420efcebd3b6fbe | 3,651,516 |
import re
def extract_info(filepath,pdbid,info_id_list):
"""Returns a dictionary where the key is pocket ID (starting at zero) and the value is a dictionary of information points."""
pockets_info = {}
pocket_file = open(filepath+pdbid+'_out/'+pdbid+'_info.txt')
pocket_lines = pocket_file.readlines()
pocket_file.close()
# create inner dictionaries
counter = 0
for line in pocket_lines:
if line[:6] == 'Pocket':
pockets_info[counter] = {}
counter += 1
# populate inner dictionaries
for info_id in info_id_list:
counter = 0
for line in pocket_lines:
if line.lstrip()[:len(info_id)] == info_id:
split = re.split(r'\s+',line.rstrip())
pockets_info[counter][info_id] = float(split[-1])
counter += 1
return pockets_info | aca4074bc1c48add487268641a66c6e80aa7dafb | 3,651,517 |
import numpy as np
from math import factorial
def eval_shape_fcn(w, x, N1, N2, yte):
"""
compute class and shape function
:param w:
:param x:
:param N1:
:param N2:
:param yte: trailing edge y coordinate
:return:
"""
C = x**N1 * (1-x)**N2
n = len(w) - 1 # degree of Bernstein polynomials
S = np.zeros_like(x)
for j in range(0, n+1):
K = factorial(n)/(factorial(j)*(factorial(n-j)))
S += w[j]*K*x**j * ((1-x)**(n-j))
return C * S + x * yte | c1047f6a586f51b4fd82423429b087ca28d87510 | 3,651,518 |
def _pickle_path(file_name):
"""Returns an absolute path to the specified pickle file."""
return project_root_path('pickles', file_name) | 18aef638bf3b06eb33b638e7c2038cf07cbd0d7d | 3,651,519 |
def streamentry(parser, token):
"""
streamentry <entry_var>
"""
bits = token.split_contents()
bits.reverse()
tag_name = bits.pop()
try:
entry_var = bits.pop()
except IndexError:
raise template.TemplateSyntaxError("%r is missing entry argument" % tag_name)
if bits:
raise template.TemplateSyntaxError("%r has unexpected arguments" % tag_name)
return StreamItemNode(entry_var) | 88e6abc56f817f0d4a0c814a672bf0173342347d | 3,651,520 |
def mult_int_list_int():
"""
>>> mult_int_list_int()
[1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2]
"""
return 3 * [1, 2] * 2 | cd34fa521ae3985f7770f96a1a8985e9473ee2b3 | 3,651,521 |
def _gcd_tf(a, b, dtype=tf.int64):
"""Calculates the greatest common denominator of 2 numbers.
Assumes that a and b are tf.Tensor of shape () and performs the extended
euclidean algorithm to find the gcd and the coefficients of Bézout's
identity (https://en.wikipedia.org/wiki/B%C3%A9zout%27s_identity)
Args:
a: A scalar `tf.Tensor`.
b: A scalar `tf.Tensor`.
dtype: Data type to perform operations in. `a` and `b` are casted to this
dtype.
Returns:
A tuple of `tf.Tensor`s `(g, x, y)` such that `a*x + b*y = g = gcd(a, b)`.
"""
a = tf.cast(a, dtype=dtype)
b = tf.cast(b, dtype=dtype)
x0, x1, y0, y1 = (tf.constant(0, dtype=dtype), tf.constant(1, dtype=dtype),
tf.constant(1, dtype=dtype), tf.constant(0, dtype=dtype))
def cond(a, b, x0, x1, y0, y1):
del b, x0, x1, y0, y1
return tf.math.not_equal(a, tf.constant(0, dtype=dtype))
def body(a, b, x0, x1, y0, y1):
(q, a), b = (tf.cast(b / a, dtype=dtype), b % a), a
y0, y1 = y1, y0 - q * y1
x0, x1 = x1, x0 - q * x1
return a, b, x0, x1, y0, y1
a, b, x0, x1, y0, y1 = tf.while_loop(
cond, body, loop_vars=(a, b, x0, x1, y0, y1))
return b, x0, y0 | e012ceb40fe778c23687a118ed139f1ba4ea4527 | 3,651,522 |
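A hedged usage sketch (not in the original record); it requires TensorFlow and assumes _gcd_tf above is in scope:
import tensorflow as tf
g, x, y = _gcd_tf(tf.constant(252), tf.constant(105))
# g is gcd(252, 105) = 21 and 252*x + 105*y == g (Bezout's identity);
# tracing the loop by hand yields x = -2, y = 5
print(int(g), int(x), int(y))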
def compute_running_mean(x, kernel_size):
""" Fast analogue of scipy.signal.convolve2d with gaussian filter. """
k = kernel_size // 2
padded_x = np.pad(x, (k, k), mode='symmetric')
cumsum = np.cumsum(padded_x, axis=1)
cumsum = np.cumsum(cumsum, axis=0)
return _compute_running_mean_jit(x, kernel_size, cumsum) | 8d687c246b584dc43ce80cdfeb585c0f503be37f | 3,651,523 |
from datetime import date
def _historicDataUrll(symbol, sDate=(1990,1,1),eDate=date.today().timetuple()[0:3]):
"""
generate url
symbol: Yahoo finanance symbol
sDate: start date (y,m,d)
eDate: end date (y,m,d)
"""
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
return urlStr | 433c345ae9a55cd628f4232a4dd80507f675b30e | 3,651,524 |
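An illustrative call (not part of the original record), assuming _historicDataUrll above is in scope; note that the ichart.finance.yahoo.com endpoint itself has long been retired:
print(_historicDataUrll('spy', sDate=(2015, 1, 1), eDate=(2015, 12, 31)))
# http://ichart.finance.yahoo.com/table.csv?s=SPY&a=0&b=1&c=2015&d=11&e=31&f=2015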
def to_dict(eds, properties=True, lnk=True):
"""
Encode the EDS as a dictionary suitable for JSON serialization.
"""
nodes = {}
for node in eds.nodes:
nd = {
'label': node.predicate,
'edges': node.edges
}
if lnk and node.lnk is not None:
nd['lnk'] = {'from': node.cfrom, 'to': node.cto}
if node.type is not None:
nd['type'] = node.type
if properties:
props = node.properties
if props:
nd['properties'] = props
if node.carg is not None:
nd['carg'] = node.carg
nodes[node.id] = nd
return {'top': eds.top, 'nodes': nodes} | c1a777a0a81ad2e3b9197b3df5e0d35a5174d61f | 3,651,525 |
def find_lineage(tax_id: int) -> Lineage:
"""Finds lineage for a single tax id"""
if tax_id % 50000 == 0:
_LOGGER.info("working on tax_id: %d", tax_id)
lineage = []
while True:
record = TAXONOMY_DICT[tax_id]
lineage.append((record["tax_id"], record["rank"], record["rank_name"]))
tax_id = record["parent_tax_id"]
# every tax can be traced back to tax_id == 1, the root
if tax_id == ROOT_TAX_ID:
break
# reverse results in lineage of Kingdom => species, this is helpful for
# to_dict when there are multiple "no rank"s
lineage.reverse()
return Lineage(lineage) | 75aeb2a0e222f44e72ba315134278ec9e73de706 | 3,651,526 |
from datetime import datetime
import pytz
import json
import traceback
def modify(request):
"""
[Method overview]
Update processing for groups in the DB
"""
logger.logic_log('LOSI00001', 'None', request=request)
msg = ''
error_msg = {}
now = datetime.now(pytz.timezone('UTC'))
try:
with transaction.atomic():
json_str = json.loads(request.POST.get('json_str', '{}'))
if 'json_str' not in json_str:
msg = get_message('MOSJA23019', request.user.get_lang_mode(), showMsgId=False)
logger.user_log('LOSM04000', 'json_str', request=request)
raise Exception()
# Lock the records before updating
group_update_list = [
rq['group_id']
for rq in json_str['json_str']
if int(rq['ope']) in (defs.DABASE_OPECODE.OPE_UPDATE, defs.DABASE_OPECODE.OPE_DELETE)
]
Group.objects.select_for_update().filter(pk__in=group_update_list)
error_flag, error_msg = _validate(json_str['json_str'], request)
if error_flag:
raise Exception('validation error.')
# Build the update data
group_id_list_reg = []
group_id_list_mod = []
sorted_data = sorted(json_str['json_str'], key=lambda x: x['group_id'])
upd_data = list(filter(lambda x: int(x['ope']) == defs.DABASE_OPECODE.OPE_UPDATE, sorted_data))
del_data = list(filter(lambda x: int(x['ope']) == defs.DABASE_OPECODE.OPE_DELETE, sorted_data))
ins_data = list(filter(lambda x: int(x['ope']) == defs.DABASE_OPECODE.OPE_INSERT, sorted_data))
for rq in upd_data:
group_id_list_mod = Group.objects.filter(group_id=rq['group_id'])
if len(group_id_list_mod) <= 0:
logger.logic_log('LOSI04000', rq['group_name'], request=request)
continue
# Group names of system-managed groups must not be updated
if int(rq['group_id']) not in defs.GROUP_DEFINE.PROTECTED_GROUP_IDS:
group_id_list_mod[0].group_name = rq['group_name']
group_id_list_mod[0].summary = rq['summary']
group_id_list_mod[0].last_update_user = request.user.user_name
group_id_list_mod[0].last_update_timestamp = now
group_id_list_mod[0].save(force_update=True)
group_id_list_del = [rq['group_id'] for rq in del_data if int(rq['group_id']) not in defs.GROUP_DEFINE.PROTECTED_GROUP_IDS]
for rq in ins_data:
group_info = Group(
group_name=rq['group_name'],
summary=rq['summary'],
last_update_user=request.user.user_name,
last_update_timestamp=now
)
group_id_list_reg.append(group_info)
# Insert new groups
Group.objects.bulk_create(group_id_list_reg)
# Add access permissions
_bulk_create_access_permission(
request.user.user_name,
[i.group_name for i in group_id_list_reg],
now,
)
# Delete the groups marked for deletion
Group.objects.filter(pk__in=group_id_list_del).delete()
# Get the user IDs that belong to the user groups marked for deletion
before_user_list = list(UserGroup.objects.filter(group_id__in=group_id_list_del).values_list('user_id', flat=True).distinct())
# Delete the user groups
UserGroup.objects.filter(group_id__in=group_id_list_del).delete()
# Find users that no longer belong to any group
after_user_list = list(UserGroup.objects.filter(user_id__in=before_user_list).values_list('user_id', flat=True).distinct())
delete_user_list = list(set(before_user_list) ^ set(after_user_list))
# Delete the users, their password history, and access permissions
User.objects.filter(pk__in=delete_user_list).delete()
PasswordHistory.objects.filter(user_id__in=delete_user_list).delete()
AccessPermission.objects.filter(group_id__in=group_id_list_del).delete()
except Exception as e:
logger.logic_log('LOSI00005', traceback.format_exc(), request=request)
msg = get_message('MOSJA23021', request.user.get_lang_mode()) + '\\n' + str(e.args)
response = {}
response['status'] = 'failure'
response['msg'] = msg
response['error_msg'] = error_msg
response_json = json.dumps(response)
return HttpResponse(response_json, content_type="application/json")
redirect_url = '/oase_web/system/group'
response_json = '{"status": "success", "redirect_url": "%s"}' % redirect_url
logger.logic_log('LOSI00002', 'None', request=request)
return HttpResponse(response_json, content_type="application/json") | d596f0e239d2017f61a9747e2a5ed9731ff9308d | 3,651,527 |
import inspect
def _function_args_doc(functions):
"""
Create documentation of a list of functions.
Return: usage dict (usage[funcname] = list of arguments, incl.
default values), doc dict (doc[funcname] = docstring (or None)).
Called by function_UI.
"""
usage = {}
doc = {}
for f in functions:
args = inspect.getargspec(f)
if args.defaults is None:
# Only positional arguments
usage[f.__name__] = args.args
else:
# Keyword arguments too, build complete list
usage[f.__name__] = args.args[:-len(args.defaults)] + \
['%s=%s' % (a, d) for a, d in \
zip(args.args[-len(args.defaults):], args.defaults)]
doc[f.__name__] = inspect.getdoc(f)
return usage, doc | 848fb1c7629d8e4feb848293cd965da6edc2ff4a | 3,651,528 |
def mock_modules_list():
"""Standard module list without any issues"""
return [
{"name": "foo", "module_type": "app", "supported_platforms": ["macos"]},
{"name": "bar", "module_type": "app"},
] | c4f20e95e87950a414b0ac156e6a07ac79dcdf19 | 3,651,529 |
def cal_iou(box1, box1_area, boxes2, boxes2_area):
"""
box1 [x1,y1,x2,y2]
boxes2 [Msample,x1,y1,x2,y2]
"""
x1 = np.maximum(box1[0], boxes2[:, 0])
x2 = np.minimum(box1[2], boxes2[:, 2])
y1 = np.maximum(box1[1], boxes2[:, 1])
y2 = np.minimum(box1[3], boxes2[:, 3])
intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
iou = intersection / (box1_area + boxes2_area[:] - intersection[:])
return iou | e27d942730cfe043034ec3f063934d94907314cf | 3,651,530 |
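A small worked example (not in the original record), assuming cal_iou above is in scope:
import numpy as np
box1 = np.array([0, 0, 10, 10])
boxes2 = np.array([[5, 5, 15, 15], [20, 20, 30, 30]])
box1_area = (box1[2] - box1[0] + 1) * (box1[3] - box1[1] + 1)
boxes2_area = (boxes2[:, 2] - boxes2[:, 0] + 1) * (boxes2[:, 3] - boxes2[:, 1] + 1)
print(cal_iou(box1, box1_area, boxes2, boxes2_area))
# approximately [0.175, 0.0] -- the first box overlaps box1, the second does not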
def hbonds_single_c(snap, id1, id2, cut1, cut2, angle, names=False):
"""
Binding of C++ routines in :mod:`.hbonds_c` for counting of hydrogen bonds in a single snapshot.
Args:
snap (:class:`.Snap`): single snapshot containing the atomic information
id1 (str): identifier for oxygen atoms (e.g. 'O\_')
id2 (str): identifier for hydrogen atoms (e.g. 'H\_')
cut1 (float): maximum distance between two oxygen atoms
cut2 (float): maximum distance between an oxygen and a hydrogen atom
angle (float): minimum O-H-O angle in degree
names (list[str], optional): names of oxygen atoms used as search centers
Returns:
float: number of hydrogen bonds found for this snapshot
"""
atoms1 = snap.atoms[snap.atoms['id'] == id1]['pos'].values
atoms1 = atoms1.reshape(len(atoms1) * 3)
atoms2 = snap.atoms[snap.atoms['id'] == id2]['pos'].values
atoms2 = atoms2.reshape(len(atoms2) * 3)
cell = snap.cell.reshape(9)
if names:
center = snap.atoms.loc[snap.atoms['name'].isin(names)]
center = center['pos'].values
center = center.reshape(len(center) * 3)
number = hbonds_c.hbonds(atoms1, atoms2, center, cut1, cut2, angle, cell)
else:
number = hbonds_c.hbonds(atoms1, atoms2, atoms1, cut1, cut2, angle, cell)
return number | f4d7c73b631225505f8140e67da950979159e6c6 | 3,651,531 |
def _find_event_times(raw, event_id, mask):
"""Given the event_id and mask, find the event times.
"""
stim_ch = find_stim_channel(raw)
sfreq = raw.info['sfreq']
events = find_events(raw, stim_ch, mask, event_id)
times = [(event[0] - raw.first_samp) / sfreq for event in events]
return times | 1ade6a18567767db64ed57880b2b0837feade5d4 | 3,651,532 |
def get_parameters():
"""Load parameter values from AWS Systems Manager (SSM) Parameter Store"""
parameters = {
"kafka_servers": ssm_client.get_parameter(
Name="/kafka_spark_demo/kafka_servers")["Parameter"]["Value"],
"kafka_demo_bucket": ssm_client.get_parameter(
Name="/kafka_spark_demo/kafka_demo_bucket")["Parameter"]["Value"],
"schema_registry_url": ssm_client.get_parameter(
Name="/kafka_spark_demo/schema_registry_url_int")["Parameter"]["Value"],
}
return parameters | 0dbd8c505c5bf404d612bc83fb119f1291f5cbad | 3,651,533 |
async def get_accounts(context, names, observer=None):
"""Find and return lite accounts by `names`.
Observer: will include `followed` context.
"""
assert isinstance(names, list), 'names must be a list'
assert names, 'names cannot be blank'
assert len(names) < 100, 'too many accounts requested'
return await accounts_by_name(context['db'], names, observer, lite=True) | 9e088f691cb92cf495b238d20902b276943b6044 | 3,651,534 |
def softmax_crossentropy_logits(p, q):
"""see sparse cross entropy"""
return -(p * log_softmax(q)).sum(-1) | aa50eb4c7de8060a1ce9f9e7c879970db6d9b505 | 3,651,535 |
def SieveOfEratosthenes(limit=10**6):
"""Returns all primes not greater than limit."""
isPrime = [True]*(limit+1)
isPrime[0] = isPrime[1] = False
primes = []
for i in range(2, limit+1):
if not isPrime[i]: continue
primes += [i]
for j in range(i*i, limit+1, i):
isPrime[j] = False
return primes | 6d1e12d289c9bfcdfadf64f764deba077a09ffd1 | 3,651,536 |
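A quick illustrative call (not in the original record), assuming SieveOfEratosthenes above is in scope:
print(SieveOfEratosthenes(30))
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]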
import os
import re
def GetHistory():
"""Obtain mapping from release version to docs/root/intro/deprecated.rst PRs.
Returns:
A dictionary mapping from release version to a set of git commit objects.
"""
repo = Repo(os.getcwd())
version = None
history = defaultdict(set)
for commit, lines in repo.blame('HEAD', 'docs/root/intro/deprecated.rst'):
for line in lines:
sr = re.match('## Version (.*) \(.*\)', line)
if sr:
version = sr.group(1)
continue
history[version].add(commit)
return history | f305f0630d484a630377630f813ae121e07d50f4 | 3,651,537 |
def generate_chromosome(constraint = False, constraint_levers = [], constraint_values = [],
threshold = False, threshold_names = [], thresholds = []):
"""
Initialises a chromosome and returns its corresponding lever values, and temperature and cost.
**Args**:
- constraint (*boolean*): Flag to select whether any inputs have been fixed.
- constraint_levers (*list of strings*): Contains the name of levers to be fixed.
- constraint_values (*list of floats*): Contains the values to fix the selected levers to.
- threshold (*boolean*): Flag to select whether any inputs have to be bounded within a range.
- threshold_names (*list of strings*): Contains the name of the levers to be bounded within a range.
- thresholds (*list of lists of floats*): Contains the upper and lower bound for each specified lever.
**Returns**:
Lever values corresponding to generated chromosome and cost values corresponding to the current chromosome.
"""
lever_names = list(dfs_3.iloc[:, 0].to_numpy()) # Create list with all lever names
# Generate random lever combination
random_lever_values = new_lever_combination(threshold = threshold, threshold_names = threshold_names, thresholds = thresholds)
# Fix specified input levers
if constraint == True:
lever_names, random_lever_values = overwrite_lever_values(lever_names, random_lever_values, constraint_levers, constraint_values)
result = move_lever(lever_names, random_lever_values, costs = True, constraint = constraint, constraint_levers = constraint_levers, constraint_values = constraint_values) # Move lever accordingly and read temperature and cost valuesw
return random_lever_values, result | 02fe7b4f34064410f635b68f2764fb50451e7cf0 | 3,651,538 |
def make_link_request(data: dict, user_token: str):
"""
https://yandex.ru/dev/disk/api/reference/response-objects-docpage/#link
- it will not raise in case of error HTTP code.
- see `api/request.py` documentation for more.
:param data: Data of link to handle.
:param user_token: User OAuth token to access the API.
:raises NotImplementedError: If link requires templating.
"""
if (data["templated"]):
raise NotImplementedError("Templating not implemented")
url = data["href"]
method = data["method"].upper()
timeout = current_app.config["YANDEX_DISK_API_TIMEOUT"]
return request(
raise_for_status=False,
content_type="json",
method=method,
url=url,
timeout=timeout,
auth=HTTPOAuthAuth(user_token),
allow_redirects=False,
verify=True
) | 4c3c183b7c8bd713594ee42623f5db0a43e98ffd | 3,651,539 |
import warnings
def load_sample_bathymetry(**kwargs):
"""
(Deprecated) Load a table of ship observations of bathymetry off Baja
California as a pandas.DataFrame.
.. warning:: Deprecated since v0.6.0. This function has been replaced with
``load_sample_data(name="bathymetry")`` and will be removed in
v0.9.0.
This is the ``@tut_ship.xyz`` dataset used in the GMT tutorials.
The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
first time you invoke this function. Afterwards, it will load the data from
the cache. So you'll need an internet connection the first time around.
Returns
-------
data : pandas.DataFrame
The data table. Columns are longitude, latitude, and bathymetry.
"""
if "suppress_warning" not in kwargs:
warnings.warn(
"This function has been deprecated since v0.6.0 and will be "
"removed in v0.9.0. Please use "
"load_sample_data(name='bathymetry') instead.",
category=FutureWarning,
stacklevel=2,
)
fname = which("@tut_ship.xyz", download="c")
data = pd.read_csv(
fname, sep="\t", header=None, names=["longitude", "latitude", "bathymetry"]
)
return data | 085e2795f9f59a4222bdca5a97e8d1818aa11d75 | 3,651,540 |
import operator
def shift(obj, offset, excluded=(), op=operator.sub, verbose=False):
"""Shift soda.ir.Ref with the given offset.
All soda.ir.Ref, excluding the given names, will be shifted with the
given offset using the given operator. The operator will be applied pointwise
on the original index and the given offset.
Args:
obj: A haoda.ir.Node or a tensor.Tensor object.
offset: Second operand given to the operator.
excluded: Sequence of names to be excluded from the mutation. Default to ().
op: Shifting operator. Should be either add or sub. Default to sub.
verbose: Whether to log shiftings. Default to False.
Returns:
Mutated obj. If obj is an IR node, it will be a different object than the
input. If obj is a tensor, it will be the same object but with fields
mutated.
"""
if op not in (operator.add, operator.sub):
_logger.warn('shifting with neither + nor -, which most likely is an error')
def visitor(obj, args):
if isinstance(obj, ir.Ref):
if obj.name not in excluded:
new_idx = tuple(op(a, b) for a, b in zip(obj.idx, offset))
if verbose:
_logger.debug('reference %s(%s) shifted to %s(%s)', obj.name,
', '.join(map(str, obj.idx)), obj.name,
', '.join(map(str, new_idx)))
obj.idx = new_idx
if isinstance(obj, ir.Node):
return obj.visit(visitor)
if isinstance(obj, tensor.Tensor):
obj.mutate(visitor)
else:
raise TypeError('argument is not an IR node or a tensor')
return obj | c2c40770982da36681395e785f0e8417b252bc7c | 3,651,541 |
async def ticket_channel_embed(
_: hikari.InteractionCreateEvent, bot: hikari.GatewayBot
) -> hikari.Embed:
"""Provides an embed for individual ticket channels."""
description = (
"Thanks for submitting a ticket! We take all tickets "
"very seriously. Please provide a full explanation in this "
"channel. You can include text, images, files, video, or "
"documents. \n\nPlease do not ping the Mods or Staff unless "
"there is a life or death situation. Someone will address it "
"available."
)
embed = hikari.Embed(title="", description=description, color=8454399)
embed.set_thumbnail(
"https://cdn.discordapp.com/attachments/733789542884048906/900079323279663175/85d744c5310511ecb705f23c91500735.png"
)
embed.set_author(name=bot.get_me().username, icon=bot.get_me().avatar_url)
return embed | 1c45535c8a7b606ac80a8a2fefd7e78079ed25f6 | 3,651,542 |
from typing import List
def count_branching_factor(strips_ops: List[STRIPSOperator],
segments: List[Segment]) -> int:
"""Returns the total branching factor for all states in the segments."""
total_branching_factor = 0
for segment in segments:
atoms = segment.init_atoms
objects = set(segment.states[0])
ground_ops = {
ground_op
for op in strips_ops
for ground_op in all_ground_operators(op, objects)
}
for _ in get_applicable_operators(ground_ops, atoms):
total_branching_factor += 1
return total_branching_factor | 155b7258f320a95ca56736331686470bc8c5a5f7 | 3,651,543 |
import torch
def iou_overlaps(b1, b2):
"""
Arguments:
b1: dts, [n, >=4] (x1, y1, x2, y2, ...)
b2: gts, [n, >=4] (x1, y1, x2, y2, ...)
Returns:
intersection-over-union pair-wise, generalized iou.
"""
area1 = (b1[:, 2] - b1[:, 0] + 1) * (b1[:, 3] - b1[:, 1] + 1)
area2 = (b2[:, 2] - b2[:, 0] + 1) * (b2[:, 3] - b2[:, 1] + 1)
# only for giou loss
lt1 = torch.max(b1[:, :2], b2[:, :2])
rb1 = torch.max(b1[:, 2:4], b2[:, 2:4])
lt2 = torch.min(b1[:, :2], b2[:, :2])
rb2 = torch.min(b1[:, 2:4], b2[:, 2:4])
wh1 = (rb2 - lt1 + 1).clamp(min=0)
wh2 = (rb1 - lt2 + 1).clamp(min=0)
inter_area = wh1[:, 0] * wh1[:, 1]
union_area = area1 + area2 - inter_area
iou = inter_area / torch.clamp(union_area, min=1)
ac_union = wh2[:, 0] * wh2[:, 1] + 1e-7
giou = iou - (ac_union - union_area) / ac_union
return iou, giou | ba9b445223fea5ea8332a189b297c8c40205a4e5 | 3,651,544 |
def aggregate(data):
"""Aggregate the data."""
return NotImplemented | 2d7fd424d70858e6065dca34991308f0ed6c945c | 3,651,545 |
def get_valid_columns(solution):
"""Get a list of column indices for which the column has more than one class.
This is necessary when computing BAC or AUC which involves true positive and
true negative in the denominator. When some class is missing, these scores
don't make sense (or you have to add an epsilon to remedy the situation).
Args:
solution: array, a matrix of binary entries, of shape
(num_examples, num_features)
Returns:
valid_columns: a list of indices for which the column has more than one
class.
"""
num_examples = solution.shape[0]
col_sum = np.sum(solution, axis=0)
valid_columns = np.where(1 - np.isclose(col_sum, 0) -
np.isclose(col_sum, num_examples))[0]
return valid_columns | b5aeb01f3362dc8ab1ed22cd86ad7d6995e36a3e | 3,651,546 |
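A small worked example (not part of the original record), assuming get_valid_columns above is in scope:
import numpy as np
solution = np.array([[1, 0, 1], [1, 1, 0], [1, 0, 0]])
print(get_valid_columns(solution))  # [1 2] -- column 0 contains only one class and is dropped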
def fourier_transform(data, proc_parameters):
"""Perform Fourier Transform down dim dimension given in proc_parameters
.. Note::
Assumes dt = t[1] - t[0]
Args:
data (nddata): Data container
proc_parameters (dict, procParam): Processing parameters
Returns:
nddata: Fourier Transformed data
Example:
.. code-block:: python
proc_parameters['dim'] = 't'
proc_parameters['zero_fill_factor'] = 2
proc_parameters['shift'] = True
proc_parameters['convert_to_ppm'] = True
all_data = dnplab.dnpNMR.fourier_transform(all_data, proc_parameters)
"""
required_parameters = defaults._fourier_transform
# Add required parameters to proc_parameters
print(required_parameters)
for key in required_parameters:
if key not in proc_parameters:
proc_parameters[key] = required_parameters[key]
#
dim = proc_parameters["dim"]
zero_fill_factor = proc_parameters["zero_fill_factor"]
shift = proc_parameters["shift"]
convert_to_ppm = proc_parameters["convert_to_ppm"]
index = data.dims.index(dim)
dt = data.coords[index][1] - data.coords[index][0]
n_pts = zero_fill_factor * len(data.coords[index])
f = (1.0 / (n_pts * dt)) * np.r_[0:n_pts]
if shift == True:
f -= 1.0 / (2 * dt)
data.values = np.fft.fft(data.values, n=n_pts, axis=index)
if shift:
data.values = np.fft.fftshift(data.values, axes=index)
data.coords[index] = f
return data | e3a9aafdb2661d112f1e02885477711c2c6d3d22 | 3,651,547 |
import copy
def iupac_fasta_converter(header, sequence):
"""
Given a sequence (header and sequence itself) containing iupac characters,
return a dictionary with all possible sequences converted to ATCG.
"""
iupac_dict = {"R": "AG", "Y": "CT", "S": "GC", "W": "AT", "K": "GT",
"M": "AC", "B": "CGT", "D": "AGT", "H": "ACT", "V": "ACG",
"N": "ACGT"}
iupac_dict = {k: list(iupac_dict[k])
for k in list(iupac_dict.keys())}
if sequence.upper().count("N") >= 10:
return {header: sequence}
sequence = list(sequence.upper())
result_list = []
def iupac_recurse(seq):
for i in range(len(seq)):
if seq[i] in list(iupac_dict.keys()):
iup = iupac_dict[seq[i]]
for i_seq in iup:
new_seq = copy.deepcopy(seq)
new_seq[i] = i_seq
iupac_recurse(new_seq)
break
else:
result_list.append("".join(seq))
iupac_recurse(sequence)
if len(result_list) == 1:
return {header: result_list[0]}
else:
return {header + "-" + str(i): result_list[i]
for i in range(len(result_list))} | 95a713e87564c4d8e807e1d476439568a562731b | 3,651,548 |
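An illustrative call (not in the original record), assuming iupac_fasta_converter above is in scope:
print(iupac_fasta_converter("seq1", "ATR"))
# {'seq1-0': 'ATA', 'seq1-1': 'ATG'} -- the IUPAC code R expands to A or G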
def integer_list_to_named_tuple(list_of_integers):
"""
Converts a list of integers read from the ultrak498 into a named tuple
based upon the type. The type is determined by the first integer in the
list. Since all tuples contain five fields, the list of integers must
have a length of five.
Returns a named tuple based on the type.
"""
# Dictionary mapping type id to record named tuples.
valid_types = {
0: namedtuple("RaceHeader", "type year month day id"),
1: namedtuple("RaceHeader", "type year month day id"),
2: namedtuple("RaceHeader", "type year month day id"),
3: namedtuple("RaceHeader", "type year month day id"),
4: namedtuple("RaceHeader", "type year month day id"),
5: namedtuple("RaceHeader", "type year month day id"),
6: namedtuple("RaceHeader", "type year month day id"),
7: namedtuple("RaceHeader", "type year month day id"),
8: namedtuple("RaceHeader", "type year month day id"),
9: namedtuple("RaceHeader", "type year month day id"),
10: namedtuple("LapTime", "type minutes seconds hundreths lap"),
20: namedtuple("AbsTime", "type minutes seconds hundreths lap"),
30: namedtuple("Type30", "type a b c laps"),
40: namedtuple("Type40", "type a b c laps"),
50: namedtuple("RaceEnd", "type minutes seconds hundreths laps"),
}
# List of integers must be length of five.
if len(list_of_integers) != 5:
raise ValueError("Unable to convert list of integers to tuple; incorrect number of integers.")
# First byte is the type; type must be known.
tuple_type = list_of_integers[0]
if tuple_type not in valid_types:
raise ValueError("Unable to convert list of integers to tuple; unknown record type [%d]." % tuple_type)
# Create a namedtuple based upon the tuple_type.
named_tuple = valid_types[tuple_type]._make(list_of_integers)
return named_tuple | 50aed101577c263f213c3487dc56d9d0886c6530 | 3,651,549 |
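A quick illustrative call (not part of the original record), assuming integer_list_to_named_tuple above is in scope:
print(integer_list_to_named_tuple([10, 1, 23, 45, 3]))
# LapTime(type=10, minutes=1, seconds=23, hundreths=45, lap=3)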
def get_albums(): # noqa: E501
"""get_albums
Shows all available albums # noqa: E501
:rtype: List[Album]
"""
albums = DBAlbum.query.all()
results = [
Album(album.id, album.title, album.description) for album in albums]
return results | d5d6faf6408519afbc0d91b8bd6839bcf031cb11 | 3,651,550 |
def get_final_shape(data_array, out_dims, direction_to_names):
"""
Determine the final shape that data_array must be reshaped to in order to
have one axis for each of the out_dims (for instance, combining all
axes collected by the '*' direction).
"""
final_shape = []
for direction in out_dims:
if len(direction_to_names[direction]) == 0:
final_shape.append(1)
else:
# determine shape once dimensions for direction (usually '*') are combined
final_shape.append(
np.product([len(data_array.coords[name])
for name in direction_to_names[direction]]))
return final_shape | f1407936f9e1e7bebe55461abe4999a4fdf9636d | 3,651,551 |
import pytz
def create_assignment_payload(subsection_block):
"""
Create a Canvas assignment dict matching a subsection block on edX
Args:
subsection_block (openedx.core.djangoapps.content.block_structure.block_structure.BlockData):
The block data for the graded assignment/exam (in the structure of a course, this unit is a subsection)
Returns:
dict:
Assignment payload to be sent to Canvas to create or update the assignment
"""
return {
"assignment": {
"name": subsection_block.display_name,
"integration_id": str(subsection_block.location),
"grading_type": "percent",
"points_possible": DEFAULT_ASSIGNMENT_POINTS,
"due_at": (
None if not subsection_block.fields.get("due")
# The internal API gives us a TZ-naive datetime for the due date, but Studio indicates that
# the user should enter a UTC datetime for the due date. Coerce this to UTC before creating the
# string representation.
else subsection_block.fields["due"].astimezone(pytz.UTC).isoformat()
),
"submission_types": ["none"],
"published": False,
}
} | 5c8327d0731aaae16769429833d80b87bf39fb9d | 3,651,552 |
def return_random_initial_muscle_lengths_and_activations(InitialTension,X_o,**kwargs):
"""
This function returns initial muscle lengths and muscle activations for a given pretensioning level, as derived from (***insert file_name here for scratchwork***) for the system that starts from rest. (Ex. pendulum_eqns.reference_trajectories._01).
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
**kwargs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1) Seed - Can seed the random tension generated. When FixedInitialTension is provided, this seed will apply only to the initial conditions for activation and muscle length.
2) PlotBool - Must be either True or False. Default is False. Will plot all possible initial muscle lengths and activations for a given pretensioning level.
3) InitialTensionAcceleration - must be a numpy array of shape (2,). Default is set to the value generated from zero IC's. If using different reference trajectory, set InitialAngularAcceleration to d2r(0) (See below).
4) InitialAngularAcceleration - must be either a numpy.float64, float, or int. Default is set to 0 to simulate starting from rest. Choice of reference trajectory *should* not matter as it is either 0 or d2r(0) (either by convention or by choice).
5) InitialAngularSnap - must be either a numpy.float64, float, or int. Default is set to 0 to simulate starting from rest. Choice of reference trajectory *should* not matter as it is either 0 or d4r(0) (either by convention or by choice).
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
PlotBool = kwargs.get("PlotBool",False)
assert type(PlotBool)==bool,"PlotBool must be a boolean. Default is False."
InitialAngularAcceleration = kwargs.get(
"InitialAngularAcceleration",
0
) # 0 or d2r(0)
assert str(type(InitialAngularAcceleration)) in ["<class 'float'>","<class 'int'>","<class 'numpy.float64'>"], "InitialAngularAcceleration must be either a float or an int."
InitialAngularSnap = kwargs.get(
"InitialAngularSnap",
0
) # 0 or d4r(0)
assert str(type(InitialAngularSnap)) in ["<class 'float'>","<class 'int'>","<class 'numpy.float64'>"], "InitialAngularSnap must be either a float or an int."
InitialTensionAcceleration = kwargs.get(
"InitialTensionAcceleration",
return_initial_tension_acceleration(
InitialTension,
X_o,
InitialAngularAcceleration=InitialAngularAcceleration,
InitialAngularSnap=InitialAngularSnap
)
)
assert np.shape(InitialTensionAcceleration)==(2,) \
and str(type(InitialTensionAcceleration))=="<class 'numpy.ndarray'>", \
"InitialTensionAcceleration must be a numpy array of shape (2,)"
a_MTU1_o = np.sign(-r1(X_o[0]))*(
InitialAngularAcceleration
* np.sqrt(dr1_dθ(X_o[0])**2 + r1(X_o[0])**2)
+
X_o[1]**2
* dr1_dθ(X_o[0])
* (d2r1_dθ2(X_o[0]) + r1(X_o[0]))
/ np.sqrt(dr1_dθ(X_o[0])**2 + r1(X_o[0])**2)
)
a_MTU2_o = np.sign(-r2(X_o[0]))*(
InitialAngularAcceleration
* np.sqrt(dr2_dθ(X_o[0])**2 + r2(X_o[0])**2)
+
X_o[1]**2
* dr2_dθ(X_o[0])
* (d2r2_dθ2(X_o[0]) + r2(X_o[0]))
/ np.sqrt(dr2_dθ(X_o[0])**2 + r2(X_o[0])**2)
)
L1_UB = lo1*L_CE_max_1*(
k_1*np.log(
np.exp(
(m1*InitialTensionAcceleration[0]
+ (F_MAX1*cT/lTo1)
* (1-np.exp(-InitialTension[0]/(F_MAX1*cT*kT)))
* (c3*InitialTension[0]
- m1*a_MTU1_o
)
)
/ (F_MAX1*c3**2
*c_1*k_1
*(F_MAX1*cT/lTo1)
*(1-np.exp(-InitialTension[0]/(F_MAX1*cT*kT)))
)
)
- 1
)
+ Lr1
)
L2_UB = lo2*L_CE_max_2*(
k_1*np.log(
np.exp(
(m2*InitialTensionAcceleration[1]
+ (F_MAX2*cT/lTo2)
* (1-np.exp(-InitialTension[1]/(F_MAX2*cT*kT)))
* (c4*InitialTension[1]
- m2*a_MTU2_o
)
)
/ (F_MAX2*c4**2
*c_1*k_1
*(F_MAX2*cT/lTo2)
*(1-np.exp(-InitialTension[1]/(F_MAX2*cT*kT)))
)
)
- 1
)
+ Lr1
)
L1_LB = 0.5*lo1
if L1_UB > 1.5*lo1:
L1_UB = 1.5*lo1
L1 = np.linspace(L1_LB, L1_UB, 1001)
# mu1, sigma1 = lo1, 0.1*lo1
# L1 = np.array(list(sorted(np.random.normal(mu1, sigma1, 1001))))
U1 = (m1*InitialTensionAcceleration[0]
+ (F_MAX1*cT/lTo1)
* (1-np.exp(-InitialTension[0]/(F_MAX1*cT*kT)))
* (c3*InitialTension[0]
- m1*a_MTU1_o
- F_MAX1*c3**3
*c_1*k_1
*np.log(np.exp((L1/(lo1*L_CE_max_1) - Lr1)/k_1)+1)
)
) \
/ (
F_MAX1*c3**2
*(F_MAX1*cT/lTo1)
*(1-np.exp(-InitialTension[0]/(F_MAX1*cT*kT)))
*np.exp(-(abs((L1-lo1)/(lo1*ω))**ρ))
)
# U1 = (
# InitialTension[0][0]/(F_MAX1*np.cos(α1))
# - c_1*k_1*np.log(np.exp((L1/(lo1*L_CE_max_1) - Lr1)/k_1)+1)
# ) / (np.exp(-(abs((L1-lo1)/(lo1*ω))**ρ)))
L2_LB = 0.5*lo2
if L2_UB > 1.5*lo2:
L2_UB = 1.5*lo2
L2 = np.linspace(L2_LB, L2_UB, 1001)
# mu2, sigma2 = lo2, 0.1*lo2
# L2 = np.array(list(sorted(np.random.normal(mu2, sigma2, 1001))))
U2 = (m2*InitialTensionAcceleration[1]
+ (F_MAX2*cT/lTo2)
* (1-np.exp(-InitialTension[1]/(F_MAX2*cT*kT)))
* (c4*InitialTension[1]
- m2*a_MTU2_o
- F_MAX2*c4**3
*c_1*k_1
*np.log(np.exp((L2/(lo2*L_CE_max_2) - Lr1)/k_1)+1)
)
) \
/ (
F_MAX2*c4**2
*(F_MAX2*cT/lTo2)
*(1-np.exp(-InitialTension[1]/(F_MAX2*cT*kT)))
*np.exp(-(abs((L2-lo2)/(lo2*ω))**ρ))
)
# U2 = (
# InitialTension[1][0]/(F_MAX2*np.cos(α2))
# - c_1*k_1*np.log(np.exp((L2/(lo2*L_CE_max_2) - Lr1)/k_1)+1)
# ) / (np.exp(-(abs((L2-lo2)/(lo2*ω))**ρ)))
if PlotBool == True:
plt.figure(figsize=(10,8))
plt.title(r"Viable Initial $l_{m,1}$ and $u_{1}$ Values")
plt.xlabel(r"$l_{m,1}$ (m)",fontsize=14)
plt.ylabel(r"$u_{1}$",fontsize=14)
plt.scatter(L1,U1)
plt.plot([lo1,lo1],[0,1],'0.70',linestyle='--')
plt.gca().set_ylim((0,1))
plt.gca().set_xticks(
[0.25*lo1,
0.5*lo1,
0.75*lo1,
lo1,
1.25*lo1,
1.5*lo1,
1.75*lo1]
)
plt.gca().set_xticklabels(
["",
r"$\frac{1}{2}$ $l_{o,2}$",
"",
r"$l_{o,2}$",
"",
r"$\frac{3}{2}$ $l_{o,2}$",
""],
fontsize=12)
plt.figure(figsize=(10,8))
plt.title(r"Viable Initial $l_{m,2}$ and $u_{2}$ Values")
plt.xlabel(r"$l_{m,2}$ (m)",fontsize=14)
plt.ylabel(r"$u_{2}$",fontsize=14)
plt.scatter(L2,U2)
plt.plot([lo2,lo2],[0,1],'0.70',linestyle='--')
plt.gca().set_ylim((0,1))
plt.gca().set_xticks(
[0.25*lo2,
0.5*lo2,
0.75*lo2,
lo2,
1.25*lo2,
1.5*lo2,
1.75*lo2]
)
plt.gca().set_xticklabels(
["",
r"$\frac{1}{2}$ $l_{o,2}$",
"",
r"$l_{o,2}$",
"",
r"$\frac{3}{2}$ $l_{o,2}$",
""],
fontsize=12)
plt.show()
return(L1,U1,L2,U2) | afaa5905e3ae978217ac7f7e2b677af62bb33dd9 | 3,651,553 |
def add_top_features(df, vocab, n=10):
"""
INPUT: PySpark DataFrame, List, Int
RETURN: PySpark DataFrame
Take in DataFrame with TFIDF vectors, list of vocabulary words,
and number of features to extract. Map top features from TFIDF
vectors to vocabulary terms. Return new DataFrame with terms
"""
# Create udf function to extract top n features
extract_features_udf = udf(lambda x: extract_top_features(x, vocab, n))
# Apply udf, create new df with features column
df_features = df.withColumn("top_features",
extract_features_udf(df["tfidf_vectors_sum"]))
return df_features | 741bcbb2fea0894f5218871e3f72360bf6f2caab | 3,651,554 |
from typing import Union
from typing import Tuple
from typing import List
def java_solvability(level: MarioLevel, time_per_episode=20, verbose=False, return_trajectories=False) -> Union[bool, Tuple[bool, List[Tuple[float, float]]]]:
"""Returns a boolean indicating if this level is solvable.
Args:
level (MarioLevel): The level
time_per_episode (int, optional): How many seconds per episode. Defaults to 20.
verbose (bool, optional): Should this print many info. Defaults to False.
return_trajectories (bool, optional). If this is true, then we are by default verbose and we return trajectories
Returns:
Union[bool, :Is this solvable
Tuple[bool, List[Tuple[float, float]]] : solvable, trajectory if return_trajectories = True
]
"""
filename = write_level_to_file(level)
verbose = verbose or return_trajectories
args = ["Astar_Solvability", filename, str(time_per_episode), str(1), str(verbose).lower()]
s = timer()
string = run_java_task(args)
e = timer()
lines = string.split("\n")
result_line = [l for l in lines if 'Result' in l]
if len(result_line) == 0:
raise Exception("Java didn't print out result properly: " + string + "args = " + ' '.join(args))
if return_trajectories:
traj_line = [l for l in lines if 'Trajectories' in l]
if len(traj_line) == 0:
raise Exception("Java didn't print out trajectory properly: " + string + "args = " + ' '.join(args))
vals = [s.strip() for s in traj_line[0].split(":")[1].split(" ")]
vals = [s for s in vals if s != '']
vals = [ tuple(map(float, s.split(','))) for s in vals]
return 'WIN' in result_line[0], vals
return 'WIN' in result_line[0] | 8f4b282a8ae0b217ca12828cb20724e943de35b2 | 3,651,555 |
def get_trait_value(traitspec, value_name, default=None):
""" Return the attribute `value_name` from traitspec if it is defined.
If not will return the value of `default`.
Parameters
----------
traitspec: TraitedSpec
value_name: str
Name of the `traitspec` attribute.
default: any
A default value in case the attribute does not exist or is not defined.
Returns
-------
trait_value: any
"""
val = getattr(traitspec, value_name, default)
return default if not isdefined(val) else val | 5bc4d23b326b59e0a542a5b3113f8906e9a88c49 | 3,651,556 |
import collections
def check_if_blank(cell_image: Image) -> bool:
"""Check if image is blank
Sample the color of the black and white content - if it is white enough
assume no text and skip. Function takes a small more centered section to
OCR to avoid edge lines.
:param cell_image: Image to OCR
:return: True or None
"""
w, h = cell_image.size
crop = cell_image.crop((w * 0.1, h * 0.1, w * 0.8, h * 0.8))
data = crop.getdata()
counts = collections.Counter(data)
if (
len(counts)
< 50 # this number needs to fluctuate - or i need to find a way to create this in code,
# Current ideas is to grab a predictable slice of page that is white and sample it and use that number as a threshold
): # this may need to fluctuate to be accurate at dropping empty sections to remove gibberish
return True
return False | 6cb3be0da1d15e1ba4fb2ccc7199709058792d5c | 3,651,557 |
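A hedged usage sketch (not in the original record); it requires Pillow and assumes check_if_blank above is in scope:
from PIL import Image
white_cell = Image.new("RGB", (120, 40), "white")
print(check_if_blank(white_cell))  # True -- a solid-white crop has a single pixel value, well under the threshold of 50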
def get_tpr_from_threshold(scores,labels, threshold_list):
"""Calculate the recall score list from the threshold score list.
Args:
score_target: list of (score,label)
threshold_list: list, the threshold list
Returns:
recall_list: list, the element is recall score calculated by the
correspond threshold
"""
tpr_list = []
hack_scores = []
for score, label in zip(scores,labels):
if label == 1:
hack_scores.append(float(score))
hack_scores.sort(reverse=True)
hack_nums = len(hack_scores)
for threshold in threshold_list:
hack_index = 0
while hack_index < hack_nums:
if hack_scores[hack_index] <= threshold:
break
else:
hack_index += 1
if hack_nums != 0:
tpr = hack_index * 1.0 / hack_nums
else:
tpr = 0
tpr_list.append(tpr)
return tpr_list | 97796fb0f1ba9d41cf6e9c4bb21d1ca8f94499e3 | 3,651,558 |
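A small worked example (not part of the original record), assuming get_tpr_from_threshold above is in scope:
scores = [0.9, 0.8, 0.4, 0.2]
labels = [1, 1, 1, 0]
print(get_tpr_from_threshold(scores, labels, [0.5, 0.85]))
# approximately [0.667, 0.333] -- the fraction of positive scores strictly above each threshold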
def updating_node_validation_error(address=False, port=False, id=False,
weight=False):
"""
Verified 2015-06-16:
- when trying to update a CLB node's address/port/id, which are
immutable.
- when trying to update a CLB node's weight to be < 1 or > 100
At least one of address, port, id, and weight should be `True` for this
error to apply.
:param bool address: Whether the address was passed to update
:param bool port: Whether the port was passed to update
:param bool id: Whether the ID was passed to update
:param bool weight: Whether the weight was passed to update and wrong
:return: a `tuple` of (dict body message, 400 http status code)
"""
messages = []
if address:
messages.append("Node ip field cannot be modified.")
if port:
messages.append("Port field cannot be modified.")
if weight:
messages.append("Node weight is invalid. Range is 1-100. "
"Please specify a valid weight.")
if id:
messages.append("Node id field cannot be modified.")
return(
{
"validationErrors": {
"messages": messages
},
"message": "Validation Failure",
"code": 400,
"details": "The object is not valid"
},
400
) | 68c5fdda121950c679afe446bfd7fb19331deb40 | 3,651,559 |
def gaussianDerivative(x):
"""This function returns the gaussian derivative of x
(Note: Not Real Derivative)
"""
return -2.0*x*(np.sqrt(-np.log(x))) | 6b8312b399f627708007e80e5c72cedde4e944fc | 3,651,560 |
def parse_numbers(numbers):
"""Return list of numbers."""
return [int(number) for number in numbers] | ee79d4e15cbfb269f7307710d9ad4735687f7128 | 3,651,561 |
import json
def add_server():
"""
Adds a server to database if not exists
"""
data = json.loads(request.data)
ip_addr = IPModel.get_or_create(address=data["ip_addr"])[0]
ServerModel.create(ip=ip_addr, port=data["port"])
return 'OK' | 31ed6860fb311e00e9ee266a121cb44c256723a6 | 3,651,562 |
import pickle
import hashlib
import os
import warnings
import shutil
def _fetch_files(data_dir, files, resume=True, verbose=1):
"""Load requested dataset, downloading it if needed or requested.
This function retrieves files from the hard drive or downloads them from
the given urls. Note to developers: All the files will be first
downloaded in a sandbox and, if everything goes well, they will be moved
into the folder of the dataset. This prevents corrupting previously
downloaded data. In case of a big dataset, do not hesitate to make several
calls if needed.
Parameters
----------
data_dir : string
Path of the data directory. Used for data storage in a specified
location.
files : list of (string, string, dict)
List of files and their corresponding url with dictionary that contains
options regarding the files. Eg. (file_path, url, opt). If a file_path
is not found in data_dir, as in data_dir/file_path the download will
be immediately cancelled and any downloaded files will be deleted.
Options supported are:
* 'move' if renaming the file or moving it to a subfolder is needed
* 'uncompress' to indicate that the file is an archive
* 'md5sum' to check the md5 sum of the file
* 'overwrite' if the file should be re-downloaded even if it exists
resume : bool, optional
If true, try resuming download if possible. Default=True.
verbose : int, optional
Verbosity level (0 means no message). Default=1.
Returns
-------
files : list of string
Absolute paths of downloaded files on disk.
"""
# There are two working directories here:
# - data_dir is the destination directory of the dataset
# - temp_dir is a temporary directory dedicated to this fetching call. All
# files that must be downloaded will be in this directory. If a corrupted
# file is found, or a file is missing, this working directory will be
# deleted.
files = list(files)
files_pickle = pickle.dumps([(file_, url) for file_, url, _ in files])
files_md5 = hashlib.md5(files_pickle).hexdigest()
temp_dir = os.path.join(data_dir, files_md5)
# Create destination dir
if not os.path.exists(data_dir):
os.makedirs(data_dir)
# Abortion flag, in case of error
abort = None
files_ = []
for file_, url, opts in files:
# 3 possibilities:
# - the file exists in data_dir, nothing to do.
# - the file does not exists: we download it in temp_dir
# - the file exists in temp_dir: this can happen if an archive has been
# downloaded. There is nothing to do
# Target file in the data_dir
target_file = os.path.join(data_dir, file_)
# Target file in temp dir
temp_target_file = os.path.join(temp_dir, file_)
# Whether to keep existing files
overwrite = opts.get('overwrite', False)
if (abort is None and (overwrite or (not os.path.exists(target_file) and not
os.path.exists(temp_target_file)))):
# We may be in a global read-only repository. If so, we cannot
# download files.
if not os.access(data_dir, os.W_OK):
raise ValueError('Dataset files are missing but dataset'
' repository is read-only. Contact your data'
' administrator to solve the problem')
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
dl_file = _fetch_file(url, temp_dir, resume=resume,
verbose=verbose,
overwrite=overwrite)
if (abort is None and not os.path.exists(target_file) and not
os.path.exists(temp_target_file)):
            warnings.warn('An error occurred while fetching %s' % file_)
abort = ("Dataset has been downloaded but requested file was "
"not provided:\nURL: %s\n"
"Target file: %s\nDownloaded: %s" %
(url, target_file, dl_file))
if abort is not None:
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
raise IOError('Fetching aborted: ' + abort)
files_.append(target_file)
    # If needed, move files from the temp directory to the final directory.
if os.path.exists(temp_dir):
        # XXX We could move only the requested files
        # XXX movetree can go wrong
movetree(temp_dir, data_dir)
shutil.rmtree(temp_dir)
return files_ | 2faf17d344416759361e87bbca2d375927d0d70d | 3,651,563 |
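A minimal sketch of a call to _fetch_files; the URLs and paths below are placeholders, and the module-level helpers _fetch_file and movetree are assumed to exist alongside this function:
files_spec = [
    ('atlas/labels.txt', 'https://example.org/data/labels.txt', {}),
    ('atlas/maps.tar.gz', 'https://example.org/data/maps.tar.gz', {'uncompress': True}),
]
# Downloads into /tmp/my_dataset (resuming partial downloads when possible)
# and returns the absolute paths of the requested files.
local_paths = _fetch_files('/tmp/my_dataset', files_spec, resume=True, verbose=1)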
import sys
import argparse
import os
def parse_cmdline(argv):
"""
Returns the parsed argument list and return code.
`argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
"""
if argv is None:
argv = sys.argv[1:]
# initialize the parser object:
parser = argparse.ArgumentParser(description='Checks for normal termination of Gaussian output files in a '
'specified directory, and moves them to a new location.')
parser.add_argument("-a", "--all", help="Check convergence of all steps and print to standard out.",
action="store_true", default=False)
parser.add_argument("-b", "--best", help="Check convergence of each step and list the convergence of the best 10 "
"steps, sorted by convergence.", action="store_true", default=False)
parser.add_argument("-d", "--directory", help="The directory where to look for Gaussian output files to check for "
"normal termination, without checking in subdirectories.",
metavar="path", default=None)
parser.add_argument("-ds", "--dir_subdirs", help="The directory where to look for Gaussian output files to check "
"for normal termination, including checking in subdirectories.",
metavar="path", default=None)
parser.add_argument("-e", "--extension", help="The extension of the Gaussian output file(s) to look for when "
"searching a directory for output files. The default is '{}'."
"".format(DEF_EXT), metavar="ext", default=DEF_EXT)
parser.add_argument("-f", "--file_name", help="A file name (with path, if not the current directory) to check for "
"either normal termination or convergence. If used, this option "
"overrides the '-d' option, and no searching for files is "
"performed.", metavar="path", default=None)
parser.add_argument("-l", "--file_list", help="A file name (with path, if not the current directory) with a "
"list of files (also with path, if not the current directory) "
"overrides the '-d' option, and no searching for files is to check "
"for either normal termination or convergence. If used, this "
"option overrides the '-d' option, and no searching for files is "
"performed.", metavar="path", default=None)
parser.add_argument("-o", "--output_directory", help="The directory where to put Gaussian output files that have "
"terminated normally. The default is '{}'."
"".format(DEF_COMPLETE_DIR), metavar="path",
default=DEF_COMPLETE_DIR)
parser.add_argument("-s", "--step_converg", help="Report the convergence for each step value for the files in the "
"directory or those specified with the '-f' or '-l' options. When "
"this option is chosen, the check for normal termination is "
"skipped. The default is False.",
action="store_true", default=False)
parser.add_argument("-t", "--to_step", help="Check convergence of each step only to provided step number, and "
"before printing to standard out, sort by convergence.",
default=False)
parser.add_argument("-z", "--final_converg", help="Report the final convergence value for the files in the "
"directory or those specified with the '-f' or '-l' options. "
"When this option is chosen, the check for normal termination "
"is skipped. The default is False.", action="store_true",
default=False)
parser.add_argument("--scan", help="Read output file(s) from a scan and writes the converged energies from each "
"point of the scan to a csv file and creates a plot saved as the given file "
"name.", metavar="path", default=None)
args = None
try:
args = parser.parse_args(argv)
if args.to_step or args.best or args.all:
args.step_converg = True
if args.to_step:
try:
args.to_step = int(args.to_step)
except ValueError:
raise InvalidDataError("When the '-t' option is used, an integer must be provided.")
if args.step_converg and args.final_converg:
raise InvalidDataError("Choose either the '-a', '-b', '-s', '-t', or '-z' option.")
# make the default output directory a subdirectory of the directory to search
if args.output_directory == DEF_COMPLETE_DIR:
if args.dir_subdirs:
args.output_directory = os.path.relpath(os.path.join(args.dir_subdirs, DEF_COMPLETE_DIR))
if args.directory:
args.output_directory = os.path.relpath(os.path.join(args.directory, DEF_COMPLETE_DIR))
except (KeyError, InvalidDataError, MissingSectionHeaderError, SystemExit) as e:
if hasattr(e, 'code') and e.code == 0:
return args, GOOD_RET
warning(e)
parser.print_help()
return args, INPUT_ERROR
return args, GOOD_RET | 95be32e291bb133a69b2491f7871b9b7afa719c4 | 3,651,564 |
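An illustrative invocation (the flag values are placeholders; GOOD_RET and the DEF_* constants are assumed to be defined at module level):
args, ret = parse_cmdline(['-d', 'gaussian_runs', '-e', '.log', '-b'])
if ret == GOOD_RET:
    # '-b' implies step-convergence reporting; the output directory defaults
    # to a subdirectory of the searched directory.
    print(args.directory, args.output_directory, args.step_converg)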
def get_continuum_extrapolation( # pylint: disable=C0103
df: pd.DataFrame,
n_poly_max: int = 4,
delta_x: float = 1.25e-13,
include_statistics: bool = True,
odd_poly: bool = False,
) -> pd.DataFrame:
"""Takes a data frame read in by read tables and runs a continuum extrapolation
for the spectrum.
The continuum extrapolation is executed by a even polynomial fit up to order
`n_poly_max`
**Arguments**
df: pd.DataFrame
DataFrame returend by `read_table`.
n_poly_max: int = 4
Maximal order of the polynomial used for the spectrum extrapolation.
The fitter runs fits from 1 to `n_poly_max` even polynomials and picks
the best one defined by the maximum of the logGBF.
    delta_x: float = 1.25e-13
        Approximate error for the x-values.
include_statistics: bool = True
Includes fit statistics like chi2/dof or logGBF.
odd_poly: bool = False
Allow fits of odd polynomials.
"""
if lsqfit is None or gv is None:
        raise ImportError(
            "Cannot load `lsqfit` and `gvar`. Thus fitting is not possible."
        )
group = df.groupby(["L", "nstep", "nlevel"])[["epsilon", "x"]]
fit_df = group.apply(
_group_wise_fit,
n_poly_max=n_poly_max,
delta_x=delta_x,
include_statistics=include_statistics,
odd_poly=odd_poly,
).reset_index()
fit_df["epsilon"] = 0
if "level_3" in fit_df.columns:
fit_df = fit_df.drop(columns=["level_3"])
return fit_df | 7c4ce775b064142647259cf25c8f323c08fc99d0 | 3,651,565 |
def listvalues(d):
"""Return `d` value list"""
return list(itervalues(d)) | 2c0bcbc112e10afac3d6d958c6a494bdd19dea6c | 3,651,566 |
def _non_blank_line_count(string):
"""
Parameters
----------
string : str or unicode
String (potentially multi-line) to search in.
Returns
-------
int
Number of non-blank lines in string.
"""
non_blank_counter = 0
for line in string.splitlines():
if line.strip():
non_blank_counter += 1
return non_blank_counter | dfa6f43af95c898b1f4763573e8bf32ddf659520 | 3,651,567 |
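A quick check of the helper above:
print(_non_blank_line_count("first line\n\n   \nsecond line\n"))  # 2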
def load(map_name, batch_size):
"""Load CaraEnvironment
Args:
map_name (str): name of the map. Currently available maps are:
'Town01, Town02', 'Town03', 'Town04', 'Town05', 'Town06', 'Town07',
and 'Town10HD'
batch_size (int): the number of vehicles in the simulation.
"""
return CarlaEnvironment(batch_size, map_name) | 4433ad4fc4985a9ceaabd8e7ce3d8d3b0d419c80 | 3,651,568 |
from base64 import b64decode
def decrypt_password(encrypted_password: str) -> str:
""" b64 decoding
:param encrypted_password: encrypted password with b64
:return: password in plain text
"""
return b64decode(encrypted_password).decode("UTF-8") | e501a3da671f28f6f751ed289da961f30377d248 | 3,651,569 |
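Since the "encryption" is plain base64, a round trip is easy to verify:
from base64 import b64encode

token = b64encode("s3cret".encode("UTF-8")).decode("UTF-8")
assert decrypt_password(token) == "s3cret"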
def model_keplerian(positions, velocities, v_lsr=None, fit_method=None,
flag_singularity=True, flag_radius=None, flag_intervals=None,
return_stddevs=True, plot=False, debug=False):
"""Fit a Keplerian velocity profile to position-velocity-data.
Args:
positions (np.ndarray or Quantity):
PVdata object to compute the data from.
velocities (np.ndarray or Quantity):
Set as multiples of PVdata.noise (for instance 3sigma)
v_lsr (float):
Systemic velocity in units of km/ s.
fit_method (any, optional):
Method to fit the model to the data.
flag_singularity (bool, optional):
Flag the zero position data points, to avoid running in trouble there during fitting.
flag_radius (astropy.units.Quantity, optional):
If given, then all data points within this given radius from the position_reference are flagged.
        flag_intervals (list of tuples of astropy.units.Quantity, optional):
Similar to flag_radius, but arbitrary intervals may be flagged. Each interval is
given as a tuple of two radial distances from the position_reference.
return_stddevs (boolean, optional):
The fit method LevMarLSQFitter is able to return the standard deviation of the fit parameters. Default is
True.
plot (boolean, optional):
If True, the fit will be displayed as a matplotlib pyplot.
debug (bool, optional):
Stream debugging information to the terminal.
Returns:
        best_fit (astropy.modeling.models.custom_model):
Best fitting model.
stddevs (numpy.array):
Only if return_stddevs is True. The array entries correspond to the best_fit instance parameters in the
same order.
chi2 (float):
chi-squared residual of the fit to the unflagged data.
"""
# Transform Quantities to correct units
if isinstance(positions, Quantity):
positions = positions.to('AU').value
if isinstance(velocities, Quantity):
velocities = velocities.to('km/ s').value
# Apply fall back values
if fit_method is None:
fit_method = LevMarLSQFitter()
if v_lsr is None:
v_lsr = 0
# Create masked arrays
xdata = np.ma.masked_array(positions, np.zeros(positions.shape, dtype=bool))
ydata = np.ma.masked_array(velocities, np.zeros(velocities.shape, dtype=bool))
# Mask the desired flags and intervals
if flag_singularity:
print('Flagging the singularity')
singularity_mask = np.ma.masked_less(np.abs(xdata), 1e-3).mask
xdata.mask = np.logical_or(xdata.mask, singularity_mask)
ydata.mask = np.logical_or(ydata.mask, singularity_mask)
print(f">> Done")
else:
print("Not masking the singularity")
if flag_radius is not None:
print(f"Flagging towards a radial distance of {flag_radius}")
if isinstance(flag_radius, Quantity):
flag_radius = flag_radius.to('au').value
xdata = np.ma.masked_inside(xdata, -flag_radius, flag_radius)
ydata.mask = np.logical_or(ydata.mask, xdata.mask)
print(f">> Done")
print(f"The mask is {xdata.mask}")
else:
print("No flag radius provided")
if flag_intervals is not None:
print('Flagging intervals...')
for interval in flag_intervals:
xdata = np.ma.masked_inside(xdata, interval[0], interval[1])
ydata.mask = np.logical_or(ydata.mask, xdata.mask)
print(f">> Flagged {np.sum(xdata.mask)} elements")
else:
print("No flag intervals provided")
if debug:
print('x data:', xdata)
print('y data:', ydata)
# Initialize the fit model
print("Initializing the model...")
model = Keplerian1D(mass=10., v0=v_lsr, r0=0, bounds={'mass': (0.0, None)})
if debug:
print(f"Initialize the model: {model}")
# Fit the chosen model to the data
print("Fitting the model to the data...")
best_fit = fit_method(model, xdata.compressed(), ydata.compressed())
if debug:
print(fit_method.fit_info['message'])
# Estimate chi2
print("Computing the chi-squared value...")
chi2 = np.sum(np.square(best_fit(xdata.compressed()) - ydata.compressed()))
# Plot
if plot:
plt.plot(positions, velocities, 'o', label='data')
plt.xlabel('Position offset (AU)')
plt.ylabel('Velocity (km/ s)')
plt.axhline(v_lsr, c='k', ls='--', label=r'$v_\mathrm{LSR}$')
plt.plot(xdata, best_fit(xdata), label='model')
plt.fill_between(xdata, best_fit(xdata), best_fit.v0, facecolor='tab:orange', alpha=.5)
if debug:
plt.plot(xdata, model(xdata), label='init')
plt.grid()
plt.legend()
plt.show()
plt.close()
# Prepare the return
stddevs = None
if not isinstance(fit_method, LevMarLSQFitter):
return_stddevs = False
if return_stddevs:
covariance = fit_method.fit_info['param_cov']
if covariance is None:
print(f"[ERROR] Unable to compute the covariance matrix and fit parameter uncertainties!")
else:
stddevs = np.sqrt(np.diag(covariance))
return best_fit, stddevs, chi2 | 1a11f3493d3f33403f51dd6934e5044e53b959bc | 3,651,570 |
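A hedged sketch of a call with synthetic data; Keplerian1D is assumed to be the custom astropy model defined alongside this function, and the rotation curve below is purely illustrative:
import numpy as np
from astropy import units as u

positions = np.linspace(10, 500, 50) * u.AU
velocities = (5.0 + 30.0 / np.sqrt(positions.value)) * u.km / u.s
best_fit, stddevs, chi2 = model_keplerian(positions, velocities, v_lsr=5.0)
print(best_fit.mass.value, chi2)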
def walk(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns the Walk task."""
# physics = Physics.from_xml_string(*get_model_and_assets())
physics = SuperballContactSimulation("tt_ntrt_on_ground.xml")
task = PlanarSuperball(move_speed=_WALK_SPEED, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(
physics, task, time_limit=time_limit, control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs) | 2b4de77661a7f0dd235c2f1d258e627ff110f3c3 | 3,651,571 |
def confound_isolating_sampling(y, z, random_seed=None, min_sample_size=None,
n_remove=None):
"""
Sampling method based on the 'Confound isolating cross-validation'
technique.
# TODO Reference to the paper
:param y: numpy.array, shape (n_samples), target
:param z: numpy.array, shape (n_samples), confound
:param random_seed: int
Random seed used to initialize the pseudo-random number generator.
Can be any integer between 0 and 2**32 - 1 inclusive. Default is None
:param min_sample_size: int
Minimum sample size (in samples) to be reached, default is 10% of the
data
:param n_remove: int,
number of the samples to be removed on each iteration of sampling,
default is 4
:return:
sampled_index,
mutual_information
correlation
"""
sampled_index = list(range(0, y.shape[0]))
mutual_information = []
correlation = []
index_to_remove = []
n_remove = _ensure_int_positive(n_remove, default=4)
min_sample_size = _ensure_int_positive(min_sample_size, default=10)
    min_size = int(y.shape[0] * min_sample_size / 100)  # np.int was removed in NumPy >= 1.24
while y.shape[0] > min_size:
# remove subject from the previous iteration
y = np.delete(y, index_to_remove, axis=0)
z = np.delete(z, index_to_remove, axis=0)
sampled_index = np.delete(sampled_index, index_to_remove, axis=0)
# control the pseudo random number generator
if random_seed is None:
prng = None
else:
prng = np.random.RandomState(seed=random_seed)
# return indexes
index_to_remove = confound_isolating_index_2remove(y, z,
n_remove=n_remove,
prng=prng)
# The case when target and confound are equal
if np.all(y==z) == True:
mutual_information.append('NaN')
else:
mutual_information.append(mutual_kde(y.astype(float),
z.astype(float)))
correlation.append(np.corrcoef(y.astype(float), z.astype(float))[0, 1])
# sampled_set = {'sampled_index': array_data[:, 2],
# 'mutual_information': mi_list,
# 'correlation': corr_list}
# sampled_index = array_data[:, 2]
# return Bunch(**sampled_set)
return sampled_index, mutual_information, correlation | 662dbb252b5271407cd0e7edcee3ef4ab821b414 | 3,651,572 |
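A small synthetic demonstration; mutual_kde and confound_isolating_index_2remove are assumed to be available from the surrounding module:
import numpy as np

rng = np.random.RandomState(0)
y = rng.normal(size=200)
z = 0.5 * y + rng.normal(size=200)   # confound correlated with the target
idx, mi, corr = confound_isolating_sampling(y, z, random_seed=0,
                                            min_sample_size=20, n_remove=4)
print(len(idx), corr[-1])            # fewer samples left, reduced correlation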
def encode_direct(list_a: list):
"""Problem 13: Run-length encoding of a list (direct solution).
Parameters
----------
list_a : list
The input list
Returns
-------
list of list
        A run-length-encoded list
Raises
------
TypeError
If the given argument is not of `list` type
"""
if not isinstance(list_a, list):
raise TypeError('The argument given is not of `list` type.')
if len(list_a) <= 1:
# In case of empty or one-element list return.
return list_a
encoded, current, count = [], list_a[0], 1
for element in list_a[1:]:
if current != element:
# If current element does not match the recorded current
# append the count to the list
encoded.append(current if count == 1 else [count, current])
current, count = element, 1
else:
# If another same element is found, increase counter
count += 1
encoded.append(current if count == 1 else [count, current])
return encoded | 9a20ffd2051003d5350f7e059d98c35310bc9bbe | 3,651,573 |
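A quick illustration of the run-length encoding:
print(encode_direct(['a', 'a', 'a', 'b', 'c', 'c', 'a', 'd', 'd', 'd', 'd']))
# -> [[3, 'a'], 'b', [2, 'c'], 'a', [4, 'd']]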
def handler500(request):
"""
HTTP Error 500 Internal Server Error
"""
    return HttpResponse('<h1>HTTP Error 500 Internal server error</h1>')
import numpy as np
import shapely.geometry
def increase_line_complexity(linestring, n_points):
    """
    linestring (shapely.geometry.linestring.LineString): the line to densify
    n_points (int): target number of evenly spaced points
    """
# or to get the distances closest to the desired one:
# n = round(line.length / desired_distance_delta)
distances = np.linspace(0, linestring.length, n_points)
points = [linestring.interpolate(distance) for distance in distances]
return shapely.geometry.linestring.LineString(points) | 9747a6277a6333b6f1e92e479e0f286a01c8ae4e | 3,651,575 |
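For example, densifying a two-point segment to five evenly spaced points:
from shapely.geometry import LineString

dense = increase_line_complexity(LineString([(0, 0), (4, 0)]), 5)
print(list(dense.coords))   # [(0.0, 0.0), (1.0, 0.0), (2.0, 0.0), (3.0, 0.0), (4.0, 0.0)]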
def get_topic_prevelance(doc_topic_matrix, num_topics, total_num_docs):
"""Input: doc_topic_matrix, a numpy nd array where each row represents a doc, and each collumn is the assocication
of the doc with a topic. Num_topics and integer holding the number of topics. Total_num_docs is an int holding the
number of docs in the corpus.
Output: a list where index i represents the prevelance of topic i within the corpus."""
topic_prev = [0] * num_topics
for i in range(0, num_topics):
topic_doc = doc_topic_matrix[:,i]
for j in range(0, len(topic_doc)):
if topic_doc[j] > TOPIC_PRESSENCE_THRESHOLD:
topic_prev[i] +=1
topic_prev[i] = topic_prev[i]/total_num_docs
return topic_prev | 752214cba87b8d1766ceba139b029197c4f51df2 | 3,651,576 |
async def drones_byDroneId_delete(request, droneId):
"""
Remove a drone from the fleet
It is handler for DELETE /drones/<droneId>
"""
return handlers.drones_byDroneId_deleteHandler(request, droneId) | 28900c7df711fde5833b50683a738fe5567202ff | 3,651,577 |
import six
def generate_sql_integration_data(sql_test_backends):
"""Populate test data for SQL backends for integration testing."""
sql_schema_info = get_sqlalchemy_schema_info()
vertex_values, edge_values, uuid_to_class_name = get_integration_data()
# Represent all edges as foreign keys
uuid_to_foreign_key_values = {}
for edge_name, edge_values in six.iteritems(edge_values):
for edge_value in edge_values:
from_classname = uuid_to_class_name[edge_value["from_uuid"]]
edge_field_name = "out_{}".format(edge_name)
join_descriptor = sql_schema_info.join_descriptors[from_classname][edge_field_name]
is_from_uuid = join_descriptor.from_column == "uuid"
is_to_uuid = join_descriptor.to_column == "uuid"
if is_from_uuid == is_to_uuid:
raise NotImplementedError(
"Exactly one of the join columns was expected to"
"be uuid. found {}".format(join_descriptor)
)
if is_from_uuid:
existing_foreign_key_values = uuid_to_foreign_key_values.setdefault(
edge_value["to_uuid"], {}
)
if join_descriptor.to_column in existing_foreign_key_values:
raise NotImplementedError(
"The SQL backend does not support many-to-many "
"edges. Found multiple edges of class {} from "
"vertex {}.".format(edge_name, edge_value["to_uuid"])
)
existing_foreign_key_values[join_descriptor.to_column] = edge_value["from_uuid"]
elif is_to_uuid:
existing_foreign_key_values = uuid_to_foreign_key_values.setdefault(
edge_value["from_uuid"], {}
)
if join_descriptor.from_column in existing_foreign_key_values:
raise NotImplementedError(
"The SQL backend does not support many-to-many "
"edges. Found multiple edges of class {} to "
"vertex {}.".format(edge_name, edge_value["to_uuid"])
)
existing_foreign_key_values[join_descriptor.from_column] = edge_value["to_uuid"]
# Insert all the prepared data into the test database
for sql_test_backend in six.itervalues(sql_test_backends):
for vertex_name, insert_values in six.iteritems(vertex_values):
table = sql_schema_info.vertex_name_to_table[vertex_name]
table.delete(bind=sql_test_backend.engine)
table.create(bind=sql_test_backend.engine)
for insert_value in insert_values:
foreign_key_values = uuid_to_foreign_key_values.get(insert_value["uuid"], {})
all_values = merge_non_overlapping_dicts(insert_value, foreign_key_values)
sql_test_backend.engine.execute(table.insert().values(**all_values))
return sql_schema_info | 1f8fe9550b069a942a900d547874c787d27576c3 | 3,651,578 |
import logging
import os
import marshal
def p4( command ):
"""
Run a perforce command line instance and marshal the
result as a list of dictionaries.
"""
commandline = 'p4 %s -G %s' % (P4_PORT_AND_USER, command)
logging.debug( '%s' % commandline )
stream = os.popen( commandline, 'rb' )
entries = []
try:
while 1:
entry = marshal.load(stream)
entries.append(entry)
except EOFError:
pass
code = stream.close()
if None != code:
raise IOError( "Failed to execute %s: %d" % (commandline, int(code)) )
return entries | fe7f642cf0059d5d0505453b0fb0c8619eabdac1 | 3,651,579 |
def measure_shear_metacal_plus_mof(res, *, s2n_cut, t_ratio_cut):
"""Measure the shear parameters for metacal+MOF.
NOTE: Returns None if nothing can be measured.
Parameters
----------
res : dict
The metacal results.
s2n_cut : float
The cut on `wmom_s2n`. Typically 10.
t_ratio_cut : float
The cut on `t_ratio_cut`. Typically 0.5.
Returns
-------
g1p : float
The mean 1-component shape for the plus metacal measurement.
g1m : float
The mean 1-component shape for the minus metacal measurement.
g1 : float
The mean 1-component shape for the zero-shear metacal measurement.
g2p : float
The mean 2-component shape for the plus metacal measurement.
g2m : float
The mean 2-component shape for the minus metacal measurement.
g2 : float
The mean 2-component shape for the zero-shear metacal measurement.
"""
def _mask(cat):
return (
(cat['flags'] == 0) &
(cat['mcal_flags'] == 0) &
(cat['mcal_s2n'] > s2n_cut) &
(cat['mcal_T_ratio'] > t_ratio_cut))
msks = {}
for sh in METACAL_TYPES:
logger.debug('%s: %s', sh, res[sh].dtype)
msks[sh] = _mask(res[sh])
if not np.any(msks[sh]):
return None
g1p = res['1p']['mcal_g'][msks['1p'], 0]
g1m = res['1m']['mcal_g'][msks['1m'], 0]
g2p = res['2p']['mcal_g'][msks['2p'], 1]
g2m = res['2m']['mcal_g'][msks['2m'], 1]
g1 = res['noshear']['mcal_g'][msks['noshear'], 0]
g2 = res['noshear']['mcal_g'][msks['noshear'], 1]
return (
np.mean(g1p), np.mean(g1m), np.mean(g1),
np.mean(g2p), np.mean(g2m), np.mean(g2)) | 0404f0253d0e73e815452aa3ac073e09002b7e63 | 3,651,580 |
def software_detail(request, context, task_id, vm_id):
""" render the detail of the user page: vm-stats, softwares, and runs """
softwares = model.get_software(task_id, vm_id)
runs = model.get_vm_runs_by_task(task_id, vm_id)
datasets = model.get_datasets_by_task(task_id)
# Construct a dictionary that has the software as a key and as value a list of runs with that software
# Note that we order the list in such a way, that evaluations of a run are right behind that run in the list
# (based on the input_run)
runs_with_input = {} # get the runs which have an input_run_id
for r in runs:
# if we loop once, might as well get the review-info here.
r['review'] = model.get_run_review(r.get("dataset"), vm_id, r.get("run_id"))
if r.get("input_run_id") == 'none':
continue
runs_with_input.setdefault(r.get("input_run_id"), []).append(r)
runs_without_input = [r for r in runs if r.get("input_run_id") == "none"]
runs_by_software = {}
for r in runs_without_input:
runs_by_software.setdefault(r.get("software"), []).append(r)
runs_by_software.setdefault(r.get("software"), []).extend(runs_with_input.pop(r.get("run_id"), []))
for k, v in runs_with_input.items(): # left-over runs_with_input, where the input-run does not exist anymore
for r in v:
runs_by_software.setdefault(r.get("software"), []).append(r)
software = [{
"software": sw,
"runs": runs_by_software.get(sw["id"])
} for sw in softwares]
vm = model.get_vm(vm_id)
context["task"] = model.get_task(task_id)
context["vm_id"] = vm_id
context["vm"] = {"host": vm.host, "user": vm.userName, "password": vm.userPw, "ssh": vm.portSsh, "rdp": vm.portRdp}
context["software"] = software
context["datasets"] = datasets
return render(request, 'tira/software.html', context) | 2e740426bc4f86d1b3d5dd2ddbaa4bdd5f6ae772 | 3,651,581 |
def compile_error_curves(dfs, window_size = 60):
"""
takes a list of timeseries dfs and
returns a DataFrame in which each column is
the monatonically decreasing version of % error
for one of the dfs in the list.
usefull for summarizing how a bunch of timeseries converge on
some value after a certain point.
params
-----
dfs: (list of pd.DataFrames)
each df should be a track timeseries
window_size: (int or float)
size of bins (in seconds)
"""
error_series = []
for i, t in enumerate(dfs):
df = dfs[t]
df_window = df[df['t'] <= window_size].copy()
if df_window is None:
continue
if len(df_window) < 0.8 * window_size:
continue
end_time = df_window.iloc[len(df_window)-1]['t']
#print(t, len(df_window) / 60., end_time)
d = calculate_error_window(df_window).set_index('t')['error_window']
d = d.reindex(np.arange(0, window_size + 1))
d = d.fillna(method='bfill')
d = d.fillna(method='ffill')
d.name = t
error_series.append(d)
return pd.concat(error_series, axis=1) | 602ec4563e2aa368db42b762db7f91c3f868fb73 | 3,651,582 |
def _get_cluster_medoids(idx_interval: np.ndarray, labels: np.ndarray,
pdist: np.ndarray, order_map: np.ndarray) \
-> np.ndarray:
"""
Get the indexes of the cluster medoids.
Parameters
----------
idx_interval : np.ndarray
Embedding indexes.
labels : np.ndarray
Cluster labels.
pdist : np.ndarray
Condensed pairwise distance matrix.
order_map : np.ndarray
Map to convert label indexes to pairwise distance matrix indexes.
Returns
-------
List[int]
List with indexes of the medoids for each cluster.
"""
medoids, m = [], len(idx_interval)
for start_i, stop_i in _get_cluster_group_idx(labels):
if stop_i - start_i > 1:
row_sum = np.zeros(stop_i - start_i, np.float32)
for row in range(stop_i - start_i):
for col in range(row + 1, stop_i - start_i):
i, j = order_map[start_i + row], order_map[start_i + col]
if i > j:
i, j = j, i
pdist_ij = pdist[m * i + j - ((i + 2) * (i + 1)) // 2]
row_sum[row] += pdist_ij
row_sum[col] += pdist_ij
medoids.append(idx_interval[start_i + np.argmin(row_sum)])
return np.asarray(medoids, dtype=np.int32) | 88739d625b5a58d41d9103824f5c733d6e2fcbf9 | 3,651,583 |
import webbrowser
def perform_authorization_code_flow():
"""
    Performs Spotify's Authorization Code Flow to retrieve an API token.
This uses the OAuth 2.0 protocol, which requires user input and consent.
Output
______
api_key: str
a user's api key with prompted permissions
refresh_token: str
A refresh token used to retrive future api keys
expires_in: int
the time (in seconds) until the token expires
"""
# create server that runs at the redirect URI. This is used to catch the
# response sent from the OAuth authentication
server = OAuthServer(("127.0.0.1", 8080))
# generate a uri with the required Oauth headers and open it in a webbrowser
auth_uri, code_verifier, state_token = generate_client_PKCE()
webbrowser.open_new_tab(auth_uri)
# parse the spotify API's http response for the User's token
raw_http_response = server.handle_auth().decode("utf-8")
http_headers = parse_spotify_http_response(raw_http_response)
# verify that state tokens match to prevent CSRF
if state_token != http_headers["state"]:
raise StateTokenException
# exchange code for access token. The refresh token is automatically cached
access_token, refresh_token, expires_in = exchange_auth_code(
http_headers["code"], code_verifier
)
return access_token, refresh_token, expires_in | 6939a4414be28f40d712cc1d54f994b02ce9a688 | 3,651,584 |
import numpy as np
def calculate_empirical_cdf(variable_values):
"""Calculate numerical cumulative distribution function.
Output tuple can be used to plot empirical cdf of input variable.
Parameters
----------
variable_values : numpy array
Values of a given variable.
Returns
-------
numpy array
Ordered variable values.
numpy array
Accumulated percentages of relative variable values.
"""
# Sort array and calculate accumulated percentages.
values = np.sort(variable_values)
accum_percentages = np.arange(1, len(values) + 1) / float(len(values))
return values, accum_percentages | 4c55f7b230318f212088a7218bac9929a9df01e5 | 3,651,585 |
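For example, the empirical CDF of a small sample:
vals, pct = calculate_empirical_cdf(np.array([3.0, 1.0, 2.0, 2.0]))
print(vals)   # [1. 2. 2. 3.]
print(pct)    # [0.25 0.5  0.75 1.  ]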
def import_reference(filename):
"""
Imports object from reference node filename
:param filename: str
"""
return maya.cmds.file(filename, importReference=True) | 07747a3ceea95f222b81e7e3b938b758f30937b0 | 3,651,586 |
import os
def product_mapping(name, setup, cleanup=True):
"""Obtain the kernel mapping.
:return: Kernel Mapping
:rtype: str
"""
kernel_list_file = (
setup.working_directory
+ os.sep
+ f"{setup.mission_acronym}_{setup.run_type}_"
f"{int(setup.release):02d}.kernel_list"
)
get_map = False
mapping = False
with open(kernel_list_file, 'r') as lst:
for line in lst:
if name in line:
get_map = True
if get_map and "MAPPING" in line:
mapping = line.split("=")[-1].strip()
get_map = False
if not cleanup:
setup = False
#
    # If cleanup is not being performed, a missing kernel mapping can be
    # intentional, so no error has to be reported in that case.
#
if not mapping and cleanup:
error_message(
f"{name} does not have mapping on {kernel_list_file}.",
setup=setup,
)
return mapping | 9c3d6dad6555587b0aa7ca0e2482b70e01f3e2c9 | 3,651,587 |
def remove_persons_with_few_joints(all_keypoints, min_total_joints=10, min_leg_joints=2, include_head=False):
"""Remove bad skeletons before sending to the tracker"""
good_keypoints = []
for keypoints in all_keypoints:
# include head point or not
total_keypoints = keypoints[5:, 1:] if not include_head else keypoints[:, 1:]
num_valid_joints = sum(total_keypoints!=0)[0] # number of valid joints
num_leg_joints = sum(total_keypoints[-7:-1]!=0)[0] # number of joints for legs
if num_valid_joints >= min_total_joints and num_leg_joints >= min_leg_joints:
good_keypoints.append(keypoints)
return np.array(good_keypoints) | 773e9317df75f5d4de12c574a3c599e2729bd427 | 3,651,588 |
def message_has_races(message):
"""
Checks to see if a message has a race kwarg.
"""
races = get_races_from_message(message)
return len(races) > 0 and races[0] != "" | e2f01498f8783d2c311e1e6e06f1e9cac3fe36a6 | 3,651,589 |
import re
def _find_word(input):
"""
_find_word - function to find words in the input sentence
Inputs:
- input : string
Input sentence
Outputs:
- outputs : list
List of words
"""
# lower case
input = input.lower()
# split by whitespace
input = re.split(pattern = '[\s]+', string = input)
# find words in WORD_POS pattern
valid_word = lambda x: True if re.findall(pattern = r'[a-z]*_[a-z]*', string = x) else False
outputs = []
for token in input:
if valid_word(token):
outputs.append(token.split('_')[0])
return outputs | c2e4aa6b5c127bf03593a9aa2c1ae035e83f5a64 | 3,651,590 |
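Only tokens in the word_POS pattern are kept, for example:
print(_find_word("The_DT cat_NN sat_VBD quietly"))   # ['the', 'cat', 'sat']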
def logp1_r_squared_linreg(y_true, y_pred):
"""Compute custom logp1 r squared ((follows the scipy linear regression implementation of R2).
Parameters
----------
y_true
y_true.
y_pred
y_pred.
Returns
-------
r2
"""
y_pred, _ = tf.split(y_pred, num_or_size_splits=2, axis=2)
x = tf.math.log(y_true + 1.0)
y = tf.math.log(y_pred + 1.0)
# means
xmean = tnp.mean(x)
ymean = tnp.mean(y)
ssxm = tnp.mean(tnp.square(x - xmean))
ssym = tnp.mean(tnp.square(y - ymean))
ssxym = tnp.mean((x - xmean) * (y - ymean))
# R-value
r = ssxym / tnp.sqrt(ssxm * ssym)
return r ** 2 | ea33ff1f16e9dcfd8ea4bdc27ca8388bd5086b1d | 3,651,591 |
from typing import Union
import json
def to_legacy_data_type(data_type: Union[JsonDict, dt.DataType]) -> JsonDict:
"""
Convert to simple datatypes ("String", "Long", etc) instead of JSON objects,
if possible.
The frontend expects the "type" field for enums and arrays to be lowercase.
"""
if not isinstance(data_type, dt.DataType):
return json.loads(data_type)
if data_type.is_simple:
return data_type.into_simple()
data = data_type.to_dict()
if data["type"] == "Enum":
data["type"] = "enum"
if data["type"] == "Array":
data["type"] = "array"
return data | 913c5e523ee74d86c3a64b98b291fb213513ae84 | 3,651,592 |
def display_dictionary(dictionary, renormalize=False, reshaping=None,
groupings=None, label_inds=False, highlighting=None,
plot_title=""):
"""
Plot each of the dictionary elements side by side
Parameters
----------
dictionary : ndarray(float32, size=(s, n) OR (s, c, kh, kw))
If the size of dictionary is (s, n), this is a 'fully-connected'
dictionary where each basis element has the same dimensionality as the
image it is trying to represent. n is the size of the image and s the
number of basis functions. If the size of dictionary is (s, c, kh, kw),
this is a 'convolutional' dictionary where each basis element is
(potentially much) smaller than the image it is trying to represent. c
is the number of channels that in the input space, kh is the dictionary
kernel height, and kw is the dictionary kernel width.
renormalize : bool, optional
If present, display basis functions on their own color scale, using
standardize_for_imshow() to put values in the range [0, 1]. Will
accentuate the largest-magnitude values in the dictionary element.
Default False.
reshaping : tuple(int, int), optional
Should only be specified for a fully-connected dictionary (where
dictionary.ndim==2). The dimension of each patch before vectorization
to size n. We reshape the dictionary elements based on this. Default None
label_inds : bool, optional
Supimpose the index into the dictionary of each element in the displayed
grid--helps with quick lookup/selection of individual dictionary
elements. Default False.
highlighting : dictionary, optional
This is used to re-sort and color code the dictionary elements according
to scalar weights. Has two keys:
'weights' : ndarray(float, size=(s,))
The weights for each dictionary element
'color_range': tuple(float, float)
Values less than or equal to highlighting['color_range'][0] get mapped
to dark blue, and values greater than or equal to
highlighting['color_range'][1] get mapped to dark red.
'reorder' : bool
Use the highlighting weights to reorder the dictionary.
Default None.
plot_title : str, optional
The title of the plot. Default ""
Returns
-------
dictionary_figs : list
A list containing pyplot figures. Can be saved separately, or whatever
from the calling function
"""
if groupings is None:
t_ims, raw_val_mapping, lab_w_pix_coords = get_dictionary_tile_imgs(
dictionary, reshape_to_these_dims=reshaping, indv_renorm=renormalize,
highlights=highlighting)
else:
t_ims = get_dictionary_tile_imgs_arr_by_group(dictionary, groupings,
indv_renorm=renormalize, reshape_to_these_dims=reshaping,
highlights=highlighting)
fig_refs = []
for fig_idx in range(len(t_ims)):
fig = plt.figure(figsize=(10, 10))
ax = plt.axes([0.075, 0.075, 0.85, 0.85]) # [bottom, left, height, width]
fig.suptitle(plot_title + ', fig {} of {}'.format(
fig_idx+1, len(t_ims)), fontsize=20)
im_ref = ax.imshow(t_ims[fig_idx], interpolation='None')
if label_inds and groupings is None:
for lab_and_coord in lab_w_pix_coords[fig_idx]:
ax.text(lab_and_coord[2], lab_and_coord[1], lab_and_coord[0],
fontsize=6, verticalalignment='top',
horizontalalignment='left', color='w')
ax.axis('off')
if not renormalize and groupings is None:
# add a luminance colorbar. Because there isn't good rgb colorbar
# support in pyplot I hack this by adding another image subplot
cbar_ax = plt.axes([0.945, 0.4, 0.01, 0.2])
gradient = np.linspace(1.0, 0.0, 256)[:, None]
cbar_ax.imshow(gradient, cmap='gray')
cbar_ax.set_aspect('auto')
cbar_ax.yaxis.tick_right()
cbar_ax.xaxis.set_ticks([])
cbar_ax.yaxis.set_ticks([255, 128, 0])
cbar_ax.yaxis.set_ticklabels(['{:.2f}'.format(x)
for x in raw_val_mapping], fontsize=8)
fig_refs.append(fig)
return fig_refs | 58e363f7f14ec9bc8b88613777ff446ae63feb85 | 3,651,593 |
def rbinary_search(arr, target, left=0, right=None):
"""Recursive implementation of binary search.
:param arr: input list
:param target: search item
:param left: left most item in the search sub-array
:param right: right most item in the search sub-array
:return: index of item if found `-1` otherwise
"""
right = len(arr) - 1 if right is None else right
#: base condition (search space is exhausted)
if left > right:
return UNSUCCESSFUL
mid = left + (right - left)//2
if arr[mid] < target:
#: focus on right subtree
result = rbinary_search(arr, target, mid+1, right)
elif arr[mid] > target:
#: focus on left subtree
result = rbinary_search(arr, target, left, mid-1)
else:
result = mid
return result | 23da6b29c122efe77c0dc592d2bfc42f324b1799 | 3,651,594 |
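A short usage check; UNSUCCESSFUL is assumed to be a module-level sentinel such as -1:
data = [2, 5, 7, 11, 13, 17]
print(rbinary_search(data, 11))   # 3
print(rbinary_search(data, 4))    # UNSUCCESSFUL (e.g. -1)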
def get_redis_posts(author: str) -> (str, str):
"""Return user's first and other post IDs
Retrieve the user's first and other post IDs from Redis,
then return them as a tuple in the form (first, extra)
:param author: The username to get posts for
:return: Tuple of the first and other post IDs
"""
return r.lindex(author, 0), r.lrange(author, 1, -1) | 3653a1bdbc3cde8614098a705ae7f11de850165f | 3,651,595 |
from datetime import datetime
def template_localtime(value, use_tz=None):
"""
Checks if value is a datetime and converts it to local time if necessary.
If use_tz is provided and is not None, that will force the value to
be converted (or not), overriding the value of settings.USE_TZ.
This function is designed for use by the template engine.
"""
should_convert = (isinstance(value, datetime)
and (settings.USE_TZ if use_tz is None else use_tz)
and not is_naive(value)
and getattr(value, 'convert_to_local_time', True))
return localtime(value) if should_convert else value | 7042696ae5291248ee2a2d56dcc5e943ccec92d8 | 3,651,596 |
def FilesBrowse(button_text='Browse', target=(ThisRow, -1), file_types=(("ALL Files", "*.*"),), disabled=False,
initial_folder=None, tooltip=None, size=(None, None), auto_size_button=None, button_color=None,
change_submits=False, enable_events=False,
font=None, pad=None, key=None):
"""
:param button_text: text in the button (Default value = 'Browse')
:param target: key or (row,col) target for the button (Default value = (ThisRow, -1))
:param file_types: (Default value = (("ALL Files", "*.*")))
:param disabled: set disable state for element (Default = False)
:param initial_folder: starting path for folders and files
:param tooltip: (str) text, that will appear when mouse hovers over the element
:param size: (w,h) w=characters-wide, h=rows-high
:param auto_size_button: True if button size is determined by button text
:param button_color: button color (foreground, background)
:param change_submits: If True, pressing Enter key submits window (Default = False)
:param enable_events: Turns on the element specific events.(Default = False)
:param font: Union[str, Tuple[str, int]] specifies the font family, size, etc
:param pad: Amount of padding to put around element
:param key: Used with window.FindElement and with return values to uniquely identify this element
"""
return Button(button_text=button_text, button_type=BUTTON_TYPE_BROWSE_FILES, target=target, file_types=file_types,
initial_folder=initial_folder, change_submits=change_submits, enable_events=enable_events,
tooltip=tooltip, size=size, auto_size_button=auto_size_button,
disabled=disabled, button_color=button_color, font=font, pad=pad, key=key) | d712e5e41afa1d09482971864ce1b9af66332394 | 3,651,597 |
import os
def ensure_sudo() -> str:
"""ensures user is root and SUDO_USER is in os.environ,
:returns: the real username (see real_username())
"""
# if we aren't root, or don't have access to host environment variables...
username = real_username()
uid = os.getuid() # pylint: disable=no-member
if username == "root":
# this could happen with sudo su, for example
raise EnvironmentError("Could not look up SUDO_USER")
if uid != 0:
raise PermissionError("this script needs sudo")
return username | dc32713b77a4ae908f6cf36873075de52bfd3589 | 3,651,598 |
def f2p(phrase, max_word_size=15, cutoff=3):
"""Convert a Finglish phrase to the most probable Persian phrase.
"""
results = f2p_list(phrase, max_word_size, cutoff)
return ' '.join(i[0][0] for i in results) | 51a6f518481097bbba49685f32fb87ed65cc19ec | 3,651,599 |