content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
---|---|---|
import re
def glycan_to_graph(glycan, libr = None):
"""the monumental function for converting glycans into graphs\n
| Arguments:
| :-
| glycan (string): IUPAC-condensed glycan sequence (string)
| libr (list): sorted list of unique glycoletters observed in the glycans of our dataset\n
| Returns:
| :-
| (1) a list of labeled glycoletters from the glycan / node list
| (2) two lists to indicate which glycoletters are connected in the glycan graph / edge list
"""
if libr is None:
libr = lib
bracket_count = glycan.count('[')
parts = []
branchbranch = []
branchbranch2 = []
position_bb = []
b_counts = []
bb_count = 0
#checks for branches-within-branches and handles them
if bool(re.search('\[[^\]]+\[', glycan)):
double_pos = [(k.start(),k.end()) for k in re.finditer('\[[^\]]+\[', glycan)]
for spos, pos in double_pos:
bracket_count -= 1
glycan_part = glycan[spos+1:]
glycan_part = glycan_part[glycan_part.find('['):]
idx = [k.end() for k in re.finditer('\][^\(]+\(', glycan_part)][0]
idx2 = [k.start() for k in re.finditer('\][^\(]+\(', glycan_part)][0]
branchbranch.append(glycan_part[:idx-1].replace(']','').replace('[',''))
branchbranch2.append(glycan[pos-1:])
glycan_part = glycan[:pos-1]
b_counts.append(glycan_part.count('[')-bb_count)
glycan_part = glycan_part[glycan_part.rfind('[')+1:]
position_bb.append(glycan_part.count('(')*2)
bb_count += 1
for b in branchbranch2:
glycan = glycan.replace(b, ']'.join(b.split(']')[1:]))
main = re.sub("[\[].*?[\]]", "", glycan)
position = []
branch_points = [x.start() for x in re.finditer('\]', glycan)]
for i in branch_points:
glycan_part = glycan[:i+1]
glycan_part = re.sub("[\[].*?[\]]", "", glycan_part)
position.append(glycan_part.count('(')*2)
parts.append(main)
for k in range(1,bracket_count+1):
start = find_nth(glycan, '[', k) + 1
#checks whether glycan continues after branch
if bool(re.search("[\]][^\[]+[\(]", glycan[start:])):
#checks for double branches and removes second branch
if bool(re.search('\]\[', glycan[start:])):
glycan_part = re.sub("[\[].*?[\]]", "", glycan[start:])
end = re.search("[\]].*?[\(]", glycan_part).span()[1] - 1
parts.append(glycan_part[:end].replace(']',''))
else:
end = re.search("[\]].*?[\(]", glycan[start:]).span()[1] + start -1
parts.append(glycan[start:end].replace(']',''))
else:
if bool(re.search('\]\[', glycan[start:])):
glycan_part = re.sub("[\[].*?[\]]", "", glycan[start:])
end = len(glycan_part)
parts.append(glycan_part[:end].replace(']',''))
else:
end = len(glycan)
parts.append(glycan[start:end].replace(']',''))
try:
for bb in branchbranch:
parts.append(bb)
except:
pass
parts = min_process_glycans(parts)
parts_lengths = [len(j) for j in parts]
parts_tokenized = [string_to_labels(k, libr) for k in parts]
parts_tokenized = [parts_tokenized[0]] + [parts_tokenized[k][:-1] for k in range(1,len(parts_tokenized))]
parts_tokenized = [item for sublist in parts_tokenized for item in sublist]
range_list = list(range(len([item for sublist in parts for item in sublist])))
init = 0
parts_positions = []
for k in parts_lengths:
parts_positions.append(range_list[init:init+k])
init += k
for j in range(1,len(parts_positions)-len(branchbranch)):
parts_positions[j][-1] = position[j-1]
for j in range(1, len(parts_positions)):
try:
for z in range(j+1,len(parts_positions)):
parts_positions[z][:-1] = [o-1 for o in parts_positions[z][:-1]]
except:
pass
try:
for i,j in enumerate(range(len(parts_positions)-len(branchbranch), len(parts_positions))):
parts_positions[j][-1] = parts_positions[b_counts[i]][position_bb[i]]
except:
pass
pairs = []
for i in parts_positions:
pairs.append([(i[m],i[m+1]) for m in range(0,len(i)-1)])
pairs = list(zip(*[item for sublist in pairs for item in sublist]))
return parts_tokenized, pairs | b709cd064cc97159e7bf19b90c3dab2016fbc786 | 4,886 |
def run(ex: "interactivity.Execution") -> "interactivity.Execution":
"""Exit the shell."""
ex.shell.shutdown = True
return ex.finalize(
status="EXIT",
message="Shutting down the shell.",
echo=True,
) | 7ab7bbe8b1c276c1b84963c3a8eb9a1bdb79888c | 4,887 |
import pandas_ta  # registers the DataFrame .ta accessor used for ta.donchian below
def get_features(df, row = False):
""" Transform the df into a df with basic features and dropna"""
df_feat = df
df_feat['spread'] = df_feat['high'] - df_feat['low']
df_feat['upper_shadow'] = upper_shadow(df_feat)
df_feat['lower_shadow'] = lower_shadow(df_feat)
df_feat['close-open'] = df_feat['close'] - df_feat['open']
df_feat['SMA_7'] = df_feat.iloc[:,1].rolling(window=7).mean()
df_feat['SMA_14'] = df_feat.iloc[:,1].rolling(window=14).mean()
df_feat['SMA_21'] = df_feat.iloc[:,1].rolling(window=21).mean()
# Create the STD_DEV feature for the past 7 days
df_feat['STD_DEV_7'] = df_feat.iloc[:,1].rolling(window=7).std()
# Features from ta-lib as example
df_feat.ta.donchian(lower_length=10, upper_length=15, append=True)
# Drop the NA rows created by the SMA indicators
df_feat.dropna(inplace = True)
return df_feat | 42e9c54a3357634cc74878909f2f8a33cfc6ee0c | 4,888 |
import torch
def match_prob_for_sto_embed(sto_embed_word, sto_embed_vis):
"""
Compute match probability for two stochastic embeddings
:param sto_embed_word: (batch_size, num_words, hidden_dim * 2)
:param sto_embed_vis: (batch_size, num_words, hidden_dim * 2)
:return (batch_size, num_words)
"""
assert not bool(torch.isnan(sto_embed_word).any()) and not bool(torch.isnan(sto_embed_vis).any())
batch_size = sto_embed_word.shape[0]
num_words = sto_embed_word.shape[1]
mu_word, var_word = torch.split(sto_embed_word, DIM_EMBED, dim=-1)
mu_vis, var_vis = torch.split(sto_embed_vis, DIM_EMBED, dim=-1)
if cfg.metric == 'monte-carlo':
k = SAMPLING_K
z_word = batch_rsample(mu_word, var_word, k) # (batch_size, num_words, k, hidden_dim)
z_vis = batch_rsample(mu_vis, var_vis, k) # (batch_size, num_words, k, hidden_dim)
num_samples = k
z_word = z_word.unsqueeze(3).repeat([1, 1, 1, k, 1]) # (batch_size, num_words, k, k, hidden_dim)
z_vis = z_vis.repeat([1, 1, k, 1]).reshape(list(z_vis.shape[:2]) + [k, k, -1]) # (batch_size, num_words, k, k, hidden_dim)
if z_vis.shape[1] == 1:
z_vis = z_vis.repeat([1, num_words, 1, 1, 1]) # (batch_size, num_words, k, k, hidden_dim)
# Compute probabilities for all pair combinations
match_prob = - torch.sqrt(torch.sum((z_word - z_vis) ** 2, dim=-1))
match_prob = match_prob.sum(-1).sum(-1) / (num_samples ** 2)
if k > 1 and batch_size > 1 and num_words > 0:
assert bool(torch.all(z_word[0, 0, 0, 0] == z_word[0, 0, 0, 1]))
assert bool(torch.all(z_vis[0, 0, 0, 0] == z_vis[0, 0, 1, 0]))
if sto_embed_vis.shape[1] == 1 and num_words > 1:
assert bool(torch.all(z_vis[0, 0] == z_vis[0, 1]))
elif cfg.metric == 'w-distance':
match_prob = torch.sum((mu_word - mu_vis) ** 2 + (torch.sqrt(var_word) - torch.sqrt(var_vis)) ** 2, dim=-1)
else:
raise ValueError('Unexpected metric type')
assert match_prob.shape == (batch_size, num_words)
return match_prob | 0668f4bc6e3112cd63d33fcb5612368604724359 | 4,889 |
import functools
def POST(path):
"""
Define decorator @post('/path'):
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
return func(*args, **kw)
wrapper.__method__ = 'POST'
wrapper.__route__ = path
return wrapper
return decorator | d2c76d57687dc0983d2f00995c7a6e6414e8201b | 4,890 |
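A quick usage sketch for the decorator above; the handler name and path are illustrative only:
@POST('/api/comments')
def create_comment(text):
    return {'text': text}
assert create_comment.__method__ == 'POST'
assert create_comment.__route__ == '/api/comments'
assert create_comment('hi') == {'text': 'hi'}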
def BatchNorm(
inputs, axis=-1, momentum=0.9, eps=1e-5,
use_stats=-1, **kwargs):
"""Batch Normalization. `[Ioffe & Szegedy, 2015] <https://arxiv.org/abs/1502.03167>`_.
    We enforce that the number of inputs is *5*, i.e.,
    it is implemented as a fused version.
    However, you can still fix the *gamma* and *beta*
    by disabling their gradients directly.
**Type Constraints**: (*float16*, *float32*)
Parameters
----------
inputs : sequence of Tensor
The inputs, represent [x, mean, var, gamma, beta].
axis : int, optional
The channel axis.
    momentum : float, optional, default=0.9
The momentum of moving average.
eps : float, optional, default=1e-5
The eps.
use_stats : int, optional, default=-1
Whether to use global stats.
Returns
-------
Tensor
The output tensor, calculated as:
|batchnorm_scale_function|
The moving average of mean/var, calculated as:
|default_moving_average_function|
"""
return Tensor.CreateOperator('BatchNorm', **ParseArgs(locals())) | 642e8ee5cafdf6a416febaff1ddcae5190e27cb1 | 4,891 |
def problem_from_graph(graph):
""" Create a problem from the given interaction graph. For each interaction (i,j), 0 <= i <= j <= 1 is added. """
n = graph.vcount()
domain = Domain.make([], [f"x{i}" for i in range(n)], real_bounds=(0, 1))
X = domain.get_symbols()
support = smt.And(*((X[e.source] <= X[e.target]) for e in graph.es))
return Density(domain, support & domain.get_bounds(), smt.Real(1)) | 973602abf8a20ffe45a5bcae6ec300ba749ab6d9 | 4,892 |
import numpy as np
def rotate_points(x, y, x0, y0, phi):
"""
Rotate x and y around designated center (x0, y0).
Args:
x: x-values of point or array of points to be rotated
y: y-values of point or array of points to be rotated
x0: horizontal center of rotation
y0: vertical center of rotation
phi: angle to rotate (+ is ccw) in radians
Returns:
x, y: locations of rotated points
"""
xp = x - x0
yp = y - y0
s = np.sin(-phi)
c = np.cos(-phi)
xf = xp * c - yp * s
yf = xp * s + yp * c
xf += x0
yf += y0
return xf, yf | 8058385185e937d13e2fd17403b7653f3a5f55e7 | 4,893 |
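A small sanity check for the rotation helper above (numpy assumed, as imported with it); a half-turn is used so the example is independent of the sign convention:
import numpy as np
xf, yf = rotate_points(np.array([2.0]), np.array([0.0]), 1.0, 0.0, np.pi)
# a half turn about (1, 0) sends (2, 0) back to the origin
assert np.allclose([xf[0], yf[0]], [0.0, 0.0], atol=1e-12)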
from typing import List
from datetime import date
def xpme(
dates: List[date],
cashflows: List[float],
prices: List[float],
pme_prices: List[float],
) -> float:
"""Calculate PME for unevenly spaced / scheduled cashflows and return the PME IRR
only.
"""
return verbose_xpme(dates, cashflows, prices, pme_prices)[0] | 4301e1e95ab4eee56a3644b132a400522a5ab173 | 4,894 |
def get_node(uuid):
"""Get node from cache by it's UUID.
:param uuid: node UUID.
:returns: structure NodeInfo.
"""
row = _db().execute('select * from nodes where uuid=?', (uuid,)).fetchone()
if row is None:
raise utils.Error('Could not find node %s in cache' % uuid, code=404)
return NodeInfo.from_row(row) | 87988d0c0baa665f1fcd86991253f8fe0cba96a1 | 4,895 |
def bisect_jump_time(tween, value, b, c, d):
"""
    Return t for a given value using bisection.
    Assumes the tween is monotonic in t; does not work for whacky curves.
"""
max_iter = 20
resolution = 0.01
iter = 1
lower = 0
upper = d
    while iter < max_iter:
        t = (lower + upper) / 2
        if abs(tween(t, b, c, d) - value) < resolution:
            return t
        elif tween(t, b, c, d) < value:
            lower = t
        else:
            upper = t
        iter += 1
    return t
from flask import make_response, jsonify
def __session_kill():
"""
unset session on the browser
Returns:
a 200 HTTP response with set-cookie to "expired" to unset the cookie on the browser
"""
res = make_response(jsonify(__structure(status="ok", msg=messages(__language(), 166))))
res.set_cookie("key", value="expired")
return res | 9313e46e2dcd297444efe08c6f24cb4150349fdb | 4,897 |
from flask import render_template
def register_errorhandlers(app):
"""Register error handlers."""
def render_error(error):
"""Render error template."""
# If a HTTPException, pull the `code` attribute; default to 500
error_code = getattr(error, "code", 500)
return render_template(f"errors/{error_code}.html"), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None | e8e3ddf10fb6c7a370c252c315888c91b26f6503 | 4,898 |
def register(key):
"""Register callable object to global registry.
This is primarily used to wrap classes and functions into the bcdp pipeline.
It is also the primary means for which to customize bcdp for your own
usecases when overriding core functionality is required.
Parameters
----------
key : str
Key for obj in registry. Append periods ('.') to navigate the registry
tree. Example: 'data_source.rcmed'
Returns
-------
dec : function
Generic decorator which returns the wrapped class or function.
"""
def dec(obj):
registry[key] = obj
return obj
return dec | 4f3d7d9e8b49d448d338408de8ceebee58136893 | 4,899 |
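A hypothetical usage sketch, assuming `registry` is the module-level dict this decorator writes into:
@register('data_source.rcmed')
class RCMEDSource:
    pass
assert registry['data_source.rcmed'] is RCMEDSource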
import ctypes
import six
def _windows_long_path_name(short_path):
"""Use Windows' `GetLongPathNameW` via ctypes to get the canonical,
long path given a short filename.
"""
if not isinstance(short_path, six.text_type):
short_path = short_path.decode(_fsencoding())
buf = ctypes.create_unicode_buffer(260)
get_long_path_name_w = ctypes.windll.kernel32.GetLongPathNameW
return_value = get_long_path_name_w(short_path, buf, 260)
if return_value == 0 or return_value > 260:
# An error occurred
return short_path
else:
long_path = buf.value
# GetLongPathNameW does not change the case of the drive
# letter.
if len(long_path) > 1 and long_path[1] == ':':
long_path = long_path[0].upper() + long_path[1:]
return long_path | 72d6b9fc1fb8acd6285019a8d48ea42e847ce8db | 4,900 |
import tf_agents as tfa
from tf_agents.agents.behavioral_cloning import behavioral_cloning_agent
from tf_agents.typing import types
def _create_behavioral_cloning_agent(
time_step_spec: types.NestedTensorSpec, action_spec: types.NestedTensorSpec,
preprocessing_layers: types.NestedLayer,
policy_network: types.Network) -> tfa.agents.TFAgent:
"""Creates a behavioral_cloning_agent."""
network = policy_network(
time_step_spec.observation,
action_spec,
preprocessing_layers=preprocessing_layers,
name='QNetwork')
return behavioral_cloning_agent.BehavioralCloningAgent(
time_step_spec, action_spec, cloning_network=network, num_outer_dims=2) | c3420767aaa153ef44054fdb4fbdcc9540d59775 | 4,901 |
def sidequery():
"""Serves AJAX call for HTML content for the sidebar (*query* **record** page).
Used when the user is switching between **material** and **record** pages.
See also [M:RECORD.body][record.RECORD.body].
Client code: [{sidecontent.fetch}][sidecontentfetch].
"""
session.forget(response)
Query = QUERY()
Record = RECORDQUERY(Query)
return Record.body() | 6f5c6660f25e568ea4fa2ad046eac5a57cb4f7e5 | 4,903 |
import pandas
import numpy
def random_answers_2020_ml():
"""
Generates random answers the machine learning challenge of
hackathons :ref:`l-hackathon-2020`.
"""
df = pandas.DataFrame({"index": numpy.arange(473333)})
df['label'] = numpy.random.randint(low=0, high=2, size=(df.shape[0], ))
df['score'] = numpy.random.random((df.shape[0], ))
return df | 24a721c1c8e512ade6293644eff61b0866c3f0fe | 4,905 |
def _sizeof_fmt(num, suffix='B'):
"""Format a number as human readable, based on 1024 multipliers.
Suited to be used to reformat a size expressed in bytes.
By Fred Cirera, after https://stackoverflow.com/a/1094933/1870254
Args:
num (int): The number to be formatted.
suffix (str): the measure unit to append at the end of the formatted
number.
Returns:
str: The formatted number including multiplier and measure unit.
"""
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix) | c70c9ce46f6b391e2389329a6fcd50bf863ea041 | 4,907 |
import numpy as np
def num_cluster_members(matrix, identity_threshold):
"""
Calculate number of sequences in alignment
within given identity_threshold of each other
Parameters
----------
matrix : np.array
N x L matrix containing N sequences of length L.
Matrix must be mapped to range(0, num_symbols) using
map_matrix function
identity_threshold : float
Sequences with at least this pairwise identity will be
grouped in the same cluster.
Returns
-------
np.array
Vector of length N containing number of cluster
members for each sequence (inverse of sequence
weight)
"""
N, L = matrix.shape
L = 1.0 * L
# minimal cluster size is 1 (self)
num_neighbors = np.ones((N))
# compare all pairs of sequences
for i in range(N - 1):
for j in range(i + 1, N):
pair_id = 0
for k in range(L):
if matrix[i, k] == matrix[j, k]:
pair_id += 1
if pair_id / L >= identity_threshold:
num_neighbors[i] += 1
num_neighbors[j] += 1
return num_neighbors | e9034a728b22f7a594ef7842f2a4039559751e21 | 4,911 |
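A tiny worked example (numpy as imported above); with a 75% identity threshold the first two sequences cluster together and the third stands alone:
import numpy as np
m = np.array([[0, 1, 2, 0],
              [0, 1, 2, 1],
              [3, 3, 3, 3]])
# rows 0 and 1 agree on 3/4 positions (>= 0.75); row 2 matches nothing
assert list(num_cluster_members(m, identity_threshold=0.75)) == [2.0, 2.0, 1.0]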
from subprocess import check_output
def get_score(command: str) -> float:
"""Get pylint score"""
output = check_output(command, shell=True).decode("utf-8")
start = output.find("Your code has been rated at ")
if start == -1:
raise ValueError(f'Could not find quality score in "{output.rstrip()}".')
start += len("Your code has been rated at ")
end = start + output[start:].find("/")
score = float(output[start:end])
return score | d32b6f9496033d4c2b569ebc7403be43bb43ceb1 | 4,912 |
import json
import numpy as np
def __process_input(request_data: str) -> np.ndarray:
"""
Converts input request data into numpy array
:param request_data in json format
:return: numpy array
"""
return np.asarray(json.loads(request_data)["input"]) | 7639018f69a4e72568cdf86abc503133ebd734af | 4,914 |
import math
def getDist_P2L(PointP,Pointa,Pointb):
"""计算点到直线的距离
PointP:定点坐标
Pointa:直线a点坐标
Pointb:直线b点坐标
"""
#求直线方程
A=0
B=0
C=0
A=Pointa[1]-Pointb[1]
B=Pointb[0]-Pointa[0]
C=Pointa[0]*Pointb[1]-Pointa[1]*Pointb[0]
#代入点到直线距离公式
distance=0
distance=(A*PointP[0]+B*PointP[1]+C)/math.sqrt(A*A+B*B)
return distance | ca0ec1fc25183a240179faef7473d7b86758a92b | 4,915 |
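Two quick checks of the helper above; the sign indicates which side of the line the point lies on:
# distance from the point (1, 1) to the horizontal line through (0, 0) and (2, 0)
assert abs(getDist_P2L((1, 1), (0, 0), (2, 0)) - 1.0) < 1e-12
# the mirrored point (1, -1) gives -1.0
assert abs(getDist_P2L((1, -1), (0, 0), (2, 0)) + 1.0) < 1e-12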
import mxnet as mx
def skipgram_batch(centers, contexts, num_tokens, dtype, index_dtype):
"""Create a batch for SG training objective."""
contexts = mx.nd.array(contexts[2], dtype=index_dtype)
indptr = mx.nd.arange(len(centers) + 1)
centers = mx.nd.array(centers, dtype=index_dtype)
centers_csr = mx.nd.sparse.csr_matrix(
(mx.nd.ones(centers.shape), centers, indptr), dtype=dtype,
shape=(len(centers), num_tokens))
return centers_csr, contexts, centers | e16c7ffd6c4f18e247a885de0b7477ddfa5ed02c | 4,916 |
import numpy as np
from astropy import coordinates as crd
from astropy import units as unt
from astropy.time import Time
def JD2RA(JD, longitude=21.42830, latitude=-30.72152, epoch='current'):
"""
Convert from Julian date to Equatorial Right Ascension at zenith
during a specified epoch.
Parameters:
-----------
JD : type=float, a float or an array of Julian Dates
longitude : type=float, longitude of observer in degrees east, default=HERA longitude
latitude : type=float, latitude of observer in degrees north, default=HERA latitutde
This only matters when using epoch="J2000"
epoch : type=str, epoch for RA calculation. options=['current', 'J2000'].
The 'current' epoch is the epoch at JD. Note that
LST is defined as the zenith RA in the current epoch. Note that
epoch='J2000' corresponds to the ICRS standard.
Output:
-------
RA : type=float, right ascension [degrees] at zenith JD times
in the specified epoch.
"""
# get JD type
if isinstance(JD, list) or isinstance(JD, np.ndarray):
_array = True
else:
_array = False
JD = [JD]
# setup RA list
RA = []
# iterate over jd
for jd in JD:
# use current epoch calculation
if epoch == 'current':
ra = JD2LST(jd, longitude=longitude) * 180 / np.pi
RA.append(ra)
# use J2000 epoch
elif epoch == 'J2000':
loc = crd.EarthLocation(lat=latitude * unt.deg, lon=longitude * unt.deg)
t = Time(jd, format='jd', scale='utc')
zen = crd.SkyCoord(frame='altaz', alt=90 * unt.deg, az=0 * unt.deg, obstime=t, location=loc)
RA.append(zen.icrs.ra.degree)
else:
raise ValueError("didn't recognize {} epoch".format(epoch))
RA = np.array(RA)
if _array:
return RA
else:
return RA[0] | 14bb4d621449a7fd9fa57acecb107aaa4ea61010 | 4,917 |
import tensorflow as tf
def _average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list is
over individual gradients. The inner list is over the gradient calculation
for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
print(len(grad_and_vars))
for g, v in grad_and_vars:
if g is None:
print(v)
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
print(len(grad_and_vars))
for g, v in grad_and_vars:
if g is not None:
print(v)
for g, v in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
print(v)
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
capped_grad = tf.clip_by_value(grad, -200., 200.)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (capped_grad, v)
average_grads.append(grad_and_var)
return average_grads | 24be75daeeb1d5878a9a481c43be440ced40a0a2 | 4,918 |
import time
def get_online_users(guest=False): # pragma: no cover
"""Returns all online users within a specified time range
:param guest: If True, it will return the online guests
"""
current = int(time.time()) // 60
minutes = range_method(flaskbb_config['ONLINE_LAST_MINUTES'])
if guest:
return redis_store.sunion(['online-guests/%d' % (current - x)
for x in minutes])
return redis_store.sunion(['online-users/%d' % (current - x)
for x in minutes]) | 39ad71b71e8a8caac0e6a82b7992c6229f85d255 | 4,920 |
from typing import Union
def reverse_bearing(bearing: Union[int, float]):
"""
180 degrees from supplied bearing
:param bearing:
:return:
"""
assert isinstance(bearing, (float, int))
assert 0. <= bearing <= 360.
new_bearing = bearing + 180.
# Ensure strike is between zero and 360 (bearing)
return normalize_bearing(new_bearing) | 1fd01df40a23c52ff093c17fd5752f0609cee761 | 4,921 |
from django.shortcuts import render
def signUp_page(request):
"""load signUp page"""
return render(request, 'app/signUp_page.html') | ae28acac27264dbb8d2f6a69afb01c6f96a08218 | 4,922 |
import csv
def read_students(path):
""" Read a tab-separated file of students. The only required field is 'github_repo', which is this
student's github repository. """
students = [line for line in csv.DictReader(open(path), delimiter='\t')]
check_students(students)
return students | e64aeb1a73fb79e91d0464d6a95e509d3cc60b94 | 4,924 |
from typing import Tuple
import torch
from torch import Tensor
def get_extended_attention_mask(
attention_mask: Tensor,
input_shape: Tuple[int],
device: torch.device,
is_decoder=False,
) -> Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
:obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = (
seq_ids[None, None, :].repeat(batch_size, seq_length, 1)
<= seq_ids[None, :, None]
)
# in case past_key_values are used we need to add a prefix ones mask to the causal mask
# causal and attention masks must have same type with pytorch version < 1.3
causal_mask = causal_mask.to(attention_mask.dtype)
if causal_mask.shape[1] < attention_mask.shape[1]:
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
causal_mask = torch.cat(
[
torch.ones(
(batch_size, seq_length, prefix_seq_len),
device=device,
dtype=causal_mask.dtype,
),
causal_mask,
],
axis=-1,
)
extended_attention_mask = (
causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
)
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
# extended_attention_mask = extended_attention_mask.to(
# dtype=self.dtype
# ) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask | 26104733e3cc970536a11c3930866dc3d11d3583 | 4,925 |
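A minimal sketch of the encoder (non-decoder) path above: padded positions map to -10000.0 and attended positions to 0.0. Batch size, sequence length and mask values are illustrative:
import torch
mask = torch.tensor([[1.0, 1.0, 0.0]])  # batch of 1, last token padded
ext = get_extended_attention_mask(mask, input_shape=(1, 3), device=mask.device)
assert ext.shape == (1, 1, 1, 3)
assert ext[0, 0, 0, 0].item() == 0.0 and ext[0, 0, 0, 2].item() == -10000.0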
def adler32(string, start=ADLER32_DEFAULT_START):
"""
Compute the Adler-32 checksum of the string, possibly with the given
start value, and return it as a unsigned 32 bit integer.
"""
return _crc_or_adler(string, start, _adler32) | ed4a0905b4891ee931ef08f91e92032a613caee7 | 4,928 |
import numpy as np
def find_earliest_brs_idx(g: Grid, V: np.ndarray, state: np.ndarray, low: int, high: int) -> int:
"""
Determines the earliest time the current state is in the reachable set
Args:
g: Grid
V: Value function
state: state of dynamical system
low: lower bound of search range (inclusive)
high: upper bound of search range (inclusive)
Returns:
t: Earliest time where the state is in the reachable set
"""
epsilon = 1e-4
while low < high:
        mid = int(np.ceil((high + low) / 2))
value = g.get_value(V[..., mid], state)
if value < epsilon:
low = mid
else:
high = mid - 1
return low | 234a201af98f74c41785a36b3391e23700ac80e6 | 4,929 |
def italic(s):
"""Returns the string italicized.
Source: http://stackoverflow.com/a/16264094/2570866
"""
return r'\textit{' + s + '}' | 7eb9e9629e8556e9410e4d92525dd8c06c3e25de | 4,930 |
import functools
def skip_if_disabled(func):
"""Decorator that skips a test if test case is disabled."""
@functools.wraps(func)
def wrapped(*a, **kwargs):
func.__test__ = False
test_obj = a[0]
message = getattr(test_obj, 'disabled_message',
'Test disabled')
if getattr(test_obj, 'disabled', False):
test_obj.skipTest(message)
func(*a, **kwargs)
return wrapped | 56d42a1e0418f4edf3d4e8478358495b1353f57a | 4,931 |
from typing import Any
import itertools
def _compare_keys(target: Any, key: Any) -> bool:
"""
Compare `key` to `target`.
Return True if each value in `key` == corresponding value in `target`.
If any value in `key` is slice(None), it is considered equal
to the corresponding value in `target`.
"""
if not isinstance(target, tuple):
return target == key
for k1, k2 in itertools.zip_longest(target, key, fillvalue=None):
if k2 == slice(None):
continue
if k1 != k2:
return False
return True | ff5c60fab8ac0cbfe02a816ec78ec4142e32cfbf | 4,932 |
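A few spot checks of the wildcard matching above:
assert _compare_keys((1, 2), (1, slice(None))) is True   # slice(None) acts as a wildcard
assert _compare_keys((1, 2), (1, 3)) is False
assert _compare_keys('a', 'a') is True                   # non-tuple targets compare directly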
import xarray as xr
import aux_functions_strat as aux
def predict_xr(result_ds, regressors):
"""input: results_ds as came out of MLR and saved to file, regressors dataset"""
    # if produce_RI isn't called on the data then you should explicitly put time info
rds = result_ds
regressors = regressors.sel(time=rds.time) # slice
regressors = regressors.apply(aux.normalize_xr, norm=1, verbose=False) # normalize
reg_dict = dict(zip(rds.regressors.values, regressors.data_vars.values()))
# make sure that all the regressors names are linking to their respective dataarrays
for key, value in reg_dict.items():
# print(key, value)
assert value.name == key
reg_da = xr.concat(reg_dict.values(), dim='regressors')
reg_da['regressors'] = list(reg_dict.keys())
reg_da.name = 'regressors_time_series'
rds['predicted'] = xr.dot(rds.params, reg_da) + rds.intercept
rds = aux.xr_order(rds)
    # returns the same dataset but with the total predicted reconstructed geo-time-series field
result_ds = rds
return result_ds | 458f3f9a17d9cc16200f1eb0e20eb2a43f095ea0 | 4,934 |
async def list_features(location_id):
"""
List features
---
get:
summary: List features
tags:
- features
parameters:
- name: envelope
in: query
required: false
description: If set, the returned list will be wrapped in an envelope with this name.
responses:
200:
description: A list of objects.
content:
application/json:
schema:
type: array
items: Feature
"""
location = g.active_incident.Location.find_by_id(location_id)
if location is None:
raise exceptions.NotFound(description="Location {} was not found".format(location_id))
features = location.Feature.find()
# Wrap the list if the caller requested an envelope.
query = request.args
if "envelope" in query:
result = {query.get("envelope"): features}
else:
result = features
return jsonify(result), HTTPStatus.OK | 0698a63af10a70cc2ae0b8734dff61fb51786829 | 4,935 |
def shot_start_frame(shot_node):
"""
Returns the start frame of the given shot
:param shot_node: str
:return: int
"""
return sequencer.get_shot_start_frame(shot_node) | f13582040ad188b6be8217a7657ce53c145fe090 | 4,936 |
def word1(x: IntVar) -> UInt8:
"""Implementation for `WORD1`."""
return word_n(x, 1) | 0cc7e254c48596d190ccb43a0e0d3c90b18f34af | 4,937 |
def _getCols1():
"""
Robs Version 1 CSV files
"""
cols = 'Date,DOY,Time,Location,Satellite,Collection,Longitude,Latitude,SolarZenith,SolarAzimuth,SensorZenith,SensorAzimuth,ScatteringAngle,nval_AOT_1020_l20,mean_AOT_1020_l20,mean_AOT_870_l20,mean_AOT_675_l20,sdev_AOT_675_l20,mean_AOT_500_l20,mean_AOT_440_l20,mean_AOT_380_l20,mean_AOT_340_l20,mean_Water_cm_l20,nval_AOT_1020_l15,mean_AOT_1020_l15,mean_AOT_870_l15,mean_AOT_675_l15,sdev_AOT_675_l15,mean_AOT_500_l15,mean_AOT_440_l15,mean_AOT_380_l15,mean_AOT_340_l15,mean_Water_cm_l15,npix_AOT0550,mean_AOT0550,sdev_AOT0550,mean_rAOTse0550,sdev_rAOTse0550,mean_AOT0470corr_l,npix_AOT0550corr_l,pval_AOT0550corr_l,mean_AOT0550corr_l,sdev_AOT0550corr_l,mean_AOT0660corr_l,mean_AOT2100corr_l,mean_rAOTse0550_l,pval_rAOTse0550_l,mean_AOT0550sm_l,pval_AOT0550sm_l,mean_Aexp0470_0670_l,mean_surfre0470_l,mean_surfre0660_l,mean_surfre2100_l,mean_fiterr_l,mean_atype_l,mean_cfrac_l,mean_mconc_l,QA0470_l,mean_mref0470_l,mean_mref0550_l,mean_mref0660_l,mean_mref0870_l,mean_mref1200_l,mean_mref1600_l,mean_mref2100_l,pval_mref0470_l,pval_mref0550_l,pval_mref0660_l,pval_mref0870_l,pval_mref1200_l,pval_mref1600_l,pval_mref2100_l,mean_AOT0470ea_o,npix_AOT0550ea_o,pval_AOT0550ea_o,mean_AOT0550ea_o,sdev_AOT0550ea_o,mean_AOT0660ea_o,mean_AOT0870ea_o,mean_AOT1200ea_o,mean_AOT1600ea_o,mean_AOT2100ea_o,mean_AOT0470sa_o,npix_AOT0550sa_o,pval_AOT0550sa_o,mean_AOT0550sa_o,sdev_AOT0550sa_o,mean_AOT0660sa_o,mean_AOT0870sa_o,mean_AOT1200sa_o,mean_AOT1600sa_o,mean_AOT2100sa_o,mean_rAOTse0550a_o,mean_effr0550a_o,sdev_effr0550a_o,mean_solindx_sa_o,mean_solindx_la_o,mean_lsqerr_a_o,mean_cfrac_o,sdev_cfrac_o,QAavg_o,mean_mref0470_o,mean_mref0550_o,mean_mref0660_o,mean_mref0870_o,mean_mref1200_o,mean_mref1600_o,mean_mref2100_o,sdev_mref0470_o,sdev_mref0550_o,sdev_mref0660_o,sdev_mref0870_o,sdev_mref1200_o,sdev_mref1600_o,sdev_mref2100_o,mean_wni,mean_wir,pval_wni,pval_wir,mean_pathrad0470_l,mean_pathrad0660_l,mean_critref0470_l,mean_critref0660_l,mean_errprad0470_l,mean_errprad0660_l,mean_errcref0470_l,mean_errcref0660_l,mean_qwtprad0470_l,mean_qwtprad0660_l,mean_qwtcref0470_l,mean_qwtcref0660_l,npix_AOT0550dpbl_l,pval_AOT0550dpbl_l,mean_AOT0550dpbl_l,sdev_AOT0550dpbl_l,mean_AOT0412dpbl_l,mean_AOT0470dpbl_l,mean_AOT0660dpbl_l,mean_Aext0412_0470dpbl_l,mean_SSA0412dpbl_l,mean_SSA0470dpbl_l,mean_SSA0660dpbl_l,mean_surfre0412dpbl_l,mean_surfre0470dpbl_l,mean_surfre0660dpbl_l,tau_550_norm,eta_norm,tau_f,tau_c,alpha_norm,alpha_f,Deta,tau_466,tau_553,tau_644,tau_866,tau_2119,Angs_466_644,exp_errorO_pct,exp_errorL_pct,ncep_pwat,ncep_O3,ncep_pres,ncep_windspd,ncep_winddir'
return cols | bf3148b53effc18e212e03cf70673dc25e1d0005 | 4,938 |
from typing import Optional
from pathlib import Path
def run_primer3(sequence, region, primer3_exe: str, settings_dict: dict,
padding=True, thermodynamic_params: Optional[Path] = None):
"""Run primer 3. All other kwargs will be passed on to primer3"""
if padding:
target_start = region.padding_left
target_len = len(sequence) - region.padding_left - region.padding_right
else:
target_start = 1
target_len = len(sequence)
target = ",".join(map(str, [target_start, target_len]))
p3 = Primer3(primer3_exe, sequence, target, target, settings_dict,
thermodynamic_params=thermodynamic_params)
p3_out = p3.run()
primers = parse_primer3_output(p3_out)
return primers | 670f70b5b50200da5c9cd13b447a5836b748fd31 | 4,939 |
import inspect
def filter_safe_dict(data, attrs=None, exclude=None):
"""
Returns current names and values for valid writeable attributes. If ``attrs`` is given, the
returned dict will contain only items named in that iterable.
"""
def is_member(cls, k):
v = getattr(cls, k)
checks = [
not k.startswith("_"),
not inspect.ismethod(v) or getattr(v, "im_self", True),
not inspect.isfunction(v),
not isinstance(v, (classmethod, staticmethod, property)),
]
return all(checks)
cls = None
if inspect.isclass(data):
cls = data
data = {k: getattr(cls, k) for k in dir(cls) if is_member(cls, k)}
ret = {}
for k, v in data.items():
checks = [
not k.startswith("_"),
not inspect.ismethod(v) or getattr(v, "im_self", True),
not isinstance(v, (classmethod, staticmethod, property)),
not attrs or (k in attrs),
not exclude or (k not in exclude),
]
if all(checks):
ret[k] = v
return ret | ce457615ba8e360243912c3bba532e8327b8def4 | 4,940 |
def fixture_loqus_exe():
"""Return the path to a loqus executable"""
return "a/path/to/loqusdb" | 647b31e37854a5cbc8fd066c982e67f976100c03 | 4,941 |
def get_reads_section(read_length_r1, read_length_r2):
"""
Yield a Reads sample sheet section with the specified R1/R2 length.
:rtype: SampleSheetSection
"""
rows = [[str(read_length_r1)], [str(read_length_r2)]]
return SampleSheetSection(SECTION_NAME_READS, rows) | 19f3e36e34471c6bac89f2a42bdcb3f4b79c22c7 | 4,942 |
def validate(number):
"""Check if the number is valid. This checks the length, format and check
digit."""
number = compact(number)
if not all(x in _alphabet for x in number):
raise InvalidFormat()
if len(number) != 16:
raise InvalidLength()
if number[-1] == '-':
raise InvalidFormat()
if number[-1] != calc_check_digit(number):
raise InvalidChecksum()
return number | e191ee9d8631dfd843276b2db7ee9699b974e555 | 4,943 |
import re
def parse_directory(filename):
""" read html file (nook directory listing),
return users as [{'name':..., 'username':...},...] """
try:
file = open(filename)
html = file.read()
file.close()
except:
return []
users = []
for match in re.finditer(r'<b>([^<]+)</b>.*?mailto:([^@]+)@', html):
groups = match.groups()
users.append({'name':groups[0], 'username':groups[1]})
users.sort(key=lambda x:x['username'])
return users | 1b7fc5b6257b5c382f520a60c9227e8b458d482d | 4,944 |
from typing import Optional
from decimal import Decimal
def dec_multiply(*args) -> Optional[Decimal]:
"""
Multiplication of numbers passed as *args.
Args:
*args: numbers we want to multiply
Returns:
The result of the multiplication as a decimal number
Examples:
>>> dec_multiply(3, 3.5, 4, 2.34)
Decimal('98.280')
>>> dec_multiply() is None
True
"""
if not args:
return
total = Decimal(str(args[0]))
for element in args[1:]:
total *= Decimal(str(element))
return total | f7d953debc5d24c97ee274ec13683be3fda302eb | 4,947 |
import json
def get_networks():
"""
Returns a list of all available network names
:return: JSON string, ex. "['bitcoin','bitcoin-cash','dash','litecoin']"
"""
return json.dumps([x[0] for x in db.session.query(Node.network).distinct().all()]) | 755e0238463aabed0a38102ca793842dd54a6c87 | 4,948 |
def get_cache_key_generator(request=None, generator_cls=None, get_redis=None):
"""Return an instance of ``CacheKeyGenerator`` configured with a redis
client and the right cache duration.
"""
# Compose.
if generator_cls is None:
generator_cls = CacheKeyGenerator
if get_redis is None:
get_redis = get_redis_client
# Instantiate and return the cache key generator.
return generator_cls(get_redis(request)) | 80c25a204976492e2741e46bd79d70d0e6b62b1a | 4,949 |
import pathlib
def is_from_derms(output):
"""Given an output, check if it's from DERMS simulation.
Parameters
----------
output: str or pathlib.Path
"""
if not isinstance(output, pathlib.Path):
output = pathlib.Path(output)
derms_info_file = output / DERMS_INFO_FILENAME
if derms_info_file.exists():
return True
return False | e9a9be7e18cda3b22661f773e6bb585c833b74d6 | 4,950 |
def js_squeeze(parser, token):
"""
{% js_squeeze "js/dynamic_minifyed.js" "js/script1.js,js/script2.js" %}
will produce STATIC_ROOT/js/dynamic_minifyed.js
"""
bits = token.split_contents()
if len(bits) != 3:
        raise template.TemplateSyntaxError("%r tag requires exactly two arguments" % bits[0])
return SqueezeNode('js', *bits[1:]) | 30b10b85001bbb5710584fb41469e1c36d50f086 | 4,951 |
def view_extracted_data() -> str:
"""
Display Raw extracted data from Documents
"""
extracted_data = read_collection(FIRESTORE_PROJECT_ID, FIRESTORE_COLLECTION)
if not extracted_data:
return render_template("index.html", message_error="No data to display")
return render_template("index.html", extracted_data=extracted_data) | 9eccebd4952fc3c988bfc6014d2c12944a197ac4 | 4,952 |
import json
def get_lstm_trump_text():
"""Use the LSTM trump tweets model to generate text."""
data = json.loads(request.data)
sl = data["string_length"]
st = data["seed_text"]
gen_text = lstm_trump.generate_text(seed_text=st, pred_len=int(sl))
return json.dumps(gen_text) | 9fbad3e7abcfcbbbfb5919a5c37cf607e972592e | 4,953 |
def countSort(alist):
"""计数排序"""
if alist == []:
return []
cntLstLen = max(alist) + 1
cntLst = [0] * cntLstLen
for i in range(len(alist)):
        cntLst[alist[i]] += 1 # a value alist[i] == k is tallied at index k
alist.clear()
for i in range(cntLstLen):
        while cntLst[i] > 0: # output each value k as many times as it was counted
alist.append(i)
cntLst[i] -= 1
return alist | 6727b41794dc2a2f826023c2a53202798dfa49ab | 4,955 |
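Quick checks of the counting sort above (non-negative integers only, since values index the count list):
assert countSort([3, 1, 4, 1, 5, 9, 2, 6]) == [1, 1, 2, 3, 4, 5, 6, 9]
assert countSort([]) == []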
def _FloatsTraitsBase_read_values_dataset(arg2, arg3, arg4, arg5):
"""_FloatsTraitsBase_read_values_dataset(hid_t arg2, hid_t arg3, hid_t arg4, unsigned int arg5) -> FloatsList"""
return _RMF_HDF5._FloatsTraitsBase_read_values_dataset(arg2, arg3, arg4, arg5) | 4f2cfb17e5f0b3cfc980f51ef8e9ae8d7d38ba2c | 4,956 |
import requests
from bs4 import BeautifulSoup
import io
import pandas as pd
from progress.bar import Bar
def TRI_Query(state=None, county=None,area_code=None, year=None,chunk_size=100000):
"""Query the EPA Toxic Release Inventory Database
This function constructs a query for the EPA Toxic Release Inventory API, with optional arguments for details such as the two-letter state, county name, area code, and year. More info here: https://www.epa.gov/enviro/envirofacts-data-service-api
"""
base_url='https://data.epa.gov/efservice/'
#Declare the names of the tables that we want to pull
table_name1='TRI_FACILITY'
table_name2='TRI_REPORTING_FORM'
table_name3 = 'TRI_TRANSFER_QTY'
output_format='CSV'
query = base_url
query+=table_name1+'/'
#Add in the state qualifier, if the desired_state variable is named
if state:
query+='state_abbr/=/'+state+'/'
#Add in the county qualifier, if the desired_county variable is named
if county:
query+='county_name/'+county+'/'
#Add in the area code qualifier, if the desired_area_code variable is named
if area_code:
query+='zip_code/'+str(area_code)+'/'
#Add in the next table name and year qualifier, if the desired_year variable is named
query += table_name2+'/'
if year:
if type(year) is list:
query+='reporting_year/'+str(year[0])+'/'+str(year[1])+'/'
else:
query+='reporting_year/'+str(year)+'/'
#add the third table
query += table_name3+'/'
count_query = query+'count/'
count_xml = requests.get(count_query).content
nrows= int(BeautifulSoup(count_xml,features="lxml").find('requestrecordcount').contents[0])
#Add in the desired output format to the query
csv_query = query+ output_format
#Return the completed query
bar = Bar('Downloading Records:',max=nrows,\
suffix='%(index)d/%(max)d %(percent).1f%% - %(eta)ds')
bar.check_tty = False
s=requests.get(csv_query).content
dataframe=pd.read_csv(io.StringIO(s.decode('utf-8')), engine='python',
encoding='utf-8', error_bad_lines=False)
bar.next(n = dataframe.shape[0])
nrows_prev = dataframe.shape[0]
while dataframe.shape[0] < nrows:
new_query = query + 'rows/'+str(dataframe.shape[0])+':'\
+str(dataframe.shape[0]+chunk_size)+'/'
csv_query = new_query+ output_format
s=requests.get(csv_query).content
dataframe = dataframe.append(pd.read_csv(io.StringIO(s.decode('utf-8')),
engine='python',encoding='utf-8',
error_bad_lines=False))
bar.next(n=dataframe.shape[0]-nrows_prev)
nrows_prev = dataframe.shape[0]
bar.finish()
# do the replacement:
if 'TRI_TRANSFER_QTY.TYPE_OF_WASTE_MANAGEMENT' in dataframe.columns:
dataframe.replace({'TRI_TRANSFER_QTY.TYPE_OF_WASTE_MANAGEMENT':wm_dict},inplace=True)
return dataframe | 48eef06d1409dfe4404c6548435196cc95baff62 | 4,957 |
import requests
import arrow
import pandas as pd
from lxml import html
def get_monthly_schedule(year, month):
"""
:param year: a string, e.g. 2018
:param month: a string, e.g. january
:return schedule: a pd.DataFrame containing game info for the month
"""
url = f'https://www.basketball-reference.com/leagues/NBA_{year}_games-{month}.html'
page = requests.get(url)
tree = html.fromstring(page.content)
game_date = tree.xpath('//*[@data-stat="date_game"]/a/text()')
road_team = tree.xpath('//*[@data-stat="visitor_team_name"]/a/text()')
road_pts = tree.xpath('//*[@data-stat="visitor_pts"]/text()')
road_pts.pop(0) # Remove column name
home_team = tree.xpath('//*[@data-stat="home_team_name"]/a/text()')
home_pts = tree.xpath('//*[@data-stat="home_pts"]/text()')
home_pts.pop(0) # Remove column name
box_score_url = tree.xpath('//*[@data-stat="box_score_text"]/a/@href')
schedule = {
'DATE': game_date,
'ROAD_TEAM': road_team,
'ROAD_PTS': road_pts,
'HOME_TEAM': home_team,
'HOME_PTS': home_pts,
'BOX_SCORE_URL': box_score_url,
}
# Create a dictionary with different length columns (Series) that is
# suitable for a DataFrame
schedule = dict([ (k, pd.Series(v)) for k, v in schedule.items() ])
schedule = pd.DataFrame(schedule)
schedule.dropna(how='any', inplace=True)
schedule['ROAD_TM'] = schedule['ROAD_TEAM'].map(team_name_abbrev)
schedule['HOME_TM'] = schedule['HOME_TEAM'].map(team_name_abbrev)
schedule = schedule[['DATE', 'ROAD_TEAM', 'ROAD_TM', 'ROAD_PTS',
'HOME_TEAM', 'HOME_TM', 'HOME_PTS', 'BOX_SCORE_URL']]
BBALLREF = 'https://www.basketball-reference.com'
schedule['BOX_SCORE_URL'] = \
schedule['BOX_SCORE_URL'].apply(lambda x: BBALLREF + x)
def format_date(date):
return arrow.get(date, 'ddd, MMM D, YYYY').datetime.strftime('%Y-%m-%d')
schedule['DATE'] = schedule['DATE'].apply(format_date)
return schedule | 1aa48abaa274166110df8dfd55b49560f72db054 | 4,958 |
import csv
import zlib
def get_gzip_guesses(preview, stream, chunk_size, max_lines):
"""
:type preview: str
:param preview: The initial chunk of content read from the s3
file stream.
:type stream: botocore.response.StreamingBody
:param stream: StreamingBody object of the s3 dataset file.
:type chunk_size: int
:param chunk_size: Maximum size of the chunk in bytes peeking.
:type max_lines: int
:param max_lines: Maximum number of lines to peek into.
"""
COMPRESSION_TYPE = 'GZIP'
guesses = dict()
dialect = csv.Sniffer().sniff(zlib.decompressobj(zlib.MAX_WBITS|16).decompress(preview))
has_header = csv.Sniffer().has_header(zlib.decompressobj(zlib.MAX_WBITS|16).decompress(preview))
d = zlib.decompressobj(zlib.MAX_WBITS|16)
lines_read = 0
first_row = True
data = ''
while True:
if first_row:
chunk = preview
else:
chunk = stream.read(chunk_size)
if not chunk:
break
data += d.decompress(chunk)
if '\n' in data:
guesses, data, lines_read = analyze_data(data, lines_read, max_lines, first_row, guesses, dialect, has_header)
first_row = False
if lines_read >= max_lines:
return guesses, has_header, COMPRESSION_TYPE, dialect
return guesses, has_header, COMPRESSION_TYPE, dialect | aa6185ed31fc4bb5d85e991702925502beff86c0 | 4,959 |
from typing import List
from torch import nn
def make_preds_epoch(classifier: nn.Module,
data: List[SentenceEvidence],
batch_size: int,
device: str=None,
criterion: nn.Module=None,
tensorize_model_inputs: bool=True):
"""Predictions for more than one batch.
Args:
classifier: a module that looks like an AttentiveClassifier
data: a list of elements to make predictions over. These must be SentenceEvidence objects.
batch_size: the biggest chunk we can fit in one batch.
device: Optional; what compute device this should run on
criterion: Optional; a loss function
tensorize_model_inputs: should we convert our data to tensors before passing it to the model? Useful if we have a model that performs its own tokenization
"""
epoch_loss = 0
epoch_soft_pred = []
epoch_hard_pred = []
epoch_truth = []
batches = _grouper(data, batch_size)
classifier.eval()
for batch in batches:
loss, soft_preds, hard_preds, targets = make_preds_batch(classifier, batch, device, criterion=criterion, tensorize_model_inputs=tensorize_model_inputs)
if loss is not None:
epoch_loss += loss.sum().item()
epoch_hard_pred.extend(hard_preds)
epoch_soft_pred.extend(soft_preds.cpu())
epoch_truth.extend(targets)
epoch_loss /= len(data)
epoch_hard_pred = [x.item() for x in epoch_hard_pred]
epoch_truth = [x.item() for x in epoch_truth]
return epoch_loss, epoch_soft_pred, epoch_hard_pred, epoch_truth | 67cecfc6648ef4ad10531b086dab2fc9e6e2f6f3 | 4,960 |
import numpy as np
def array_3_1(data):
    """
    Purpose: convert a 3-D image array into a 1-D column vector \n
    Parameters: \n
    data: image data, 3-D array \n
    Returns: image data as a (width*height, 1) column vector \n
    """
    # skipping further input validation here
shape = data.shape
width = shape[0]
height = shape[1]
# z = list()
z = np.zeros([width * height, 1])
for i in range(0, width):
for j in range(0, height):
index = i * width + j
z[index][0] = data[i, j, 0]
# z.append(data[i, j, 0])
return z | 0b2991da94102e5ecf47d037f995b95a3fd28ac8 | 4,961 |
import json
def UpdateString(update_intervals):
"""Calculates a short and long message to represent frequency of updates.
Args:
update_intervals: A list of interval numbers (between 0 and 55) that
represent the times an update will occur
Returns:
A two-tuple of the long and short message (respectively) corresponding to
the frequency. This is intended to be sent via AJAX and hence the
tuple is turned into json before being returned.
Raises:
BadInterval in the case that the length of update_intervals is not
a key in the constant RESPONSES
"""
length = len(update_intervals)
if length not in RESPONSES:
raise BadInterval(length)
else:
return json.dumps(RESPONSES[length]) | 35ba60e028c238f304bcf03d745865c93408b9c1 | 4,962 |
from typing import Optional, TypeVar
T = TypeVar("T")
def coalesce(*xs: Optional[T]) -> T:
"""Return the first non-None value from the list; there must be at least one"""
for x in xs:
if x is not None:
return x
assert False, "Expected at least one element to be non-None" | fe388a40ff200f9988514563d0e37d2d604317a7 | 4,963 |
import libtbx.phil
def check_phil(phil, scope=True, definition=True, raise_error=True):
"""
Convenience function for checking if the input is a libtbx.phil.scope
only or a libtbx.phil.definition only or either.
Parameters
----------
phil: object
The object to be tested
scope: bool
Flag to check if phil is a libtbx.phil.scope
definition: bool
Flag to check if phil is a libtbx.phil.definition
raise_error: bool
If true, a RuntimeError is raised if the check(s) fail
Returns
-------
value: bool
"""
value = False
if scope: # check for only libtbx.phil.scope
value = isinstance(phil, libtbx.phil.scope)
if definition: # check for only libtbx.phil.definition
value = isinstance(phil, libtbx.phil.definition)
if scope and definition: # check for either
value = isinstance(phil, libtbx.phil.scope) or isinstance(phil, libtbx.phil.definition)
if (scope and definition) and not value and raise_error:
raise RuntimeError('A libtbx.phil.scope or libtbx.phil.definition is expected.')
elif scope and not value and raise_error:
raise RuntimeError('A libtbx.phil.scope is expected.')
elif definition and not value and raise_error:
raise RuntimeError('A libtbx.phil.definition is expected.')
return value | 11a59bb25689bfc5882b8e0b0b9c2e9a5f233db0 | 4,964 |
import json
def get_ssm_environment() -> dict:
"""Get the value of environment variables stored in the SSM param store under $DSS_DEPLOYMENT_STAGE/environment"""
p = ssm_client.get_parameter(Name=fix_ssm_variable_prefix("environment"))
parms = p["Parameter"]["Value"] # this is a string, so convert to dict
return json.loads(parms) | 2f5a44c7e01f87c0aff092f9fed83f0030d4f7da | 4,965 |
from datetime import datetime
def get_default_date_stamp():
"""
Returns the default date stamp as 'now', as an ISO Format string 'YYYY-MM-DD'
:return:
"""
return datetime.now().strftime('%Y-%m-%d') | 672cd98265b19da2df92c7849f1059e5988473d7 | 4,966 |
import re
def check_for_launchpad(old_vendor, name, urls):
"""Check if the project is hosted on launchpad.
:param name: str, name of the project
:param urls: set, urls to check.
:return: the name of the project on launchpad, or an empty string.
"""
if old_vendor != "pypi":
# XXX This might work for other starting vendors
# XXX but I didn't check. For now only allow
# XXX pypi -> launchpad.
return ''
for url in urls:
try:
return re.match(r"https?://launchpad.net/([\w.\-]+)",
url).groups()[0]
except AttributeError:
continue
return '' | 87fc4be32cd93671b5d9fe43697d9e6918675843 | 4,967 |
import torch
def constructRBFStates(L1, L2, W1, W2, sigma):
"""
Constructs a dictionary dict[tuple] -> torch.tensor that converts
tuples (x,y) representing positions to torch tensors used as input to the
neural network. The tensors have an entry for each valid position on the
race track. For each position (x,y), the tensor is constructed using the gaussian
radial basis function with standard deviation sigma. In other words, if entry i corresponds
to the position p2 = (x2, y2), then the tensor for a point p1 = (x1,y1) will have
tensor[i] = Gaussian_RBF(p1, p2).
@type L1: int
See description in the @RaceCar class.
@type L2: int
See description in the @RaceCar class.
@type W1: int
See description in the @RaceCar class.
@type W2: int
See description in the @RaceCar class.
@type sigma: float
The standard deviation of the gaussian radial basis function.
"""
N_states = (L1+1)*(W1+W2+1)+L2*(W2+1)
x_coords = torch.zeros(N_states, dtype=torch.float32)
y_coords = torch.zeros(N_states, dtype=torch.float32)
state_to_basis = {}
ind = 0
for x in range(L1+L2+1):
for y in range(W1+W2+1):
if (0<=x<=L1 and 0<=y<=W1+W2) or (0<=x<=L1+L2 and W1<=y<=W1+W2):
x_coords[ind] = x
y_coords[ind] = y
ind += 1
for x in range(L1 + L2 + 1):
for y in range(W1 + W2 + 1):
if (0 <= x <= L1 and 0 <= y <= W1 + W2) or (0 <= x <= L1 + L2 and W1 <= y <= W1 + W2):
basis = torch.exp(-((x_coords-x)**2 + (y_coords-y)**2)/(2*sigma**2))
state_to_basis[(x,y)] = basis.view(1, -1).to(device)
return state_to_basis | 575572e40f66c121468d547b45fa92c23f78f99f | 4,969 |
from typing import Union
from typing import Iterator
def tile_grid_intersection(
src0: DatasetReader,
src1: DatasetReader,
blockxsize: Union[None, int] = None,
blockysize: Union[None, int] = None
) -> tuple[Iterator[Window], Iterator[Window], Iterator[Window], Affine, int, int]:
"""Generate tiled windows for the intersection between two grids.
Given two rasters having different dimensions calculate read-window generators for each
and a write-window generator for the intersecion.
Parameters:
src0: rasterio read source
src1: rasterio read source
blockxsize: write-window width
blockysize: write-window height
Returns:
read windows for src0,
read windows for src1,
write windows for the intersection,
write raster Affine,
write raster width in columns
write raster height in rows
"""
bbox0 = window_bounds(((0, 0), src0.shape), src0.transform, offset='ul')
bbox1 = window_bounds(((0, 0), src1.shape), src1.transform, offset='ul')
bounds = intersect_bounds(bbox0, bbox1)
(row_start0, row_stop0), (col_start0, col_stop0) = bounds_window(
bounds, src0.transform
)
(row_start1, row_stop1), (col_start1, col_stop1) = bounds_window(
bounds, src1.transform
)
ncols = col_stop0 - col_start0
nrows = row_stop0 - row_start0
affine = from_bounds(bounds[0], bounds[1], bounds[2], bounds[3], ncols, nrows)
if blockxsize is None:
blockxsize = ncols
if blockysize is None:
blockysize = nrows
windows0 = tile_grid(
ncols,
nrows,
blockxsize,
blockysize,
col_offset=col_start0,
row_offset=row_start0,
)
windows1 = tile_grid(
ncols,
nrows,
blockxsize,
blockysize,
col_offset=col_start1,
row_offset=row_start1,
)
write_windows = tile_grid(ncols, nrows, blockxsize, blockysize)
return (windows0, windows1, write_windows, affine, nrows, ncols) | 847092e1a02ed446d7873658340d578248b1e80c | 4,971 |
def etapes_index_view(request):
"""
GET etapes index
"""
# Check connected
if not check_connected(request):
raise exc.HTTPForbidden()
records = request.dbsession.query(AffaireEtapeIndex).filter(
AffaireEtapeIndex.ordre != None
).order_by(AffaireEtapeIndex.ordre.asc()).all()
return Utils.serialize_many(records) | a79ec31c3849a7e77528d4607859f9bf77899ffb | 4,972 |
from typing import Optional
def brute_force(ciphered_text: str, charset: str = DEFAULT_CHARSET, _database_path: Optional[str] = None) -> int:
""" Get Caesar ciphered text key.
Uses a brute force technique trying the entire key space until finding a text
that can be identified with any of our languages.
**You should not use this function. Use *brute_force_mp* instead.** This
function is slower than *mp* one because is sequential while the other uses a
multiprocessing approach. This function only stay here to allow comparisons
between sequential and multiprocessing approaches.
:param ciphered_text: Text to be deciphered.
:param charset: Charset used for Caesar method substitution. Both ends, ciphering
and deciphering, should use the same charset or original text won't be properly
recovered.
:param _database_path: Absolute pathname to database file. Usually you don't
set this parameter, but it is useful for tests.
:return: Caesar key found.
"""
key_space_length = len(charset)
return simple_brute_force(key_generator=integer_key_generator(key_space_length),
assess_function=_assess_caesar_key,
# key_space_length=key_space_length,
ciphered_text=ciphered_text,
charset=charset,
_database_path=_database_path) | 9b23b4b5068dd36345d6aa43f71bd307f8b24e0c | 4,973 |
import numpy as np
def SqZerniketoOPD(x,y,coef,N,xwidth=1.,ywidth=1.):
"""
Return an OPD vector based on a set of square Zernike coefficients
"""
stcoef = np.dot(zern.sqtost[:N,:N],coef)
x = x/xwidth
y = y/ywidth
zm = zern.zmatrix(np.sqrt(x**2+y**2),np.arctan2(y,x),N)
opd = np.dot(zm,stcoef)
return opd | 4243f2c4106d5de0b7f6966cfb3244644beff100 | 4,974 |
import random
def build_voterinfo(campaign, state):
"""Render a tweet of voting info for a state"""
state_info = campaign.info_by_state[state]
num_cities = len(state_info[CITIES])
assert num_cities == len(set(state_info[CITIES])), f"Duplicate entries in CITIES for {state}."
city_ct = num_cities
effective_length = 0
tweet_text = ""
while city_ct > 0:
# Iterate on building a tweet until it fits within the limit.
# Return none if unsuccessful
city_set = set(state_info[CITIES])
try:
# Select up to city_ct cities
cities = []
cities_found = 0
while cities_found < city_ct:
city_idx = random.randint(0, num_cities - 1)
city = state_info[CITIES][city_idx]
if city in city_set:
cities.append(hashtag(city))
city_set.remove(city)
cities_found += 1
effective_length, tweet_text = render_voterinfo(campaign, state, cities)
break
except AssertionError:
tweet_text = ""
city_ct -= 1
return effective_length, tweet_text | a3f6b7aea9b84174ed1e825cacb38966e099c7eb | 4,975 |
import numpy as np
from xgboost import XGBRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error
def train_model(training_df, stock):
"""
Summary: Trains XGBoost model on stock prices
Inputs: stock_df - Pandas DataFrame containing data about stock price, date, and daily tweet sentiment regarding that stock
stock - String representing stock symbol to be used in training
Return value: Trained XGBoost model
"""
print("Beginning training model for ", stock)
X_train, X_test, y_train, y_test = create_train_data(training_df)
print("Created data")
xgb = XGBRegressor(objective="reg:squarederror", random_state=42)
parameters = {
'n_estimators': [100, 200, 300, 400],
'learning_rate': [0.001, 0.005, 0.01, 0.05],
'max_depth': [8, 10, 12, 15],
'gamma': [0.001, 0.005, 0.01, 0.02],
}
print("Performing Grid Search")
gs = GridSearchCV(xgb, parameters)
gs.fit(X_train, y_train, verbose=2)
print("Grid Search Done")
model = XGBRegressor(**gs.best_params_, objective="reg:squarederror")
model.fit(X_train, y_train)
print("Model fit")
y_pred = model.predict(X_test)
print(stock)
print(f'y_true = {np.array(y_test)[:5]}')
print(f'y_pred = {y_pred[:5]}')
print(f'mean_squared_error = {mean_squared_error(y_test, y_pred)}')
print("----------------")
return model | be28e84c6796bd002217ab56c85958b52fbc199c | 4,976 |
def create_test_node(context, **kw):
"""Create and return a test Node object.
Create a node in the DB and return a Node object with appropriate
attributes.
"""
node = get_test_node(context, **kw)
node.create()
return node | 21ff9931a7c6859bbe924014cb3a06b9890f7a63 | 4,978 |
def get_physical_id(r_properties):
""" Generated resource id """
bucket = r_properties['Bucket']
key = r_properties['Key']
return f's3://{bucket}/{key}' | 2cd467d9b1df72a4573d99f7a5d799f9612239c9 | 4,979 |
def entity_test_models(translation0, locale1):
"""This fixture provides:
- 2 translations of a plural entity
- 1 translation of a non-plural entity
- A subpage that contains the plural entity
"""
entity0 = translation0.entity
locale0 = translation0.locale
project0 = entity0.resource.project
locale0.cldr_plurals = "0,1"
locale0.save()
translation0.plural_form = 0
translation0.save()
resourceX = Resource.objects.create(
project=project0, path="resourceX.po")
entity0.string = "Entity zero"
entity0.key = entity0.string
entity0.string_plural = "Plural %s" % entity0.string
entity0.save()
entityX = Entity.objects.create(
resource=resourceX,
string="entityX",
key='Key%sentityX' % KEY_SEPARATOR)
translation0pl = Translation.objects.create(
entity=entity0,
locale=locale0,
plural_form=1,
string="Plural %s" % translation0.string)
translationX = Translation.objects.create(
entity=entityX,
locale=locale0,
string="Translation %s" % entityX.string)
subpageX = Subpage.objects.create(
project=project0, name="Subpage")
subpageX.resources.add(entity0.resource)
return translation0, translation0pl, translationX, subpageX | 36c6962a69d241e395af1c7ebe16271dcaed975d | 4,980 |
def paris_topology(self, input_path):
"""Generation of the Paris metro network topology
Parameters:
input_path: string, input folder path
Returns:
        self.g: nx.Graph(), the Paris metro network graph
self.length: np.array, lengths of edges
"""
adj_file = open(input_path + "adj.dat", "r")
lines = adj_file.readlines()
# graph adjacency list
topol = np.zeros([len(lines), 2], dtype=int)
for iedge, line in enumerate(lines):
topol[iedge][:] = [int(w) for w in line.split()[0:2]]
self.g.add_edges_from(topol)
# coordinate of nodes
coord_file = open(input_path + "coord.dat", "r")
lines = coord_file.readlines()
for inode, line in enumerate(lines):
self.g.nodes[inode]["pos"] = tuple([float(w) for w in line.split()[0:2]])
# length of edges
self.length = np.zeros(self.g.number_of_edges())
for i, edge in enumerate(self.g.edges()):
self.length[i] = distance.euclidean(self.g.nodes[edge[0]]["pos"], self.g.nodes[edge[1]]["pos"])
# right hand side construction
forcing_path = input_path + "rhs.dat"
self.forcing = forcing_generation(self, forcing_path)
return self.g, self.length, self.forcing | 9f81e111cfe9adf265b9a3aa58390935b752f242 | 4,981 |
def _rescale(vector):
"""Scale values in vector to the range [0, 1].
Args:
vector: A list of real values.
"""
# Subtract min, making smallest value 0
min_val = min(vector)
vector = [v - min_val for v in vector]
# Divide by max, making largest value 1
max_val = float(max(vector))
try:
return [v / max_val for v in vector]
except ZeroDivisionError: # All values are the same
return [1.0] * len(vector) | 0091deb65c67ef55b2632ac8d5ff8a15b275d12e | 4,982 |
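A quick illustration of the scaling behaviour, including the all-equal fallback; the input vectors are made up.
print(_rescale([2.0, 4.0, 6.0]))  # -> [0.0, 0.5, 1.0]
print(_rescale([3.0, 3.0, 3.0]))  # -> [1.0, 1.0, 1.0]  (ZeroDivisionError branch)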
from datetime import datetime
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST
def validate_travel_dates(departure, arrival):
"""It validates arrival and departure dates
:param departure: departure date
:param arrival: arrival date
    :returns: a (status, error_message) tuple; error_message is empty when the dates are valid
"""
date_format = "%Y-%m-%dT%H:%M:%SZ"
status = True
error_message = ""
if datetime.strptime(departure, date_format) < datetime.now():
status = False
error_message = Response(
{"message": "Departure time cannot be in the past"},
status=HTTP_400_BAD_REQUEST,
)
elif datetime.strptime(arrival, date_format) < datetime.now():
status = False
error_message = Response(
{"message": "Arrival time cannot be in the past"},
status=HTTP_400_BAD_REQUEST,
)
elif datetime.strptime(departure, date_format) > datetime.strptime(
arrival, date_format
):
status = False
error_message = Response(
{"message": "Departure time cannot be greater than arrival time"},
status=HTTP_400_BAD_REQUEST,
)
return status, error_message | 41759684517daece729ba845b7afc80c6e6b01ea | 4,983 |
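A hedged usage sketch: with both timestamps in the future and departure before arrival, only the happy path runs; the dates below are placeholders.
ok, error = validate_travel_dates("2030-01-01T10:00:00Z", "2030-01-01T14:00:00Z")
print(ok, error)  # -> True (empty error message)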
def xmatch_arguments():
""" Obtain information about the xmatch service
"""
return jsonify({'args': args_xmatch}) | 393e74df6900b8c4ed6f0eac82c162a7287a9b6d | 4,984 |
import numpy as np
def rosenbrock_func(x):
"""Rosenbrock objective function.
Also known as the Rosenbrock's valley or Rosenbrock's banana
function. Has a global minimum of :code:`np.ones(dimensions)` where
:code:`dimensions` is :code:`x.shape[1]`. The search domain is
:code:`[-inf, inf]`.
Parameters
----------
x : numpy.ndarray
set of inputs of shape :code:`(n_particles, dimensions)`
Returns
-------
numpy.ndarray
computed cost of size :code:`(n_particles, )`
"""
r = np.sum(100*(x.T[1:] - x.T[:-1]**2.0)**2 + (1-x.T[:-1])**2.0, axis=0)
return r | 5d89e22fde50032175b69f36a4c0031bfc07c2bb | 4,985 |
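A small check of the documented global minimum: at x = np.ones(...) every particle's cost is zero. Purely illustrative.
import numpy as np

swarm = np.ones((3, 5))        # 3 particles in 5 dimensions
print(rosenbrock_func(swarm))  # -> [0. 0. 0.]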
import h5py
def isHdf5Dataset(obj):
"""Is `obj` an HDF5 Dataset?"""
return isinstance(obj, h5py.Dataset) | b674106e05d5f10585b58d246654987f174d2048 | 4,986 |
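A minimal sketch using an in-memory HDF5 file to show the check distinguishing a dataset from a group; the file name is arbitrary since nothing is written to disk.
import h5py

f = h5py.File('demo.h5', 'w', driver='core', backing_store=False)
dset = f.create_dataset('x', data=[1, 2, 3])
grp = f.create_group('g')
print(isHdf5Dataset(dset))  # -> True
print(isHdf5Dataset(grp))   # -> False
f.close()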
import numpy
def writing_height(sample_wrapper, in_air):
"""
Returns writing height.
:param sample_wrapper: sample wrapper object
:type sample_wrapper: HandwritingSampleWrapper
:param in_air: in-air flag
:type in_air: bool
:return: writing height
:rtype: float
"""
# Get the on-surface/in-air sample data
sample = sample_wrapper.on_surface_data \
if not in_air \
else sample_wrapper.in_air_data
# Check the presence of sample data
if not sample:
return numpy.nan
# Return the writing height
return float(numpy.max(sample.y) - numpy.min(sample.y)) | fce6c0abcc65484088278eddd3bb77541725934c | 4,987 |
def simplify_index_permutations(expr, permutation_operators):
"""
Performs simplification by introducing PermutationOperators where appropriate.
Schematically:
[abij] - [abji] - [baij] + [baji] -> P(ab)*P(ij)*[abij]
permutation_operators is a list of PermutationOperators to consider.
If permutation_operators=[P(ab),P(ij)] we will try to introduce the
permutation operators P(ij) and P(ab) in the expression. If there are other
possible simplifications, we ignore them.
>>> from sympy import symbols, Function
>>> from sympy.physics.secondquant import simplify_index_permutations
>>> from sympy.physics.secondquant import PermutationOperator
>>> p,q,r,s = symbols('p,q,r,s')
>>> f = Function('f')
>>> g = Function('g')
>>> expr = f(p)*g(q) - f(q)*g(p); expr
f(p)*g(q) - f(q)*g(p)
>>> simplify_index_permutations(expr,[PermutationOperator(p,q)])
f(p)*g(q)*PermutationOperator(p, q)
>>> PermutList = [PermutationOperator(p,q),PermutationOperator(r,s)]
>>> expr = f(p,r)*g(q,s) - f(q,r)*g(p,s) + f(q,s)*g(p,r) - f(p,s)*g(q,r)
>>> simplify_index_permutations(expr,PermutList)
f(p, r)*g(q, s)*PermutationOperator(p, q)*PermutationOperator(r, s)
"""
def _get_indices(expr, ind):
"""
Collects indices recursively in predictable order.
"""
result = []
for arg in expr.args:
if arg in ind:
result.append(arg)
else:
if arg.args:
result.extend(_get_indices(arg,ind))
return result
def _choose_one_to_keep(a,b,ind):
# we keep the one where indices in ind are in order ind[0] < ind[1]
if _get_indices(a,ind) < _get_indices(b,ind):
return a
else:
return b
expr = expr.expand()
if isinstance(expr,Add):
terms = set(expr.args)
for P in permutation_operators:
new_terms = set([])
on_hold = set([])
while terms:
term = terms.pop()
permuted = P.get_permuted(term)
if permuted in terms | on_hold:
try:
terms.remove(permuted)
except KeyError:
on_hold.remove(permuted)
keep = _choose_one_to_keep(term, permuted, P.args)
new_terms.add(P*keep)
else:
# Some terms must get a second chance because the permuted
# term may already have canonical dummy ordering. Then
# substitute_dummies() does nothing. However, the other
# term, if it exists, will be able to match with us.
permuted1 = permuted
permuted = substitute_dummies(permuted)
if permuted1 == permuted:
on_hold.add(term)
elif permuted in terms | on_hold:
try:
terms.remove(permuted)
except KeyError:
on_hold.remove(permuted)
keep = _choose_one_to_keep(term, permuted, P.args)
new_terms.add(P*keep)
else:
new_terms.add(term)
terms = new_terms | on_hold
return Add(*terms)
return expr | 3a72459c9f9ee9e1f0f030fa96f5a38c0a1985c0 | 4,988 |
from typing import Set
from typing import Any
from typing import Tuple
import pandas as pd
def get_classification_outcomes(
confusion_matrix: pd.DataFrame,
classes: Set[Any],
class_name: str,
) -> Tuple[int, int, int, int]:
"""
Given a confusion matrix, this function counts the cases of:
- **True Positives** : classifications that accurately labeled a class
- **True Negatives** : classifications that accurately labeled an example as
not belonging to a class.
- **False Positives** : classifications that attributed the wrong label to an
example.
- **False Negatives** : classifications that falsely claimed that an example
does not belong to a class.
Args:
confusion_matrix: The result of calling [generate_confusion_matrix]
[toolbox.algorithms.learning.evaluation.generate_confusion_matrix]
classes: The set of all class labels
class_name: The name (label) of the class being evaluated.
Returns:
- `tp`: Count of True Positives
- `tn`: Count of True Negatives
- `fp`: Count of False Positives
- `fn`: Count of False Negatives
"""
excl_idx = classes.difference(set((class_name,)))
tp = confusion_matrix.loc[class_name, class_name]
tn = confusion_matrix.loc[excl_idx, excl_idx].sum().sum()
fp = confusion_matrix.loc[class_name, excl_idx].sum()
fn = confusion_matrix.loc[excl_idx, class_name].sum()
return (tp, tn, fp, fn) | c8d84aa5d84e9405539fa40cd05101ac84eda871 | 4,989 |
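An illustrative sketch of the same TP/TN/FP/FN bookkeeping on a tiny hand-made confusion matrix; lists are used as .loc indexers because recent pandas versions reject sets, and the row/column orientation is an assumption about what generate_confusion_matrix produces.
import pandas as pd

labels = ['cat', 'dog', 'bird']
cm = pd.DataFrame([[5, 1, 0],
                   [2, 7, 1],
                   [0, 0, 4]], index=labels, columns=labels)
class_name = 'cat'
others = [c for c in labels if c != class_name]  # list, not set
tp = cm.loc[class_name, class_name]              # 5
tn = cm.loc[others, others].sum().sum()          # 12
fp = cm.loc[class_name, others].sum()            # 1
fn = cm.loc[others, class_name].sum()            # 2
print(tp, tn, fp, fn)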
import numpy as np
def points_in_convex_polygon_3d_jit(points,
                                    polygon_surfaces,
                                    ):
    """Check whether points lie inside 3D convex polygons.
    Args:
        points: [num_points, 3] array.
        polygon_surfaces: [num_polygon, max_num_surfaces,
            max_num_points_of_surface, 3]
            array. All surfaces' normal vectors must point toward the interior.
            max_num_points_of_surface must be at least 3.
        num_surfaces: [num_polygon] array. Indicates how many surfaces
            each polygon contains (filled internally with a large sentinel here).
    Returns:
        [num_points, num_polygon] bool array.
    """
max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3]
num_points = points.shape[0]
num_polygons = polygon_surfaces.shape[0]
num_surfaces = np.full((num_polygons,), 9999999, dtype=np.int64)
normal_vec, d = surface_equ_3d_jit(polygon_surfaces[:, :, :3, :])
# normal_vec: [num_polygon, max_num_surfaces, 3]
# d: [num_polygon, max_num_surfaces]
ret = np.ones((num_points, num_polygons), dtype=np.bool_)
sign = 0.0
for i in range(num_points):
for j in range(num_polygons):
for k in range(max_num_surfaces):
if k > num_surfaces[j]:
break
sign = points[i, 0] * normal_vec[j, k, 0] \
+ points[i, 1] * normal_vec[j, k, 1] \
+ points[i, 2] * normal_vec[j, k, 2] + d[j, k]
if sign >= 0:
ret[i, j] = False
break
return ret | b3834ec647fcb6b156f57a36e11dd5dd22bec1d9 | 4,990 |
def _get_spamassassin_flag_path(domain_or_user):
"""
    Get the full path of the file whose existence is used as a flag to turn
    SpamAssassin on.
Args:
domain_or_user - A full email address or a domain name
"""
domain = domain_or_user.lower()
user = False
if '@' in domain:
user, domain = domain.split('@')
sys_user = get_account_from_domain(domain)
if user:
return '/home/' + sys_user + '/etc/' + domain + '/' + user + '/enable_spamassassin'
else:
return '/home/' + sys_user + '/etc/' + domain + '/enable_spamassassin' | e29055f2cbe81dd7ad2083f5bfdc46d02b354dba | 4,991 |
def format(number):
"""Reformat the passed number to the standard format."""
number = compact(number)
return '-'.join((number[:3], number[3:-1], number[-1])) | 90ad8360ef773a9386a122d3f44870a6b371d370 | 4,992 |
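A sketch of the slicing behaviour with a stand-in compact(); this is an assumption, since the real compact() lives in the same module and normalises the input. Here it simply strips separators.
def compact(number):
    # Hypothetical stand-in: keep only alphanumeric characters.
    return ''.join(ch for ch in str(number) if ch.isalnum())

print(format('123 4567 8'))  # -> 123-4567-8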
def get_terms(request):
"""Returns list of terms matching given query"""
if TEST_MODE:
thesaurus_name = request.params.get('thesaurus_name')
extract_name = request.params.get('extract_name')
query = request.params.get('term')
else:
thesaurus_name = request.validated.get('thesaurus_name')
extract_name = request.validated.get('extract_name')
query = request.validated.get('term')
if not (thesaurus_name or query):
return {}
thesaurus = query_utility(IThesaurus, name=thesaurus_name)
if thesaurus is None:
return {}
try:
return {
'results': [
{
'id': term.label,
'text': term.label
}
for term in unique(thesaurus.find_terms(query, extract_name,
exact=True, stemmed=True))
if term.status != STATUS_ARCHIVED
]
}
except ParseError:
return [] | b6e6810a1858de9da609b2e42b39a933ee9fbb04 | 4,993 |
from .. import sim
import __main__ as top
def createExportNeuroML2(netParams=None, simConfig=None, output=False, reference=None, connections=True, stimulations=True, format='xml'):
"""
Wrapper function create and export a NeuroML2 simulation
Parameters
----------
netParams : ``netParams object``
NetPyNE netParams object specifying network parameters.
**Default:** *required*.
simConfig : ``simConfig object``
NetPyNE simConfig object specifying simulation configuration.
**Default:** *required*.
output : bool
Whether or not to return output from the simulation.
**Default:** ``False`` does not return anything.
**Options:** ``True`` returns output.
reference : <``None``?>
<Short description of reference>
**Default:** ``None``
**Options:** ``<option>`` <description of option>
connections : bool
<Short description of connections>
**Default:** ``True``
**Options:** ``<option>`` <description of option>
stimulations : bool
<Short description of stimulations>
**Default:** ``True``
**Options:** ``<option>`` <description of option>
format : str
<Short description of format>
**Default:** ``'xml'``
**Options:** ``<option>`` <description of option>
Returns
-------
data : tuple
If ``output`` is ``True``, returns (pops, cells, conns, stims, rxd, simData)
"""
if not netParams: netParams = top.netParams
if not simConfig: simConfig = top.simConfig
sim.initialize(netParams, simConfig) # create network object and set cfg and net params
pops = sim.net.createPops() # instantiate network populations
cells = sim.net.createCells() # instantiate network cells based on defined populations
conns = sim.net.connectCells() # create connections between cells based on params
stims = sim.net.addStims() # add external stimulation to cells (IClamps etc)
rxd = sim.net.addRxD() # add reaction-diffusion (RxD)
simData = sim.setupRecording() # setup variables to record for each cell (spikes, V traces, etc)
sim.exportNeuroML2(reference, connections, stimulations,format) # export cells and connectivity to NeuroML 2 format
if output:
return (pops, cells, conns, stims, rxd, simData) | 84b3fb607ab30b17222143c46d839bab087c4916 | 4,994 |
def better_get_first_model_each_manufacturer(car_db):
"""Uses map function and lambda to avoid code with side effects."""
result = map(lambda x: x[0], car_db.values())
# convert map to list
return list(result) | 8969c23bfe4df2b1c164dca6c4f929a62de5ba2a | 4,996 |
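A quick usage sketch with a toy car_db mapping; the data is made up.
car_db = {
    'Ford': ['Falcon', 'Focus', 'Festiva'],
    'Holden': ['Commodore', 'Captiva'],
    'Nissan': ['Maxima'],
}
print(better_get_first_model_each_manufacturer(car_db))  # -> ['Falcon', 'Commodore', 'Maxima']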
from collections.abc import Iterable  # `collections.Iterable` was removed in Python 3.10
def _is_scalar(value):
"""Whether to treat a value as a scalar.
Any non-iterable, string, or 0-D array
"""
return (getattr(value, 'ndim', None) == 0
or isinstance(value, (str, bytes))
or not isinstance(value, (Iterable,))) | 725aa4a6002146ecb3dca3a17faa829e213cb3f7 | 4,998 |
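A few illustrative calls covering the three branches (non-iterable, string, 0-D array) versus a genuine iterable.
import numpy as np

print(_is_scalar(5))              # -> True  (not iterable)
print(_is_scalar("hello"))        # -> True  (strings count as scalars)
print(_is_scalar(np.array(3.0)))  # -> True  (0-D array)
print(_is_scalar([1, 2, 3]))      # -> False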
def copy_random(x, y):
""" from 2 randInt calls """
seed = find_seed(x, y)
rand = JavaRandom(seed)
rand.next() # this will be y so we discard it
return rand | f1a1019ed7f012d83edca77ba3c7ccd2a806ee01 | 4,999 |
def build_tree(train, max_depth, min_size, n_features):
"""build_tree(创建一个决策树)
Args:
train 训练数据集
max_depth 决策树深度不能太深,不然容易导致过拟合
min_size 叶子节点的大小
n_features 选取的特征的个数
Returns:
root 返回决策树
"""
# 返回最优列和相关的信息
root = get_split(train, n_features)
# 对左右2边的数据 进行递归的调用,由于最优特征使用过,所以在后面进行使用的时候,就没有意义了
# 例如: 性别-男女,对男使用这一特征就没任何意义了
split(root, max_depth, min_size, n_features, 1)
return root | 5afd343436f14d9ab704636eb480d92a31d59f04 | 5,000 |
import pprint
import boto3
from botocore.exceptions import ClientError
from timeit import default_timer as timer
def delete_bucket(bucket_name: str, location: str, verbose: bool) -> bool:
"""Delete the specified S3 bucket
Args:
bucket_name (str): name of the S3 bucket
location (str): the location (region) the S3 bucket resides in
verbose (bool): enable verbose output
Returns:
bool: True if the specified S3 bucket was successfully deleted,
False otherwise
"""
try:
print(f'Deleting S3 bucket {bucket_name} in location {location} ...')
start = timer()
s3_client = boto3.client('s3', region_name=location)
response = s3_client.delete_bucket(Bucket=bucket_name)
end = timer()
elapsed_time = round(end - start, 3)
print(f'Deleted bucket in {elapsed_time} seconds')
if verbose:
print('delete_bucket() response:')
pprint.pprint(response)
print()
if response['ResponseMetadata']['HTTPStatusCode'] == 204:
print(f'S3 bucket {bucket_name} successfully deleted')
return True
except ClientError as e:
print(f'S3 ClientError occurred while trying to delete bucket:')
print(f"\t{e.response['Error']['Code']}: {e.response['Error']['Message']}")
return False | 79c225c9f8caa0d8c3431709d3f08ccaefe3fc1c | 5,001 |
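A hedged usage sketch; the bucket name and region are placeholders, and running it issues a real DeleteBucket call against whatever AWS credentials are configured.
if delete_bucket('my-example-bucket', 'us-east-1', verbose=True):
    print('cleanup complete')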
import re
import networkx as nx
BCS_SEP = ';'  # separator between matched labels, inferred from the docstring format
def generate_ordered_match_str_from_subseqs(r1,
subseqs_to_track,
rc_component_dict,
allow_overlaps=False):
"""Generates an ordered subsequences match string for the input sequence.
Args:
r1: (str) R1 sequence to scan for subsequence matches.
subseqs_to_track: (list) Subsequences to look for in R1.
rc_component_dict: (dict) Dict mapping DNA sequence to label.
allow_overlaps: (boolean) Whether to allow matches that overlap on R1. If
False, then it will identify a maximal non-overlapping set of matches.
Returns:
(str) labeled components for r1 in the form: 'label_1;label_2;...;label_n'
"""
# Generate ordered set of subseq matches to r1 sequence.
match_tups = []
for mer_label in subseqs_to_track:
mer = rc_component_dict[mer_label]
for match in re.finditer(mer, r1):
xstart = match.start()
xend = xstart + len(mer)
match_tups.append((xstart, xend, mer_label))
match_tups.sort(reverse=True)
# Create a maximal independent set that does not allow overlapping subseqs.
if not allow_overlaps and len(match_tups) > 0:
mer_graph = nx.Graph()
mer_graph.add_nodes_from(match_tups)
for i in range(len(match_tups)):
for j in range(i + 1, len(match_tups)):
# Check if the end of match_tups[j] overlaps the start of match_tups[i].
if match_tups[i][0] < match_tups[j][1]:
mer_graph.add_edge(match_tups[i], match_tups[j])
# Generate a non-overlapping list of subseqs.
match_tups = nx.maximal_independent_set(mer_graph)
match_tups.sort(reverse=True)
match_str = BCS_SEP.join([match_tup[-1] for match_tup in match_tups])
return match_str | 202f228b40b73518342b1cc2419ca466626fc166 | 5,002 |
def combination(n: int, r: int) -> int:
""":return nCr = nPr / r!"""
return permutation(n, r) // factorial(r) | 6cf58428cacd0e09cc1095fb120208aaeee7cb7c | 5,003 |
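A self-contained sketch of the helpers the snippet assumes (factorial from math and permutation defined as n!/(n-r)!), cross-checked against math.comb; the permutation definition is an assumption.
from math import factorial, comb

def permutation(n: int, r: int) -> int:
    """Assumed helper: nPr = n! / (n - r)!."""
    return factorial(n) // factorial(n - r)

print(combination(5, 2), comb(5, 2))    # -> 10 10
print(combination(10, 3), comb(10, 3))  # -> 120 120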
import numpy as np
def _extract_operator_data(fwd, inv_prep, labels, method='dSPM'):
"""Function for extracting forward and inverse operator matrices from
the MNE-Python forward and inverse data structures, and assembling the
source identity map.
Input arguments:
================
fwd : ForwardOperator
The fixed_orientation forward operator.
Instance of the MNE-Python class Forward.
inv_prep : Inverse
The prepared inverse operator.
Instance of the MNE-Python class InverseOperator.
labels : list
List of labels belonging to the used parcellation, e.g. the
Desikan-Killiany, Destrieux, or Schaefer parcellation.
May not contain 'trash' labels/parcels (unknown or medial wall), those
should be deleted from the labels array!
method : str
The inversion method. Default 'dSPM'.
Other methods ('MNE', 'sLORETA', 'eLORETA') have not been tested.
Output arguments:
=================
source_identities : ndarray
Vector mapping sources to parcels or labels.
fwd_mat : ndarray [sensors x sources]
The forward operator matrix.
inv_mat : ndarray [sources x sensors]
The prepared inverse operator matrix.
"""
# counterpart to forwardOperator, [sources x sensors]. ### pick_ori None for free, 'normal' for fixed orientation.
K, noise_norm, vertno, source_nn = _assemble_kernel(
inv=inv_prep, label=None, method=method, pick_ori='normal')
# get source space
src = inv_prep.get('src')
vert_lh, vert_rh = src[0].get('vertno'), src[1].get('vertno')
# get labels, vertices and src-identities
src_ident_lh = np.full(len(vert_lh), -1, dtype='int')
src_ident_rh = np.full(len(vert_rh), -1, dtype='int')
# find sources that belong to the left hemisphere labels
n_labels = len(labels)
for la, label in enumerate(labels[:n_labels//2]):
for v in label.vertices:
src_ident_lh[np.where(vert_lh == v)] = la
# find sources that belong to the right hemisphere labels. Add by n left.
for la, label in enumerate(labels[n_labels//2:n_labels]):
for v in label.vertices:
src_ident_rh[np.where(vert_rh == v)] = la
src_ident_rh[np.where(src_ident_rh<0)] = src_ident_rh[np.where(
src_ident_rh<0)] -n_labels/2
src_ident_rh = src_ident_rh + (n_labels // 2)
source_identities = np.concatenate((src_ident_lh,src_ident_rh))
# extract fwd and inv matrices
fwd_mat = fwd['sol']['data'] # sensors x sources
"""If there are bad channels the corresponding rows can be missing
from the forward matrix. Not sure if the same can occur for the
inverse. This is not a problem if bad channels are interpolated.""" ### MOVED from weight_inverse_operator, just before """Compute the weighted operator."""
ind = np.asarray([i for i, ch in enumerate(fwd['info']['ch_names'])
if ch not in fwd['info']['bads']])
fwd_mat = fwd_mat[ind, :]
# noise_norm is used with dSPM and sLORETA. Other methods return null.
    if method not in ('dSPM', 'sLORETA'):
noise_norm = 1.
inv_mat = K * noise_norm # sources x sensors
return source_identities, fwd_mat, inv_mat | 6daded6f6df4abbd3dea105927ca39e02e64b970 | 5,004 |
from prompt_toolkit.interface import CommandLineInterface
from .containers import Window
from .controls import BufferControl
def find_window_for_buffer_name(cli, buffer_name):
"""
Look for a :class:`~prompt_toolkit.layout.containers.Window` in the Layout
that contains the :class:`~prompt_toolkit.layout.controls.BufferControl`
for the given buffer and return it. If no such Window is found, return None.
"""
assert isinstance(cli, CommandLineInterface)
for l in cli.layout.walk(cli):
if isinstance(l, Window) and isinstance(l.content, BufferControl):
if l.content.buffer_name == buffer_name:
return l | 7912cc96365744c3a4daa44a72f272b083121e3c | 5,006 |