content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---
def create_user():
""" Method that will create an user .
Returns:
user.id: The id of the created user
Raises:
If an error occurs, it is displayed in a flash error message and None is returned.
"""
try:
new_user = User(name=login_session['username'], email=login_session[
'email'], picture=login_session['picture'])
db_session.add(new_user)
db_session.commit()
user = db_session.query(User).filter_by(email=login_session['email']).one()
return user.id
except Exception as e:
flash('An error has occurred: {}'.format(str(e)), 'error')
return None | e5d555fb955523c6bff811efe308030de257e05a | 3,650,328 |
import numpy
import pysam
def buildStartAndEndWigData(thisbam, LOG_EVERY_N=1000, logger=None):
"""parses a bam file for 3' and 5' ends and builds these into wig-track data
Returns a dictionary of various gathered statistics."""
def formatToWig(wigdata):
""" take in the read position data and output in wigTrack format"""
this_wigTracks = {}
for key in wigdata.keys():
track = wigTrack()
track.wigtype = "fixedStep"
track.chr = key
track.start = 1
track.step = 1
track.position = numpy.arange(len(wigdata[key]))+track.start
this_wigTracks[key] = track
this_wigTracks[key].data = wigdata[key]
this_wigData = wigData()
this_wigData.tracks = this_wigTracks
return(this_wigData)
if type(thisbam) is str:
thisbam = pysam.AlignmentFile(thisbam, "rb")
all_wigdata={
"fwd":{
"five_prime":{},
"three_prime":{}
},
"rev":{
"five_prime":{},
"three_prime":{}
}
}
chromSizes = dict(zip(thisbam.references, thisbam.lengths))
for key in chromSizes.keys():
for strand in all_wigdata.keys():
for end in all_wigdata[strand].keys():
all_wigdata[strand][end][key] = numpy.zeros(chromSizes[key])
counter=0
nlogs=0
for read in thisbam.fetch():
if read.is_reverse:
all_wigdata["rev"]["five_prime"][read.reference_name][read.reference_end-1]+=1
all_wigdata["rev"]["three_prime"][read.reference_name][read.reference_start]+=1
else:
all_wigdata["fwd"]["five_prime"][read.reference_name][read.reference_start]+=1
all_wigdata["fwd"]["three_prime"][read.reference_name][read.reference_end-1]+=1
counter+=1
if (counter % LOG_EVERY_N)==0:
msg = "processed {these} reads...".format(these=(nlogs*LOG_EVERY_N))
if logger is not None:
logger.info(msg)
else:
print(msg)
nlogs+=1
msg = "Processed {counted} reads...".format(counted=counter)
if logger is not None:
logger.info(msg)
else:
print(msg)
msg = "Formatting wig tracks..."
if logger is not None:
logger.info(msg)
else:
print(msg)
for strand in all_wigdata.keys():
for end in all_wigdata[strand].keys():
all_wigdata[strand][end] = formatToWig(all_wigdata[strand][end])
return(all_wigdata, chromSizes) | f97a2b2c54f1cf2f978a17ef2b74435153ec4369 | 3,650,329 |
from pathlib import Path
from typing import List
from typing import Optional
import h5py
import numpy as np
import pandas as pd
from pandas import DataFrame
def time_series_h5(timefile: Path, colnames: List[str]) -> Optional[DataFrame]:
"""Read temporal series HDF5 file.
If :data:`colnames` is too long, it will be truncated. If it is too short,
additional column names will be deduced from the content of the file.
Args:
timefile: path of the TimeSeries.h5 file.
colnames: names of the variables expected in :data:`timefile` (may be
modified).
Returns:
A :class:`pandas.DataFrame` containing the time series, organized by
variables in columns and the time steps in rows.
"""
if not timefile.is_file():
return None
with h5py.File(timefile, 'r') as h5f:
dset = h5f['tseries']
_, ncols = dset.shape
ncols -= 1 # first is istep
h5names = h5f['names'].asstr()[len(colnames) + 1:]
_tidy_names(colnames, ncols, h5names)
data = dset[()]
pdf = pd.DataFrame(data[:, 1:],
index=np.int_(data[:, 0]), columns=colnames)
# remove duplicated lines in case of restart
return pdf.loc[~pdf.index.duplicated(keep='last')] | e28194bcfead5b188ea947efe51fc2bac052bea9 | 3,650,330 |
import jwt
def decode_jwt(token):
"""decodes a token and returns ID associated (subject) if valid"""
try:
payload = jwt.decode(token.encode(), current_app.config['SECRET_KEY'], algorithms=['HS256'])
return {"isError": False, "payload": payload["sub"]}
except jwt.ExpiredSignatureError as e:
current_app.logger.error("Token expired.")
raise ExpiredTokenError()
except jwt.InvalidTokenError as e:
current_app.logger.error("Invalid token.")
raise InvalidTokenError() | 15938fc40d2fb5b60c4ef5ccb3d6f3211fa5952f | 3,650,331 |
def format_point(point: Point) -> str:
"""Return a str representing a Point object.
Args:
point:
Point obj to represent.
Returns:
A string representing the Point with ° for degrees, ' for minutes and " for seconds.
Latitude is written before Longitude.
Example Output: [30°21'12", 10°21'22"]
"""
lat = to_sexagesimal(point.latitude)
long = to_sexagesimal(point.longitude)
return f'[{lat.deg}°{lat.min}\'{lat.sec}\", {long.deg}°{long.min}\'{long.sec}\"]' | 435a13d7198e6da99306c58d35249b666a03571c | 3,650,332 |
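The snippet above relies on `Point` and `to_sexagesimal` helpers that are not included; below is a minimal, hypothetical sketch of what they might look like (the names and fields are assumptions, not the original API):

from collections import namedtuple

# Hypothetical stand-ins for the helpers assumed by format_point.
Point = namedtuple("Point", ["latitude", "longitude"])
Sexagesimal = namedtuple("Sexagesimal", ["deg", "min", "sec"])

def to_sexagesimal(value: float) -> Sexagesimal:
    """Convert a decimal-degree value into degrees, minutes and (rounded) seconds."""
    deg = int(value)
    minutes_full = abs(value - deg) * 60
    minutes = int(minutes_full)
    seconds = round((minutes_full - minutes) * 60)
    return Sexagesimal(deg, minutes, seconds)

# format_point(Point(30.3533, 10.3561)) -> [30°21'12", 10°21'22"]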
def families_horizontal_correctors():
"""."""
return ['CH'] | a3f8de3e0d44ea72d2fb98733050b7a2d598c142 | 3,650,333 |
import requests
def variable_select_source_data_proxy(request):
"""
@summary: generic endpoint for fetching the source data of a drop-down selector
@param request:
@return:
"""
url = request.GET.get('url')
try:
response = requests.get(
url=url,
verify=False
)
except Exception as e:
logger.exception('variable select get data from url[url={url}] raise error: {error}'.format(url=url, error=e))
# Message text: "Abnormal data request: {error}"
text = _('请求数据异常: {error}').format(error=e)
data = [{'text': text, 'value': ''}]
return JsonResponse(data, safe=False)
try:
data = response.json()
except Exception:
try:
content = response.content.decode(response.encoding)
logger.exception('variable select get data from url[url={url}] is not a valid JSON: {data}'.format(
url=url, data=content[:500])
)
except Exception:
logger.exception('variable select get data from url[url={url}] data is not a valid JSON'.format(url=url))
# Message text: "Malformed response data: not valid JSON"
text = _('返回数据格式错误,不是合法 JSON 格式')
data = [{'text': text, 'value': ''}]
return JsonResponse(data, safe=False) | c8d131d6c7d0e766e0a4dacd1b0086090ee02c4f | 3,650,334 |
async def select_guild_lfg_events(guild_id: int) -> list[asyncpg.Record]:
"""Gets the lfg messages for a specific guild ordered by the youngest creation date"""
select_sql = f"""
SELECT
id, message_id, creation_time, voice_channel_id
FROM
lfgmessages
WHERE
guild_id = $1
ORDER BY
creation_time ASC;"""
async with (await get_connection_pool()).acquire(timeout=timeout) as connection:
return await connection.fetch(select_sql, guild_id) | 3a1b98191b75b4ec0bbdb5942a7b5b2d8c8dca48 | 3,650,335 |
def ValueToString(descriptor, field_desc, value):
"""Renders a field value as a PHP literal.
Args:
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
field_desc: The type descriptor for the field value to be rendered.
value: The value of the field to be rendered.
Returns:
A PHP literal for the provided value.
"""
if field_desc.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if value:
return '[%s]' % ', '.join([NonRepeatedValueToString(descriptor,
field_desc, s)
for s in value])
return '[]'
return NonRepeatedValueToString(descriptor, field_desc, value) | e40815ab6e3b55e1a7cb026c33a9c9324da900b4 | 3,650,336 |
def __load_txt_resource__(path):
"""
Loads a txt file template
:param path: path to the text file
:return: the opened file object (the caller is responsible for closing it)
"""
txt_file = open(path, "r")
return txt_file | 9e3632098c297d1f6407559a86f0d8dc7b68ea75 | 3,650,339 |
def parse_cpu_spec(spec):
"""Parse a CPU set specification.
:param spec: cpu set string eg "1-4,^3,6"
Each element in the list is either a single
CPU number, a range of CPU numbers, or a
caret followed by a CPU number to be excluded
from a previous range.
:returns: a set of CPU indexes
"""
cpuset_ids = set()
cpuset_reject_ids = set()
for rule in spec.split(','):
rule = rule.strip()
# Handle multi ','
if len(rule) < 1:
continue
# Note the count limit in the .split() call
range_parts = rule.split('-', 1)
if len(range_parts) > 1:
# So, this was a range; start by converting the parts to ints
try:
start, end = [int(p.strip()) for p in range_parts]
except ValueError:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Make sure it's a valid range
if start > end:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Add available CPU ids to set
cpuset_ids |= set(range(start, end + 1))
elif rule[0] == '^':
# Not a range, the rule is an exclusion rule; convert to int
try:
cpuset_reject_ids.add(int(rule[1:].strip()))
except ValueError:
raise exception.Invalid(_("Invalid exclusion "
"expression %r") % rule)
else:
# OK, a single CPU to include; convert to int
try:
cpuset_ids.add(int(rule))
except ValueError:
raise exception.Invalid(_("Invalid inclusion "
"expression %r") % rule)
# Use sets to handle the exclusion rules for us
cpuset_ids -= cpuset_reject_ids
return cpuset_ids | fa323a2fc5c27a6645f0abfb3b4878f6ae6390ee | 3,650,340 |
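A quick usage sketch of the spec grammar described in the docstring (assuming the surrounding `exception` and `_` helpers are importable):

# "1-4,^3,6" -> include CPUs 1..4 and 6, then exclude CPU 3
cpus = parse_cpu_spec("1-4,^3,6")
print(sorted(cpus))  # [1, 2, 4, 6]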
import typing
def distance_fit_from_transits() -> typing.List[float]:
"""
This uses the observers position from full transits and then the runway positions from all
the transit lines fitted to a
"""
((x_mean, x_std), (y_mean, y_std)) = observer_position_mean_std_from_full_transits()
transits = transit_x_axis_distances(x_mean, y_mean)
times = [v.time for v in transits]
dists = [v.distance for v in transits]
popt, pcov = curve_fit(
video_utils.polynomial_3,
times,
dists,
)
return popt | 57b201b1328528191b4926a66325ca026855f09a | 3,650,341 |
import numpy as np
import torch
def collate_fn_synthesize(batch):
"""
Create batch
Args : batch (tuple) : list of tuples (x, c), where x : (T,) and c : (T, D)
Returns : tuple of (network inputs x (B, C, T), network targets y (B, T, 1), conditioning c (B, D, T') or None, input lengths)
"""
local_conditioning = len(batch[0]) >= 2
if local_conditioning:
new_batch = []
for idx in range(len(batch)):
x, c = batch[idx]
if upsample_conditional_features:
assert len(x) % len(c) == 0 and len(x) // len(c) == hop_length
new_batch.append((x, c))
batch = new_batch
else:
pass
input_lengths = [len(x[0]) for x in batch]
max_input_len = max(input_lengths)
# x_batch : [B, T, 1]
x_batch = np.array([_pad_2d(x[0].reshape(-1, 1), max_input_len) for x in batch], dtype=np.float32)
assert len(x_batch.shape) == 3
y_batch = np.array([_pad(x[0], max_input_len) for x in batch], dtype=np.float32)
assert len(y_batch.shape) == 2
if local_conditioning:
max_len = max([len(x[1]) for x in batch])
c_batch = np.array([_pad_2d(x[1], max_len) for x in batch], dtype=np.float32)
assert len(c_batch.shape) == 3
# (B x C x T')
c_batch = torch.tensor(c_batch).transpose(1, 2).contiguous()
else:
c_batch = None
# Convert to channel first i.e., (B, C, T) / C = 1
x_batch = torch.tensor(x_batch).transpose(1, 2).contiguous()
# Add extra axis i.e., (B, T, 1)
y_batch = torch.tensor(y_batch).unsqueeze(-1).contiguous()
input_lengths = torch.tensor(input_lengths)
return x_batch, y_batch, c_batch, input_lengths | b784a45eb753d5d84ae3bf18fd4e7a09e891753d | 3,650,342 |
def max_(context, mapping, args, **kwargs):
"""Return the max of an iterable"""
if len(args) != 1:
# i18n: "max" is a keyword
raise error.ParseError(_("max expects one argument"))
iterable = evalwrapped(context, mapping, args[0])
try:
return iterable.getmax(context, mapping)
except error.ParseError as err:
# i18n: "max" is a keyword
hint = _("max first argument should be an iterable")
raise error.ParseError(bytes(err), hint=hint) | 068f77031fb83dc9d88446863e39f38c14a7478d | 3,650,343 |
from typing import Optional
from typing import Dict
from rclpy.duration import Duration
def dict_to_duration(time_dict: Optional[Dict[str, int]]) -> Duration:
"""Convert a QoS duration profile from YAML into an rclpy Duration."""
if time_dict:
try:
return Duration(seconds=time_dict['sec'], nanoseconds=time_dict['nsec'])
except KeyError:
raise ValueError(
'Time overrides must include both seconds (sec) and nanoseconds (nsec).')
else:
return Duration() | 7b20ed1ecbe496f55426562e791e591d8c5104e5 | 3,650,344 |
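For illustration, a QoS override such as {'sec': 1, 'nsec': 500000000} maps to a 1.5 s Duration; a small usage sketch (assuming rclpy is installed):

deadline = dict_to_duration({'sec': 1, 'nsec': 500000000})  # Duration of 1.5 seconds
default = dict_to_duration(None)                            # zero Duration
# dict_to_duration({'sec': 1})  # would raise ValueError: both 'sec' and 'nsec' are required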
def gen_ex_tracking_df(subj_dir):
"""Generate subject tracking error data frames from time series CSVs.
This method generates tracking error (Jaccard distance, CSA, T, AR) data
frames from raw time series CSV data for a single subject.
Args:
subj_dir (str): path to subject data directory, including final '/'
Returns:
pandas.DataFrame mean errors (Jaccard distance, CSA, T, AR)
pandas.DataFrame standard deviation errors (Jaccard distance, CSA, T, AR)
"""
df_iou = gen_jd_vals(subj_dir)
df_csa = gen_def_err_vals(subj_dir, 'CSA')
df_t = gen_def_err_vals(subj_dir, 'T')
df_tr = gen_def_err_vals(subj_dir, 'AR')
df_iou_mean = df_iou.mean().to_frame()
df_csa_mean = df_csa.mean().to_frame()
df_t_mean = df_t.mean().to_frame()
df_tr_mean = df_tr.mean().to_frame()
df_means = df_iou_mean.copy()
df_means.rename(columns={0: 'Jaccard Distance'}, inplace=True)
df_means['CSA'] = df_csa_mean[0]
df_means['T'] = df_t_mean[0]
df_means['AR'] = df_tr_mean[0]
df_iou_std = df_iou.std().to_frame()
df_csa_std = df_csa.std().to_frame()
df_t_std = df_t.std().to_frame()
df_tr_std = df_tr.std().to_frame()
df_stds = df_iou_std.copy()
df_stds.rename(columns={0: 'Jaccard Distance'}, inplace=True)
df_stds['CSA'] = df_csa_std[0]
df_stds['T'] = df_t_std[0]
df_stds['AR'] = df_tr_std[0]
return df_means, df_stds | 259f2533bf8a0d9a03c250fc937c4a99903c8994 | 3,650,345 |
import numpy as np
import sklearn.metrics
def mse(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""Compute the MSE (Mean Squared Error)."""
return sklearn.metrics.mean_squared_error(y_true, y_pred) | f9c669b04bc6a44bcd983c79dec5d630a6acbd09 | 3,650,346 |
import torch
def policy_improvement(env, V, gamma):
"""
Obtain an improved policy based on the values
@param env: OpenAI Gym environment
@param V: policy values
@param gamma: discount factor
@return: the policy
"""
n_state = env.observation_space.n
n_action = env.action_space.n
policy = torch.zeros(n_state)
for state in range(n_state):
v_actions = torch.zeros(n_action)
for action in range(n_action):
for trans_prob, new_state, reward, _ in env.env.P[state][action]:
v_actions[action] += trans_prob * (reward + gamma * V[new_state])
policy[state] = torch.argmax(v_actions)
return policy | 10587e5d4fb08158eff06a4305de6c02fc2d878c | 3,650,347 |
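A usage sketch, assuming a Gym environment whose unwrapped form exposes the transition table via env.env.P (e.g. FrozenLake) and a value tensor V from a prior policy-evaluation step; the environment name is an assumption, not part of the original code:

import gym
import torch

env = gym.make('FrozenLake-v1')           # any env exposing env.env.P works
V = torch.zeros(env.observation_space.n)  # e.g. output of a policy-evaluation step
policy = policy_improvement(env, V, gamma=0.99)
print(policy)  # tensor of greedy actions, one entry per state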
def loudness_zwst_freq(spectrum, freqs, field_type="free"):
"""Zwicker-loudness calculation for stationary signals
Calculates the acoustic loudness according to Zwicker method for
stationary signals.
Normative reference:
ISO 532:1975 (method B)
DIN 45631:1991
ISO 532-1:2017 (method 1)
The code is based on BASIC program published in "Program for
calculating loudness according to DIN 45631 (ISO 532B)", E.Zwicker
and H.Fastl, J.A.S.J (E) 12, 1 (1991).
Note that due to normative continuity, as defined in the
preceding standards, the method is in accordance with
ISO 226:1987 equal loudness contours (instead of ISO 226:2003)
Parameters
----------
spectrum : numpy.array
A RMS frequency spectrum, size (Nfreq, Ntime)
freqs : list
List of the corresponding frequencies, size (Nfreq,) or (Nfreq, Ntime)
field_type : str
Type of soundfield corresponding to spec_third ("free" by
default or "diffuse")
Outputs
-------
N : float or numpy.array
Calculated loudness [sones], size (Ntime,).
N_specific : numpy.ndarray
Specific loudness [sones/bark], size (Nbark, Ntime).
bark_axis : numpy.array
Frequency axis in bark, size (Nbark,).
"""
if len(spectrum) != len(freqs):
raise ValueError('Input spectrum and frequency axis must have the same shape')
# Compute third octave band spectrum
spec_third, _ = noct_synthesis(spectrum, freqs, fmin=24, fmax=12600)
# Compute dB values
spec_third = amp2db(spec_third, ref=2e-5)
# Compute main loudness
Nm = _main_loudness(spec_third, field_type)
#
# Computation of specific loudness pattern and integration of overall
# loudness by attaching slopes towards higher frequencies
N, N_specific = _calc_slopes(Nm)
# Define Bark axis
bark_axis = np.linspace(0.1, 24, int(24 / 0.1))
return N, N_specific, bark_axis | 391e33784e355675b68b3e95ade084cbcb86d5b5 | 3,650,348 |
def normU(u):
"""
A function to scale a Uranium map. The correct scaling is not yet known, so this is currently the identity.
"""
return u | e4bd83a26c502e9129d18091e807e17ab3294fd1 | 3,650,350 |
def exact_riemann_solution(q_l, q_r, gamma=1.4, phase_plane_curves=False):
"""Return the exact solution to the Riemann problem with initial states
q_l, q_r. The solution is given in terms of a list of states, a list of
speeds (each of which may be a pair in case of a rarefaction fan), and a
function reval(xi) that gives the solution at a point xi=x/t.
The input and output vectors are the conserved quantities.
If phase_plane_curves==True, then the appropriate Hugoniot Locus and/or
integral curve is returned for the 1- and 3-waves.
"""
rho_l, u_l, p_l = conservative_to_primitive(*q_l)
rho_r, u_r, p_r = conservative_to_primitive(*q_r)
# Compute left and right state sound speeds
c_l = sound_speed(rho_l, p_l, gamma)
c_r = sound_speed(rho_r, p_r, gamma)
ws = np.zeros(5)
wave_types = ['', 'contact', '']
if rho_l == 0:
# 3-rarefaction connecting right state to vacuum
p = 0.
rho_l_star = 0.
rho_r_star = 0.
u_vacuum_r = integral_curve_3(0., rho_r, u_r, p_r, gamma)
u = u_vacuum_r
ws[0] = 0.
ws[1] = 0.
ws[2] = 0.
ws[3] = u_vacuum_r
ws[4] = u_r + c_r
wave_types = ['contact', 'contact', 'raref']
elif rho_r == 0:
# 1-rarefaction connecting left state to vacuum
p = 0
rho_l_star = 0.
rho_r_star = 0.
u_vacuum_l = integral_curve_1(0., rho_l, u_l, p_l, gamma)
u = u_vacuum_l
ws[0] = u_l - c_l
ws[1] = u_vacuum_l
ws[2] = 0.
ws[3] = 0.
ws[4] = 0.
wave_types = ['raref', 'contact', 'contact']
elif u_l - u_r + 2*(c_l+c_r)/(gamma-1.) < 0:
# Middle states are vacuum
p = 0.
rho_l_star = 0.
rho_r_star = 0.
u_vacuum_l = integral_curve_1(0., rho_l, u_l, p_l, gamma)
u_vacuum_r = integral_curve_3(0., rho_r, u_r, p_r, gamma)
u = 0.5*(u_vacuum_l + u_vacuum_r)
ws[0] = u_l - c_l
ws[1] = u_vacuum_l
ws[2] = u
ws[3] = u_vacuum_r
ws[4] = u_r + c_r
wave_types = ['raref', 'contact', 'raref']
else:
# Check whether the 1-wave is a shock or rarefaction
def phi_l(p):
if p >= p_l: return hugoniot_locus_1(p, rho_l, u_l, p_l, gamma)
else: return integral_curve_1(p, rho_l, u_l, p_l, gamma)
# Check whether the 1-wave is a shock or rarefaction
def phi_r(p):
if p >= p_r: return hugoniot_locus_3(p, rho_r, u_r, p_r, gamma)
else: return integral_curve_3(p, rho_r, u_r, p_r, gamma)
phi = lambda p: phi_l(p)-phi_r(p)
exp = (1.-gamma)/(2.*gamma)
guess = ((c_l + c_r - (gamma-1.)*(u_r-u_l)/2.)/(c_l*p_l**exp+c_r*p_r**exp))**(-1./exp)
# Compute middle state p, u by finding curve intersection
p, info, ier, msg = fsolve(phi, guess, full_output=True, xtol=1.e-14)
# For strong rarefactions, sometimes fsolve needs help
if ier != 1:
p, info, ier, msg = fsolve(phi, guess, full_output=True, factor=0.1, xtol=1.e-10)
# This should not happen:
if ier != 1:
print('Warning: fsolve did not converge.')
print(msg)
u = phi_l(p)
ws[2] = u
# Find shock and rarefaction speeds
if p > p_l:
wave_types[0] = 'shock'
rho_l_star = rho_l*(1+beta(gamma)*p/p_l)/(p/p_l+beta(gamma))
ws[0] = (rho_l*u_l - rho_l_star*u)/(rho_l - rho_l_star)
ws[1] = ws[0]
else:
wave_types[0] = 'raref'
rho_l_star = (p/p_l)**(1./gamma) * rho_l
c_l_star = sound_speed(rho_l_star, p, gamma)
ws[0] = u_l - c_l
ws[1] = u - c_l_star
if p > p_r:
wave_types[2] = 'shock'
rho_r_star = rho_r*(1+beta(gamma)*p/p_r)/(p/p_r+beta(gamma))
ws[4] = (rho_r*u_r - rho_r_star*u)/(rho_r - rho_r_star)
ws[3] = ws[4]
else:
wave_types[2] = 'raref'
rho_r_star = (p/p_r)**(1./gamma) * rho_r
c_r_star = sound_speed(rho_r_star, p, gamma)
ws[3] = u + c_r_star
ws[4] = u_r + c_r
# Find solution inside rarefaction fans (in primitive variables)
def raref1(xi):
u1 = ((gamma-1.)*u_l + 2*(c_l + xi))/(gamma+1.)
rho1 = (rho_l**gamma*(u1-xi)**2/pospart(gamma*p_l))**(1./(gamma-1.))
p1 = p_l*(rho1/pospart(rho_l))**gamma
return rho1, u1, p1
def raref3(xi):
u3 = ((gamma-1.)*u_r - 2*(c_r - xi))/(gamma+1.)
rho3 = (rho_r**gamma*(xi-u3)**2/pospart(gamma*p_r))**(1./(gamma-1.))
p3 = p_r*(rho3/pospart(rho_r))**gamma
return rho3, u3, p3
q_l_star = np.squeeze(np.array(primitive_to_conservative(rho_l_star,u,p)))
q_r_star = np.squeeze(np.array(primitive_to_conservative(rho_r_star,u,p)))
states = np.column_stack([q_l,q_l_star,q_r_star,q_r])
speeds = [[], ws[2], []]
if wave_types[0] in ['shock','contact']:
speeds[0] = ws[0]
else:
speeds[0] = (ws[0],ws[1])
if wave_types[2] in ['shock','contact']:
speeds[2] = ws[3]
else:
speeds[2] = (ws[3],ws[4])
def reval(xi):
r"""Returns the Riemann solution in primitive variables for any
value of xi = x/t.
"""
rar1 = raref1(xi)
rar3 = raref3(xi)
rho_out = (xi<=ws[0] )*rho_l \
+ (xi>ws[0])*(xi<=ws[1])*rar1[0] \
+ (xi>ws[1])*(xi<=ws[2] )*rho_l_star \
+ (xi>ws[2]) *(xi<=ws[3])*rho_r_star \
+ (xi>ws[3])*(xi<=ws[4])*rar3[0] \
+ (xi>ws[4] )*rho_r
u_out = (xi<=ws[0] )*u_l \
+ (xi>ws[0])*(xi<=ws[1])*rar1[1] \
+ (xi>ws[1])*(xi<=ws[2] )*u \
+ (xi>ws[2] )*(xi<=ws[3])*u \
+ (xi>ws[3])*(xi<=ws[4])*rar3[1] \
+ (xi>ws[4] )*u_r
p_out = (xi<=ws[0] )*p_l \
+ (xi>ws[0])*(xi<=ws[1])*rar1[2] \
+ (xi>ws[1])*(xi<=ws[2] )*p \
+ (xi>ws[2] )*(xi<=ws[3])*p \
+ (xi>ws[3])*(xi<=ws[4])*rar3[2] \
+ (xi>ws[4] )*p_r
return primitive_to_conservative(rho_out,u_out,p_out)
if phase_plane_curves:
if wave_types[0] == 'raref':
phi1 = lambda p: integral_curve_1(p, rho_l, u_l, p_l, gamma)
elif wave_types[0] == 'shock':
phi1 = lambda p: hugoniot_locus_1(p, rho_l, u_l, p_l, gamma)
else:
phi1 = lambda p: p
if wave_types[2] == 'raref':
phi3 = lambda p: integral_curve_3(p, rho_r, u_r, p_r, gamma)
elif wave_types[2] == 'shock':
phi3 = lambda p: hugoniot_locus_3(p, rho_r, u_r, p_r, gamma)
else:
phi3 = lambda p: p
return states, speeds, reval, wave_types, (p, phi1, phi3)
else:
return states, speeds, reval, wave_types | a5baa391d88a56026cc02a1a1fa841325e712ea0 | 3,650,351 |
def show_edge_scatter(N, s1, s2, t1, t2, d, dmax=None, fig_ax=None):
"""Draw the cell-edge contour and the displacement vectors.
The contour is drawn using a scatter plot to color-code the displacements."""
if fig_ax is None:
fig, ax = plt.subplots()
else:
fig, ax = fig_ax
plt.figure(fig.number)
# Evaluate splines at window locations and on fine-resolution grid
c1 = splineutils.splevper(t1, s1)
c2 = splineutils.splevper(t2, s2)
c1p = splev(np.linspace(0, 1, N + 1), s1)
c2p = splev(np.linspace(0, 1, N + 1), s2)
# Interpolate displacements
# d = 0.5 + 0.5 * d / np.max(np.abs(d))
if len(d) < N + 1:
d = np.interp(np.linspace(0, 1, N + 1), t1, d, period=1)
if dmax is None:
dmax = np.max(np.abs(d))
if dmax == 0:
dmax = 1
# Plot results
# matplotlib.use('PDF')
lw = 1
s = 1 # Scaling factor for the vectors
ax.plot(c1p[0], c1p[1], "b", zorder=50, lw=lw)
ax.plot(c2p[0], c2p[1], "r", zorder=100, lw=lw)
# plt.scatter(c1p[0], c1p[1], c=d, cmap='bwr', vmin=-dmax, vmax=dmax, zorder=50, s1=lw)
# # plt.colorbar(label='Displacement [pixels]')
for j in range(len(t2)):
ax.arrow(
c1[0][j],
c1[1][j],
s * (c2[0][j] - c1[0][j]),
s * (c2[1][j] - c1[1][j]),
color="y",
zorder=200,
lw=lw,
)
# plt.arrow(c1[0][j], c1[1][j], s1 * u[0][j], s1 * u[1][j], color='y', zorder=200, lw=lw) # Show normal to curve
ax.arrow(
c1[0][0],
c1[1][0],
s * (c2[0][0] - c1[0][0]),
s * (c2[1][0] - c1[1][0]),
color="c",
zorder=400,
lw=lw,
)
fig.tight_layout()
return fig, ax | d0575ec425828e895f24c3ff8cbf9f472ba62947 | 3,650,352 |
def get_A2_const(alpha1, alpha2, lam_c, A1):
"""Function to compute the constant A2.
Args:
alpha1 (float): The alpha1 parameter of the WHSCM.
alpha2 (float): The alpha2 parameter of the WHSCM.
lam_c (float): The switching point between the
two exponents of the double power-laws
in the WHSCM.
A1 (float): The A1 constant of the WHSCM.
Returns:
A2 (float): The A2 constant of the WHSCM.
"""
A2 = A1 * (lam_c**(alpha2 - alpha1))
return A2 | 16fe12e9ef9d72cfe7250cf840e222512409d377 | 3,650,353 |
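A quick numerical check of the formula A2 = A1 * lam_c**(alpha2 - alpha1), with illustrative parameter values only:

# alpha1=2.0, alpha2=3.0, lam_c=10.0, A1=1.5  ->  A2 = 1.5 * 10.0**1.0 = 15.0
print(get_A2_const(2.0, 3.0, 10.0, 1.5))  # 15.0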
import tensorflow as tf
def _parse_seq_tf_example(example, uint8_features, shapes):
"""Parse tf.Example containing one or two episode steps."""
def to_feature(key, shape):
if key in uint8_features:
return tf.io.FixedLenSequenceFeature(
shape=[], dtype=tf.string, allow_missing=True)
else:
return tf.io.FixedLenSequenceFeature(
shape=shape, dtype=tf.float32, allow_missing=True)
feature_map = {}
for k, v in shapes.items():
feature_map[k] = to_feature(k, v)
parsed = tf.io.parse_single_example(example, features=feature_map)
observation = {}
restructured = {}
for k in parsed.keys():
if 'observation' not in k:
restructured[k] = parsed[k]
continue
if k in uint8_features:
observation[k.replace('observation/', '')] = tf.reshape(
tf.io.decode_raw(parsed[k], out_type=tf.uint8), (-1,) + shapes[k])
else:
observation[k.replace('observation/', '')] = parsed[k]
restructured['observation'] = observation
restructured['length'] = tf.shape(restructured['action'])[0]
return restructured | 5e0e4a6d3f26c28eb6e5dfe12e9295eb5b53979c | 3,650,355 |
def unique_list(a_list, unique_func=None, replace=False):
"""Unique a list like object.
- collection: list like object
- unique_func: the filter functions to return a hashable sign for unique
- replace: the following replace the above with the same sign
Return the unique subcollection of collection.
Example:
data = [(1, 2), (2, 1), (2, 3), (1, 2)]
unique_func = lambda x: tuple(sorted(x))
unique(data) -> [(1, 2), (2, 1), (2, 3)]
unique(data, unique_func) -> [(1, 2), (2, 3)]
unique(data, unique_func, replace=True) -> [(2, 1), (2, 3)]
"""
unique_func = unique_func or (lambda x: x)
result = {}
for item in a_list:
hashable_sign = unique_func(item)
if hashable_sign not in result or replace:
result[hashable_sign] = item
return list(result.values()) | 8d7957a8dffc18b82e8a45129ba3634c28dd0d52 | 3,650,356 |
def calculate_attitude_angle(eccentricity_ratio):
"""Calculates the attitude angle based on the eccentricity ratio.
Parameters
----------
eccentricity_ratio: float
The ratio between the journal displacement, called just eccentricity, and
the radial clearance.
Returns
-------
float
Attitude angle
Examples
--------
>>> from ross.fluid_flow.fluid_flow import fluid_flow_example
>>> my_fluid_flow = fluid_flow_example()
>>> calculate_attitude_angle(my_fluid_flow.eccentricity_ratio) # doctest: +ELLIPSIS
1.5...
"""
return np.arctan(
(np.pi * (1 - eccentricity_ratio ** 2)**(1/2)) /
(4 * eccentricity_ratio)
) | 24dcb463a0ebdcab8582309bf9ff44fdbfc44686 | 3,650,357 |
import torch
import pickle
def enc_obj2bytes(obj, max_size=4094):
"""
Encode Python objects to PyTorch byte tensors
"""
assert max_size <= MAX_SIZE_LIMIT
byte_tensor = torch.zeros(max_size, dtype=torch.uint8)
obj_enc = pickle.dumps(obj)
obj_size = len(obj_enc)
if obj_size > max_size:
raise Exception(
'objects too large: object size {}, max size {}'.format(
obj_size, max_size
)
)
byte_tensor[0] = obj_size // 256
byte_tensor[1] = obj_size % 256
byte_tensor[2:2+obj_size] = torch.ByteTensor(list(obj_enc))
return byte_tensor | bca30d1a88db42f66bdd978386a1c4d24c0c790b | 3,650,358 |
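A decoding counterpart is implied but not shown; the sketch below is a hypothetical dec_bytes2obj (not the project's actual function) that reverses the 2-byte size prefix and pickled payload layout used above:

import pickle
import torch

def dec_bytes2obj(byte_tensor: torch.Tensor):
    """Hypothetical inverse of enc_obj2bytes: read the size prefix, then unpickle."""
    obj_size = int(byte_tensor[0]) * 256 + int(byte_tensor[1])
    obj_enc = bytes(byte_tensor[2:2 + obj_size].tolist())
    return pickle.loads(obj_enc)

# Round trip
t = enc_obj2bytes({"answer": 42})
assert dec_bytes2obj(t) == {"answer": 42}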
import numpy as np
def featCompression (feats, deltas, deltas2):
"""
Returns augmented feature vectors for all cases.
"""
feats_total = np.zeros (78)
for i in range (len (feats)):
row_total = np.array ([])
feat_mean = np.mean (np.array (feats[i]), axis = 0)
delt_mean = np.mean (np.array (deltas[i]), axis = 0)
delt2_mean = np.mean (np.array (deltas2[i]), axis = 0)
feat_std = np.std (np.array (feats[i]), axis = 0)
delt_std = np.std (np.array (deltas[i]), axis = 0)
delt2_std = np.std (np.array (deltas2[i]), axis = 0)
row_total = np.hstack ((feat_mean, feat_std, delt_mean, delt_std, \
delt2_mean, delt2_std))
feats_total = np.vstack ((feats_total, row_total))
return feats_total[1:, :] | 033b53fec9cf920daadb3ba5bef2fcce7cc11d21 | 3,650,359 |
def _array_indexing(array, key, key_dtype, axis):
"""Index an array or scipy.sparse consistently across NumPy version."""
if np_version < parse_version('1.12') or issparse(array):
# Remove the check for NumPy when using >= 1.12
# check if we have an boolean array-likes to make the proper indexing
if key_dtype == 'bool':
key = np.asarray(key)
if isinstance(key, tuple):
key = list(key)
return array[key] if axis == 0 else array[:, key] | b8c04647ad79ce8ffd973d2b13fe7231854822de | 3,650,360 |
import torch
def test_binary(test_data, model, criterion, batch_size, device, generate_batch=None):
"""Calculate performance of a Pytorch binary classification model
Parameters
----------
test_data : torch.utils.data.Dataset
Pytorch dataset
model: torch.nn.Module
Pytorch Model
criterion: function
Loss function
batch_size : int
Number of observations per batch
device : str
Name of the device used for the model
generate_batch : function
Function defining required pre-processing steps
Returns
-------
Float
Loss score
Float:
Accuracy Score
"""
# Set model to evaluation mode
model.eval()
test_loss = 0
test_acc = 0
# Create data loader
data = DataLoader(test_data, batch_size=batch_size, collate_fn=generate_batch)
# Iterate through data by batch of observations
for feature, target_class in data:
# Load data to specified device
feature, target_class = feature.to(device), target_class.to(device).to(torch.float32)
# Set no update to gradients
with torch.no_grad():
# Make predictions
output = model(feature)
# Calculate loss for given batch
loss = criterion(output, target_class.unsqueeze(1))
# Calculate global loss
test_loss += loss.item()
# Calculate global accuracy
test_acc += (output.argmax(1) == target_class).sum().item()
return test_loss / len(test_data), test_acc / len(test_data) | 9bc8fefffca3d484abbcac48836fde3ca7b5287a | 3,650,362 |
import numpy as np
def linear_imputer(y, missing_values=np.nan, copy=True):
"""
Replace missing values in y with values from a linear interpolation on their position in the array.
Parameters
----------
y: list or `numpy.array`
missing_values: number, string, np.nan or None, default=`np.nan`
The placeholder for the missing values. All occurrences of `missing_values` will be imputed.
copy : bool, default=True
If True, a copy of X will be created. If False, imputation will be done in-place whenever possible.
Returns
-------
`numpy.array` : array with `missing_values` imputed
"""
x = np.arange(len(y))
if missing_values is np.nan:
mask_missing = np.isnan(y)
else:
mask_missing = y == missing_values
imputed_values = np.interp(x[mask_missing], x[~mask_missing], y[~mask_missing])
if copy:
yy = np.copy(y)
yy[mask_missing] = imputed_values
return yy
else:
y[mask_missing] = imputed_values
return y | 2557e7647e8d7e0246bb15c57605b75bf3d4131b | 3,650,363 |
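A small usage sketch showing NaN gaps filled by position-based linear interpolation:

import numpy as np

y = np.array([1.0, np.nan, 3.0, np.nan, 5.0])
print(linear_imputer(y))       # [1. 2. 3. 4. 5.]
linear_imputer(y, copy=False)  # same result, but y is modified in place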
def gap2d_cx(cx):
"""Accumulates complexity of gap2d into cx = (h, w, flops, params, acts)."""
cx["h"] = 1
cx["w"] = 1
return cx | 28f6ba5f166f0b21674dfd507871743243fb4737 | 3,650,364 |
import requests
from unittest.mock import MagicMock
from requests.exceptions import HTTPError
def test_is_not_healthy(requests_mock):
"""
Test is not healthy response
"""
metadata = Gen3Metadata("https://example.com")
def _mock_request(url, **kwargs):
assert url.endswith("/_status")
mocked_response = MagicMock(requests.Response)
mocked_response.status_code = 500
mocked_response.text = "Not Healthy"
mocked_response.json.return_value = {}
mocked_response.raise_for_status.side_effect = HTTPError("uh oh")
return mocked_response
requests_mock.side_effect = _mock_request
response = metadata.is_healthy()
assert not response | 9b6fdb7b822ef83e0441bdafa1c87ea07ad901ad | 3,650,365 |
import numpy as np
def kernelTrans(X, A, kTup):
"""
Map the data into a higher-dimensional space via a kernel function
Parameters:
X - data matrix
A - vector of a single sample
kTup - tuple containing the kernel information
Returns:
K - the computed kernel K
"""
m,n = np.shape(X)
K = np.mat(np.zeros((m,1)))
if kTup[0] == 'lin': K = X * A.T # linear kernel: inner product only
elif kTup[0] == 'rbf': # Gaussian (RBF) kernel, computed from the Gaussian kernel formula
for j in range(m):
deltaRow = X[j,:] - A
K[j] = deltaRow*deltaRow.T
K = np.exp(K/(-1*kTup[1]**2)) # compute the Gaussian kernel K
else: raise NameError('Unrecognized kernel function')
return K # return the computed kernel K | da43ab6aeff623b32d3287ad07acdc1ef5ec4bc3 | 3,650,366 |
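A usage sketch with illustrative data only, showing the (kernel name, sigma) tuple convention used above:

import numpy as np

X = np.mat([[0.0, 0.0],
            [1.0, 0.0],
            [0.0, 2.0]])
K_rbf = kernelTrans(X, X[0, :], ('rbf', 1.3))  # RBF similarities of every row to row 0
K_lin = kernelTrans(X, X[0, :], ('lin', 0))    # plain inner products with row 0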
def getBusEquipmentData(bhnd,paraCode):
"""
Retrieves the handles of all equipment of a given type (paraCode)
attached to each bus in bhnd.
Args :
bhnd : [bus handle]
paraCode : code data (BR_nHandle,GE_nBusHnd...)
Returns:
nested list of shape [len(bhnd)][number of attached equipment]
Raises:
OlrxAPIException
"""
# get data
res = []
vt = paraCode//100
val1 = setValType(vt,0)
for bhnd1 in bhnd:
r2 = []
while ( OLXAPI_OK == OlxAPI.GetBusEquipment( bhnd1, c_int(paraCode), byref(val1) ) ) :
if vt==VT_STRING:
r2.append((val1.value).decode("UTF-8"))
else:
r2.append(val1.value)
res.append(r2)
return res | 6e0e63846c7b934edbe9de078c7ad8561766e58d | 3,650,367 |
from typing import List
from typing import Counter
import click
def build_and_register(
client: "prefect.Client",
flows: "List[FlowLike]",
project_id: str,
labels: List[str] = None,
force: bool = False,
) -> Counter:
"""Build and register all flows.
Args:
- client (prefect.Client): the prefect client to use
- flows (List[FlowLike]): the flows to register
- project_id (str): the project id in which to register the flows
- labels (List[str], optional): Any extra labels to set on all flows
- force (bool, optional): If false (default), an idempotency key will
be used to avoid unnecessary register calls.
Returns:
- Counter: stats about the number of successful, failed, and skipped flows.
"""
# Finish preparing flows to ensure a stable hash later
prepare_flows(flows, labels)
# Group flows by storage instance.
storage_to_flows = defaultdict(list)
for flow in flows:
storage = flow.storage if isinstance(flow, prefect.Flow) else None
storage_to_flows[storage].append(flow)
# Register each flow, building storage as needed.
# Stats on success/fail/skip rates are kept for later display
stats = Counter(registered=0, errored=0, skipped=0)
for storage, flows in storage_to_flows.items():
# Build storage if needed
if storage is not None:
click.echo(f" Building `{type(storage).__name__}` storage...")
try:
storage.build()
except Exception as exc:
click.secho(" Error building storage:", fg="red")
log_exception(exc, indent=6)
red_error = click.style("Error", fg="red")
for flow in flows:
click.echo(f" Registering {flow.name!r}... {red_error}")
stats["errored"] += 1
continue
for flow in flows:
click.echo(f" Registering {flow.name!r}...", nl=False)
try:
if isinstance(flow, box.Box):
serialized_flow = flow
else:
serialized_flow = flow.serialize(build=False)
flow_id, flow_version, is_new = register_serialized_flow(
client=client,
serialized_flow=serialized_flow,
project_id=project_id,
force=force,
)
except Exception as exc:
click.secho(" Error", fg="red")
log_exception(exc, indent=4)
stats["errored"] += 1
else:
if is_new:
click.secho(" Done", fg="green")
click.echo(f" └── ID: {flow_id}")
click.echo(f" └── Version: {flow_version}")
stats["registered"] += 1
else:
click.secho(" Skipped", fg="yellow")
stats["skipped"] += 1
return stats | 7e4634578b44b7b5a1743fa0cfab21c6c551930b | 3,650,368 |
import xml.etree.ElementTree as ET
def creer_element_xml(nom_elem, params):
"""
Create an XML element of the relation that will provide one of the attributes.
For example, to add the FANTOIR code to a relation, the XML code must be <tag k='ref:FR:FANTOIR' v='9300500058T' />.
For that, we need the element name (here, tag) and a dictionary of parameters named params that maps each key to a value (here
{'k':'ref:FR:FANTOIR', 'v':'9300500058T'})
:param nom_elem:
:type nom_elem: str
:param params:
:type params: dict
:return: the desired XML element
:rtype: xml.etree.ElementTree.Element
"""
# Initialize the XML object
elem = ET.Element(nom_elem)
ajouter_atrributs_element_xml(elem, params)
return elem | a4cd29b82531c7bc01864ae79de87643afeb8276 | 3,650,371 |
def get_display():
"""Getter function for the display keys
Returns:
list: list of dictionary keys
"""
return data.keys() | dcc8957faf30db15282d2e67025cd6d5fd07e9dd | 3,650,372 |
def calculate_class_probabilities(summaries, row) -> dict():
"""
Calculate the probability of a value using the Gaussian Probability Density Function from inputs:
summaries: prepared summaries of dataset
row: a row in the dataset for predicting its label (a row of X_test)
This function uses the statistics calculated from training data to calculate probabilities for the testing dataset (new data). Probabilities are calculated separately for each class. First, we calculate the probability that a new X vector from the testing dataset belongs to the first class. Then, we calculate the probabilities that it belongs to the second class, and so on for all the classes identified in the training dataset.
The probability that a new X vector from the testing dataset belongs to a class is calculated as follows:
P(class|data) = P(X|class) * P(class)
Note we have simplified the Bayes theorem by removing the division as we do not strictly need a number between 0 and 1 to predict the class the new data belongs to as we will be simply taking the maximum result from the above equation for each class.
It returns a dictionary where each key is the class label and the values are the probabibilities of that row belonging to each class on the dataset.
"""
# total number of training records calculated from the counts stored in the summary statistics
# note that the count column has the same value for all rows, and hence picking up item [0] will suffice
total_rows = sum([summaries[label]['count'][0] for label in summaries])
probabilities = dict()
for class_value, class_summaries in summaries.items():
probabilities[class_value] = summaries[class_value]['count'][0]/float(total_rows)
for i in range(len(class_summaries)-1):
mean, stdev, _ = class_summaries.iloc[i]
# probabilities are multiplied together as they accumulate.
probabilities[class_value] *= calculate_probability(row[i], mean, stdev)
# normalize probabilities so that they sum 1
max_prob = probabilities[max(probabilities, key=probabilities.get)]
min_prob = probabilities[min(probabilities, key=probabilities.get)]
for class_value, probability in probabilities.items():
if (max_prob - min_prob) > 0:
probabilities[class_value] = (probability - min_prob) / (max_prob - min_prob)
else:
probabilities[class_value] = float(0.0)
sum_prob = sum(probabilities.values())
for class_value, probability in probabilities.items():
if sum_prob > 0:
probabilities[class_value] = probability / sum_prob
return probabilities | ffdff84e27fe5e76d66176d5d1c862f16b1ee494 | 3,650,373 |
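The function relies on a calculate_probability helper (a Gaussian probability density) that is not shown in this snippet; a minimal sketch of what it is assumed to compute:

import math

def calculate_probability(x, mean, stdev):
    """Gaussian probability density of x given mean and standard deviation (assumed helper)."""
    if stdev == 0:
        return 1.0 if x == mean else 0.0
    exponent = math.exp(-((x - mean) ** 2) / (2 * stdev ** 2))
    return (1 / (math.sqrt(2 * math.pi) * stdev)) * exponent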
from typing import Sequence
from typing import Any
def find(sequence: Sequence, target_element: Any) -> int:
"""Find the index of the first occurrence of target_element in sequence.
Args:
sequence: A sequence which to search through
target_element: An element to search in the sequence
Returns:
The index of target_element's first occurrence, -1 if it was not found or the sequence is empty
"""
if not sequence:
return -1
try:
return sequence.index(target_element)
except ValueError:
return -1 | 20edfae45baafa218d8d7f37e0409e6f4868b75d | 3,650,375 |
import numpy as np
from scipy.interpolate import interp1d
def read_time_data(fname, unit):
"""
Read time data (csv) from file and return a linear interpolant of the data scaled by unit
"""
data = np.loadtxt(fname, delimiter=',')
t = data[:,0]
x = data[:,1]*unit
f = interp1d(t, x, kind='linear', bounds_error=False, fill_value=x[0])
return f | 588c3bc472aa05eb0ead983405f22ecdf260687c | 3,650,376 |
from pathlib import Path
from typing import List
from typing import Dict
import json
def read_nli_data(p: Path) -> List[Dict]:
"""Read dataset which has been converted to nli form"""
with open(p) as f:
data = json.load(f)
return data | 2218d8dc06e3b9adfe89cb780a9ef4e7cb111d14 | 3,650,378 |
import numpy as np
def stanley_control(state, cx, cy, cyaw, last_target_idx):
"""
Stanley steering control.
:param state: (State object) current vehicle state
:param cx: ([float]) path x coordinates
:param cy: ([float]) path y coordinates
:param cyaw: ([float]) path yaw angles
:param last_target_idx: (int) index of the previous target point
:return: (float, int) steering angle delta and current target index
"""
current_target_idx, error_front_axle = calc_target_index(state, cx, cy)
if last_target_idx >= current_target_idx:
current_target_idx = last_target_idx
# theta_e corrects the heading error
theta_e = normalize_angle(cyaw[current_target_idx] - state.yaw)
# theta_d corrects the cross track error
theta_d = np.arctan2(k * error_front_axle, state.v)
# Steering control
delta = theta_e + theta_d
return delta, current_target_idx | bef2d7d075a6ef637d1423d6c85cdde3ac4d9d70 | 3,650,379 |
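The controller assumes calc_target_index, normalize_angle and a gain k from the surrounding module; below is a sketch of the angle-normalization helper it is assumed to use, wrapping any angle into [-pi, pi):

import numpy as np

def normalize_angle(angle: float) -> float:
    """Wrap an angle into [-pi, pi) (assumed helper for the Stanley controller)."""
    while angle >= np.pi:
        angle -= 2.0 * np.pi
    while angle < -np.pi:
        angle += 2.0 * np.pi
    return angle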
def get_predictions(my_map, reviews, restaurants):
"""
Get the topic predictions for all restaurants.
Parameters:
my_map - the Map object representation of the current city
reviews - a dictionary of reviews with restaurant ids for keys
restaurants - a list of restaurants of the current city
Returns:
A tuple of a dictionary of restaurant ids to topic distributions and the lda model
"""
predictor = LDAPredictor()
lda = predictor.lda
restaurant_ids_to_topics = {}
for restaurant in restaurants:
business_id = restaurant["business_id"]
review = reviews[business_id]
prediction = predictor.predict_topics(review)
restaurant_ids_to_topics[business_id] = make_topic_array_from_tuple_list(prediction, NUM_TOPICS) #topic array of weights for each topic index
normalized_restaurant_ids_to_topics = normalize_predictions(restaurant_ids_to_topics, restaurants)
return normalized_restaurant_ids_to_topics, lda | ef900b9d3526a12f64951e9d7b6f4eb80db9f6f4 | 3,650,380 |
import jwt
def decode_token(token, secret_key):
"""
Decode the websocket token
:param token:
:param secret_key:
:return: the decoded payload
"""
info = jwt.decode(token, secret_key, algorithms=['HS256'])
return info | 5807ce3428435eb0c15dd464164627fb342e46d6 | 3,650,381 |
import requests
from requests.adapters import HTTPAdapter
def getPatternID(pattern_url):
"""asssumes pattern_url is a string, representing the URL of a ravelry pattern
e.g.https://www.ravelry.com/patterns/library/velvet-cache-cou
returns an int, the pattern ID
"""
permalink = pattern_url[41:]
with requests.Session() as a_session:
auth_name = "read-046277a3027f680ebe3fa030e755eb34"
auth_pass = "O+mL0KzfjgQ1eLA7K8FO9s28QPvr6QuiL+pOvFHZ"
a_session.auth = (auth_name, auth_pass)
ravelry_adapter = HTTPAdapter(max_retries=3)
a_session.mount('https://ravelry.com', ravelry_adapter)
base_request = "https://api.ravelry.com/patterns/search.json?query="
pattern = a_session.get(base_request+permalink)
if pattern.status_code != 200:
raise RuntimeError("Ravelry not responding as expected.\
Please check your internet connection or try again later")
pattern_id = pattern.json()['patterns'][0]['id']
return pattern_id | 0624457bad8753a9d15f8339c381ec233a207098 | 3,650,382 |
def make_multibonacci_modulo(history_length, limit):
"""Creates a function that generates the Multibonacci sequence modulo n."""
def sequence_fn(seq):
return np.sum(seq[-history_length:]) % limit
return sequence_fn | 358876a91fec23853bde843c7222cd837b45ada3 | 3,650,383 |
def _get_key(arguments):
"""
Determine the config key based on the arguments.
:param arguments: A dictionary of arguments already processed through
this file's docstring with docopt
:return: The datastore path for the config key.
"""
# Get the base path.
if arguments.get("felix"):
base = CONFIG_PATH
elif arguments.get("node"):
base = BGP_HOST_PATH % {"hostname": hostname}
else:
base = BGP_GLOBAL_PATH
# Determine the actual name of the field. Look this up from the config
# data, otherwise just use the name.
config_data = _get_config_data(arguments)
name = arguments["<NAME>"]
if name in config_data:
name, _ = config_data[name]
return base + name | b68dd68a013ed2289ae60ab49a347858ce447964 | 3,650,384 |
def prepare_data_from_stooq(df, to_prediction = False, return_days = 5):
"""
Prepares data for X, y format from pandas dataframe
downloaded from stooq. Y is created as closing price in return_days
- opening price
Keyword arguments:
df -- data frame containing data from stooq
return_days -- number of day frame in which to calculate y.
"""
if 'Wolumen' in df.columns:
df = df.drop(['Data', 'Wolumen', 'LOP'], axis=1)
else:
df = df.drop('Data', axis = 1)
y = df['Zamkniecie'].shift(-return_days) - df['Otwarcie']
if not to_prediction:
df = df.iloc[:-return_days,:]
y = y[:-return_days]/df['Otwarcie']
return df.values, y | 4b5bc45529b70ed1e8517a1d91fb5a6c2ff0b504 | 3,650,385 |
def represents_int_above_0(s: str) -> bool:
"""Returns value evaluating if a string is an integer > 0.
Args:
s: A string to check.
Returns:
True if it converts to an integer greater than 0, False otherwise.
"""
try:
val = int(s)
if val > 0:
return True
else:
return False
except ValueError:
return False | e39c4afeff8f29b86ef2a80be0af475223654449 | 3,650,386 |
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url('https://download.pytorch.org/models/resnet18-5c106cde.pth'))
return model | 89b71b447890e8986493abc90cca6e2ac2f0eca8 | 3,650,387 |
def sydney():
"""Import most recent Sydney dataset"""
d = {
'zip':'Sydney_geol_100k_shape',
'snap':-1,
}
return(d) | f79a5002ef548769096d3aeb1ad2c7d77ac5ce68 | 3,650,388 |
def format_non_date(value):
"""Return non-date value as string."""
return_value = None
if value:
return_value = value
return return_value | 9a7a13d7d28a14f5e92920cfef7146f9259315ec | 3,650,390 |
def get_loss(loss_str):
"""Get loss type from config"""
def _get_one_loss(cur_loss_str):
if hasattr(keras_losses, cur_loss_str):
loss_cls = getattr(keras_losses, cur_loss_str)
elif hasattr(custom_losses, cur_loss_str):
loss_cls = getattr(custom_losses, cur_loss_str)
else:
raise ValueError('%s is not a valid loss' % cur_loss_str)
return loss_cls
if not isinstance(loss_str, list):
loss_cls = _get_one_loss(loss_str)
return loss_cls
else:
loss_cls_list = []
for cur_loss in loss_str:
loss_cls = _get_one_loss(cur_loss)
loss_cls_list.append(loss_cls)
return loss_cls_list | 4c5714b7e8ca0becf43922a9624d9a4dccc4ac28 | 3,650,391 |
from functools import cmp_to_key
def _hashable_policy(policy, policy_list):
"""
Takes a policy and returns a list, the contents of which are all hashable and sorted.
Example input policy:
{'Version': '2012-10-17',
'Statement': [{'Action': 's3:PutObjectAcl',
'Sid': 'AddCannedAcl2',
'Resource': 'arn:aws:s3:::test_policy/*',
'Effect': 'Allow',
'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
}]}
Returned value:
[('Statement', ((('Action', (u's3:PutObjectAcl',)),
('Effect', (u'Allow',)),
('Principal', ('AWS', ((u'arn:aws:iam::XXXXXXXXXXXX:user/username1',), (u'arn:aws:iam::XXXXXXXXXXXX:user/username2',)))),
('Resource', (u'arn:aws:s3:::test_policy/*',)), ('Sid', (u'AddCannedAcl2',)))),
('Version', (u'2012-10-17',)))]
"""
# Amazon will automatically convert bool and int to strings for us
if isinstance(policy, bool):
return tuple([str(policy).lower()])
elif isinstance(policy, int):
return tuple([str(policy)])
if isinstance(policy, list):
for each in policy:
tupleified = _hashable_policy(each, [])
if isinstance(tupleified, list):
tupleified = tuple(tupleified)
policy_list.append(tupleified)
elif isinstance(policy, string_types) or isinstance(policy, binary_type):
policy = to_text(policy)
# convert root account ARNs to just account IDs
if policy.startswith('arn:aws:iam::') and policy.endswith(':root'):
policy = policy.split(':')[4]
return [policy]
elif isinstance(policy, dict):
sorted_keys = list(policy.keys())
sorted_keys.sort()
for key in sorted_keys:
element = policy[key]
# Special case defined in
# https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html
if key in ["NotPrincipal", "Principal"] and policy[key] == "*":
element = {"AWS": "*"}
tupleified = _hashable_policy(element, [])
if isinstance(tupleified, list):
tupleified = tuple(tupleified)
policy_list.append((key, tupleified))
# ensure we aren't returning deeply nested structures of length 1
if len(policy_list) == 1 and isinstance(policy_list[0], tuple):
policy_list = policy_list[0]
if isinstance(policy_list, list):
policy_list.sort(key=cmp_to_key(_py3cmp))
return policy_list | beb5b527f38d7d1a9cecf81918a3291c0c9960ad | 3,650,392 |
import nltk
from nltk.tokenize import word_tokenize
def LF_CD_NO_VERB(c):
"""
This label function is designed to fire if a given
sentence doesn't contain a verb. Helps cut out some of the titles
hidden in Pubtator abstracts
"""
if len([x for x in nltk.pos_tag(word_tokenize(c.get_parent().text)) if "VB" in x[1]]) == 0:
if "correlates with" in c.get_parent().text:
return 0
return -1
return 0 | aa36b8a4cd00194fd1d786a7d3619ea46da0e1ab | 3,650,393 |
from typing import Tuple
def has_file_allowed_extension(filename: PATH_TYPE, extensions: Tuple[str, ...]) -> bool:
"""Checks if a file is an allowed extension.
Args:
filename (string): path to a file
extensions (tuple of strings): extensions to consider (lowercase)
Returns:
bool: True if the filename ends with one of given extensions
"""
return str(filename).lower().endswith(extensions) | 2f0d5698ecdb10533b303a637fdc03747ef8060c | 3,650,394 |
def get_account_html(netid, timestamp=None):
"""
The Libraries object has a method for getting information
about a user's library account
"""
return _get_resource(netid, timestamp=timestamp, style='html') | 7c917f6778e42a1166d4a33819c2e51378933226 | 3,650,395 |
import functools
import math
def gcd_multiple(*args) -> int:
"""Return greatest common divisor of integers in args"""
return functools.reduce(math.gcd, args) | c686b9495cd45ff047f091e31a79bedcd61f8842 | 3,650,396 |
from typing import Counter
def chars_to_family(chars):
"""Takes a list of characters and constructs a family from them. So, A1B2
would be created from ['B', 'A', 'B'] for example."""
counter = Counter(chars)
return "".join(sorted([char + str(n) for char, n in counter.items()])) | e78de779599f332045a98edde2aa0a0edc5a653b | 3,650,397 |
import configparser
def get_config_properties(config_file="config.properties", sections_to_fetch = None):
"""
Returns the list of properties as a dict of key/value pairs in the file config.properties.
:param config_file: filename (string).
:param sections_to_fetch: names of sections to fetch properties from (if specified); all sections are returned by default (iterable).
:return: A flat (no sections) Python dictionary of properties.
"""
cf = configparser.ConfigParser()
try:
cf.read(config_file)
except Exception as e:
print("[ERROR] exception {} reading configurations from file {}".format(e, config_file))
properties = {}
for section in cf.sections():
# only include args section if requested
if (not sections_to_fetch or (section in sections_to_fetch)):
for item in cf.items(section):
properties[item[0]] = item[1]
return properties | 627d21327560595bb4c2905c98604926f03ca655 | 3,650,398 |
import base64
import secrets
import time
def process_speke():
"""Processes an incoming request from MediaLive, which is using SPEKE
A key is created and stored in DynamoDB."""
input_request = request.get_data()
# Parse request
tree = ET.fromstring(input_request)
content_id = tree.get("id")
kid = tree[0][0].get("kid")
iv = tree[0][0].get("explicitIV") or ""
keyPeriod = tree[2][0].get("id")
index = tree[2][0].get("index")
# Create key
key = base64.b64encode(secrets.token_bytes(16)).decode("ascii")
# Expire key tomorrow
expiry = round(time.time()) + 24 * 60 * 60
# Create the pssh
systems = []
for drmsystem in tree[1]:
if drmsystem.get("systemId") == CLEARKEY_SYSTEM_ID:
pssh = psshgen.genClearkeyPssh([kid])
systems.append(
f"""<!-- ClearKey -->
<cpix:DRMSystem kid="{kid}" systemId="{CLEARKEY_SYSTEM_ID}">
<cpix:PSSH>{pssh}</cpix:PSSH>
</cpix:DRMSystem>"""
)
# Save key info in dynamo
dynamo.put_item(
TableName=TABLE,
Item={
"content_id": {"S": content_id},
"kid": {"S": kid},
"iv": {"S": iv},
"keyPeriod": {"S": keyPeriod},
"index": {"S": index},
"key": {"S": key},
"expiry": {"N": str(expiry)},
},
)
if iv:
iv = f'explicitIV="{iv}"'
# Craft response
response = f"""<cpix:CPIX xmlns:cpix="urn:dashif:org:cpix" xmlns:pskc="urn:ietf:params:xml:ns:keyprov:pskc" xmlns:speke="urn:aws:amazon:com:speke" id="{content_id}">
<cpix:ContentKeyList>
<cpix:ContentKey {iv} kid="{kid}">
<cpix:Data>
<pskc:Secret>
<pskc:PlainValue>{key}</pskc:PlainValue>
</pskc:Secret>
</cpix:Data>
</cpix:ContentKey>
</cpix:ContentKeyList>
<cpix:DRMSystemList>
{''.join(systems)}
</cpix:DRMSystemList>
<cpix:ContentKeyPeriodList>
<cpix:ContentKeyPeriod id="{keyPeriod}" index="{index}" />
</cpix:ContentKeyPeriodList>
<cpix:ContentKeyUsageRuleList>
<cpix:ContentKeyUsageRule kid="{kid}">
<cpix:KeyPeriodFilter periodId="{keyPeriod}" />
</cpix:ContentKeyUsageRule>
</cpix:ContentKeyUsageRuleList>
</cpix:CPIX>"""
return response | 3caa1c0390ea699feab2f138942b6773933fbada | 3,650,399 |
from typing import Dict
def merge(source: Dict, destination: Dict) -> Dict:
"""
Deep merge two dictionaries
Parameters
----------
source: Dict[Any, Any]
Dictionary to merge from
destination: Dict[Any, Any]
Dictionary to merge to
Returns
-------
Dict[Any, Any]
New dictionary with fields in destination overwritten
with values from source
"""
new_dict = {**destination}
for key, value in source.items():
if isinstance(value, dict):
# get node or create one
node = new_dict.get(key, {})
new_dict[key] = merge(value, node)
else:
new_dict[key] = value
return new_dict | 4ffba933fe1ea939ecaa9f16452b74a4b3859f40 | 3,650,400 |
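A short usage sketch of the deep-merge behaviour, where values from source win on conflicts:

defaults = {"db": {"host": "localhost", "port": 5432}, "debug": False}
overrides = {"db": {"port": 6432}, "debug": True}

print(merge(overrides, defaults))
# {'db': {'host': 'localhost', 'port': 6432}, 'debug': True}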
async def async_api_adjust_volume_step(hass, config, directive, context):
"""Process an adjust volume step request."""
# media_player volume up/down service does not support specifying steps
# each component handles it differently e.g. via config.
# For now we use the volumeSteps returned to figure out if we
# should step up/down
volume_step = directive.payload['volumeSteps']
entity = directive.entity
data = {
ATTR_ENTITY_ID: entity.entity_id,
}
if volume_step > 0:
await hass.services.async_call(
entity.domain, SERVICE_VOLUME_UP,
data, blocking=False, context=context)
elif volume_step < 0:
await hass.services.async_call(
entity.domain, SERVICE_VOLUME_DOWN,
data, blocking=False, context=context)
return directive.response() | 85625118d4185842dd0398ec5dd0adbb951b5d67 | 3,650,401 |
import warnings
import io
def load_img(path, grayscale=False, color_mode='rgb', target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: DEPRECATED use `color_mode="grayscale"`.
color_mode: The desired image format. One of "grayscale", "rgb", "rgba".
"grayscale" supports 8-bit images and 32-bit signed integer images.
Default: "rgb".
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported.
Default: "nearest".
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if grayscale is True:
warnings.warn('grayscale is deprecated. Please use '
'color_mode = "grayscale"')
color_mode = 'grayscale'
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `load_img` requires PIL.')
with open(path, 'rb') as f:
img = pil_image.open(io.BytesIO(f.read()))
if color_mode == 'grayscale':
# if image is not already an 8-bit, 16-bit or 32-bit grayscale image
# convert it to an 8-bit grayscale image.
if img.mode not in ('L', 'I;16', 'I'):
img = img.convert('L')
elif color_mode == 'rgba':
if img.mode != 'RGBA':
img = img.convert('RGBA')
elif color_mode == 'rgb':
if img.mode != 'RGB':
img = img.convert('RGB')
else:
raise ValueError('color_mode must be "grayscale", "rgb", or "rgba"')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img | 61709400c2bd6379864f2399c2c04c29e0b61b92 | 3,650,403 |
import logging
import platform
def check_compatible_system_and_kernel_and_prepare_profile(args):
"""
Checks if we can do local profiling, that for now is only available
via Linux based platforms and kernel versions >=4.9
Args:
args:
"""
res = True
logging.info("Enabled profilers: {}".format(args.profilers))
logging.info("Checking if system is capable of running those profilers")
if "Linux" not in platform.system():
logging.error(
"Platform needs to be Linux based. Current platform: {}".format(
platform.system()
)
)
res = False
# check for release >= 4.9
release = platform.release()
logging.info("Detected platform release: {}".format(release))
major_minor = release.split(".")[:2]
system_kernel_major_v = major_minor[0]
system_kernel_minor_v = major_minor[1]
if float(system_kernel_major_v) < 4:
logging.error(
"kernel version needs to be >= 4.9. Detected version: {}".format(release)
)
res = False
if float(system_kernel_major_v) == 4 and float(system_kernel_minor_v) < 9:
logging.error(
"kernel version needs to be >= 4.9. Detected version: {}".format(release)
)
res = False
# a map between profiler name and profiler object wrapper
res, profilers_map = get_profilers_map(args.profilers.split(","))
return res, profilers_map | 35da3151109ef13c7966ede991b871dca45f4d0b | 3,650,404 |
import logging
import time
import re
def recv_bgpmon_updates(host, port, queue):
"""
Receive and parse the BGP update XML stream of bgpmon
"""
logging.info ("CALL recv_bgpmon_updates (%s:%d)", host, port)
# open connection
sock = _init_bgpmon_sock(host,port)
data = ""
stream = ""
# receive data
logging.info(" + receiving XML update stream ...")
    while True:
        data = sock.recv(1024)
        if not data:
            # connection closed by peer: back off, then reconnect
            sock.close()
            time.sleep(60)
            sock = _init_bgpmon_sock(host, port)
            continue
        # sock.recv returns bytes on Python 3; decode before appending to the str buffer
        stream += data.decode("utf-8", errors="replace")
        stream = stream.replace("<xml>", "")
while (re.search('</BGP_MONITOR_MESSAGE>', stream)):
messages = stream.split('</BGP_MONITOR_MESSAGE>')
msg = messages[0] + '</BGP_MONITOR_MESSAGE>'
stream = '</BGP_MONITOR_MESSAGE>'.join(messages[1:])
result = parse_bgp_message(msg)
if result:
queue.put(result)
return True | 6cf8e008b2b47437e80b863eab6f7c1fd4a54e18 | 3,650,407 |
import ast
def is_string_expr(expr: ast.AST) -> bool:
"""Check that the expression is a string literal."""
return (
isinstance(expr, ast.Expr)
and isinstance(expr.value, ast.Constant)
and isinstance(expr.value.value, str)
) | f61418b5671c5e11c1e90fce8d90c583659d40e3 | 3,650,408 |
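A quick check of `is_string_expr` on a module whose first statement is a docstring-style string literal:
module = ast.parse('"""module docstring"""\nx = 1\n')
print(is_string_expr(module.body[0]))  # True  (bare string expression)
print(is_string_expr(module.body[1]))  # False (assignment, not a string expression)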
import numba
import numpy as np
def events_to_img(
xs: np.ndarray,
ys: np.ndarray,
tots: np.ndarray,
cluster_ids: np.ndarray,
x_img: np.ndarray,
y_img: np.ndarray,
minimum_event_num: int = 30,
extinguish_dist: float = 1.41422, # sqrt(2) = 1.41421356237
) -> np.ndarray:
"""
Converting given events into a flatten image array defined by
given image pixel positions
@param xs: event x position, must be float
@param ys: event y position, must be float
@param tots: event time of threshold (as intensity), must be float
@param cluster_ids: ID labels
@param x_img: pixel position of the target image (see np.meshgrid)
@param y_img: pixel position of the target image (see np.meshgrid)
@param minimum_event_num: minimum number of events needed to be included
@param extinguish_dist: signal impact ends outside this range
@returns: the image converted from given events using weighted centroid method
"""
# preparation
unique_cids = np.unique(cluster_ids)
img_shape = x_img.shape
x_img = x_img.flatten()
y_img = y_img.flatten()
img = x_img * 0.0
    # numba.prange parallelizes only when the function is compiled with
    # @numba.njit(parallel=True); otherwise it behaves like a plain range
    for i in numba.prange(unique_cids.shape[0]):
cid = unique_cids[i]
idx = np.where(cluster_ids == cid)[0]
# skip cluster with too few events
if idx.shape[0] < minimum_event_num:
continue
# compute the centroid position and weighted equivalent intensity
wgts = tots[idx] / np.sum(tots[idx])
xc = np.dot(wgts, xs[idx])
yc = np.dot(wgts, ys[idx])
# ic = np.dot(wgts, tots[idx])
        # propagate the signal to the image
idx = np.where(
np.logical_and(
np.logical_and(
x_img >= xc - extinguish_dist,
x_img < xc + extinguish_dist,
),
np.logical_and(
y_img >= yc - extinguish_dist,
y_img < yc + extinguish_dist,
),
))[0]
wgts = (x_img[idx] - xc)**2 + (y_img[idx] - yc)**2
wgts = 1.0 / wgts
wgts = wgts / np.sum(wgts)
img[idx] += wgts
# return the results
return img.reshape(img_shape) | ced70fc290157a4168c8e9ebd589263bbc410c6f | 3,650,409 |
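A small usage sketch for `events_to_img` above, using synthetic events and a 16x16 target grid (all values are made up for illustration):
rng = np.random.default_rng(0)
xs = np.concatenate([rng.normal(4.0, 0.3, 50), rng.normal(11.0, 0.3, 50)])
ys = np.concatenate([rng.normal(5.0, 0.3, 50), rng.normal(10.0, 0.3, 50)])
tots = rng.uniform(1.0, 10.0, 100)
cluster_ids = np.repeat([0, 1], 50)            # two clusters of 50 events each
x_img, y_img = np.meshgrid(np.arange(16, dtype=float), np.arange(16, dtype=float))
img = events_to_img(xs, ys, tots, cluster_ids, x_img, y_img)
print(img.shape)  # (16, 16)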
import numpy as np
def generate_ar(n_series, n_samples, random_state=0):
"""Generate a linear auto-regressive series.
This simple model is defined as::
        X(t) = 0.4 * X(t - 1) - 0.6 * X(t - 4) + 0.1 * N(0, 1)
The task is to predict the current value using all the previous values.
Parameters
----------
n_series : int
Number of time series to generate.
n_samples : int
Number of samples in each time series.
random_state : int, default 0
Seed to use in the random generator.
Returns
-------
    X, Y : ndarray, shape (n_series, n_samples, 1)
        Input and output sequences; `Y` is just a version of `X` delayed by
        1 sample.
"""
n_init = 4
n_discard = 20
X = np.zeros((n_series, n_init + n_discard + n_samples + 1))
rng = np.random.RandomState(random_state)
X[:, n_init] = rng.randn(n_series)
for i in range(n_init + 1, X.shape[1]):
X[:, i] = (0.4 * X[:, i - 1] - 0.6 * X[:, i - 4] +
0.1 * rng.randn(n_series))
Y = X[:, n_init + n_discard + 1:, None]
X = X[:, n_init + n_discard: -1, None]
return X, Y | a02b2bb242ecc0eb6cf3d5d23d0982c56d81b617 | 3,650,411 |
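For illustration, the shapes returned by `generate_ar` and the one-step delay relation between the two outputs:
X, Y = generate_ar(n_series=3, n_samples=100, random_state=42)
print(X.shape, Y.shape)  # (3, 100, 1) (3, 100, 1)
# Y is X shifted forward by one step: Y[:, t] == X[:, t + 1]
print(np.allclose(X[:, 1:, 0], Y[:, :-1, 0]))  # True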
import re
def get_raw_code(file_path):
"""
Removes empty lines, leading and trailing whitespaces, single and multi line comments
:param file_path: path to .java file
:return: list with raw code
"""
raw_code = []
multi_line_comment = False
with open(file_path, "r") as f:
for row in f:
# remove leading and trailing whitespaces
line = row.strip()
# remove '/* comments */'
line = re.sub(r'''
^ # start of string
/\* # "/*" string
.* # any character (except line break) zero or more times
\*/ # "*/" string
\s* # zero or many whitespaces
''', '', line, 0, re.VERBOSE)
# remove '//comments'
line = re.sub(r'''
^ # start of string
// # "//" string
.* # any character (except line break) zero or more times
$ # end of string
''', '', line, 0, re.VERBOSE)
# ignore empty lines
if line != '':
# skip multi-line comments (/*)
if re.search(r'''
^ # start of string
/\* # "/*" string
.* # any character (except line break) zero or more times
''', line, re.VERBOSE):
multi_line_comment = True
continue
# check if multi-line comment was closed (*/)
elif re.search(r'''
.* # any character (except line break) zero or more times
\*/ # "*/" string
$ # end of string
''', line, re.VERBOSE):
multi_line_comment = False
line = re.sub(r'''
.* # any character (except line break) zero or more times
\*/ # "*/" string
\s* # zero or many whitespaces
''', '', line, 0, re.VERBOSE)
if line == '':
continue
# add line if it's not multi-line comment
if not multi_line_comment:
raw_code.append(line)
return raw_code | 6654a0423f024eaea3067c557984c3aa5e9494da | 3,650,413 |
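A short demonstration of `get_raw_code` on a temporary Java file (the source text is arbitrary):
import tempfile, os

java_src = """// header comment
public class Foo {
    /* block
       comment */
    int x = 1; // trailing comments are kept as-is by this parser
}
"""
with tempfile.NamedTemporaryFile("w", suffix=".java", delete=False) as tmp:
    tmp.write(java_src)
print(get_raw_code(tmp.name))
# ['public class Foo {', 'int x = 1; // trailing comments are kept as-is by this parser', '}']
os.unlink(tmp.name)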
from typing import Pattern
import re
def _yaml_comment_regex() -> Pattern:
"""
From https://yaml-multiline.info/, it states that `#` cannot appear *after* a space
or a newline, otherwise it will be a syntax error (for multiline strings that don't
use a block scalar). This applies to single lines as well: for example, `a#b` will be
treated as a single value, but `a #b` will only capture `a`, leaving `#b` as a comment.
For lines that *do* use a block scalar, the YAML parser will throw a syntax error if
there is additional text on the same line as the block scalar. Comments however, are fine.
e.g.
key: | # this is ok
blah
key: | but this is not
blah
Given that we've made it to this stage, we can assume the YAML file is syntactically
correct. Therefore, if we add whitespace before the comment character, we can know that
everything else *after* the comment character is a comment for a given line.
"""
return re.compile(r'(\s+#[\S ]*)') | 3b5739f460c3d2c66f802dd46e061d2d07030525 | 3,650,415 |
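An illustrative use of `_yaml_comment_regex` to strip a trailing comment from a single YAML line:
pattern = _yaml_comment_regex()
print(pattern.sub('', "key: value  # explanatory comment"))  # "key: value"
print(pattern.sub('', "a#b"))  # "a#b"  (no space before '#', so not treated as a comment)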
def to_list(name: str) -> "Expr":
"""
Aggregate to list
"""
return col(name).list() | 2265c14d13ef92bb5481dc2eee17915288cf95e8 | 3,650,416 |
import re
def format_ipc_dimension(number: float, decimal_places: int = 2) -> str:
"""
Format a dimension (e.g. lead span or height) according to IPC rules.
"""
    formatted = '{:.{prec}f}'.format(number, prec=decimal_places)
stripped = re.sub(r'^0\.', '', formatted)
return stripped.replace('.', '') | 60001f99b5f107faba19c664f90ee2e9fb61fe68 | 3,650,417 |
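Expected behaviour of `format_ipc_dimension` for a few typical values (default two decimal places):
print(format_ipc_dimension(0.75))   # "75"   (leading "0." dropped, then the dot removed)
print(format_ipc_dimension(1.27))   # "127"
print(format_ipc_dimension(10.5))   # "1050"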
import numpy as np
import pandas as pd
from scipy import stats
def mean_test(data, muy0, alternative = 'equal', alpha = 0.95):
    """
    This function tests the sample mean against muy0 and builds the matching
    confidence interval for the chosen alternative.
    Input:
      data (1D array): the sample of the whole column that you want to evaluate
      muy0 (float): the hypothesized mean under the null hypothesis
      alternative (str): one of 'equal'/'two.side', 'greater' or 'less'
      alpha (float): must be in (0, 1); the confidence level is derived as 1 - alpha
    Output:
      a one-row pandas DataFrame that contains
      - Confidence_Interval (tuple)
      - T_statistic (float)
      - p_value and a plain-text conclusion.
    """
# convert datapoint to float
a = 1.0 * np.array(data)
confidence = np.round(1 - alpha, 3)
# Number of observations
n = len(a)
# Compute mean and standard_errors
m, se = np.mean(a), stats.sem(a)
# result of testing
T_stat = ((m - muy0) / se)*np.sqrt(n)
# compute the interval_radius
if alternative in ['equal', "two.side"]:
h = se * stats.t.ppf((1 + confidence) / 2., n-1)
conf_itv = (float(m - h), float(m + h))
alt = "true mean (muy) is not equal to muy0 = {}".format(muy0)
cnls = "true mean (muy) = muy0 = {}".format(muy0)
p_val = 2*min(stats.t.cdf(T_stat, n-1), 1 - stats.t.cdf(T_stat, n-1))
elif alternative == 'greater':
h = se * stats.t.ppf(1 - confidence, n-1)
conf_itv = (float(m - h), '+inf')
alt = "true mean (muy) > muy0 = {}".format(muy0)
cnls = "true mean (muy) <= muy0 = {}".format(muy0)
p_val = 1 - stats.t.cdf(T_stat, n-1)
elif alternative == 'less':
h = se * stats.t.ppf(1 - confidence, n-1)
conf_itv = ('-inf', float(m + h))
alt = "true mean (muy) < muy0 = {}".format(muy0)
cnls = "true mean (muy) >= muy0 = {}".format(muy0)
p_val = stats.t.cdf(T_stat, n-1)
# conclusion
if p_val < alpha:
kl = 'reject the hypothesis, {}'.format(alt)
else:
kl = 'can not reject the null hypothesis, so the {}'.format(cnls)
# save all the output-results
dic_data = pd.DataFrame(
{
'alpha / confidence_level': [{'significance level':alpha, 'confidence_level': 1- alpha}],
'Confidence_Interval': [conf_itv],
'T_statistic': T_stat,
'sample_mean': m,
'alternative_hypothesis': alt,
'p_value': p_val,
'conclusion': "For confidence_level = {}%, we {}".format(100*confidence, kl)
}
)
return dic_data | 9a798eeed1ba2debfe42dbb08ed33c8a1f463fd3 | 3,650,418 |
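A quick illustrative call to `mean_test` on synthetic data (the returned object is a one-row DataFrame):
rng = np.random.RandomState(0)
sample = rng.normal(loc=5.2, scale=1.0, size=40)
report = mean_test(sample, muy0=5.0, alternative='equal', alpha=0.05)
print(report[['T_statistic', 'p_value', 'Confidence_Interval']])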
def __build_command(command, name, background=None, enable=None):
""" Constuct args for systemctl command.
Args:
command: The systemctl command
name: The unit name or name pattern
background: True to have systemctl perform the command in the background
enable: True to enable/disable, False to start/stop only, None for default.
Returns:
The name with type suffix
"""
args = ['--quiet']
if background:
args.append('--no-block')
if ((enable or name.startswith('appscale-'))
        and enable is not False
and command in ('start', 'stop')):
args.append('--now')
args.append('--runtime')
if command == 'start':
args.append('enable')
else:
args.append('disable')
else:
args.append(command)
args.append(__expand_name(name))
return args | 5f47c19bb24d05b66d02c6d93ed1bf90144afb63 | 3,650,419 |
import requests
def dockerFetchLatestVersion(image_name: str) -> list[str]:
"""
Fetches the latest version of a docker image from hub.docker.com
:param image_name: image to search for
:return: list of version suggestions for the image or 'not found' if error was returned
"""
base_url = "https://hub.docker.com/v2/repositories/library"
request = f"{base_url}/{image_name}/tags"
params = {
"ordering": "last_updated",
"name": "."
}
version_list = []
response = requests.get(request, params=params)
if response.status_code == requests.codes.ok:
json = response.json()
version_list = list(
map(lambda i: i["name"], json["results"])
)[:5]
if len(version_list) == 0:
version_list = [NOT_FOUND]
else:
del params["name"]
response = requests.get(request, params=params)
if response.status_code == requests.codes.ok:
json = response.json()
version_list += list(
map(lambda i: i["name"], json["results"])
)[:5]
return sorted(sorted(list(set(version_list)), reverse=True), key=lambda it: _isfloat(it), reverse=True) | 5907dfe92b627c272132f97be9019d735aabd570 | 3,650,420 |
import torch
import torchvision
def _dataset(
dataset_type: str,
transform: str,
train: bool = True
) -> torch.utils.data.Dataset:
"""
    Dataset:
        mnist: MNIST
        fashionmnist: FashionMNIST
        cifar10: CIFAR-10
        cifar100: CIFAR-100
Transform:
default: the default transform for each data set
simclr: the transform introduced in SimCLR
"""
try:
transform = _get_transform(dataset_type, transform, train)
except KeyError:
raise DatasetNotIncludeError(f"Dataset {dataset_type} or transform {transform} is not included.\n" \
f"Refer to the following: {_dataset.__doc__}")
if dataset_type == "mnist":
dataset = torchvision.datasets.MNIST(
root=ROOT, train=train, download=False,
transform=transform
)
elif dataset_type == "fashionmnist":
dataset = torchvision.datasets.FashionMNIST(
root=ROOT, train=train, download=False,
transform=transform
)
elif dataset_type == "cifar10":
dataset = torchvision.datasets.CIFAR10(
root=ROOT, train=train, download=False,
transform=transform
)
elif dataset_type == "cifar100":
dataset = torchvision.datasets.CIFAR100(
root=ROOT, train=train, download=False,
transform=transform
)
return dataset | cdfeefefede97db0a8d8ad6c3f4620855004062c | 3,650,421 |
import pandas as pd
from libpgm.tablecpdfactorization import TableCPDFactorization
def inferCustomerClasses(param_file, evidence_dir, year):
"""
This function uses the variable elimination algorithm from libpgm to infer the customer class of each AnswerID, given the evidence presented in the socio-demographic survey responses.
It returns a tuple of the dataframe with the probability distribution over all classes for each AnswerID and the BN object.
"""
bn = loadbn(param_file)
evidence, a_id = readEvidence(year, evidence_dir)
query = {"customer_class":''}
cols = bn.Vdata.get('customer_class')['vals']
result = pd.DataFrame(columns=cols) #create empty dataframe in which to store inferred probabilities
count = 0 #set counter
for e in evidence:
bn = loadbn(param_file)
fn = TableCPDFactorization(bn)
try:
inf = fn.condprobve(query, e)
classprobs = list(inf.vals)
result.loc[count] = classprobs
count += 1
        except Exception:
result.loc[count] = [None] * len(cols)
count += 1
result['AnswerID'] = a_id
result.set_index(keys='AnswerID',inplace=True)
return result | 212906aacc2bc3d607e1589742591834953de14e | 3,650,422 |
def MidiSegInfo(segment):
""" Midi file info saved in config file for speed """
class segInfo:
iMsPerTick = 0
bpm = 4
ppqn = 480
total_ticks = 0
iLengthInMs = 0
iTracks = 0
trackList = []
ver = "1.5"
ret = segInfo()
savedVer = IniGetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "Ver")
savedDateTime = IniGetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "DateTime")
dateTime = FileDateTime(segment.filename)
if ver != savedVer or dateTime != savedDateTime:
mi = GetMidiInfo(segment.filename)
if mi.err == 0:
IniSetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "Ver", ver)
IniSetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "DateTime", str(dateTime))
IniSetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "PPQN", str(mi.ppqn))
IniSetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "BPM", str(mi.beats_per_measure))
IniSetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "totalTicks", str(mi.totalTicks))
IniSetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "maxTracks", str(mi.maxTracks))
iLengthInMs = GetMidiFileLength(segment.filename) * 1000
IniSetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "LengthInMs", str(iLengthInMs))
if iLengthInMs > 0:
IniSetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "MsPerTick", str(iLengthInMs / mi.totalTicks))
#have to write out the tracklist in format that can be saved in INI file
tl = []
for track in mi.trackList:
tl.append((track.track, track.channel, track.name))
IniSetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "Tracks", tl)
trackList = []
tl = IniGetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "Tracks", 'list', [])
for t in tl:
trackList.append(trackGrid(t[0], t[1], t[2],False))
iTracks = IniGetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "maxTracks", 'int', 0)
iMsPerTick = IniGetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "MsPerTick", 'float', 0)
bpm = IniGetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "BPM", 'int', 0)
ppqn = IniGetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "PPQN", 'int', 480)
if iMsPerTick == 0 or bpm == 0 or ppqn == 0:
return ret
tb = TimeBase(ppqn, bpm)
total_ticks = tb.ConvertStrTimeToTicks(segment.length)
if total_ticks == 0:
total_ticks = tb.MbtDifference(tb.ConvertStrTimeToTuple(segment.start), tb.ConvertStrTimeToTuple(segment.end))
if total_ticks == 0:
return ret
ret.iTracks = iTracks
ret.iMsPerTick = iMsPerTick
ret.bpm = bpm
ret.ppqn = ppqn
ret.total_ticks = total_ticks
ret.iLengthInMs = total_ticks * iMsPerTick
ret.trackList = trackList
return ret | 7d48b699ed52239cf08e57b217f5dd62f3c64a84 | 3,650,423 |
def num_in_row(board, row, num):
"""True if num is already in the row, False otherwise"""
return num in board[row] | ca9ab9de4514740e25e0c55f3613d03b2844cdb8 | 3,650,424 |
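Tiny check of `num_in_row` on a partially filled Sudoku-style board (0 meaning empty):
board = [
    [5, 3, 0, 0, 7, 0, 0, 0, 0],
    [6, 0, 0, 1, 9, 5, 0, 0, 0],
]
print(num_in_row(board, 0, 7))  # True
print(num_in_row(board, 1, 7))  # False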
import os
import gzip
import cPickle
import urllib
def load_mnist(dataset="mnist.pkl.gz"):
"""
dataset: string, the path to dataset (MNIST)
"""
data_dir, data_file = os.path.split(dataset)
# download MNIST if not found
if not os.path.isfile(dataset):
origin = (
'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
)
print 'Downloading MNIST from %s' % origin
assert urllib.urlretrieve(origin, dataset)
print "Loading Data ..."
with gzip.open(dataset, 'rb') as handle:
train_set, valid_set, test_set = cPickle.load(handle)
rval = [(train_set[0],to_categorical(train_set[1])),
(valid_set[0],to_categorical(valid_set[1])),
(test_set[0], to_categorical(test_set[1]))]
#train_X, train_y = shared_data(train_set[0]),shared_data_int32(to_categorical(train_set[1]))
#valid_X, valid_y = shared_data(valid_set[0]),shared_data_int32(to_categorical(valid_set[1]))
#test_X, test_y = shared_data(test_set[0]),shared_data_int32(to_categorical(test_set[1]))
#
#rval = [(train_X, train_y), (valid_X, valid_y), (test_X, test_y)]
return rval | 2e499431bed7a8c1c775b04d6272153564d9c99f | 3,650,425 |
def factorial_3(n, acc=1):
"""
Replace all recursive tail calls f(x=x1, y=y1, ...) with (x, y, ...) = (x1, y1, ...); continue
"""
while True:
if n < 2:
return 1 * acc
(n, acc) = (n - 1, acc * n)
continue
break | e067cf4564056bf488e56fe58bbd5b998b0175f3 | 3,650,427 |
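Sanity check of the loop-converted `factorial_3` against the standard library:
import math
print(factorial_3(10))                        # 3628800
print(factorial_3(10) == math.factorial(10))  # True
print(factorial_3(0), factorial_3(1))         # 1 1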
import numpy as np
def autocorr(x, axis=0, fast=False):
"""
Estimate the autocorrelation function of a time series using the FFT.
:param x:
The time series. If multidimensional, set the time axis using the
``axis`` keyword argument and the function will be computed for every
other axis.
:param axis: (optional)
The time axis of ``x``. Assumed to be the first axis if not specified.
:param fast: (optional)
If ``True``, only use the largest ``2^n`` entries for efficiency.
(default: False)
"""
x = np.atleast_1d(x)
m = [slice(None), ] * len(x.shape)
# For computational efficiency, crop the chain to the largest power of
# two if requested.
    if fast:
        n = int(2**np.floor(np.log2(x.shape[axis])))
        m[axis] = slice(0, n)
        x = x[tuple(m)]
    else:
        n = x.shape[axis]
# Compute the FFT and then (from that) the auto-correlation function.
    f = np.fft.fft(x-np.mean(x, axis=axis), n=2*n, axis=axis)
    m[axis] = slice(0, n)
    acf = np.fft.ifft(f * np.conjugate(f), axis=axis)[tuple(m)].real
    m[axis] = 0
    return acf / acf[tuple(m)]
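A brief check of `autocorr` on a synthetic AR(1) series; the lag-0 value is normalised to 1 and the lag-1 value should sit near the AR coefficient.
rng = np.random.RandomState(1)
noise = rng.randn(5000)
series = np.zeros(5000)
for t in range(1, 5000):
    series[t] = 0.9 * series[t - 1] + noise[t]
rho = autocorr(series, fast=True)
print(round(rho[0], 3), round(rho[1], 2))  # 1.0 and roughly 0.9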
import re
import dateutil.parser
def parse_date(filename_html):
"""Parse a file, and return the date associated with it.
filename_html -- Name of file to parse.
"""
match = re.search(r"\d{4}-\d{2}-\d{2}", filename_html)
if not match:
return None
match_date = match.group()
file_date = dateutil.parser.parse(match_date).date()
return file_date | 2ee45c3f70b75fc2d26b9c00861dbb1e7586d4af | 3,650,429 |
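Expected behaviour of `parse_date` on a dated report filename and on a name without a date (filenames are arbitrary):
print(parse_date("report-2021-06-01.html"))  # 2021-06-01 (a datetime.date)
print(parse_date("index.html"))              # None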
def mod(a1, a2):
"""
Function to give the remainder
"""
return a1 % a2 | f5c03a952aed373e43933bafe37dbc75e796b74d | 3,650,431 |
def select_theme_dirs():
"""
Load theme templates, if applicable
"""
if settings.THEME_DIR:
return ["themes/" + settings.THEME_DIR + "/templates", "templates"]
else:
return ["templates"] | df74bc751f701be63276b5481ac222e64ba914e7 | 3,650,432 |
def encode_string(s):
"""
Simple utility function to make sure a string is proper
to be used in a SQL query
EXAMPLE:
That's my boy! -> N'That''s my boy!'
"""
res = "N'"+s.replace("'","''")+"'"
res = res.replace("\\''","''")
    res = res.replace("\''","''")  # note: "\''" evaluates to "''", so this line is effectively a no-op
return res | 814822b9aa15def24f98b2b280ab899a3f7ea617 | 3,650,433 |
def email_manage(request, email_pk, action):
"""Set the requested email address as the primary. Can only be
requested by the owner of the email address."""
email_address = get_object_or_404(EmailAddress, pk=email_pk)
if not email_address.user == request.user and not request.user.is_staff:
messages.error(request, "You are not authorized to manage this email address")
# if not email_address.is_verified():
# messages.error(request, "Email '%s' needs to be verified first." % email_address.email)
    elif action == "set_primary":
email_address.set_primary()
messages.success(request, "'%s' is now marked as your primary email address." % email_address.email)
elif action == "delete":
email_address.delete()
messages.success(request, "'%s' has been removed." % email_address.email)
if 'HTTP_REFERER' in request.META:
return redirect(request.META['HTTP_REFERER'])
else:
return redirect(reverse('member:profile:view', kwargs={'username': email_address.user.username})) | 7a533fe34fdc13b737025c01bb0bb15dcbeae0f2 | 3,650,434 |
def get_container_service_api_version():
"""Get zun-api-version with format: 'container X.Y'"""
return 'container ' + CONTAINER_SERVICE_MICROVERSION | c6f83640b50132e24ce96889688afcda49ba6b1c | 3,650,435 |
from django.utils.cache import get_cache_key
from django.core.cache import cache
from django.http import HttpRequest
from django.urls import reverse
def invalidate_view_cache(view_name, args=[], namespace=None, key_prefix=None):
"""
This function allows you to invalidate any view-level cache.
view_name: view function you wish to invalidate or it's named url pattern
args: any arguments passed to the view function
    namespace: if an application namespace is used, pass that
    key_prefix: the key prefix given to the @cache_page decorator on the view (if any)
"""
# create a fake request object
request = HttpRequest()
    # Look up the request path:
if namespace:
view_name = namespace + ":" + view_name
request.path = reverse(view_name, args=args)
# get cache key, expire if the cached item exists:
key = get_cache_key(request, key_prefix=key_prefix)
if key:
if cache.get(key):
cache.set(key, None, 0)
return True
return False | 2622e6ee48cb7565014660858104edba5b20b9eb | 3,650,437 |
import numpy as np
from scipy.ndimage import gaussian_filter1d
def compute_ray_features_segm_2d(seg_binary, position, angle_step=5., smooth_coef=0, edge='up'):
""" compute ray features vector , shift them to be starting from larges
and smooth_coef them by gauss filter
(from given point the close distance to boundary)
:param ndarray seg_binary: np.array<height, width>
:param tuple(int,int) position: integer position in the segmentation
:param float angle_step: angular step for ray features
:param str edge: pointing to the up of down edge of an boundary
:param int smooth_coef: smoothing the final ray features
:return list(float): ray distances
.. seealso:: :func:`imsegm.descriptors.compute_ray_features_segm_2d_vectors`
.. note:: for more examples, see unittests
>>> seg_empty = np.zeros((100, 150), dtype=bool)
>>> compute_ray_features_segm_2d(seg_empty, (50, 75), 90) # doctest: +ELLIPSIS
array([-1., -1., -1., -1.]...)
>>> from skimage import draw
>>> seg = np.ones((100, 150), dtype=bool)
>>> x, y = draw.circle(50, 75, 40, shape=seg.shape)
>>> seg[x, y] = False
>>> np.round(compute_ray_features_segm_2d(seg, (50, 75), 45)) # doctest: +ELLIPSIS
array([ 40., 41., 40., 41., 40., 41., 40., 41.]...)
>>> np.round(compute_ray_features_segm_2d(seg, (60, 40), 30, smooth_coef=1)).tolist()
[66.0, 52.0, 32.0, 16.0, 8.0, 5.0, 5.0, 8.0, 16.0, 33.0, 53.0, 67.0]
>>> ray_fts = compute_ray_features_segm_2d(seg, (40, 60), 20)
>>> np.round(ray_fts).tolist() # doctest: +NORMALIZE_WHITESPACE
[54.0, 57.0, 59.0, 55.0, 51.0, 44.0, 38.0, 31.0, 27.0, 24.0, 22.0, 22.0,
23.0, 26.0, 29.0, 35.0, 42.0, 49.0]
"""
assert seg_binary.ndim == len(position), \
'Segmentation dim of %r and position (%i) does not match' \
% (seg_binary.ndim, len(position))
seg_binary = seg_binary.astype(bool)
position = tuple(map(int, position))
fn_compute = cython_ray_features_seg2d if USE_CYTHON else numpy_ray_features_seg2d
ray_dist = fn_compute(seg_binary, position, angle_step, edge)
if smooth_coef is not None and smooth_coef > 0:
ray_dist = gaussian_filter1d(ray_dist, smooth_coef)
return ray_dist | 18b830fe6ac83cf7282be39d368ad7c1261a890c | 3,650,438 |
import cv2
def visualize_bbox(img, bbox, class_name, color=(255, 0, 0), thickness=2):
"""Visualizes a single bounding box on the image"""
BOX_COLOR = (255, 0, 0) # Red
TEXT_COLOR = (255, 255, 255) # White
x_min, y_min, x_max, y_max = bbox
cv2.rectangle(img, (x_min, y_min), (x_max, y_max), color=color, thickness=thickness)
((text_width, text_height), _) = cv2.getTextSize(class_name, cv2.FONT_HERSHEY_SIMPLEX, 0.35, 1)
cv2.rectangle(img, (x_min, y_min - int(1.3 * text_height)), (x_min + text_width, y_min), BOX_COLOR, -1)
cv2.putText(
img,
text=class_name,
org=(x_min, y_min - int(0.3 * text_height)),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.35,
color=TEXT_COLOR,
lineType=cv2.LINE_AA,
)
return img | 147335c2e87b57f0bd0ba0840e9dae9f713b513f | 3,650,439 |
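A minimal call to `visualize_bbox` on a blank canvas; the bounding-box coordinates and class name are arbitrary.
import numpy as np

canvas = np.zeros((200, 300, 3), dtype=np.uint8)
annotated = visualize_bbox(canvas, bbox=(40, 50, 180, 160), class_name="cat")
print(annotated.shape)  # (200, 300, 3); the box and label are drawn in place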
import json
def mock_light():
    """Mock UniFi Protect Light device."""
data = json.loads(load_fixture("sample_light.json", integration=DOMAIN))
return Light.from_unifi_dict(**data) | ba83ae80ddb39ec9f9b6f30b77eabee39f998b39 | 3,650,440 |
def randomize_bulge_i(N, M, bp='G', target='none', ligand='theo'):
"""
Replace the upper stem with the aptamer and randomize the bulge to connect
it to the lower stem.
This is a variant of the rb library with two small differences. First, the
nucleotides flanking the aptamer are not randomized and are instead
guaranteed to base pair. The default base pair is GC. However, note that
to be consistent with rb, this base pair is considered part of the linker,
and is included in the N and M arguments. So rbi/4/8 only randomizes 10
    positions. Second, the library is based on the Dang scaffold. Most of the
    extended upper stem is replaced with the aptamer, but the CG base pair in
    the lower stem remains.
Parameters
----------
N: int
The length of the linker on the 5' side of the aptamer. This length
includes a non-randomized base pair immediately adjacent to the aptamer.
M: int
The length of the linker on the 3' side of the aptamer. This length
includes a non-randomized base pair immediately adjacent to the aptamer.
bp: 'ACGU'
Not implemented, but this would be a good interface for changing the
static base pair. Right now to base pair is hard-coded to be GC.
"""
sgrna = on(target=target)
sgrna['bulge/5'].attachment_sites = 0,
sgrna['bulge/3'].attachment_sites = 4,
sgrna.attach(
random_insert(ligand, N, M, flags='g'),
'bulge/5', 0,
'bulge/3', 4,
)
return sgrna | aff8aa9e6ba276bb9d867f24c58c44e0e3c849f6 | 3,650,441 |
import logging
def parse_identifier(db, identifier):
"""Parse the identifier and return an Identifier object representing it.
:param db: Database session
:type db: sqlalchemy.orm.session.Session
:param identifier: String containing the identifier
:type identifier: str
:return: Identifier object
:rtype: Optional[core.model.identifier.Identifier]
"""
parsed_identifier = None
try:
result = Identifier.parse_urn(db, identifier)
if result is not None:
parsed_identifier, _ = result
except Exception:
logging.error(
f"An unexpected exception occurred during parsing identifier {identifier}"
)
return parsed_identifier | 390fc8d44014d2cdb17fc641dab9914ba13bc95e | 3,650,443 |
import textwrap
import matplotlib.pyplot as plt
def wrap_name(dirname, figsize):
"""Wrap name to fit in subfig."""
fontsize = plt.rcParams["font.size"]
    # 72 points per inch: estimate how many fontsize-wide characters fit across the figure
num_chars = int(figsize / fontsize * 72)
return textwrap.fill(dirname, num_chars) | 7fb7430a01781c7c53637ae4a94c72c057faddab | 3,650,444 |
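Illustrative wrapping of a long directory name with `wrap_name`; the wrap width depends on the active rcParams font size, so the exact line breaks vary.
plt.rcParams["font.size"] = 10
print(wrap_name("results/very_long_experiment_directory_name_2024_run_03", figsize=3))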
def resolve_shape(tensor, rank=None, scope=None):
"""Fully resolves the shape of a Tensor.
Use as much as possible the shape components already known during graph
creation and resolve the remaining ones during runtime.
Args:
tensor: Input tensor whose shape we query.
rank: The rank of the tensor, provided that we know it.
scope: Optional name scope.
Returns:
shape: The full shape of the tensor.
"""
with tf.name_scope(scope, 'resolve_shape', [tensor]):
if rank is not None:
shape = tensor.get_shape().with_rank(rank).as_list()
else:
shape = tensor.get_shape().as_list()
if None in shape:
shape_dynamic = tf.shape(tensor)
for i in range(len(shape)):
if shape[i] is None:
shape[i] = shape_dynamic[i]
return shape | cc1c3a0bd2b5a35580dd94b6c45a2a36cc151e5a | 3,650,445 |
def gradient_clip(gradients, max_gradient_norm):
"""Clipping gradients of a model."""
clipped_gradients, gradient_norm = tf.clip_by_global_norm(
gradients, max_gradient_norm)
gradient_norm_summary = [tf.summary.scalar("grad_norm", gradient_norm)]
gradient_norm_summary.append(
tf.summary.scalar("clipped_gradient", tf.global_norm(clipped_gradients)))
return clipped_gradients, gradient_norm_summary | 416f9560ad612ab364cd03de39851a559012d26b | 3,650,446 |
def list_species(category_id):
"""
List all the species for the specified category
:return: A list of Species instances
"""
with Session.begin() as session:
species = session.query(Species)\
.filter(Species.categoryId == category_id)\
.order_by(db.asc(Species.name))\
.all()
return species | c49283fdde11456ffc6e4eff4b5043d547fa9908 | 3,650,448 |