content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
---|---|---|
def get_default_interpreter():
"""Returns an instance of the default interpreter class."""
return __default_interpreter.get() | 8807e2480787d26e81ab1be3377f8e3a11daa1de | 3,655,221 |
def fx_ugoira_frames():
"""frames data."""
return {
'000000.jpg': 1000,
'000001.jpg': 2000,
'000002.jpg': 3000,
} | e3517b37bb4c9cd1dfb70b13128d16ef80a9801a | 3,655,222 |
import numpy as np
from numpy import array, zeros, exp, sin, sqrt, pi
def coherent_tmm(pol, n_list, d_list, th_0, lam_vac):
"""
This is my slightly modified version of byrnes's "coh_tmm"
I've rearranged the calculations in a way that is more intuitive to me
Example inputs:
For angle dependence, be careful to include air first, otherwise the angle will be wrong
layers = [ 'Air','SiO2', 'ITO' ,'PEDOT' ,'TCTA' , 'TCTA-tpbi-Irppy' ,'tpbi', 'Al', 'Air']
doping = [ 1, 1 , 1 , 1 , 1 ,[0.475,0.475,0.05] , 1, 1, 1]
d_list = np.array([0,0, 100 , 70 , 20 , 60 , 20 , 100, 0])
n_list = load_nk(layers,doping,wavelength_nm,df_nk)
Assign a thickness of 0 to incoherent layers (air, substrate)
Notes from byrnes:
Main "coherent transfer matrix method" calc. Given parameters of a stack,
calculates everything you could ever want to know about how light
propagates in it. (If performance is an issue, you can delete some of the
calculations without affecting the rest.)
pol is light polarization, "s" or "p".
n_list is the list of refractive indices, in the order that the light would
pass through them. The 0'th element of the list should be the semi-infinite
medium from which the light enters, the last element should be the semi-
infinite medium to which the light exits (if any exits).
th_0 is the angle of incidence: 0 for normal, pi/2 for glancing.
Remember, for a dissipative incoming medium (n_list[0] is not real), th_0
should be complex so that n0 sin(th0) is real (intensity is constant as
a function of lateral position).
d_list is the list of layer thicknesses (front to back). Should correspond
one-to-one with elements of n_list. First and last elements should be "inf".
lam_vac is vacuum wavelength of the light.
Outputs the following as a dictionary (see manual for details)
* r--reflection amplitude
* t--transmission amplitude
* R--reflected wave power (as fraction of incident)
* T--transmitted wave power (as fraction of incident)
* power_entering--Power entering the first layer, usually (but not always)
equal to 1-R (see manual).
* vw_list-- n'th element is [v_n,w_n], the forward- and backward-traveling
amplitudes, respectively, in the n'th medium just after interface with
(n-1)st medium.
* kz_list--normal component of complex angular wavenumber for
forward-traveling wave in each layer.
* th_list--(complex) propagation angle (in radians) in each layer
* pol, n_list, d_list, th_0, lam_vac--same as input
"""
# Convert to numpy arrays if not already
n_list = np.array(n_list)
d_list = np.array(d_list)
# Input tests
if ((hasattr(lam_vac, 'size') and lam_vac.size > 1)
or (hasattr(th_0, 'size') and th_0.size > 1)):
raise ValueError('This function is not vectorized; you need to run one '
'calculation at a time (1 wavelength, 1 angle, etc.)')
if (n_list.ndim != 1) or (d_list.ndim != 1) or (n_list.size != d_list.size):
raise ValueError("Problem with n_list or d_list!")
#assert d_list[0] == d_list[-1] == inf, 'd_list must start and end with inf!'
assert abs((n_list[0]*np.sin(th_0)).imag) < 100*EPSILON, 'Error in n0 or th0!'
assert is_forward_angle(n_list[0], th_0), 'Error in n0 or th0!'
# using a mix of notation from byrnes and pettersson
# because pettersson's notation is often garbage
num_layers = n_list.size
n0 = n_list[0]
cosTheta_list = sqrt(1 - (n0/n_list)**2 * sin(th_0)**2)
th_list = list_snell(n_list, th_0)
sinTheta_list = (n0/n_list)**2 * sin(th_0)**2
kz_list = 2 * pi / lam_vac * n_list * cosTheta_list
delta = kz_list * d_list
t_list = zeros((num_layers, num_layers), dtype=complex)
r_list = zeros((num_layers, num_layers), dtype=complex)
I_list = zeros((num_layers, 2, 2), dtype=complex)
L_list = zeros((num_layers, 2, 2), dtype=complex)
M_list = zeros((num_layers, 2, 2), dtype=complex)
Mtilde = make_2x2_array(1, 0, 0, 1, dtype=complex)
for j in range(0,num_layers-1):
# t and r are shared notation for pettersson and byrnes
t_list[j,j+1] = interface_t_cos(pol, n_list[j], n_list[j+1],
cosTheta_list[j], cosTheta_list[j+1])
r_list[j,j+1] = interface_r_cos(pol, n_list[j], n_list[j+1],
cosTheta_list[j], cosTheta_list[j+1])
# interface matrix, eqn. 1 pettersson
I_list[j] = 1/t_list[j,j+1] * make_2x2_array(1,r_list[j,j+1],
r_list[j,j+1],1,
dtype=complex)
# M and L are not defined for the 0th layer
# i.e. the substrate or ambient is incoherent
if j==0:
# Pre-factor in byrnes eqn 13
Mtilde = np.dot(I_list[j],Mtilde)
if j>0:
# Layer matrix (phase matrix), eqn. 5 pettersson
L_list[j] = make_2x2_array(exp(-1j*delta[j]),0,
0,exp(1j*delta[j]),dtype=complex)
# M matrix (byrnes eqn. 11)
M_list[j] = np.dot(L_list[j],I_list[j])
# Mtilde byrnes eqn. 13
Mtilde = np.dot(Mtilde,M_list[j])
# Net complex transmission and reflection amplitudes
    # byrnes eqn 15, pettersson eqns 9-10
r = Mtilde[1,0]/Mtilde[0,0]
t = 1/Mtilde[0,0]
# Construct list of forward and backward amplitudes (byrnes eqn 10)
# vw_list[n] = [v_n, w_n]. v_0 and w_0 are undefined because the 0th medium
# has no left interface.
vw_list = zeros((num_layers, 2), dtype=complex)
v_list = zeros((num_layers,1), dtype=complex)
w_list = zeros((num_layers,1), dtype=complex)
# Final layer v and w, Transmitted amplitude, assuming no back illumination
vw = array([[t],[0]])
vw_list[-1,:] = np.transpose(vw)
for i in range(num_layers-2, 0, -1):
vw = np.dot(M_list[i], vw)
v_list[i] = vw[0]
w_list[i] = vw[1]
vw_list[i,:] = np.transpose(vw)
# Assuming incident intensity is 1
vw = array([[1],[r]])
vw_list[0,:] = np.transpose(vw)
# Net transmitted and reflected power, as a proportion of the incoming light
# power.
R = R_from_r(r)
T = T_from_t(pol, t, n_list[0], n_list[-1], th_0, th_list[-1])
power_entering = power_entering_from_r(pol, r, n_list[0], th_0)
return {'r': r, 't': t, 'R': R, 'T': T, 'power_entering': power_entering,
'vw_list': vw_list, 'kz_list': kz_list, 'th_list': th_list,
'pol': pol, 'n_list': n_list, 'd_list': d_list, 'th_0': th_0,
'lam_vac':lam_vac, 'M_list':M_list, 't_list':t_list, 'r_list':r_list,
'Mtilde':Mtilde, 'I_list':I_list, 'L_list':L_list} | 3e10041325ee211d684c9ad960b445df8e6de2db | 3,655,223 |
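# Usage sketch (added for illustration, not from the original source): assuming coherent_tmm
# and the helpers it calls (EPSILON, is_forward_angle, list_snell, interface_t_cos,
# interface_r_cos, make_2x2_array, R_from_r, T_from_t, power_entering_from_r) are available
# in the same module, a single-wavelength, normal-incidence calculation looks like this.
# The layer stack values are illustrative only.
import numpy as np
n_list = np.array([1.0, 1.45, 2.0 + 0.1j, 1.0])  # ambient / SiO2 / absorber / ambient
d_list = np.array([0, 100, 50, 0])               # thicknesses in nm; 0 for the incoherent ends
result = coherent_tmm('s', n_list, d_list, th_0=0.0, lam_vac=532)
print(result['R'], result['T'])                  # reflected and transmitted power fractions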
def base_info():
"""
    Display and update the user's basic profile.
    1. Try to get the user information.
    2. For a GET request, return the user information to the template.
    For a POST request:
    1. Get the parameters: nick_name, signature, gender [MAN, WOMAN].
    2. Check that all parameters are present.
    3. Check that gender is within the allowed range.
    4. Save the user information.
    5. Commit the data.
    6. Update nick_name in the redis session cache:
       registration: session['nick_name'] = mobile
       login: session['nick_name'] = user.nick_name
       update: session['nick_name'] = nick_name
    7. Return the result.
    :return:
"""
user = g.user
if request.method == 'GET':
data = {
'user': user.to_dict()
}
return render_template('blogs/user_base_info.html', data=data)
    # Get the parameters
nick_name = request.json.get('nick_name')
signature = request.json.get('signature')
gender = request.json.get('gender')
    # Check that all parameters are present
    if not all([nick_name, signature, gender]):
        return jsonify(errno=RET.PARAMERR, errmsg='Missing parameters')
    # Validate that gender is within the allowed range
    if gender not in ['MAN', 'WOMAN']:
        return jsonify(errno=RET.PARAMERR, errmsg='Parameter out of range')
    # Save the user information
user.nick_name = nick_name
user.signature = signature
user.gender = gender
    # Commit the data
try:
db.session.add(user)
db.session.commit()
except Exception as e:
current_app.logger.error(e)
db.session.rollback()
        return jsonify(errno=RET.DBERR, errmsg='Failed to save data')
    # Update the user's nick_name in the redis session cache
    session['nick_name'] = nick_name
    # Return the result
return jsonify(errno=RET.OK, errmsg='OK') | 87d5595171e2cecc469ea933b210783e15c477d2 | 3,655,224 |
def to_list(obj):
""" """
if isinstance(obj, np.ndarray):
return obj.tolist()
raise TypeError('Not serializable') | 92e4851bb117ab908dc256f8b42ef03c85d70e28 | 3,655,225 |
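# Example (added for illustration): to_list follows the contract of the `default` hook in
# json.dumps -- convert what it knows (numpy arrays) and raise TypeError for everything
# else so json can report the unserializable value.
import json
import numpy as np
payload = {"weights": np.arange(3), "name": "model-a"}
print(json.dumps(payload, default=to_list))  # {"weights": [0, 1, 2], "name": "model-a"}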
from sage.symbolic.expression import Expression
from sage.symbolic.ring import SR
from inspect import signature, Parameter
def symbolic_expression(x):
"""
Create a symbolic expression or vector of symbolic expressions from x.
INPUT:
- ``x`` - an object
OUTPUT:
- a symbolic expression.
EXAMPLES::
sage: a = symbolic_expression(3/2); a
3/2
sage: type(a)
<type 'sage.symbolic.expression.Expression'>
sage: R.<x> = QQ[]; type(x)
<type 'sage.rings.polynomial.polynomial_rational_flint.Polynomial_rational_flint'>
sage: a = symbolic_expression(2*x^2 + 3); a
2*x^2 + 3
sage: type(a)
<type 'sage.symbolic.expression.Expression'>
sage: from sage.symbolic.expression import is_Expression
sage: is_Expression(a)
True
sage: a in SR
True
sage: a.parent()
Symbolic Ring
Note that equations exist in the symbolic ring::
sage: E = EllipticCurve('15a'); E
Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 - 10*x - 10 over Rational Field
sage: symbolic_expression(E)
x*y + y^2 + y == x^3 + x^2 - 10*x - 10
sage: symbolic_expression(E) in SR
True
If ``x`` is a list or tuple, create a vector of symbolic expressions::
sage: v=symbolic_expression([x,1]); v
(x, 1)
sage: v.base_ring()
Symbolic Ring
sage: v=symbolic_expression((x,1)); v
(x, 1)
sage: v.base_ring()
Symbolic Ring
sage: v=symbolic_expression((3,1)); v
(3, 1)
sage: v.base_ring()
Symbolic Ring
sage: E = EllipticCurve('15a'); E
Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 - 10*x - 10 over Rational Field
sage: v=symbolic_expression([E,E]); v
(x*y + y^2 + y == x^3 + x^2 - 10*x - 10, x*y + y^2 + y == x^3 + x^2 - 10*x - 10)
sage: v.base_ring()
Symbolic Ring
If ``x`` is a function, for example defined by a ``lambda`` expression, create a
symbolic function::
sage: f = symbolic_expression(lambda z: z^2 + 1); f
z |--> z^2 + 1
sage: f.parent()
Callable function ring with argument z
sage: f(7)
50
If ``x`` is a list or tuple of functions, or if ``x`` is a function that returns a list
or tuple, create a callable symbolic vector::
sage: symbolic_expression([lambda mu, nu: mu^2 + nu^2, lambda mu, nu: mu^2 - nu^2])
(mu, nu) |--> (mu^2 + nu^2, mu^2 - nu^2)
sage: f = symbolic_expression(lambda uwu: [1, uwu, uwu^2]); f
uwu |--> (1, uwu, uwu^2)
sage: f.parent()
Vector space of dimension 3 over Callable function ring with argument uwu
sage: f(5)
(1, 5, 25)
sage: f(5).parent()
Vector space of dimension 3 over Symbolic Ring
TESTS:
Also functions defined using ``def`` can be used, but we do not advertise it as a use case::
sage: def sos(x, y):
....: return x^2 + y^2
sage: symbolic_expression(sos)
(x, y) |--> x^2 + y^2
Functions that take a varying number of arguments or keyword-only arguments are not accepted::
sage: def variadic(x, *y):
....: return x
sage: symbolic_expression(variadic)
Traceback (most recent call last):
...
TypeError: unable to convert <function variadic at 0x...> to a symbolic expression
sage: def function_with_keyword_only_arg(x, *, sign=1):
....: return sign * x
sage: symbolic_expression(function_with_keyword_only_arg)
Traceback (most recent call last):
...
TypeError: unable to convert <function function_with_keyword_only_arg at 0x...>
to a symbolic expression
"""
if isinstance(x, Expression):
return x
elif hasattr(x, '_symbolic_'):
return x._symbolic_(SR)
elif isinstance(x, (tuple, list)):
return vector([symbolic_expression(item) for item in x])
elif callable(x):
try:
s = signature(x)
except ValueError:
pass
else:
if all(param.kind in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD)
for param in s.parameters.values()):
vars = [SR.var(name) for name in s.parameters.keys()]
result = x(*vars)
if isinstance(result, (tuple, list)):
return vector(SR, result).function(*vars)
else:
return SR(result).function(*vars)
return SR(x) | 648c85a8fd3f4ffefec44e5720f8c9ac68c10388 | 3,655,226 |
def seq_hyphentation(words):
"""
Converts words in a list of strings into lists of syllables
:param words: a list of words (strings)
:return: a list of lists containing word syllables
"""
return [hyphenation(w) for w in words] | dd1ab65f64926e724718edac316a98bac99991da | 3,655,227 |
def angle(A, B, dim=1):
"""
Computes the angle in radians between the inputs along the specified dimension
Parameters
----------
A : Tensor
first input tensor
B : Tensor
second input tensor
dim : int (optional)
dimension along the angle is computed (default is 1)
Returns
-------
Tensor
the tensor containing the angle between the inputs
"""
return acos(clamp(dot(A, B, dim=dim), -1, 1)) | f64950b8004a32e2ab274efee3a9bedf6441439a | 3,655,228 |
import functools
def _run_lint_helper(
*, fail_on_missing_sub_src, exclude_lint, warn_lint, site_name=None):
"""Helper for executing lint on specific site or all sites in repo."""
if site_name:
func = functools.partial(engine.lint.site, site_name=site_name)
else:
func = engine.lint.full
warns = func(
fail_on_missing_sub_src=fail_on_missing_sub_src,
exclude_lint=exclude_lint,
warn_lint=warn_lint)
return warns | a73e2e9a4bb968376622308cf7af2f97f6533595 | 3,655,229 |
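# The functools.partial call above freezes site_name so that both branches expose the same
# call signature to the caller. A minimal standalone illustration of the same pattern
# (the lint function here is hypothetical, not part of the source):
import functools
def lint(*, strict, site_name=None):
    return (strict, site_name)
site_lint = functools.partial(lint, site_name="site-a")
assert site_lint(strict=True) == (True, "site-a")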
def simulate_from_orders_nb(target_shape: tp.Shape,
group_lens: tp.Array1d,
init_cash: tp.Array1d,
call_seq: tp.Array2d,
size: tp.ArrayLike = np.asarray(np.inf),
price: tp.ArrayLike = np.asarray(np.inf),
size_type: tp.ArrayLike = np.asarray(SizeType.Amount),
direction: tp.ArrayLike = np.asarray(Direction.Both),
fees: tp.ArrayLike = np.asarray(0.),
fixed_fees: tp.ArrayLike = np.asarray(0.),
slippage: tp.ArrayLike = np.asarray(0.),
min_size: tp.ArrayLike = np.asarray(0.),
max_size: tp.ArrayLike = np.asarray(np.inf),
size_granularity: tp.ArrayLike = np.asarray(np.nan),
reject_prob: tp.ArrayLike = np.asarray(0.),
lock_cash: tp.ArrayLike = np.asarray(False),
allow_partial: tp.ArrayLike = np.asarray(True),
raise_reject: tp.ArrayLike = np.asarray(False),
log: tp.ArrayLike = np.asarray(False),
val_price: tp.ArrayLike = np.asarray(np.inf),
close: tp.ArrayLike = np.asarray(np.nan),
auto_call_seq: bool = False,
ffill_val_price: bool = True,
update_value: bool = False,
max_orders: tp.Optional[int] = None,
max_logs: int = 0,
flex_2d: bool = True) -> tp.Tuple[tp.RecordArray, tp.RecordArray]:
"""Creates on order out of each element.
Iterates in the column-major order.
Utilizes flexible broadcasting.
!!! note
Should be only grouped if cash sharing is enabled.
If `auto_call_seq` is True, make sure that `call_seq` follows `CallSeqType.Default`.
Single value should be passed as a 0-dim array (for example, by using `np.asarray(value)`).
Usage:
* Buy and hold using all cash and closing price (default):
```pycon
>>> import numpy as np
>>> from vectorbt.records.nb import col_map_nb
>>> from vectorbt.portfolio.nb import simulate_from_orders_nb, asset_flow_nb
>>> from vectorbt.portfolio.enums import Direction
>>> close = np.array([1, 2, 3, 4, 5])[:, None]
>>> order_records, _ = simulate_from_orders_nb(
... target_shape=close.shape,
... close=close,
... group_lens=np.array([1]),
... init_cash=np.array([100]),
... call_seq=np.full(close.shape, 0)
... )
>>> col_map = col_map_nb(order_records['col'], close.shape[1])
>>> asset_flow = asset_flow_nb(close.shape, order_records, col_map, Direction.Both)
>>> asset_flow
array([[100.],
[ 0.],
[ 0.],
[ 0.],
[ 0.]])
```
"""
check_group_lens_nb(group_lens, target_shape[1])
cash_sharing = is_grouped_nb(group_lens)
check_group_init_cash_nb(group_lens, target_shape[1], init_cash, cash_sharing)
order_records, log_records = init_records_nb(target_shape, max_orders, max_logs)
init_cash = init_cash.astype(np.float_)
last_position = np.full(target_shape[1], 0., dtype=np.float_)
last_debt = np.full(target_shape[1], 0., dtype=np.float_)
last_val_price = np.full(target_shape[1], np.nan, dtype=np.float_)
order_price = np.full(target_shape[1], np.nan, dtype=np.float_)
temp_order_value = np.empty(target_shape[1], dtype=np.float_)
oidx = 0
lidx = 0
from_col = 0
for group in range(len(group_lens)):
to_col = from_col + group_lens[group]
group_len = to_col - from_col
cash_now = init_cash[group]
free_cash_now = init_cash[group]
for i in range(target_shape[0]):
for k in range(group_len):
col = from_col + k
# Resolve order price
_price = flex_select_auto_nb(price, i, col, flex_2d)
if np.isinf(_price):
if _price > 0:
_price = flex_select_auto_nb(close, i, col, flex_2d) # upper bound is close
elif i > 0:
_price = flex_select_auto_nb(close, i - 1, col, flex_2d) # lower bound is prev close
else:
_price = np.nan # first timestamp has no prev close
order_price[col] = _price
# Resolve valuation price
_val_price = flex_select_auto_nb(val_price, i, col, flex_2d)
if np.isinf(_val_price):
if _val_price > 0:
_val_price = _price # upper bound is order price
elif i > 0:
_val_price = flex_select_auto_nb(close, i - 1, col, flex_2d) # lower bound is prev close
else:
_val_price = np.nan # first timestamp has no prev close
if not np.isnan(_val_price) or not ffill_val_price:
last_val_price[col] = _val_price
# Calculate group value and rearrange if cash sharing is enabled
if cash_sharing:
# Same as get_group_value_ctx_nb but with flexible indexing
value_now = cash_now
for k in range(group_len):
col = from_col + k
if last_position[col] != 0:
value_now += last_position[col] * last_val_price[col]
# Dynamically sort by order value -> selling comes first to release funds early
if auto_call_seq:
# Same as sort_by_order_value_ctx_nb but with flexible indexing
for k in range(group_len):
col = from_col + k
temp_order_value[k] = approx_order_value_nb(
flex_select_auto_nb(size, i, col, flex_2d),
flex_select_auto_nb(size_type, i, col, flex_2d),
flex_select_auto_nb(direction, i, col, flex_2d),
cash_now,
last_position[col],
free_cash_now,
last_val_price[col],
value_now
)
# Sort by order value
insert_argsort_nb(temp_order_value[:group_len], call_seq[i, from_col:to_col])
for k in range(group_len):
col = from_col + k
if cash_sharing:
col_i = call_seq[i, col]
if col_i >= group_len:
raise ValueError("Call index exceeds bounds of the group")
col = from_col + col_i
# Get current values per column
position_now = last_position[col]
debt_now = last_debt[col]
val_price_now = last_val_price[col]
if not cash_sharing:
value_now = cash_now
if position_now != 0:
value_now += position_now * val_price_now
# Generate the next order
order = order_nb(
size=flex_select_auto_nb(size, i, col, flex_2d),
price=order_price[col],
size_type=flex_select_auto_nb(size_type, i, col, flex_2d),
direction=flex_select_auto_nb(direction, i, col, flex_2d),
fees=flex_select_auto_nb(fees, i, col, flex_2d),
fixed_fees=flex_select_auto_nb(fixed_fees, i, col, flex_2d),
slippage=flex_select_auto_nb(slippage, i, col, flex_2d),
min_size=flex_select_auto_nb(min_size, i, col, flex_2d),
max_size=flex_select_auto_nb(max_size, i, col, flex_2d),
size_granularity=flex_select_auto_nb(size_granularity, i, col, flex_2d),
reject_prob=flex_select_auto_nb(reject_prob, i, col, flex_2d),
lock_cash=flex_select_auto_nb(lock_cash, i, col, flex_2d),
allow_partial=flex_select_auto_nb(allow_partial, i, col, flex_2d),
raise_reject=flex_select_auto_nb(raise_reject, i, col, flex_2d),
log=flex_select_auto_nb(log, i, col, flex_2d)
)
# Process the order
state = ProcessOrderState(
cash=cash_now,
position=position_now,
debt=debt_now,
free_cash=free_cash_now,
val_price=val_price_now,
value=value_now,
oidx=oidx,
lidx=lidx
)
order_result, new_state = process_order_nb(
i, col, group,
state,
update_value,
order,
order_records,
log_records
)
# Update state
cash_now = new_state.cash
position_now = new_state.position
debt_now = new_state.debt
free_cash_now = new_state.free_cash
val_price_now = new_state.val_price
value_now = new_state.value
oidx = new_state.oidx
lidx = new_state.lidx
# Now becomes last
last_position[col] = position_now
last_debt[col] = debt_now
if not np.isnan(val_price_now) or not ffill_val_price:
last_val_price[col] = val_price_now
from_col = to_col
return order_records[:oidx], log_records[:lidx] | 32898fa1a1aadf50d6d07553da8e7bed94f3de0e | 3,655,230 |
def exp_map_individual(network, variable, max_degree):
"""Summary measure calculate for the non-parametric mapping approach described in Sofrygin & van der Laan (2017).
This approach works best for networks with uniform degree distributions. This summary measure generates a number
of columns (a total of ``max_degree``). Each column is then an indicator variable for each observation. To keep
all columns the same number of dimensions, zeroes are filled in for all degrees above unit i's observed degree.
Parameters
----------
network : networkx.Graph
The NetworkX graph object to calculate the summary measure for.
variable : str
Variable to calculate the summary measure for (this will always be the exposure variable internally).
max_degree : int
Maximum degree in the network (defines the number of columns to generate).
Returns
-------
dataframe
Data set containing all generated columns
"""
attrs = []
for i in network.nodes:
j_attrs = []
for j in network.neighbors(i):
j_attrs.append(network.nodes[j][variable])
attrs.append(j_attrs[:max_degree])
return pd.DataFrame(attrs,
columns=[variable+'_map'+str(x+1) for x in range(max_degree)]) | cb4424ad10dae3df4a3d60ec5d7b143b2130a9bb | 3,655,232 |
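# Small illustrative example (assumes networkx and pandas are imported as nx / pd, as the
# function above requires): a path graph 0-1-2 with a binary exposure attribute "A".
# Shorter neighbour lists are padded by pandas with NaN.
import networkx as nx
import pandas as pd
G = nx.path_graph(3)
nx.set_node_attributes(G, {0: 1, 1: 0, 2: 1}, name="A")
print(exp_map_individual(G, variable="A", max_degree=2))
# node 0 -> [0, NaN], node 1 -> [1, 1], node 2 -> [0, NaN]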
def bridge_meshes(Xs, Ys, Zs, Cs):
"""
Concatenate multiple meshes, with hidden transparent bridges, to a single mesh, so that plt.plot_surface
uses correct drawing order between meshes (as it really should)
:param list Xs: list of x-coordinates for each mesh
:param list Ys: list of y-coordinates for each mesh
:param list Zs: list of z-coordinates for each mesh
:param list Cs: list of colors for each mesh
:return: Concatenated meshes X_full, Y_full, Z_full, C_full
"""
assert len(Xs) == len(Ys) == len(Zs) == len(Cs)
if len(Xs) > 2:
X1, Y1, Z1, C1 = bridge_meshes(Xs[1:], Ys[1:], Zs[1:], Cs[1:])
elif len(Xs) == 2:
X1, Y1, Z1, C1 = Xs[1], Ys[1], Zs[1], Cs[1]
else:
raise Exception
X0, Y0, Z0, C0 = Xs[0], Ys[0], Zs[0], Cs[0]
X_bridge = np.vstack(np.linspace(X0[-1, :], X1[-1, :], 1))
Y_bridge = np.vstack(np.linspace(Y0[-1, :], Y1[-1, :], 1))
Z_bridge = np.vstack(np.linspace(Z0[-1, :], Z1[-1, :], 1))
color_bridge = np.empty_like(Z_bridge, dtype=object)
    color_bridge.fill((1, 1, 1, 0))  # Make the bridge transparent
# Join surfaces
X_full = np.vstack([X0, X_bridge, X1])
Y_full = np.vstack([Y0, Y_bridge, Y1])
Z_full = np.vstack([Z0, Z_bridge, Z1])
color_full = np.vstack([C0, color_bridge, C1])
return X_full, Y_full, Z_full, color_full | 389948e3d357cb7a87e844eee8417f2466c41cab | 3,655,233 |
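# Illustrative use (not from the original source): join two surfaces so that a single
# plot_surface call draws both with a consistent z-order; the colors mimic the function's
# own object-dtype RGBA convention.
import numpy as np
X0, Y0 = np.meshgrid(np.linspace(0, 1, 10), np.linspace(0, 1, 10))
X1, Y1 = X0 + 2.0, Y0.copy()
Z0, Z1 = np.sin(np.pi * X0), np.cos(np.pi * X1)
C0 = np.empty(Z0.shape, dtype=object); C0.fill((0.2, 0.4, 0.8, 1.0))
C1 = np.empty(Z1.shape, dtype=object); C1.fill((0.8, 0.3, 0.2, 1.0))
X, Y, Z, C = bridge_meshes([X0, X1], [Y0, Y1], [Z0, Z1], [C0, C1])
# ax.plot_surface(X, Y, Z, facecolors=C, shade=False)  # on an mplot3d 3D axes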
def get_groups():
"""
Get the list of label groups.
@return: the list of label groups.
"""
labels_dict = load_yaml_from_file("labels")
groups = []
for group_info in labels_dict["groups"]:
        label_infos = group_info.pop("labels", [])  # remove nested labels before building the Group
        group = Group(**group_info)
        groups.append(group)
        for label_info in label_infos:
            Label(**label_info, group=group)
return groups | 03822287ab1a2525560f6fdf2a55a3c2461c6bea | 3,655,234 |
def diffractometer_rotation(phi=0, chi=0, eta=0, mu=0):
"""
Generate the 6-axis diffracometer rotation matrix
R = M * E * X * P
Also called Z in H. You, J. Appl. Cryst 32 (1999), 614-623
:param phi: float angle in degrees
:param chi: float angle in degrees
:param eta: float angle in degrees
:param mu: float angle in degrees
:return: [3*3] array
"""
P = rotmatrixz(phi)
X = rotmatrixy(chi)
E = rotmatrixz(eta)
M = rotmatrixx(mu)
return np.dot(M, np.dot(E, np.dot(X, P))) | 7f56caf6585f74406b8f681614c6a6f32592ad91 | 3,655,235 |
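# Quick sanity check (illustrative; assumes the rotmatrix* helpers used above are defined):
# with all angles at zero every rotation matrix is the identity, so R should be too.
import numpy as np
R = diffractometer_rotation(phi=0, chi=0, eta=0, mu=0)
assert np.allclose(R, np.eye(3))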
def supports_build_in_container(config):
"""
Given a workflow config, this method provides a boolean on whether the workflow can run within a container or not.
Parameters
----------
config namedtuple(Capability)
Config specifying the particular build workflow
Returns
-------
tuple(bool, str)
True, if this workflow can be built inside a container. False, along with a reason message if it cannot be.
"""
def _key(c):
return str(c.language) + str(c.dependency_manager) + str(c.application_framework)
    # This information could have been bundled inside the Workflow Config object. But we do it this way because
# ultimately the workflow's implementation dictates whether it can run within a container or not.
# A "workflow config" is like a primary key to identify the workflow. So we use the config as a key in the
# map to identify which workflows can support building within a container.
unsupported = {
_key(DOTNET_CLIPACKAGE_CONFIG): "We do not support building .NET Core Lambda functions within a container. "
"Try building without the container. Most .NET Core functions will build "
"successfully.",
_key(GO_MOD_CONFIG): "We do not support building Go Lambda functions within a container. "
"Try building without the container. Most Go functions will build "
"successfully.",
}
thiskey = _key(config)
if thiskey in unsupported:
return False, unsupported[thiskey]
return True, None | 278bde73252d13784298d01d954a56fcecd986dc | 3,655,236 |
def get_img_array_mhd(img_file):
"""Image array in zyx convention with dtype = int16."""
itk_img = sitk.ReadImage(img_file)
img_array_zyx = sitk.GetArrayFromImage(itk_img) # indices are z, y, x
origin = itk_img.GetOrigin() # x, y, z world coordinates (mm)
    origin_zyx = [origin[2], origin[1], origin[0]]  # z, y, x
spacing = itk_img.GetSpacing() # x, y, z world coordinates (mm)
spacing_zyx = [spacing[2], spacing[1], spacing[0]] # z, y, x
acquisition_exception = None # no acquisition number found in object
return img_array_zyx, spacing_zyx, origin_zyx, acquisition_exception | 6c6bafedf34aaf0c03367c9058b29401bf133fd0 | 3,655,237 |
def registration(request):
"""Render the registration page."""
if request.user.is_authenticated:
return redirect(reverse('index'))
if request.method == 'POST':
registration_form = UserRegistrationForm(request.POST)
if registration_form.is_valid():
registration_form.save()
user = auth.authenticate(username=request.POST['username'],
password=request.POST['password1'])
if user:
auth.login(user=user, request=request)
messages.success(request, "You have registered successfully.")
return redirect(reverse('index'))
else:
messages.error(request, "Unable to register your account at this time.")
else:
registration_form = UserRegistrationForm()
return render(request, 'registration.html', {"registration_form": registration_form}) | dae59392e290291d9d81ca427ee35b07c6ed554b | 3,655,238 |
def _get_arc2height(arcs):
"""
Parameters
----------
arcs: list[(int, int)]
Returns
-------
dict[(int, int), int]
"""
# arc2height = {(b,e): np.abs(b - e) for b, e in arcs}
n_arcs = len(arcs)
arcs_sorted = sorted(arcs, key=lambda x: np.abs(x[0] - x[1]))
arc2height = {arc: 1 for arc in arcs}
for arc_i in range(n_arcs):
bi, ei = sorted(arcs_sorted[arc_i])
for arc_j in range(n_arcs):
if arc_i == arc_j:
continue
bj, ej = sorted(arcs_sorted[arc_j])
if bi <= bj <= ej <= ei:
arc2height[arcs_sorted[arc_i]] = max(arc2height[arcs_sorted[arc_j]] + 1, arc2height[arcs_sorted[arc_i]])
return arc2height | feb929e9f2e23e1c154423930ae33944b95af699 | 3,655,239 |
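# Worked example (added for illustration; assumes numpy is imported as np alongside the
# function above): the outer arc (0, 5) nests both inner arcs, so it gets height 2 while
# the inner arcs keep height 1.
print(_get_arc2height([(0, 5), (1, 2), (3, 4)]))
# {(0, 5): 2, (1, 2): 1, (3, 4): 1}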
from os.path import dirname, exists, join
from shutil import copyfile
def init_ycm(path):
"""
Generate a ycm_extra_conf.py file in the given path dir to specify
compilation flags for a project. This is necessary to get
semantic analysis for c-family languages.
Check ycmd docs for more details.
"""
conf = join(path, '.ycm_extra_conf.py')
if exists(conf):
root.status.set_msg('File overwritten: %s' % conf)
copyfile(join(dirname(__file__), 'ycm_extra_conf.py'), conf)
return conf | 361d744982c2a8c4fd1e787408150381a3b111d3 | 3,655,240 |
def get_aggregate_stats_flows_single_appliance(
self,
ne_pk: str,
start_time: int,
end_time: int,
granularity: str,
traffic_class: int = None,
flow: str = None,
ip: str = None,
data_format: str = None
) -> dict:
"""Get aggregate flow stats data for a single appliance filter by
query parameters
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - aggregateStats
- GET
- /stats/aggregate/flow/{nePk}
:param ne_pk: Network Primary Key (nePk) of appliance, e.g. ``3.NE``
:type ne_pk: str
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param traffic_class: Filter for data which belongs to particular
traffic class, accepted values between 1-10, defaults to None
:type traffic_class: int, optional
:param flow: Filter for data of a particular flow type. Accepted
values are "TCP_ACCELERATED" "TCP_NOT_ACCELERATED" "NON_TCP",
defaults to None
:type flow: str, optional
:param ip: ``True`` to use IP address as key to sort results or
``False`` or ``None`` for default sorting by appliance ID,
defaults to None
:type ip: bool, optional
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:return: Returns dictionary of aggregate stats filtered by query
parameters
:rtype: dict
"""
path = (
"/stats/aggregate/flow/"
+ "{}?startTime={}&endTime={}&granularity={}".format(
ne_pk, start_time, end_time, granularity
)
)
if traffic_class is not None:
path = path + "&trafficClass={}".format(traffic_class)
if flow is not None:
path = path + "&flow={}".format(flow)
if ip is not None:
path = path + "&ip={}".format(ip)
if data_format is not None:
path = path + "&format={}".format(data_format)
return self._get(path) | 5ca6e2b5ce1b176aea603a254b0ca655e0f43c0c | 3,655,241 |
def load_user(userid):
"""Callback to load user from db, called by Flask-Login"""
db = get_db()
user = db.execute("SELECT id FROM users WHERE id = ?", [userid]).fetchone()
if user is not None:
return User(user[0])
return None | 0dd9516af3670794c107bd6633c74a033f0a4983 | 3,655,242 |
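# Typical wiring (illustrative, not from the source): Flask-Login looks this callback up
# via LoginManager.user_loader, so registering it lets the extension rebuild the user
# object from the id stored in the session cookie on every request.
# login_manager = LoginManager(app)
# login_manager.user_loader(load_user)  # equivalent to decorating load_user with @login_manager.user_loader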
import numpy as np
import nltk
import torch
def get_partial_outputs_with_prophecies(prophecies, loader, model, my_device,
corpus, seq2seq):
"""
Parameters
----------
prophecies : dict
Dictionary mapping from sequence index to a list of prophecies, one
for each prefix in the sequence.
loader : torch.utils.data.dataloader.DataLoader
Data loader, batch must have size 1.
model : models.<model>
NN model not BERT
my_device : torch.device
PyTorch device.
label_pad : int
Index of padding label.
seq2seq : bool
True if sequence tagging, else False for sequence classification.
Returns
-------
results : dict of dicts
A dictionary storing partial outputs, accuracy w.r.t. the gold labels
and an np matrix that indicates editions.
"""
# see comments in function above
model.eval()
results = {'partial_outputs':{}, 'log_changes':{}, 'accuracy':{}}
with torch.no_grad():
for x, lens, y, idx in loader:
#if idx.item() not in prophecies:
# continue
x = x.to(my_device)
y = y.to(my_device)
lens = lens.to(my_device)
if seq2seq:
predictions = np.zeros((lens, lens))
changes = np.zeros((lens, lens))
else:
predictions = np.zeros((lens, 1))
changes = np.zeros((lens, 1))
pad = corpus.word2id['<pad>']
for l in range(1,lens.item()+1):
if l != lens.item():
part_x = x[:,:l]
# add prophecy
prophecy = nltk.word_tokenize(
prophecies[idx.item()][l-1][0])
prophecy_ids = torch.tensor([[corpus.word2id.get(w, pad)
for w in prophecy[l:]]],
dtype=torch.long, device=x.device)
part_x = torch.cat((part_x, prophecy_ids),dim=1)
part_len = torch.tensor([l+prophecy_ids.shape[1]],
device=x.device)
                    # create a placeholder y to append; it will not be used (but it must not
                    # collide with the predicate label index in SRL), so we use zero and check
if 'srl' in corpus.task:
assert corpus.label2id['B-V'] != 0
if seq2seq:
extra_pad = torch.tensor([[0]*(part_x.shape[1]-l)], device=x.device, dtype=torch.long)
part_y = torch.cat((y[:,:l], extra_pad), dim=1)
#part_y = torch.zeros((1, part_len.item()), dtype=torch.long,
# device=y.device)
else:
part_y = y
else: # complete sentence does not need prophecy
part_x = x
part_y = y
part_len = lens
#unpacked, mask = model(x, lens) # _ = (hidden, context)
_, predicted = model(part_x, part_len, part_y, seq2seq)
if seq2seq:
predictions[l-1] = np.array((predicted[:l].tolist()
+ (lens.item() - l)*[np.inf]))
else:
predictions[l-1] = np.array((predicted.tolist()))
if l == 1:
changes[l-1][0] = 1
else:
changes[l-1] = predictions[l-1] != predictions[l-2]
y = y.reshape(-1)
y = torch.tensor([i for i in y if i!=corpus.label2id['<pad>']])
if seq2seq:
acc = (predictions[-1] == y.cpu().numpy()).sum() / lens.item()
else:
acc = (predictions[-1] == y.cpu().numpy()).sum()
results['partial_outputs'][idx.item()] = predictions
results['log_changes'][idx.item()] = changes
results['accuracy'][idx.item()] = acc
return results | cae0ed8643f677a5d2a2f3e75858b68f473acc50 | 3,655,243 |
from typing import Tuple
from typing import Optional
from typing import List
import io
import textwrap
from aas_core_codegen.csharp.common import INDENT as I  # assumed source of the indentation constant
def _generate_deserialize_impl(
symbol_table: intermediate.SymbolTable,
spec_impls: specific_implementations.SpecificImplementations,
) -> Tuple[Optional[Stripped], Optional[List[Error]]]:
"""Generate the implementation for deserialization functions."""
blocks = [
_generate_skip_whitespace_and_comments(),
_generate_read_whole_content_as_base_64(),
] # type: List[Stripped]
errors = [] # type: List[Error]
for symbol in symbol_table.symbols:
if isinstance(symbol, intermediate.Enumeration):
# NOTE (mristin, 2022-04-13):
# Enumerations are going to be directly deserialized using
# ``Stringification``.
continue
elif isinstance(symbol, intermediate.ConstrainedPrimitive):
# NOTE (mristin, 2022-04-13):
# Constrained primitives are only verified, but do not represent a C# type.
continue
elif isinstance(
symbol, (intermediate.AbstractClass, intermediate.ConcreteClass)
):
if symbol.is_implementation_specific:
implementation_keys = [
specific_implementations.ImplementationKey(
f"Xmlization/DeserializeImplementation/"
f"{symbol.name}_from_element.cs"
),
specific_implementations.ImplementationKey(
f"Xmlization/DeserializeImplementation/"
f"{symbol.name}_from_sequence.cs"
),
]
for implementation_key in implementation_keys:
implementation = spec_impls.get(implementation_key, None)
if implementation is None:
errors.append(
Error(
symbol.parsed.node,
f"The xmlization snippet is missing "
f"for the implementation-specific "
f"class {symbol.name}: {implementation_key}",
)
)
continue
else:
blocks.append(spec_impls[implementation_key])
else:
if isinstance(symbol, intermediate.ConcreteClass):
(
block,
generation_errors,
) = _generate_deserialize_impl_cls_from_sequence(cls=symbol)
if generation_errors is not None:
errors.append(
Error(
symbol.parsed.node,
f"Failed to generate the XML deserialization code "
f"for the class {symbol.name}",
generation_errors,
)
)
else:
assert block is not None
blocks.append(block)
if symbol.interface is not None:
blocks.append(
_generate_deserialize_impl_interface_from_element(
interface=symbol.interface
)
)
if isinstance(symbol, intermediate.ConcreteClass):
blocks.append(
_generate_deserialize_impl_concrete_cls_from_element(cls=symbol)
)
else:
assert_never(symbol)
if len(errors) > 0:
return None, errors
writer = io.StringIO()
writer.write(
"""\
/// <summary>
/// Implement the deserialization of meta-model classes from XML.
/// </summary>
/// <remarks>
/// The implementation propagates an <see cref="Reporting.Error" /> instead of
/// relying on exceptions. Under the assumption that incorrect data is much less
/// frequent than correct data, this makes the deserialization more
/// efficient.
///
/// However, we do not want to force the client to deal with
/// the <see cref="Reporting.Error" /> class as this is not intuitive.
/// Therefore we distinguish the implementation, realized in
/// <see cref="DeserializeImplementation" />, and the facade given in
/// <see cref="Deserialize" /> class.
/// </remarks>
internal static class DeserializeImplementation
{
"""
)
for i, block in enumerate(blocks):
if i > 0:
writer.write("\n\n")
writer.write(textwrap.indent(block, I))
writer.write("\n} // internal static class DeserializeImplementation")
return Stripped(writer.getvalue()), None | 3e2e3c78709b75a8b650d775d4b0f8b6c8287ca0 | 3,655,244 |
def timestep_to_transition_idx(snapshot_years, transitions, timestep):
"""Convert timestep to transition index.
Args:
snapshot_years (list): a list of years corresponding to the provided
rasters
transitions (int): the number of transitions in the scenario
timestep (int): the current timestep
Returns:
transition_idx (int): the current transition
"""
    for i in range(0, transitions):
if timestep < (snapshot_years[i+1] - snapshot_years[0]):
return i | 96bcda2493fcd51f9c7b335ea75fd612384207e3 | 3,655,245 |
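# Worked example (illustrative): with snapshots in 2000, 2005 and 2010 there are two
# transitions; timestep 3 (year 2003) still falls in the first one.
print(timestep_to_transition_idx([2000, 2005, 2010], transitions=2, timestep=3))  # 0
print(timestep_to_transition_idx([2000, 2005, 2010], transitions=2, timestep=7))  # 1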
def resolve_checks(names, all_checks):
"""Returns a set of resolved check names.
Resolving a check name expands tag references (e.g., "@tag") to all the
checks that contain the given tag. OpenShiftCheckException is raised if
names contains an unknown check or tag name.
names should be a sequence of strings.
all_checks should be a sequence of check classes/instances.
"""
known_check_names = set(check.name for check in all_checks)
known_tag_names = set(name for check in all_checks for name in check.tags)
check_names = set(name for name in names if not name.startswith('@'))
tag_names = set(name[1:] for name in names if name.startswith('@'))
unknown_check_names = check_names - known_check_names
unknown_tag_names = tag_names - known_tag_names
if unknown_check_names or unknown_tag_names:
msg = []
if unknown_check_names:
msg.append('Unknown check names: {}.'.format(', '.join(sorted(unknown_check_names))))
if unknown_tag_names:
msg.append('Unknown tag names: {}.'.format(', '.join(sorted(unknown_tag_names))))
msg.append('Make sure there is no typo in the playbook and no files are missing.')
raise OpenShiftCheckException('\n'.join(msg))
tag_to_checks = defaultdict(set)
for check in all_checks:
for tag in check.tags:
tag_to_checks[tag].add(check.name)
resolved = check_names.copy()
for tag in tag_names:
resolved.update(tag_to_checks[tag])
return resolved | d86dcd9a5539aeaa31fb3c86304c62f8d86bbb11 | 3,655,247 |
from typing import Optional
import numpy as np
def swish(
data: NodeInput,
beta: Optional[NodeInput] = None,
name: Optional[str] = None,
) -> Node:
"""Return a node which performing Swish activation function Swish(x, beta=1.0) = x * sigmoid(x * beta)).
:param data: Tensor with input data floating point type.
:return: The new node which performs Swish
"""
if beta is None:
beta = make_constant_node(1.0, np.float32)
return _get_node_factory_opset4().create("Swish", as_nodes(data, beta), {}) | d17562d0e63aa1610d9bc641faabec27264a2919 | 3,655,248 |
from datetime import datetime
import numpy as np
def cut_out_interval(data, interval, with_gaps=False):
"""
Cuts out data from input array.
Interval is the start-stop time pair.
If with_gaps flag is True, then one NaN value will be added
between the remaining two pieces of data.
Returns modified data array.
:param data: 2-dimensional array with data
:param interval: list or array with two time points
:type data: np.ndarray
:type interval: list or tuple or np.ndarray
:return: modified data array, start and stop point of deleted interval
:rtype: tuple
"""
supported_arr_types = "np.ndarray"
supported_interval_types = "list or tuple or np.ndarray"
assert isinstance(data, np.ndarray), \
"Arr value is of an unsupported type. " \
"Expected {}, got {} instead.".format(supported_arr_types, type(data))
assert data.ndim == 2, \
"Data must be 2-dimensional array. Got {} ndims instead.".format(data.ndim)
assert isinstance(interval, list) or \
isinstance(interval, tuple) or \
isinstance(interval, np.ndarray), \
"Interval value is of an unsupported type. " \
"Expected {}, got {} instead." \
"".format(supported_interval_types, type(interval))
assert len(interval) == 2, \
"Unsupported interval length. " \
"Expected 2, got {} instead.".format(len(interval))
assert interval[0] <= interval[1], \
"Left interval border ({}) is greater than the right ({})." \
"".format(interval[0], interval[1])
idx_start, idx_stop = _get_interval_idx(data, interval)
if idx_start is None or idx_stop is None:
return data, None, None
# 1-dimensional mask
mask = np.ones(shape=data.shape[1], dtype=bool)
# right border value is included
mask[idx_start:idx_stop + 1] = False
start_str = datetime.fromtimestamp(data[0, idx_start]).strftime("%Y.%m.%d %H:%M:%S")
stop_str = datetime.fromtimestamp(data[0, idx_stop]).strftime("%Y.%m.%d %H:%M:%S")
# add nan if cutting inner interval
if with_gaps and idx_start > 0 and idx_stop < data.shape[1] - 1:
# leave one element and replace it with nan
mask[idx_stop] = True
data[:, idx_stop] = np.nan
# masking (cutting out) all columns
data = data[:, mask]
else:
# masking (cutting out) all columns
data = data[:, mask]
return data, start_str, stop_str | 753be7e45102a7e0adc1b19365d10e009c8f6b89 | 3,655,249 |
import re
def _abbreviations_to_word(text: str):
"""
    Expand abbreviations in a sentence into their full words
    :param text: a single sentence of text
    :return: the converted sentence text
"""
abbreviations = [
(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort')
]
]
for regex, replacement in abbreviations:
text = re.sub(regex, replacement, text)
return text | 576eb1588c40ab4b9ffa7d368249e520ecf887ba | 3,655,250 |
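# Example (added for illustration): titles followed by a period are expanded in place.
print(_abbreviations_to_word("dr. Smith met col. Jones."))
# -> "doctor Smith met colonel Jones."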
def resnet56(num_classes=100):
"""Constructs a ResNet-56 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 56, num_classes)
return model | 98070a6a1b6f69b2d253537b604c616ae52de9b2 | 3,655,251 |
def pad_set_room(request):
"""
    Update the meeting room associated with a pad (terminal).
:param request:
:return:
"""
dbs = request.dbsession
user_id = request.POST.get('user_id', '')
room_id = request.POST.get('room_id', '')
pad_code = request.POST.get('pad_code', '')
    if not user_id:
        error_msg = 'User ID cannot be empty!'
    elif not pad_code:
        error_msg = 'Pad code cannot be empty!'
    elif not room_id:
        error_msg = 'Meeting room ID cannot be empty!'
else:
room, error_msg = set_room(dbs, user_id, pad_code, room_id)
update_last_time(dbs, pad_code, 'setRoom')
logger.info('setRoom--user_id:' + user_id + ',pad_code:' + pad_code + ',room_id:' + room_id)
if error_msg:
json = {
'success': 'false',
'error_msg': error_msg,
}
else:
json = {
'success': 'true',
'room': room
}
return json | 1646204a666e68021c649b6d322b74cbcd515fd2 | 3,655,253 |
def airffromrh_wmo(rh_wmo,temp,pres,asat=None,dhsat=None,chkvals=False,
chktol=_CHKTOL,asat0=None,dhsat0=None,chkbnd=False,mathargs=None):
"""Calculate dry fraction from WMO RH.
Calculate the dry air mass fraction from the relative humidity. The
relative humidity used here is defined by the WMO as:
rh_wmo = [(1-airf)/airf] / [(1-asat)/asat]
where asat is the dry air fraction at saturation.
:arg float rh_wmo: Relative humidity, unitless.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:arg asat: Saturation dry air mass fraction in kg/kg. If unknown,
pass None (default) and it will be calculated.
:type asat: float or None
:arg dhsat: Saturation humid air density in kg/m3. If unknown, pass
None (default) and it will be calculated.
:type dhsat: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg asat0: Initial guess for the saturation dry air mass fraction
in kg/kg. If None (default) then `_approx_tp` is used.
:type asat0: float or None
:arg dhsat0: Initial guess for the saturation humid air density in
kg/m3. If None (default) then `_approx_tp` is used.
:type dhsat0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: In-situ dry air mass fraction in kg/kg.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> airffromrh_wmo(0.8,270.,1e5)
0.997645698908
"""
asat = massfractionair(temp=temp,pres=pres,airf=asat,dhum=dhsat,
chkvals=chkvals,chktol=chktol,airf0=asat0,dhum0=dhsat0,chkbnd=chkbnd,
mathargs=mathargs)
airf = asat / (rh_wmo*(1-asat) + asat)
return airf | 1e4418591087a4bd26b48c470239df58087cdb6e | 3,655,254 |
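# Derivation of the last step above (added note): solving the WMO definition
#   rh_wmo = ((1 - airf)/airf) / ((1 - asat)/asat)
# for airf gives
#   1/airf = 1 + rh_wmo*(1 - asat)/asat  =>  airf = asat / (rh_wmo*(1 - asat) + asat),
# which is exactly the expression returned by airffromrh_wmo.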
import base64
import zlib
from urllib.parse import unquote
def inflate(data: str) -> str:
"""
reverses the compression used by draw.io
see: https://drawio-app.com/extracting-the-xml-from-mxfiles/
see: https://stackoverflow.com/questions/1089662/python-inflate-and-deflate-implementations
:param data: base64 encoded string
:return: "plain text" version of the deflated data
"""
data = base64.b64decode(data)
decompress = zlib.decompressobj(-zlib.MAX_WBITS)
inflated = decompress.decompress(data)
inflated += decompress.flush()
return unquote(inflated.decode('utf-8')) | e4456c7482591611436a92a71464754871461fd5 | 3,655,256 |
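# Round-trip sketch (illustrative): build a draw.io-style payload by URL-quoting,
# raw-deflating and base64-encoding some XML, then recover it with inflate().
import base64
import zlib
from urllib.parse import quote
xml = '<mxGraphModel><root/></mxGraphModel>'
compressor = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
deflated = compressor.compress(quote(xml).encode('utf-8')) + compressor.flush()
payload = base64.b64encode(deflated).decode('ascii')
assert inflate(payload) == xml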
import operator
import networkx as nx
def get_tree(data_path,sep,root,cutoff,layer_max,up=True):
"""
This function takes the path of a data file of edge list with numeric
weights and returns a tree (DiGraph object). The parameters include:
data_path: The path of a data file of edge list with numeric weights.
sep: The delimiter of the data file.
root: A root node to start with.
cutoff: The edge weight threshold.
layer_max: The number of layers to explore.
up: The direction (upstream or downstream) of the tree.
The default is upstream.
"""
# Read in the network data.
F = nx.read_weighted_edgelist(data_path,delimiter=sep,create_using=nx.DiGraph())
# create_using is to specify a directed network, otherwise, an
# undirected network is returned.
# Filter the edges with the cutoff value.
G = nx.DiGraph( [ (u,v,d) for u,v,d in F.edges(data=True) if d['weight']>=cutoff] )
reachset = set()
unreachset = set()
for n in G.nodes():
if(n != root):
unreachset.add(n)
else:
reachset.add(n)
H = nx.DiGraph() # Initiate a tree.
oldreach = len(reachset)
newreach = oldreach +1
rndcount = 0
if(up==True): # When an upstream tree is requested.
while(newreach>oldreach and rndcount<layer_max):
oldreach = len(reachset)
candidatesIn = {}
for ee in G.edges(data = True):
e1 = ee[0]
e2 = ee[1]
w = ee[2]['weight']
if(e2 in reachset and e1 in unreachset): # e2 in reachset because the direction is upstream.
candidatesIn[(e1,e2)] = w
            sorted_edges_in = sorted(candidatesIn.items(), key=operator.itemgetter(1), reverse=True)
# reverse = True is to pick the edge with the largest weight
# first. Otherwise, the edge with the smallest weight will be
# picked first.
if(len(sorted_edges_in) > 0):
for se in sorted_edges_in:
if (se[0][0] in unreachset):
# The same candidate node may appear more than once
# connecting with different existing nodes. So
# se[0][0] needs to be checked if still in
# unreachset before being added. This is to ensure
# that all the nodes in the tree are unique. For
# each round/layer of search, the edge with a
# larger weight is preferred.
reachset.add(se[0][0])
unreachset.remove(se[0][0])
H.add_edge(se[0][0],se[0][1],weight=se[1],layer=rndcount+1) # The edge attribute layer is added.
H.node[se[0][0]]['dist']=rndcount+1 # The node attribute dist (distance from the root) is added.
newreach=len(reachset)
else:
newreach=oldreach
rndcount=rndcount+1
if(H.number_of_nodes()>0): # Error if empty tree.
H.node[root]['dist']=0 # Add the attribute dist for the root.
else: # When a downstream tree is requested.
while(newreach>oldreach and rndcount<layer_max):
oldreach = len(reachset)
candidatesOut = {}
for ee in G.edges(data = True):
e1 = ee[0]
e2 = ee[1]
w = ee[2]['weight']
if(e1 in reachset and e2 in unreachset): # e1 in reachset because the direction is downstream.
candidatesOut[(e1,e2)] = w
            sorted_edges_out = sorted(candidatesOut.items(), key=operator.itemgetter(1), reverse=True)
# reverse = True is to pick the edge with the largest weight
# first. Otherwise, the edge with the smallest weight will be
# picked first.
if(len(sorted_edges_out) > 0):
for se in sorted_edges_out:
if (se[0][1] in unreachset):
# The same candidate node may appear more than once
# connecting with different existing nodes. So
# se[0][1] needs to be checked if still in
# unreachset before being added. This is to ensure
# that all the nodes in the tree are unique. For
# each round/layer of search, the edge with a
# larger weight is preferred.
reachset.add(se[0][1])
unreachset.remove(se[0][1])
H.add_edge(se[0][0],se[0][1],weight=se[1],layer=rndcount+1) # The edge attribute layer is added.
H.node[se[0][1]]['dist']=rndcount+1 # The node attribute dist (distance from the root) is added.
newreach=len(reachset)
else:
newreach=oldreach
rndcount=rndcount+1
if(H.number_of_nodes()>0): # Error if empty tree.
H.node[root]['dist']=0 # Add the attribute dist for the root.
return H | 6f2d151aac39786311c61da4f38140e6c0159562 | 3,655,257 |
def delete_functions(lambda_client, function_list) -> list:
"""Deletes all instances in the instances parameter.
Args:
lambda_client: A lambda boto3 client
function_list: A list of instances you want deleted.
Returns:
A count of deleted instances
"""
terminated_functions = []
for lambda_function in function_list:
function_name = lambda_function["FunctionName"]
if helpers.check_in_whitelist(function_name, WHITELIST_NAME):
continue
try:
lambda_client.delete_function(
FunctionName=function_name
)
except ClientError as error:
error_string = "{0} on {1} - {2}".format(error, RESOURCE_NAME,
function_name)
print(error_string)
terminated_functions.append(error_string)
continue
terminated_functions.append(lambda_function["FunctionName"])
return terminated_functions | f0ca59647f6813d04bf2bbd6ec33ed7744acdd04 | 3,655,258 |
def make_random_shares(seed, minimum, n_shares, share_strength=256):
"""
Generates a random shamir pool for a given seed phrase.
Returns share points as seeds phrases (word list).
"""
if minimum > n_shares:
raise ValueError(
"More shares needed (%d) to recover the seed phrase than created "
"(%d). Seed phrase would be irrecoverable." % (minimum, n_shares)
)
seed_length = len(seed.split(" "))
if seed_length not in LENGTH_ALLOWED:
raise ValueError(
"Seed phrase should have %s words, but not %d words."
% (LENGTH_STR, seed_length)
)
seed_strength = seed_length // 3 * 32
if share_strength not in STRENGTH_ALLOWED:
raise ValueError(
"Share strength should be one of the following %s. "
"But it is not (%d)." % (STRENGTH_STR, share_strength)
)
if share_strength < seed_strength:
raise ValueError(
"Share strength (%d) is lower that seed strength (%d). Seed phrase "
"would be irrecoverable." % (share_strength, seed_strength)
)
prime = PRIMES[share_strength]
secret = seed_to_int(seed)
poly = [secret] + [random_int(prime - 1) for i in range(minimum - 1)]
points = [(i, _eval_at(poly, i, prime))
for i in range(1, n_shares + 1)]
shares = [(i, int_to_seed(point, strength=share_strength))
for i, point in points]
return shares | a8496909cc06f3663d07036e54af744ac7e26b18 | 3,655,259 |
from typing import Optional
from typing import Sequence
def confusion_matrix(
probs: Optional[Sequence[Sequence]] = None,
y_true: Optional[Sequence] = None,
preds: Optional[Sequence] = None,
class_names: Optional[Sequence[str]] = None,
title: Optional[str] = None,
):
"""
Computes a multi-run confusion matrix.
Arguments:
probs (2-d arr): Shape [n_examples, n_classes]
y_true (arr): Array of label indices.
preds (arr): Array of predicted label indices.
class_names (arr): Array of class names.
Returns:
Nothing. To see plots, go to your W&B run page then expand the 'media' tab
under 'auto visualizations'.
Example:
```
vals = np.random.uniform(size=(10, 5))
probs = np.exp(vals)/np.sum(np.exp(vals), keepdims=True, axis=1)
y_true = np.random.randint(0, 5, size=(10))
labels = ["Cat", "Dog", "Bird", "Fish", "Horse"]
wandb.log({'confusion_matrix': wandb.plot.confusion_matrix(probs, y_true=y_true, class_names=labels)})
```
"""
np = util.get_module(
"numpy",
required="confusion matrix requires the numpy library, install with `pip install numpy`",
)
# change warning
assert probs is None or len(probs.shape) == 2, (
"confusion_matrix has been updated to accept"
" probabilities as the default first argument. Use preds=..."
)
assert (probs is None or preds is None) and not (
probs is None and preds is None
), "Must provide probabilties or predictions but not both to confusion matrix"
if probs is not None:
preds = np.argmax(probs, axis=1).tolist()
assert len(preds) == len(
y_true
), "Number of predictions and label indices must match"
if class_names is not None:
n_classes = len(class_names)
class_inds = [i for i in range(n_classes)]
assert max(preds) <= len(
class_names
), "Higher predicted index than number of classes"
assert max(y_true) <= len(
class_names
), "Higher label class index than number of classes"
else:
class_inds = set(preds).union(set(y_true))
n_classes = len(class_inds)
class_names = [f"Class_{i}" for i in range(1, n_classes + 1)]
# get mapping of inds to class index in case user has weird prediction indices
class_mapping = {}
for i, val in enumerate(sorted(list(class_inds))):
class_mapping[val] = i
counts = np.zeros((n_classes, n_classes))
for i in range(len(preds)):
counts[class_mapping[y_true[i]], class_mapping[preds[i]]] += 1
data = []
for i in range(n_classes):
for j in range(n_classes):
data.append([class_names[i], class_names[j], counts[i, j]])
fields = {
"Actual": "Actual",
"Predicted": "Predicted",
"nPredictions": "nPredictions",
}
title = title or ""
return wandb.plot_table(
"wandb/confusion_matrix/v1",
wandb.Table(columns=["Actual", "Predicted", "nPredictions"], data=data),
fields,
{"title": title},
) | c2b63ccf7e3f226b6bfbea4656bc816eaa6e336a | 3,655,260 |
import html
def get_monitor_details():
"""Render the index page."""
monitor_id = paranoid_clean(request.args.get('id'))
monitors = mongo.db[app.config['MONITORS_COLLECTION']]
monitor = monitors.find_one({'hashed': monitor_id}, {'_id': 0})
if not monitor:
return jsonify({'success': False, 'error': 'Monitor was not found.'})
articles = mongo.db[app.config['ARTICLES_COLLECTION']]
link = monitor['metadata']['rss_link']
articles = list(articles.find({'feed_source': link}, {'_id': 0}))
for idx, item in enumerate(articles):
articles[idx]['title'] = html.unescape(item['title'])
articles[idx]['date'] = item['collected'][:10]
articles.sort(key=lambda x: x['collected'], reverse=True)
return jsonify({'success': True, 'monitor': monitor, 'articles': articles}) | 6a45ed67ff79216c9048ce9e3ed80be4e43b9bd9 | 3,655,261 |
def _simplify(obj: object) -> object:
"""
This function takes an object as input and returns a simple
Python object which is supported by the chosen serialization
method (such as JSON or msgpack). The reason we have this function
is that some objects are either NOT supported by high level (fast)
serializers OR the high level serializers don't support the fastest
form of serialization. For example, PyTorch tensors have custom pickle
functionality thus its better to pre-serialize PyTorch tensors using
pickle and then serialize the binary in with the rest of the message
being sent.
Args:
obj: an object which may need to be simplified
Returns:
        obj: a simple Python object which msgpack can serialize
"""
try:
# check to see if there is a simplifier
# for this type. If there is, run return
# the simplified object
current_type = type(obj)
result = (simplifiers[current_type][0], simplifiers[current_type][1](obj))
return result
except KeyError:
# if there is not a simplifier for this
# object, then the object is already a
# simple python object and we can just
# return it
return obj | fc17b64e3701faa70ea5bfb36a8e2b9195dcbab1 | 3,655,262 |
import copy
from math import atan2, copysign, cos, degrees, radians, sin
def match_v2v3(aperture_1, aperture_2, verbose=False):
"""Use the V2V3 from aperture_1 in aperture_2 modifying X[Y]DetRef,X[Y]SciRef to match.
Also shift the polynomial coefficients to reflect the new reference point origin
and for NIRCam recalculate angles.
Parameters
----------
aperture_1 : `pysiaf.Aperture object`
Aperture whose V2,V3 reference position is to be used
aperture_2 : `pysiaf.Aperture object`
The V2,V3 reference position is to be altered to match that of aperture_1
verbose : bool
verbosity
Returns
-------
new_aperture_2: `pysiaf.Aperture object`
An aperture object derived from aperture_2 but with some parameters changed to match
altered V2V3.
"""
instrument = aperture_1.InstrName
assert instrument != 'NIRSPEC', 'Program not working for NIRSpec'
assert (aperture_2.AperType in ['FULLSCA', 'SUBARRAY', 'ROI']), \
"2nd aperture must be pixel-based"
order = aperture_1.Sci2IdlDeg
V2Ref1 = aperture_1.V2Ref
V3Ref1 = aperture_1.V3Ref
newV2Ref = V2Ref1
newV3Ref = V3Ref1
if verbose:
print('Current Vref', aperture_2.V2Ref, aperture_2.V3Ref)
print('Shift to ', V2Ref1, V3Ref1)
# Need to work in aperture 2 coordinate systems
aperName_1 = aperture_1.AperName
aperName_2 = aperture_2.AperName
detector_1 = aperName_1.split('_')[0]
detector_2 = aperName_2.split('_')[0]
if verbose:
print('Detector 1', detector_1, ' Detector 2', detector_2)
V2Ref2 = aperture_2.V2Ref
V3Ref2 = aperture_2.V3Ref
theta0 = aperture_2.V3IdlYAngle
if verbose:
print('Initial VRef', V2Ref2, V3Ref2)
print('Initial theta', theta0)
theta = radians(theta0)
coefficients = aperture_2.get_polynomial_coefficients()
A = coefficients['Sci2IdlX']
B = coefficients['Sci2IdlY']
C = coefficients['Idl2SciX']
D = coefficients['Idl2SciY']
if verbose:
print('\nA')
print_triangle(A)
print('B')
print_triangle(B)
print('C')
print_triangle(C)
print('D')
print_triangle(D)
(stat, xmean, ymean, xstd, ystd, data) = compute_roundtrip_error(A, B, C, D,
verbose=verbose,
instrument=instrument)
print('Round trip X Y')
print(' Means%8.4F %8.4f' % (xmean, ymean))
print(' STDs%8.4f %8.4f' % (xstd, ystd))
# Use convert
(newXSci, newYSci) = aperture_2.convert(V2Ref1, V3Ref1, 'tel', 'sci')
(newXDet, newYDet) = aperture_2.convert(V2Ref1, V3Ref1, 'tel', 'det')
(newXIdl, newYIdl) = aperture_2.convert(V2Ref1, V3Ref1, 'tel', 'idl')
dXSciRef = newXSci - aperture_2.XSciRef
dYSciRef = newYSci - aperture_2.YSciRef
AS = shift_coefficients(A, dXSciRef, dYSciRef)
BS = shift_coefficients(B, dXSciRef, dYSciRef)
if verbose:
print('VRef1', V2Ref1, V3Ref1)
print('Idl', newXIdl, newYIdl)
print('Shift pixel origin by', dXSciRef, dYSciRef)
print('New Ideal origin', newXIdl, newYIdl)
CS = shift_coefficients(C, AS[0], BS[0])
DS = shift_coefficients(D, AS[0], BS[0])
AS[0] = 0.0
BS[0] = 0.0
CS[0] = 0.0
DS[0] = 0.0
if verbose:
print('\nShifted Polynomials')
print('AS')
print_triangle(AS)
print('BS')
print_triangle(BS)
print('CS')
print_triangle(CS)
print('DS')
print_triangle(DS)
print('\nABCDS')
(stat, xmean, ymean, xstd, ystd, data) = compute_roundtrip_error(AS, BS, CS, DS,
verbose=verbose,
instrument=instrument)
if verbose:
print('Round trip X Y')
print(' Means%8.4F %8.4f' % (xmean, ymean))
print(' STDs%8.4f %8.4f' % (xstd, ystd))
newA = AS
newB = BS
newC = CS
newD = DS
new_aperture_2 = copy.deepcopy(aperture_2)
# For NIRCam only, adjust angles
if instrument == 'NIRCAM':
newV3IdlYAngle = degrees(atan2(-AS[2], BS[2])) # Everything rotates by this amount
if abs(newV3IdlYAngle) > 90.0:
newV3IdlYAngle = newV3IdlYAngle - copysign(180, newV3IdlYAngle)
newA = AS*cos(radians(newV3IdlYAngle)) + BS*sin(radians(newV3IdlYAngle))
newB = -AS*sin(radians(newV3IdlYAngle)) + BS*cos(radians(newV3IdlYAngle))
if verbose:
print('New angle', newV3IdlYAngle)
print('\nnewA')
print_triangle(newA)
print('newB')
print_triangle(newB)
newC = prepend_rotation_to_polynomial(CS, -newV3IdlYAngle)
newD = prepend_rotation_to_polynomial(DS, -newV3IdlYAngle)
if verbose:
print('newC')
print_triangle(newC)
print('newD')
print_triangle(newD)
(stat, xmean, ymean, xstd, ystd, data) = compute_roundtrip_error(newA, newB, newC, newD,
verbose=verbose,
instrument=instrument)
print('\nFinal coefficients')
print('Round trip X Y')
print(' Means%8.4F %8.4f' % (xmean, ymean))
print(' STDs%8.4f %8.4f' % (xstd, ystd))
newV3SciXAngle = aperture_2.V3SciXAngle + newV3IdlYAngle
        newV3SciYAngle = aperture_2.V3SciYAngle + newV3IdlYAngle
newV3IdlYAngle = aperture_2.V3IdlYAngle + newV3IdlYAngle
new_aperture_2.V3SciXAngle = newV3SciXAngle
new_aperture_2.V3SciYAngle = newV3SciYAngle
new_aperture_2.V3IdlYAngle = newV3IdlYAngle
# Set new values in new_aperture_2
new_aperture_2.V2Ref = newV2Ref
new_aperture_2.V3Ref = newV3Ref
new_aperture_2.XDetRef = newXDet
new_aperture_2.YDetRef = newYDet
new_aperture_2.XSciRef = newXSci
new_aperture_2.YSciRef = newYSci
if verbose:
print('Initial', aperture_2.V2Ref, aperture_2.V3Ref, aperture_2.XDetRef, aperture_2.YDetRef)
print('Changes', newV2Ref, newV3Ref, newXDet, newYDet)
print('Modified', new_aperture_2.V2Ref, new_aperture_2.V3Ref, new_aperture_2.XDetRef,
new_aperture_2.YDetRef)
new_aperture_2.set_polynomial_coefficients(newA, newB, newC, newD)
(xcorners, ycorners) = new_aperture_2.corners('idl', rederive=True)
for c in range(4):
suffix = "{}".format(c+1)
setattr(new_aperture_2, 'XIdlVert' + suffix, xcorners[c])
setattr(new_aperture_2, 'YIdlVert' + suffix, ycorners[c])
return new_aperture_2 | 295eb72c43f073f71b1cedaf8a94d6b1cc61dbf7 | 3,655,263 |
import time
def get_offset(sample_time):
"""
    Find simple offset values.
During the sample time of this function
the BBB with the magnetometer on should be rotated
along all axis.
sample_time is in seconds
"""
    start = time.monotonic()
mag_samples = []
mag_max = [0,0,0]
mag_min = [0,0,0]
offset = [0,0,0]
    while (time.monotonic() - start) < sample_time:
raw_data = get_raw_mag()
mag_samples.append(transform_readable(raw_data))
# blink leds to signify timespan
while mag_samples != []:
a = mag_samples.pop()
# find maximum, minimum Values
for i in range(3):
if (a[i] > mag_max[i]):
mag_max[i] = a[i]
            if (a[i] < mag_min[i]):
mag_min[i] = a[i]
#print(mag_max)
#print(mag_min)
# calculate offset from Extrema
for i in range(3):
offset[i] = (mag_max[i] + mag_min[i])/2
return offset | 712fe82dbdc50e198baf93b752f572331ce33f63 | 3,655,265 |
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
def get_multimode_2d_dist(num_modes: int = 1, scale: float = 1.0):
"""Get a multimodal distribution of Gaussians."""
angles = jnp.linspace(0, jnp.pi * 2, num_modes + 1)
angles = angles[:-1]
x, y = jnp.cos(angles) * scale / 2., jnp.sin(angles) * scale / 2.
loc = jnp.array([x, y]).T
scale = jnp.ones((num_modes, 2)) * scale / 10.
return tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(
probs=jnp.ones((num_modes,)) / num_modes),
components_distribution=tfd.MultivariateNormalDiag(
loc=loc, scale_diag=scale)) | dab5400e545feb7cde2804af151f3c20c600b0ce | 3,655,267 |
import numpy as np
def residual_squared_error(data_1, data_2):
"""
Calculation the residual squared error between two arrays.
Parameters
----------
    data_1: numpy array
        First data array
    data_2: numpy array
        Second data array
Return
------
rse: float
residual squared error
"""
RSS = np.sum(np.square(data_1 - data_2))
rse = np.sqrt(RSS / (len(data_1) - 2))
return rse | 771c365fc38d6eda07989a1a6eb34c0f96347c3c | 3,655,268 |
def by_index(pot):
""" Build a new potential where the keys of the potential dictionary
correspond to the indices along values of n-dimensional grids,
rather than, possibly, the coordinate values of the grids themselves.
Key Transformation:
((grid_val_i, grid_val_j, ...)_i,) -> ((i, j, ...)_i,)
:param pot: potential along a coordinate
:type pot: dict[tuple(float)] = float
:rtype: dict[tuple(int)] = float
"""
pot_keys = list(pot.keys())
dim = dimension(pot)
remap_dcts = []
for i in range(dim):
_coords = sorted(list(set(lst[i] for lst in pot_keys)))
_idxs = list(range(len(_coords)))
remap_dcts.append(dict(zip(_coords, _idxs)))
new_dct = {}
for keys in pot_keys:
new_tup = ()
for i, val in enumerate(keys):
new_tup += (remap_dcts[i][val],)
new_dct[new_tup] = pot[keys]
return new_dct | 7235322f606cf972c8bf4ad46a614001f235b3e9 | 3,655,269 |
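# Usage sketch for by_index; this assumes the module's dimension() helper used
# inside it is available and returns the length of the coordinate tuples (1 here).
pot_1d = {(0.0,): 1.5, (30.0,): 2.0, (60.0,): 2.5}
# Coordinate values are replaced by their index along each sorted grid axis:
print(by_index(pot_1d))  # -> {(0,): 1.5, (1,): 2.0, (2,): 2.5}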
def current_user():
"""Получить текущего пользователя или отредактировать профиль"""
user = get_user_from_request()
if request.method == "POST":
json = request.get_json()
user.email = json.get("email", user.email)
user.name = json.get("name", user.name)
user.about = sanitize(json.get("about", user.about))
user.birthday = json.get("birthday", user.birthday)
if "avatar" in json:
content = Content.get_or_none(Content.id == json["avatar"])
if content:
if not content.is_image:
return errors.user_avatar_is_not_image()
elif content.size > 1024 * 500: # 500kb
return errors.user_avatar_too_large()
else:
user.avatar = content
user.save()
user = User.get(User.id == user.id)
return jsonify({"success": 1, "user": user.to_json_with_email()}) | e7e3db1744e21c64732217e1609a113b938c677c | 3,655,270 |
from datetime import datetime
from shapely.ops import unary_union
async def async_union_polygons(bal_name, geom_list):
"""union a set of polygons & return the resulting multipolygon"""
start_time = datetime.now()
big_poly = unary_union(geom_list)
print(f"\t - {bal_name} : set of polygons unioned: {datetime.now() - start_time}")
return big_poly | 2432818d6bb38232e08a4439e7a69007a7c24334 | 3,655,271 |
def _error_text(because: str, text: str, backend: usertypes.Backend) -> str:
"""Get an error text for the given information."""
other_backend, other_setting = _other_backend(backend)
if other_backend == usertypes.Backend.QtWebKit:
warning = ("<i>Note that QtWebKit hasn't been updated since "
"July 2017 (including security updates).</i>")
suffix = " (not recommended)"
else:
warning = ""
suffix = ""
return ("<b>Failed to start with the {backend} backend!</b>"
"<p>qutebrowser tried to start with the {backend} backend but "
"failed because {because}.</p>{text}"
"<p><b>Forcing the {other_backend.name} backend{suffix}</b></p>"
"<p>This forces usage of the {other_backend.name} backend by "
"setting the <i>backend = '{other_setting}'</i> option "
"(if you have a <i>config.py</i> file, you'll need to set "
"this manually). {warning}</p>".format(
backend=backend.name, because=because, text=text,
other_backend=other_backend, other_setting=other_setting,
warning=warning, suffix=suffix)) | cb4fda8ab6c06d01ae01e6226d435d30cd0bd971 | 3,655,272 |
import pandas as pd
def COUNT(condition: pd.DataFrame, n: int):
    """the number of days that satisfy 'condition' in the past n days
Args:
condition (pd.DataFrame): dataframe index by date time(level 0) and asset(level 1), containing bool values
n (int): the number of past days
"""
return condition.rolling(n, center=False, min_periods=n).sum() | ed380061249803e9c378950a88dc5162543cfee0 | 3,655,273 |
def Mat33_nrow():
"""Mat33_nrow() -> int"""
return _simbody.Mat33_nrow() | 7f22177bcf150458e6545ed204e47b3326ce6193 | 3,655,274 |
def isstruct(ob):
""" isstruct(ob)
Returns whether the given object is an SSDF struct.
"""
if hasattr(ob, '__is_ssdf_struct__'):
return bool(ob.__is_ssdf_struct__)
else:
return False | 465196af79c9de1f7685e0004e92b68a7f524149 | 3,655,275 |
def where_between(field_name, start_date, end_date):
"""
Return the bit of query for the dates interval.
"""
str = """ {0} between date_format('{1}', '%%Y-%%c-%%d %%H:%%i:%%S')
and date_format('{2}', '%%Y-%%c-%%d 23:%%i:%%S')
""" .format( field_name,
start_date.strftime("%Y-%m-%d %H:%M:%S"),
end_date.strftime("%Y-%m-%d %H:%M:%S"))
return str | 4801d01ac8743f138e7c558da40518b75ca6daed | 3,655,276 |
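# Example of the SQL fragment produced by where_between; the doubled %% is kept
# so a later parameter-style formatting pass does not consume the date_format codes.
from datetime import datetime
clause = where_between("created_at",
                       datetime(2021, 1, 1, 0, 0, 0),
                       datetime(2021, 1, 31, 0, 0, 0))
# clause reads roughly:
#   created_at between date_format('2021-01-01 00:00:00', '%%Y-%%c-%%d %%H:%%i:%%S')
#       and date_format('2021-01-31 00:00:00', '%%Y-%%c-%%d 23:%%i:%%S')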
def to_console_formatted_string(data: dict) -> str:
"""..."""
def make_line(key: str) -> str:
if key.startswith('__cauldron_'):
return ''
data_class = getattr(data[key], '__class__', data[key])
data_type = getattr(data_class, '__name__', type(data[key]))
value = '{}'.format(data[key])[:250].replace('\n', '\n ')
if value.find('\n') != -1:
value = '\n{}'.format(value)
return '+ {name} ({type}): {value}'.format(
name=key,
type=data_type,
value=value
)
keys = list(data.keys())
keys.sort()
lines = list(filter(
lambda line: len(line) > 0,
[make_line(key) for key in keys]
))
return '\n'.join(lines) | 05cec50b3eee8199b19024aae32dda2a8ba33115 | 3,655,277 |
def cluster_instance_get_info_ajax(request, c_id):
"""
get cluster instance status
"""
dic = {"res": True, "info":None, "err":None}
instance_id = request.GET.get("instance_id")
require_vnc = request.GET.get("require_vnc")
if require_vnc == "true":
require_vnc = True
else:
require_vnc = False
if instance_id.isdecimal():
instance_id = int(instance_id)
instance_info = get_cluster_instance_info(request.user, instance_id,require_vnc=require_vnc)
if not instance_info:
raise Http404
dic["info"] = {"status":instance_info["status"], "status_name":instance_info["status_name"], "vnc_url":instance_info["vnc_url"]}
else:
dic["res"] = False
dic["err"] = "Invalid ID"
return JsonResponse(dic) | 1c000a659893b375a2e89faadedccde7ca8dcab6 | 3,655,278 |
import time
from functools import wraps
def timeit(verbose=False):
"""
Time functions via decoration. Optionally output time to stdout.
Parameters:
-----------
verbose : bool
Example Usage:
>>> @timeit(verbose=True)
>>> def foo(*args, **kwargs): pass
"""
def _timeit(f):
@wraps(f)
def wrapper(*args, **kwargs):
if verbose:
start = time.time()
res = f(*args, **kwargs)
runtime = time.time() - start
print(f'{f.__name__!r} in {runtime:.4f} s')
else:
res = f(*args, **kwargs)
return res
return wrapper
return _timeit | 5e8e0441914b5d26db99fc378374bebde2d39376 | 3,655,279 |
def signal_period(peaks, sampling_rate=1000, desired_length=None,
interpolation_order="cubic"):
"""Calculate signal period from a series of peaks.
Parameters
----------
peaks : list, array, DataFrame, Series or dict
The samples at which the peaks occur. If an array is passed in, it is
assumed that it was obtained with `signal_findpeaks()`. If a DataFrame
is passed in, it is assumed it is of the same length as the input
signal in which occurrences of R-peaks are marked as "1", with such
containers obtained with e.g., ecg_findpeaks() or rsp_findpeaks().
sampling_rate : int
The sampling frequency of the signal that contains peaks (in Hz, i.e.,
samples/second). Defaults to 1000.
desired_length : int
By default, the returned signal rate has the same number of elements as
the raw signal. If set to an integer, the returned signal rate will be
interpolated between peaks over `desired_length` samples. Has no
effect if a DataFrame is passed in as the `signal` argument. Defaults
to None.
interpolation_order : str
Order used to interpolate the rate between peaks. See
`signal_interpolate()`.
Returns
-------
array
A vector containing the period.
See Also
--------
signal_findpeaks, signal_fixpeaks, signal_plot
Examples
--------
>>> import neurokit2 as nk
>>>
>>> signal = nk.signal_simulate(duration=10, sampling_rate=1000,
>>> frequency=1)
>>> info = nk.signal_findpeaks(signal)
>>>
    >>> period = nk.signal_period(peaks=info["Peaks"])
    >>> nk.signal_plot(period)
"""
peaks, desired_length = _signal_formatpeaks_sanitize(peaks, desired_length)
# Sanity checks.
if len(peaks) <= 3:
print("NeuroKit warning: _signal_formatpeaks(): too few peaks detected"
" to compute the rate. Returning empty vector.")
return np.full(desired_length, np.nan)
# Calculate period in sec, based on peak to peak difference and make sure
# that rate has the same number of elements as peaks (important for
# interpolation later) by prepending the mean of all periods.
period = np.ediff1d(peaks, to_begin=0) / sampling_rate
period[0] = np.mean(period[1:])
# Interpolate all statistics to desired length.
if desired_length != np.size(peaks):
period = signal_interpolate(peaks, period,
desired_length=desired_length,
method=interpolation_order)
return period | dae9a7af6d23fdaa1f742cbc3b18649a525c4041 | 3,655,280 |
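# Stand-alone illustration of the core computation in signal_period (without the
# private helpers): peak-to-peak differences divided by the sampling rate, with
# the first element back-filled by the mean of the remaining periods.
import numpy as np
peaks = np.array([100, 1100, 2120, 3090])   # sample indices of detected peaks
sampling_rate = 1000
period = np.ediff1d(peaks, to_begin=0) / sampling_rate
period[0] = np.mean(period[1:])
# period -> approximately [0.997, 1.0, 1.02, 0.97] seconds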
import google.cloud.dataflow as df
from google.cloud.dataflow.utils.options import PipelineOptions
def model_co_group_by_key_tuple(email_list, phone_list, output_path):
"""Applying a CoGroupByKey Transform to a tuple.
URL: https://cloud.google.com/dataflow/model/group-by-key
"""
p = df.Pipeline(options=PipelineOptions())
# [START model_group_by_key_cogroupbykey_tuple]
# Each data set is represented by key-value pairs in separate PCollections.
# Both data sets share a common key type (in this example str).
# The email_list contains values such as: ('joe', '[email protected]') with
# multiple possible values for each key.
# The phone_list contains values such as: ('mary': '111-222-3333') with
# multiple possible values for each key.
emails = p | df.Create('email', email_list)
phones = p | df.Create('phone', phone_list)
# The result PCollection contains one key-value element for each key in the
# input PCollections. The key of the pair will be the key from the input and
# the value will be a dictionary with two entries: 'emails' - an iterable of
# all values for the current key in the emails PCollection and 'phones': an
# iterable of all values for the current key in the phones PCollection.
# For instance, if 'emails' contained ('joe', '[email protected]') and
# ('joe', '[email protected]'), then 'result' will contain the element
# ('joe', {'emails': ['[email protected]', '[email protected]'], 'phones': ...})
result = {'emails': emails, 'phones': phones} | df.CoGroupByKey()
def join_info((name, info)):
return '; '.join(['%s' % name,
'%s' % ','.join(info['emails']),
'%s' % ','.join(info['phones'])])
contact_lines = result | df.Map(join_info)
# [END model_group_by_key_cogroupbykey_tuple]
contact_lines | df.io.Write(df.io.TextFileSink(output_path))
p.run() | 7256b9dac30fe731011729ea463e37b39d8c4dde | 3,655,281 |
def get_recommendation(anime_name, cosine_sim, clean_anime, anime_index):
"""
Getting pairwise similarity scores for all anime in the data frame.
The function returns the top 10 most similar anime to the given query.
"""
idx = anime_index[anime_name]
sim_scores = list(enumerate(cosine_sim[idx]))
sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
sim_scores = sim_scores[0:11]
anime_indices = [i[0] for i in sim_scores]
result = clean_anime[['name']].iloc[anime_indices].drop(idx)
return result | 93bc3e53071200810b34e31674fcaa0a98cdaebb | 3,655,282 |
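# Hedged sketch of how the inputs to get_recommendation are commonly prepared.
# The 'genre' column, the toy data and the TF-IDF choice are assumptions for
# illustration; the original pipeline may build cosine_sim differently.
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
clean_anime = pd.DataFrame({
    "name": ["Naruto", "Bleach", "One Piece"],
    "genre": ["action shounen ninja", "action shounen soul", "action shounen pirates"],
})
tfidf_matrix = TfidfVectorizer().fit_transform(clean_anime["genre"])
cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)
anime_index = pd.Series(clean_anime.index, index=clean_anime["name"])
# get_recommendation("Naruto", cosine_sim, clean_anime, anime_index) now returns
# the most similar titles, with "Naruto" itself dropped from the result.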
def get_nwb_metadata(experiment_id):
"""
Collects metadata based on the experiment id and converts the weight to a float.
This is needed for further export to nwb_converter.
This function also validates, that all metadata is nwb compatible.
:param experiment_id: The experiment id given by the user.
:return: Final nwb metadata to be passed on.
:rtype: dict
"""
metadata = get_raw_nwb_metadata(experiment_id)
# nwb_converter unfortunately needs the weight to be a float in kg.
metadata["Subject"]["weight"] = convert_weight(metadata["Subject"]["weight"])
if validate_pynwb_data(metadata):
return metadata
else:
raise Exception("Could not validate nwb file.") | 9882e71cb869e1ebf762fd851074d316b9fda462 | 3,655,283 |
from typing import Tuple
from typing import Union
def string_to_value_error_mark(string: str) -> Tuple[float, Union[float, None], str]:
"""
Convert string to float and error.
Parameters
----------
string : str
        String of the form "1.23(4)mark": a value, an optional uncertainty in
        parentheses, and an optional trailing mark.
Returns
-------
    value : float
        Value.
    error : float
        Error.
    mark : str
        Trailing mark after the uncertainty, if any.
    """
value, error, mark = None, None, ""
ind_1 = string.find("(")
s_sigma = ""
if value == ".":
pass
elif ind_1 != -1:
ind_2 = string.find(")")
if ind_2 > ind_1:
s_sigma = string[(ind_1+1):ind_2]
if not(s_sigma.isdigit()):
s_sigma = ""
str_1 = string.split("(")[0]
value = float(str_1)
mark = string[ind_2+1:].strip()
if s_sigma != "":
s_h = "".join(["0" if _h.isdigit() else _h for _h in
str_1[:-len(s_sigma)]])
error = abs(float(s_h+s_sigma))
else:
error = 0.
else:
try:
value = float(string)
except ValueError:
value = None
return value, error, mark | c2c69c419d44e8342376ee24f6a4ced6ee2090e7 | 3,655,284 |
import itertools
def _children_with_tags(element, tags):
"""Returns child elements of the given element whose tag is in a given list.
Args:
element: an ElementTree.Element.
tags: a list of strings that are the tags to look for in child elements.
Returns:
an iterable of ElementTree.Element instances, which are the children of
the input element whose tags matched one of the elements of the list.
"""
return itertools.chain(*(_children_with_tag(element, tag) for tag in tags)) | 522151e7e9ad355e5c6850cef62093e1bd4ed0a0 | 3,655,285 |
def align_with_known_width(val, width: int, lowerBitCntToAlign: int):
"""
Does same as :func:`~.align` just with the known width of val
"""
return val & (mask(width - lowerBitCntToAlign) << lowerBitCntToAlign) | 8c9b7ffd8fced07f2ca78db7665ea5425417e45a | 3,655,287 |
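# Worked example, assuming the mask() helper used above is the usual
# "lowest n bits set" mask, i.e. mask(n) == (1 << n) - 1. For a 4-bit value with
# the 2 lowest bits cleared: 0b1011 & (mask(2) << 2) == 0b1000.
def _demo_mask(n):
    return (1 << n) - 1
assert 0b1011 & (_demo_mask(4 - 2) << 2) == 0b1000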
def get_email_from_request(request):
"""Use cpg-utils to extract user from already-authenticated request headers."""
user = get_user_from_headers(request.headers)
if not user:
raise web.HTTPForbidden(reason='Invalid authorization header')
return user | 60872f86bb69de6b1b339f715a2561dafd231489 | 3,655,288 |
from typing import List
from typing import Tuple
def get_kernels(params: List[Tuple[str, int, int, int, int]]) -> List[np.ndarray]:
"""
Create list of kernels
    :param params: list of 5-element tuples with format ("kernel name", kernel parameter 1, kernel parameter 2, multiplier, rotation angle)
:return: list of kernels
"""
kernels = [] # type: List[np.ndarray]
for param in params:
if len(param) < 5:
            print('Number of parameters given must be 5, got', param, 'len(', len(param), ') instead')
return None
if param[0] == 'gauss':
kernels.append(rotate_matrix(get_gauss(param[1], param[2]) * param[3], param[4]))
elif param[0] == 'log':
kernels.append(rotate_matrix(get_log(param[1], param[2]) * param[3], param[4]))
elif param[0] == 'sobel':
kernels.append(rotate_matrix(get_sobel(param[1], param[2]) * param[3], param[4]))
elif param[0] == 'ft0':
kernels.append(rotate_matrix(get_ft0(param[1], param[2]) * param[3], param[4]))
elif param[0] == 'ft1':
kernels.append(rotate_matrix(get_ft1(param[1], param[2]) * param[3], param[4]))
elif param[0] == 'ft2c':
kernels.append(rotate_matrix(get_ft2c(param[1], param[2]) * param[3], param[4]))
if len(kernels) == 1:
return kernels[0]
else:
return kernels | b39fd152fe94f4c52398ae4984414d2cefbf401f | 3,655,289 |
def forward_propagation(propagation_start_node, func, x):
"""A forward propagation starting at the `propagation_start_node` and
wrapping the all the composition operations along the way.
Parameters
----------
propagation_start_node : Node
The node where the gradient function (or anything similar) is requested.
func : function
The function to apply at the node (most likely be a composition of functions).
    x : ndarray
A set of parameters for the function.
Returns
-------
Wrapper
The ending wrapper wrapping the propagation end node.
"""
trace_marker = marker_stack.get_marker()
propagation_start_wrapper = new_wrapper(
x, trace_marker, propagation_start_node)
propagation_end_wrapper = func(propagation_start_wrapper)
marker_stack.release_marker(trace_marker)
if isinstance(propagation_end_wrapper, Wrapper) and propagation_end_wrapper._trace_marker == propagation_start_wrapper.trace_marker:
return propagation_end_wrapper._value, propagation_end_wrapper._node
else:
return propagation_end_wrapper, None | 12fbbb53fd329aacdf5f5fffbfa2a81342663fb8 | 3,655,290 |
def read_entities():
"""
find list of entities
:return:
"""
    entities = Entity.objects.only('name', 'id')
    return build_response.sent_json(entities.to_json()) | 842ec7506b49abd6557219e2c9682bdd48df86fb | 3,655,292
def available(unit, item) -> bool:
"""
If any hook reports false, then it is false
"""
for skill in unit.skills:
for component in skill.components:
if component.defines('available'):
if component.ignore_conditional or condition(skill, unit):
if not component.available(unit, item):
return False
return True | 7550a197e2d877ef4ff622d08a056be434f1f06e | 3,655,293 |
def cleanArray(arr):
"""Clean an array or list from unsupported objects for plotting.
Objects are replaced by None, which is then converted to NaN.
"""
try:
return np.asarray(arr, float)
except ValueError:
return np.array([x if isinstance(x, number_types) else None
for x in arr], float) | 7ab7d645209ad0815a3eb831a1345cdad0ae4aba | 3,655,294 |
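# Example behaviour (assuming number_types above covers plain int/float scalars):
mixed = [1, 'a', 2.5]
cleaned = cleanArray(mixed)
# np.asarray(mixed, float) fails on 'a', so the element-wise branch replaces it
# with None, which numpy stores as NaN: cleaned -> array([1. , nan, 2.5])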
def _ensure_args(G, source, method, directed,
return_predecessors, unweighted, overwrite, indices):
"""
    Ensures the args passed in are usable for the calling API and returns the
args with proper defaults if not specified, or raises TypeError or
ValueError if incorrectly specified.
"""
# checks common to all input types
if (method is not None) and (method != "auto"):
raise ValueError("only 'auto' is currently accepted for method")
if (indices is not None) and (type(indices) == list):
raise ValueError("indices currently cannot be a list-like type")
if (indices is not None) and (source is not None):
raise TypeError("cannot specify both 'source' and 'indices'")
if (indices is None) and (source is None):
raise TypeError("must specify 'source' or 'indices', but not both")
G_type = type(G)
# Check for Graph-type inputs
if (G_type in [Graph, DiGraph]) or is_nx_graph_type(G_type):
exc_value = "'%s' cannot be specified for a Graph-type input"
if directed is not None:
raise TypeError(exc_value % "directed")
if return_predecessors is not None:
raise TypeError(exc_value % "return_predecessors")
if unweighted is not None:
raise TypeError(exc_value % "unweighted")
if overwrite is not None:
raise TypeError(exc_value % "overwrite")
directed = False
# Check for non-Graph-type inputs
else:
if (directed is not None) and (type(directed) != bool):
raise ValueError("'directed' must be a bool")
if (return_predecessors is not None) and \
(type(return_predecessors) != bool):
raise ValueError("'return_predecessors' must be a bool")
if (unweighted is not None) and (unweighted is not True):
raise ValueError("'unweighted' currently must be True if "
"specified")
if (overwrite is not None) and (overwrite is not False):
raise ValueError("'overwrite' currently must be False if "
"specified")
source = source if source is not None else indices
if return_predecessors is None:
return_predecessors = True
return (source, directed, return_predecessors) | 6d9168de0d25f5ee4d720347182763ad744600a6 | 3,655,296 |
def read_siemens_scil_b0():
""" Load Siemens 1.5T b0 image form the scil b0 dataset.
Returns
-------
img : obj,
Nifti1Image
"""
file = pjoin(dipy_home,
'datasets_multi-site_all_companies',
'1.5T',
'Siemens',
'b0.nii.gz')
return nib.load(file) | edf700fc6e14a35b5741e4419ba96cb753188da8 | 3,655,297 |
def gdpcleaner(gdpdata: pd.DataFrame):
"""
Author: Gabe Fairbrother
Remove spurious columns, Rename relevant columns, Remove NaNs
Parameters
----------
gdpdata: DataFrame
a loaded dataframe based on a downloaded Open Government GDP at basic prices dataset (https://open.canada.ca/en/open-data)
Returns
-------
DataFrame: A cleaned and simplified DataFrame of the relevant columns for summary and visualization.
Possible columns (dataset dependent) include:
Date: Date of data
Location: Province or Jurisdiction
Scale: Scale of the Value column (Percent, Millions, etc)
Unit: Unit of Measure
Value: Portion of the GDP for the Location and Date
NAICS_Class: North American Industry Classification System ID
Industry: Industry of Record
Sub-sector: Non-profit sub-sector
Special_Industry: Special Industry Aggregate
Examples
--------
>>> result = gdpcleaner(example_data)
"""
#Check for DataFrame input argument
if (isinstance(gdpdata, pd.core.frame.DataFrame)):
pass
else:
raise TypeError("Argument must be a Pandas DataFrame")
cleaned_frame = gdpdata
#Remove spurious columns
spurious = ['DGUID', 'UOM_ID', 'SCALAR_ID', 'VECTOR', 'COORDINATE',
'STATUS', 'SYMBOL', 'TERMINATED', 'DECIMALS', 'Value', 'Seasonal adjustment']
for column in cleaned_frame.columns :
if column in spurious:
cleaned_frame = cleaned_frame.drop(columns=column)
#Drop any rows with null value
cleaned_frame = cleaned_frame.dropna()
#Rename relevant columns
cleaned_frame = cleaned_frame.rename(columns={'REF_DATE': 'Date', 'GEO': 'Location',
'SCALAR_FACTOR': 'Scale', 'VALUE': 'Value', 'UOM': 'Unit'})
for column in cleaned_frame.columns:
if 'NAICS' in column:
cleaned_frame = cleaned_frame.rename(columns={column: 'NAICS_Class'})
if 'aggregat' in column: #Not a spelling mistake, there are multiple similar column headers in different datasets
cleaned_frame = cleaned_frame.rename(columns={column: 'Special_Industry'})
return cleaned_frame | 4c685a244a746f05fbef5216518e23a956ae8da7 | 3,655,298 |
import re
def sort_with_num(path):
"""Extract leading numbers in a file name for numerical sorting."""
fname = path.name
    nums = re.match(r'^\d+', fname)
if nums:
return int(nums[0])
else:
return 0 | 2209384720c33b8201c06f7a14b431972712814a | 3,655,299 |
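# Typical use: numeric-prefix-aware ordering of file paths.
from pathlib import Path
paths = [Path('10_b.txt'), Path('2_a.txt'), Path('readme.txt')]
ordered = sorted(paths, key=sort_with_num)
# readme.txt has no leading digits, so its key is 0 and it sorts first:
# ordered -> [Path('readme.txt'), Path('2_a.txt'), Path('10_b.txt')]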
import sqlite3
def prob8(cur: sqlite3.Cursor) -> pd.DataFrame:
"""Give a list of the services which connect the stops 'Craiglockhart' and
'Tollcross'.
Parameters
----------
cur (sqlite3.Cursor) : The cursor for the database we're accessing.
Returns
-------
(pd.DataFrame) : Table with the solution.
"""
cur.execute("""SELECT DISTINCT r1.company, r1.num
FROM route AS r1
JOIN route AS r2 ON (r1.company = r2.company AND r1.num = r2.num)
JOIN stops AS stops1 ON stops1.id = r1.stop
JOIN stops as stops2 ON stops2.id = r2.stop
WHERE stops1.name = 'Craiglockhart'
AND stops2.name = 'Tollcross';
""")
return pd.DataFrame(cur.fetchall()) | 14e8bbb04befc1116f969ca977d83bc27890664c | 3,655,300 |
def get_command(name):
""" return command represented by name """
_rc = COMMANDS[name]()
return _rc | 22e64898973d2a2ec1cca2ff72fa86eaed4a3546 | 3,655,301 |
def _str_struct(a):
"""converts the structure to a string for logging purposes."""
shape_dtype = lambda x: (jnp.asarray(x).shape, str(jnp.asarray(x).dtype))
return str(jax.tree_map(shape_dtype, a)) | 96d417c6cd1332d6e71b21472444cf6178cad92a | 3,655,302 |
def delete_interface_address(
api_client, interface_id, address_id, **kwargs
): # noqa: E501
"""delete_interface_address # noqa: E501
Delete interface address details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> response = await api.delete_interface_address(interface_id, address_id, async_req=True)
:param interface_id int: ID of interface
:param address_id int: ID of address
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: APIResponse or awaitable if async
"""
local_var_params = locals()
collection_formats = {}
path_params = {"interface_id": interface_id, "address_id": address_id}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiTokenAuth", "basicAuth"] # noqa: E501
return api_client.call_api(
"/interfaces/system/{interface_id}/addresses/{address_id}",
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="object", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
) | 19d04ef0783988c8eb86983d589d9f07e82ba3b8 | 3,655,304 |
import types
async def set_promo(message: types.Message, state: FSMContext):
"""
    The /setpromo command
"""
arg = message.get_args()
if not arg:
return await message.answer(_("Укажите аргумент: промокод. Например: <pre>/set_promo my-promo-code</pre>"),
parse_mode="HTML")
arg = arg.strip()
try:
UUID(arg)
except ValueError:
return await message.answer(_("Промокод не найден"))
promo = await models.Promo.get_or_none(code=arg)
if not promo:
return await message.answer(_("Промокод не найден"))
if promo.owner:
return await message.answer(_("Промокод уже использован"))
user, created = await models.User.get_or_create(telegram_id=message.from_user.id)
promo.owner = user
await promo.save(update_fields=["owner_id"])
await message.answer(_("Промокод активирован! Спасибо 🙌")) | 9a15dd1bea20c3da6dd31eee5e2a723ddd110ba2 | 3,655,305 |
def plot_waterfall(*sigObjs, step=10, xLim:list=None,
Pmin=20, Pmax=None, tmin=0, tmax=None, azim=-72, elev=14,
cmap='jet', winPlot=False, waterfallPlot=True, fill=True,
lines=False, alpha=1, figsize=(20, 8), winAlpha=0,
removeGridLines=False, saveFig=False, bar=False, width=0.70,
size=3, lcol=None, filtered=True):
"""
    This function was kindly provided by Rinaldi Polese Petrolli.
# TO DO
Keyword Arguments:
step {int} -- [description] (default: {10})
xLim {list} -- [description] (default: {None})
Pmin {int} -- [description] (default: {20})
Pmax {[type]} -- [description] (default: {None})
tmin {int} -- [description] (default: {0})
tmax {[type]} -- [description] (default: {None})
azim {int} -- [description] (default: {-72})
elev {int} -- [description] (default: {14})
cmap {str} -- [description] (default: {'jet'})
winPlot {bool} -- [description] (default: {False})
waterfallPlot {bool} -- [description] (default: {True})
fill {bool} -- [description] (default: {True})
lines {bool} -- [description] (default: {False})
alpha {int} -- [description] (default: {1})
figsize {tuple} -- [description] (default: {(20, 8)})
winAlpha {int} -- [description] (default: {0})
removeGridLines {bool} -- [description] (default: {False})
saveFig {bool} -- [description] (default: {False})
bar {bool} -- [description] (default: {False})
width {float} -- [description] (default: {0.70})
size {int} -- [description] (default: {3})
lcol {[type]} -- [description] (default: {None})
filtered {bool} -- [description] (default: {True})
Returns:
[type] -- [description]
"""
realSigObjs = \
_remove_non_(SignalObj, sigObjs, msgPrefix='plot_waterfall:')
if len(realSigObjs) > 0:
figs = plot.waterfall(realSigObjs, step, xLim,
Pmin, Pmax, tmin, tmax, azim, elev,
cmap, winPlot, waterfallPlot, fill,
lines, alpha, figsize, winAlpha,
removeGridLines, saveFig, bar, width,
size, lcol, filtered)
return figs
else:
return | 85888e49a938a5e4faac90c52b2df7fa7036610c | 3,655,306 |
import csv
import re
def indices(input_file):
"""
Parse the index file or target file and return a list of values.
:return:
"""
index_list = []
line_num = 0
index_file = list(csv.reader(open(input_file), delimiter='\t'))
for line in index_file:
line_num += 1
col_count = len(line)
if col_count > 1 and len(line[0].split("#")[0]) > 1: # Skip any lines that are blank or comments.
tmp_line = []
for i in range(col_count):
try:
line[i] = line[i].split("#")[0] # Strip out end of line comments and white space.
except IndexError:
raise SystemExit(
"There is a syntax error in file {0} on line {1}, column {2} "
.format(input_file, str(line_num), str(i)))
line[i] = re.sub(",", '', line[i]) # Strip out any commas.
tmp_line.append(line[i])
index_list.append(tmp_line)
return index_list | ea07d6f2bc8f3d23cf2ae59cb2df6c19158752fc | 3,655,307 |
def has_same_facts(ruler_intervals1, ruler_intervals2, D):
"""
Check whether the two same-pattern ruler lists have the same facts at each corresponding ruler-interval
Args:
ruler_intervals1: a list of ruler-intervals
ruler_intervals2: a list of ruler-intervals
D: contain all relational facts
Returns:
True or False
"""
for ruler1, ruler2 in zip(ruler_intervals1, ruler_intervals2):
for predicate in D:
for entity in D[predicate]:
if interval_inclusion_intervallist(ruler1, D[predicate][entity]) and \
not interval_inclusion_intervallist(ruler2, D[predicate][entity]):
return False
return True | 210540bd2c2062f3150a34c5911017ec49b5603f | 3,655,310 |
def main():
""" """
undet = argument_parse()
print 'Start\t|\tCheck incorrect index'
fq_list = split_fastq(undet)
print 'Process\t|\tAnalysis undetermined data'
combined_df = multi_process(fq_list)
sorted_combined_df = combined_df.sort_values(
by='count',
ascending=False,
inplace=False
)
print sorted_combined_df.head(10)
print 'Process\t|\tWrite out result'
sorted_combined_df.to_csv('undetermined_top_index.csv', header=False)
for f in fq_list:
os.system('rm {}'.format(f))
print 'End\t|\tCheck incorrect index'
return True
else:
print 'End\t|\tCannot analyze index\n'
return False | e20f65e172f49ce2f184b32344135ccadb550253 | 3,655,311 |
def ruleset_delete(p_engine, p_username, rulesetname, envname):
"""
Delete ruleset from Masking engine
param1: p_engine: engine name from configuration
param2: rulesetname: ruleset name
return 0 if added, non 0 for error
"""
return ruleset_worker(p_engine=p_engine, p_username=p_username, rulesetname=rulesetname,
envname=envname, function_to_call='do_delete') | 470e2d104a6d10737bba975a0cb15a4768238244 | 3,655,312 |
import ujson
def config_from_file(file_name):
"""Load and return json from file."""
with open(file_name) as config_file:
config = ujson.load(config_file)
return config | 2dd1b57612c528a85dbe04c717800b6908cb9c40 | 3,655,313 |
def build_yaml_object(
dataset_id: str,
table_id: str,
config: dict,
schema: dict,
metadata: dict = dict(),
columns_schema: dict = dict(),
partition_columns: list = list(),
):
"""Build a dataset_config.yaml or table_config.yaml
Args:
dataset_id (str): The dataset id.
table_id (str): The table id.
config (dict): A dict with the `basedosdados` client configurations.
schema (dict): A dict with the JSON Schema of the dataset or table.
metadata (dict): A dict with the metadata of the dataset or table.
columns_schema (dict): A dict with the JSON Schema of the columns of
the table.
partition_columns (list): A list with the partition columns of the
table.
Returns:
CommentedMap: A YAML object with the dataset or table metadata.
"""
properties: dict = schema["properties"]
definitions: dict = schema["definitions"]
# Drop all properties without yaml_order
properties = {
key: value for key, value in properties.items() if value.get("yaml_order")
}
# Add properties
yaml = add_yaml_property(
yaml=ryaml.CommentedMap(),
properties=properties,
definitions=definitions,
metadata=metadata,
)
# Add columns
if metadata.get("columns"):
yaml["columns"] = []
for metadatum in metadata.get("columns"):
properties = add_yaml_property(
yaml=ryaml.CommentedMap(),
properties=columns_schema["properties"],
definitions=columns_schema["definitions"],
metadata=metadatum,
has_column=True,
)
yaml["columns"].append(properties)
    # Add partitions in case of new dataset/table or local overwriting
if partition_columns and partition_columns != ["[]"]:
yaml["partitions"] = ""
for local_column in partition_columns:
for remote_column in yaml["columns"]:
if remote_column["name"] == local_column:
remote_column["is_partition"] = True
yaml["partitions"] = ", ".join(partition_columns)
# Nullify `partitions` field in case of other-than-None empty values
if yaml.get("partitions") == "":
yaml["partitions"] = None
# Add dataset_id and table_id
yaml["dataset_id"] = dataset_id
if table_id:
yaml["table_id"] = table_id
# Add gcloud config variables
yaml["source_bucket_name"] = str(config.get("bucket_name"))
yaml["project_id_prod"] = str(
config.get("gcloud-projects", {}).get("prod", {}).get("name")
)
yaml["project_id_staging"] = str(
config.get("gcloud-projects", {}).get("staging", {}).get("name")
)
return yaml | 8fa7d3acac0e9636fda923d9a38e9a82f904afae | 3,655,314 |
from pathlib import Path
def make_cumulative(frame, filedate, unit):
"""Create a cumulative graph of cases over time"""
gb = frame.groupby("Accurate_Episode_Date").agg(patients=("Row_ID", "count"))
gb = gb.resample("D").last().fillna(0).reset_index()
max_date = gb["Accurate_Episode_Date"].max().strftime("%Y-%m-%d")
gb["cumulative"] = gb.patients.cumsum().astype(int)
print(gb)
print(gb.info())
ax = sns.lineplot(
data=gb, x="Accurate_Episode_Date", y="cumulative", linewidth=2, color="red"
)
ax.set(
ylabel="Cumulative case count",
xlabel="Date",
title=f"{unit} Cumulative Cases by Episode Date ({max_date})",
)
ax2 = plt.twinx()
sns.lineplot(
data=gb, x="Accurate_Episode_Date", y="patients", ax=ax2, linewidth=0.5
)
ax2.set(ylim=(0, gb["patients"].max() * 2))
plt.gcf().autofmt_xdate()
fname = GRAPHDIR / Path(f"{filedate}-cumulative.png")
ax.figure.savefig(fname)
return fname | 44a2a1b3af68c293a86af97b11edf8cca562e6b8 | 3,655,316 |
def most_common(l):
""" Helper function.
:l: List of strings.
:returns: most common string.
"""
# another way to get max of list?
#from collections import Counter
#data = Counter(your_list_in_here)
#data.most_common() # Returns all unique items and their counts
#data.most_common(1)
count = 0
answer = ''
for element in l:
if l.count(element) > count:
count = l.count(element)
answer = element
return answer | 5010e4e26b00099c287f8597d8dc5881a67c4034 | 3,655,317 |
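# Quick checks for most_common; on a tie the element that reaches the highest
# count first (scanning left to right) wins.
assert most_common(['cat', 'dog', 'cat']) == 'cat'
assert most_common(['a', 'b']) == 'a'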
def reduce_avg(reduce_target, lengths, dim):
"""
Args:
reduce_target : shape(d_0, d_1,..,d_dim, .., d_k)
lengths : shape(d0, .., d_(dim-1))
dim : which dimension to average, should be a python number
"""
shape_of_lengths = lengths.get_shape()
shape_of_target = reduce_target.get_shape()
if len(shape_of_lengths) != dim:
raise ValueError(('Second input tensor should be rank %d, ' +
'while it got rank %d') % (dim, len(shape_of_lengths)))
if len(shape_of_target) < dim+1 :
raise ValueError(('First input tensor should be at least rank %d, ' +
'while it got rank %d') % (dim+1, len(shape_of_target)))
rank_diff = len(shape_of_target) - len(shape_of_lengths) - 1
mxlen = tf.shape(reduce_target)[dim]
mask = mkMask(lengths, mxlen)
if rank_diff!=0:
len_shape = tf.concat(axis=0, values=[tf.shape(lengths), [1]*rank_diff])
mask_shape = tf.concat(axis=0, values=[tf.shape(mask), [1]*rank_diff])
else:
len_shape = tf.shape(lengths)
mask_shape = tf.shape(mask)
lengths_reshape = tf.reshape(lengths, shape=len_shape)
mask = tf.reshape(mask, shape=mask_shape)
mask_target = reduce_target * tf.cast(mask, dtype=reduce_target.dtype)
red_sum = tf.reduce_sum(mask_target, axis=[dim], keep_dims=False)
red_avg = red_sum / (tf.to_float(lengths_reshape) + 1e-30)
return red_avg | 3bba229f448d393019857d89d16820076732e932 | 3,655,318 |
def _near_mod_2pi(e, t, atol=_DEFAULT_ATOL):
"""Returns whether a value, e, translated by t, is equal to 0 mod 2 * pi."""
return _near_mod_n(e, t, n=2 * np.pi, atol=atol) | 465911aca0fe1a7cd397ed2304426da5fdaaccc3 | 3,655,319 |
def create_returns_similarity(strategy: QFSeries, benchmark: QFSeries, mean_normalization: bool = True,
std_normalization: bool = True, frequency: Frequency = None) -> KDEChart:
"""
Creates a new returns similarity chart. The frequency is determined by the specified returns series.
Parameters
----------
strategy: QFSeries
The strategy series to plot.
benchmark: QFSeries
The benchmark series to plot.
mean_normalization: bool
Whether to perform mean normalization on the series data.
std_normalization: bool
Whether to perform variance normalization on the series data.
frequency: Frequency
Returns can be aggregated in to specific frequency before plotting the chart
Returns
-------
KDEChart
A newly created KDEChart instance.
"""
chart = KDEChart()
colors = Chart.get_axes_colors()
if frequency is not None:
aggregate_strategy = get_aggregate_returns(strategy.to_simple_returns(), frequency)
aggregate_benchmark = get_aggregate_returns(benchmark.to_simple_returns(), frequency)
else:
aggregate_strategy = strategy.to_simple_returns()
aggregate_benchmark = benchmark.to_simple_returns()
scaled_strategy = preprocessing.scale(
aggregate_strategy, with_mean=mean_normalization, with_std=std_normalization)
strategy_data_element = DataElementDecorator(
scaled_strategy, bw="scott", shade=True, label=strategy.name, color=colors[0])
chart.add_decorator(strategy_data_element)
scaled_benchmark = preprocessing.scale(
aggregate_benchmark, with_mean=mean_normalization, with_std=std_normalization)
benchmark_data_element = DataElementDecorator(
scaled_benchmark, bw="scott", shade=True, label=benchmark.name, color=colors[1])
chart.add_decorator(benchmark_data_element)
# Add a title.
title = _get_title(mean_normalization, std_normalization, frequency)
title_decorator = TitleDecorator(title, key="title")
chart.add_decorator(title_decorator)
chart.add_decorator(AxesLabelDecorator("Returns", "Similarity"))
return chart | a83a7d2171ee488c1ac9ede80f39778658a4538f | 3,655,320 |
def _cli():
"""
command line interface
:return:
"""
parser = generate_parser()
args = parser.parse_args()
return interface(args.bids_dir,
args.output_dir,
args.aseg,
args.subject_list,
args.session_list,
args.collect,
args.ncpus,
args.stage,
args.bandstop,
args.max_cortical_thickness,
args.check_outputs_only,
args.t1_brain_mask,
args.t2_brain_mask,
args.study_template,
args.t1_reg_method,
args.cleaning_json,
args.print,
args.ignore_expected_outputs,
args.multi_template_dir,
args.norm_method,
args.norm_gm_std_dev_scale,
args.norm_wm_std_dev_scale,
args.norm_csf_std_dev_scale,
args.make_white_from_norm_t1,
args.single_pass_pial,
args.registration_assist,
args.freesurfer_license) | 0b37b2eab79c5f50d5f18b5d6b435e3b97682a36 | 3,655,322 |
import base64
def urlsafe_b64decode_nopadding(val):
"""Deal with unpadded urlsafe base64."""
# Yes, it accepts extra = characters.
return base64.urlsafe_b64decode(str(val) + '===') | 22ed00b07e16b4b557dc46b5caeb9f7ce9513c0d | 3,655,324 |
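# Example: 'hello' normally encodes to 'aGVsbG8='; with the padding stripped, the
# helper still round-trips because the extra '=' characters it appends are
# tolerated by the base64 decoder.
assert urlsafe_b64decode_nopadding('aGVsbG8') == b'hello'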
def _subimg_bbox(img, subimage, xc, yc):
"""
Find the x/y bounding-box pixel coordinates in ``img`` needed to
add ``subimage``, centered at ``(xc, yc)``, to ``img``. Returns
``None`` if the ``subimage`` would extend past the ``img``
boundary.
"""
ys, xs = subimage.shape
y, x = img.shape
y0 = int(yc - (ys - 1) / 2.0)
y1 = y0 + ys
x0 = int(xc - (xs - 1) / 2.0)
x1 = x0 + xs
if (x0 >= 0) and (y0 >= 0) and (x1 < x) and (y1 < y):
return (x0, x1, y0, y1)
else:
return None | b299a6b3726ced525b538b4fea45b235fc0bd56e | 3,655,325 |
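# Small numeric check of _subimg_bbox:
import numpy as np
img = np.zeros((10, 10))
sub = np.ones((3, 3))
assert _subimg_bbox(img, sub, 5, 5) == (4, 7, 4, 7)   # stamp fits fully inside
assert _subimg_bbox(img, sub, 0, 0) is None           # would spill past the edge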
from datetime import datetime
def _ToDatetimeObject(date_str):
"""Converts a string into datetime object.
Args:
date_str: (str) A date and optional time for the oldest article
allowed. This should be in ISO 8601 format. (yyyy-mm-dd)
Returns:
datetime.datetime Object.
Raises:
ValueError: Invalid date format.
"""
if not date_str:
date_str = datetime.now().strftime('%Y-%m-%d')
if not any(date_.match(date_str) for date_ in DATE_REGEXES):
raise ValueError('Invalid date format %s' % date_str)
return datetime.strptime(date_str, '%Y-%m-%d') | df675cb5391456122bb350a126e0b4a4ed31fc49 | 3,655,326 |
def select_most_uncertain_patch(x_image_pl, y_label_pl, fb_pred, ed_pred, fb_prob_mean_bald, kernel_window, stride_size,
already_select_image_index, previously_selected_binary_mask, num_most_uncert_patch,
method):
"""This function is used to acquire the #most uncertain patches in the pooling set.
Args:
x_image_pl: [Num_Im, Im_h, Im_w,3]
y_label_pl: [Num_Im, Im_h, Im_w,1]
fb_pred: [Num_Im, Im_h, Im_w, 2]
ed_pred: [Num_Im, Im_h, Im_w, 2]
fb_prob_mean_bald: [num_im, imw, imw]
kernel_window: [kh, kw] determine the size of the region
stride_size: int, determine the stride between every two regions
already_select_image_index: if it's None, then it means that's the first acquistion step,
otherwise it's the numeric image index for the previously selected patches
previously_selected_binary_mask: [num_already_selected_images, Im_h, Im_w,1]
num_most_uncert_patch: int, number of patches that are selected in each acquisition step
method: acquisition method: 'B', 'C', 'D'
Returns:
        Most_Uncert_Im: [Num_Selected, Im_h, Im_w, 3]
Most_Uncert_FB_GT: [Num_Selected, Im_h, Im_w,1]
Most_Uncert_ED_GT: [Num_Selected, Im_h, Im_w,1]
Most_Uncert_Binary_Mask: [Num_Selected, Im_h, Im_w,1]
Selected_Image_Index: [Num_Selected]
"""
num_im = np.shape(x_image_pl)[0]
uncertainty_map_tot = []
for i in range(num_im):
if method == 'B':
var_stat = get_uncert_heatmap(x_image_pl[i], fb_pred[i])
elif method == 'C':
var_stat = get_entropy_heatmap(fb_pred[i])
elif method == 'D':
var_stat = get_bald_heatmap(fb_prob_mean_bald[i], fb_pred[i])
uncertainty_map_tot.append(var_stat)
uncertainty_map_tot = np.array(uncertainty_map_tot)
if already_select_image_index is None:
print("--------This is the beginning of the selection process-------")
else:
print(
"----------Some patches have already been annotated, I need to deal with that")
previously_selected_binary_mask = np.squeeze(previously_selected_binary_mask, axis=-1)
for i in range(np.shape(previously_selected_binary_mask)[0]):
uncertainty_map_single = uncertainty_map_tot[already_select_image_index[i]]
uncertainty_map_updated = uncertainty_map_single * (1 - previously_selected_binary_mask[i])
uncertainty_map_tot[already_select_image_index[i]] = uncertainty_map_updated
selected_numeric_image_index, binary_mask_updated_tot = calculate_score_for_patch(uncertainty_map_tot,
kernel_window, stride_size,
num_most_uncert_patch)
pseudo_fb_la_tot = []
pseudo_ed_la_tot = []
for index, single_selected_image_index in enumerate(selected_numeric_image_index):
pseudo_fb_la, pseudo_ed_la = return_pseudo_label(y_label_pl[single_selected_image_index],
fb_pred[single_selected_image_index],
ed_pred[single_selected_image_index],
binary_mask_updated_tot[index])
pseudo_fb_la_tot.append(pseudo_fb_la)
pseudo_ed_la_tot.append(pseudo_ed_la)
most_uncert_im_tot = x_image_pl[selected_numeric_image_index]
most_uncertain = [most_uncert_im_tot,
pseudo_fb_la_tot,
pseudo_ed_la_tot,
binary_mask_updated_tot,
selected_numeric_image_index]
return most_uncertain | 21f40e34b1436d91eca041998cb927800cc10f7b | 3,655,327 |
import requests
import json
def submit_extraction(connector, host, key, datasetid, extractorname):
"""Submit dataset for extraction by given extractor.
Keyword arguments:
connector -- connector information, used to get missing parameters and send status updates
host -- the clowder host, including http and port, should end with a /
key -- the secret key to login to clowder
datasetid -- the dataset UUID to submit
extractorname -- registered name of extractor to trigger
"""
url = "%sapi/datasets/%s/extractions?key=%s" % (host, datasetid, key)
result = requests.post(url,
headers={'Content-Type': 'application/json'},
data=json.dumps({"extractor": extractorname}),
verify=connector.ssl_verify if connector else True)
result.raise_for_status()
return result.status_code | 449fc6c3c37ef8a5206a7ebe18b367885ae319a8 | 3,655,328 |
import math
def fcmp(x, y, precision):
"""fcmp(x, y, precision) -> -1, 0, or 1"""
if math.fabs(x-y) < precision:
return 0
elif x < y:
return -1
return 1 | 905421b36635ab830e2216ab34fee89f75c7f4c4 | 3,655,329 |
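# fcmp treats values closer than `precision` as equal:
assert fcmp(1.0, 1.0 + 1e-7, 1e-3) == 0
assert fcmp(1.0, 2.0, 1e-3) == -1
assert fcmp(2.0, 1.0, 1e-3) == 1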
def parse_vcf_line(line):
"""
Args:
line (str): line in VCF file obj.
Returns:
parsed_line_lst (lst): with tuple elem (chr, pos, ref, alt)
Example:
deletion
pos 123456789012
reference ATTAGTAGATGT
deletion ATTA---GATGT
VCF:
CHROM POS REF ALT
N 4 AGTA A
Bambino:
chr pos ref alt
chr_N 5 GTA -
insertion
pos 1234***56789012
reference ATTA***GTAGATGT
insertion ATTAGTAGTAGATGT
VCF:
CHROM POS REF ALT
N 4 A AGTA
Bambino:
chr pos ref alt
chr_N 5 - GTA
"""
parsed_line_lst = []
# skip header lines
if line.startswith("#"):
return parsed_line_lst
lst = line.rstrip().split("\t")
chr = lst[0]
vcf_pos = int(lst[1])
vcf_ref = lst[3]
vcf_alts = lst[4].split(",") # possibly multi-allelic
if not chr.startswith("chr"):
chr = "chr" + chr
# skip non canonical chrmosomes
if not is_canonical_chromosome(chr):
return parsed_line_lst
for vcf_alt in vcf_alts:
n = count_padding_bases(vcf_ref, vcf_alt)
pos = vcf_pos + n
if len(vcf_ref) < len(vcf_alt):
ref = "-"
alt = vcf_alt[n:]
parsed_line_lst.append((chr, pos, ref, alt))
elif len(vcf_ref) > len(vcf_alt):
ref = vcf_ref[n:]
alt = "-"
parsed_line_lst.append((chr, pos, ref, alt))
else:
pass # not indel
return parsed_line_lst | 705c3bfe2ed3a0d4552dcbd18e8c08b73b84b40b | 3,655,330 |
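# Example matching the deletion case in the docstring. This assumes the helpers
# used above (count_padding_bases, is_canonical_chromosome) behave as described,
# with chr1 counted as canonical:
#   parse_vcf_line("1\t4\t.\tAGTA\tA\t.\t.\t.\n")
#   -> [('chr1', 5, 'GTA', '-')]
# The shared leading base is trimmed and the position is shifted past it.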
def fuzzy_lookup_item(name_or_id, lst):
"""Lookup an item by either name or id.
Looking up by id is exact match. Looking up by name is by containment, and
if the term is entirely lowercase then it's also case-insensitive.
Multiple matches will throw an exception, unless one of them was an exact
match.
"""
try:
idd = int(name_or_id)
for val in lst:
if val.id == idd:
return val
raise RuntimeError('Id %d not found!' % idd)
except ValueError:
insensitive = name_or_id.islower()
matches = []
for val in lst:
name = val.name or ''
if name_or_id == name:
return val
if insensitive:
name = name.lower()
if name_or_id in name:
matches.append(val)
if len(matches) == 1:
return matches[0]
if not matches:
raise RuntimeError(f'No name containing {name_or_id!r} found!') from None
raise RuntimeError(
f'Multiple matches for {name_or_id!r}: {[x.name for x in matches]}') from None | 604b3879d0f97822d5a36db6dcf468ef8eefaac9 | 3,655,331 |
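# Usage sketch with a minimal item type; any object exposing .id and .name works.
from collections import namedtuple
Item = namedtuple('Item', ['id', 'name'])
items = [Item(1, 'Iron Sword'), Item(2, 'Iron Shield')]
assert fuzzy_lookup_item('2', items) == Item(2, 'Iron Shield')       # exact id match
assert fuzzy_lookup_item('shield', items) == Item(2, 'Iron Shield')  # case-insensitive containment
# fuzzy_lookup_item('iron', items) would raise, since both names contain 'iron'.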
def fantasy_pros_ecr_scrape(league_dict=config.sean):
"""Scrape Fantasy Pros ECR given a league scoring format
:param league_dict: league dict in config.py used to determine whether to pull PPR/standard/half-ppr
"""
scoring = league_dict.get('scoring')
if scoring == 'ppr':
url = 'https://www.fantasypros.com/nfl/rankings/ppr-cheatsheets.php'
elif scoring == 'half-ppr':
url = 'https://www.fantasypros.com/nfl/rankings/half-point-ppr-cheatsheets.php'
else:
url = 'https://www.fantasypros.com/nfl/rankings/consensus-cheatsheets.php'
html = scrape_dynamic_javascript(url)
parsed_dict = parse_ecr_html(html)
return pd.DataFrame(parsed_dict) | c20ae9542f9fea096510681bcf3c430b23cbdf29 | 3,655,333 |
def lda(X, y, nr_components=2):
"""
    Linear discriminant analysis
:param X: Input vectors
:param y: Input classes
:param nr_components: Dimension of output co-ordinates
:return: Output co-ordinates
"""
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
return discriminant_analysis.LinearDiscriminantAnalysis(n_components=nr_components).fit_transform(X2, y) | c9db65d494304246cf518833c1ae5c6ed22f3fa6 | 3,655,334 |
def _flatten_value_to_list(batch_values):
"""Converts an N-D dense or sparse batch to a 1-D list."""
# Ravel for flattening and tolist so that we go to native Python types
# for more efficient followup processing.
#
batch_value, = batch_values
return batch_value.ravel().tolist() | 77bfd9d32cbbf86a16a8da2701417a9ac9b9cc93 | 3,655,335 |
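# Example: a single 2-D dense batch is unpacked and flattened to native Python ints.
import numpy as np
batch = (np.array([[1, 2], [3, 4]]),)   # 1-tuple, matching the unpacking above
assert _flatten_value_to_list(batch) == [1, 2, 3, 4]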
def sun_position(time):
"""
Computes the sun's position in longitude and colatitude at a given time
(mjd2000).
It is accurate for years 1901 through 2099, to within 0.006 deg.
Input shape is preserved.
Parameters
----------
time : ndarray, shape (...)
Time given as modified Julian date, i.e. with respect to the date 0h00
January 1, 2000 (mjd2000).
Returns
-------
theta : ndarray, shape (...)
Geographic colatitude of sun's position in degrees
:math:`[0^\\circ, 180^\\circ]`.
phi : ndarray, shape (...)
Geographic east longitude of sun's position in degrees
:math:`(-180^\\circ, 180^\\circ]`.
References
----------
Taken from `here <http://jsoc.stanford.edu/doc/keywords/Chris_Russel/
Geophysical%20Coordinate%20Transformations.htm#appendix2>`_
"""
rad = pi / 180
year = 2000 # reference year for mjd2000
assert np.all((year + time // 365.25) < 2099) \
and np.all((year - time // 365.25) > 1901), \
("Time must be between 1901 and 2099.")
frac_day = np.remainder(time, 1) # decimal fraction of a day
julian_date = 365 * (year-1900) + (year-1901)//4 + time + 0.5
t = julian_date/36525
v = np.remainder(279.696678 + 0.9856473354*julian_date, 360.)
g = np.remainder(358.475845 + 0.985600267*julian_date, 360.)
slong = v + (1.91946 - 0.004789*t)*np.sin(g*rad) + 0.020094*np.sin(2*g*rad)
obliq = (23.45229 - 0.0130125*t)
slp = (slong - 0.005686)
sind = np.sin(obliq*rad)*np.sin(slp*rad)
cosd = np.sqrt(1.-sind**2)
# sun's declination in radians
declination = np.arctan(sind/cosd)
# sun's right right ascension in radians (0, 2*pi)
right_ascension = pi - np.arctan2(sind/(cosd * np.tan(obliq*rad)),
-np.cos(slp*rad)/cosd)
# Greenwich mean siderial time in radians (0, 2*pi)
gmst = np.remainder(279.690983 + 0.9856473354*julian_date
+ 360.*frac_day + 180., 360.) * rad
theta = degrees(pi/2 - declination) # convert to colatitude
phi = center_azimuth(degrees(right_ascension - gmst))
return theta, phi | d5465044fbbe650580f4e9afaa13cf83e2cad758 | 3,655,336 |