content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
def alt2temp_ratio(H, alt_units=default_alt_units):
"""
Return the temperature ratio (temperature / standard temperature for
sea level). The altitude is specified in feet ('ft'), metres ('m'),
    statute miles ('sm'), or nautical miles ('nm').
If the units are not specified, the units in default_units.py are used.
Examples:
Calculate the temperature ratio at 8,000 (default altitude units)
>>> alt2temp_ratio(8000)
0.94499531494013533
Calculate the temperature ratio at 8,000 m.
>>> alt2temp_ratio(8000, alt_units = 'm')
0.81953843484296374
"""
# function tested in tests/test_std_atm.py
return alt2temp(H, alt_units, temp_units='K') / T0 | c46ca3d63169676ccda223f475927a902a82a15e | 1,838 |
def encode_message(key, message):
""" Encodes the message (string) using the key (string) and
pybase64.urlsafe_b64encode functionality """
keycoded = []
if not key:
key = chr(0)
# iterating through the message
for i in range(len(message)):
# assigning a key_character based on the given key
key_character = key[i % len(key)]
# each char of the message has the key_char added (in ascii values)
# and is converted back to normal, and appended to the keycoded values
keycoded.append(
chr((ord(message[i]) + ord(key_character)) % 256)
)
encoded = pybase64.urlsafe_b64encode(
"".join(keycoded).encode() # convert to bytes object (builtin)
).decode() # back to text
return encoded | ea3a5403878dc58f1faa586c9851863a670c8cd0 | 1,839 |
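The snippet only shows the encoder; a minimal sketch of the matching decoder, assuming the same pybase64 dependency (decode_message is a hypothetical name, not part of the original):
import pybase64

def decode_message(key, encoded):
    """Hypothetical inverse: undo the urlsafe base64 step, then subtract the
    key characters modulo 256 to recover the original message."""
    if not key:
        key = chr(0)
    keycoded = pybase64.urlsafe_b64decode(encoded.encode()).decode()
    decoded = []
    for i in range(len(keycoded)):
        key_character = key[i % len(key)]
        decoded.append(chr((ord(keycoded[i]) - ord(key_character)) % 256))
    return "".join(decoded)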
def add_sibling(data, node_path, new_key, new_data, _i=0):
"""
    Traversal-safe method to add a sibling data node.
:param data: The data object you're traversing.
:param node_path: List of path segments pointing to the node you're creating a
sibling of. Same as node_path of traverse()
:param new_key: The sibling key to create.
:param new_data: The new data to be stored at the key.
"""
if _i < len(node_path) - 1:
return add_sibling(data[node_path[_i]], node_path, new_key, new_data, _i + 1)
else:
data[new_key] = new_data | 4bc11315eab686659edc9f7eb8479508d3ca37fb | 1,842 |
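A small usage sketch, assuming a plain nested dict traversed by a key path:
data = {"a": {"b": {"c": 1}}}
add_sibling(data, ["a", "b", "c"], "d", 2)
# data is now {"a": {"b": {"c": 1, "d": 2}}} -- "d" is created next to "c"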
def draw_pnl(ax, df):
"""
Draw p&l line on the chart.
"""
ax.clear()
ax.set_title('Performance')
index = df.index.unique()
dt = index.get_level_values(level=0)
pnl = index.get_level_values(level=4)
ax.plot(
dt, pnl, '-',
color='green',
linewidth=1.0,
label='Performance'
)
def perc(val):
        return '{:.2f}'.format(val)
ax.format_ydata = perc
set_legend(ax)
format_ax(ax) | 6210c1861943bf61a7df8dbe2124f8f0f5e77e89 | 1,843 |
def maxRstat(Z, R, i):
"""
Return the maximum statistic for each non-singleton cluster and its
children.
Parameters
----------
Z : array_like
The hierarchical clustering encoded as a matrix. See `linkage` for more
information.
R : array_like
The inconsistency matrix.
i : int
The column of `R` to use as the statistic.
Returns
-------
MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix `R` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]``, where
        ``Q(j)`` is the set of all node ids corresponding to nodes below
and including ``j``.
See Also
--------
linkage : for a description of what a linkage matrix is.
    inconsistent : for the creation of an inconsistency matrix.
Examples
--------
>>> from scipy.cluster.hierarchy import median, inconsistent, maxRstat
>>> from scipy.spatial.distance import pdist
Given a data set ``X``, we can apply a clustering method to obtain a
linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can
be also used to obtain the inconsistency matrix ``R`` associated to
this clustering process:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = median(pdist(X))
>>> R = inconsistent(Z)
>>> R
array([[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.74535599, 1.08655358, 3. , 1.15470054],
[1.91202266, 1.37522872, 3. , 1.15470054],
[3.25 , 0.25 , 3. , 0. ]])
`scipy.cluster.hierarchy.maxRstat` can be used to compute
the maximum value of each column of ``R``, for each non-singleton
cluster and its children:
>>> maxRstat(Z, R, 0)
array([1. , 1. , 1. , 1. , 1.05901699,
1.05901699, 1.05901699, 1.05901699, 1.74535599, 1.91202266,
3.25 ])
>>> maxRstat(Z, R, 1)
array([0. , 0. , 0. , 0. , 0.08346263,
0.08346263, 0.08346263, 0.08346263, 1.08655358, 1.37522872,
1.37522872])
>>> maxRstat(Z, R, 3)
array([0. , 0. , 0. , 0. , 0.70710678,
0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054,
1.15470054])
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not int:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
return MR | d63c6370e9d896a2e315012fa92fa650d1acaee8 | 1,844 |
import re
def strip_characters(text):
"""Strip characters in text."""
    t = re.sub(r'\(|\)|:|,|;|\.|’|”|“|\?|%|>|<', '', text)
t = re.sub('/', ' ', t)
t = t.replace("'", '')
return t | 763ddc837ef9be19aa067e362c312ebd88632ed7 | 1,845 |
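A couple of illustrative calls (assumed behavior of the two substitutions: listed punctuation is dropped, slashes become spaces):
print(strip_characters("Hello, world: (test)?"))  # -> "Hello world test"
print(strip_characters("input/output"))           # -> "input output"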
def make_std_secgroup(name, desc="standard security group"):
"""
    Returns a standardized resource group with rules for ping and ssh access.
The returned resource can be further configured with additional rules by the
caller.
The name parameter is used to form the name of the ResourceGroup, and also
provides the name of the SecGroup that is created in the ResourceGroup.
"""
return ResourceGroup("%s_std_secgroup" % name,
group=SecGroup(name, desc),
ping_rule=SecGroupRule("ping_rule",
ctxt.comp.container.group,
ip_protocol="icmp",
from_port=-1, to_port=-1),
ssh_rule=SecGroupRule("ssh_rule",
ctxt.comp.container.group,
ip_protocol="tcp",
from_port=22, to_port=22),
) | b53a65bdc04871c0d7ca56574c1852906b2d9351 | 1,847 |
def parse_plot_args(*args, **options):
"""Parse the args the same way plt.plot does."""
x = None
y = None
style = None
if len(args) == 1:
y = args[0]
elif len(args) == 2:
if isinstance(args[1], str):
y, style = args
else:
x, y = args
elif len(args) == 3:
x, y, style = args
return x, y, style | 7687ed00785c1ab20fdf2f7bdc969fde3c75840f | 1,848 |
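Illustrative calls showing how the three supported argument patterns map to (x, y, style):
parse_plot_args([1, 2, 3])                    # -> (None, [1, 2, 3], None)
parse_plot_args([1, 2, 3], 'r--')             # -> (None, [1, 2, 3], 'r--')
parse_plot_args([0, 1, 2], [1, 2, 3], 'r--')  # -> ([0, 1, 2], [1, 2, 3], 'r--')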
def publications_classification_terms_get(search=None): # noqa: E501
"""List of Classification Terms
List of Classification Terms # noqa: E501
:param search: search term applied
:type search: str
:rtype: ApiOptions
"""
return 'do some magic!' | 6633c91d59a5df7805979bd85a01f8eb1c269946 | 1,849 |
def lu_decompose(tri_diagonal):
"""Decompose a tri-diagonal matrix into LU form.
Parameters
----------
tri_diagonal : TriDiagonal
Represents the matrix to decompose.
"""
# WHR Appendix B: perform LU decomposition
#
# d[0] = hd[0]
# b[i] = hu[i]
#
# Iterative algorithm:
# d[i] = hd[i] - hu[i-1] a[i-1]
# a[i] = hl[i] / d[i]
hd, hu, hl = tri_diagonal
b = hu
# We want to vectorize the calculation of d and a as much as possible,
# instead of using WHR's iterative algorithm directly.
#
# Substitute a[i-1] into the expression for d[i] to get a recurrence
# relation for d:
#
# d[i] = hd[i] - hu[i-1] a[i-1]
# = hd[i] - hu[i-1] * hl[i-1] / d[i-1]
#
# Let c[i] = hu[i-1] * hl[i-1].
# c[0] = 0, which is meaningless but convenient for the helper.
#
# d[i] = hd[i] - c[i] / d[i-1]
c = np.empty_like(hd)
c[0] = 0.0
np.multiply(hu, hl, c[1:])
np.negative(c, c)
d = hd.copy()
solve_lu_d(c, d)
# a[i] = hl[i] / d[i]
a = np.divide(hl, d[:-1])
return TriDiagonalLU(d, b, a) | 423bb853d96b534055bd00b3c768158c86826b1b | 1,850 |
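The helper solve_lu_d is not included in the snippet; from the recurrence written in the comments (with c already negated), a minimal pure-Python sketch of it might look like this, though the real helper may well be vectorized or compiled:
def solve_lu_d(c, d):
    # Hypothetical sketch: apply d[i] = hd[i] - hu[i-1]*hl[i-1] / d[i-1] in place,
    # where d starts as a copy of hd and c[i] already holds -hu[i-1]*hl[i-1].
    for i in range(1, len(d)):
        d[i] += c[i] / d[i - 1]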
def _card(item):
"""Handle card entries
Returns: title (append " - Card" to the name,
username (Card brand),
password (card number),
url (none),
notes (including all card info)
"""
notes = item.get('notes', "") or ""
# Add card info to the notes
    notes = notes + ("\n".join([f"{i}: {j}" for i, j in item.get('card', {}).items()]))
return f"{item['name']} - Card", \
item.get('card', {}).get('brand', '') or "", \
item.get('card', {}).get('number', "") or "", \
"", \
notes | fc7d5e4b960019b05ffe7ca02fd3d1a94d69b303 | 1,851 |
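An illustrative call, assuming a Bitwarden-style item dictionary (field names here are assumptions):
item = {
    "name": "Personal Visa",
    "notes": "Backup card",
    "card": {"brand": "Visa", "number": "4111111111111111", "expMonth": "12"},
}
title, username, password, url, notes = _card(item)
# title    -> "Personal Visa - Card"
# username -> "Visa"
# password -> "4111111111111111"
# url      -> ""
# notes    -> the original notes followed by "key: value" lines for each card field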
def s3():
"""Boto3 S3 resource."""
return S3().resource | 6402deaafa2ae7d599de1c8e8c67b9e669c06463 | 1,852 |
def SUE(xmean=None,ymean=None,xstdev=None,ystdev=None,rho=None, \
xskew=None,yskew=None,xmin=None,xmax=None,ymin=None,ymax=None, \
Npt=300,xisln=False,yisln=False):
"""
SKEWED UNCERTAINTY ELLIPSES (SUE)
Function to plot uncertainty SUEs (or 1 sigma contour of a bivariate
split-normal distribution). The parameters are the means (xmean,ymean), the
standard deviations (xstdev,ystdev), the skewnesses (xskew,yskew) and the
correlation coefficients (rho). The optional bounds (xmin,xmax,ymin,ymax)
have the effect of truncating the SUEs in case there is a range of
parameter space that is forbidden.
It is important to notice that the xisln/yisln parameters are not related to
the log settings of the axes where we plot the SUE, but are here to
indicate that the moments of the variable to plot correspond to the natural
logarithm (ln) of the variable we want to display. For instance, for
displaying the ellipses of (x,y) where, for x, the moments are those of lnx,
we would write:
SUE(xmean=mean_of_lnx,ymean=mean_of_y,xstdev=stdev_of_lnx, \
ystdev=stdev_of_y,xskew=skewness_of_lnx,yskew=skewness_of_y, \
rho=correl_coeff_of_lnx_and_y,xisln=True)
"""
# Rotation angle
theta = 1./2 * np.arctan( 2*rho*xstdev*ystdev / (xstdev**2-ystdev**2) )
# Numerically solve for taux and tauy (tau=1.D2 ==> skew=0.99)
taugrid = ramp(N=10000,x0=1.E-2,x1=1.E2,log=True)
Ax = np.sqrt(np.pi/2) \
* ( (np.cos(theta))**3*xskew*xstdev**3 \
+ (np.sin(theta))**3*yskew*ystdev**3 ) \
/ ( (np.sin(theta))**6 + (np.cos(theta))**6 ) \
* ( ( (np.cos(theta))**2 - (np.sin(theta))**2 ) \
/ ( (np.cos(theta))**2*xstdev**2 \
- (np.sin(theta))**2*ystdev**2 ) )**1.5
Ay = np.sqrt(np.pi/2) \
* ( (np.cos(theta))**3*yskew*ystdev**3 \
- (np.sin(theta))**3*xskew*xstdev**3 ) \
/ ( (np.cos(theta))**6 + (np.sin(theta))**6 ) \
* ( ( (np.cos(theta))**2 - (np.sin(theta))**2 ) \
/ ( (np.cos(theta))**2*ystdev**2 \
- (np.sin(theta))**2*xstdev**2 ) )**1.5
taux = np.exp(np.interp(Ax,Ctau(taugrid)/(Btau(taugrid))**1.5, \
np.log(taugrid)))
tauy = np.exp(np.interp(Ay,Ctau(taugrid)/(Btau(taugrid))**1.5, \
np.log(taugrid)))
if (not np.isfinite(taux) or taux > 1.E2): taux = 1.E2
if (not np.isfinite(tauy) or tauy > 1.E2): tauy = 1.E2
# Rest of the parameters
lambdax = np.sqrt( ( (np.cos(theta))**2*xstdev**2 \
- (np.sin(theta))**2*ystdev**2 ) \
/ ( (np.cos(theta))**2 - (np.sin(theta))**2 ) / Btau(taux) )
lambday = np.sqrt( ( (np.cos(theta))**2*ystdev**2 \
- (np.sin(theta))**2*xstdev**2 ) \
/ ( (np.cos(theta))**2 - (np.sin(theta))**2 ) / Btau(tauy) )
x0 = xmean - np.sqrt(2/np.pi) * ( np.cos(theta)*lambdax*(taux-1) \
- np.sin(theta)*lambday*(tauy-1) )
y0 = ymean - np.sqrt(2/np.pi) * ( np.sin(theta)*lambdax*(taux-1) \
+ np.cos(theta)*lambday*(tauy-1) )
# Draw the SUE
matrot = np.array([ [ np.cos(theta), -np.sin(theta) ], \
[ np.sin(theta), np.cos(theta) ] ])
xell_ax1 = np.zeros(2)
yell_ax1 = np.zeros(2)
xell_ax2 = np.zeros(2)
yell_ax2 = np.zeros(2)
for k in np.arange(4):
if (k == 0):
xell_sub = ramp(N=Npt,x0=-lambdax,x1=0) + x0
rx = 1-(xell_sub-x0)**2/lambdax**2
yell_sub = np.zeros(Npt)
yell_sub[rx >= 0] = -lambday * np.sqrt(rx[rx >= 0]) + y0
yell_sub[rx < 0] = np.nan
elif (k == 1):
xell_sub = ramp(N=Npt,x0=0,x1=lambdax*taux) + x0
rx = 1-(xell_sub-x0)**2/lambdax**2/taux**2
yell_sub = np.zeros(Npt)
yell_sub[rx >= 0] = -lambday * np.sqrt(rx[rx >= 0]) + y0
yell_sub[rx < 0] = np.nan
elif (k == 2):
xell_sub = (ramp(N=Npt,x0=0,x1=lambdax*taux))[::-1] + x0
rx = 1-(xell_sub-x0)**2/lambdax**2/taux**2
yell_sub = np.zeros(Npt)
yell_sub[rx >= 0] = lambday*tauy * np.sqrt(rx[rx >= 0]) + y0
yell_sub[rx < 0] = np.nan
elif (k == 3):
xell_sub = (ramp(N=Npt,x0=-lambdax,x1=0))[::-1] + x0
rx = 1-(xell_sub-x0)**2/lambdax**2
yell_sub = np.zeros(Npt)
yell_sub[rx >= 0] = lambday*tauy * np.sqrt(rx[rx >= 0]) + y0
yell_sub[rx < 0] = np.nan
# Add the limit case (half ellipse)
mask = np.logical_and(np.isfinite(yell_sub),np.isfinite(xell_sub))
xell_sub = xell_sub[mask]
yell_sub = yell_sub[mask]
Nsub = np.count_nonzero(mask)
# Rotate the ellipse
for j in np.arange(Nsub):
vecell = np.matmul(matrot, \
np.array([xell_sub[j]-x0,yell_sub[j]-y0]))
xell_sub[j] = vecell[0] + x0
yell_sub[j] = vecell[1] + y0
if (k == 0):
xell = xell_sub
yell = yell_sub
else:
xell = np.concatenate((xell,xell_sub))
yell = np.concatenate((yell,yell_sub))
xplot = np.concatenate((xell,[xell[0]]))
yplot = np.concatenate((yell,[yell[0]]))
# Logs and limits
if (xisln):
xplot = np.exp(xplot)
x0 = np.exp(x0)
if (yisln):
yplot = np.exp(yplot)
y0 = np.exp(y0)
if (xmin != None):
xplot[xplot < xmin] = xmin
if (x0 < xmin): x0 = xmin
if (xmax != None):
xplot[xplot > xmax] = xmax
if (x0 > xmax): x0 = xmax
if (ymin != None):
yplot[yplot < ymin] = ymin
if (y0 < ymin): y0 = ymin
if (ymax != None):
yplot[yplot > ymax] = ymax
if (y0 > ymax): y0 = ymax
return(xplot,yplot,x0,y0) | 8b298a2d2ba04a3f1262205f19b1993a4701e279 | 1,853 |
def create_label(places, size, corners, resolution=0.50, x=(0, 90), y=(-50, 50), z=(-4.5, 5.5), scale=4, min_value=np.array([0., -50., -4.5])):
"""Create training Labels which satisfy the range of experiment"""
x_logical = np.logical_and((places[:, 0] < x[1]), (places[:, 0] >= x[0]))
y_logical = np.logical_and((places[:, 1] < y[1]), (places[:, 1] >= y[0]))
z_logical = np.logical_and((places[:, 2] + size[:, 0]/2. < z[1]), (places[:, 2] + size[:, 0]/2. >= z[0]))
xyz_logical = np.logical_and(x_logical, np.logical_and(y_logical, z_logical))
center = places.copy()
center[:, 2] = center[:, 2] + size[:, 0] / 2. # Move bottom to center
sphere_center = ((center[xyz_logical] - min_value) / (resolution * scale)).astype(np.int32)
train_corners = corners[xyz_logical].copy()
anchor_center = sphere_to_center(sphere_center, resolution=resolution, scale=scale, min_value=min_value) #sphere to center
for index, (corner, center) in enumerate(zip(corners[xyz_logical], anchor_center)):
train_corners[index] = corner - center
return sphere_center, train_corners | 1ae1ed49674fbcee15fb6a8201e69be2c82630f9 | 1,854 |
def usage():
"""Serve the usage page."""
return render_template("meta/access.html") | 909272906678c9980f379342b87c8af6a00ab89c | 1,855 |
def GetCache(name, create=False):
"""Returns the cache given a cache indentfier name.
Args:
name: The cache name to operate on. May be prefixed by "resource://" for
resource cache names or "file://" for persistent file cache names. If
only the prefix is specified then the default cache name for that prefix
is used.
    create: If True, creates the persistent cache if it does not exist.
Raises:
CacheNotFound: If the cache does not exist.
Returns:
The cache object.
"""
types = {
'file': file_cache.Cache,
'resource': resource_cache.ResourceCache,
}
def _OpenCache(cache_class, name):
try:
return cache_class(name, create=create)
except cache_exceptions.Error as e:
raise Error(e)
if name:
for cache_id, cache_class in types.iteritems():
if name.startswith(cache_id + '://'):
name = name[len(cache_id) + 3:]
if not name:
name = None
return _OpenCache(cache_class, name)
return _OpenCache(resource_cache.Cache, name) | b8e1796d772506d4abb9f8261df33b4cf6777934 | 1,856 |
def rf_local_divide_int(tile_col, scalar):
"""Divide a Tile by an integral scalar"""
return _apply_scalar_to_tile('rf_local_divide_int', tile_col, scalar) | 0a8c44cafcc44d323fb931fc1b037759ad907d18 | 1,857 |
from typing import Any
from typing import Dict
def or_(*children: Any) -> Dict[str, Any]:
"""Select devices that match at least one of the given selectors.
>>> or_(tag('sports'), tag('business'))
{'or': [{'tag': 'sports'}, {'tag': 'business'}]}
"""
return {"or": [child for child in children]} | 0bda8654ddc0f5dac80c8eb51b0d6d55b57c9e2a | 1,858 |
def get_width_and_height_from_size(x):
""" Obtains width and height from a int or tuple """
if isinstance(x, int): return x, x
if isinstance(x, list) or isinstance(x, tuple): return x
else: raise TypeError() | 581c9f332613dab5de9b786ce2bac3387ee1bd3b | 1,859 |
def remove_stopwords(lista,stopwords):
"""Function to remove stopwords
Args:
lista ([list]): list of texts
stopwords ([list]): [description]
Returns:
[list]: List of texts without stopwords
"""
lista_out = list()
for idx, text in enumerate(lista):
text = ' '.join([word for word in text.split() if word not in stopwords])
text = text.strip()
lista_out.append(text)
#print("Len original: {} - Len processed stopwords: {}".format(len(lista),len(lista_out)))
return lista_out | edca74bb3a041a65a628fcd3f0c71be5ad4858df | 1,860 |
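An illustrative call (assumed usage), showing exact word matches being removed:
texts = ["this is a simple test", "remove the stopwords from this text"]
stopwords = ["this", "is", "a", "the", "from"]
print(remove_stopwords(texts, stopwords))
# -> ['simple test', 'remove stopwords text']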
def get_users_report(valid_users, ibmcloud_account_users):
"""get_users_report()"""
users_report = []
valid_account_users = []
invalid_account_users = []
# use case 1: find users in account not in valid_users
for account_user in ibmcloud_account_users:
# check if account user is in valid_users
is_valid_user=False
for valid_user in valid_users:
if ( account_user["email"] == valid_user["email"] ):
account_user["name"] = valid_user["name"]
account_user["identities"] = valid_user["identities"]
if "resourceGroups" in valid_user:
account_user["resourceGroups"] = valid_user["resourceGroups"]
account_user["manager"] = valid_user["manager"]
account_user["association"] = valid_user["association"]
is_valid_user=True
if is_valid_user:
valid_account_users.append(account_user)
else:
invalid_account_users.append(account_user)
users_report = {
"valid_account_users" : valid_account_users,
"invalid_account_users" : invalid_account_users
}
return users_report | a96f8835496f82d8b6f8cd4f248ed8a03676795b | 1,862 |
def insert_bn(names):
"""Insert bn layer after each conv.
Args:
names (list): The list of layer names.
Returns:
list: The list of layer names with bn layers.
"""
names_bn = []
for name in names:
names_bn.append(name)
if 'conv' in name:
position = name.replace('conv', '')
names_bn.append(f'bn{position}')
return names_bn | efe1e6a3218fb33f74c17f90a06e2d18d17442e5 | 1,863 |
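An illustrative call (assumed usage): a bn layer is appended after each conv layer, keeping the conv's numeric suffix.
print(insert_bn(['conv1', 'relu1', 'conv2']))
# -> ['conv1', 'bn1', 'relu1', 'conv2', 'bn2']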
def convert_format(parameters):
"""Converts dictionary database type format to serial transmission format"""
values = parameters.copy()
for key, (index, format, value) in values.items():
if type(format) == type(db.Int):
values[key] = (index, 'i', value) # signed 32 bit int (arduino long)
elif type(format) == type(db.Int16):
values[key] = (index, 'h', value)
elif type(format) == type(db.Float):
values[key] = (index, 'f', value)
elif type(format) == type(db.String32):
values[key] = (index, 's', value)
elif type(format) == type(db.StringN):
values[key] = (index, 's', value)
elif type(format) == type(db.Time):
values[key] = (index, 'd', value)
return values | fae756d54cbef6ecc7de07d123513b773ccf1433 | 1,864 |
import warnings
def getproj4(epsg):
"""
Get projection file (.prj) text for given epsg code from
spatialreference.org. See: https://www.epsg-registry.org/
.. deprecated:: 3.2.11
This function will be removed in version 3.3.5. Use
:py:class:`flopy.discretization.structuredgrid.StructuredGrid` instead.
Parameters
----------
epsg : int
epsg code for coordinate system
Returns
-------
prj : str
text for a projection (*.prj) file.
"""
warnings.warn(
"SpatialReference has been deprecated and will be removed in version "
"3.3.5. Use StructuredGrid instead.",
category=DeprecationWarning,
)
return get_spatialreference(epsg, text="proj4") | 80dccf9722f7dd45cca87dcd78775868cfe545ad | 1,866 |
def vpn_tunnel_inside_cidr(cidr):
"""
Property: VpnTunnelOptionsSpecification.TunnelInsideCidr
"""
reserved_cidrs = [
"169.254.0.0/30",
"169.254.1.0/30",
"169.254.2.0/30",
"169.254.3.0/30",
"169.254.4.0/30",
"169.254.5.0/30",
"169.254.169.252/30",
]
cidr_match_re = compile(
r"^169\.254\.(?:25[0-5]|2[0-4]\d|[01]?\d\d?)"
r"\.(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\/30$"
)
if cidr in reserved_cidrs:
raise ValueError(
'The following CIDR blocks are reserved and cannot be used: "%s"'
% (", ".join(reserved_cidrs))
)
elif not cidr_match_re.match(cidr):
raise ValueError(
"%s is not a valid CIDR."
" A size /30 CIDR block from the 169.254.0.0/16 must be specified." % cidr
)
return cidr | 01807a4db2fc80cf8253b0e000e412b0dce1a528 | 1,867 |
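Illustrative behavior (assumed): valid /30 blocks inside 169.254.0.0/16 pass through unchanged, while reserved or malformed blocks raise ValueError.
vpn_tunnel_inside_cidr("169.254.10.0/30")   # returns "169.254.10.0/30"
vpn_tunnel_inside_cidr("169.254.1.0/30")    # raises ValueError (reserved block)
vpn_tunnel_inside_cidr("10.0.0.0/30")       # raises ValueError (not a valid tunnel CIDR)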
def choose_media_type(accept, resource_types):
"""choose_media_type(accept, resource_types) -> resource type
select a media type for the response
accept is the Accept header from the request. If there is no Accept header, '*/*' is assumed. If the Accept header cannot be parsed, HTTP400BadRequest is raised.
resource_types is an ordered list of available resource types, with the most desirable type first.
To find a match, the types in the Accept header are ordered by q value (descending), and each is compared with the available resource types in order. The first matching media type is returned.
    If no match is found, HTTP406NotAcceptable is raised.
"""
# This function is exposed in the script dpf_choose_media_type,
# so if changes are made here, that script's documentation
# should be updated to reflect them.
# list of (type, subtype, q)
accept_types = []
for part in accept.split(','):
part = part.strip()
if ';' not in part:
mt = part
q = 1.0
else:
(mt, q) = part.split(';', 1)
mt = mt.strip()
q = q.strip()
if not q.startswith('q='):
raise HTTP400BadRequest('text/plain', 'Bad Accept header.\n')
try:
q = float(q[2:])
except ValueError:
raise HTTP400BadRequest('text/plain', 'Bad Accept header.\n')
if '/' not in mt:
raise HTTP400BadRequest('text/plain', 'Bad Accept header.\n')
(type, subtype) = mt.split('/', 1)
accept_types.append((type, subtype, q))
accept_types.sort(cmp_accept_type)
accept_types.reverse()
for (type, subtype, q) in accept_types:
for available_type in resource_types:
(a_type, a_subtype) = available_type.split('/', 1)
if type != '*' and type != a_type:
continue
if subtype != '*' and subtype != a_subtype:
continue
return available_type
raise HTTP406NotAcceptable() | 876ad2ace8af69f5c6dc83d91d598220935987d5 | 1,868 |
import math
def get_border_removal_size(image: Image, border_removal_percentage: float = .04, patch_width: int = 8):
"""
    This function will compute the border removal size. When computing the border removal,
    the patch size becomes important: the output shape of the image will always be a whole
    multiple of the patch size, which allows the later computations to tile the image evenly.
    :param image: input image used to get the dimensions
    :param border_removal_percentage: how much of the border to remove
    :param patch_width: the width of the patches in pixels.
    :return: how many pixels to remove around the border
w, h = image.size
return int(math.ceil(w * border_removal_percentage / patch_width)) * patch_width | f0f236b1d2a13058042269e0e85f52f37fb47b5e | 1,869 |
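A quick worked example, assuming Pillow's Image.new is available to build a test image:
from PIL import Image

img = Image.new("RGB", (900, 600))
print(get_border_removal_size(img))
# 900 * 0.04 = 36 requested pixels -> ceil(36 / 8) = 5 patches -> 40 pixels removed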
def get_natural_num(msg):
"""
Get a valid natural number from the user!
:param msg: message asking for a natural number
    :return: a positive integer parsed from the user's input.
"""
valid_enter = False
while not valid_enter:
given_number = input(msg).strip()
if given_number.isdigit():
num = int(given_number)
valid_enter = True
return num | 77bed94bf6d3e5ceb56d58eaf37e3e687e3c94ba | 1,870 |
from typing import Optional
def _decode_panoptic_or_depth_map(map_path: str) -> Optional[str]:
"""Decodes the panoptic or depth map from encoded image file.
Args:
map_path: Path to the panoptic or depth map image file.
Returns:
Panoptic or depth map as an encoded int32 numpy array bytes or None if not
existing.
"""
if not tf.io.gfile.exists(map_path):
return None
with tf.io.gfile.GFile(map_path, 'rb') as f:
decoded_map = np.array(Image.open(f)).astype(np.int32)
if FLAGS.panoptic_divisor > 0 and map_path.endswith(_LABEL_SUFFIX):
semantic_map = decoded_map[:, :, 0]
instance_map = (
decoded_map[:, :, 1] * _ENCODED_INSTANCE_LABEL_DIVISOR +
decoded_map[:, :, 2])
decoded_map = semantic_map * FLAGS.panoptic_divisor + instance_map
return decoded_map.tobytes() | 1f3b81827ba911614d8979b187b8cde7f10078fe | 1,871 |
def splitstr(s, l=25):
""" split string with max length < l
"(i/n)"
"""
arr = [len(x) for x in s.split()]
out = []
counter = 5
tmp_out = ''
for i in xrange(len(arr)):
if counter + arr[i] > l:
out.append(tmp_out)
tmp_out = ''
counter = 5
else:
tmp_out += s.split()[i] + ' '
counter = len(tmp_out) + 5
return out | 0d84d7bbf420d1f97993be459764c37fed50f8b3 | 1,872 |
import re
def SplitRequirementSpecifier(requirement_specifier):
"""Splits the package name from the other components of a requirement spec.
Only supports PEP 508 `name_req` requirement specifiers. Does not support
requirement specifiers containing environment markers.
Args:
requirement_specifier: str, a PEP 508 requirement specifier that does not
contain an environment marker.
Returns:
(string, string), a 2-tuple of the extracted package name and the tail of
the requirement specifier which could contain extras and/or a version
specifier.
Raises:
Error: No package name was found in the requirement spec.
"""
package = requirement_specifier.strip()
tail_start_regex = r'(\[|\(|==|>=|!=|<=|<|>|~=|===)'
tail_match = re.search(tail_start_regex, requirement_specifier)
tail = ''
if tail_match:
package = requirement_specifier[:tail_match.start()].strip()
tail = requirement_specifier[tail_match.start():].strip()
if not package:
raise Error(r'Missing package name in requirement specifier: \'{}\''.format(
requirement_specifier))
return package, tail | d71eee50c162756ac7aae0bd120323d50d3ab255 | 1,873 |
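Illustrative splits (assumed usage): the package name is everything before the first extras bracket or version operator.
SplitRequirementSpecifier('numpy>=1.19,<2.0')            # -> ('numpy', '>=1.19,<2.0')
SplitRequirementSpecifier('requests[security]==2.25.1')  # -> ('requests', '[security]==2.25.1')
SplitRequirementSpecifier('flask')                       # -> ('flask', '')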
def arctanh(var):
"""
Wrapper function for atanh
"""
return atanh(var) | 955d09821d78703c99fc2e51f70ca0fc47b0c943 | 1,874 |
def predict_sentiment(txt: str, direc: str = 'models/sentiment/saved_models/model50') -> float:
"""
predicts sentiment of string
only use for testing not good for large data because
model is loaded each time
input is a txt string
optional directory change for using different models
returns a value from -1 to 1
    Approaching -1 means a negative sentiment
    Approaching 1 means a positive sentiment
"""
vals = spacy.load(direc)(txt).cats
return vals["pos"] if vals["pos"]>vals["neg"] else -1*vals["neg"] | d7ff2d361792032eb097e0b0e9818da6ce3af1e5 | 1,875 |
import numpy
def logfbank(signal,
samplerate=16000,
winlen=0.025,
winstep=0.01,
nfilt=26,
nfft=512,
lowfreq=0,
highfreq=None,
preemph=0.97,
winfunc=lambda x: numpy.ones((x,))):
"""Compute log Mel-filterbank energy features from an audio signal.
:param signal: the audio signal from which to compute features. Should be an N*1 array
:param samplerate: the samplerate of the signal we are working with.
:param winlen: the length of the analysis window in seconds. Default is 0.025s (25 milliseconds)
:param winstep: the step between successive windows in seconds. Default is 0.01s (10 milliseconds)
:param nfilt: the number of filters in the filterbank, default 26.
:param nfft: the FFT size. Default is 512.
:param lowfreq: lowest band edge of mel filters. In Hz, default is 0.
:param highfreq: highest band edge of mel filters. In Hz, default is samplerate/2
:param preemph: apply preemphasis filter with preemph as coefficient. 0 is no filter. Default is 0.97.
:param winfunc: the analysis window to apply to each frame. By default no window is applied. You can use numpy window functions here e.g. winfunc=numpy.hamming
:returns: A numpy array of size (NUMFRAMES by nfilt) containing features. Each row holds 1 feature vector.
"""
feat, energy = fbank(signal, samplerate, winlen, winstep, nfilt, nfft,
lowfreq, highfreq, preemph, winfunc)
return numpy.log(feat) | 670d5faf73fcca6da249d9b5c9fb6965eafab855 | 1,876 |
def get_rmsd( pose, second_pose, overhang = 0):
"""
Get RMSD assuming they are both the same length!
"""
#id_map = get_mask_for_alignment(pose, second_pose, cdr, overhang)
#rms = rms_at_corresponding_atoms_no_super(pose, second_pose, id_map)
start = 1 + overhang
end = pose.total_residue() - overhang
l = Loop(start, end)
loops = Loops()
loops.push_back(l)
rms = loop_rmsd(pose, second_pose, loops, False, True)
return rms | 80af3478fe946243cba5e7f3e45c61a8ea9af1d1 | 1,877 |
def inverse(a: int, n: int):
"""
calc the inverse of a in the case of module n, where a and n must be mutually prime.
a * x = 1 (mod n)
:param a: (int)
:param n: (int)
:return: (int) x
"""
assert greatest_common_divisor(a, n) == 1
return greatest_common_divisor_with_coefficient(a, n)[1] % n | 1012c4c69b81dccd2ecfd0c5cddf7a7bd9b2c1f8 | 1,878 |
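A worked example, assuming the two helper functions implement the (extended) Euclidean algorithm:
# 3 * 5 = 15 = 2 * 7 + 1, so 5 is the inverse of 3 modulo 7.
assert inverse(3, 7) == 5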
def create_app():
"""Create the Flask application."""
return app | 7ff5c1e66ab48a5f262beb7abfa21e28680605c9 | 1,879 |
def generate_prime_candidate(length):
""" Genera un integer impar aleatorimanete
param size: tamanio del numero deseado
return:integer
"""
p = big_int(length)
p |= (1 << length - 1) | 1
return p | bdae69644156191a5388b23d7cf1853b8b0273b6 | 1,880 |
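The two forced bits are easiest to see on a small example; a sketch assuming length = 8 and an arbitrary starting value:
p = 0b01000110
p |= (1 << 8 - 1) | 1
print(bin(p))  # -> 0b11000111: top bit forces the full bit length, low bit forces it odd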
def PathPrefix(vm):
"""Determines the prefix for a sysbench command based on the operating system.
Args:
vm: VM on which the sysbench command will be executed.
Returns:
A string representing the sysbench command prefix.
"""
if vm.OS_TYPE == os_types.RHEL:
return INSTALL_DIR
else:
return '/usr/' | e0ade1847bce77f3b4efd9f801b36b219917f0b8 | 1,881 |
from typing import Union
import asyncio
def get_next_valid_seq_number(
address: str, client: SyncClient, ledger_index: Union[str, int] = "current"
) -> int:
"""
Query the ledger for the next available sequence number for an account.
Args:
address: the account to query.
client: the network client used to make network calls.
ledger_index: The ledger index to use for the request. Must be an integer
ledger value or "current" (the current working version), "closed" (for the
closed-and-proposed version), or "validated" (the most recent version
validated by consensus). The default is "current".
Returns:
The next valid sequence number for the address.
"""
return asyncio.run(main.get_next_valid_seq_number(address, client, ledger_index)) | cb2a502aabc474ab79ea14bd2305dda5bfe8b479 | 1,882 |
import re
def apps_list(api_filter, partial_name, **kwargs):
"""List all defined applications. If you give an optional command line
argument, the apps are filtered by name using this string."""
params = {}
if api_filter:
params = {"filter": api_filter}
rv = okta_manager.call_okta("/apps", REST.get, params=params)
# now filter by name, if given
if partial_name:
matcher = re.compile(partial_name)
rv = list(filter(lambda x: matcher.search(x["name"]), rv))
return rv | bafb2f1eb65e735b40613e302fcbc85507c25cb8 | 1,883 |
def jacquez(s_coords, t_coords, k, permutations=99):
"""
Jacquez k nearest neighbors test for spatio-temporal interaction.
:cite:`Jacquez:1996`
Parameters
----------
s_coords : array
(n, 2), spatial coordinates.
t_coords : array
(n, 1), temporal coordinates.
k : int
the number of nearest neighbors to be searched.
permutations : int, optional
the number of permutations used to establish pseudo-
significance (the default is 99).
Returns
-------
jacquez_result : dictionary
contains the statistic (stat) for the test and the
associated p-value (pvalue).
stat : float
value of the Jacquez k nearest neighbors test for the
dataset.
pvalue : float
p-value associated with the statistic (normally
distributed with k-1 df).
Examples
--------
>>> import numpy as np
>>> import libpysal as lps
>>> from pointpats import SpaceTimeEvents, jacquez
Read in the example data and create an instance of SpaceTimeEvents.
>>> path = lps.examples.get_path("burkitt.shp")
>>> events = SpaceTimeEvents(path,'T')
The Jacquez test counts the number of events that are k nearest
neighbors in both time and space. The following runs the Jacquez test
on the example data and reports the resulting statistic. In this case,
there are 13 instances where events are nearest neighbors in both space
and time.
# turning off as kdtree changes from scipy < 0.12 return 13
>>> np.random.seed(100)
>>> result = jacquez(events.space, events.t ,k=3,permutations=99)
>>> print(result['stat'])
13
The significance of this can be assessed by calling the p-
value from the results dictionary, as shown below. Again, no
space-time interaction is observed.
>>> result['pvalue'] < 0.01
False
"""
time = t_coords
space = s_coords
n = len(time)
# calculate the nearest neighbors in space and time separately
knnt = lps.weights.KNN.from_array(time, k)
knns = lps.weights.KNN.from_array(space, k)
nnt = knnt.neighbors
nns = knns.neighbors
knn_sum = 0
# determine which events are nearest neighbors in both space and time
for i in range(n):
t_neighbors = nnt[i]
s_neighbors = nns[i]
check = set(t_neighbors)
inter = check.intersection(s_neighbors)
count = len(inter)
knn_sum += count
stat = knn_sum
# return the results (if no inference)
if not permutations:
return stat
# loop for generating a random distribution to assess significance
dist = []
for p in range(permutations):
j = 0
trand = np.random.permutation(time)
knnt = lps.weights.KNN.from_array(trand, k)
nnt = knnt.neighbors
for i in range(n):
t_neighbors = nnt[i]
s_neighbors = nns[i]
check = set(t_neighbors)
inter = check.intersection(s_neighbors)
count = len(inter)
j += count
dist.append(j)
# establish the pseudo significance of the observed statistic
distribution = np.array(dist)
greater = np.ma.masked_greater_equal(distribution, stat)
count = np.ma.count_masked(greater)
pvalue = (count + 1.0) / (permutations + 1.0)
# report the results
jacquez_result = {'stat': stat, 'pvalue': pvalue}
return jacquez_result | dc71d74cc0e0159e1164d659ca3f07f3b9a61dd6 | 1,884 |
def array2tensor(array, device='auto'):
"""Convert ndarray to tensor on ['cpu', 'gpu', 'auto']
"""
    assert device in ['cpu', 'gpu', 'auto'], "Invalid device"
    if device == 'auto':
        device = 'cuda' if t.cuda.is_available() else 'cpu'
    elif device == 'gpu':
        device = 'cuda'  # torch expects 'cuda' as the device string for GPUs
    return t.tensor(array).float().to(t.device(device)) | 53826ad5b19a4e030bc3e98857c9b3285094370f | 1,885 |
def to_json(graph):
"""Convert this graph to a Node-Link JSON object.
:param BELGraph graph: A BEL graph
:return: A Node-Link JSON object representing the given graph
:rtype: dict
"""
graph_json_dict = node_link_data(graph)
# Convert annotation list definitions (which are sets) to canonicalized/sorted lists
graph_json_dict['graph'][GRAPH_ANNOTATION_LIST] = {
keyword: list(sorted(values))
for keyword, values in graph_json_dict['graph'][GRAPH_ANNOTATION_LIST].items()
}
# Convert set to list
graph_json_dict['graph'][GRAPH_UNCACHED_NAMESPACES] = list(graph_json_dict['graph'][GRAPH_UNCACHED_NAMESPACES])
return graph_json_dict | 325053a0838bbf1ab70a4fb61e17f93f27c80dab | 1,886 |
def export(df: pd.DataFrame):
"""
From generated pandas dataframe to xml configuration
:param df: computed pandas dataframe
:return:
"""
return df | 445e91a419746afef8062dcc1e6691572ba9390d | 1,887 |
def has_same_attributes(link1, link2):
"""
Return True if the two links have the same attributes for our purposes,
ie it is OK to merge them together into one link
Parameters:
link1 - Link object
link2 - Link object
Return value:
True iff link1 and link2 have compatible attributes
"""
return (link1.linktype == link2.linktype and
abs(link1.B - link2.B) < EPS and
abs(link1.power - link2.power) < EPS and
abs(link1.capacity - link2.capacity) < EPS) | e80f62d01ef18e547a2e7718ac2bb1ca3001b84f | 1,888 |
def test_signals_creation(test_df, signal_algorithm):
"""Checks signal algorithms can create a signal in a Pandas dataframe."""
test_df_copy = test_df.copy()
original_columns = test_df.columns
# We check if the test series has the columns needed for the rule to calculate.
required_columns = Api.required_inputs_for_algorithm(signal_algorithm)
all_present = True
for ii_requirement in required_columns:
if ii_requirement not in original_columns:
all_present = False
# If columns are missing, we anticipate a KeyError will trigger.
if not all_present:
with pytest.raises(KeyError):
Api.calculate_signal(test_df_copy, signal_algorithm)
return True
# Otherwise we expect to parse successfully.
df_with_signal = Api.calculate_signal(test_df_copy, signal_algorithm)
if not isinstance(df_with_signal, pd.DataFrame):
print(df_with_signal)
print("Type was: ", type(df_with_signal))
raise TypeError("Bad output format.")
# Signal algorithms should be adding new columns with float, int or NaN data.
new_columns = False
for ii_column_name in df_with_signal.columns:
if ii_column_name not in original_columns:
new_columns = True
for ii_value in df_with_signal[ii_column_name]:
if not isinstance(ii_value, (float, int)):
                    assert ii_value == "NaN"
# At least one new column should have been added. Otherwise output is overriding input columns.
if not new_columns:
raise AssertionError(
"No new columns were created by the signal function: ",
df_with_signal.columns,
" versus original of ",
original_columns,
) | 5a4515092d778090a77ce5933ad2e79b4d62df36 | 1,889 |
def get_prev_day(d):
"""
Returns the date of the previous day.
"""
curr = date(*map(int, d.split('-')))
prev = curr - timedelta(days=1)
return str(prev) | 9195c0be4fc25a68b0bb94e953bafd407c5931a3 | 1,890 |
import types
def copy_function(old_func, updated_module):
"""Copies a function, updating it's globals to point to updated_module."""
new_func = types.FunctionType(old_func.__code__, updated_module.__dict__,
name=old_func.__name__,
argdefs=old_func.__defaults__,
closure=old_func.__closure__)
new_func.__dict__.update(old_func.__dict__)
new_func.__module__ = updated_module.__name__
return new_func | e09022f734faa1774a3ac592c0e12b0b007ae8e3 | 1,891 |
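A self-contained usage sketch (the module and names here are made up for illustration): rebinding a function's globals so a global lookup resolves against a new module namespace.
import types

def _demo():
    return GREETING  # resolved through the function's __globals__

# Hypothetical stand-in for a reloaded module: any module object with the needed globals.
fake_module = types.ModuleType("fake_module")
fake_module.GREETING = "hello from the new module"

rebound = copy_function(_demo, fake_module)
print(rebound())  # -> "hello from the new module"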
import random
def get_random_color():
"""
获得一个随机的bootstrap颜色字符串标识
:return: bootstrap颜色字符串
"""
color_str = [
'primary',
'secondary',
'success',
'danger',
'warning',
'info',
'dark',
]
return random.choice(color_str) | 898814996aa5ada8f4000244887af382b8b9e1bc | 1,892 |
def logged_run(cmd, buffer):
"""Return exit code."""
pid = Popen(cmd, stdout=PIPE, stderr=STDOUT)
pid.wait()
buffer.write(pid.stdout.read())
return pid.returncode | 99a1aa4f8997ef7665a8e994b3df3d4ffe8a844b | 1,894 |
import json
def create_iam_role(iam_client):
"""Create an IAM role for the Redshift cluster to have read only access to
S3.
Arguments:
iam_client (boto3.client) - IAM client
Returns:
role_arn (str) - ARN for the IAM Role
"""
# Create the role if it doesn't already exist.
try:
print('Creating IAM Role...')
redshift_role = iam_client.create_role(
Path="/",
RoleName=IAM_ROLE_NAME,
Description="Allows Redshift clusters to call AWS services",
AssumeRolePolicyDocument=json.dumps(
{
'Statement': [
{
'Action': 'sts:AssumeRole',
'Effect': 'Allow',
'Principal': {'Service': 'redshift.amazonaws.com'}
}
],
'Version': '2012-10-17'
}
)
)
except Exception as e:
print(e)
# Attach the policy.
try:
iam_client.attach_role_policy(
RoleName=IAM_ROLE_NAME,
            PolicyArn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"
)
except Exception as e:
print(e)
# Return the Role ARN.
role_arn = iam_client.get_role(RoleName=IAM_ROLE_NAME)['Role']['Arn']
print('Role ARN: %s' % role_arn)
return role_arn | 949026bae3edc1dacc5057427ecf3e21490bd9b8 | 1,897 |
from numpy import array, hstack, zeros
def cone_face_to_span(F):
"""
Compute the span matrix F^S of the face matrix F,
that is, a matrix such that
{F x <= 0} if and only if {x = F^S z, z >= 0}.
"""
b, A = zeros((F.shape[0], 1)), -F
# H-representation: A x + b >= 0
F_cdd = Matrix(hstack([b, A]), number_type=NUMBER_TYPE)
F_cdd.rep_type = RepType.INEQUALITY
P = Polyhedron(F_cdd)
V = array(P.get_generators())
for i in xrange(V.shape[0]):
if V[i, 0] != 0: # 1 = vertex, 0 = ray
raise NotConeFace(F)
return V[:, 1:].T | ae928e179085116fa8ac48fd01841458bdcd38ec | 1,898 |
def neighborhood(index, npoints, maxdist=1):
"""
Returns the neighbourhood of the current index,
= all points of the grid separated by up to
*maxdist* from current point.
@type index: int
@type npoints: int
@type maxdist int
@rtype: list of int
"""
return [index + i for i in range(-maxdist, maxdist + 1)
if i != 0 and 0 <= index + i <= npoints - 1] | 98166d810daa6b99862a4c9f6d1629fdfa571bd0 | 1,899 |
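Worked examples (assumed usage): indices outside the grid are simply dropped at the boundaries.
neighborhood(5, 10)             # -> [4, 6]
neighborhood(0, 10, maxdist=2)  # -> [1, 2]
neighborhood(9, 10, maxdist=2)  # -> [7, 8]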
def data_check(data):
"""Check the data in [0,1]."""
return 0 <= float(data) <= 1 | b292ef07a024e53d82e706f0d88d50d6318d6593 | 1,900 |
import re
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
def tokenize(text):
"""
Tokenization function to process text data
Args:
text: String. disaster message.
Returns:
clean_tokens: list. token list from text message.
"""
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
# get list of all urls using regex
detected_urls = re.findall(url_regex, text)
# replace each url in text string with placeholder
for url in detected_urls:
text = text.replace(url, "urlplaceholder")
# tokenize text
tokens = word_tokenize(text)
# initiate lemmatizer
lemmatizer = WordNetLemmatizer()
# iterate through each token
clean_tokens = []
for tok in tokens:
# lemmatize, normalize case, and remove leading/trailing white space
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens | d5ee0929c0b6fad243b87c2b7e82270859b9b3f3 | 1,901 |
def get_symbol_historical(symbol_name):
"""Returns the available historical data for a symbol as a dictionary."""
# Get the data
symbol_data = get_symbol_data(symbol_name)
# Build the response
response = symbol_data.to_dict(orient="records")
return response | 37578652a13ff2b705c46185aba8cd47a73dc6e0 | 1,902 |
def guesses(word):
"""
return all of the first and second order guesses for this word
"""
result = list(known(*first_order_variants(word)))
result.sort()
return result | 9a74372e701d526d74df1df613b8648f47830202 | 1,903 |
def em(X, sf, inits, K, L, n_iter=100, n_inner_iter=50, tol=1e-5, zero_inflated=True):
"""
run EM algorithm on the given init centers
return the clustering labels with the highest log likelihood
"""
# add prepare reduced data here
print("start em algorithm")
res = _em(X, sf, inits, K, L, n_iter, n_inner_iter, tol, zero_inflated)
max_idx = np.argmax([r['llf'] for r in res])
sol = res[max_idx]
em_labels = np.argmax(sol['rho'], axis=1).flatten()
sol['labels'] = em_labels
return sol | 7a53c14caf56958fed80241bf347071b84a62280 | 1,904 |
def update_logger(evo_logger, x, fitness, memory, top_k, verbose=False):
""" Helper function to keep track of top solutions. """
# Check if there are solutions better than current archive
vals = jnp.hstack([evo_logger["top_values"], fitness])
params = jnp.vstack([evo_logger["top_params"], x])
concat_top = jnp.hstack([jnp.expand_dims(vals, 1), params])
sorted_top = concat_top[concat_top[:, 0].argsort()]
# Importantly: Params are stored as flat vectors
evo_logger["top_values"] = sorted_top[:top_k, 0]
evo_logger["top_params"] = sorted_top[:top_k, 1:]
evo_logger["log_top_1"].append(evo_logger["top_values"][0])
evo_logger["log_top_mean"].append(jnp.mean(evo_logger["top_values"]))
evo_logger["log_top_std"].append(jnp.std(evo_logger["top_values"]))
evo_logger["log_gen_1"].append(jnp.min(fitness))
evo_logger["log_gen_mean"].append(jnp.mean(fitness))
evo_logger["log_gen_std"].append(jnp.std(fitness))
evo_logger["log_sigma"].append(memory["sigma"])
evo_logger["log_gen"].append(memory["generation"])
if verbose:
print(evo_logger["log_gen"][-1], evo_logger["top_values"])
return evo_logger | 8efd1bbc4f0c1cde17e2ef425ae82cf3f5967df3 | 1,906 |
def ae(nb_features,
input_shape,
nb_levels,
conv_size,
nb_labels,
enc_size,
name='ae',
prefix=None,
feat_mult=1,
pool_size=2,
padding='same',
activation='elu',
use_residuals=False,
nb_conv_per_level=1,
batch_norm=None,
enc_batch_norm=None,
ae_type='conv', # 'dense', or 'conv'
enc_lambda_layers=None,
add_prior_layer=False,
add_prior_layer_reg=0,
use_logp=True,
conv_dropout=0,
include_mu_shift_layer=False,
single_model=False, # whether to return a single model, or a tuple of models that can be stacked.
final_pred_activation='softmax',
do_vae=False):
"""
Convolutional Auto-Encoder.
Optionally Variational.
Optionally Dense middle layer
"Mostly" in that the inner encoding can be (optionally) constructed via dense features.
Parameters:
do_vae (bool): whether to do a variational auto-encoder or not.
enc_lambda_layers functions to try:
K.softsign
a = 1
longtanh = lambda x: K.tanh(x) * K.log(2 + a * abs(x))
"""
# naming
model_name = name
# volume size data
ndims = len(input_shape) - 1
if isinstance(pool_size, int):
pool_size = (pool_size,) * ndims
# get encoding model
enc_model = conv_enc(nb_features,
input_shape,
nb_levels,
conv_size,
name=model_name,
feat_mult=feat_mult,
pool_size=pool_size,
padding=padding,
activation=activation,
use_residuals=use_residuals,
nb_conv_per_level=nb_conv_per_level,
conv_dropout=conv_dropout,
batch_norm=batch_norm)
# middle AE structure
if single_model:
in_input_shape = None
in_model = enc_model
else:
in_input_shape = enc_model.output.shape.as_list()[1:]
in_model = None
mid_ae_model = single_ae(enc_size,
in_input_shape,
conv_size=conv_size,
name=model_name,
ae_type=ae_type,
input_model=in_model,
batch_norm=enc_batch_norm,
enc_lambda_layers=enc_lambda_layers,
include_mu_shift_layer=include_mu_shift_layer,
do_vae=do_vae)
# decoder
if single_model:
in_input_shape = None
in_model = mid_ae_model
else:
in_input_shape = mid_ae_model.output.shape.as_list()[1:]
in_model = None
dec_model = conv_dec(nb_features,
in_input_shape,
nb_levels,
conv_size,
nb_labels,
name=model_name,
feat_mult=feat_mult,
pool_size=pool_size,
use_skip_connections=False,
padding=padding,
activation=activation,
use_residuals=use_residuals,
final_pred_activation='linear',
nb_conv_per_level=nb_conv_per_level,
batch_norm=batch_norm,
conv_dropout=conv_dropout,
input_model=in_model)
if add_prior_layer:
dec_model = add_prior(dec_model,
[*input_shape[:-1], nb_labels],
name=model_name,
prefix=model_name + '_prior',
use_logp=use_logp,
final_pred_activation=final_pred_activation,
add_prior_layer_reg=add_prior_layer_reg)
if single_model:
return dec_model
else:
return (dec_model, mid_ae_model, enc_model) | ab5bbed13e5636ab506612776920eaffa67b8b3e | 1,907 |
def parser_config(p):
"""JLS file info."""
p.add_argument('--verbose', '-v',
action='store_true',
help='Display verbose information.')
p.add_argument('filename',
help='JLS filename')
return on_cmd | ea9e20fd055933d7e1b1b5f92da76875f7f318e6 | 1,910 |
def decentralized_training_strategy(communication_rounds, epoch_samples, batch_size, total_epochs):
"""
Split one epoch into r rounds and perform model aggregation
:param communication_rounds: the communication rounds in training process
:param epoch_samples: the samples for each epoch
:param batch_size: the batch_size for each epoch
:param total_epochs: the total epochs for training
:return: batch_per_epoch, total_epochs with communication rounds r
"""
if communication_rounds >= 1:
epoch_samples = round(epoch_samples / communication_rounds)
total_epochs = round(total_epochs * communication_rounds)
batch_per_epoch = round(epoch_samples / batch_size)
elif communication_rounds in [0.2, 0.5]:
total_epochs = round(total_epochs * communication_rounds)
batch_per_epoch = round(epoch_samples / batch_size)
else:
raise NotImplementedError(
"The communication round {} illegal, should be 0.2 or 0.5".format(communication_rounds))
return batch_per_epoch, total_epochs | 3a743208af50d7c7865d5d5f86a4f58b0ba98a4d | 1,911 |
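Two worked examples of the arithmetic (values chosen purely for illustration), with epoch_samples=5000, batch_size=50, total_epochs=10:
decentralized_training_strategy(2, 5000, 50, 10)    # -> (50, 20): 2500 samples per round, 20 rounds
decentralized_training_strategy(0.5, 5000, 50, 10)  # -> (100, 5): full epoch per round, 5 epochs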
def create_config_file_lines():
"""Wrapper for creating the initial config file content as lines."""
lines = [
"[default]\n",
"config_folder = ~/.zettelkasten.d\n",
"\n",
"def_author = Ammon, Mathias\n",
"def_title = Config Parsed Test Title\n",
"def_location_specifier = None\n",
"\n",
"location = ~/zettelkasten\n",
"\n",
"initial_folder_structure = \n",
" lobby,\n",
" %(sources_directory)s,\n",
" _sources/audios,\n",
" _sources/images,\n",
" _sources/pdfs,\n",
" _sources/videos\n",
"\n",
"name_sep = /\n",
"\n",
"required_attributes = \n",
" uid,\n",
" category,\n",
" subcategory\n",
"\n",
"sources_directory = _sources\n",
"\n",
"styles_file = styles.cfg\n",
"\n",
"reserved_folder_names = \n",
" lobby,\n",
" %(sources_directory)s,\n",
" pytest_dir,\n",
" doctest_dir,\n",
" .zettelkasten.d\n",
"\n",
"zettelkasten_bib_file = zettelkasten.bib\n",
"\n",
"[source_file_formats]\n",
"audios = \n",
" mp3,\n",
" wav\n",
"images = \n",
" webp,\n",
" jpg,\n",
" jpeg,\n",
" png\n",
"pdfs =\n",
" pdf,\n",
" odt\n",
"videos =\n",
" mkv,\n",
" webm,\n",
" mp4\n",
"\n",
"[zettel_meta_attribute_defaults]\n",
"# required for zettel adding to work \n",
"category= None\n",
"subcategory= None\n",
"# optional\n",
"author = Mathias Ammon\n",
"topics =\n",
"tags =\n",
"doc = today\n",
"\n",
"[zettel_meta_attribute_labels]\n",
"# required for zettel adding to work\n",
"uid = #+Title:\n",
"category = #+Category:\n",
"subcategory = #+Subcategory:\n",
"# optional\n",
"author = #+Author:\n",
"doc = #+DOC:\n",
"dole = #+DOLE:\n",
"topics = #+Topics:\n",
"tags = #+Tags:\n",
]
return lines | d0d1057c3f450636279a8df9d4a39977f1eeef42 | 1,912 |
def p_planes_tangent_to_cylinder(base_point, line_vect, ref_point, dist, ):
"""find tangent planes of a cylinder passing through a given point ()
.. image:: ../images/plane_tangent_to_one_cylinder.png
:scale: 80 %
:align: center
Parameters
----------
base_point : point
point M
line_vect : vector
direction of the existing bar's axis, direction [the other pt, base_pt], **direction very important!**
ref_point : point
point Q
dist : float
cylinder radius
Returns
-------
list of two [ref_point, local_y, local_x]
local x = QB
local_y // line_vect
"""
l_vect = normalize_vector(line_vect)
tangent_pts = lines_tangent_to_cylinder(base_point, line_vect, ref_point, dist)
if tangent_pts is None:
return None
base_pt, upper_tang_pt, lower_tang_pt = tangent_pts
r1 = subtract_vectors(add_vectors(base_pt, upper_tang_pt), ref_point)
r1 = normalize_vector(r1)
r2 = subtract_vectors(add_vectors(base_pt, lower_tang_pt), ref_point)
r2 = normalize_vector(r2)
return [[ref_point, l_vect, r1], [ref_point, l_vect, r2]] | e8928e4314cadede97bef977c0348e32832157ad | 1,913 |
def BOPTools_AlgoTools3D_OrientEdgeOnFace(*args):
"""
* Get the edge <aER> from the face <aF> that is the same as the edge <aE>
:param aE:
:type aE: TopoDS_Edge &
:param aF:
:type aF: TopoDS_Face &
:param aER:
:type aER: TopoDS_Edge &
:rtype: void
"""
return _BOPTools.BOPTools_AlgoTools3D_OrientEdgeOnFace(*args) | 31da8b90e4ad5838b94a0481d937104845de735c | 1,914 |
def create_store_from_creds(access_key, secret_key, region, **kwargs):
"""
Creates a parameter store object from the provided credentials.
Arguments:
access_key {string} -- The access key for your AWS account
secret_key {string} -- The secret key for you AWS account
region {string} -- The region you wish to connect to
Keyword Arguments (Optional):
session='session' {string} -- The session token you wish to use.
Returns:
Object -- An AWS parameter store object.
"""
session = kwargs.get('session') if 'session' in kwargs else ''
store = EC2ParameterStore(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session, #optional
region_name=region
)
return store | 8e0ec2a6579a6013d36b6933ee922a406730ee35 | 1,915 |
from collections import abc
def are_objects_equal(object1, object2):
"""
compare two (collections of) arrays or other objects for equality. Ignores nan.
"""
if isinstance(object1, abc.Sequence):
items = zip(object1, object2)
elif isinstance(object1, dict):
items = [(value, object2[key]) for key, value in object1.items()]
else:
items = [(object1, object2)]
# equal_nan does not exist in array_equal in old numpy
npy_major_version = tuple(int(v) for v in np.__version__.split('.')[:2])
if npy_major_version < (1, 19):
fixed = [(np.nan_to_num(a1), np.nan_to_num(a2)) for a1, a2 in items]
return np.all([np.all(a1 == a2) for a1, a2 in fixed])
try:
return np.all(
[np.array_equal(a1, a2, equal_nan=True) for a1, a2 in items]
)
except TypeError:
# np.array_equal fails for arrays of type `object` (e.g: strings)
return np.all([a1 == a2 for a1, a2 in items]) | 94b4b9a9f42bc8b1dd44d5e010b422082452f649 | 1,916 |
def get_recipes_from_dict(input_dict: dict) -> dict:
"""Get recipes from dict
Attributes:
        input_dict (dict): recipe collection for a single ISO 639-1 language code
Returns:
recipes (dict): collection of recipes for input language
"""
if not isinstance(input_dict, dict):
raise TypeError("Input is not type dict")
recipes = input_dict
return recipes | e710d9629d10897d4aae7bf3d5de5dbbe18196c5 | 1,917 |
def tasks_from_wdl(wdl):
"""
Return a dictionary of tasks contained in a .wdl file.
The values are task definitions within the wdl
"""
return scopes_from_wdl("task", wdl) | 24d302995dcfa274b4b04868f901f832b36ec5cd | 1,918 |
async def get_category_item_route(category_id: CategoryEnum, item_id: ObjectID,
db: AsyncIOMotorClient = Depends(get_database)) -> ItemInResponse:
"""Get the details about a particular item"""
_res = await db[category_id]["data"].find_one({"_id": item_id})
if _res:
return ItemInResponse(data=_res)
raise HTTPException(
status_code=404,
detail=f'ObjectID {item_id} not found in {category_id}') | 4feed87e3948994f8066268820355d9fdfe4999d | 1,920 |
def weighted_SVD(matrix, error=None, full_matrices=False):
"""
Finds the most important modes of the given matrix given the weightings
given by the error.
matrix a horizontal rectangular matrix
error weighting applied to the dimension corresponding to the rows
"""
if type(error) is type(None):
error = np.ones(matrix.shape[0])
expanded_error = error[:,np.newaxis]
to_svd = matrix / expanded_error
(SVD_U, SVD_S, SVD_V_transpose) =\
la.svd(to_svd, full_matrices=full_matrices)
SVD_U = SVD_U * expanded_error
return SVD_U, SVD_S, SVD_V_transpose.T | 5ca0f54af765f0694fb572ee3b82f4d59642bb06 | 1,921 |
def ingredients():
"""Route to list all ingredients currently in the database.
"""
query = request.args.get("q")
ingredients = db.get_ingredient_subset_from_db(query)
return jsonify(ingredients) | 376bcb8e16c0379676f9748f4a2858ea39ca33ab | 1,922 |
def read_h5_particles(particles_file, refpart, real_particles, bucket_length, comm, verbose):
"""Read an array of particles from an HDF-5 file"""
four_momentum = refpart.get_four_momentum()
pmass = four_momentum.get_mass()
E_0 = four_momentum.get_total_energy()
p0c = four_momentum.get_momentum()
myrank = comm.get_rank()
mpisize = comm.get_size()
if myrank==0 and verbose:
print("Loading particles from h5 file: ", particles_file)
if myrank == 0:
#h5 = tables.open_file(particles_file)
h5 = h5py.File(particles_file)
# use explicit int conversion otherwise there seems to
        # be a Python->C++ type mismatch of numpy.int64->int
#num_total_particles = int(h5.root.particles.shape[0])
num_total_particles = int(h5['particles'].shape[0])
if verbose:
print("Total of ", num_total_particles, " particles from file")
# broadcast num particles to all nodes
MPI.COMM_WORLD.bcast(num_total_particles, root=0)
else:
num_total_particles = None
num_total_particles = MPI.COMM_WORLD.bcast(num_total_particles, root=0)
if myrank == 0:
particles = h5['particles']
# make sure the data has the correct shape, either [n,6] without
# particles IDs or [n,7] with particle IDs.
if (particles.shape[1] != 7):
            raise RuntimeError, "input data shape %s has an incorrect number of particle coordinates"%repr(particles.shape)
#Note: Synergia bunch constructor updated - commit 077b99d7 - 11/17/2016
#Using old constructor throws an ArgumentError of a non-standard type.
# Using a try and except to handle both instances.
try:
# try the original constructor
bunch = synergia.bunch.Bunch(
refpart,
num_total_particles, real_particles, comm,
bucket_length)
except Exception, e:
#look to see if it's an ArgumentError by evaluating the traceback
if (not str(e).startswith("Python argument types in")):
raise
else:
# use the new constructor
if verbose:
print("Using updated bunch constructor")
bunch = synergia.bunch.Bunch(
refpart,
num_total_particles, real_particles, comm)
# now set the new parameter 'z_period_length'
if bucket_length is not None:
bunch.set_z_period_length(bucket_length)
else:
bucket_length = 1. #fix this quantity
local_num = bunch.get_local_num()
local_particles = bunch.get_local_particles()
# Each processor will have a possibly different number of local particles.
# rank 0 has to find out how many each of them has and distribute them
n_particles_by_proc = MPI.COMM_WORLD.gather(local_num, 0)
if myrank == 0:
# copy in my particles
this_rank_start = 0
local_particles[:,:] = particles[0:local_num, :]
this_rank_start += local_num
# send particles out to other ranks
for r in range(1, mpisize):
this_rank_end = this_rank_start+n_particles_by_proc[r]
MPI.COMM_WORLD.send(obj=particles[this_rank_start:this_rank_end, :],
dest=r)
this_rank_start += n_particles_by_proc[r]
else:
# I'm not rank 0. Receive my particles
lp = MPI.COMM_WORLD.recv(source=0)
local_particles[:,:] = lp[:,:]
return bunch | caaeb89920b3cc9e0b263c9b1fea5fc1615ad8b3 | 1,923 |
import logging
def readAndMapFile(path):
"""
Main file breaker - this takes a given file and breaks it into arbitrary
fragments, returning and array of fragments. For simplicity, this is breaking on
newline characters to start with. May have to be altered to work with puncuation
and/or special characters as needed.
"""
splitLines = []
def mapper(line):
strippedLine = line.strip()
if (len(strippedLine) > 0):
splitLines.append(strippedLine)
with open(path, "r", encoding=FILE_ENCODING) as f:
content = f.read()
items = content.split("\n")
for i in items:
logging.info("n-gram length = {}".format(len(i)))
mapper(i)
logging.info("Read {} lines of text from {}".format(len(splitLines), path))
return splitLines | 4a542d1a08fcd88a1660de360c15d87949eddf11 | 1,924 |
def fetch_git_logs(repo, from_date, to_date, args): # pragma: no cover
"""Fetch all logs from Gitiles for the given date range.
Gitiles does not natively support time ranges, so we just fetch
everything until the range is covered. Assume that logs are ordered
in reverse chronological order.
"""
cursor = ''
commit_date = to_date
data = []
while cursor is not None:
page = fetch_git_page(repo, cursor)
logs = page.get('log', [])
cursor = page.get('next')
for log in logs:
committer = log.get('committer', {})
commit_date = date_from_git(committer.get('time'))
if not commit_date:
continue
if commit_date > to_date:
continue
if commit_date < from_date:
break
files = set()
for entry in log.get('tree_diff', []):
files.add(entry['old_path'])
files.add(entry['new_path'])
if args.path_filter_include:
if not any(matches_path_filter(p, args.path_filter_include)
for p in files):
continue
if args.path_filter_exclude:
if any(matches_path_filter(p, args.path_filter_exclude)
for p in files):
continue
data.append({
'author': log.get('author', {}).get('email'),
'date': commit_date,
'commit-bot': bool('commit-bot' in committer.get('email', '')),
'revision': log.get('commit'),
})
if commit_date < from_date:
break
return data | 1164b373e9b8f7186165712f8ac9e5e3d1a1f10f | 1,925 |
import torch
def _gen_bfp_op(op, name, bfp_args):
"""
Do the 'sandwich'
With an original op:
out = op(x, y)
grad_x, grad_y = op_grad(grad_out)
To the following:
x_, y_ = input_op(x, y)
Where input_op(x, y) -> bfp(x), bfp(y)
and input_op_grad(grad_x, grad_y) -> bfp(grad_x), bfp(grad_y)
out_ = op(x_, y_)
out = output_op(out)
Where output_op(out) -> bfp(out)
and output_op_grad(grad_out) -> bfp(grad_out)
    This way we guarantee that everything in and out of the forward and backward operations is
properly converted to bfp
"""
name = _get_op_name(name, **bfp_args)
class NewOpIn(torch.autograd.Function):
@staticmethod
def forward(ctx, x, w):
return (float_to_bfp_batched(x, backward=False, **bfp_args), w)
@staticmethod
def backward(ctx, grad_x, grad_w):
return (grad_x, grad_w)
NewOpIn.__name__ = name + '_In'
new_op_in = NewOpIn.apply
class NewOpOut(torch.autograd.Function):
@staticmethod
def forward(ctx, op_out):
return op_out
@staticmethod
def backward(ctx, op_out_grad):
return float_to_bfp_batched(op_out_grad, backward=True, **bfp_args)
NewOpOut.__name__ = name + '_Out'
new_op_out = NewOpOut.apply
def new_op(x, w, *args, **kwargs):
x, w = new_op_in(x, w)
out = op(x, w, *args, **kwargs)
return new_op_out(out)
return new_op | d430bd9d090d0a47fa4d6a8c173c77b08e2fdb66 | 1,926 |
import numpy as np
def angleaxis_to_rotation_matrix(aa):
"""Converts the 3 element angle axis representation to a 3x3 rotation matrix
aa: numpy.ndarray with 1 dimension and 3 elements
Returns a 3x3 numpy.ndarray
"""
angle = np.sqrt(aa.dot(aa))
if angle > 1e-6:
c = np.cos(angle);
s = np.sin(angle);
u = np.array([aa[0]/angle, aa[1]/angle, aa[2]/angle]);
R = np.empty((3,3))
R[0,0] = c+u[0]*u[0]*(1-c); R[0,1] = u[0]*u[1]*(1-c)-u[2]*s; R[0,2] = u[0]*u[2]*(1-c)+u[1]*s;
R[1,0] = u[1]*u[0]*(1-c)+u[2]*s; R[1,1] = c+u[1]*u[1]*(1-c); R[1,2] = u[1]*u[2]*(1-c)-u[0]*s;
R[2,0] = u[2]*u[0]*(1-c)-u[1]*s; R[2,1] = u[2]*u[1]*(1-c)+u[0]*s; R[2,2] = c+u[2]*u[2]*(1-c);
else:
R = np.eye(3)
return R | 57d849f137684824aa23d393802dc247df987b59 | 1,927 |
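# Hedged usage sketch for the Rodrigues-style conversion above: a rotation of
# pi/2 about the z axis should map the x unit vector onto the y unit vector.
import numpy as np
aa = np.array([0.0, 0.0, np.pi / 2])   # axis (0, 0, 1), angle pi/2
R = angleaxis_to_rotation_matrix(aa)
assert np.allclose(R @ np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]))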
def sendOrderFAK(self, orderType, price, volume, symbol, exchange, stop=False):
"""发送委托"""
if self.trading:
# 如果stop为True,则意味着发本地停止单
req = {}
req['sid'] = self.sid
if orderType == CTAORDER_BUY:
req['direction'] = '0'
req['offset'] = '0'
elif orderType == CTAORDER_SELL:
req['direction'] = '1'
req['offset'] = '1'
elif orderType == CTAORDER_SELL_TODAY:
req['direction'] = '1'
req['offset'] = '3'
elif orderType == CTAORDER_SHORT:
req['direction'] = '1'
req['offset'] = '0'
elif orderType == CTAORDER_COVER:
req['direction'] = '0'
req['offset'] = '1'
elif orderType == CTAORDER_COVER_TODAY:
req['direction'] = '0'
req['offset'] = '3'
req['symbol'] = symbol
req['volume'] = volume
req['price'] = price
req['hedgeflag'] = '1'
req['ordertype'] = '1'
req['exchange'] = exchange
vtOrderID = ctaEngine.sendOrder(req)
return vtOrderID
else:
return None
# ---------------------------------------------------------------------- | 5b72ab3cdfa0b4412df2861d1e23a4a55f1d7206 | 1,928 |
import itertools
def unique(lst):
"""
:param lst: a list of lists
:return: a unique list of items appearing in those lists
"""
indices = sorted(list(range(len(lst))), key=lst.__getitem__)
indices = set(next(it) for k, it in
itertools.groupby(indices, key=lst.__getitem__))
return [x for i, x in enumerate(lst) if i in indices] | 0848d693681ff0f8bdbc0d0436b3d4450eee781e | 1,929 |
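# Brief illustration of `unique`: it keeps the first occurrence of each distinct
# inner list while preserving the original ordering.
lists = [[1, 2], [3], [1, 2], [4, 5], [3]]
assert unique(lists) == [[1, 2], [3], [4, 5]]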
import numpy as np
def max_frequency(sig, FS):
"""Compute max frequency along the specified axes.
Parameters
----------
sig: ndarray
input from which max frequency is computed.
FS: int
sampling frequency
Returns
-------
f_max: int
0.95 of max_frequency using cumsum.
"""
f, fs = plotfft(sig, FS, doplot=False)
t = np.cumsum(fs)
try:
ind_mag = np.where(t > t[-1]*0.95)[0][0]
    except IndexError:
ind_mag = np.argmax(t)
f_max = f[ind_mag]
return f_max | 19321fb47d47b99138e1d1551f3728df4c2b7370 | 1,930 |
def split(text):
"""Turns the mobypron.unc file into a dictionary"""
map_word_moby = {}
try:
lines = text.split("\n")
for line in lines:
(word, moby) = line.split(" ", 1)
map_word_moby[word] = moby
    except (IOError, ValueError) as error:
        print(f"Failed to parse input: {error}")
return map_word_moby | ba051724f0399e918949c3e8b7fb010e2d87c9f9 | 1,931 |
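# Small hedged example for `split`: the input is "WORD pronunciation" lines; the
# sample words and pronunciations below are invented for illustration.
sample = "cat kaet\ndog dOg"
pron = split(sample)
assert pron["cat"] == "kaet"
assert pron["dog"] == "dOg"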
def report(key_name=None, priority=-1, **formatters):
""" Use this decorator to indicate what returns to include in the report and how to format it """
def tag_with_report_meta_data(cls):
# guard: prevent bad coding by catching bad return key
if key_name and key_name not in cls.return_keys:
raise Exception("Task %s does not specify %s using the @returns decorator. "
"It cannot be used in @report" % (cls.name, key_name))
report_entry = {
"key_name": key_name,
'priority': priority,
'formatters': formatters,
}
if not hasattr(cls, 'report_meta'):
cls.report_meta = []
cls.report_meta.append(report_entry)
return cls
return tag_with_report_meta_data | 3830135de40bdc2a25bd3c6b6cecc194c6dbebac | 1,932 |
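# Hypothetical usage sketch for the @report decorator. `ExampleTask`, its `name` and
# `return_keys` attributes are invented here; in the real task framework return_keys
# would be declared via the @returns decorator mentioned in the error message.
@report(key_name='row_count', priority=10, row_count='{:,}')
class ExampleTask:
    name = 'example_task'
    return_keys = ['row_count']
assert ExampleTask.report_meta[0]['key_name'] == 'row_count'
assert ExampleTask.report_meta[0]['formatters'] == {'row_count': '{:,}'}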
import scipy.integrate
def calc_momentum_def(x_loc, X, Y, U):
""" calc_momentum_def() : Calculates the integral momentum deficit of scalar field U stored at \
locations X,Y on a vertical line that runs nearest to x_loc. """
U_line, x_line, x_idx_line = get_line_quantity(x_loc, X, Y, U)
y_line = Y[:,x_idx_line]
return scipy.integrate.trapz(U_line*(1-U_line), y_line) | 7173450ebd779c07a80cef2deb37954ddb7509be | 1,933 |
def display_unit_title(unit, app_context):
"""Prepare an internationalized display for the unit title."""
course_properties = app_context.get_environ()
template = get_unit_title_template(app_context)
return template % {'index': unit.index, 'title': unit.title} | 9d8ffbf0672388bd890aaabb8e5fbdb5e193d3d2 | 1,934 |
def load_user(user_id):
"""Load the user object from the user ID stored in the session"""
return User.objects(pk=user_id).first() | 96df8d5e21f380369ae0c6ccc404a4f7880bf000 | 1,935 |
def get_complex_replay_list():
"""
For full replays that have crashed or failed to be converted
:return:
"""
return [
'https://cdn.discordapp.com/attachments/493849514680254468/496153554977816576/BOTS_JOINING_AND_LEAVING.replay',
'https://cdn.discordapp.com/attachments/493849514680254468/496153569981104129/BOTS_NO_POSITION.replay',
'https://cdn.discordapp.com/attachments/493849514680254468/496153605074845734/ZEROED_STATS.replay',
'https://cdn.discordapp.com/attachments/493849514680254468/496180938968137749/FAKE_BOTS_SkyBot.replay',
'https://cdn.discordapp.com/attachments/493849514680254468/497149910999891969/NEGATIVE_WASTED_COLLECTION.replay',
'https://cdn.discordapp.com/attachments/493849514680254468/497191273619259393/WASTED_BOOST_WHILE_SUPER_SONIC.replay',
'https://cdn.discordapp.com/attachments/493849514680254468/501630263881760798/OCE_RLCS_7_CARS.replay',
'https://cdn.discordapp.com/attachments/493849514680254468/561300088400379905/crossplatform_party.replay'
] | ef5a75a848289ad9c129c2b73a6d6845dcd07cfe | 1,936 |
import json
from flask import abort, request
def parse_registry():
""" Parses the provided registry.dat file and returns a dictionary of chunk
file names and hashes. (The registry file is just a json dictionary containing
a list of file names and hashes.) """
registry = request.values.get("registry", None)
if registry is None:
return None
try:
ret = json.loads(registry)
except ValueError:
return abort(400)
if not isinstance(ret, dict):
return abort(400)
return ret | 71d4cd0f2b9fb33b92861feb9ea882fc32ec7234 | 1,937 |
import math
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
def get_cosine_with_hard_restarts_schedule_with_warmup(optim: Optimizer,
num_warmup_step: float,
num_training_step: int,
num_cycles: float = 1.,
last_epoch: int = -1):
"""
get a scheduler with a linear warmup between ``[0, num_warmup_step)`` and then decreases it following a cosine
function with several hard restarts.
"""
def lr_lambda(current_step):
if current_step < num_warmup_step:
return float(current_step) / float(max(1.0, num_warmup_step))
progress = float(current_step - num_warmup_step) / float(max(1, num_training_step - num_warmup_step))
if progress >= 1.0:
return 0.
return max(0., .5 * (1. + math.cos(math.pi * ((float(num_cycles) * progress) % 1.))))
return LambdaLR(optim, lr_lambda, last_epoch) | 5327cb688885c8ecc271156364a06bffedd97775 | 1,938 |
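# Hedged usage sketch: attach the scheduler to a standard PyTorch optimizer. The
# model, learning rate and step counts are placeholders, not part of the snippet.
import torch
model = torch.nn.Linear(4, 2)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(
    optim, num_warmup_step=100, num_training_step=1000, num_cycles=2.)
for step in range(1000):
    optim.step()
    scheduler.step()  # linear warmup, then cosine decay with two hard restarts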
import typing
def home():
"""
Render Homepage
--------------------------------------------------------------
This site should be cached, because it is the main entry point for many users.
"""
bestseller: typing.List[Device] = get_bestsellers()
specialist_manufacturers = Manufacturer.query.filter(
(Manufacturer.name == "Samsung") | (Manufacturer.name == "Huawei")
).all()
return render_template("shop/home.html", bestseller=bestseller, specialist_manufacturers=specialist_manufacturers) | ca452264e8a10af83e0cc7b5df592a9f618085ad | 1,939 |
import twilio.twiml
def reject_call():
"""Ends the call when a user does not want to talk to the caller"""
resp = twilio.twiml.Response()
resp.say("I'm sorry, Mr. Baker doesn't want to talk to you. Goodbye scum.", voice='woman', language='en-GB')
resp.hangup()
return str(resp) | 743e58b230a3a63df4c3e882139755b8d2c4bc55 | 1,940 |
import pandas as pd
def table_prep(data, columns=''):
"""
Data processor for table() function.
    You can call it separately as well and in
    return get a non-prettified summary table.
    Unless columns are defined, the first three
    columns are chosen by default.
SYNTAX EXAMPLE:
df['quality_score','influence_score','reach_score']
"""
if data.shape[1] != 3:
if len(columns) != 3:
if data.shape[1] > 3:
print("showing first three columns because no columns were \
specific / data had more than 3 columns")
data = pd.DataFrame(data[data.columns[0:3]])
if data.shape[1] < 3:
print("You need at least 3 columns of data for this table")
quit()
if len(columns) == 3:
data = data[columns]
desc = pd.DataFrame({'sum': data.sum().astype('int'),
'median': data.median(),
'mean': data.mean(),
'std': data.std()})
desc = desc.round(decimals=2)
return desc | a9d3d75d2ac32ddf5ae4d5a17a10974b61c139ee | 1,941 |
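# Minimal hedged example for table_prep(), reusing the column names from the
# docstring; the numbers are placeholders.
import pandas as pd
df = pd.DataFrame({'quality_score': [1, 2, 3],
                   'influence_score': [4, 5, 6],
                   'reach_score': [7, 8, 9]})
summary = table_prep(df)
print(summary)  # one row per column with sum, median, mean and std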
def lerp(a,b,t):
""" Linear interpolation between from @a to @b as @t goes between 0 an 1. """
return (1-t)*a + t*b | 12cb8690ba5e5f2a4c08c1cd29d3497513b63438 | 1,942 |
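# Quick sanity checks for lerp(): t=0 returns a, t=1 returns b, and t=0.25 lands a
# quarter of the way from a to b.
assert lerp(2.0, 10.0, 0.0) == 2.0
assert lerp(2.0, 10.0, 1.0) == 10.0
assert lerp(2.0, 10.0, 0.25) == 4.0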
def convert_to_legacy_v3(
game_tick_packet: game_data_struct.GameTickPacket,
field_info_packet: game_data_struct.FieldInfoPacket = None):
"""
Returns a legacy packet from v3
:param game_tick_packet a game tick packet in the v4 struct format.
:param field_info_packet a field info packet in the v4 struct format. Optional. If this is not supplied,
none of the boost locations will be filled in.
"""
legacy_packet = GameTickPacket()
legacy_packet.numBoosts = game_tick_packet.num_boost
legacy_packet.numCars = game_tick_packet.num_cars
for i in range(game_tick_packet.num_cars):
convert_player_info(legacy_packet.gamecars[i], game_tick_packet.game_cars[i])
for i in range(game_tick_packet.num_boost):
convert_boost_info(legacy_packet.gameBoosts[i], game_tick_packet.game_boosts[i])
if field_info_packet is not None:
convert_vector(legacy_packet.gameBoosts[i].Location, field_info_packet.boost_pads[i].location)
convert_ball_info(legacy_packet.gameball, game_tick_packet.game_ball)
convert_game_info(legacy_packet.gameInfo, game_tick_packet.game_info)
return legacy_packet | 3e00e165233806957a010871c9218b1c02950063 | 1,943 |
import logging
import numpy as np
import pydub
import tensorflow as tf
import apache_beam as beam
def _load_audio(audio_path, sample_rate):
"""Load audio file."""
global counter
global label_names
global start
global end
logging.info("Loading '%s'.", audio_path)
try:
lbl1=Alphabet[audio_path[-6]]
lbl2 = Alphabet[audio_path[-5]]
except:
lbl1=1 + counter
lbl2=2 + counter
label_names=np.array([[lbl1,lbl2]]).astype(np.float32)
counter = counter + 1
print('label names')
print(audio_path)
#print(audio_path[-6]+audio_path[-5])
print(label_names)
beam.metrics.Metrics.counter('prepare-tfrecord', 'load-audio').inc()
with tf.io.gfile.GFile(audio_path, 'rb') as f:
audio_segment = (
pydub.AudioSegment.from_file(f)
.set_channels(1).set_frame_rate(sample_rate))
audio = np.array(audio_segment.get_array_of_samples()).astype(np.float32)
audio=audio[start:end]
audio /= 2 ** (8 * audio_segment.sample_width)
with tf.io.gfile.GFile(str(audio_path.replace("audio","audio_2")), 'rb') as sd:
audio_segment_2 = (
pydub.AudioSegment.from_file(sd)
.set_channels(1).set_frame_rate(sample_rate))
audio_2 = np.array(audio_segment_2.get_array_of_samples()).astype(np.float32)
audio_2=audio_2[start:end]
# Convert from int to float representation.
audio_2 /= 2**(8 * audio_segment_2.sample_width)
print('I am alive!')
start = start + 64000
end = end + 64000
#print(audio)
return {'audio': audio,'audio_2': audio_2} | 5e8112c79164c800965f137c83ceb720aab17bdf | 1,944 |
def generate_annotation_dict(annotation_file):
""" Creates a dictionary where the key is a file name
and the value is a list containing the
- start time
- end time
- bird class.
for each annotation in that file.
"""
annotation_dict = dict()
for line in open(annotation_file):
file_name, start_time, end_time, bird_class = line.strip().split('\t')
if file_name not in annotation_dict:
annotation_dict[file_name] = list()
annotation_dict[file_name].append([start_time, end_time, bird_class])
return annotation_dict | f40f210075e65f3dbe68bb8a594deb060a23ad8b | 1,945 |
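# Hedged usage sketch: the annotation file is tab-separated with one
# "file_name<TAB>start<TAB>end<TAB>class" row per annotation; the file names and
# species below are invented for illustration.
import tempfile
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write("rec1.wav\t0.5\t2.0\tsparrow\n")
    f.write("rec1.wav\t3.1\t4.2\trobin\n")
    path = f.name
annotations = generate_annotation_dict(path)
assert annotations["rec1.wav"] == [["0.5", "2.0", "sparrow"], ["3.1", "4.2", "robin"]]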
import numpy as np
from spatialmath import base
def ishom(T, check=False, tol=100):
"""
Test if matrix belongs to SE(3)
:param T: SE(3) matrix to test
:type T: numpy(4,4)
:param check: check validity of rotation submatrix
:type check: bool
:return: whether matrix is an SE(3) homogeneous transformation matrix
:rtype: bool
- ``ishom(T)`` is True if the argument ``T`` is of dimension 4x4
- ``ishom(T, check=True)`` as above, but also checks orthogonality of the
      rotation sub-matrix and validity of the bottom row.
.. runblock:: pycon
>>> from spatialmath.base import *
>>> import numpy as np
>>> T = np.array([[1, 0, 0, 3], [0, 1, 0, 4], [0, 0, 1, 5], [0, 0, 0, 1]])
>>> ishom(T)
>>> T = np.array([[1, 1, 0, 3], [0, 1, 0, 4], [0, 0, 1, 5], [0, 0, 0, 1]]) # invalid SE(3)
>>> ishom(T) # a quick check says it is an SE(3)
>>> ishom(T, check=True) # but if we check more carefully...
>>> R = np.array([[1, 1, 0], [0, 1, 0], [0, 0, 1]])
>>> ishom(R)
:seealso: :func:`~spatialmath.base.transformsNd.isR`, :func:`~isrot`, :func:`~spatialmath.base.transforms2d.ishom2`
"""
return (
isinstance(T, np.ndarray)
and T.shape == (4, 4)
and (
not check
or (
base.isR(T[:3, :3], tol=tol)
and np.all(T[3, :] == np.array([0, 0, 0, 1]))
)
)
) | b4a0467d22940889e3071bf07d4a093d567409f3 | 1,946 |
import numpy as np
def _get_stp_data(step_order=STEP_ORDER, n=N_PER_STEP):
"""Returns np.array of step-type enums data for sample data.
Parameters
----------
step_order : list of (int, char)
List of (Cycle number, step type code) for steps in sample procedure.
n : int
Number of datapoints per step.
Returns
-------
stp_data : np.array(int)
"""
return np.hstack([_get_step_stp_idx_data(step_code, n=n) for _, step_code
in step_order]) | d96a2604ac67e1a84ead39e0d2d39a5c6183a5cd | 1,947 |
from typing import Union, List
import numpy as np
def fuse_stride_arrays(dims: Union[List[int], np.ndarray],
strides: Union[List[int], np.ndarray]) -> np.ndarray:
"""
Compute linear positions of tensor elements
of a tensor with dimensions `dims` according to `strides`.
Args:
dims: An np.ndarray of (original) tensor dimensions.
strides: An np.ndarray of (possibly permuted) strides.
Returns:
np.ndarray: Linear positions of tensor elements according to `strides`.
"""
return fuse_ndarrays([
np.arange(0, strides[n] * dims[n], strides[n], dtype=SIZE_T)
for n in range(len(dims))
]) | 06185cb0bcfccd30e7b006fa8fe4e28a6f5ae7f3 | 1,949 |
def extract_jasmine_summary(line):
"""
Example SUCCESS karma summary line:
PhantomJS 2.1.1 (Linux 0.0.0): Executed 1 of 1 SUCCESS (0.205 secs / 0.001 secs)
    Example FAIL karma summary line:
PhantomJS 2.1.1 (Linux 0.0.0): Executed 1 of 1 (1 FAILED) ERROR (0.21 secs / 0.001 secs)
"""
# get totals
totals = line.split(' Executed ')[1].split(' ')
executed_tests, total_tests = int(totals[0]), int(totals[2])
# get failed
if 'SUCCESS' in line:
failed_tests = 0
else:
failed_tests = int(totals[3][1:])
return {
'total_tests': total_tests,
'executed_tests': executed_tests,
'failed_tests': failed_tests,
'passed_tests': executed_tests - failed_tests
} | f795ff015555cc3a2bd2d27527ae505a6dde9231 | 1,950 |
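# Worked example using the FAIL line quoted in the docstring above.
line = ('PhantomJS 2.1.1 (Linux 0.0.0): Executed 1 of 1 (1 FAILED) ERROR '
        '(0.21 secs / 0.001 secs)')
summary = extract_jasmine_summary(line)
assert summary == {'total_tests': 1, 'executed_tests': 1,
                   'failed_tests': 1, 'passed_tests': 0}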