content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
---|---|---|
def tau_for_x(x, beta):
"""Rescales tau axis to x -1 ... 1"""
if x.min() < -1 or x.max() > 1:
raise ValueError("domain of x")
return .5 * beta * (x + 1) | 1d7b868dfadb65e6f98654276763fd4bff2c20ff | 3,653,700 |
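# Hedged usage sketch (illustrative, not from the original source): tau_for_x
# maps x in [-1, 1] onto the tau axis [0, beta]; numpy is assumed.
import numpy as np

x = np.array([-1.0, 0.0, 1.0])
print(tau_for_x(x, beta=10.0))  # [ 0.  5. 10.]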
from typing import Optional
from typing import Dict
# assumed import: the original module may use lxml.etree instead
from xml.etree import ElementTree as etree
def _generate_element(name: str,
text: Optional[str] = None,
attributes: Optional[Dict] = None) -> etree.Element:
"""
generate an ElementTree.Element object
:param name: namespace+tag_name of the element
:param text: Text of the element. Default is None
:param attributes: Attributes of the elements in form of a dict {"attribute_name": "attribute_content"}
:return: ElementTree.Element object
"""
et_element = etree.Element(name)
if text:
et_element.text = text
if attributes:
for key, value in attributes.items():
et_element.set(key, value)
return et_element | d7d8f7d174f207d64993aca54803af6600c3ddb6 | 3,653,701 |
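# Hedged usage sketch (illustrative): build a small element and serialize it;
# assumes an ElementTree-compatible `etree` (xml.etree.ElementTree or lxml.etree).
elem = _generate_element('person', text='Ada', attributes={'role': 'admin'})
print(etree.tostring(elem))  # b'<person role="admin">Ada</person>'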
def CoA_Cropland_URL_helper(*, build_url, config, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replaced with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:param config: dictionary, items in FBA method yaml
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
# initiate url list for coa cropland data
urls = []
# call on state acronyms from common.py (and remove entry for DC)
state_abbrevs = abbrev_us_state
state_abbrevs = {k: v for (k, v) in state_abbrevs.items() if k != "DC"}
# replace "__aggLevel__" in build_url to create three urls
for x in config['agg_levels']:
for y in config['sector_levels']:
# at national level, remove the text string calling for
# state acronyms
if x == 'NATIONAL':
url = build_url
url = url.replace("__aggLevel__", x)
url = url.replace("__secLevel__", y)
url = url.replace("&state_alpha=__stateAlpha__", "")
if y == "ECONOMICS":
url = url.replace(
"AREA%20HARVESTED&statisticcat_desc=AREA%20IN%20"
"PRODUCTION&statisticcat_desc=TOTAL&statisticcat_desc="
"AREA%20BEARING%20%26%20NON-BEARING",
"AREA&statisticcat_desc=AREA%20OPERATED")
else:
url = url.replace("&commodity_desc=AG%20LAND&"
"commodity_desc=FARM%20OPERATIONS", "")
urls.append(url)
else:
# substitute in state acronyms for state and county url calls
for z in state_abbrevs:
url = build_url
url = url.replace("__aggLevel__", x)
url = url.replace("__secLevel__", y)
url = url.replace("__stateAlpha__", z)
if y == "ECONOMICS":
url = url.replace(
"AREA%20HARVESTED&statisticcat_desc=AREA%20IN%20"
"PRODUCTION&statisticcat_desc=TOTAL&"
"statisticcat_desc=AREA%20BEARING%20%26%20NON-BEARING",
"AREA&statisticcat_desc=AREA%20OPERATED")
else:
url = url.replace("&commodity_desc=AG%20LAND&commodity_"
"desc=FARM%20OPERATIONS", "")
urls.append(url)
return urls | 5cd08b8c4198428e45267f33d35d98b63df4fd17 | 3,653,702 |
def _centered_bias(logits_dimension, head_name=None):
"""Returns `logits`, optionally with centered bias applied.
Args:
logits_dimension: Last dimension of `logits`. Must be >= 1.
head_name: Optional name of the head.
Returns:
Centered bias `Variable`.
Raises:
ValueError: if `logits_dimension` is invalid.
"""
if (logits_dimension is None) or (logits_dimension < 1):
raise ValueError("Invalid logits_dimension %s." % logits_dimension)
centered_bias = variable_scope.get_variable(
name="centered_bias_weight",
shape=(logits_dimension,),
initializer=init_ops.zeros_initializer(),
trainable=True)
for dim in range(logits_dimension):
if head_name:
summary.scalar("centered_bias/bias_%d/%s" % (dim, head_name),
centered_bias[dim])
else:
summary.scalar("centered_bias/bias_%d" % dim, centered_bias[dim])
return centered_bias | 868fc2681ee1177932b77bdfe9ce9eefc3c5fde1 | 3,653,703 |
from typing import Union
from typing import List
def get_columns(dataframe: pd.DataFrame,
columns: Union[str, List[str]]) -> Union[pd.Series, pd.DataFrame]:
"""Get the column names, and can rename according to list"""
return dataframe[list(columns)].copy(True) | e624233a3aca3f71f203bf7acca700722819b237 | 3,653,704 |
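# Hedged usage sketch (illustrative): a single column name returns a Series,
# a list of names returns a DataFrame; pandas is assumed.
import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
print(type(get_columns(df, 'a')))         # <class 'pandas.core.series.Series'>
print(type(get_columns(df, ['a', 'b'])))  # <class 'pandas.core.frame.DataFrame'>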
import pandas
import math
def get_vote_activity(session):
"""Create a plot showing the inline usage statistics."""
creation_date = func.date_trunc("day", Vote.created_at).label("creation_date")
votes = (
session.query(creation_date, func.count(Vote.id).label("count"))
.group_by(creation_date)
.order_by(creation_date)
.all()
)
total_votes = [("Total votes", q[0], q[1]) for q in votes]
# Grid style
plt.style.use("seaborn-whitegrid")
# Combine the results in a single dataframe and name the columns
dataframe = pandas.DataFrame(total_votes, columns=["type", "date", "votes"])
months = mdates.MonthLocator() # every month
months_fmt = mdates.DateFormatter("%Y-%m")
max_value = max([vote[2] for vote in total_votes])
magnitude = get_magnitude(max_value)
# Plot each result set
fig, ax = plt.subplots(figsize=(30, 15), dpi=120)
for key, group in dataframe.groupby(["type"]):
ax = group.plot(ax=ax, kind="bar", x="date", y="votes", label=key)
ax.xaxis.set_major_locator(months)
ax.xaxis.set_major_formatter(months_fmt)
ax.yaxis.set_ticks(np.arange(0, max_value, math.pow(10, magnitude - 1)))
image = image_from_figure(fig)
image.name = "vote_statistics.png"
return image | 9b59ad083147d7e21d8d32e730b235a23b187c0f | 3,653,705 |
def viz_graph(obj):
"""
    Generate the visualization of the graph in JupyterLab
Arguments
-------
obj: list
        a list of Python objects that define the nodes
Returns
-----
nx.DiGraph
"""
G = nx.DiGraph()
# instantiate objects
for o in obj:
for i in o['inputs']:
G.add_edge(i, o['id'])
return G | a826438b3e207f88a7bddddcd4fc02a4ad9c753d | 3,653,706 |
def zrand_convolve(labelgrid, neighbors='edges'):
"""
Calculates the avg and std z-Rand index using kernel over `labelgrid`
Kernel is determined by `neighbors`, which can include all entries with
touching edges (i.e., 4 neighbors) or corners (i.e., 8 neighbors).
Parameters
----------
    labelgrid : (S, K, N) array_like
        Array containing cluster labels for each of `N` samples at each point
        of an `S`-by-`K` parameter grid.
neighbors : str, optional
How many neighbors to consider when calculating Z-rand kernel. Must be
in ['edges', 'corners']. Default: 'edges'
Returns
-------
zrand_avg : (S, K) np.ndarray
Array containing average of the z-Rand index calculated using provided
neighbor kernel
zrand_std : (S, K) np.ndarray
Array containing standard deviation of the z-Rand index
"""
inds = cartesian([range(labelgrid.shape[0]), range(labelgrid.shape[1])])
zrand = np.empty(shape=labelgrid.shape[:-1] + (2,))
for x, y in inds:
ninds = get_neighbors(x, y, neighbors=neighbors, shape=labelgrid.shape)
zrand[x, y] = zrand_partitions(labelgrid[ninds].T)
return zrand[..., 0], zrand[..., 1] | 4b3950239886cb7e41fb2a7105c2413234dcdb30 | 3,653,707 |
def msg_receiver():
"""
    Render the "system messages received" page
:return:
"""
return render_template('sysadmin/sysmsg/sys_msg_received.html', **locals()) | 0902f9eb4ad75802d7f858f4474c5e587082403f | 3,653,708 |
import datetime
def abs_timedelta(delta):
"""Returns an "absolute" value for a timedelta, always representing a
time distance."""
if delta.days < 0:
now = datetime.datetime.now()
return now - (now + delta)
return delta | 81018ea9c54585a8c24e52cc48c21fcb2d73e9b3 | 3,653,709 |
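# Hedged usage sketch (illustrative): a negative timedelta is flipped to a
# positive time distance.
import datetime

print(abs_timedelta(datetime.timedelta(days=-2)))  # 2 days, 0:00:00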
def make_new_get_user_response(row):
""" Returns an object containing only what needs to be sent back to the user. """
return {
'userName': row['userName'],
'categories': row['categories'],
'imageName': row['imageName'],
'refToImage': row['refToImage'],
'imgDictByTag': row['imgDictByTag'],
'canView': row['canView']
} | e13d8d297bd1401752ce07d93a68e765ed1113e8 | 3,653,711 |
def is_feature_enabled():
"""
Helper to check Site Configuration for ENABLE_COURSE_ACCESS_GROUPS.
:return: bool
"""
is_enabled = bool(configuration_helpers.get_value('ENABLE_COURSE_ACCESS_GROUPS', default=False))
if is_enabled:
# Keep the line below in sync with `util.organizations_helpers.organizations_enabled`
if not settings.FEATURES.get('ORGANIZATIONS_APP', False):
raise ConfigurationError(
                'The Course Access Groups feature is enabled but the Organizations App is not. '
'Please enable the feature flag `ORGANIZATIONS_APP` to fix this exception.'
)
return is_enabled | 57f0b94409d9332f8846d64a6a30518b6dcc8173 | 3,653,713 |
def solve_disp_eq(betbn, betbt, bet, Znak, c, It, Ia, nb, var):
"""
    Solve the dispersion equation.
    Znak = -1 for refraction
    Znak = 1 for reflection
"""
betb = sqrt(betbn ** 2. + betbt ** 2.)
gamb = 1. / sqrt(1. - betb ** 2.)
d = c * It / Ia
Ab = 1. + (nb ** 2. - 1.) * gamb ** 2. * (1. - betbn ** 2.)
Bb = d ** 2. * (1. - bet ** 2. - (nb ** 2. - 1.) * (gamb * (bet - betbn)) ** 2.)
Cb = (nb ** 2. - 1.) * gamb ** 2. * d * betbt * (2. - 2. * bet * betbn - d * betbt * (1. - bet ** 2.))
Qb = Ab - Bb - Cb
CHb = bet + (nb ** 2. - 1.) * gamb ** 2. * (bet - betbn) * (1. - betbt * d)
ZNb = 1. - bet ** 2. - (nb ** 2. - 1.) * (gamb * (bet - betbn)) ** 2.
    kbna = Ia * (CHb + Znak * sqrt(Qb)) / (c * ZNb)  # normal projection of the wave vector
    kbt = It  # tangential projection of the wave vector
    iQb = arctan(abs(kbt / kbna))
    wi = kbna * bet * c + Ia  # frequency of the refracted wave
    ci = wi * cos(iQb) / abs(kbna)  # speed of light in the medium
if var < 0:
iQb = -iQb
# k = kbna / cos(arctan(abs(kbt / kbna)))
# # ui=betb*c
# uit = betbt * c
# uin = betbn * c
# V = bet * c
# Ai = -1 / pow(c, 2.) - (pow(nb, 2.) - 1.) * pow(1. - uin / V, 2.) / pow(c, 2.) / (1. - pow(betb, 2.))
# Bi = -2 * (pow(nb, 2.) - 1.) * (-kbt * uit + Ia * uin / V) * (1. - uin / V) / (pow(c, 2.) * (1. - pow(betb, 2.)))
# Ci = pow(k, 2.) - (pow(nb, 2.) - 1.) * pow(-kbt * uit + Ia * uin / V, 2.) / pow(c, 2.) / (1. - pow(betb, 2.))
# '''print "Ai = %s"%Ai
# print "Bi = %s"%Bi
# print "Ci = %s"%Ci'''
# # wi=(-Bi-sqrt(pow(Bi,2)-4*Ai*Ci))/(2*Ai)
# dispeq = Ai * wi * wi + Bi * wi + Ci
# '''print "dispeq = %s"%(dispeq,)
# print "wi= %s"%wi'''
return (kbna, kbt, iQb, wi, ci) | b67b41cdccf37a14fda103b6f05263c7cbb4514e | 3,653,714 |
import numpy
def phistogram(view, a, bins=10, rng=None, normed=False):
"""Compute the histogram of a remote array a.
Parameters
----------
view
IPython DirectView instance
a : str
String name of the remote array
bins : int
Number of histogram bins
rng : (float, float)
Tuple of min, max of the range to histogram
normed : boolean
Should the histogram counts be normalized to 1
"""
nengines = len(view.targets)
# view.push(dict(bins=bins, rng=rng))
with view.sync_imports():
rets = view.apply_sync(lambda a, b, rng: numpy.histogram(a,b,rng), Reference(a), bins, rng)
hists = [ r[0] for r in rets ]
lower_edges = [ r[1] for r in rets ]
# view.execute('hist, lower_edges = numpy.histogram(%s, bins, rng)' % a)
lower_edges = view.pull('lower_edges', targets=0)
hist_array = numpy.array(hists).reshape(nengines, -1)
# hist_array.shape = (nengines,-1)
total_hist = numpy.sum(hist_array, 0)
if normed:
total_hist = total_hist/numpy.sum(total_hist,dtype=float)
return total_hist, lower_edges | 3c4633891b495a5cad867c945a8f8cc1c6b3c14f | 3,653,715 |
from itertools import islice, tee
from typing import Iterable
from typing import Iterator
from typing import TypeVar

_T = TypeVar("_T")
def windowed(it: Iterable[_T], size: int) -> Iterator[tuple[_T, ...]]:
"""Retrieve overlapped windows from iterable.
>>> [*windowed(range(5), 3)]
[(0, 1, 2), (1, 2, 3), (2, 3, 4)]
"""
return zip(*(islice(it_, start, None)
for start, it_ in enumerate(tee(it, size)))) | 6e3b29b67f9eb323d00065fa58ccd916c7c49640 | 3,653,716 |
def minmax(data):
"""Solution to exercise R-1.3.
Takes a sequence of one or more numbers, and returns the smallest and
largest numbers, in the form of a tuple of length two. Do not use the
built-in functions min or max in implementing the solution.
"""
min_idx = 0
max_idx = 0
for idx, num in enumerate(data):
if num > data[max_idx]:
max_idx = idx
if num < data[min_idx]:
min_idx = idx
return (data[min_idx], data[max_idx]) | 9715bef69c120f6d1afb933bd9030240f556eb20 | 3,653,717 |
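# Hedged usage sketch (illustrative) for the minmax exercise above.
print(minmax([4, -1, 7, 3]))  # (-1, 7)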
def sample_discreate(prob, n_samples):
"""根据类先验分布对标签值进行采样
M = sample_discreate(prob, n_samples)
Input:
prob: 类先验分布 shape=(n_classes,)
n_samples: 需要采样的数量 shape = (n_samples,)
Output:
M: 采样得到的样本类别 shape = (n_samples,)
例子:
sample_discreate([0.8,0.2],n_samples)
从类别[0,1]中采样产生n_samples个样本
其中采样得到0的概率为0.8,得到1的概率为0.2.
"""
np.random.seed(1) # 使每一次生成的随机数一样
n = prob.size # 类别的数量
R = np.random.rand(n_samples) # 生成服从均匀分布的随机数
M = np.zeros(n_samples) # 初始化最终结果
cumprob = np.cumsum(prob) # 累积概率分布
if n < n_samples: # 如果采样的样本数量大于类别数量
for i in range(n-1):
M = M + np.array(R > cumprob[i])
else: # 如果采样的样本数量小于类别数量
cumprob2 = cumprob[:-1]
for i in range(n_samples):
M[i] = np.sum(R[i] > cumprob2)
return M | 34c19c2dcbad652bdae8f2c829f42934c2176e84 | 3,653,718 |
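# Hedged usage sketch (illustrative): draw 5 labels from a 2-class prior;
# numpy is assumed.
import numpy as np

labels = sample_discreate(np.asarray([0.8, 0.2]), 5)
print(labels)  # array of 0./1. labels drawn with P(0)=0.8 and P(1)=0.2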
from xml.dom.minidom import parseString # tools for handling XML in python
def get_catalyst_pmids(first, middle, last, email, affiliation=None):
"""
    Given an author's identifiers and affiliation information (and optional lists of pmids), call the catalyst service
    to retrieve PMIDs for the author and return a list of PMIDs
:param first: author first name
:param middle: author middle name
:param last: author last name
:param email: author email(s) as a list
:param affiliation: author affiliation as a list
:return: list of pmids identified by the catalyst service that have a high probability of being written by the
author
"""
result = get_catalyst_pmids_xml(first, middle, last, email, affiliation)
dom = parseString(result) # create a document Object Model (DOM) from the Harvard Catalyst result
return [node.childNodes[0].data for node in dom.getElementsByTagName('PMID')] | d0cb5560ec8e6f80627b40c4623683732c84dc7c | 3,653,719 |
from typing import List
from typing import Dict
def upload_categories_to_fyle(workspace_id):
"""
Upload categories to Fyle
"""
try:
fyle_credentials: FyleCredential = FyleCredential.objects.get(workspace_id=workspace_id)
xero_credentials: XeroCredentials = XeroCredentials.objects.get(workspace_id=workspace_id)
fyle_connection = FyleConnector(
refresh_token=fyle_credentials.refresh_token
)
platform = PlatformConnector(fyle_credentials)
xero_connection = XeroConnector(
credentials_object=xero_credentials,
workspace_id=workspace_id
)
platform.categories.sync()
xero_connection.sync_accounts()
xero_attributes = DestinationAttribute.objects.filter(attribute_type='ACCOUNT', workspace_id=workspace_id)
xero_attributes = remove_duplicates(xero_attributes)
fyle_payload: List[Dict] = create_fyle_categories_payload(xero_attributes, workspace_id)
if fyle_payload:
fyle_connection.connection.Categories.post(fyle_payload)
platform.categories.sync()
return xero_attributes
except XeroCredentials.DoesNotExist:
logger.error(
'Xero Credentials not found for workspace_id %s',
workspace_id,
) | 0efcdc205a3aaa33acd88a231984ab9407d994ac | 3,653,721 |
def georegister_px_df(df, im_fname=None, affine_obj=None, crs=None,
geom_col='geometry', precision=None):
"""Convert a dataframe of geometries in pixel coordinates to a geo CRS.
Arguments
---------
df : :class:`pandas.DataFrame`
A :class:`pandas.DataFrame` with polygons in a column named
``"geometry"``.
im_fname : str, optional
A filename or :class:`rasterio.DatasetReader` object containing an
image that has the same bounds as the pixel coordinates in `df`. If
not provided, `affine_obj` and `crs` must both be provided.
affine_obj : `list` or :class:`affine.Affine`, optional
An affine transformation to apply to `geom` in the form of an
``[a, b, d, e, xoff, yoff]`` list or an :class:`affine.Affine` object.
Required if not using `raster_src`.
crs : dict, optional
The coordinate reference system for the output GeoDataFrame. Required
if not providing a raster image to extract the information from. Format
should be ``{'init': 'epsgxxxx'}``, replacing xxxx with the EPSG code.
geom_col : str, optional
The column containing geometry in `df`. If not provided, defaults to
``"geometry"``.
precision : int, optional
The decimal precision for output geometries. If not provided, the
vertex locations won't be rounded.
"""
if im_fname is not None:
affine_obj = rasterio.open(im_fname).transform
crs = rasterio.open(im_fname).crs
else:
if not affine_obj or not crs:
raise ValueError(
'If an image path is not provided, ' +
'affine_obj and crs must be.')
tmp_df = affine_transform_gdf(df, affine_obj, geom_col=geom_col,
precision=precision)
return gpd.GeoDataFrame(tmp_df, crs=crs) | e310fee04d214186f60965e68fb2b896b8ad0004 | 3,653,722 |
def load_ui_type(ui_file):
"""
Pyside "load_ui_type" command like PyQt4 has one, so we have to convert the
ui file to py code in-memory first and then execute it in a special frame
to retrieve the form_class.
"""
parsed = xml.parse(ui_file)
widget_class = parsed.find('widget').get('class')
form_class = parsed.find('class').text
with open(ui_file, 'r') as f:
o = StringIO()
frame = {}
uic.compileUi(f, o, indent=0)
pyc = compile(o.getvalue(), '<string>', 'exec')
        exec(pyc, frame)
# Fetch the base_class and form class based on their type
# in the xml from designer
form_class = frame['Ui_%s' % form_class]
base_class = eval('QtWidgets.%s' % widget_class)
return base_class, form_class | 1f9bfc05d52fd8f25d63104c93f675cc8e978501 | 3,653,723 |
def how_did_I_do(MLP, df, samples, expected):
"""Simple report of expected inputs versus actual outputs."""
predictions = MLP.predict(df[samples].to_list())
_df = pd.DataFrame({"Expected": df[expected], "Predicted": predictions})
_df["Correct"] = _df["Expected"] == _df["Predicted"]
print(f'The network got {sum(_df["Correct"])} out of {len(_df)} correct.')
return _df | 5fbebeac01dad933c20b3faf3f8682ae59d173ba | 3,653,724 |
def all_bootstrap_os():
"""Return a list of all the OS that can be used to bootstrap Spack"""
return list(data()['images']) | b7a58aabe17ee28ed783a9d43d1d8db5d0b85db3 | 3,653,725 |
def coords_to_volume(coords: np.ndarray, v_size: int,
noise_treatment: bool = False) -> np.ndarray:
"""Converts coordinates to binary voxels.""" # Input is centered on [0,0,0].
return weights_to_volume(coords=coords, weights=1, v_size=v_size, noise_treatment=noise_treatment) | 62e2ba5549faff51e4da68f6bc9521ff2f9ce9cb | 3,653,726 |
def logo(symbol, external=False, vprint=False):
""":return: Google APIs link to the logo for the requested ticker.
:param symbol: The ticker or symbol of the stock you would like to request.
:type symbol: string, required
"""
instance = iexCommon('stock', symbol, 'logo', external=external)
return instance.execute() | 320755632f81686ceb35a75b44c5176893ea37e2 | 3,653,727 |
def get_dependency_node(element):
""" Returns a Maya MFnDependencyNode from the given element
:param element: Maya node to return a dependency node class object
:type element: string
"""
    # add the element to a Maya selection list
m_selectin_list = OpenMaya.MSelectionList()
m_selectin_list.add(element)
# creates an MObject
m_object = OpenMaya.MObject()
# gets the MObject from the list
m_selectin_list.getDependNode(0, m_object)
return OpenMaya.MFnDependencyNode(m_object) | d573b14cf7ba54fd07f135d37c90cfe75e74992a | 3,653,728 |
def create_lineal_data(slope=1, bias=0, spread=0.25, data_size=50):
"""
    Helper function to create noisy linear data.
    :param slope: slope of the linear function.
    :param bias: bias of the linear function.
:param spread: spread of the normal distribution.
:param data_size: number of samples to generate.
:return x, y: data and labels
"""
x = np.linspace(0, 1, data_size)
y = x * slope + bias + np.random.normal(scale=spread, size=x.shape)
return x, y | fa735416a1f23a5aa29f66e353d187a5a896df7a | 3,653,729 |
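# Hedged usage sketch (illustrative): generate a small noisy linear dataset;
# numpy is assumed.
import numpy as np

x, y = create_lineal_data(slope=2, bias=1, spread=0.1, data_size=5)
print(x.shape, y.shape)  # (5,) (5,)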
def parse_station_resp(fid):
"""
Gather information from a single station IRIS response file
*fid*. Return the information as a :class:`RespMap`.
"""
resp_map = RespMap()
# sanity check initialization
network = None
stn = None
location = None
# skip initial header comment block
skip_block_header_comments(fid)
while True:
block_header, block, eof = parse_block(fid)
# sanity check (same network, station, and location across recorded blocks)
network = check(block_header, 'Network', network)
stn = check(block_header, 'Station', stn)
location = check(block_header, 'Location', location)
# store block information
interval = DateTimeInterval.closed_open(block_header['Start_date'],
block_header['End_date'])
resp_map.setdefault(interval, {})[block_header['Channel']] = block
if eof:
break
resp_map.network = network
resp_map.stn = stn
resp_map.location = location
return resp_map | 9d61b2c033008fc594b230aad83378a442cb748b | 3,653,730 |
def plot_pol(image, figsize=(8,8), print_stats=True, scaled=True, evpa_ticks=True):
"""Mimics the plot_pol.py script in ipole/scripts"""
fig, ax = plt.subplots(2, 2, figsize=figsize)
# Total intensity
plot_I(ax[0,0], image, xlabel=False)
# Quiver on intensity
if evpa_ticks:
plot_evpa_ticks(ax[0,0], image, n_evpa=30)
# Linear polarization fraction
plot_lpfrac(ax[0,1], image, xlabel=False, ylabel=False)
# evpa
plot_evpa_rainbow(ax[1,0], image)
# circular polarization fraction
plot_cpfrac(ax[1,1], image, ylabel=False)
if print_stats:
# print image-average quantities to command line
print("Flux [Jy]: {0:g} ({1:g} unpol)".format(image.flux(), image.flux_unpol()))
print("I,Q,U,V [Jy]: {0:g} {1:g} {2:g} {3:g}".format(image.Itot(), image.Qtot(),
image.Utot(), image.Vtot()))
print("LP [%]: {0:g}".format(100.*image.lpfrac_int()))
print("CP [%]: {0:g}".format(100.*image.cpfrac_int()))
print("EVPA [deg]: {0:g}".format(image.evpa_int()))
return fig | dc3741703435bb95b7ea511460d9feda39ea11f3 | 3,653,731 |
def _ensure_dtype_type(value, dtype: DtypeObj):
"""
Ensure that the given value is an instance of the given dtype.
    e.g. if our dtype is np.complex64, we should have an instance of that
as opposed to a python complex object.
Parameters
----------
value : object
dtype : np.dtype or ExtensionDtype
Returns
-------
object
"""
# Start with exceptions in which we do _not_ cast to numpy types
if is_extension_array_dtype(dtype):
return value
elif dtype == np.object_:
return value
elif isna(value):
# e.g. keep np.nan rather than try to cast to np.float32(np.nan)
return value
return dtype.type(value) | 36de4b993e2da0bacf3228d46e13332f89346210 | 3,653,733 |
from typing import List
def format_batch_request_last_fm(listens: List[Listen]) -> Request:
"""
Format a POST request to scrobble the given listens to Last.fm.
"""
assert len(listens) <= 50, 'Last.fm allows at most 50 scrobbles per batch.'
params = {
'method': 'track.scrobble',
'sk': LAST_FM_SESSION_KEY,
}
for i, listen in enumerate(listens):
params.update(listen.format_lastfm_scrobble(i))
return format_signed_request(http_method='POST', data=params) | 8f7b36b6880ecd91e19282b80975cccc999014b6 | 3,653,734 |
def get_entry_for_file_attachment(item_id, attachment):
"""
Creates a file entry for an attachment
:param item_id: item_id of the attachment
:param attachment: attachment dict
:return: file entry dict for attachment
"""
entry = fileResult(get_attachment_name(attachment.name), attachment.content)
entry["EntryContext"] = {
CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT
+ CONTEXT_UPDATE_FILE_ATTACHMENT: parse_attachment_as_dict(item_id, attachment)
}
return entry | c3d10402da0ada14289a7807ef1a57f97c6a22ba | 3,653,737 |
def check_all_particles_present(partlist, gambit_pdg_codes):
"""
Checks all particles exist in the particle_database.yaml.
"""
absent = []
for i in range(len(partlist)):
if not partlist[i].pdg() in list(gambit_pdg_codes.values()):
absent.append(partlist[i])
absent_by_pdg = [x.pdg() for x in absent]
if len(absent) == 0:
print("All particles are in the GAMBIT database.")
else:
print(("\nThe following particles (by PDG code) are missing from the "
"particle database: {0}. GUM is now adding them to "
"../config/particle_database.yaml.\n").format(absent_by_pdg))
return absent | eab49388d472934a61900d8e972c0f2ef01ae1fb | 3,653,738 |
def binarize_tree(t):
    Convert all n-ary nodes into left-branching subtrees
Returns a new tree. The original tree is intact.
"""
def recurs_binarize_tree(t):
if t.height() <= 2:
return t[0]
if len(t) == 1:
return recurs_binarize_tree(t[0])
elif len(t) == 2:
new_children = []
for i, child in enumerate(t):
new_children.append(recurs_binarize_tree(child))
return Tree(t.node, new_children)
#return Tree(-1, new_children)
else:
#left_child = recurs_binarize_tree(Tree(-1, t[0:-1]))
if t.node[-1] != '_':
new_node_name = t.node + '_'
else:
new_node_name = t.node
left_child = recurs_binarize_tree(Tree(new_node_name, t[0:-1]))
right_child = recurs_binarize_tree(t[-1])
#return Tree(-1, [left_child, right_child])
return Tree(t.node, [left_child, right_child])
return recurs_binarize_tree(t) | 5f9bc8ab7a0c1ab862b7366b188072006a80ff51 | 3,653,739 |
def calculate_prfs_using_rdd(y_actual, y_predicted, average='macro'):
"""
Determines the precision, recall, fscore, and support of the predictions.
    With average='macro', the algorithm calculates metrics for each label and finds their unweighted mean.
See http://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html for details
A better metric for recommender systems is precision at N (also in this package)
Args:
y_actual: actual ratings in the format of an RDD of [ (userId, itemId, actualRating) ]
y_predicted: predicted ratings in the format of an RDD of [ (userId, itemId, predictedRating) ]
Returns:
precision, recall, fbeta_score, and support values
"""
    prediction_rating_pairs = y_predicted.map(lambda x: ((x[0], x[1]), x[2]))\
        .join(y_actual.map(lambda x: ((x[0], x[1]), x[2])))\
        .map(lambda kv: (kv[0][0], kv[0][1], kv[1][0], kv[1][1]))
    true_vals = np.array(prediction_rating_pairs.map(lambda row: row[3]).collect())
    pred_vals = np.array(prediction_rating_pairs.map(lambda row: row[2]).collect())
    return precision_recall_fscore_support([int(np.round(x)) for x in true_vals],
                                           [int(np.round(x)) for x in pred_vals], average=average) | 01fadc6a03f6ce24e736da9d1cfd088b490aa482 | 3,653,740 |
def translation_from_matrix(M):
"""Returns the 3 values of translation from the matrix M.
Parameters
----------
M : list[list[float]]
A 4-by-4 transformation matrix.
Returns
-------
[float, float, float]
The translation vector.
"""
return [M[0][3], M[1][3], M[2][3]] | 2b3bddd08772b2480a923a778d962f8e94f4b78a | 3,653,741 |
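# Hedged usage sketch (illustrative): pull the translation vector out of a
# 4-by-4 transformation matrix.
M = [[1, 0, 0, 5],
     [0, 1, 0, -2],
     [0, 0, 1, 3],
     [0, 0, 0, 1]]
print(translation_from_matrix(M))  # [5, -2, 3]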
def saving_filename_boundary(save_location, close_up, beafort, wave_roughness):
""" Setting the filename of the figure """
if close_up is None:
return save_location + 'Boundary_comparison_Bft={}_roughness={}.png'.format(beafort, wave_roughness)
else:
ymax, ymin = close_up
return save_location + 'Boundary_comparison_Bft={}_max={}_min={}_roughness={}.png'.format(beafort, ymax, ymin,
wave_roughness) | c0357a211adc95c35873a0f3b0c900f6b5fe42d0 | 3,653,742 |
def get_library() -> CDLL:
"""Return the CDLL instance, loading it if necessary."""
global LIB
if LIB is None:
LIB = _load_library("aries_askar")
_init_logger()
return LIB | 64183953e7ab3f4e617b050fbf985d79aebc9b95 | 3,653,743 |
def childs_page_return_right_login(response_page, smarsy_login):
"""
    Receive the HTML page from the login function and check that we got the expected source
"""
if smarsy_login in response_page:
return True
else:
raise ValueError('Invalid Smarsy Login') | e7cb9b8d9df8bd5345f308e78cec28a20919370e | 3,653,744 |
def merge_files(intakes, outcomes):
"""
    Merges the intakes and outcomes datasets to create a unique line for each animal in the shelter, capturing each animal's full story.
    Takes the intakes file and then the outcomes file as arguments.
    Returns the merged dataset.
"""
# Merge intakes and outcomes on animal id and year
animal_shelter_df = pd.merge(intakes,
outcomes,
on=['animal_id', 'year'],
how='left',
suffixes=('_intake', '_outcome'))
    # Filter out animals that do not yet have outcomes and keep animals whose outcome date is later than their intake date
animal_shelter_df = animal_shelter_df[(~animal_shelter_df['date_o'].isna())
& (animal_shelter_df['date_o'] > animal_shelter_df['date_i'])]
# Creates new days_in_shelter variable
animal_shelter_df['days_in_shelter'] = (animal_shelter_df['date_o'] - animal_shelter_df['date_i']).dt.days
# Sorts the column names to be alphabetical
animal_shelter_df = animal_shelter_df[animal_shelter_df.columns.sort_values()]
return animal_shelter_df | c7110cf1b5fe7fad52c3e331c8d6840de83891b3 | 3,653,745 |
def construct_features_MH_1(data):
"""
Processes the provided pandas dataframe object by:
Deleting the original METER_ID, LOCATION_NO, BILLING_CYCLE, COMMENTS, and DAYS_FROM_BILLDT columns
Constructing a time series index out of the year, month, day, hour, minute, second columns
Sorting by the time series index
"""
try:
del data['METER_ID']
del data['LOCATION_HASH']
del data['BILLING_CYCLE']
del data['COMMENTS']
del data['DAYS_FROM_BILLDT']
return data
except Exception as e:
logger.info(
'There was a problem constructing the feature vector for the provided data set: {}'.format(str(e))) | 32f238ee730e84c0c699759913ffd2f6a2fc6fbf | 3,653,747 |
from functools import cmp_to_key
def sort_observations(observations):
"""
Method to sort observations to make sure that the "winner" is at index 0
"""
return sorted(observations, key=cmp_to_key(cmp_observation), reverse=True) | 183b044a48b4a7ea5093efaa92bd0977b085d949 | 3,653,748 |
def coor_trans(point, theta):
"""
    coordinate transformation
    theta direction: clockwise rotation is positive
"""
point = np.transpose(point)
k = np.array([[np.cos(theta), np.sin(theta)],
[-np.sin(theta), np.cos(theta)]])
print(point)
# return np.dot(k, point)
return np.round(np.dot(k, point),6) | aa3b1532c629011e6f0ce72dc80eb1eebfc43765 | 3,653,749 |
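# Hedged usage sketch (illustrative): rotate the point (1, 0) by pi/2 using the
# clockwise-positive convention above; numpy is assumed.
import numpy as np

print(coor_trans(np.array([1.0, 0.0]), np.pi / 2))  # ~[ 0. -1.]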
import torch
import time
def ppo(
env_fn,
actor_critic=core.MLPActorCritic2Heads,
ac_kwargs=dict(),
seed=0,
steps_per_epoch=4000,
epochs=100,
epochs_rnd_warmup=1,
gamma=0.99,
clip_ratio=0.2,
pi_lr=3e-4,
vf_lr=1e-3,
rnd_lr=1e-3,
train_pi_iters=80,
train_v_iters=80,
train_rnd_iters=80,
lam=0.97,
max_ep_len=200,
target_kl=0.01,
logger_kwargs=dict(),
save_freq=10,
scale_reward=100,
only_intr=False,
norm_intr=False,
alpha_std_est=0.05,
single_head=False,
):
"""
Proximal Policy Optimization (by clipping),
with early stopping based on approximate KL
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
actor_critic: The constructor method for a PyTorch Module with a
``step`` method, an ``act`` method, a ``pi`` module, and a ``v``
module. The ``step`` method should accept a batch of observations
and return:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``a`` (batch, act_dim) | Numpy array of actions for each
| observation.
``v`` (batch,) | Numpy array of value estimates
| for the provided observations.
``logp_a`` (batch,) | Numpy array of log probs for the
| actions in ``a``.
=========== ================ ======================================
The ``act`` method behaves the same as ``step`` but only returns ``a``.
The ``pi`` module's forward call should accept a batch of
observations and optionally a batch of actions, and return:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``pi`` N/A | Torch Distribution object, containing
| a batch of distributions describing
| the policy for the provided observations.
``logp_a`` (batch,) | Optional (only returned if batch of
| actions is given). Tensor containing
| the log probability, according to
| the policy, of the provided actions.
| If actions not given, will contain
| ``None``.
=========== ================ ======================================
The ``v`` module's forward call should accept a batch of observations
and return:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``v`` (batch,) | Tensor containing the value estimates
| for the provided observations. (Critical:
| make sure to flatten this!)
=========== ================ ======================================
ac_kwargs (dict): Any kwargs appropriate for the ActorCritic object
you provided to PPO.
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs of interaction (equivalent to
number of policy updates) to perform.
epochs_rnd_warmup (int): Number of epochs of training RND before starting training agent.
gamma (float): Discount factor. (Always between 0 and 1.)
clip_ratio (float): Hyperparameter for clipping in the policy objective.
Roughly: how far can the new policy go from the old policy while
still profiting (improving the objective function)? The new policy
can still go farther than the clip_ratio says, but it doesn't help
on the objective anymore. (Usually small, 0.1 to 0.3.) Typically
denoted by :math:`\\epsilon`.
pi_lr (float): Learning rate for policy optimizer.
vf_lr (float): Learning rate for value function optimizer.
rnd_lr (float): Learning rate for RND optimizer.
train_pi_iters (int): Maximum number of gradient descent steps to take
on policy loss per epoch. (Early stopping may cause optimizer
to take fewer than this.)
train_v_iters (int): Number of gradient descent steps to take on
value function per epoch.
train_rnd_iters (int): Number of gradient descent steps to take on
RND per epoch.
lam (float): Lambda for GAE-Lambda. (Always between 0 and 1,
close to 1.)
max_ep_len (int): Maximum length of trajectory / episode / rollout.
target_kl (float): Roughly what KL divergence we think is appropriate
between new and old policies after an update. This will get used
for early stopping. (Usually small, 0.01 or 0.05.)
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
scale_reward (float): total_reward = extr_reward + scale_reward*intr_reward
"""
# Special function to avoid certain slowdowns from PyTorch + MPI combo.
setup_pytorch_for_mpi()
# Set up logger and save configuration
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
# Random seed
seed += 10000 * proc_id()
torch.manual_seed(seed)
np.random.seed(seed)
# Instantiate environment
env = env_fn()
obs_dim = env.observation_space.shape
act_dim = env.action_space.shape
# Create actor-critic module
ac = actor_critic(env.observation_space, env.action_space, **ac_kwargs)
# Sync params across processes
sync_params(ac)
# Create RND module and optimizer
rnd = RND(obs_dim[0], (32, 32), nn.Sigmoid)
sync_params(rnd)
rnd_optimizer = Adam(rnd.predictor_network.parameters(), lr=rnd_lr)
# Create running estimator for reward normalization
reward_std_estimator = core.running_exp_estimator(alpha_std_est)
# Count variables
var_counts = tuple(core.count_vars(module) for module in [ac.pi, ac.v_extr, ac.v_intr, rnd.predictor_network])
logger.log("\nNumber of parameters: \t pi: %d, \t v_extr: %d, \t v_intr: %d, \t rnd: %d\n" % var_counts)
local_steps_per_epoch = int(steps_per_epoch / num_procs())
o = env.reset()
# Train RND on random agent for 'epochs_rnd_warmup' epochs
for epoch in range(epochs_rnd_warmup):
for t in range(local_steps_per_epoch):
a, _, _, _ = ac.step(torch.as_tensor(o, dtype=torch.float32))
next_o, r, d, _ = env.step(a)
rnd_loss = rnd.loss(torch.as_tensor(next_o, dtype=torch.float32))
reward_std_estimator.update(rnd_loss.item())
rnd_optimizer.zero_grad()
rnd_loss.backward()
mpi_avg_grads(rnd.predictor_network) # average grads across MPI processes
rnd_optimizer.step()
# Set up experience buffer
buf = PPOBuffer(obs_dim, act_dim, local_steps_per_epoch, gamma, lam)
# Set up function for computing PPO policy loss
def compute_loss_pi(data):
obs, act, adv, logp_old = data["obs"], data["act"], data["adv"], data["logp"]
# Policy loss
pi, logp = ac.pi(obs, act)
ratio = torch.exp(logp - logp_old)
clip_adv = torch.clamp(ratio, 1 - clip_ratio, 1 + clip_ratio) * adv
loss_pi = -(torch.min(ratio * adv, clip_adv)).mean()
# Useful extra info
approx_kl = (logp_old - logp).mean().item()
ent = pi.entropy().mean().item()
clipped = ratio.gt(1 + clip_ratio) | ratio.lt(1 - clip_ratio)
clipfrac = torch.as_tensor(clipped, dtype=torch.float32).mean().item()
pi_info = dict(kl=approx_kl, ent=ent, cf=clipfrac)
return loss_pi, pi_info
# Set up functions for computing value loss
def compute_loss_v_extr(data):
obs, ret = data["obs"], data["ret_extr"]
return ((ac.v_extr(obs) - ret) ** 2).mean()
def compute_loss_v_intr(data):
obs, ret = data["obs"], data["ret_intr"]
return ((ac.v_intr(obs) - ret) ** 2).mean()
# Set up optimizers for policy and value function
pi_optimizer = Adam(ac.pi.parameters(), lr=pi_lr)
vf_extr_optimizer = Adam(ac.v_extr.parameters(), lr=vf_lr)
if not single_head:
vf_intr_optimizer = Adam(ac.v_intr.parameters(), lr=vf_lr)
# Set up model saving
logger.setup_pytorch_saver(ac)
def update(epoch):
data = buf.get()
pi_l_old, pi_info_old = compute_loss_pi(data)
pi_l_old = pi_l_old.item()
v_extr_l_old = compute_loss_v_extr(data).item()
if not single_head:
v_intr_l_old = compute_loss_v_intr(data).item()
loss_rnd_old = rnd.loss(data["obs"]).item()
# Train policy with multiple steps of gradient descent
for i in range(train_pi_iters):
pi_optimizer.zero_grad()
loss_pi, pi_info = compute_loss_pi(data)
kl = mpi_avg(pi_info["kl"])
if kl > 1.5 * target_kl:
logger.log("Early stopping at step %d due to reaching max kl." % i)
break
loss_pi.backward()
mpi_avg_grads(ac.pi) # average grads across MPI processes
pi_optimizer.step()
logger.store(StopIter=i)
# Value function learning
for i in range(train_v_iters):
vf_extr_optimizer.zero_grad()
loss_v_extr = compute_loss_v_extr(data)
loss_v_extr.backward()
mpi_avg_grads(ac.v_extr) # average grads across MPI processes
vf_extr_optimizer.step()
if not single_head:
for i in range(train_v_iters):
vf_intr_optimizer.zero_grad()
loss_v_intr = compute_loss_v_intr(data)
loss_v_intr.backward()
mpi_avg_grads(ac.v_intr) # average grads across MPI processes
vf_intr_optimizer.step()
for i in range(train_rnd_iters):
rnd_optimizer.zero_grad()
loss_rnd = rnd.loss(data["obs"])
loss_rnd.backward()
mpi_avg_grads(rnd.predictor_network) # average grads across MPI processes
rnd_optimizer.step()
# Log changes from update
kl, ent, cf = pi_info["kl"], pi_info_old["ent"], pi_info["cf"]
logger.store(
LossPi=pi_l_old,
LossV_extr=v_extr_l_old,
LossRND=loss_rnd_old,
KL=kl,
Entropy=ent,
ClipFrac=cf,
DeltaLossPi=(loss_pi.item() - pi_l_old),
DeltaLossV_extr=(loss_v_extr.item() - v_extr_l_old),
DeltaLossRND=(loss_rnd.item() - loss_rnd_old),
)
if not single_head:
logger.store(LossV_intr=v_intr_l_old, DeltaLossV_intr=(loss_v_intr.item() - v_intr_l_old))
# Prepare for interaction with environment
start_time = time.time()
o, ep_ret_extr, ep_ret_intr, ep_len = env.reset(), 0, 0, 0
# Main loop: collect experience in env and update/log each epoch
for epoch in range(epochs):
for t in range(local_steps_per_epoch):
a, v_extr, v_intr, logp = ac.step(torch.as_tensor(o, dtype=torch.float32))
next_o, r_extr, d, _ = env.step(a)
rnd_reward = rnd.reward(torch.as_tensor(next_o, dtype=torch.float32))
if norm_intr:
reward_std_estimator.update(rnd_reward)
r_intr = rnd_reward / reward_std_estimator.get_std()
logger.store(EpRet_exp_std=reward_std_estimator.get_std())
else:
r_intr = rnd_reward
# save and log
ep_ret_extr += r_extr
ep_ret_intr += r_intr
ep_len += 1
if only_intr:
r_extr = 0
if single_head:
buf.store(o, a, r_extr + scale_reward * r_intr, 0, v_extr, 0, logp)
else:
buf.store(o, a, r_extr, scale_reward * r_intr, v_extr, v_intr, logp)
logger.store(VVals_extr=v_extr, VVals_intr=v_intr)
# Update obs (critical!)
o = next_o
timeout = ep_len == max_ep_len
terminal = d or timeout
epoch_ended = t == local_steps_per_epoch - 1
if terminal or epoch_ended:
# if epoch_ended and not(terminal):
# print('Warning: trajectory cut off by epoch at %d steps.' % ep_len, flush=True)
# logger.log('Warning: trajectory cut off by epoch at %d steps.' % ep_len)
_, v_extr, v_intr, _ = ac.step(torch.as_tensor(o, dtype=torch.float32))
# if trajectory reached terminal state, value_extr target is zero, else bootstrap value target
if not (timeout or epoch_ended):
v_extr = 0
if single_head:
buf.finish_path(v_extr + v_intr, 0)
else:
buf.finish_path(v_extr, v_intr)
if terminal:
# only save EpRet / EpLen if trajectory finished
logger.store(EpRet_extr=ep_ret_extr, EpLen=ep_len, EpRet_intr=ep_ret_intr)
o, ep_ret_extr, ep_ret_intr, ep_len = env.reset(), 0, 0, 0
# Save model
if (epoch % save_freq == 0) or (epoch == epochs - 1):
logger.save_state({"env": env}, None)
# Perform PPO update!
update(epoch)
# Log info about epoch
logger.log_tabular("Epoch", epoch)
logger.log_tabular("EpRet_extr", with_min_and_max=True)
logger.log_tabular("EpRet_intr", average_only=True)
if norm_intr:
logger.log_tabular("EpRet_exp_std", average_only=True)
logger.log_tabular("EpLen", average_only=True)
logger.log_tabular("VVals_extr", average_only=True)
if not single_head:
logger.log_tabular("VVals_intr", average_only=True)
logger.log_tabular("LossPi", average_only=True)
logger.log_tabular("LossV_extr", average_only=True)
if not single_head:
logger.log_tabular("LossV_intr", average_only=True)
logger.log_tabular("LossRND", average_only=True)
logger.log_tabular("DeltaLossPi", average_only=True)
logger.log_tabular("DeltaLossV_extr", average_only=True)
if not single_head:
logger.log_tabular("DeltaLossV_intr", average_only=True)
logger.log_tabular("TotalEnvInteracts", (epoch + 1) * steps_per_epoch)
logger.log_tabular("Entropy", average_only=True)
logger.log_tabular("KL", average_only=True)
logger.log_tabular("ClipFrac", average_only=True)
logger.log_tabular("StopIter", average_only=True)
logger.log_tabular("Time", time.time() - start_time)
logger.dump_tabular() | 481da1fc7cc0677e02009d983345a15fbca23159 | 3,653,750 |
import heapq
def ltopk(k, seq, key=None):
"""
>>> ltopk(2, [1, 100, 10, 1000])
[1000, 100]
>>> ltopk(2, ['Alice', 'Bob', 'Charlie', 'Dan'], key=len)
['Charlie', 'Alice']
"""
if key is not None and not callable(key):
key = getter(key)
return list(heapq.nlargest(k, seq, key=key)) | 3d41f8576ca6b2741d12ca8b80c8fb220166b85b | 3,653,751 |
def index():
""" Root URL response """
return (
jsonify(
name="Promotion REST API Service",
version="1.0",
),
status.HTTP_200_OK,
) | 7c45e54c3500f638291c85d38d27976952d0a6e3 | 3,653,752 |
def add_cameras_default(scene):
""" Make two camera (main/top) default setup for demo images."""
cam_main = create_camera_perspective(
location=(-33.3056, 24.1123, 26.0909),
rotation_quat=(0.42119, 0.21272, -0.39741, -0.78703),
)
scene.collection.objects.link(cam_main)
cam_top = create_camera_top_view_ortho()
scene.collection.objects.link(cam_top)
# make this the main scene camera
scene.camera = cam_main
return cam_main, cam_top | 50428d5f3c79c4581e397af1411a5a92055fe695 | 3,653,753 |
def distr_mean_stde(distribution: np.ndarray) -> tuple:
"""
Purpose:
Compute the mean and standard deviation for a distribution.
Args:
distribution (np.ndarray): distribution
Returns:
        tuple (i.e. distribution mean and standard deviation)
"""
# Compute and print the mean, stdev of the resample distribution of means
distribution_mean = np.mean(distribution)
standard_error = np.std(distribution)
print('Bootstrap Distribution: center={:0.2f}, spread={:0.2f}'.format(distribution_mean, standard_error))
print()
return distribution_mean, standard_error | 9232587e2c1e71a8f7c672cb962961cab7ad8d85 | 3,653,754 |
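# Hedged usage sketch (illustrative): summarize a synthetic bootstrap
# distribution of means; numpy is assumed.
import numpy as np

np.random.seed(0)
boot_means = np.random.normal(loc=10.0, scale=0.5, size=1000)
mean, stderr = distr_mean_stde(boot_means)  # prints center ~10.0, spread ~0.5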
from operator import and_
def release_waiting_requests_grouped_fifo(rse_id, count=None, direction='destination', deadline=1, volume=0, session=None):
"""
    Release waiting requests. Transfer requests that were requested first get released first (FIFO).
    Additionally, all requests to DIDs that are attached to the same dataset get released if one child of the dataset is chosen to be released (Grouped FIFO).
    :param rse_id: The RSE id.
    :param count: The count to be released. If None, release all waiting requests.
    :param direction: Whether requests are grouped by source RSE or destination RSE.
    :param deadline: Maximal waiting time in hours until a dataset gets released.
    :param volume: The maximum volume in bytes that should be transferred.
:param session: The database session.
"""
amount_updated_requests = 0
# Release requests that exceeded waiting time
if deadline:
amount_updated_requests = release_waiting_requests_per_deadline(rse_id=rse_id, deadline=deadline, session=session)
count = count - amount_updated_requests
grouped_requests_subquery, filtered_requests_subquery = create_base_query_grouped_fifo(rse_id=rse_id, filter_by_rse=direction, session=session)
    # accumulate the number of children per dataset, combine with each request, and only keep requests that don't exceed the limit
cumulated_children_subquery = session.query(grouped_requests_subquery.c.name,
grouped_requests_subquery.c.scope,
grouped_requests_subquery.c.amount_childs,
grouped_requests_subquery.c.oldest_requested_at,
func.sum(grouped_requests_subquery.c.amount_childs).over(order_by=(grouped_requests_subquery.c.oldest_requested_at)).label('cum_amount_childs'))\
.subquery()
cumulated_children_subquery = session.query(filtered_requests_subquery.c.id)\
.join(cumulated_children_subquery, and_(filtered_requests_subquery.c.dataset_name == cumulated_children_subquery.c.name, filtered_requests_subquery.c.dataset_scope == cumulated_children_subquery.c.scope))\
.filter(cumulated_children_subquery.c.cum_amount_childs - cumulated_children_subquery.c.amount_childs < count)\
.subquery()
# needed for mysql to update and select from the same table
cumulated_children_subquery = session.query(cumulated_children_subquery.c.id).subquery()
statement = update(models.Request).where(models.Request.id.in_(cumulated_children_subquery)).values(state=RequestState.QUEUED)
amount_updated_requests += session.execute(statement).rowcount
# release requests where the whole datasets volume fits in the available volume space
if volume:
amount_updated_requests += release_waiting_requests_per_free_volume(rse_id=rse_id, volume=volume, session=session)
return amount_updated_requests | 9a52a28fe06634de73a0436721aa97e590612e17 | 3,653,755 |
def _get_gap_memory_pool_size_MB():
"""
Return the gap memory pool size suitable for usage on the GAP
command line.
The GAP 4.5.6 command line parser had issues with large numbers, so
we return it in megabytes.
OUTPUT:
String.
EXAMPLES:
sage: from sage.interfaces.gap import \
... _get_gap_memory_pool_size_MB
sage: _get_gap_memory_pool_size_MB() # random output
'1467m'
"""
pool = get_gap_memory_pool_size()
pool = (pool // (1024**2)) + 1
return str(pool)+'m' | 035072ff6fff18859717b131cdd660f252ac6262 | 3,653,756 |
async def order_book_l2(symbol: str) -> dict:
"""オーダーブックを取得"""
async with pybotters.Client(base_url=base_url, apis=apis) as client:
r = await client.get("/orderBook/L2", params={"symbol": symbol,},)
data = await r.json()
return data | 4c9b8e067874871cda8b9a9f113f8ff6e4529c02 | 3,653,757 |
async def create_comment_in_post(*, post: models.Post = Depends(resolve_post), created_comment: CreateComment,
current_user: models.User = Depends(resolve_current_user),
db: Session = Depends(get_db)):
"""Create a comment in a post."""
return crud.create_comment(db, author_id=current_user.id, parent_resub_id=post.parent_resub_id,
parent_post_id=post.id, parent_comment_id=None, content=created_comment.content) | 90e4a8628d631bcb33eb5462e0e8001f90fb5c86 | 3,653,759 |
def sigma_bot(sigma_lc_bot, sigma_hc_bot, x_aver_bot_mass):
"""
    Calculates the surface tension at the bottom of the column.
    Parameters
    ----------
    sigma_lc_bot : float
        The surface tension of the low-boiling component at the bottom of the column, [N / m]
    sigma_hc_bot : float
        The surface tension of the high-boiling component at the bottom of the column, [N / m]
    x_aver_bot_mass : float
        The average mass concentration at the bottom of the column, [kg/kg]
Returns
-------
sigma_bot : float
The surface tension at the bottom of column, [N / m]
References
----------
&&&&&
"""
return (sigma_lc_bot * x_aver_bot_mass + (1 - x_aver_bot_mass) * sigma_hc_bot) | 5105e5592556cab14cb62ab61b4f242499b33e1d | 3,653,760 |
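# Hedged worked example with made-up property values (illustrative): the
# surface tension at the bottom is the mass-fraction-weighted mean of the two
# pure-component values.
sigma = sigma_bot(sigma_lc_bot=0.020, sigma_hc_bot=0.030, x_aver_bot_mass=0.25)
print(sigma)  # ~0.0275 N/m = 0.020*0.25 + 0.030*0.75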
def _normalize_zonal_lat_lon(ds: xr.Dataset) -> xr.Dataset:
"""
    In case the dataset only contains latitude_centers and is a zonal mean dataset,
    the longitude dimension is created and filled with the variable value of the corresponding latitude.
:param ds: some xarray dataset
:return: a normalized xarray dataset
"""
if 'latitude_centers' not in ds.coords or 'lon' in ds.coords:
return ds
ds_zonal = ds.copy()
resolution = (ds.latitude_centers[1].values - ds.latitude_centers[0].values)
ds_zonal = ds_zonal.assign_coords(
lon=[i + (resolution / 2) for i in np.arange(-180.0, 180.0, resolution)])
for var in ds_zonal.data_vars:
if 'latitude_centers' in ds_zonal[var].dims:
ds_zonal[var] = xr.concat([ds_zonal[var] for _ in ds_zonal.lon], 'lon')
ds_zonal[var]['lon'] = ds_zonal.lon
var_dims = ds_zonal[var].attrs.get('dimensions', [])
lat_center_index = var_dims.index('latitude_centers')
var_dims.remove('latitude_centers')
var_dims.append('lat')
var_dims.append('lon')
var_chunk_sizes = ds_zonal[var].attrs.get('chunk_sizes', [])
lat_chunk_size = var_chunk_sizes[lat_center_index]
del var_chunk_sizes[lat_center_index]
var_chunk_sizes.append(lat_chunk_size)
var_chunk_sizes.append(ds_zonal.lon.size)
ds_zonal = ds_zonal.rename_dims({'latitude_centers': 'lat'})
ds_zonal = ds_zonal.assign_coords(lat=ds.latitude_centers.values)
ds_zonal = ds_zonal.drop_vars('latitude_centers')
ds_zonal = ds_zonal.transpose(..., 'lat', 'lon')
has_lon_bnds = 'lon_bnds' in ds_zonal.coords or 'lon_bnds' in ds_zonal
if not has_lon_bnds:
lon_values = [[i - (resolution / 2), i + (resolution / 2)] for i in ds_zonal.lon.values]
ds_zonal = ds_zonal.assign_coords(lon_bnds=xr.DataArray(lon_values, dims=['lon', 'bnds']))
has_lat_bnds = 'lat_bnds' in ds_zonal.coords or 'lat_bnds' in ds_zonal
if not has_lat_bnds:
lat_values = [[i - (resolution / 2), i + (resolution / 2)] for i in ds_zonal.lat.values]
ds_zonal = ds_zonal.assign_coords(lat_bnds=xr.DataArray(lat_values, dims=['lat', 'bnds']))
ds_zonal.lon.attrs['bounds'] = 'lon_bnds'
ds_zonal.lon.attrs['long_name'] = 'longitude'
ds_zonal.lon.attrs['standard_name'] = 'longitude'
ds_zonal.lon.attrs['units'] = 'degrees_east'
ds_zonal.lat.attrs['bounds'] = 'lat_bnds'
ds_zonal.lat.attrs['long_name'] = 'latitude'
ds_zonal.lat.attrs['standard_name'] = 'latitude'
ds_zonal.lat.attrs['units'] = 'degrees_north'
return ds_zonal | 0a6021cc22271d6489a1a946e5ff38a6019ae3e8 | 3,653,761 |
def setup_audio(song_filename):
"""Setup audio file
and setup setup the output device.output is a lambda that will send data to
fm process or to the specified ALSA sound card
:param song_filename: path / filename to music file
:type song_filename: str
:return: output, fm_process, fft_calc, music_file
:rtype tuple: lambda, subprocess, fft.FFT, decoder
"""
# Set up audio
force_header = False
if any([ax for ax in [".mp4", ".m4a", ".m4b"] if ax in song_filename]):
force_header = True
music_file = decoder.open(song_filename, force_header)
sample_rate = music_file.getframerate()
num_channels = music_file.getnchannels()
fft_calc = fft.FFT(CHUNK_SIZE,
sample_rate,
hc.GPIOLEN,
cm.audio_processing.min_frequency,
cm.audio_processing.max_frequency,
cm.audio_processing.custom_channel_mapping,
cm.audio_processing.custom_channel_frequencies)
# setup output device
output = set_audio_device(sample_rate, num_channels)
chunks_per_sec = ((16 * num_channels * sample_rate) / 8) / CHUNK_SIZE
light_delay = int(cm.audio_processing.light_delay * chunks_per_sec)
# Output a bit about what we're about to play to the logs
nframes = str(music_file.getnframes() / sample_rate)
log.info("Playing: " + song_filename + " (" + nframes + " sec)")
return output, fft_calc, music_file, light_delay | 63ca73faf6511047d273e3b36d3ef450dc073a2f | 3,653,762 |
def _collect_package_prefixes(package_dir, packages):
"""
Collect the list of prefixes for all packages
The list is used to match paths in the install manifest to packages
specified in the setup.py script.
The list is sorted in decreasing order of prefix length so that paths are
matched with their immediate parent package, instead of any of that
package's ancestors.
For example, consider the project structure below. Assume that the
setup call was made with a package list featuring "top" and "top.bar", but
not "top.not_a_subpackage".
::
top/ -> top/
__init__.py -> top/__init__.py (parent: top)
foo.py -> top/foo.py (parent: top)
bar/ -> top/bar/ (parent: top)
__init__.py -> top/bar/__init__.py (parent: top.bar)
not_a_subpackage/ -> top/not_a_subpackage/ (parent: top)
data_0.txt -> top/not_a_subpackage/data_0.txt (parent: top)
data_1.txt -> top/not_a_subpackage/data_1.txt (parent: top)
The paths in the generated install manifest are matched to packages
according to the parents indicated on the right. Only packages that are
specified in the setup() call are considered. Because of the sort order,
the data files on the bottom would have been mapped to
"top.not_a_subpackage" instead of "top", proper -- had such a package been
specified.
"""
return list(
sorted(
((package_dir[package].replace(".", "/"), package) for package in packages),
key=lambda tup: len(tup[0]),
reverse=True,
)
) | 6c497725e8a441f93f55084ef42489f97e35acf8 | 3,653,763 |
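# Hedged usage sketch (illustrative): longer prefixes sort first so paths match
# their immediate parent package; package_dir maps package names to directories.
print(_collect_package_prefixes({"top": "top", "top.bar": "top/bar"},
                                ["top", "top.bar"]))
# [('top/bar', 'top.bar'), ('top', 'top')]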
def _grae_ymin_ ( graph ) :
"""Get minimal y for the points
>>> graph = ...
>>> ymin = graph.ymin ()
"""
ymn = None
np = len(graph)
for ip in range( np ) :
x , exl , exh , y , eyl , eyh = graph[ip]
y = y - abs( eyl )
if None == ymn or y <= ymn : ymn = y
return ymn | 99efb6f6466e56b350da02963e442ac2b991ecf5 | 3,653,764 |
def vec_sum(a, b):
"""Compute the sum of two vector given in lists."""
return [va + vb for va, vb in zip(a, b)] | d85f55e22a60af66a85eb6c8cd180007351bf5d9 | 3,653,767 |
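# Hedged usage sketch (illustrative) for elementwise vector addition.
print(vec_sum([1, 2, 3], [10, 20, 30]))  # [11, 22, 33]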
import time
def one_v_one_classifiers(x,y,lambd,max_iters,eps=.0001):
"""
Function for running a 1v1 classifier on many classes using the linearsvm function.
Inputs:
x: numpy matrix
a matrix of size nxd
y: numpy matrix
a matrix of size nx1
lambd: float
lambda, the penalization constant. Default = -1
max_iters: int
maximum number of iterations. Default: 100
eps: float
the stopping criteria for the normalized gradient. Default: .001
Returns:
vals: numpy matrix
beta values for each pair of classes
i_vals: numpy matrix
matrix of first class tested for 1v1 comparison of class i vs class j
j_vals: numpy matrix
matrix of second class tested for 1v1 comparison of class i vs class j
"""
classified_vals = []
i_vals = []
j_vals = []
classes = len(np.unique(y))
t_init = 10**-1
t0 = time.time()
vals_to_run = []
k=3 # 3 fold CV
num_lambdas = 3 # num lambdas to try in CV
vals = []
vals_to_run = [] # group
for i in range(classes):
for j in range(i+1,classes):
features_to_test = x[(y==i)|(y==j)]
scaler = preprocessing.StandardScaler()
features_to_test = scaler.fit_transform(features_to_test)
labels_to_test = y[(y==i)|(y==j)]
labels_to_test = ((labels_to_test - min(labels_to_test)) / (max(labels_to_test)-min(labels_to_test)))*2-1
# save a list of parameters to call run_svm as a list
vals_to_run.append( (features_to_test,
labels_to_test,
k,
max_iters,
num_lambdas ,
t_init,
lambd ,
eps) )
#classified_vals.append(betas[-1])
i_vals.append(i)
j_vals.append(j)
print("setup complete. Time :",time.time()-t0, " " , time.strftime('%X %x %Z'))
t0 = time.time()
#do computation
pool = ThreadPool(35)
vals_temp = pool.starmap(run_svm,vals_to_run)
objs = np.asarray(vals_temp)[:,1]
vals_temp = np.asarray(vals_temp)[:,0]
vals = vals + list(vals_temp)
return np.asarray(vals), np.asarray(i_vals) , np.asarray(j_vals), objs | 3cf564039c78363021cb65650dd50db9536922bb | 3,653,768 |
def rlsp(mdp, s_current, p_0, horizon, temp=1, epochs=1, learning_rate=0.2,
r_prior=None, r_vec=None, threshold=1e-3, check_grad_flag=False):
"""The RLSP algorithm"""
def compute_grad(r_vec):
# Compute the Boltzmann rational policy \pi_{s,a} = \exp(Q_{s,a} - V_s)
policy = value_iter(mdp, 1, mdp.f_matrix @ r_vec, horizon, temp)
d_last_step, d_last_step_list = compute_d_last_step(
mdp, policy, p_0, horizon, return_all=True)
if d_last_step[s_current] == 0:
print('Error in om_method: No feasible trajectories!')
return r_vec
expected_features, expected_features_list = compute_feature_expectations(
mdp, policy, p_0, horizon)
G = compute_g(mdp, policy, p_0, horizon, d_last_step_list, expected_features_list)
# Compute the gradient
dL_dr_vec = G[s_current] / d_last_step[s_current]
# Gradient of the prior
if r_prior!= None: dL_dr_vec += r_prior.logdistr_grad(r_vec)
return dL_dr_vec
def compute_log_likelihood(r_vec):
policy = value_iter(mdp, 1, mdp.f_matrix @ r_vec, horizon, temp)
d_last_step = compute_d_last_step(mdp, policy, p_0, horizon)
log_likelihood = np.log(d_last_step[s_current])
if r_prior!= None: log_likelihood += np.sum(r_prior.logpdf(r_vec))
return log_likelihood
def get_grad(_):
"""dummy function for use with check_grad()"""
return dL_dr_vec
if r_vec is None:
r_vec = 0.01*np.random.randn(mdp.f_matrix.shape[1])
print('Initial reward vector: {}'.format(r_vec))
if check_grad_flag: grad_error_list=[]
for i in range(epochs):
dL_dr_vec = compute_grad(r_vec)
if check_grad_flag:
grad_error_list.append(check_grad(compute_log_likelihood, get_grad, r_vec))
# Gradient ascent
r_vec = r_vec + learning_rate * dL_dr_vec
# with printoptions(precision=4, suppress=True):
# print('Epoch {}; Reward vector: {}'.format(i, r_vec))
# if check_grad_flag: print('grad error: {}'.format(grad_error_list[-1]))
if np.linalg.norm(dL_dr_vec) < threshold:
if check_grad_flag:
print()
print('Max grad error: {}'.format(np.amax(np.asarray(grad_error_list))))
print('Median grad error: {}'.format(np.median(np.asarray(grad_error_list))))
break
return r_vec | d389363929f4e7261d72b0d9d83a806fae10b8ab | 3,653,769 |
import numbers

from skimage import transform

# _is_numpy_image is assumed to be a small helper defined alongside this function.
def rotate(img, angle=0, order=1):
"""Rotate image by a certain angle around its center.
Parameters
----------
img : ndarray(uint16 or uint8)
Input image.
angle : integer
Rotation angle in degrees in counter-clockwise direction.
Returns
-------
rotated : ndarray(uint16 or uint8)
Rotated version of the input.
Examples
--------
rotate(image, 30)
rotate(image, 180)
"""
if not _is_numpy_image(img):
raise TypeError('img should be numpy ndarray. Got {}'.format(type(img)))
if not (isinstance(angle, numbers.Number)):
raise TypeError('Angle should be integer. Got {}'.format(type(angle)))
img_new = transform.rotate(img, angle, order=order, preserve_range=True)
img_new = img_new.astype(img.dtype)
return img_new | 8b55fe060ff6b8eb0c7137dc38a72531c24c7534 | 3,653,770 |
def activate(request, uidb64, token):
"""Function that activates the user account."""
try:
uid = force_text(urlsafe_base64_decode(uidb64))
user = User.objects.get(pk=uid)
except(TypeError, ValueError, OverflowError, User.DoesNotExist):
user = None
if user is not None and account_activation_token.check_token(user, token):
user.is_active = True
user.save()
login(request, user)
        messages.success(request, 'Your account has been activated successfully. You can now log in.')
return redirect('login')
else:
        messages.warning(request, 'The activation link is invalid or the account has already been activated.')
return redirect('login') | 8538fc17e37b2743a7145286720ba5e8d653c790 | 3,653,771 |
def dfn(*args, **kwargs):
"""
The HTML Definition Element (<dfn>) represents the defining
instance of a term.
"""
return el('dfn', *args, **kwargs) | 798fb57360aca6f035ad993998c622eb6fff4e82 | 3,653,772 |
def handle_post_runs(project_id, deployment_id):
"""Handles POST requests to /."""
is_experiment_deployment = False
experiment_deployment = request.args.get('experimentDeploy')
if experiment_deployment and experiment_deployment == 'true':
is_experiment_deployment = True
run_id = create_deployment_run(project_id, deployment_id, is_experiment_deployment)
return jsonify({"message": "Pipeline running.", "runId": run_id}) | 5684a2b1f82981d4a3d5d7b870485b01201fdd2e | 3,653,774 |
def get_in_reply_to_user_id(tweet):
"""
    Get the user id of the user whose Tweet is being replied to, and None
if this Tweet is not a reply. \n
Note that this is unavailable in activity-streams format
Args:
tweet (Tweet): A Tweet object (or a dictionary)
Returns:
str: the user id of the user whose Tweet is being replied to, None
(if not a reply), or for activity-streams raise a NotAvailableError
Example:
>>> from tweet_parser.getter_methods.tweet_reply import *
>>> original_format_dict = {
... "created_at": "Wed May 24 20:17:19 +0000 2017",
... "in_reply_to_user_id_str": "2382763597"
... }
>>> get_in_reply_to_user_id(original_format_dict)
'2382763597'
"""
if is_original_format(tweet):
return tweet["in_reply_to_user_id_str"]
else:
raise NotAvailableError("Gnip activity-streams format does not" +
" return the replied to user's id") | 74bbfa224f15781f769bf52bb470e23e9c93a95a | 3,653,775 |
def release_definition_show(definition_id=None, name=None, open_browser=False, team_instance=None, project=None,
detect=None):
"""Get the details of a release definition.
:param definition_id: ID of the definition.
:type definition_id: int
:param name: Name of the definition. Ignored if --id is supplied.
:type name: str
:param open_browser: Open the definition summary page in your web browser.
:type open_browser: bool
:param team_instance: VSTS account or TFS collection URL. Example: https://myaccount.visualstudio.com
:type team_instance: str
:param project: Name or ID of the team project.
:type project: str
:param detect: Automatically detect values for instance and project. Default is "on".
:type detect: str
:rtype: ReleaseDefinitionReference
"""
team_instance, project = resolve_instance_and_project(detect=detect,
team_instance=team_instance,
project=project)
client = get_release_client(team_instance)
if definition_id is None:
if name is not None:
definition_id = get_definition_id_from_name(name, client, project)
else:
raise ValueError("Either the --id argument or the --name argument must be supplied for this command.")
release_definition = client.get_release_definition(definition_id=definition_id, project=project)
if open_browser:
_open_definition(release_definition)
return release_definition | 3a4f13a1dfb7f1bd95bf8eae52d41f14566eb5fb | 3,653,777 |
from math import floor

# opt_import is assumed to be the project's optional-import helper.
def GKtoUTM(ea, no=None, zone=32, gk=None, gkzone=None):
"""Transform any Gauss-Krueger to UTM autodetect GK zone from offset."""
if gk is None and gkzone is None:
if no is None:
rr = ea[0][0]
else:
if isinstance(ea, list) or isinstance(ea, tuple):
rr = ea[0]
else:
rr = ea
gkzone = int(floor(rr * 1e-6))
print(gkzone)
if gkzone <= 0 or gkzone >= 5:
print("cannot detect valid GK zone")
pyproj = opt_import('pyproj', 'coordinate transformations')
if pyproj is None:
return None
gk = pyproj.Proj(init="epsg:"+str(31464+gkzone))
    wgs84 = pyproj.Proj(init="epsg:4326")  # pure ellipsoid for the double transform
utm = pyproj.Proj(proj='utm', zone=zone, ellps='WGS84') # UTM
if no is None: # two-column matrix
lon, lat = pyproj.transform(gk, wgs84, ea[0], ea[1])
else:
lon, lat = pyproj.transform(gk, wgs84, ea, no)
return utm(lon, lat) | 330804d9bfe4785d867755b58355b0633d1fe7c8 | 3,653,778 |
def robots(req):
"""
.. seealso:: http://www.sitemaps.org/protocol.html#submit_robots
"""
return Response(
"Sitemap: %s\n" % req.route_url('sitemapindex'), content_type="text/plain") | 42e21c5968d7e6d02049a0539d5b115aa596292e | 3,653,779 |
import numpy as np
def bolling(asset:list, samples:int=20, alpha:float=0, width:float=2):
"""
According to MATLAB:
BOLLING(ASSET,SAMPLES,ALPHA,WIDTH) plots Bollinger bands for given ASSET
data vector. SAMPLES specifies the number of samples to use in computing
the moving average. ALPHA is an optional input that specifies the exponent
used to compute the element weights of the moving average. The default
ALPHA is 0 (simple moving average). WIDTH is an optional input that
specifies the number of standard deviations to include in the envelope. It
is a multiplicative factor specifying how tight the bounds should be made
around the simple moving average. The default WIDTH is 2. This calling
syntax plots the data only and does not return the data.
Note: The standard deviations are normalized by (N-1) where N is the
sequence length.
"""
# build weight vector
# 主体
r = len(asset)
i = np.arange(1,samples+1) ** alpha
w = i / sum(i)
# build moving average vectors with for loops
a = np.zeros((r-samples, 1))
b = a.copy()
for i in range(samples, r):
a[i-samples] = np.sum( asset[i-samples:i] * w )
b[i-samples] = width * np.sum(np.std( asset[i-samples:i] * w ))
return a,a+b,a-b | 90c06bb45f30713a05cde865e23c0f9e317b0887 | 3,653,780 |
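# Usage sketch for bolling() above (synthetic data; assumes only NumPy).
# The function returns the moving average plus the upper/lower Bollinger envelopes.
import numpy as np
np.random.seed(1)
prices = 100 + np.cumsum(np.random.randn(250))   # synthetic daily closes
mid, upper, lower = bolling(prices, samples=20)
print(mid.shape)                                 # -> (230, 1): one value per 20-sample window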
def metrics():
"""
Expose metrics for the Prometheus collector
"""
collector = SensorsDataCollector(sensors_data=list(sensors.values()), prefix='airrohr_')
return Response(generate_latest(registry=collector), mimetype='text/plain') | 93a3de3fbddaeeeaafd182824559003701b718bc | 3,653,781 |
def solar_energy_striking_earth_today() -> dict:
"""Get number of solar energy striking earth today."""
return get_metric_of(label='solar_energy_striking_earth_today') | a53c6e45f568d5b4245bbc993547b28f5414ca47 | 3,653,782 |
def write_data_str(geoms, grads, hessians):
""" Writes a string containing the geometry, gradient, and Hessian
for either a single species or points along a reaction path
that is formatted appropriately for the ProjRot input file.
:param geoms: geometries
:type geoms: list
:param grads: gradients
:type grads: list
:param hessians: Hessians
:type hessians: list
:rtype: str
"""
# if not isinstance(geoms, list):
# geoms = [geoms]
# if not isinstance(grads, list):
# grads = [grads]
# if not isinstance(hessians, list):
# hessians = [hessians]
nsteps = len(geoms)
data_str = ''
for i, (geo, grad, hess) in enumerate(zip(geoms, grads, hessians)):
data_str += 'Step {0}\n'.format(str(i+1))
data_str += 'geometry\n'
data_str += _format_geom_str(geo)
data_str += 'gradient\n'
data_str += _format_grad_str(geo, grad)
data_str += 'Hessian\n'
data_str += _format_hessian_str(hess)
if i != nsteps-1:
data_str += '\n'
return remove_trail_whitespace(data_str) | 34c1148f820396bf4619ace2d13fb517e4f6f16d | 3,653,783 |
import types
from typing import Dict
from typing import Any
from typing import List
def gen_chart_name(data: types.ChartAxis,
formatter: Dict[str, Any],
device: device_info.DrawerBackendInfo
) -> List[drawings.TextData]:
"""Generate the name of chart.
Stylesheets:
- The `axis_label` style is applied.
Args:
data: Chart axis data to draw.
formatter: Dictionary of stylesheet settings.
device: Backend configuration.
Returns:
List of `TextData` drawings.
"""
style = {'zorder': formatter['layer.axis_label'],
'color': formatter['color.axis_label'],
'size': formatter['text_size.axis_label'],
'va': 'center',
'ha': 'right'}
text = drawings.TextData(data_type=types.LabelType.CH_NAME,
channels=data.channels,
xvals=[types.AbstractCoordinate.LEFT],
yvals=[0],
text=data.name,
ignore_scaling=True,
styles=style)
return [text] | 032abcb5e6fca1920965fdd20203614dd750c9c0 | 3,653,784 |
from typing import Sequence

import numpy as np
import spacy
from sklearn.metrics import pairwise
def vector_cosine_similarity(docs: Sequence[spacy.tokens.Doc]) -> np.ndarray:
"""
Get the pairwise cosine similarity between each
document in docs.
"""
vectors = np.vstack([doc.vector for doc in docs])
return pairwise.cosine_similarity(vectors) | 14456abcbb038dd2a4c617690d7f68dfc7a7bcb8 | 3,653,786 |
def create_test_validation():
"""
Returns a constructor function for creating a Validation object.
"""
def _create_test_validation(db_session, resource, success=None, started_at=None, secret=None):
create_kwargs = {"resource": resource}
for kwarg in ['success', 'started_at', 'secret']:
if locals()[kwarg] is not None:
create_kwargs[kwarg] = locals()[kwarg]
(validation, _) = get_one_or_create(db_session, Validation, **create_kwargs)
return validation
return _create_test_validation | 7d78ae1c999cb79151e7527fd5bad448946aaccc | 3,653,787 |
import numpy as np

def nrmse(img, ref, axes = (0,1)):
""" Compute the normalized root mean squared error (nrmse)
:param img: input image (np.array)
:param ref: reference image (np.array)
:param axes: tuple of axes over which the nrmse is computed
:return: (mean) nrmse
"""
nominator = np.real(np.sum( (img - ref) * np.conj(img - ref), axis = axes))
denominator = np.real(np.sum( ref * np.conj(ref), axis = axes))
nrmse = np.sqrt(nominator / denominator)
return np.mean(nrmse) | ab040a2dd88acb2ce1e7df3b37215c5a40092f8a | 3,653,788 |
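# Worked example for nrmse() above: a constant offset of 0.1 against a unit-valued
# reference gives a normalized RMSE of ~0.1 (offset RMS divided by the RMS of ref).
import numpy as np
ref = np.ones((4, 4))
img = ref + 0.1
print(nrmse(img, ref))   # -> ~0.1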
def pairwise_comparison(column1,var1,column2,var2):
"""
    Arg:    column1 --> column name 1 in df
            column2 --> column name 2 in df
            var1---> 3 cases:
                        abbreviation in column 1 (seeking better model)
                        abbreviation in column 1 (seeking lesser value in column1 in comparison to column2)
                        empty string (seeking greater value in column2 in comparison to column1)
            var2---> 3 cases:
                        abbreviation in column 2 (seeking better model)
                        abbreviation in column 2 (seeking greater value in column2 in comparison to column1)
                        empty string (seeking lesser value in column1 in comparison to column2)
    Return: element-wise list of the abbreviation for whichever column holds the smaller/greater
            value, depending on how the function is used
    Function: list comprehension over the two columns zipped together;
              used to find the data set with the smaller/greater value
"""
return [var1 if r < c else var2 for r,c in zip(column1,column2)] | a67ef991dcad4816e9b15c1f352079ce14d7d823 | 3,653,789 |
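# Usage sketch for pairwise_comparison() above, e.g. comparing per-row RMSE of two models.
rmse_a = [0.12, 0.30, 0.25]
rmse_b = [0.15, 0.28, 0.25]
print(pairwise_comparison(rmse_a, 'A', rmse_b, 'B'))
# -> ['A', 'B', 'B'] (ties go to var2, because the test is strictly r < c)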
# Tokenizer, pad_sequences and size are assumed to come from the classic Keras
# preprocessing utilities and NumPy, matching how they are used below.
import numpy as np
from numpy import size
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences

def prep_data_CNN(documents):
"""
Prepare the padded docs and vocab_size for CNN training
"""
t = Tokenizer()
docs = list(filter(None, documents))
print("Size of the documents in prep_data {}".format(len(documents)))
t.fit_on_texts(docs)
vocab_size = len(t.word_counts)
print("Vocab size {}".format(vocab_size))
encoded_docs = t.texts_to_sequences(docs)
print("Size of the encoded documents {}".format(len(encoded_docs)))
e_lens = []
for i in range(len(encoded_docs)):
e_lens.append(len(encoded_docs[i]))
lens_edocs = list(map(size, encoded_docs))
max_length = np.average(lens_edocs)
sequence_length = 1500 # Can use this instead of the above average max_length value
max_length = sequence_length
padded_docs = pad_sequences(
encoded_docs, maxlen=int(max_length), padding='post')
print("Length of a padded row {}".format(padded_docs.shape))
print("max_length {} and min_length {} and average {}".format(
max_length, min(lens_edocs), np.average(lens_edocs)))
return padded_docs, max_length, vocab_size, t.word_index | a568942bdedbea99d6abf2bd5b8fc8c7912e4271 | 3,653,790 |
import numpy as np

def gc2gd_lat(gc_lat):
"""Convert geocentric latitude to geodetic latitude using WGS84.
Parameters
-----------
gc_lat : (array_like or float)
Geocentric latitude in degrees N
Returns
---------
gd_lat : (same as input)
Geodetic latitude in degrees N
"""
wgs84_e2 = 0.006694379990141317 - 1.0
gd_lat = np.rad2deg(-np.arctan(np.tan(np.deg2rad(gc_lat)) / wgs84_e2))
return gd_lat | e019a5a122266eb98dba830283091bcbf42f873f | 3,653,791 |
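# Worked example for gc2gd_lat() above: geodetic latitude is slightly poleward of the
# geocentric latitude on the WGS84 ellipsoid, and identical on the equator.
print(gc2gd_lat(45.0))   # -> ~45.19 degrees N
print(gc2gd_lat(0.0))    # -> 0.0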
import numpy as np

def polynomial_kernel(X, Y, c, p):
"""
Compute the polynomial kernel between two matrices X and Y::
K(x, y) = (<x, y> + c)^p
for each pair of rows x in X and y in Y.
Args:
X - (n, d) NumPy array (n datapoints each with d features)
Y - (m, d) NumPy array (m datapoints each with d features)
c - a coefficient to trade off high-order and low-order terms (scalar)
p - the degree of the polynomial kernel
Returns:
kernel_matrix - (n, m) Numpy array containing the kernel matrix
"""
# YOUR CODE HERE
# raise NotImplementedError
kernel_matrix = (np.matmul(X, Y.T) + c) ** p
return kernel_matrix | 5532692b0a8411560f56033bcf6ad27b3c8e41a1 | 3,653,793 |
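# Worked example for polynomial_kernel() above with c=1, p=2:
# X @ Y.T is [[1, 2], [3, 4]], so K = ([[1, 2], [3, 4]] + 1) ** 2 elementwise.
import numpy as np
X = np.array([[1., 2.], [3., 4.]])
Y = np.array([[1., 0.], [0., 1.]])
print(polynomial_kernel(X, Y, c=1, p=2))   # -> [[ 4.  9.] [16. 25.]]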
from decimal import Decimal

# slugify and shorten are assumed to be provided by the surrounding project.
def slug_from_iter(it, max_len=128, delim='-'):
"""Produce a slug (short URI-friendly string) from an iterable (list, tuple, dict)
>>> slug_from_iter(['.a.', '=b=', '--alpha--'])
'a-b-alpha'
"""
nonnull_values = [str(v) for v in it if v or ((isinstance(v, (int, float, Decimal)) and str(v)))]
return slugify(delim.join(shorten(v, max_len=int(float(max_len) / len(nonnull_values))) for v in nonnull_values), word_boundary=True) | 0da42aa5c56d3012e5caf4a5ead37632d5d21ab0 | 3,653,794 |
def modulusOfRigidity(find="G", printEqs=True, **kwargs):
"""
Defines the slope of the stress-strain curve up to the elastic limit of the material.
    For most ductile materials it is the same in compression as in tension. Not true for cast irons, other brittle materials, or magnesium.
Where:
E = modulus of elasticity
v = poisson's ratio
Material v
Aluminum 0.34
Copper 0.35
Iron 0.28
Steel 0.28
Magnesium 0.33
Titanium 0.34
"""
eq = list()
eq.append("Eq(G, E / (2*(1+v))")
return solveEqs(eq, find=find, printEq=printEqs, **kwargs) | cb849755799d85b9d4d0671f6656de748ab38f7c | 3,653,795 |
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigType) -> bool:
"""Unload FRITZ!Box Tools config entry."""
hass.services.async_remove(DOMAIN, SERVICE_RECONNECT)
for domain in SUPPORTED_DOMAINS:
await hass.config_entries.async_forward_entry_unload(entry, domain)
del hass.data[DOMAIN]
return True | e934ec21be451cc1084bd293dbce5495f6b4915c | 3,653,796 |
def queryMaxTransferOutAmount(asset, isolatedSymbol="", recvWindow=""):
"""# Query Max Transfer-Out Amount (USER_DATA)
#### `GET /sapi/v1/margin/maxTransferable (HMAC SHA256)`
### Weight:
5
### Parameters:
Name |Type |Mandatory |Description
--------|--------|--------|--------
asset |STRING |YES |
isolatedSymbol |STRING |NO |isolated symbol
    recvWindow |LONG |NO |The value cannot be greater than 60000
timestamp |LONG |YES |
"""
endpoint = '/sapi/v1/margin/maxTransferable'
params = {
"asset": asset
}
if isolatedSymbol: params["isolatedSymbol"] = isolatedSymbol
if recvWindow: params["recvWindow"] = recvWindow
return getbinancedata_sig(endpoint, params) | f9e178d18eea969e5aabc0efa3aee938ad730752 | 3,653,797 |
def remove_layer(nn, del_idx, additional_edges, new_strides=None):
""" Deletes the layer indicated in del_idx and adds additional_edges specified
in additional_edges. """
layer_labels, num_units_in_each_layer, conn_mat, mandatory_child_attributes = \
get_copies_from_old_nn(nn)
# First add new edges to conn_mat and remove edges to and from del_idx
for add_edge in additional_edges:
conn_mat[add_edge[0], add_edge[1]] = 1
conn_mat[del_idx, :] = 0
conn_mat[:, del_idx] = 0
# Now reorder everything so that del_idx is at the end
all_idxs = list(range(len(layer_labels)))
new_order = all_idxs[:del_idx] + all_idxs[del_idx+1:] + [del_idx]
# Now reorder everything so that the layer to be remove is at the end
layer_labels = reorder_list_or_array(layer_labels, new_order)
num_units_in_each_layer = reorder_list_or_array(num_units_in_each_layer, new_order)
conn_mat = reorder_rows_and_cols_in_matrix(conn_mat, new_order)
# remove layer
layer_labels = layer_labels[:-1]
num_units_in_each_layer = num_units_in_each_layer[:-1]
conn_mat = conn_mat[:-1, :-1]
# Strides for a convolutional network
if nn.nn_class == 'cnn':
new_strides = new_strides if new_strides is not None else \
mandatory_child_attributes.strides
mandatory_child_attributes.strides = reorder_list_or_array(
new_strides, new_order)
mandatory_child_attributes.strides = mandatory_child_attributes.strides[:-1]
return get_new_nn(nn, layer_labels, num_units_in_each_layer, conn_mat,
mandatory_child_attributes) | 33d4a2e6ba05000f160b0d0cc603c568f68790d7 | 3,653,799 |
import itertools
import random
def brutekeys(pinlength, keys="0123456789", randomorder=False):
"""
    Returns a list of all possibilities to try, based on the pin length and keys given.
    Yeah, lots of slow list copying here, but who cares, it's dwarfed by the actual guessing.
    """
    allpossible = ["".join(x) for x in itertools.product(keys, repeat=pinlength)]
if randomorder:
random.shuffle(allpossible)
return allpossible | 42f659e37468073c42117d1f4d6235f08aedde59 | 3,653,800 |
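# Usage sketch for brutekeys() above: all 2-digit PINs over a binary keypad,
# and the count of 4-digit PINs over the default numeric keypad.
print(brutekeys(2, keys="01"))   # -> ['00', '01', '10', '11']
print(len(brutekeys(4)))         # -> 10000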
import plotly.graph_objs as go

# query_generation is assumed to be a project helper returning a DataFrame of net
# generation per energy source, indexed by date.
def return_figures():
    """Creates plotly visualizations
    Args:
        None
    Returns:
        list (dict): list containing the plotly visualizations
    """
df = query_generation('DE', 14)
graph_one = []
x_val = df.index
for energy_source in df.columns:
y_val = df[energy_source].tolist()
graph_one.append(
go.Scatter(
x=x_val,
y=y_val,
mode='lines',
name=energy_source,
stackgroup = 'one'
)
)
layout_one = dict(title='Generation in Germany during the last 14 days',
xaxis=dict(title='Date'),
yaxis=dict(title='Net Generation (MW)'),
colorway = ['#008000', '#ffa500', '#ff0000', '#000080', '#008080', '#808080', '#a52a2a', '#1e90ff', '#ffc40c'],
plot_bgcolor = '#E8E8E8',
hovermode = 'closest',
hoverdistance = -1,
height = 500
)
# append all charts to the figures list
figures = []
figures.append(dict(data=graph_one, layout=layout_one))
return figures | c89fe79d12173bc0b167e43e81d3adf19e81eb7b | 3,653,801 |
def create_central_storage_strategy():
"""Create a CentralStorageStrategy, using a GPU if it is available."""
compute_devices = ['cpu:0', 'gpu:0'] if (
tf.config.list_logical_devices('GPU')) else ['cpu:0']
return tf.distribute.experimental.CentralStorageStrategy(
compute_devices, parameter_device='cpu:0') | 46cc64d6cb888f51513a2b7d5bb4e28af58b5a29 | 3,653,802 |
def ToolStep(step_class, os, **kwargs):
"""Modify build step arguments to run the command with our custom tools."""
if os.startswith('win'):
command = kwargs.get('command')
env = kwargs.get('env')
if isinstance(command, list):
command = [WIN_BUILD_ENV_PATH] + command
else:
command = WIN_BUILD_ENV_PATH + ' ' + command
if env:
env = dict(env) # Copy
else:
env = {}
env['BOTTOOLS'] = WithProperties('%(workdir)s\\tools\\buildbot\\bot_tools')
kwargs['command'] = command
kwargs['env'] = env
return step_class(**kwargs) | 30bdf2a1f81135150230b5a894ee0fa3c7be4fa4 | 3,653,803 |
def get_security_groups():
"""
Gets all available AWS security group names and ids associated with an AWS role.
Return:
sg_names (list): list of security group id, name, and description
"""
sg_groups = boto3.client('ec2', region_name='us-west-1').describe_security_groups()['SecurityGroups']
sg_names = []
for sg in sg_groups:
sg_names.append(sg['GroupId'] + ': ' + sg['GroupName'] + ': ' + sg['Description'])
return sg_names | 48a30454a26ea0b093dff59c830c14d1572d3e11 | 3,653,804 |
def traverseTokens(tokens, lines, callback):
"""Traverses a list of tokens to identify functions. Then uses a callback
to perform some work on the functions. Each function seen gets a new State
object created from the given callback method; there is a single State for
global code which is given None in the constructor. Then, each token seen
is passed to the 'add' method of the State. This is used by the State to
either calculate sizes, print tokens, or detect dependencies. The 'build'
method is called at the end of the function to create a result object that
is returned as an array at the end.
Arguments:
tokens - An array of Tokens.
lines - An array of compiled code lines.
callback - A constructor that returns a state object. It takes a start
token or None if outside a function. It has two member
functions:
add - accepts the current token and the token's index.
build - returns an object to be added to the results.
Returns:
an array of State objects in a format controlled by the callback.
"""
ret = []
state = callback(None, None)
# Create a token iterator. This is used to read tokens from the array. We
# cannot use a for loop because the iterator is passed to readFunction.
tokenIter = enumerate(tokens)
try:
while True:
index, token = next(tokenIter)
if isFunction(token, lines):
ret += readFunction(tokenIter, token, index, lines, callback)
else:
state.add(token, index)
except StopIteration:
pass
temp = state.build()
if temp:
ret.append(temp)
return ret | 4fcdfc4505a0a3eb1ba10a884cb5fc2a2714d845 | 3,653,805 |
from typing import Any
def publications_by_country(papers: dict[str, Any]) -> dict[Location, int]:
"""returns number of published papers per country"""
countries_publications = {}
for paper in papers:
participant_countries = {Location(city=None, state=None, country=location.country) \
for location in paper.locations}
for country in participant_countries:
try:
countries_publications[country] += 1
except KeyError:
countries_publications[country] = 1
return (dict(sorted(countries_publications.items(), key=lambda x: x[1], reverse=True))) | 7295fd9491d60956ca45995efc6818687c266446 | 3,653,806 |
def dequote(str):
"""Will remove single or double quotes from the start and end of a string
and return the result."""
quotechars = "'\""
while len(str) and str[0] in quotechars:
str = str[1:]
while len(str) and str[-1] in quotechars:
str = str[0:-1]
return str | e6377f9992ef8119726b788c02af9df32c722c28 | 3,653,807 |
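# Usage sketch for dequote() above: any mix of leading/trailing single or double
# quotes is stripped, while inner quotes are preserved.
print(dequote('"hello"'))      # -> hello
print(dequote("'\"mixed\"'"))  # -> mixed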
import numpy
def uccsd_singlet_paramsize(n_qubits, n_electrons):
"""Determine number of independent amplitudes for singlet UCCSD
Args:
n_qubits(int): Number of qubits/spin-orbitals in the system
n_electrons(int): Number of electrons in the reference state
Returns:
Number of independent parameters for singlet UCCSD with a single
reference.
"""
n_occupied = int(numpy.ceil(n_electrons / 2.))
    n_virtual = n_qubits // 2 - n_occupied
n_single_amplitudes = n_occupied * n_virtual
n_double_amplitudes = n_single_amplitudes ** 2
return (n_single_amplitudes + n_double_amplitudes) | 408c9158c76fba5d118cc6603e08260db30cc3df | 3,653,808 |
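# Worked example for uccsd_singlet_paramsize() above: with 4 spin-orbitals and
# 2 electrons there is 1 occupied and 1 virtual spatial orbital, giving
# 1 single amplitude + 1**2 double amplitudes = 2 parameters.
print(uccsd_singlet_paramsize(n_qubits=4, n_electrons=2))   # -> 2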
def variance(timeseries: SummarizerAxisTimeseries, param: dict):
"""
Calculate the variance of the timeseries
"""
    v_mean = mean(timeseries)
    # Accumulate the squared deviations from the mean
    v_variance = 0
    count = 0
    for ts, value in timeseries.values():
        v_variance += (value - v_mean) ** 2
        count += 1
    # Average over the number of samples
    if count == 0:
        return 0
    return v_variance / count
def setup_i2c_sensor(sensor_class, sensor_name, i2c_bus, errors):
""" Initialise one of the I2C connected sensors, returning None on error."""
if i2c_bus is None:
# This sensor uses the multipler and there was an error initialising that.
return None
try:
sensor = sensor_class(i2c_bus)
except Exception as err:
# Error initialising this sensor, try to continue without it.
msg = "Error initialising {}:\n{}".format(sensor_name, err)
print(msg)
errors += (msg + "\n")
return None
else:
print("{} initialised".format(sensor_name))
return sensor | 62633c09f6e78b43fca625df8fbd0d20d866735b | 3,653,810 |
def argparse_textwrap_unwrap_first_paragraph(doc):
"""Join by single spaces all the leading lines up to the first empty line"""
index = (doc + "\n\n").index("\n\n")
lines = doc[:index].splitlines()
chars = " ".join(_.strip() for _ in lines)
alt_doc = chars + doc[index:]
return alt_doc | f7068c4b463c63d100980b743f8ed2d69b149a97 | 3,653,811 |
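# Usage sketch for argparse_textwrap_unwrap_first_paragraph() above: the lines before
# the first blank line are joined into one paragraph, the rest is left untouched.
doc = "Do the thing\nacross many lines.\n\nMore details here."
print(argparse_textwrap_unwrap_first_paragraph(doc))
# -> "Do the thing across many lines." followed by the untouched remainder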
import ctypes

import numpy as np
def iterator(x, y, z, coeff, repeat, radius=0):
""" compute an array of positions visited by recurrence relation """
c_iterator.restype = ctypes.POINTER(ctypes.c_double * (3 * repeat))
start = to_double_ctype(np.array([x, y, z]))
coeff = to_double_ctype(coeff)
out = to_double_ctype(np.zeros(3 * repeat))
res = c_iterator(start, coeff, repeat, ctypes.c_double(radius), out).contents
return np.array(res).reshape((repeat, 3)).T | 82c32dddf2c8d0899ace56869679ccc8dbb36d22 | 3,653,812 |
import webbrowser
def open_pep(
search: str, base_url: str = BASE_URL, pr: int | None = None, dry_run: bool = False
) -> str:
"""Open this PEP in the browser"""
url = pep_url(search, base_url, pr)
if not dry_run:
webbrowser.open_new_tab(url)
print(url)
return url | 2f23e16867ccb0e028798ff261c9c64eb1cdeb31 | 3,653,813 |
import numpy as np

def random_sparse_matrix(n, n_add_elements_frac=None,
n_add_elements=None,
elements=(-1, 1, -2, 2, 10),
add_elements=(-1, 1)):
"""Get a random matrix where there are n_elements."""
n_total_elements = n * n
n_diag_elements = n
frac_diag = 1. * n_diag_elements / n_total_elements
if n_add_elements is not None and n_add_elements_frac is not None:
raise ValueError("Should only set either n_add_elements or n_add_elements_frac")
if n_add_elements_frac is not None:
n_add_elements = int(round(n_add_elements_frac * n_total_elements))
assert n_add_elements_frac >= 0, n_add_elements_frac
assert n_add_elements_frac <= 1 - frac_diag, n_add_elements_frac
assert n_add_elements >= 0
assert n_add_elements <= n_total_elements - n_diag_elements
A = np.zeros((n, n))
remaining = set(range(n))
# main elements
for i in range(n):
j = np.random.choice(list(remaining))
remaining.remove(j)
A[i, j] = np.random.choice(list(elements))
# additional elements
left_indices = np.array(list(zip(*np.where(A == 0.0))))
# print(left_indices)
# print(A)
np.random.shuffle(left_indices)
assert len(left_indices) >= n_add_elements
for i_add in range(n_add_elements):
i, j = left_indices[i_add]
assert A[i, j] == 0.0
A[i, j] = np.random.choice(list(add_elements))
return A | 41ea01c69bd757f11bbdb8a259ec3aa1baabadc2 | 3,653,814 |
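# Usage sketch for random_sparse_matrix() above: one "pivot" entry per row plus
# round(0.2 * 25) = 5 extra +/-1 entries, so a 5x5 matrix has 5 + 5 = 10 nonzeros.
import numpy as np
np.random.seed(0)
A = random_sparse_matrix(5, n_add_elements_frac=0.2)
print(np.count_nonzero(A))   # -> 10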