from pathlib import Path
def file(base_path, other_path):
"""
    Return a single [base_path, other_path] pair wrapped in a list.
"""
return [[Path(base_path), Path(other_path)]] | 3482041757b38929a58d7173731e84a915225809 | 3,655,455 |
import io
# Best-guess imports for this snippet's context: the calls below
# (Namespaces().get_namespaces, util.nspath_eval) match the owslib API.
from lxml import etree
from owslib import util
from owslib.namespaces import Namespaces
def get_md_resource(file_path):
"""Read the file and parse into an XML tree.
Parameters
----------
file_path : str
Path of the file to read.
Returns
-------
etree.ElementTree
XML tree of the resource on disk.
"""
namespaces = Namespaces().get_namespaces(keys=('gmd', 'gmi'))
with io.open(file_path, mode='r', encoding='utf-8') as f:
data = f.read().encode('utf-8')
data = etree.fromstring(data)
mdelem = data.find('.//' + util.nspath_eval(
'gmd:MD_Metadata', namespaces))
if mdelem is None:
mdelem = data.find(
'.//' + util.nspath_eval('gmi:MI_Metadata', namespaces))
if mdelem is None and data.tag in ['{http://www.isotc211.org/2005/gmd}MD_Metadata',
'{http://www.isotc211.org/2005/gmi}MI_Metadata']:
mdelem = data
return mdelem | 809ea7cef3c9191db9589e0eacbba4016c2e9893 | 3,655,456 |
def fmin_style(sfmin):
"""convert sfmin to style"""
return Struct(
is_valid=good(sfmin.is_valid, True),
has_valid_parameters=good(sfmin.has_valid_parameters, True),
has_accurate_covar=good(sfmin.has_accurate_covar, True),
has_posdef_covar=good(sfmin.has_posdef_covar, True),
has_made_posdef_covar=good(sfmin.has_made_posdef_covar, False),
hesse_failed=good(sfmin.hesse_failed, False),
has_covariance=good(sfmin.has_covariance, True),
is_above_max_edm=good(sfmin.is_above_max_edm, False),
has_reached_call_limit=caution(sfmin.has_reached_call_limit, False),
) | 44ecba0a25c38a5a61cdba7750a6b8ad53d78c3d | 3,655,457 |
from scipy import optimize

def xirr(cashflows, guess=0.1):
"""
Calculate the Internal Rate of Return of a series of cashflows at irregular intervals.
Arguments
---------
* cashflows: a list object in which each element is a tuple of the form (date, amount), where date is a python datetime.date object and amount is an integer or floating point number. Cash outflows (investments) are represented with negative amounts, and cash inflows (returns) are positive amounts.
* guess (optional, default = 0.1): a guess at the solution to be used as a starting point for the numerical solution.
Returns
--------
* Returns the IRR as a single value
Notes
----------------
* The Internal Rate of Return (IRR) is the discount rate at which the Net Present Value (NPV) of a series of cash flows is equal to zero. The NPV of the series of cash flows is determined using the xnpv function in this module. The discount rate at which NPV equals zero is found using the secant method of numerical solution.
* This function is equivalent to the Microsoft Excel function of the same name.
* For users that do not have the scipy module installed, there is an alternate version (commented out) that uses the secant_method function defined in the module rather than the scipy.optimize module's numerical solver. Both use the same method of calculation so there should be no difference in performance, but the secant_method function does not fail gracefully in cases where there is no solution, so the scipy.optimize.newton version is preferred.
_irr = xirr( [ (date(2010, 12, 29), -10000),
(date(2012, 1, 25), 20),
(date(2012, 3, 8), 10100)] )
"""
    val = -666  # sentinel value returned when the solver fails
    try:
        val = optimize.newton(lambda r: xnpv(r, cashflows), guess)
    except RuntimeError:  # scipy's newton raises RuntimeError on non-convergence
        print("Failed to converge; returning sentinel value -666")
return val | a6adbd091fd5a742c7b27f0816021c2e8499c42f | 3,655,458 |
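A minimal self-contained check of the secant-method approach described above; `_xnpv_demo` is a hypothetical stand-in for this module's xnpv (an actual/365 day count is assumed):

from datetime import date
from scipy import optimize

def _xnpv_demo(rate, cashflows):
    # hypothetical stand-in for xnpv: NPV with an actual/365 day count,
    # discounted from the earliest cashflow date
    t0 = min(t for t, _ in cashflows)
    return sum(cf / (1 + rate) ** ((t - t0).days / 365.0) for t, cf in cashflows)

flows = [(date(2010, 12, 29), -10000), (date(2012, 3, 8), 10100)]
print(optimize.newton(lambda r: _xnpv_demo(r, flows), 0.1))  # ~0.0084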
def check_rt_druid_fields(rt_table_columns, druid_columns):
"""
    Compare the fields of the rt (result table) against the fields of the druid physical table.
    :param rt_table_columns: rt fields after conversion to their druid equivalents
    :param druid_columns: fields of the druid physical table
    :return: (append_fields, bad_fields) -- fields that must be added, and fields whose type changed
"""
append_fields, bad_fields = [], []
for key, value in rt_table_columns.items():
col_name, col_type = key.lower(), value.lower()
        if druid_columns.get(col_name):  # .get avoids a KeyError for fields missing from druid
            # then compare the types
            druid_col_type = druid_columns[col_name]
ok = (
(col_type == druid_col_type)
or (col_type == STRING and druid_col_type == VARCHAR)
or (col_type == LONG and druid_col_type == BIGINT)
)
if not ok:
bad_fields.append({col_name: f"difference between rt and druid({col_type} != {druid_col_type})"})
else:
append_fields.append({FIELD_NAME: col_name, FIELD_TYPE: col_type})
return append_fields, bad_fields | 1c60f49e4316cf78f1396689a55d9a1c71123fe8 | 3,655,459 |
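A quick illustration; the type-name constants and dict keys below are assumptions mirroring how the function uses them (they live elsewhere in the real module):

STRING, VARCHAR, LONG, BIGINT = "string", "varchar", "long", "bigint"  # assumed constants
FIELD_NAME, FIELD_TYPE = "field_name", "field_type"  # assumed constants

append, bad = check_rt_druid_fields(
    {"ts": "long", "city": "string", "note": "string"},
    {"ts": "bigint", "city": "double"},
)
# append == [{"field_name": "note", "field_type": "string"}]
# bad == [{"city": "difference between rt and druid(string != double)"}]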
import numexpr as ne  # the .evaluate call below is the numexpr API, not operator.ne
def is_stuck(a, b, eta):
""" Check if the ricci flow is stuck. """
return ne.evaluate("a-b<eta/50").all() | 53f4bb934cc48d2890289fda3fdb3574d5f6aa4c | 3,655,460 |
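A tiny sanity check; numexpr resolves a, b and eta from the function's local scope when evaluating the expression string:

import numpy as np
a = np.array([1.000, 2.000])
b = np.array([0.999, 1.999])
print(is_stuck(a, b, eta=0.1))  # True: every a-b (0.001) is below eta/50 (0.002)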
from itertools import chain
from typing import Sized

import numpy as np
import pandas as pd

def stable_seasonal_filter(time_series: Sized, freq: int):
    """
    Stable seasonal filter for a time series.
    :param time_series: the time series
    :param freq: frequency at which the mean value is computed
    :return: values of the seasonal component
    """
length = len(time_series)
    if not isinstance(freq, int):
        raise TypeError('freq must be an integer')
    if freq < 1:
        raise ValueError(f'freq must be greater than zero (actually is {freq})')
    if length < freq:
        raise ValueError(f'Length of time series is less than freq ({length} < {freq})')
values = time_series.values if isinstance(time_series, pd.DataFrame) else time_series
seasonal = list()
for i in range(freq):
seasonal_values = [values[i + j * freq] for j in range(length) if i + j * freq < length]
seasonal.append(np.mean(seasonal_values))
seasonals = [seasonal for i in range(length)]
return pd.DataFrame([i for i in chain(*seasonals)][:length]) | fb4997b637d5229ee7f7a645ce19bbd5fcbab0bc | 3,655,462 |
def make_str_lst_unc_val(id, luv):
"""
make_str_lst_unc_val(id, luv)
Make a formatted string from an ID string and a list of uncertain values.
Input
-----
id A number or a string that will be output as a string.
luv A list of DTSA-II UncertainValue2 items. These will be printed
as comma-delimited pairs with 6 digits following the decimal.
Return
------
A string with comma-delimited values with the ID and mean and uncertainty
for each item in the list. This is suitable for writing output to a .csv
file.
Example:
--------
import dtsa2.jmGen as jmg
import gov.nist.microanalysis.Utility as epu
nmZnO1 = 40.1
uvOKa1 = epu.UncertainValue2(0.269157,0.000126)
uvZnLa1 = epu.UncertainValue2(0.259251,9.4e-05)
uvSiKa1 = epu.UncertainValue2(0.654561,8.4e-05)
l_uvals = [uvOKa1, uvZnLa1, uvSiKa1]
    out = jmg.make_str_lst_unc_val(nmZnO1, l_uvals)
print(out)
1> 40.1, 0.269157, 0.000126, 0.259251, 0.000094, 0.654561, 0.000084
"""
    lv = len(luv)
    rv = "%s, " % (id)
    for i, uv in enumerate(luv):
        rc = round(uv.doubleValue(), 6)
        uc = round(uv.uncertainty(), 6)
        if i == lv - 1:
            rv += "%g, %.6f" % (rc, uc)
        else:
            rv += "%g, %.6f, " % (rc, uc)
return(rv) | c65b9bb0c6539e21746a06f7a864acebc2bade03 | 3,655,464 |
import numpy as np
from matplotlib.patches import Polygon

def plot_faces(ax, coordinates, meta, st):
    """plot the faces"""
    def t_param_difference(v1, v2):
        return abs(meta["t"][v1] - meta["t"][v2])
    for s in st.faces:
        # check that this face isn't in the cut region
        if all(all(t_param_difference(v1, v2) < 2 for v2 in s) for v1 in s):
pts = np.array([coordinates[v] for v in s])
pts = np.array([nearest(np.max(pts, 0), p) for p in pts])
center = np.mean(pts, 0)
pts = (pts - center) / 1.8 + center
color = (0, 0, 1, .5)
if meta["s_type"][s] == (2, 1):
color = (1, 0, 0, .5)
p = Polygon(pts, closed=False, color=color)
ax.add_patch(p) | a7eef2d209f7c15d8ba232b25d20e4c751075013 | 3,655,465 |
import typing
def translate_null_strings_to_blanks(d: typing.Dict) -> typing.Dict:
"""Map over a dict and translate any null string values into ' '.
Leave everything else as is. This is needed because you cannot add TableCell
objects with only a null string or the client crashes.
:param Dict d: dict of item values.
:rtype Dict:
"""
# Beware: locally defined function.
def translate_nulls(s):
if s == "":
return " "
return s
new_d = {k: translate_nulls(v) for k, v in d.items()}
return new_d | 1a6cfe2f8449d042eb01774054cddde08ba56f8c | 3,655,466 |
import json
def HttpResponseRest(request, data):
"""
    Return an Http response in the correct output format (JSON, XML or HTML),
    according to the request.format parameter.
Format is automatically added when using the
:class:`igdectk.rest.restmiddleware.IGdecTkRestMiddleware` and views decorators.
"""
if request.format == Format.JSON:
encoded = json.dumps(data, cls=ComplexEncoder)
return HttpResponse(encoded, content_type=Format.JSON.content_type)
elif request.format == Format.HTML:
return HttpResponse(data)
elif request.format == Format.XML:
encoded = igdectk.xmlio.dumps(data)
return HttpResponse(encoded, content_type=Format.XML.content_type)
elif request.format == Format.TEXT:
return HttpResponse(data, content_type=Format.TEXT.content_type)
else:
return None | 56682e808dcb9778ea47218d48fb74612ac44b5d | 3,655,467 |
def build_server_update_fn(model_fn, server_optimizer_fn, server_state_type,
model_weights_type):
"""Builds a `tff.tf_computation` that updates `ServerState`.
Args:
model_fn: A no-arg function that returns a `tff.learning.TrainableModel`.
server_optimizer_fn: A no-arg function that returns a
`tf.keras.optimizers.Optimizer`.
server_state_type: type_signature of server state.
model_weights_type: type_signature of model weights.
Returns:
A `tff.tf_computation` that updates `ServerState`.
"""
@tff.tf_computation(server_state_type, model_weights_type.trainable)
def server_update_tf(server_state, model_delta):
"""Updates the `server_state`.
Args:
server_state: The `ServerState`.
model_delta: The model difference from clients.
Returns:
The updated `ServerState`.
"""
model = model_fn()
server_optimizer = server_optimizer_fn()
# Create optimizer variables so we have a place to assign the optimizer's
# state.
server_optimizer_vars = _create_optimizer_vars(model, server_optimizer)
return server_update(model, server_optimizer, server_optimizer_vars,
server_state, model_delta)
return server_update_tf | c0b8285a5d12c40157172d3b48a49cc5306a567b | 3,655,468 |
def madgraph_tarball_filename(physics):
"""Returns the basename of a MadGraph tarball for the given physics"""
# Madgraph tarball filenames do not have a part number associated with them; overwrite it
return svj_filename("step0_GRIDPACK", Physics(physics, part=None)).replace(
".root", ".tar.xz"
) | a0a8bacb5aed0317b5c0fd8fb3de5382c98e267d | 3,655,469 |
def _mk_cmd(verb, code, payload, dest_id, **kwargs) -> Command:
"""A convenience function, to cope with a change to the Command class."""
return Command.from_attrs(verb, dest_id, code, payload, **kwargs) | afd5804937a55d235fef45358ee12088755f9dc9 | 3,655,470 |
def getobjname(item):
    """return the object name, or a blank string if it has none"""
    try:
        objname = item.Name
    except BadEPFieldError:
        objname = ' '
return objname | f8875b6e9c9ed2b76affe39db583c091257865d8 | 3,655,471 |
import pandas as pd

def process_fire_data(filename=None, fire=None, and_save=False, timezone='Asia/Bangkok', to_drop=True):
    """ Add datetime, drop duplicate data and remove unnecessary columns.
"""
if filename:
fire = pd.read_csv(filename)
# add datetime
fire = add_datetime_fire(fire, timezone)
# drop duplicate data
print('before drop', fire.shape)
# sort values by brightness
try:
# for MODIS file
fire = fire.sort_values(
['datetime', 'lat_km', 'long_km', 'brightness'], ascending=False)
except BaseException:
# for VIIRS
fire = fire.sort_values(
['datetime', 'lat_km', 'long_km', 'bright_ti4'], ascending=False)
if to_drop:
fire = fire.drop_duplicates(['datetime', 'lat_km', 'long_km'])
    # drop unnecessary columns
try:
columns_to_drop = [
'acq_date',
'satellite',
'instrument',
'version',
'daynight',
'bright_t31',
'type']
columns_to_drop = [s for s in columns_to_drop if s in fire.columns]
fire = fire.drop(columns_to_drop, axis=1)
except BaseException:
columns_to_drop = [
'acq_date',
'satellite',
'instrument',
'version',
'daynight',
'bright_ti5',
'type']
columns_to_drop = [s for s in columns_to_drop if s in fire.columns]
fire = fire.drop(columns_to_drop, axis=1)
fire = fire.sort_values('datetime')
fire = fire.set_index('datetime')
# remove the data before '2002-07-04' because there is only one satellite
fire = fire.loc['2002-07-04':]
print('after drop', fire.shape)
if and_save:
fire.to_csv(filename, index=False)
else:
return fire | 767bb77db2b3815a5646f185b72727aec74ee8d8 | 3,655,472 |
def create_controllable_source(source, control, loop, sleep):
"""Makes an observable controllable to handle backpressure
This function takes an observable as input makes it controllable by
executing it in a dedicated worker thread. This allows to regulate
the emission of the items independently of the asyncio event loop.
Args:
- source: An observable emitting the source items.
- control: [Optional] The control observable emitting delay items in seconds.
    - sleep: the sleep function to use. Needed only for testing.
Returns:
An observable similar to the source observable, with emission being
controlled by the control observable.
"""
if control is not None:
typed_control = control.pipe(
ops.observe_on(NewThreadScheduler()),
ops.map(ControlItem),
)
scheduled_source = source.pipe(
ops.subscribe_on(NewThreadScheduler()),
ops.merge(typed_control),
ops.map(lambda i: control_sync(i, sleep)),
ops.filter(lambda i: i is not ControlItem),
ops.observe_on(AsyncIOThreadSafeScheduler(loop)),
)
else:
scheduled_source = source.pipe(
ops.subscribe_on(NewThreadScheduler()),
ops.observe_on(AsyncIOThreadSafeScheduler(loop)),
)
return scheduled_source | 2092de1aaeace275b2fea2945e8d30f529309874 | 3,655,473 |
def getE5():
"""
    Return the current value of E5.
"""
return E5.get() | 35526332b957628a6aa3fd90f7104731749e10ed | 3,655,474 |
def triangulate(pts_subset):
"""
This function encapsulates the whole triangulation algorithm into four
steps. The function takes as input a list of points. Each point is of the
form [x, y], where x and y are the coordinates of the point.
Step 1) The list of points is split into groups. Each group has exactly
two or three points.
Step 2) For each group of two point, a single edge is generated. For each
group of three points, three edges forming a triangle are
generated. These are the 'primitive' triangulations.
Step 3) The primitive triangulations are paired into groups.
Step 4) The groups are then recursively merged until there is only a
single triangulation of all points remaining.
Parameters
----------
pts_subset : list
A list of points with the form [ [x1, y1], [x2, y2], ..., [xn, yn] ]
The first element of each list represents the x-coordinate, the second
entry the y-coordinate.
Returns
-------
out : list
List with a single element. The TriangulationEdges class object with
the completed Delauney triangulation of the input points.
See TriangulationEdges docstring for further info.
"""
split_pts = split_list.groups_of_3(pts_subset)
primitives = make_primitives(split_pts)
groups = [primitives[i:i+2] for i in range(0, len(primitives), 2)]
groups = recursive_group_merge(groups)
return groups[0][0] | 55e145a44303409a4e1ede7f14e4193c06efd769 | 3,655,475 |
def get_session(region, default_bucket):
"""Gets the sagemaker session based on the region.
Args:
region: the aws region to start the session
default_bucket: the bucket to use for storing the artifacts
Returns:
        `sagemaker.session.Session` instance
"""
boto_session = boto3.Session(region_name=region)
sagemaker_client = boto_session.client("sagemaker")
runtime_client = boto_session.client("sagemaker-runtime")
return sagemaker.session.Session(
boto_session=boto_session,
sagemaker_client=sagemaker_client,
sagemaker_runtime_client=runtime_client,
default_bucket=default_bucket,
) | 1bfbea7aeb30f33772c1b748580f0776463203a4 | 3,655,476 |
import vtk

def intp_sc(x, points):
"""
SCurve spline based interpolation
args:
x (list) : t coordinate list
points (list) : xyz coordinate input points
returns:
x (relative coordinate point list)
o (xyz coordinate points list, resplined)
"""
sc = vtk.vtkSCurveSpline()
for i in points:
sc.AddPoint(i[0], i[1])
o = []
for i in x:
o.append(sc.Evaluate(i))
return x, o | 13df2e19c91ff0469ce68467e9e4df36c0e4831b | 3,655,477 |
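A short usage sketch (requires the vtk package; despite the docstring, each input point is a 2-tuple fed to AddPoint):

pts = [(0.0, 0.0), (1.0, 2.0), (2.0, 0.5)]
ts, ys = intp_sc([0.0, 0.5, 1.0, 1.5, 2.0], pts)
# ys holds the S-curve spline evaluated at each parameter value in ts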
def backend():
"""Publicly accessible method
for determining the current backend.
# Returns
String, the name of the backend PyEddl is currently using.
# Example
```python
>>> eddl.backend.backend()
'eddl'
```
"""
return _BACKEND | b811dd6a760006e572aa02fc246fdf72ac7e608c | 3,655,478 |
import json
def query_collection_mycollections():
"""
Query Content API Collection with access token.
"""
access_token = request.args.get("access_token", None)
if access_token is not None and access_token != '':
# Construct an Authorization header with the value of 'Bearer <access token>'
headers = {
"Accept": "application/json",
"Authorization": "Bearer " + access_token
}
url = APP_CONFIG['CONTENTAPI_COLLECTIONS_URL'] + 'mycollections'
r = s.get(url, headers=headers, verify=(app.config['SSLVERIFY'] == 'True'))
if r.status_code in (400,500):
# Handle known errors
result = r.json()
return jsonify(result)
elif r.status_code == 200:
result = r.json()
params = {
'access_token': access_token,
'endpoint_path': '/mycollections',
'mycollections_results': json.dumps(result, indent=2),
'mycollections_results_obj': result
}
return render_template('mycollections.html', **params)
else:
# Handle unknown error
return (r.text, r.status_code, r.headers.items())
else:
return "access_token not specified" | 98b8a75ea515255fde327d11c959f5e9b6d9ea43 | 3,655,479 |
def xmlbuildmanual() -> __xml_etree:
"""
    Returns an empty xml ElementTree obj to build/work with xml data
    Assign the output to a variable
This is using the native xml library via etree shipped with the python standard library.
For more information on the xml.etree api, visit: https://docs.python.org/3/library/xml.etree.elementtree.html#module-xml.etree.ElementTree
"""
return __xml_etree | e08d83aca4b140c2e289b5173e8877e3c3e5fee1 | 3,655,480 |
def graclus_cluster(row, col, weight=None, num_nodes=None):
"""A greedy clustering algorithm of picking an unmarked vertex and matching
it with one its unmarked neighbors (that maximizes its edge weight).
Args:
row (LongTensor): Source nodes.
col (LongTensor): Target nodes.
weight (Tensor, optional): Edge weights. (default: :obj:`None`)
num_nodes (int, optional): The number of nodes. (default: :obj:`None`)
Examples::
>>> row = torch.LongTensor([0, 1, 1, 2])
>>> col = torch.LongTensor([1, 0, 2, 1])
>>> weight = torch.Tensor([1, 1, 1, 1])
>>> cluster = graclus_cluster(row, col, weight)
"""
num_nodes = row.max().item() + 1 if num_nodes is None else num_nodes
if row.is_cuda: # pragma: no cover
row, col = sort_row(row, col)
else:
row, col = randperm(row, col)
row, col = randperm_sort_row(row, col, num_nodes)
row, col = remove_self_loops(row, col)
cluster = row.new_empty((num_nodes, ))
graclus(cluster, row, col, weight)
return cluster | c586ffd325697302a2413e613a75fe4302741af6 | 3,655,481 |
def _get_serve_tf_examples_fn(model, tf_transform_output):
"""Returns a function that parses a serialized tf.Example."""
model.tft_layer = tf_transform_output.transform_features_layer()
@tf.function
def serve_tf_examples_fn(serialized_tf_examples):
"""Returns the output to be used in the serving signature."""
feature_spec = tf_transform_output.raw_feature_spec()
feature_spec.pop(_LABEL_KEY)
parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
transformed_features = model.tft_layer(parsed_features)
return model(transformed_features)
return serve_tf_examples_fn | 801bfde2b72823b2ba7b8329f080774ca9aa536f | 3,655,482 |
import decimal
def get_profitable_change(day_candle):
""" Get the potential daily profitable price change in pips.
If prices rise enough, we have: close_bid - open_ask (> 0), buy.
If prices fall enough, we have: close_ask - open_bid (< 0), sell.
if prices stay relatively still, we don't buy or sell. It's 0.
Args:
day_candle: candles.Candle object representing a daily candle.
Returns:
profitable_change: Decimal. The profitable rate change described
above, in two decimal places.
"""
multiplier = day_candle.instrument.multiplier
change = 0
if day_candle.close_bid > day_candle.open_ask:
change = multiplier * (day_candle.close_bid - day_candle.open_ask)
elif day_candle.close_ask < day_candle.open_bid:
change = multiplier * (day_candle.close_ask - day_candle.open_bid)
return decimal.Decimal(change).quantize(TWO_PLACES) | 94adc63d984e7797590a2dd3eb33c8d98b09c76e | 3,655,483 |
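An illustrative call with a stand-in candle; SimpleNamespace mimics the candles.Candle attributes used above, and TWO_PLACES is assumed to be Decimal('0.01'):

from types import SimpleNamespace
TWO_PLACES = decimal.Decimal("0.01")  # assumed value of the module-level constant
candle = SimpleNamespace(
    open_ask=1.1000, close_bid=1.1010,  # prices rose enough to buy
    open_bid=1.0998, close_ask=1.1008,
    instrument=SimpleNamespace(multiplier=10000),
)
print(get_profitable_change(candle))  # Decimal('10.00')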
def run_epoch():
"""Runs one epoch and returns reward averaged over test episodes"""
rewards = []
for _ in range(NUM_EPIS_TRAIN):
run_episode(for_training=True)
for _ in range(NUM_EPIS_TEST):
rewards.append(run_episode(for_training=False))
return np.mean(np.array(rewards)) | d9f5e33e00eaeedfdff7ebb4d3a7731d679beff1 | 3,655,485 |
import torch.nn as nn

def _max_pool(heat, kernel=3):
    """
    NCHW
    do max pooling and keep only the local-maximum responses
    """
    # print("heat.shape: ", heat.shape)  # default: torch.Size([1, 1, 152, 272])
    pad = (kernel - 1) // 2
    h_max = nn.functional.max_pool2d(heat, (kernel, kernel), stride=1, padding=pad)
    # print("h_max.shape: ", h_max.shape)  # default: torch.Size([1, 1, 152, 272])
    keep = (h_max == heat).float()  # convert the boolean tensor to a float tensor
    # print("keep.shape: ", keep.shape, "keep:\n", keep)
return heat * keep | 68edc979d78ee2fc11f94efd2dfb5e9140762a0e | 3,655,486 |
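A quick peak-suppression demo in the CenterNet decoding style; only local maxima of the heatmap survive:

import torch
heat = torch.rand(1, 1, 8, 8)
peaks = _max_pool(heat, kernel=3)
print((peaks > 0).sum().item(), "local maxima kept")  # peaks equals heat at maxima, 0 elsewhere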
def getBool(string):
"""
Stub function, set PshellServer.py softlink to PshellServer-full.py for full functionality
"""
return (True) | de7f6a4b124b6a1f6e1b878daf01cc14e5d5eb08 | 3,655,487 |
from collections import Counter

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
def multi_dists(
continuous,
categorical,
count_cutoff,
summary_type,
ax=None,
stripplot=False,
order="ascending",
newline_counts=False,
xtick_rotation=45,
xtick_ha="right",
seaborn_kwargs={},
stripplot_kwargs={},
):
"""
Compare the distributions of a continuous variable when grouped
by a categorical one.
Parameters
----------
continuous : Series
continuous values to plot
categorical : Series
categorical values (groups) to plot
    count_cutoff : int
        minimum number of samples per group to include
summary_type : string, "box" or "violin"
type of summary plot to make
ax : MatPlotLib axis
axis to plot in (will create new one if not provided)
stripplot : boolean
whether or not to plot the raw values
order : "ascending", "descending", or list of categories
how to sort categories in the plot
newline_counts : boolean
whether to add category counts as a separate line
in the axis labels
xtick_rotation : float
how much to rotate the xtick labels by (in degree)
xtick_ha : string
horizontal alignment of the xtick labels
seaborn_kwargs : dictionary
additional arguments to pass to Seaborn boxplot/violinplot
stripplot_kwargs : dictionary
additional arguments to pass to Seaborn stripplot (if stripplot=True)
Returns
-------
ax : MatPlotLib axis
axis with plot data
"""
if ax is None:
ax = plt.subplot(111)
# remove NaNs and convert continuous
continuous = pd.Series(continuous).dropna()
categorical = pd.Series(categorical).dropna().astype(str)
# series names
continuous_name = str(continuous.name)
categorical_name = str(categorical.name)
# handle cases where series names are missing or identical
if continuous_name is None:
continuous_name = "continuous"
if categorical_name is None:
categorical_name = "categorical"
if continuous_name == categorical_name:
continuous_name += "_continuous"
categorical_name += "_categorical"
merged = pd.concat([continuous, categorical], axis=1, join="inner")
merged.columns = [continuous_name, categorical_name]
# counts per category, with cutoff
categorical_counts = Counter(merged[categorical_name])
merged["count"] = merged[categorical_name].apply(categorical_counts.get)
merged = merged[merged["count"] >= count_cutoff]
merged_sorted = (
merged.groupby([categorical_name])[continuous_name]
.aggregate(np.median)
.reset_index()
)
# sort categories by mean
if order == "ascending":
merged_sorted = merged_sorted.sort_values(
continuous_name, ascending=True
)
order = merged_sorted[continuous_name]
elif order == "descending":
merged_sorted = merged_sorted.sort_values(
continuous_name, ascending=False
)
order = merged_sorted[continuous_name]
else:
merged_sorted["continuous_idx"] = merged_sorted[
categorical_name
].apply(order.index)
merged_sorted = merged_sorted.sort_values(
"continuous_idx", ascending=True
)
# recompute category counts after applying cutoff
counts = merged_sorted[categorical_name].apply(categorical_counts.get)
counts = counts.astype(str)
# x-axis labels with counts
if newline_counts:
x_labels = merged_sorted[categorical_name] + "\n(" + counts + ")"
else:
x_labels = merged_sorted[categorical_name] + " (" + counts + ")"
if summary_type == "violin":
sns.violinplot(
x=categorical_name,
y=continuous_name,
data=merged,
order=merged_sorted[categorical_name],
inner=None,
ax=ax,
**seaborn_kwargs,
)
elif summary_type == "box":
sns.boxplot(
x=categorical_name,
y=continuous_name,
data=merged,
order=merged_sorted[categorical_name],
notch=True,
ax=ax,
**seaborn_kwargs,
)
if stripplot:
sns.stripplot(
x=categorical_name,
y=continuous_name,
data=merged,
order=merged_sorted[categorical_name],
size=2,
alpha=0.5,
linewidth=1,
jitter=0.1,
edgecolor="black",
ax=ax,
**stripplot_kwargs,
)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.set_xticklabels(x_labels, rotation=xtick_rotation, ha=xtick_ha)
return ax | 84302dfc359c62c67ef0757ea7c0841e5292b19d | 3,655,488 |
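A usage sketch with seaborn's bundled tips dataset, assuming the imports added at the top of this snippet:

tips = sns.load_dataset("tips")
ax = multi_dists(
    tips["total_bill"], tips["day"],
    count_cutoff=10, summary_type="box", stripplot=True,
)
plt.tight_layout()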
import xdg
def expand_xdg(xdg_var: str, path: str) -> PurePath:
"""Return the value of an XDG variable prepended to path.
This function expands an XDG variable, and then concatenates to it the
given path. The XDG variable name can be passed both uppercase or
lowercase, and either with or without the 'XDG_' prefix.
"""
xdg_var = xdg_var if xdg_var.startswith('XDG_') else 'XDG_' + xdg_var
return getattr(xdg, xdg_var.upper()) / path | 13b8885a08c384d29636c5f5070a86e05c30b43a | 3,655,489 |
def follow_index(request):
    """View subscriptions"""
users = request.user.follower.all()
paginator = Paginator(users, 3)
page_number = request.GET.get('page')
page = paginator.get_page(page_number)
return render(request, 'recipes/follow_index.html',
{'page': page, 'paginator': paginator}) | 00851ccffde0e76544e5732d0f651f6e00bc45d4 | 3,655,490 |
def test_drawcounties_cornbelt():
"""draw counties on the map"""
mp = MapPlot(sector="cornbelt", title="Counties", nocaption=True)
mp.drawcounties()
return mp.fig | 8bba5c33c374bf3f4e892bc984b72de22c97e38d | 3,655,491 |
import torch
def atomic_degrees(mol: IndigoObject) -> dict:
"""Get the number of atoms direct neighbors (except implicit hydrogens) in a molecule.
    Args:
        mol (IndigoObject): molecule object
Returns:
dict: key - feature name, value - torch.tensor of atomic degrees
"""
degrees = []
for atom in mol.iterateAtoms():
degrees.append(atom.degree())
return {"degrees": torch.tensor(degrees).unsqueeze(1)} | 114431622574985bd016276a7a809560c896e1bc | 3,655,492 |
import numpy as np
from xarray import DataArray

def hotspots(raster, kernel, x='x', y='y'):
"""Identify statistically significant hot spots and cold spots in an input
raster. To be a statistically significant hot spot, a feature will have a
high value and be surrounded by other features with high values as well.
Neighborhood of a feature defined by the input kernel, which currently
support a shape of circle, annulus, or custom kernel.
The result should be a raster with the following 7 values:
90 for 90% confidence high value cluster
95 for 95% confidence high value cluster
99 for 99% confidence high value cluster
-90 for 90% confidence low value cluster
-95 for 95% confidence low value cluster
-99 for 99% confidence low value cluster
0 for no significance
Parameters
----------
raster: xarray.DataArray
Input raster image with shape=(height, width)
kernel: Kernel
Returns
-------
hotspots: xarray.DataArray
"""
# validate raster
if not isinstance(raster, DataArray):
raise TypeError("`raster` must be instance of DataArray")
if raster.ndim != 2:
raise ValueError("`raster` must be 2D")
if not (issubclass(raster.values.dtype.type, np.integer) or
issubclass(raster.values.dtype.type, np.floating)):
raise ValueError(
"`raster` must be an array of integers or float")
raster_dims = raster.dims
if raster_dims != (y, x):
        raise ValueError("raster.coords should be named as coordinates: "
                         "({}, {})".format(y, x))
# apply kernel to raster values
mean_array = convolve_2d(raster.values, kernel / kernel.sum(), pad=True)
# calculate z-scores
global_mean = np.nanmean(raster.values)
global_std = np.nanstd(raster.values)
if global_std == 0:
raise ZeroDivisionError("Standard deviation "
"of the input raster values is 0.")
z_array = (mean_array - global_mean) / global_std
out = _hotspots(z_array)
result = DataArray(out,
coords=raster.coords,
dims=raster.dims,
attrs=raster.attrs)
return result | ab091924bb36576e338c38d752df3f856de331cb | 3,655,493 |
import configparser
import logging
def _read_config(filename):
"""Reads configuration file.
Returns DysonLinkCredentials or None on error.
"""
config = configparser.ConfigParser()
logging.info('Reading "%s"', filename)
try:
config.read(filename)
except configparser.Error as ex:
logging.critical('Could not read "%s": %s', filename, ex)
return None
try:
username = config['Dyson Link']['username']
password = config['Dyson Link']['password']
country = config['Dyson Link']['country']
return DysonLinkCredentials(username, password, country)
except KeyError as ex:
logging.critical('Required key missing in "%s": %s', filename, ex)
return None | bb1282e94500c026072a0e8d76fdf3dcd68e9062 | 3,655,495 |
def view_share_link(request, token):
"""
Translate a given sharelink to a proposal-detailpage.
:param request:
:param token: sharelink token, which includes the pk of the proposal
:return: proposal detail render
"""
try:
pk = signing.loads(token, max_age=settings.MAXAGESHARELINK)
except signing.SignatureExpired:
return render(request, "base.html", {
"Message": "Share link has expired!"
})
except signing.BadSignature:
return render(request, "base.html", {
"Message": "Invalid token in share link!"
})
obj = get_object_or_404(Proposal, pk=pk)
return render(request, "proposals/detail_project.html", {
"proposal": obj,
"project": obj
}) | 9d88375f1f3c9c0b94ad2beab4f47ce74ea2464e | 3,655,496 |
from sklearn.pipeline import Pipeline
def create(pdef):
"""Scikit-learn Pipelines objects creation (deprecated).
This function creates a list of sklearn Pipeline objects starting from the
list of list of tuples given in input that could be created using the
adenine.core.define_pipeline module.
Parameters
-----------
pdef : list of list of tuples
        This argument contains the specification needed by sklearn in order
to create a working Pipeline object.
Returns
-----------
pipes : list of sklearn.pipeline.Pipeline objects
        The list of Pipelines, each of which can be fitted and transformed
        with some data.
"""
return [Pipeline(p) for p in pdef] | 552014c652d7de236ba917592108315acfd9c694 | 3,655,497 |
def pressure_differentiable(altitude):
"""
Computes the pressure at a given altitude with a differentiable model.
Args:
altitude: Geopotential altitude [m]
Returns: Pressure [Pa]
"""
return np.exp(interpolated_log_pressure(altitude)) | a6a9e7dcc38ac855f3ba5c9a117506a87a981217 | 3,655,498 |
def create_optimizer(hparams, global_step, use_tpu=False):
"""Creates a TensorFlow Optimizer.
Args:
hparams: ConfigDict containing the optimizer configuration.
global_step: The global step Tensor.
use_tpu: If True, the returned optimizer is wrapped in a
CrossShardOptimizer.
Returns:
A TensorFlow optimizer.
Raises:
ValueError: If hparams.optimizer is unrecognized.
"""
optimizer_name = hparams.optimizer.lower()
optimizer_params = {}
if optimizer_name == "momentum":
optimizer_class = tf.train.MomentumOptimizer
optimizer_params["momentum"] = hparams.get("momentum", 0.9)
optimizer_params["use_nesterov"] = hparams.get("use_nesterov", False)
elif optimizer_name == "sgd":
optimizer_class = tf.train.GradientDescentOptimizer
elif optimizer_name == "adagrad":
optimizer_class = tf.train.AdagradOptimizer
elif optimizer_name == "adam":
optimizer_class = tf.train.AdamOptimizer
elif optimizer_name == "rmsprop":
    optimizer_class = tf.train.RMSPropOptimizer
else:
raise ValueError("Unknown optimizer: {}".format(hparams.optimizer))
# Apply weight decay wrapper.
optimizer_class = (
tf.contrib.opt.extend_with_decoupled_weight_decay(optimizer_class))
# Create optimizer.
learning_rate, weight_decay = create_learning_rate_and_weight_decay(
hparams, global_step)
optimizer = optimizer_class(
weight_decay=weight_decay,
learning_rate=learning_rate,
**optimizer_params)
if use_tpu:
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
return optimizer | 9ff18644fe513e3d01b297e30538c396546746ab | 3,655,499 |
def compare_dirs_ignore_words(dir1, dir2, ignore_words, ignore_files=None):
"""Same as compare_dirs but ignores lines with words in ignore_words.
"""
return compare_dirs(
dir1,
dir2,
ignore=ignore_files,
function=lambda file1, file2:
compare_text_files_ignore_lines(file1, file2, ignore_words)
) | 2794027a638a7f775ae559b250a500e28a218e4a | 3,655,500 |
from decimal import Decimal

def float_to_wazn(value):
    """Converts a float value to an integer in the WAZN notation.
    The float format has a maximum of 6 decimal digits.
:param value: value to convert from float to WAZN notation
:returns: converted value in WAZN notation
"""
return int(Decimal(value) / MICRO_WAZN) | 6bf10dfbefe51b2a785c4d0504e446662487b485 | 3,655,501 |
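A quick check under the assumption, implied by the 6-decimal note, that MICRO_WAZN is Decimal('0.000001'):

MICRO_WAZN = Decimal("0.000001")  # assumed value of the module-level constant
print(float_to_wazn(1.5))  # 1500000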
import functools
import time

def timer(func):
    """ Decorator to measure execution time """
    @functools.wraps(func)  # preserve the wrapped function's metadata
    def wrapper(*args, **kwargs):
        start_time = time.time()
        ret = func(*args, **kwargs)
        elapsed = time.time() - start_time
        print('{:s}: {:4f} sec'.format(func.__name__, elapsed))
        return ret
return wrapper | 0f6a8a4dc8eff1aa49efaf5d26ac46e0cc483b3e | 3,655,502 |
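Typical usage:

@timer
def slow_add(a, b):
    time.sleep(0.2)
    return a + b

slow_add(1, 2)  # prints something like "slow_add: 0.200123 sec"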
import uuid
def _create_keyword_plan_campaign(client, customer_id, keyword_plan):
"""Adds a keyword plan campaign to the given keyword plan.
Args:
client: An initialized instance of GoogleAdsClient
customer_id: A str of the customer_id to use in requests.
        keyword_plan: A str of the keyword plan resource_name this keyword plan
            campaign should be attributed to.
Returns:
A str of the resource_name for the newly created keyword plan campaign.
Raises:
GoogleAdsException: If an error is returned from the API.
"""
keyword_plan_campaign_service = client.get_service(
"KeywordPlanCampaignService"
)
operation = client.get_type("KeywordPlanCampaignOperation")
keyword_plan_campaign = operation.create
keyword_plan_campaign.name = f"Keyword plan campaign {uuid.uuid4()}"
keyword_plan_campaign.cpc_bid_micros = 1000000
keyword_plan_campaign.keyword_plan = keyword_plan
network = client.enums.KeywordPlanNetworkEnum.GOOGLE_SEARCH
keyword_plan_campaign.keyword_plan_network = network
geo_target = client.get_type("KeywordPlanGeoTarget")
# Constant for U.S. Other geo target constants can be referenced here:
# https://developers.google.com/google-ads/api/reference/data/geotargets
geo_target.geo_target_constant = "geoTargetConstants/2840"
keyword_plan_campaign.geo_targets.append(geo_target)
# Constant for English
language = "languageConstants/1000"
keyword_plan_campaign.language_constants.append(language)
response = keyword_plan_campaign_service.mutate_keyword_plan_campaigns(
customer_id=customer_id, operations=[operation]
)
resource_name = response.results[0].resource_name
print(f"Created keyword plan campaign with resource name: {resource_name}")
return resource_name | b6ce2ee2ec40e1192461c41941f18fe04f901344 | 3,655,503 |
from gensim.models import Word2Vec

def word2vec(sentences, year):
    """
    Creates a word2vec model.
    @param sentences: list of list of words in each sentence (title + abstract)
    @param year: first year of the decade the model covers
    @return word2vec model
"""
print("Creating word2vec model")
model = Word2Vec(sentences, size=500, window=5, min_count=1, workers=4)
model.save(f"models/decades/word2vec_{str(year)}-{str(year+9)}.model")
print("Saved word2vec model")
return model | 745bd15f4c0cea5b9417fd0562625426bd5cd293 | 3,655,504 |
def true_rjust(string, width, fillchar=' '):
""" Justify the string to the right, using printable length as the width. """
return fillchar * (width - true_len(string)) + string | 53a8cbfd049c21821b64e1a218c9af2a7b4c8b7d | 3,655,505 |
import tensorflow as tf

def threshold_generator_with_values(values, duration, num_classes):
"""
Args:
values: A Tensor with shape (-1,)
Values = strictly positive, float thresholds.
duration: An int.
num_classes: An int.
Returns:
        thresh: A Tensor with shape
            (values.shape[0], duration, num_classes, num_classes).
In each matrix,
diag = 0, and off-diag shares a single value > 0.
Matrices are sorted in ascending order of the values
w.r.t. axis=0.
"""
num_thresh = values.shape[0]
thresh = tf.reshape(values, [num_thresh, 1, 1, 1])
thresh = tf.tile(thresh, [1, duration, num_classes, num_classes])
# (num thresh, num cls, num cls)
mask = tf.linalg.tensor_diag([-1.] * num_classes) + 1
thresh *= mask
# Now diag = 0.
thresh += mask * 1e-11
    # Avoids 0 thresholds, which may occur
# when logits for different classes have the same value,
# e.g., 0, due to loss of significance.
# This operation may cause sparsity of SAT curve
# if llr_min is << 1e-11, but such a case is ignorable
# in practice, according to my own experience.
return thresh | 58aa50e08beaba8f299af3ec9dfeb3de652e6471 | 3,655,506 |
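A shape check (TensorFlow 2.x); diagonals are zero while off-diagonals carry the per-threshold value:

thresh = threshold_generator_with_values(
    tf.constant([0.5, 1.0]), duration=4, num_classes=3)
print(thresh.shape)  # (2, 4, 3, 3)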
def is_hermitian(mx, tol=1e-9):
"""
Test whether mx is a hermitian matrix.
Parameters
----------
mx : numpy array
Matrix to test.
tol : float, optional
        Tolerance on absolute magnitude of elements.
Returns
-------
bool
True if mx is hermitian, otherwise False.
"""
(m, n) = mx.shape
for i in range(m):
if abs(mx[i, i].imag) > tol: return False
for j in range(i + 1, n):
if abs(mx[i, j] - mx[j, i].conjugate()) > tol: return False
return True | 31e9a1faff21707b2fc44c7824bb05fc85967f00 | 3,655,507 |
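Quick check with a classic 2x2 example:

import numpy as np
print(is_hermitian(np.array([[1, 1j], [-1j, 2]])))  # True
print(is_hermitian(np.array([[1, 1j], [1j, 2]])))   # False: off-diagonals are not conjugates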
def argmax(a, b, axis=1, init_value=-1, name="argmax"):
    """ compute the argmax along the given axis; b receives [arg index, max value] """
assert axis<len(a.shape) and len(a.shape)<=2, "invalid axis"
assert b.shape[axis] == 2, "shape mismatch"
size = a.shape[axis] # save max arg index
def argmax2d(A, B):
init = hcl.compute((2,), lambda x: init_value)
r = hcl.reduce_axis(0, size, name="rdx")
# Y as reducer tensor
def sreduce(x, Y):
with hcl.if_(x > Y[1]):
Y[0] = r
Y[1] = x
my_argmax = hcl.reducer(init, sreduce)
if axis == 1:
return hcl.update(B,
lambda x, _y: my_argmax(A[x, r], axis=r), name=name)
else: # reduce in y axis
return hcl.update(B,
lambda _x, y: my_argmax(A[r, y], axis=r), name=name)
# return decorated function
mod = hcl.def_([a.shape, b.shape], name=name)(argmax2d)
mod(a, b) | 3626126cae255498cab854f8b898a7d0f730b20d | 3,655,508 |
def morphology(src, operation="open", kernel_shape=(3, 3), kernel_type="ones"):
    """Dynamically calls different morphological operations
    ("open", "close", "dilate" and "erode") with the given parameters
Arguments:
src (numpy.ndarray) : source image of shape (rows, cols)
operation (str, optional) : name of a morphological operation:
``("open", "close", "dilate", "erode")``
Defaults to ``"open"``.
kernel_shape (tuple, optional) : shape of the kernel (rows, cols).
Defaults to (3,3).
kernel_type (str, optional) : type of kernel.
``("ones", "upper_triangle", "lower_triangle", "x", "plus", "ellipse")``
Defaults to ``"ones"``.
Returns:
numpy.ndarray: a copy of the source image after apply the effect.
"""
kernel = create_2D_kernel(kernel_shape, kernel_type)
if operation == "open":
return open(src, kernel)
elif operation == "close":
return close(src, kernel)
elif operation == "dilate":
return dilate(src, kernel)
elif operation == "erode":
return erode(src, kernel)
else:
valid_operations = ["open", "close", "dilate", "erode"]
raise ValueError(
f"Invalid morphology operation '{operation}'. Valid morphological operations are {valid_operations}"
) | f8258616f07b9dd0089d323d9237483c05b86c2e | 3,655,509 |
def msd(traj, mpp, fps, max_lagtime=100, detail=False, pos_columns=None):
"""Compute the mean displacement and mean squared displacement of one
trajectory over a range of time intervals.
Parameters
----------
traj : DataFrame with one trajectory, including columns frame, x, and y
mpp : microns per pixel
fps : frames per second
max_lagtime : intervals of frames out to which MSD is computed
Default: 100
detail : See below. Default False.
Returns
-------
DataFrame([<x>, <y>, <x^2>, <y^2>, msd], index=t)
If detail is True, the DataFrame also contains a column N,
the estimated number of statistically independent measurements
that comprise the result at each lagtime.
Notes
-----
Input units are pixels and frames. Output units are microns and seconds.
See also
--------
imsd
emsd
"""
if traj['frame'].max() - traj['frame'].min() + 1 == len(traj):
# no gaps: use fourier-transform algorithm
return _msd_fft(traj, mpp, fps, max_lagtime, detail, pos_columns)
else:
# there are gaps in the trajectory: use slower algorithm
return _msd_gaps(traj, mpp, fps, max_lagtime, detail, pos_columns) | 4511eb3e0d69ab5581635ba93db6dedfd387eb84 | 3,655,510 |
import random
def build_rnd_graph(golden, rel, seed=None):
"""Build a random graph for testing."""
def add_word(word):
if word not in words:
words.add(word)
def add_edge(rel, word1, word2):
data.append((rel, word1, word2))
random.seed(seed)
m, _ = golden.shape
words = set()
for i in range(m):
if golden['relation'][i] != rel:
continue
add_word(golden['word1_id'][i])
add_word(golden['word2_id'][i])
data = []
for word1 in words:
for word2 in words:
if word1 >= word2:
continue
if random.randint(0, 1):
add_edge(rel, word1, word2)
add_edge(rel, word2, word1)
df = pd.DataFrame(data, columns=('relation', 'word1_id', 'word2_id'),
index=range(len(data)))
return df | 46eacb5a51cf94ee27ed33757887afca4cc153ff | 3,655,511 |
from typing import Union
import pathlib
from typing import IO
from typing import AnyStr
import inspect
import pandas
def _make_parser_func(sep):
"""
Create a parser function from the given sep.
Parameters
----------
sep: str
The separator default to use for the parser.
Returns
-------
A function object.
"""
def parser_func(
filepath_or_buffer: Union[str, pathlib.Path, IO[AnyStr]],
sep=sep,
delimiter=None,
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
iterator=False,
chunksize=None,
compression="infer",
thousands=None,
decimal: str = ".",
lineterminator=None,
quotechar='"',
quoting=0,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
error_bad_lines=True,
warn_bad_lines=True,
skipfooter=0,
doublequote=True,
delim_whitespace=False,
low_memory=True,
memory_map=False,
float_precision=None,
):
# ISSUE #2408: parse parameter shared with pandas read_csv and read_table and update with provided args
_pd_read_csv_signature = {
val.name for val in inspect.signature(pandas.read_csv).parameters.values()
}
_, _, _, f_locals = inspect.getargvalues(inspect.currentframe())
if f_locals.get("sep", sep) is False:
f_locals["sep"] = "\t"
kwargs = {k: v for k, v in f_locals.items() if k in _pd_read_csv_signature}
return _read(**kwargs)
return parser_func | 318edb7e761e163c828878a1c186edc535d824a1 | 3,655,512 |
import cv2 as cv
from PIL import Image
from pydicom import dcmread

def dcm_to_pil_image_gray(file_path):
"""Read a DICOM file and return it as a gray scale PIL image"""
ds = dcmread(file_path)
# Get the image after apply clahe
img_filtered = Image.fromarray(apply_clahe(ds.pixel_array).astype("uint8"))
# Normalize original image to the interval [0, 255]
img = cv.normalize(ds.pixel_array, None, alpha=0, beta=255, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U)
img = Image.fromarray(img.astype("uint8"))
return [img, img_filtered] | c85b149dd1d02f24e60411ffb6d0dccfb1afd949 | 3,655,514 |
from typing import Any
def get_object_unique_name(obj: Any) -> str:
"""Return a unique string associated with the given object.
That string is constructed as follows: <object class name>_<object_hex_id>
"""
return f"{type(obj).__name__}_{hex(id(obj))}" | f817abf636673f7ef6704cbe0ff5a7a2b897a3f6 | 3,655,515 |
def create_voting_dict(voting_data):
"""
Input: a list of strings. Each string represents the voting record of a senator.
The string consists of
- the senator's last name,
- a letter indicating the senator's party,
- a couple of letters indicating the senator's home state, and
- a sequence of numbers (0's, 1's, and negative 1's) indicating the senator's
votes on bills
all separated by spaces.
Output: A dictionary that maps the last name of a senator
to a list of numbers representing the senator's voting record.
Example:
>>> vd = create_voting_dict(['Kennedy D MA -1 -1 1 1', 'Snowe R ME 1 1 1 1'])
>>> vd == {'Snowe': [1, 1, 1, 1], 'Kennedy': [-1, -1, 1, 1]}
True
You can use the .split() method to split each string in the
strlist into a list; the first element of the list will be the senator's
name, the second will be his/her party affiliation (R or D), the
third will be his/her home state, and the remaining elements of
the list will be that senator's voting record on a collection of bills.
You can use the built-in procedure int() to convert a string
representation of an integer (e.g. '1') to the actual integer
(e.g. 1).
The lists for each senator should preserve the order listed in voting data.
In case you're feeling clever, this can be done in one line.
"""
voting_dic = {}
for s in voting_data:
s = s.strip()
items = s.split(' ')
voting_dic[items[0]] = [int(v) for v in items[3:]]
return voting_dic | 4a662c110b88ae92ea548da6caa15b01b82f1cf2 | 3,655,516 |
def areFriends(profile1, profile2):
    """Checks whether profile1 is connected to profile2 and profile2 is connected to profile1"""
def check(p1, p2):
if p1.isServiceIdentity:
fsic = get_friend_serviceidentity_connection(p2.user, p1.user)
return fsic is not None and not fsic.deleted
else:
friend_map = get_friends_map(p1.user)
return friend_map is not None and remove_slash_default(p2.user) in friend_map.friends
return check(profile1, profile2) and check(profile2, profile1) | 89eb04d0b8cce054e75d26d194d3f88f9b5970db | 3,655,517 |
def filter_dict(regex_dict, request_keys):
"""
filter regular expression dictionary by request_keys
:param regex_dict: a dictionary of regular expressions that
follows the following format:
{
"name": "sigma_aldrich",
"regexes": {
"manufacturer": {
"regex": "[C|c]ompany(?P\u003cdata\u003e.{80})",
"flags": "is"
},
"product_name": {
"regex": "\\s[P|p]roduct\\s(?P\u003cdata\u003e.{80})",
"flags": "is"
},
...
}
returns
{
'sigma_aldrich': {
"manufacturer": {
"regex": "[C|c]ompany(?P\u003cdata\u003e.{80})",
"flags": "is"
},
}
:param request_keys: a list of dictionary keys that correspond to valid
regex lookups i.e. ['manufacturer', 'product_name']
"""
out_dict = dict()
nested_regexes = regex_dict['regexes']
for request_key in request_keys:
if request_key in nested_regexes:
out_dict[request_key] = nested_regexes[request_key]
return {'name': regex_dict['name'], 'regexes': out_dict} | fb503f0d4df0a7965c276907b7a9e43bd14f9cac | 3,655,518 |
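A compact demo mirroring the docstring's structure:

regex_dict = {
    "name": "sigma_aldrich",
    "regexes": {
        "manufacturer": {"regex": r"[C|c]ompany(?P<data>.{80})", "flags": "is"},
        "product_name": {"regex": r"\s[P|p]roduct\s(?P<data>.{80})", "flags": "is"},
    },
}
print(filter_dict(regex_dict, ["manufacturer"]))
# only the 'manufacturer' entry survives under 'regexes'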
import six
def calculate_partition_movement(prev_assignment, curr_assignment):
"""Calculate the partition movements from initial to current assignment.
Algorithm:
For each partition in initial assignment
# If replica set different in current assignment:
# Get Difference in sets
:rtype: tuple
dict((partition, (from_broker_set, to_broker_set)), total_movements
"""
total_movements = 0
movements = {}
for prev_partition, prev_replicas in six.iteritems(prev_assignment):
curr_replicas = curr_assignment[prev_partition]
diff = len(set(curr_replicas) - set(prev_replicas))
if diff:
total_movements += diff
movements[prev_partition] = (
(set(prev_replicas) - set(curr_replicas)),
(set(curr_replicas) - set(prev_replicas)),
)
return movements, total_movements | 180a47944523f0c814748d1918935e47d9a7ada4 | 3,655,519 |
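For example, one replica of a single partition moving from broker 2 to broker 3 counts as one movement:

prev = {("topic", 0): [1, 2]}
curr = {("topic", 0): [1, 3]}
movements, total = calculate_partition_movement(prev, curr)
# movements == {("topic", 0): ({2}, {3})}; total == 1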
from typing import List
from typing import Union
import torch
from typing import Sequence
def correct_crop_centers(
centers: List[Union[int, torch.Tensor]],
spatial_size: Union[Sequence[int], int],
label_spatial_shape: Sequence[int],
) -> List[int]:
"""
Utility to correct the crop center if the crop size is bigger than the image size.
Args:
        centers: pre-computed crop centers, will correct based on the valid region.
spatial_size: spatial size of the ROIs to be sampled.
label_spatial_shape: spatial shape of the original label data to compare with ROI.
"""
spatial_size = fall_back_tuple(spatial_size, default=label_spatial_shape)
if not (np.subtract(label_spatial_shape, spatial_size) >= 0).all():
raise ValueError("The size of the proposed random crop ROI is larger than the image size.")
# Select subregion to assure valid roi
valid_start = np.floor_divide(spatial_size, 2)
# add 1 for random
valid_end = np.subtract(label_spatial_shape + np.array(1), spatial_size / np.array(2)).astype(np.uint16)
# int generation to have full range on upper side, but subtract unfloored size/2 to prevent rounded range
# from being too high
for i, valid_s in enumerate(valid_start):
# need this because np.random.randint does not work with same start and end
if valid_s == valid_end[i]:
valid_end[i] += 1
for i, c in enumerate(centers):
center_i = c
if c < valid_start[i]:
center_i = valid_start[i]
if c >= valid_end[i]:
center_i = valid_end[i] - 1
centers[i] = center_i
corrected_centers: List[int] = [c.item() if isinstance(c, torch.Tensor) else c for c in centers] # type: ignore
return corrected_centers | d8a28d464a4d0fcd8c1ad8ed0b790713aaa878e2 | 3,655,520 |
def contrast_normalize(data, centered=False):
"""Normalizes image data to have variance of 1
Parameters
----------
data : array-like
data to be normalized
centered : boolean
When False (the default), centers the data first
Returns
-------
data : array-like
normalized data
"""
if not centered:
data = center(data)
data = np.divide(data, np.sqrt(np.var(data)))
return data | e85e5488e0c69bcf0233c03f3595a336b3ad7921 | 3,655,521 |
def create_gdrive_folders(website_short_id: str) -> bool:
"""Create gdrive folder for website if it doesn't already exist"""
folder_created = False
service = get_drive_service()
base_query = "mimeType = 'application/vnd.google-apps.folder' and not trashed and "
query = f"{base_query}name = '{website_short_id}'"
fields = "nextPageToken, files(id, name, parents)"
folders = list(query_files(query=query, fields=fields))
if settings.DRIVE_UPLOADS_PARENT_FOLDER_ID:
filtered_folders = []
for folder in folders:
ancestors = get_parent_tree(folder["parents"])
if settings.DRIVE_UPLOADS_PARENT_FOLDER_ID in [
ancestor["id"] for ancestor in ancestors
]:
filtered_folders.append(folder)
else:
filtered_folders = folders
if len(filtered_folders) == 0:
folder_metadata = {
"name": website_short_id,
"mimeType": DRIVE_MIMETYPE_FOLDER,
}
if settings.DRIVE_UPLOADS_PARENT_FOLDER_ID:
folder_metadata["parents"] = [settings.DRIVE_UPLOADS_PARENT_FOLDER_ID]
else:
folder_metadata["parents"] = [settings.DRIVE_SHARED_ID]
folder = (
service.files()
.create(supportsAllDrives=True, body=folder_metadata, fields="id")
.execute()
)
folder_created = True
else:
folder = filtered_folders[0]
Website.objects.filter(short_id=website_short_id).update(gdrive_folder=folder["id"])
for subfolder in [
DRIVE_FOLDER_FILES,
DRIVE_FOLDER_FILES_FINAL,
DRIVE_FOLDER_VIDEOS_FINAL,
]:
query = f"{base_query}name = '{subfolder}' and parents = '{folder['id']}'"
folders = list(query_files(query=query, fields=fields))
if len(folders) == 0:
folder_metadata = {
"name": subfolder,
"mimeType": DRIVE_MIMETYPE_FOLDER,
"parents": [folder["id"]],
}
service.files().create(
supportsAllDrives=True, body=folder_metadata, fields="id"
).execute()
folder_created = True
return folder_created | 21928843c47bbc3b175b65a7268eb63e0bec1275 | 3,655,522 |
def filter_for_recognized_pumas(df):
"""Written for income restricted indicator but can be used for many other
indicators that have rows by puma but include some non-PUMA rows. Sometimes
we set nrows in read csv/excel but this approach is more flexible"""
return df[df["puma"].isin(get_all_NYC_PUMAs())] | fe3c608495603f74300dc79a4e50d185d87ca799 | 3,655,523 |
def school_booking_cancel(request, pk_booking):
"""Render the school booking cancel page for a school representative.
:param request: httprequest received
:type request: HttpRequest
:param pk_booking: Primary Key of a Booking
:type pk_booking: int
:return: Return a HttpResponse whose content is filled with the result of the passed arguments.
:rtype: HttpResponse
"""
booking = Booking.objects.get(id=pk_booking)
if request.method == "POST":
booking.status = "Cancelled"
booking.reason_cancellation = request.POST["reason_cancellation"]
booking.save()
admin_email = ADMIN_EMAIL
send_email_booking_cancellation(admin_email, booking)
return redirect("school-dashboard")
data = {"booking": booking}
return render(request, "schoolApp/school-booking-cancel.html", data) | 1b0e09119ba453efdf486e11a57d92c508a497ff | 3,655,525 |
def bandpass_filter(df, spiky_var):
"""Detect outliers according to a passband filter specific to each variable.
Parameters
----------
df: pandas DataFrame that contains the spiky variable
spiky_var: string that designate the spiky variable
Returns
-------
id_outlier: index of outliers"""
if spiky_var == 'LE':
id_bandpass = ( df[spiky_var] < -35 ) | ( df[spiky_var] > 300 ) # in [W+1m-2]
elif spiky_var == 'H':
id_bandpass = ( df[spiky_var] < -100 ) | ( df[spiky_var] > 400 ) # in [W+1m-2]
elif spiky_var == 'CO2_flux':
id_bandpass = ( df[spiky_var] < -10 ) | ( df[spiky_var] > 20 ) # in [µmol+1s-1m-2]
elif spiky_var == 'CH4_flux':
id_bandpass = ( df[spiky_var] < -0.1 ) | ( df[spiky_var] > 0.25 ) # in [µmol+1s-1m-2]
return id_bandpass | a20e3861f04212fe8b3e44d278da9ed58d545d1c | 3,655,526 |
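Typical use is to mask out the flagged rows:

import pandas as pd
df = pd.DataFrame({"LE": [-50.0, 10.0, 250.0, 400.0]})
clean = df[~bandpass_filter(df, "LE")]  # keeps the rows with LE of 10.0 and 250.0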
def load_energy():
    """Loads the energy file, skipping all useless information and returns it as a dataframe"""
energy = pd.read_excel("Energy Indicators.xls", skiprows=17, header=0,
skip_footer=53-15, na_values="...", usecols=[2,3,4,5])
# Rename columns
energy.columns = ["Country", "Energy Supply [Petajoules]", "Energy Supply per Capita [Gigajoules]", "% Renewable"]
# Exclude numbers from country names
energy["Country"] = energy["Country"].str.replace("\d+", "")
# Delete the parentheses
energy["Country"] = energy["Country"].str.replace("\(.*\)", "")
return energy | 10c9e638398d74eed57ccab414cac5577623c6cf | 3,655,527 |
import re
def list_list_to_string(list_lists,data_delimiter=None,row_formatter_string=None,line_begin=None,line_end=None):
"""Repeatedly calls list to string on each element of a list and string adds the result
. ie coverts a list of lists to a string. If line end is None the value defaults to "\n", for no seperator use ''
"""
if line_end is None:
line_end="\n"
check_arg_type(list_lists,ListType)
string_out=""
for index,row in enumerate(list_lists):
if index==len(list_lists)-1:
            if line_end == "\n":
last_end=""
else:
last_end=re.sub("\n","",line_end,count=1)
string_out=string_out+list_to_string(row,data_delimiter=data_delimiter,
row_formatter_string=row_formatter_string,
begin=line_begin,end=last_end)
else:
string_out=string_out+list_to_string(row,data_delimiter=data_delimiter,
row_formatter_string=row_formatter_string,
begin=line_begin,end=line_end)
return string_out | d1e69d21205fcc21e186a8dd160c1817fb1f0f68 | 3,655,528 |
import numpy as np

def sampen(L, m):
    """
    Sample entropy of sequence L with embedding dimension m
    and tolerance r = 0.2 * std(L).
    """
N = len(L)
r = (np.std(L) * .2)
B = 0.0
A = 0.0
# Split time series and save all templates of length m
xmi = np.array([L[i: i + m] for i in range(N - m)])
xmj = np.array([L[i: i + m] for i in range(N - m + 1)])
# Save all matches minus the self-match, compute B
B = np.sum([np.sum(np.abs(xmii - xmj).max(axis=1) <= r) - 1 for xmii in xmi])
# Similar for computing A
m += 1
xm = np.array([L[i: i + m] for i in range(N - m + 1)])
A = np.sum([np.sum(np.abs(xmi - xm).max(axis=1) <= r) - 1 for xmi in xm])
# Return SampEn
return -np.log(A / B) | 0a97e8dd8c4edbf2ec4cbbdff3241af7de3f2a66 | 3,655,530 |
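A smoke test on white noise; use a reasonably long series, since -log(A/B) fails if no template matches are found:

import numpy as np
rng = np.random.default_rng(0)
series = rng.normal(size=300).tolist()
print(sampen(series, m=2))  # roughly 2.2 for Gaussian white noise at r = 0.2*std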
from typing import Union
def total(score: Union[int, RevisedResult]) -> int:
"""
Return the total number of successes (negative for a botch).
If `score` is an integer (from a 1st/2nd ed. die from :func:`standard` or
:func:`special`) then it is returned unmodified.
If `score` is a :class:`RevisedResult` (from :func:`revised_standard` or
:func:`revised_special`) then the value returned is the net successes,
except in the special case where there were successes but they were all
cancelled out by botches. In that case return 0 even if the net successes
is negative.
"""
return int(score) | 849b757875ea461b0ea6bf4a63270e6f5fbac28c | 3,655,531 |
def rbbox_overlaps_v3(bboxes1, bboxes2, mode='iou', is_aligned=False):
"""Calculate overlap between two set of bboxes.
Args:
bboxes1 (torch.Tensor): shape (B, m, 5) in <cx, cy, w, h, a> format
or empty.
bboxes2 (torch.Tensor): shape (B, n, 5) in <cx, cy, w, h, a> format
or empty.
        mode (str): "iou" (intersection over union) or "iof" (intersection
            over foreground). Default "iou".
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
Returns:
Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
"""
assert mode in ['iou', 'iof']
    # Either the boxes are empty or the length of the boxes' last dimension is 5
assert (bboxes1.size(-1) == 5 or bboxes1.size(0) == 0)
assert (bboxes2.size(-1) == 5 or bboxes2.size(0) == 0)
rows = bboxes1.size(0)
cols = bboxes2.size(0)
if is_aligned:
assert rows == cols
if rows * cols == 0:
return bboxes1.new(rows, 1) if is_aligned else bboxes1.new(rows, cols)
return obb_overlaps(bboxes1, bboxes2, mode, is_aligned) | cdd414b02ac08c5a8bc494ed7319942e26bc5f02 | 3,655,533 |
import warnings
def get_target_compute_version(target=None):
"""Utility function to get compute capability of compilation target.
Looks for the arch in three different places, first in the target attributes, then the global
scope, and finally the GPU device (if it exists).
Parameters
----------
target : tvm.target.Target, optional
The compilation target
Returns
-------
compute_version : str
compute capability of a GPU (e.g. "8.0")
"""
# 1. Target
if target:
if "arch" in target.attrs:
compute_version = target.attrs["arch"]
major, minor = compute_version.split("_")[1]
return major + "." + minor
# 2. Global scope
from tvm.autotvm.env import AutotvmGlobalScope # pylint: disable=import-outside-toplevel
if AutotvmGlobalScope.current.cuda_target_arch:
major, minor = AutotvmGlobalScope.current.cuda_target_arch.split("_")[1]
return major + "." + minor
# 3. GPU
if tvm.gpu(0).exist:
return tvm.gpu(0).compute_version
warnings.warn(
"No CUDA architecture was specified or GPU detected."
"Try specifying it by adding '-arch=sm_xx' to your target."
)
return None | ad55cbb81fb74521175ea6fbdcba54cc14a409cb | 3,655,534 |
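A usage sketch, assuming TVM is installed; the target string is illustrative.
import tvm
target = tvm.target.Target("cuda -arch=sm_80")
print(get_target_compute_version(target))  # -> "8.0"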
def get_poet_intro_by_id(uid):
"""
get poet intro by id
:param uid:
:return:
"""
return Poet.get_poet_by_id(uid) | d6fd84bd150ee8ce72becdb6b27b67d1c21c7a9b | 3,655,535 |
import datetime
def create_post():
"""Создать пост"""
user = get_user_from_request()
post = Post(
created_date=datetime.datetime.now(),
updated_date=datetime.datetime.now(),
creator=user,
)
json = request.get_json()
url = json["url"]
if Post.get_or_none(Post.url == url) is not None:
return errors.post_url_already_taken()
error = set_blog(post, json, user)
if error is not None:
error_response = {
BlogError.NoBlog: errors.blog_not_found(),
BlogError.NoAccess: errors.blog_no_access(),
}[error]
return error_response
fill_post_from_json(post, json)
post.save()
set_tags_for_post(post, json)
manage_jam_entries(post, json)
return jsonify({"success": 1, "post": post.to_json()}) | 7b8eaf74cda78198d08ca6eac66f1f13a12c4341 | 3,655,536 |
import async_timeout
async def fetch(session, url):
"""Method to fetch data from a url asynchronously
"""
async with async_timeout.timeout(30):
async with session.get(url) as response:
return await response.json() | d8ff22df047fece338dcfe4c6286766a563ff9aa | 3,655,537 |
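A hedged usage sketch; the URL is illustrative and any JSON-returning endpoint works.
import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as session:
        data = await fetch(session, "https://api.github.com")
        print(data)

asyncio.run(main())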
def recurse_while(predicate, f, *args):
"""
Accumulate value by executing recursively function `f`.
The function `f` is executed with starting arguments. While the
predicate for the result is true, the result is fed into function `f`.
If predicate is never true then starting arguments are returned.
:param predicate: Predicate function guarding execution.
:param f: Function to execute.
:param *args: Starting arguments.
"""
result = f(*args)
    result = result if isinstance(result, tuple) else (result, )
while predicate(*result):
args = result # predicate(args) is always true
result = f(*args)
        result = result if isinstance(result, tuple) else (result, )
return args if len(args) > 1 else args[0] | fd3313760c246336519a2e89281cc94a2bee6833 | 3,655,538 |
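A worked example of the accumulate-while-predicate pattern:
# Keep doubling while the running value stays below 100; the last value
# that satisfied the predicate (96) is returned.
print(recurse_while(lambda x: x < 100, lambda x: x * 2, 3))  # -> 96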
import timeit
import logging
def construct_lookup_variables(train_pos_users, train_pos_items, num_users):
"""Lookup variables"""
index_bounds = None
sorted_train_pos_items = None
def index_segment(user):
lower, upper = index_bounds[user:user + 2]
items = sorted_train_pos_items[lower:upper]
negatives_since_last_positive = np.concatenate(
[items[0][np.newaxis], items[1:] - items[:-1] - 1])
return np.cumsum(negatives_since_last_positive)
start_time = timeit.default_timer()
inner_bounds = np.argwhere(train_pos_users[1:] -
train_pos_users[:-1])[:, 0] + 1
(upper_bound,) = train_pos_users.shape
index_bounds = np.array([0] + inner_bounds.tolist() + [upper_bound])
# Later logic will assume that the users are in sequential ascending order.
assert np.array_equal(train_pos_users[index_bounds[:-1]], np.arange(num_users))
sorted_train_pos_items = train_pos_items.copy()
for i in range(num_users):
lower, upper = index_bounds[i:i + 2]
sorted_train_pos_items[lower:upper].sort()
total_negatives = np.concatenate([
index_segment(i) for i in range(num_users)])
logging.info("Negative total vector built. Time: {:.1f} seconds".format(
timeit.default_timer() - start_time))
return total_negatives, index_bounds, sorted_train_pos_items | c3e1087cce5a38d681379f30d7c6dee8d1544e60 | 3,655,540 |
def total_allocation_constraint(weight, allocation: float, upper_bound: bool = True):
"""
Used for inequality constraint for the total allocation.
:param weight: np.array
:param allocation: float
:param upper_bound: bool if true the constraint is from above (sum of weights <= allocation) else from below
     (sum of weights >= allocation)
:return: np.array
"""
if upper_bound:
return allocation - weight.sum()
else:
return weight.sum() - allocation | b92c4bd18d1c6246ff202987c957a5098fd66ba1 | 3,655,541 |
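A small numeric sketch (values are illustrative):
import numpy as np
w = np.array([0.3, 0.4])
print(total_allocation_constraint(w, allocation=1.0))                     # ~0.3, slack below the cap
print(total_allocation_constraint(w, allocation=0.5, upper_bound=False))  # ~0.2, excess above the floor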
import numpy as np
def sigmoid(x):
""" computes sigmoid of x """
return 1.0/(1.0 + np.exp(-x)) | cd34b4ed9fe08607ea3fec1dce89bee7c34efeb0 | 3,655,542 |
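Quick sanity checks for the sigmoid above:
print(sigmoid(0.0))                         # 0.5
print(sigmoid(np.array([-2.0, 0.0, 2.0])))  # ~[0.119, 0.5, 0.881]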
def handle_error(err):
"""Catches errors with processing client requests and returns message"""
code = 500
error = 'Error processing the request'
if isinstance(err, HTTPError):
code = err.code
error = str(err.message)
return jsonify(error=error, code=code), code | d6f9051bab504852720f657d04bc6afa72794047 | 3,655,543 |
from pyunitwizard.kernel import default_form, default_parser
from pyunitwizard import convert as _convert, get_dimensionality as _get_dimensionality
from typing import Dict
def dimensionality(quantity_or_unit: str) -> Dict[str, int]:
""" Returns the dimensionality of the quantity or unit.
Parameters
-----------
quantity_or_unit : str
        A quantity or a unit
Returns
-------
dimensionality_dict : dict
        Dictionary whose keys are fundamental units and whose values are the exponents of
        each unit in the quantity.
"""
tmp_quantity_or_unit = _convert(quantity_or_unit, to_form=default_form, parser=default_parser)
return _get_dimensionality(tmp_quantity_or_unit) | 4c334c6283a57704036c414cfd52f79b875a93fc | 3,655,544 |
import re
def split_prec_rows(df):
"""Split precincts into two rows.
NOTE: Because this creates a copy of the row values, don't rely on total vote counts, just look at percentage.
"""
for idx in df.index:
# look for rows with precincts that need to be split
        if re.search(r'\d{4}/\d{4}', idx):
row_values = df.loc[idx]
split = idx.split('/')
for p in split:
df.loc[p] = row_values
# delete original row
df = df.drop(idx, axis=0)
return(df) | 72ba424080b0ff3e04ecc5d248bc85b4f409167c | 3,655,545 |
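A usage sketch with hypothetical precinct data; the combined "0101/0102" row is copied to both precincts.
import pandas as pd
df = pd.DataFrame({"votes": [120, 45]}, index=["0101/0102", "0200"])
print(split_prec_rows(df))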
def socfaker_elasticecsfields_host():
"""
Returns an ECS host dictionary
Returns:
dict: Returns a dictionary of ECS
host fields/properties
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields.host)) | 41a2624eda28eae736398ece87aee2ee2028987c | 3,655,546 |
from textwrap import dedent
def _moog_writer(photosphere, filename, **kwargs):
"""
Writes an :class:`photospheres.photosphere` to file in a MOOG-friendly
format.
:param photosphere:
The photosphere.
    :param filename:
The filename to write the photosphere to.
"""
def _get_xi():
xi = photosphere.meta["stellar_parameters"].get("microturbulence", 0.0)
if 0 >= xi:
logger.warn("Invalid microturbulence value: {:.3f} km/s".format(xi))
return xi
if photosphere.meta["kind"] == "marcs":
xi = _get_xi()
output = dedent("""
WEBMARCS
MARCS (2011) TEFF/LOGG/[M/H]/XI {1:.0f}/{2:.3f}/{3:.3f}/{4:.3f}
NTAU {0:.0f}
5000.0
""".format(len(photosphere),
photosphere.meta["stellar_parameters"]["effective_temperature"],
photosphere.meta["stellar_parameters"]["surface_gravity"],
photosphere.meta["stellar_parameters"]["metallicity"],
xi)).lstrip()
for i, line in enumerate(photosphere):
output += " {0:>3.0f} {0:>3.0f} {1:10.3e} {0:>3.0f} {2:10.3e} "\
"{3:10.3e} {4:10.3e}\n".format(i + 1, line["lgTau5"], line["T"],
line["Pe"], line["Pg"])
output += " {0:.3f}\n".format(xi)
output += "NATOMS 0 {0:.3f}\n".format(
photosphere.meta["stellar_parameters"]["metallicity"])
output += "NMOL 0\n"
elif photosphere.meta["kind"] == "castelli/kurucz":
xi = _get_xi()
output = dedent("""
KURUCZ
CASTELLI/KURUCZ (2004) {1:.0f}/{2:.3f}/{3:.3f}/{4:.3f}/{5:.3f}
NTAU {0:.0f}
""".format(len(photosphere),
photosphere.meta["stellar_parameters"]["effective_temperature"],
photosphere.meta["stellar_parameters"]["surface_gravity"],
photosphere.meta["stellar_parameters"]["metallicity"],
photosphere.meta["stellar_parameters"]["alpha_enhancement"],
xi)).lstrip()
for line in photosphere:
output += " {0:.8e} {1:10.3e}{2:10.3e}{3:10.3e}{4:10.3e}\n".format(
line["RHOX"], line["T"], line["P"], line["XNE"], line["ABROSS"])
output += " {0:.3f}\n".format(xi)
output += "NATOMS 0 {0:.3f}\n".format(
photosphere.meta["stellar_parameters"]["metallicity"])
output += "NMOL 0\n"
# MOOG11 fails to read if you don't add an extra line
output += "\n"
else:
raise ValueError("photosphere kind '{}' cannot be written to a MOOG-"\
"compatible format".format(photosphere.meta["kind"]))
with open(filename, "w") as fp:
fp.write(output)
return None | da5a952e15984aecd914ebcf9381900924fdeff1 | 3,655,547 |
def upcomingSplits(
symbol="",
exactDate="",
token="",
version="stable",
filter="",
format="json",
):
"""This will return all upcoming estimates, dividends, splits for a given symbol or the market. If market is passed for the symbol, IPOs will also be included.
https://iexcloud.io/docs/api/#upcoming-events
Args:
symbol (str): Symbol to look up
exactDate (str): exactDate Optional. Exact date for which to get data
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Returns:
dict or DataFrame: result
"""
return _baseEvent(
"splits",
symbol=symbol,
exactDate=exactDate,
token=token,
version=version,
filter=filter,
format=format,
) | ae21f8c04bf7bf2eacd8b16aa62c9fae7750e042 | 3,655,548 |
def mu_model(u, X, U, k):
"""
Returns the utility of the kth player
Parameters
----------
    u : weight vector of player k
    X : data matrix
    U : matrix whose columns hold all players' weight vectors
    k : index of the player
    Returns
    -------
    The player's rewards minus alignment penalties against the first k columns of U
"""
M = X.T @ X
rewards = M @ u
penalties = u.T @ M @ U[:, :k] * U[:, :k]
return rewards - penalties.sum(axis=1) | 59bce1ce8617f0e11340d1c1ab18315fd81e6925 | 3,655,549 |
from transformers import AutoTokenizer
def tokenizer_init(model_name):
"""simple wrapper for auto tokenizer"""
tokenizer = AutoTokenizer.from_pretrained(model_name)
return tokenizer | c54accf802fcbcf1479ccb0745266a5182edb73b | 3,655,550 |
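A usage sketch; the model name is illustrative and its tokenizer files are downloaded on first use.
tok = tokenizer_init("bert-base-uncased")
print(tok("hello world")["input_ids"])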
def insert_message(nick, message, textDirection):
"""
Insert record
"""
ins = STATE['messages_table'].insert().values(
nick=nick, message=message, textDirection=textDirection)
res = STATE['conn'].execute(ins)
ltr = 1 if textDirection == 'ltr' else 0
rtl = 1 if textDirection == 'rtl' else 0
STATE['conn'].execute(
'update message_stats set ltr = ltr + ?, rtl = rtl + ?',
ltr, rtl)
return {
'id': res.lastrowid
} | 1163fab2342aa5e41b321055bbf75f4c23fbb031 | 3,655,551 |
from typing import Tuple
from typing import Dict
def process_metadata(metadata) -> Tuple[Dict[str, str], Dict[str, str]]:
""" Returns a tuple of valid and invalid metadata values. """
if not metadata:
return {}, {}
valid_values = {}
invalid_values = {}
for m in metadata:
key, value = m.split("=", 1)
if key in supported_metadata_keys:
valid_values[key] = value
else:
invalid_values[key] = value
return valid_values, invalid_values | fbd7affaa8743d6c45bb2a02067d737dac990eb4 | 3,655,552 |
def rightOfDeciSeperatorToDeci(a):
"""This function only convert value at the right side of decimal seperator to decimal"""
deciNum = 0
for i in range(len(a)):
deciNum += (int(a[i]))*2**-(i+1)
return deciNum | 14cfd187758836d329ac4778a30167ddece0f2a0 | 3,655,553 |
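A worked example: the fractional bits "101" evaluate to 1*2**-1 + 0*2**-2 + 1*2**-3.
print(rightOfDeciSeperatorToDeci("101"))  # 0.625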
import torch
def conv(input, weight):
"""
Returns the convolution of input and weight tensors,
where input contains sequential data.
The convolution is along the sequence axis.
input is of size [batchSize, inputDim, seqLength]
"""
output = torch.nn.functional.conv1d(input=input, weight=weight)
return output | e213be11c423ff63a1ebffda55331298fcf53443 | 3,655,554 |
import torch
def irr_repr(order, alpha, beta, gamma, dtype = None):
"""
irreducible representation of SO3
- compatible with compose and spherical_harmonics
"""
cast_ = cast_torch_tensor(lambda t: t)
dtype = default(dtype, torch.get_default_dtype())
alpha, beta, gamma = map(cast_, (alpha, beta, gamma))
return wigner_d_matrix(order, alpha, beta, gamma, dtype = dtype) | ff054a05c2d79a4dcfc903116cefdfce4fa56c8f | 3,655,555 |
from typing import List
from typing import Optional
def label_to_span(labels: List[str],
scheme: Optional[str] = 'BIO') -> dict:
"""
convert labels to spans
:param labels: a list of labels
:param scheme: labeling scheme, in ['BIO', 'BILOU'].
    :return: labeled spans, a dict mapping (start_idx, end_idx) tuples to labels
"""
assert scheme in ['BIO', 'BILOU'], ValueError("unknown labeling scheme")
labeled_spans = dict()
i = 0
while i < len(labels):
if labels[i] == 'O' or labels[i] == 'ABS':
i += 1
continue
else:
if scheme == 'BIO':
if labels[i][0] == 'B':
start = i
lb = labels[i][2:]
i += 1
try:
while labels[i][0] == 'I':
i += 1
end = i
labeled_spans[(start, end)] = lb
except IndexError:
end = i
labeled_spans[(start, end)] = lb
i += 1
# this should not happen
elif labels[i][0] == 'I':
i += 1
elif scheme == 'BILOU':
if labels[i][0] == 'U':
start = i
end = i + 1
lb = labels[i][2:]
labeled_spans[(start, end)] = lb
i += 1
elif labels[i][0] == 'B':
start = i
lb = labels[i][2:]
i += 1
try:
while labels[i][0] != 'L':
i += 1
end = i
labeled_spans[(start, end)] = lb
except IndexError:
end = i
labeled_spans[(start, end)] = lb
break
i += 1
else:
i += 1
return labeled_spans | 01e3a1f3d72f8ec0b1cfa2c982fc8095c06c09f8 | 3,655,556 |
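A short usage sketch with toy label sequences:
print(label_to_span(["B-PER", "I-PER", "O", "B-LOC"]))  # {(0, 2): 'PER', (3, 4): 'LOC'}
print(label_to_span(["U-LOC", "O"], scheme="BILOU"))    # {(0, 1): 'LOC'}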
from typing import Optional
def get_storage_account(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetStorageAccountResult:
"""
The storage account.
:param str account_name: The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
:param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:storage/v20160501:getStorageAccount', __args__, opts=opts, typ=GetStorageAccountResult).value
return AwaitableGetStorageAccountResult(
access_tier=__ret__.access_tier,
creation_time=__ret__.creation_time,
custom_domain=__ret__.custom_domain,
encryption=__ret__.encryption,
id=__ret__.id,
kind=__ret__.kind,
last_geo_failover_time=__ret__.last_geo_failover_time,
location=__ret__.location,
name=__ret__.name,
primary_endpoints=__ret__.primary_endpoints,
primary_location=__ret__.primary_location,
provisioning_state=__ret__.provisioning_state,
secondary_endpoints=__ret__.secondary_endpoints,
secondary_location=__ret__.secondary_location,
sku=__ret__.sku,
status_of_primary=__ret__.status_of_primary,
status_of_secondary=__ret__.status_of_secondary,
tags=__ret__.tags,
type=__ret__.type) | c818e801d152f2b11bedac42bcefe322b94ab16e | 3,655,557 |
def format_and_add(graph, info, relation, name):
"""
    input: graph and three strings
function formats the strings and adds to the graph
"""
info = info.replace(" ", "_")
name = name.replace(" ", "_")
inf = rdflib.URIRef(project_prefix + info)
rel = rdflib.URIRef(project_prefix + relation)
nm = rdflib.URIRef(project_prefix + name)
graph.add((inf, rel, nm))
return None | abbe87b923d4ac37262e391a7d9a878b65e4ff41 | 3,655,558 |
def to_log_space(p:float, bounds: BOUNDS_TYPE):
""" Interprets p as a point in a rectangle in R^2 or R^3 using Morton space-filling curve
:param bounds [ (low,high), (low,high), (low,high) ] defaults to unit cube
:param dim Dimension. Only used if bounds are not supplied.
Very similar to "to_space" but assumes speed varies with logarithm
"""
assert 0 <= p <= 1
dim = len(bounds)
us = list(reversed(ZCurveConventions().to_cube(zpercentile=p, dim=dim))) # 0 < us[i] < 1
return [to_log_space_1d(u, low=b[0], high=b[1]) for u, b in zip(us, bounds)] | 2ab53687481100bda229456b8aa5fad4d8ef817d | 3,655,560 |
import numpy as np
import pandas as pd
def rsi_tradingview(ohlc: pd.DataFrame, period: int = 14, round_rsi: bool = True):
""" Implements the RSI indicator as defined by TradingView on March 15, 2021.
The TradingView code is as follows:
//@version=4
study(title="Relative Strength Index", shorttitle="RSI", format=format.price, precision=2, resolution="")
len = input(14, minval=1, title="Length")
src = input(close, "Source", type = input.source)
up = rma(max(change(src), 0), len)
down = rma(-min(change(src), 0), len)
rsi = down == 0 ? 100 : up == 0 ? 0 : 100 - (100 / (1 + up / down))
plot(rsi, "RSI", color=#8E1599)
band1 = hline(70, "Upper Band", color=#C0C0C0)
band0 = hline(30, "Lower Band", color=#C0C0C0)
fill(band1, band0, color=#9915FF, transp=90, title="Background")
    :param ohlc: dataframe with a "close" price column
    :param period: RSI averaging length
    :param round_rsi: round the result to 2 decimals if True
:return: an array with the RSI indicator values
"""
delta = ohlc["close"].diff()
up = delta.copy()
up[up < 0] = 0
up = pd.Series.ewm(up, alpha=1/period).mean()
down = delta.copy()
down[down > 0] = 0
down *= -1
down = pd.Series.ewm(down, alpha=1/period).mean()
rsi = np.where(up == 0, 0, np.where(down == 0, 100, 100 - (100 / (1 + up / down))))
return np.round(rsi, 2) if round_rsi else rsi | 5b9d96d5e174d6534c2d32a3ece83a5fb09b28ba | 3,655,561 |
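A toy usage sketch; the price series and short period are illustrative (the first value is NaN because there is no prior close to diff against).
ohlc = pd.DataFrame({"close": [10.0, 10.5, 10.3, 10.8, 11.0, 10.7, 11.2, 11.5]})
print(rsi_tradingview(ohlc, period=3))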
import numpy as np
def bin_by(x, y, nbins=30):
"""Bin x by y, given paired observations of x & y.
Returns the binned "x" values and the left edges of the bins."""
bins = np.linspace(y.min(), y.max(), nbins+1)
# To avoid extra bin for the max value
bins[-1] += 1
    indices = np.digitize(y, bins)
    output = []
    for i in range(1, len(bins)):
        output.append(x[indices==i])
# Just return the left edges of the bins
bins = bins[:-1]
return output, bins | ff0f9e79f3561cabf2a6498e31a78836be38bfb3 | 3,655,562 |
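A usage sketch with random data (illustrative):
rng = np.random.default_rng(1)
x, y = rng.normal(size=500), rng.uniform(0, 10, size=500)
groups, edges = bin_by(x, y, nbins=5)
print([len(g) for g in groups], edges)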
import pandas as pd
def calc_stats_with_cumsum(df_tgt, list_tgt_status, dict_diff, calc_type=0):
    """ Calculate statistics with cumulative sum of target status types. \n
    "dict_diff" is a dictionary of name keys and day-count difference values, e.g. {"perweek": 7, "per2week": 14} \n
calc_type=0: calculate for each simulation result. \n
calc_type=1: calculate for each daycount result. """
# Prepare front side of dataframe.
if calc_type == 0:
sim_num = len(df_tgt[list_tgt_status[0]].columns)
output_df = pd.DataFrame([i for i in range(sim_num)], columns=["sim_num"])
else:
output_df = df_tgt.iloc[:, :2].copy()
# Calculate statistics with cumulative sum.
for one_status in list_tgt_status:
# Extract target status data.
one_tgt_df = df_tgt[one_status]
# Calculate the days difference in dict_diff.
dict_df_diff = {}
for one_key, one_diff in dict_diff.items():
temp_df_diff = one_tgt_df.cumsum().diff(one_diff)
temp_df_diff.iloc[one_diff-1, :] = one_tgt_df.cumsum().iloc[one_diff-1, :]
dict_df_diff[one_key] = temp_df_diff
if calc_type == 0:
# Each simulation.
output_df.loc[:, "{}_perday_mean".format(one_status)] = one_tgt_df.T.mean(axis=1).values
output_df.loc[:, "{}_perday_std".format(one_status)] = one_tgt_df.T.std(axis=1).values
output_df.loc[:, "{}_perday_min".format(one_status)] = one_tgt_df.T.min(axis=1).values
output_df.loc[:, "{}_perday_quartile1".format(one_status)] = one_tgt_df.T.quantile(q=0.25, axis=1).values
output_df.loc[:, "{}_perday_median".format(one_status)] = one_tgt_df.T.median(axis=1).values
output_df.loc[:, "{}_perday_quartile3".format(one_status)] = one_tgt_df.T.quantile(q=0.75, axis=1).values
output_df.loc[:, "{}_perday_max".format(one_status)] = one_tgt_df.T.max(axis=1).values
for one_key, one_diff in dict_diff.items():
output_df.loc[:, "{}_{}_mean".format(one_status, one_key)] = dict_df_diff[one_key].T.mean(axis=1).values
output_df.loc[:, "{}_{}_std".format(one_status, one_key)] = dict_df_diff[one_key].T.std(axis=1).values
output_df.loc[:, "{}_{}_min".format(one_status, one_key)] = dict_df_diff[one_key].T.min(axis=1).values
output_df.loc[:, "{}_{}_quartile1".format(one_status, one_key)] = dict_df_diff[one_key].T.quantile(q=0.25, axis=1).values
output_df.loc[:, "{}_{}_median".format(one_status, one_key)] = dict_df_diff[one_key].T.median(axis=1).values
output_df.loc[:, "{}_{}_quartile3".format(one_status, one_key)] = dict_df_diff[one_key].T.quantile(q=0.75, axis=1).values
output_df.loc[:, "{}_{}_max".format(one_status, one_key)] = dict_df_diff[one_key].T.max(axis=1).values
else:
# Each day.
output_df.loc[:, "{}_perday_mean".format(one_status)] = one_tgt_df.mean(axis=1)
output_df.loc[:, "{}_perday_std".format(one_status)] = one_tgt_df.std(axis=1)
output_df.loc[:, "{}_perday_min".format(one_status)] = one_tgt_df.min(axis=1)
output_df.loc[:, "{}_perday_quartile1".format(one_status)] = one_tgt_df.quantile(q=0.25, axis=1)
output_df.loc[:, "{}_perday_median".format(one_status)] = one_tgt_df.median(axis=1)
output_df.loc[:, "{}_perday_quartile3".format(one_status)] = one_tgt_df.quantile(q=0.75, axis=1)
output_df.loc[:, "{}_perday_max".format(one_status)] = one_tgt_df.max(axis=1)
for one_key, one_diff in dict_diff.items():
# Note: Processing is well done, but numpy warning occurs.
# Note: Because all the data of first few days in "perweek" and "per2week" become np.NaN.
output_df.loc[:, "{}_{}_mean".format(one_status, one_key)] = dict_df_diff[one_key].mean(axis=1)
output_df.loc[:, "{}_{}_std".format(one_status, one_key)] = dict_df_diff[one_key].std(axis=1)
output_df.loc[:, "{}_{}_min".format(one_status, one_key)] = dict_df_diff[one_key].min(axis=1)
output_df.loc[:, "{}_{}_quartile1".format(one_status, one_key)] = dict_df_diff[one_key].quantile(q=0.25, axis=1)
output_df.loc[:, "{}_{}_median".format(one_status, one_key)] = dict_df_diff[one_key].median(axis=1)
output_df.loc[:, "{}_{}_quartile3".format(one_status, one_key)] = dict_df_diff[one_key].quantile(q=0.75, axis=1)
output_df.loc[:, "{}_{}_max".format(one_status, one_key)] = dict_df_diff[one_key].max(axis=1)
return output_df | 2dcdd95b8723f250a24afae548b8fd4ce9b5f51c | 3,655,563 |
def _normalize_handler_method(method):
"""Transforms an HTTP method into a valid Python identifier."""
return method.lower().replace("-", "_") | aad23dba304ba39708e4415de40019479ccf0195 | 3,655,564 |
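Quick examples of the normalization:
print(_normalize_handler_method("GET"))       # 'get'
print(_normalize_handler_method("X-Custom"))  # 'x_custom'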