content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
---|---|---|
from typing import Tuple
from typing import List
from typing import Type
from typing import _Final
def build_type(tp) -> Tuple[str, List[Type]]:
"""
Build a TypeScript type from a Python type annotation and collect its non-trivial type dependencies.
"""
tokens = tokenize_python_type(tp)
dependencies = [
token
for token in tokens
if token not in TYPE_MAPPING_WITH_GENERIC_FALLBACK
and not type(token) in TRIVIAL_TYPE_MAPPING
and not isinstance(token, _Final)
]
return _build_type(tokens), dependencies | 475362488b7fe07db035ce70ddb3ac40580412dd | 3,651,043 |
def laplacian_radial_kernel(distance, bandwidth=1.0):
"""Laplacian radial kernel.
Parameters
----------
distance : array-like
Array of non-negative real values.
bandwidth : float, optional (default=1.0)
Positive scale parameter of the kernel.
Returns
-------
weight : array-like
Array of non-negative real values of the same shape as
parameter 'distance'.
References
----------
http://crsouza.com/2010/03/17/
kernel-functions-for-machine-learning-applications/
https://data-flair.training/blogs/svm-kernel-functions/
"""
distance = _check_distance(distance)
bandwidth = _check_bandwidth(bandwidth)
scaled_distance = distance / bandwidth
weight = gs.exp(- scaled_distance)
return weight | fd5f777b0d21e3a7673a6589a76dd50f48384029 | 3,651,044 |
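For intuition, here is a minimal NumPy sketch of the same weighting (assuming plain arrays instead of the `gs` backend and the `_check_*` validators used above):

import numpy as np

def laplacian_weight(distance, bandwidth=1.0):
    # w(d) = exp(-d / h): weights decay exponentially with the scaled distance
    return np.exp(-np.asarray(distance, dtype=float) / bandwidth)

print(laplacian_weight([0.0, 1.0, 2.0]))  # [1.0, 0.36787944, 0.13533528]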
def build_eslog_config_param(
group_id,
task_name,
rt_id,
tasks,
topic,
table_name,
hosts,
http_port,
transport,
es_cluster_name,
es_version,
enable_auth,
user,
password,
):
"""
Build the Elasticsearch (ES) connector parameters.
:param group_id: cluster/consumer group name
:param task_name: task name
:param rt_id: rt_id
:param tasks: number of tasks
:param topic: source topic
:param table_name: table name
:param hosts: ES hosts
:param http_port: ES HTTP port
:param transport: ES transport port
:param es_cluster_name: ES cluster name
:param es_version: ES cluster version
:param enable_auth: whether authentication is enabled
:param user: username
:param password: password (encrypted)
:return: the connector parameter dict
"""
return {
"group.id": group_id,
"rt.id": rt_id,
"topics": topic,
"type.name": table_name,
"tasks.max": "%s" % tasks,
"es.index.prefix": table_name.lower(),
"es.cluster.name": es_cluster_name,
"es.cluster.version": es_version,
"es.hosts": hosts,
"es.transport.port": transport,
"es.host": hosts,
"es.http.port": http_port,
"connector.class": "com.tencent.bk.base.datahub.databus.connect.sink.es.EsSinkConnector",
"flush.timeout.ms": "10000",
"batch.size": "10000",
"max.in.flight.requests": "5",
"retry.backoff.ms": "5000",
"max.retry": "5",
"es.cluster.enable.auth": enable_auth,
"es.cluster.enable.PlaintextPwd": False, # 当前都是加密后的密码
"es.cluster.username": user,
"es.cluster.password": password,
} | 826b8d97ef14792845b4ced98ab5dcb3f36e57f3 | 3,651,045 |
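A hypothetical call for illustration (all values below are made up, not taken from any real cluster):

params = build_eslog_config_param(
    group_id="es-cluster-01", task_name="sync_logs", rt_id="100_demo_rt",
    tasks=3, topic="demo_topic", table_name="Demo_Table",
    hosts="es01.example.com", http_port=9200, transport=9300,
    es_cluster_name="es-cluster-01", es_version="7.10.1",
    enable_auth=True, user="admin", password="<encrypted>",
)
print(params["tasks.max"])        # "3"
print(params["es.index.prefix"])  # "demo_table"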
import pandas as pd
def disclosure(input_df, cur_period):
"""
Reads a csv into a data frame, converts some columns to int, and aggregates the current period by region.
:param input_df: Path to the csv file that is converted into a data frame.
:param cur_period: The current period for the results process.
:return: The current-period data grouped by region and land_or_marine.
"""
input_df = pd.read_csv(input_df, dtype={"Q601_asphalting_sand": int,
'Q602_building_soft_sand': int,
'Q603_concreting_sand': int,
'Q604_bituminous_gravel': int,
'Q605_concreting_gravel': int,
'Q606_other_gravel': int,
'Q607_constructional_fill': int,
'Q608_total': int,
'enterprise_ref': int, 'period': int,
'region': int})
input_df["disclosive"] = None
input_df["publish"] = None
input_df["reason"] = None
def run_disclosure(row):
if row['Q608_total'] == 0:
row['disclosive'] = 'N'
row['publish'] = 'Publish'
row['reason'] = ' Total is zero'
else:
row['disclosive'] = 'Y'
row['publish'] = 'N/A'
return row
disaggregated_data = input_df[input_df.period == cur_period]
region_agg = disaggregated_data.groupby('region')
region_agg = region_agg.agg({'Q608_total': 'sum', 'Q607_constructional_fill': 'sum',
'Q606_other_gravel': 'sum', 'Q605_concreting_gravel': 'sum',
'Q604_bituminous_gravel': 'sum', 'Q603_concreting_sand': 'sum',
'Q602_building_soft_sand': 'sum', 'Q601_asphalting_sand': 'sum',
'enterprise_ref': 'nunique'})
region_agg = region_agg.apply(run_disclosure, axis=1)
# regionlorm = disaggregated_data.groupby(['region'])
region_agg_lorm = disaggregated_data.groupby(['region', 'land_or_marine'])
return region_agg_lorm | 65702fa309884206f284b35c48e2e8c8a34aef2b | 3,651,046 |
def kitchen_sink():
"""Combines all of the test data."""
return word_frequencies.load(_KITCHEN_SINK_DATA) | 4e0b0d38465fb02cd4f8aeb5e54c2f6bcbdf2cda | 3,651,048 |
import torch
def sim_matrix(a, b, eps=1e-8):
"""
added eps for numerical stability
"""
a = normalize_embeddings(a, eps)
b = normalize_embeddings(b, eps)
sim_mt = torch.mm(a, b.transpose(0, 1))
return sim_mt | d0caa5ce6e9f86b861910221321b80752b4f24e4 | 3,651,049 |
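A self-contained sketch of the same idea, with the (undefined here) `normalize_embeddings` helper replaced by an inline L2 normalisation:

import torch

a = torch.randn(4, 8)
b = torch.randn(5, 8)
# L2-normalise rows, clamping the norm to avoid division by zero
a_n = a / a.norm(dim=1, keepdim=True).clamp(min=1e-8)
b_n = b / b.norm(dim=1, keepdim=True).clamp(min=1e-8)
sim_mt = torch.mm(a_n, b_n.transpose(0, 1))  # pairwise cosine similarities
print(sim_mt.shape)  # torch.Size([4, 5])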
import xml.etree.ElementTree as ET
def read_addon_xml(path):
"""Parse the addon.xml and return an info dictionary"""
info = dict(
path='./', # '/storage/.kodi/addons/plugin.video.vrt.nu',
profile='special://userdata', # 'special://profile/addon_data/plugin.video.vrt.nu/',
type='xbmc.python.pluginsource',
)
tree = ET.parse(path)
root = tree.getroot()
info.update(root.attrib) # Add 'id', 'name' and 'version'
info['author'] = info.pop('provider-name')
for child in root:
if child.attrib.get('point') != 'xbmc.addon.metadata':
continue
for grandchild in child:
# Handle assets differently
if grandchild.tag == 'assets':
for asset in grandchild:
info[asset.tag] = asset.text
continue
# Not in English ? Drop it
if grandchild.attrib.get('lang', 'en_GB') != 'en_GB':
continue
# Add metadata
info[grandchild.tag] = grandchild.text
return {info['name']: info} | 6ead602b97c12bfd78ddc7194102a84793aa631b | 3,651,050 |
import requests
def get_submission_list(start_timestamp, end_timestamp, args=None):
"""
Scrapes a subreddit for submissions between two given dates. Due to limitations
of the underlying service, it may not return all the possible submissions, so
it may be necessary to call this method again. The method requests the results
in descending order, so in subsequent calls, you should only update end_timestamp.
:param start_timestamp: request results after this date/time.
:param end_timestamp: request results before this date/time.
:param args: the args to pass to the endpoint
:return: the JSON object returned by the service.
"""
# Generic parameters: for each submission we want its ID and timestamp,
# 500 is the maximum limit, sorted temporally by the most recent
params = "fields=id,created_utc,subreddit&limit=500&sort=desc&sort_type=created_utc"
if args:
for key, value in args.items():
params += "&{0}={1}".format(key, value)
url = "{0}?before={1}&after={2}&{3}".format(
PUSHSHIFT_ENDPOINT, end_timestamp, start_timestamp, params
)
resp = requests.get(url)
return resp.json() | 7fa053c27787136420a9004721c1954318deeedb | 3,651,051 |
def loadDataSet():
"""
load data from data set
Args:
Returns:
dataSet: train input of x
labelSet: train input of y
"""
# initialize x-trainInput,y-trainInput
dataSet = []
labelSet = []
# open file reader
fr = open('testSet.txt')
for line in fr.readlines():
# strip() -- remove whitespace from both ends
# split() -- split on whitespace
lineArr = line.strip().split()
# padding data in list
# x0 = 1.0 , x1 = column1 , x2 = column2
dataSet.append([1.0, float(lineArr[0]), float(lineArr[1])])
# label = column3
labelSet.append(float(lineArr[2]))
return dataSet,labelSet | 38f42a8a7c6b12e3d46d757d98565222e931149f | 3,651,052 |
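A minimal usage sketch, assuming `testSet.txt` holds whitespace-separated rows of `x1 x2 label` (the two sample rows below are illustrative):

with open('testSet.txt', 'w') as f:
    f.write("-0.017612\t14.053064\t0\n1.395634\t4.662541\t1\n")
dataSet, labelSet = loadDataSet()
print(dataSet[0])  # [1.0, -0.017612, 14.053064]
print(labelSet)    # [0.0, 1.0]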
def xds_read_xparm_new_style(xparm_file):
"""Parse the XPARM file to a dictionary."""
data = list(map(float, " ".join(open(xparm_file, "r").readlines()[1:]).split()))
starting_frame = int(data[0])
phi_start, phi_width = data[1:3]
axis = data[3:6]
wavelength = data[6]
beam = data[7:10]
spacegroup = int(data[10])
cell = data[11:17]
a, b, c = data[17:20], data[20:23], data[23:26]
assert int(data[26]) == 1
nx, ny = map(int, data[27:29])
px, py = data[29:31]
ox, oy = data[31:33]
distance = data[33]
x, y = data[34:37], data[37:40]
normal = data[40:43]
results = {
"starting_frame": starting_frame,
"phi_start": phi_start,
"phi_width": phi_width,
"axis": axis,
"wavelength": wavelength,
"beam": beam,
"nx": nx,
"ny": ny,
"px": px,
"py": py,
"distance": distance,
"ox": ox,
"oy": oy,
"x": x,
"y": y,
"normal": normal,
"spacegroup": spacegroup,
"cell": cell,
"a": a,
"b": b,
"c": c,
}
return results | ba5a851c68c54aa0c9f82df1dc2334f427c8cea8 | 3,651,053 |
def clear_bit(val, offs):
"""Clear bit at offset 'offs' in value."""
return val & ~(1 << offs) | e50e5f8ccc3fe08d9b19248e290c2117b78379ee | 3,651,054 |
from collections import namedtuple
def get_org_details(orgs):
"""Get node and site details, store in Org object"""
org_details = []
for org in orgs:
org_id = org['id']
org_name = org['name']
org_longname = org['longname']
Org = namedtuple('Org', ['org_id', 'org_name', 'org_longname'])
org_details.extend([Org(org_id, org_name, org_longname)])
return org_details | 94bec33c2fbee35210ca61f6b8d3694d198c80ee | 3,651,055 |
import logging
def flush_after(handler, delay):
"""Add 'handler' to the queue so that it is flushed after 'delay' seconds by the flush thread.
Return the scheduled event which may be used for later cancellation (see cancel()).
"""
if not isinstance(handler, logging.Handler):
raise TypeError("handler must be a logging.Handler instance")
return _FLUSH_THREAD.submit(handler.flush, delay) | a8cb8197643dbd092f709bed0726d076997e4715 | 3,651,056 |
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip() | 6a0c0d4aa74b4e84de69de023e2721edd95c36bd | 3,651,057 |
import math
def logGamma(x):
"""The natural logarithm of the gamma function.
Based on public domain NETLIB (Fortran) code by W. J. Cody and L. Stoltz,
Applied Mathematics Division, Argonne National Laboratory, Argonne, IL 60439.
References:
1. W. J. Cody and K. E. Hillstrom, 'Chebyshev Approximations for the Natural Logarithm of the Gamma Function,' Math. Comp. 21, 1967, pp. 198-203.
2. K. E. Hillstrom, ANL/AMD Program ANLC366S, DGAMMA/DLGAMA, May, 1969.
3. Hart, Et. Al., Computer Approximations, Wiley and sons, New York, 1968.
From the original documentation:
This routine calculates the LOG(GAMMA) function for a positive real argument X.
Computation is based on an algorithm outlined in references 1 and 2.
The program uses rational functions that theoretically approximate LOG(GAMMA)
to at least 18 significant decimal digits. The approximation for X > 12 is from reference 3,
while approximations for X < 12.0 are similar to those in reference 1, but are unpublished.
The accuracy achieved depends on the arithmetic system, the compiler, the intrinsic functions,
and proper selection of the machine-dependent constants.
Error returns:
The program returns the value XINF for X .LE. 0.0 or when overflow would occur.
The computation is believed to be free of underflow and overflow."""
y = x
if y < 0.0 or y > LOG_GAMMA_X_MAX_VALUE:
# Bad arguments
return float("inf")
if y <= EPS:
return -math.log(y)
if y <= 1.5:
if (y < pnt68):
corr = -math.log(y)
xm1 = y
else:
corr = 0.0;
xm1 = y - 1.0;
if y <= 0.5 or y >= pnt68:
xden = 1.0;
xnum = 0.0;
for i in range(8):
xnum = xnum * xm1 + lg_p1[i];
xden = xden * xm1 + lg_q1[i];
return corr + xm1 * (lg_d1 + xm1 * (xnum / xden));
else:
xm2 = y - 1.0;
xden = 1.0;
xnum = 0.0;
for i in range(8):
xnum = xnum * xm2 + lg_p2[i];
xden = xden * xm2 + lg_q2[i];
return corr + xm2 * (lg_d2 + xm2 * (xnum / xden));
if (y <= 4.0):
xm2 = y - 2.0;
xden = 1.0;
xnum = 0.0;
for i in range(8):
xnum = xnum * xm2 + lg_p2[i];
xden = xden * xm2 + lg_q2[i];
return xm2 * (lg_d2 + xm2 * (xnum / xden));
if y <= 12.0:
xm4 = y - 4.0;
xden = -1.0;
xnum = 0.0;
for i in range(8):
xnum = xnum * xm4 + lg_p4[i];
xden = xden * xm4 + lg_q4[i];
return lg_d4 + xm4 * (xnum / xden);
assert y <= lg_frtbig
res = lg_c[6];
ysq = y * y;
for i in range(6):
res = res / ysq + lg_c[i];
res /= y;
corr = math.log(y);
res = res + LOGSQRT2PI - 0.5 * corr;
res += y * (corr - 1.0);
return res | 36128e9a4b765dcc85ef42866fdbe7d16140ea1d | 3,651,058 |
def regional_validity(query_point, regional_inclusion, regional_exclusions):
""" regional_validity
Returns whether a coordinate point is inside a polygon and outside of excluded regions.
Input: A Point object, a Polygon Object of the inclusion region; a list of Polygon Objects of excluded regions.
Output: True if the query point is both inside the regional polygon and outside all exclusions; False otherwise.
"""
if query_point.within(regional_inclusion):
# Check if the point co-occurs with city areas...
for city in regional_exclusions:
if query_point.within(city):
return False
return True
return False | 68e06b3d89e4783130f123d6c91dc5c43a9788ba | 3,651,059 |
def get_word_vector_list(doc, w2v):
"""Get all the vectors for a text"""
vectors = []
for word in doc:
try:
vectors.append(w2v.wv[word])
except KeyError:
continue
return vectors | f228c2100b6a622fdb677954257e2d1590dcc0ff | 3,651,060 |
def solve(lines, n):
"""Apply the rules specified in the input lines to the starting
pattern for n iterations.
The number of lit pixels in the final pattern is returned.
"""
rules = load_rulebook(lines)
pattern = START
for _ in range(n):
pattern = enhance(pattern, rules)
return sum([row.count('#') for row in pattern]) | 781c349bfa186ac04daea60fe7e954431787ea15 | 3,651,061 |
import matplotlib.colors as mplc
def _to_plotly_color(scl, transparence=None):
"""
converts a rgb color in format (0-1,0-1,0-1) to a plotly color 'rgb(0-255,0-255,0-255)'
"""
rgba = mplc.to_rgba(scl)
plotly_col = [255 * _c for _c in rgba[:3]] + [rgba[3]]
if transparence is not None:
assert 0. <= transparence <= 1.0
plotly_col[3] = transparence
return "rgba({:.0f}, {:.0f}, {:.0f}, {:.4f})".format(*plotly_col)
else:
return "rgb({:.0f}, {:.0f}, {:.0f})".format(*plotly_col[:3]) | 95b7686f913c69792e18f127176db68a3f72622f | 3,651,062 |
def dense_attention_block(seqs_repr, is_training, num_layers,
decay_variable, decay_constant,
units, dropout, query_dropout,
l2_scale, name=''):
"""
"""
for i in range(num_layers):
with tf.variable_scope('dense_attention{}'.format(i), reuse=tf.AUTO_REUSE):
#seqs_repr = tf.Print(seqs_repr, [tf.shape(seqs_repr)], "{}".format(i))
seqs_repr = attention_block(seqs_repr,
is_training,
decay_variable,
decay_constant,
dropout,
query_dropout,
l2_scale)
layer_reprs.append(seqs_repr)
return seqs_repr | db50dd5e4d8d61622a9f989ec0ae9c02c5a4cfe1 | 3,651,063 |
def generate_schema_type(app_name: str, model: object) -> DjangoObjectType:
"""
Take a Django model and generate a Graphene Type class definition.
Args:
app_name (str): name of the application or plugin the Model is part of.
model (object): Django Model
Example:
For a model with a name of "Device", the following class definition is generated:
class DeviceType(DjangoObjectType):
class Meta:
model = Device
fields = ["__all__"]
If a FilterSet exists for this model at '<app_name>.filters.<ModelName>FilterSet',
the filterset will be stored in filterset_class as follows:
class DeviceType(DjangoObjectType):
class Meta:
model = Device
fields = ["__all__"]
filterset_class = DeviceFilterSet
"""
main_attrs = {}
meta_attrs = {"model": model, "fields": "__all__"}
# We'll attempt to find a FilterSet corresponding to the model
# Not all models have a FilterSet defined so the function return none if it can't find a filterset
meta_attrs["filterset_class"] = get_filterset_for_model(model)
main_attrs["Meta"] = type("Meta", (object,), meta_attrs)
schema_type = type(f"{model.__name__}Type", (DjangoObjectType,), main_attrs)
return schema_type | b57cd78cce59dacf1fdb1d14c667405b6cfdcc90 | 3,651,064 |
def do_authorize():
"""
Send a token request to the OP.
"""
oauth2.client_do_authorize()
try:
redirect = flask.session.pop("redirect")
return flask.redirect(redirect)
except KeyError:
return flask.jsonify({"success": "connected with fence"}) | 1e19f501ac6da94058619e8dd5905d6cd2ab1a69 | 3,651,065 |
import re
from subprocess import check_output
def get_windows():
"""
Return all windows found by WM with CPU, fullscreen, process name, and class information.
"""
# Basic window information
result = check_output('nice -n 19 wmctrl -l -p', shell=True)
lines = [a for a in result.decode('utf8').split('\n') if a != '']
windows = [re.split(r'\s+', a, maxsplit=4) for a in lines]
# Window properties
window_index = {}
for window in windows:
window_id = window[0]
r = check_output('nice -n 19 xprop -id {}'.format(window_id), shell=True)
wm_classes = []
r_class = re.search(br'WM_CLASS\(STRING\) = (.*)\n', r)
if r_class:
wm_classes = re.findall('\"(.*?)\"', r_class.group(1).decode('ascii'))
fullscreen = b'WM_STATE_FULLSCREEN' in r
window_index[window_id] = (fullscreen, wm_classes)
# Basic process information
usable_lines = []
result = check_output('nice -n 19 top -b -n 2', shell=True)
lines = [a for a in result.decode('utf8').split('\n') if a != '']
first_found = False
for i, line in enumerate(lines):
r = re.search(r'PID\s+USER\s+PR\s+NI', line)
if r:
if first_found:
usable_lines = lines[i + 1:]
break
else:
first_found = True
processes = [re.split(r'\s+', a.strip()) for a in usable_lines]
process_index = {a[0]: (a[8], a[11]) for a in processes}
result = []
for window in windows:
cpu, name = process_index.get(window[2], (None, None))
fullscreen, wm_classes = window_index.get(window[0], (False, []))
result.append(Window(*window, cpu=cpu, fullscreen=fullscreen, name=name,
wm_classes=wm_classes))
return result | dd0f6f702592cf7f2fdd8541959682890dcc271e | 3,651,066 |
import google.auth.transport.requests
import google.oauth2._client
def get_google_open_id_connect_token(service_account_credentials):
"""Get an OpenID Connect token issued by Google for the service account.
This function:
1. Generates a JWT signed with the service account's private key
containing a special "target_audience" claim.
2. Sends it to the OAUTH_TOKEN_URI endpoint. Because the JWT in #1
has a target_audience claim, that endpoint will respond with
an OpenID Connect token for the service account -- in other words,
a JWT signed by *Google*. The aud claim in this JWT will be
set to the value from the target_audience claim in #1.
For more information, see
https://developers.google.com/identity/protocols/OAuth2ServiceAccount .
The HTTP/REST example on that page describes the JWT structure and
demonstrates how to call the token endpoint. (The example on that page
shows how to get an OAuth2 access token; this code is using a
modified version of it to get an OpenID Connect token.)
"""
service_account_jwt = (
service_account_credentials._make_authorization_grant_assertion())
request = google.auth.transport.requests.Request()
body = {
'assertion': service_account_jwt,
'grant_type': google.oauth2._client._JWT_GRANT_TYPE,
}
token_response = google.oauth2._client._token_endpoint_request(
request, OAUTH_TOKEN_URI, body)
return token_response['id_token'] | 08e483865d26772112ffaf9837692f001598ced5 | 3,651,067 |
def term_to_atoms(terms):
"""Visitor to list atoms in term."""
if not isinstance(terms, list):
terms = [terms]
new_terms = []
for term in terms:
if isinstance(term, And):
new_terms += term_to_atoms(term.to_list())
elif isinstance(term, Or):
new_terms += term_to_atoms(term.to_list())
elif isinstance(term, Not):
new_terms.append(term.child)
else:
new_terms.append(term)
return new_terms | 6262ea7b1df124a4717d1452a23c33175b5da7a8 | 3,651,068 |
def expr_max(argv):
"""
Max aggregator function for :class:`Expression` objects
Returns
-------
exp : :class:`Expression`
Max of given arguments
Examples
--------
>>> x = so.VariableGroup(10, name='x')
>>> y = so.expr_max(2*x[i] for i in range(10))
"""
return expr_nested(argv, 'max') | 182157a627b12db6c41c79a99f135a7a493d4410 | 3,651,069 |
def handle_size(bytes_in=False, bytes_out=False):
"""
A function that converts bytes to a human readable form. Returns a
string like: 42.31TB. Example:
your_variable_name = handle_size(bytes_in=value_in_bytes)
"""
tib = 1024 ** 4
gib = 1024 ** 3
mib = 1024 ** 2
kib = 1024
if bytes_in:
data = float(bytes_in)
if data >= tib:
symbol = 'TB'
new_data = data / tib
elif data >= gib:
symbol = 'GB'
new_data = data / gib
elif data >= mib:
symbol = 'MB'
new_data = data / mib
elif data >= kib:
symbol = 'KB'
new_data = data / kib
elif data >= 0:
symbol = ' B'
new_data = data
formated_data = "{0:.2f}".format(new_data)
converted_data = str(formated_data) + symbol
return converted_data
elif bytes_out:
symbol = bytes_out[-1].lower()
data = bytes_out[0:-1]
try:
bytes = int(data)
except Exception as e:
print("couldnt convert " + data + " to int!")
print(e)
exit()
if symbol == 't':
converted_data = bytes * tib
elif symbol == 'g':
converted_data = bytes * gib
elif symbol == 'm':
converted_data = bytes * mib
elif symbol == 'k':
converted_data = bytes * kib
else:
print("unsupported size type! expected t, g, m, or k!")
exit()
return converted_data | 6e2b3b758e1afc1cea43bbe7ac0c6179b1d32c5f | 3,651,070 |
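Illustrative calls in both directions (expected outputs shown as comments):

print(handle_size(bytes_in=1536))           # "1.50KB"
print(handle_size(bytes_in=3 * 1024 ** 4))  # "3.00TB"
print(handle_size(bytes_out="2m"))          # 2097152, i.e. 2 * 1024**2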
def return_elapsed(gs):
"""Returns a description of the elapsed time of recent operations.
Args:
gs: global state.
Returns:
A dictionary containing the count, minimum elapsed time,
maximum elapsed time, average elapsed time, and list of elapsed time
records.
"""
assert isinstance(gs, global_state.GlobalState)
elapsed_list = []
elapsed_sum = 0.0
elapsed_min = None
elapsed_max = None
for elapsed_record in gs.get_elapsed():
duration = elapsed_record.elapsed_seconds
elapsed_list.append(
{'start_time': utilities.seconds_to_timestamp(
elapsed_record.start_time),
'what': elapsed_record.what,
'threadIdentifier': elapsed_record.thread_identifier,
'elapsed_seconds': duration})
elapsed_sum += duration
if (elapsed_min is None) or (elapsed_max is None):
elapsed_min = duration
elapsed_max = duration
else:
elapsed_min = min(elapsed_min, duration)
elapsed_max = max(elapsed_max, duration)
return {'count': len(elapsed_list),
'min': elapsed_min,
'max': elapsed_max,
'average': elapsed_sum / len(elapsed_list) if elapsed_list else None,
'items': elapsed_list} | af832a3bac239e24f610e39c5dee8fde6a1a25c8 | 3,651,071 |
import numpy as np
def calculate_per_class_lwlrap(truth, scores):
"""Calculate label-weighted label-ranking average precision.
Arguments:
truth: np.array of (num_samples, num_classes) giving boolean ground-truth
of presence of that class in that sample.
scores: np.array of (num_samples, num_classes) giving the classifier-under-
test's real-valued score for each class for each sample.
Returns:
per_class_lwlrap: np.array of (num_classes,) giving the lwlrap for each
class.
weight_per_class: np.array of (num_classes,) giving the prior of each
class within the truth labels. Then the overall unbalanced lwlrap is
simply np.sum(per_class_lwlrap * weight_per_class)
"""
assert truth.shape == scores.shape
num_samples, num_classes = scores.shape
# Space to store a distinct precision value for each class on each sample.
# Only the classes that are true for each sample will be filled in.
precisions_for_samples_by_classes = np.zeros((num_samples, num_classes))
for sample_num in range(num_samples):
pos_class_indices, precision_at_hits = (
_one_sample_positive_class_precisions(scores[sample_num, :],
truth[sample_num, :]))
precisions_for_samples_by_classes[sample_num, pos_class_indices] = (
precision_at_hits)
labels_per_class = np.sum(truth > 0, axis=0)
weight_per_class = labels_per_class / float(np.sum(labels_per_class))
# Form average of each column, i.e. all the precisions assigned to labels in
# a particular class.
per_class_lwlrap = (np.sum(precisions_for_samples_by_classes, axis=0) /
np.maximum(1, labels_per_class))
# overall_lwlrap = simple average of all the actual per-class, per-sample precisions
# = np.sum(precisions_for_samples_by_classes) / np.sum(precisions_for_samples_by_classes > 0)
# also = weighted mean of per-class lwlraps, weighted by class label prior across samples
# = np.sum(per_class_lwlrap * weight_per_class)
return per_class_lwlrap, weight_per_class | 7cc9187f96d0899d0ce554164df553cc9b5f79a0 | 3,651,072 |
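As the docstring notes, the overall (unbalanced) lwlrap is the weighted sum of the per-class values. A usage sketch, assuming the helper `_one_sample_positive_class_precisions` from the original source is available:

import numpy as np

truth = np.array([[1, 0, 1], [0, 1, 0]])               # 2 samples, 3 classes
scores = np.array([[0.9, 0.2, 0.3], [0.1, 0.8, 0.4]])  # classifier scores
per_class_lwlrap, weight_per_class = calculate_per_class_lwlrap(truth, scores)
overall_lwlrap = np.sum(per_class_lwlrap * weight_per_class)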
import hashlib
import zipfile
from datetime import datetime
def generate_manifest(name, p, h=None):
""" generate_manifest(name, p, h) -> mapping
Generates a mapping used as the manifest file.
:param name: a dotted package name, as in setup.py
:param p: the zip file with package content.
:param h: optional hash function to use.
:returns: the manifest mapping.
"""
if h is None:
h = hashlib.sha256
m = {}
fh = m["files"] = {}
order = []
with zipfile.ZipFile(p) as zf:
for fi in zf.filelist:
order.append(fi.filename)
hash_all = h()
for fn in sorted(order):
contents = zf.read(fn)
hash_all.update(contents)
fh[fn] = h(contents).hexdigest()
m["name"] = name
m["sum"] = hash_all.hexdigest()
m["date"] = datetime.datetime.now().isoformat()
return m | 13c10ae405dbc6fe5acf92180e7981d07fdb9c60 | 3,651,074 |
def benchrun(methods,
model,
case_args,
filename,
cpus=1,):
"""
Parameters
----------
methods : list of str
Voter systems to be assessed by the election model.
model : func
Election model running function as
>>> e = func(**kwargs)
Where
- `e` is an Election object
- `kwargs` is a dict of arguments generated by `case_args`
case_args : generator
Generator that creates the parametric arguments to input into the model.
Must accept argument `methods` --
>>> generator = case_args(methods)
>>> args = next(generator)
filename : str
Naming prefix for output files
cpus : int
Number of processes or CPU's to use
Returns
-------
df : Dataframe
Results for every election iteration assessed
"""
b = _BenchRunner(model=model, case_args=case_args, filename=filename)
if cpus > 1:
return b.runmult(methods, cpus=cpus)
else:
return b.run(methods) | 414c96deb9a8d2f64b6808323465f8647aa5e48a | 3,651,075 |
from time import sleep
def retry(exceptions, times=3, sleep_second=0):
"""
Retry Decorator
Retries the wrapped function/method `times` times if any of the exceptions
listed in ``exceptions`` is thrown
:param times: The number of times to repeat the wrapped function/method
:type times: Int
:param exceptions: Exception or list of exceptions that trigger a retry attempt
:type exceptions: Exception, or tuple/list of Exceptions
:param sleep_second: Seconds to sleep between attempts
:type sleep_second: Int
"""
if not isinstance(exceptions, (list, tuple)): exceptions = [exceptions]
def decorator(func):
def newfn(*args, **kwargs):
attempt = 0
while attempt < times:
try:
return func(*args, **kwargs)
except Exception as e:
for i in exceptions:
if isinstance(e,i):
log(
'Exception thrown when attempting to run %s, attempt '
'%d of %d' % (func, attempt, times),
exc_info=True
)
attempt += 1
if sleep_second:sleep(sleep_second)
break
else:#when no break
raise e
return func(*args, **kwargs)
return newfn
return decorator | de715a0f903386358265c3fe4a13f1d91bcb177e | 3,651,076 |
def positiveId(obj):
"""Return id(obj) as a non-negative integer."""
result = id(obj)
if result < 0:
result += _address_mask
assert result > 0
return result | 5d3f987c621cf3d43ac31e9300a4d54ba208a7a0 | 3,651,077 |
def compute_annualized_total_return_over_months(df, column_price, months):
"""
Computed the annualized total return over the specified number of months.
This is equivalent to Compound Annual Growth Rate (CAGR).
Note: If the period is less than one year, it is best not to use annualized total return as it could result in a
very large (positive or negative) number that is not meaningful.
:param df: dataframe (sorted in ascending time order)
:param column_price: name of source column in dataframe with price values (adjusted for splits and dividends) to
compute annualized total return
:param months: time period in months (e.g. 1 = 1 month, 2 = 2 months, 2.5 = 1 month and ~15 days, etc.)
:return: annualized total return over months
"""
# calculate cumulative total return
total_return = compute_cumulative_total_return(df, column_price)
# calculate annualized total returns over months
annualized_total_return = ((1 + total_return)**(12/months)) - 1
return annualized_total_return | a75886ae85ab5bb146d93bd159a0a2a32f950678 | 3,651,079 |
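A quick sanity check of the annualisation formula with illustrative numbers:

# A 10% cumulative return earned over 6 months annualises to 21%:
total_return = 0.10
months = 6
annualized = (1 + total_return) ** (12 / months) - 1
print(round(annualized, 4))  # 0.21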
from functools import reduce
def build_sparse_ts_from_distributions(start_date, end_date, seasonalities, time_interval, dist_dict, **kwargs):
"""constructs a time series with given distributions and seasonalities in a given frequency time_interval"""
ts_list = []
for (name, dist), seasonality in zip(dist_dict.items(), seasonalities):
ts_list.append(build_sparse_ts_by_seasonality(dist, start_date, end_date, seasonality, time_interval,
**kwargs.get(name, {})))
ts = reduce(lambda x, y: add_ts_with_different_dates(x, y), ts_list) # add time series together
return ts | 81d2ebc32a2b62ed967377faf90b3b58e7c753ff | 3,651,080 |
import numpy as np
def preprocess_label(labels, scored_classes, equivalent_classes):
""" convert string labels to binary labels """
y = np.zeros((len(scored_classes)), np.float32)
for label in labels:
if label in equivalent_classes:
label = equivalent_classes[label]
if label in scored_classes:
y[scored_classes.index(label)] = 1
return y | 3e2465bb0db04afaaca0576f6c97847bd0fd2b2e | 3,651,081 |
from phi import math
def vec_abs(field: SampledField):
""" See `phi.math.vec_abs()` """
if isinstance(field, StaggeredGrid):
field = field.at_centers()
return field.with_values(math.vec_abs(field.values)) | 91395513b7e457bdfdded484db1069e8c3b95805 | 3,651,084 |
def spoofRequest(app):
"""
Make REQUEST variable to be available on the Zope application server.
This allows acquisition to work properly
"""
_policy=PermissiveSecurityPolicy()
_oldpolicy=setSecurityPolicy(_policy)
newSecurityManager(None, OmnipotentUser().__of__(app.acl_users))
info = {'SERVER_NAME': 'isaw4.atlantides.org',
'SERVER_PORT': '8083',
'REQUEST_METHOD': 'GET'}
return makerequest(app, environ=info) | d1b3bd1a37d69f6500d23e55b5318b6519ed04be | 3,651,085 |
import pandas as pd
def data_to_percentage(data_list: pd.DataFrame) -> pd.DataFrame:
"""
Takes a dataframe with one or more columns filled with digits and returns a
dataframe with the percentages corresponding to the number of times the
numbers 1-9 appear in each column.
Args:
data_list: a dataframe of integers representing all of the leading
digits from a dataset (in this case, the number of vote counts).
Each column is a category and is a Series with digits.
Returns:
returns a dataframe of Series with the percentages of each column that
are each unique number in that column. Any numbers outside of [1, 9] are
not included and any column with fewer unique digits than another column
is dropped.
"""
def per_column_percentage(column: pd.Series) -> pd.Series:
number_of_occurrences = column.value_counts()
number_of_occurrences = number_of_occurrences[
(number_of_occurrences.index > 0)
& (number_of_occurrences.index < 10)
]
return number_of_occurrences.multiply(
100 / sum(number_of_occurrences)
).sort_index()
return data_list.apply(per_column_percentage).dropna(axis=1) | 18316ddf999419290d572e77d2934241359e45a3 | 3,651,086 |
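A small illustrative example (both columns share the same digit set, so nothing is dropped by the final `dropna`):

import pandas as pd

digits = pd.DataFrame({"county_a": [1, 1, 2, 2, 1],
                       "county_b": [2, 1, 1, 2, 2]})
print(data_to_percentage(digits))
#    county_a  county_b
# 1      60.0      40.0
# 2      40.0      60.0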
from .models.models import EncoderClassifier
import torch
def create_classifier_from_encoder(data:DataBunch, encoder_path:str=None, path=None,
dropout1=0.5, device: torch.device = torch.device('cuda', 0), **kwargs):
"""Factory function to create classifier from encoder to allow transfer learning."""
path = data.path if path is None else path
if encoder_path is None:
logger.info("WARNING: `encoder_path` is None, not using pretrained feature extractor")
encoder = None
else:
encoder = torch.load(encoder_path, map_location='cpu')
model = EncoderClassifier(data.train_ds.shape, encoder, len(data.classes),dropout1=dropout1)
learn = Learner(data, model, path, model_type="classifier", device=device, **kwargs)
learn.freeze_encoder()
return learn | 736cfec768b6d659ab6fa1f087474a482409b66e | 3,651,087 |
from typing import Hashable
import pandas as pd
def filter_string(
df: pd.DataFrame,
column_name: Hashable,
search_string: str,
complement: bool = False,
case: bool = True,
flags: int = 0,
na=None,
regex: bool = True,
) -> pd.DataFrame:
"""Filter a string-based column according to whether it contains a substring.
This is super sugary syntax that builds on top of `pandas.Series.str.contains`.
It is meant to be the method-chaining equivalent of the following:
```python
df = df[df[column_name].str.contains(search_string)]]
```
This method does not mutate the original DataFrame.
Example: Retain rows whose column values contain a particular substring.
>>> import pandas as pd
>>> import janitor
>>> df = pd.DataFrame({"a": range(3, 6), "b": ["bear", "peeL", "sail"]})
>>> df
a b
0 3 bear
1 4 peeL
2 5 sail
>>> df.filter_string(column_name="b", search_string="ee")
a b
1 4 peeL
>>> df.filter_string(column_name="b", search_string="L", case=False)
a b
1 4 peeL
2 5 sail
Example: Keep only names that do not contain `'.'` (regex mode disabled).
>>> import pandas as pd
>>> import janitor
>>> df = pd.Series(["JoseChen", "Brian.Salvi"], name="Name").to_frame()
>>> df
Name
0 JoseChen
1 Brian.Salvi
>>> df.filter_string(column_name="Name", search_string=".", regex=False, complement=True)
Name
0 JoseChen
:param df: A pandas DataFrame.
:param column_name: The column to filter. The column should contain strings.
:param search_string: A regex pattern or a (sub-)string to search.
:param complement: Whether to return the complement of the filter or not. If
set to True, then the rows for which the string search fails are retained
instead.
:param case: If True, case sensitive.
:param flags: Flags to pass through to the re module, e.g. re.IGNORECASE.
:param na: Fill value for missing values. The default depends on dtype of
the array. For object-dtype, `numpy.nan` is used. For `StringDtype`,
`pandas.NA` is used.
:param regex: If True, assumes `search_string` is a regular expression. If False,
treats the `search_string` as a literal string.
:returns: A filtered pandas DataFrame.
""" # noqa: E501
criteria = df[column_name].str.contains(
pat=search_string,
case=case,
flags=flags,
na=na,
regex=regex,
)
if complement:
return df[~criteria]
return df[criteria] | 9e5598a4afcff41ec5dc67c38b68efbacf3f09ec | 3,651,088 |
from typing import List
from typing import Dict
from typing import Union
import functools
def on_demand_feature_view(
features: List[Feature], inputs: Dict[str, Union[FeatureView, RequestDataSource]]
):
"""
Declare an on-demand feature view
:param features: Output schema with feature names
:param inputs: The inputs passed into the transform.
:return: An On Demand Feature View.
"""
def decorator(user_function):
on_demand_feature_view_obj = OnDemandFeatureView(
name=user_function.__name__,
inputs=inputs,
features=features,
udf=user_function,
)
functools.update_wrapper(
wrapper=on_demand_feature_view_obj, wrapped=user_function
)
return on_demand_feature_view_obj
return decorator | 0ea45df22cb167ad2aa919a0be40f2a11574a69a | 3,651,089 |
from ctypes import c_char_p, cast
def get_error_string(ftdi):
"""
get_error_string(context ftdi) -> char *
Get string representation for last error code
Parameters:
-----------
ftdi: pointer to ftdi_context
Returns:
--------
Pointer: to error string
"""
errstr = ftdi_get_error_string(ftdi)
return cast(errstr, c_char_p).value.decode('ascii') | e0d3eaa19014fff9840e7a8e629651107ae25495 | 3,651,090 |
def DenseNet52k12(growth_rate = 12,
reduction = 0.5):
"""
Parameters:
----------
growth_rate : int, number of feature maps added by each dense layer (k)
reduction : float, compression factor applied in the transition layers
Returns
-------
A 52-layer DenseNet with growth rate 12.
"""
return DenseNet(reduction = reduction,
growth_rate = growth_rate,
layers=52) | a295bcae685ae36bcbe356099d404403f7b8c0b6 | 3,651,091 |
def construct_fid_mask(catalog):
"""
Constructs the fidelity mask based off my results, not Roberto's
:param catalog: the source line catalog
:return: the stacked table of catalog rows passing the fidelity cut
"""
line_widths = [i for i in range(3, 21, 2)]
fid_catalog = load_table("fidelity_snr.out", start=0)
fid_limit = 0.4
six_fids = []
for width in line_widths:
f = interp1d(fid_catalog["fbin"], fid_catalog["pure{}".format(width)], kind='slinear')
xdata = np.linspace(5.85, 7.85, 10000)
six_fids.append(xdata[np.argmax(f(xdata) >= fid_limit)])
masks = []
line_widths = [i for i in range(3, 21, 2)]
#six_fids = [6.3, 6.2, 6.1, 6.15, 6.1, 6.20, 6.1, 6.20, 6.05]
# six_fids = [6.35, 6.25, 6.15, 6.15, 6.15, 6.25, 6.15, 6.25, 6.05]
# six_fids = [6.25, 6.2, 6.1, 6.1, 6.1, 6.15, 6.1, 6.15, 6.05]
for index, width in enumerate(line_widths):
print(six_fids[index])
masks.append(catalog[((catalog['width'] == width) & (catalog['rsnrrbin'] >= six_fids[index]))])
total = masks[0]
t_sum = 0
for mask in masks[1:]:
t_sum += len(mask)
total = vstack((total, mask))
print("Total One: {}".format(len(total)))
return total | 81f50ae4dd092482eb406bef331075245989d2f3 | 3,651,092 |
def _run_job(tgt, fun, arg, kwarg, tgt_type, timeout, retry):
"""
Helper function to send execution module command using ``client.run_job``
method and collect results using ``client.get_event_iter_returns``. Implements
basic retry mechanism.
If ``client.get_event_iter_returns`` return no results, ``_run_job`` will retry
the command until minions return results or ``retry`` threshold reached, in
latter case ``CommandExecutionError`` raised with job details
"""
ret = {}
attempt = 1
while attempt <= retry:
# publish job command
pub_data = client.run_job(
tgt=tgt, fun=fun, arg=arg, kwarg=kwarg, tgt_type=tgt_type, timeout=timeout
)
# collect job results
job_results = client.get_event_iter_returns(timeout=timeout, **pub_data)
for item in job_results:
ret.update(item)
if not set(pub_data["minions"]) == set(ret.keys()):
minions_no_return = set(pub_data["minions"]) - set(ret.keys())
log.warning(
"Nornir-runner:_run_job - {}s timeout; no results from {}; returned {}; jid {}; attempt: {}".format(
timeout,
list(minions_no_return),
list(ret.keys()),
pub_data["jid"],
attempt,
)
)
if ret:
break
attempt += 1
else:
raise CommandExecutionError(
"Nornir-runner:_run_job - no results from minions; tgt: {}; fun: {}; tgt_type: {}; timeout: {}; retry: {}; kwarg: {}".format(
tgt, fun, tgt_type, timeout, retry, kwarg
)
)
return ret | e23c189063e5d7df542d8e774acf655f4af61289 | 3,651,094 |
def _set_rank_colorbar(ax, img, norm):
""" Set color bar for rankshow on the right of the ax
"""
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(img, cax=cax)
y_tick_values = cax.get_yticks()
boundary_means = [np.mean((y_tick_values[ii],y_tick_values[ii-1]))
for ii in range(1, len(y_tick_values))]
print(norm.boundaries)
category_names = [(str(norm.boundaries[ii-1])+'~'+
str(norm.boundaries[ii]))
for ii in range(1, len(norm.boundaries))]
# category_names[0] = '<='+str(norm.boundaries[1])
category_names[-1] = '>'+str(norm.boundaries[-2])
cax.yaxis.set_ticks(boundary_means)
cax.yaxis.set_ticklabels(category_names,rotation=0)
return cax | f21f59ac69c79cf9449d28710abdeeb730004077 | 3,651,095 |
from typing import Optional
from pathlib import Path
import importlib
def destination(stub: str) -> Optional[Path]:
"""Determine stub path
Only handle micropython stubs, ignoring
any cPython stdlib equivalents.
"""
prefix, _, suffix = stub.partition(".")
if importlib.util.find_spec(prefix): # type: ignore
return # in cPython stdlib, skip
prefix = Path(prefix)
if suffix in ("py", "pyi"): # module
return prefix / f"__init__.{suffix}"
return prefix / suffix | 8b2552513dbeaa9dc09cb85703b736e17c4788b5 | 3,651,096 |
def train_IPCA(X,n_dims,batch_size,model='ipca'):
"""
name: train_IPCA
Incremental PCA: linear dimensionality reduction using Singular Value
Decomposition of centered data, keeping only the most significant singular
vectors to project the data to a lower dimensional space.
returns: the fitted transformer (pipeline) estimator
"""
estimator=transformer[model].set_params(pca__n_components=n_dims,pca__batch_size=batch_size)
estimator.fit(X)
return estimator | 282c885a562b5b3dbe356050ef5f270f49d7014d | 3,651,098 |
def _str_cell(cell: Cell) -> str:
"""Строковое представление клетки.
Данной строкой клетка будет выводится на экран.
"""
if cell.is_open:
if cell.is_empty:
return " "
elif cell.value:
return f" {cell.value} "
elif cell.is_flagged:
return "[F]"
else:
return "[ ]" | 2e4428196601a726b488e3ec4d966072033c5bfe | 3,651,099 |
def mvw_ledoit_wolf(prices,
weight_bounds=(0.,1.),
rf = 0.,
options = None):
"""
Calculates the mean-variance weights given a DataFrame of returns.
Wraps mean_var_weights with ledoit_wolf covariance calculation method
Args:
* prices (DataFrame): Prices for multiple securities.
* weight_bounds ((low, high)): Weigh limits for optimization.
* rf (float): `Risk-free rate <https://www.investopedia.com/terms/r/risk-freerate.asp>`_ used in utility calculation
* options (dict): options for minimizing, e.g. {'maxiter': 10000 }
Returns:
Series {col_name: weight}
"""
r = prices.to_returns().dropna()
covar = ledoit_wolf(r)[0]
return covar | 086f6430d189fd12509d56ce4a96a351a178979b | 3,651,100 |
def _PadLabels3d(logits, labels):
"""Pads or slices 3-d labels to match logits.
Covers the case of 2-d softmax output, when labels is [batch, height, width]
and logits is [batch, height, width, onehot]
Args:
logits: 4-d Pre-softmax fully-connected output.
labels: 3-d, but not necessarily matching in size.
Returns:
labels: Resized by padding or clipping to match logits.
"""
logits_shape = shapes.tensor_shape(logits)
labels_shape = shapes.tensor_shape(labels)
labels = tf.reshape(labels, [-1, labels_shape[2]])
labels = _PadLabels2d(logits_shape[2], labels)
labels = tf.reshape(labels, [labels_shape[0], -1])
labels = _PadLabels2d(logits_shape[1] * logits_shape[2], labels)
return tf.reshape(labels, [labels_shape[0], logits_shape[1], logits_shape[2]]) | 223f7dfea9ebc970e62dbe71e2f27dfb5c9f161d | 3,651,101 |
def intx():
"""Returns the default int type, as a string.
(e.g. 'int16', 'int32', 'int64').
# Returns
String, the current default int type.
"""
return _INTX | 57661ef00953e07228ff81abc93ec22c216797ff | 3,651,102 |
import json
def dev_end_hardware_script() -> Response:
"""Designate the end of a hardware script in flask log.
Can be invoked by: curl http://localhost:4567/development/end_hardware_script
"""
return Response(json.dumps({}), mimetype="application/json") | 714b448642180753e639992f2d101841074aeefd | 3,651,103 |
def _init_train(opt):
"""Common initilization stuff for all training process."""
ArgumentParser.validate_prepare_opts(opt)
if opt.train_from:
# Load checkpoint if we resume from a previous training.
checkpoint = load_checkpoint(ckpt_path=opt.train_from)
fields = load_fields(opt.save_data, checkpoint)
transforms_cls = get_transforms_cls(opt._all_transform)
if (hasattr(checkpoint["opt"], '_all_transform') and
len(opt._all_transform.symmetric_difference(
checkpoint["opt"]._all_transform)) != 0):
_msg = "configured transforms is different from checkpoint:"
new_transf = opt._all_transform.difference(
checkpoint["opt"]._all_transform)
old_transf = checkpoint["opt"]._all_transform.difference(
opt._all_transform)
if len(new_transf) != 0:
_msg += f" +{new_transf}"
if len(old_transf) != 0:
_msg += f" -{old_transf}."
logger.warning(_msg)
if opt.update_vocab:
logger.info("Updating checkpoint vocabulary with new vocabulary")
fields, transforms_cls = prepare_fields_transforms(opt)
else:
checkpoint = None
# Data preprocessing stage: convert the data into torchtext Field format
fields, transforms_cls = prepare_fields_transforms(opt)
# Report src and tgt vocab sizes
for side in ['src', 'tgt']:
f = fields[side]
try:
f_iter = iter(f)
except TypeError:
f_iter = [(side, f)]
for sn, sf in f_iter:
if sf.use_vocab:
logger.info(' * %s vocab size = %d' % (sn, len(sf.vocab)))
return checkpoint, fields, transforms_cls | bb2a043d1a59f996b303aabf9db724ced3505dbf | 3,651,104 |
def compare(isamAppliance1, isamAppliance2):
"""
Compare Update Servers between two appliances
"""
ret_obj1 = get_all(isamAppliance1)
ret_obj2 = get_all(isamAppliance2)
for obj in ret_obj1['data']:
del obj['uuid']
for obj in ret_obj2['data']:
del obj['uuid']
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=['uuid']) | e29025ca0af897f10b3b8498f8def86841b76c97 | 3,651,106 |
import random
def get_random():
"""
Retrieves the current issue number of XKCD, chooses a random issue between 1 and the current issue number, and returns it as a json object.
Returns None if a requests error occurs.
"""
return get_issue(random.randint(1, int(get_current()["num"]))) | 10fbf75681901722510b0b9fbb2de298eb80b45e | 3,651,108 |
def get_fasta_readlengths(fasta_file):
"""
Get a sorted list of contig lengths
:return: (tuple)
"""
lens = []
with open_fasta_reader(fasta_file) as f:
for record in f:
lens.append(len(record.sequence))
lens.sort()
return lens | 769cf5af50ba684c107a1312d2aeaab2721a29c6 | 3,651,109 |
import numpy as np
from numpy import unravel_index
def postprocess(p, gt, width_and_height, p_binary, false_positives=False, false_negatives=False):
"""
This function does matching and then postprocessing of p's and gt's
:param p: the objects given from rcnn
:param gt: the objects we get from the ground truth
:param width_and_height: the width and height of the image
:return: info_image: a list which contains the postprocessed p, rectangles for p, postprocessed gt, rectangles
for gt, width and height
"""
len_p = len(p)
len_gt = len(gt)
elements_in_p = [i for i in range(len_p)]
elements_in_gt = [i for i in range(len_gt)]
matching_table = create_matching_table(p, gt)
max_number_of_matches = min(matching_table.shape[0], matching_table.shape[1])
new_p = []
new_gt = []
new_rects_p = []
new_rects_gt = []
new_p_binary = []
new_gt_binary = []
threshold = 0.5
# in this part we create the real matches between p and gt
for _ in range(max_number_of_matches):
best_match = unravel_index(matching_table.argmax(), matching_table.shape)
if matching_table[best_match[0], best_match[1]] > threshold: # check if it is a different value from 0
matching_table[best_match[0], :] = 0.
matching_table[:, best_match[1]] = 0.
new_p.append(p[best_match[0], :21])
new_p_binary.append(p_binary[best_match[0]])
new_gt_binary.append(np.array([1., 0.]))
new_rects_p.append(p[best_match[0], 21:])
new_gt.append(gt[best_match[1], :21])
new_rects_gt.append(gt[best_match[1], 21:])
elements_in_p.remove(best_match[0])
elements_in_gt.remove(best_match[1])
# here we add the matches of false positives by inserting background class on the given rectangles on the ground
# truth
if false_positives:
for element in elements_in_p:
new_p.append(p[element, :21])
new_p_binary.append(p_binary[element])
new_rects_p.append(p[element, 21:])
new_gt.append(create_background_peak_array())
new_gt_binary.append(np.array([0., 1.])) # 0 - not background; 1 - background
new_rects_gt.append(p[element, 21:])
# here we deal with false negatives, by adding them as r-cnn outputs equal to the ground truth
if false_negatives:
for element in elements_in_gt:
new_p.append(gt[element, :21])
new_p_binary.append(np.array([1., 0.]))
new_rects_p.append(gt[element, 21:])
new_gt.append(gt[element, :21])
new_gt_binary.append((np.array([1., 0.])))
new_rects_gt.append(gt[element, 21:])
# convert all the lists to numpy arrays
new_p = np.asarray(new_p)
new_rects_p = np.asarray(new_rects_p)
new_gt = np.asarray(new_gt)
new_rects_gt = np.asarray(new_rects_gt)
# add all the postprocessed information to a list
info_image = [new_p, new_gt, new_rects_p, new_rects_gt, width_and_height, new_p_binary, new_gt_binary]
return info_image | dd83de4547f7c1461b64fcd2dfa4c3df54aefd10 | 3,651,110 |
from csb.bio.structure import TorsionAngles
import numpy
def deg(x):
"""
Convert an array of torsion angles in radians to torsion degrees
ranging from -180 to 180.
@param x: array of angles
@type x: numpy array
@rtype: numpy array
"""
func = numpy.vectorize(TorsionAngles.deg)
return func(x) | 95e37a0c644df1562e417c1ad61e4788bd46c279 | 3,651,111 |
import timeit
def run_median_trial():
"""Generate table for Median Trial."""
tbl = DataTable([10,15,15],['N', 'median_time', 'sort_median'])
trials = [2**k+1 for k in range(8,20)]
for n in trials:
t_med = 1000*min(timeit.repeat(stmt='assert(linear_median(a) == {}//2)'.format(n),
setup='''
import random
from ch01.challenge import linear_median
a = list(range({}))
random.shuffle(a)
'''.format(n), repeat=10, number=5))/5
t_sort = 1000*min(timeit.repeat(stmt='assert(median_from_sorted_list(a) == {0}//2)'.format(n),
setup='''
import random
from ch01.challenge import median_from_sorted_list
a = list(range({}))
random.shuffle(a)
'''.format(n), repeat=10, number=5))/5
tbl.row([n, t_med, t_sort])
return tbl | ed4c5ebe8bd6259c4adc45c4b023cc5bb96a1055 | 3,651,112 |
import numpy as np
def regroup(X, N):
"""
Regroups the rows and columns of X such that rows/cols
that are N apart in X are adjacent in Y. If N is a
2 element vector, N[0] is used for rows and N[1] is used
for columns.
Parameters:
X: m by n matrix to be regrouped.
N: Integer or two element vector.
Returns:
Y: Regrouped matrix.
"""
m, n = X.shape
if isinstance(N, int):
N = [N, N]
if m % N[0] != 0 or n % N[1] != 0:
raise ValueError('X dimensions need to be multiple\
of elements in N')
row_ind = np.ravel(
[[i + k for i in np.arange(0, m, N[0])] for k in range(N[0])])
col_ind = np.ravel(
[[i + k for i in np.arange(0, n, N[1])] for k in range(N[1])])
Y = X[row_ind, :]
Y = Y[:, col_ind]
return Y | 7ad92b878cb6a55820ef9ad92c68e934184d725d | 3,651,113 |
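For example, with a 4x4 matrix and N=2, rows (and columns) 0 and 2 become adjacent, followed by 1 and 3:

import numpy as np

X = np.arange(16).reshape(4, 4)
Y = regroup(X, 2)
# Row/column order becomes [0, 2, 1, 3], so the first row of Y is X[0, [0, 2, 1, 3]]
print(Y[0])  # [0 2 1 3]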
def return_estimators(n_components):
"""Returns all of the estimators that can be used to generate models.
A larger selection of possible estimators have been commented out, but
could be uncommented."""
estimators = [
('PCArandom',
decomposition.PCA(n_components=n_components, svd_solver='randomized',
whiten=True))
]
# estimators = [
# ('PCArandom',
# decomposition.PCA(n_components=n_components,
# svd_solver='randomized',
# whiten=True)),
# ('PCAfull',
# decomposition.PCA(n_components=n_components,
# svd_solver='full',
# whiten=True)),
# ('PCAarpack',
# decomposition.PCA(n_components=n_components,
# svd_solver='arpack',
# whiten=True)),
# ('PCAauto',
# decomposition.PCA(n_components=n_components,
# svd_solver='auto',
# whiten=True))
# ]
return estimators | 680aa1d50c4e2db0e4d3df9e60749350df437bb8 | 3,651,114 |
def _check_type_picks(picks):
"""helper to guarantee type integrity of picks"""
err_msg = 'picks must be None, a list or an array of integers'
if picks is None:
pass
elif isinstance(picks, list):
if not all(isinstance(i, int) for i in picks):
raise ValueError(err_msg)
picks = np.array(picks)
elif isinstance(picks, np.ndarray):
if not picks.dtype.kind == 'i':
raise ValueError(err_msg)
else:
raise ValueError(err_msg)
return picks | 79493f75db8e57f32a6369ad18900e0632d2bc18 | 3,651,115 |
def get_test_standard_scaler_str():
"""
Get a StandardScaler test code str
"""
test_code = cleandoc("""
standard_scaler = StandardScaler()
encoded_data = standard_scaler.fit_transform(df)
""")
return test_code | fd6e1daa7e0dddb603437e5b35c283a11e68ec00 | 3,651,116 |
from typing import List
from typing import Tuple
import re
def add_command(
command_list: List[Tuple[re.Pattern, callable]], func: callable, command_str: str
) -> List[Tuple[re.Pattern, callable]]:
"""Add a function and the command pattern to the command list.
Args:
command_list: Existing list of (pattern, function) pairs to extend
func: Function to be called when the command pattern matches
command_str: command string that specifies the pattern
Returns:
The command list with the new (pattern, function) pair appended
"""
command_pattern = build_command_pattern(command_str)
command_list.append((command_pattern, func))
return command_list | f8076e4a6b37722591eae04a67feb1c25e606b84 | 3,651,117 |
def get_clusters_and_critical_nodes(G, k, rho_star, phi_in):
"""
The implementation of the main body of the partitioning Algorithm.
The main while-loop of the algorithm is executed as long as a refinement is still possible.
:param phi_in: An algorithm parameter used to lower bound the inner conductance of each cluster
:param rho_star: A technical parameter of the algorithm
:param G: A networkx graph
:param k: The (supposed) number of clusters
:return: a list containing an l-wise partitioning of the nodes of G, for some l <= k
"""
# A list of vertices in the graph G
vertices = list(G.nodes())
# Initially the graph contains one cluster P_1 = V with core set core_1 = P_1.
P_1 = vertices[:]
core_1 = P_1[:]
# num_clusters is the variable denoting the current number of clusters
num_clusters = 1
# clusters is a list storing the current cluster structure of G (i.e. P_1, ..., P_l)
clusters = [P_1]
# core_sets is a list containing the current core_subsets of each cluster.
# (i.e. core_1, ..., core_(num_clusters) with core_i being a subset of P_i)
core_sets = [core_1]
# A list of lists, where each element grouped_critical_nodes[i] is a list of critical nodes from the tree T_i of
# cluster clusters[i]
grouped_critical_nodes = []
# The main loop of the algorithm. We continue as long as an update is possible
overall_update_is_found = True
while overall_update_is_found:
# At the beginning of the loop there is no update found
overall_update_is_found = False
# The main loop of the Partition Algorithm. We continue as long as a GT_update is possible
GT_update_is_found = True
while GT_update_is_found:
# First we check if a GT_update is possible
GT_update_is_found, index_cluster_to_update = check_if_GT_update_is_possible(G, clusters, core_sets,
phi_in)
if GT_update_is_found:
GT_update_is_done = False
# Notation of the corresponding sets of vertices
P_i = clusters[index_cluster_to_update]
core_i = core_sets[index_cluster_to_update]
S = cheeger_cut.cheeger_cut(G.subgraph(P_i))
S_complement = diff(vertices, S)
S_plus = intersect(S, core_i)
S_plus_bar = intersect(S_complement, core_i)
S_minus = intersect(diff(P_i, core_i), S)
S_minus_bar = intersect(diff(P_i, core_i), S_complement)
# Without loss of generality we assume vol(S_plus) < vol(core_i) / 2
if vol(G, S_plus) > vol(G, S_plus_bar):
S_plus, S_plus_bar = S_plus_bar, S_plus
S_minus, S_minus_bar = S_minus_bar, S_minus
# First "if" in the algorithm
if is_first_if_condition_satisfied(G, S_plus, S_plus_bar, k, num_clusters, rho_star):
make_new_cluster_with_subset_T_bar_of_core_i(
S_plus, S_plus_bar, clusters, core_sets, index_cluster_to_update)
num_clusters += 1
# A sanity check update
num_clusters = min(num_clusters, k)
GT_update_is_done = True
# Second "if" in the algorithm
if not GT_update_is_done and is_second_if_condition_satisfied(G, S_plus, S_plus_bar, core_i, k):
update_core_to_subset_T_or_T_bar(G, S_plus, S_plus_bar, core_sets, index_cluster_to_update)
GT_update_is_done = True
# Third "if" in the algorithm
if not GT_update_is_done and is_third_if_condition_satisfied(G, S_minus, k, num_clusters, rho_star):
make_new_cluster_with_subset_T_of_P_i(S_minus, clusters, core_sets, index_cluster_to_update)
num_clusters += 1
# A sanity check update
num_clusters = min(num_clusters, k)
GT_update_is_done = True
# At this point only a refinement of the partition is possible
if not GT_update_is_done:
# If there is a cluster P_j s.t. w(P_i - core_i -> P_i) < w(P_i - core_i -> P_j),
# then merge (P_i - core_i) with argmax_(P_j){w(P_i - core_i -> P_j)}
P_i_minus_core_i = diff(P_i, core_i)
# Find the index j of argmax_(P_j){w(P_i - core_i -> P_j)}.
best_cluster_index = find_cluster_P_j_that_maximises_weight_from_T_to_P_j(G, P_i_minus_core_i,
clusters)
# Forth "if" in the algorithm.
if best_cluster_index != index_cluster_to_update:
move_subset_T_from_P_i_to_P_j(P_i_minus_core_i, clusters, index_cluster_to_update,
best_cluster_index)
GT_update_is_done = True
if not GT_update_is_done:
# If there is a cluster P_j s.t. w(S_minus -> P_i) < w(S_minus -> P_j),
# then merge S_minus with argmax_(P_j){w(S_minus -> P_j)}
# Find the index j of argmax_(P_j){w(S_minus -> P_j)}.
best_cluster_index = find_cluster_P_j_that_maximises_weight_from_T_to_P_j(G, S_minus, clusters)
# Fifth "if" in the algorithm
if best_cluster_index != index_cluster_to_update:
move_subset_T_from_P_i_to_P_j(S_minus, clusters, index_cluster_to_update,
best_cluster_index)
GT_update_is_done = True
if not GT_update_is_done:
raise Exception('No GT_update performed in iteration')
grouped_critical_nodes = []
# Check if critical nodes need refinements
for i in range(len(clusters)):
# Get the list of critical nodes in the degree based construction of the graph G_i = G[P_i]
P_i = clusters[i]
core_i = core_sets[i]
G_i = G.subgraph(P_i)
T_i = tree.Tree()
T_i.make_tree(G_i, "degree")
critical_nodes_of_T_i = T_i.get_critical_nodes()
grouped_critical_nodes = grouped_critical_nodes + [critical_nodes_of_T_i]
for node in critical_nodes_of_T_i:
# Notation
N = node.vertices
N_complement = diff(vertices, N)
N_plus = intersect(N, core_i)
N_plus_bar = intersect(N_complement, core_i)
N_minus = intersect(diff(P_i, core_i), N)
N_minus_bar = intersect(diff(P_i, core_i), N_complement)
# Sixth "if" of the algorithm, first "if" of the refinement of the nodes,
if is_sixth_if_condition_satisfied(G, N_plus, N_plus_bar, k, num_clusters, rho_star):
make_new_cluster_with_subset_T_bar_of_core_i(
N_plus, N_plus_bar, clusters, core_sets, i)
num_clusters += 1
# A sanity check update
num_clusters = min(num_clusters, k)
overall_update_is_found = True
break
# Seventh "if" of the algorithm, second if of the refinement of the nodes
if not overall_update_is_found and is_seventh_if_condition_satisfied(G, N_plus, core_i, k):
update_core_to_subset_T_or_T_bar(G, N_plus, N_plus_bar, core_sets, i)
overall_update_is_found = True
break
# We attempt to move N_minus to the cluster P_j that maximises w(N_minus -> P_j)
if not overall_update_is_found and vol(G, N_minus) <= vol(G, P_i) / 2:
# Find the index j of argmax_(P_j){w(N_minus -> P_j)}.
# If best_cluster_index = i, then the eighth "if" is not satisfied
best_cluster_index = find_cluster_P_j_that_maximises_weight_from_T_to_P_j(G, N_minus, clusters)
# Eighth "if" of the algorithm, third if of the refinement of the nodes.
if weight(G, N_minus, P_i) < weight(G, N_minus, clusters[best_cluster_index]):
move_subset_T_from_P_i_to_P_j(N_minus, clusters, i,
best_cluster_index)
overall_update_is_found = True
break
if overall_update_is_found:
break
return clusters, grouped_critical_nodes | e7374c9cad30a87477ee5b9ce4d0a0e9cb7de041 | 3,651,118 |
def get_edges_out_for_vertex(edges: list, vertex: int) -> list:
"""Get a sublist of edges that have the specified vertex as first element
:param edges: edges of the graph
:param vertex: vertex of which we want to find the corresponding edges
:return: selected edges
"""
return [e for e in edges if e[0] == vertex] | 21485073df1c754e7c8e2b7dd9cafef284e601e7 | 3,651,119 |
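# Minimal usage sketch for get_edges_out_for_vertex above, using a hypothetical edge list.
edges = [(0, 1), (1, 2), (0, 2), (2, 0)]
assert get_edges_out_for_vertex(edges, 0) == [(0, 1), (0, 2)]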
def pellet_plot_multi_unaligned(FEDs, shade_dark, lights_on,
lights_off,**kwargs):
"""
    FED3 Viz: Plot cumulative pellet retrieval for multiple FEDs, keeping the
x-axis to show absolute time.
Parameters
----------
FEDs : list of FED3_File objects
FED3 files (loaded by load.FED3_File)
shade_dark : bool
Whether to shade lights-off periods
lights_on : int
Integer between 0 and 23 denoting the start of the light cycle.
lights_off : int
Integer between 0 and 23 denoting the end of the light cycle.
**kwargs :
ax : matplotlib.axes.Axes
Axes to plot on, a new Figure and Axes are
created if not passed
date_filter : array
A two-element array of datetimes (start, end) used to filter
the data
**kwargs also allows FED3 Viz to pass all settings to all functions.
Returns
-------
fig : matplotlib.figure.Figure
"""
if not isinstance(FEDs, list):
FEDs = [FEDs]
for file in FEDs:
assert isinstance(file, FED3_File),'Non FED3_File passed to pellet_plot_multi()'
if 'ax' not in kwargs:
fig, ax = plt.subplots(figsize=(7,3.5), dpi=150)
else:
ax = kwargs['ax']
min_date = np.datetime64('2100')
max_date = np.datetime64('1970')
for file in FEDs:
df = file.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
x = df.index
y = df['Pellet_Count']
ax.plot(x, y, label=file.filename, alpha=.6, lw=1)
if max(x) > max_date:
max_date = max(x)
if min(x) < min_date:
min_date = min(x)
ax.set_xlabel('Time (h)')
date_format_x(ax, min_date, max_date)
ax.set_ylabel('Cumulative Pellets')
title = ('Pellets Retrieved for Multiple FEDs')
ax.set_title(title)
if shade_dark:
shade_darkness(ax, min_date, max_date,
lights_on=lights_on,
lights_off=lights_off)
if len(FEDs) < 10:
ax.legend(bbox_to_anchor=(1,1), loc='upper left')
plt.tight_layout()
return fig if 'ax' not in kwargs else None | 3601e8ecff20a3d7978f7261ebaa5236d662a25e | 3,651,120 |
import time
def sync_via_mrmsdtw(f_chroma1: np.ndarray,
f_chroma2: np.ndarray,
f_DLNCO1: np.ndarray = None,
f_DLNCO2: np.ndarray = None,
input_feature_rate: float = 50,
step_sizes: np.ndarray = np.array([[1, 0], [0, 1], [1, 1]], np.int32),
step_weights: np.ndarray = np.array([1.0, 1.0, 1.0], np.float64),
threshold_rec: int = 10000, win_len_smooth: np.ndarray = np.array([201, 101, 21, 1]),
downsamp_smooth: np.ndarray = np.array([50, 25, 5, 1]),
verbose: bool = False,
dtw_implementation: str = 'synctoolbox',
normalize_chroma: bool = True,
chroma_norm_ord: int = 2,
chroma_norm_threshold: float = 0.001):
"""Compute memory-restricted multi-scale DTW (MrMsDTW) using chroma and (optionally) DLNCO features.
MrMsDTW is performed on multiple levels that get progressively finer, with rectangular constraint
regions defined by the alignment found on the previous, coarser level.
If DLNCO features are provided, these are used on the finest level in addition to chroma
to provide higher synchronization accuracy.
Parameters
----------
f_chroma1 : np.ndarray [shape=(12, N)]
Chroma feature matrix of the first sequence
f_chroma2 : np.ndarray [shape=(12, M)]
Chroma feature matrix of the second sequence
f_DLNCO1 : np.ndarray [shape=(12, N)]
DLNCO feature matrix of the first sequence (optional, default: None)
f_DLNCO2 : np.ndarray [shape=(12, M)]
DLNCO feature matrix of the second sequence (optional, default: None)
input_feature_rate: float
Input feature rate of the chroma features (default: 50)
step_sizes: np.ndarray
DTW step sizes (default: np.array([[1, 0], [0, 1], [1, 1]]))
step_weights: np.ndarray
DTW step weights (np.array([1.0, 1.0, 1.0]))
threshold_rec: int
Defines the maximum area that is spanned by the rectangle of two
consecutive elements in the alignment (default: 10000)
win_len_smooth : np.ndarray
Window lengths for chroma feature smoothing (default: np.array([201, 101, 21, 1]))
downsamp_smooth : np.ndarray
Downsampling factors (default: np.array([50, 25, 5, 1]))
verbose : bool
Set `True` for visualization (default: False)
dtw_implementation : str
DTW implementation, librosa or synctoolbox (default: synctoolbox)
normalize_chroma : bool
Set `True` to normalize input chroma features after each downsampling
and smoothing operation.
chroma_norm_ord: int
Order of chroma normalization, relevant if ``normalize_chroma`` is True.
(default: 2)
chroma_norm_threshold: float
If the norm falls below threshold for a feature vector, then the
normalized feature vector is set to be the unit vector. Relevant, if
``normalize_chroma`` is True (default: 0.001)
Returns
-------
alignment : np.ndarray [shape=(2, T)]
Resulting warping path
"""
# If DLNCO features are given as input, high resolution MrMsDTW is activated.
high_res = False
if f_DLNCO1 is not None and f_DLNCO2 is not None:
high_res = True
if high_res and (f_chroma1.shape[1] != f_DLNCO1.shape[1] or f_chroma2.shape[1] != f_DLNCO2.shape[1]):
raise ValueError('Chroma and DLNCO features must be of the same length.')
if downsamp_smooth[-1] != 1 or win_len_smooth[-1] != 1:
raise ValueError('The downsampling factor of the last iteration must be equal to 1, i.e.'
'at the last iteration, it is computed at the input feature rate!')
num_iterations = win_len_smooth.shape[0]
cost_matrix_size_old = tuple()
feature_rate_old = input_feature_rate / downsamp_smooth[0]
alignment = None
total_computation_time = 0.0
for it in range(num_iterations):
tic1 = time.perf_counter()
# Smooth and downsample given raw features
f_chroma1_cur, _ = smooth_downsample_feature(f_chroma1,
input_feature_rate=input_feature_rate,
win_len_smooth=win_len_smooth[it],
downsamp_smooth=downsamp_smooth[it])
f_chroma2_cur, feature_rate_new = smooth_downsample_feature(f_chroma2,
input_feature_rate=input_feature_rate,
win_len_smooth=win_len_smooth[it],
downsamp_smooth=downsamp_smooth[it])
if normalize_chroma:
f_chroma1_cur = normalize_feature(f_chroma1_cur,
norm_ord=chroma_norm_ord,
threshold=chroma_norm_threshold)
f_chroma2_cur = normalize_feature(f_chroma2_cur,
norm_ord=chroma_norm_ord,
threshold=chroma_norm_threshold)
# Project path onto new resolution
cost_matrix_size_new = (f_chroma1_cur.shape[1], f_chroma2_cur.shape[1])
if alignment is None:
# Initialize the alignment with the start and end frames of the feature sequence
anchors = np.array([[0, f_chroma1_cur.shape[1] - 1], [0, f_chroma2_cur.shape[1] - 1]])
else:
projected_alignment = project_alignment_on_a_new_feature_rate(alignment=alignment,
feature_rate_old=feature_rate_old,
feature_rate_new=feature_rate_new,
cost_matrix_size_old=cost_matrix_size_old,
cost_matrix_size_new=cost_matrix_size_new)
anchors = derive_anchors_from_projected_alignment(projected_alignment=projected_alignment,
threshold=threshold_rec)
# Cost matrix and warping path computation
if high_res and it == num_iterations - 1:
# Compute cost considering chroma and pitch onset features and alignment only in the last iteration,
# where the features are at the finest level.
cost_matrices_step1 = compute_cost_matrices_between_anchors(f_chroma1=f_chroma1_cur,
f_chroma2=f_chroma2_cur,
f_DLNCO1=f_DLNCO1,
f_DLNCO2=f_DLNCO2,
anchors=anchors)
else:
cost_matrices_step1 = compute_cost_matrices_between_anchors(f_chroma1=f_chroma1_cur,
f_chroma2=f_chroma2_cur,
anchors=anchors)
wp_list = compute_warping_paths_from_cost_matrices(cost_matrices_step1,
step_sizes=step_sizes,
step_weights=step_weights,
implementation=dtw_implementation)
# Concatenate warping paths
wp = build_path_from_warping_paths(warping_paths=wp_list,
anchors=anchors)
anchors_step1 = None
wp_step1 = None
num_rows_step1 = 0
num_cols_step1 = 0
ax = None
toc1 = time.perf_counter()
if verbose and cost_matrices_step1 is not None:
anchors_step1 = np.array(anchors, copy=True)
wp_step1 = np.array(wp, copy=True)
num_rows_step1, num_cols_step1 = np.sum(np.array([dtw_mat.shape for dtw_mat in cost_matrices_step1], int),
axis=0)
fig, ax = sync_visualize_step1(cost_matrices_step1,
num_rows_step1,
num_cols_step1,
anchors,
wp)
tic2 = time.perf_counter()
# Compute neighboring anchors and refine alignment using local path between neighboring anchors
anchor_indices_in_warping_path = find_anchor_indices_in_warping_path(wp, anchors=anchors)
# Compute neighboring anchors for refinement
neighboring_anchors, neighboring_anchor_indices = \
derive_neighboring_anchors(wp, anchor_indices=anchor_indices_in_warping_path)
if neighboring_anchor_indices.shape[0] > 1 \
and it == num_iterations - 1 and high_res:
cost_matrices_step2 = compute_cost_matrices_between_anchors(f_chroma1=f_chroma1_cur,
f_chroma2=f_chroma2_cur,
f_DLNCO1=f_DLNCO1,
f_DLNCO2=f_DLNCO2,
anchors=neighboring_anchors)
else:
cost_matrices_step2 = compute_cost_matrices_between_anchors(f_chroma1=f_chroma1_cur,
f_chroma2=f_chroma2_cur,
anchors=neighboring_anchors)
wp_list_refine = compute_warping_paths_from_cost_matrices(cost_matrices=cost_matrices_step2,
step_sizes=step_sizes,
step_weights=step_weights,
implementation=dtw_implementation)
wp = __refine_wp(wp, anchors, wp_list_refine, neighboring_anchors, neighboring_anchor_indices)
toc2 = time.perf_counter()
computation_time_it = toc2 - tic2 + toc1 - tic1
total_computation_time += computation_time_it
alignment = wp
feature_rate_old = feature_rate_new
cost_matrix_size_old = cost_matrix_size_new
if verbose and cost_matrices_step2 is not None:
sync_visualize_step2(ax,
cost_matrices_step2,
wp,
wp_step1,
num_rows_step1,
num_cols_step1,
anchors_step1,
neighboring_anchors)
print('Level {} computation time: {:.2f} seconds'.format(it, computation_time_it))
if verbose:
print('Computation time of MrMsDTW: {:.2f} seconds'.format(total_computation_time))
return alignment | 00dac7bdde14597e0daece958e65761ec01d1494 | 3,651,121 |
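# Rough usage sketch for sync_via_mrmsdtw above: random chroma matrices stand in for real
# features, and the synctoolbox helper functions it calls are assumed to be importable.
import numpy as np
f_chroma1 = np.random.rand(12, 500)   # ~10 s of features at 50 Hz
f_chroma2 = np.random.rand(12, 650)   # ~13 s of features at 50 Hz
wp = sync_via_mrmsdtw(f_chroma1, f_chroma2, input_feature_rate=50, verbose=False)
print(wp.shape)                        # (2, T) warping path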
def simulate_beta_binomial(
K, D, sigma2, theta, mu=0, invlink=logistic, seed=None):
"""Simulates from binomial Gaussian process with Beta latent noise.
Args:
K: Cell-state kernel, for example as generated by create_linear_kernel
or create_rbf_kernel.
D: Array of total counts.
sigma2: Kernel variance component.
theta: Dispersion parameter. If zero, sample from a regular Binomial
distribution instead.
mu: Optional fixed effects on a logit scale. Defaults to zero, which
corresponds to a binomial mean of 0.5.
invlink: Inverse link function. Defaults to invlogit.
seed: Random seed.
Returns:
List with alternative counts, latent rates as well as sampled binomial
means.
"""
D = atleast_2d_column(D)
n, p = D.shape
rng = np.random.default_rng(seed)
if sigma2 == 0:
latent = mu * np.ones((n, p))
else:
mu = mu * np.ones((n, 1))
latent = _sample_normal(p, mu, sigma2*K, rng)
beta_mean = invlink(latent)
if theta > 0:
binomial_mean = rng.beta(a=beta_mean / theta, b=(1-beta_mean) / theta)
else:
binomial_mean = beta_mean
a = rng.binomial(n=D, p=binomial_mean)
return {'A': a, 'beta_mean': beta_mean, 'binomial_mean': binomial_mean} | de4648af70a6b35c7b7f5edc2c151a98db6d7603 | 3,651,122 |
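# Small sketch for simulate_beta_binomial above; logistic(), _sample_normal() and
# atleast_2d_column() from the same module are assumed to be in scope, and the identity
# kernel is purely illustrative.
import numpy as np
n_cells = 5
K = np.eye(n_cells)                          # trivial cell-state kernel
D = np.array([10, 20, 15, 30, 25])           # total counts per cell
sim = simulate_beta_binomial(K, D, sigma2=1.0, theta=0.1, seed=42)
print(sim['A'].shape, sim['binomial_mean'].shape)   # (5, 1) (5, 1)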
def convert_to_floats(tsi):
"""
A helper function that tax all of the fields of a TaxSaveInputs model
and converts them to floats, or list of floats
"""
def numberfy_one(x):
if isinstance(x, float):
return x
else:
return float(x)
def numberfy(x):
if isinstance(x, list):
return [numberfy_one(i) for i in x]
else:
return numberfy_one(x)
attrs = vars(tsi)
return {k: numberfy(v) for k, v in list(attrs.items()) if v} | a6f93f402c547435fa9fe611481084215f52f13b | 3,651,123 |
def properties_filter(mol):
"""
    Checks whether the molecule passes the property filters on logP, MW, HBA, HBD, TPSA and NRB
"""
#frag = Chem.rdmolops.GetMolFrags(mol) # remove '.'
#if len(frag) > 1:
#return False
MW_s = Descriptors.MolWt(mol) # MW
if MW_s < 250 or MW_s > 750:
return False
ALOGP_s = Descriptors.MolLogP(mol) # ALOGP
if ALOGP_s < -2 or ALOGP_s > 7:
return False
HBA_s = 0
for hba in Acceptors: # HBA
if mol.HasSubstructMatch(hba):
matches = mol.GetSubstructMatches(hba)
HBA_s += len(matches)
HBD_s = Descriptors.NumHDonors(mol) # HBD
if HBA_s + HBD_s >= 10:
return False
TPSA_s = Descriptors.TPSA(mol) # TPSA
if TPSA_s >= 150:
return False
NRB_s = Descriptors.NumRotatableBonds(mol) # NRB
if NRB_s >= 10:
return False
return True | bc124620baddb828b4c5cb82e0b0374bdb51bad7 | 3,651,124 |
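# Illustrative check of properties_filter above. The module-level Acceptors list of RDKit
# SMARTS patterns (and rdkit.Chem.Descriptors) is assumed; a minimal stand-in is defined
# here just so the call runs.
from rdkit import Chem
Acceptors = [Chem.MolFromSmarts(s) for s in ('[O;H0;v2]', '[N;v3;H0]')]
mol = Chem.MolFromSmiles('CC(=O)Oc1ccccc1C(=O)O')   # aspirin, MW ~180
print(properties_filter(mol))                        # False: fails the 250-750 MW window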
def _create_certificate_chain():
"""
Construct and return a chain of certificates.
1. A new self-signed certificate authority certificate (cacert)
2. A new intermediate certificate signed by cacert (icert)
3. A new server certificate signed by icert (scert)
"""
caext = X509Extension(b('basicConstraints'), False, b('CA:true'))
# Step 1
cakey = PKey()
cakey.generate_key(TYPE_RSA, 512)
cacert = X509()
cacert.get_subject().commonName = "Authority Certificate"
cacert.set_issuer(cacert.get_subject())
cacert.set_pubkey(cakey)
cacert.set_notBefore(b("20000101000000Z"))
cacert.set_notAfter(b("20200101000000Z"))
cacert.add_extensions([caext])
cacert.set_serial_number(0)
cacert.sign(cakey, "sha1")
# Step 2
ikey = PKey()
ikey.generate_key(TYPE_RSA, 512)
icert = X509()
icert.get_subject().commonName = "Intermediate Certificate"
icert.set_issuer(cacert.get_subject())
icert.set_pubkey(ikey)
icert.set_notBefore(b("20000101000000Z"))
icert.set_notAfter(b("20200101000000Z"))
icert.add_extensions([caext])
icert.set_serial_number(0)
icert.sign(cakey, "sha1")
# Step 3
skey = PKey()
skey.generate_key(TYPE_RSA, 512)
scert = X509()
scert.get_subject().commonName = "Server Certificate"
scert.set_issuer(icert.get_subject())
scert.set_pubkey(skey)
scert.set_notBefore(b("20000101000000Z"))
scert.set_notAfter(b("20200101000000Z"))
scert.add_extensions([
X509Extension(b('basicConstraints'), True, b('CA:false'))])
scert.set_serial_number(0)
scert.sign(ikey, "sha1")
return [(cakey, cacert), (ikey, icert), (skey, scert)] | 156a61e8159b1826def8fa33d5c5965add2c7f2e | 3,651,125 |
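# Quick sanity check of the chain above using pyOpenSSL.
from OpenSSL.crypto import FILETYPE_PEM, dump_certificate
chain = _create_certificate_chain()
for _key, cert in chain:
    print(cert.get_subject().commonName)        # Authority / Intermediate / Server
    pem = dump_certificate(FILETYPE_PEM, cert)  # serialize each certificate to PEM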
def build_job_spec_name(file_name, version="develop"):
"""
:param file_name:
:param version:
:return: str, ex. job-hello_world:develop
"""
name = file_name.split('.')[-1]
job_name = 'job-%s:%s' % (name, version)
return job_name | 55a45052852e6b24cb4370f7efe5c213da83e423 | 3,651,126 |
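# Usage sketch for build_job_spec_name above, with a hypothetical dotted spec name.
assert build_job_spec_name('specs.jobs.hello_world') == 'job-hello_world:develop'
assert build_job_spec_name('specs.jobs.hello_world', version='v1.2') == 'job-hello_world:v1.2'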
import torch
def draw_mask(im: torch.Tensor, mask: torch.Tensor, t=0.2, color=(255, 255, 255), visualize_instances=True):
"""
Visualize mask where mask = 0.
Supports multiple instances.
mask shape: [N, C, H, W], where C is different instances in same image.
"""
assert len(mask.shape) in (3, 4), mask.shape
mask = mask.view(-1, *mask.shape[-3:])
im = im.view(-1, *im.shape[-3:])
assert im.dtype == torch.uint8, im.dtype
assert 0 <= t <= 1
if not visualize_instances:
mask = mask.any(dim=1, keepdim=True)
mask = mask.float()
kernel = torch.ones((3, 3), dtype=mask.dtype, device=mask.device)
outer_border = dilation(mask, kernel).logical_xor(mask)
outer_border = outer_border.any(dim=1, keepdim=True).repeat(1, 3, 1, 1) > 0
inner_border = erosion(mask, kernel).logical_xor(mask)
inner_border = inner_border.any(dim=1, keepdim=True).repeat(1, 3, 1, 1) > 0
mask = (mask == 0).any(dim=1, keepdim=True).repeat(1, 3, 1, 1)
color = torch.tensor(color).to(im.device).byte().view(1, 3, 1, 1)#.repeat(1, *im.shape[1:])
color = color.repeat(im.shape[0], 1, *im.shape[-2:])
im[mask] = (im[mask] * (1-t) + t * color[mask]).byte()
im[outer_border] = 255
im[inner_border] = 0
return im | 45d12dbc695755f0231ca2a8d0f8d1cdf2f423ff | 3,651,127 |
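# Small sketch for draw_mask above; dilation/erosion are assumed to come from
# kornia.morphology, and the single square mask here is purely illustrative.
import torch
im = torch.randint(0, 256, (1, 3, 64, 64), dtype=torch.uint8)
mask = torch.zeros(1, 1, 64, 64)
mask[..., 16:48, 16:48] = 1.0
out = draw_mask(im.clone(), mask, t=0.3, color=(255, 0, 0))   # red overlay where mask == 0
print(out.shape, out.dtype)                                   # torch.Size([1, 3, 64, 64]) torch.uint8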
def view_about():
"""
shows the about page
:return:
:rtype:
"""
return render_template('about.html', title="About Flask AWS Template") | a364842c165864aba34605f3ffdd8c1d412015e8 | 3,651,128 |
import numpy
def viterbi(observed_values,
transition_probabilities,
emission_probabilities,
initial_distribution,
file_name,
log=True):
"""Calculates the viterbi-path for a given hidden-markov-model, heavily
inspired by Abhisek Janas Blogpost "Implement Viterbi Algorithm in Hidden
Markov Model using Python and R" at February 21, 2019.
The Blog as well as the original source-code can be found under http://www.adeveloperdiary.com/data-science/machine-learning/implement-viterbi-algorithm-in-hidden-markov-model-using-python-and-r/ #noqa
Args:
observed_values (np.array): visible part of the hidden-markov-model
transition_probabilities (np.array): transition probabilities for the
hidden part of the hidden-markov-model
emission_probabilities (np.array): transition probabilities for the
visible part of the hidden-markov-model
initial_distribution (np.array): probabilities for the initial status
log (bool) = True: The results are calculated using the logarithmic
projection
Returns:
(np.array): the viterbi-path for the given hidden-markov-model
"""
# Amount of steps
epochs = observed_values.shape[0]
# Amount of states
states = transition_probabilities.shape[0]
    # Highest probability to end in a specific state
omega = numpy.zeros((epochs, states), dtype=numpy.longdouble)
prev = numpy.zeros((epochs - 1, states), dtype=numpy.longdouble)
    # Two Dimensional Array, which holds all forward probabilities for every
# state and epoch
forward_probs = numpy.zeros((epochs, states), dtype=numpy.longdouble)
    # Two Dimensional Array, which holds all backward probabilities for every
# state and epoch
backward_probs = numpy.zeros((epochs, states), dtype=numpy.longdouble)
    # Since we start at the back of the list we need to init it with a one,
# instead of a zero
backward_probs[epochs - 1] = numpy.ones((states))
    # Two Dimensional Array, which holds all posteriori probabilities for every
# state and epoch
posteriori_probs = numpy.zeros((epochs, states), dtype=numpy.longdouble)
# Calculation of the probability for the observed initial state
if log:
omega[0, :] = numpy.log(initial_distribution * emission_probabilities[:, observed_values[0]-1]) #noqa
else:
omega[0, :] = initial_distribution * emission_probabilities[:, observed_values[0]-1] #noqa
forward_probs[0, :] = initial_distribution * emission_probabilities[:, observed_values[0]-1] #noqa
for epoch in range(1, epochs):
for state in range(1, -1, -1):
# Calculate the probability of obtaining the observed value for
# each possible transition.
if log:
probability = omega[epoch - 1] + \
numpy.log(transition_probabilities[:, state]) + \
numpy.log(emission_probabilities[state, observed_values[epoch]-1]) #noqa
else:
probability = omega[epoch - 1] * \
transition_probabilities[:, state] * \
emission_probabilities[state, observed_values[epoch]-1]
# This is our most probable state given previous state at epoch
prev[epoch - 1, state] = numpy.argmax(probability)
# save probability of the most probable state
omega[epoch, state] = numpy.max(probability)
            # Calculate forward probabilities for Posteriori-Decoding
            # The sum of the equations is calculated with matrix
            # multiplication(.dot), since that way a generic implementation
            # is provided!
if not log:
forward_probs[epoch, state] = emission_probabilities[state, observed_values[epoch]-1] * forward_probs[epoch - 1].dot(transition_probabilities[:, state]) #noqa
# Path Array
path = numpy.zeros(epochs)
# Find the most probable last hidden state
last_state = numpy.argmax(omega[epochs - 1, :]).astype(int)
# Start building the path
path[0] = last_state
# Start backtracking
backtrack_index = 1
for i in range(epochs - 2, -1, -1):
# Calculate the next hidden state based on its successor
next_hidden = prev[i, last_state]
# Add state to the path
path[backtrack_index] = next_hidden
# Save state for the next backtracking step
last_state = next_hidden.astype(int)
backtrack_index += 1
        # Posteriori-Decoding, calculate backward probabilities.
        # The sum of the equations is calculated with matrix
        # multiplication(.dot), since that way a generic implementation is
        # provided!
        # The results are at this point in the reversed order, since we started
        # to calculate them from the end!
if not log:
for state in range(states):
backward_probs[i, state] = (backward_probs[i+1]*emission_probabilities[:, observed_values[i]-1]).dot(transition_probabilities[state, :]) #noqa
# Flip the path array since we were backtracking
path = numpy.flip(path, axis=0)
# Convert numeric values to actual hidden states
result = ""
for element in path:
if element == 0:
result = result + "F"
else:
result = result + "L"
    # Posteriori-Decoding, calculate posteriori probabilities.
if not log:
        # Flip the backward probabilities to provide the probabilities in
# the correct order
backward_probs = numpy.flip(backward_probs, axis=0)
increase = 1
for i in range(epochs):
# A counter to manage the constant multiplication used
if(i % 20 == 0):
# increase the multiplication factor
increase *= numpy.longdouble(10**5)
# Calculate the posteriori probability based on the given algorithm
posteriori_probs[i, :] = ((forward_probs[i, :]*increase) * (backward_probs[i, :]*increase)) / (numpy.max(omega[epochs-1, :])*increase) #noqa
# Remove the constant factor and override the current posteriori
# probability, to give a correct value
posteriori_probs[i, :] = posteriori_probs[i, :] / increase
numpy.savetxt("results\\posteriori-decoding"+file_name, posteriori_probs) #noqa
dirName = "results\\viterbi-Path"+file_name
text_file = open(dirName, "w")
text_file.write(result)
text_file.close()
return result | b063e5c5bbf566afb0f16175d9d229bef7a953f1 | 3,651,129 |
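# Toy fair/loaded-die example for the viterbi function above. Note that it writes its
# output files under results\, which is assumed to exist.
import numpy
obs = numpy.array([1, 6, 6, 3, 6, 6, 2, 6])                  # observed rolls, values 1..6
transition = numpy.array([[0.95, 0.05], [0.10, 0.90]])       # F <-> L transitions
emission = numpy.vstack([numpy.full(6, 1/6),                 # fair die
                         numpy.array([0.1]*5 + [0.5])])      # loaded die favours 6
initial = numpy.array([0.5, 0.5])
path = viterbi(obs, transition, emission, initial, file_name="_demo.txt", log=True)
print(path)                                                  # string of 'F'/'L' states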
def extract_psf_fitting_names(psf):
"""
Determine the names of the x coordinate, y coordinate, and flux from
a model. Returns (xname, yname, fluxname)
"""
if hasattr(psf, 'xname'):
xname = psf.xname
elif 'x_0' in psf.param_names:
xname = 'x_0'
else:
raise ValueError('Could not determine x coordinate name for '
'psf_photometry.')
if hasattr(psf, 'yname'):
yname = psf.yname
elif 'y_0' in psf.param_names:
yname = 'y_0'
else:
raise ValueError('Could not determine y coordinate name for '
'psf_photometry.')
if hasattr(psf, 'fluxname'):
fluxname = psf.fluxname
elif 'flux' in psf.param_names:
fluxname = 'flux'
else:
raise ValueError('Could not determine flux name for psf_photometry.')
return xname, yname, fluxname | cee108dd1f97e506b60ba621c7f08efa7b5c33d7 | 3,651,130 |
def config_check_conformance(cookie, dn):
""" Auto-generated UCS XML API Method. """
method = ExternalMethod("ConfigCheckConformance")
method.cookie = cookie
method.dn = dn
xml_request = method.to_xml(option=WriteXmlOption.DIRTY)
return xml_request | 598fbd665dcf18a35104400bf7debfc64347c3b5 | 3,651,132 |
def get_dist_to_port(geotiff):
"""
Extract "truth" dist_to_port from geotiff
"""
with Geotiff(geotiff) as tif:
dist_to_port = tif.values
return dist_to_port | 1a77c2ac905eea2d1796529297168dac394b4bdb | 3,651,133 |
import inspect
def build_dataset_exporter(
dataset_type, strip_none=True, warn_unused=True, **kwargs
):
"""Builds the :class:`DatasetExporter` instance for the given parameters.
Args:
dataset_type: the :class:`fiftyone.types.dataset_types.Dataset` type
strip_none (True): whether to exclude None-valued items from ``kwargs``
warn_unused (True): whether to issue warnings for any non-None unused
parameters encountered
**kwargs: keyword arguments to pass to the dataset exporter's
constructor via ``DatasetExporter(**kwargs)``
Returns:
a tuple of:
- the :class:`DatasetExporter` instance
- a dict of unused keyword arguments
"""
if dataset_type is None:
raise ValueError(
"You must provide a `dataset_type` in order to build a dataset "
"exporter"
)
if inspect.isclass(dataset_type):
dataset_type = dataset_type()
dataset_exporter_cls = dataset_type.get_dataset_exporter_cls()
if strip_none:
kwargs = {k: v for k, v in kwargs.items() if v is not None}
kwargs, unused_kwargs = fou.extract_kwargs_for_class(
dataset_exporter_cls, kwargs
)
try:
dataset_exporter = dataset_exporter_cls(**kwargs)
except Exception as e:
raise ValueError(
"Failed to construct exporter of type %s using the provided "
"parameters. See above for the error. You may need to supply "
"additional mandatory arguments. Please consult the documentation "
"of %s to learn more"
% (dataset_exporter_cls, dataset_exporter_cls)
) from e
if warn_unused:
for key, value in unused_kwargs.items():
if value is not None:
logger.warning(
"Ignoring unsupported parameter '%s' for exporter type %s",
key,
dataset_exporter_cls,
)
return dataset_exporter, unused_kwargs | 6a21c90ee2a9c297ad86515f5078221459b1fb01 | 3,651,134 |
def conditions(x):
"""
This function will check whether the constraints that apply to
our optimization are met or not.
"""
if ( (10/x[0]) > 66.0 ):
return False
elif ( (10/x[0] + 12/x[1]) > 88.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2]) > 107.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3]) > 128.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4]) > 157.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4] + 20/x[5]) > 192.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4] + 20/x[5] + 10/x[6]) > 222.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4] + 20/x[5] + 10/x[6] + 10/x[7]) > 242.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4] + 20/x[5] + 10/x[6] + 10/x[7] + 16/x[8]) > 268.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4] + 20/x[5] + 10/x[6] + 10/x[7] + 16/x[8] + 8/x[9]) > 292.0 ):
return False
return True | 263fdc3fd07aa656982401f71071fcd684b8625f | 3,651,135 |
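# Usage sketch for conditions above: with unit rates every cumulative constraint holds.
x = [1.0] * 10
print(conditions(x))   # True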
def get_commit_ancestors_graph(refenv, starting_commit):
"""returns a DAG of all commits starting at some hash pointing to the repo root.
Parameters
----------
refenv : lmdb.Environment
lmdb environment where the commit refs are stored
starting_commit : string
commit hash to start creating the DAG from
Returns
-------
dict
a dictionary where each key is a commit hash encountered along the way,
and it's value is a list containing either one or two elements which
identify the child commits of that parent hash.
"""
parent_commit = starting_commit
commit_graph = {}
    seen = {starting_commit}
more_work = []
end_commit = False
if parent_commit == '':
end_commit = True
while end_commit is not True:
childCommit = get_commit_ancestors(refenv, parent_commit)
if ((childCommit.master_ancestor == '') or (childCommit.master_ancestor in seen)):
end_commit = True
commit_graph[parent_commit] = [childCommit.master_ancestor]
if len(more_work) != 0:
master_commit = more_work.pop(0)
end_commit = False
else:
continue
elif childCommit.is_merge_commit is True:
master_commit = childCommit.master_ancestor
dev_commit = childCommit.dev_ancestor
more_work.append(dev_commit)
commit_graph[parent_commit] = [master_commit, dev_commit]
seen.add(master_commit)
seen.add(dev_commit)
else:
master_commit = childCommit.master_ancestor
commit_graph[parent_commit] = [master_commit]
seen.add(master_commit)
parent_commit = master_commit
return commit_graph | 078819cf0291a5e4e1e8ad4ea409f475c0df93fd | 3,651,137 |
def is_verification_handshake(rjson):
"""
Determines if the request is the Slack application APIs verification handshake
:rtype: bool
"""
# Check body contains the right keys
for x in ['token', 'challenge', 'type']:
if x not in rjson:
return False
# Check type is correct
if rjson['type'] != "url_verification":
return False
# Note: no need to check the token, we check the request is signed
# before this code is ever run.
# It's a verification request
log.info("Received URL verification handshake request")
return True | 1ceccd9ca578bd09e9629cd59e565bc523502030 | 3,651,138 |
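# Usage sketch for is_verification_handshake above; the module-level `log` logger is assumed.
payload = {"token": "abc123", "challenge": "xyz789", "type": "url_verification"}
print(is_verification_handshake(payload))                      # True
print(is_verification_handshake({"type": "event_callback"}))   # False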
def template_node(scope_key):
""" Create and return a new template node.
Parameters
----------
scope_key : object
The key for the local scope in the local storage maps.
Returns
-------
result : TemplateNode
A new compiler template node.
"""
node = TemplateNode()
node.scope_key = scope_key
return node | 4cd9721dd9f9f91cb84326391630274b8f5764a7 | 3,651,139 |
def GetAutoResult(chroot_path, buildbucket_id):
"""Returns the conversion of the result of 'cros buildresult'."""
# Calls 'cros buildresult' to get the status of the tryjob.
build_result = GetStatusFromCrosBuildResult(chroot_path, buildbucket_id)
# The string returned by 'cros buildresult' might not be in the mapping.
if build_result not in builder_status_mapping:
raise ValueError(
'"cros buildresult" return value is invalid: %s' % build_result)
return builder_status_mapping[build_result] | 705fbc011c11fa67d0b61f130a3b6f024a6dcd44 | 3,651,140 |
def rft(x):
"""
Real Fourier Transform
"""
# XXX figure out what exactly this is doing...
s = x.shape[-1]
xp = np.zeros(x.shape,dtype="complex64")
    xp[...,1:s//2] = x[...,1:-1:2]+x[...,2::2]*1.j
    xp[...,0] = x[...,0]/2.
    xp[...,s//2] = x[...,-1]/2.
return np.array(nmr_reorder(np.fft.fft(2*xp,axis=-1).real),dtype="float32") | 3a65f0a0059df4c74b223f3284e996b82d7ebf02 | 3,651,141 |
def yam_path(manifestsdir):
"""Bundletracker manifest."""
return join(manifestsdir, 'yam.json') | 5d1b5162bd8285d8e33c822a3b5edcc996452719 | 3,651,142 |
def single_from(iterable):
"""Check that an iterable contains one unique value, and return it."""
unique_vals = set(iterable)
if len(unique_vals) != 1:
raise ValueError('multiple unique values found')
return unique_vals.pop() | c8fb8864083195ad913ff1ddf0114b5a50068902 | 3,651,143 |
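# Usage sketch for single_from above.
assert single_from([7, 7, 7]) == 7
# single_from([1, 2]) would raise ValueError('multiple unique values found')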
import requests
def vthash(filehash: str):
"""Returns the analysis data class for a file in VirusTotal's database"""
endpoint_path = f'/files/{filehash}'
endpoint = f"{api_base_url}{endpoint_path}"
r = requests.get(endpoint, headers=header)
if r.status_code == 404 and r.json()['error']['code'] == 'NotFoundError':
return None
elif r.status_code == 200:
return analysisdata(r) | bf4f334ad7a35e1141f9e00a44544fdd0709b411 | 3,651,144 |
def prod(x, axis=None, keepdims=False):
"""
    product of all elements in the array
Parameters
----------
x : tensor_like
input array
axis : int, tuple of ints
axis or axes along which a product is performed
keepdims : bool
keep dimensionality or not
Returns
-------
product : tensor_like
        product of all elements
"""
return Product(axis=axis, keepdims=keepdims).forward(x) | 8962e7b6abd16c9354f076c0c6d718b82fe44223 | 3,651,145 |
from typing import List
import difflib
def menu(queue: List[str] = None):
"""Fred Menu"""
fred_controller = FredController(queue)
an_input = "HELP_ME"
while True:
# There is a command in the queue
if fred_controller.queue and len(fred_controller.queue) > 0:
# If the command is quitting the menu we want to return in here
if fred_controller.queue[0] in ("q", "..", "quit"):
print("")
if len(fred_controller.queue) > 1:
return fred_controller.queue[1:]
return []
# Consume 1 element from the queue
an_input = fred_controller.queue[0]
fred_controller.queue = fred_controller.queue[1:]
# Print the current location because this was an instruction and we want user to know what was the action
if an_input and an_input.split(" ")[0] in fred_controller.CHOICES_COMMANDS:
print(f"{get_flair()} /economy/fred/ $ {an_input}")
# Get input command from user
else:
# Display help menu when entering on this menu from a level above
if an_input == "HELP_ME":
fred_controller.print_help()
# Get input from user using auto-completion
if session and gtff.USE_PROMPT_TOOLKIT and fred_controller.completer:
an_input = session.prompt(
f"{get_flair()} /economy/fred/ $ ",
completer=fred_controller.completer,
search_ignore_case=True,
)
# Get input from user without auto-completion
else:
an_input = input(f"{get_flair()} /economy/fred/ $ ")
try:
# Process the input command
fred_controller.queue = fred_controller.switch(an_input)
except SystemExit:
print(
f"\nThe command '{an_input}' doesn't exist on the /economy/fred menu.",
end="",
)
similar_cmd = difflib.get_close_matches(
an_input.split(" ")[0] if " " in an_input else an_input,
fred_controller.CHOICES,
n=1,
cutoff=0.7,
)
if similar_cmd:
if " " in an_input:
candidate_input = (
f"{similar_cmd[0]} {' '.join(an_input.split(' ')[1:])}"
)
if candidate_input == an_input:
an_input = ""
fred_controller.queue = []
print("\n")
continue
an_input = candidate_input
else:
an_input = similar_cmd[0]
print(f" Replacing by '{an_input}'.")
fred_controller.queue.insert(0, an_input)
else:
print("\n") | b8133dd748f0a48099359b6503edee6c9f875fb6 | 3,651,146 |
def generic_repr(name, obj, deferred):
"""
Generic pretty printer for NDTable and NDArray.
Output is of the form::
Array(3, int32)
values := [Numpy(ptr=60597776, dtype=int64, shape=(3,))];
metadata := [contigious]
layout := Identity;
[1 2 3]
"""
if deferred:
if _show_details:
header = "%s\n" % (name)
header += " datashape := %s \n" % str(obj._datashape)
header += " metadata := %s \n" % obj._metadata
else:
header = ''
else:
if _show_details:
header = "%s\n" % (name)
header += " datashape := %s \n" % str(obj._datashape)
header += " values := %s \n" % list(obj.space)
header += " metadata := %s \n" % obj._metadata
header += " layout := %s \n" % obj._layout.desc
else:
header = ''
# Show the data below
fullrepr = header + generic_str(obj, deferred)
return fullrepr | c9de29b792d943420b02455752f01a9c12fcf66c | 3,651,147 |
def build_model(X, y, ann_hidden_dim, num_passes=20000):
"""
:param ann_hidden_dim: Number of nodes in the hidden layer
:param num_passes: Number of passes through the training data for gradient descent
    :return: the parameters of the artificial neural network, used for prediction via forward propagation
"""
model = {}
# Initialize the parameters to random values.
np.random.seed(0)
w1 = np.random.randn(ann_input_dim, ann_hidden_dim) / np.sqrt(ann_input_dim)
c1 = np.zeros((1, ann_hidden_dim))
w2 = np.random.randn(ann_hidden_dim, ann_output_dim) / np.sqrt(ann_hidden_dim)
c2 = np.zeros((1, ann_output_dim))
# Batch gradient descent
for i in range(0, num_passes):
# Forward propagation
z1 = X.dot(w1) + c1
a1 = np.tanh(z1)
z2 = a1.dot(w2) + c2
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
# Back propagation
delta3 = probs
delta3[range(len(X)), y] -= 1
dw2 = (a1.T).dot(delta3)
dc2 = np.sum(delta3, axis=0, keepdims=True)
delta2 = delta3.dot(w2.T) * (1 - np.power(a1, 2))
dw1 = np.dot(X.T, delta2)
dc1 = np.sum(delta2, axis=0)
# Add regularization terms (c1 and c2 don't have regularization terms)
dw2 += REG_LAMBDA * w2
dw1 += REG_LAMBDA * w1
# Gradient descent parameter update
w1 += -EPSILON * dw1
c1 += -EPSILON * dc1
w2 += -EPSILON * dw2
c2 += -EPSILON * dc2
# Assign new parameters to the model
model = {'w1': w1, 'c1': c1, 'w2': w2, 'c2': c2}
return model | bccdf828050af8a6ff5943eb84b574756f9f54ab | 3,651,148 |
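# Toy two-class sketch for build_model above. The module-level constants it relies on
# (ann_input_dim, ann_output_dim, REG_LAMBDA, EPSILON) are given illustrative values here.
import numpy as np
ann_input_dim, ann_output_dim = 2, 2
REG_LAMBDA, EPSILON = 0.01, 0.01
X = np.random.randn(200, 2)
y = (X[:, 0] * X[:, 1] > 0).astype(int)          # labels in {0, 1}
model = build_model(X, y, ann_hidden_dim=4, num_passes=1000)
print(sorted(model.keys()))                      # ['c1', 'c2', 'w1', 'w2']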
def g_square_dis(dm, x, y, s):
"""G square test for discrete data.
Args:
dm: the data matrix to be used (as a numpy.ndarray).
x: the first node (as an integer).
y: the second node (as an integer).
        s: the set of neighbouring nodes of x and y (as a set()).
Returns:
p_val: the p-value of conditional independence.
"""
levels = np.amax(dm, axis=0) + 1
def _calculate_tlog(x, y, s, dof, levels, dm):
prod_levels = np.prod(list(map(lambda x: levels[x], s)))
nijk = np.zeros((levels[x], levels[y], prod_levels))
s_size = len(s)
z = []
for z_index in range(s_size):
z.append(s.pop())
pass
for row_index in range(dm.shape[0]):
i = dm[row_index, x]
j = dm[row_index, y]
k = []
k_index = 0
for s_index in range(s_size):
if s_index == 0:
k_index += dm[row_index, z[s_index]]
else:
lprod = np.prod(list(map(lambda x: levels[x], z[:s_index])))
k_index += (dm[row_index, z[s_index]] * lprod)
pass
pass
nijk[i, j, k_index] += 1
pass
nik = np.ndarray((levels[x], prod_levels))
njk = np.ndarray((levels[y], prod_levels))
for k_index in range(prod_levels):
nik[:, k_index] = nijk[:, :, k_index].sum(axis = 1)
njk[:, k_index] = nijk[:, :, k_index].sum(axis = 0)
pass
nk = njk.sum(axis = 0)
tlog = np.zeros((levels[x], levels[y], prod_levels))
tlog.fill(np.nan)
for k in range(prod_levels):
tx = np.array([nik[:, k]]).T
ty = np.array([njk[:, k]])
tdijk = tx.dot(ty)
tlog[:, :, k] = nijk[:, :, k] * nk[k] / tdijk
pass
return (nijk, tlog)
row_size = dm.shape[0]
s_size = len(s)
dof = ((levels[x] - 1) * (levels[y] - 1)
* np.prod(list(map(lambda x: levels[x], s))))
row_size_required = 10 * dof
nijk = None
if s_size < 5:
if s_size == 0:
nijk = np.zeros((levels[x], levels[y]))
for row_index in range(row_size):
i = dm[row_index, x]
j = dm[row_index, y]
nijk[i, j] += 1
pass
tx = np.array([nijk.sum(axis = 1)]).T
ty = np.array([nijk.sum(axis = 0)])
tdij = tx.dot(ty)
tlog = nijk * row_size / tdij
pass
if s_size > 0:
nijk, tlog = _calculate_tlog(x, y, s, dof, levels, dm)
pass
pass
else:
nijk = np.zeros((levels[x], levels[y], 1))
i = dm[0, x]
j = dm[0, y]
k = []
for z in s:
k.append(dm[:, z])
pass
k = np.array(k).T
parents_count = 1
parents_val = np.array([k[0, :]])
nijk[i, j, parents_count - 1] = 1
for it_sample in range(1, row_size):
is_new = True
i = dm[it_sample, x]
j = dm[it_sample, y]
tcomp = parents_val[:parents_count, :] == k[it_sample, :]
for it_parents in range(parents_count):
if np.all(tcomp[it_parents, :]):
nijk[i, j, it_parents] += 1
is_new = False
break
pass
if is_new is True:
parents_count += 1
parents_val = np.r_[parents_val, [k[it_sample, :]]]
nnijk = np.zeros((levels[x], levels[y], parents_count))
for p in range(parents_count - 1):
nnijk[:, :, p] = nijk[:, :, p]
pass
nnijk[i, j, parents_count - 1] = 1
nijk = nnijk
pass
pass
nik = np.ndarray((levels[x], parents_count))
njk = np.ndarray((levels[y], parents_count))
for k_index in range(parents_count):
nik[:, k_index] = nijk[:, :, k_index].sum(axis = 1)
njk[:, k_index] = nijk[:, :, k_index].sum(axis = 0)
pass
nk = njk.sum(axis = 0)
tlog = np.zeros((levels[x], levels[y], parents_count))
tlog.fill(np.nan)
for k in range(parents_count):
tx = np.array([nik[:, k]]).T
ty = np.array([njk[:, k]])
tdijk = tx.dot(ty)
tlog[:, :, k] = nijk[:, :, k] * nk[k] / tdijk
pass
pass
log_tlog = np.log(tlog)
G2 = np.nansum(2 * nijk * log_tlog)
if dof == 0:
p_val = 1
else:
p_val = chi2.sf(G2, dof)
if s_size == 0:
nijk = nijk.reshape((nijk.shape[0], nijk.shape[1], 1))
log_tlog = log_tlog.reshape((log_tlog.shape[0], log_tlog.shape[1], 1))
return G2, p_val, nijk, log_tlog | 2f0f0b44a919177c0f5775a34e0493c62720a21d | 3,651,149 |
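# Usage sketch for g_square_dis above on synthetic binary data; scipy.stats.chi2 is
# assumed to be imported in the module (it is used for the p-value).
import numpy as np
rng = np.random.default_rng(0)
dm = rng.integers(0, 2, size=(500, 4))        # 500 samples, 4 binary variables
G2, p_val, nijk, log_tlog = g_square_dis(dm, 0, 1, {2})
print(round(G2, 3), round(p_val, 3))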
def start(name):
"""
Start the specified service
CLI Example:
.. code-block:: bash
salt '*' service.start <service name>
"""
cmd = "/usr/sbin/svcadm enable -s -t {0}".format(name)
retcode = __salt__["cmd.retcode"](cmd, python_shell=False)
if not retcode:
return True
if retcode == 3:
# Return code 3 means there was a problem with the service
# A common case is being in the 'maintenance' state
# Attempt a clear and try one more time
clear_cmd = "/usr/sbin/svcadm clear {0}".format(name)
__salt__["cmd.retcode"](clear_cmd, python_shell=False)
return not __salt__["cmd.retcode"](cmd, python_shell=False)
return False | 607b559281c6b13002d7237b8c4409533074d0bc | 3,651,150 |
from typing import Dict
def line_coloring(num_vertices) -> Dict:
"""
Creates an edge coloring of the line graph, corresponding to the optimal
line swap strategy, given as a dictionary where the keys
correspond to the different colors and the values are lists of edges (where edges
are specified as tuples). The graph coloring consists of one color for all even-numbered
edges and one color for all odd-numbered edges.
Args:
num_vertices: The number of vertices in the line graph
Returns:
Graph coloring as a dictionary of edge lists
"""
line_coloring = {}
for i in range(num_vertices - 1):
line_coloring[(i, i + 1)] = i % 2
line_coloring[(i + 1, i)] = i % 2
return line_coloring | 423e626ecbf4f48e0a192241375484a077fbe0b2 | 3,651,151 |
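# Usage sketch for line_coloring above on a 4-vertex line graph.
print(line_coloring(4))
# {(0, 1): 0, (1, 0): 0, (1, 2): 1, (2, 1): 1, (2, 3): 0, (3, 2): 0}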
def flatten_outputs(predictions, number_of_classes):
"""Flatten the prediction batch except the prediction dimensions"""
logits_permuted = predictions.permute(0, 2, 3, 1)
logits_permuted_cont = logits_permuted.contiguous()
outputs_flatten = logits_permuted_cont.view(-1, number_of_classes)
return outputs_flatten
# outputs_flatten = torch.tensor(predictions | c58fb965443a5402e9bec32afaebe9376c74653f | 3,651,152 |
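# Usage sketch for flatten_outputs above.
import torch
preds = torch.randn(2, 5, 8, 8)                       # [batch, classes, H, W]
flat = flatten_outputs(preds, number_of_classes=5)
print(flat.shape)                                     # torch.Size([128, 5])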
def get_r_vals(cell_obj):
"""Get radial distances for inner and outer membranes for the cell object"""
r_i = cell_obj.coords.calc_rc(cell_obj.data.data_dict['storm_inner']['x'],
cell_obj.data.data_dict['storm_inner']['y'])
r_o = cell_obj.coords.calc_rc(cell_obj.data.data_dict['storm_outer']['x'],
cell_obj.data.data_dict['storm_outer']['y'])
return r_i, r_o | d51c926791845006dfe9a97cbd9c82c041ea701b | 3,651,153 |