content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
import sqlite3
import pandas as pd
def prob8(cur: sqlite3.Cursor) -> pd.DataFrame:
"""Give a list of the services which connect the stops 'Craiglockhart' and
'Tollcross'.
Parameters
----------
cur (sqlite3.Cursor) : The cursor for the database we're accessing.
Returns
-------
(pd.DataFrame) : Table with the solution.
"""
cur.execute("""SELECT DISTINCT r1.company, r1.num
FROM route AS r1
JOIN route AS r2 ON (r1.company = r2.company AND r1.num = r2.num)
JOIN stops AS stops1 ON stops1.id = r1.stop
JOIN stops as stops2 ON stops2.id = r2.stop
WHERE stops1.name = 'Craiglockhart'
AND stops2.name = 'Tollcross';
""")
return pd.DataFrame(cur.fetchall()) | 14e8bbb04befc1116f969ca977d83bc27890664c | 3,655,300 |
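# A minimal usage sketch for prob8: the self-join of `route` on (company, num)
# keeps only services whose stop list contains both named stops. The database
# file name is an assumption (any SQLite DB with the classic stops/route
# exercise schema works).
import sqlite3

conn = sqlite3.connect("transport.db")  # hypothetical database file
services = prob8(conn.cursor())
print(services)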
def get_command(name):
""" return command represented by name """
_rc = COMMANDS[name]()
return _rc | 22e64898973d2a2ec1cca2ff72fa86eaed4a3546 | 3,655,301 |
import jax
import jax.numpy as jnp

def _str_struct(a):
"""converts the structure to a string for logging purposes."""
shape_dtype = lambda x: (jnp.asarray(x).shape, str(jnp.asarray(x).dtype))
return str(jax.tree_map(shape_dtype, a)) | 96d417c6cd1332d6e71b21472444cf6178cad92a | 3,655,302 |
from typing import Set
import os

import numpy as np
from PIL import Image, UnidentifiedImageError

def get_moved_files(dir_path: str) -> Set:
    """
    Collect the files (and folders) that should be moved out, namely:
    - folders
    - corrupted images
    - non-image files
    - duplicate images
    """
removed_files = set()
file_map = {}
for file in os.listdir(dir_path):
file_path = os.path.join(dir_path, file)
        # Filter files
if os.path.isfile(file_path):
            # Group files by size
            # Images of different sizes can never be identical, so only files of the same size need to be compared, which narrows the search
size = os.path.getsize(file_path)
file_map.setdefault(size, []).append(file_path)
else:
removed_files.add(file_path)
for files in file_map.values():
duplicate_files = set()
m = len(files)
for i in range(m):
if files[i] in duplicate_files:
continue
            # Handle corrupted image files / non-image files
try:
img1 = Image.open(files[i])
except UnidentifiedImageError:
duplicate_files.add(files[i])
continue
image1 = np.array(img1)
for j in range(i + 1, m):
if files[j] in duplicate_files:
continue
                # Handle corrupted image files / non-image files
try:
img2 = Image.open(files[j])
except UnidentifiedImageError:
duplicate_files.add(files[j])
continue
                # Check whether the two images have the same dimensions
if img1.size == img2.size:
                    # Check whether the image contents are identical
image2 = np.array(img2)
if np.array_equal(image1, image2):
duplicate_files.add(files[j])
removed_files = removed_files | duplicate_files
return removed_files | 52520d8e8cd41343945e45bb46da32177175ad34 | 3,655,303 |
def delete_interface_address(
api_client, interface_id, address_id, **kwargs
): # noqa: E501
"""delete_interface_address # noqa: E501
Delete interface address details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> response = await api.delete_interface_address(interface_id, address_id, async_req=True)
:param interface_id int: ID of interface
:param address_id int: ID of address
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: APIResponse or awaitable if async
"""
local_var_params = locals()
collection_formats = {}
path_params = {"interface_id": interface_id, "address_id": address_id}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiTokenAuth", "basicAuth"] # noqa: E501
return api_client.call_api(
"/interfaces/system/{interface_id}/addresses/{address_id}",
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="object", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
) | 19d04ef0783988c8eb86983d589d9f07e82ba3b8 | 3,655,304 |
from uuid import UUID

from aiogram import types  # aiogram's types module (types.Message), not the stdlib `types`
from aiogram.dispatcher import FSMContext

async def set_promo(message: types.Message, state: FSMContext):
    """
    Handler for the /setpromo command.
    """
arg = message.get_args()
if not arg:
return await message.answer(_("Укажите аргумент: промокод. Например: <pre>/set_promo my-promo-code</pre>"),
parse_mode="HTML")
arg = arg.strip()
try:
UUID(arg)
except ValueError:
return await message.answer(_("Промокод не найден"))
promo = await models.Promo.get_or_none(code=arg)
if not promo:
return await message.answer(_("Промокод не найден"))
if promo.owner:
return await message.answer(_("Промокод уже использован"))
user, created = await models.User.get_or_create(telegram_id=message.from_user.id)
promo.owner = user
await promo.save(update_fields=["owner_id"])
await message.answer(_("Промокод активирован! Спасибо 🙌")) | 9a15dd1bea20c3da6dd31eee5e2a723ddd110ba2 | 3,655,305 |
def plot_waterfall(*sigObjs, step=10, xLim:list=None,
Pmin=20, Pmax=None, tmin=0, tmax=None, azim=-72, elev=14,
cmap='jet', winPlot=False, waterfallPlot=True, fill=True,
lines=False, alpha=1, figsize=(20, 8), winAlpha=0,
removeGridLines=False, saveFig=False, bar=False, width=0.70,
size=3, lcol=None, filtered=True):
"""
    This function was kindly provided by Rinaldi Polese Petrolli.
# TO DO
Keyword Arguments:
step {int} -- [description] (default: {10})
xLim {list} -- [description] (default: {None})
Pmin {int} -- [description] (default: {20})
Pmax {[type]} -- [description] (default: {None})
tmin {int} -- [description] (default: {0})
tmax {[type]} -- [description] (default: {None})
azim {int} -- [description] (default: {-72})
elev {int} -- [description] (default: {14})
cmap {str} -- [description] (default: {'jet'})
winPlot {bool} -- [description] (default: {False})
waterfallPlot {bool} -- [description] (default: {True})
fill {bool} -- [description] (default: {True})
lines {bool} -- [description] (default: {False})
alpha {int} -- [description] (default: {1})
figsize {tuple} -- [description] (default: {(20, 8)})
winAlpha {int} -- [description] (default: {0})
removeGridLines {bool} -- [description] (default: {False})
saveFig {bool} -- [description] (default: {False})
bar {bool} -- [description] (default: {False})
width {float} -- [description] (default: {0.70})
size {int} -- [description] (default: {3})
lcol {[type]} -- [description] (default: {None})
filtered {bool} -- [description] (default: {True})
Returns:
[type] -- [description]
"""
realSigObjs = \
_remove_non_(SignalObj, sigObjs, msgPrefix='plot_waterfall:')
if len(realSigObjs) > 0:
figs = plot.waterfall(realSigObjs, step, xLim,
Pmin, Pmax, tmin, tmax, azim, elev,
cmap, winPlot, waterfallPlot, fill,
lines, alpha, figsize, winAlpha,
removeGridLines, saveFig, bar, width,
size, lcol, filtered)
return figs
else:
return | 85888e49a938a5e4faac90c52b2df7fa7036610c | 3,655,306 |
import csv
import re
def indices(input_file):
"""
Parse the index file or target file and return a list of values.
:return:
"""
index_list = []
line_num = 0
index_file = list(csv.reader(open(input_file), delimiter='\t'))
for line in index_file:
line_num += 1
col_count = len(line)
if col_count > 1 and len(line[0].split("#")[0]) > 1: # Skip any lines that are blank or comments.
tmp_line = []
for i in range(col_count):
try:
line[i] = line[i].split("#")[0] # Strip out end of line comments and white space.
except IndexError:
raise SystemExit(
"There is a syntax error in file {0} on line {1}, column {2} "
.format(input_file, str(line_num), str(i)))
line[i] = re.sub(",", '', line[i]) # Strip out any commas.
tmp_line.append(line[i])
index_list.append(tmp_line)
return index_list | ea07d6f2bc8f3d23cf2ae59cb2df6c19158752fc | 3,655,307 |
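# A usage sketch for indices(), assuming a hypothetical tab-delimited file
# "targets.txt" ("#" starts a comment, blank lines are skipped):
#
#     # sample<TAB>index
#     Sample1<TAB>ACGTACGT
#     Sample2<TAB>TTGCAAGC
#
rows = indices("targets.txt")
# rows -> [['Sample1', 'ACGTACGT'], ['Sample2', 'TTGCAAGC']]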
import argparse
def parse_arguments():
"""
Parse the arguments from the user
"""
parser = argparse.ArgumentParser(
description="omeClust visualization script.\n",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"adist",
help="the input file D*N, Rows: D features and columns: N samples OR \n" +
"a distance matrix file D*D (rows and columns should be the same and in the same order) \n ",
)
parser.add_argument(
"clusters",
help="the input file D*N, Rows: D features and columns: N samples OR \n" +
"a distance matrix file D*D (rows and columns should be the same and in the same order) \n ",
)
parser.add_argument(
"--metadata",
help="metadata",
)
parser.add_argument(
"--shapeby",
type=str,
help="the input file D*N, Rows: D features and columns: N samples OR \n" +
"a distance matrix file D*D (rows and columns should be the same and in the same order) \n ",
)
parser.add_argument(
"-o", "--output",
help="the output directory\n",
required=True)
parser.add_argument(
"--size-to-plot",
type=int,
dest='size_to_plot',
default=3,
help="Minimum size of cluster to be plotted")
parser.add_argument("--fig-size", nargs=2,
# type=int,
dest='fig_size',
default=[3, 2.5], help="width and height of plots")
parser.add_argument("--point-size",
type=int,
dest='point_size',
default=3, help="width and height of plots")
parser.add_argument("--show",
help="show ordination plot before save\n",
action="store_true",
default=False,
dest='show')
return parser.parse_args() | aaa649b34cdb6819f9a56e7e0d547ccc88bff139 | 3,655,308 |
import math
import logging
from functools import reduce

log = logging.getLogger(__name__)  # module-level logger used by the log.debug calls below
def checkSequences(numl, rowlen, seqlen):
"""In a square of numbers, represented by the list with
the given row length, look for the top product with length
of seqlen"""
listl=len(numl)
collen=math.ceil(listl/rowlen)
seqind=seqlen-1
log.debug("List length, collen: %d, %d", listl, collen)
ret=[0]
def checkProd(mxprod, prodll, i):
r=None
for prodl in prodll:
prod=reduce(lambda x,y: x*y, prodl)
if prod > mxprod:
log.debug("Found a higher product than previous %d: %d at i=%d", mxprod, prod, i)
mxprod=prod
r=(prod, i, prodl)
return r
for i in range(0, listl):
# Check the first and last value are in the same row
checkrow = (i//rowlen)==((i+seqind)//rowlen) and (i+seqind)<listl
checkcol = (i+(rowlen*seqind)) < listl
checkdiagr = checkrow and (i+seqind+(rowlen*seqind)) < listl
checkdiagl = (i//rowlen)==((i-seqind)//rowlen) and (i-seqind+(rowlen*seqind)) < listl
log.debug("i: %d, check (row,col,diagr,diagl): (%d,%d,%d,%d)",
i, checkrow, checkcol, checkdiagr, checkdiagl)
prodll=[]
r=None
if checkrow:
prodll.append([numl[i+j] for j in range(0,seqlen)])
if checkcol:
prodll.append([numl[i+(j*rowlen)] for j in range(0,seqlen)])
if checkdiagr:
prodll.append([numl[i+j+(j*rowlen)] for j in range(0,seqlen)])
if checkdiagl:
prodll.append([numl[i-j+(j*rowlen)] for j in range(0,seqlen)])
r=checkProd(ret[0], prodll, i)
ret = r if r is not None else ret
return ret | ec37575a088098fb57dc2a58603ccea6fcc9b5a9 | 3,655,309 |
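# A usage sketch: largest product of 2 adjacent values (along rows, columns or
# diagonals) in a 3x3 grid stored row-major.
grid = [1, 2, 3,
        4, 5, 6,
        7, 8, 9]
checkSequences(grid, rowlen=3, seqlen=2)
# -> (72, 7, [8, 9]): the product, the starting index, and the winning sequence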
def has_same_facts(ruler_intervals1, ruler_intervals2, D):
"""
Check whether the two same-pattern ruler lists have the same facts at each corresponding ruler-interval
Args:
ruler_intervals1: a list of ruler-intervals
ruler_intervals2: a list of ruler-intervals
D: contain all relational facts
Returns:
True or False
"""
for ruler1, ruler2 in zip(ruler_intervals1, ruler_intervals2):
for predicate in D:
for entity in D[predicate]:
if interval_inclusion_intervallist(ruler1, D[predicate][entity]) and \
not interval_inclusion_intervallist(ruler2, D[predicate][entity]):
return False
return True | 210540bd2c2062f3150a34c5911017ec49b5603f | 3,655,310 |
import os

def main():
    """ """
    undet = argument_parse()
    if undet:
        print 'Start\t|\tCheck incorrect index'
        fq_list = split_fastq(undet)
        print 'Process\t|\tAnalysis undetermined data'
        combined_df = multi_process(fq_list)
        sorted_combined_df = combined_df.sort_values(
            by='count',
            ascending=False,
            inplace=False
        )
        print sorted_combined_df.head(10)
        print 'Process\t|\tWrite out result'
        sorted_combined_df.to_csv('undetermined_top_index.csv', header=False)
        for f in fq_list:
            os.system('rm {}'.format(f))
        print 'End\t|\tCheck incorrect index'
        return True
    else:
        print 'End\t|\tCannot analyze index\n'
        return False | e20f65e172f49ce2f184b32344135ccadb550253 | 3,655,311 |
def ruleset_delete(p_engine, p_username, rulesetname, envname):
"""
    Delete ruleset from Masking engine
    param1: p_engine: engine name from configuration
    param2: p_username: username used to connect to the engine
    param3: rulesetname: ruleset name
    param4: envname: environment name
    return 0 if deleted, non 0 for error
return ruleset_worker(p_engine=p_engine, p_username=p_username, rulesetname=rulesetname,
envname=envname, function_to_call='do_delete') | 470e2d104a6d10737bba975a0cb15a4768238244 | 3,655,312 |
import ujson

def config_from_file(file_name):
"""Load and return json from file."""
with open(file_name) as config_file:
config = ujson.load(config_file)
return config | 2dd1b57612c528a85dbe04c717800b6908cb9c40 | 3,655,313 |
def build_yaml_object(
dataset_id: str,
table_id: str,
config: dict,
schema: dict,
metadata: dict = dict(),
columns_schema: dict = dict(),
partition_columns: list = list(),
):
"""Build a dataset_config.yaml or table_config.yaml
Args:
dataset_id (str): The dataset id.
table_id (str): The table id.
config (dict): A dict with the `basedosdados` client configurations.
schema (dict): A dict with the JSON Schema of the dataset or table.
metadata (dict): A dict with the metadata of the dataset or table.
columns_schema (dict): A dict with the JSON Schema of the columns of
the table.
partition_columns (list): A list with the partition columns of the
table.
Returns:
CommentedMap: A YAML object with the dataset or table metadata.
"""
properties: dict = schema["properties"]
definitions: dict = schema["definitions"]
# Drop all properties without yaml_order
properties = {
key: value for key, value in properties.items() if value.get("yaml_order")
}
# Add properties
yaml = add_yaml_property(
yaml=ryaml.CommentedMap(),
properties=properties,
definitions=definitions,
metadata=metadata,
)
# Add columns
if metadata.get("columns"):
yaml["columns"] = []
for metadatum in metadata.get("columns"):
properties = add_yaml_property(
yaml=ryaml.CommentedMap(),
properties=columns_schema["properties"],
definitions=columns_schema["definitions"],
metadata=metadatum,
has_column=True,
)
yaml["columns"].append(properties)
    # Add partitions in case of new dataset/table or local overwriting
if partition_columns and partition_columns != ["[]"]:
yaml["partitions"] = ""
for local_column in partition_columns:
for remote_column in yaml["columns"]:
if remote_column["name"] == local_column:
remote_column["is_partition"] = True
yaml["partitions"] = ", ".join(partition_columns)
# Nullify `partitions` field in case of other-than-None empty values
if yaml.get("partitions") == "":
yaml["partitions"] = None
# Add dataset_id and table_id
yaml["dataset_id"] = dataset_id
if table_id:
yaml["table_id"] = table_id
# Add gcloud config variables
yaml["source_bucket_name"] = str(config.get("bucket_name"))
yaml["project_id_prod"] = str(
config.get("gcloud-projects", {}).get("prod", {}).get("name")
)
yaml["project_id_staging"] = str(
config.get("gcloud-projects", {}).get("staging", {}).get("name")
)
return yaml | 8fa7d3acac0e9636fda923d9a38e9a82f904afae | 3,655,314 |
import os

import pandas as pd
def read_candidate_data_list(file, path=IEDC_paths.candidates):
"""
Will read a candidate file and return its data.
:param file: Filename of the file to process
:param path: Path of the file
:return: Dictionary of dataframes for metadata, classifications, and data
"""
# make it a proper path
file = os.path.join(path, file)
data = pd.read_excel(file, sheet_name='Data')
return data | acf3ed6c93f4e57797b8451bd6b239a4d6564dff | 3,655,315 |
from pathlib import Path

import seaborn as sns
import matplotlib.pyplot as plt

# GRAPHDIR is assumed to be a module-level Path pointing at the graph output directory.
def make_cumulative(frame, filedate, unit):
"""Create a cumulative graph of cases over time"""
gb = frame.groupby("Accurate_Episode_Date").agg(patients=("Row_ID", "count"))
gb = gb.resample("D").last().fillna(0).reset_index()
max_date = gb["Accurate_Episode_Date"].max().strftime("%Y-%m-%d")
gb["cumulative"] = gb.patients.cumsum().astype(int)
print(gb)
print(gb.info())
ax = sns.lineplot(
data=gb, x="Accurate_Episode_Date", y="cumulative", linewidth=2, color="red"
)
ax.set(
ylabel="Cumulative case count",
xlabel="Date",
title=f"{unit} Cumulative Cases by Episode Date ({max_date})",
)
ax2 = plt.twinx()
sns.lineplot(
data=gb, x="Accurate_Episode_Date", y="patients", ax=ax2, linewidth=0.5
)
ax2.set(ylim=(0, gb["patients"].max() * 2))
plt.gcf().autofmt_xdate()
fname = GRAPHDIR / Path(f"{filedate}-cumulative.png")
ax.figure.savefig(fname)
return fname | 44a2a1b3af68c293a86af97b11edf8cca562e6b8 | 3,655,316 |
def most_common(l):
""" Helper function.
:l: List of strings.
:returns: most common string.
"""
# another way to get max of list?
#from collections import Counter
#data = Counter(your_list_in_here)
#data.most_common() # Returns all unique items and their counts
#data.most_common(1)
count = 0
answer = ''
for element in l:
if l.count(element) > count:
count = l.count(element)
answer = element
return answer | 5010e4e26b00099c287f8597d8dc5881a67c4034 | 3,655,317 |
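# Usage sketch; the Counter approach sketched in the comments above gives the
# same answer in one line.
most_common(["py", "js", "py", "go", "py"])             # -> 'py'
from collections import Counter
Counter(["py", "js", "py", "go", "py"]).most_common(1)  # -> [('py', 3)]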
import tensorflow as tf  # TF1-style API (keep_dims, tf.to_float)

def reduce_avg(reduce_target, lengths, dim):
"""
Args:
reduce_target : shape(d_0, d_1,..,d_dim, .., d_k)
lengths : shape(d0, .., d_(dim-1))
dim : which dimension to average, should be a python number
"""
shape_of_lengths = lengths.get_shape()
shape_of_target = reduce_target.get_shape()
if len(shape_of_lengths) != dim:
raise ValueError(('Second input tensor should be rank %d, ' +
'while it got rank %d') % (dim, len(shape_of_lengths)))
if len(shape_of_target) < dim+1 :
raise ValueError(('First input tensor should be at least rank %d, ' +
'while it got rank %d') % (dim+1, len(shape_of_target)))
rank_diff = len(shape_of_target) - len(shape_of_lengths) - 1
mxlen = tf.shape(reduce_target)[dim]
mask = mkMask(lengths, mxlen)
if rank_diff!=0:
len_shape = tf.concat(axis=0, values=[tf.shape(lengths), [1]*rank_diff])
mask_shape = tf.concat(axis=0, values=[tf.shape(mask), [1]*rank_diff])
else:
len_shape = tf.shape(lengths)
mask_shape = tf.shape(mask)
lengths_reshape = tf.reshape(lengths, shape=len_shape)
mask = tf.reshape(mask, shape=mask_shape)
mask_target = reduce_target * tf.cast(mask, dtype=reduce_target.dtype)
red_sum = tf.reduce_sum(mask_target, axis=[dim], keep_dims=False)
red_avg = red_sum / (tf.to_float(lengths_reshape) + 1e-30)
return red_avg | 3bba229f448d393019857d89d16820076732e932 | 3,655,318 |
import numpy as np

def _near_mod_2pi(e, t, atol=_DEFAULT_ATOL):
"""Returns whether a value, e, translated by t, is equal to 0 mod 2 * pi."""
return _near_mod_n(e, t, n=2 * np.pi, atol=atol) | 465911aca0fe1a7cd397ed2304426da5fdaaccc3 | 3,655,319 |
def create_returns_similarity(strategy: QFSeries, benchmark: QFSeries, mean_normalization: bool = True,
std_normalization: bool = True, frequency: Frequency = None) -> KDEChart:
"""
Creates a new returns similarity chart. The frequency is determined by the specified returns series.
Parameters
----------
strategy: QFSeries
The strategy series to plot.
benchmark: QFSeries
The benchmark series to plot.
mean_normalization: bool
Whether to perform mean normalization on the series data.
std_normalization: bool
Whether to perform variance normalization on the series data.
frequency: Frequency
Returns can be aggregated in to specific frequency before plotting the chart
Returns
-------
KDEChart
A newly created KDEChart instance.
"""
chart = KDEChart()
colors = Chart.get_axes_colors()
if frequency is not None:
aggregate_strategy = get_aggregate_returns(strategy.to_simple_returns(), frequency)
aggregate_benchmark = get_aggregate_returns(benchmark.to_simple_returns(), frequency)
else:
aggregate_strategy = strategy.to_simple_returns()
aggregate_benchmark = benchmark.to_simple_returns()
scaled_strategy = preprocessing.scale(
aggregate_strategy, with_mean=mean_normalization, with_std=std_normalization)
strategy_data_element = DataElementDecorator(
scaled_strategy, bw="scott", shade=True, label=strategy.name, color=colors[0])
chart.add_decorator(strategy_data_element)
scaled_benchmark = preprocessing.scale(
aggregate_benchmark, with_mean=mean_normalization, with_std=std_normalization)
benchmark_data_element = DataElementDecorator(
scaled_benchmark, bw="scott", shade=True, label=benchmark.name, color=colors[1])
chart.add_decorator(benchmark_data_element)
# Add a title.
title = _get_title(mean_normalization, std_normalization, frequency)
title_decorator = TitleDecorator(title, key="title")
chart.add_decorator(title_decorator)
chart.add_decorator(AxesLabelDecorator("Returns", "Similarity"))
return chart | a83a7d2171ee488c1ac9ede80f39778658a4538f | 3,655,320 |
import matplotlib.cm as mpl_color_map
from PIL import Image
import copy
def apply_colormap_on_image(org_im, activation, colormap_name='viridis', alpha=.4, thresh=30):
"""
Apply heatmap on image
Args:
        org_im (PIL img or ndarray): Original image
        activation (numpy arr): Activation map (grayscale) 0-255
        colormap_name (str): Name of the matplotlib colormap
        alpha (float): Opacity of the heatmap overlay
        thresh (int): Activations below this value become fully transparent
"""
org_im = Image.fromarray(to_img(org_im))
# Get colormap
color_map = mpl_color_map.get_cmap(colormap_name)
no_trans_heatmap = color_map(activation)
# Change alpha channel in colormap to make sure original image is displayed
heatmap = copy.copy(no_trans_heatmap)
heatmap[:, :, 3] = alpha
heatmap[:, :, 3][activation < thresh] = 0
heatmap = Image.fromarray((heatmap * 255).astype(np.uint8))
no_trans_heatmap = Image.fromarray((no_trans_heatmap * 255).astype(np.uint8))
    # Apply heatmap on image
heatmap_on_image = Image.new("RGBA", org_im.size)
heatmap_on_image = Image.alpha_composite(heatmap_on_image, org_im.convert('RGBA'))
heatmap_on_image = Image.alpha_composite(heatmap_on_image, heatmap)
no_trans_heatmap = to_img(no_trans_heatmap)
heatmap_on_image = to_img(heatmap_on_image)
return no_trans_heatmap, heatmap_on_image | d6f89cb06e8ec489e9bd38cb52f5d04628037b70 | 3,655,321 |
def _cli():
"""
command line interface
:return:
"""
parser = generate_parser()
args = parser.parse_args()
return interface(args.bids_dir,
args.output_dir,
args.aseg,
args.subject_list,
args.session_list,
args.collect,
args.ncpus,
args.stage,
args.bandstop,
args.max_cortical_thickness,
args.check_outputs_only,
args.t1_brain_mask,
args.t2_brain_mask,
args.study_template,
args.t1_reg_method,
args.cleaning_json,
args.print,
args.ignore_expected_outputs,
args.multi_template_dir,
args.norm_method,
args.norm_gm_std_dev_scale,
args.norm_wm_std_dev_scale,
args.norm_csf_std_dev_scale,
args.make_white_from_norm_t1,
args.single_pass_pial,
args.registration_assist,
args.freesurfer_license) | 0b37b2eab79c5f50d5f18b5d6b435e3b97682a36 | 3,655,322 |
def statusize():
"""Posts a status from the web."""
db = get_session(current_app)
user_id = session.get('user_id')
if not user_id:
return forbidden('You must be logged in to statusize!')
user = db.query(User).get(user_id)
message = request.form.get('message', '')
if not message:
return page_not_found('You cannot statusize nothing!')
status = Status(user_id=user.id, content=message, content_html=message)
project = request.form.get('project', '')
if project:
project = db.query(Project).filter_by(id=project).first()
if project:
status.project_id = project.id
# TODO: reply handling
db.add(status)
db.commit()
# Try to go back from where we came.
referer = request.headers.get('referer', url_for('status.index'))
redirect_url = request.form.get('redirect_to', referer)
return redirect(redirect_url) | b06f711dfbf73b9c75ba2303478799dcc678a28c | 3,655,323 |
import base64
def urlsafe_b64decode_nopadding(val):
"""Deal with unpadded urlsafe base64."""
# Yes, it accepts extra = characters.
return base64.urlsafe_b64decode(str(val) + '===') | 22ed00b07e16b4b557dc46b5caeb9f7ce9513c0d | 3,655,324 |
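# Usage sketch: the padded form of b'hello' is 'aGVsbG8=', so the unpadded
# token decodes once the extra '=' characters are appended.
urlsafe_b64decode_nopadding('aGVsbG8')   # -> b'hello'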
def _subimg_bbox(img, subimage, xc, yc):
"""
Find the x/y bounding-box pixel coordinates in ``img`` needed to
add ``subimage``, centered at ``(xc, yc)``, to ``img``. Returns
``None`` if the ``subimage`` would extend past the ``img``
boundary.
"""
ys, xs = subimage.shape
y, x = img.shape
y0 = int(yc - (ys - 1) / 2.0)
y1 = y0 + ys
x0 = int(xc - (xs - 1) / 2.0)
x1 = x0 + xs
if (x0 >= 0) and (y0 >= 0) and (x1 < x) and (y1 < y):
return (x0, x1, y0, y1)
else:
return None | b299a6b3726ced525b538b4fea45b235fc0bd56e | 3,655,325 |
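# Usage sketch: paste a 3x3 stamp centred at (x=10, y=12) into a 20x20 image.
import numpy as np

img = np.zeros((20, 20))
stamp = np.ones((3, 3))
bbox = _subimg_bbox(img, stamp, xc=10, yc=12)
if bbox is not None:            # None means the stamp would extend past the edge
    x0, x1, y0, y1 = bbox
    img[y0:y1, x0:x1] += stamp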
from datetime import datetime
def _ToDatetimeObject(date_str):
"""Converts a string into datetime object.
Args:
date_str: (str) A date and optional time for the oldest article
allowed. This should be in ISO 8601 format. (yyyy-mm-dd)
Returns:
datetime.datetime Object.
Raises:
ValueError: Invalid date format.
"""
if not date_str:
date_str = datetime.now().strftime('%Y-%m-%d')
if not any(date_.match(date_str) for date_ in DATE_REGEXES):
raise ValueError('Invalid date format %s' % date_str)
return datetime.strptime(date_str, '%Y-%m-%d') | df675cb5391456122bb350a126e0b4a4ed31fc49 | 3,655,326 |
import numpy as np

def select_most_uncertain_patch(x_image_pl, y_label_pl, fb_pred, ed_pred, fb_prob_mean_bald, kernel_window, stride_size,
already_select_image_index, previously_selected_binary_mask, num_most_uncert_patch,
method):
"""This function is used to acquire the #most uncertain patches in the pooling set.
Args:
x_image_pl: [Num_Im, Im_h, Im_w,3]
y_label_pl: [Num_Im, Im_h, Im_w,1]
fb_pred: [Num_Im, Im_h, Im_w, 2]
ed_pred: [Num_Im, Im_h, Im_w, 2]
fb_prob_mean_bald: [num_im, imw, imw]
kernel_window: [kh, kw] determine the size of the region
stride_size: int, determine the stride between every two regions
already_select_image_index: if it's None, then it means that's the first acquistion step,
otherwise it's the numeric image index for the previously selected patches
previously_selected_binary_mask: [num_already_selected_images, Im_h, Im_w,1]
num_most_uncert_patch: int, number of patches that are selected in each acquisition step
method: acquisition method: 'B', 'C', 'D'
Returns:
        Most_Uncert_Im: [Num_Selected, Im_h, Im_w, 3]
Most_Uncert_FB_GT: [Num_Selected, Im_h, Im_w,1]
Most_Uncert_ED_GT: [Num_Selected, Im_h, Im_w,1]
Most_Uncert_Binary_Mask: [Num_Selected, Im_h, Im_w,1]
Selected_Image_Index: [Num_Selected]
"""
num_im = np.shape(x_image_pl)[0]
uncertainty_map_tot = []
for i in range(num_im):
if method == 'B':
var_stat = get_uncert_heatmap(x_image_pl[i], fb_pred[i])
elif method == 'C':
var_stat = get_entropy_heatmap(fb_pred[i])
elif method == 'D':
var_stat = get_bald_heatmap(fb_prob_mean_bald[i], fb_pred[i])
uncertainty_map_tot.append(var_stat)
uncertainty_map_tot = np.array(uncertainty_map_tot)
if already_select_image_index is None:
print("--------This is the beginning of the selection process-------")
else:
        print(
            "----------Some patches have already been annotated; excluding them from the uncertainty maps----------")
previously_selected_binary_mask = np.squeeze(previously_selected_binary_mask, axis=-1)
for i in range(np.shape(previously_selected_binary_mask)[0]):
uncertainty_map_single = uncertainty_map_tot[already_select_image_index[i]]
uncertainty_map_updated = uncertainty_map_single * (1 - previously_selected_binary_mask[i])
uncertainty_map_tot[already_select_image_index[i]] = uncertainty_map_updated
selected_numeric_image_index, binary_mask_updated_tot = calculate_score_for_patch(uncertainty_map_tot,
kernel_window, stride_size,
num_most_uncert_patch)
pseudo_fb_la_tot = []
pseudo_ed_la_tot = []
for index, single_selected_image_index in enumerate(selected_numeric_image_index):
pseudo_fb_la, pseudo_ed_la = return_pseudo_label(y_label_pl[single_selected_image_index],
fb_pred[single_selected_image_index],
ed_pred[single_selected_image_index],
binary_mask_updated_tot[index])
pseudo_fb_la_tot.append(pseudo_fb_la)
pseudo_ed_la_tot.append(pseudo_ed_la)
most_uncert_im_tot = x_image_pl[selected_numeric_image_index]
most_uncertain = [most_uncert_im_tot,
pseudo_fb_la_tot,
pseudo_ed_la_tot,
binary_mask_updated_tot,
selected_numeric_image_index]
return most_uncertain | 21f40e34b1436d91eca041998cb927800cc10f7b | 3,655,327 |
import requests
import json
def submit_extraction(connector, host, key, datasetid, extractorname):
"""Submit dataset for extraction by given extractor.
Keyword arguments:
connector -- connector information, used to get missing parameters and send status updates
host -- the clowder host, including http and port, should end with a /
key -- the secret key to login to clowder
datasetid -- the dataset UUID to submit
extractorname -- registered name of extractor to trigger
"""
url = "%sapi/datasets/%s/extractions?key=%s" % (host, datasetid, key)
result = requests.post(url,
headers={'Content-Type': 'application/json'},
data=json.dumps({"extractor": extractorname}),
verify=connector.ssl_verify if connector else True)
result.raise_for_status()
return result.status_code | 449fc6c3c37ef8a5206a7ebe18b367885ae319a8 | 3,655,328 |
import math
def fcmp(x, y, precision):
"""fcmp(x, y, precision) -> -1, 0, or 1"""
if math.fabs(x-y) < precision:
return 0
elif x < y:
return -1
return 1 | 905421b36635ab830e2216ab34fee89f75c7f4c4 | 3,655,329 |
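# Usage sketch: three-way float comparison with an explicit tolerance.
fcmp(0.1 + 0.2, 0.3, 1e-9)   # -> 0  (equal within precision)
fcmp(1.0, 2.0, 1e-9)         # -> -1
fcmp(2.0, 1.0, 1e-9)         # -> 1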
def parse_vcf_line(line):
"""
Args:
line (str): line in VCF file obj.
Returns:
parsed_line_lst (lst): with tuple elem (chr, pos, ref, alt)
Example:
deletion
pos 123456789012
reference ATTAGTAGATGT
deletion ATTA---GATGT
VCF:
CHROM POS REF ALT
N 4 AGTA A
Bambino:
chr pos ref alt
chr_N 5 GTA -
insertion
pos 1234***56789012
reference ATTA***GTAGATGT
insertion ATTAGTAGTAGATGT
VCF:
CHROM POS REF ALT
N 4 A AGTA
Bambino:
chr pos ref alt
chr_N 5 - GTA
"""
parsed_line_lst = []
# skip header lines
if line.startswith("#"):
return parsed_line_lst
lst = line.rstrip().split("\t")
chr = lst[0]
vcf_pos = int(lst[1])
vcf_ref = lst[3]
vcf_alts = lst[4].split(",") # possibly multi-allelic
if not chr.startswith("chr"):
chr = "chr" + chr
    # skip non-canonical chromosomes
if not is_canonical_chromosome(chr):
return parsed_line_lst
for vcf_alt in vcf_alts:
n = count_padding_bases(vcf_ref, vcf_alt)
pos = vcf_pos + n
if len(vcf_ref) < len(vcf_alt):
ref = "-"
alt = vcf_alt[n:]
parsed_line_lst.append((chr, pos, ref, alt))
elif len(vcf_ref) > len(vcf_alt):
ref = vcf_ref[n:]
alt = "-"
parsed_line_lst.append((chr, pos, ref, alt))
else:
pass # not indel
return parsed_line_lst | 705c3bfe2ed3a0d4552dcbd18e8c08b73b84b40b | 3,655,330 |
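# Usage sketch for the deletion case from the docstring, assuming the
# module-level helpers (is_canonical_chromosome, count_padding_bases) behave
# as their names suggest:
line = "1\t4\t.\tAGTA\tA\t.\t.\t.\n"
parse_vcf_line(line)   # -> [('chr1', 5, 'GTA', '-')]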
def fuzzy_lookup_item(name_or_id, lst):
"""Lookup an item by either name or id.
Looking up by id is exact match. Looking up by name is by containment, and
if the term is entirely lowercase then it's also case-insensitive.
Multiple matches will throw an exception, unless one of them was an exact
match.
"""
try:
idd = int(name_or_id)
for val in lst:
if val.id == idd:
return val
raise RuntimeError('Id %d not found!' % idd)
except ValueError:
insensitive = name_or_id.islower()
matches = []
for val in lst:
name = val.name or ''
if name_or_id == name:
return val
if insensitive:
name = name.lower()
if name_or_id in name:
matches.append(val)
if len(matches) == 1:
return matches[0]
if not matches:
raise RuntimeError(f'No name containing {name_or_id!r} found!') from None
raise RuntimeError(
f'Multiple matches for {name_or_id!r}: {[x.name for x in matches]}') from None | 604b3879d0f97822d5a36db6dcf468ef8eefaac9 | 3,655,331 |
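# Usage sketch with a hypothetical item type that has .id and .name:
from collections import namedtuple

Item = namedtuple("Item", "id name")
items = [Item(1, "Sword of Dawn"), Item(2, "Wooden Shield")]
fuzzy_lookup_item("2", items)      # -> Item(id=2, name='Wooden Shield')  exact id match
fuzzy_lookup_item("sword", items)  # -> Item(id=1, name='Sword of Dawn')  case-insensitive containment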
import os

import tensorflow as tf
def _make_output_dirs(root_output_dir, experiment_name):
"""Get directories for outputs. Create if not exist."""
tf.io.gfile.makedirs(root_output_dir)
checkpoint_dir = os.path.join(root_output_dir, 'checkpoints', experiment_name)
tf.io.gfile.makedirs(checkpoint_dir)
results_dir = os.path.join(root_output_dir, 'results', experiment_name)
tf.io.gfile.makedirs(results_dir)
summary_dir = os.path.join(root_output_dir, 'logdir', experiment_name)
tf.io.gfile.makedirs(summary_dir)
return checkpoint_dir, results_dir, summary_dir | df9f6774b74dfb4156414e5c7761a595fb8b6cb3 | 3,655,332 |
import pandas as pd

def fantasy_pros_ecr_scrape(league_dict=config.sean):
"""Scrape Fantasy Pros ECR given a league scoring format
:param league_dict: league dict in config.py used to determine whether to pull PPR/standard/half-ppr
"""
scoring = league_dict.get('scoring')
if scoring == 'ppr':
url = 'https://www.fantasypros.com/nfl/rankings/ppr-cheatsheets.php'
elif scoring == 'half-ppr':
url = 'https://www.fantasypros.com/nfl/rankings/half-point-ppr-cheatsheets.php'
else:
url = 'https://www.fantasypros.com/nfl/rankings/consensus-cheatsheets.php'
html = scrape_dynamic_javascript(url)
parsed_dict = parse_ecr_html(html)
return pd.DataFrame(parsed_dict) | c20ae9542f9fea096510681bcf3c430b23cbdf29 | 3,655,333 |
from sklearn import discriminant_analysis

def lda(X, y, nr_components=2):
    """
    Linear discriminant analysis
:param X: Input vectors
:param y: Input classes
:param nr_components: Dimension of output co-ordinates
:return: Output co-ordinates
"""
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
return discriminant_analysis.LinearDiscriminantAnalysis(n_components=nr_components).fit_transform(X2, y) | c9db65d494304246cf518833c1ae5c6ed22f3fa6 | 3,655,334 |
def _flatten_value_to_list(batch_values):
"""Converts an N-D dense or sparse batch to a 1-D list."""
# Ravel for flattening and tolist so that we go to native Python types
# for more efficient followup processing.
#
batch_value, = batch_values
return batch_value.ravel().tolist() | 77bfd9d32cbbf86a16a8da2701417a9ac9b9cc93 | 3,655,335 |
import numpy as np
from numpy import pi, degrees

def sun_position(time):
"""
Computes the sun's position in longitude and colatitude at a given time
(mjd2000).
It is accurate for years 1901 through 2099, to within 0.006 deg.
Input shape is preserved.
Parameters
----------
time : ndarray, shape (...)
Time given as modified Julian date, i.e. with respect to the date 0h00
January 1, 2000 (mjd2000).
Returns
-------
theta : ndarray, shape (...)
Geographic colatitude of sun's position in degrees
:math:`[0^\\circ, 180^\\circ]`.
phi : ndarray, shape (...)
Geographic east longitude of sun's position in degrees
:math:`(-180^\\circ, 180^\\circ]`.
References
----------
Taken from `here <http://jsoc.stanford.edu/doc/keywords/Chris_Russel/
Geophysical%20Coordinate%20Transformations.htm#appendix2>`_
"""
rad = pi / 180
year = 2000 # reference year for mjd2000
assert np.all((year + time // 365.25) < 2099) \
and np.all((year - time // 365.25) > 1901), \
("Time must be between 1901 and 2099.")
frac_day = np.remainder(time, 1) # decimal fraction of a day
julian_date = 365 * (year-1900) + (year-1901)//4 + time + 0.5
t = julian_date/36525
v = np.remainder(279.696678 + 0.9856473354*julian_date, 360.)
g = np.remainder(358.475845 + 0.985600267*julian_date, 360.)
slong = v + (1.91946 - 0.004789*t)*np.sin(g*rad) + 0.020094*np.sin(2*g*rad)
obliq = (23.45229 - 0.0130125*t)
slp = (slong - 0.005686)
sind = np.sin(obliq*rad)*np.sin(slp*rad)
cosd = np.sqrt(1.-sind**2)
# sun's declination in radians
declination = np.arctan(sind/cosd)
# sun's right right ascension in radians (0, 2*pi)
right_ascension = pi - np.arctan2(sind/(cosd * np.tan(obliq*rad)),
-np.cos(slp*rad)/cosd)
# Greenwich mean siderial time in radians (0, 2*pi)
gmst = np.remainder(279.690983 + 0.9856473354*julian_date
+ 360.*frac_day + 180., 360.) * rad
theta = degrees(pi/2 - declination) # convert to colatitude
phi = center_azimuth(degrees(right_ascension - gmst))
return theta, phi | d5465044fbbe650580f4e9afaa13cf83e2cad758 | 3,655,336 |
import json
def get_assay_description(assay_id, summary=True, attempts=10):
""" Get the description of an assay in JSON format.
Parameters
----------
assay_id : int
The id of the bioassay.
summary : bool, optional
If true returns a summary of the description of the assay (default=True).
attempts : int, optional
number of times to try to download the data in case of failure
(default=10).
Returns
--------
dict
A dictionary containing the assay description.
"""
assay_url = base_url + "/assay/aid/{}".format(assay_id)
if summary:
description_url = assay_url + "/summary/JSON"
else:
description_url = assay_url + "/description/JSON"
data = _get_data(description_url, attempts)
return json.loads(data) | 13ff3620a1ef3e7aa1c12bd5a9b5aa88b2fb297f | 3,655,337 |
def acos(expr):
"""
Arc cosine -- output in radians.
It is the same that :code:`arccos` moodle math function.
"""
return Expression('acos({0})'.format(str(expr))) | d064caaa037de619266e322f85ae09c2ba7d9d16 | 3,655,338 |
from datetime import datetime
def annotate_genes(gene_df, annotation_gtf, lookup_df=None):
"""
Add gene and variant annotations (e.g., gene_name, rs_id, etc.) to gene-level output
gene_df: output from map_cis()
annotation_gtf: gene annotation in GTF format
lookup_df: DataFrame with variant annotations, indexed by 'variant_id'
"""
gene_dict = {}
print('['+datetime.now().strftime("%b %d %H:%M:%S")+'] Adding gene and variant annotations', flush=True)
print(' * parsing GTF', flush=True)
with open(annotation_gtf) as gtf:
for row in gtf:
row = row.strip().split('\t')
if row[0][0]=='#' or row[2]!='gene': continue
# get gene_id and gene_name from attributes
attr = dict([i.split() for i in row[8].replace('"','').split(';') if i!=''])
# gene_name, gene_chr, gene_start, gene_end, strand
gene_dict[attr['gene_id']] = [attr['gene_name'], row[0], row[3], row[4], row[6]]
print(' * annotating genes', flush=True)
if 'group_id' in gene_df:
gene_info = pd.DataFrame(data=[gene_dict[i] for i in gene_df['group_id']],
columns=['gene_name', 'gene_chr', 'gene_start', 'gene_end', 'strand'],
index=gene_df.index)
else:
gene_info = pd.DataFrame(data=[gene_dict[i] for i in gene_df.index],
columns=['gene_name', 'gene_chr', 'gene_start', 'gene_end', 'strand'],
index=gene_df.index)
gene_df = pd.concat([gene_info, gene_df], axis=1)
assert np.all(gene_df.index==gene_info.index)
col_order = ['gene_name', 'gene_chr', 'gene_start', 'gene_end', 'strand',
'num_var', 'beta_shape1', 'beta_shape2', 'true_df', 'pval_true_df', 'variant_id', 'tss_distance']
if lookup_df is not None:
print(' * adding variant annotations from lookup table', flush=True)
gene_df = gene_df.join(lookup_df, on='variant_id') # add variant information
col_order += list(lookup_df.columns)
col_order += ['ma_samples', 'ma_count', 'af', 'pval_nominal',
'slope', 'slope_se', 'pval_perm', 'pval_beta']
if 'group_id' in gene_df:
col_order += ['group_id', 'group_size']
col_order += ['qval', 'pval_nominal_threshold']
gene_df = gene_df[col_order]
print('done.', flush=True)
return gene_df | 562ef01380075a3e12eeaecdd6ab1e2285ddbc4f | 3,655,339 |
import torch
def y_gate():
"""
Pauli y
"""
return torch.tensor([[0, -1j], [1j, 0]]) + 0j | c0da0112233773e1c764e103599a591bb7a4a7f5 | 3,655,340 |
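# Usage sketch: Pauli-Y maps |0> to i|1>.
import torch

ket0 = torch.tensor([1, 0], dtype=torch.complex64)
y_gate() @ ket0   # -> tensor([0.+0.j, 0.+1.j])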
import tarfile
def extract_tarball(tarball, install_dir):
"""Extract tarball to a local path"""
if not tarball.path.is_file():
raise IOError(f"<info>{tarball.path}</info> is not a file!")
try:
with tarfile.open(tarball.path, "r:gz") as f_tarball:
extraction_dir = [
obj.name
for obj in f_tarball.getmembers()
if obj.isdir() and "/" not in obj.name
][0]
f_tarball.extractall(install_dir)
except tarfile.ReadError as exc:
raise IOError(f"<info>{tarball.path}</info> is not a valid tarball!") from exc
return install_dir / extraction_dir | da9deeb71da36c7c01611f3be7965a8c4a22dc41 | 3,655,341 |
def compose_matrix(scale=None, shear=None, angles=None, translation=None, perspective=None):
"""Calculates a matrix from the components of scale, shear, euler_angles, translation and perspective.
Parameters
----------
scale : [float, float, float]
The 3 scale factors in x-, y-, and z-direction.
shear : [float, float, float]
The 3 shear factors for x-y, x-z, and y-z axes.
angles : [float, float, float]
The rotation specified through the 3 Euler angles about static x, y, z axes.
translation : [float, float, float]
The 3 values of translation.
perspective : [float, float, float, float]
The 4 perspective entries of the matrix.
Returns
-------
list[list[float]]
The 4x4 matrix that combines the provided transformation components.
Examples
--------
>>> trans1 = [1, 2, 3]
>>> angle1 = [-2.142, 1.141, -0.142]
>>> scale1 = [0.123, 2, 0.5]
>>> M = compose_matrix(scale1, None, angle1, trans1, None)
>>> scale2, shear2, angle2, trans2, persp2 = decompose_matrix(M)
>>> allclose(scale1, scale2)
True
>>> allclose(angle1, angle2)
True
>>> allclose(trans1, trans2)
True
"""
M = [[1. if i == j else 0. for i in range(4)] for j in range(4)]
if perspective is not None:
P = matrix_from_perspective_entries(perspective)
M = multiply_matrices(M, P)
if translation is not None:
T = matrix_from_translation(translation)
M = multiply_matrices(M, T)
if angles is not None:
R = matrix_from_euler_angles(angles, static=True, axes="xyz")
M = multiply_matrices(M, R)
if shear is not None:
H = matrix_from_shear_entries(shear)
M = multiply_matrices(M, H)
if scale is not None:
S = matrix_from_scale_factors(scale)
M = multiply_matrices(M, S)
for i in range(4):
for j in range(4):
M[i][j] /= M[3][3]
return M | a186919f8b6fc47637e7c20db30fbdd8e461e059 | 3,655,342 |
def dict_merge(set1, set2):
"""Joins two dictionaries."""
return dict(list(set1.items()) + list(set2.items())) | d88a68720cb9406c46bdef40f46e461a80e588c0 | 3,655,343 |
import numpy as np
from numpy import sqrt, arctanh, log, sign

def EucDistIntegral(a, b, x):
"""[summary]
Calculate Integrated Euclidean distance.
Args:
a (float): a value
b (float): b value
x (float): x value
Returns:
val: Integration result
"""
asq = a * a
bsq = b * b
xsq = x * x
dn = (6 * (1 + asq)**(3 / 2))
cx = (a * b + x + asq * x) / \
sqrt((bsq + 2 * a * b * x + (1 + asq) * xsq)) / sqrt((1 + asq))
if abs(abs(cx) - 1) <= 1E-9 or np.isnan(cx):
c1 = x * b**2
else:
c1 = b**3 * arctanh(np.float(cx))
c2 = sqrt(bsq + 2 * a * b * x + (1 + asq) * xsq) * \
(2 * b * x + 2 * asq * b * x + a**3 * xsq + a * (bsq + xsq))
if x == 0:
c4 = 0
else:
c3 = abs(x) / (b + a * x + sqrt(xsq + (b + a * x)**2))
if np.isnan(c3) or np.isinf(c3):
if b == 0:
c3 = 1 / (sign(x) * a + sqrt(asq + 1))
else:
c3 = -2 * b / abs(x)
c4 = (1 + asq) * x**3 * log(c3)
return (c1 + sqrt(1 + asq) * (c2 - c4)) / dn | 3da541356636e8be7f9264d9d59a29dd003c082b | 3,655,344 |
def _VarintSize(value):
"""Compute the size of a varint value."""
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10 | 4bd9b1c8d362f5e72e97f9f2c8e0d5711065291f | 3,655,345 |
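# Usage sketch: varints store 7 payload bits per byte.
_VarintSize(1)        # -> 1
_VarintSize(300)      # -> 2
_VarintSize(2 ** 21)  # -> 4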
import requests
def send_to_hipchat(
message,
token=settings.HIPCHAT_API_TOKEN,
room=settings.HIPCHAT_ROOM_ID,
sender="Trello",
color="yellow",
notify=False): # noqa
"""
Send a message to HipChat.
Returns the status code of the request. Should be 200.
"""
payload = {
'auth_token': token,
'notify': notify,
'color': color,
'from': sender,
'room_id': room,
'message': message
}
return requests.post(HIPCHAT_API_URL, data=payload).status_code | 138abbf59f561a4c5d21aea9976856dbd7a581ca | 3,655,346 |
from cStringIO import StringIO
import cgi
def input(*requireds, **defaults):
"""
Returns a `storage` object with the GET and POST arguments.
See `storify` for how `requireds` and `defaults` work.
"""
def dictify(fs): return dict([(k, fs[k]) for k in fs.keys()])
_method = defaults.pop('_method', 'both')
e = ctx.env.copy()
out = {}
if _method.lower() in ['both', 'post']:
a = {}
if e['REQUEST_METHOD'] == 'POST':
a = cgi.FieldStorage(fp = StringIO(data()), environ=e,
keep_blank_values=1)
a = dictify(a)
out = dictadd(out, a)
if _method.lower() in ['both', 'get']:
e['REQUEST_METHOD'] = 'GET'
a = dictify(cgi.FieldStorage(environ=e, keep_blank_values=1))
out = dictadd(out, a)
try:
return storify(out, *requireds, **defaults)
except KeyError:
badrequest()
raise StopIteration | 0b3fcd9142dbcd3309b80837c6fc53abdf4aaad6 | 3,655,347 |
def nodes_and_edges_valid(dev, num_nodes, node_names, rep):
"""Asserts that nodes in a device ``dev`` are properly initialized, when there
are ``num_nodes`` nodes expected, with names ``node_names``, using representation ``rep``."""
if not set(dev._nodes.keys()) == {"state"}:
return False
if not len(dev._nodes["state"]) == num_nodes:
return False
for idx in range(num_nodes):
if not dev._nodes["state"][idx].name == node_names[idx]:
return False
return edges_valid(dev, num_nodes=num_nodes, rep=rep) | ad6dbfdfd92114c9b041617a91ad30dbe8a8189f | 3,655,348 |
def is_android(builder_cfg):
"""Determine whether the given builder is an Android builder."""
return ('Android' in builder_cfg.get('extra_config', '') or
builder_cfg.get('os') == 'Android') | 74b1620ba2f6fff46495174158f734c5aa8da372 | 3,655,349 |
def twoSum(self, numbers, target):  # ! this approach works
"""
:type numbers: List[int]
:type target: int
:rtype: List[int]
"""
numbers_dict = {}
for idn, v in enumerate(numbers):
if target - v in numbers_dict:
return [numbers_dict[target - v] + 1, idn + 1]
numbers_dict[v] = idn | e2b93828b5db7256b9a1e90e7e21adad1ce0b4de | 3,655,350 |
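# Usage sketch; the unused `self` parameter suggests this was lifted from a
# LeetCode class, so any placeholder can be passed for it.
twoSum(None, [2, 7, 11, 15], 9)   # -> [1, 2]  (1-based indices)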
def not_after(cert):
"""
Gets the naive datetime of the certificates 'not_after' field.
This field denotes the last date in time which the given certificate
is valid.
:return: Datetime
"""
return cert.not_valid_after | 4f084146908d70af5c2cdfa5151f0c26533ac7fe | 3,655,351 |
from datetime import datetime
def parse_time_string(time_str: str) -> datetime.time:
"""Parses a string recognizable by TIME_REGEXP into a datetime.time object. If
the string has an invalid format, a ValueError is raised."""
match = TIME_REGEXP.match(time_str)
if match is None:
raise ValueError("time string {} has an invalid format".format(repr(time_str)))
groups = match.groupdict()
return datetime.time(int(groups["h"]), int(groups["m"]), int(groups["s"] or 0)) | 3238abcc6edb5a37c4a3d615b71e9dde6344f0ac | 3,655,352 |
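# Usage sketch, assuming the module-level TIME_REGEXP matches HH:MM[:SS]:
parse_time_string("09:30")      # -> datetime.time(9, 30)
parse_time_string("09:30:15")   # -> datetime.time(9, 30, 15)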
import os

from youtube_dl import YoutubeDL  # yt_dlp exposes the same class name
def dig(start, outdir, depth=2, max_duration=360):
"""
Crawls YouTube for source material (as mp3s).
Args:
- start: the starting YouTube url
- outdir: directory to save download tracks to
- depth: how many levels of related vids to look through
- max_duration: only dl videos shorter than or equal to this in duration
"""
urls = [start]
candidates = [start]
# Dig
while depth:
candidates = sum((_get_related_video_urls(url) for url in candidates), [])
urls += candidates
depth -= 1
# Remove dupes
urls = set(urls)
print('Got {0} videos'.format(len(urls)))
# Kind of peculiar how this function has to work
def _filter(info):
if info['duration'] > max_duration:
return 'Too long'
return None
ydl_opts = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
'match_filter': _filter,
'outtmpl': os.path.join(outdir, '%(title)s-%(id)s.%(ext)s'),
}
with YoutubeDL(ydl_opts) as ydl:
ydl.download(urls) | 09cd6c98d117fc71f58373213ed432c1a126a6cf | 3,655,353 |
import numpy as np
import sklearn.metrics

def get_roc_curve(y_true, y_score, title=None, with_plot=True):
"""
Plot the [Receiver Operating Characteristic][roc] curve of the given
true labels and confidence scores.
[roc]: http://en.wikipedia.org/wiki/Receiver_operating_characteristic
"""
fpr, tpr, thresholds = sklearn.metrics.roc_curve(y_true, y_score)
auc = np.trapz(tpr, fpr)
fig = None
if with_plot:
fig = vislab.results_viz.plot_curve_with_area(
fpr, tpr, auc, 'False Positive Rate', 'True Positive Rate', 'AUC')
ax = fig.get_axes()[0]
ax.plot([0, 1], [0, 1], 'k--')
if title is not None:
ax.set_title(title)
return fig, fpr, tpr, auc | 7635af1705c6bdaccce1e1c5e99719645026d436 | 3,655,354 |
from datetime import datetime
def read_err_songs():
""" read song data from xml file to a list of dictionaries """
songfile = open('/home/gabe/python/selfishmusic/errors.xml')
soup = BS.BeautifulSoup(songfile.read())
songsxml = soup.findAll('song')
songs = []
for song in songsxml:
sd = {}
sd['songnum'] = int(get_text(song.songnum))
sd['title'] = get_text(song.title)
sd['artist'] = get_text(song.artist)
date = get_text(song.date)
date = [x.strip(' ,') for x in date.split(' ')]
sd['date'] = datetime.date(month=MONTHS.index(date[0]) + 1,
day=int(date[1]),
year=int(date[2]))
sd['lyrics'] = get_text(song.lyrics)
sd['found_title'] = get_text(song.found_title)
sd['found_artist'] = get_text(song.found_artist)
songs.append(sd)
songfile.close()
return songs | 287c205c054045b3a88b74cf008e5a21037f9727 | 3,655,355 |
def word_value(word: str) -> int:
"""Returns the sum of the alphabetical positions of each letter in word."""
return (0 if word == '' else
word_value(word[:-1]) + alpha.letter_index_upper(word[-1])) | b964faa5a5792e003fb0859c1ffb0b25e63f6a75 | 3,655,356 |
from flask import jsonify

def status():
"""
Returns json response of api status
Returns:
JSON: json object
"""
status = {
"status": "OK"
}
return jsonify(status) | d515e0628bb4c77ad83b0a26b758a3686663d329 | 3,655,357 |
def celcius_to_farenheit(x):
"""calculate celcius to farenheit"""
farenheit = (9*x/5) + 32
return farenheit | fa0041451c82b20283e4f20b501a6042ab19ec95 | 3,655,358 |
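# Usage sketch:
celcius_to_farenheit(0)     # -> 32.0
celcius_to_farenheit(100)   # -> 212.0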
def CheckFlags(node_name, report_per_node, warnings, errors,
flags, warning_helper, error_helper):
"""Check the status flags in each node and bookkeep the results.
Args:
node_name: Short name of the node.
report_per_node: Structure to record warning/error messages per node.
Its type should be collections.defaultdict(list).
warnings: Structure to record nodes that raise each warning type.
Its type should be collections.defaultdict(list).
errors: Structure to record nodes that raise each error type.
Its type should be collections.defaultdict(list).
flags: The status flags to check against.
warning_helper: The EnumHelper for warnings.
error_helper: The EnumHelper for errors.
Returns:
True if there are any warnings/errors.
"""
any_warning_or_error = False
if warning_helper:
for warning_value in warning_helper.Values():
warning_name = warning_helper.ShortName(warning_value)
if avionics_util.CheckWarning(flags, warning_value):
report_per_node[node_name].append(('WARNING', warning_name))
warnings[warning_name].append(node_name)
any_warning_or_error = True
if error_helper:
for error_value in error_helper.Values():
error_name = error_helper.ShortName(error_value)
if avionics_util.CheckError(flags, error_value):
report_per_node[node_name].append(('ERROR', error_name))
errors[error_name].append(node_name)
any_warning_or_error = True
return any_warning_or_error | 63bac7bfa4e3fa9c3cc462f5400d68116dfb898d | 3,655,359 |
def EnrollmentTransaction():
"""
:return:
"""
return b'\x20' | 05adff34b6cf100d95e16ab837b38b26b6315b6a | 3,655,360 |
def sentinel_id(vocabulary, return_value=None):
"""Token ID to use as a sentinel.
By default, we use the last token in the vocabulary.
Args:
vocabulary: a t5.data.vocabularies.Vocabulary
return_value: an optional integer
Returns:
an integer
"""
if return_value is not None:
return return_value
return vocabulary.vocab_size - 1 | 08ad1116b7f41ba7070359675a0133f14b9917bd | 3,655,361 |
from datetime import datetime
import urllib
import hmac
import hashlib
import base64
def create_signature(api_key, method, host, path, secret_key, get_params=None):
"""
    Create a request signature.
    :param get_params: dict  Extra URL parameters attached when using the GET method
:return:
"""
sorted_params = [
("AccessKeyId", api_key),
("SignatureMethod", "HmacSHA256"),
("SignatureVersion", "2"),
("Timestamp", datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S"))
]
if get_params:
sorted_params.extend(list(get_params.items()))
sorted_params = list(sorted(sorted_params))
encode_params = urllib.parse.urlencode(sorted_params)
payload = [method, host, path, encode_params]
payload = "\n".join(payload)
payload = payload.encode(encoding="UTF8")
secret_key = secret_key.encode(encoding="UTF8")
digest = hmac.new(secret_key, payload, digestmod=hashlib.sha256).digest()
signature = base64.b64encode(digest)
params = dict(sorted_params)
params["Signature"] = signature.decode("UTF8")
return params | 3e38bc883da9f5ebb311e1498f8cc73d1754c38b | 3,655,362 |
import numpy as np
import matplotlib.pyplot as plt

def measure_fwhm(image, plot=True, printout=True):
"""
Find the 2D FWHM of a background/continuum subtracted cutout image of a target.
The target should be centered and cropped in the cutout.
Use lcbg.utils.cutout for cropping targets.
FWHM is estimated using the sigmas from a 2D gaussian fit of the target's flux.
The FWHM is returned as a tuple of the FWHM in the x and y directions.
Parameters
----------
image : array like
Input background/continuum subtracted cutout image.
printout : bool
Print out info.
plot : bool
To plot fit or not.
Returns
-------
tuple : array of floats
FWHM in x and y directions.
"""
# Find FWHM
# ----------
fitted_line = fit_gaussian2d(image)
# Find fitted center
x_mean, y_mean = [i.value for i in [fitted_line.x_mean, fitted_line.y_mean]]
# Estimate FWHM using gaussian_sigma_to_fwhm
x_fwhm = fitted_line.x_stddev * gaussian_sigma_to_fwhm
y_fwhm = fitted_line.y_stddev * gaussian_sigma_to_fwhm
# Find half max
hm = fitted_line(x_mean, y_mean) / 2.
# Find the mean of the x and y direction
mean_fwhm = np.mean([x_fwhm, y_fwhm])
mean_fwhm = int(np.round(mean_fwhm))
# Print info about fit and FWHM
# ------------------------------
if printout:
print("Image Max: {}".format(image.max()))
print("Amplitude: {}".format(fitted_line.amplitude.value))
print("Center: ({}, {})".format(x_mean, y_mean))
print("Sigma = ({}, {})".format(fitted_line.x_stddev.value,
fitted_line.y_stddev.value, ))
print("Mean FWHM: {} Pix ".format(mean_fwhm))
print("FWHM: (x={}, y={}) Pix ".format(x_fwhm, y_fwhm))
if plot:
fig, [ax0, ax1, ax2, ax3] = plot_fit(image, fitted_line)
# Make x and y grid to plot to
y_arange, x_arange = np.mgrid[:image.shape[0], :image.shape[1]]
# Plot input image with FWHM and center
# -------------------------------------
ax0.imshow(image, cmap='gray_r')
ax0.axvline(x_mean - x_fwhm / 2, c='c', linestyle="--", label="X FWHM")
ax0.axvline(x_mean + x_fwhm / 2, c='c', linestyle="--")
ax0.axhline(y_mean - y_fwhm / 2, c='g', linestyle="--", label="Y FWHM")
ax0.axhline(y_mean + y_fwhm / 2, c='g', linestyle="--")
ax0.set_title("Center and FWHM Plot")
ax0.legend()
# Plot X fit
# ----------
ax2.axvline(x_mean, linestyle="-", label="Center")
ax2.axvline(x_mean - x_fwhm / 2, c='c', linestyle="--", label="X FWHM")
ax2.axvline(x_mean + x_fwhm / 2, c='c', linestyle="--")
ax2.axhline(hm, c="black", linestyle="--", label="Half Max")
ax2.legend()
# Plot Y fit
# ----------
ax3.axvline(y_mean, linestyle="-", label="Center")
ax3.axvline(y_mean - y_fwhm / 2, c='g', linestyle="--", label="Y FWHM")
ax3.axvline(y_mean + y_fwhm / 2, c='g', linestyle="--")
ax3.axhline(hm, c="black", linestyle="--", label="Half Max")
ax3.legend()
plt.show()
return np.array([x_fwhm, y_fwhm]) | c2fdb3a10ffa575ffe6fdeb9e86a47ffaefea5c2 | 3,655,363 |
from .mappia_publisher import MappiaPublisherPlugin
def classFactory(iface): # pylint: disable=invalid-name
"""Load MappiaPublisher class from file MappiaPublisher.
:param iface: A QGIS interface instance.
:type iface: QgsInterface
"""
#
return MappiaPublisherPlugin() | 1802094cb49c01b0c9c5ed8b45d3c77bcd9b746a | 3,655,364 |
def mongo_insert_canary(mongo, db_name, coll_name, doc):
""" Inserts a canary document with 'j' True. Returns 0 if successful. """
LOGGER.info("Inserting canary document %s to DB %s Collection %s", doc, db_name, coll_name)
coll = mongo[db_name][coll_name].with_options(
write_concern=pymongo.write_concern.WriteConcern(j=True))
res = coll.insert_one(doc)
return 0 if res.inserted_id else 1 | d82fe021db76972be19394688a07e9426bff82b7 | 3,655,365 |
from typing import Type
def is_dict_specifier(value):
# type: (object) -> bool
""" Check if value is a supported dictionary.
Check if a parameter of the task decorator is a dictionary that specifies
at least Type (and therefore can include things like Prefix, see binary
decorator test for some examples).
:param value: Decorator value to check.
:return: True if value is a dictionary that specifies at least the Type of
the key.
"""
return isinstance(value, dict) and Type in value | e18ad83a1b79a8150dfda1c65f4ab7e72cc8c8c8 | 3,655,366 |
def parse_star_count(stars_str):
"""Parse strings like 40.3k and get the no. of stars as a number"""
stars_str = stars_str.strip()
return int(float(stars_str[:-1]) * 1000) if stars_str[-1] == 'k' else int(stars_str) | d47177f26656e6dc33d708a0c4824ff677f3387a | 3,655,367 |
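# Usage sketch:
parse_star_count("40.3k")   # -> 40300
parse_star_count(" 512 ")   # -> 512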
import shutil
def is_libreoffice_sdk_available() -> bool:
""" do we have idlc somewhere (we suppose it is made available in current path var.) ? """
return shutil.which("idlc") is not None | 83f8b158bcf97aa875280b20e177895432116d21 | 3,655,368 |
def set_metrics_file(filenames, metric_type):
"""Create metrics from data read from a file.
Args:
filenames (list of str):
Paths to files containing one json string per line (potentially base64
encoded)
metric_type (ts_mon.Metric): any class deriving from ts_mon.Metric.
For ex. ts_mon.GaugeMetric.
Returns:
metric (list of metric_type): the metric instances, filled.
"""
if not filenames:
return []
metrics = []
for filename in filenames:
with open(filename, 'r') as f:
lines = f.read()
# Skip blank lines because it helps humans.
lines = [line for line in lines.splitlines() if line.strip()]
metrics.extend(set_metrics(lines, metric_type))
return metrics | 372ec1fcb4b50711b35e40936e63839d75689dee | 3,655,369 |
import numpy as np

def sortino_ratio_nb(returns, ann_factor, required_return_arr):
"""2-dim version of `sortino_ratio_1d_nb`.
`required_return_arr` should be an array of shape `returns.shape[1]`."""
result = np.empty(returns.shape[1], dtype=np.float_)
for col in range(returns.shape[1]):
result[col] = sortino_ratio_1d_nb(returns[:, col], ann_factor, required_return=required_return_arr[col])
return result | 2dfd6be1b7d3747c87484b22eb0cc0b0271c93a6 | 3,655,370 |
import re
def format_env_var(name: str, value: str) -> str:
"""
Formats environment variable value.
Formatter is chosen according to the kind of variable.
:param name: name of environment variable
:param value: value of environment variable
:return: string representation of value in appropriate format
"""
formatter = get_formatter(name)
new = str(value)
new = formatter(new)
new = escape(new)
new = re.sub("\n", "<br>", new)
return new | 030b16b897f2222d8465143b462f99ba344ba1eb | 3,655,371 |
from typing import Counter
def evenly_divisible(n):
""" Idea:
- Find factors of numbers 1 to n. Use DP to cache results bottom up.
- Amongst all factors, we have to include max counts of prime factors.
        - For example, in 1..10, 2 has to be included 3 times since 8 = 2 ^ 3
"""
max_counts = Counter()
for n in range(n, 1, -1):
factors = prime_factorize(n)
# Update max counts
        for k, v in factors.items():
max_counts[k] = max(max_counts[k], v)
res = 1
    for k, v in max_counts.items():
res *= k ** v
return res | 68301a33751c2f3863092450235ca5c24b28379e | 3,655,372 |
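evenly_divisible depends on a prime_factorize helper that is not shown in this snippet. The sketch below supplies a hypothetical helper returning a Counter of prime exponents and checks the classic result for n = 10 (2520 is the smallest number divisible by 1..10); both the helper and the check are illustrative assumptions, not the original project code.

def prime_factorize(n):
    """Hypothetical helper: map each prime factor of n to its exponent."""
    factors = Counter()
    d = 2
    while d * d <= n:
        while n % d == 0:
            factors[d] += 1
            n //= d
        d += 1
    if n > 1:
        factors[n] += 1
    return factors

print(evenly_divisible(10))  # -> 2520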
import logging
import numpy as np
logger = logging.getLogger(__name__)
def gradients(vals, func, releps=1e-3, abseps=None, mineps=1e-9, reltol=1,
epsscale=0.5):
"""
Calculate the partial derivatives of a function at a set of values. The
derivatives are calculated using the central difference, using an iterative
method to check that the values converge as step size decreases.
Parameters
----------
vals: array_like
A set of values, that are passed to a function, at which to calculate
the gradient of that function
func:
A function that takes in an array of values.
releps: float, array_like, 1e-3
The initial relative step size for calculating the derivative.
    abseps: float, array_like, None
        The initial absolute step size for calculating the derivative.
        This overrides `releps` if set; if it is None, `releps` is used.
    mineps: float, 1e-9
        The minimum relative step size at which to stop iterations if no
        convergence is achieved.
    reltol: float, 1
        The relative tolerance within which successive derivative estimates
        must agree to be considered converged.
    epsscale: float, 0.5
        The factor by which releps is scaled in each iteration.
Returns
-------
grads: array_like
An array of gradients for each non-fixed value.
"""
grads = np.zeros(len(vals))
# maximum number of times the gradient can change sign
flipflopmax = 10.
# set steps
if abseps is None:
if isinstance(releps, float):
eps = np.abs(vals) * releps
eps[eps == 0.] = releps # if any values are zero set eps to releps
teps = releps * np.ones(len(vals))
elif isinstance(releps, (list, np.ndarray)):
if len(releps) != len(vals):
raise ValueError("Problem with input relative step sizes")
eps = np.multiply(np.abs(vals), releps)
eps[eps == 0.] = np.array(releps)[eps == 0.]
teps = releps
else:
raise RuntimeError("Relative step sizes are not a recognised type!")
else:
if isinstance(abseps, float):
eps = abseps * np.ones(len(vals))
elif isinstance(abseps, (list, np.ndarray)):
if len(abseps) != len(vals):
raise ValueError("Problem with input absolute step sizes")
eps = np.array(abseps)
else:
raise RuntimeError("Absolute step sizes are not a recognised type!")
teps = eps
# for each value in vals calculate the gradient
count = 0
for i in range(len(vals)):
# initial parameter diffs
leps = eps[i]
cureps = teps[i]
flipflop = 0
# get central finite difference
fvals = np.copy(vals)
bvals = np.copy(vals)
# central difference
fvals[i] += 0.5 * leps # change forwards distance to half eps
bvals[i] -= 0.5 * leps # change backwards distance to half eps
cdiff = (func(fvals) - func(bvals)) / leps
while 1:
fvals[i] -= 0.5 * leps # remove old step
bvals[i] += 0.5 * leps
# change the difference by a factor of two
cureps *= epsscale
if cureps < mineps or flipflop > flipflopmax:
# if no convergence set flat derivative (TODO: check if there is a better thing to do instead)
logger.warn("Derivative calculation did not converge: setting flat derivative.")
grads[count] = 0.
break
leps *= epsscale
# central difference
fvals[i] += 0.5 * leps # change forwards distance to half eps
bvals[i] -= 0.5 * leps # change backwards distance to half eps
cdiffnew = (func(fvals) - func(bvals)) / leps
if cdiffnew == cdiff:
grads[count] = cdiff
break
# check whether previous diff and current diff are the same within reltol
rat = (cdiff / cdiffnew)
if np.isfinite(rat) and rat > 0.:
# gradient has not changed sign
if np.abs(1. - rat) < reltol:
grads[count] = cdiffnew
break
else:
cdiff = cdiffnew
continue
else:
cdiff = cdiffnew
flipflop += 1
continue
count += 1
return grads | a3dcc4e0bb9402bd2d4c6b14b37c13647200f1a8 | 3,655,373 |
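A quick numerical check of gradients against a known analytic result, assuming numpy and the function above are in scope:

f = lambda x: x[0] ** 2 + 3.0 * x[1]
print(gradients(np.array([1.0, 2.0]), f))  # expect approximately [2. 3.]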
def do_open(user_input):
"""identical to io.open in PY3"""
try:
with open(user_input) as f:
return f.read()
except Exception:
return None | 72037207adecb2758c844c2f0c7233d834060111 | 3,655,374 |
def likely_solution(players):
""" Return tuples of cards with the
number of players who don't have them
"""
likely = likely_solution_nums(players)
return sorted([(ALLCARDS[n], ct) for n, ct in likely],
key=lambda tp: tp[1], reverse=True) | f0531f3188a38ec1b70ca48f95c9cfdc71d723b5 | 3,655,375 |
def cns_extended_inp(mtf_infile, pdb_outfile):
"""
Create CNS iput script (.inp) to create extended PDB file
from molecular topology file (.mtf)
Parameters
----------
mtf_infile : str
Path to .mtf topology file
pdb_outfile : str
Path where extended .pdb file will be stored
Returns
-------
str:
Input script
"""
return _cns_render_template(
"generate_extended",
{
"mtf_infile": mtf_infile,
"pdb_outfile": pdb_outfile,
}
) | c850137db9a22fd48559228e3032bcd510c9d69b | 3,655,376 |
def index(request, response_format='html'):
"""Sales index page"""
query = Q(status__hidden=False)
if request.GET:
if 'status' in request.GET and request.GET['status']:
query = _get_filter_query(request.GET)
else:
query = query & _get_filter_query(request.GET)
orders = Object.filter_by_request(
request, SaleOrder.objects.filter(query), mode="r")
filters = OrderFilterForm(request.user.profile, '', request.GET)
statuses = Object.filter_by_request(request, SaleStatus.objects, mode="r")
massform = MassActionForm(request.user.profile)
return render_to_response('sales/index',
{'orders': orders,
'filters': filters,
'statuses': statuses,
'massform': massform
},
context_instance=RequestContext(request), response_format=response_format) | afb47a5c9094c9ff125c05c3588712d1875c69f3 | 3,655,377 |
import os
def getDroppableFilename(mime_data):
"""
Returns the filename of a file dropped into the canvas (if it was
accepted via @see isDroppableMimeType).
"""
if mime_data.hasUrls():
# Return the first locally existing file
for url in mime_data.urls():
fpath = url.toLocalFile()
if os.path.exists(fpath):
return fpath.strip()
if mime_data.hasText():
txt = mime_data.text()
if txt.startswith('file://'):
return txt[7:].strip()
raise ValueError('Unsupported QMimeData for dropped file!') | c49370abf2b56f1cb3ded02c5edfab121a728096 | 3,655,378 |
def team_points_leaders(num_results=None, round_name=None):
"""Returns the team points leaders across all groups, as a dictionary profile__team__name
and points.
"""
size = team_normalize_size()
if size:
entries = score_mgr.team_points_leaders(round_name=round_name)
else:
entries = score_mgr.team_points_leaders(num_results=num_results, round_name=round_name)
if entries:
if size:
for entry in entries:
team = Team.objects.get(name=entry["profile__team__name"])
if team.size:
entry["points"] = int(entry["points"] * float(size / team.size))
# resort the entries after the normalization
entries = sorted(entries, key=lambda e: e["points"], reverse=True)
return entries[:num_results]
else:
return entries
else:
results = Team.objects.all().extra(
select={'profile__team__name': 'name', 'points': 0}).values(
'profile__team__name', 'points')
if num_results:
results = results[:num_results]
return results | 56b72b28f74f94e428b668b785b3dbd5b0c7c378 | 3,655,379 |
def with_color(text, color, bold=False):
"""
Return a ZSH color-formatted string.
Arguments
---------
text: str
text to be colored
color: str
ZSH color code
bold: bool
whether or not to make the text bold
Returns
-------
str
string with ZSH color-coded text
"""
color_fmt = '$fg_bold[{:s}]' if bold else '$fg[{:s}]'
return '%{{{:s}%}}{:s}%{{$reset_color%}}'.format(
color_fmt.format(color), text) | 40c194d9de76ab504a25592cfb13407cb089da0a | 3,655,380 |
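For illustration, with_color wraps text in ZSH prompt escape sequences (assuming the function above is in scope):

print(with_color("host", "green"))       # %{$fg[green]%}host%{$reset_color%}
print(with_color("error", "red", True))  # %{$fg_bold[red]%}error%{$reset_color%}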
def sample_test():
"""Return sample test json."""
return get_sample_json("test.json") | 9d135d4fd2f7eb52d16ff96332811e4141139a12 | 3,655,381 |
import warnings
from io import StringIO
def dataframe_from_inp(inp_path, section, additional_cols=None, quote_replace=' ', **kwargs):
"""
create a dataframe from a section of an INP file
:param inp_path:
:param section:
:param additional_cols:
:param skip_headers:
:param quote_replace:
:return:
"""
# format the section header for look up in headers OrderedDict
sect = remove_braces(section).upper()
# get list of all section headers in inp to use as section ending flags
headers = get_inp_sections_details(inp_path, include_brackets=False)
if sect not in headers:
warnings.warn(f'{sect} section not found in {inp_path}')
return pd.DataFrame()
# extract the string and read into a dataframe
start_string = format_inp_section_header(section)
end_strings = [format_inp_section_header(h) for h in headers.keys()]
s = extract_section_of_file(inp_path, start_string, end_strings, **kwargs)
# replace occurrences of double quotes ""
s = s.replace('""', quote_replace)
# and get the list of columns to use for parsing this section
# add any additional columns needed for special cases (build instructions)
additional_cols = [] if additional_cols is None else additional_cols
cols = headers[sect]['columns'] + additional_cols
if headers[sect]['columns'][0] == 'blob':
# return the whole row, without specific col headers
return pd.read_csv(StringIO(s), delim_whitespace=False)
else:
try:
df = pd.read_csv(StringIO(s), header=None, delim_whitespace=True,
skiprows=[0], index_col=0, names=cols)
except IndexError:
print(f'failed to parse {section} with cols: {cols}. head:\n{s[:500]}')
raise
return df | 8eaefdc08c7de3991f5a85cfe5001a6dcd0aaf7b | 3,655,382 |
def compositional_stratified_splitting(dataset, perc_train):
"""Given the dataset and the percentage of data you want to extract from it, method will
apply stratified sampling where X is the dataset and Y is are the category values for each datapoint.
In the case each structure contains 2 types of atoms, the category will
be constructed as such: number of atoms of type 1 + number of atoms of type 2 * 100.
Parameters
----------
dataset: [Data]
A list of Data objects representing a structure that has atoms.
    perc_train: float
        Fraction of the dataset to assign to the training split.
Returns
----------
[Data]
Subsample of the original dataset constructed using stratified sampling.
"""
dataset_categories = create_dataset_categories(dataset)
dataset, dataset_categories = duplicate_unique_data_samples(
dataset, dataset_categories
)
sss_train = sklearn.model_selection.StratifiedShuffleSplit(
n_splits=1, train_size=perc_train, random_state=0
)
trainset, val_test_set = generate_partition(sss_train, dataset, dataset_categories)
val_test_dataset_categories = create_dataset_categories(val_test_set)
val_test_set, val_test_dataset_categories = duplicate_unique_data_samples(
val_test_set, val_test_dataset_categories
)
sss_valtest = sklearn.model_selection.StratifiedShuffleSplit(
n_splits=1, train_size=0.5, random_state=0
)
valset, testset = generate_partition(
sss_valtest, val_test_set, val_test_dataset_categories
)
return trainset, valset, testset | b57a0b7d651e6f9be4182fec8c918438dcae9b7a | 3,655,383 |
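The category construction described in the docstring can be sketched as follows for a structure with exactly two atom types; the category_of helper is an illustrative assumption, not the project's create_dataset_categories:

from collections import Counter

def category_of(atom_types):
    """Hypothetical sketch: count of type 1 plus 100 times the count of type 2."""
    counts = sorted(Counter(atom_types).items())
    (_, n1), (_, n2) = counts[0], counts[1]
    return n1 + n2 * 100

print(category_of(["A", "A", "A", "B", "B"]))  # -> 203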
import numpy as np
def is_inside_line_segment(x, y, x0, y0, x1, y1):
    """Return True if the point (x, y) lies inside the line segment defined by
    (x0, y0) and (x1, y1)."""
# Create two vectors.
v0 = np.array([ x0-x, y0-y ]).reshape((2,1))
v1 = np.array([ x1-x, y1-y ]).reshape((2,1))
# Inner product.
prod = v0.transpose().dot(v1)
if ( prod <= 0 ):
return True
else:
return False | b653c542d3d573857199d90257e9e36e6c45ccdc | 3,655,384 |
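Two quick checks of is_inside_line_segment, one point between the endpoints and one beyond them (assuming the function above is in scope):

print(is_inside_line_segment(1.0, 1.0, 0.0, 0.0, 2.0, 2.0))  # True
print(is_inside_line_segment(3.0, 3.0, 0.0, 0.0, 2.0, 2.0))  # False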
def transition_soil_carbon(area_final, carbon_final, depth_final,
transition_rate, year, area_initial,
carbon_initial, depth_initial):
"""This is the formula for calculating the transition of soil carbon
.. math:: (af * cf * df) - \
\\frac{1}{(1 + tr)^y} * \
[(af * cf * df) - \
(ai * ci * di)]
where
* :math:`af` is area_final
* :math:`cf` is carbon_final
* :math:`df` is depth_final
* :math:`tr` is transition_rate
* :math:`y` is year
* :math:`ai` is area_initial
* :math:`ci` is carbon_initial
* :math:`di` is depth_initial
Args:
area_final (float): The final area of the carbon
carbon_final (float): The final amount of carbon per volume
depth_final (float): The final depth of carbon
transition_rate (float): The rate at which the transition occurs
        year (float): The amount of time in years over which the transition occurs
        area_initial (float): The initial area of the carbon
        carbon_initial (float): The initial amount of carbon per volume
depth_initial (float): The initial depth of carbon
Returns:
float: Transition amount of soil carbon
"""
return (area_final * carbon_final * depth_final) - \
(1/((1 + transition_rate) ** year)) * \
((area_final * carbon_final * depth_final) - \
(area_initial * carbon_initial * depth_initial)) | bfbf83f201eb8b8b0be0ec6a8722e850f6084e95 | 3,655,385 |
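A worked call, assuming the function above is in scope: with a zero transition rate over one year, the expression reduces to the initial carbon stock.

# final pool 80 * 5.0 * 1.0 = 400, initial pool 100 * 5.0 * 1.0 = 500
print(transition_soil_carbon(80.0, 5.0, 1.0, 0.0, 1.0, 100.0, 5.0, 1.0))  # -> 500.0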
import json
import os
def make_global_config():
"""load & augment experiment configuration, then add it to global variables"""
parser = ArgumentParser(description='Evaluate TRE model.', formatter_class=ArgumentDefaultsHelpFormatter)
# parser.add_argument('--config_path', type=str, default="1d_gauss/20200501-0739_0")
parser.add_argument('--config_path', type=str, default="gaussians/20200713-1029_4")
# parser.add_argument('--config_path', type=str, default="mnist/20200504-1031_0")
parser.add_argument('--ais_id', type=int, default=0)
parser.add_argument('--eval_epoch_idx', type=str, default="best")
parser.add_argument('--do_estimate_log_par', type=int, default=0) # -1 == False, else True
parser.add_argument('--do_sample', type=int, default=-1) # -1 == False, else True
parser.add_argument('--ais_nuts_max_tree_depth', type=int, default=5) # -1 == False, else True
parser.add_argument('--do_assess_subbridges', type=int, default=-1) # -1 == False, else True
parser.add_argument('--do_assess_parameters', type=int, default=0) # -1 == False, else True
parser.add_argument('--sample_method', type=str, default="nuts")
parser.add_argument('--act_threshold_quantile', type=float, default=0.99)
# if we are only sampling (i.e. not computing partition function with AIS), then this is the number of sampling
# steps we use when performing annealed sampling. If None, then use the default value stored in config file.
parser.add_argument('--only_sample_total_n_steps', type=int, default=1000)
parser.add_argument('--only_sample_n_chains', type=int, default=-1)
# initial step size for annealed sampling
parser.add_argument('--ais_step_size_init', type=float, default=0.02)
parser.add_argument('--init_post_annealed_step_size', type=float, default=0.02)
# when doing annealed sampling with uncalibrated_langevin, we use an exponentially decreasing step size schedule.
# The final step size in this schedule is 10^-step_size_reduction_magnitude smaller than the initial step size.
parser.add_argument('--step_size_reduction_magnitude', type=float, default=2)
# After annealed sampling, we continue sampling from the entire model
parser.add_argument('--do_post_annealed_sample', type=int, default=0) # -1 == False, else True
parser.add_argument('--post_ais_n_samples_keep', type=int, default=20)
parser.add_argument('--post_ais_thinning_factor', type=int, default=0)
parser.add_argument('--post_ais_nuts_max_tree_depth', type=int, default=10)
parser.add_argument('--parallel_iterations', type=int, default=10)
parser.add_argument('--swap_memory', type=int, default=-1) # attempt to save gpu memory by using cpu when possible
parser.add_argument('--n_noise_samples_for_variational_losses', type=int, default=1000)
parser.add_argument('--frac', type=float, default=1.0)
parser.add_argument('--debug', type=int, default=-1)
args = parser.parse_args()
with open(project_root + "saved_models/{}/config.json".format(args.config_path)) as f:
config = json.load(f)
rename_save_dir(config)
if args.only_sample_n_chains == -1:
del args.only_sample_n_chains
config.update(vars(args))
config["do_estimate_log_par"] = False if args.do_estimate_log_par == -1 else True
config["do_sample"] = False if args.do_sample == -1 else True
config["do_post_annealed_sample"] = False if args.do_post_annealed_sample == -1 else True
config["do_assess_subbridges"] = False if args.do_assess_subbridges == -1 else True
config["do_assess_parameters"] = False if args.do_assess_parameters == -1 else True
config["swap_memory"] = False if args.swap_memory == -1 else True
config["debug"] = False if args.debug == -1 else True
if config["eval_epoch_idx"] == "final": # work out the final epoch number
metrics_save_dir = os.path.join(config["save_dir"], "model/every_x_epochs/")
epoch_nums = [x.split(".")[0] for x in os.listdir(metrics_save_dir) if "checkpoint" not in x]
config["eval_epoch_idx"] = str(max([int(x) for x in epoch_nums]))
if "data_dist_name" not in config: config["data_dist_name"] = None
save_config(config)
if config["debug"]:
config["do_assess_subbridges"] = True
config["do_assess_parameters"] = True
config["do_sample"] = False
config["do_estimate_log_par"] = True
config["do_post_annealed_sample"] = False
config["frac"] = 0.2
config["ais_n_chains"] = 10
config["ais_total_n_steps"] = 10
config["only_sample_n_chains"] = 10
config["only_sample_total_n_steps"] = 10
config["post_ais_n_samples_keep"] = 10
config["post_ais_thinning_factor"] = 5
config["n_noise_samples_for_variational_losses"] = 1000
globals().update(config)
return AttrDict(config) | 3e4e1a035220ae216b1beb22a27764c833e98566 | 3,655,386 |
import sys
def dijkstra(graph, source):
"""Find the shortest path from the source node to every other node in the given graph"""
# Declare and initialize result, unvisited, and path
result = {i: sys.maxsize if i != source else 0 for i in graph.nodes} # placeholder, by default set distance to maxsize
path = dict()
unvisited = set(graph.nodes)
while unvisited: # As long as unvisited is non-empty
min_node = None
# Find the unvisited node having smallest known distance from the source node.
for node in unvisited:
if min_node is None: # base case
min_node = node
elif result[node] < result[min_node]:
min_node = node # switch the nodes, so start with source, then next lowest...
"""tried to be fancy"""
# d = {i[0][1]: i[1] for i in graph.distances.items() if i[0][0] == node}
# min_node = min(d, key=d.get)
# result[min_node] = d[min_node]
current_distance = result[min_node]
# For the current node, find all the unvisited neighbours.
# For this, you have calculate the distance of each unvisited neighbour.
# unvisited_neighbours = unvisited.intersection(graph.neighbours[min_node]) does not work, might not be a path between nodes
for neighbour in graph.neighbours[min_node]:
if neighbour in unvisited:
distance = current_distance + graph.distances[(min_node, neighbour)]
# If the calculated distance of the unvisited neighbour is less than the already known distance in result dictionary,
# update the shortest distance in the result dictionary.
if distance < result[neighbour]:
result[neighbour] = distance
path[neighbour] = min_node
# Remove the current node from the unvisited set.
unvisited.remove(min_node)
# should do an ASSERT to check no values in result dict equal sys.maxsize
return result | 4c3fda4922795b8a47e7b94bf3a09016f5eb2551 | 3,655,387 |
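dijkstra expects a graph object exposing nodes, neighbours and distances; the minimal stand-in below is an assumption based on those attribute names, not the original graph class:

class Graph:
    def __init__(self):
        self.nodes = set()
        self.neighbours = {}
        self.distances = {}

    def add_edge(self, a, b, w):
        # undirected edge with weight w
        self.nodes.update([a, b])
        self.neighbours.setdefault(a, []).append(b)
        self.neighbours.setdefault(b, []).append(a)
        self.distances[(a, b)] = w
        self.distances[(b, a)] = w

g = Graph()
g.add_edge("A", "B", 1)
g.add_edge("B", "C", 2)
g.add_edge("A", "C", 5)
print(dijkstra(g, "A"))  # shortest distances: A=0, B=1, C=3 (key order may vary)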
def _snr_approx(array, source_xy, fwhm, centery, centerx):
"""
array - frame convolved with top hat kernel
"""
sourcex, sourcey = source_xy
rad = dist(centery, centerx, sourcey, sourcex)
ind_aper = draw.circle(sourcey, sourcex, fwhm/2.)
# noise : STDDEV in convolved array of 1px wide annulus (while
# masking the flux aperture) * correction of # of resolution elements
ind_ann = draw.circle_perimeter(int(centery), int(centerx), int(rad))
array2 = array.copy()
array2[ind_aper] = array[ind_ann].mean() # quick-n-dirty mask
n2 = (2*np.pi*rad)/fwhm - 1
noise = array2[ind_ann].std()*np.sqrt(1+(1/n2))
# signal : central px minus the mean of the pxs (masked) in 1px annulus
signal = array[sourcey, sourcex] - array2[ind_ann].mean()
snr = signal / noise
return sourcey, sourcex, snr | 6f055444163c03d0bcc61107db2045b968f06b52 | 3,655,388 |
import tensorflow as tf
def create_pipeline(training_set, validation_set, test_set):
"""
Create a pipeline for the training, validation and testing set
Parameters: training_set: Training data set
validation_set: Validation data set
test_set: Test data set
Returns: batch_size: Batch size
image_size: Image dimensions (width, height)
training_batches: Batches of training data set
validation_batches: Batches of validation data set
testing_batches: Batches of test data set
"""
# Define batch size and image size
batch_size = 64
image_size = 224
# Define function to convert images to appropriate format, resize to fit the input layer and normalize it
def format_image(image, label):
image = tf.cast(image, tf.float32)
image = tf.image.resize(image, [image_size, image_size])
image /= 255
return image, label
# Define batches, while modifying images according to above function as well as batch and prefetch them
training_batches = training_set.map(format_image).batch(batch_size).prefetch(1)
validation_batches = validation_set.map(format_image).batch(batch_size).prefetch(1)
testing_batches = test_set.map(format_image).batch(batch_size).prefetch(1)
return batch_size, image_size, training_batches, validation_batches, testing_batches | a6af6ff83180a0a11bfc3bacefd6a2e2261aaeed | 3,655,389 |
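A usage sketch with tiny in-memory datasets; the toy data and the numpy import are assumptions for illustration, and TensorFlow must be installed:

import numpy as np

def toy_dataset(n):
    images = np.random.rand(n, 32, 32, 3).astype(np.float32)
    labels = np.random.randint(0, 5, size=n)
    return tf.data.Dataset.from_tensor_slices((images, labels))

batch_size, image_size, train_b, val_b, test_b = create_pipeline(
    toy_dataset(8), toy_dataset(4), toy_dataset(4))
for images, labels in train_b.take(1):
    print(images.shape)  # (8, 224, 224, 3): resized and batched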
from typing import Tuple
from typing import Any
def invalid_request() -> Tuple[Any, int]:
"""Invalid request API response."""
return jsonify({API.Response.KEY_INFO: API.Response.VAL_INVALID_REQUEST}), 400 | 76a81f8c85014822f4fa306c917a06d92a89ea70 | 3,655,390 |
from pathlib import Path
from typing import List
from typing import Dict
from typing import Any
def _add_hyperparameters(
ranges_path: Path, defaults_path: Path
) -> List[Dict[str, Any]]:
"""Returns a list of hyperparameters in a format that is compatible with the json
reader of the ConfigSpace API.
The list is created from two files: a hp_space file that defines the ranges of the
hyperparameters and an options file that defines the default values of the
hyperparameters. Both are in json format.
Parameters
----------
ranges_path: Path
Path to the hp_space file
defaults_path: Path
Path to the options file
Returns
-------
List
A list of hyperparameters
"""
# load the ranges of the hyperparameters as a dict
ranges_dict = load_data(ranges_path)
ranges_dict = flatten_dictionary(ranges_dict)
# load the default values of the hyperparameters as a dict
defaults_dict = load_data(defaults_path)
defaults_dict = flatten_dictionary(defaults_dict)
hyperparameter_list = _add_ranges(ranges_dict)
hyperparameter_list = _add_defaults(hyperparameter_list, defaults_dict)
return hyperparameter_list | 9609d3f31ffaee69148360966b1040f1970399b3 | 3,655,391 |
def setup(app):
"""Setup extension."""
app.add_domain(StuffDomain)
app.connect("builder-inited", generate_latex_preamble)
app.connect("config-inited", init_numfig_format)
app.add_css_file("stuff.css")
app.add_enumerable_node(
StuffNode,
"stuff",
html=(html_visit_stuff_node, html_depart_stuff_node),
singlehtml=(html_visit_stuff_node, html_depart_stuff_node),
latex=(latex_visit_stuff_node, latex_depart_stuff_node),
)
app.add_node(
nodes.caption,
override=True,
html=(html_visit_caption_node, html_depart_caption_node),
singlehtml=(html_visit_caption_node, html_depart_caption_node),
latex=(latex_visit_caption_node, latex_depart_caption_node),
)
app.add_node(
ContentNode,
html=(html_visit_content_node, html_depart_content_node),
singlehtml=(html_visit_content_node, html_depart_content_node),
latex=(latex_visit_content_node, latex_depart_content_node),
)
return {"version": __version__, "parallel_read_safe": True} | 3c7a5d36c835e7339876cdf88673d79e5f76b590 | 3,655,392 |
from pathlib import Path
import typing
def has_checksum(path: Path, csum: str,
csum_fun: typing.Optional[Checksum] = None) -> bool:
"""
:return: True if the file at the path `path` has given checksum
"""
return get_checksum(path, csum_fun=csum_fun) == csum | e9bed6e0d82745113412e6dace5869aa32aa4fc9 | 3,655,393 |
import numpy as np
def remove_outliers(column):
"""
:param column: list of numbers
    :return: copy of the list with values outside [Q1 - 2*IQR, Q3 + 2*IQR] dropped
"""
if len(column) < 1:
return []
clean_column = []
q1 = np.percentile(column, 25)
q3 = np.percentile(column, 75)
#k = 1.5
k = 2
# [Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]
lower_bound = q1 - k*(q3-q1)
upper_bound = q3 + k*(q3-q1)
for c in column:
if c >= lower_bound and c <= upper_bound:
clean_column.append(c)
return clean_column | 04c1e736e27ffeaef528f25fd303d0f27c3a94ac | 3,655,394 |
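A quick check of remove_outliers, assuming numpy and the function above are in scope; with k = 2 the value 95 falls outside Q3 + 2*IQR and is dropped:

print(remove_outliers([10, 12, 11, 13, 12, 11, 95]))  # -> [10, 12, 11, 13, 12, 11]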
import json
from datetime import datetime
import pytz
def lambda_handler(event, context):
"""
Lambda entry-point
"""
news_link = "https://news.ok.ubc.ca/feed/"
news_items = []
filtered_news_items = []
response_items = get_news_items_from_web(news_link)
if len(response_items) == 0:
return {"status": "No items in RSS Feed"}
# Iterate through list of raw items and parse them, if there is a parsing error, save the raw item that throws an
# error to S3
for item in response_items:
try:
news_item = news_parser(item)
news_items.append(news_item)
except Exception as e:
S3_CLIENT.put_object(Body=json.dumps(item, indent=4), Bucket=S3_BUCKET_NAME,
Key=f'ErrorLog/News/{str(datetime.now(tz=pytz.timezone("America/Vancouver")))[:-13]}.json')
LOGGER.error(f"Error in parsing a news item, raw item saved to {S3_BUCKET_NAME}/ErrorLog/News")
detailed_exception(LOGGER)
# Filter the parsed items based on last query time to get only new items
try:
last_query_time = SSM_CLIENT.get_parameter(Name="NewsQueryTime")["Parameter"]["Value"]
for news_item in news_items:
if datetime.strptime(last_query_time, "%Y-%m-%d %H:%M:%S") \
< datetime.strptime(news_item["dateModified"], "%Y-%m-%d %H:%M:%S"):
filtered_news_items.append(news_item)
SSM_CLIENT.put_parameter(Name="NewsQueryTime",
Value=str(datetime.now(tz=pytz.timezone("America/Vancouver")))[:-13],
Overwrite=True)
except SSM_CLIENT.exceptions.InternalServerError as e:
LOGGER.error("Error in communicating with Parameter store")
detailed_exception(LOGGER)
LOGGER.debug(json.dumps(news_items, indent=4))
LOGGER.debug(json.dumps(filtered_news_items, indent=4))
# Save new items to central data lake S3
if len(filtered_news_items) != 0:
S3_CLIENT.put_object(Body=json.dumps(filtered_news_items, indent=4), Bucket=S3_BUCKET_NAME,
Key=f'News/{str(datetime.now(tz=pytz.timezone("America/Vancouver")))[:-13]}.json')
# Insert items into DynamoDB table with appropriate TTL
table = DYNAMODB_RESOURCE.Table(NEWS_TABLE)
for events_item in filtered_news_items:
events_item["expiresOn"] = get_adjusted_unix_time(events_item["dateModified"], "%Y-%m-%d %H:%M:%S",
EXPIRY_DAYS_OFFSET * 24)
table.put_item(Item=events_item)
return {"status": "completed"} | cd188a0179ee74335202b865edb8b5216dbf50b8 | 3,655,395 |
def read_photons(photonfile, ra0, dec0, tranges, radius, verbose=0,
colnames=['t', 'x', 'y', 'xa', 'ya', 'q', 'xi', 'eta', 'ra',
'dec', 'flags']):
"""
Read a photon list file and return a python dict() with the expected
format.
:param photonfile: Name of the photon event file to use.
:type photonfile: str
:param ra0: Right ascension of the targeted sky position, in degrees.
:type ra0: float
:param dec0: Declination of the targeted sky position, in degrees.
:type dec0: float
:param tranges: Set of time ranges from which to retrieve photon events,
in GALEX time units
:type tranges: list
:param radius: The radius, in degrees, defining a cone on the sky that
is centered on ra0 and dec0, from which to extract photons.
:type radius: float
:param verbose: Verbosity level, a value of 0 is minimum verbosity.
:type verbose: int
:param colnames: Labels of the columns found in the photon event file.
:type colnames: list
:returns: dict -- The set of photon events and their properties.
"""
# [Future]: Consider moving this method to 'dbasetools'.
if verbose:
mc.print_inline('Reading photon list file: {f}'.format(f=photonfile))
data = pd.io.parsers.read_csv(photonfile, names=colnames)
ra, dec = np.array(data['ra']), np.array(data['dec'])
angsep = mc.angularSeparation(ra0, dec0, ra, dec)
ix = np.array([])
for trange in tranges:
cut = np.where((angsep <= radius) & (np.isfinite(angsep)))[0]
ix = np.concatenate((ix, cut), axis=0)
events = {'t':np.array(data['t'][ix])/tscale,
'ra':np.array(data['ra'][ix]),
'dec':np.array(data['dec'][ix]),
'xi':np.array(data['xi'][ix]),
'eta':np.array(data['eta'][ix]),
'x':np.array(data['x'][ix]),
'y':np.array(data['y'][ix])}
return events | c83958f8ae541e5df564c5ce53dd40593c9dfc3e | 3,655,396 |
def normElec(surf, electrode, normdist, NaN_as_zeros=True):
"""
Notes
-----
When `normway` is a scalar, it takes the normal of the points of the mesh which are closer
than `normway`. However, some points have a normal of (0, 0, 0) (default assigned
if the vertex does not belong to any triangle). projectElectrodes.m includes
those (0, 0, 0) in the calculation, but it might not be correct.
See l. 138 (there are no NaN in normals but only (0, 0, 0)).
To replicate the matlab behavior, make sure that `NaN_as_zeros` is True.
"""
dvect = norm(electrode - surf['pos'], axis=1) # l. 104-112 of projectElectrodes.m
closevert = dvect < normdist # l. 120 of projectElectrodes.m
normal = surf['pos_norm'][closevert, :].mean(axis=0) # l. 144 of projectElectrodes.m
normals2av = surf['pos_norm'][closevert, :].copy()
if NaN_as_zeros:
normals2av[isnan(normals2av)] = 0
normal = nanmean(normals2av, axis=0)
return normal | d449f4518c589a2a68b64ca812d964cb6249694e | 3,655,397 |
def filter_sources(sources, release):
"""Check if a source has already been consumed. If has not then add it to
sources dict.
"""
source, version, dist, arch = parse_release(release)
if source not in sources.keys():
sources[source] = {version: {dist: [arch]}}
return True
elif version not in sources[source].keys():
sources[source][version] = {dist: [arch]}
return True
elif dist not in sources[source][version]:
sources[source][version][dist] = [arch]
return True
elif arch not in sources[source][version][dist]:
sources[source][version][dist].append(arch)
return True
return False | 661d379291170a4994c0813d24820007e47bd092 | 3,655,398 |
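filter_sources relies on a parse_release helper that is not shown; the stub below assumes a simple underscore-separated release string purely for illustration:

def parse_release(release):
    """Hypothetical helper: split 'source_version_dist_arch'."""
    return release.split("_")

sources = {}
print(filter_sources(sources, "pkg_1.0_bullseye_amd64"))  # True, first time seen
print(filter_sources(sources, "pkg_1.0_bullseye_amd64"))  # False, already consumed
print(filter_sources(sources, "pkg_1.0_bullseye_arm64"))  # True, new architecture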
from typing import Union
from typing import Dict
from typing import Any
async def train(model, *args: Union[BaseSource, Record, Dict[str, Any]]):
"""
Train a machine learning model.
Provide records to the model to train it. The model should be already
instantiated.
Parameters
----------
model : Model
Machine Learning model to use. See :doc:`/plugins/dffml_model` for
models options.
*args : list
Input data for training. Could be a ``dict``, :py:class:`Record`,
filename, one of the data :doc:`/plugins/dffml_source`, or a filename
with the extension being one of the data sources.
Examples
--------
>>> import asyncio
>>> from dffml import *
>>>
>>> model = SLRModel(
... features=Features(
... Feature("Years", int, 1),
... ),
... predict=Feature("Salary", int, 1),
... directory="tempdir",
... )
>>>
>>> async def main():
... await train(
... model,
... {"Years": 0, "Salary": 10},
... {"Years": 1, "Salary": 20},
... {"Years": 2, "Salary": 30},
... {"Years": 3, "Salary": 40},
... )
>>>
>>> asyncio.run(main())
"""
sources = _records_to_sources(*args)
async with sources as sources, model as model:
async with sources() as sctx, model() as mctx:
return await mctx.train(sctx) | 9a8e1648247a8eb3c8354c324ac2c48a52617899 | 3,655,399 |