content | sha1 | id
---|---|---|
def get_groups_data():
"""
    Get all groups, get all users for each group, and sort the users within each group
    :return: list of groups with their users sorted
"""
groups = [group["name"] for group in jira.get_groups(limit=200)["groups"]]
groups_and_users = [get_all_users(group) for group in groups]
groups_and_users = [sort_users_in_group(group) for group in groups_and_users]
return groups_and_users | 9ec0d3772b438f10edde4a4bad591f249709de98 | 3,649,164 |
def hindu_lunar_holiday(l_month, l_day, g_year):
"""Return the list of fixed dates of occurrences of Hindu lunar
month, month, day, day, in Gregorian year, g_year."""
l_year = hindu_lunar_year(
hindu_lunar_from_fixed(gregorian_new_year(g_year)))
date1 = hindu_date_occur(l_month, l_day, l_year)
date2 = hindu_date_occur(l_month, l_day, l_year + 1)
return list_range([date1, date2], gregorian_year_range(g_year)) | fa9bafead696b177a137c12b7544c8e71c4f2f43 | 3,649,166 |
import copy
def identify_all_failure_paths(network_df_in,edge_failure_set,flow_dataframe,path_criteria):
"""Identify all paths that contain an edge
Parameters
    ----------
network_df_in - Pandas DataFrame of network
edge_failure_set - List of string edge ID's
flow_dataframe - Pandas DataFrame of list of edge paths
path_criteria - String name of column of edge paths in flow dataframe
Outputs
-------
network_df - Pandas DataFrame of network
With removed edges
edge_path_index - List of integer indexes
Of locations of paths in flow dataframe
"""
edge_path_index = []
network_df = copy.deepcopy(network_df_in)
for edge in edge_failure_set:
network_df = network_df[network_df.edge_id != edge]
edge_path_index += flow_dataframe.loc[flow_dataframe[path_criteria].str.contains(
"'{}'".format(edge))].index.tolist()
edge_path_index = list(set(edge_path_index))
return network_df, edge_path_index | db2da6ad20a4ae547c309ac63b6e68a17c3874e7 | 3,649,167 |
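A minimal sketch of how identify_all_failure_paths might be called; the toy column values below (edge IDs, list-like path strings) are illustrative assumptions:
import pandas as pd
network = pd.DataFrame({"edge_id": ["e1", "e2", "e3"]})
flows = pd.DataFrame({"edge_path": ["['e1', 'e2']", "['e3']"]})
remaining, hit_paths = identify_all_failure_paths(network, ["e1"], flows, "edge_path")
# remaining keeps e2 and e3; hit_paths == [0], since only the first path uses e1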
def wikipedia_wtap_setup():
"""
A commander has 5 tanks, 2 aircraft and 1 sea vessel and is told to
engage 3 targets with values 5,10,20 ...
"""
tanks = ["tank-{}".format(i) for i in range(5)]
aircrafts = ["aircraft-{}".format(i) for i in range(2)]
ships = ["ship-{}".format(i) for i in range(1)]
weapons = tanks + aircrafts + ships
target_values = {1: 5, 2: 10, 3: 20}
tank_probabilities = [
(1, 0.3),
(2, 0.2),
(3, 0.5),
]
aircraft_probabilities = [
(1, 0.1),
(2, 0.6),
(3, 0.5),
]
sea_vessel_probabilities = [
(1, 0.4),
(2, 0.5),
(3, 0.4)
]
category_and_probabilities = [
(tanks, tank_probabilities),
(aircrafts, aircraft_probabilities),
(ships, sea_vessel_probabilities)
]
probabilities = []
for category, probs in category_and_probabilities:
for vehicle in category:
for prob in probs:
probabilities.append((vehicle,) + prob)
g = Graph(from_list=probabilities)
return g, weapons, target_values | 828746bef74b88bde1a9c72a79338ec05591721a | 3,649,168 |
def allowed_file(filename: str) -> bool:
"""Determines whether filename is allowable
Parameters
----------
filename : str
a filename
Returns
-------
bool
True if allowed
"""
return "." in filename and filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS | fd05abc21025c9eb49f7e3426b4e183b178361c4 | 3,649,169 |
def update_bitweights(realization, asgn, tileids, tg_ids, tg_ids2idx, bitweights):
"""
Update bit weights for assigned science targets
"""
for tileid in tileids:
try: # Find which targets were assigned
adata = asgn.tile_location_target(tileid)
for loc, tgid in adata.items():
idx = tg_ids2idx[tgid]
bitweights[realization * len(tg_ids) + idx] = True
        except Exception:
            # Tile has no assignments for this realization; skip it
            pass
return bitweights | f1b7e085d43e36b025aa1c61ab1b7156ba1d3ed7 | 3,649,171 |
def load_from_input_flags(params, params_source, input_flags):
"""Update params dictionary with input flags.
Args:
params: Python dictionary of hyperparameters.
params_source: Python dictionary to record source of hyperparameters.
input_flags: All the flags with non-null value of overridden
hyperparameters.
Returns:
    Tuple of (params, params_source): the updated hyperparameter dict and the
    dict recording the source of each hyperparameter.
"""
if params is None:
raise ValueError(
'Input dictionary is empty. It is expected to be loaded with default '
'values')
if not isinstance(params, dict):
raise ValueError(
'The base parameter set must be a Python dict, was: {}'.format(
type(params)))
for key in params:
flag_value = input_flags.get_flag_value(key, None)
if flag_value is not None:
params[key] = flag_value
params_source[key] = 'Command-line flags'
return params, params_source | 7ec8662f03469f1ed03f29c9f7e9663c49aa7056 | 3,649,172 |
def hal(_module_patch):
    """Simulated hal module"""
    # Import inside the fixture: the module-level name `hal` is shadowed by
    # this function, so importing here is the only way to return the module.
    import hal
    return hal | 3ab217e0cbce54d6dab01217c829905dc61bf06c | 3,649,173 |
def elements(all_isotopes=True):
"""
Loads a DataFrame of all elements and isotopes.
Scraped from https://www.webelements.com/
Returns
-------
pandas DataFrame with columns (element, atomic_number, isotope, atomic_weight, percent)
"""
el = pd.read_pickle(pkgrs.resource_filename('latools', 'resources/elements.pkl'))
if all_isotopes:
return el.set_index('element')
else:
def wmean(g):
return (g.atomic_weight * g.percent).sum() / 100
iel = el.groupby('element').apply(wmean)
iel.name = 'atomic_weight'
return iel | d706ee5ffaa8c756c9e85f3e143876070f8f81e4 | 3,649,174 |
import typing
def create_steps_sequence(num_steps: Numeric, axis: str) -> typing.List[typing.Tuple[float, str]]:
"""
Returns a list of num_steps tuples: [float, str], with given string parameter, and
    the floating-point parameter increasing linearly from 0 to 1.
Example:
>>> create_steps_sequence(5, 'X')
[(0.0, 'X'), (0.2, 'X'), (0.4, 'X'), (0.6, 'X'), (0.8, 'X')]
"""
if isinstance(num_steps, float):
num_steps = int(num_steps)
if num_steps == 0:
return []
sequence = []
for step in range(num_steps):
sequence.append((step * 1.0 / num_steps, axis))
return sequence | 3f7e2010a3360c90bec81a02228b1a7590686175 | 3,649,176 |
def disable_doze_light(ad):
"""Force the device not in doze light mode.
Args:
ad: android device object.
Returns:
True if device is not in doze light mode.
False otherwise.
"""
ad.adb.shell("dumpsys battery reset")
ad.adb.shell("cmd deviceidle disable light")
adb_shell_result = ad.adb.shell("dumpsys deviceidle get light").decode(
'utf-8')
if not adb_shell_result.startswith(DozeModeStatus.ACTIVE):
info = ("dumpsys deviceidle get light: {}".format(adb_shell_result))
print(info)
return False
return True | d2054ae8f84a45b360ded839badfbd19fea83b11 | 3,649,177 |
def jobs():
""" List all jobs """
return jsonify(job.get_jobs()) | c7141011c59851586d327185892ea61d7a11ef58 | 3,649,178 |
def get_pipelines():
"""Get pipelines."""
return PIPELINES | 2d770a9fa189dd534528d26794f8887c638723f4 | 3,649,179 |
import re
def tokenize(s):
"""
Tokenize on parenthesis, punctuation, spaces and American units followed by a slash.
We sometimes give American units and metric units for baking recipes. For example:
    * 2 tablespoons/30 milliliters milk or cream
* 2 1/2 cups/300 grams all-purpose flour
The recipe database only allows for one unit, and we want to use the American one.
But we must split the text on "cups/" etc. in order to pick it up.
"""
return filter(None, re.split(r"([,()])?\s+", clump_fractions(normalise(s)))) | 04575ff78cb73515fafcda541177d53d330bd510 | 3,649,180 |
def makeColorMatrix(n, bg_color, bg_alpha, ix=None,
fg_color=[228/255.0, 26/255.0, 28/255.0], fg_alpha=1.0):
"""
Construct the RGBA color parameter for a matplotlib plot.
This function is intended to allow for a set of "foreground" points to be
colored according to integer labels (e.g. according to clustering output),
while "background" points are all colored something else (e.g. light gray).
It is used primarily in the interactive plot tools for DeBaCl but can also
be used directly by a user to build a scatterplot from scratch using more
complicated DeBaCl output. Note this function can be used to build an RGBA
color matrix for any aspect of a plot, including point face color, edge
color, and line color, despite use of the term "points" in the descriptions
below.
Parameters
----------
n : int
Number of data points.
bg_color : list of floats
A list with three entries, specifying a color in RGB format.
bg_alpha : float
Specifies background point opacity.
ix : list of ints, optional
Identifies foreground points by index. Default is None, which does not
distinguish between foreground and background points.
fg_color : list of ints or list of floats, optional
Only relevant if 'ix' is specified. If 'fg_color' is a list of integers
then each entry in 'fg_color' indicates the color of the corresponding
foreground point. If 'fg_color' is a list of 3 floats, then all
foreground points will be that RGB color. The default is to color all
foreground points red.
fg_alpha : float, optional
Opacity of the foreground points.
Returns
-------
rgba : 2D numpy array
An 'n' x 4 RGBA array, where each row corresponds to a plot point.
"""
    rgba = np.zeros((n, 4), dtype=float)  # np.float alias was removed in NumPy 1.24
rgba[:, 0:3] = bg_color
rgba[:, 3] = bg_alpha
if ix is not None:
if np.array(fg_color).dtype.kind == 'i':
palette = Palette()
fg_color = palette.applyColorset(fg_color)
rgba[ix, 0:3] = fg_color
rgba[ix, 3] = fg_alpha
return rgba | 7ef7a7cfb6cd4a6bcb97086382e6b95e5340ce78 | 3,649,181 |
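A hedged usage sketch for makeColorMatrix: paint 100 background points light gray and highlight the first five in the default red (a float fg_color avoids the integer-label path, which needs the Palette helper):
rgba = makeColorMatrix(100, bg_color=[0.8, 0.8, 0.8], bg_alpha=0.4, ix=[0, 1, 2, 3, 4])
# rgba has shape (100, 4) and can be passed straight to matplotlib,
# e.g. plt.scatter(x, y, c=rgba)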
import itertools
def closest_pair(points):
"""
    Closest pair of points, O(N log N)
Verify: http://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=CGL_5_A&lang=ja
:param list of Point points:
:rtype: (float, (Point, Point))
    :return: (distance, closest pair)
"""
assert len(points) >= 2
def _rec(xsorted):
"""
:param list of Point xsorted:
:rtype: (float, (Point, Point))
"""
n = len(xsorted)
if n <= 2:
return xsorted[0].dist(xsorted[1]), (xsorted[0], xsorted[1])
if n <= 3:
            # brute force
d = INF
pair = None
for p, q in itertools.combinations(xsorted, r=2):
if p.dist(q) < d:
d = p.dist(q)
pair = p, q
return d, pair
        # divide and conquer
        # closest pair on each half
ld, lp = _rec(xsorted[:n // 2])
rd, rp = _rec(xsorted[n // 2:])
if ld <= rd:
d = ld
ret_pair = lp
else:
d = rd
ret_pair = rp
mid_x = xsorted[n // 2].x
        # collect the points within d of the dividing line
mid_points = []
for p in xsorted:
# if abs(p.x - mid_x) < d:
if abs(p.x - mid_x) - d < -EPS:
mid_points.append(p)
        # update if any pair among these is closer than d
mid_points.sort(key=lambda p: p.y)
mid_n = len(mid_points)
for i in range(mid_n - 1):
j = i + 1
p = mid_points[i]
q = mid_points[j]
# while q.y - p.y < d
while (q.y - p.y) - d < -EPS:
pq_d = p.dist(q)
if pq_d < d:
d = pq_d
ret_pair = p, q
j += 1
if j >= mid_n:
break
q = mid_points[j]
return d, ret_pair
return _rec(list(sorted(points, key=lambda p: p.x))) | fbb189269b6d1fcbf214d8030d49bb0605b375c2 | 3,649,182 |
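closest_pair relies on a Point class plus INF and EPS globals that live elsewhere in the original module; a minimal stand-in (an assumption, not the original definitions) lets the function run:
import math
INF = float("inf")
EPS = 1e-9

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y
    def dist(self, other):
        return math.hypot(self.x - other.x, self.y - other.y)

d, (p, q) = closest_pair([Point(0, 0), Point(5, 5), Point(1, 1)])
# d == sqrt(2); the closest pair is (0, 0) and (1, 1)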
def less_equals(l,r):
"""
| Forms constraint :math:`l \leq r`.
:param l: number,
:ref:`scalar object<scalar_ref>` or
:ref:`multidimensional object<multi_ref>`.
:param r: number,
:ref:`scalar object<scalar_ref>` or
:ref:`multidimensional object<multi_ref>`.
:return: :ref:`constraint<constr_obj>` or
:ref:`list of constraints<constr_list_obj>`.
"""
return compare(l,LESS_EQUALS,r) | ba37a090cbbf1d7db99411d67e9eda572c1f0153 | 3,649,183 |
def ensureImageMode(tex : Image, mode="RGBA") -> Image:
"""Ensure the passed image is in a given mode. If it is not, convert it.
https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes
:param Image tex: The image whose mode to check
:param str mode: The mode to ensure and convert to if needed
:return: tex if it is of the given mode. tex converted to mode otherwise.
:rtype: Image
"""
return tex if tex.mode == mode else tex.convert(mode) | 9b77763fbfea0f66b4b4d7151cdf595f2e2b8aa6 | 3,649,184 |
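A quick Pillow usage sketch:
from PIL import Image
img = Image.new("RGB", (4, 4))
rgba = ensureImageMode(img)   # converted to RGBA
same = ensureImageMode(rgba)  # already RGBA, returned unchanged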
def code2name(code: int) -> str:
""" Convert prefecture code to name """
return __code2name[code] | d2ca1a3977915359afd8254337e14c6fd13db8b3 | 3,649,187 |
def newton(backward_differences, max_num_iters, newton_coefficient, ode_fn_vec,
order, step_size, time, tol, unitary, upper):
"""Runs Newton's method to solve the BDF equation."""
initial_guess = tf.reduce_sum(
tf1.where(
tf.range(MAX_ORDER + 1) <= order,
backward_differences[:MAX_ORDER + 1],
tf.zeros_like(backward_differences)[:MAX_ORDER + 1]),
axis=0)
rhs_constant_term = newton_coefficient * tf.reduce_sum(
tf1.where(
tf.range(1, MAX_ORDER + 1) <= order, RECIPROCAL_SUMS[1:, np.newaxis] *
backward_differences[1:MAX_ORDER + 1],
tf.zeros_like(backward_differences)[1:MAX_ORDER + 1]),
axis=0)
next_time = time + step_size
step_size_cast = tf.cast(step_size, backward_differences.dtype)
real_dtype = tf.abs(backward_differences).dtype
def newton_body(iterand):
"""Performs one iteration of Newton's method."""
next_backward_difference = iterand.next_backward_difference
next_state_vec = iterand.next_state_vec
rhs = newton_coefficient * step_size_cast * ode_fn_vec(
next_time,
next_state_vec) - rhs_constant_term - next_backward_difference
delta = tf.squeeze(
tf.linalg.triangular_solve(
upper,
tf.matmul(tf.transpose(unitary), rhs[:, tf.newaxis]),
lower=False))
num_iters = iterand.num_iters + 1
next_backward_difference += delta
next_state_vec += delta
delta_norm = tf.cast(tf.norm(delta), real_dtype)
lipschitz_const = delta_norm / iterand.prev_delta_norm
# Stop if method has converged.
approx_dist_to_sol = lipschitz_const / (1. - lipschitz_const) * delta_norm
close_to_sol = approx_dist_to_sol < tol
delta_norm_is_zero = tf.equal(delta_norm, tf.constant(0., dtype=real_dtype))
converged = close_to_sol | delta_norm_is_zero
finished = converged
# Stop if any of the following conditions are met:
# (A) We have hit the maximum number of iterations.
# (B) The method is converging too slowly.
# (C) The method is not expected to converge.
too_slow = lipschitz_const > 1.
finished = finished | too_slow
if max_num_iters is not None:
too_many_iters = tf.equal(num_iters, max_num_iters)
num_iters_left = max_num_iters - num_iters
num_iters_left_cast = tf.cast(num_iters_left, real_dtype)
wont_converge = (
approx_dist_to_sol * lipschitz_const**num_iters_left_cast > tol)
finished = finished | too_many_iters | wont_converge
return [
_NewtonIterand(
converged=converged,
finished=finished,
next_backward_difference=next_backward_difference,
next_state_vec=next_state_vec,
num_iters=num_iters,
prev_delta_norm=delta_norm)
]
iterand = _NewtonIterand(
converged=False,
finished=False,
next_backward_difference=tf.zeros_like(initial_guess),
next_state_vec=tf.identity(initial_guess),
num_iters=0,
prev_delta_norm=tf.constant(np.array(-0.), dtype=real_dtype))
[iterand] = tf.while_loop(lambda iterand: tf.logical_not(iterand.finished),
newton_body, [iterand])
return (iterand.converged, iterand.next_backward_difference,
iterand.next_state_vec, iterand.num_iters) | 9a5e6e45357d2d769153bf6854818e22df7639f3 | 3,649,188 |
import random
def input(channel):
"""
To read the value of a GPIO pin:
:param channel:
:return:
"""
return LOW if random.random() < 0.5 else HIGH | 838df044dc18c443e2f35f7f67a8e07b8276e1a3 | 3,649,190 |
def kml_start(params):
"""Define basic kml
header string"""
kmlstart = '''
<Document>
<name>%s</name>
<open>1</open>
<description>%s</description>
'''
return kmlstart % (params[0], params[1]) | c2fa4c1eeff086dfc3baa41ecd067634920b25b1 | 3,649,191 |
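Usage sketch: the two-element params sequence supplies the document name and description:
print(kml_start(["My Places", "A sample KML document"]))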
def add_item_to_do_list():
    """
    Asks users to keep entering items to add to a new To Do list until they enter the word 'stop'
    :return: to do list with new items
    """
    to_do_list = []
    while True:
        item = input("Enter an item to add (or 'stop' to finish): ")
        if item.lower() == "stop":
            break
        to_do_list.append(item)
    return to_do_list | 4c133ea3c05024a51dda2fb9f01dcc30926f84f4 | 3,649,192 |
def parse(tokens):
"""Currently parse just supports fn, variable and constant definitions."""
context = Context()
context.tokens = tokens
while tokens:
parse_token(context)
if context.stack:
raise CompileError("after parsing, there are still words on the stack!!:\n{0}".format(
context.stack))
return context | 89dce5a630dd0bd657963185ac533738bee7d6a5 | 3,649,193 |
def get_file_iterator(options):
"""
returns a sequence of files
    raises IOError if problematic
    raises ValueError if problematic
"""
# -------- BUILD FILE ITERATOR/GENERATOR --------
if options.f is not None:
files = options.f
elif options.l is not None:
try:
lfile = open(options.l, 'r')
# make a generator of non-blank lines
files = (line.strip() for line in lfile if line.strip())
except IOError:
msg = "{0} does not exist.".format(options.l)
raise IOError(msg)
else:
msg = "Must provide input files or file list."
raise ValueError(msg)
return files | 53b16f49d14dc346e404a63415772dd2a1d10f50 | 3,649,195 |
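A sketch of the options object this expects; any namespace exposing .f and .l attributes works (e.g. an argparse result), so SimpleNamespace stands in here:
from types import SimpleNamespace
opts = SimpleNamespace(f=["a.txt", "b.txt"], l=None)
files = get_file_iterator(opts)   # -> ["a.txt", "b.txt"]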
def entropy_from_CT(SA, CT):
"""
Calculates specific entropy of seawater.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
CT : array_like
Conservative Temperature [:math:`^\circ` C (ITS-90)]
Returns
-------
entropy : array_like
specific entropy [J kg :sup:`-1` K :sup:`-1`]
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> CT = [28.8099, 28.4392, 22.7862, 10.2262, 6.8272, 4.3236]
>>> gsw.entropy_from_CT(SA, CT)
array([ 400.38916315, 395.43781023, 319.86680989, 146.79103279,
98.64714648, 62.79185763])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See appendix A.10.
"""
SA = np.maximum(SA, 0)
pt0 = pt_from_CT(SA, CT)
return -gibbs(n0, n1, n0, SA, pt0, 0) | dfaaeef93ed924bc5e49fb02c30b6cc43ef824e0 | 3,649,196 |
def checking_log(input_pdb_path: str, output_log_path: str, properties: dict = None, **kwargs) -> int:
"""Create :class:`CheckingLog <model.checking_log.CheckingLog>` class and
execute the :meth:`launch() <model.checking_log.CheckingLog.launch>` method."""
return CheckingLog(input_pdb_path=input_pdb_path,
output_log_path=output_log_path,
properties=properties, **kwargs).launch() | c6cb77585920609e12b90f5f783ceb73b58afb8b | 3,649,197 |
def get_registry_by_name(cli_ctx, registry_name, resource_group_name=None):
"""Returns a tuple of Registry object and resource group name.
:param str registry_name: The name of container registry
:param str resource_group_name: The name of resource group
"""
resource_group_name = get_resource_group_name_by_registry_name(
cli_ctx, registry_name, resource_group_name)
client = cf_acr_registries(cli_ctx)
return client.get(resource_group_name, registry_name), resource_group_name | ca4bcee260f035a7921e772dffaace379e0ab115 | 3,649,199 |
def plot_karyotype_summary(haploid_coverage,
chromosomes,
chrom_length,
output_dir,
bed_filename,
bed_file_sep=',',
binsize=1000000,
overlap=50000,
cov_min=5,
cov_max=200,
min_PL_length=3000000,
chroms_with_text=None):
"""
Plots karyotype summary for the whole genome with data preparation.
:param haploid_coverage: the average coverage of haploid regions (or the half of that of diploid regions)
:param chromosomes: list of chromosomes in the genome (list of str)
:param chrom_length: list of chromosome lengths (list of int)
:param output_dir: the path to the directory where PE_fullchrom_[chrom].txt files are located (str)
:param bed_filename: the path to the bed file of the sample with ploidy and LOH information (str)
:param bed_file_sep: bed file separator (default: ',') (str)
:param binsize: the binsize used for moving average (default: 1000000) (int)
:param overlap: the overlap used for moving average (default: 50000) (int, smaller than binsize)
:param cov_min: the minimum coverage for a position to be included (default: 5) (int)
    :param cov_max: the maximum coverage for a position to be included (default: 200) (int)
:param min_PL_length: the minimal length of a region to be plotted (default: 3000000) (int)
:param chroms_with_text: the list of chromosomes to be indicated with text on the plot (list of str) (If there are many short chromosomes or they have long names, it is useful to only indicate a few with text on the plot.)
:returns: a matplotlib figure
"""
real_pos, dr, dr_25, dr_75, baf, baf_25, baf_75 = __get_BAF_and_DR(avg_dip_cov=haploid_coverage * 2,
chroms=chromosomes,
chrom_length_list=chrom_length,
datadir=output_dir,
binsize=binsize,
overlap=overlap,
cov_min=cov_min,
cov_max=cov_max)
s0, s1, loh_pos, loh = __get_PL_and_LOH(bed_filename=bed_filename,
chroms=chromosomes,
chrom_lenght_list=chrom_length,
bed_file_sep=bed_file_sep,
numtoplot=5000,
minlength=min_PL_length)
f = __plot_karyotype(real_pos=real_pos,
dr=dr,
dr_25=dr_25,
dr_75=dr_75,
baf=baf,
baf_25=baf_25,
baf_75=baf_75,
s0=s0,
s1=s1,
loh_pos=loh_pos,
loh=loh,
all_chroms=chromosomes,
chrom_length_list=chrom_length,
chroms_with_text=chroms_with_text)
return f | 5234b2ac9e459cfe445be6820abb97821503f554 | 3,649,200 |
import torch
def probs_to_mu_sigma(probs):
"""Calculate mean and covariance matrix for each channel of probs
tensor of keypoint probabilites [N, C, H, W]
mean calculated on a grid of scale [-1, 1]
Parameters
----------
probs : torch.Tensor
tensor of shape [N, C, H, W] where each channel along axis 1
is interpreted as a probability density.
Returns
-------
mu : torch.Tensor
tensor of shape [N, C, 2] representing partwise mean coordinates
of x and y for each item in the batch
sigma : torch.Tensor
tensor of shape [N, C, 2, 2] representing covariance matrix
for each item in the batch
"""
bn, nk, h, w = shape_as_list(probs)
y_t = tile(torch.linspace(-1, 1, h).view(h, 1), w, 1)
x_t = tile(torch.linspace(-1, 1, w).view(1, w), h, 0)
y_t = torch.unsqueeze(y_t, dim=-1)
x_t = torch.unsqueeze(x_t, dim=-1)
meshgrid = torch.cat([y_t, x_t], dim=-1)
if probs.is_cuda:
meshgrid = meshgrid.to(probs.device)
mu = torch.einsum("ijl,akij->akl", meshgrid, probs)
mu_out_prod = torch.einsum("akm,akn->akmn", mu, mu)
mesh_out_prod = torch.einsum("ijm,ijn->ijmn", meshgrid, meshgrid)
sigma = torch.einsum("ijmn,akij->akmn", mesh_out_prod, probs) - mu_out_prod
return mu, sigma | d0653e50d1f9ec4125e9b30c10a0e6cb78c6dc8e | 3,649,202 |
def fetch_biomart_genes_mm9():
"""Fetches mm9 genes from Ensembl via biomart."""
return _fetch_genes_biomart(
host='http://may2012.archive.ensembl.org',
gene_name_attr='external_gene_id') | f184608e87a2d390b47fd0f78d293dfd52064ad0 | 3,649,203 |
def tweet_words(tweet):
"""Return the words in a tweet."""
return extract_words(tweet_text(tweet)) | c207553fa1bd718083d26e57a9daea43c5629116 | 3,649,204 |
def meter_statistics(meter_id,api_endpoint,token,meter_list,web,**kwargs):
"""
Get the statistics for the specified meter.
Args:
meter_id(string): The meter name.
api_endpoint(string): The api endpoint for the ceilometer service.
token(string): X-Auth-token.
meter_list(list): The list of available meters.
Returns:
bool: True if successful, False otherwise.
list: The list with the meter statistics.
"""
meter_stat = [None]
headers = {
#'Accept': 'application/json',
'Content-Type': 'application/json;',
'Accept': 'application/json',
'X-Auth-Token': token
}
path = "/v2/meters/"+meter_id+"/statistics?"
q=kwargs.pop('q')
target = urlparse(api_endpoint+path+q)
method = 'GET'
logger.info('Inside meter-statistics: Path is %s',target)
if(web==False):
from_date,to_date,from_time,to_time,resource_id,user_id,status_q=query()
if(status_q==True):
q=set_query(from_date,to_date,from_time,to_time,resource_id,user_id,status_q)
body="{"+q
period=raw_input("Do you want to define a time period? Enter 'Y' if yes, 'N' if no.")
if(period=="Y"):
period_def=raw_input("Enter the desired time period in seconds: ")
body=body+',"period":'+period_def
groupby=raw_input("Do you want to define a group by value? Enter 'Y' if yes, 'N' if no.")
if (groupby=="Y") :
rid=raw_input("Do you want to group by the resource id? If yes, enter 'Y', else enter 'N'. ")
if(rid=="Y"):
groupby_def=',"groupby":['
groupby_def=groupby_def+'"resource_id"'
pid=raw_input("Do you want to group by the project id? If yes, enter 'Y', else enter 'N'. ")
if(pid=="Y"):
groupby_def=groupby_def+',"project_id"'
groupby_def=groupby_def+']'
body=body+groupby_def
else:
pid=raw_input("Do you want to group by the project id? If yes, enter 'Y', else enter 'N'. ")
if(pid=="Y"):
groupby_def=',"groupby":['
groupby_def=groupby_def+'"project_id"'
groupby_def=groupby_def+']'
body=body+groupby_def
body=body+"}"
else:
body="{"
period=raw_input("Do you want to define a time period? Enter 'Y' if yes, 'N' if no.")
if(period=="Y"):
period_def=raw_input("Enter the desired time period in seconds: ")
body=body+'"period":'+period_def
rid=raw_input("Do you want to group by the resource id? If yes, enter 'Y', else enter 'N'. ")
if(rid=="Y"):
groupby_def=',"groupby":['
groupby_def=groupby_def+'"resource_id"'
pid=raw_input("Do you want to group by the project id? If yes, enter 'Y', else enter 'N'. ")
if(pid=="Y"):
groupby_def=groupby_def+',"project_id"'
groupby_def=groupby_def+']'
body=body+groupby_def
else:
pid=raw_input("Do you want to group by the project id? If yes, enter 'Y', else enter 'N'. ")
if(pid=="Y"):
groupby_def=',"groupby":['
groupby_def=groupby_def+'"project_id"'
groupby_def=groupby_def+']'
body=body+groupby_def
body=body+"}"
else:
rid=raw_input("Do you want to group by the resource id? If yes, enter 'Y', else enter 'N'. ")
if(rid=="Y"):
groupby_def='"groupby":['
groupby_def=groupby_def+'"resource_id"'
pid=raw_input("Do you want to group by the project id? If yes, enter 'Y', else enter 'N'. ")
if(pid=="Y"):
groupby_def=groupby_def+',"project_id"'
groupby_def=groupby_def+']'
body=body+groupby_def
else:
pid=raw_input("Do you want to group by the project id? If yes, enter 'Y', else enter 'N'. ")
if(pid=="Y"):
groupby_def='"groupby":['
groupby_def=groupby_def+'"project_id"'
groupby_def=groupby_def+']'
body=body+groupby_def
body=body+"}"
else:
#q=kwargs.pop('q')
if 'period' in kwargs:
period=kwargs.pop('period')
body="{"+q
body=body+',"period":'+period+"}"
else:
body="{"+q+"}"
if is_in_mlist(meter_id,meter_list):
logger.info('Inside meter_statistics: body is %s',body)
h = http.Http()
#print method
#print body
#print headers
#print target.geturl()
#response, content = h.request(target.geturl(),method,body,headers)
response, content = h.request(target.geturl(),method,'',headers)
#print response
header = json.dumps(response)
#print header
json_header = json.loads(header)
#print json_header
server_response = json_header["status"]
if server_response not in {'200'}:
print "Inside meter_statistics(): Something went wrong!"
logger.warn('Inside meter_statistics: not a valid response ')
return False, meter_stat
else:
logger.info('Getting the meter statistics \n')
data = json.loads(content)
#print content
#print data
#print "========================="
meter_stat = [None]*len(data)
for i in range(len(data)):
meter_stat[i]={}
meter_stat[i]["average"] = data[i]["avg"]
meter_stat[i]["count"] = data[i]["count"]
meter_stat[i]["duration"] = data[i]["duration"]
meter_stat[i]["duration-end"] = data[i]["duration_end"]
meter_stat[i]["duration-start"] = data[i]["duration_start"]
meter_stat[i]["max"] = data[i]["max"]
meter_stat[i]["min"] = data[i]["min"]
meter_stat[i]["period"] = data[i]["period"]
meter_stat[i]["period-end"] = data[i]["period_end"]
meter_stat[i]["period-start"] = data[i]["period_start"]
meter_stat[i]["sum"] = data[i]["sum"]
meter_stat[i]["unit"] = data[i]["unit"]
meter_stat[i]["group-by"] = data[i]["groupby"]
return True, meter_stat
else:
logger.warn("Inside meter statistics: not an existing meter name")
print "Choose a meter from the meter list!"
return False,meter_stat | 33c717dee32027a1502a5a295b87c5cd67a2c054 | 3,649,205 |
from typing import Text
from typing import Tuple
from typing import Union
def parse_image_size(image_size: Union[Text, int, Tuple[int, int]]):
"""Parse the image size and return (height, width).
Args:
image_size: A integer, a tuple (H, W), or a string with HxW format.
Returns:
A tuple of integer (height, width).
"""
if isinstance(image_size, int):
# image_size is integer, with the same width and height.
return (image_size, image_size)
if isinstance(image_size, str):
# image_size is a string with format WxH
width, height = image_size.lower().split("x")
return (int(height), int(width))
if isinstance(image_size, tuple):
return image_size
raise ValueError(
"image_size must be an int, WxH string, or (height, width)"
"tuple. Was %r" % image_size
) | 12d8925780914672b1e7d976040596f3178e7e20 | 3,649,206 |
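Usage sketch covering the three accepted forms:
assert parse_image_size(512) == (512, 512)
assert parse_image_size("640x480") == (480, 640)   # WxH string -> (height, width)
assert parse_image_size((480, 640)) == (480, 640)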
def find_last_match(view, what, start, end, flags=0):
"""Find last occurrence of `what` between `start`, `end`.
"""
match = view.find(what, start, flags)
new_match = None
while match:
new_match = view.find(what, match.end(), flags)
if new_match and new_match.end() <= end:
match = new_match
else:
return match | fc863cf00d05a1fb6302a34b5b1e891e3c9eb3d7 | 3,649,207 |
def convert_metrics_per_batch_to_per_sample(metrics, target_masks):
"""
Args:
metrics: list of len(num_batches), each element: list of len(num_metrics), each element: (num_active_in_batch,) metric per element
target_masks: list of len(num_batches), each element: (batch_size, seq_len, feat_dim) boolean mask: 1s active, 0s ignore
Returns:
metrics_array = list of len(num_batches), each element: (batch_size, num_metrics) metric per sample
"""
metrics_array = []
for b, batch_target_masks in enumerate(target_masks):
num_active_per_sample = np.sum(batch_target_masks, axis=(1, 2))
batch_metrics = np.stack(metrics[b], axis=1) # (num_active_in_batch, num_metrics)
ind = 0
metrics_per_sample = np.zeros((len(num_active_per_sample), batch_metrics.shape[1])) # (batch_size, num_metrics)
for n, num_active in enumerate(num_active_per_sample):
new_ind = ind + num_active
metrics_per_sample[n, :] = np.sum(batch_metrics[ind:new_ind, :], axis=0)
ind = new_ind
metrics_array.append(metrics_per_sample)
return metrics_array | 2ceae1402ac0efae841683d426f87a295f3695c8 | 3,649,208 |
import asyncio
async def get_series(database, series_id):
"""Get a series."""
series_query = """
select series.id, series.played, series_metadata.name, rounds.tournament_id, tournaments.id as tournament_id,
tournaments.name as tournament_name, events.id as event_id, events.name as event_name
from series join rounds on series.round_id=rounds.id join series_metadata on series.id=series_metadata.series_id
join tournaments on rounds.tournament_id=tournaments.id
join events on tournaments.event_id=events.id
where series.id=:id
"""
participants_query = 'select series_id, name, score, winner from participants where series_id=:id'
matches_query = 'select id, series_id from matches where series_id=:id'
values = {'id': series_id}
series, participants, matches = await asyncio.gather(
database.fetch_one(series_query, values=values),
database.fetch_all(participants_query, values=values),
database.fetch_all(matches_query, values=values)
)
return dict(
series,
participants=list(map(dict, participants)),
match_ids=list(map(lambda m: m['id'], matches)),
tournament=dict(
id=series['tournament_id'],
name=series['tournament_name'],
event=dict(
id=series['event_id'],
name=series['event_name']
)
)
) | f5e122052209c399c41afcd579f9b16e863c7a28 | 3,649,209 |
def n_mpjpe(predicted, target):
"""
Normalized MPJPE (scale only), adapted from:
https://github.com/hrhodin/UnsupervisedGeometryAwareRepresentationLearning/blob/master/losses/poses.py
"""
assert predicted.shape == target.shape
norm_predicted = np.mean(np.sum(predicted**2, axis=2, keepdims=True), axis=1, keepdims=True)
norm_target = np.mean(np.sum(target*predicted, axis=2, keepdims=True), axis=1, keepdims=True)
scale = norm_target / norm_predicted
return euclidean_distance_3D(scale * predicted, target) | 68656aca6226db3a4cc7670ccc1972d666b11261 | 3,649,210 |
import math
def calc_distance(p1, p2):
""" calculates a distance on a 2d euclidean space, between two points"""
dist = math.sqrt((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2)
return dist | d4005d44d5724c051860fb9aa2edeab1654157c6 | 3,649,211 |
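For example, the classic 3-4-5 right triangle:
assert calc_distance((0, 0), (3, 4)) == 5.0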
def rgb2ycbcr(img, range=255., only_y=True):
"""same as matlab rgb2ycbcr, please use bgr2ycbcr when using cv2.imread
img: shape=[h, w, 3]
range: the data range
only_y: only return Y channel
"""
in_img_type = img.dtype
    img = img.astype(np.float32)  # astype returns a copy; must reassign or the cast is lost
range_scale = 255. / range
img *= range_scale
# convert
if only_y:
rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
else:
rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
[24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
rlt /= range_scale
if in_img_type == np.uint8:
rlt = rlt.round()
return rlt.astype(in_img_type) | e3dfd7b35faf437a936813afe537d1d4a41b2f6b | 3,649,212 |
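A usage sketch on a float image in [0, 1]; passing range=1. tells the function how the input data is scaled:
import numpy as np
img = np.random.rand(8, 8, 3).astype(np.float32)
y = rgb2ycbcr(img, range=1., only_y=True)        # shape (8, 8), luma only
ycbcr = rgb2ycbcr(img, range=1., only_y=False)   # shape (8, 8, 3)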
def _create_xctest_bundle(name, actions, binary):
"""Creates an `.xctest` bundle that contains the given binary.
Args:
name: The name of the target being built, which will be used as the
basename of the bundle (followed by the .xctest bundle extension).
actions: The context's actions object.
binary: The binary that will be copied into the test bundle.
Returns:
A `File` (tree artifact) representing the `.xctest` bundle.
"""
xctest_bundle = derived_files.xctest_bundle(
actions = actions,
target_name = name,
)
args = actions.args()
args.add(xctest_bundle.path)
args.add(binary)
actions.run_shell(
arguments = [args],
command = (
'mkdir -p "$1/Contents/MacOS" && ' +
'cp "$2" "$1/Contents/MacOS"'
),
inputs = [binary],
mnemonic = "SwiftCreateTestBundle",
outputs = [xctest_bundle],
progress_message = "Creating test bundle for {}".format(name),
)
return xctest_bundle | cf6c64b73b7fcbd7df2a5e6bb60e0605b16a8f58 | 3,649,213 |
def doFile(path_, *args, **kwargs):
"""Execute a given file from path with arguments."""
result, reason = loadfile(path_)
if result:
data = result(*args, **kwargs)
if data:
return data[1]
error(data[1])
error(reason) | 15c6dd79872b479275717fb8a574a34f92381390 | 3,649,214 |
def pformat(obj, verbose=False):
    """
    Prettyprint an object. Either use the `pretty` library or the
    builtin `pprint`.
    """
    try:
        # Import inside the function so the ImportError fallback can trigger
        # when `pretty` is missing; a hoisted top-level import would fail at
        # module load, and a top-level `from pprint import pformat` would be
        # shadowed by this function and recurse.
        from pretty import pretty
        return pretty(obj, verbose=verbose)
    except ImportError:
        from pprint import pformat
        return pformat(obj) | 7522c9b64650a5056fb22d7fdd0c459ce87ca7c7 | 3,649,215 |
def reanalyze_function(*args):
"""
reanalyze_function(func_t pfn, ea_t ea1=0, ea_t ea2=BADADDR, bool analyze_parents=False)
reanalyze_function(func_t pfn, ea_t ea1=0, ea_t ea2=BADADDR)
reanalyze_function(func_t pfn, ea_t ea1=0)
reanalyze_function(func_t pfn)
"""
return _idaapi.reanalyze_function(*args) | 52d248fbb82ebb41ff925c42b7cb6856c5cba927 | 3,649,216 |
def categorical_sample_logits(logits):
"""
Samples (symbolically) from categorical distribution, where logits is a NxK
matrix specifying N categorical distributions with K categories
    specifically, exp(logits) / sum( exp(logits), axis=1 ) gives the
    probabilities of the different classes
    Cleverly uses the Gumbel trick, based on
https://github.com/tensorflow/tensorflow/issues/456
"""
U = tf.random_uniform(tf.shape(logits))
return tf.argmax(logits - tf.log(-tf.log(U)), dimension=1) | c5bf8615fe3c25f392bc3fa27f965527f237ef3e | 3,649,217 |
def mass(d, r):
""" computes the right hand side of the differential equation of mass continuity
"""
return 4 * pi * d * r * r | 1924309951e35d36b51fe92389c3fa68fac3ebfa | 3,649,218 |
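For example, at density d = 1.0 and radius r = 2.0 the right-hand side dm/dr = 4*pi*d*r^2 evaluates to 16*pi (assuming the module's pi is math.pi):
from math import pi
assert abs(mass(1.0, 2.0) - 16 * pi) < 1e-12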
def bord(u):
"""
    Execution of bord("undébutuntructrucundébut")
i suffix estPréfixe
23 ndébutuntructrucundébut False
22 débutuntructrucundébut False
21 ébutuntructrucundébut False
20 butuntructrucundébut False
19 utuntructrucundébut False
18 tuntructrucundébut False
17 untructrucundébut False
16 ntructrucundébut False
15 tructrucundébut False
14 ructrucundébut False
13 uctrucundébut False
12 ctrucundébut False
11 trucundébut False
10 rucundébut False
9 ucundébut False
8 cundébut False
7 undébut True
"""
suffix = ""
for i in reversed(range(0, len(u))):
suffix = u[len(u)-i:len(u)]
if estPréfixe(u, suffix):
break
return suffix | 950eaac804a0788c9d2f845d594b7781d5ea9aa4 | 3,649,219 |
def is_unique_n_bit_vector(string: str) -> bool:
"""
    Similar to the dict solution, it just uses a bit vector instead of a dict or array.
"""
vector = 0
for letter in string:
if vector & 1 << ord(letter):
return False
vector |= 1 << ord(letter)
return True | d19609f1fb1e6a189a9adb11b37a96632c8d0958 | 3,649,220 |
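For example:
assert is_unique_n_bit_vector("abcdefg")
assert not is_unique_n_bit_vector("hello")   # repeated 'l'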
def seq2msk(isq):
"""
Convert seqhis into mskhis
    OpticksPhoton.h uses a mask, but seq uses the index for brevity::
        enum
        {
            CERENKOV      = 0x1 << 0,
            SCINTILLATION = 0x1 << 1,
            MISS          = 0x1 << 2,
            BULK_ABSORB   = 0x1 << 3,
            BULK_REEMIT   = 0x1 << 4,
"""
ifl = np.zeros_like(isq)
for n in range(16):
msk = 0xf << (4*n) ## nibble mask
nib = ( isq & msk ) >> (4*n) ## pick the nibble and shift to pole position
flg = 1 << ( nib[nib>0] - 1 ) ## convert flag bit index into flag mask
ifl[nib>0] |= flg
pass
return ifl | 950dc8fe1fcc275f7a90e695816ea1777cc5164e | 3,649,221 |
def split(ich):
""" Split a multi-component InChI into InChIs for each of its components.
(fix this for /s [which should be removed in split/join operations]
and /m, which is joined as /m0110.. with no separators)
:param ich: InChI string
:type ich: str
:rtype: tuple(str)
"""
fml_slyr = formula_sublayer(ich)
main_dct = main_sublayers(ich)
char_dct = charge_sublayers(ich)
ste_dct = stereo_sublayers(ich)
iso_dct = isotope_sublayers(ich)
fml_slyrs = _split_sublayer_string(
fml_slyr, count_sep_ptt='', sep_ptt=app.escape('.'))
count = len(fml_slyrs)
main_dcts = _split_sublayers(main_dct, count)
char_dcts = _split_sublayers(char_dct, count)
ste_dcts = _split_sublayers(ste_dct, count)
iso_dcts = _split_sublayers(iso_dct, count)
ichs = tuple(from_data(fml_slyr=fml_slyr,
main_lyr_dct=main_dct,
char_lyr_dct=char_dct,
ste_lyr_dct=ste_dct,
iso_lyr_dct=iso_dct)
for fml_slyr, main_dct, char_dct, ste_dct, iso_dct
in zip(fml_slyrs, main_dcts, char_dcts, ste_dcts, iso_dcts))
return ichs | 0db3bee951e38f7db8cbcdb02a64ed28b9562e9d | 3,649,222 |
def crtb_cb(client, crtb):
"""Wait for the crtb to have the userId populated"""
def cb():
c = client.reload(crtb)
return c.userId is not None
return cb | eff248a877e195e59d2f6db812af2ff43955aee0 | 3,649,223 |
def create_network(network_input, n_alphabets):
""" create the structure of the neural network """
model = Sequential()
model.add(LSTM(512,input_shape=(network_input.shape[1], network_input.shape[2]),return_sequences=True))
model.add(Dropout(0.3))
model.add(Bidirectional(LSTM(512, return_sequences=True)))
model.add(Dropout(0.3))
model.add(Bidirectional(LSTM(512)))
model.add(Dense(256))
model.add(Dropout(0.3))
model.add(Dense(n_alphabets))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
return model | dd6610f0db02d0d20fb457d91346144494ad32e4 | 3,649,224 |
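A hedged usage sketch (assumes the Keras imports the original module relies on, e.g. Sequential, LSTM, Dropout, Bidirectional, Dense and Activation):
import numpy as np
network_input = np.zeros((4, 100, 1))   # 4 sequences, 100 timesteps, 1 feature
model = create_network(network_input, n_alphabets=38)
model.summary()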
def create_derivative_graph(f, xrange, n):
"""Takes a function as an input with a specific interval xrange, then creates a list with the ouput
y-points for the nth derivative of f.
:param f: Input function that we wish to take the derivative of.
:type f: lambda
:param xrange: The interval on which to evaluate f^n(x).
:type xrange: list
:param n: The derivative (1st, 2nd, 3rd, etc)
:type n: int
:return: A list of all f^n(x) points for all x in xrange.
:rtype: list of floats
"""
plot_points = []
for x in xrange:
plot_points.append(nth_derivative(f, x, n))
return plot_points | 782d26d22c93ae4b05d075fbf4075a8bba9d89b8 | 3,649,225 |
def _matching_not_matching(on, **kwargs):
"""
Change the text for matching/not matching
"""
text = "matching" if not on else "not matching"
classname = "colour-off" if not on else "colour-on"
return text, classname | aeefa7f16e3268ffe7af93db72490abe053370b2 | 3,649,226 |
def sample(model, x, steps, temperature=1.0, sample=False, top_k=None):
"""
take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in
the sequence, feeding the predictions back into the model each time. Clearly the sampling
has quadratic complexity unlike an RNN that is only linear, and has a finite context window
of block_size, unlike an RNN that has an infinite context window.
"""
block_size = model.block_size
for k in range(steps):
x_cond = x if x.shape[1] <= block_size else x[:, -block_size:] # crop context if needed
logits = model(x_cond)
# pluck the logits at the final step and scale by temperature
logits = logits[:, -1, :] / temperature
# optionally crop probabilities to only the top k options
if top_k is not None:
logits = top_k_logits(logits, top_k)
# apply softmax to convert to probabilities
probs = tf.nn.softmax(logits, axis=-1)
# sample from the distribution or take the most likely
if sample:
ix = tf.random.categorical(logits,1,dtype=tf.int32)
else:
_, ix = tf.math.top_k(probs, k=1)
# append to the sequence and continue
x = tf.concat((x,ix), axis=1)
return x | 532ad7e1af4c7b059bbd12a8584c469bcb5d079e | 3,649,228 |
def vstack(arg_list):
"""Wrapper on vstack to ensure list argument.
"""
return Vstack(*arg_list) | 95215c8277da6b86c21220021d667ae3dcc05440 | 3,649,229 |
import json
def metadata_to_list(metadata):
"""Transform a metadata dictionary retrieved from Cassandra to a list of
tuples. If metadata items are lists they are split into multiple pairs in
the result list
:param metadata: dict"""
res = []
for k, v in metadata.iteritems():
try:
val_json = json.loads(v)
val = val_json.get('json', '')
# If the value is a list we create several pairs in the result
if isinstance(val, list):
for el in val:
res.append((k, el))
else:
if val:
res.append((k, val))
except ValueError:
if v:
res.append((k, v))
return res | 1044a93742a635e72e443d3a5c2e5805702d1602 | 3,649,230 |
from typing import Optional
from typing import Union
import torch
from pathlib import Path
import json
def load_separator(
model_str_or_path: str = "umxhq",
niter: int = 1,
residual: bool = False,
slicq_wiener: bool = False,
wiener_win_len: Optional[int] = 300,
device: Union[str, torch.device] = "cpu",
pretrained: bool = True,
):
"""Separator loader
Args:
model_str_or_path (str): Model name or path to model _parent_ directory
E.g. The following files are assumed to present when
loading `model_str_or_path='mymodel', targets=['vocals']`
'mymodel/separator.json', mymodel/vocals.pth', 'mymodel/vocals.json'.
Defaults to `umxhq`.
device (str): torch device, defaults to `cpu`
pretrained (bool): determines if loading pre-trained weights
"""
model_path = Path(model_str_or_path).expanduser()
# when path exists, we assume its a custom model saved locally
if model_path.exists():
with open(Path(model_path, "separator.json"), "r") as stream:
enc_conf = json.load(stream)
xumx_model, model_nsgt, jagged_slicq_sample = load_target_models(
model_str_or_path=model_path, pretrained=pretrained, sample_rate=enc_conf["sample_rate"], device=device
)
separator = model.Separator(
xumx_model,
model_nsgt,
jagged_slicq_sample,
stft_wiener=not slicq_wiener,
sample_rate=enc_conf["sample_rate"],
nb_channels=enc_conf["nb_channels"],
).to(device)
return separator | 2cb2d951d669c7d08a3bf3cabc5c49a11ca717fc | 3,649,232 |
def IntermediateParticleConst_get_decorator_type_name():
"""IntermediateParticleConst_get_decorator_type_name() -> std::string"""
return _RMF.IntermediateParticleConst_get_decorator_type_name() | efb869aece5ad0f19e06f5d1a13e89998cde53a8 | 3,649,233 |
def multiply_add_plain_with_delta(ct, pt, context_data):
"""Add plaintext to ciphertext.
Args:
ct (Ciphertext): ct is pre-computed carrier polynomial where we can add pt data.
pt (Plaintext): A plaintext representation of integer data to be encrypted.
        context_data (Context): Context data for extracting encryption parameters.
Returns:
A Ciphertext object with the encrypted result of encryption process.
"""
ct_param_id = ct.param_id
coeff_modulus = context_data.param.coeff_modulus
pt = pt.data
plain_coeff_count = len(pt)
delta = context_data.coeff_div_plain_modulus
ct0, ct1 = ct.data # here ct = pk * u * e
# Coefficients of plain m multiplied by coeff_modulus q, divided by plain_modulus t,
# and rounded to the nearest integer (rounded up in case of a tie). Equivalent to
for i in range(plain_coeff_count):
for j in range(len(coeff_modulus)):
temp = round(delta[j] * pt[i]) % coeff_modulus[j]
ct0[j][i] = (ct0[j][i] + temp) % coeff_modulus[j]
return CipherText([ct0, ct1], ct_param_id) | 4f004cc443d183f25cf35bc691c9797b4a8a5875 | 3,649,234 |
import json
from datetime import datetime
def retrieve_form_data(form, submission_type="solution"):
"""Quick utility function that groups together the processing of request data. Allows for easier handling of exceptions
    Takes the submitted form object as its main argument.
On Success, returns hashmap of processed data...otherwise raise an exception"""
if submission_type == "solution":
processed_data = {}
try:
print("FCD =>", form.cleaned_data)
processed_data["prob_id"] = int(form.cleaned_data.get("problem_id"))
processed_data["uid"] = int(form.cleaned_data.get("user_id"))
processed_data["code_data"] = form.cleaned_data.get("solution")
processed_data["course_id"] = form.cleaned_data.get("course_id", None)
except Exception as e:
print("POST NOT OK: Error during intial processing of uploaded data - {0}".format(str(e)))
return Response(ERROR_CODES["Form Submission Error"], status=status.HTTP_400_BAD_REQUEST)
return processed_data
elif submission_type == "problem_upload":
data = form.cleaned_data
processed_data = {}
try:
processed_data["author_id"] = int(data.get("author_id"))
processed_data["category"] = data.get("category")
processed_data["target_file"] = data.get("target_file", None)
processed_data["data_file"] = data.get("data_file", None)
processed_data["course_id"] = data.get("course_id", None)
if processed_data["data_file"] is not None:
processed_data["data_file"].seek(0)
processed_data["init_data"] = processed_data["data_file"].read().decode("utf-8")
try:
json.loads(processed_data["init_data"])
except Exception as e:
raise Exception("Invalid JSON in init_data_file! - {0}".format(str(e)))
else:
processed_data["init_data"] = None
processed_data["name"] = data.get("name").replace("(", "[").replace(")", "]")
if "(" in processed_data["name"] or ")" in processed_data["name"]:
print("POST NOT OK: Problem Name cannot contain parnetheses!")
return Response(ERROR_CODES["Form Submission Error"], status=status.HTTP_400_BAD_REQUEST)
description = data.get("description")
processed_data["program_file"] = data.get("program")
processed_data["code"] = [line.decode("utf-8") for line in processed_data["program_file"].read().splitlines()]
processed_data["metadata"] = data.get("meta_file")
processed_data["metadata"]["description"] = description
processed_data["date_submitted"] = datetime.now()
processed_data["inputs"] = data.get("inputs", None)
if processed_data["category"] == "file_io":
processed_data["metadata"]["inputs"] = "file"
else:
processed_data["metadata"]["inputs"] = True if processed_data["inputs"] is not None else False
processed_data["metadata"]["init_data"] = True if processed_data["init_data"] is not None else False
except Exception as e:
print("POST NOT OK: Error during intial processing of uploaded data - {0}".format(str(e)))
return Response(ERROR_CODES["Form Submission Error"], status=status.HTTP_400_BAD_REQUEST)
return processed_data | 4ab635ac226ebb7811baf2d0e3d71c8cfc25b1da | 3,649,236 |
def keep_english_for_spacy_nn(df):
"""This function takes the DataFrame for songs
and keep songs with english as main language
for english version of spacy neural network for word processing"""
#Keep only english for spacy NN English preprocessing words
#Network for other languages like french, spanish, portuguese are also available
df = df.loc[df['Main Language'] == 'en',:]
    #Drop the translation column, not used for lyrics in English
df.drop(['English Translation Lyrics'],axis =1,inplace = True)
return df | e24402fa91ee0444c86867c98777fbd3cb7c9894 | 3,649,238 |
def make_dealer_cards_more_fun(deck, dealer):
"""
    Re-deal the dealer's hand so the dealer wins this game more often.
    :param deck: the deck to draw replacement cards from
    :param dealer: the dealer's current cards
    :return: the (possibly re-dealt) dealer cards
    Note: the repeated re-deals may cause a lot of extra memory work.
"""
dealercards = card_sorting_dealer(dealer)
count = 0
if jokbo(dealercards) == 0 or jokbo(dealercards) == 1 or jokbo(dealercards) == 2:
deck.append({"suit": dealer[0]["suit"], "rank": dealer[0]["rank"]})
deck.append({"suit": dealer[1]["suit"], "rank": dealer[1]["rank"]})
deck.append({"suit": dealer[2]["suit"], "rank": dealer[2]["rank"]})
deck.append({"suit": dealer[3]["suit"], "rank": dealer[3]["rank"]})
deck.append({"suit": dealer[4]["suit"], "rank": dealer[4]["rank"]})
    if jokbo(dealercards) == 0: # high card
while True:
count += 1
dealer = []
card, deck = hit(deck)
dealer.append(card)
card, deck = hit(deck)
dealer.append(card)
card, deck = hit(deck)
dealer.append(card)
card, deck = hit(deck)
dealer.append(card)
card, deck = hit(deck)
dealer.append(card)
dealercards = card_sorting_dealer(dealer)
if count == 30: break
if jokbo(dealercards) != 0 and jokbo(dealercards) != 1:
break
else:
deck.append({"suit": dealer[0]["suit"], "rank": dealer[0]["rank"]})
deck.append({"suit": dealer[1]["suit"], "rank": dealer[1]["rank"]})
deck.append({"suit": dealer[2]["suit"], "rank": dealer[2]["rank"]})
deck.append({"suit": dealer[3]["suit"], "rank": dealer[3]["rank"]})
deck.append({"suit": dealer[4]["suit"], "rank": dealer[4]["rank"]})
continue
return dealer
    elif jokbo(dealercards) == 1: # one pair
while True:
count += 1
dealer = []
card, deck = hit(deck)
dealer.append(card)
card, deck = hit(deck)
dealer.append(card)
card, deck = hit(deck)
dealer.append(card)
card, deck = hit(deck)
dealer.append(card)
card, deck = hit(deck)
dealer.append(card)
dealercards = card_sorting_dealer(dealer)
if count == 30: break
if jokbo(dealercards) != 0 and jokbo(dealercards) != 1:
break
else:
deck.append({"suit": dealer[0]["suit"], "rank": dealer[0]["rank"]})
deck.append({"suit": dealer[1]["suit"], "rank": dealer[1]["rank"]})
deck.append({"suit": dealer[2]["suit"], "rank": dealer[2]["rank"]})
deck.append({"suit": dealer[3]["suit"], "rank": dealer[3]["rank"]})
deck.append({"suit": dealer[4]["suit"], "rank": dealer[4]["rank"]})
continue
return dealer
else:
return dealer | 5adf8fbeeb53124c75ec43a13492d7aef1ebdc7e | 3,649,239 |
def data(request):
"""Returns available albums from the database. Can be optionally filtered by year.
This is called from templates/albums/album/index.html when the year input is changed.
"""
year = request.GET.get('year')
if year:
try:
year = int(year)
except (ValueError, TypeError):
return HttpResponseBadRequest('invalid year parameter')
else:
year = None
return JsonResponse(list(get_albums(year)), safe=False) | 8390bcc6fd2bcc109930cb34b3269b450c12a87c | 3,649,241 |
from typing import Tuple
def yaw_to_quaternion3d(yaw: float) -> Tuple[float,float,float,float]:
"""
Args:
- yaw: rotation about the z-axis
Returns:
- qx,qy,qz,qw: quaternion coefficients
"""
qx,qy,qz,qw = Rotation.from_euler('z', yaw).as_quat()
return qx,qy,qz,qw | 263a0b12e0c165f929c5004cdb67b8133f117140 | 3,649,242 |
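For example, a 90-degree yaw (assumes scipy's Rotation is imported in the original module):
import math
qx, qy, qz, qw = yaw_to_quaternion3d(math.pi / 2)
# approximately (0.0, 0.0, 0.7071, 0.7071)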
def parse_coap_response_code(response_code):
"""
Parse the binary code from CoAP response and return the response code as a float.
See also https://tools.ietf.org/html/rfc7252#section-5.9 for response code definitions.
:rtype float
"""
response_code_class = response_code // 32
response_code_detail = response_code % 32
# Compose response code
return response_code_class + response_code_detail / 100 | 9a8165f205ec2f6fe8576e18a831498f82834a10 | 3,649,243 |
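For example, the raw byte 0x45 (decimal 69) decodes to CoAP 2.05 "Content":
assert parse_coap_response_code(0x45) == 2.05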
from functools import reduce
def modified_partial_sum_product(
sum_op, prod_op, factors, eliminate=frozenset(), plate_to_step=dict()
):
"""
Generalization of the tensor variable elimination algorithm of
:func:`funsor.sum_product.partial_sum_product` to handle markov dimensions
in addition to plate dimensions. Markov dimensions in transition factors
are eliminated efficiently using the parallel-scan algorithm in
:func:`funsor.sum_product.sequential_sum_product`. The resulting factors are then
combined with the initial factors and final states are eliminated. Therefore,
when Markov dimension is eliminated ``factors`` has to contain a pairs of
initial factors and transition factors.
:param ~funsor.ops.AssociativeOp sum_op: A semiring sum operation.
:param ~funsor.ops.AssociativeOp prod_op: A semiring product operation.
:param factors: A collection of funsors.
:type factors: tuple or list
:param frozenset eliminate: A set of free variables to eliminate,
including both sum variables and product variable.
:param dict plate_to_step: A dict mapping markov dimensions to
``step`` collections that contain ordered sequences of Markov variable names
(e.g., ``{"time": frozenset({("x_0", "x_prev", "x_curr")})}``).
Plates are passed with an empty ``step``.
:return: a list of partially contracted Funsors.
:rtype: list
"""
assert callable(sum_op)
assert callable(prod_op)
assert isinstance(factors, (tuple, list))
assert all(isinstance(f, Funsor) for f in factors)
assert isinstance(eliminate, frozenset)
assert isinstance(plate_to_step, dict)
# process plate_to_step
plate_to_step = plate_to_step.copy()
prev_to_init = {}
for key, step in plate_to_step.items():
# map prev to init; works for any history > 0
for chain in step:
init, prev = chain[: len(chain) // 2], chain[len(chain) // 2 : -1]
prev_to_init.update(zip(prev, init))
# convert step to dict type required for MarkovProduct
plate_to_step[key] = {chain[1]: chain[2] for chain in step}
plates = frozenset(plate_to_step.keys())
sum_vars = eliminate - plates
prod_vars = eliminate.intersection(plates)
markov_sum_vars = frozenset()
for step in plate_to_step.values():
markov_sum_vars |= frozenset(step.keys()) | frozenset(step.values())
markov_sum_vars &= sum_vars
markov_prod_vars = frozenset(
k for k, v in plate_to_step.items() if v and k in eliminate
)
markov_sum_to_prod = defaultdict(set)
for markov_prod in markov_prod_vars:
for k, v in plate_to_step[markov_prod].items():
markov_sum_to_prod[k].add(markov_prod)
markov_sum_to_prod[v].add(markov_prod)
var_to_ordinal = {}
ordinal_to_factors = defaultdict(list)
for f in factors:
ordinal = plates.intersection(f.inputs)
ordinal_to_factors[ordinal].append(f)
for var in sum_vars.intersection(f.inputs):
var_to_ordinal[var] = var_to_ordinal.get(var, ordinal) & ordinal
ordinal_to_vars = defaultdict(set)
for var, ordinal in var_to_ordinal.items():
ordinal_to_vars[ordinal].add(var)
results = []
while ordinal_to_factors:
leaf = max(ordinal_to_factors, key=len)
leaf_factors = ordinal_to_factors.pop(leaf)
leaf_reduce_vars = ordinal_to_vars[leaf]
for (group_factors, group_vars) in _partition(
leaf_factors, leaf_reduce_vars | markov_prod_vars
):
# eliminate non markov vars
nonmarkov_vars = group_vars - markov_sum_vars - markov_prod_vars
f = reduce(prod_op, group_factors).reduce(sum_op, nonmarkov_vars)
# eliminate markov vars
markov_vars = group_vars.intersection(markov_sum_vars)
if markov_vars:
markov_prod_var = [markov_sum_to_prod[var] for var in markov_vars]
assert all(p == markov_prod_var[0] for p in markov_prod_var)
if len(markov_prod_var[0]) != 1:
raise ValueError("intractable!")
time = next(iter(markov_prod_var[0]))
for v in sum_vars.intersection(f.inputs):
if time in var_to_ordinal[v] and var_to_ordinal[v] < leaf:
raise ValueError("intractable!")
time_var = Variable(time, f.inputs[time])
group_step = {
k: v for (k, v) in plate_to_step[time].items() if v in markov_vars
}
f = MarkovProduct(sum_op, prod_op, f, time_var, group_step)
f = f.reduce(sum_op, frozenset(group_step.values()))
f = f(**prev_to_init)
remaining_sum_vars = sum_vars.intersection(f.inputs)
if not remaining_sum_vars:
results.append(f.reduce(prod_op, leaf & prod_vars - markov_prod_vars))
else:
new_plates = frozenset().union(
*(var_to_ordinal[v] for v in remaining_sum_vars)
)
if new_plates == leaf:
raise ValueError("intractable!")
f = f.reduce(prod_op, leaf - new_plates - markov_prod_vars)
ordinal_to_factors[new_plates].append(f)
return results | 24d5f529d03eeb3a332cc861fdabff3a0d613d37 | 3,649,244 |
def load_scicar_cell_lines(test=False):
"""Download sci-CAR cell lines data from GEO."""
if test:
adata = load_scicar_cell_lines(test=False)
adata = subset_joint_data(adata)
return adata
return load_scicar(
rna_url,
rna_cells_url,
rna_genes_url,
atac_url,
atac_cells_url,
atac_genes_url,
) | 4760b41e2a29125ba9eaf597c555b1b40e338612 | 3,649,245 |
def binary_search(sorted_list, item):
"""
Implements a Binary Search, O(log n).
If item is is list, returns amount of steps.
If item not in list, returns None.
"""
steps = 0
start = 0
end = len(sorted_list)
while start < end:
steps += 1
mid = (start + end) // 2
# print("#", mid)
if sorted_list[mid] == item:
return steps
# If the item is lesser than the list
# item == 3 and sorted_list == [1, 2, 3, 4, 5, 6, 8]
# the END of my list becomes the middle (4), excluding all items from the middle to the end
# end == 4
# next time, when mid = (start + end) // 2 executes, mid == 2
if sorted_list[mid] > item:
end = mid
# If the item is bigger than the list
# item == 8 and sorted_list == [1, 2, 3, 4, 5, 6, 8]
        # the START of my list will be the middle (4) plus 1, excluding all items from the middle to the beginning
# start == 5
# next time, when mid = (start + end) // 2 executes, mid == 8
if sorted_list[mid] < item:
start = mid + 1
return None | 30b1bba330752455d932b4c6cf1ad4dab5969db3 | 3,649,246 |
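For example (note the return value is the number of halving steps, not an index):
assert binary_search([1, 2, 3, 4, 5, 6, 8], 8) == 3
assert binary_search([1, 2, 3, 4, 5, 6, 8], 7) is None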
def _scale_by(number, should_fail=False):
"""
A helper function that creates a scaling policy and scales by the given
number, if the number is not zero. Otherwise, just triggers convergence.
:param int number: The number to scale by.
:param bool should_fail: Whether or not the policy execution should fail.
:return: A function that can be passed to :func:`_oob_disable_then` as the
``then`` parameter.
"""
def _then(helper, rcs, group):
policy = ScalingPolicy(scale_by=number, scaling_group=group)
return (policy.start(rcs, helper.test_case)
.addCallback(policy.execute,
success_codes=[403] if should_fail else [202]))
return _then | 046dcaf120d4c04578e9562b23f76f1cb8f98690 | 3,649,248 |
import traceback
def selectgender(value):
"""格式化为是/否
:param value:M/F,
:return: 男/女
"""
absent = {"M": u'男', "F": u'女'}
try:
if value:
return absent[value]
return ""
except:
traceback.print_exc() | 7b6b0b41b5ea8d3eaab5574881b40f5c00da73cd | 3,649,249 |
def Clifford_twirl_channel_one_qubit(K, rho, sys=1, dim=[2]):
"""
Twirls the given channel with Kraus operators in K by the one-qubit
Clifford group on the given subsystem (specified by sys).
"""
n = int(np.log2(np.sum([d for d in dim])))
C1 = eye(2**n)
C2 = Rx_i(sys, np.pi, n)
C3 = Rx_i(sys, np.pi / 2.0, n)
C4 = Rx_i(sys, -np.pi / 2.0, n)
C5 = Rz_i(sys, np.pi, n)
C6 = Rx_i(sys, np.pi, n) * Rz_i(sys, np.pi, n)
C7 = Rx_i(sys, np.pi / 2.0, n) * Rz_i(sys, np.pi, n)
C8 = Rx_i(sys, -np.pi / 2.0, n) * Rz_i(sys, np.pi, n)
C9 = Rz_i(sys, np.pi / 2.0, n)
C10 = Ry_i(sys, np.pi, n) * Rz_i(sys, np.pi / 2.0, n)
C11 = Ry_i(sys, -np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n)
C12 = Ry_i(sys, np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n)
C13 = Rz_i(sys, -np.pi / 2.0, n)
C14 = Ry_i(sys, np.pi, n) * Rz_i(sys, -np.pi / 2.0, n)
C15 = Ry_i(sys, -np.pi / 2.0, n) * Rz_i(sys, -np.pi / 2.0, n)
C16 = Ry_i(sys, np.pi / 2.0, n) * Rz_i(sys, -np.pi / 2.0, n)
C17 = (
Rz_i(sys, -np.pi / 2.0, n)
* Rx_i(sys, np.pi / 2.0, n)
* Rz_i(sys, np.pi / 2.0, n)
)
C18 = (
Rz_i(sys, np.pi / 2.0, n)
* Rx_i(sys, np.pi / 2.0, n)
* Rz_i(sys, np.pi / 2.0, n)
)
C19 = Rz_i(sys, np.pi, n) * Rx_i(sys, np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n)
C20 = Rx_i(sys, np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n)
C21 = (
Rz_i(sys, np.pi / 2.0, n)
* Rx_i(sys, -np.pi / 2.0, n)
* Rz_i(sys, np.pi / 2.0, n)
)
C22 = (
Rz_i(sys, -np.pi / 2.0, n)
* Rx_i(sys, -np.pi / 2.0, n)
* Rz_i(sys, np.pi / 2.0, n)
)
C23 = Rx_i(sys, -np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n)
C24 = Rx_i(sys, np.pi, n) * Rx_i(sys, -np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n)
C = [
C1,
C2,
C3,
C4,
C5,
C6,
C7,
C8,
C9,
C10,
C11,
C12,
C13,
C14,
C15,
C16,
C17,
C18,
C19,
C20,
C21,
C22,
C23,
C24,
]
rho_twirl = 0
for i in range(len(C)):
rho_twirl += (
(1.0 / 24.0)
* C[i]
@ apply_channel(K, dag(C[i]) @ rho @ C[i], sys, dim)
@ dag(C[i])
)
return rho_twirl, C | 1225c8689641e245d7666c75f9e31d862f1efe56 | 3,649,250 |
def unpack_batch(batch, use_cuda=False):
""" Unpack a batch from the data loader. """
input_ids = batch[0]
input_mask = batch[1]
segment_ids = batch[2]
boundary_ids = batch[3]
pos_ids = batch[4]
rel_ids = batch[5]
knowledge_feature = batch[6]
bio_ids = batch[1]
# knowledge_adjoin_matrix = batch[7]
# know_segment_ids = batch[6]
# know_input_ids = batch[7]
# know_input_mask = batch[8]
# knowledge_feature = (batch[6], batch[7], batch[8])
return input_ids, input_mask, segment_ids, boundary_ids, pos_ids, rel_ids, knowledge_feature,bio_ids | 6bc8bc9b3c8a9e2b40ac08e67c9fbcf84914e2eb | 3,649,251 |
def truncate(text: str, length: int = 255, end: str = "...") -> str:
"""Truncate text.
Parameters
    ----------
text : str
length : int, default 255
Max text length.
end : str, default "..."
The characters that come at the end of the text.
Returns
-------
truncated text : str
Examples
--------
.. code-block:: html
<meta property="og:title" content="^^ truncate(title, 30) ^^">"""
return f"{text[:length]}{end}" | f14605542418ca95e4752be7ec2fea189b9454ce | 3,649,252 |
from bisect import bisect_left, bisect_right
import scipy as sp  # relies on scipy's legacy re-export of numpy's sqrt/log

def gaussian_slice(x, sigma, mu):
    """
    return a slice of x in which the gaussian is significant, i.e. where
    exp(-0.5 * ((x - mu) / sigma) ** 2) >= small_thr
    (small_thr is assumed to be a module-level threshold constant)
    """
    r = sigma * sp.sqrt(-2.0 * sp.log(small_thr))
x_lo = bisect_left(x, mu - r)
x_hi = bisect_right(x, mu + r)
return slice(x_lo, x_hi) | 25ed1bf4423e8d86baaebec54e4478b58b58365c | 3,649,254 |
def preview(delivery_id):
"""
    Print preview.
:param delivery_id:
:return:
"""
delivery_info = get_delivery_row_by_id(delivery_id)
    # Check that the resource exists
if not delivery_info:
abort(404)
    # Check that the resource has not been deleted
if delivery_info.status_delete == STATUS_DEL_OK:
abort(410)
delivery_print_date = time_utc_to_local(delivery_info.update_time).strftime('%Y-%m-%d')
delivery_code = '%s%s' % (g.ENQUIRIES_PREFIX, time_utc_to_local(delivery_info.create_time).strftime('%y%m%d%H%M%S'))
    # Get the customer's company info
customer_info = get_customer_row_by_id(delivery_info.customer_cid)
    # Get the customer's contact info
customer_contact_info = get_customer_contact_row_by_id(delivery_info.customer_contact_id)
    # Get the shipping user's info
user_info = get_user_row_by_id(delivery_info.uid)
delivery_items = get_delivery_items_rows(delivery_id=delivery_id)
    # Document info
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('delivery preview')
template_name = 'delivery/preview.html'
return render_template(
template_name,
delivery_id=delivery_id,
delivery_info=delivery_info,
customer_info=customer_info,
customer_contact_info=customer_contact_info,
user_info=user_info,
delivery_items=delivery_items,
delivery_print_date=delivery_print_date,
delivery_code=delivery_code,
**document_info
) | d57acf49d7692fe4da02607695ad71fdad1758e5 | 3,649,255 |
import requests
def request_with_json(json_payload):
"""
Load interpolations from the interp service into the DB
"""
test_response = requests.post(INTERP_URL, json=json_payload)
test_response_json = test_response.json()
return test_response_json | 5222060788ce321d258fa23309f5894640a70589 | 3,649,256 |
import numpy as np
import pandas as pd

def correlation(df, rowvar=False):
"""
Calculate column-wise Pearson correlations using ``numpy.ma.corrcoef``
Input data is masked to ignore NaNs when calculating correlations. Data is returned as
a Pandas ``DataFrame`` of column_n x column_n dimensions, with column index copied to
both axes.
:param df: Pandas DataFrame
:return: Pandas DataFrame (n_columns x n_columns) of column-wise correlations
"""
    # Mask NaN values so they are ignored when computing the
    # column-wise correlation matrix
    df = df.copy()
maskv = np.ma.masked_where(np.isnan(df.values), df.values)
cdf = np.ma.corrcoef(maskv, rowvar=False)
cdf = pd.DataFrame(np.array(cdf))
cdf.columns = df.columns
cdf.index = df.columns
cdf = cdf.sort_index(level=0, axis=1)
cdf = cdf.sort_index(level=0)
return cdf | b64ab2f5f08191c9536f6d08b8132b3ecc100698 | 3,649,257 |
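# A minimal, illustrative check (demo data made up here): the NaN entry is
# masked rather than dropped, and perfectly linear columns correlate at 1.0.
if __name__ == "__main__":
    demo = pd.DataFrame({"a": [1.0, 2.0, 3.0, 4.0],
                         "b": [2.0, 4.0, np.nan, 8.0]})
    print(correlation(demo))  # 2x2 matrix indexed by column name, ~1.0 off-diagonal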
import kontrol.core.math
import kontrol.frequency_series.conversion

def cost_zpk_fit(zpk_args, f, x,
error_func=kontrol.core.math.log_mse,
error_func_kwargs={}):
"""The cost function for fitting a frequency series with zero-pole-gain.
Parameters
----------
zpk_args: array
A 1-D list of zeros, poles, and gain.
Zeros and poles are in unit of Hz.
f: array
The frequency axis.
x: array
        The frequency series data.
error_func: func(x1: array, x2: array) -> float
        The function that evaluates the error between arrays x1 and x2.
Defaults to kontrol.core.math.log_mse, which evaluates the
logarithmic mean square error.
error_func_kwargs: dict, optional
Keyword arguments passed to the error function.
        Defaults to {}.
Returns
-------
cost: float
The cost.
"""
x_zpk = abs(
kontrol.frequency_series.conversion.args2zpk(f=f, zpk_args=zpk_args))
cost = error_func(x, x_zpk, **error_func_kwargs)
return cost | fb18cfae20a279e0b65a03b37a10c33e6a17c6db | 3,649,258 |
import numpy as np

def getTrainPredictions(img, subImgSize, model):
"""Makes a prediction for an image.
Takes an input of any size, crops it to specified size, makes
predictions for each cropped window, and stitches output together.
Parameters
----------
img : np.array (n x m x 3)
Image to be transformed
subImgSize : np.array (a x b)
Input size for model
model: keras.model
Keras model used to make predictions
Returns
-------
pred: np.array (n x m)
Prediction from image
"""
# get the size of the input image
l,w,_ = np.shape(img)
# init array for new image
pred = np.zeros(shape = (l,w))
r = l//subImgSize[0]
c = w//subImgSize[1]
roffset = 0
coffset = 0
if l%subImgSize[0] != 0:
roffset = 1
if w%subImgSize[1] != 0:
coffset = 1
x1 = 0
predX1 = 0
# Crop the image
for j in range(r + roffset):
y1 = 0
predY1 = 0
x2 = (j+1)*subImgSize[0]
if x2 > l:
x2 = l
x1 = l - subImgSize[0]
for k in range(c + coffset):
# find upper bounds of window
y2 = (k+1)*subImgSize[1]
# if outer dimension is larger than image size, adjust
if y2 > w:
y2 = w
y1 = w - subImgSize[1]
# crop area of picture
croppedArea = img[x1:x2,y1:y2,:]
# make prediction using model
modelPrediction = model.predict(np.expand_dims(croppedArea,axis = 0))
# update prediction image
pred[predX1:x2,predY1:y2] = modelPrediction[0,(predX1-x1):,(predY1-y1):,0]
# update the bounds
y1 = y2
predY1 = y1
# update the lower x bound
x1 = x2
predX1 = x1
return pred | e81ee8d6839fa07753ac379520c60d7b2d5be175 | 3,649,259 |
def use_bcbio_variation_recall(algs):
"""Processing uses bcbio-variation-recall. Avoids core requirement if not used.
"""
for alg in algs:
jointcaller = alg.get("jointcaller", [])
if not isinstance(jointcaller, (tuple, list)):
jointcaller = [jointcaller]
for caller in jointcaller:
if caller not in set(["gatk-haplotype-joint", None, False]):
return True
return False | c833f9a2dd9523f78cf294a1822b251b6940a1cd | 3,649,260 |
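# Illustrative check (the algorithm dicts below are made-up examples): any
# jointcaller outside {gatk-haplotype-joint, None, False} requires the tool.
if __name__ == "__main__":
    assert use_bcbio_variation_recall([{"jointcaller": "freebayes-joint"}])
    assert not use_bcbio_variation_recall([{"jointcaller": None}, {}])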
from typing import Mapping
def _sa_model_info(Model: type, types: AttributeType) -> Mapping[str, AttributeInfo]:
""" Get the full information about the model
This function gets a full, cachable, information about the model's `types` attributes, once.
sa_model_info() can then filter it the way it likes, without polluting the cache.
"""
# Get a list of all available InfoClasses
info_classes = [
InfoClass
for InfoClass in AttributeInfo.all_implementations()
if InfoClass.extracts() & types # only enabled types
]
# Apply InfoClasses' extraction to every attribute
# If there is any weird attribute that is not supported, it is silently ignored.
return {
name: InfoClass.extract(attribute)
for name, attribute in all_sqlalchemy_model_attributes(Model).items()
for InfoClass in info_classes
if InfoClass.matches(attribute, types)
} | 8886427a4722bb1fb37664fa7382f61922d89b69 | 3,649,261 |
def bll6_models(estimators, cv_search={}, transform_search={}):
"""
    Provides good defaults for the transform_search argument to models().
    Args:
        estimators: list of estimators as accepted by models()
        cv_search: optional cross-validation arguments to override the defaults
        transform_search: optional LeadTransform arguments to override the defaults
"""
cvd = dict(
year=range(2011, 2014+1),
month=1,
day=1,
train_years=[6],
train_query=[None],
)
cvd.update(cv_search)
transformd = dict(
wic_sample_weight=[0],
aggregations=aggregations.args,
outcome_expr=['max_bll0 >= 6']
)
transformd.update(transform_search)
return models(estimators, cvd, transformd) | c69d17c1f5c6625ef6382959910b23d44459c158 | 3,649,262 |
def bgColor(col):
""" Return a background color for a given column title """
# Auto-generated columns
if col in ColumnList._COLUMNS_GEN:
return BG_GEN
# KiCad protected columns
elif col in ColumnList._COLUMNS_PROTECTED:
return BG_KICAD
# Additional user columns
else:
return BG_USER | ae6a44c61807f513a679ccad0f4c39622efa768e | 3,649,263 |
def merge_hedge_positions(df, hedge):
"""
将一个表中的多条记录进行合并,然后对冲
:param self:
:param df:
:return:
"""
# 临时使用,主要是因为i1709.与i1709一类在分组时会出问题,i1709.是由api中查询得到
if df.empty:
return df
df['Symbol'] = df['InstrumentID']
    # Merge records
df = df.groupby(by=['Symbol', 'InstrumentID', 'HedgeFlag', 'Side'])[
'Position'].sum().to_frame().reset_index()
# print(df)
# 对冲
if hedge:
df['Net'] = df['Side'] * df['Position']
df = df.groupby(by=['Symbol', 'InstrumentID', 'HedgeFlag'])['Net'].sum().to_frame().reset_index()
df['Position'] = abs(df['Net'])
df['Side'] = df['Net'] / df['Position']
df = df[df['Position'] != 0]
df = df[['Symbol', 'InstrumentID', 'HedgeFlag', 'Side', 'Position']]
# print(df)
return df | 4bcaa8b160186c6c5e6e3382017d0db3ee9d6c6e | 3,649,264 |
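# Hedged usage sketch with made-up rows: two long lots (3 + 2) and one short
# lot (4) of the same instrument net down to a single long position of 1.
if __name__ == "__main__":
    import pandas as pd  # assumed dependency of the surrounding module
    demo = pd.DataFrame({"InstrumentID": ["i1709"] * 3,
                         "HedgeFlag": ["1"] * 3,
                         "Side": [1, 1, -1],       # 1 = long, -1 = short
                         "Position": [3, 2, 4]})
    print(merge_hedge_positions(demo, hedge=True))  # Side == 1, Position == 1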
import numpy
def BackwardSubTri(U,y):
"""
usage: x = BackwardSubTri(U,y)
Row-oriented backward substitution to solve the upper-triangular, 'tridiagonal'
linear system
U x = y
This function does not ensure that U has the correct nonzero structure. It does,
however, attempt to catch the case where U is singular.
Inputs:
U - square n-by-n matrix (assumed upper triangular and 'tridiagonal')
y - right-hand side vector (n-by-1)
Outputs:
x - solution vector (n-by-1)
"""
# check inputs
m, n = numpy.shape(U)
if (m != n):
raise ValueError("BackwardSubTri error: matrix must be square")
p = numpy.size(y)
if (p != n):
raise ValueError("BackwardSubTri error: right-hand side vector has incorrect dimensions")
if (numpy.min(numpy.abs(numpy.diag(U))) < 100*numpy.finfo(float).eps):
raise ValueError("BackwardSubTri error: matrix is [close to] singular")
# create output vector
x = y.copy()
# perform forward-subsitution algorithm
for i in range(n-1,-1,-1):
if (i<n-1):
x[i] -= U[i,i+1]*x[i+1]
x[i] /= U[i,i]
return x | 5b7c2c636eac0912aa26bc8a236f1c870b95c48b | 3,649,265 |
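# Minimal illustrative check: an upper-bidiagonal 3x3 system whose exact
# solution is x == [1, 1, 1].
if __name__ == "__main__":
    U = numpy.array([[2.0, 1.0, 0.0],
                     [0.0, 3.0, 1.0],
                     [0.0, 0.0, 4.0]])
    y = numpy.array([3.0, 4.0, 4.0])
    print(BackwardSubTri(U, y))  # -> [1. 1. 1.]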
import numpy as np

def discrete_model(parents, lookup_table):
    """
    Create CausalAssignmentModel based on a lookup table.
    lookup_table maps input values to weights of the output values.
    The actual output values are sampled from a discrete distribution
    of integers with probability proportional to the weights.
    lookup_table is of the form:
        Dict[Tuple(input_values): (output_weights)]
Arguments
---------
parents: list
variable names of parents
lookup_table: dict
lookup table
Returns
-------
model: CausalAssignmentModel
"""
assert len(parents) > 0
# create input/output mapping
inputs, weights = zip(*lookup_table.items())
output_length = len(weights[0])
assert all(len(w) == output_length for w in weights)
outputs = np.arange(output_length)
ps = [np.array(w) / sum(w) for w in weights]
def model(**kwargs):
n_samples = kwargs["n_samples"]
a = np.vstack([kwargs[p] for p in parents]).T
b = np.zeros(n_samples) * np.nan
for m, p in zip(inputs, ps):
b = np.where(
(a == m).all(axis=1),
np.random.choice(outputs, size=n_samples, p=p), b)
if np.isnan(b).any():
raise ValueError("It looks like an input was provided which doesn't have a lookup.")
return b
return CausalAssignmentModel(model, parents) | a0eee81439b5997b91941181b8c7978d7f3581c9 | 3,649,266 |
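# Hedged usage sketch, in comments only, since CausalAssignmentModel is
# defined elsewhere in the surrounding module:
#
#     table = {
#         (0,): (1, 1),  # parent x == 0 -> outputs 0/1 with equal weight
#         (1,): (0, 1),  # parent x == 1 -> output 1 deterministically
#     }
#     cam = discrete_model(["x"], table)
#     # the wrapped model draws one output per row of the parent values,
#     # e.g. model(x=np.array([0, 1, 1]), n_samples=3) inside the wrapper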
import base64
import json
import tempfile
import traceback
def retrieve(datafile, provider):
"""
Retrieve a file from the remote provider
:param datafile:
:param provider:
:return: the path to a temporary file containing the data, or None
"""
r = _connect(provider)
try:
data = base64.b64decode(json.loads(r.get(datafile.storage_key))['data'])
with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
tmpfile.write(data)
tmpfilename = tmpfile.name
return tmpfilename
except:
print('Download failed: %s' % traceback.format_exc())
return None | fca0595df40b1743e5cdb73c8a20b0ddc6a2611f | 3,649,267 |
from ctypes import byref, string_at

def edf_parse_message(EDFFILE):
"""Return message info."""
message = edf_get_event_data(EDFFILE).contents
time = message.sttime
message = string_at(byref(message.message[0]), message.message.contents.len + 1)[2:]
message = message.decode('UTF-8')
return (time, message) | 3d29db28b7d110e9fcdf6e8309c027cb4254c647 | 3,649,268 |
from typing import Dict
import logging
def read_abbrevs_and_add_to_db(abbrevs_path: str,
db: Connection) -> Dict[str, int]:
"""Add abbreviations from `abbrevs_path` to `idx` and `defns`."""
with open(abbrevs_path, 'rt') as ab:
abbrevs = read_abbrevs(ab)
abbrev_nid = add_abbrevs_to_db(abbrevs, db)
logging.info('Added %d abbreviations.', len(abbrevs))
return abbrev_nid | 6c937fd15352d8b7e3dcf607bbf2a5b66f105ffb | 3,649,269 |
from PyPDF4 import PdfFileReader

def is_encrypted(input_file: str) -> bool:
    """Checks whether the given PDF file is encrypted, using the PyPDF4 library"""
with open(input_file, 'rb') as pdf_file:
pdf_reader = PdfFileReader(pdf_file, strict=False)
return pdf_reader.isEncrypted | be03d2843f35e21d7881c17f086f33ffbee5e8fa | 3,649,270 |
def get_parameter_by_name(device, name):
""" Find the given device's parameter that belongs to the given name """
for i in device.parameters:
if i.original_name == name:
return i
    return None | 9669262a9bcac8b4c054e07b2c04b780b5f84f87 | 3,649,271
from typing import Optional
import requests
def LogPrint(email: str, fileName: str, materialType: str, printWeight: float, printPurpose: str, msdNumber: Optional[str], paymentOwed: bool) -> bool:
"""Logs a print. Returns if the task was successful.
:param email: Email of the user exporting the print.
:param fileName: Name of the file that was exported.
:param materialType: Type of the material being used.
:param printWeight: Weight of the print being exported.
:param printPurpose: Purpose of the print being exported.
:param msdNumber: MSD Number of the print being exported.
:param paymentOwed: Whether the payment is owed or not.
"""
# Get the hashed id and return if there is none.
hashedId = getUniversityIdHash(email)
if hashedId is None:
return False
# Check if this is a Senior Design print.
msd = printPurpose == "Senior Design Project (Reimbursed)"
# Create the payload.
arguments = {
"hashedId": hashedId,
"fileName": fileName,
"material": materialType,
"weight": printWeight,
"purpose": printPurpose,
"billTo": msdNumber if msd else None,
"owed": paymentOwed,
}
# Send the request and return the result.
printResult = requests.post(getHost() + "/print/add", json=arguments).json()
return "status" in printResult.keys() and printResult["status"] == "success" | eb8c629d4eacdf24988cd6d33d4b4733bb90caac | 3,649,272 |
def is_requirement(line):
"""
Return True if the requirement line is a package requirement;
that is, it is not blank, a comment, or editable.
"""
# Remove whitespace at the start/end of the line
line = line.strip()
# Skip blank lines, comments, and editable installs
return not (
line == '' or
line.startswith('-r') or
line.startswith('#') or
line.startswith('-e') or
line.startswith('git+')
) | db30ff6bb2421d2b31939a20e708bf1c923a353e | 3,649,273 |
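# Quick illustrative checks of the filtering rules (example lines made up):
if __name__ == "__main__":
    assert is_requirement("requests>=2.0")
    assert not is_requirement("# a comment")
    assert not is_requirement("-e .")
    assert not is_requirement("git+https://example.com/repo.git")
    assert not is_requirement("   ")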
import numpy as np

def read_pose_txt(pose_txt):
"""
Read the pose txt file and return a 4x4 rigid transformation.
"""
with open(pose_txt, "r") as f:
lines = f.readlines()
pose = np.zeros((4, 4))
for line_idx, line in enumerate(lines):
items = line.split(" ")
for i in range(4):
pose[line_idx, i] = float(items[i])
return pose | 250cc4c793a3bba948aeac2ca547c6680937a6e7 | 3,649,274 |
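# Hedged round-trip sketch (file name is illustrative): write a 4x4 identity
# pose in the expected space-separated layout, then read it back.
if __name__ == "__main__":
    with open("pose_demo.txt", "w") as f:
        for row in np.eye(4):
            f.write(" ".join(str(v) for v in row) + "\n")
    print(read_pose_txt("pose_demo.txt"))  # -> 4x4 identity matrix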
from requests_futures.sessions import FuturesSession

def get_futures(race_ids=list(range(1, 13000))):
    """Get Futures for all BikeReg race pages with the given race_ids
    (BAD_IDS is assumed to be a module-level collection of ids to skip)."""
session = FuturesSession(max_workers=8)
return [session.get(f'https://results.bikereg.com/race/{race_id}')
for race_id in race_ids if race_id not in BAD_IDS] | 1992c7b2fee93eecb75fd6e0c7a625181073609f | 3,649,275 |
def sum_of_proper_divisors(number: int):
"""
Let d(n) be defined as the sum of proper divisors of n
(numbers less than n which divide evenly into n).
:param number:
:return:
"""
divisors = []
for n in range(1, number):
if number % n == 0:
divisors.append(n)
return sum(divisors) | 9015dd3809f90d328b0b4a6b51f6fcb145f0241d | 3,649,276 |
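# Classic illustration: 220 and 284 are an amicable pair, so each number's
# proper divisors sum to the other.
if __name__ == "__main__":
    assert sum_of_proper_divisors(220) == 284
    assert sum_of_proper_divisors(284) == 220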
import numpy as np

def coddington_meridional(p, q, theta):
""" return radius of curvature """
f = p * q / (p + q)
R = 2 * f / np.sin(theta)
return R | ef4964f08af065b2da6cbe5b156e4e976406a879 | 3,649,277 |
def read_analysis_file(timestamp=None, filepath=None, data_dict=None,
file_id=None, ana_file=None, close_file=True, mode='r'):
"""
Creates a data_dict from an AnalysisResults file as generated by analysis_v3
:param timestamp: str with a measurement timestamp
:param filepath: (str) path to file
:param data_dict: dict where to store the file entries
:param file_id: suffix to the usual HDF measurement file found from giving
a measurement timestamp. Defaults to '_AnalysisResults,' the standard
suffix created by analysis_v3
:param ana_file: HDF file instance
:param close_file: whether to close the HDF file at the end
:param mode: str specifying the HDF read mode (if ana_file is None)
:return: the data dictionary
"""
if data_dict is None:
data_dict = {}
try:
if ana_file is None:
if filepath is None:
if file_id is None:
file_id = '_AnalysisResults'
folder = a_tools.get_folder(timestamp)
filepath = a_tools.measurement_filename(folder, file_id=file_id)
ana_file = h5py.File(filepath, mode)
read_from_hdf(data_dict, ana_file)
if close_file:
ana_file.close()
except Exception as e:
if close_file:
ana_file.close()
raise e
return data_dict | 5e0d1797f45f18665f3ae5eaa6bac987fe94f926 | 3,649,278 |
def get_player_macro_econ_df(rpl: sc2reader.resources.Replay,
pid: int) -> pd.DataFrame:
"""This function organises the records of a player's major
macroeconomic performance indicators.
The function uses a player's PlayerStatsEvents contained in a Replay
object to compose a DataFrame. In the DataFrame, each column points to
a particular indicator. Each row points to the records of all
indicators at a specific moment during the game.
*Arguments*
- rpl (sc2reader.resources.Replay)
Replay object generated with sc2reader containing a match's
data.
- pid (int)
A player's id number distinguishes them from the other
players in a match. It can be extracted from a Participant
object through the pid attribute.
*Returns*
- pd.DataFrame
This DataFrame contains all the time series that illustrate the
changes of each attribute during a match. Each column alludes
to an attribute, each row to a moment during the match.
"""
columns_names =[
'second',
'minerals_current',
'vespene_current',
'minerals_used_active_forces',
'vespene_used_active_forces',
'minerals_collection_rate',
'vespene_collection_rate',
'workers_active_count',
'minerals_used_in_progress',
'vespene_used_in_progress',
'resources_used_in_progress',
'minerals_used_current',
'vespene_used_current',
'resources_used_current',
'minerals_lost',
'vespene_lost',
'resources_lost',
'minerals_killed',
'vespene_killed',
'resources_killed',
'food_used',
'food_made'
]
# Generate a DataFrame with the columns listed above
pstatse_list = get_pstatse(rpl, pid)
pstatse_dicts_list = [event.__dict__ for event in pstatse_list]
pstatse_df = pd.DataFrame(pstatse_dicts_list, columns= columns_names)
# Complete the DataFrame with the real_time, unspent_rsrc columns and
# army_value.
# Also, eliminate possible duplicate last record.
return complete_pstatse_df(rpl, pstatse_df) | 4a0123ce4fe7f704f83c39a8c78e29c9347b1e1a | 3,649,279 |