content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M)
---|---|---
def _check_molecule_format(val):
"""If it seems to be zmatrix rather than xyz format we convert before returning"""
atoms = [x.strip() for x in val.split(";")]
if atoms is None or len(atoms) < 1: # pylint: disable=len-as-condition
raise QiskitNatureError("Molecule format error: " + val)
# An xyz format has 4 parts in each atom, if not then do zmatrix convert
# Allows dummy atoms, using symbol 'X' in zmatrix format for coord computation to xyz
parts = [x.strip() for x in atoms[0].split(" ")]
if len(parts) != 4:
try:
zmat = []
for atom in atoms:
parts = [x.strip() for x in atom.split(" ")]
z = [parts[0]]
for i in range(1, len(parts), 2):
z.append(int(parts[i]))
z.append(float(parts[i + 1]))
zmat.append(z)
xyz = z2xyz(zmat)
new_val = ""
for atm in xyz:
if atm[0].upper() == "X":
continue
if new_val:
new_val += "; "
new_val += f"{atm[0]} {atm[1]} {atm[2]} {atm[3]}"
return new_val
except Exception as exc:
raise QiskitNatureError("Failed to convert atom string: " + val) from exc
return val | d546251f02c6ee3bfe44256edff439ba4d3b4c31 | 2,418 |
import numpy as np
from skimage import measure
def measure_area_perimeter(mask):
    """A function that takes a segmented (binary) mask image as input and
    calculates the area and perimeter length of a lesion."""
# Measure area: the sum of all white pixels in the mask image
area = np.sum(mask)
# Measure perimeter: first find which pixels belong to the perimeter.
perimeter = measure.perimeter(mask)
return area, perimeter | f443b7208c8c452480f0f207153afc5aa1f11d41 | 2,419 |
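A minimal usage sketch for `measure_area_perimeter`, assuming the function above is in scope and `scikit-image` is installed; the 5x5 mask is a made-up example.

```python
import numpy as np

# A 5x5 image with a 3x3 square "lesion" of white pixels.
mask = np.zeros((5, 5), dtype=np.uint8)
mask[1:4, 1:4] = 1
area, perimeter = measure_area_perimeter(mask)
print(area)       # 9 (number of white pixels)
print(perimeter)  # skimage's weighted perimeter estimate for the square
```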
from typing import Any
from unittest.mock import Mock
def mock_object(**params: Any) -> "Mock": # type: ignore # noqa
"""creates an object using params to set attributes
>>> option = mock_object(verbose=False, index=range(5))
>>> option.verbose
False
    >>> list(option.index)
    [0, 1, 2, 3, 4]
"""
return type("Mock", (), params)() | 52140b52d29a424b3f16f0e26b03c19f4afbb0b4 | 2,421 |
def get_words(message):
"""Get the normalized list of words from a message string.
    This function splits a message into words, normalizes them, and returns
    the resulting list. Splitting is done on whitespace; normalization
    lowercases each word, removes stop words, and applies stemming.
Args:
message: A string containing an SMS message
Returns:
The list of normalized words from the message.
"""
words = message.strip().split()
norm_words = [word.lower() for word in words]
# apply stop words
    nonstop_words = [word for word in norm_words if word not in stop_words]
# apply stemming
stem_words = [ps.stem(word) for word in nonstop_words]
return stem_words | dec592d3574da70c27368c4642f5fa47d23b5225 | 2,422 |
def hasPathSum(self, root, sum):
"""
:type root: TreeNode
:type sum: int
:rtype: bool
"""
if root is None:
return False
if sum - root.val == 0 and root.left is None and root.right is None:
return True
else:
return self.hasPathSum(root.left, sum - root.val) or self.hasPathSum(root.right, sum - root.val) | ffab5b8205aa9785c86ac365bd6e854319138627 | 2,424 |
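A quick usage sketch, assuming the method above sits on a `Solution` class (the usual LeetCode layout) and a hypothetical `TreeNode` with `val`/`left`/`right` attributes.

```python
class TreeNode:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

# The root-to-leaf path 5 -> 4 -> 11 -> 2 sums to 22, so this prints True.
root = TreeNode(5,
                TreeNode(4, TreeNode(11, TreeNode(7), TreeNode(2))),
                TreeNode(8))
print(Solution().hasPathSum(root, 22))  # True
```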
def _(node: IntJoin, ctx: AnnotateContext) -> BoxType:
"""All references available on either side of the Join nodes are available."""
lt = box_type(node.over)
rt = box_type(node.joinee)
t = union(lt, rt)
node.typ = t
return t | c5d2c94f58c019399ebfcc431994ab339f317b0c | 2,425 |
import json
def detect_global_table_updates(record):
"""This will detect DDB Global Table updates that are not relevant to application data updates. These need to be
skipped over as they are pure noise.
:param record:
:return:
"""
# This only affects MODIFY events.
if record['eventName'] == 'MODIFY':
# Need to compare the old and new images to check for GT specific changes only (just pop off the GT fields)
old_image = remove_global_dynamo_specific_fields(record['dynamodb']['OldImage'])
new_image = remove_global_dynamo_specific_fields(record['dynamodb']['NewImage'])
if json.dumps(old_image, sort_keys=True) == json.dumps(new_image, sort_keys=True):
return True
return False | 43d9cd6558b0e935a4e195e80932104699564230 | 2,426 |
from typing import List
import warnings
def fix_telecined_fades(clip: vs.VideoNode, tff: bool | int | None = None,
thr: float = 2.2) -> vs.VideoNode:
"""
A filter that gives a mathematically perfect solution to fades made *after* telecining
(which made perfect IVTC impossible). This is an improved version of the Fix-Telecined-Fades plugin
that deals with overshoot/undershoot by adding a check.
Make sure to run this *after* IVTC/deinterlacing!
    If the value surpasses thr * the original value, it will not affect any pixels in that frame
to avoid it damaging frames it shouldn't need to. This helps a lot with orphan fields as well,
which would otherwise create massive swings in values, sometimes messing up the fade fixing.
If you pass your own float clip, you'll want to make sure to properly dither it down after.
If you don't do this, you'll run into some serious issues!
Taken from this gist and modified by LightArrowsEXE.
<https://gist.github.com/blackpilling/bf22846bfaa870a57ad77925c3524eb1>
:param clip: Input clip
:param tff: Top-field-first. `False` sets it to Bottom-Field-First.
If None, get the field order from the _FieldBased prop.
:param thr: Threshold for when a field should be adjusted.
Default is 2.2, which appears to be a safe value that doesn't
cause it to do weird stuff with orphan fields.
:return: Clip with only fades fixed
"""
def _ftf(n: int, f: List[vs.VideoFrame]) -> vs.VideoNode:
avg = (get_prop(f[0], 'PlaneStatsAverage', float),
get_prop(f[1], 'PlaneStatsAverage', float))
if avg[0] != avg[1]:
mean = sum(avg) / 2
fixed = (sep[0].std.Expr(f"x {mean} {avg[0]} / dup {thr} <= swap 1 ? *"),
sep[1].std.Expr(f"x {mean} {avg[1]} / *"))
else:
fixed = sep # type: ignore
return core.std.Interleave(fixed).std.DoubleWeave()[::2]
    # I want to catch this before it reaches SeparateFields and give newer users a more useful error
if get_prop(clip.get_frame(0), '_FieldBased', int) == 0 and tff is None:
raise vs.Error("fix_telecined_fades: 'You must set `tff` for this clip!'")
elif isinstance(tff, (bool, int)):
clip = clip.std.SetFieldBased(int(tff) + 1)
clip32 = depth(clip, 32).std.Limiter()
bits = get_depth(clip)
sep = clip32.std.SeparateFields().std.PlaneStats()
sep = sep[::2], sep[1::2] # type: ignore # I know this isn't good, but frameeval breaks otherwise
ftf = core.std.FrameEval(clip32, _ftf, sep) # and I don't know how or why
if bits == 32:
warnings.warn("fix_telecined_fades: 'Make sure to dither down BEFORE setting the FieldBased prop to 0! "
"Not doing this MAY return some of the combing!'")
else:
ftf = depth(ftf, bits, dither_type=Dither.ERROR_DIFFUSION)
ftf = ftf.std.SetFieldBased(0)
return ftf | d28b78bdb65ffc0c1d354fbbde25391d7ce389b1 | 2,427 |
from typing import Union
from typing import Optional
from io import StringIO
def compare_rdf(expected: Union[Graph, str], actual: Union[Graph, str], fmt: Optional[str] = "turtle") -> Optional[str]:
"""
Compare expected to actual, returning a string if there is a difference
:param expected: expected RDF. Can be Graph, file name, uri or text
:param actual: actual RDF. Can be Graph, file name, uri or text
:param fmt: RDF format
:return: None if they match else summary of difference
"""
def rem_metadata(g: Graph) -> IsomorphicGraph:
# Remove list declarations from target
for s in g.subjects(RDF.type, RDF.List):
g.remove((s, RDF.type, RDF.List))
for t in g:
if t[1] in (META.generation_date, META.source_file_date, META.source_file_size,
TYPE.generation_date, TYPE.source_file_date, TYPE.source_file_size):
g.remove(t)
g_iso = to_isomorphic(g)
return g_iso
# Bypass compare if settings have turned it off
if SKIP_RDF_COMPARE:
print(f"tests/utils/compare_rdf.py: {SKIP_RDF_COMPARE_REASON}")
return None
expected_graph = to_graph(expected, fmt)
expected_isomorphic = rem_metadata(expected_graph)
actual_graph = to_graph(actual, fmt)
actual_isomorphic = rem_metadata(actual_graph)
# Graph compare takes a Looong time
in_both, in_old, in_new = graph_diff(expected_isomorphic, actual_isomorphic)
# if old_iso != new_iso:
# in_both, in_old, in_new = graph_diff(old_iso, new_iso)
old_len = len(list(in_old))
new_len = len(list(in_new))
if old_len or new_len:
txt = StringIO()
with redirect_stdout(txt):
print("----- Missing Triples -----")
if old_len:
print_triples(in_old)
print("----- Added Triples -----")
if new_len:
print_triples(in_new)
return txt.getvalue()
return None | f2e128e1c43c5c207e99c30bb32c14f4c4b71798 | 2,429 |
def start_engine(engine_name, tk, context):
"""
Creates an engine and makes it the current engine.
Returns the newly created engine object. Example::
>>> import sgtk
>>> tk = sgtk.sgtk_from_path("/studio/project_root")
>>> ctx = tk.context_empty()
>>> engine = sgtk.platform.start_engine('tk-maya', tk, ctx)
>>> engine
<Sgtk Engine 0x10451b690: tk-maya, env: shotgun>
:param engine_name: Name of the engine to launch, e.g. tk-maya
:param tk: :class:`~sgtk.Sgtk` instance to associate the engine with
:param context: :class:`~sgtk.Context` object of the context to launch the engine for.
:returns: :class:`Engine` instance
:raises: :class:`TankEngineInitError` if an engine could not be started
for the passed context.
"""
return _start_engine(engine_name, tk, None, context) | bb755d359f5a950aa182545803de0a1ca4d6aaee | 2,430 |
def parse_vaulttext(b_vaulttext):
"""Parse the vaulttext.
Args:
b_vaulttext: A byte str containing the vaulttext (ciphertext, salt,
crypted_hmac).
Returns:
A tuple of byte str of the ciphertext suitable for passing to a Cipher
class's decrypt() function, a byte str of the salt, and a byte str of the
crypted_hmac.
Raises:
AnsibleVaultFormatError: If the vaulttext format is invalid.
"""
# SPLIT SALT, DIGEST, AND DATA
try:
return _parse_vaulttext(b_vaulttext)
except AnsibleVaultFormatError:
raise
except Exception as exc:
raise AnsibleVaultFormatError(f'Vault vaulttext format error: {exc}') | 1b1b6e2aaf1893401d93f750248892ffebae26a6 | 2,431 |
import sqlite3
def does_column_exist_in_db(db, table_name, col_name):
"""Checks if a specific col exists"""
col_name = col_name.lower()
query = f"pragma table_info('{table_name}');"
all_rows = []
try:
db.row_factory = sqlite3.Row # For fetching columns by name
cursor = db.cursor()
cursor.execute(query)
all_rows = cursor.fetchall()
except sqlite3.Error as ex:
print(f'Query error, query={query} Error={ex}')
for row in all_rows:
if row['name'].lower() == col_name:
return True
return False | 90abc20c9643e93641e37c0e94fd504cbcf09928 | 2,432 |
import hmac
def make_secure_val(val):
"""Takes hashed pw and adds salt; this will be the cookie"""
return '%s|%s' % (val, hmac.new(secret, val).hexdigest()) | 6b29f5f3a447bca73ac02a1d7843bdbb6d982db9 | 2,433 |
import random
def get_ad_contents(queryset):
"""
    Take a Contents queryset and return one random contents object that has a preview video
:param queryset: Contents queryset
:return: contents object
"""
contents_list = queryset.filter(preview_video__isnull=False)
max_int = contents_list.count() - 1
if max_int < 0:
return
while True:
idx = random.randint(0, max_int)
contents = contents_list[idx]
if contents:
return contents | 233d1e5f736a9cff38731dd292d431f098bee17a | 2,434 |
def Image_CanRead(*args, **kwargs):
"""
Image_CanRead(String filename) -> bool
Returns True if the image handlers can read this file.
"""
return _core_.Image_CanRead(*args, **kwargs) | f82e31860480611baf6a5f920515466c0d37acab | 2,435 |
import numpy as np
def flatten(lst):
"""Flatten a list."""
return [y for l in lst for y in flatten(l)] if isinstance(lst, (list, np.ndarray)) else [lst] | 0aed241d06725dee9a99512ab2ea5c3f6c02008d | 2,436 |
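A short usage sketch of `flatten`, assuming the function above is in scope; arbitrarily nested lists (and numpy arrays) collapse into one flat list.

```python
print(flatten([1, [2, [3, 4]], [5, 6]]))  # [1, 2, 3, 4, 5, 6]
```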
def calc_mean_score(movies):
"""Helper method to calculate mean of list of Movie namedtuples,
round the mean to 1 decimal place"""
ratings = [m.score for m in movies]
mean = sum(ratings) / max(1, len(ratings))
return round(mean, 1) | 6f837ff251e6221227ba4fa7da752312437da90f | 2,437 |
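A usage sketch, assuming a hypothetical `Movie` namedtuple with a `score` field, which is all the function above relies on.

```python
from collections import namedtuple

Movie = namedtuple("Movie", "title score")
movies = [Movie("A", 8.0), Movie("B", 7.0)]
print(calc_mean_score(movies))  # 7.5
```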
def srun(hosts, cmd, srun_params=None):
"""Run srun cmd on slurm partition.
Args:
hosts (str): hosts to allocate
cmd (str): cmdline to execute
srun_params(dict): additional params for srun
Returns:
CmdResult: object containing the result (exit status, stdout, etc.) of
the srun command
"""
cmd = srun_str(hosts, cmd, srun_params)
try:
result = run_command(cmd, timeout=30)
except DaosTestError as error:
result = None
raise SlurmFailed("srun failed : {}".format(error))
return result | 2e339d90c2de4b1ae81f7e4671c1f726a725a68c | 2,438 |
def COSclustering(key, emb, oracle_num_speakers=None, max_num_speaker=8, MIN_SAMPLES=6):
"""
input:
key (str): speaker uniq name
emb (np array): speaker embedding
        oracle_num_speakers (int or None): oracle number of speakers if known else None
        max_num_speaker (int): maximum number of clusters to consider for each session
MIN_SAMPLES (int): minimum number of samples required for NME clustering, this avoids
zero p_neighbour_lists. Default of 6 is selected since (1/rp_threshold) >= 4.
output:
Y (List[int]): speaker labels
"""
est_num_spks_out_list = []
mat = get_eigen_matrix(emb)
if oracle_num_speakers:
max_num_speaker = oracle_num_speakers
X_conn_spkcount, rp_thres_spkcount, est_num_of_spk, lambdas, p_neigh = NMEanalysis(mat, max_num_speaker)
if emb.shape[0] > MIN_SAMPLES:
X_conn_from_dist = get_X_conn_from_dist(mat, p_neigh)
else:
X_conn_from_dist = mat
if oracle_num_speakers:
est_num_of_spk = oracle_num_speakers
est_num_spks_out_list.append([key, str(est_num_of_spk)])
# Perform spectral clustering
spectral_model = sklearn_SpectralClustering(
affinity='precomputed',
eigen_solver='amg',
random_state=0,
n_jobs=-1,
n_clusters=est_num_of_spk,
eigen_tol=1e-10,
)
Y = spectral_model.fit_predict(X_conn_from_dist)
return Y | a3b967251683da1e29004a937625d7006a0519ed | 2,439 |
import numpy as np
import torch
def gauss_distance(sample_set, query_set, unlabeled_set=None):
""" (experimental) function to try different approaches to model prototypes as gaussians
Args:
sample_set: features extracted from the sample set
query_set: features extracted from the query set
        unlabeled_set: features extracted from the unlabeled set
"""
b, n, k, c = sample_set.size()
sample_set_std = sample_set.std(2).view(b, 1, n, c)
sample_set_mean = sample_set.mean(2).view(b, 1, n, c)
query_set = query_set.view(b, n * k, 1, c)
d = (query_set - sample_set_mean) / sample_set_std
return -torch.sum(d ** 2, 3) / np.sqrt(c) | b7583988d79d70bda9c3ab6ee0690042645ed714 | 2,440 |
def make_mps_left(mps,truncate_mbd=1e100,split_s=False):
"""
Put an mps into left canonical form
Args:
mps : list of mps tensors
The MPS stored as a list of mps tensors
Kwargs:
truncate_mbd : int
The maximum bond dimension to which the
mps should be truncated
Returns:
mps : list of mps tensors
The resulting left-canonicalized MPS
"""
# Figure out size of mps
N = len(mps)
    # Sweep left to right, moving the gauge right one site at a time
for site in range(N-1):
#tmpprint('\t\t\t\tSite: {}'.format(site))
mps = move_gauge_right(mps,site,
truncate_mbd=truncate_mbd,
return_ent=False,
return_wgt=False,
split_s=split_s)
# Remove empty indices at the ends of the mps
mps = remove_empty_ends(mps)
# Return results
return mps | f2de408b82877050bf24a822c54b4520dad40f2e | 2,441 |
def word_after(line, word):
"""'a black sheep', 'black' -> 'sheep'"""
return line.split(word, 1)[-1].split(' ', 1)[0] | cfa16244d00af8556d7955b7edeb90bac0a213ba | 2,442 |
def domain_in_domain(subdomain, domain):
"""Returns try if subdomain is a sub-domain of domain.
subdomain
A *reversed* list of strings returned by :func:`split_domain`
domain
A *reversed* list of strings as returned by :func:`split_domain`
For example::
>>> domain_in_domain(['com', 'example'],
... ['com', 'example', 'www'])
True"""
if len(subdomain) <= len(domain):
i = 0
for d in subdomain:
if d != domain[i]:
return False
i += 1
return True
else:
return False | cb1b3a3f899f13c13d4168c88ca5b9d4ee345e47 | 2,443 |
def polygon_from_boundary(xs, ys, xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0, xtol=0.0):
"""Polygon within box left of boundary given by (xs, ys)
xs, ys: coordinates of boundary (ys ordered increasingly)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
xs[xs > xmax-xtol] = xmax
xs[xs < xmin+xtol] = xmin
index = -1
while xs[index] == xmin:
index -= 1
if index < -2:
xs, ys = xs[:index+2], ys[:index+2]
    vertices = list(zip(xs, ys))
if len(xs) == 1:
vertices.append((xs[0], ymax))
vertices.append((xmin, ymax))
elif xs[-1] >= xmax-xtol:
if xs[-1] < xmax:
vertices.append((xmax, ys[-1]))
if ys[-1] < ymax:
vertices.append((xmax, ymax))
vertices.append((xmin, ymax))
elif xs[-1] > xmin:
vertices.append((xmin, ys[-1]))
if (xs[0] > xmin) or (ys[0] > ymin):
vertices.append((xmin, ymin))
if ys[0] > ymin:
vertices.append((xs[0], ymin))
vertices = np.asarray(vertices)
return vertices | 4e75cd8b11038432224836b427658226d4c820d7 | 2,444 |
def is_degenerate(op, tol=1e-12):
"""Check if operator has any degenerate eigenvalues, determined relative
to mean spacing of all eigenvalues.
Parameters
----------
op : operator or 1d-array
Operator or assumed eigenvalues to check degeneracy for.
tol : float
How much closer than evenly spaced the eigenvalue gap has to be
to count as degenerate.
Returns
-------
n_dgen : int
Number of degenerate eigenvalues.
"""
op = np.asarray(op)
if op.ndim != 1:
evals = eigvalsh(op)
else:
evals = op
l_gaps = evals[1:] - evals[:-1]
l_tol = tol * (evals[-1] - evals[0]) / op.shape[0]
return np.count_nonzero(abs(l_gaps) < l_tol) | 2d2672c711c1e4320de151484cdeb7463cf0abd8 | 2,445 |
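A small usage sketch of `is_degenerate`, passing pre-computed eigenvalues directly (the 1d-array branch), so no diagonalization is needed.

```python
import numpy as np

# The repeated eigenvalue 1.0 gives a zero gap, far smaller than the mean
# level spacing, so one degenerate gap is reported.
print(is_degenerate(np.array([0.0, 1.0, 1.0, 2.0])))  # 1
```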
def _get_skip_props(mo, include_operational=False, version_filter=True):
"""
Internal function to skip mo property if not to be considered for sync.
"""
skip_props = []
for prop in mo.prop_meta:
mo_property_meta = mo.prop_meta[prop]
if mo_property_meta is None:
continue
# not include operational property
if not include_operational:
if mo_property_meta.access in (MoPropertyMeta.INTERNAL,
MoPropertyMeta.READ_ONLY):
skip_props.append(prop)
# checks if property is part of current or earlier ucsm schema
if version_filter:
version = mo.get_handle().version
if version is None or version < mo_property_meta.version or \
mo_property_meta.access == MoPropertyMeta.INTERNAL:
skip_props.append(prop)
return skip_props | dd24798c84f47a954eb324092c3cfb46c23a062e | 2,446 |
def generate_split_problem():
"""Generates a 'Split' problem configuration.
Returns (environment, robot, start configuration, goal configuration)."""
walls = [rectangle(0, 400, 0, 10), rectangle(0, 400, 290, 300),
rectangle(0, 10, 0, 300), rectangle(390, 400, 0, 300),
rectangle(180, 220, 100, 200)]
split_environment = Environment(walls)
robot_geometry = Polygon([(-15, -15), (-15, 15), (15, 15), (15, -15)])
robot = Robot(robot_geometry)
start = np.array([50, 150, 0])
goal = np.array([350, 150, 0])
return split_environment, robot, start, goal | a2a3ab0495dcf5a109ed2eb2e92bb0db424edd53 | 2,447 |
def problem_generator(difficulty=3):
"""
This function generates mathematical expressions as string. It is not very
smart and will generate expressions that have answers the lex function
cannot accept.
"""
operators = ["/", "*", "+", "-"]
numeric_lim = difficulty * 7
output = ""
for i in range(difficulty + 3):
if i % 2 == 0:
output += str(randint(1, numeric_lim)) + " "
else:
output += operators[randint(0, len(operators) - 1)] + " "
if output[len(output) - 2] in operators:
output += str(randint(1, numeric_lim))
return output | f1859784a065a22adb83c33d46623fcafa470096 | 2,448 |
from typing import Tuple
import math
def logical_factory_dimensions(params: Parameters
) -> Tuple[int, int, float]:
"""Determine the width, height, depth of the magic state factory."""
if params.use_t_t_distillation:
return 12*2, 8*2, 6 # Four T2 factories
l1_distance = params.l1_distance
l2_distance = params.code_distance
t1_height = 4 * l1_distance / l2_distance
t1_width = 8 * l1_distance / l2_distance
t1_depth = 5.75 * l1_distance / l2_distance
ccz_depth = 5
ccz_height = 6
ccz_width = 3
storage_width = 2 * l1_distance / l2_distance
ccz_rate = 1 / ccz_depth
t1_rate = 1 / t1_depth
t1_factories = int(math.ceil((ccz_rate * 8) / t1_rate))
t1_factory_column_height = t1_height * math.ceil(t1_factories / 2)
width = int(math.ceil(t1_width * 2 + ccz_width + storage_width))
height = int(math.ceil(max(ccz_height, t1_factory_column_height)))
depth = max(ccz_depth, t1_depth)
return width, height, depth | 95846f5f58e5c342ca81b3f51a9bacfb31bf777a | 2,449 |
def data():
"""
Data providing function:
This function is separated from create_model() so that hyperopt
won't reload data for each evaluation run.
"""
d_file = 'data/zinc_100k.h5'
data_train, data_test, props_train, props_test, tokens = utils.load_dataset(d_file, "TRANSFORMER", True)
x_train = [data_train, data_train, props_train]
y_train = None
x_test = [data_test, data_test, props_test]
y_test = None
return x_train, y_train, x_test, y_test | 354ace6fdfc9f8f9ec0702ea8a0a03853b8d7f49 | 2,451 |
def mongos_program(logger, job_num, executable=None, process_kwargs=None, mongos_options=None): # pylint: disable=too-many-arguments
"""Return a Process instance that starts a mongos with arguments constructed from 'kwargs'."""
args = [executable]
mongos_options = mongos_options.copy()
if "port" not in mongos_options:
mongos_options["port"] = network.PortAllocator.next_fixture_port(job_num)
suite_set_parameters = mongos_options.get("set_parameters", {})
_apply_set_parameters(args, suite_set_parameters)
mongos_options.pop("set_parameters")
# Apply the rest of the command line arguments.
_apply_kwargs(args, mongos_options)
_set_keyfile_permissions(mongos_options)
process_kwargs = make_historic(utils.default_if_none(process_kwargs, {}))
return make_process(logger, args, **process_kwargs), mongos_options["port"] | a342db697d39e48ea0261e5ddfd89bdd99b6dced | 2,453 |
def markerBeings():
"""标记众生区块
Content-Type: application/json
{
"token":"",
"block_id":""
}
    Returns json
{
"is_success":bool,
"data":
"""
try:
info = request.get_json()
# 验证token
token = info["token"]
if not auth.verifyToken(token):
http_message = HttpMessage(is_success=False, data="Token无效")
return http_message.getJson()
        # Get the list
beings_block_id = info["block_id"]
if blockOfGarbage.addGarbageBlockQueue(beings_block_id):
            http_message = HttpMessage(is_success=True, data="Marked successfully")
return http_message.getJson()
else:
http_message = HttpMessage(is_success=False, data="该区块已经被标记")
return http_message.getJson()
except Exception as err:
print(err)
http_message = HttpMessage(is_success=False, data="参数错误")
return http_message.getJson() | 20b73bec5f6c27e365a90fe466e34445592f2bd3 | 2,454 |
def get_charges_with_openff(mol):
"""Starting from a openff molecule returns atomic charges
If the charges are already defined will return them without
change
I not will calculate am1bcc charges
Parameters
------------
mol : openff.toolkit.topology.Molecule
Examples
---------
from openff.toolkit.topology import Molecule
mol = Molecule.from_file(SOME_FILE)
# mol = Molecule.from_smiles(SMILES)
get_charges_with_openff(mol)
Returns
------------
np.array(float)
charges in atomic units (elementary charge)
Notes
----------
Some extra conformers may be generated because of
https://github.com/openforcefield/openff-toolkit/issues/492
"""
if (mol.partial_charges is None) or (np.allclose(
mol.partial_charges / unit.elementary_charge,
np.zeros([mol.n_particles]))):
# NOTE: generate_conformers seems to be required for some molecules
# https://github.com/openforcefield/openff-toolkit/issues/492
mol.generate_conformers(n_conformers=10)
mol.compute_partial_charges_am1bcc()
return mol.partial_charges.value_in_unit(unit.elementary_charge) | fa539ef60fda28a4983d632830f3a8ea813f5486 | 2,455 |
def parse_mdout(file):
"""
    Return energies from an AMBER ``mdout`` file.
Parameters
----------
file : os.PathLike
Name of Amber output file
Returns
-------
energies : dict
A dictionary containing VDW, electrostatic, bond, angle, dihedral, V14, E14, and total energy.
"""
vdw, ele, bnd, ang, dih, v14, e14 = [], [], [], [], [], [], []
restraint = []
with open(file, "r") as f:
for line in f.readlines():
words = line.rstrip().split()
if len(words) > 1:
if "BOND" in words[0]:
bnd.append(float(words[2]))
ang.append(float(words[5]))
dih.append(float(words[8]))
if "VDWAALS" in words[0]:
vdw.append(float(words[2]))
ele.append(float(words[5]))
if "1-4" in words[0]:
v14.append(float(words[3]))
e14.append(float(words[7]))
restraint.append(float(words[10]))
energies = {
"Bond": bnd,
"Angle": ang,
"Dihedral": dih,
"V14": v14,
"E14": e14,
"VDW": vdw,
"Ele": ele,
"Restraint": restraint,
"Total": [sum(x) for x in zip(bnd, ang, dih, v14, e14, vdw, ele)],
}
return energies | 9310b0220d4b96b65e3484adf49edebda039dfad | 2,456 |
import networkx as nx
from propy.AAComposition import GetSpectrumDict
from typing import Optional
from typing import List
def aa_spectrum(
G: nx.Graph, aggregation_type: Optional[List[str]] = None
) -> nx.Graph:
"""
Calculate the spectrum descriptors of 3-mers for a given protein. Contains the composition values of 8000 3-mers
:param G: Protein Graph to featurise
:type G: nx.Graph
:param aggregation_type: Aggregation types to use over chains
:type aggregation_type: List[Optional[str]]
:return: Protein Graph with aa_spectrum feature added. G.graph["aa_spectrum_{chain | aggregation_type}"]
:rtype: nx.Graph
"""
func = GetSpectrumDict
feature_name = "aa_spectrum"
return compute_propy_feature(
G,
func=func,
feature_name=feature_name,
aggregation_type=aggregation_type,
) | dfd657452fda009c4420566f1456dc4bd32271ac | 2,457 |
import hashlib
def get_click_data(api, campaign_id):
"""Return a list of all clicks for a given campaign."""
rawEvents = api.campaigns.get(campaign_id).as_dict()["timeline"]
clicks = list() # Holds list of all users that clicked.
for rawEvent in rawEvents:
if rawEvent["message"] == "Clicked Link":
click = dict()
# Builds out click document.
click["user"] = hashlib.sha256(
rawEvent["email"].encode("utf-8")
).hexdigest()
click["source_ip"] = rawEvent["details"]["browser"]["address"]
click["time"] = rawEvent["time"]
click["application"] = get_application(rawEvent)
clicks.append(click)
return clicks | 641836d73b2c5b2180a98ffc61d0382be74d2618 | 2,458 |
import pandas as pd
def multi_label_column_to_binary_columns(data_frame: pd.DataFrame, column: str):
"""
assuming that the column contains array objects,
returns a new dataframe with binary columns (True/False)
indicating presence of each distinct array element.
:data_frame: the pandas DataFrame
:column: the column with array values
:return: a new DataFrame with binary columns
"""
label_unique_values = data_frame[column].str.replace(
"'", '').str.split(',').explode().to_frame()
drop_identical_values = label_unique_values[column].drop_duplicates(
keep="first").tolist()
multi_label_data_frame = pd.concat([data_frame,
pd.crosstab(label_unique_values.index,
label_unique_values[column])[drop_identical_values]], axis=1)
return multi_label_data_frame | ab626530181740fc941e8efbbaf091bc06f0a0d8 | 2,459 |
def _GetBuilderPlatforms(builders, waterfall):
"""Get a list of PerfBuilder objects for the given builders or waterfall.
Otherwise, just return all platforms.
"""
if builders:
return {b for b in bot_platforms.ALL_PLATFORMS if b.name in
builders}
elif waterfall == 'perf':
return bot_platforms.OFFICIAL_PLATFORMS
elif waterfall == 'perf-fyi':
return bot_platforms.FYI_PLATFORMS
else:
return bot_platforms.ALL_PLATFORMS | f6d7e636bcbd941b1dde8949c68be295e0aef227 | 2,462 |
def meeting_guide(context):
"""
    Display the ReactJS-driven Meeting Guide list.
"""
settings = get_meeting_guide_settings()
json_meeting_guide_settings = json_dumps(settings)
return {
"meeting_guide_settings": json_meeting_guide_settings,
"mapbox_key": settings["map"]["key"],
"timezone": settings["timezone"],
} | 46d8d20fcb2bd4dacd45a510f51eaea292da0da6 | 2,463 |
import multiprocessing
def generate_input_fn(file_path, shuffle, batch_size, num_epochs):
"""Generates a data input function.
Args:
file_path: Path to the data.
shuffle: Boolean flag specifying if data should be shuffled.
batch_size: Number of records to be read at a time.
num_epochs: Number of times to go through all of the records.
Returns:
        A function used by `Estimator` to read data.
"""
def _input_fn():
"""Returns features and target from input data.
Defines the input dataset, specifies how to read the data, and reads it.
Returns:
            A tuple of a dictionary containing the features and the target.
"""
num_threads = multiprocessing.cpu_count()
dataset = tf.data.TextLineDataset(filenames=[file_path])
dataset = dataset.skip(1)
dataset = dataset.map(lambda x: parse_csv(
tf.expand_dims(x, -1)), num_parallel_calls=num_threads)
dataset = dataset.map(get_features_target_tuple,
num_parallel_calls=num_threads)
if shuffle:
dataset = dataset.shuffle(SHUFFLE_BUFFER_SIZE)
dataset = dataset.batch(batch_size)
dataset = dataset.repeat(num_epochs)
dataset = dataset.prefetch(1)
iterator = dataset.make_one_shot_iterator()
features, target = iterator.get_next()
return features, target
return _input_fn | abf9dd66000eca392344f9663fa8418b9e596098 | 2,464 |
import numpy
def amp_phase_to_complex(lookup_table):
"""
This constructs the function to convert from AMP8I_PHS8I format data to complex64 data.
Parameters
----------
lookup_table : numpy.ndarray
Returns
-------
callable
"""
_validate_lookup(lookup_table)
def converter(data):
if not isinstance(data, numpy.ndarray):
raise ValueError('requires a numpy.ndarray, got {}'.format(type(data)))
if data.dtype.name != 'uint8':
raise ValueError('requires a numpy.ndarray of uint8 dtype, got {}'.format(data.dtype.name))
        if len(data.shape) != 3:
            raise ValueError('Requires a three-dimensional numpy.ndarray (with band '
                             'in the last dimension), got shape {}'.format(data.shape))
        out = numpy.zeros((data.shape[0], data.shape[1], data.shape[2]//2), dtype=numpy.complex64)
amp = lookup_table[data[:, :, 0::2]]
theta = data[:, :, 1::2]*(2*numpy.pi/256)
out.real = amp*numpy.cos(theta)
out.imag = amp*numpy.sin(theta)
return out
return converter | dea38027654a5a2b6ab974943dbdc57b36835a8e | 2,465 |
from pandas import concat
def combine_aqs_cmaq(model, obs):
"""Short summary.
Parameters
----------
model : type
Description of parameter `model`.
obs : type
Description of parameter `obs`.
Returns
-------
type
Description of returned object.
"""
g = obs.df.groupby('Species')
    comparelist = sorted(obs.df.Species.unique())
dfs = []
for i in comparelist:
if (i == 'OZONE'): # & ('O3' in model.keys):
print('Interpolating Ozone:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='O3', aqs_param=i)
print(fac)
cmaq = model.get_var(lay=0, param='O3').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
# df.Obs, df.CMAQ = df.Obs, df.CMAQ
df.Units = 'PPB'
dfs.append(df)
elif i == 'PM2.5':
if ('PM25_TOT' in model.keys) | ('ASO4J' in model.keys):
print('Interpolating PM2.5:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='PM25', aqs_param=i)
cmaq = model.get_var(lay=0, param='PM25').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'CO':
if 'CO' in model.keys:
print('Interpolating CO:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='CO', aqs_param=i)
cmaq = model.get_var(lay=0, param='CO').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'NOY':
if 'NOY' in model.keys:
print('Interpolating NOY:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='NOY', aqs_param=i)
cmaq = model.get_var(lay=0, param='NOY').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'SO2':
if 'SO2' in model.keys:
print('Interpolating SO2')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='SO2', aqs_param=i)
cmaq = model.get_var(lay=0, param='SO2').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'NOX':
if ('NO' in model.keys) | ('NO2' in model.keys):
print('Interpolating NOX:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='NOX', aqs_param=i)
cmaq = model.get_var(lay=0, param='NOX').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'NO':
if ('NO' in model.keys):
print('Interpolating NO:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='NO', aqs_param=i)
cmaq = model.get_var(lay=0, param='NO').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'NO2':
if ('NO2' in model.keys):
print('Interpolating NO2:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='NO2', aqs_param=i)
cmaq = model.get_var(lay=0, param='NO2').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'SO4f':
if ('PM25_SO4' in model.keys) | ('ASO4J' in model.keys) | ('ASO4I' in model.keys):
print('Interpolating PSO4:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='SO4f', aqs_param=i)
cmaq = model.get_var(lay=0, param='SO4f').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'PM10':
if ('PM_TOTAL' in model.keys) or ('ASO4K' in model.keys):
print('Interpolating PM10:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='PM10', aqs_param=i)
cmaq = model.get_var(lay=0, param='PM10').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'NO3f':
if ('PM25_NO3' in model.keys) | ('ANO3J' in model.keys) | ('ANO3I' in model.keys):
print('Interpolating PNO3:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='NO3f', aqs_param=i)
cmaq = model.get_var(lay=0, param='NO3F').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'ECf':
if ('PM25_EC' in model.keys) | ('AECI' in model.keys) | ('AECJ' in model.keys):
print('Interpolating PEC:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='ECf', aqs_param=i)
cmaq = model.get_var(lay=0, param='ECf').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'OCf':
if ('APOCJ' in model.keys):
print('Interpolating OCf:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='OCf', improve_param=i)
cmaqvar = model.get_var(lay=0, param='OC').compute() * fac
df = interpo.interp_to_obs(cmaqvar, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'ETHANE':
if ('ETHA' in model.keys):
print('Interpolating Ethane:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='ETHA', aqs_param=i)
cmaq = model.get_var(lay=0, param='ETHA').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'BENZENE':
if ('BENZENE' in model.keys):
print('Interpolating BENZENE:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='BENZENE', aqs_param=i)
cmaq = model.get_var(lay=0, param='BENZENE').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'TOLUENE':
if ('TOL' in model.keys):
print('Interpolating Toluene:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='TOL', aqs_param=i)
cmaq = model.get_var(lay=0, param='TOL').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'ISOPRENE':
if ('ISOP' in model.keys):
print('Interpolating Isoprene:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='ISOP', aqs_param=i)
cmaq = model.get_var(lay=0, param='ISOP').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'O-XYLENE':
if ('XYL' in model.keys):
print('Interpolating Xylene')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='XYL', aqs_param=i)
cmaq = model.get_var(lay=0, param='XYL').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'WS':
if ('WSPD10' in model.keys):
print('Interpolating WS:')
df = g.get_group(i)
cmaq = model.get_var(lay=0, param='WSPD10')
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'TEMP':
if 'TEMP2' in model.keys:
print('Interpolating TEMP:')
df = g.get_group(i)
cmaq = model.get_var(lay=0, param='TEMP2')
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'WD':
if ('WDIR10' in model.keys):
print('Interpolating WD:')
df = g.get_group(i)
cmaq = model.get_var(lay=0, param='WDIR10')
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
df = concat(dfs)
df.dropna(subset=['Obs', 'model'], inplace=True)
return df | bbc6ba6faf0f580d35674a912c245349d00d2a95 | 2,466 |
def read_1d_spikes(filename):
"""Reads one dimensional binary spike file and returns a td_event event.
The binary file is encoded as follows:
* Each spike event is represented by a 40 bit number.
* First 16 bits (bits 39-24) represent the neuronID.
* Bit 23 represents the sign of spike event: 0=>OFF event, 1=>ON event.
* the last 23 bits (bits 22-0) represent the spike event timestamp in
microseconds.
Parameters
----------
filename : str
name of spike file.
Returns
-------
Event
spike event.
Examples
--------
>>> td_event = read_1d_spikes(file_path)
"""
with open(filename, 'rb') as input_file:
input_byte_array = input_file.read()
input_as_int = np.asarray([x for x in input_byte_array])
x_event = (input_as_int[0::5] << 8) | input_as_int[1::5]
c_event = input_as_int[2::5] >> 7
t_event = (
(input_as_int[2::5] << 16)
| (input_as_int[3::5] << 8)
| (input_as_int[4::5])
) & 0x7FFFFF
# convert spike times to ms
return Event(x_event, None, c_event, t_event / 1000) | 034d6de1e38734fcfe131027956d781752163c33 | 2,468 |
def _parse_step_log(lines):
"""Parse the syslog from the ``hadoop jar`` command.
Returns a dictionary which potentially contains the following keys:
application_id: a string like 'application_1449857544442_0002'. Only
set on YARN
counters: a map from counter group -> counter -> amount, or None if
no counters found (only YARN prints counters)
errors: a list of errors, with the following keys:
hadoop_error:
                message: lines of error, as a string
start_line: first line of log containing the error (0-indexed)
num_lines: # of lines of log containing the error
attempt_id: ID of task attempt with this error
job_id: a string like 'job_201512112247_0003'. Should always be set
output_dir: a URI like 'hdfs:///user/hadoop/tmp/my-output-dir'. Should
always be set on success.
"""
return _parse_step_log_from_log4j_records(
_parse_hadoop_log4j_records(lines)) | 251b0e89157a1c3fa152cbf50daa5e0b10e17bcc | 2,469 |
import re
def is_regex(regex, invert=False):
"""Test that value matches the given regex.
The regular expression is searched against the value, so a match
in the middle of the value will succeed. To specifically match
the beginning or the whole regex, use anchor characters. If
invert is true, then matching the regex will cause the test to
fail.
"""
# pylint: disable=unused-argument # args defined by test definition
rex = re.compile(regex)
def is_regex_test(conf, path, value):
match = rex.search(value)
if invert and match:
return u'"{0}" matches /{1}/'.format(value, regex)
if not invert and not match:
return u'"{0}" does not match /{1}/'.format(value, regex)
return None
return is_regex_test | 0db71b3dae2b2013650b65ecacfe6aed0cd8366b | 2,470 |
def get_obj(obj):
"""Opens the url of `app_obj`, builds the object from the page and
returns it.
"""
open_obj(obj)
return internal_ui_operations.build_obj(obj) | f6deddc62f7f3f59ab93b553c64b758340b5fa6c | 2,471 |
def process(frame):
"""Process initial frame and tag recognized objects."""
# 1. Convert initial frame to grayscale
grayframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# For every model:
for model, color, parameters in (
(MODEL_FACE, (255, 255, 0), {'scaleFactor': 1.1, 'minNeighbors': 5, 'minSize': (30, 30)}),
(MODEL_EYE, (0, 0, 255), {'scaleFactor': 1.1, 'minNeighbors': 5, 'minSize': (20, 20)}),
*((model, (0, 255, 0), {'scaleFactor': 1.1, 'minNeighbors': 5, 'minSize': (20, 20)}) for model in MODELS_PLATE),
):
# 2. Apply model, recognize objects
objects = model.detectMultiScale(grayframe, **parameters)
# 3. For every recognized object, draw a rectangle around it
for (x, y, w, h) in objects:
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2) # BGR
# 4. Return initial color frame with rectangles
return frame | c0c977d522f292d6cbb03c4e64eabc3d11342e0f | 2,472 |
import datetime
def tzoffset():
"""UTC to America/New_York offset."""
return datetime.timedelta(hours=5) | 05e883eeae63ad1dd7b287dd0b331b13b11b8cd1 | 2,473 |
def KICmag(koi,band):
"""
Returns the apparent magnitude of given KOI star in given band. returns KICmags(koi)[band]
"""
return KICmags(koi)[band] | 767ff04c9319acd698daa05f084e8ee9c456a628 | 2,475 |
from typing import List
def list_to_decimal(nums: List[int]) -> int:
"""Accept a list of positive integers in the range(0, 10)
and return a integer where each int of the given list represents
decimal place values from first element to last. E.g
[1,7,5] => 175
[0,3,1,2] => 312
Place values are 10**n where n represents the digit position
Eg to calculate 1345, we have 5 1's, 4 10's, 3 100's and 1 1000's
1, 3 , 4 , 5
1000's, 100's, 10's, 1's
"""
for num in nums:
if isinstance(num, bool) or not isinstance(num, int):
raise TypeError
elif not num in range(0, 10):
raise ValueError
return int("".join(map(str, nums))) | 7727ce610987fc9da03a5e23ec8674d1deb7c7f0 | 2,476 |
def str_to_bool(v):
"""
:type v: str
"""
return v.lower() in ("true", "1") | 3eb7ae9e1fe040504ea57c65ed1cbd48be9269cf | 2,477 |
def home_event_manager():
"""
Route for alumni's home
:return:
"""
if "idUsers" in session and session["UserTypes_idUserTypes"] == 2:
return redirect("/events")
else:
session.clear()
return redirect("/login") | 7facf96fbd5d8bbcb7fb867cac7a150a31185dde | 2,478 |
import hashlib
def md5_encode(text):
""" 把數據 md5 化 """
md5 = hashlib.md5()
md5.update(text.encode('utf-8'))
encodedStr = md5.hexdigest().upper()
return encodedStr | b08f656f5ab0858accfbf54e03d95635a3598e13 | 2,480 |
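A usage sketch of `md5_encode`; the digest shown is the well-known MD5 of "hello", uppercased by the function.

```python
print(md5_encode("hello"))  # 5D41402ABC4B2A76B9719D911017C592
```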
from typing import Counter
def _ngrams(segment, n):
"""Extracts n-grams from an input segment.
Parameters
----------
segment: list
Text segment from which n-grams will be extracted.
n: int
Order of n-gram.
Returns
-------
ngram_counts: Counter
Contain all the nth n-grams in segment with a count of how many times each n-gram occurred.
"""
ngram_counts = Counter()
for i in range(0, len(segment) - n + 1):
ngram = tuple(segment[i:i + n])
ngram_counts[ngram] += 1
return ngram_counts | 580cad34eb03359988eb2ce6f77dad246166b890 | 2,481 |
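A short usage sketch of `_ngrams` on a whitespace-tokenized segment.

```python
segment = "the cat sat on the mat".split()
print(_ngrams(segment, 2))
# Counter({('the', 'cat'): 1, ('cat', 'sat'): 1, ('sat', 'on'): 1,
#          ('on', 'the'): 1, ('the', 'mat'): 1})
```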
def build_varint(val):
"""Build a protobuf varint for the given value"""
data = []
while val > 127:
data.append((val & 127) | 128)
val >>= 7
data.append(val)
return bytes(data) | 46f7cd98b6858c003cd66d87ba9ec13041fcf9db | 2,482 |
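A usage sketch of `build_varint`: 300 encodes to the two-byte varint b'\xac\x02', matching the protobuf wire format.

```python
print(build_varint(1))    # b'\x01'
print(build_varint(300))  # b'\xac\x02'
```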
import re
def python_safe_name(s):
"""
Return a name derived from string `s` safe to use as a Python function name.
For example:
>>> s = "not `\\a /`good` -safe name ??"
>>> assert python_safe_name(s) == 'not_good_safe_name'
"""
no_punctuation = re.compile(r'[\W_]', re.MULTILINE).sub
s = s.lower()
s = no_punctuation(' ', s)
s = '_'.join(s.split())
if py2 and isinstance(s, unicode):
s = s.encode('ascii', 'ignore')
return s | 463d7c3bf4f22449a0a1c28897654d3ccb5e94cb | 2,483 |
def hash_bytes(hash_type: SupportedHashes, bytes_param: bytes) -> bytes:
"""Hash arbitrary bytes using a supported algo of your choice.
Args:
hash_type: SupportedHashes enum type
bytes_param: bytes to be hashed
Returns:
hashed bytes
"""
hasher = get_hash_obj(hash_type)
hasher.update(bytes_param)
return hasher.digest() | 8f9c05fd050e6f89d6bc5213c03f4002cc341cb0 | 2,484 |
def analyze(osi, num_inc=1, dt=None, dt_min=None, dt_max=None, jd=None):
"""
Performs an analysis step.
Returns 0 if successful, and <0 if fail
Parameters
----------
osi
num_inc
dt
dt_min
dt_max
jd
Returns
-------
"""
op_type = 'analyze'
if dt is None:
parameters = [int(num_inc)]
elif dt_min is None:
parameters = [int(num_inc), float(dt)]
else:
parameters = [int(num_inc), float(dt), dt_min, dt_max, jd]
return osi.to_process(op_type, parameters) | 6c748a49c5e54cf88a04002d98995f4fd90d5130 | 2,485 |
from typing import Any
import importlib
def load_class(path: str) -> Any:
"""
Load a class at the provided location. Path is a string of the form: path.to.module.class and conform to the python
import conventions.
:param path: string pointing to the class to load
:return: the requested class object
"""
try:
log.info('loading class : [{}]'.format(path))
module_name, class_name = path.rsplit('.', 1)
mod = importlib.import_module(module_name)
return getattr(mod, class_name)
except Exception:
raise ProcessingError('Class loading error : expecting path.to.module.ClassName, got : {}'.format(path)) | c6ae2cd20f71a68a6ec05ef5693656d0db7f2703 | 2,486 |
def dcg_at_k(r, k, method=0):
"""Score is discounted cumulative gain (dcg)
Relevance is positive real values. Can use binary
as the previous methods.
Example from
http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
# >>> r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]
# >>> dcg_at_k(r, 1)
# 3.0
# >>> dcg_at_k(r, 1, method=1)
# 3.0
# >>> dcg_at_k(r, 2)
# 5.0
# >>> dcg_at_k(r, 2, method=1)
# 4.2618595071429155
# >>> dcg_at_k(r, 10)
# 9.6051177391888114
# >>> dcg_at_k(r, 11)
9.6051177391888114
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
k: Number of results to consider
method: If 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...]
If 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]
Returns:
Discounted cumulative gain
"""
r = np.asfarray(r)[:k]
if r.size:
if method == 0:
return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
elif method == 1:
return np.sum(r / np.log2(np.arange(2, r.size + 2)))
else:
raise ValueError('method must be 0 or 1.')
return 0. | a52b2e3735461ea7749f092aa35cfe8a55f33e3f | 2,487 |
def metric_section(data_model, metric, level) -> str:
"""Return the metric as Markdown section."""
markdown = markdown_header(metric["name"], level=level, index=True)
markdown += markdown_paragraph(metric["description"])
markdown += definition_list("Default target", metric_target(metric))
markdown += definition_list("Scales", *metric_scales(metric))
markdown += definition_list("Default tags", *metric["tags"])
markdown += "```{admonition} Supporting sources\n"
for source in metric["sources"]:
source_name = data_model["sources"][source]["name"]
default = " (default)" if source == metric.get("default_source", "no default source") else ""
markdown += f"- [{source_name}]({metric_source_slug(metric['name'], source_name)}){default}\n"
markdown += "```\n"
return markdown | 6c02e707b6de7c2d89e7eb590d3d9252f13ae9b7 | 2,488 |
def MAKEFOURCC(ch0: str, ch1: str, ch2: str, ch3: str) -> int:
"""Implementation of Window's `MAKEFOURCC`.
This is simply just returning the bytes of the joined characters.
`MAKEFOURCC(*"DX10")` can also be implemented by `Bytes(b"DX10")`.
Args:
ch0 (str): First char
ch1 (str): Second char
ch2 (str): Third char
ch3 (str): Fourth char
Returns:
int: The integer representation of given characters.
**Reference**:
`Microsoft <https://goo.gl/bjtMFA>`__
"""
return (ord(ch0) << 0) | (ord(ch1) << 8) | (ord(ch2) << 16) | (ord(ch3) << 24) | 91afd9dcc8f1cd8c5ef167bdb560c8bf2d89b228 | 2,491 |
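A usage sketch showing that the returned value is simply the little-endian interpretation of the four ASCII bytes.

```python
code = MAKEFOURCC(*"DX10")
print(code)                                       # 808540228
print(code == int.from_bytes(b"DX10", "little"))  # True
```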
def sort_configs(configs): # pylint: disable=R0912
"""Sort configs by global/package/node, then by package name, then by node name
Attributes:
configs (list): List of config dicts
"""
result = []
# Find all unique keys and sort alphabetically
_keys = []
for config in configs:
if config["key"] not in _keys:
_keys.append(config["key"])
_keys = sorted(_keys, key=str.lower)
# For each key find globals, then packages, then nodes
for key in _keys:
_packages = []
_nodes = []
for config in configs:
if config["key"] == key:
if config["type"] == "global":
result.append(config)
elif config["type"] == "package":
_packages.append(config)
elif config["type"] == "node":
_nodes.append(config)
        # Sort the package and node elements alphabetically
_package_ids = sorted([_package["id"]
for _package in _packages], key=str.lower)
for package in _package_ids:
for config in configs:
if config["key"] == key and config["type"] == "package" and config["id"] == package:
result.append(config)
break
_node_ids = sorted([_node["id"] for _node in _nodes], key=str.lower)
for node in _node_ids:
for config in configs:
if config["key"] == key and config["type"] == "node" and config["id"] == node:
result.append(config)
break
return result | 5c05214af42a81b35986f3fc0d8670fbef2e2845 | 2,492 |
def _add_student_submit(behave_sensibly):
"""Allow addition of new students
Handle both "good" and "bad" versions (to keep code DRY)
"""
try:
if behave_sensibly:
do_add_student_good(
first_name=request.forms.first_name,
last_name=request.forms.last_name,
card_info=request.forms.card_info,
)
else:
do_add_student_bad(
first_name=request.forms.first_name,
last_name=request.forms.last_name,
card_info=request.forms.card_info,
)
except psycopg2.DatabaseError:
pass
return redirect("/students") | 780b0e667a841b51b1b64c8c8156cebda3d586e9 | 2,495 |
def _get_table_reference(self, table_id):
"""Constructs a TableReference.
Args:
table_id (str): The ID of the table.
Returns:
google.cloud.bigquery.table.TableReference:
A table reference for a table in this dataset.
"""
return TableReference(self, table_id) | e92dc5fbeac84b902e50d5302539503246c39f30 | 2,496 |
def get_present_types(robots):
"""Get unique set of types present in given list"""
return {type_char for robot in robots for type_char in robot.type_chars} | 75c33e0bf5f97afe93829c51086100f8e2ba13af | 2,498 |
def _deserialize_row(params, mask):
"""
This is for stochastic vectors where some elements are forced to zero.
Such a vector is defined by a number of parameters equal to
the length of the vector minus one and minus the number of elements
forced to zero.
@param params: an array of statistical parameters
@param mask: bools such that False forces zero probability
@return: a mask-conformant list of nonnegative floats
"""
row = np.zeros(mask.shape)
row[mask] = [1.0] + np.exp(params).tolist()
row /= row.sum()
return row | 004775ef669ce7698570091c7212912d0f309bee | 2,499 |
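A usage sketch of `_deserialize_row`: with one position masked out and two free parameters set to 0 (weights exp(0) = 1), the probability mass is spread evenly over the three allowed slots.

```python
import numpy as np

mask = np.array([True, False, True, True])
params = np.zeros(2)  # len(params) == mask.sum() - 1
print(_deserialize_row(params, mask))
# [0.33333333 0.         0.33333333 0.33333333]
```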
import re
def ruru_old_log_checker(s):
"""
    Return True if the log is in the old format, False otherwise.
:param s:
:return:
"""
time_data_regex = r'[0-9]{4}\/[0-9]{2}\/[0-9]{2}\s[0-9]{2}:[0-9]{2}:[0-9]{2}'
    # In the new Ruru server log format the timestamp can be read from the first div with class d12150; in the old format it cannot
time_data = re.search(time_data_regex, str(s.find('div', class_='d12150')))
return False if time_data else True | 54f6a94dab98ef6947496b8e1f95401d99424ee2 | 2,500 |
def scale_z_by_atom(z, scale, copy=True):
"""
Parameters
----------
    z : array, shape (n_trials, n_atoms, n_times - n_times_atom + 1)
Can also be a list of n_trials LIL-sparse matrix of shape
(n_atoms, n_times - n_times_atom + 1)
The sparse activation matrix.
scale : array, shape = (n_atoms, )
The scales to apply on z.
"""
if is_list_of_lil(z):
n_trials, n_atoms, n_times_valid = get_z_shape(z)
assert n_atoms == len(scale)
if copy:
z = deepcopy(z)
for z_i in z:
for k in range(z_i.shape[0]):
z_i.data[k] = [zikt * scale[k] for zikt in z_i.data[k]]
else:
if copy:
z = z.copy()
z *= scale[None, :, None]
return z | b87368c1ea8dcd18fcbfd85ef8cde5450d5fcf33 | 2,502 |
def index():
"""
example action using the internationalization operator T and flash
rendered by views/default/index.html or views/generic.html
if you need a simple wiki simply replace the two lines below with:
return auth.wiki()
"""
if auth.is_logged_in():
# # if newly registered user is not in auth_membership add him as an administrator
if not db(db.auth_membership.user_id == auth.user_id).count() > 0:
auth.add_membership(auth.id_group(ADMIN), auth.user_id)
session.user_info = get_user_info()
response.user_info = session.user_info
if request.user_agent().is_mobile:
return response.render('../views/default/index-m.html')
else:
return response.render('../views/default/index.html') | d48d29ee65ddf064dc2f424f2be9f46da23cbd4a | 2,503 |
def compute_classification_metrics_at_ks(is_match, num_predictions, num_trgs, k_list=[5,10], meng_rui_precision=False):
"""
:param is_match: a boolean np array with size [num_predictions]
:param predicted_list:
:param true_list:
:param topk:
:return: {'precision@%d' % topk: precision_k, 'recall@%d' % topk: recall_k, 'f1_score@%d' % topk: f1, 'num_matches@%d': num_matches}
"""
assert is_match.shape[0] == num_predictions
#topk.sort()
if num_predictions == 0:
precision_ks = [0] * len(k_list)
recall_ks = [0] * len(k_list)
f1_ks = [0] * len(k_list)
num_matches_ks = [0] * len(k_list)
num_predictions_ks = [0] * len(k_list)
else:
num_matches = np.cumsum(is_match)
num_predictions_ks = []
num_matches_ks = []
precision_ks = []
recall_ks = []
f1_ks = []
for topk in k_list:
if topk == 'M':
topk = num_predictions
elif topk == 'G':
#topk = num_trgs
if num_predictions < num_trgs:
topk = num_trgs
else:
topk = num_predictions
if meng_rui_precision:
if num_predictions > topk:
num_matches_at_k = num_matches[topk-1]
num_predictions_at_k = topk
else:
num_matches_at_k = num_matches[-1]
num_predictions_at_k = num_predictions
else:
if num_predictions > topk:
num_matches_at_k = num_matches[topk - 1]
else:
num_matches_at_k = num_matches[-1]
num_predictions_at_k = topk
precision_k, recall_k, f1_k = compute_classification_metrics(num_matches_at_k, num_predictions_at_k, num_trgs)
precision_ks.append(precision_k)
recall_ks.append(recall_k)
f1_ks.append(f1_k)
num_matches_ks.append(num_matches_at_k)
num_predictions_ks.append(num_predictions_at_k)
return precision_ks, recall_ks, f1_ks, num_matches_ks, num_predictions_ks | 189a6491e1b5d8e3bf8869586b69667eb1b9d9c9 | 2,504 |
def compute_dose_median_scores(null_dist_medians, dose_list):
"""
Align median scores per dose, this function return a dictionary,
with keys as dose numbers and values as all median scores for each dose
"""
median_scores_per_dose = {}
for dose in dose_list:
median_list = []
        for keys in null_dist_medians:
            dose_median_list = null_dist_medians[keys][dose-1]
median_list += dose_median_list
median_scores_per_dose[dose] = median_list
return median_scores_per_dose | ec23f186c10a6921cdae9d4965a51343dc78011e | 2,506 |
def generate_converter(name, taskdep, **options) :
"""
    taskdep is the list of tasks that should be executed before this program.
    task_html_generator is the task that can generate the html; we need to extract the result from that task.
    taskname is the name of the generated task.
"""
converter = options.get('converter',
Pandoc("-f", "html", "-t", "markdown", "--wrap=none"))
flowdep = options.get('flowdep', taskdep[0])
return lift_process_to_task(name, converter, taskdep, flowdep=flowdep) | 3e60abfcdabfb0c35ff8b9692b21b27af2300da8 | 2,507 |
def symmetric_product(tensor):
"""
Symmetric outer product of tensor
"""
shape = tensor.size()
idx = list(range(len(shape)))
idx[-1], idx[-2] = idx[-2], idx[-1]
return 0.5 * (tensor + tensor.permute(*idx)) | 4f96ab5f0bd41080352b1e5e806b6a73b3506950 | 2,508 |
import torch
def prep_image(img, inp_dim):
"""
Function:
Prepare image for inputting to the neural network.
Arguments:
img -- image it self
inp_dim -- dimension for resize the image (input dimension)
Return:
img -- image after preparing
"""
img = (letterbox_image(img, (inp_dim, inp_dim)))
img = img[:,:,::-1].transpose((2,0,1)).copy()
img = torch.from_numpy(img).float().div(255.0).unsqueeze(0)
return img | 4f32717cb06b32cab2e0b92d3a24a9a665baf27b | 2,509 |
async def auth_check(request):
"""
No-op view to set the session cookie, this is used by websocket since the "Set-Cookie" header
doesn't work with 101 upgrade
"""
return json_response(status='ok') | 60419c8d32bbc41525ebf44d4d7bcabe8a117df0 | 2,510 |
def check_constraint(term_freq,top_terms,top_terms_test_freq):
"""
Check the constraint 12%-30% for the test set
    term_freq is the dictionary of all term frequencies
top_terms is the list of terms we care about (first 300?)
top_terms_freq is an array of frequency of top terms in test set.
RETURN
True if constraint satisfied, False otherwise
"""
return check_constraint_12pc(term_freq,top_terms,top_terms_test_freq) and check_constraint_30pc(term_freq,top_terms,top_terms_test_freq) | d2b31c68d1a8cd1a7d8471818cc46de943496aaa | 2,511 |
import pytz
def predict_split(history, prediction_length=7*24, hyperparameters={}):
"""
This function predicts a time series of gas prices by splitting it into a
    trend and a residual and then applying a feature pipeline and predicting
each of them individually.
Keyword arguments:
history -- the time series to split up
prediction_length -- the number of time steps to predict (default 7*24)
hyperparameters -- values used for the prediction model (default {})
Return value:
2 time series predicted: trend and residual
"""
#extract parameters
r = hyperparameters["r"] if "r" in hyperparameters else default_hyperparameters["r"]
#split data
trend, res = split_trend(history)
#create index for prediction time series
index_pred = pd.date_range(
start=history.index.max() + timedelta(hours=1),
end=history.index.max() + timedelta(hours=prediction_length),
freq="1H",
tz=pytz.utc
)
#predict the trend
trend_pred = predict_ts(
(trend - trend.shift(1)).fillna(0.),
get_feature_pipeline("trend", hyperparameters),
index_pred,
hyperparameters=hyperparameters
).cumsum() + trend.iloc[-1]
#compute residual prediction
res_pred = predict_ts(
res.iloc[-r:],
get_feature_pipeline("res", hyperparameters),
index_pred,
hyperparameters=hyperparameters
)
#alternative: using AR from statsmodels
#res_model = AR(res)
#res_results = res_model.fit(disp=-1, maxlag=p)
#res_pred = res_results.predict(len(res), len(res) + prediction_length)
#return result
return trend_pred, res_pred | 025d4b753754ad18a04b46f95002f9ab54ccd9bd | 2,514 |
def SFRfromLFIR(LFIR):
"""
    Kennicutt 1998
To get Star formation rate from LFIR (8-1000um)
LFIR in erg s-1
SFR in Msun /year
"""
SFR = 4.5E-44 * LFIR
return SFR | 4adf401bbf2c6547cea817b52eb881531db8c798 | 2,515 |
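A worked example of the Kennicutt (1998) conversion above, using an illustrative luminosity:
LFIR = 1.0e44             # erg s-1
print(SFRfromLFIR(LFIR))  # 4.5 Msun / year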
def inc_group_layers(n_list, d_list, c_list):
"""
Helper function for inc_tmm. Groups and sorts layer information.
See coh_tmm for definitions of n_list, d_list.
c_list is "coherency list". Each entry should be 'i' for incoherent or 'c'
for 'coherent'.
A "stack" is a group of one or more consecutive coherent layers. A "stack
index" labels the stacks 0,1,2,.... The "within-stack index" counts the
coherent layers within the stack 1,2,3... [index 0 is the incoherent layer
before the stack starts]
An "incoherent layer index" labels the incoherent layers 0,1,2,...
An "alllayer index" labels all layers (all elements of d_list) 0,1,2,...
Returns info about how the layers relate:
* stack_d_list[i] = list of thicknesses of each coherent layer in the i'th
stack, plus starting and ending with "inf"
* stack_n_list[i] = list of refractive index of each coherent layer in the
i'th stack, plus the two surrounding incoherent layers
* all_from_inc[i] = j means that the layer with incoherent index i has
alllayer index j
* inc_from_all[i] = j means that the layer with alllayer index i has
incoherent index j. If j = nan then the layer is coherent.
* all_from_stack[i1][i2] = j means that the layer with stack index i1 and
within-stack index i2 has alllayer index j
* stack_from_all[i] = [j1 j2] means that the layer with alllayer index i is
      part of stack j1 with within-stack index j2. If stack_from_all[i] = nan
then the layer is incoherent
* inc_from_stack[i] = j means that the i'th stack comes after the layer
with incoherent index j, and before the layer with incoherent index j+1.
* stack_from_inc[i] = j means that the layer with incoherent index i comes
immediately after the j'th stack. If j=nan, it is not immediately
following a stack.
* num_stacks = number of stacks
* num_inc_layers = number of incoherent layers
* num_layers = number of layers total
"""
if (d_list.ndim != 1):
raise ValueError("Problem with n_list or d_list!")
if (d_list[0] != np.inf) or (d_list[-1] != np.inf):
raise ValueError('d_list must start and end with inf!')
if (c_list[0] != 'i') or (c_list[-1] != 'i'):
raise ValueError('c_list should start and end with "i"')
if not len(n_list) == d_list.size == len(c_list):
raise ValueError('List sizes do not match!')
inc_index = 0
stack_index = 0
stack_d_list = []
stack_n_list = []
all_from_inc = []
inc_from_all = []
all_from_stack = []
stack_from_all = []
inc_from_stack = []
stack_from_inc = []
stack_in_progress = False
for alllayer_index in range(len(n_list)):
if c_list[alllayer_index] == 'c': # coherent layer
inc_from_all.append(np.nan)
if not stack_in_progress: # this layer is starting new stack
stack_in_progress = True
ongoing_stack_d_list = [np.inf, d_list[alllayer_index]]
ongoing_stack_n_list = [n_list[alllayer_index - 1],
n_list[alllayer_index]]
stack_from_all.append([stack_index, 1])
all_from_stack.append([alllayer_index - 1, alllayer_index])
inc_from_stack.append(inc_index - 1)
within_stack_index = 1
else: # another coherent layer in the same stack
ongoing_stack_d_list.append(d_list[alllayer_index])
ongoing_stack_n_list.append(n_list[alllayer_index])
within_stack_index += 1
stack_from_all.append([stack_index, within_stack_index])
all_from_stack[-1].append(alllayer_index)
elif c_list[alllayer_index] == 'i': # incoherent layer
stack_from_all.append(np.nan)
inc_from_all.append(inc_index)
all_from_inc.append(alllayer_index)
if not stack_in_progress: # previous layer was also incoherent
stack_from_inc.append(np.nan)
else: # previous layer was coherent
stack_in_progress = False
stack_from_inc.append(stack_index)
ongoing_stack_d_list.append(np.inf)
stack_d_list.append(ongoing_stack_d_list)
ongoing_stack_n_list.append(n_list[alllayer_index])
stack_n_list.append(ongoing_stack_n_list)
all_from_stack[-1].append(alllayer_index)
stack_index += 1
inc_index += 1
else:
raise ValueError("Error: c_list entries must be 'i' or 'c'!")
return {'stack_d_list': stack_d_list,
'stack_n_list': stack_n_list,
'all_from_inc': all_from_inc,
'inc_from_all': inc_from_all,
'all_from_stack': all_from_stack,
'stack_from_all': stack_from_all,
'inc_from_stack': inc_from_stack,
'stack_from_inc': stack_from_inc,
'num_stacks': len(all_from_stack),
'num_inc_layers': len(all_from_inc),
'num_layers': len(n_list)} | 1b25975169839e54feae58f98b5de98916c51541 | 2,516 |
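An illustrative call to inc_group_layers (the values are made up): a five-layer structure whose two middle coherent layers form a single stack between incoherent layers.
import numpy as np

n_list = [1.0, 1.5, 2.0, 1.3, 1.0]
d_list = np.array([np.inf, 100.0, 200.0, 50.0, np.inf])
c_list = ['i', 'c', 'c', 'i', 'i']
info = inc_group_layers(n_list, d_list, c_list)
# info['num_stacks'] == 1, info['num_inc_layers'] == 3, info['num_layers'] == 5
# info['stack_d_list'] == [[inf, 100.0, 200.0, inf]]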
def get_heater_device_json():
""" returns information about the heater in json """
return '{\n "state" : "' + _pretty_state_identifier(brew_logic.heater_state) + '",\n "overridden" : "' + str(brew_logic.heater_override).lower() + '"\n }' | 3997e9eee7cbb058adf4900b571c8458e2464e19 | 2,517 |
def rfc_deploy():
"""This function trains a Random Forest classifier and outputs the
out-of-sample performance from the validation and test sets
"""
df = pd.DataFrame()
for pair in pairs:
# retrieving the data and preparing the features
dataset = gen_feat(pair)
dataset.drop(['Open', 'High', 'Low', 'Close', 'volume'], axis=1, inplace=True)
# selecting the features to train on
cols = list(dataset.columns)
feats = cols[2:]
#splitting into training, validation and test sets
df_train = dataset.iloc[:-100,:]
train = df_train.copy()
df_test = dataset.iloc[-100:,:]
test = df_test.copy()
train_f = train.iloc[:-100,:]
valid = train.iloc[-100:,:]
#training the algorithm
m = rfc(train_f[feats], train_f['dir'])
# test sets
test_pred = m.predict(test[feats])
test_proba = m.predict_proba(test[feats])
df1 = pd.DataFrame(test_pred,columns=['prediction'], index=test.index)
proba_short = []
proba_long = []
for x in range(len(test_proba)):
proba_short.append(test_proba[x][0])
proba_long.append(test_proba[x][-1])
proba = {'proba_short': proba_short,
'proba_long': proba_long}
df2 = pd.DataFrame(proba, index=test.index)
df1['probability'] = np.where(df1['prediction'] == 1, df2['proba_long'],
np.where(df1['prediction'] == -1, df2['proba_short'], 0))
df1['signal'] = np.where((df1['probability'] >= .7) & (df1['prediction'] == 1), 'Go Long',
np.where((df1['probability'] >= 0.7) & (df1['prediction'] == -1), 'Go Short', 'Stand Aside'))
df1.reset_index(inplace=True)
df1['pair'] = pair
df1.set_index('pair', inplace=True)
entry_sig = df1[['probability', 'signal']].iloc[-1:]
# Merge
df = pd.concat([df, entry_sig], axis=0)
#output
return df | 86c4aa5f44d23cce83f6cc9993c0e10cd124c423 | 2,518 |
def get_block(block_name):
"""Get block from BLOCK_REGISTRY based on block_name."""
    if block_name not in BLOCK_REGISTRY:
raise Exception(NO_BLOCK_ERR.format(
block_name, BLOCK_REGISTRY.keys()))
block = BLOCK_REGISTRY[block_name]
return block | 10b86c5045496a865907ef2617b2994d03f1312d | 2,519 |
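A hypothetical registry to illustrate get_block; the real BLOCK_REGISTRY and NO_BLOCK_ERR are defined elsewhere in the project:
BLOCK_REGISTRY = {"conv": "ConvBlock", "dense": "DenseBlock"}
NO_BLOCK_ERR = "Block {} not found; known blocks: {}"
print(get_block("conv"))  # ConvBlock
# get_block("missing") would raise Exception listing the registered block names.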
from pathlib import Path
import yaml
def _determine_role_name(var_file: Path) -> str:
"""
Lookup role name from directory or galaxy_info.
"""
if var_file.is_file():
role_path: Path = var_file.parent / ".."
name = str(role_path.resolve().name)
meta_path: Path = role_path / 'meta' / 'main.yml'
if (meta_path.is_file()):
with open(str(meta_path), 'r') as f:
                meta = yaml.load(f, Loader=yaml.SafeLoader)
try:
role_name = meta['galaxy_info']['role_name']
name = role_name
except BaseException:
pass
return name | 59e6d60234cc7988fe6c3005176f1c89cac5b60d | 2,520 |
def coco17_category_info(with_background=True):
"""
Get class id to category id map and category id
to category name map of COCO2017 dataset
Args:
with_background (bool, default True):
whether load background as class 0.
"""
clsid2catid = {
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
8: 8,
9: 9,
10: 10,
11: 11,
12: 13,
13: 14,
14: 15,
15: 16,
16: 17,
17: 18,
18: 19,
19: 20,
20: 21,
21: 22,
22: 23,
23: 24,
24: 25,
25: 27,
26: 28,
27: 31,
28: 32,
29: 33,
30: 34,
31: 35,
32: 36,
33: 37,
34: 38,
35: 39,
36: 40,
37: 41,
38: 42,
39: 43,
40: 44,
41: 46,
42: 47,
43: 48,
44: 49,
45: 50,
46: 51,
47: 52,
48: 53,
49: 54,
50: 55,
51: 56,
52: 57,
53: 58,
54: 59,
55: 60,
56: 61,
57: 62,
58: 63,
59: 64,
60: 65,
61: 67,
62: 70,
63: 72,
64: 73,
65: 74,
66: 75,
67: 76,
68: 77,
69: 78,
70: 79,
71: 80,
72: 81,
73: 82,
74: 84,
75: 85,
76: 86,
77: 87,
78: 88,
79: 89,
80: 90
}
catid2name = {
0: 'background',
1: 'person',
2: 'bicycle',
3: 'car',
4: 'motorcycle',
5: 'airplane',
6: 'bus',
7: 'train',
8: 'truck',
9: 'boat',
10: 'traffic light',
11: 'fire hydrant',
13: 'stop sign',
14: 'parking meter',
15: 'bench',
16: 'bird',
17: 'cat',
18: 'dog',
19: 'horse',
20: 'sheep',
21: 'cow',
22: 'elephant',
23: 'bear',
24: 'zebra',
25: 'giraffe',
27: 'backpack',
28: 'umbrella',
31: 'handbag',
32: 'tie',
33: 'suitcase',
34: 'frisbee',
35: 'skis',
36: 'snowboard',
37: 'sports ball',
38: 'kite',
39: 'baseball bat',
40: 'baseball glove',
41: 'skateboard',
42: 'surfboard',
43: 'tennis racket',
44: 'bottle',
46: 'wine glass',
47: 'cup',
48: 'fork',
49: 'knife',
50: 'spoon',
51: 'bowl',
52: 'banana',
53: 'apple',
54: 'sandwich',
55: 'orange',
56: 'broccoli',
57: 'carrot',
58: 'hot dog',
59: 'pizza',
60: 'donut',
61: 'cake',
62: 'chair',
63: 'couch',
64: 'potted plant',
65: 'bed',
67: 'dining table',
70: 'toilet',
72: 'tv',
73: 'laptop',
74: 'mouse',
75: 'remote',
76: 'keyboard',
77: 'cell phone',
78: 'microwave',
79: 'oven',
80: 'toaster',
81: 'sink',
82: 'refrigerator',
84: 'book',
85: 'clock',
86: 'vase',
87: 'scissors',
88: 'teddy bear',
89: 'hair drier',
90: 'toothbrush'
}
if not with_background:
clsid2catid = {k - 1: v for k, v in clsid2catid.items()}
return clsid2catid, catid2name | f64be8c09b3372ad75826a6bfdc8a2f0bc4f9e25 | 2,523 |
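Usage sketch for coco17_category_info, showing how the background flag shifts the class ids:
clsid2catid, catid2name = coco17_category_info(with_background=True)
print(catid2name[clsid2catid[1]])   # person  (class 0 is background)
clsid2catid, catid2name = coco17_category_info(with_background=False)
print(catid2name[clsid2catid[0]])   # person  (ids shifted down by one)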
def example_miller_set(example_crystal):
"""Generate an example miller set."""
ms = miller.set(
crystal_symmetry=example_crystal.get_crystal_symmetry(),
indices=flex.miller_index([(1, 1, 1)] * 8 + [(2, 2, 2)]),
anomalous_flag=False,
)
return ms | 516eca404544d8f8af8dd664488006c67bef03b8 | 2,525 |
async def get(req):
"""
Get a complete analysis document.
"""
db = req.app["db"]
analysis_id = req.match_info["analysis_id"]
document = await db.analyses.find_one(analysis_id)
if document is None:
return not_found()
sample = await db.samples.find_one({"_id": document["sample"]["id"]}, {"quality": False})
if not sample:
return bad_request("Parent sample does not exist")
read, _ = virtool.samples.utils.get_sample_rights(sample, req["client"])
if not read:
return insufficient_rights()
await virtool.subtractions.db.attach_subtraction(db, document)
if document["ready"]:
document = await virtool.analyses.format.format_analysis(req.app, document)
return json_response(virtool.utils.base_processor(document)) | e52598d27b73dd9ef5d24aba196f97f85fb47214 | 2,526 |
from typing import Union
from typing import Mapping
from typing import Any
def get_cube_point_indexes(cube: xr.Dataset,
points: Union[xr.Dataset, pd.DataFrame, Mapping[str, Any]],
dim_name_mapping: Mapping[str, str] = None,
index_name_pattern: str = DEFAULT_INDEX_NAME_PATTERN,
index_dtype=np.float64,
cube_asserted: bool = False) -> xr.Dataset:
"""
Get indexes of given point coordinates *points* into the given *dataset*.
:param cube: The cube dataset.
:param points: A mapping from column names to column data arrays, which must all have the same length.
:param dim_name_mapping: A mapping from dimension names in *cube* to column names in *points*.
:param index_name_pattern: A naming pattern for the computed indexes columns.
Must include "{name}" which will be replaced by the dimension name.
:param index_dtype: Numpy data type for the indexes. If it is a floating point type (default),
then *indexes* will contain fractions, which may be used for interpolation.
For out-of-range coordinates in *points*, indexes will be -1 if *index_dtype* is an integer type, and NaN,
    if *index_dtype* is a floating point type.
:param cube_asserted: If False, *cube* will be verified, otherwise it is expected to be a valid cube.
:return: A dataset containing the index columns.
"""
if not cube_asserted:
assert_cube(cube)
dim_name_mapping = dim_name_mapping if dim_name_mapping is not None else {}
dim_names = _get_cube_data_var_dims(cube)
col_names = [dim_name_mapping.get(dim_name, dim_name) for dim_name in dim_names]
_validate_points(points, col_names, param_name="points")
indexes = []
for dim_name, col_name in zip(dim_names, col_names):
col = points[col_name]
coord_indexes = get_dataset_indexes(cube, dim_name, col, index_dtype=index_dtype)
indexes.append((index_name_pattern.format(name=dim_name),
xr.DataArray(coord_indexes, dims=[INDEX_DIM_NAME])))
return xr.Dataset(dict(indexes)) | b1f5eb134ab7119589b54c45b95065c2f57348dc | 2,527 |
def auto_add():
"""
    Automatically add urls:
    1. find all amis files
    2. update the records
    3. organize the records by app and build a dict
    4. generate auto_urls.py for each app
:return:
"""
amis_json_file_list = get_amis_files()
cnt = update_rcd(amis_json_file_list)
aml_app_dict = get_rcd_by_app_name()
add_needed_auto_urls(aml_app_dict)
add_urls_needed(aml_app_dict)
return cnt | 17b6026f56793f3a6f76446145b7f65a6fe29a5a | 2,528 |
from pathlib import Path
from typing import Optional
def _get_configs(cli_args: CLIArgs, project_root: Path) -> Configs:
"""
Deal with extra configs for 3rd party tool.
Parameters
----------
cli_args
Commandline arguments passed to nbqa
project_root
Root of repository, where .git / .hg / .nbqa.ini file is.
Returns
-------
Configs
Taken from CLI (if given), else from .nbqa.ini.
"""
cli_config: Configs = Configs.parse_from_cli_args(cli_args)
file_config: Optional[Configs] = config_parser.parse_config_from_file(
cli_args, project_root
)
if file_config is not None:
cli_config = cli_config.merge(file_config)
return cli_config | d9ef190a99b06f2d17bbc336ace86061ea215d97 | 2,529 |
from sklearn.preprocessing import RobustScaler
def robust_standardize(df: pd.DataFrame, excluded_colnames: list = None) -> pd.DataFrame:
"""
Applies the RobustScaler from the module sklearn.preprocessing by
removing the median and scaling the data according to the quantile
range (IQR). This transformation is robust to outliers.
Note: In case multiple dataframes are used (i.e., several partitions of
the dataset in training and testing), make sure that all of them will
be passed to this method at once, and as one single dataframe. Otherwise,
the normalization will be carried out on local (as opposed to global)
extrema, hence unrepresentative IQR. This is a bad practice.
:param df: The dataframe to be normalized.
:param excluded_colnames: The name of non-numeric (e.g., TimeStamp,
ID etc.) that must be excluded before normalization takes place.
They will be added back to the normalized data.
:return: The same dataframe as input, with the label column unchanged,
except that now the numerical values are transformed into new range
determined by IQR.
"""
excluded_colnames = excluded_colnames if excluded_colnames else []
colnames_original_order = list(df)
# Separate data (numeric) from those to be excluded (ids and class_labels)
included_cnames = [colname for colname in list(df) if colname not in excluded_colnames]
# Exclude all non-numeric columns
df_numeric = df[included_cnames].select_dtypes(include=np.number)
# set-difference between the original and numeric columns
excluded_cnames = list(set(colnames_original_order) - set(list(df_numeric)))
df_excluded = df[excluded_cnames]
# prepare normalizer and normalize
scaler = RobustScaler()
res_ndarray = scaler.fit_transform(df_numeric)
df_numeric = pd.DataFrame(res_ndarray, columns=list(df_numeric), dtype=float)
    # Reset the indices so that they match when joining
    df_excluded = df_excluded.reset_index(drop=True)
    df_numeric = df_numeric.reset_index(drop=True)
# Add the excluded columns back
df_norm = df_excluded.join(df_numeric)
    # Restore the original order of columns
df_norm = df_norm[colnames_original_order]
return df_norm | 0727ce390e773405a221c6fb3248ddd5d40445b2 | 2,532 |
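A small sketch of robust_standardize with made-up data; the 'id' and 'label' columns are excluded from scaling and re-attached in their original positions:
import pandas as pd

df = pd.DataFrame({
    "id": [1, 2, 3, 4],
    "x": [1.0, 2.0, 3.0, 100.0],   # contains an outlier
    "y": [10.0, 20.0, 30.0, 40.0],
    "label": ["a", "b", "a", "b"],
})
df_norm = robust_standardize(df, excluded_colnames=["id", "label"])
# Column order is preserved; 'x' and 'y' are now centered on their medians and scaled by IQR.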
import math
def meanStdDev( valueList, scale ):
"""Compute the mean and standard deviation of a *non-empty* list of numbers."""
numElements = len(valueList)
if numElements == 0:
return(None, 0.0)
mean = float(sum(valueList)) / numElements
variance = 0
for value in valueList:
variance += math.pow( value - mean, 2 )
variance = variance / float(numElements)
return (scale * mean, scale * math.sqrt(variance)) | 2970ae1e4382092eb67219373aa26b9ca75226a3 | 2,533 |
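A quick check of meanStdDev against a textbook example (population standard deviation):
print(meanStdDev([2, 4, 4, 4, 5, 5, 7, 9], scale=1.0))  # (5.0, 2.0)
print(meanStdDev([], scale=1.0))                        # (None, 0.0)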
def audience_filter(digest, audience):
"""Check whether the current audience level should include that digest."""
return get_split(
digest,
[
{
"key": "audience_{}".format(idx),
"size": 1.0
} for idx in range(0, 100)
]
) < audience | 811e4e94e68901bfeaedabfec5e16a30de55408c | 2,534 |
def request_specific_data2num(batch_data):
"""
input: next_batch_requestable request_specific_data[slot].
change the data into processable type for tensorflow
:param batch_data: 一个 batch 的训练数据
:return: 直接输入request-specific tracker 模型计算的数据
"""
batchsize_request = len(batch_data)
x_usr = np.zeros((batchsize_request, max_length, embedding_dim))
x_usr_len = np.zeros((batchsize_request), dtype='int32')
x_slot = np.zeros((batchsize_request, embedding_dim))
for batch_id, data in enumerate(batch_data):
for word_id, word in enumerate(data[1]):
if word in vocab_dict:
x_usr[batch_id, word_id, :] = embedding_table[word]
else:
x_usr[batch_id, word_id, :] = embedding_table['unk']
x_usr_len[batch_id] = len(data[1])
x_slot[batch_id, :] = embedding_table[data[2]]
return x_usr, x_usr_len, x_slot | e8a5b414f00e43755719dfadc0b089177cb67152 | 2,535 |
def points_from_x0y0x1y1(xyxy):
"""
Constructs a polygon representation from a rectangle described as a list [x0, y0, x1, y1]
"""
[x0, y0, x1, y1] = xyxy
return "%s,%s %s,%s %s,%s %s,%s" % (
x0, y0,
x1, y0,
x1, y1,
x0, y1
) | 8a7d766145dc31e6619b290b8d96a95983f9cc01 | 2,536 |
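Example of the rectangle-to-polygon conversion above:
print(points_from_x0y0x1y1([0, 0, 10, 5]))  # "0,0 10,0 10,5 0,5"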
def get_columns(invoice_list, additional_table_columns):
"""return columns based on filters"""
columns = [
_("Invoice") + ":Link/Sales Invoice:120", _("Posting Date") + ":Date:80", _("Status") + "::80",
_("Customer") + ":Link/Customer:120", _("Sales Person") + ":Link/Sales Person:100",
_("AR Status") + "::75", _("Territory") + ":Link/Territory:100",
_("SKU") + ":Link/Item:100", _("Qty") + ":Float:50", _("Price List") + ":Currency/currency:120",
_("Discount") + ":Currency/currency:120", _("Net Price") + ":Currency/currency:120",
_("Amount") + ":Currency/currency:120"
]
columns = columns + [_("Outstanding Amount") + ":Currency/currency:120"]
return columns | c9849e62d401ec5cc8de52d266a39eccf4b4dbe8 | 2,537 |
def one_norm(a):
"""
Return the one-norm of the matrix.
References:
[0] https://www.mathworks.com/help/dsp/ref/matrix1norm.html
Arguments:
a :: ndarray(N x N) - The matrix to compute the one norm of.
Returns:
one_norm_a :: float - The one norm of a.
"""
return anp.max(anp.sum(anp.abs(a), axis=0)) | c3e1c83d3776dda8ffa82b9b36d26866f390f6cc | 2,538 |
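A small check of one_norm; `anp` is assumed to be autograd's NumPy wrapper (plain NumPy gives the same result here):
import autograd.numpy as anp

a = anp.array([[1.0, -2.0], [-3.0, 4.0]])
# Absolute column sums are 4 and 6, so the one-norm is 6.
print(one_norm(a))  # 6.0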
def remove_nan_inf(df, reindex=True):
"""
Removes all rows that have NaN, inf or -inf as a value, and then optionally
reindexes the dataframe.
Parameters
----------
df : pd.DataFrame
Dataframe to remove NaNs and Infs from.
reindex : bool, optional
Reindex the dataframe so that there are no missing indices.
Returns
-------
df : pd.DataFrame
Dataframe with all the NaNs and Infs removed.
"""
df = df.replace([np.inf, -np.inf], np.nan).dropna()
if reindex is True:
df = df.reset_index(drop=True)
return df | 3b9339f2ee1315eac458925e5be5279e147d5c7d | 2,539 |
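Illustration of remove_nan_inf on a tiny frame; rows containing NaN or inf are dropped and the index is rebuilt:
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1.0, np.nan, np.inf, 4.0], "b": [1.0, 2.0, 3.0, 4.0]})
clean = remove_nan_inf(df)
# clean keeps the original rows 0 and 3, re-indexed as 0 and 1.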
def contingency_table(seg, gt, ignore_seg=[0], ignore_gt=[0], norm=True):
"""Return the contingency table for all regions in matched segmentations.
Parameters
----------
seg : np.ndarray, int type, arbitrary shape
A candidate segmentation.
gt : np.ndarray, int type, same shape as `seg`
The ground truth segmentation.
ignore_seg : list of int, optional
Values to ignore in `seg`. Voxels in `seg` having a value in this list
will not contribute to the contingency table. (default: [0])
ignore_gt : list of int, optional
Values to ignore in `gt`. Voxels in `gt` having a value in this list
will not contribute to the contingency table. (default: [0])
norm : bool, optional
Whether to normalize the table so that it sums to 1.
Returns
-------
cont : scipy.sparse.csc_matrix
A contingency table. `cont[i, j]` will equal the number of voxels
labeled `i` in `seg` and `j` in `gt`. (Or the proportion of such voxels
if `norm=True`.)
"""
segr = seg.ravel()
gtr = gt.ravel()
ij = np.vstack((segr, gtr))
    selector = np.ones(segr.shape, dtype=bool)
data = np.ones(len(gtr))
for i in ignore_seg:
selector[segr == i] = 0
for j in ignore_gt:
selector[gtr == j] = 0
ij = ij[:, selector]
data = data[selector]
cont = sparse.coo_matrix((data, ij)).tocsc()
if norm:
cont /= float(cont.sum())
return cont | 47284bb5aaa492b6cbc50794c8ccd8a1e63676b4 | 2,540 |
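A toy example for contingency_table (with norm=False so the counts are easy to read); background label 0 is ignored on both sides:
import numpy as np

seg = np.array([[1, 1, 1], [0, 2, 2]])
gt = np.array([[1, 1, 2], [0, 2, 2]])
cont = contingency_table(seg, gt, norm=False)
print(cont[1, 1], cont[1, 2], cont[2, 2])  # 2.0 1.0 2.0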
def get_basic_track_info(track):
"""
Given a track object, return a dictionary of track name, artist name,
album name, track uri, and track id.
"""
# Remember that artist and album artist have different entries in the
# spotify track object.
name = track["name"]
artist = track['artists'][0]['name']
album = track['album']['name']
uri = track["uri"]
track_id = track['id']
output = {"name": name, "artist": artist, "album": album, "uri": uri,
"id": track_id}
return output | 925f7bb00482e946ad7a6853bac8b243d24145c7 | 2,541 |
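A minimal mock of the Spotify track object, with only the fields read by get_basic_track_info:
track = {
    "name": "Example Song",
    "artists": [{"name": "Example Artist"}],
    "album": {"name": "Example Album"},
    "uri": "spotify:track:123abc",
    "id": "123abc",
}
print(get_basic_track_info(track)["artist"])  # Example Artist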