content (stringlengths 35 to 762k) | sha1 (stringlengths 40 to 40) | id (int64, 0 to 3.66M)
---|---|---|
def populate_institute_form(form, institute_obj):
"""Populate institute settings form
Args:
form(scout.server.blueprints.institutes.models.InstituteForm)
institute_obj(dict) An institute object
"""
# get all other institutes to populate the select of the possible collaborators
institutes_tuples = []
for inst in store.institutes():
        if inst["_id"] != institute_obj["_id"]:
            institutes_tuples.append((inst["_id"], inst["display_name"]))
form.display_name.default = institute_obj.get("display_name")
form.institutes.choices = institutes_tuples
form.coverage_cutoff.default = institute_obj.get("coverage_cutoff")
form.frequency_cutoff.default = institute_obj.get("frequency_cutoff")
# collect all available default HPO terms and populate the pheno_groups form select with these values
default_phenotypes = [choice[0].split(" ")[0] for choice in form.pheno_groups.choices]
if institute_obj.get("phenotype_groups"):
for key, value in institute_obj["phenotype_groups"].items():
            if key not in default_phenotypes:
custom_group = " ".join(
[key, ",", value.get("name"), "( {} )".format(value.get("abbr"))]
)
form.pheno_groups.choices.append((custom_group, custom_group))
# populate gene panels multiselect with panels from institute
available_panels = list(store.latest_panels(institute_obj["_id"]))
# And from institute's collaborators
for collaborator in institute_obj.get("collaborators", []):
available_panels += list(store.latest_panels(collaborator))
panel_set = set()
for panel in available_panels:
panel_set.add((panel["panel_name"], panel["display_name"]))
form.gene_panels.choices = list(panel_set)
return default_phenotypes | 836850a55a02b199b2c7607a236f77e6b95051e0 | 3,650,808 |
def closestMedioidI(active_site, medioids, distD):
"""
returns the index of the closest medioid in medioids to active_site
input: active_site, an ActiveSite instance
medioids, a list of ActiveSite instances
distD, a dictionary of distances
output: the index of the ActiveSite closest to active_site in medioids
"""
closest = (float('Inf'), None)
for i, medioid in enumerate(medioids):
thisDist = distD[frozenset([active_site, medioid])]
if thisDist < closest[0]:
closest = (thisDist, i)
return closest[1] | 379f98a84751c0a392f8f9b1703b89b299979676 | 3,650,809 |
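A quick, hedged usage sketch for closestMedioidI: plain strings stand in for ActiveSite instances (the function only needs hashable objects and a frozenset-keyed distance dictionary), so the values below are made up for illustration.

distD = {
    frozenset(["site_a", "site_b"]): 2.0,
    frozenset(["site_a", "site_c"]): 0.5,
}
# "site_c" (index 1 in the medioid list) is closest to "site_a"
print(closestMedioidI("site_a", ["site_b", "site_c"], distD))  # -> 1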
def no_op_job():
"""
A no-op parsl.python_app to return a future for a job that already
has its outputs.
"""
return 0 | ad8d6379ba35dae14ce056d9900fb6e62c769d85 | 3,650,811 |
def identity(dim, shape=None):
"""Return identity operator with appropriate shape.
Parameters
----------
dim : int
Dimension of real space.
shape : int (optional)
Size of the unitary part of the operator.
If not provided, U is set to None.
Returns
-------
id : PointGroupElement
"""
R = ta.identity(dim, int)
if shape is not None:
U = np.eye(shape)
else:
U = None
return PointGroupElement(R, False, False, U) | 0cd40246f4ccf2805a852dcea09d451e7f8c63a5 | 3,650,812 |
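A hedged usage sketch for identity, assuming the qsymm-style PointGroupElement and the ta (tinyarray) / np imports that the snippet relies on are available in the module:

e3 = identity(3)            # 3x3 real-space identity, unitary part U is None
e3u = identity(3, shape=2)  # same rotation part, plus a 2x2 identity as U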
from typing import Optional
from typing import Union
import torch
from pathlib import Path
import json
def load_separator(
model_str_or_path: str = "umxhq",
targets: Optional[list] = None,
niter: int = 1,
residual: bool = False,
wiener_win_len: Optional[int] = 300,
device: Union[str, torch.device] = "cpu",
pretrained: bool = True,
filterbank: str = "torch",
):
"""Separator loader
Args:
model_str_or_path (str): Model name or path to model _parent_ directory
            E.g. the following files are assumed to be present when
            loading `model_str_or_path='mymodel', targets=['vocals']`:
            'mymodel/separator.json', 'mymodel/vocals.pth', 'mymodel/vocals.json'.
Defaults to `umxhq`.
targets (list of str or None): list of target names. When loading a
pre-trained model, all `targets` can be None as all targets
will be loaded
niter (int): Number of EM steps for refining initial estimates
in a post-processing stage. `--niter 0` skips this step altogether
(and thus makes separation significantly faster) More iterations
can get better interference reduction at the price of artifacts.
Defaults to `1`.
residual (bool): Computes a residual target, for custom separation
scenarios when not all targets are available (at the expense
of slightly less performance). E.g vocal/accompaniment
Defaults to `False`.
wiener_win_len (int): The size of the excerpts (number of frames) on
which to apply filtering independently. This means assuming
time varying stereo models and localization of sources.
None means not batching but using the whole signal. It comes at the
price of a much larger memory usage.
Defaults to `300`
device (str): torch device, defaults to `cpu`
pretrained (bool): determines if loading pre-trained weights
filterbank (str): filterbank implementation method.
Supported are `['torch', 'asteroid']`. `torch` is about 30% faster
compared to `asteroid` on large FFT sizes such as 4096. However,
            asteroid's STFT can be exported to ONNX, which makes it practical
for deployment.
"""
model_path = Path(model_str_or_path).expanduser()
# when path exists, we assume its a custom model saved locally
if model_path.exists():
if targets is None:
raise UserWarning("For custom models, please specify the targets")
target_models = load_target_models(
targets=targets, model_str_or_path=model_path, pretrained=pretrained
)
with open(Path(model_path, "separator.json"), "r") as stream:
enc_conf = json.load(stream)
separator = model.Separator(
target_models=target_models,
niter=niter,
residual=residual,
wiener_win_len=wiener_win_len,
sample_rate=enc_conf["sample_rate"],
n_fft=enc_conf["nfft"],
n_hop=enc_conf["nhop"],
nb_channels=enc_conf["nb_channels"],
filterbank=filterbank,
).to(device)
# otherwise we load the separator from torchhub
else:
hub_loader = getattr(openunmix, model_str_or_path)
separator = hub_loader(
targets=targets,
device=device,
pretrained=True,
niter=niter,
residual=residual,
filterbank=filterbank,
)
return separator | bb9d0ecf47174ebac9181710a1bc4689ca122ecf | 3,650,815 |
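A minimal usage sketch, assuming the open-unmix package (which provides the model, openunmix and load_target_models names used above) and torch are installed; "umxhq" is the default pre-trained separator name:

separator = load_separator("umxhq", niter=1, residual=False, device="cpu")
# estimates = separator(audio)  # audio: torch.Tensor of shape (batch, channels, samples)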
from datetime import datetime
def transform_datetime(date_str, site):
"""
ๆ นๆฎsite่ฝฌๆขๅๅง็dateไธบๆญฃ่ง็date็ฑปๅๅญๆพ
:param date_str: ๅๅง็date
:param site: ็ฝ็ซๆ ่ฏ
:return: ่ฝฌๆขๅ็date
"""
result = None
if site in SITE_MAP:
if SITE_MAP[site] in (SiteType.SINA, SiteType.HACKERNEWS):
try:
time_int = int(date_str)
result = datetime.fromtimestamp(time_int).strftime(DATE_FMT)
except Exception as e:
result = parse(date_str).strftime(DATE_FMT)
elif SITE_MAP[site] == SiteType.TENCENT:
result = date_str
elif SITE_MAP[site] == SiteType.TUICOOL:
result = date_str
elif SITE_MAP[site] == SiteType.HACKER:
result = date_str
elif SITE_MAP[site] == SiteType.DMZJ:
result = parse(date_str).strftime(DATE_FMT)
elif SITE_MAP[site] == SiteType.ACGMH:
result = parse(date_str).strftime(DATE_FMT)
elif SITE_MAP[site] == SiteType.CTOLIB:
result = parse(date_str).strftime(DATE_FMT)
elif date_str.strip() == '':
result = datetime.now().strftime(DATE_FMT)
else:
result = parse(date_str).strftime(DATE_FMT)
return result | 647ab633b0d5ce0887042ef42a762f1bc3196242 | 3,650,816 |
import re
import numpy
def ParseEventsForTTLs(eventsFileName, TR = 2.0, onset = False, threshold = 5.0):
"""
Parses the events file from Avotec for TTLs. Use if history file is not available.
    The events file does not contain save movie start/stop events, so use the history file if possible
@param eventsFileName: name of events file from avotec
@param TR: TR duration in seconds
@param onset: use the TTL pulse onset instead of the offset for timestamps?
@param threshold: multiple of the TR interval to use as a threshold as a break between runs
@type eventsFileName: str
@type TR: float
@type onset: bool
@type threshold: float
@return: timestamps of TTLs in each run, each run is a list of TTL timestamps and the number of TTLs
@rtype: list<tuple<list<float>, int>>
"""
eventsFile = open(eventsFileName, 'r')
TTLtoken = 'S' if onset else 's'
TTLs = []
lastTime = (0, 0, 0, 0)
duplicates = 0
runs = []
thisRun = []
line = eventsFile.readline()
while line != '':
tokens = line.split()
if len(tokens) > 0 and tokens[-1] == TTLtoken:
time = []
            for token in re.split(r'[:.]', re.match(r'[0-9. ]+:[0-9. ]+:[0-9 ]+\.[0-9]+', line).group()):
if (len(token) > 2): # the milliseconds have rather high precision
time.append(int(numpy.round(float(token) * 0.001)))
else:
time.append(int(token))
time = tuple(time)
if (TimeToSeconds(time) - TimeToSeconds(lastTime) > 0.1): # long enough of an interval since last one such that it's not a duplicate
TTLs.append(time)
lastTime = time
else:
duplicates += 1
line = eventsFile.readline()
nTRs = 1
thisRun.append(TTLs[0])
for i in range(1, len(TTLs) - 1):
this = TTLs[i]
last = TTLs[i - 1]
dt = TimeToSeconds(this) - TimeToSeconds(last)
if dt > threshold * TR:
runs.append((thisRun, nTRs))
thisRun = [this]
nTRs = 1
else:
thisRun.append(this)
nTRs += 1
runs.append((thisRun, nTRs + 1)) # account for last run without a faraway TTL
eventsFile.close()
print('{} duplicated TTLs'.format(duplicates))
for i in range(len(runs)):
duration = TimeToSeconds(runs[i][0][-1]) - TimeToSeconds(runs[i][0][0])
expectedTRs = int(numpy.round(duration / TR))
if (i == len(runs) - 1):
expectedTRs += 1 # account for last run without a faraway TTL
print('Run {} expected {} TTLs from duration, actual recorded {} TTLs'.format(i + 1, expectedTRs, len(runs[i][0])))
return runs | 59fa31df066424df3625e55496f0ccefa39f2d64 | 3,650,817 |
def _to_native_string(string, encoding='ascii'):
"""Given a string object, regardless of type, returns a representation of
that string in the native string type, encoding and decoding where
necessary. This assumes ASCII unless told otherwise.
"""
if isinstance(string, str):
out = string
else:
out = string.decode(encoding)
return out | b50fd0fc62b2cfc024c847b98e1f85b4b67d07e3 | 3,650,818 |
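For illustration, _to_native_string passes str input through unchanged and decodes bytes with the given encoding:

print(_to_native_string("already-text"))           # 'already-text'
print(_to_native_string(b"caf\xc3\xa9", "utf-8"))  # 'café'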
def load(path: str) -> model_lib.Model:
"""Deserializes a TensorFlow SavedModel at `path` to a `tff.learning.Model`.
Args:
path: The `str` path pointing to a SavedModel.
Returns:
A `tff.learning.Model`.
"""
py_typecheck.check_type(path, str)
if not path:
raise ValueError('`path` must be a non-empty string, cannot deserialize '
'models without an output path.')
return _LoadedSavedModel(tf.saved_model.load(path)) | 1bd16ed7b4a7955f2a78fc638e896bbd6d1ee5ac | 3,650,819 |
def parameters_from_object_schema(schema, in_='formData'):
"""Convert object schema to parameters."""
# We can only extract parameters from schema
if schema['type'] != 'object':
return []
properties = schema.get('properties', {})
required = schema.get('required', [])
parameters = []
for name, property in properties.items():
parameter = {
'name': name,
'in_': in_,
'required': (name in required),
}
parameter.update(property)
parameter = Parameter(**parameter)
parameters.append(parameter)
parameters = sorted(parameters, key=lambda x: x['name'])
return parameters | 7508fb066d6924fc0af4a10338636b70ef64b9b2 | 3,650,821 |
def any_toggle_enabled(*toggles):
"""
Return a view decorator for allowing access if any of the given toggles are
enabled. Example usage:
@toggles.any_toggle_enabled(REPORT_BUILDER, USER_CONFIGURABLE_REPORTS)
def delete_custom_report():
pass
"""
def decorator(view_func):
@wraps(view_func)
def wrapped_view(request, *args, **kwargs):
for t in toggles:
if (
(hasattr(request, 'user') and t.enabled(request.user.username))
or (hasattr(request, 'domain') and t.enabled(request.domain))
):
return view_func(request, *args, **kwargs)
raise Http404()
return wrapped_view
return decorator | 25f48e9227f5c6ff74ae9874ac0b3b7ad010861b | 3,650,823 |
def moguls(material, height, randomize, coverage, det, e0=20.0, withPoisson=True, nTraj=defaultNumTraj, dose=defaultDose, sf=True, bf=True, optimize=True, xtraParams=defaultXtraParams):
"""moguls(material, radius, randomize, det, [e0=20.0], [withPoisson=True], [nTraj=defaultNumTraj], [dose = 120.0], [sf=True], [bf=True], [optimize=True], [xtraParams={}])
Monte Carlo simulate a spectrum from a rough surface made up of close packed spheres.
+ material - Composition of material
+ height - mogul height = 0.5 * mogul radius
+ randomize - randomize the beam start position?
+ coverage - fractional likelihood of each bump existing (0.0 to 1.0)"""
tmp = u"MC simulation of a %0.2lg um %d%% %smogul bed of %s at %0.1f keV%s%s" % (1.0e6 * height, int(100.0*coverage), (" rand " if randomize else " "), material, e0, (" + CSF" if sf else ""), (" + BSF" if bf else ""))
return base(det, e0, withPoisson, nTraj, dose, sf, bf, tmp, buildMoguls, { "Coverage" : coverage, "Optimize": optimize, "Height" : height, "Material" : material, "Randomize" : randomize }, xtraParams) | 182aa248962877636e18860b46d20335eb535074 | 3,650,824 |
def link_datasets(yelp_results, dj_df, df_type="wages"):
"""
(Assisted by Record Linkage Toolkit library and documentation)
This functions compares the Yelp query results to database results and
produces the best matches based on computing the qgram score. Depending
on the specific database table characteristics the qgram calculation
will be between the zip_code, business name, address strings, latitude,
longitude, or a combination of those charateristics.
Inputs:
- yelp_results: a pandas dataframe of yelp business results based
on a user's input
- dj_df: a pandas dataframe of django results.
Ex. labour statistics, healthcode violations, Divvy, etc.
- df_type: a string of which specific dataframe is being passed to
be compared to the Yelp results
Outputs:
- link: a tuple containing the indices of Yelp query results dataframe
and the database dataframe AND the best matches qgram scores
"""
# set thresholds for comparing strings using qgram method
name_thresh = 0.55
addr_thresh = 0.55
strong_addr_thresh = 0.90
# initialize a Record Linkage comparison object
compare = rl.Compare()
# Labour & Food data comparisons to Yelp are made on zip, business name,
# and address
if df_type == "wages" or df_type == "food":
indexer = rl.BlockIndex(on='zip_code') # block on zip code
compare.numeric('zip_code', 'zip_code', method='linear',
scale=30.0, label='zip_score')
compare.string('name', 'name', method='qgram',
threshold=name_thresh, label='name_score')
compare.string('addr', 'addr', method='qgram',
threshold=addr_thresh, label='addr_score')
# Environmental data comparisons to Yelp are made on address
elif df_type == "enviro":
indexer = rl.FullIndex() # no blocking available
compare.string('addr', 'addr', method='qgram',
threshold=strong_addr_thresh, label='addr_score')
# all other data comparisons to Yelp
else:
indexer = rl.FullIndex()
pairs = indexer.index(yelp_results, dj_df)
# In addition to above comparisons, ALL data sets are also compared to
# Yelp based on latitude and longitude
compare.geo('latitude', 'longitude', 'latitude', 'longitude',
method='linear', scale=30.0, label='coord_score')
# compute record linkage scores
features = compare.compute(pairs, yelp_results, dj_df)
# set classification thresholds
zip_classif_thresh = 1.0
addr_classif_thresh = 1.0
coord_classif_thresh = 0.99
name_classif_thresh = 1.0
# Classification and final filtering
if df_type == "wages" or df_type == "food":
best_matches = features[(features['zip_score'] == zip_classif_thresh) &
(features['name_score'] == name_classif_thresh) &
(features['addr_score'] == addr_classif_thresh) &
(features['coord_score'] >= coord_classif_thresh)]
elif df_type == "enviro":
best_matches = features[(features['addr_score'] == addr_classif_thresh) &
(features['coord_score'] >= coord_classif_thresh)]
else:
best_matches = features[(features['coord_score'] >= coord_classif_thresh)]
# obtain the index values from best_matches
index_array = best_matches.index.values
# create tuple of indices and best matches df
link = (index_array, best_matches)
return link | 326857d5060ac5cedcac3de90ce284048b2d2fa7 | 3,650,825 |
def hello():
"""Say Hello, so that we can check shared code."""
return b"hello" | 7197ed31c5fde419d4607ca1b5dbec7f8cb20608 | 3,650,826 |
def loglog_mean_lines(x, ys, axis=0, label=None, alpha=0.1):
""" Log-log plot of lines and their mean. """
return _plot_mean_lines(partial(plt.loglog, x), ys, axis, label, alpha) | 2f4461ca21c2f8db9ddfd763f474ebc73f3bf636 | 3,650,827 |
def generate_identifier(endpoint_description: str) -> str:
"""Generate ID for model."""
return (
Config.fdk_publishers_base_uri()
+ "/fdk-model-publisher/catalog/"
+ sha1(bytes(endpoint_description, encoding="utf-8")).hexdigest() # noqa
) | 30bfc15c12b47f637627391a45bb9b5f9355c4f7 | 3,650,829 |
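Since Config.fdk_publishers_base_uri() is project configuration not shown here, a hedged sketch of just the hashing step shows how the identifier suffix is derived from the endpoint description; the description value is hypothetical.

from hashlib import sha1

endpoint_description = "https://example.com/specification.yaml"  # hypothetical value
suffix = sha1(bytes(endpoint_description, encoding="utf-8")).hexdigest()
print(suffix)  # deterministic 40-character hex digest for that description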
def depthFirstSearch(problem):
"""Search the deepest nodes in the search tree first."""
stack = util.Stack() # Stack used as fringe list
stack.push((problem.getStartState(),[],0))
return genericSearch(problem,stack) | 67452934a29e9857f90b88f3fead67d101468471 | 3,650,830 |
def create_app():
"""
Method to init and set up the Flask application
"""
flask_app = MyFlask(import_name="dipp_app")
_init_config(flask_app)
_setup_context(flask_app)
_register_blueprint(flask_app)
_register_api_error(flask_app)
return flask_app | bfb64ac71fcd076fe26c3b342c33af30370be8db | 3,650,832 |
def find_consumes(method_type):
"""
Determine mediaType for input parameters in request body.
"""
if method_type in ('get', 'delete'):
return None
return ['application/json'] | 785e70e41629b0386d8b86f247afaf5bff3b7ba9 | 3,650,833 |
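Behaviour sketch: only methods that carry a request body get a media type.

print(find_consumes("get"))    # None
print(find_consumes("post"))   # ['application/json']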
def preprocess(text):
""" Simple Arabic tokenizer and sentencizer. It is a space-based tokenizer. I use some rules to handle
    tokenization exceptions like words containing the preposition 'و'. For example 'ووالدته' is tokenized to 'و والدته'
:param text: Arabic text to handle
:return: list of tokenized sentences
"""
try:
text = text.decode('utf-8')
except(UnicodeDecodeError, AttributeError):
pass
text = text.strip()
tokenizer_exceptions = ["ูุธู", "ูุถุนูุง", "ูุถุนู", "ููููุง", "ูุตูููุง", "ูุฌููุง", "ูุงูุฏุชู", "ูุงูุฏู", "ูุงุฏู", "ูุถุนูุฉ",
"ูุงุฌูุงุช", "ููุฑุชูุง", "ููุงูุฉ", "ููุง", "ูุฒูุฑูุง", "ูุฒุงุฑุชู", "ูุฌูุงูุง", "ูุงุฑุฏุฉ", "ูุถุนุชู",
"ูุถุนุชูุง", "ูุฌุงูุฉ", "ููู
ูุฉ", "ูุงุฌูุฉ", "ูุงุถุนุงู", "ูุงูุนู", "ูุฏุงุฆุน", "ูุงุนุฏุง", "ูุงุน", "ูุงุณุนุง",
"ูุฑุงุฆูุง", "ูุญุฏูุง", "ูุฒุงุฑุชู", "ูุฒุงุฑุชู", "ูุงูุฏุฉ", "ูุฒุฑุงุฆูุง", "ูุณุทุงุก", "ูููุงู
ุฒ", "ูุงูู",
"ูุงูุฏูุง", "ูุณู
", "ูุงูู", "ูุฌููุง", "ูุงุณุนุฉ", "ูุงุณุน", "ูุฒููุง", "ูุฒูู",
"ูุตููุง", "ูุงูุฏูุง", "ูุตููุงู", "ูุถูุญุงู", "ูุฌููุชู", "ูุถุนุชู", "ูููููููุณ", "ูุญุฏูุง", "ูุฒูุฑุงู",
"ูููุงุช", "ูุนุฑ", "ูุงูููุง", "ูููู", "ูุตูููู
", "ูุงุฑุณู", "ูุงุฌูุช", "ููุงุฆูุฉ", "ูุถุนูู
",
"ูุณุทุงุก", "ูุธููุชู", "ูุฑุงุฆู", "ูุงุณุน", "ูุฑุท", "ูุธูุช", "ูููู", "ูุงููุช", "ููุฏูุง", "ูุตูุชูุง",
"ูุซุงุฆูู", "ููููุงู", "ูุณุงุท", "ููููุน", "ููููุน", "ูุฎูู
ุฉ", "ููุณุช", "ูุงูุชุฑ", "ููุฑุงู", "ููุงุนุฉ",
"ููุงูุช", "ูุงูู", "ูุงุฌุจ", "ูุธููุชูุง", "ููุงูุงุช", "ูุงุดูุทู", "ูุงุตู",
"ููุญ", "ูุนุฏ", "ูููุฏ", "ูุฒู", "ูููุน", "ูุฑุดุฉ", "ููุงุฆุน", "ูุชูุฑุฉ", "ูุณุงุทุฉ", "ูููุฏ", "ููุงุช",
"ูุตุงูุฉ", "ูุดูู", "ูุซุงุฆู", "ูุทููุฉ", "ูุฌูุงุช", "ูุฌูุช", "ูุนูุฏ", "ูุถุนูู
", "ููู", "ูุณุนูุง", "ูุณุนู",
"ููุงูุฉ", "ูุงุตูุงู", "ูุงุตูุช", "ูููุงู", "ูุฌุฏุชูุง", "ูุฌุฏุชู", "ูุฏูุชู", "ูุทุฃุช", "ูุทุฃ", "ูุนูุฏูุง",
"ูุฌูู", "ูุถูุญ", "ูุฌูุฒ", "ูุฑุซูุง", "ูุฑุซ", "ูุงูุน", "ููู
", "ูุงุณุนุงู", "ูุฑุงุซูุฉ", "ูุฑุงุซู", "ูุงูุงุณ",
"ูุงุฌููุง", "ูุงุจู", "ููููู
ูุฏูุง", "ูุงุถุญุง", "ูุงุถุญ", "ูุตูุชู", "ูุงุชุณุงุจ", "ูุญุฏุงุช", "ูู",
"ููุฑูุฏ", "ูุงูุฏ", "ูููุงุก", "ูุชุฑ", "ูุซูู", "ููุงูุฉ", "ููุงูุงุช", "ู ุงุญุฏุฉ", "ูุงุญุฏ", "ูุตูุชู",
"ูุตูู", "ูููู
ููุบุชูู", "ููุฏ", "ูุฒุฑ", "ูุนู", "ููุฏ", "ูุตูู", "ููู", "ููุงุฉ", "ููุชุด", "ูุณุท",
"ูุฒุฑุงุก", "ูุฒุงุฑุฉ", "ูุฏู", "ูุตูู", "ููู
ุจูุฏูู", "ูุณุช", "ููุฌ", "ูุงูุฏ", "ูููุฏ", "ูุซุงุฑ",
"ูุฌุฏ", "ูุฌู", "ููุช", "ูููุฒ", "ูุฌูุฏ", "ูุฌูู", "ูุญุฏ", "ูุญูุฏ", "ูุฏุง", "ูุฏุงุฏ", "ูุฏุฑู",
"ูุฏู", "ูุฏูุน", "ูุฑุงุก", "ูุฑุงูุณ", "ูุฑุซ", "ูุฑููุซ", "ูุฑุฏ", "ูุฑุฏุฉ", "ูุฑู", "ูุฑู
", "ูุฒูุฑ",
"ูุณุงู
", "ูุณุงุฆู", "ูุณุชูู", "ูุณุท", "ูุณู", "ูุณูุท", "ูุณููุฉ", "ูุณูู
", "ูุตุงู", "ูุตู", "ูุตููู",
"ูุตู", "ูุถุน", "ูุทู", "ูุนุงุก", "ููุงุก", "ููู", "ูููู", "ููุช", "ููุน", "ููุงู", "ูููู",
"ููุงุก", "ููู", "ููุจ", "ูุจุงุก", "ููุณุชูู", "ูุถุญ", "ูุฌุจ", "ูููุน", "ูููุบุชูู", "ูุญุด",
"ููุฑ", "ููุงุฏุฉ", "ููู", "ูููุงุช", "ูุฒุงุฑ", "ูุฌูู", "ููู
ุงู", "ูุฌููู", "ูุธููุฉ", "ูุธุงุฆู", "ููุงุฆู"]
sentence_splitter_exceptions = ["ุฏ.", "ูู.", "ูู.", "ุขุฑ.", "ุจู.", "ุฌู.", "ุฏู.", "ุฌูู.", "ุงู.", "ุงู.", "ุณู.", "ุงุณ.",
"ุงุชุด.", "ุงู."]
sentence_splitters = ['.', '!', 'ุ', '\n']
text = text.replace('ุ', ' ุ ')
text = text.replace('*', ' * ')
text = text.replace('โ', ' โ ')
text = text.replace('โ', ' โ ')
text = text.replace(',', ' , ')
text = text.replace('(', ' ( ')
text = text.replace(')', ' ) ')
text = text.replace('/', ' / ')
text = text.replace('[', ' [ ')
text = text.replace(']', ' ] ')
text = text.replace('|', ' | ')
text = text.replace('ุ', ' ุ ')
text = text.replace('ยซ', ' ยซ ')
text = text.replace('ยป', ' ยป ')
text = text.replace('!', ' ! ')
text = text.replace('-', ' - ')
text = text.replace('โ', ' โ ')
text = text.replace('โ', ' โ ')
text = text.replace('"', ' " ')
text = text.replace('ุ', ' ุ ')
text = text.replace(':', ' : ')
text = text.replace('โฆ', ' โฆ ')
text = text.replace('..', ' .. ')
text = text.replace('...', ' ... ')
text = text.replace('\'', ' \' ')
text = text.replace('\n', ' \n ')
text = text.replace(' ', ' ')
tokens = text.split()
for i, token in enumerate(tokens):
if token[-1] in sentence_splitters:
is_exceptions = token in sentence_splitter_exceptions
if not is_exceptions:
tokens[i] = token[:-1] + ' ' + token[-1] + 'SENT_SPLITTER'
tokens = ' '.join(tokens).split()
for i, token in enumerate(tokens):
if token.startswith('ู'):
is_exceptions = [token.startswith(exception) and len(token) <= len(exception) + 1 for exception in
tokenizer_exceptions]
if True not in is_exceptions:
tokens[i] = token[0] + ' ' + token[1:]
text = (' '.join(tokens))
text = text.replace(' ูุงู', ' ู ุงู')
text = text.replace(' ูู', ' ู ู')
text = text.replace(' ูุฅ', ' ู ุฅ')
text = text.replace(' ุจุงูุฃ', ' ุจ ุงูุฃ')
text = text.replace('ูููุง ู', 'ูููุง ู ')
text = text.replace('ูุณุจุฉ ู', 'ูุณุจุฉ ู ')
sentences = text.split('SENT_SPLITTER')
return sentences | 48a44391413045a49d6d9f2dff20dcd89734b4f2 | 3,650,834 |
def login(client, password="pass", ):
"""Helper function to log into our app.
Parameters
----------
client : test client object
Passed here is the flask test client used to send the request.
password : str
Dummy password for logging into the app.
Return
-------
post request object
The test client is instructed to send a post request to the /login
route. The request contains the fields values to be posted by the form.
"""
return client.post('/login',
data=dict(pass_field=password, remember_me=True),
follow_redirects=True) | 5adca2e7d54dabe47ae92f0bcebb93e0984617b1 | 3,650,835 |
def define_dagstermill_solid(
name,
notebook_path,
input_defs=None,
output_defs=None,
config_schema=None,
required_resource_keys=None,
output_notebook=None,
output_notebook_name=None,
asset_key_prefix=None,
description=None,
tags=None,
):
"""Wrap a Jupyter notebook in a solid.
Arguments:
name (str): The name of the solid.
notebook_path (str): Path to the backing notebook.
input_defs (Optional[List[InputDefinition]]): The solid's inputs.
output_defs (Optional[List[OutputDefinition]]): The solid's outputs. Your notebook should
call :py:func:`~dagstermill.yield_result` to yield each of these outputs.
required_resource_keys (Optional[Set[str]]): The string names of any required resources.
output_notebook (Optional[str]): If set, will be used as the name of an injected output of
type :py:class:`~dagster.FileHandle` that will point to the executed notebook (in
addition to the :py:class:`~dagster.AssetMaterialization` that is always created). This
respects the :py:class:`~dagster.core.storage.file_manager.FileManager` configured on
the pipeline resources via the "file_manager" resource key, so, e.g.,
            if :py:class:`~dagster_aws.s3.s3_file_manager` is configured, the output will be a
            :py:class:`~dagster_aws.s3.S3FileHandle`.
output_notebook_name: (Optional[str]): If set, will be used as the name of an injected output
of type of :py:class:`~dagster.BufferedIOBase` that is the file object of the executed
notebook (in addition to the :py:class:`~dagster.AssetMaterialization` that is always
created). It allows the downstream solids to access the executed notebook via a file
object.
asset_key_prefix (Optional[Union[List[str], str]]): If set, will be used to prefix the
asset keys for materialized notebooks.
description (Optional[str]): If set, description used for solid.
tags (Optional[Dict[str, str]]): If set, additional tags used to annotate solid.
Dagster uses the tag keys `notebook_path` and `kind`, which cannot be
overwritten by the user.
Returns:
:py:class:`~dagster.SolidDefinition`
"""
check.str_param(name, "name")
check.str_param(notebook_path, "notebook_path")
input_defs = check.opt_list_param(input_defs, "input_defs", of_type=InputDefinition)
output_defs = check.opt_list_param(output_defs, "output_defs", of_type=OutputDefinition)
required_resource_keys = check.opt_set_param(
required_resource_keys, "required_resource_keys", of_type=str
)
extra_output_defs = []
if output_notebook_name is not None:
required_resource_keys.add("output_notebook_io_manager")
extra_output_defs.append(
OutputDefinition(name=output_notebook_name, io_manager_key="output_notebook_io_manager")
)
    # backwards compatibility
if output_notebook is not None:
rename_warning(
new_name="output_notebook_name", old_name="output_notebook", breaking_version="0.14.0"
)
required_resource_keys.add("file_manager")
extra_output_defs.append(OutputDefinition(dagster_type=FileHandle, name=output_notebook))
if isinstance(asset_key_prefix, str):
asset_key_prefix = [asset_key_prefix]
asset_key_prefix = check.opt_list_param(asset_key_prefix, "asset_key_prefix", of_type=str)
default_description = f"This solid is backed by the notebook at {notebook_path}"
description = check.opt_str_param(description, "description", default=default_description)
user_tags = validate_tags(tags)
if tags is not None:
check.invariant(
"notebook_path" not in tags,
"user-defined solid tags contains the `notebook_path` key, but the `notebook_path` key is reserved for use by Dagster",
)
check.invariant(
"kind" not in tags,
"user-defined solid tags contains the `kind` key, but the `kind` key is reserved for use by Dagster",
)
default_tags = {"notebook_path": notebook_path, "kind": "ipynb"}
return SolidDefinition(
name=name,
input_defs=input_defs,
compute_fn=_dm_solid_compute(
name,
notebook_path,
output_notebook_name,
asset_key_prefix=asset_key_prefix,
            output_notebook=output_notebook,  # backwards compatibility
),
output_defs=output_defs + extra_output_defs,
config_schema=config_schema,
required_resource_keys=required_resource_keys,
description=description,
tags={**user_tags, **default_tags},
) | 48097a7bed7ef84ad8d9df4eeef835f3723cb391 | 3,650,836 |
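A hedged usage sketch; the solid name and notebook path are hypothetical, and it assumes dagster/dagstermill (which provide InputDefinition, OutputDefinition, check, etc. used above) are installed:

clean_data_solid = define_dagstermill_solid(
    name="clean_data",
    notebook_path="notebooks/clean_data.ipynb",
    output_notebook_name="clean_data_notebook",
)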
import torch
def denormalize_laf(LAF: torch.Tensor, images: torch.Tensor) -> torch.Tensor:
"""De-normalizes LAFs from scale to image scale.
B,N,H,W = images.size()
MIN_SIZE = min(H,W)
    [a11 a12 x]
    [a21 a22 y]
    becomes
    [a11*MIN_SIZE a12*MIN_SIZE x*W]
    [a21*MIN_SIZE a22*MIN_SIZE y*H]
Args:
LAF:
images: images, LAFs are detected in.
Returns:
the denormalized lafs.
Shape:
- Input: :math:`(B, N, 2, 3)`
- Output: :math:`(B, N, 2, 3)`
"""
raise_error_if_laf_is_not_valid(LAF)
n, ch, h, w = images.size()
wf = float(w)
hf = float(h)
min_size = min(hf, wf)
coef = torch.ones(1, 1, 2, 3).to(LAF.dtype).to(LAF.device) * min_size
coef[0, 0, 0, 2] = wf
coef[0, 0, 1, 2] = hf
return coef.expand_as(LAF) * LAF | 51b8c81359237a9e102c1cd33bb7d1ab16c39893 | 3,650,837 |
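A small sketch, assuming the kornia-style helper used above (raise_error_if_laf_is_not_valid) is importable; a LAF centered at normalized (0.5, 0.5) in a 100x200 image maps back to pixel coordinates:

import torch

laf = torch.tensor([[[[1.0, 0.0, 0.5],
                      [0.0, 1.0, 0.5]]]])   # shape (1, 1, 2, 3), normalized
images = torch.zeros(1, 3, 100, 200)        # B, CH, H, W
print(denormalize_laf(laf, images))
# tensor([[[[100.,   0., 100.],
#           [  0., 100.,  50.]]]])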
import re
def parse_regex_flags(raw_flags: str = 'gim'):
"""
parse flags user input and convert them to re flags.
Args:
raw_flags: string chars representing er flags
Returns:
(re flags, whether to return multiple matches)
"""
raw_flags = raw_flags.lstrip('-') # compatibility with original MatchRegex script.
multiple_matches = 'g' in raw_flags
raw_flags = raw_flags.replace('g', '')
flags = re.RegexFlag(0)
for c in raw_flags:
if c in LETTER_TO_REGEX_FLAGS:
flags |= LETTER_TO_REGEX_FLAGS[c]
else:
raise ValueError(f'Invalid regex flag "{c}".\n'
f'Supported flags are {", ".join(LETTER_TO_REGEX_FLAGS.keys())}')
return flags, multiple_matches | 71816c57f4e4f6dac82b4746b534a680745bc730 | 3,650,839 |
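LETTER_TO_REGEX_FLAGS is a module-level mapping that is not shown in this snippet; the sketch below defines a plausible stand-in only so the call can be exercised.

import re

# Hypothetical stand-in for the module's real mapping.
LETTER_TO_REGEX_FLAGS = {"i": re.IGNORECASE, "m": re.MULTILINE, "s": re.DOTALL}

flags, multiple_matches = parse_regex_flags("gim")
print(flags, multiple_matches)  # re.IGNORECASE|re.MULTILINE True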
def has_answer(answers, retrieved_text, match='string', tokenized: bool = False):
"""Check if retrieved_text contains an answer string.
If `match` is string, token matching is done between the text and answer.
If `match` is regex, we search the whole text with the regex.
"""
if not isinstance(answers, list):
answers = [answers]
if match == 'string':
if tokenized:
text = md.detokenize(retrieved_text)
t_text = retrieved_text
else:
text = retrieved_text
t_text = spacy_tokenize(retrieved_text, uncase=True)
for single_answer in answers:
single_answer = spacy_tokenize(single_answer, uncase=True)
for i in range(0, len(t_text) - len(single_answer) + 1):
if single_answer == t_text[i: i + len(single_answer)]:
return True
for single_answer in answers: # If raw covered.
if single_answer in text:
return True
elif match == 'regex':
if tokenized:
text = md.detokenize(retrieved_text)
else:
text = retrieved_text
# Answer is a regex
single_answer = normalize(answers[0])
if regex_match(text, single_answer):
return True
return False | f0107006d2796e620cd1a47ef9e79c1c5cc1fd7a | 3,650,841 |
def get_utm_zone(srs):
"""
extracts the utm_zone from an osr.SpatialReference object (srs)
    returns a (datum, utm_zone, hemisphere) tuple, or None if the UTM zone cannot be determined
"""
if not isinstance(srs, osr.SpatialReference):
raise TypeError('srs is not a osr.SpatialReference instance')
if srs.IsProjected() != 1:
return None
projcs = srs.GetAttrValue('projcs')
assert 'UTM' in projcs
datum = None
if 'NAD83' in projcs:
datum = 'NAD83'
elif 'WGS84' in projcs:
datum = 'WGS84'
elif 'NAD27' in projcs:
datum = 'NAD27'
# should be something like NAD83 / UTM zone 11N...
if '/' in projcs:
utm_token = projcs.split('/')[1]
else:
utm_token = projcs
if 'UTM' not in utm_token:
return None
# noinspection PyBroadException
try:
utm_zone = int(''.join([k for k in utm_token if k in '0123456789']))
except Exception:
return None
if utm_zone < 0 or utm_zone > 60:
return None
hemisphere = projcs[-1]
return datum, utm_zone, hemisphere | 3ee1f9780ce0fbfd843ea6b72627e90e16fd1549 | 3,650,842 |
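A hedged usage sketch, assuming GDAL's osr module is available (the function above already uses it); EPSG:26911 is "NAD83 / UTM zone 11N":

from osgeo import osr

srs = osr.SpatialReference()
srs.ImportFromEPSG(26911)   # NAD83 / UTM zone 11N
print(get_utm_zone(srs))    # ('NAD83', 11, 'N')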
def get_documents_meta_url(project_id: int, limit: int = 10, host: str = KONFUZIO_HOST) -> str:
"""
Generate URL to load meta information about the Documents in the Project.
    :param project_id: ID of the Project
    :param limit: maximum number of Documents to return
    :param host: Konfuzio host
:return: URL to get all the Documents details.
"""
return f"{host}/api/projects/{project_id}/docs/?limit={limit}" | b538d028844a2f769e8700995d1052b440592046 | 3,650,843 |
def parse_params_from_string(paramStr: str) -> dict:
""" Create a dictionary representation of parameters in PBC format
"""
params = dict()
lines = paramStr.split('\n')
for line in lines:
if line:
name, value = parse_param_line(line)
add_param(params, name, value)
return params | fbf8c8cfffd0c411cc4a83760f373dd4e02eec1e | 3,650,844 |
def number_fixed_unused_variables(block):
"""
Method to return the number of fixed Var components which do not appear
within any activated Constraint in a model.
Args:
block : model to be studied
Returns:
Number of fixed Var components which do not appear within any activated
Constraints in block
"""
return len(fixed_unused_variables_set(block)) | a6432160bc52ac3e5682b255c951388242bbc2b0 | 3,650,846 |
def tunnelX11( node, display=None):
"""Create an X11 tunnel from node:6000 to the root host
display: display on root host (optional)
returns: node $DISPLAY, Popen object for tunnel"""
if display is None and 'DISPLAY' in environ:
display = environ[ 'DISPLAY' ]
if display is None:
error( "Error: Cannot connect to display\n" )
return None, None
host, screen = display.split( ':' )
# Unix sockets should work
if not host or host == 'unix':
# GDM3 doesn't put credentials in .Xauthority,
# so allow root to just connect
quietRun( 'xhost +si:localuser:root' )
return display, None
else:
# Create a tunnel for the TCP connection
port = 6000 + int( float( screen ) )
connection = r'TCP\:%s\:%s' % ( host, port )
cmd = [ "socat", "TCP-LISTEN:%d,fork,reuseaddr" % port,
"EXEC:'mnexec -a 1 socat STDIO %s'" % connection ]
return 'localhost:' + screen, node.popen( cmd ) | a0e824bef4d23dd3a8a5c25653bf778731de180e | 3,650,847 |
import collections
def get_aws_account_id_file_section_dict() -> collections.OrderedDict:
"""~/.aws_accounts_for_set_aws_mfa ใใ Section ๆ
ๅ ฑใๅๅพใใ"""
# ~/.aws_accounts_for_set_aws_mfa ใฎๆ็กใ็ขบ่ชใใใชใใใฐ็ๆใใ
prepare_aws_account_id_file()
# ่ฉฒๅฝ ini ใใกใคใซใฎใปใฏใทใงใณ dictionary ใๅๅพ
return Config._sections | 51eb94857d62b91c5fcfe978b3cd2a32cbefb6ae | 3,650,849 |
import datetime
def profile(request, session_key):
"""download_audio.html renderer.
:param request: rest API request object.
:type request: Request
:param session_key: string representing the session key for the user
:type session_key: str
:return: Just another django mambo.
:rtype: HttpResponse
"""
# This may be different from the one provided in the URL.
my_session_key = request.session.session_key
last_week = datetime.date.today() - datetime.timedelta(days=7)
# Get the weekly counts.
last_weeks = [datetime.date.today() - datetime.timedelta(days=days) for days in [6, 13, 20, 27, 34]]
dates = []
weekly_counts = []
for week in last_weeks:
dates.append(week.strftime('%m/%d/%Y'))
count = AnnotatedRecording.objects.filter(
file__gt='', file__isnull=False, session_id=session_key, timestamp__gt=week,
timestamp__lt=week + datetime.timedelta(days=7)).count()
weekly_counts.append(count)
recording_count = AnnotatedRecording.objects.filter(
file__gt='', file__isnull=False).count()
# Construct dictionaries of the user's recordings.
user_recording_count = AnnotatedRecording.objects.filter(
file__gt='', file__isnull=False, session_id=session_key).count()
recent_recordings = AnnotatedRecording.objects.filter(
file__gt='', file__isnull=False, session_id=session_key, timestamp__gt=last_week)
recent_dict = defaultdict(list)
[recent_dict[rec.surah_num].append((rec.ayah_num, rec.file.url)) for rec in recent_recordings]
old_recordings = AnnotatedRecording.objects.filter(
file__gt='', file__isnull=False, session_id=session_key, timestamp__lt=last_week)
old_dict = defaultdict(list)
[old_dict[rec.surah_num].append((rec.ayah_num, rec.file.url)) for rec in old_recordings]
recent_lists = _sort_recitations_dict_into_lists(recent_dict)
old_lists = _sort_recitations_dict_into_lists(old_dict)
return render(request, 'audio/profile.html', {'session_key': my_session_key,
'recent_dict': dict(recent_dict),
'recent_lists': recent_lists,
'old_lists': old_lists,
'dates': dates[::-1],
'weekly_counts': weekly_counts[::-1],
'old_dict': dict(old_dict),
'recording_count': recording_count,
'user_recording_count': user_recording_count}) | ba39b5a69c062ab62f83f46f7044f403120016ca | 3,650,850 |
import requests
def pipFetchLatestVersion(pkg_name: str) -> str:
"""
Fetches the latest version of a python package from pypi.org
:param pkg_name: package to search for
:return: latest version of the package or 'not found' if error was returned
"""
base_url = "https://pypi.org/pypi"
request = f"{base_url}/{pkg_name}/json"
response = requests.get(request)
if response.status_code == requests.codes.ok:
json = response.json()
newest_version = json["info"]["version"]
else:
newest_version = NOT_FOUND
return newest_version | f1a49d31f4765a1a2ddc5942792a74be211fef49 | 3,650,851 |
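NOT_FOUND is a module-level constant not shown here (assumed to be a sentinel string). A hedged usage sketch against the live PyPI API:

print(pipFetchLatestVersion("requests"))  # e.g. '2.31.0' (whatever is current on PyPI)
print(pipFetchLatestVersion("surely-not-a-real-package-name"))  # the NOT_FOUND sentinel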
def mock_datasource_http_oauth2(mock_datasource):
"""Mock DataSource object with http oauth2 credentials"""
mock_datasource.credentials = b"client_id: FOO\nclient_secret: oldisfjowe84uwosdijf"
mock_datasource.location = "http://foo.com"
return mock_datasource | 8496f6b9ac60af193571f762eb2ea925915a1223 | 3,650,853 |
def find_certificate_name(file_name):
"""Search the CRT for the actual aggregator name."""
# This loop looks for the collaborator name in the key
with open(file_name, 'r') as f:
for line in f:
if 'Subject: CN=' in line:
col_name = line.split('=')[-1].strip()
break
return col_name | 853ec62b69feebd86c7a56e1d47b2c12e7f56d63 | 3,650,854 |
from typing import List
def float2bin(p: float, min_bits: int = 10, max_bits: int = 20, relative_error_tol=1e-02) -> List[bool]:
""" Converts probability `p` into binary list `b`.
Args:
p: probability such that 0 < p < 1
min_bits: minimum number of bits before testing relative error.
max_bits: maximum number of bits for truncation.
relative_error_tol: relative error tolerance
Returns:
b: List[bool]
Examples:
Probability 0.5 becomes:
>>> float2bin(0.5) # Is 0.1
[1]
Moreover 0.125 is:
>>> float2bin(0.125) # Is 0.001
[0, 0, 1]
Some numbers get truncated. For example, probability 1/3 becomes:
>>> float2bin(1/3) # Is 0.0101010101...
[0, 1, 0, 1, 0, 1, 0, 1, 0]
You can increase the maximum number of bits to reach float precision, for example:
>>> 1/3
0.3333333333333333
>>> q = float2bin(1/3, 64)
>>> bin2float(q)
0.3333333333333333
>>> 1/3 == bin2float(q)
True
"""
assert 1 > p > 0
b = []
i = 1
original_p = 1 - p
while p != 0 or i > max_bits:
if i > min_bits:
if isclose(1 - bin2float(b), original_p, rtol=relative_error_tol, atol=0):
break
if p >= 2 ** -i:
b.append(True)
p -= 2 ** -i
else:
b.append(False)
i += 1
return b | 1b25f84255ace0503f06ae2ab9f8dc650206176c | 3,650,856 |
def bin_thresh(img: np.ndarray, thresh: Number) -> np.ndarray:
"""
Performs binary thresholding of an image
Parameters
----------
img : np.ndarray
Image to filter.
    thresh : Number
        Pixel values >= thresh map to True, else False.
Returns
-------
np.ndarray :
Binarized image, same shape as input
"""
res = img >= thresh
return res | 9064fb5f50c22aabc73bf63d3a818b6898a19a58 | 3,650,857 |
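A minimal sketch, assuming numpy is imported as np as the type hints suggest:

import numpy as np

img = np.array([[0, 100],
                [200, 50]])
print(bin_thresh(img, 100))
# [[False  True]
#  [ True False]]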
from mathutils import Matrix, Vector, Euler
def add_object_align_init(context, operator):
"""
Return a matrix using the operator settings and view context.
:arg context: The context to use.
:type context: :class:`bpy.types.Context`
:arg operator: The operator, checked for location and rotation properties.
:type operator: :class:`bpy.types.Operator`
:return: the matrix from the context and settings.
:rtype: :class:`mathutils.Matrix`
"""
properties = operator.properties if operator is not None else None
space_data = context.space_data
if space_data and space_data.type != 'VIEW_3D':
space_data = None
# location
if operator and properties.is_property_set("location"):
location = Matrix.Translation(Vector(properties.location))
else:
if space_data: # local view cursor is detected below
location = Matrix.Translation(space_data.cursor_location)
else:
location = Matrix.Translation(context.scene.cursor_location)
if operator:
properties.location = location.to_translation()
# rotation
view_align = (context.user_preferences.edit.object_align == 'VIEW')
view_align_force = False
if operator:
if properties.is_property_set("view_align"):
view_align = view_align_force = operator.view_align
else:
if properties.is_property_set("rotation"):
# ugh, 'view_align' callback resets
value = properties.rotation[:]
properties.view_align = view_align
properties.rotation = value
del value
else:
properties.view_align = view_align
if operator and (properties.is_property_set("rotation") and
not view_align_force):
rotation = Euler(properties.rotation).to_matrix().to_4x4()
else:
if view_align and space_data:
rotation = space_data.region_3d.view_matrix.to_3x3().inverted()
rotation.resize_4x4()
else:
rotation = Matrix()
# set the operator properties
if operator:
properties.rotation = rotation.to_euler()
return location * rotation | 6bd32226c7024245b1252c3a51f5ae713f43a1b2 | 3,650,858 |
import pickle
def load_dataset():
"""
load dataset
:return: dataset in numpy style
"""
data_location = 'data.pk'
data = pickle.load(open(data_location, 'rb'))
return data | 9467826bebfc9ca3ad1594904e9f3195e345c065 | 3,650,859 |
def video_feed():
"""Return camera live feed."""
return Response(gen(Camera()),
mimetype='multipart/x-mixed-replace; boundary=frame') | 87c9ae8aa84fe17a16b040d56fbdaac6351e0706 | 3,650,860 |
def area_in_squaremeters(geodataframe):
"""Calculates the area sizes of a geo dataframe in square meters.
Following https://gis.stackexchange.com/a/20056/77760 I am choosing equal-area projections
to receive a most accurate determination of the size of polygons in the geo dataframe.
Instead of Gall-Peters, as suggested in the answer, I am using EPSG_3035 which is
particularly usefull for Europe.
Returns a pandas series of area sizes in square meters.
"""
return geodataframe.to_crs(EPSG_3035_PROJ4).area | 47a2ae042c8cda7fa6b66ccd011d0293afb36504 | 3,650,861 |
import scipy
def add_eges_grayscale(image):
""" Edge detect.
Keep original image grayscale value where no edge.
"""
greyscale = rgb2gray(image)
laplacian = np.array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]])
edges = scipy.ndimage.filters.correlate(greyscale, laplacian)
for index,value in np.ndenumerate(edges):
edges[index] = 255-greyscale[index] if value == 0 else 0
return edges | 0cba5152578722693d0d796252a99973e980b365 | 3,650,862 |
def generateFromSitePaymentObject(signature: str, account_data: dict, data: dict)->dict:
"""[summary]
Creates object for from site chargment request
Args:
signature (str): signature hash string
account_data (dict): merchant_account: str
merchant_domain: str
data (dict): order + personal data to create charge
orderReference (str): timestamp
amount (float): order total amount
currency (str): 'USD', 'UAH', 'RUB'
card (str): user card number
expMonth (str): card expires month
expYear (str): card expires year
cardCvv (str): card cvv
cardHolder (str): full name of card holder "Test test"
productName (list[str]): product names list
productPrice (list[float]): product price list
productCount (list[int]): product count list
clientFirstName (str): client first name
clientLastName (str): client last name
clientCountry (str): client country
clientEmail (str): client email
clientPhone (str): client phone
Returns:
dict: [description]
"""
return {
"transactionType":"CHARGE",
'merchantAccount': account_data['merchant_account'],
"merchantAuthType":"SimpleSignature",
'merchantDomainName': account_data['merchant_domain'],
"merchantTransactionType":"AUTH",
"merchantTransactionSecureType": "NON3DS",
'merchantSignature': signature,
"apiVersion":1,
'orderReference': str(data['orderReference']),
'orderDate': str(data['orderReference']),
"amount":data["amount"],
'currency': data['currency'],
"card":data['card'],
"expMonth":data['expMonth'],
"expYear":data['expYear'],
"cardCvv":data['cardCvv'],
"cardHolder":data['cardHolder'],
'productName': list(map(str, data['productName'])),
'productPrice': list(map(float, data['productPrice'])),
'productCount': list(map(int, data['productCount'])),
"clientFirstName":data['clientFirstName'],
"clientLastName":data['clientLastName'],
"clientCountry":data['clientCountry'],
"clientEmail":data['clientEmail'],
"clientPhone":data['clientPhone'],
} | 149434694e985956dede9bf8b6b0da1215ac9963 | 3,650,863 |
def deal_weights(node, data=None):
""" deal the weights of the custom layer
"""
layer_type = node.layer_type
weights_func = custom_layers[layer_type]['weights']
name = node.layer_name
return weights_func(name, data) | a2a271ea0aeb94a1267dbc06da8997985b81633e | 3,650,864 |
def label_brand_generic(df):
""" Correct the formatting of the brand and generic drug names """
df = df.reset_index(drop=True)
df = df.drop(['drug_brand_name', 'drug_generic_name'], axis=1)
df['generic_compare'] = df['generic_name'].str.replace('-', ' ')
df['generic_compare'] = df['generic_compare'].str.replace('with ', '')
df['generic_compare'] = df['generic_compare'].str.replace('/', ' ')
df['brand_compare'] = df['brand_name'].str.replace('-', ' ')
df['brand_compare'] = df['brand_compare'].str.replace('with ', '')
df['brand_compare'] = df['brand_compare'].str.replace('/', ' ')
df_na = df.fillna(0) #df.dropna().sort_values(by='generic_name')
risk_class_list = []
# Find contingency table for each generic
# format [[brand_ad_ev, brand_bene], [generic_ad_ev, generic_bene]]
for i, val in enumerate(df_na['generic_compare']):
if ((df_na.iloc[i]['brand_compare'] == val) | (df_na.iloc[i]['brand_compare'] in val) |
(val in df_na.iloc[i]['brand_compare'])):
# GENERIC NEG = -1
risk_class_list.append(-1)
else:
# BRAND POS = 1
risk_class_list.append(1)
risk_series = pd.Series(risk_class_list).replace(np.inf, np.nan)
risk_series = risk_series.replace(-np.inf, np.nan)
df_na['risk_class'] = risk_series
df['risk_class'] = risk_series
# Drop columns that are redunant from name matching
df_na = df_na.drop(['generic_compare', 'brand_compare'], axis = 1)
df = df.drop(['generic_compare', 'brand_compare'], axis = 1)
df_class_generic_count = pd.pivot_table(df, index = ['generic_name'],
values = ['risk_class'], aggfunc = 'count')
df_class_generic_count.rename(columns={'risk_class' : 'risk_count'}, inplace=True)
df = df.merge(df_class_generic_count, right_index=True, left_on = 'generic_name', how='inner')
return df | a421eece6e595159847821abcaf2cf7dd8dc88c5 | 3,650,865 |
def RMSRE(
image_true: np.ndarray,
image_test: np.ndarray,
mask: np.ndarray = None,
epsilon: float = 1e-9,
) -> float:
"""Root mean squared relative error (RMSRE) between two images within the
    specified mask. If no mask is specified, the entire image is used.
Parameters
----------
image_true : np.ndarray
ground truth image.
image_test : np.ndarray
predicted image.
mask : np.ndarray, optional
mask to compute the RMSRE in, by default None
epsilon : float, optional
epsilon used to stabilize the calculation of the relative error,
by default 1e-9
Returns
-------
float
RMSRE value between the images within the specified mask.
"""
if mask is None:
mask = np.ones_like(image_true)
mask_flat = mask.reshape(-1).astype(bool)
# flatten
relativeErrorImageFlat = (
image_test.reshape(-1)[mask_flat] - image_true.reshape(-1)[mask_flat]
) / (image_true.reshape(-1)[mask_flat] + epsilon)
return np.sqrt(
np.mean(relativeErrorImageFlat) ** 2 + np.std(relativeErrorImageFlat) ** 2
) | 6b377b2588ef0c02f059248d3214e0d7960ca25b | 3,650,866 |
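A quick check of the metric, assuming numpy is imported as np: a uniform +10% over-prediction gives an RMSRE of about 0.1.

import numpy as np

image_true = np.full((4, 4), 100.0)
image_test = image_true * 1.10        # uniform 10% relative error
print(RMSRE(image_true, image_test))  # ~0.1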
import PIL
import logging
def getImage(imageData, flag):
"""
Returns the PIL image object from imageData based on the flag.
"""
image = None
try:
if flag == ENHANCED:
image = PIL.Image.open(imageData.enhancedImage.file)
elif flag == UNENHANCED:
image = PIL.Image.open(imageData.unenhancedImage.file)
elif flag == DISPLAY:
image = PIL.Image.open(imageData.image.file)
    except Exception:
logging.error("image cannot be read from the image data")
return None
return image | a3aaa80bc396fcdf099d5963706d21d63a6dcf0d | 3,650,867 |
def save_record(record_type,
record_source,
info,
indicator,
date=None):
"""
A convenience function that calls 'create_record' and also saves the resulting record.
:param record_type: The record type, which should be a value from the RecordType enumeration
:param record_source: The source for the record, which should be a value from the RecordSource enumeration
:param info: The actual data to be stored in the record
:param date: The date to use with this record, or None to use the current date
:return: The new IndicatorRecord instance
"""
record = create_record(record_type, record_source, info, indicator, date)
record.save()
logger.info("%s (%s) record from %s saved successfully",
record_type.name,
record_type.title,
record_source.title)
return record | 903eb7333cfd2cc534812c5417e5e32a7769ffe4 | 3,650,868 |
def update_product_price(pid: str, new_price: int):
""" Update product's price
Args:
pid (str): product id
new_price (int): new price
Returns:
dict: status(success, error)
"""
playload = {'status': ''}
try:
connection = create_connection()
with connection:
with connection.cursor() as cursor:
sql = "UPDATE `product` SET `PRICE` = %s WHERE `PID` = %s"
cursor.execute(sql, (new_price, pid))
connection.commit()
playload['status'] = 'success'
return playload
    except Exception:
playload['status'] = 'error'
return playload | fff3723a9138724f1957cd9a669cdcf79e4ed4e5 | 3,650,869 |
def select_n_products(lst, n):
"""Select the top N products (by number of reviews)
args:
lst: a list of lists that are (key,value) pairs for (ASIN, N-reviews)
sorted on the number of reviews in reverse order
n: a list of three numbers,
returns:
a list of lists with N products
"""
top_products = []
first_third = lst[100:100 + n[0] + 1]
second_third = lst[1000:1000 + n[1] + 1]
third_third = lst[50000:50000 + n[2] + 1]
top_products.extend(first_third)
top_products.extend(second_third)
top_products.extend(third_third)
n_reviews = sum([x[1] for x in top_products])
print "The number of products is: {} and the number of reviews is: {}".format(
sum(n), n_reviews)
return(top_products) | ed052708010512758845186ae9e4fb33b41bc511 | 3,650,870 |
def load_vanHateren(params):
"""
Load van Hateren data and format as a Dataset object
Inputs:
params [obj] containing attributes:
data_dir [str] directory to van Hateren data
rand_state (optional) [obj] numpy random state object
num_images (optional) [int] how many images to extract. Default (None) is all images.
image_edge_size (optional) [int] how many pixels on an edge. Default (None) is full-size.
"""
# Parse params
assert hasattr(params, "data_dir"), ("function input must have 'data_dir' kwarg")
data_dir = params.data_dir
if hasattr(params, "rand_state"):
rand_state = params.rand_state
else:
#assert hasattr(params, "rand_seed"), ("Params must specify a random state or seed")
if hasattr(params, "rand_seed"):
rand_state = np.random.RandomState(params.rand_seed)
else:
rand_state = np.random.RandomState(None)
print("WARNING: Params did not specify a random state or seed")
num_images = int(params.num_images) if hasattr(params, "num_images") else None
image_edge_size = int(params.image_edge_size) if hasattr(params, "image_edge_size") else None
# Get data
img_filename = data_dir+"/img/images_curated.h5" # pre-curated dataset
vh_data = vanHateren(img_filename, num_images, rand_state)
image_dataset = Dataset(vh_data.images, lbls=None, ignore_lbls=None, rand_state=rand_state)
# Resize data
if image_edge_size is not None:
edge_scale = image_edge_size/image_dataset.shape[1] #vh has square images
assert edge_scale <= 1.0, (
"image_edge_size (%g) must be less than or equal to the original size (%g)."%(image_edge_size,
image_dataset.shape[1]))
scale_factor = [1.0, edge_scale, edge_scale, 1.0] # batch & channel don't get downsampled
image_dataset.downsample(scale_factor, order=3)
return {"train":image_dataset} | ca32f182f5534da89df0bd5454e74a586c6ca4d6 | 3,650,871 |
import torch
def wrap_to_pi(inp, mask=None):
"""Wraps to [-pi, pi)"""
if mask is None:
mask = torch.ones(1, inp.size(1))
if mask.dim() == 1:
mask = mask.unsqueeze(0)
mask = mask.to(dtype=inp.dtype)
val = torch.fmod((inp + pi) * mask, 2 * pi)
neg_mask = (val * mask) < 0
val = val + 2 * pi * neg_mask.to(val.dtype)
val = (val - pi)
inp = (1 - mask) * inp + mask * val
return inp | 7aca43bb2146c1cad07f9a070a7099e6fb8ad857 | 3,650,874 |
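A small sketch, assuming pi comes from math (the snippet uses a bare pi) and torch is installed:

import torch
from math import pi

x = torch.tensor([[3 * pi / 2, -3 * pi / 2, 0.5]])
print(wrap_to_pi(x))
# tensor([[-1.5708,  1.5708,  0.5000]])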
import pandas
def if_pandas(func):
"""Test decorator that skips test if pandas not installed."""
@wraps(func)
def run_test(*args, **kwargs):
        try:
            import pandas  # noqa: F401 -- only checking that pandas is importable
        except ImportError:
pytest.skip('Pandas not available.')
else:
return func(*args, **kwargs)
return run_test | b39f88543559c4f4f1b9bb5bb30768916d3708d6 | 3,650,875 |
def handle_front_pots(pots, next_pots):
"""Handle front, additional pots in pots."""
if next_pots[2] == PLANT:
first_pot = pots[0][1]
pots = [
[next_pots[2], first_pot - 1]] + pots
return pots, next_pots[2:]
return pots, next_pots[3:] | 53ec905a449c0402946cb8c28852e81da80a92ef | 3,650,876 |
import types
def environment(envdata):
"""
Class decorator that allows to run tests in sandbox against different Qubell environments.
Each test method in suite is converted to <test_name>_on_environemnt_<environment_name>
:param params: dict
"""
#assert isinstance(params, dict), "@environment decorator should take 'dict' with environments"
def copy(func, name=None):
return types.FunctionType(func.func_code, func.func_globals, name=name,
argdefs=func.func_defaults,
closure=func.func_closure)
def wraps_class(clazz):
if "environments" in clazz.__dict__:
log.warn("Class {0} environment attribute is overridden".format(clazz.__name__))
params = format_as_api(envdata)
clazz.environments = params
methods = [method
for _, method in clazz.__dict__.items()
if isinstance(method, types.FunctionType) and method.func_name.startswith("test")]
for env in params:
if env['name'] != DEFAULT_ENV_NAME():
env['name'] += '_for_%s' % clazz.__name__ # Each test class should have it's own set of envs.
for method in methods:
delattr(clazz, method.func_name)
log.info("Test '{0}' multiplied per environment in {1}".format(method.func_name, clazz.__name__))
for env in params:
new_name = method.func_name + "_on_environment_" + env['name']
setattr(clazz, new_name, copy(method, new_name))
return clazz
return wraps_class | 9ce82ff8ee3627f8795b7bc9634c298e8ff195bc | 3,650,877 |
def get_domain_name(url):
""" Returns the domain name from a URL """
parsed_uri = urlparse(url)
return parsed_uri.netloc | 00160285a29a4b2d1fe42fb8ec1648ca4c31fa8b | 3,650,878 |
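For example (given the module-level `from urllib.parse import urlparse` the function expects):

print(get_domain_name("https://docs.python.org/3/library/urllib.parse.html"))  # docs.python.org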
def get_answer_str(answers: list, scale: str):
"""
:param ans_type: span, multi-span, arithmetic, count
:param ans_list:
:param scale: "", thousand, million, billion, percent
:param mode:
:return:
"""
sorted_ans = sorted(answers)
ans_temp = []
for ans in sorted_ans:
ans_str = str(ans)
if is_number(ans_str):
ans_num = to_number(ans_str)
if ans_num is None:
if scale:
ans_str = ans_str + " " + str(scale)
else:
if '%' in ans_str: # has been handled the answer itself is a percentage
ans_str = '%.4f' % ans_num
else:
ans_str = '%.4f' % (round(ans_num, 2) * scale_to_num(scale))
else:
if scale:
ans_str = ans_str + " " + str(scale)
ans_temp.append(ans_str)
return [" ".join(ans_temp)] | 734015503ccec63265a0531aa05e8bd8514c7c15 | 3,650,879 |
def user_0post(users):
"""
Fixture that returns a test user with 0 posts.
"""
return users['user2'] | 5401e7f356e769b5ae68873f2374ef74a2d439c6 | 3,650,880 |
import json
def transportinfo_decoder(obj):
"""Decode programme object from json."""
transportinfo = json.loads(obj)
if "__type__" in transportinfo and transportinfo["__type__"] == "__transportinfo__":
return TransportInfo(**transportinfo["attributes"])
return transportinfo | 8a311cb419e9985ef0a184b82888220c0f3258b2 | 3,650,883 |
def group_events_data(events):
"""
Group events according to the date.
"""
# e.timestamp is a datetime.datetime in UTC
# change from UTC timezone to current seahub timezone
def utc_to_local(dt):
tz = timezone.get_default_timezone()
utc = dt.replace(tzinfo=timezone.utc)
local = timezone.make_naive(utc, tz)
return local
event_groups = []
for e in events:
e.time = utc_to_local(e.timestamp)
e.date = e.time.strftime("%Y-%m-%d")
if e.etype == 'repo-update':
e.author = e.commit.creator_name
elif e.etype == 'repo-create':
e.author = e.creator
else:
e.author = e.repo_owner
if len(event_groups) == 0 or \
len(event_groups) > 0 and e.date != event_groups[-1]['date']:
event_group = {}
event_group['date'] = e.date
event_group['events'] = [e]
event_groups.append(event_group)
else:
event_groups[-1]['events'].append(e)
return event_groups | de2f2031bdcaaf2faffdb99c67bbbb1e15828ef8 | 3,650,884 |
def create_matrix(PBC=None):
"""
Used for calculating distances in lattices with periodic boundary conditions. When multiplied with a set of points, generates additional points in cells adjacent to and diagonal to the original cell
Args:
PBC: an axis which does not have periodic boundary condition. Ex: PBC=1 cancels periodic boundary conditions along the x axis
Returns:
A numpy array of matrices which can be multiplied by a set of coordinates
"""
matrix = []
i_list = [-1, 0, 1]
j_list = [-1, 0, 1]
k_list = [-1, 0, 1]
if PBC == 1:
i_list = [0]
elif PBC == 2:
j_list = [0]
elif PBC == 3:
k_list = [0]
for i in i_list:
for j in j_list:
for k in k_list:
matrix.append([i,j,k])
return np.array(matrix, dtype=float) | 7470803fe8297ef2db1ce4bd159e9d9c93d34787 | 3,650,885 |
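A shape check, assuming numpy is imported as np as the function requires: without a cancelled axis there are 27 offset vectors, and cancelling periodicity along one axis drops that to 9.

print(create_matrix().shape)       # (27, 3) -> offsets in {-1, 0, 1}^3
print(create_matrix(PBC=1).shape)  # (9, 3)  -> x-offsets fixed to 0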
def get_additive_seasonality_linear_trend() -> pd.Series:
"""Get example data for additive seasonality tutorial"""
dates = pd.date_range(start="2017-06-01", end="2021-06-01", freq="MS")
T = len(dates)
base_trend = 2
state = np.random.get_state()
np.random.seed(13)
observations = base_trend * np.arange(T) + np.random.normal(loc=4, size=T)
np.random.set_state(state)
seasonality = 12
time = np.arange(0, T / seasonality, 1 / seasonality)
amplitude = 10
sin_cos_wave = amplitude * np.cos(2 * np.pi * time) + amplitude * np.sin(
2 * np.pi * time
)
observations += sin_cos_wave
output = pd.Series(observations, index=dates)
return output | 034b4ca9e086e95fa1663704fda91ae3986694b4 | 3,650,886 |
def is_client_trafic_trace(conf_list, text):
    """Determine if text is client traffic that should be included."""
    for conf in conf_list:
        if text.find(conf.ident_text) != -1:
            return True
    return False | 0b7fdf58e199444ea52476d5621ea9353475b0a0 | 3,650,887
def isinf(x):
"""
For an ``mpf`` *x*, determines whether *x* is infinite::
>>> from sympy.mpmath import *
>>> isinf(inf), isinf(-inf), isinf(3)
(True, True, False)
"""
if not isinstance(x, mpf):
return False
return x._mpf_ in (finf, fninf) | 4d5ca6ac2f8ed233a70c706b7fff97bf171c4f21 | 3,650,888 |
def formalize_switches(switches):
"""
Create all entries for the switches in the topology.json
"""
switches_formal=dict()
for s, switch in enumerate(switches):
switches_formal["s_"+switch]=formalize_switch(switch, s)
return switches_formal | 8dbb9987e5bc9c9f81afc0432428a746e2f05fc4 | 3,650,889 |
def arp_scores(run):
"""
This function computes the Average Retrieval Performance (ARP) scores according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
The ARP score is defined by the mean across the different topic scores of a run.
For all measures outputted by trec_eval, the ARP scores will be determined.
@param run: The run to be evaluated.
@return: Dictionary containing the ARP scores for every measure outputted by trec_eval.
"""
return dict(_arp_scores(run)) | 0e23eb1d6ee3c2502408585b1d0dbb0993ca7628 | 3,650,890 |
from typing import Tuple
from typing import Optional
import numpy as np
import scipy.stats
def bayesian_proportion_test(
x:Tuple[int,int],
n:Tuple[int,int],
prior:Tuple[float,float]=(0.5,0.5),
prior2:Optional[Tuple[float,float]]=None,
num_samples:int=1000,
seed:int=8675309) -> Tuple[float,float,float]:
""" Perform a Bayesian test to identify significantly different proportions.
This test is based on a beta-binomial conjugate model. It uses Monte Carlo
simulations to estimate the posterior of the difference between the
proportions, as well as the likelihood that :math:`\pi_1 > \pi_2` (where
:math:`\pi_i` is the likelihood of success in sample :math:`i`).
Parameters
----------
x : typing.Tuple[int,int]
The number of successes in each sample
n : typing.Tuple[int,int]
The number of trials in each sample
prior : typing.Tuple[float,float]
The parameters of the beta distribution used as the prior in the conjugate
model for the first sample.
prior2 : typing.Optional[typing.Tuple[float,float]]
The parameters of the beta distribution used as the prior in the conjugate
model for the second sample. If this is not specified, then `prior` is used.
num_samples : int
The number of simulations
seed : int
The seed for the random number generator
Returns
-------
difference_{mean,var} : float
The posterior mean and variance of the difference in the likelihood of success
in the two samples. A negative mean indicates that the likelihood in sample 2
is higher.
p_pi_1_greater : float
The probability that :math:`\pi_1 > \pi_2`
"""
# copy over the prior if not specified for sample 2
if prior2 is None:
prior2 = prior
# check the bounds
if len(x) != 2:
msg = "[bayesian_proportion_test]: please ensure x has exactly two elements"
raise ValueError(msg)
if len(n) != 2:
msg = "[bayesian_proportion_test]: please ensure n has exactly two elements"
raise ValueError(msg)
if len(prior) != 2:
msg = "[bayesian_proportion_test]: please ensure prior has exactly two elements"
raise ValueError(msg)
if len(prior2) != 2:
msg = "[bayesian_proportion_test]: please ensure prior2 has exactly two elements"
raise ValueError(msg)
# set the seed
if seed is not None:
np.random.seed(seed)
# perform the test
    a = prior[0] + x[0]
    b = prior[1] + n[0] - x[0]
    s1_posterior_samples = scipy.stats.beta.rvs(a, b, size=num_samples)
    a = prior2[0] + x[1]
    b = prior2[1] + n[1] - x[1]
s2_posterior_samples = scipy.stats.beta.rvs(a, b, size=num_samples)
diff_posterior_samples = s1_posterior_samples - s2_posterior_samples
diff_posterior_mean = np.mean(diff_posterior_samples)
diff_posterior_var = np.var(diff_posterior_samples)
p_pi_1_greater = sum(s1_posterior_samples > s2_posterior_samples) / num_samples
return diff_posterior_mean, diff_posterior_var, p_pi_1_greater | 5f63424b9dcb6e235b13a9e63f0b9a2dc1e95b31 | 3,650,891 |
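# Hedged usage sketch for bayesian_proportion_test (not part of the original
# snippet): compare 30/100 against 50/100 successes under the default
# Beta(0.5, 0.5) priors; a negative mean difference favours sample 2.
mean_diff, var_diff, p_greater = bayesian_proportion_test(x=(30, 50), n=(100, 100))
print(mean_diff, var_diff, p_greater)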
import torch
from torch import Tensor
def _create_triangular_filterbank(
all_freqs: Tensor,
f_pts: Tensor,
) -> Tensor:
"""Create a triangular filter bank.
Args:
all_freqs (Tensor): STFT freq points of size (`n_freqs`).
f_pts (Tensor): Filter mid points of size (`n_filter`).
Returns:
fb (Tensor): The filter bank of size (`n_freqs`, `n_filter`).
"""
# Adopted from Librosa
# calculate the difference between each filter mid point and each stft freq point in hertz
f_diff = f_pts[1:] - f_pts[:-1] # (n_filter + 1)
slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_filter + 2)
# create overlapping triangles
zero = torch.zeros(1)
down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_filter)
up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_filter)
fb = torch.max(zero, torch.min(down_slopes, up_slopes))
return fb | 1ad5bd58d673626a15e27b6d9d68829299fe7636 | 3,650,892 |
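# Hedged usage sketch for _create_triangular_filterbank (not part of the
# original snippet): the frequency grids below are made up. Ten filters need
# n_filter + 2 = 12 edge points.
all_freqs = torch.linspace(0, 8000, 257)  # STFT bin centre frequencies in Hz
f_pts = torch.linspace(0, 8000, 12)       # filter edge/mid points in Hz
fb = _create_triangular_filterbank(all_freqs, f_pts)
print(fb.shape)  # torch.Size([257, 10])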
def convert_millis(track_dur_lst):
""" Convert milliseconds to 00:00:00 format """
converted_track_times = []
for track_dur in track_dur_lst:
seconds = (int(track_dur)/1000)%60
        minutes = int(int(track_dur)/60000) % 60
hours = int(int(track_dur)/(60000*60))
converted_time = '%02d:%02d:%02d' % (hours, minutes, seconds)
converted_track_times.append(converted_time)
return converted_track_times | 3d5199da01529f72b7eb6095a26e337277f3c2c9 | 3,650,893 |
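# Hedged usage sketch for convert_millis (not part of the original snippet):
# durations are given in milliseconds.
print(convert_millis([215000, 59999]))  # ['00:03:35', '00:00:59']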
def sync_xlims(*axes):
"""Synchronize the x-axis data limits for multiple axes. Uses the maximum
upper limit and minimum lower limit across all given axes.
Parameters
----------
*axes : axis objects
List of matplotlib axis objects to format
Returns
-------
    xmin, xmax : float
The computed bounds
"""
xmins, xmaxs = zip(*[ax.get_xlim() for ax in axes])
xmin = min(xmins)
xmax = max(xmaxs)
for ax in axes:
ax.set_xlim(xmin, xmax)
return xmin, xmax | a377877a9647dfc241db482f8a2c630fe3eed146 | 3,650,894 |
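# Hedged usage sketch for sync_xlims (not part of the original snippet):
# matplotlib is assumed to be available.
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(2)
ax1.plot([0, 10], [0, 1])
ax2.plot([5, 20], [0, 1])
xmin, xmax = sync_xlims(ax1, ax2)  # both axes now span roughly 0..20 (plus margins)
print(xmin, xmax)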
def algo_config_to_class(algo_config):
"""
Maps algo config to the IRIS algo class to instantiate, along with additional algo kwargs.
Args:
algo_config (Config instance): algo config
Returns:
algo_class: subclass of Algo
algo_kwargs (dict): dictionary of additional kwargs to pass to algorithm
"""
pol_cls, _ = algo_name_to_factory_func("bc")(algo_config.actor)
plan_cls, _ = algo_name_to_factory_func("gl")(algo_config.value_planner.planner)
value_cls, _ = algo_name_to_factory_func("bcq")(algo_config.value_planner.value)
return IRIS, dict(policy_algo_class=pol_cls, planner_algo_class=plan_cls, value_algo_class=value_cls) | 884ab7a91d9d8c901d078f9b477d5d21cba3e5ff | 3,650,895 |
from collections import defaultdict
def group_by_key(dirnames, key):
"""Group a set of output directories according to a model parameter.
Parameters
----------
dirnames: list[str]
Output directories
key: various
A field of a :class:`Model` instance.
Returns
-------
groups: dict[various: list[str]]
For each value of `key` that is found at least once in the models, a
list of the output directories where `key` is that value.
"""
groups = defaultdict(lambda: [])
for dirname in dirnames:
m = get_recent_model(dirname)
groups[m.__dict__[key]].append(dirname)
return dict(groups) | b291cd889c72fb198400b513e52ff9417c8d93b7 | 3,650,896 |
import pandas as pd
def redistrict_grouped(df, kind, group_cols, district_col=None,
value_cols=None, **kwargs):
"""Redistrict dataframe by groups
Args:
df (pandas.DataFrame): input dataframe
kind (string): identifier of redistrict info (e.g. de/kreise)
group_cols (list): List of column names to group by
district_col (string): Name of district column
value_cols (list): List of column names with values to operate on
**kwargs: see redistrict function
Returns:
pandas.Dataframe: Redistricted dataframe
"""
return pd.concat(redistrict_grouped_dataframe(df, kind, group_cols,
district_col=district_col, value_cols=value_cols,
**kwargs)) | 21f6514ca15d5fff57d03dab9d0bb7693c132e95 | 3,650,897 |
from typing import Tuple
from typing import List
import torch
def count_wraps_rand(
nr_parties: int, shape: Tuple[int]
) -> Tuple[List[ShareTensor], List[ShareTensor]]:
"""Count wraps random.
The Trusted Third Party (TTP) or Crypto provider should generate:
- a set of shares for a random number
- a set of shares for the number of wraparounds for that number
Those shares are used when doing a public division, such that the
end result would be the correct one.
Args:
nr_parties (int): Number of parties
shape (Tuple[int]): The shape for the random value
Returns:
        List[List[List[ShareTensor, ShareTensor]]]: a list of instances with the shares
for a random integer value and shares for the number of wraparounds that are done when
reconstructing the random value
"""
rand_val = torch.empty(size=shape, dtype=torch.long).random_(
generator=ttp_generator
)
r_shares = MPCTensor.generate_shares(
secret=rand_val,
nr_parties=nr_parties,
tensor_type=torch.long,
encoder_precision=0,
)
wraps = count_wraps([share.tensor for share in r_shares])
theta_r_shares = MPCTensor.generate_shares(
secret=wraps, nr_parties=nr_parties, tensor_type=torch.long, encoder_precision=0
)
# We are always creating only an instance
primitives_sequential = [(r_shares, theta_r_shares)]
primitives = list(
map(list, zip(*map(lambda x: map(list, zip(*x)), primitives_sequential)))
)
return primitives | b16e21be2d421e134866df8929a319a19bdd304a | 3,650,898 |
from typing import Sequence
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
def text_sim(
sc1: Sequence,
sc2: Sequence,
) -> float:
"""Returns the Text_Sim similarity measure between two pitch class sets.
"""
sc1 = prime_form(sc1)
sc2 = prime_form(sc2)
corpus = [text_set_class(x) for x in sorted(allClasses)]
vectorizer = TfidfVectorizer()
trsfm = vectorizer.fit_transform(corpus)
text_similarity = cosine_similarity(trsfm)
names = [str(x) for x in sorted(allClasses)]
df = pd.DataFrame(text_similarity.round(3), columns=names, index=names)
return df[str(sc1)][str(sc2)] | 6479ad4916fb78d69935fb9b618c5eb02951f05a | 3,650,899 |
from osgeo import ogr
def _feature_properties(feature, layer_definition, whitelist=None, skip_empty_fields=False):
""" Returns a dictionary of feature properties for a feature in a layer.
Third argument is an optional list or dictionary of properties to
whitelist by case-sensitive name - leave it None to include everything.
A dictionary will cause property names to be re-mapped.
OGR property types:
OFTInteger (0), OFTIntegerList (1), OFTReal (2), OFTRealList (3),
OFTString (4), OFTStringList (5), OFTWideString (6), OFTWideStringList (7),
OFTBinary (8), OFTDate (9), OFTTime (10), OFTDateTime (11).
Extra OGR types for GDAL 2.x:
OFTInteger64 (12), OFTInteger64List (13)
"""
properties = {}
okay_types = [ogr.OFTInteger, ogr.OFTReal, ogr.OFTString,
ogr.OFTWideString, ogr.OFTDate, ogr.OFTTime, ogr.OFTDateTime]
if hasattr(ogr, 'OFTInteger64'):
okay_types.extend([ogr.OFTInteger64, ogr.OFTInteger64List])
for index in range(layer_definition.GetFieldCount()):
field_definition = layer_definition.GetFieldDefn(index)
field_type = field_definition.GetType()
name = field_definition.GetNameRef()
if type(whitelist) in (list, dict) and name not in whitelist:
continue
if field_type not in okay_types:
try:
name = [oft for oft in dir(ogr) if oft.startswith('OFT') and getattr(ogr, oft) == field_type][0]
except IndexError:
raise KnownUnknown("Found an OGR field type I've never even seen: %d" % field_type)
else:
raise KnownUnknown("Found an OGR field type I don't know what to do with: ogr.%s" % name)
if not skip_empty_fields or feature.IsFieldSet(name):
property = type(whitelist) is dict and whitelist[name] or name
properties[property] = feature.GetField(name)
return properties | 482e42a9f4761cd0273dfb4e5f70bdb55ce168d9 | 3,650,900 |
import sublime
def reverse_search(view, what, start=0, end=-1, flags=0):
"""Do binary search to find `what` walking backwards in the buffer.
"""
if end == -1:
end = view.size()
end = find_eol(view, view.line(end).a)
last_match = None
lo, hi = start, end
while True:
        middle = (lo + hi) // 2  # integer midpoint for the binary search
line = view.line(middle)
middle, eol = find_bol(view, line.a), find_eol(view, line.a)
if search_in_range(view, what, middle, hi, flags):
lo = middle
elif search_in_range(view, what, lo, middle - 1, flags):
hi = middle -1
else:
return calculate_relative_ref(view, '.')
# Don't search forever the same line.
if last_match and line.contains(last_match):
match = find_last_match(view, what, lo, hi, flags=flags)
return view.rowcol(match.begin())[0] + 1
last_match = sublime.Region(line.begin(), line.end()) | 7b8d95a987b9b986fb0e334cf3a9bc74014be67d | 3,650,901 |
def formatLookupLigatureSubstitution(lookup, lookupList, makeName=makeName):
""" GSUB LookupType 4 """
# substitute <glyph sequence> by <glyph>;
# <glyph sequence> must contain two or more of <glyph|glyphclass>. For example:
# substitute [one one.oldstyle] [slash fraction] [two two.oldstyle] by onehalf;
lines = list(filter(None, [ formatLookupflag(lookup, makeName=makeName) ])) \
+ ['sub {0} {1} by {2};'.format(first, ' '.join(lig.Component), lig.LigGlyph)
for sub in lookup.SubTable
for first, ligatures in sub.ligatures.items()
for lig in ligatures]
return (True, lines) | 3804d7c38564459b6f0cf19cbbac5e96642e61a2 | 3,650,902 |
import pathlib
import pandas as pd
def convert_raw2nc(path2rawfolder = '/nfs/grad/gradobs/raw/mlo/2020/', path2netcdf = '/mnt/telg/data/baseline/mlo/2020/',
                   database = None,
                   start_date = '2020-02-06',
                   pattern = '*sp02.*',
                   sernos = [1032, 1046],
                   site = 'mlo',
                   overwrite = False,
                   verbose = False,
                   raise_error = True,
                   test = False):
"""
Parameters
----------
path2rawfolder : TYPE, optional
DESCRIPTION. The default is '/nfs/grad/gradobs/raw/mlo/2020/'.
path2netcdf : TYPE, optional
DESCRIPTION. The default is '/mnt/telg/data/baseline/mlo/2020/'.
    database : TYPE, optional
        DESCRIPTION. The default is None.
start_date : TYPE, optional
DESCRIPTION. The default is '2020-02-06'.
pattern : str, optional
Only files with this pattern are considered. In newer raw data
versions this would be '*sp02.*'. In older ones: 'MLOD*'
sernos : TYPE, optional
DESCRIPTION. The default is [1032, 1046].
overwrite : TYPE, optional
DESCRIPTION. The default is False.
verbose : TYPE, optional
DESCRIPTION. The default is False.
test : TYPE, optional
If True only one file is processed. The default is False.
Returns
-------
None.
"""
# lines = get_lines_from_station_header()
path2rawfolder = pathlib.Path(path2rawfolder)
path2netcdf = pathlib.Path(path2netcdf)
try:
path2netcdf.mkdir(exist_ok=True)
except FileNotFoundError:
path2netcdf.parent.mkdir()
path2netcdf.mkdir()
file_list = list(path2rawfolder.glob(pattern))
# print(len(file_list))
# file_contents = []
# return file_list
df_in = pd.DataFrame(file_list, columns=['path_in'])
# test what format, old or new.
p2f = file_list[0]
nl = p2f.name.split('.')
if len(nl) == 2:
# old format like /nfs/grad/gradobs/raw/mlo/2013/sp02/MLOD013A.113
# get year from path
def path2date(path2file):
year = path2file.parent.parent.name
jul = int(''.join(filter(str.isdigit, path2file.name.split('.')[0])))
date = pd.to_datetime(year) + pd.to_timedelta(jul-1, 'd')
return date
# df_in.index = df_in.path_in.apply(lambda x: pd.to_datetime(year) + pd.to_timedelta((int(''.join(filter(str.isdigit, x.name.split('.')[0]))))-1, 'd'))
else:
# new format: gradobs.mlo-sp02.20200126.raw.dat
# df_in.index = df_in.path_in.apply(lambda x: pd.to_datetime(x.name.split('.')[2]))
path2date = lambda x: pd.to_datetime(x.name.split('.')[2])
# set index based on format
df_in.index = df_in.path_in.apply(path2date)
df_in.sort_index(inplace=True)
df_in = df_in.truncate(before=start_date)
df_out = pd.DataFrame(columns=['path_out'])
# generate output path
for sn in sernos:
for idx, row in df_in.iterrows():
# fnnc = row.path_in.name.replace('.dat','.nc')
# fnnc = fnnc.replace('-sp02', '.sp02')
# fnns = fnnc.split('.')
# fnns = fnns[:3] + [f'sn{str(sn)}'] + fnns[3:]
# fnnc = '.'.join(fnns)
# path2netcdf_file = path2netcdf.joinpath(fnnc)
date = idx
fnnc = f'gradobs.mlo.sp02.sn{sn}.{date.year}{date.month:02d}{date.day:02d}.raw.nc'
path2netcdf_file = path2netcdf.joinpath(fnnc)
df_add = pd.DataFrame({'path_in': row.path_in, 'path_out':path2netcdf_file}, index = [idx]
# ignore_index=True
)
df_out = df_out.append(df_add)
# check if file exists. Process only those that do not exist
df_out['exists'] = df_out.path_out.apply(lambda x: x.is_file())
df_work = df_out[~df_out.exists]
# return df_work
### bsts
work_array = df_work.path_in.unique()
print(f'No of files that need to be processed: {len(work_array)}')
# exists = 0
# new = 0
for e, file in enumerate(work_array):
# if e == 3: break
# ds = read_file(file, lines)
df_sel = df_work[df_work.path_in == file]
try:
dslist = read_file(file, database = database, site = site)
except IndexError:
if raise_error:
raise
else:
print('Instrument not installed ... skip', end = '...')
if test:
return {'file': file, 'database': database}
else:
continue
### generate output file name
# processing
for ds in dslist:
# fnnc = file.name.replace('.dat','.nc')
# fnnc = fnnc.replace('-sp02', '.sp02')
# fnns = fnnc.split('.')
# fnns = fnns[:3] + [f'sn{str(ds.serial_no.values)}'] + fnns[3:]
# fnnc = '.'.join(fnns)
# path2netcdf_file = path2netcdf.joinpath(fnnc)
# check which of the output files is the right ... still, i am not convinced this is the most elegant way to do this.... add the lineno in the work table?
sn = str(ds.serial_no.values)
try:
path2netcdf_file = [p2fo for p2fo in df_sel.path_out.values if sn in p2fo.name][0]
except IndexError:
assert(False), 'This Error is usually caused because one of the netcdf files (for a serial number) is deleted, but not the other.'
# save to file
ds.to_netcdf(path2netcdf_file)
if test:
break
# out = dict(processed = new,
# skipped = exists,
# last_ds_list = dslist)
if not test:
df_out['exists'] = df_out.path_out.apply(lambda x: x.is_file())
df_work = df_out[~df_out.exists]
work_array = df_work.path_in.unique()
assert(df_work.shape[0] == 0), f'df_work should be empty at the end. Still has {df_work.shape[0]} entries.'
return | 16313b1a7abc05fac469d9a0c5003eebb7ef2a8c | 3,650,903 |
import requests
def get_curricula(course_url, year):
"""Encodes the available curricula for a given course in a given year in a vaguely sane format
Dictionary fields:
- constant.CODEFLD: curriculum code as used in JSON requests
- constant.NAMEFLD: human-readable curriculum name"""
curricula = []
curricula_req_url = constant.CURRICULAURLFORMAT[get_course_lang(course_url)].format(course_url, year)
for curr in requests.get(curricula_req_url).json():
curricula.append({constant.CODEFLD: curr[constant.CURRVAL], constant.NAMEFLD: curr[constant.CURRNAME]})
return curricula | 878f2a54e41624887aed720de52dea15bdbf6528 | 3,650,904 |
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1, groups=1):
    """3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, groups=groups, bias=False) | 4e53568bb4bf88998020b0804770895b67e9e018 | 3,650,905 |
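# Hedged usage sketch for conv3x3 (not part of the original snippet): the
# padding of 1 keeps the spatial resolution unchanged.
import torch
layer = conv3x3(16, 32)
x = torch.randn(1, 16, 8, 8)
print(layer(x).shape)  # torch.Size([1, 32, 8, 8])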
import numpy
def zeros(shape, dtype=None):
"""
Create a Tensor filled with zeros, closer to Numpy's syntax than ``alloc``.
"""
if dtype is None:
dtype = config.floatX
return alloc(numpy.array(0, dtype=dtype), *shape) | 9d1d70f59b585d06623d41c18acc67ec16572307 | 3,650,907 |
from typing import Optional
import types
import numpy
from typing import cast
def station_location_from_rinex(rinex_path: str) -> Optional[types.ECEF_XYZ]:
"""
Opens a RINEX file and looks in the headers for the station's position
Args:
rinex_path: the path to the rinex file
Returns:
XYZ ECEF coords in meters for the approximate receiver location
approximate meaning may be off by a meter or so
or None if ECEF coords could not be found
"""
xyz = None
lat = None
lon = None
height = None
with open(rinex_path, "rb") as filedat:
for _ in range(50):
linedat = filedat.readline()
if b"POSITION XYZ" in linedat:
xyz = numpy.array([float(x) for x in linedat.split()[:3]])
elif b"Monument location:" in linedat:
lat, lon, height = [float(x) for x in linedat.split()[2:5]]
elif b"(latitude)" in linedat:
lat = float(linedat.split()[0])
elif b"(longitude)" in linedat:
lon = float(linedat.split()[0])
elif b"(elevation)" in linedat:
height = float(linedat.split()[0])
if lat is not None and lon is not None and height is not None:
xyz = coordinates.geodetic2ecef((lat, lon, height))
if xyz is not None:
return cast(types.ECEF_XYZ, xyz)
return None | e7fc390a36f34aed04d30becd544d58ea3f6aa41 | 3,650,908 |
def get_profiles():
"""Return the paths to all profiles in the local library"""
paths = APP_DIR.glob("profile_*")
return sorted(paths) | 729b78daa1d259a227147698a2d4d4c9c5126f29 | 3,650,909 |
def split(array, nelx, nely, nelz, dof):
"""
Splits an array of boundary conditions into an array of collections of
elements. Boundary conditions that are more than one node in size are
grouped together. From the nodes, the function returns the neighboring
elements inside the array.
"""
if len(array) == 0:
return []
array.sort()
connected_nodes = [array[0]]
nlist = []
tmp = _get_elem(array[0], nelx, nely, nelz, dof)
for i in range(1, len(array)):
if _nodes_connected(connected_nodes, array[i], nelx, nely, nelz, dof):
tmp = tmp.union(_get_elem(array[i], nelx, nely, nelz, dof))
connected_nodes.append(array[i])
else:
nlist.append(list(tmp))
tmp = _get_elem(array[i], nelx, nely, nelz, dof)
nlist.append(list(tmp))
return nlist | 1dbc48402e7124e3384bc56538b05f073fe64370 | 3,650,910 |
def sanitize_app_name(app):
"""Sanitize the app name and build matching path"""
app = "".join(c for c in app if c.isalnum() or c in ('.', '_')).rstrip().lstrip('/')
return app | fca922d8b622baa1d5935cd8eca2ffca050a4c86 | 3,650,911 |
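# Hedged usage sketch for sanitize_app_name (not part of the original snippet):
# anything that is not alphanumeric, '.', or '_' is dropped.
print(sanitize_app_name("/my app.v2 "))  # -> 'myapp.v2'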
import pathlib
def get_rinex_file_version(file_path: pathlib.PosixPath) -> str:
""" Get RINEX file version for a given file path
Args:
file_path: File path.
Returns:
RINEX file version
"""
with files.open(file_path, mode="rt") as infile:
try:
version = infile.readline().split()[0]
except IndexError:
log.fatal(f"Could not find Rinex version in file {file_path}")
return version | c7060e8eb32a0e5539323c7334221d4b1967bb1f | 3,650,912 |
import socket
def get_hm_port(identity_service, local_unit_name, local_unit_address,
host_id=None):
"""Get or create a per unit Neutron port for Octavia Health Manager.
A side effect of calling this function is that a port is created if one
does not already exist.
:param identity_service: reactive Endpoint of type ``identity-service``
:type identity_service: RelationBase class
:param local_unit_name: Name of juju unit, used to build tag name for port
:type local_unit_name: str
:param local_unit_address: DNS resolvable IP address of unit, used to
build Neutron port ``binding:host_id``
:type local_unit_address: str
:param host_id: Identifier used by SDN for binding the port
:type host_id: Option[None,str]
:returns: Port details extracted from result of call to
neutron_client.list_ports or neutron_client.create_port
:rtype: dict
:raises: api_crud.APIUnavailable, api_crud.DuplicateResource
"""
session = session_from_identity_service(identity_service)
try:
nc = init_neutron_client(session)
resp = nc.list_networks(tags='charm-octavia')
except NEUTRON_TEMP_EXCS as e:
raise APIUnavailable('neutron', 'networks', e)
network = None
n_resp = len(resp.get('networks', []))
if n_resp == 1:
network = resp['networks'][0]
elif n_resp > 1:
raise DuplicateResource('neutron', 'networks', data=resp)
else:
ch_core.hookenv.log('No network tagged with `charm-octavia` exists, '
'deferring port setup awaiting network and port '
'(re-)creation.', level=ch_core.hookenv.WARNING)
return
health_secgrp = None
try:
resp = nc.list_security_groups(tags='charm-octavia-health')
except NEUTRON_TEMP_EXCS as e:
raise APIUnavailable('neutron', 'security_groups', e)
n_resp = len(resp.get('security_groups', []))
if n_resp == 1:
health_secgrp = resp['security_groups'][0]
elif n_resp > 1:
raise DuplicateResource('neutron', 'security_groups', data=resp)
else:
ch_core.hookenv.log('No security group tagged with '
'`charm-octavia-health` exists, deferring '
'port setup awaiting network and port '
'(re-)creation...',
level=ch_core.hookenv.WARNING)
return
try:
resp = nc.list_ports(tags='charm-octavia-{}'
.format(local_unit_name))
except NEUTRON_TEMP_EXCS as e:
raise APIUnavailable('neutron', 'ports', e)
port_template = {
'port': {
# avoid race with OVS agent attempting to bind port
# before it is created in the local units OVSDB
'admin_state_up': False,
'binding:host_id': host_id or socket.gethostname(),
# NOTE(fnordahl): device_owner has special meaning
# for Neutron [0], and things may break if set to
            # an arbitrary value. Using a value known by Neutron's
            # is_dvr_serviced() function [1] gets us the correct
            # rules applied to the port to allow IPv6 Router
# Advertisement packets through LP: #1813931
# 0: https://github.com/openstack/neutron/blob/
# 916347b996684c82b29570cd2962df3ea57d4b16/
# neutron/plugins/ml2/drivers/openvswitch/
# agent/ovs_dvr_neutron_agent.py#L592
# 1: https://github.com/openstack/neutron/blob/
# 50308c03c960bd6e566f328a790b8e05f5e92ead/
# neutron/common/utils.py#L200
'device_owner': (
neutron_lib.constants.DEVICE_OWNER_LOADBALANCERV2),
'security_groups': [
health_secgrp['id'],
],
'name': 'octavia-health-manager-{}-listen-port'
.format(local_unit_name),
'network_id': network['id'],
},
}
n_resp = len(resp.get('ports', []))
if n_resp == 1:
hm_port = resp['ports'][0]
# Ensure binding:host_id is up to date on a existing port
#
# In the event of a need to update it, we bring the port down to make
# sure Neutron rebuilds the port correctly.
#
# Our caller, ``setup_hm_port``, will toggle the port admin status.
if hm_port and hm_port.get(
'binding:host_id') != port_template['port']['binding:host_id']:
try:
nc.update_port(hm_port['id'], {
'port': {
'admin_state_up': False,
'binding:host_id': port_template['port'][
'binding:host_id'],
}
})
except NEUTRON_TEMP_EXCS as e:
raise APIUnavailable('neutron', 'ports', e)
elif n_resp > 1:
raise DuplicateResource('neutron', 'ports', data=resp)
else:
# create new port
try:
resp = nc.create_port(port_template)
hm_port = resp['port']
ch_core.hookenv.log('Created port {}'.format(hm_port['id']),
ch_core.hookenv.INFO)
# unit specific tag is used by each unit to load their state
nc.add_tag('ports', hm_port['id'],
'charm-octavia-{}'
.format(local_unit_name))
# charm-wide tag is used by leader to load cluster state and build
# ``controller_ip_port_list`` configuration property
nc.add_tag('ports', hm_port['id'], 'charm-octavia')
except NEUTRON_TEMP_EXCS as e:
raise APIUnavailable('neutron', 'ports', e)
return hm_port | 6cde426643219f4fc3385a36e3c20503b8c41a9e | 3,650,913 |
def total_length(neurite):
"""Neurite length. For a morphology it will be a sum of all neurite lengths."""
return sum(s.length for s in neurite.iter_sections()) | 854429e073eaea49c168fb0f9e381c71d7a7038a | 3,650,914 |
from PIL import ImageOps
def _solarize(img, magnitude):
"""solarize"""
return ImageOps.solarize(img, magnitude) | d588068f42930872775e62a619333439d8aa47d8 | 3,650,915 |
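# Hedged usage sketch for _solarize (not part of the original snippet): pixel
# values at or above the given threshold of 128 are inverted.
from PIL import Image
img = Image.new("RGB", (4, 4), color=(200, 50, 120))
print(_solarize(img, 128).getpixel((0, 0)))  # -> (55, 50, 120)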
import numpy as np
def calculateCurvature(yRange, left_fit_cr):
"""
Returns the curvature of the polynomial `fit` on the y range `yRange`.
"""
return ((1 + (2 * left_fit_cr[0] * yRange * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
2 * left_fit_cr[0]) | af1cd81c3eeb85297bcfcb44779bf86b4c6b8dc9 | 3,650,916 |
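# Hedged usage sketch for calculateCurvature (not part of the original
# snippet): the function reads a module-level ym_per_pix (metres per pixel in
# y); both the value below and the polynomial fit are made-up assumptions.
ym_per_pix = 30 / 720
left_fit_cr = np.array([1e-4, -0.05, 300.0])
print(calculateCurvature(700, left_fit_cr))  # curvature radius at y = 700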
from django.shortcuts import render
def testing_server_error_view(request):
"""Displays a custom internal server error (500) page"""
return render(request, '500.html', {}) | 84055f37d1ba215ae0e439c1f9d96260208133ff | 3,650,918 |
def main_epilog() -> str:
"""
This method builds the footer for the main help screen.
"""
msg = "To get help on a specific command, see `conjur <command> -h | --help`\n\n"
msg += "To start using Conjur with your environment, you must first initialize " \
"the configuration. See `conjur init -h` for more information."
return msg | ecf4167535b5f1e787d286a3b2194816790a7e6a | 3,650,919 |
import numpy as np
def sigma_M(n):
"""boson lowering operator, AKA sigma minus"""
return np.diag([np.sqrt(i) for i in range(1, n)], k=1) | 532a082ed5fd3094044162c85042bf963dad4461 | 3,650,920 |
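# Hedged usage sketch for sigma_M (not part of the original snippet): for
# n = 3 the superdiagonal holds sqrt(1) and sqrt(2).
print(sigma_M(3))
# [[0.         1.         0.        ]
#  [0.         0.         1.41421356]
#  [0.         0.         0.        ]]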
def windowing_is(root, *window_sys):
"""
Check for the current operating system.
:param root: A tk widget to be used as reference
:param window_sys: if any windowing system provided here is the current
windowing system `True` is returned else `False`
:return: boolean
"""
windowing = root.tk.call('tk', 'windowingsystem')
return windowing in window_sys | fd021039686b1971f8c5740beb804826a7afdf80 | 3,650,921 |
def init_columns_entries(variables):
"""
Making sure we have `columns` & `entries` to return, without effecting the original objects.
"""
columns = variables.get('columns')
if columns is None:
columns = [] # Relevant columns in proper order
if isinstance(columns, str):
columns = [columns]
else:
columns = list(columns)
entries = variables.get('entries')
if entries is None:
entries = [] # Entries of dict with relevant columns
elif isinstance(entries, dict):
entries = [entries]
else:
entries = list(entries)
return columns, entries | 49a12b0561d0581785c52d9474bc492f2c64626c | 3,650,922 |
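# Hedged usage sketch for init_columns_entries (not part of the original
# snippet): scalar inputs are wrapped into fresh lists so the caller's
# objects are never mutated.
cols, ents = init_columns_entries({'columns': 'name', 'entries': {'name': 'a'}})
print(cols, ents)  # ['name'] [{'name': 'a'}]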
from typing import Tuple
def _run_ic(dataset: str, name: str) -> Tuple[int, float, str]:
"""Run iterative compression on all datasets.
Parameters
----------
dataset : str
Dataset name.
name : str
FCL name.
Returns
-------
Tuple[int, float, str]
Solution size, time, and certificate.
"""
# Execute
time, size, certificate = solve_ic(
str(FCL_DATA_DIR / dataset / 'huffner' / (name + HUFFNER_DATA_EXT)),
timeout=EXACT_TIMEOUT,
preprocessing=2,
htime=min(0.3 * EXACT_TIMEOUT, 1)
)
# Return
return size, time, str(certificate) | e041cb9c0ca5af98d1f8d23a0e6f3cbe7f5a34a4 | 3,650,923 |
import numpy as np
def notch(Wn, Q=10, analog=False, output="ba"):
"""
Design an analog or digital biquad notch filter with variable Q.
The notch differs from a peaking cut filter in that the gain at the
notch center frequency is 0, or -Inf dB.
Transfer function: H(s) = (s**2 + 1) / (s**2 + s/Q + 1)
    Parameters
----------
Wn : float
Center frequency of the filter.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
Q : float
Quality factor of the filter. Examples:
* sqrt(2) is 1 octave wide
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'ss'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
state-space ('ss').
Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
"""
# H(s) = (s**2 + 1) / (s**2 + s/Q + 1)
b = np.array([1, 0, 1])
a = np.array([1, 1 / Q, 1])
return _transform(b, a, Wn, analog, output) | a9b4e488bb5a849459bf843abe2bd9d6d18f662d | 3,650,924 |
def Torus(radius=(1, 0.5), tile=(20, 20), device='cuda:0'):
"""
Creates a torus quad mesh
Parameters
----------
radius : (float,float) (optional)
radii of the torus (default is (1,0.5))
tile : (int,int) (optional)
        the number of divisions of the torus (default is (20,20))
device : str or torch.device (optional)
the device the tensors will be stored to (default is 'cuda:0')
Returns
-------
(Tensor,LongTensor,Tensor)
the point set tensor, the topology tensor, the vertex normals
"""
T, P = grid2mesh(*tuple(TorusPatch(radius=radius, tile=tile, device=device)))
N = vertex_normal(P, quad2tri(T))
return P, T, N | 79c7934cabecdf3a4c9c28de7193ccae1ce037de | 3,650,925 |
def check_new_value(new_value: str, definition) -> bool:
"""
checks with definition if new value is a valid input
:param new_value: input to set as new value
:param definition: valid options for new value
:return: true if valid, false if not
"""
if type(definition) is list:
if new_value in definition:
return True
else:
return False
elif definition is bool:
if new_value == "true" or new_value == "false":
return True
else:
return False
elif definition is int:
try:
int(new_value)
return True
except ValueError:
return False
elif definition is float:
try:
float(new_value)
return True
except ValueError:
return False
elif definition is str:
return True
else:
# We could not validate the type or values so we assume it is incorrect
return False | d7204c7501e713c4ce8ecaeb30239763c13c1f18 | 3,650,926 |
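# Hedged usage sketch for check_new_value (not part of the original snippet).
print(check_new_value("fast", ["slow", "fast"]))  # True: value is among the allowed options
print(check_new_value("yes", bool))               # False: only "true"/"false" are accepted
print(check_new_value("3.14", float))             # True: parses as a float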