content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---
def get_functional_groups(alkoxy_mol):
"""
    Given a molecule object `alkoxy_mol`, return a dictionary of the groups
    used in the Vereecken SAR, with the key being the group and the value
    being the number of occurrences it has.
"""
#print 'getting groups from {}'.format(alkoxy_mol.toSMILES())
alkoxy_mol.assignAtomIDs()
labeled_atoms = alkoxy_mol.getLabeledAtoms()
assert labeled_atoms['*1'].symbol == 'C'
assert labeled_atoms['*3'].symbol == 'C', alkoxy_mol.toAdjacencyList() + str(labeled_atoms)
alpha_groups = get_atom_groups(labeled_atoms['*1'])
beta_groups = get_atom_groups(labeled_atoms['*3'])
# find cyclic groups here (after project finished)
all_groups = {}
for label, num in alpha_groups.items():
all_groups['alpha{}'.format(label)] = num
for label, num in beta_groups.items():
all_groups['beta{}'.format(label)] = num
return all_groups | 9c0280bb09e6ef606aac2a14fe2826c0a9feb06d | 3,252 |
def rough(material, coverage, scale, det, e0=20.0, withPoisson=True, nTraj=defaultNumTraj, dose=defaultDose, sf=True, bf=True, xtraParams=defaultXtraParams):
"""rough(material, coverage, scale, det, [e0=20.0], [withPoisson=True], [nTraj=defaultNumTraj], [dose = 120.0], [sf=True], [bf=True], [xtraParams={}])
Monte Carlo simulate a spectrum from a rough surface with roughness modeled as square pillars of the specified scale and fractional coverage.
    The features are also offset by a randomized x,y offset of size approximately scale to ensure that the beam doesn't always strike at the same sort of position.
    + material - Composition of material
    + coverage - fractional coverage of pillars on the surface (0.0 to 1.0 -> 0% to 100%)
    + scale - height and width of pillars
    + det - detector used for the simulation"""
tmp = u"MC simulation of a %0.2lg um %d%% coverage rough surface of %s at %0.1f keV%s%s" % (1.0e6 * scale, int(100.0 * coverage), material, e0, (" + CSF" if sf else ""), (" + BSF" if bf else ""))
return base(det, e0, withPoisson, nTraj, dose, sf, bf, tmp, buildRough, { "Scale" : scale, "Coverage" : coverage, "Size" : 1.0e-5, "Material" : material }, xtraParams) | 0aa6a21a2cdae22bf9f56cd6babfa9c3402ce465 | 3,253 |
def jsonify(comment_lower: str) -> str:
"""pyNastran: SPOINT={'id':10, 'xyz':[10.,10.,10.]}"""
sline = comment_lower.split('=')
rhs = sline[1].rstrip()
return rhs.replace("'", '"').replace('}', ',}').replace(',,}', ',}') | e8641d5e94cff32389f7ade3360935a2abbcf297 | 3,254 |
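A minimal usage sketch of jsonify() with a hypothetical comment in the format shown in its docstring; note that the transform leaves a trailing comma, so the result is JSON-like rather than strict JSON.

comment = "spoint={'id':10, 'xyz':[10.,10.,10.]}"
print(jsonify(comment))
# {"id":10, "xyz":[10.,10.,10.],}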
from catalyst.engines.torch import (
DataParallelEngine,
DeviceEngine,
DistributedDataParallelEngine,
)
from catalyst.engines.amp import (
AMPEngine,
DataParallelAMPEngine,
DistributedDataParallelAMPEngine,
)
from catalyst.engines.apex import (
APEXEngine,
DataParallelAPEXEngine,
DistributedDataParallelAPEXEngine,
)
def get_available_engine(
fp16: bool = False, ddp: bool = False, amp: bool = False, apex: bool = False
) -> "IEngine":
"""Returns available engine based on given arguments.
Args:
fp16 (bool): option to use fp16 for training. Default is `False`.
ddp (bool): option to use DDP for training. Default is `False`.
        amp (bool): option to use AMP for training. Default is `False`.
apex (bool): option to use APEX for training. Default is `False`.
Returns:
IEngine which match requirements.
"""
if fp16 and not amp and not apex:
amp = SETTINGS.amp_required or (SETTINGS.amp_required and SETTINGS.apex_required)
apex = SETTINGS.apex_required and (not SETTINGS.amp_required)
if amp:
assert (
SETTINGS.amp_required
), "catalyst[amp] is not available, to install it, run `pip install catalyst[amp]`."
assert not apex, "Could not use both apex and amp engines"
if apex:
assert (
SETTINGS.apex_required
), "catalyst[apex] is not available, to install it, run `pip install catalyst[apex]`."
assert not amp, "Could not use both apex and amp engines"
is_multiple_gpus = NUM_CUDA_DEVICES > 1
if not IS_CUDA_AVAILABLE:
return DeviceEngine("cpu")
elif is_multiple_gpus:
if ddp:
if amp:
return DistributedDataParallelAMPEngine()
elif apex:
return DistributedDataParallelAPEXEngine()
else:
return DistributedDataParallelEngine()
else:
if amp:
return DataParallelAMPEngine()
elif apex:
return DataParallelAPEXEngine()
else:
return DataParallelEngine()
else:
if amp:
return AMPEngine()
elif apex:
return APEXEngine()
else:
return DeviceEngine("cuda") | 6d29e0c1938c5889b6e4a7fa972945065bc2cf3a | 3,256 |
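A hedged usage sketch of the engine dispatch above; it assumes the catalyst SETTINGS, IS_CUDA_AVAILABLE, and NUM_CUDA_DEVICES values imported by the original module, and the class returned depends on the local hardware.

engine = get_available_engine(fp16=True)
print(type(engine).__name__)  # e.g. "DeviceEngine" on a CPU-only box, "AMPEngine" on a single GPU with torch.cuda.amp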
import shutil
def disk_usage(pathname):
"""Return disk usage statistics for the given path"""
### Return tuple with the attributes total,used,free in bytes.
### usage(total=118013599744, used=63686647808, free=48352747520)
return shutil.disk_usage(pathname) | c7a36e2f3200e26a67c38d50f0a97dd015f7ccfa | 3,257 |
from typing import Tuple
def create_new_deployment(
runner: Runner, deployment_arg: str, expose: PortMapping,
add_custom_nameserver: bool
) -> Tuple[str, str]:
"""
Create a new Deployment, return its name and Kubernetes label.
"""
span = runner.span()
run_id = runner.session_id
runner.show(
"Starting network proxy to cluster using "
"new Deployment {}".format(deployment_arg)
)
def remove_existing_deployment(quiet=False):
if not quiet:
runner.show("Cleaning up Deployment {}".format(deployment_arg))
runner.check_call(
runner.kubectl(
"delete",
"--ignore-not-found",
"svc,deploy",
"--selector=telepresence=" + run_id,
)
)
runner.add_cleanup("Delete new deployment", remove_existing_deployment)
remove_existing_deployment(quiet=True)
command = [
"run", # This will result in using Deployment:
"--restart=Always",
"--limits=cpu=100m,memory=256Mi",
"--requests=cpu=25m,memory=64Mi",
deployment_arg,
"--image=" + get_image_name(expose),
"--labels=telepresence=" + run_id,
]
# Provide a stable argument ordering. Reverse it because that happens to
# make some current tests happy but in the long run that's totally
# arbitrary and doesn't need to be maintained. See issue 494.
for port in sorted(expose.remote(), reverse=True):
command.append("--port={}".format(port))
if expose.remote():
command.append("--expose")
# If we're on local VM we need to use different nameserver to prevent
# infinite loops caused by sshuttle:
if add_custom_nameserver:
command.append(
"--env=TELEPRESENCE_NAMESERVER=" + get_alternate_nameserver()
)
try:
runner.check_call(runner.kubectl(command))
except CalledProcessError as exc:
raise runner.fail(
"Failed to create deployment {}:\n{}".format(
deployment_arg, exc.stderr
)
)
span.end()
return deployment_arg, run_id | d15e9e1ec9d09669b8becd4e169049d5a1e836ab | 3,259 |
import logging
def score_latency(
references, reference_wavs, partial_translations, target_language="en-US"
):
"""Measures the "final" translation lag after all corrections have been made."""
logger = logging.getLogger("evaluation")
tokenizer = get_tokenizer(target_language)
min_len = min(len(partial_translations), len(references))
if len(partial_translations) != len(references):
logger.warning(
f"Found {len(references)} references, {len(partial_translations)} partial "
+ f"translations. Evaluating only the first {min_len}"
)
partial_translations = partial_translations[:min_len]
references = references[:min_len]
# Make case insensitive and tokenize
partial_translations_tokenized = [
[(t_time, tokenizer.tokenize(t.upper())) for t_time, t in transcript]
for transcript in partial_translations
]
references = [tokenizer.tokenize(r.upper()) for r in references]
# Compute total lag
output_words, total_lag = 0, 0
for reference, (_, reference_wav), partial_translation in zip(
references, reference_wavs, partial_translations_tokenized
):
if len(partial_translation) == 0:
continue
final_time, final_translation = partial_translation[-1]
reference_duration = get_duration_seconds(reference_wav)
for j in range(1, len(final_translation) + 1):
            # Compare the time a word was finalized in the output
            # to the time its corresponding word was uttered
finalization_time = get_finalization_time(
final_translation, j, partial_translation
)
original_token = int(j * len(reference) / len(final_translation))
original_time = get_token_time(
original_token, reference, reference_duration
)
total_lag += max(0, finalization_time - original_time)
output_words += 1
return total_lag / max(1, output_words) | 9d31e029247e44448103d99760019f0dffa1cf44 | 3,260 |
def shapelet_with_w_term(
coords, frequency, coeffs, beta, delta_lm, lm, dtype=np.complex128
):
"""
shapelet: outputs visibilities corresponding to that of a shapelet
Inputs:
coords: coordinates in (u,v) space with shape (nrow, 3)
frequency: frequency values with shape (nchan,)
    coeffs: shapelet coefficients with shape (nsrc, nmax1, nmax2), where
        coeffs[3, 4] = coeffs_l[3] * coeffs_m[4]
    beta: characteristic shapelet size with shape (nsrc, 2)
    delta_lm: pixel sizes (delta_l, delta_m) in the l and m dims
lm: source center coordinates of shape (nsource, 2)
Returns:
out_shapelets: Shapelet with shape (nrow, nchan, nsrc)
"""
nrow = coords.shape[0]
nsrc = coeffs.shape[0]
nchan = frequency.shape[0]
    out_shapelets = np.empty((nrow, nchan, nsrc), dtype=dtype)
delta_l, delta_m = delta_lm
for row in range(nrow):
u, v, w = coords[row, :]
for chan in range(nchan):
fu = u * 2 * np.pi * frequency[chan] / lightspeed
fv = v * 2 * np.pi * frequency[chan] / lightspeed
for src in range(nsrc):
nmax1, nmax2 = coeffs[src, :, :].shape
beta_u, beta_v = beta[src, :]
l, m = lm[src, :]
if beta_u == 0 or beta_v == 0:
out_shapelets[row, chan, src] = 1
continue
tmp_shapelet = 0 + 0j
for n1 in range(nmax1):
for n2 in range(nmax2):
tmp_shapelet += (
0
if coeffs[src][n1, n2] == 0
else coeffs[src][n1, n2]
* basis_function(
n1, fu, beta_u, True, delta_x=delta_l
)
* basis_function(
n2, fv, beta_v, True, delta_x=delta_m
)
)
w_term = phase_steer_and_w_correct(
(u, v, w), (l, m), frequency[chan]
)
out_shapelets[row, chan, src] = tmp_shapelet * w_term
return out_shapelets | f6c9f9011306cc2de5054e015857b3b47c7e6cd9 | 3,261 |
import numpy as np
from collections import Counter
def _entropy_counter2(arr):
"""
calculate the base 2 entropy of the distribution given in `arr` using a
`Counter` and the `values` method (for python3)
"""
arr_len = len(arr)
if arr_len == 0:
return 0
log_arr_len = np.log2(len(arr))
return -sum(val * (np.log2(val) - log_arr_len)
for val in Counter(arr).values()) / arr_len | 1f72c7a7e5db56aa9a0e5c3811cf28c600420949 | 3,263 |
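A quick sanity check of the entropy helper (using the imports above): a uniform two-symbol distribution carries exactly one bit, and an empty sequence returns 0.

print(_entropy_counter2(['a', 'b', 'a', 'b']))  # 1.0
print(_entropy_counter2([]))                    # 0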
def get_changes_between_models(model1, model2, excludes=None):
"""
Return a dict of differences between two model instances
"""
if excludes is None:
excludes = []
changes = {}
for field in model1._meta.fields:
if (isinstance(field, (fields.AutoField,
fields.related.RelatedField))
or field.name in excludes):
continue
if field.value_from_object(model1) != field.value_from_object(model2):
changes[field.verbose_name] = (field.value_from_object(model1),
field.value_from_object(model2))
return changes | 1f62afdc7818574553fa7a53eb05e766c2805edd | 3,265 |
def get_intersect(x1, y1, x2, y2):
"""
    Returns the point of intersection of the two lines, or (None, None) if the lines are parallel.
    Each argument is a point [x, y]:
    x1: a point on the first line
    y1: another point on the first line
    x2: a point on the second line
    y2: another point on the second line
"""
s = np.vstack([x1, y1, x2, y2]) # s for stacked
h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous
l1 = np.cross(h[0], h[1]) # get first line
l2 = np.cross(h[2], h[3]) # get second line
x, y, z = np.cross(l1, l2) # point of intersection
if z == 0: # lines are parallel
return None, None
return x / z, y / z | 8e9ed2f2351b41658400badc7339eedc9791db8a | 3,266 |
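A short usage sketch of get_intersect(); each argument is a point, so intersecting y = x (through (0, 0) and (1, 1)) with x + y = 2 (through (0, 2) and (2, 0)) gives (1, 1). Assumes numpy is imported as np, as the function body requires.

print(get_intersect((0, 0), (1, 1), (0, 2), (2, 0)))  # (1.0, 1.0)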
def removeDuplicateColumns(df):
"""
Removes columns that have a duplicate name.
:return pd.DataFrame:
"""
duplicates = getDuplicates(df.columns)
done = False
idx = 0
df_result = df.copy()
additions_dict = {}
while not done:
if idx >= len(df_result.columns):
done = True
break
column = df_result.columns[idx]
if column in duplicates:
df1 = df_result[column]
values = df1.iloc[:,1]
del df_result[column]
duplicates.remove(column)
additions_dict[column] = values
else:
idx += 1
df_add = pd.DataFrame(additions_dict)
df_result = pd.concat([df_result, df_add], axis=1, sort=True)
return df_result | dc46580d221b8e4279ba73e8d97eee079e65309c | 3,267 |
def conv_block(data, name, channels,
kernel_size=(3, 3), strides=(1, 1), padding=(1, 1),
epsilon=1e-5):
"""Helper function to construct conv-bn-relu"""
# convolution + bn + relu
conv = sym.conv2d(data=data, channels=channels,
kernel_size=kernel_size, strides=strides,
padding=padding, use_bias=False,
layout="NCHW", name=name + "_conv")
bn = sym.batch_norm(data=conv, epsilon=epsilon, name=name + "_bn")
act = sym.relu(data=bn, name=name + "_relu")
return act | 90464c208c12a6e9907f5a206ddd324fd92638ff | 3,268 |
import pickle
import torchvision
import torch
def utzappos_tensor_dset(img_size, observed, binarized, drop_infreq,
cache_fn, *dset_args, transform=None, **dset_kwargs):
"""
Convert folder dataset to tensor dataset.
"""
cache_fn = UTZapposIDImageFolder.get_cache_name(cache_fn, img_size, observed, binarized, drop_infreq)
try:
with open(cache_fn, 'rb') as f:
dset_samples, dset_labels, dset_label_info = pickle.load(f)
except FileNotFoundError:
img_transform = torchvision.transforms.Compose([torchvision.transforms.Resize((img_size, img_size)),
torchvision.transforms.ToTensor()])
dset = UTZapposIDImageFolder(*dset_args, img_size=img_size, transform=img_transform,
observed=observed, binarized=binarized, drop_infreq=drop_infreq,
**dset_kwargs)
dset_examples = [dset[ind] for ind in range(len(dset))]
dset_samples, dset_labels = map(torch.stack, zip(*dset_examples))
# find_duplicates_in_dsets((dset_samples, dset_labels), (dset_samples, dset_labels),
# tuple_format=True, itself=True)
dset_label_info = dset._label_info
with open(cache_fn, 'wb') as handle:
pickle.dump((dset_samples, dset_labels, dset_label_info), handle, protocol=4)
return CustomTensorDataset(dset_samples, dset_labels, transform=transform), dset_label_info, cache_fn | 8008f8d19453884106832746a4cefb55c9813c45 | 3,270 |
def compare_versions(aStr, bStr):
"""
Assumes Debian version format:
[epoch:]upstream_version[-debian_revision]
Returns:
-1 : a < b
0 : a == b
1 : a > b
"""
# Compare using the version class
return cmp(Version(aStr), Version(bStr)) | a17e333cc555b1b260cf826a5e4c29b0e291c479 | 3,271 |
import numbers
def unscale_parameter(value: numbers.Number,
petab_scale: str) -> numbers.Number:
"""Bring parameter from scale to linear scale.
:param value:
Value to scale
:param petab_scale:
Target scale of ``value``
:return:
``value`` on linear scale
"""
if petab_scale == LIN:
return value
if petab_scale == LOG10:
return np.power(10, value)
if petab_scale == LOG:
return np.exp(value)
raise ValueError(f"Unknown parameter scale {petab_scale}. "
f"Must be from {(LIN, LOG, LOG10)}") | f04156220e8a39c31473507a60fee3d5185bda0c | 3,273 |
def perturb(sentence, bertmodel, num):
"""Generate a list of similar sentences by BERT
Arguments:
sentence: Sentence which needs to be perturbed
        bertmodel: MLM model being used (BERT here)
num: Number of perturbations required for a word in a sentence
"""
# Tokenize the sentence
    tokens = tokenizer.tokenize(sentence)
pos_inf = nltk.tag.pos_tag(tokens)
# the elements in the lists are tuples <index of token, pos tag of token>
bert_masked_indexL = list()
# collect the token index for substitution
for idx, (word, tag) in enumerate(pos_inf):
if (tag.startswith("JJ") or tag.startswith("JJR") or tag.startswith("JJS")
or tag.startswith("PRP") or tag.startswith("PRP$") or tag.startswith("RB")
or tag.startswith("RBR") or tag.startswith("RBS") or tag.startswith("VB") or
tag.startswith("VBD") or tag.startswith("VBG") or tag.startswith("VBN") or
tag.startswith("VBP") or tag.startswith("VBZ") or tag.startswith("NN") or
tag.startswith("NNS") or tag.startswith("NNP") or tag.startswith("NNPS")):
tagFlag = tag[:2]
if (idx!=0 and idx!=len(tokens)-1):
bert_masked_indexL.append((idx, tagFlag))
bert_new_sentences = list()
    # generate similar sentences using BERT
    if bert_masked_indexL:
        bert_new_sentences = perturbBert(sentence, bertmodel, num, bert_masked_indexL)
return bert_new_sentences | 598ed7e37185de6bf2a977c226bb58677684772d | 3,274 |
import logging
def discovery_dispatch(task: TaskRequest) -> TaskResponse:
"""Runs appropriate discovery function based on protocol
Args:
task (TaskRequest): namedtuple
Returns:
TaskResponse[str, dict[str, str|int|bool|list]]
"""
task = TaskRequest(*task)
proto = constant.Proto(task.proto)
logging.info(
"Dispatching: host=%s, hostname=%s, proto=%s",
task.host,
task.hostname,
proto,
)
discoverer = get_discovery(proto)
device = discoverer(
host=task.host,
hostname=task.hostname,
sysinfo=task.sysinfo,
extra=task.extra,
**task.kwargs,
)
logging.info("Dispatch received response from %s", task.host)
return TaskResponse(task.host, device.dump()) | 3fe6394cf81fdb3e25343df27479f4b4ab3033fa | 3,275 |
def get_free_times(busy_times, begin_date, end_date):
"""
Gets a list of free times calculated from a list of busy times.
:param busy_times: is the list of busy times in ascending order.
:param begin_date: is the start of the selected time interval.
:param end_date: is the end of the selected time interval.
:return: a list of free times.
"""
free_times = []
busy_times_original = busy_times
begin_date = arrow.get(begin_date).replace(hour=9)
end_date = arrow.get(end_date).replace(hour=17)
# print('free times')
if len(busy_times) == 0:
free_times.append((begin_date.isoformat(), end_date.isoformat()))
else:
begin_date_end = begin_date.replace(hour=17)
begin_day = begin_date.format('YYYYMMDD')
begin_time = '09:00'
end_time = '17:00'
end_date_start = arrow.get(end_date).replace(hour=9)
end_day = end_date.format('YYYYMMDD')
stored_event = busy_times[0]
busy_times = busy_times[1:]
if len(busy_times) == 0:
stored_event_start = arrow.get(stored_event['start']['dateTime'])
stored_event_end = arrow.get(stored_event['end']['dateTime'])
if (stored_event_start == begin_date and
stored_event_end < begin_date_end):
free_times.append((stored_event_end.isoformat(),
end_date.isoformat()))
elif (stored_event_end == end_date and
stored_event_start > end_date_start):
free_times.append((begin_date.isoformat(),
stored_event_start.isoformat()))
elif (stored_event_start > begin_date and
stored_event_end < end_date):
free_times.append((begin_date.isoformat(),
stored_event_start.isoformat()))
free_times.append((stored_event_end.isoformat(),
end_date.isoformat()))
for event in busy_times:
event_start = arrow.get(event['start']['dateTime'])
event_end = arrow.get(event['end']['dateTime'])
event_start_time = event_start.format('HH:mm')
event_end_time = event_end.format('HH:mm')
event_end_day = event_end.format('YYYYMMDD')
stored_event_start = arrow.get(stored_event['start']['dateTime'])
stored_event_start_time = stored_event_start.format('HH:mm')
stored_event_start_day = arrow.get(
stored_event['start']['dateTime']).format('YYYYMMDD')
stored_event_end = stored_event['end']['dateTime']
stored_event_end_time = arrow.get(stored_event_end).format('HH:mm')
event_start = event_start.isoformat()
# starting free time on begin day after start of day
if (stored_event_start_day == begin_day and
stored_event_start_time > begin_time):
free_times.append((begin_date.isoformat(),
stored_event_start.isoformat()))
# print('0 {} - {}'.format(begin_date.isoformat(),
# stored_event_start.isoformat()))
# middle free times
if (stored_event_end < event_start and
(stored_event_end, event_start) not in free_times):
if event_start_time == '09:00':
event_start = arrow.get(
event['start']['dateTime']).replace(
days=-1, hour=17).isoformat()
if stored_event_end_time == '17:00':
stored_event_end = arrow.get(
stored_event_end).replace(days=+1,
hour=START_TIME).isoformat()
free_times.append((stored_event_end, event_start))
# print('1 {} - {}'.format(stored_event_end,
# event_start))
# ending free time
if (event_end_day == end_day and
event_end_time != end_time):
free_times.append((event_end.isoformat(), end_date.isoformat()))
# print('2 {} - {}'.format(event_end.isoformat(),
# end_date.isoformat()))
# ending free time for final events that end before end_date
if (busy_times.index(event) == len(busy_times) - 1 and
event_end < end_date):
if event_end_time == '17:00':
event_end = event_end.replace(days=+1, hour=START_TIME)
free_times.append((event_end.isoformat(), end_date.isoformat()))
# print('3 {} - {}'.format(event_end.isoformat(),
# end_date.isoformat()))
# starting free time not on begin day
if (arrow.get(free_times[0][0]) != begin_date and
stored_event_start != begin_date and
begin_date != arrow.get(
busy_times_original[0]['start']['dateTime'])):
free_times.insert(0, (begin_date.isoformat(),
stored_event_start.isoformat()))
# print('4 {} - {}'.format(begin_date.isoformat(),
# stored_event_start.isoformat()))
stored_event = event
# print()
# print('free times')
# for time in free_times:
# print(time)
return free_times | 95f33c22e28e9ed7bc299ac966767a2292cf6d7b | 3,276 |
import datetime
import pytz
def upstream_has_data(valid):
"""Does data exist upstream to even attempt a download"""
utcnow = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
# NCEP should have at least 24 hours of data
return (utcnow - datetime.timedelta(hours=24)) < valid | e222ca16820f2e9030170877f8e2ae4faff8d5b7 | 3,277 |
def encode_array(x, base=2, **kwds):
"""Encode array of integer-symbols.
Parameters
----------
x : (N, k) array_like
Array of integer symbols.
base : int
Encoding base.
**kwds :
Keyword arguments passed to :py:func:`numpy.ravel`.
Returns
-------
int
Integer code of an array.
"""
seq = np.ravel(x, **kwds)
return encode_sequence(seq, base=base) | b16546350638967dd60812b98295ffc4c95abd4d | 3,278 |
import itertools
def str_for_model(model: Model, formatting: str = "plain", include_params: bool = True) -> str:
"""Make a human-readable string representation of Model, listing all random variables
and their distributions, optionally including parameter values."""
all_rv = itertools.chain(model.unobserved_RVs, model.observed_RVs, model.potentials)
rv_reprs = [rv.str_repr(formatting=formatting, include_params=include_params) for rv in all_rv]
rv_reprs = [rv_repr for rv_repr in rv_reprs if "TransformedDistribution()" not in rv_repr]
if not rv_reprs:
return ""
if "latex" in formatting:
rv_reprs = [
rv_repr.replace(r"\sim", r"&\sim &").strip("$")
for rv_repr in rv_reprs
if rv_repr is not None
]
return r"""$$
\begin{{array}}{{rcl}}
{}
\end{{array}}
$$""".format(
"\\\\".join(rv_reprs)
)
else:
# align vars on their ~
names = [s[: s.index("~") - 1] for s in rv_reprs]
distrs = [s[s.index("~") + 2 :] for s in rv_reprs]
maxlen = str(max(len(x) for x in names))
rv_reprs = [
("{name:>" + maxlen + "} ~ {distr}").format(name=n, distr=d)
for n, d in zip(names, distrs)
]
return "\n".join(rv_reprs) | 89711e4fd12572339a501698c39fc8b81deca8a3 | 3,279 |
from typing import Callable
from typing import Optional
from typing import Union
def get_device(
raw_data: dict, control_data: dict, request: Callable
) -> Optional[
Union[
HomeSeerDimmableDevice,
HomeSeerFanDevice,
HomeSeerLockableDevice,
HomeSeerStatusDevice,
HomeSeerSwitchableDevice,
HomeSeerCoverDevice,
HomeSeerSetPointDevice
]
]:
"""
Parses control_data to return an appropriate device object
based on the control pairs detected for the device.
On/Off = HomeSeerSwitchableDevice
On/Off/Dim = HomeSeerDimmableDevice
On/Off/Fan = HomeSeerFanDevice
Lock/Unlock = HomeSeerLockableDevice
other = HomeSeerStatusDevice
"""
item = next((x for x in control_data if x["ref"] == raw_data["ref"]), None)
supported_features = get_supported_features(item)
return build_device(raw_data, item, request, supported_features) | 616c16e749fef7dc45539a7eb8bdbc9f11d3edd1 | 3,280 |
def wait():
"""
Gets the New Block work unit to send to clients
"""
return _event.get() | d24047d92b3774c675369ee739ec697ab23f0fea | 3,281 |
from typing import Optional
def process(msd_id: str, counter: AtomicCounter) -> Optional[dict]:
"""
Processes the given MSD id and increments the counter. The
method will find and return the artist.
:param msd_id: the MSD id to process
:param counter: the counter to increment
:return: the dictionary containing the MSD id and the artist, raises an
exception if the file cannot be processed
"""
try:
with tables.open_file(msd_id_to_h5(msd_id, args.path_dataset_dir)) as h5:
artist = h5.root.metadata.songs.cols.artist_name[0].decode("utf-8")
return {"msd_id": msd_id, "artist": artist}
except Exception as e:
print(f"Exception during processing of {msd_id}: {e}")
finally:
counter.increment() | 6bd93bf72a7ecfa6ddb41557b0550a629b9612f4 | 3,282 |
def choose_run(D, var2align, run):
"""Get input for the alignment.
Do it by indicating a run to align to.
Args:
D (pd.DataFrame): DataFrame containing columns 'id', 'run', and ...
var2align (str): Name of the column to align.
run (whatever): The run to align to.
Returns:
tuple of pd.DataFrames: The data ready for alignment and the remainder.
"""
X = D[['id', 'run', var2align]] # subselect the data for alignment
X.columns = ['id', 'run', 'x']
ref = X.loc[X.run == run] # the reference peptides
other = X.loc[X.run != run] # all other peptides
# we can align peptides in other runs only to those found in chosen run.
alignable_idx = other.id.isin(set(other.id) & set(ref.id))
X = other.loc[alignable_idx,]
unalignable = other.loc[~alignable_idx,]
ref = ref[['id','x']].set_index('id')
ref.columns = ['y']
X = pd.concat([X.set_index('id'), ref], axis=1, join='inner')
return X, unalignable | 54fc84e61b3874219d473659c85bd369b367a05d | 3,283 |
def stack(tensor_list, axis=0):
"""
This function is the same as torch.stack but handles both
numpy.ndarray and torch.Tensor
:param tensor_list:
:param axis:
:return:
"""
if isinstance(tensor_list[0], th.Tensor):
return th.stack(tensor_list, axis)
else:
return np.stack(tensor_list, axis) | 9d8e5d8fbd9f89acb40ada362d0ae8d4913df939 | 3,285 |
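A quick check of the two dispatch paths in stack(), assuming `import torch as th` and `import numpy as np`, the aliases used in the function body.

a = stack([np.array([1, 2]), np.array([3, 4])])
print(type(a).__name__, a.shape)   # ndarray (2, 2)
b = stack([th.tensor([1, 2]), th.tensor([3, 4])], axis=0)
print(type(b).__name__, b.shape)   # Tensor torch.Size([2, 2])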
def alias(alias):
"""Select a single alias."""
return {'alias': alias} | 35364346da4d7b1f6de2d7ba6e0b5721b6bef1dd | 3,286 |
def model_creator(config):
"""Constructor function for the model(s) to be optimized.
You will also need to provide a custom training
function to specify the optimization procedure for multiple models.
Args:
config (dict): Configuration dictionary passed into ``PyTorchTrainer``.
Returns:
One or more torch.nn.Module objects.
"""
return nn.Linear(1, 1) | 81909a284bddd83a62c8c9adacfbe75cf46650bd | 3,287 |
import numbers
def ensure_r_vector(x):
"""Ensures that the input is rendered as a vector in R.
It is way more complicated to define an array in R than in Python because an array
    in R cannot end with a comma.
Examples
--------
>>> ensure_r_vector("string")
"c('string')"
>>> ensure_r_vector(1)
'c(1)'
>>> ensure_r_vector(list("abcd"))
"c('a', 'b', 'c', 'd')"
>>> ensure_r_vector((1, 2))
'c(1, 2)'
"""
if isinstance(x, str):
out = f"c('{x}')"
elif isinstance(x, numbers.Number):
out = f"c({x})"
elif isinstance(x, (tuple, list)):
mapped = map(lambda l: str(l) if isinstance(l, numbers.Number) else f"'{l}'", x)
concatenated = ", ".join(mapped)
out = f"c({concatenated})"
else:
raise NotImplementedError(
f"'ensure_r_vector' is not defined for dtype {type(x)}"
)
return out | 14fdeb6bf73244c69d9a6ef89ba93b33aa4a66d8 | 3,288 |
from typing import Optional
def open_and_prepare_avatar(image_bytes: Optional[bytes]) -> Optional[Image.Image]:
"""Opens the image as bytes if they exist, otherwise opens the 404 error image. then circular crops and resizes it"""
if image_bytes is not None:
try:
with Image.open(BytesIO(image_bytes)) as im:
prepared_image = crop_circular_border_w_transparent_bg(im)
prepared_image = resize_image(prepared_image)
except UnidentifiedImageError as e:
log.error("Error loading Avatar", exc_info=e)
return None
else:
with Image.open("resources/404 Avatar Not Found.png") as im:
prepared_image = crop_circular_border_w_transparent_bg(im)
prepared_image = resize_image(prepared_image)
return prepared_image | f5b4543f64b15180deed3cb8e672a3e1b96956f7 | 3,289 |
def is_GammaH(x):
"""
Return True if x is a congruence subgroup of type GammaH.
EXAMPLES::
sage: from sage.modular.arithgroup.all import is_GammaH
sage: is_GammaH(GammaH(13, [2]))
True
sage: is_GammaH(Gamma0(6))
True
sage: is_GammaH(Gamma1(6))
True
sage: is_GammaH(sage.modular.arithgroup.congroup_generic.CongruenceSubgroup(5))
False
"""
return isinstance(x, GammaH_class) | 9cfba55901a45d4482b6926673bfb87fabc88030 | 3,290 |
def _run_with_interpreter_if_needed(fuzzer_path, args, max_time):
"""Execute the fuzzer script with an interpreter, or invoke it directly."""
interpreter = shell.get_interpreter(fuzzer_path)
if interpreter:
executable = interpreter
args.insert(0, fuzzer_path)
else:
executable = fuzzer_path
runner = new_process.UnicodeProcessRunner(executable)
return runner.run_and_wait(timeout=max_time, additional_args=args) | 3739db213571ed00c5e026f9a768ca610e0ac318 | 3,291 |
def cost_logistic(p, x, y):
"""
Sum of absolute deviations of obs and logistic function L/(1+exp(-k(x-x0)))
Parameters
----------
p : iterable of floats
parameters (`len(p)=3`)
`p[0]` L - Maximum of logistic function
`p[1]` k - Steepness of logistic function
`p[2]` x0 - Inflection point of logistic function
x : float or array_like of floats
independent variable
y : float or array_like of floats
dependent variable, observations
Returns
-------
float
sum of absolute deviations
"""
return np.sum(np.abs(y-logistic_p(x,p))) | 32b89ef7d33d49b7af63c8d11afffeb641b12de1 | 3,293 |
import datetime
import numpy as np
def estimate_dt(time_array):
"""Automatically estimate timestep in a time_array
Args:
time_array ([list]): List or dataframe with time entries
Returns:
dt ([datetime.timedelta]): Timestep in dt.timedelta format
"""
if len(time_array) < 2:
# Assume arbitrary value
return datetime.timedelta(seconds=0)
dt = np.median(np.diff(time_array))
if not isinstance(dt, datetime.timedelta):
dt = datetime.timedelta(seconds=dt.astype(float)/1e9)
# Check if data is all ascending
if dt <= datetime.timedelta(0):
raise UserWarning('Please only insert time ascending data.')
return dt | 6e6b8dcd4d2d85b4bfb97137294774bb4bcc2673 | 3,294 |
import uuid
def gen_uuid() -> str:
    """
    Generate a UUID.
    :return: uuid hex string
    """
    return uuid.uuid4().hex | 82fd4fa7a3e39cc0c91ab16be3cf0c6a3f63eb3d | 3,295 |
import inspect
def make_signature(arg_names, member=False):
"""Make Signature object from argument name iterable or str."""
kind = inspect.Parameter.POSITIONAL_OR_KEYWORD
    if isinstance(arg_names, str):
        arg_names = [name.strip() for name in arg_names.split(',')]
    else:
        arg_names = list(arg_names)
    if member and arg_names and arg_names[0] != 'self':
arg_names = ['self'] + arg_names
return inspect.Signature([inspect.Parameter(n, kind) for n in arg_names]) | 2730e50ea68e6fe2942c629caa3b3119aea9a325 | 3,296 |
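A brief usage check of make_signature() with the fix above applied.

print(make_signature("a, b"))              # (a, b)
print(make_signature(["x"], member=True))  # (self, x)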
def set_trace_platform(*args):
"""
set_trace_platform(platform)
Set platform name of current trace.
@param platform (C++: const char *)
"""
return _ida_dbg.set_trace_platform(*args) | 9f581018960cdd0949ca41750286eddf1fa43741 | 3,297 |
def leapfrog2(init, tspan, a, beta, omega, h):
"""
Integrate the damped oscillator with damping factor a using single step
Leapfrog for separable Hamiltonians.
"""
f = forcing(beta, omega)
return sym.leapfrog(init, tspan, h, lambda x, p, t: -x-a*p+f(t)) | a8eebe1ee7f50c87e515c2c5cca0bdc30605dc8f | 3,298 |
def get_paths(config, action, dir_name):
"""
Returns 'from' and 'to' paths.
@param config: wrapsync configuration
@param action: 'push'/'pull'
@param dir_name: name of the directory to append to paths from the config
@return: dictionary containing 'from' and 'to' paths
"""
path_from = ''
path_to = ''
if action == 'push':
if dir_name == 'all':
path_from = build_local_path(config, False)
path_to = build_remote_path(config, True)
else:
path_from = f"{build_local_path(config, False)}/{dir_name}"
path_to = build_remote_path(config, False)
else:
if dir_name == 'all':
path_from = build_remote_path(config, False)
path_to = build_local_path(config, True)
else:
path_from = f"{build_remote_path(config, False)}/{dir_name}"
path_to = build_local_path(config, False)
return {
'from': path_from,
'to': path_to
} | f03ee64a76bafcf832f8dddcdcb4f16c28529c5c | 3,299 |
def to_dense(arr):
"""
Convert a sparse array to a dense numpy array. If the
array is already a numpy array, just return it. If the
array passed in is a list, then we recursively apply this
method to its elements.
Parameters
-----------
arr : :obj:`numpy.ndarray`, :obj:`scipy.sparse.spmatrix`, or list
Any matrix (or list of matrices) that must be converted
to a dense numpy array.
Raises
--------
TypeError
If the array provided is not a list, `numpy` array,
or `scipy.sparse` matrix.
Returns
--------
dense_args: tuple
"""
if isinstance(arr, np.ndarray):
return arr
if isinstance(arr, list):
return [to_dense(el) for el in arr]
# assume it must be a `scipy.sparse.spmatrix`
if isinstance(arr, sp.spmatrix):
return arr.toarray()
error_msg = (
"Can only convert numpy matrices, scipy matrices, or "
"lists of those elements to dense arrays"
)
raise TypeError(error_msg) | 1fa2ccdd184aa4155cfd121310d67e9e73ffff17 | 3,300 |
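A hedged usage sketch of to_dense(), assuming `import numpy as np` and `import scipy.sparse as sp`, the aliases used in the function body.

dense = to_dense(sp.csr_matrix(np.eye(2)))
print(dense)                           # [[1. 0.] [0. 1.]] as a numpy array
print(to_dense([dense, np.zeros(2)]))  # a list is converted element-wise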
def output_results(results, way):
"""Helper method with most of the logic"""
tails = way(results)
heads = len(results) - tails
result = ", ".join([["Heads", "Tails"][flip] for flip in results])
return result + f"\n{heads} Heads; {tails} Tails" | f60716004b11e115fe69a14b70957b5b66080dbc | 3,301 |
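A short usage sketch of output_results(); `way` can be any callable that counts the tails in the flip list, e.g. the built-in sum over 0/1 flips.

print(output_results([0, 1, 1, 0], sum))
# Heads, Tails, Tails, Heads
# 2 Heads; 2 Tails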
def guess_init(model, focal_length, j2d, init_pose):
"""Initialize the camera translation via triangle similarity, by using the torso
joints .
:param model: SMPL model
:param focal_length: camera focal length (kept fixed)
:param j2d: 14x2 array of CNN joints
:param init_pose: 72D vector of pose parameters used for initialization (kept fixed)
:returns: 3D vector corresponding to the estimated camera translation
"""
cids = np.arange(0, 12)
# map from LSP to SMPL joints
j2d_here = j2d[cids]
smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]
opt_pose = ch.array(init_pose)
_, A_global = global_rigid_transformation(opt_pose, model.J, model.kintree_table, xp=ch)
Jtr = ch.vstack([g[:3, 3] for g in A_global])
Jtr = Jtr[smpl_ids].r
# 9 is L shoulder, 3 is L hip
# 8 is R shoulder, 2 is R hip
diff3d = np.array([Jtr[9] - Jtr[3], Jtr[8] - Jtr[2]])
mean_height3d = np.mean(np.sqrt(np.sum(diff3d**2, axis=1)))
diff2d = np.array([j2d_here[9] - j2d_here[3], j2d_here[8] - j2d_here[2]])
mean_height2d = np.mean(np.sqrt(np.sum(diff2d**2, axis=1)))
est_d = focal_length * (mean_height3d / mean_height2d)
init_t = np.array([0., 0., est_d])
return init_t | ce1ca89bc60500cc59441c97cf9d71ef3d9b528b | 3,302 |
def TCnCom_Dump(*args):
"""
Dump(TCnComV const & CnComV, TStr Desc=TStr())
Parameters:
CnComV: TCnComV const &
Desc: TStr const &
TCnCom_Dump(TCnComV const & CnComV)
Parameters:
CnComV: TCnComV const &
"""
return _snap.TCnCom_Dump(*args) | c2ce258a12074106e4c93e938dfa988b1bc29015 | 3,303 |
import functools
def get_reparametrize_functions(
params, constraints, scaling_factor=None, scaling_offset=None
):
"""Construct functions to map between internal and external parameters.
All required information is partialed into the functions.
Args:
params (pandas.DataFrame): See :ref:`params`.
constraints (list): List of constraint dictionaries.
scaling_factor (np.ndarray or None): If None, no scaling factor is used.
scaling_offset (np.ndarray or None): If None, no scaling offset is used
Returns:
func: Function that maps an external parameter vector to an internal one
func: Function that maps an internal parameter vector to an external one
"""
params = add_default_bounds_to_params(params)
check_params_are_valid(params)
processed_constraints, processed_params = process_constraints(
constraints=constraints,
params=params,
scaling_factor=scaling_factor,
scaling_offset=scaling_offset,
)
# get partialed reparametrize from internal
pre_replacements = processed_params["_pre_replacements"].to_numpy()
post_replacements = processed_params["_post_replacements"].to_numpy()
fixed_values = processed_params["_internal_fixed_value"].to_numpy()
# get partialed reparametrize to internal
internal_free = processed_params["_internal_free"].to_numpy()
partialed_to_internal = functools.partial(
reparametrize_to_internal,
internal_free=internal_free,
processed_constraints=processed_constraints,
scaling_factor=scaling_factor,
scaling_offset=scaling_offset,
)
partialed_from_internal = functools.partial(
reparametrize_from_internal,
fixed_values=fixed_values,
pre_replacements=pre_replacements,
processed_constraints=processed_constraints,
post_replacements=post_replacements,
scaling_factor=scaling_factor,
scaling_offset=scaling_offset,
)
return partialed_to_internal, partialed_from_internal | a0d8f283bf44f66fb098c499a6b610174078b980 | 3,305 |
def gaussNewton(P, model, target, targetLandmarks, sourceLandmarkInds, NN, jacobi = True, calcId = True):
"""
Energy function to be minimized for fitting.
"""
# Shape eigenvector coefficients
idCoef = P[: model.idEval.size]
expCoef = P[model.idEval.size: model.idEval.size + model.expEval.size]
# Rotation Euler angles, translation vector, scaling factor
angles = P[model.idEval.size + model.expEval.size:][:3]
R = rotMat2angle(angles)
t = P[model.idEval.size + model.expEval.size:][3: 6]
s = P[model.idEval.size + model.expEval.size:][6]
# Transpose if necessary
if targetLandmarks.shape[0] != 3:
targetLandmarks = targetLandmarks.T
# The eigenmodel, before rigid transformation and scaling
model = model.idMean + np.tensordot(model.idEvec, idCoef, axes = 1) + np.tensordot(model.expEvec, expCoef, axes = 1)
# After rigid transformation and scaling
source = s*np.dot(R, model) + t[:, np.newaxis]
# Find the nearest neighbors of the target to the source vertices
# start = clock()
distance, ind = NN.kneighbors(source.T)
targetNN = target[ind.squeeze(axis = 1), :].T
# print('NN: %f' % (clock() - start))
    # Calculate residuals
rVert = targetNN - source
rLand = targetLandmarks - source[:, sourceLandmarkInds]
rAlpha = idCoef ** 2 / model.idEval
rDelta = expCoef ** 2 / model.expEval
# Calculate costs
Ever = np.linalg.norm(rVert, axis = 0).sum() / model.numVertices
Elan = np.linalg.norm(rLand, axis = 0).sum() / sourceLandmarkInds.size
Ereg = np.sum(rAlpha) + np.sum(rDelta)
if jacobi:
# start = clock()
drV_dalpha = -s*np.tensordot(R, model.idEvec, axes = 1)
drV_ddelta = -s*np.tensordot(R, model.expEvec, axes = 1)
drV_dpsi = -s*np.dot(dR_dpsi(angles), model)
drV_dtheta = -s*np.dot(dR_dtheta(angles), model)
drV_dphi = -s*np.dot(dR_dphi(angles), model)
drV_dt = -np.tile(np.eye(3), [source.shape[1], 1])
drV_ds = -np.dot(R, model)
drR_dalpha = np.diag(2*idCoef / model.idEval)
drR_ddelta = np.diag(2*expCoef / model.expEval)
# Calculate Jacobian
if calcId:
r = np.r_[rVert.flatten('F'), rLand.flatten('F'), rAlpha, rDelta]
J = np.r_[np.c_[drV_dalpha.reshape((source.size, idCoef.size), order = 'F'), drV_ddelta.reshape((source.size, expCoef.size), order = 'F'), drV_dpsi.flatten('F'), drV_dtheta.flatten('F'), drV_dphi.flatten('F'), drV_dt, drV_ds.flatten('F')], np.c_[drV_dalpha[:, sourceLandmarkInds, :].reshape((targetLandmarks.size, idCoef.size), order = 'F'), drV_ddelta[:, sourceLandmarkInds, :].reshape((targetLandmarks.size, expCoef.size), order = 'F'), drV_dpsi[:, sourceLandmarkInds].flatten('F'), drV_dtheta[:, sourceLandmarkInds].flatten('F'), drV_dphi[:, sourceLandmarkInds].flatten('F'), drV_dt[:sourceLandmarkInds.size * 3, :], drV_ds[:, sourceLandmarkInds].flatten('F')], np.c_[drR_dalpha, np.zeros((idCoef.size, expCoef.size + 7))], np.c_[np.zeros((expCoef.size, idCoef.size)), drR_ddelta, np.zeros((expCoef.size, 7))]]
# Parameter update (Gauss-Newton)
dP = -np.linalg.inv(np.dot(J.T, J)).dot(J.T).dot(r)
else:
r = np.r_[rVert.flatten('F'), rLand.flatten('F'), rDelta]
J = np.r_[np.c_[drV_ddelta.reshape((source.size, expCoef.size), order = 'F'), drV_dpsi.flatten('F'), drV_dtheta.flatten('F'), drV_dphi.flatten('F'), drV_dt, drV_ds.flatten('F')], np.c_[drV_ddelta[:, sourceLandmarkInds, :].reshape((np.prod(targetLandmarks.shape), expCoef.size), order = 'F'), drV_dpsi[:, sourceLandmarkInds].flatten('F'), drV_dtheta[:, sourceLandmarkInds].flatten('F'), drV_dphi[:, sourceLandmarkInds].flatten('F'), drV_dt[:sourceLandmarkInds.size * 3, :], drV_ds[:, sourceLandmarkInds].flatten('F')], np.c_[drR_ddelta, np.zeros((expCoef.size, 7))]]
# Parameter update (Gauss-Newton)
dP = np.r_[np.zeros(model.idEval.size), -np.linalg.inv(np.dot(J.T, J)).dot(J.T).dot(r)]
# print('GN: %f' % (clock() - start))
return Ever + Elan + Ereg, dP
return Ever + Elan + Ereg | 2b54080bf9f76a8a16e26c10f6209f55bcb0c57f | 3,306 |
import theano.tensor as T
def arange(start, stop=None, step=1, dtype='int32'):
"""Creates a 1-D tensor containing a sequence of integers.
The function arguments use the same convention as
Theano's arange: if only one argument is provided,
it is in fact the "stop" argument.
"""
return T.arange(start, stop=stop, step=step, dtype=dtype) | 72f505d7f1928d4e35a7e183a30bdc8cddf2edd7 | 3,307 |
def create_attachable_access_entity_profile(infra, entity_profile, **args):
"""Create an attached entity profile. This provides a template to deploy hypervisor policies on a large set of leaf ports. This also provides the association of a Virtual Machine Management (VMM) domain and the physical network infrastructure. """
args = args['optional_args'] if 'optional_args' in args.keys() else args
infra_attentityp = AttEntityP(infra, entity_profile)
if 'enable_infrastructure_vlan' in args.keys():
if args['enable_infrastructure_vlan'] in [True, 'True', 'true', 't', 'T']:
infra_provacc = ProvAcc(infra_attentityp)
elif args['enable_infrastructure_vlan'] in [False, 'False', 'false', 'f', 'F']:
infra_provacc = ProvAcc(infra_attentityp)
infra_provacc.delete()
if 'domain_profiles' in args.keys() and is_valid(args['domain_profiles']):
for domain in args['domain_profiles']:
if domain['type'] == 'physical':
path = 'uni/phys-'
elif domain['type'] == 'vcenter':
path = 'uni/vmmp-VMware/dom-'
elif domain['type'] == 'layer2':
path = 'uni/l2dom-'
elif domain['type'] == 'layer3':
path = 'uni/l3dom-'
else:
                print('Invalid domain type.')
path = ''
infra_rsdomp = RsDomP(infra_attentityp, path+domain['name'])
if is_valid_key(args, 'interface_policy_group'):
infra_funcp = FuncP(infra)
infra_accportgrp = AccPortGrp(infra_funcp, args['interface_policy_group'])
infra_rsattentp = RsAttEntP(infra_accportgrp)
return infra_attentityp | 96c711b8c5de52ca44483edcb478a829986e901a | 3,308 |
from typing import Tuple
from typing import List
import csv
def tensor_projection_reader(
embedding_file_path: str,
label_file_path: str
) -> Tuple[np.ndarray, List[List[str]]]:
"""
Reads the embedding and labels stored at the given paths and returns an np.ndarray and list of labels
:param str embedding_file_path: Path to the embedding file
:param str label_file_path: Path to the labels file
:return: An embedding and list of labels
:rtype: (numpy.ndarray, List[List[str]])
"""
embedding = np.loadtxt(embedding_file_path, delimiter='\t')
labels: List[List[str]] = []
with open(label_file_path) as f:
csv_reader = csv.reader(f, delimiter='\t')
for label_row in csv_reader:
labels.append(label_row)
return embedding, labels | 7e8cc804181ead221a283b4d8aa95a9e9b7d00ef | 3,309 |
def xml_to_dict(xmlobj, saveroot=True):
"""Parse the xml into a dictionary of attributes.
Args:
xmlobj: An ElementTree element or an xml string.
saveroot: Keep the xml element names (ugly format)
Returns:
An ElementDict object or ElementList for multiple objects
"""
if isinstance(xmlobj, basestring):
# Allow for blank (usually HEAD) result on success
if xmlobj.isspace():
return {}
try:
element = ET.fromstring(xmlobj)
except Exception, err:
raise Error('Unable to parse xml data: %s' % err)
else:
element = xmlobj
element_type = element.get('type', '').lower()
if element_type == 'array':
element_list_type = element.tag.replace('-', '_')
return_list = element_containers.ElementList(element_list_type)
for child in element.getchildren():
child_element = xml_to_dict(child, saveroot)
if saveroot and isinstance(child_element, dict):
return_list.append(child_element.values()[0])
else:
return_list.append(child_element)
if saveroot:
return element_containers.ElementDict(element_list_type,
{element_list_type:
return_list})
else:
return return_list
elif element.get('nil') == 'true':
return None
elif element_type in ('integer', 'datetime', 'date',
'decimal', 'double', 'float') and not element.text:
return None
elif element_type == 'integer':
return int(element.text)
elif element_type == 'datetime':
if date_parse:
return date_parse(element.text)
else:
try:
timestamp = calendar.timegm(
time.strptime(element.text, '%Y-%m-%dT%H:%M:%S+0000'))
return datetime.datetime.utcfromtimestamp(timestamp)
except ValueError, err:
raise Error('Unable to parse timestamp. Install dateutil'
' (http://labix.org/python-dateutil) or'
' pyxml (http://pyxml.sf.net/topics/)'
' for ISO8601 support.')
elif element_type == 'date':
time_tuple = time.strptime(element.text, '%Y-%m-%d')
return datetime.date(*time_tuple[:3])
elif element_type == 'decimal':
return decimal.Decimal(element.text)
elif element_type in ('float', 'double'):
return float(element.text)
elif element_type == 'boolean':
if not element.text:
return False
return element.text.strip() in ('true', '1')
elif element_type == 'yaml':
if not yaml:
raise ImportError('PyYaml is not installed: http://pyyaml.org/')
return yaml.safe_load(element.text)
elif element_type == 'base64binary':
return base64.decodestring(element.text)
elif element_type == 'file':
content_type = element.get('content_type',
'application/octet-stream')
filename = element.get('name', 'untitled')
return FileObject(element.text, filename, content_type)
elif element_type in ('symbol', 'string'):
if not element.text:
return ''
return element.text
elif element.getchildren():
# This is an element with children. The children might be simple
# values, or nested hashes.
if element_type:
attributes = element_containers.ElementDict(
underscore(element.get('type', '')), element.items())
else:
attributes = element_containers.ElementDict(singularize(
element.tag.replace('-', '_')), element.items())
for child in element.getchildren():
attribute = xml_to_dict(child, saveroot)
child_tag = child.tag.replace('-', '_')
if saveroot:
# If this is a nested hash, it will come back as
# {child_tag: {key: value}}, we only want the inner hash
if isinstance(attribute, dict):
if len(attribute) == 1 and child_tag in attribute:
attribute = attribute[child_tag]
# Handle multiple elements with the same tag name
if child_tag in attributes:
if isinstance(attributes[child_tag], list):
attributes[child_tag].append(attribute)
else:
attributes[child_tag] = [attributes[child_tag],
attribute]
else:
attributes[child_tag] = attribute
if saveroot:
return {element.tag.replace('-', '_'): attributes}
else:
return attributes
elif element.items():
return element_containers.ElementDict(element.tag.replace('-', '_'),
element.items())
else:
return element.text | 85428aaefc1f48881891ddd910daef1cc4f1547e | 3,310 |
import torch
from torch import nn
import numpy
def conve_interaction(
h: torch.FloatTensor,
r: torch.FloatTensor,
t: torch.FloatTensor,
t_bias: torch.FloatTensor,
input_channels: int,
embedding_height: int,
embedding_width: int,
hr2d: nn.Module,
hr1d: nn.Module,
) -> torch.FloatTensor:
"""Evaluate the ConvE interaction function.
:param h: shape: (batch_size, num_heads, 1, 1, dim)
The head representations.
:param r: shape: (batch_size, 1, num_relations, 1, dim)
The relation representations.
:param t: shape: (batch_size, 1, 1, num_tails, dim)
The tail representations.
:param t_bias: shape: (batch_size, 1, 1, num_tails, 1)
The tail entity bias.
:param input_channels:
The number of input channels.
:param embedding_height:
The height of the reshaped embedding.
:param embedding_width:
The width of the reshaped embedding.
:param hr2d:
The first module, transforming the 2D stacked head-relation "image".
:param hr1d:
The second module, transforming the 1D flattened output of the 2D module.
:return: shape: (batch_size, num_heads, num_relations, num_tails)
The scores.
"""
# repeat if necessary, and concat head and relation, batch_size', num_input_channels, 2*height, width
# with batch_size' = batch_size * num_heads * num_relations
x = broadcast_cat(
[
h.view(*h.shape[:-1], input_channels, embedding_height, embedding_width),
r.view(*r.shape[:-1], input_channels, embedding_height, embedding_width),
],
dim=-2,
).view(-1, input_channels, 2 * embedding_height, embedding_width)
# batch_size', num_input_channels, 2*height, width
x = hr2d(x)
# batch_size', num_output_channels * (2 * height - kernel_height + 1) * (width - kernel_width + 1)
x = x.view(-1, numpy.prod(x.shape[-3:]))
x = hr1d(x)
# reshape: (batch_size', embedding_dim) -> (b, h, r, 1, d)
x = x.view(-1, h.shape[1], r.shape[2], 1, h.shape[-1])
# For efficient calculation, each of the convolved [h, r] rows has only to be multiplied with one t row
# output_shape: (batch_size, num_heads, num_relations, num_tails)
t = t.transpose(-1, -2)
x = (x @ t).squeeze(dim=-2)
# add bias term
return x + t_bias.squeeze(dim=-1) | fadf03905ed5c822df0fe099cb439f481073d202 | 3,311 |
def index():
"""Show Homepage"""
return render_template("index.html") | f05985d10a9699783f6f3c4c4f88c8be48a0a7a9 | 3,312 |
def with_input_dtype(policy, dtype):
"""Copies "infer" `policy`, adding `dtype` to it.
Policy must be "infer" or "infer_float32_vars" (i.e., has no compute dtype).
Returns a new policy with compute dtype `dtype`. The returned policy's
variable dtype is also `dtype` if `policy` is "infer", and is `float32` if
`policy` is "infer_with_float32_vars".
Args:
policy: An "infer" or "infer_float32_vars" policy
dtype: The dtype of an input to a layer.
Returns:
A new policy copied from `policy`, but with compute dtype and maybe
variable_dtype set to `dtype`.
"""
assert not policy.compute_dtype
dtype = dtypes.as_dtype(dtype).name
if policy.variable_dtype is None:
return Policy(dtype)
else:
# Policies without a compute dtype are either "infer" or
# "infer_with_float32_vars", so the variable_dtype must be float32 here.
assert policy.variable_dtype == 'float32'
try:
Policy._warn_about_float32_vars = False # pylint: disable=protected-access
return Policy(dtype + '_with_float32_vars')
finally:
Policy._warn_about_float32_vars = True # pylint: disable=protected-access | 32815d4499b57ed8623a55414ef7b6115c450726 | 3,314 |
import io
import warnings
def decode_object_based(effects):
"""
Reads and decodes info about object-based layer effects.
"""
fp = io.BytesIO(effects)
version, descriptor_version = read_fmt("II", fp)
try:
descriptor = decode_descriptor(None, fp)
except UnknownOSType as e:
warnings.warn("Ignoring object-based layer effects tagged block (%s)" % e)
return effects
return ObjectBasedEffects(version, descriptor_version, descriptor) | 6471f6f9987b1817f223fe02a5ba5923ddf8c0c8 | 3,315 |
def example_add(x: int, y: int):
"""
...
"""
return x + y | 88e835e872e2ef4eb54f721e3d556ee7f8db1bbc | 3,316 |
from typing import Optional
def inverse(text: str, reset_style: Optional[bool] = True) -> str:
"""Returns text inverse-colored.
Args:
reset_style: Boolean that determines whether a reset character should
be appended to the end of the string.
"""
return set_mode("inverse", False) + text + (reset() if reset_style else "") | 4d8aceada756386348b68c13dabe4948b15986c3 | 3,317 |
def make():
""" hook function for entrypoints
:return:
"""
return LocalFileSystem | 7e48c7c4a9225f4bd3d7d430b6221005e2787e55 | 3,318 |
def configure():
"""read configuration from command line options and config file values"""
opts = parse_options()
defaults = dict(v.split('=') for v in opts.S or [])
with open(opts.config_file) as config:
targets = read_config(config, defaults, opts.ignore_colon)
if opts.T:
return {opts.T: targets[opts.T]}
else:
return targets | 09c85e8fce3947ee54c1524545e14fe25a4d054e | 3,319 |
def proper_loadmat(file_path):
"""Loads using scipy.io.loadmat, and cleans some of the metadata"""
data = loadmat(file_path)
clean_data = {}
for key, value in data.items():
if not key.startswith("__"):
clean_data[key] = value.squeeze().tolist()
return clean_data | d7cbc547ab47235db2df80fdf2ca9decd3a4c42d | 3,320 |
from typing import List
def _get_time_total(responses: List[DsResponse]) -> List[str]:
"""Get formated total time metrics."""
metric_settings = {
"name": "time_total",
"type": "untyped",
"help": "Returns the total time in seconds (time taken to request, render and download).",
"func": lambda response: __float2str(response.time_total),
}
return _get_metrics(responses, metric_settings) | 641bff0a75d1f61afa7ad1d9e9058faee58c18b8 | 3,321 |
async def list_sessions(
cache: Redis = Depends(depends_redis),
) -> ListSessionsResponse:
"""Get all session keys"""
keylist = []
for key in await cache.keys(pattern=f"{IDPREFIX}*"):
if not isinstance(key, bytes):
raise TypeError(
"Found a key that is not stored as bytes (stored as type "
f"{type(key)!r})."
)
keylist.append(key.decode(encoding="utf-8"))
return ListSessionsResponse(keys=keylist) | 7fce8610a5c53317636da7e5408a582c10faff3c | 3,322 |
def square(x, out=None, where=True, **kwargs):
"""
Return the element-wise square of the input.
Args:
x (numpoly.ndpoly):
Input data.
out (Optional[numpy.ndarray]):
A location into which the result is stored. If provided, it must
have a shape that the inputs broadcast to. If not provided or
`None`, a freshly-allocated array is returned. A tuple (possible
only as a keyword argument) must have length equal to the number of
outputs.
where (Optional[numpy.ndarray]):
This condition is broadcast over the input. At locations where the
condition is True, the `out` array will be set to the ufunc result.
Elsewhere, the `out` array will retain its original value. Note
that if an uninitialized `out` array is created via the default
``out=None``, locations within it where the condition is False will
remain uninitialized.
kwargs:
Keyword args passed to numpy.ufunc.
Returns:
out (numpoly.ndpoly):
Element-wise `x*x`, of the same shape and dtype as `x`.
This is a scalar if `x` is a scalar.
Examples:
>>> numpoly.square([-1j, 1])
polynomial([(-1-0j), (1+0j)])
>>> numpoly.square(numpoly.sum(numpoly.symbols("x y")))
polynomial(y**2+2*x*y+x**2)
"""
return multiply(x, x, out=out, where=where, **kwargs) | a59297f913433ec870a9eb7d8be5eea21a78cc41 | 3,323 |
def evaluate_tuple(columns,mapper,condition):
"""
"""
if isinstance(condition, tuple):
return condition[0](columns,mapper,condition[1],condition[2])
else:
return condition(columns,mapper) | 5200da50900329431db4ce657e79135534b8469e | 3,324 |
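A hedged sketch of the two dispatch paths evaluate_tuple() supports: a bare callable taking (columns, mapper), or a (callable, arg1, arg2) tuple whose callable also receives the two extra arguments.

cond_fn = lambda columns, mapper: bool(columns)
cond_tuple = (lambda columns, mapper, lo, hi: lo < hi, 1, 2)
print(evaluate_tuple(['col'], {}, cond_fn))     # True
print(evaluate_tuple(['col'], {}, cond_tuple))  # True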
import numpy as np
import scipy.misc
def imread(path, is_grayscale=True):
"""
Read image using its path.
Default value is gray-scale, and image is read by YCbCr format as the paper said.
"""
if is_grayscale:
return scipy.misc.imread(path, flatten=True, mode='YCbCr').astype(np.float)
else:
return scipy.misc.imread(path, mode='YCbCr').astype(np.float) | b32e918583c7d4a3bc3e38994bc4aef7dfdc5206 | 3,325 |
def get_priority_text(priority):
"""
Returns operation priority name by numeric value.
:param int priority: Priority numeric value.
:return: Operation priority name.
:rtype: str | None
"""
if priority == NSOperationQueuePriorityVeryLow:
return "VeryLow"
elif priority == NSOperationQueuePriorityLow:
return "Low"
elif priority == NSOperationQueuePriorityNormal:
return "Normal"
elif priority == NSOperationQueuePriorityHigh:
return "High"
elif priority == NSOperationQueuePriorityVeryHigh:
return "VeryHigh"
return "{}".format(priority) | 02986079f164672d58d7d5476e82463e1343ba9d | 3,326 |
import posixpath
def get_experiment_tag_for_image(image_specs, tag_by_experiment=True):
"""Returns the registry with the experiment tag for given image."""
tag = posixpath.join(experiment_utils.get_base_docker_tag(),
image_specs['tag'])
if tag_by_experiment:
tag += ':' + experiment_utils.get_experiment_name()
return tag | f45898d1f9adb74ca1133be05ab60da5de9df9e6 | 3,327 |
def call_pager():
"""
Convenient wrapper to call Pager class
"""
return _Pager() | 00cba0c47fc18417ab82ff41ae956961dcff9db4 | 3,328 |
def sign_in(request, party_id, party_guest_id):
"""
Sign guest into party.
"""
if request.method != "POST":
return HttpResponse("Endpoint supports POST method only.", status=405)
try:
party = Party.objects.get(pk=party_id)
party_guest = PartyGuest.objects.get(pk=party_guest_id)
except Party.DoesNotExist:
return HttpResponse("Requested Party ID does not exist.", status=404)
except PartyGuest.DoesNotExist:
return HttpResponse("Requested Party Guest does not exist.", status=404)
if not party.is_list_closed():
return HttpResponse("Can't sign in guests before the party starts.", status=403)
if not party_guest.signed_in:
party.sign_in(party_guest)
party.save()
party_guest.save()
return JsonResponse(party_guest.to_json())
return HttpResponse(
"Guest already signed in. Refresh to see updated list.", status=409
) | 2672344a92fb0d029946bf30d1a0a89d33a24a0f | 3,329 |
from datetime import datetime
def mcoolqc_status(connection, **kwargs):
"""Searches for annotated bam files that do not have a qc object
Keyword arguments:
lab_title -- limit search with a lab i.e. Bing+Ren, UCSD
start_date -- limit search to files generated since a date formatted YYYY-MM-DD
run_time -- assume runs beyond run_time are dead (default=24 hours)
"""
start = datetime.utcnow()
check = CheckResult(connection, 'mcoolqc_status')
my_auth = connection.ff_keys
check.action = "mcoolqc_start"
check.brief_output = []
check.full_output = {}
check.status = 'PASS'
# check indexing queue
check, skip = wfr_utils.check_indexing(check, connection)
if skip:
return check
# Build the query (find mcool files)
default_stati = 'released&status=uploaded&status=released+to+project'
stati = 'status=' + (kwargs.get('status') or default_stati)
query = 'search/?file_format.file_format=mcool&{}'.format(stati)
query += '&type=FileProcessed'
query += '&quality_metric.display_title=No+value'
# add date
s_date = kwargs.get('start_date')
if s_date:
query += '&date_created.from=' + s_date
# add lab
lab = kwargs.get('lab_title')
if lab:
query += '&lab.display_title=' + lab
# The search
print(query)
res = ff_utils.search_metadata(query, key=my_auth)
if not res:
check.action_message = 'No action required at this moment'
check.summary = 'All Good!'
return check
check.summary = '{} files need a mcoolqc'. format(len(res))
check.status = 'WARN'
check = wfr_utils.check_runs_without_output(res, check, 'mcoolQC', my_auth, start)
return check | 44273aa0f7441775258e0b390059cfe9778747e2 | 3,330 |
def isValidListOrRulename(word: str) -> bool:
"""test if there are no accented characters in a listname or rulename
so asciiletters, digitis, - and _ are allowed
"""
return bool(reValidName.match(word)) | ec826f31604f8dd43ba044e1f6ffbaaf758bdb88 | 3,331 |
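# Usage sketch; `reValidName` is defined elsewhere in the module (presumably something
# like re.compile(r'[a-zA-Z0-9_-]+$')), so the exact results depend on that pattern:
# isValidListOrRulename("my_rule-1")  # -> True
# isValidListOrRulename("règle")      # -> False, accented character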
def glyph_has_ink(font: TTFont, name: Text) -> bool:
"""Checks if specified glyph has any ink.
That is, that it has at least one defined contour associated.
Composites are considered to have ink if any of their components have ink.
Args:
font: the font
      name: The name of the glyph to check for ink.
Returns:
True if the font has at least one contour associated with it.
"""
if 'glyf' in font:
return ttf_glyph_has_ink(font, name)
elif ('CFF ' in font) or ('CFF2' in font):
return cff_glyph_has_ink(font, name)
else:
raise Exception("Could not find 'glyf', 'CFF ', or 'CFF2' table.") | 6450e2ec2ed7158f901c7e50999245042d880dce | 3,332 |
async def async_setup_entry(hass, entry, async_add_entities):
"""
Set up n3rgy data sensor
:param hass: hass object
:param entry: config entry
:return: none
"""
# in-line function
async def async_update_data():
"""
Fetch data from n3rgy API
This is the place to pre-process the data to lookup tables so entities can quickly look up their data
:param: none
:return: power consumption data
"""
return await hass.async_add_executor_job(read_consumption, api, entry)
async def async_initialize():
"""
Initialize objects from n3rgy API
:param: none
:return: data coordinator, device type
"""
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=PLATFORM,
update_method=async_update_data
)
# fetch initial data so we have data when entities subscribe
sensor_name, device_type = await hass.async_add_executor_job(get_device_info, api, entry)
await coordinator.async_refresh()
return (coordinator, sensor_name, device_type)
# initialize n3rgy API
device_type = None
api = init_api_client(entry)
# grant consent options
if GRANT_CONSENT_READY:
# grant consent is enabled for live environment
if process_grant_consent(entry):
coordinator, sensor_name, device_type = await async_initialize()
else:
# grant consent is disabled
coordinator, sensor_name, device_type = await async_initialize()
# add sensor
async_add_entities([N3rgySensor(coordinator, sensor_name, device_type)], False) | e2dd956428eb377c56d104e49889760f6ba9b653 | 3,333 |
def main():
""" Process command line arguments and run the script """
bp = BrPredMetric()
result = bp.Run()
return result | b61a80ee805dfc2d6e146b24ae0564bb5cda6e83 | 3,334 |
def step(init_distr, D):
    """
    Update the distribution one step: each value in `init_distr` is replaced by
    calling the zero-argument callable that `D` maps that value to.
    """
    for k in init_distr.keys():
        init_distr[k] = D[init_distr[k]]()
    return init_distr | 6270dd2818d2148e7d979d249fbb2a3a596dc2de | 3,335
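# Usage sketch: D maps each current value to a zero-argument callable that produces
# the next value, and the distribution dict is updated in place.
_D = {"sunny": lambda: "rainy", "rainy": lambda: "sunny"}
assert step({"day1": "sunny", "day2": "rainy"}, _D) == {"day1": "rainy", "day2": "sunny"}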
def from_json(data: JsonDict) -> AttributeType:
"""Make an attribute type from JSON data (deserialize)
Args:
data: JSON data from Tamr server
"""
base_type = data.get("baseType")
if base_type is None:
logger.error(f"JSON data: {repr(data)}")
raise ValueError("Missing required field 'baseType'.")
if base_type == Boolean._tag:
return BOOLEAN
elif base_type == Double._tag:
return DOUBLE
elif base_type == Int._tag:
return INT
elif base_type == Long._tag:
return LONG
elif base_type == String._tag:
return STRING
elif base_type == Array._tag:
inner_type = data.get("innerType")
if inner_type is None:
logger.error(f"JSON data: {repr(data)}")
raise ValueError("Missing required field 'innerType' for Array type.")
return Array(inner_type=from_json(inner_type))
elif base_type == Map._tag:
inner_type = data.get("innerType")
if inner_type is None:
logger.error(f"JSON data: {repr(data)}")
raise ValueError("Missing required field 'innerType' for Map type.")
return Map(inner_type=from_json(inner_type))
elif base_type == Record._tag:
attributes = data.get("attributes")
if attributes is None:
logger.error(f"JSON data: {repr(data)}")
raise ValueError("Missing required field 'attributes' for Record type.")
return Record(
attributes=tuple([subattribute.from_json(attr) for attr in attributes])
)
else:
logger.error(f"JSON data: {repr(data)}")
raise ValueError(f"Unrecognized 'baseType': {base_type}") | eba662ed1c1c3f32a5b65908fae68d7dd41f89e3 | 3,336 |
def ftduino_find_by_name(name):
"""
Returns the path of the ftDuino with the specified `name`.
:param name: Name of the ftDuino.
:return: The path of the ftDuino or ``None`` if the ftDuino was not found.
"""
for path, device_name in ftduino_iter():
if device_name == name:
return path
return None | 8a03d0b84dc9180fb2885d46fc8f1755cd2c6eed | 3,337 |
import numbers
def spectral_entropy (Sxx, fn, flim=None, display=False) :
"""
Compute different entropies based on the average spectrum, its variance,
and its maxima [1]_ [2]_
Parameters
----------
Sxx : ndarray of floats
Spectrogram (2d).
It is recommended to work with PSD to be consistent with energy conservation
fn : 1d ndarray of floats
frequency vector
    flim : tuple (fmin, fmax), optional, default is None
Frequency band used to compute the spectral entropy.
For instance, one may want to compute the spectral entropy for the
biophony bandwidth
display : boolean, optional, default is False
Display the different spectra (mean, variance, covariance, max...)
Returns
-------
EAS : scalar
Entropy of Average Spectrum
ECU : scalar
Entropy of spectral variance (along the time axis for each frequency)
ECV : scalar
Entropy of Coefficient of Variation (along the time axis for each frequency)
EPS : scalar
Entropy of spectral maxima (peaks)
EPS_KURT : scalar
Kurtosis of spectral maxima
EPS_SKEW : scalar
Skewness of spectral maxima
References
----------
.. [1] TOWSEY, Michael W. The calculation of acoustic indices derived from long-duration recordings of the natural environment. 2017. https://eprints.qut.edu.au/110634/1/QUTePrints110634_TechReport_Towsey2017August_AcousticIndices%20v3.pdf
.. [2] QUT : https://github.com/QutEcoacoustics/audio-analysis. Michael Towsey, Anthony Truskinger, Mark Cottman-Fields, & Paul Roe. (2018, March 5). Ecoacoustics Audio Analysis Software v18.03.0.41 (Version v18.03.0.41). Zenodo. http://doi.org/10.5281/zenodo.1188744
Examples
--------
>>> s, fs = maad.sound.load('../data/cold_forest_daylight.wav')
>>> Sxx_power, tn, fn, _ = maad.sound.spectrogram (s, fs)
>>> EAS, ECU, ECV, EPS, EPS_KURT, EPS_SKEW = maad.features.spectral_entropy(Sxx_power, fn, flim=(2000,10000))
>>> print('EAS: %2.2f / ECU: %2.2f / ECV: %2.2f / EPS: %2.2f / EPS_KURT: %2.2f / EPS_SKEW: %2.2f' % (EAS, ECU, ECV, EPS, EPS_KURT, EPS_SKEW))
EAS: 0.27 / ECU: 0.49 / ECV: 0.24 / EPS: 1.00 / EPS_KURT: 17.58 / EPS_SKEW: 3.55
"""
if isinstance(flim, numbers.Number) :
print ("WARNING: flim must be a tupple (fmin, fmax) or None")
return
if flim is None : flim=(fn.min(),fn.max())
# select the indices corresponding to the frequency range
iBAND = index_bw(fn, flim)
# force Sxx to be an ndarray
X = np.asarray(Sxx)
# TOWSEY : only on the bio band
# EAS [TOWSEY] #
#### COMMENT : Result a bit different due to different Hilbert implementation
X_mean = mean(X[iBAND], axis=1)
Hf = entropy(X_mean)
EAS = 1 - Hf
#### Entropy of spectral variance (along the time axis for each frequency)
""" ECU [TOWSEY] """
X_Var = var(X[iBAND], axis=1)
Hf_var = entropy(X_Var)
ECU = 1 - Hf_var
#### Entropy of coefficient of variance (along the time axis for each frequency)
""" ECV [TOWSEY] """
X_CoV = var(X[iBAND], axis=1)/mean(X[iBAND], axis=1)
Hf_CoV = entropy(X_CoV)
ECV = 1 - Hf_CoV
#### Entropy of spectral maxima
""" EPS [TOWSEY] """
ioffset = np.argmax(iBAND==True)
Nbins = sum(iBAND==True)
imax_X = np.argmax(X[iBAND],axis=0) + ioffset
imax_X = fn[imax_X]
max_X_bin, bin_edges = np.histogram(imax_X, bins=Nbins, range=flim)
if sum(max_X_bin) == 0 :
max_X_bin = np.zeros(len(max_X_bin))
EPS = float('nan')
#### Kurtosis of spectral maxima
EPS_KURT = float('nan')
#### skewness of spectral maxima
EPS_SKEW = float('nan')
else:
max_X_bin = max_X_bin/sum(max_X_bin)
Hf_fmax = entropy(max_X_bin)
EPS = 1 - Hf_fmax
#### Kurtosis of spectral maxima
EPS_KURT = kurtosis(max_X_bin)
#### skewness of spectral maxima
EPS_SKEW = skewness(max_X_bin)
if display:
fig, ax = plt.subplots()
ax.plot(fn[iBAND], X_mean/max(X_mean),label="Normalized mean")
        ax.plot(fn[iBAND], X_Var/max(X_Var),label="Normalized variance")
ax.plot(fn[iBAND], X_CoV/max(X_CoV),label="Normalized covariance")
ax.plot(fn[iBAND], max_X_bin/max(max_X_bin),label="Normalized Spectral max")
ax.set_title('Signals')
ax.set_xlabel('Frequency [Hz]')
ax.legend()
return EAS, ECU, ECV, EPS, EPS_KURT, EPS_SKEW | 533b388781e158b558ee38645271194adb414729 | 3,338 |
def _percentages(self):
"""
An extension method for Counter that
returns a dict mapping the keys of the Counter to their percentages.
:param self: Counter
:return: a dict mapping the keys of the Counter to their percentages
"""
# type: () -> dict[any, float]
    length = float(sum(count for count in self.values()))
return {value: self[value] / length for value in self} | 752781a9697113ebf3297050649a7f4ba1580b97 | 3,339 |
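# Usage sketch: the function is written as an extension method, so it can either be
# bound onto Counter or simply called with a Counter instance as `self`.
from collections import Counter
assert _percentages(Counter("aab")) == {"a": 2 / 3, "b": 1 / 3}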
def find_best_word_n(draw, nb_letters, path):
    """
    Return the entries of the `nb_letters`-letter lexicon loaded from `path`
    whose letters can all be taken from the given draw.
    """
lexicon = get_lexicon(path, nb_letters)
mask = [is_word_in_draw(draw, word) for word in lexicon["draw"]]
lexicon = lexicon.loc[mask]
return lexicon | ff3e06e69e6c56f59cf278c10e6860c6d0529b87 | 3,340 |
import json
def feature_reader(path):
"""
Reading the feature matrix stored as JSON from the disk.
:param path: Path to the JSON file.
:return out_features: Dict with index and value tensor.
"""
features = json.load(open(path))
features = {int(k): [int(val) for val in v] for k, v in features.items()}
return features | 959e37ae5a3b0b482d67e5e917211e2131b3c643 | 3,341 |
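# Usage sketch: round-trip a tiny feature dict through a temporary JSON file.
import tempfile
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as _tmp:
    json.dump({"0": ["1", "2"], "1": ["3"]}, _tmp)
assert feature_reader(_tmp.name) == {0: [1, 2], 1: [3]}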
def locate_all_occurrence(l, e):
"""
Return indices of all element occurrences in given list
:param l: given list
:type l: list
:param e: element to locate
:return: indices of all occurrences
:rtype: list
"""
return [i for i, x in enumerate(l) if x == e] | 95b662f359bd94baf68ac86450d94298dd6b366d | 3,342 |
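# Usage sketch:
assert locate_all_occurrence([1, 2, 1, 3, 1], 1) == [0, 2, 4]
assert locate_all_occurrence(["a", "b"], "z") == []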
import numpy as np
from numpy import cos, sin
def UVectorFromAngles(reflection):
"""
Calculate the B&L U vector from bisecting geometry
angles
"""
u = np.zeros((3,), dtype='float64')
# The tricky bit is set again: Busing & Levy's omega is 0 in
# bisecting position. This is why we have to correct for
# stt/2 here
om = np.deg2rad(reflection['om'] - reflection['stt']/2.)
chi = np.deg2rad(reflection['chi'])
phi = np.deg2rad(reflection['phi'])
u[0] = cos(om) * cos(chi) * cos(phi) - sin(om) * sin(phi)
u[1] = cos(om) * cos(chi) * sin(phi) + sin(om) * cos(phi)
u[2] = cos(om) * sin(chi)
return u | fe282e8ac67e5fafb34c63e1745cb9b262602a7a | 3,343 |
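# Usage sketch: in bisecting geometry (om == stt/2) with chi == phi == 0 the U vector
# reduces to the x unit vector.
print(UVectorFromAngles({'om': 10.0, 'stt': 20.0, 'chi': 0.0, 'phi': 0.0}))
# -> [1. 0. 0.]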
import numbers
import numpy as np
import skimage
from PIL import Image as pil_image  # module-level alias the function relies on
def to_pillow_image(img_array, image_size=None):
"""Convert an image represented as a numpy array back into a
Pillow Image object."""
if isinstance(image_size, (numbers.Integral, np.integer)):
image_size = (image_size, image_size)
img_array = skimage.img_as_ubyte(img_array)
img = pil_image.fromarray(img_array)
if image_size:
img = img.resize((image_size[1], image_size[0]), pil_image.LANCZOS)
return img | 435bfe79afc59f1cbdd250ca9e1558de8921f7b6 | 3,344 |
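# Usage sketch: convert a random float RGB array and resize it to 16x16.
_img = to_pillow_image(np.random.rand(32, 48, 3), image_size=16)
assert _img.size == (16, 16)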
from typing import Iterator
from torchtext.vocab import Vocab
def seq_to_sentence(seq: Iterator[int], vocab: Vocab, ignore: Iterator[int]) -> str:
"""Convert a sequence of integers to a string of (space-separated) words according to a vocabulary.
:param seq: Iterator[int]
A sequence of integers (tokens) to be converted.
:param vocab: Vocab
A Torchtext Vocab object containing a mapping from integers to strings (words).
:param ignore: Iterator[int]
A sequence of integers representing "special tokens" to ignore (convert as blanks).
:return: str
The resulting sentence.
"""
return ' '.join(vocab.itos[i] if vocab.itos[i] not in ignore else '' for i in seq).strip() | 2138bd3454c61b7e2a6e3dad25876fdcc4cabe4e | 3,346 |
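# Usage sketch with a minimal stand-in for a torchtext Vocab (anything exposing `.itos`
# works; note that `ignore` is compared against the token *strings*, per the implementation).
from types import SimpleNamespace
_vocab = SimpleNamespace(itos=["<pad>", "<unk>", "hello", "world"])
assert seq_to_sentence([2, 3, 0], _vocab, ignore=["<pad>"]) == "hello world"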
from skimage.exposure import histogram, match_histograms
import gc
def estimate_exposures(imgs, exif_exp, metadata, method, noise_floor=16, percentile=10,
invert_gamma=False, cam=None, outlier='cerman'):
"""
Exposure times may be inaccurate. Estimate the correct values by fitting a linear system.
:imgs: Image stack
:exif_exp: Exposure times read from image metadata
:metadata: Internal camera metadata dictionary
:method: Pick from ['gfxdisp', 'cerman']
:noise_floor: All pixels smaller than this will be ignored
:percentile: Use a small percentage of the least noisy pixels for the estimation
	:invert_gamma: If the images are gamma corrected, invert the gamma to work with linear values
:cam: Camera noise parameters for better estimation
:return: Corrected exposure times
"""
assert method in ('gfxdisp', 'cerman')
num_exp = len(imgs)
	assert num_exp > 1, f'Exposure estimation needs at least 2 images, got {num_exp}'
# Mask out saturated and noisy pixels
black_frame = np.tile(metadata['black_level'].reshape(2, 2), (metadata['h']//2, metadata['w']//2)) \
if metadata['raw_format'] else metadata['black_level']
Y = np.maximum(imgs - black_frame, 1e-6) # Add epsilon since we need log(Y)
if invert_gamma:
max_value = np.iinfo(metadata['dtype']).max
Y = (Y / max_value)**(invert_gamma) * max_value
if method == 'cerman':
'''
L. Cerman and V. Hlavac, “Exposure time estimation for high dynamic range imaging with
hand held camera” in Proc. of Computer Vision Winter Workshop, Czech Republic. 2006.
'''
rows, cols, m, W = np.zeros((4, 0))
for i in range(num_exp - 1):
# Ensure images are sorted in increasing order of exposure time
assert all(e1 <= e2 for e1, e2 in zip(exif_exp[:-1], exif_exp[1:])), \
'Please name the input files in increasing order of exposure time when sorted'
im1, im2 = Y[i], Y[i+1]
mask = np.stack((im1 + black_frame < metadata['saturation_point'],
im2 + black_frame < metadata['saturation_point'],
im1 > noise_floor, im2 > noise_floor)).all(axis=0)
# Match histograms of consecutive exposures
im1_hat = match_histograms(im1, im2)
im2_hat = match_histograms(im2, im1)
# Construct the simple sparse linear system. There are 2 sets for each pair (Eq. 4)
num_pix = np.count_nonzero(mask)
rows = np.concatenate((rows, np.arange(2*num_pix) + len(rows)))
cols = np.concatenate((cols, np.repeat(i, 2*num_pix)))
m = np.concatenate((m, (im1_hat[mask]/im1[mask]), (im2[mask]/im2_hat[mask])))
# Weights are given by sqrt() of histogram counts (Eq. 4)
im1, im2 = im1.astype(np.uint16), im2.astype(np.uint16)
counts, bins = histogram(im1)
weights1 = np.sqrt(counts[np.searchsorted(bins, im1[mask])])
counts, bins = histogram(im2)
weights2 = np.sqrt(counts[np.searchsorted(bins, im2[mask])])
W = np.concatenate((W, weights1, weights2))
num_rows = rows.shape[0]
data = np.ones(num_rows)
O = csr_matrix((data, (rows, cols)), shape=(num_rows, (num_exp - 1)))
elif method == 'gfxdisp':
logger.info(f'Estimate using logarithmic linear system with noise model')
num_pix = int(percentile/100*metadata['h']*metadata['w'])
# If noise parameters is provided, retrieve variances, else use simplified model
L = np.log(Y)
if cam == 'default':
cam = HDRutils.NormalNoise('Sony', 'ILCE-7R', 100, bits=14)
bits = cam.bits if cam else 14
scaled_var = np.stack([(cam.var(y)/y**2) if cam else 1/y**2 for y in Y/(2**bits - 1)])
# Construct logarithmic sparse linear system W.O.e = W.m
logger.info(f'Constructing sparse matrix (O) and vector (m) using {num_pix} pixels')
rows = np.arange(0, (num_exp - 1)*num_pix, 0.5)
cols, data = np.repeat(np.ones_like(rows)[None], 2, axis=0)
data[1::2] = -1
m = np.zeros((num_exp - 1)*num_pix, dtype=np.float32)
W = np.zeros_like(m)
for i in range(num_exp - 1):
cols[i*num_pix*2:(i + 1)*num_pix*2:2] = i
# Collect unsaturated pixels from all longer exposures
for j in range(i + 1, num_exp):
mask = np.stack((Y[i] + black_frame < metadata['saturation_point'],
Y[j] + black_frame < metadata['saturation_point'],
Y[i] > noise_floor, Y[j] > noise_floor)).all(axis=0)
# if mask.sum() < num_pix:
# continue
weights = np.concatenate((W[i*num_pix:(i+1)*num_pix],
(1/(scaled_var[i] + scaled_var[j]) * mask).flatten()))
logdiff = np.concatenate((m[i*num_pix:(i+1)*num_pix], (L[i] - L[j]).flatten()))
selected = np.argsort(weights)[-num_pix:]
W[i*num_pix:(i + 1)*num_pix] = weights[selected]
m[i*num_pix:(i + 1)*num_pix] = logdiff[selected]
cols[i*num_pix*2 + 1:(i + 1)*num_pix*2:2][selected > num_pix] = j
O = csr_matrix((data, (rows, cols)), shape=((num_exp - 1)*num_pix, num_exp))
logger.info('Solving the sparse linear system using least squares')
if outlier == 'cerman':
err_prev = np.finfo(float).max
t = trange(1000, leave=False)
for i in t:
exp = lsqr(diags(W) @ O, W * m)[0]
err = (W*(O @ exp - m))**2
selected = err < 3*err.mean()
W = W[selected]
m = m[selected]
O = O[selected]
if err.mean() < 1e-6 or err_prev - err.mean() < 1e-6:
# assert err_prev - err.mean() > 0
break
err_prev = err.mean()
t.set_description(f'loss={err.mean()}')
del err, selected
gc.collect()
logger.warning(f'Used {O.shape[0]/(num_exp - 1)/num_pix*100}% of the initial pixels')
elif outlier == 'ransac':
assert method == 'gfxdisp'
num_rows = W.shape[0]
# Randomly select 10% of the data
selected = np.zeros(num_rows, dtype=bool)
selected[:num_rows//10] = True
loss = np.finfo(float).max
WO = diags(W) @ O
Wm = W*m
t = trange(100, leave=False)
for i in t:
np.random.shuffle(selected)
exp_i = lsqr(WO[selected], Wm[selected])[0]
exp_i = np.exp(exp_i - exp_i.max()) * exif_exp.max()
reject = np.maximum(exp_i/exif_exp, exif_exp/exp_i) > 3
exp_i[reject] = exif_exp[reject]
err = ((W*(O @ exp_i - m))**2).sum()
if err < loss:
loss = err
exp = np.log(exp_i)
t.set_description(f'loss={err}; i={i}')
else:
exp = lsqr(diags(W) @ O, W * m)[0]
if method == 'cerman':
exp = np.append(exp, exif_exp[-1])
for e in range(num_exp - 2, -1, -1):
exp[e] = exif_exp[e+1]/exp[e]
elif method == 'gfxdisp':
exp = np.exp(exp - exp.max()) * exif_exp.max()
# logger.warning(f'Exposure times in EXIF: {exif_exp}, estimated exposures: {exp}. Outliers removed {i} times')
# reject = np.maximum(exp/exif_exp, exif_exp/exp) > 3
# exp[reject] = exif_exp[reject]
# if reject.any():
# logger.warning(f'Exposure estimation failed {reject}. Try using more pixels')
return exp | db80a45dc30cea86a71688a56447ef0166bb49b2 | 3,347 |
def default_reverse(*args, **kwargs):
"""
Acts just like django.core.urlresolvers.reverse() except that if the
resolver raises a NoReverseMatch exception, then a default value will be
returned instead. If no default value is provided, then the exception will
be raised as normal.
NOTE: Any exception that is not NoReverseMatch will always be raised as
normal, even if a default is provided.
"""
# We're explicitly NOT happy to just re-raise the exception, as that may
# adversely affect stack traces.
if 'default' not in kwargs:
return reverse(*args, **kwargs)
else:
default = kwargs.pop('default', None)
try:
return reverse(*args, **kwargs)
except NoReverseMatch:
return default | cadf9452c309adb4f2a865a3ea97ee2aca5b1acc | 3,348 |
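# Usage sketch (inside a configured Django project; 'missing-view' is a hypothetical URL name):
# default_reverse('missing-view', default='/fallback/')  # -> '/fallback/' when nothing matches
# default_reverse('missing-view')                        # -> raises NoReverseMatch as usual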
def get_company_periods_up_to(period):
""" Get all periods for a company leading up to the given period, including the given period
"""
company = period.company
return (company.period_set
.filter(company=company, end__lte=period.end)) | 604814f60a58f9155a47faba62561f94d3197fb2 | 3,349 |
from typing import List
def format_count(
label: str, counts: List[int], color: str, dashed: bool = False
) -> dict:
"""Format a line dataset for chart.js"""
ret = {
"label": label,
"data": counts,
"borderColor": color,
"borderWidth": 2,
"fill": False,
}
if dashed:
ret["borderDash"] = [5, 5]
return ret | 40f5aee7ad5d66f57737345b7d82e45a97cf6633 | 3,350 |
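# Usage sketch: one chart.js line-dataset dict per series.
assert format_count("errors", [1, 2, 3], "#ff0000", dashed=True) == {
    "label": "errors",
    "data": [1, 2, 3],
    "borderColor": "#ff0000",
    "borderWidth": 2,
    "fill": False,
    "borderDash": [5, 5],
}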
def detect_ripples(eeg):
"""Detect sharp wave ripples (SWRs) from single channel eeg (AnalogSignalArray).
"""
# Maggie defines ripples by doing:
# (1) filter 150-250
# (2) hilbert envelope
# (3) smooth with Gaussian (4 ms SD)
# (4) 3.5 SD above the mean for 15 ms
# (5) full ripple defined as window back to mean
assert eeg.n_signals == 1, "only single channel ripple detection currently supported!"
# (1)
ripple_eeg = nel.filtering.sosfiltfilt(eeg, fl=150, fh=250)
# (2, 3)
ripple_envelope = nel.utils.signal_envelope1D(ripple_eeg, sigma=0.004)
# (4, 5)
bounds, maxes, events = nel.utils.get_events_boundaries(
x=ripple_envelope.data,
PrimaryThreshold=ripple_envelope.mean() + 3.5*ripple_envelope.std(), # cm/s
SecondaryThreshold=ripple_envelope.mean(), # cm/s
minThresholdLength=0.015, # threshold crossing must be at least 15 ms long
minLength=0.0, # total ripple duration must be at least XXX ms long
ds = 1/ripple_envelope.fs
)
# convert bounds to time in seconds
timebounds = ripple_envelope.time[bounds]
# add 1/fs to stops for open interval
timebounds[:,1] += 1/eeg.fs
# create EpochArray with bounds
ripple_epochs = nel.EpochArray(timebounds)
# Adjust ripple centers to align to a peak
ripple_centers = np.floor( (ripple_epochs.centers - eeg.time[0])*eeg.fs ).astype(int)
ch = 7 # this was on some of Sibo's data, for CA1
adjusted_centers = [(p-10)+np.argmax(eeg.data[ch,p-10:p+10]) for p in ripple_centers[1:-1].tolist()]
return ripple_epochs | c92190ee6c31e6c1805841258224fa2aa7d4a749 | 3,351 |
def configured_hosts(hass):
"""Return a set of the configured hosts."""
"""For future to use with discovery!"""
out = {}
for entry in hass.config_entries.async_entries(DOMAIN):
out[entry.data[CONF_ADDRESS]] = {
UUID: entry.data[UUID],
CONF_ADDRESS: entry.data[CONF_ADDRESS],
ACCESS_KEY: entry.data[ACCESS_KEY],
SENSORS: entry.data.get(SENSORS, []),
}
return out | 04d24a8011a706d618699528129ba394ec54a590 | 3,353 |
def generate_keys(directory: str, pwd: bytes = None) -> (ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey):
"""
Generate the public and private keys
Generated keys have a default name, you should rename them
This can be done with os.rename()
    :param directory: folder where the keys are written;
        any existing keys are overwritten
    :param pwd: password; if not None, the best available encryption is chosen
        and the private key is encrypted with the password
:return: private, public keys
"""
private_key = generate_private_key(directory, pwd)
public_key = generate_public_key(directory, private_key)
return private_key, public_key | 28821be8d081e8c8369b889e3ce1a18336ab3c9f | 3,355 |
def get_storage_config_by_data_type(result_table_id):
    """
    Get the list of storage configurations for the given result table id (rt_id),
    restricted to raw data.
    :param result_table_id: rt_id
    :return: response: list of storage configurations
    """
    return DataStorageConfig.objects.filter(result_table_id=result_table_id, data_type="raw_data") | 12df282ece7176f003726dfb5ee1c1a1707ef6ad | 3,357
from typing import List, Tuple
def separate_args(args: List[str]) -> Tuple[List[str], List[str]]:
"""Separate args into preparser args and primary parser args.
Args:
args: Raw command line arguments.
Returns:
A tuple of lists (preparser_args, mainparser_args).
"""
preparser_args = []
if args and args[0].startswith("-"):
cur = 0
while cur < len(args) and args[cur].startswith("-"):
if args[cur] in _repobee.cli.preparser.PRE_PARSER_OPTS:
preparser_args += args[cur : cur + 2]
cur += 2
elif args[cur] in _repobee.cli.preparser.PRE_PARSER_FLAGS:
preparser_args.append(args[cur])
cur += 1
else:
break
return preparser_args, args[len(preparser_args) :] | 49829516f6982d041386d95c20b0028034e066a9 | 3,358 |
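# Usage sketch (illustrative only -- the exact split depends on repobee's
# PRE_PARSER_OPTS/PRE_PARSER_FLAGS; "--config-file" is assumed to be one of the opts):
# separate_args(["--config-file", "cfg.ini", "repos", "clone"])
# -> (["--config-file", "cfg.ini"], ["repos", "clone"])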
def lookup_alive_tags_shallow(repository_id, start_pagination_id=None, limit=None):
""" Returns a list of the tags alive in the specified repository. Note that the tags returned
*only* contain their ID and name. Also note that the Tags are returned ordered by ID.
"""
query = (Tag
.select(Tag.id, Tag.name)
.where(Tag.repository == repository_id)
.order_by(Tag.id))
if start_pagination_id is not None:
query = query.where(Tag.id >= start_pagination_id)
if limit is not None:
query = query.limit(limit)
return filter_to_alive_tags(query) | a0970da049fb2fa7cd3cc69c459fb7917d8185c8 | 3,359 |
def getLesson(request):
"""
Get the JSON representation for a lesson.
"""
print("getLesson called...")
lesson_id = None
if 'lesson_id' in request.matchdict:
lesson_id = request.matchdict['lesson_id']
if lesson_id is None:
# This should return an appropriate error about not finding the
# requested lesson.
pass
lesson = getLessonById(lesson_id)
return lesson | d721ab060462368d9ce3af071faa7e0751b34984 | 3,360 |
def make_daysetting_from_data(data):
""" Constructs a new setting from a given dataset. This method will automatically
instantiate a new class matching the type of the given dataset. It will fill
all values provided by the dataset and then return the created instance """
factory = {
"color": ColorType,
"scalar": ScalarType
}
return make_setting_from_factory(data, factory) | d3f78fe67441e555d5b525ce1ca6cb334769942a | 3,362 |
from typing import Optional
def read_report(file) -> Optional[Report]:
"""
Reads the report meta-data section of the file.
:param file: The file being read from.
:return: The report section of the file.
"""
# Use a peeker so we don't read beyond the end of the header section
peeker = line_peeker(file)
# Read each line as a property
properties = {}
while True:
line = next(peeker)
# Finish when we reach a non-report line
if not is_report_line(line):
break
# Skip comment lines
if is_comment_line(line):
continue
# Extract the property name and value from the line
name, value = split_field_line(line)
properties[name] = value
# Return the report (if there was one)
if len(properties) == 0:
return None
else:
return properties_to_report(properties) | 557402ee57675fcc11a0a05da02d554c1b2f13db | 3,363 |
def get_valid_segment(text):
""" Returns None or the valid Loki-formatted urn segment for the given input string. """
if text == '':
return None
else:
# Return the converted text value with invalid characters removed.
valid_chars = ['.', '_', '-']
new_text = ''
for char in text:
if char in valid_chars or char.isalnum():
new_text += char
return new_text | 423c1764b590df635b0794bfe52a0a8479d53fbf | 3,364 |
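# Usage sketch:
assert get_valid_segment("Hello, World!") == "HelloWorld"
assert get_valid_segment("loki_urn-segment.1") == "loki_urn-segment.1"
assert get_valid_segment("") is None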
def mparse(filename, staticObstacleList=list(), **kwargs):
"""
Parses a map file into a list of obstacles
@param filename The file name of the map file
@return A list of obstacles
"""
polyList = kwargs.get("nodes", list())
obstacleList = list()
try:
if filename is not None:
f = open(filename, "r+")
numberOfPolys = int(f.readline())
file_ext = filename.split(".")[-1]
# determine if obstacles are dynamic
if file_ext == "obstacles":
dynamicObstacle = True
else:
dynamicObstacle = False
# loop through file and create PolyObstacle objects
for _ in range(numberOfPolys):
# parse obstacle details
polyList = list()
line = [line for line in f.readline().split()[1:]]
                intList = list(map(lambda s: int(float(s)), line))
polyList += [
[
(
mapVal(
intList[2*i],
-29,
29,
0,
con.Configuration.xSize
),
con.Configuration.ySize - mapVal(
intList[2*i + 1],
-29,
29,
0,
con.Configuration.ySize
)
                    ) for i in range(len(intList) // 2)
]
]
# create and append PolyObstacle to obstacleList
obstacleList += [
obstacle.PolyObstacle(
pList,
con.Configuration.screen,
dynamic=dynamicObstacle
) for pList in polyList
]
else:
            # auto generate dynamic obstacles
for pList in polyList:
obst = obstacle.PolyObstacle(
pList,
con.Configuration.screen,
dynamic=True,
start_point=kwargs.get("start_point", None),
end_point=kwargs.get("end_point", None)
)
obstacleList.append(obst)
except Exception:
print("Error occured while parsing file [{0}]!".format(filename))
finally:
return obstacleList | ea62ff3e4f42ad9150be248c5a13d3c367f668b2 | 3,366 |
def sort_f_df(f_df):
"""Sorts f_df by s_idx first then by l_idx.
E.g. for scenario 0, see all decision alternatives in order,
then scenario 1, scenario 2, etc.
Parameters
----------
f_df : pandas.DataFrame
A dataframe of performance values, `f`, with indexes for the
scenario, `s`, and decision alternative, `l`.
Columns: `['s_idx', 'l_idx', '<f1_name>', '<f2_name>', ...]`
"""
    # Sort first by s_idx, then by l_idx, both ascending (sort_values returns a new frame)
    f_df = f_df.sort_values(['s_idx', 'l_idx'], ascending=[True, True])
    return f_df | ec82966a7a2fb417312198afe42109ed5883d31d | 3,368
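# Usage sketch with a tiny synthetic performance table (requires pandas):
import pandas as pd
_f_df = pd.DataFrame({"s_idx": [1, 0, 1, 0], "l_idx": [0, 1, 1, 0], "f1": [4.0, 3.0, 2.0, 1.0]})
print(sort_f_df(_f_df)[["s_idx", "l_idx"]].values.tolist())
# -> [[0, 0], [0, 1], [1, 0], [1, 1]]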