content | sha1 | id
---|---|---
stringlengths 35–762k | stringlengths 40 | int64 0–3.66M
def scorer(func):
"""This function is a decorator for a scoring function.
This is hack a to get around self being passed as the first argument to the scoring function."""
def wrapped(a, b=None):
if b is not None:
return func(b)
return func(a)
return wrapped | 39ec390982d26d10a6ce827800df654ff6c4ab42 | 3,654,700 |
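A minimal usage sketch for the decorator above (illustrative, not part of the dataset record; `Scorer` and `length_score` are hypothetical names). It shows the "self hack" in action: the wrapped function works both as a plain call and as a bound method, where the instance passed as the first argument is discarded.
@scorer
def length_score(item):
    """Score an item by its length."""
    return len(item)

class Scorer:
    score = length_score  # accessed on an instance, `self` becomes the first argument

print(length_score("abc"))     # direct call -> 3
print(Scorer().score("abcd"))  # method call -> self is ignored, prints 4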
def print_stats(yards):
"""
This function prints the final stats after a skier has crashed.
"""
    print()
    print("You skied a total of", yards, "yards!")
    # print("Want to take another shot?")
    print()
return 0 | 72b56bf8cfb0691636e41ccfcfe9b3893ab870eb | 3,654,701 |
def _calculate_risk_reduction(module):
"""
Function to calculate the risk reduction due to testing. The algorithms
used are based on the methodology presented in RL-TR-92-52, "SOFTWARE
RELIABILITY, MEASUREMENT, AND TESTING Guidebook for Software
Reliability Measurement and Testing." Rather than attempting to
estimate the software failure rate, RTK provides a risk index for the
software based on the same factors used in RL-TR-92-52 for estimating
software failure rates. RTK also provides test planning guidance in
the same manner as RL-TR-92-52.
:param module: the :py:class:`rtk.software.CSCI.Model` or
:py:class:`rtk.software.Unit.Model` data model to calculate.
:return: _error_code
:rtype: int
"""
# WARNING: Refactor _calculate_risk_reduction; current McCabe Complexity metric = 13.
_error_code = 0
# Calculate the risk reduction due to the test effort.
try:
if module.test_effort == 1: # Labor hours
_test_ratio = float(module.labor_hours_test) / \
float(module.labor_hours_dev)
elif module.test_effort == 2: # Budget
_test_ratio = float(module.budget_test) / \
float(module.budget_dev)
elif module.test_effort == 3: # Schedule
_test_ratio = float(module.schedule_test) / \
float(module.schedule_dev)
else:
_test_ratio = 1.0
except ZeroDivisionError:
_error_code = 10
_test_ratio = 0.0
module.te = 1.0
if _test_ratio > 0.4:
module.te = 0.9
# Calculate the risk reduction due to test methods used.
module.tm = 1.0
module.tu = sum([_tu[0] for _tu in module.lst_test_selection])
module.tt = sum([_tt[1] for _tt in module.lst_test_selection])
try:
if module.tu / module.tt > 0.75:
module.tm = 0.9
elif module.tu / module.tt < 0.5:
module.tm = 1.1
except ZeroDivisionError:
_error_code = 10
# Calculate the risk reduction due to test coverage.
try:
if module.level_id == 2: # Module
_VS = ((float(module.nm_test) / float(module.nm)) +
(float(module.interfaces_test) /
float(module.interfaces))) / 2.0
elif module.level_id == 3: # Unit
_VS = ((float(module.branches_test) / float(module.branches)) +
(float(module.inputs_test) / float(module.inputs))) / 2.0
else:
_VS = 1.0
except ZeroDivisionError:
_error_code = 10
_VS = 1.0
module.tc = 1.0 / _VS
module.t_risk = module.te * module.tm * module.tc
return _error_code | c8876bc247243f13572d49c07063a063ba4eb42a | 3,654,702 |
def run_metarl(env, test_env, seed, log_dir):
"""Create metarl model and training."""
deterministic.set_seed(seed)
snapshot_config = SnapshotConfig(snapshot_dir=log_dir,
snapshot_mode='gap',
snapshot_gap=10)
runner = LocalRunner(snapshot_config)
obs_dim = int(np.prod(env[0]().observation_space.shape))
action_dim = int(np.prod(env[0]().action_space.shape))
reward_dim = 1
# instantiate networks
encoder_in_dim = obs_dim + action_dim + reward_dim
encoder_out_dim = params['latent_size'] * 2
net_size = params['net_size']
context_encoder = MLPEncoder(input_dim=encoder_in_dim,
output_dim=encoder_out_dim,
hidden_sizes=[200, 200, 200])
space_a = akro.Box(low=-1,
high=1,
shape=(obs_dim + params['latent_size'], ),
dtype=np.float32)
space_b = akro.Box(low=-1, high=1, shape=(action_dim, ), dtype=np.float32)
augmented_env = EnvSpec(space_a, space_b)
qf1 = ContinuousMLPQFunction(env_spec=augmented_env,
hidden_sizes=[net_size, net_size, net_size])
qf2 = ContinuousMLPQFunction(env_spec=augmented_env,
hidden_sizes=[net_size, net_size, net_size])
obs_space = akro.Box(low=-1, high=1, shape=(obs_dim, ), dtype=np.float32)
action_space = akro.Box(low=-1,
high=1,
shape=(params['latent_size'], ),
dtype=np.float32)
vf_env = EnvSpec(obs_space, action_space)
vf = ContinuousMLPQFunction(env_spec=vf_env,
hidden_sizes=[net_size, net_size, net_size])
policy = TanhGaussianMLPPolicy2(
env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size])
context_conditioned_policy = ContextConditionedPolicy(
latent_dim=params['latent_size'],
context_encoder=context_encoder,
policy=policy,
use_ib=params['use_information_bottleneck'],
use_next_obs=params['use_next_obs_in_context'],
)
train_task_names = ML10.get_train_tasks()._task_names
test_task_names = ML10.get_test_tasks()._task_names
pearlsac = PEARLSAC(
env=env,
test_env=test_env,
policy=context_conditioned_policy,
qf1=qf1,
qf2=qf2,
vf=vf,
num_train_tasks=params['num_train_tasks'],
num_test_tasks=params['num_test_tasks'],
latent_dim=params['latent_size'],
meta_batch_size=params['meta_batch_size'],
num_steps_per_epoch=params['num_steps_per_epoch'],
num_initial_steps=params['num_initial_steps'],
num_tasks_sample=params['num_tasks_sample'],
num_steps_prior=params['num_steps_prior'],
num_extra_rl_steps_posterior=params['num_extra_rl_steps_posterior'],
num_evals=params['num_evals'],
num_steps_per_eval=params['num_steps_per_eval'],
batch_size=params['batch_size'],
embedding_batch_size=params['embedding_batch_size'],
embedding_mini_batch_size=params['embedding_mini_batch_size'],
max_path_length=params['max_path_length'],
reward_scale=params['reward_scale'],
train_task_names=train_task_names,
test_task_names=test_task_names,
)
tu.set_gpu_mode(params['use_gpu'], gpu_id=0)
if params['use_gpu']:
pearlsac.to()
tabular_log_file = osp.join(log_dir, 'progress.csv')
tensorboard_log_dir = osp.join(log_dir)
dowel_logger.add_output(dowel.StdOutput())
dowel_logger.add_output(dowel.CsvOutput(tabular_log_file))
dowel_logger.add_output(dowel.TensorBoardOutput(tensorboard_log_dir))
runner.setup(algo=pearlsac,
env=env,
sampler_cls=PEARLSampler,
sampler_args=dict(max_path_length=params['max_path_length']))
runner.train(n_epochs=params['num_epochs'],
batch_size=params['batch_size'])
dowel_logger.remove_all()
return tabular_log_file | adc4041539d55d9cddba69a44a0d0fcfbbc1c16e | 3,654,703 |
from ..nn.nn_modifiers import get_single_nn_mutation_op
def get_default_mutation_op(dom):
""" Returns the default mutation operator for the domain. """
if dom.get_type() == 'euclidean':
return lambda x: euclidean_gauss_mutation(x, dom.bounds)
elif dom.get_type() == 'integral':
return lambda x: integral_gauss_mutation(x, dom.bounds)
elif dom.get_type() == 'discrete':
return lambda x: discrete_random_mutation(x, dom.list_of_items)
elif dom.get_type() == 'prod_discrete':
return lambda x: prod_discrete_random_mutation(x, dom.list_of_list_of_items)
elif dom.get_type() == 'discrete_numeric':
return lambda x: discrete_numeric_exp_mutation(x, dom.list_of_items)
elif dom.get_type() == 'prod_discrete_numeric':
return lambda x: prod_discrete_numeric_exp_mutation(x, dom.list_of_list_of_items)
elif dom.get_type() == 'discrete_euclidean':
return lambda x: discrete_euclidean_mutation(x, dom.list_of_items)
elif dom.get_type() == 'neural_network':
return get_single_nn_mutation_op(dom, [0.5, 0.25, 0.125, 0.075, 0.05])
else:
raise ValueError('No default mutation implemented for domain type %s.'%(
dom.get_type())) | 8e9455ca96dac89b11bebcc3e4f779f62111a010 | 3,654,704 |
import itertools
def chunked(src, size, count=None, **kw):
"""Returns a list of *count* chunks, each with *size* elements,
generated from iterable *src*. If *src* is not evenly divisible by
*size*, the final chunk will have fewer than *size* elements.
Provide the *fill* keyword argument to provide a pad value and
enable padding, otherwise no padding will take place.
>>> chunked(range(10), 3)
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> chunked(range(10), 3, fill=None)
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]]
>>> chunked(range(10), 3, count=2)
[[0, 1, 2], [3, 4, 5]]
See :func:`chunked_iter` for more info.
"""
chunk_iter = chunked_iter(src, size, **kw)
if count is None:
return list(chunk_iter)
else:
return list(itertools.islice(chunk_iter, count)) | 6f35735d9294f4c245643609641fb86b0f988fb1 | 3,654,705 |
def doc_to_schema_fields(doc, schema_file_name='_schema.yaml'):
"""Parse a doc to retrieve the schema file."""
return doc_to_schema(doc, schema_file_name=schema_file_name)[
'schema_fields'] | b9d88f52ff49e43cae0ad5373a8d841f0236bb50 | 3,654,706 |
from collections import Counter, OrderedDict
from typing import Tuple
import pandas as pd
from tqdm import tqdm
def cluster(df: pd.DataFrame, k: int, knn: int = 10, m: int = 30, alpha: float = 2.0, verbose0: bool = False,
verbose1: bool = False, verbose2: bool = True, plot: bool = True) -> Tuple[pd.DataFrame, OrderedDict]:
"""
    Chameleon clustering: build the K-NN graph, partition it into m clusters, then iteratively merge the most similar clusters until k remain.
:param df: input dataframe.
:param k: desired number of clusters.
:param knn: parameter k of K-nearest_neighbors.
:param m: number of clusters to reach in the initial clustering phase.
:param alpha: exponent of relative closeness; the larger, the more important relative closeness is than
relative interconnectivity.
:param verbose0: if True, print general infos.
:param verbose1: if True, print infos about the prepartitioning phase.
:param verbose2: if True, print labels of merging clusters and their scores in the merging phase.
:param plot: if True, show plots.
:return: dataframe with cluster labels and dictionary of merging scores (similarities).
"""
if k is None:
k = 1
if verbose0:
print(f"Building kNN graph (k = {knn})...")
graph = knn_graph(df=df, k=knn, symmetrical=False, verbose=verbose1)
if plot is True:
plot2d_graph(graph, print_clust=False)
graph = pre_part_graph(graph, m, df, verbose1, plotting=plot)
# to account for cases where initial_clust is too big or k is already reached before the merging phase
cl_dict = OrderedDict({
list(graph.nodes)[i]: graph.nodes[i]["cluster"]
for i in range(len(graph))
})
m = len(Counter(cl_dict.values()))
if verbose0:
print(f"actual init_clust: {m}")
merging_similarities = OrderedDict({})
iterm = (tqdm(enumerate(range(m - k)), total=m - k) if verbose1 else enumerate(range(m - k)))
for i, _ in iterm:
df, ms, ci = merge_best(graph, df, alpha, k, False, verbose2)
if ms == 0:
break
merging_similarities[m - (i + 1)] = ms
if plot:
plot2d_data(df, ci)
res = rebuild_labels(df)
return res, merging_similarities | 2363df84104da1f182c63faaac21006033e23083 | 3,654,707 |
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
"""
Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
it for the two-layer neural net classifier. These are the same steps as
we used for the SVM, but condensed to a single function.
"""
# Load the raw CIFAR-10 data
cifar10_dir = "C:\Users\Pomodori\workspace\cifar-10-batches-py"
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
# Normalize the data: subtract the mean image
mean_image = np.mean(X_train, axis=0)
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
# Reshape data to rows
X_train = X_train.reshape(num_training, -1)
X_val = X_val.reshape(num_validation, -1)
X_test = X_test.reshape(num_test, -1)
return X_train, y_train, X_val, y_val, X_test, y_test | 515777ca498ae9a234a1503660f2cde40f0b0244 | 3,654,708 |
def timeframe_int_to_str(timeframe: int) -> str:
"""
Convert timeframe from integer to string
:param timeframe: minutes per candle (240)
:return: string representation for API (4h)
"""
if timeframe < 60:
return f"{timeframe}m"
elif timeframe < 1440:
return f"{int(timeframe / 60)}h"
else:
return f"{int(timeframe / 1440)}d" | 75778742dea8204c74a47bfe92c25aef43ebbad8 | 3,654,709 |
def FIT(individual):
"""Sphere test objective function.
F(x) = sum_{i=1}^d xi^2
d=1,2,3,...
Range: [-100,100]
Minima: 0
"""
y=sum(x**2 for x in individual)
return y | d6aadf620f85bd9cb27cef661e2ec664a4eb43b1 | 3,654,710 |
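A quick worked check of the sphere objective above:
assert FIT([3, 4]) == 25      # 3**2 + 4**2
assert FIT([0, 0, 0]) == 0    # global minimum at the origin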
def update_range(value):
"""
For user selections, return the relevant range
"""
global df
min, max = df.timestamp.iloc[value[0]], df.timestamp.iloc[value[-1]]
return 'timestamp slider: {} | {}'.format(min, max) | c4819b46cdd78be3c86fc503791a7a0ff9cd96b3 | 3,654,711 |
def simplify(tile):
"""
:param tile: 34 tile format
    :return: tile: 0-8 representation
"""
return tile - 9 * (tile // 9) | c8543d73e37d4fa1d665d3d28277ff99095e0635 | 3,654,712 |
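The expression `tile - 9 * (tile // 9)` is simply `tile % 9`, i.e. the position of the tile within its 9-tile suit. A couple of worked values:
assert simplify(0) == 0    # first tile of the first suit
assert simplify(14) == 5   # 14 is the 6th tile (index 5) of the second suit
assert simplify(20) == 2   # 20 is the 3rd tile (index 2) of the third suit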
def vep(dataset, config, block_size=1000, name='vep', csq=False) -> MatrixTable:
"""Annotate variants with VEP.
.. include:: ../_templates/req_tvariant.rst
:func:`.vep` runs `Variant Effect Predictor
<http://www.ensembl.org/info/docs/tools/vep/index.html>`__ with the `LOFTEE
plugin <https://github.com/konradjk/loftee>`__ on the current dataset and
adds the result as a row field.
Examples
--------
Add VEP annotations to the dataset:
>>> result = hl.vep(dataset, "data/vep.properties") # doctest: +SKIP
Notes
-----
**Configuration**
:func:`.vep` needs a configuration file to tell it
how to run VEP. The format is a `.properties file
<https://en.wikipedia.org/wiki/.properties>`__. Roughly, each line defines a
property as a key-value pair of the form `key = value`. :func:`.vep` supports the
following properties:
- **hail.vep.perl** -- Location of Perl. Optional, default: perl.
- **hail.vep.perl5lib** -- Value for the PERL5LIB environment variable when
invoking VEP. Optional, by default PERL5LIB is not set.
- **hail.vep.path** -- Value of the PATH environment variable when invoking
VEP. Optional, by default PATH is not set.
- **hail.vep.location** -- Location of the VEP Perl script. Required.
- **hail.vep.cache_dir** -- Location of the VEP cache dir, passed to VEP
with the ``--dir`` option. Required.
- **hail.vep.fasta** -- Location of the FASTA file to use to look up the
reference sequence, passed to VEP with the `--fasta` option. Required.
- **hail.vep.assembly** -- Genome assembly version to use. Optional,
default: GRCh37
- **hail.vep.plugin** -- VEP plugin, passed to VEP with the `--plugin`
option. Optional. Overrides `hail.vep.lof.human_ancestor` and
`hail.vep.lof.conservation_file`.
- **hail.vep.lof.human_ancestor** -- Location of the human ancestor file for
the LOFTEE plugin. Ignored if `hail.vep.plugin` is set. Required otherwise.
- **hail.vep.lof.conservation_file** -- Location of the conservation file
for the LOFTEE plugin. Ignored if `hail.vep.plugin` is set. Required
otherwise.
Here is an example ``vep.properties`` configuration file
.. code-block:: text
hail.vep.perl = /usr/bin/perl
hail.vep.path = /usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin
hail.vep.location = /path/to/vep/ensembl-tools-release-81/scripts/variant_effect_predictor/variant_effect_predictor.pl
hail.vep.cache_dir = /path/to/vep
hail.vep.lof.human_ancestor = /path/to/loftee_data/human_ancestor.fa.gz
hail.vep.lof.conservation_file = /path/to/loftee_data/phylocsf.sql
**VEP Invocation**
.. code-block:: text
<hail.vep.perl>
<hail.vep.location>
--format vcf
--json
--everything
--allele_number
--no_stats
--cache --offline
--dir <hail.vep.cache_dir>
--fasta <hail.vep.fasta>
--minimal
--assembly <hail.vep.assembly>
--plugin LoF,\
human_ancestor_fa:$<hail.vep.lof.human_ancestor>,\
filter_position:0.05,\
min_intron_size:15,\
conservation_file:<hail.vep.lof.conservation_file>
-o STDOUT
**Annotations**
A new row field is added in the location specified by `name` with the
following schema:
.. code-block:: text
struct {
assembly_name: str,
allele_string: str,
ancestral: str,
colocated_variants: array<struct {
aa_allele: str,
aa_maf: float64,
afr_allele: str,
afr_maf: float64,
allele_string: str,
amr_allele: str,
amr_maf: float64,
clin_sig: array<str>,
end: int32,
eas_allele: str,
eas_maf: float64,
ea_allele: str,
ea_maf: float64,
eur_allele: str,
eur_maf: float64,
exac_adj_allele: str,
exac_adj_maf: float64,
exac_allele: str,
exac_afr_allele: str,
exac_afr_maf: float64,
exac_amr_allele: str,
exac_amr_maf: float64,
exac_eas_allele: str,
exac_eas_maf: float64,
exac_fin_allele: str,
exac_fin_maf: float64,
exac_maf: float64,
exac_nfe_allele: str,
exac_nfe_maf: float64,
exac_oth_allele: str,
exac_oth_maf: float64,
exac_sas_allele: str,
exac_sas_maf: float64,
id: str,
minor_allele: str,
minor_allele_freq: float64,
phenotype_or_disease: int32,
pubmed: array<int32>,
sas_allele: str,
sas_maf: float64,
somatic: int32,
start: int32,
strand: int32
}>,
context: str,
end: int32,
id: str,
input: str,
intergenic_consequences: array<struct {
allele_num: int32,
consequence_terms: array<str>,
impact: str,
minimised: int32,
variant_allele: str
}>,
most_severe_consequence: str,
motif_feature_consequences: array<struct {
allele_num: int32,
consequence_terms: array<str>,
high_inf_pos: str,
impact: str,
minimised: int32,
motif_feature_id: str,
motif_name: str,
motif_pos: int32,
motif_score_change: float64,
strand: int32,
variant_allele: str
}>,
regulatory_feature_consequences: array<struct {
allele_num: int32,
biotype: str,
consequence_terms: array<str>,
impact: str,
minimised: int32,
regulatory_feature_id: str,
variant_allele: str
}>,
seq_region_name: str,
start: int32,
strand: int32,
transcript_consequences: array<struct {
allele_num: int32,
amino_acids: str,
biotype: str,
canonical: int32,
ccds: str,
cdna_start: int32,
cdna_end: int32,
cds_end: int32,
cds_start: int32,
codons: str,
consequence_terms: array<str>,
distance: int32,
domains: array<struct {
db: str,
name: str
}>,
exon: str,
gene_id: str,
gene_pheno: int32,
gene_symbol: str,
gene_symbol_source: str,
hgnc_id: str,
hgvsc: str,
hgvsp: str,
hgvs_offset: int32,
impact: str,
intron: str,
lof: str,
lof_flags: str,
lof_filter: str,
lof_info: str,
minimised: int32,
polyphen_prediction: str,
polyphen_score: float64,
protein_end: int32,
protein_start: int32,
protein_id: str,
sift_prediction: str,
sift_score: float64,
strand: int32,
swissprot: str,
transcript_id: str,
trembl: str,
uniparc: str,
variant_allele: str
}>,
variant_class: str
}
Parameters
----------
dataset : :class:`.MatrixTable`
Dataset.
config : :obj:`str`
Path to VEP configuration file.
block_size : :obj:`int`
Number of rows to process per VEP invocation.
name : :obj:`str`
Name for resulting row field.
csq : :obj:`bool`
If ``True``, annotates VCF CSQ field as a :py:data:`.tstr`.
If ``False``, annotates with the full nested struct schema.
Returns
-------
:class:`.MatrixTable`
Dataset with new row-indexed field `name` containing VEP annotations.
"""
require_row_key_variant(dataset, 'vep')
mt = MatrixTable(Env.hail().methods.VEP.apply(dataset._jvds, config, 'va.`{}`'.format(name), csq, block_size))
return mt.annotate_rows(vep=mt['vep']['vep']) | e9433db17e82d00aba275066026a301a9b97e5e0 | 3,654,713 |
def __get_ll_type__(ll_type):
"""
Given an lltype value, retrieve its definition.
"""
res = [llt for llt in __LL_TYPES__
if llt[1] == ll_type]
assert len(res) < 2, 'Duplicate linklayer types.'
if res:
return res[0]
else:
return None | f2e86ddd027ec26546a4be8ff8060c1cd8c64aca | 3,654,714 |
import os
import logging
def is_cloaked(path, names):
""" Return True if this is likely to be a cloaked encrypted post """
fname = unicoder(os.path.split(path)[1]).lower()
fname = os.path.splitext(fname)[0]
for name in names:
name = os.path.split(name.lower())[1]
name, ext = os.path.splitext(unicoder(name))
if ext == u'.rar' and fname.startswith(name) and (len(fname) - len(name)) < 8 and len(names) < 3 and not RE_SUBS.search(fname):
logging.debug('File %s is probably encrypted due to RAR with same name inside this RAR', fname)
return True
elif 'password' in name:
logging.debug('RAR %s is probably encrypted: "password" in filename %s', fname, name)
return True
return False | 69a6ccacd9a26adcba64fa84ede25074594a27bc | 3,654,715 |
def slice_node(node, split):
"""Splits a node up into two sides.
For text nodes, this will return two text nodes.
For text elements, this will return two of the source nodes with children
distributed on either side. Children that live on the split will be
split further.
Parameters
----------
node : docutils.nodes.Text or docutils.nodes.TextElement
split : int
Location of the represented text to split at.
Returns
-------
(left, right) : (type(node), type(node))
"""
if isinstance(node, Text):
return Text(node[:split]), Text(node[split:])
elif isinstance(node, docutils.nodes.TextElement):
if split < 0:
split = len(node.astext())+split
right = node.deepcopy()
left = node.deepcopy()
left.clear()
offset = 0
while offset < split:
try:
child = right.pop(0)
except IndexError:
break
child_strlen = len(child.astext())
if offset+child_strlen < split:
left.append(child)
offset += child_strlen
continue
elif offset+child_strlen != split:
child_left, child_right = slice_node(child, split-offset)
left.append(child_left)
right.insert(0, child_right)
offset += child_strlen
return left, right
else:
raise ValueError('Cannot split {}'.format(repr(node))) | 5958afbb61160f7e00c42e80c4c69aa7f8644925 | 3,654,716 |
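A small sanity check of the plain-Text branch above (assuming docutils is installed); the TextElement branch behaves analogously but redistributes child nodes across the split point.
from docutils.nodes import Text

left, right = slice_node(Text("hello world"), 5)
assert (str(left), str(right)) == ("hello", " world")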
def k_radius(x,centroids):
"""
Maximal distance between centroids and corresponding samples in partition
"""
labels = partition_labels(x,centroids)
radii = []
for idx in range(centroids.shape[0]):
mask = labels == idx
radii.append(
np.max(
np.linalg.norm(x[mask]-centroids[idx],axis=-1))
)
return np.asarray(radii) | de010609e726ce250d72d773a9c1ffb772315b0c | 3,654,717 |
def build_feature_df(data, default=True, custom_features={}):
"""
Computes the feature matrix for the dataset of components.
Args:
        data (dataset): A mapping of {ic_id: IC}. Compatible with the dataset representation produced by load_dataset().
        default (bool, optional): Determines whether to compute a standard selection of features for the dataset. Defaults to True.
        custom_features (dict, optional): A mapping of custom features that will be computed for the dataset.
            The format is {feature_name: compute_feature} where compute_feature is a function whose only argument is an IC. Defaults to {}.
Returns:
        pd.DataFrame: The feature matrix for the dataset.
"""
feature_df = pd.DataFrame(index=data.keys())
def get_iter():
if default:
return default_features.items()
else:
return chain(default_features.items(), custom_features.items())
features = [feature_name for feature_name, _ in get_iter()]
idx = []
rows = []
for ic_id, ic in data.items():
row = []
idx.append(ic_id)
for feature_name, compute_feature in get_iter():
row.append(compute_feature(ic))
rows.append(row)
feature_df = pd.DataFrame(rows, index=idx, columns=features)
return feature_df | bbd2543a5043ae11305fe86449778a74f7e7ceb3 | 3,654,718 |
def update_uid_digests_cache(uid, digest):
"""
Updates uid_digest cache, also updates rd_digest and rd_digest_dict cache also.
"""
debug = False
try:
if debug:
            print('\n debug -- Entered update_uid_digests_cache...')
dump_dict(digest, debug)
# Get the cache; If cache exists, update the 'uid_digests' cache.
uid_digests = cache.get('uid_digests')
if uid_digests and uid_digests is not None:
if uid in uid_digests:
                if debug: print('\n debug -- uid (%s) in uid_digests...' % uid)
uid_digests[uid] = digest
uid_digests_cache_update(uid_digests)
# Update rd_digests and rd_digests dict cache.
if not update_rd_digests_cache(uid):
message = '********* Failed to update rd_digests cache **********'
current_app.logger.info(message)
return
except Exception as err:
message = str(err)
current_app.logger.info(message)
return None | 841d1b2517594175867b970e0e1af4631d97d9c7 | 3,654,719 |
def decode_complex(data, complex_names=(None, None)):
""" Decodes possibly complex data read from an HDF5 file.
Decodes possibly complex datasets read from an HDF5 file. HDF5
doesn't have a native complex type, so they are stored as
H5T_COMPOUND types with fields such as 'r' and 'i' for the real and
imaginary parts. As there is no standardization for field names, the
field names have to be given explicitly, or the fieldnames in `data`
analyzed for proper decoding to figure out the names. A variety of
reasonably expected combinations of field names are checked and used
if available to decode. If decoding is not possible, it is returned
as is.
Parameters
----------
data : arraylike
The data read from an HDF5 file, that might be complex, to
decode into the proper Numpy complex type.
complex_names : tuple of 2 str and/or Nones, optional
``tuple`` of the names to use (in order) for the real and
imaginary fields. A ``None`` indicates that various common
field names should be tried.
Returns
-------
c : decoded data or data
If `data` can be decoded into a complex type, the decoded
complex version is returned. Otherwise, `data` is returned
unchanged.
See Also
--------
encode_complex
Notes
-----
Currently looks for real field names of ``('r', 're', 'real')`` and
imaginary field names of ``('i', 'im', 'imag', 'imaginary')``
ignoring case.
"""
# Now, complex types are stored in HDF5 files as an H5T_COMPOUND type
# with fields along the lines of ('r', 're', 'real') and ('i', 'im',
# 'imag', 'imaginary') for the real and imaginary parts, which most
# likely won't be properly extracted back into making a Python
# complex type unless the proper h5py configuration is set. Since we
# can't depend on it being set and adjusting it is hazardous (the
# setting is global), it is best to just decode it manually. These
# fields are obtained from the fields of its dtype. Obviously, if
# there are no fields, then there is nothing to do.
if data.dtype.fields is None:
return data
fields = list(data.dtype.fields)
# If there aren't exactly two fields, then it can't be complex.
if len(fields) != 2:
return data
# We need to grab the field names for the real and imaginary
# parts. This will be done by seeing which list, if any, each field
# is and setting variables to the proper name if it is in it (they
# are initialized to None so that we know if one isn't found).
real_fields = ['r', 're', 'real']
imag_fields = ['i', 'im', 'imag', 'imaginary']
cnames = list(complex_names)
for s in fields:
if s.lower() in real_fields:
cnames[0] = s
elif s.lower() in imag_fields:
cnames[1] = s
# If the real and imaginary fields were found, construct the complex
# form from the fields. This is done by finding the complex type
# that they cast to, making an array, and then setting the
# parts. Otherwise, return what we were given because it isn't in
# the right form.
if cnames[0] is not None and cnames[1] is not None:
cdata = np.result_type(data[cnames[0]].dtype, \
data[cnames[1]].dtype, 'complex64').type(data[cnames[0]])
cdata.imag = data[cnames[1]]
return cdata
else:
return data | 4c2fad09751ddfe4c5623d47a187f710ab62532f | 3,654,720 |
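An illustrative round trip for the snippet above; the structured array mimics the compound 'r'/'i' layout h5py would return for a MATLAB-style complex dataset (the field values here are arbitrary):
import numpy as np

raw = np.zeros(3, dtype=[('r', 'f8'), ('i', 'f8')])
raw['r'] = [1.0, 2.0, 3.0]
raw['i'] = [0.5, -0.5, 0.0]

c = decode_complex(raw)
assert c.dtype == np.complex128
assert c[1] == 2.0 - 0.5j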
def CLYH(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-05-29", **kwargs
) -> Graph:
"""Return CLYH graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "2020-05-29"
Version to retrieve
The available versions are:
- 2020-05-29
"""
return AutomaticallyRetrievedGraph(
"CLYH", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)() | 6dcddfff1411ea71d1743fabde782066c41ace9f | 3,654,721 |
def tif_to_array(
filename,
image_descriptions=False,
verbose=False,
):
"""Load a tif into memory and return it as a numpy array.
This is primarily a tool we use to interact with ImageJ, so that's
the only case it's really been debugged for. I bet somebody made
nice python bindings for LibTIFF, if you want a more general purpose
reader.
"""
ifds, endian = parse_tif(filename, verbose)
"""
Ensure that the various IFD's are consistent: same length, width,
bit depth, data format, etc.
Also check that our assumptions about other tags are true.
"""
width = ifds[0]['ImageWidth']
length = ifds[0]['ImageLength']
bit_depth = ifds[0]['BitsPerSample']
data_format = ifds[0].get('SampleFormat', 1) #Default to unsigned int
for d in ifds:
try:
assert width == d['ImageWidth']
assert length == d['ImageLength']
assert bit_depth == d['BitsPerSample']
assert data_format == d.get('SampleFormat', 1)
except AssertionError:
print("To load a TIF as a numpy array, the IFDs all have to match.")
print("IFD A:", ifds[0])
print("IFD B:", d)
raise UserWarning("The TIF we're trying to load has mismatched IFD's")
try:
assert d.get('SamplesPerPixel', 1) == 1
assert d.get('NewSubFileType', 0) == 0
assert d.get('Compression', 1) == 1
assert d.get('PhotometricInterpretation', 0) in (0, 1)
except AssertionError:
print("Offending IFD:", d)
raise UserWarning(
"The TIF we're trying to load" +
" uses options that np_tif doesn't support.")
"""
Collect the strip offsets and the strip byte counts
"""
strip_offsets = []
strip_byte_counts = []
for d in ifds:
try: #Just one strip per IFD
strip_offsets.append(int(d['StripOffsets']))
strip_byte_counts.append(int(d['StripByteCounts']))
except TypeError: #Many strips per IFD
strip_offsets.extend(int(x) for x in d['StripOffsets'])
strip_byte_counts.extend(int(x) for x in d['StripByteCounts'])
assert len(strip_offsets) == len(strip_byte_counts)
"""
Allocate our numpy array, and load data into our array from disk,
one strip at a time.
"""
data = np.zeros(sum(strip_byte_counts), dtype=np.ubyte)
data_offset = 0
with open(filename, 'rb') as f:
for i in range(len(strip_offsets)):
file_offset = strip_offsets[i]
num_bytes = strip_byte_counts[i]
data[data_offset:data_offset + num_bytes] = np.frombuffer(
get_bytes_from_file(f, file_offset, num_bytes),
dtype=np.ubyte)
data_offset += num_bytes
"""
Determine the numpy data type from the TIF bit depth and data
format, and reshape based on width, height, and number of ifd's:
"""
data_type = {
1: 'uint',
2: 'int',
3: 'float',
4: 'undefined',
}[data_format] + ascii(bit_depth)
try:
data_type = getattr(np, data_type)
except AttributeError:
raise UserWarning("Unsupported data format: " + data_type)
data = data.view(data_type)
if endian == 'big':
data = data.byteswap()
data = data.reshape(len(ifds), length, width)
"""
Optionally, return the image descriptions.
"""
if image_descriptions:
image_descriptions = [d.get('ImageDescription', '') for d in ifds]
for desc in image_descriptions:
if desc != image_descriptions[0]:
break
else:
image_descriptions = image_descriptions[0:1]
return data, image_descriptions
return data | db81009b9a3ccc6238bf605a56247e38586fc134 | 3,654,722 |
def lineParPlot(parDict, FigAx=None, **kwargs):
"""
Plot the results of lineParameters().
Parameters
----------
parDict : dict
The relevant parameters:
xPerc : tuple, (xPerc1, xPerc2)
Left and right x-axis values of the line profile at perc% of the peak flux.
Xc : float
The center of x-axis value calculated at perc% of the peak flux.
Fperc : float
Fpeak * perc / 100.
FigAx : tuple (optional)
The tuple of (fig, ax) of the figure.
**kwargs : dict
The keywords for the plotting.
Returns
-------
FigAx : tuple
The tuple of (fig, ax) of the figure.
"""
if FigAx is None:
fig = plt.figure(figsize=(8, 4))
ax = plt.gca()
else:
fig, ax = FigAx
x1, x2 = parDict["xPerc"]
xc = parDict["Xc"]
yperc = parDict["Fperc"]
ax.axvline(x=x1, **kwargs)
kwargs["label"] = None
ax.axvline(x=x2, **kwargs)
ax.axhline(y=yperc, **kwargs)
kwargs["ls"] = "-"
ax.axvline(x=xc, **kwargs)
return (fig, ax) | 4767446fb983902ea0a3ce631420c61f032970f9 | 3,654,723 |
def prepare_data_arrays(tr_df, te_df, target):
"""
tr_df: train dataset made by "prepare_dataset" function
te_df: test dataset made by "prepare_dataset" function
target: name of target y
return: (numpy array of train dataset),
(numpy array of test dataset: y will be filled with NaN),
(column ID of y)
"""
col_to_id = {k: v for v, k in enumerate(tr_df.columns)}
train_array = np.array(tr_df)
test_array = np.array(te_df)
target_id = col_to_id[target]
# fill target values with nan
test_array[:, target_id] = np.nan
return train_array, test_array, target_id | 097f376263dfeecffaf201f4ea1cd29980d88746 | 3,654,724 |
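A tiny illustration with made-up frames (column names are arbitrary): the target column of the returned test array is blanked out with NaN while its column index is reported.
import numpy as np
import pandas as pd

tr = pd.DataFrame({"x": [1.0, 2.0], "y": [10.0, 20.0]})
te = pd.DataFrame({"x": [3.0], "y": [30.0]})

train_arr, test_arr, y_id = prepare_data_arrays(tr, te, target="y")
assert y_id == 1
assert np.isnan(test_arr[0, y_id])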
import tempfile
import os
import git
from git import RemoteProgress
def download_git_repo(repo: str):
"""
Download remote git repo
"""
local_filename = repo.split('/')[-1]
class CloneProgress(RemoteProgress):
def update(self, op_code, cur_count, max_count=None, message=''):
if message:
print(message)
td = tempfile.mkdtemp()
repo_local_path = os.path.join(td, local_filename)
git.Repo.clone_from(repo, repo_local_path,
branch='master', progress=CloneProgress(), depth=1)
return repo_local_path | 7d2b6b1bcadb9d0a8ed8baae9ec4576c67b9099a | 3,654,725 |
def plot(model_set, actual_mdot=True, qnuc=0.0, verbose=True, ls='-', offset=True,
bprops=('rate', 'fluence', 'peak'), display=True, grid_version=0):
"""Plot predefined set of mesa model comparisons
model_set : int
ID for set of models (defined below)
"""
mesa_info = get_mesa_set(model_set)
if actual_mdot:
mdots = mesa_info['mdots_actual']
else:
mdots = mesa_info['mdots']
mesa_info['params']['qnuc'] = qnuc
fig, ax = plot_compare(mesa_runs=mesa_info['runs'], display=display,
mesa_mdots=mdots, bprops=bprops,
params=mesa_info['params'], verbose=verbose,
grid_version=grid_version, ls=ls, offset=offset)
return fig, ax | 2ceec63d162fe07dd4a00a508657095528243421 | 3,654,726 |
import os
import shutil
def main():
"""Main documentation builder script."""
parser = ArgumentParser(
description="build GGRC documentation",
)
parser.add_argument(
'-c', '--clean',
action='store_true',
default=False,
help='clean cache before build',
dest='clean',
)
parser.add_argument(
'-s', '--strict',
action='store_true',
default=False,
help='treat warnings as errors',
dest='strict',
)
args = parser.parse_args()
docs_src = os.path.join(DOCS_DIR, 'source')
docs_build = os.path.join(DOCS_DIR, 'build')
builder.build('API', os.path.join(docs_src, 'api'))
if args.clean:
shutil.rmtree(docs_build, ignore_errors=True)
if not os.path.isdir(docs_build):
os.mkdir(docs_build)
sphinx = Sphinx(
srcdir=docs_src,
confdir=docs_src,
outdir=os.path.join(docs_build, 'html'),
doctreedir=os.path.join(docs_build, 'doctrees'),
buildername='html',
warningiserror=args.strict,
)
sphinx.build()
return sphinx.statuscode | 9c193e70df24c1b1431d0eea9809b3d07baf0c62 | 3,654,727 |
def preprocessing_fn(batch):
"""
Standardize, then normalize sound clips
"""
processed_batch = []
for clip in batch:
signal = clip.astype(np.float64)
# Signal normalization
signal = signal / np.max(np.abs(signal))
# get pseudorandom chunk of fixed length (from SincNet's create_batches_rnd)
signal_length = len(signal)
if signal_length < WINDOW_LENGTH:
signal = np.concatenate((signal, np.zeros(WINDOW_LENGTH-signal_length)))
else:
np.random.seed(signal_length)
signal_start = np.random.randint(0, signal_length-WINDOW_LENGTH)
signal_stop = signal_start + WINDOW_LENGTH
signal = signal[signal_start:signal_stop]
processed_batch.append(signal)
return np.array(processed_batch) | d277cd95d174e1ec104a8b8a8d72e23e2dd7f991 | 3,654,728 |
def generate_random_bond_list(atom_count, bond_count, seed=0):
"""
Generate a random :class:`BondList`.
"""
np.random.seed(seed)
# Create random bonds between atoms of
# a potential atom array of length ATOM_COUNT
bonds = np.random.randint(atom_count, size=(bond_count, 3))
# Clip bond types to allowed BondType values
bonds[:, 2] %= len(struc.BondType)
# Remove bonds of atoms to itself
bonds = bonds[bonds[:,0] != bonds[:,1]]
assert len(bonds) > 0
return struc.BondList(atom_count, bonds) | cb7784f8561be2ea7c54d5f46c2e6e697164b1b8 | 3,654,729 |
def open_cosmos_files():
"""
This function opens files related to the COSMOS field.
Returns:
        The ZFIRE COSMOS master table, the ZFOURGE catalogue with its EAZY and FAST outputs, rest-frame U/V/J colours (spec-z and photo-z based), UV+IR SFRs, and the MOSDEF and VUDS cross-match tables with their colours.
"""
COSMOS_mastertable = pd.read_csv('data/zfire/zfire_cosmos_master_table_dr1.1.csv',index_col='Nameobj')
ZF_cat = ascii.read('data/zfourge/spitler2014/cosmos.v0.10.7.a.cat')
ZF_EAZY = ascii.read('data/zfourge/spitler2014/cosmos.v0.10.7.a.zout')
ZF_FAST = ascii.read('data/zfourge/spitler2014/cosmos.v0.10.7.a.fout')
#load in colours using spec-z
#only ZFIRE
U_spec = ascii.read('data/zfourge/uvj/specz_zfire/cosmos.v0.10.7.a.153.rf')
V_spec = ascii.read('data/zfourge/uvj/specz_zfire/cosmos.v0.10.7.a.155.rf')
J_spec = ascii.read('data/zfourge/uvj/specz_zfire/cosmos.v0.10.7.a.161.rf')
#load in colours using photo-z
U_photo = ascii.read('data/zfourge/uvj/photoz/cosmos.v0.10.7.a.153.rf')
V_photo = ascii.read('data/zfourge/uvj/photoz/cosmos.v0.10.7.a.155.rf')
J_photo = ascii.read('data/zfourge/uvj/photoz/cosmos.v0.10.7.a.161.rf')
#galaxy colours derived by Lee's catalogue
#This uses the older EAZY method of fitting colours
UV_lee = ascii.read('data/zfourge/spitler2014/cosmos.v0.10.7.a.153-155.rf')
VJ_lee = ascii.read('data/zfourge/spitler2014/cosmos.v0.10.7.a.155-161.rf')
UV_IR_SFRs = ascii.read('data/zfourge/sfrs/cosmos.sfr.v0.5.cat')
MOSDEF_ZFOURGE = ascii.read('data/catalogue_crossmatch/MOSDEF_COSMOS.dat')
#ZFIRE and MOSDEF colours
U_ZM = ascii.read('data/zfourge/uvj/specz_zfire_mosdef/cosmos.v0.10.7.a.153.rf')
V_ZM = ascii.read('data/zfourge/uvj/specz_zfire_mosdef/cosmos.v0.10.7.a.155.rf')
J_ZM = ascii.read('data/zfourge/uvj/specz_zfire_mosdef/cosmos.v0.10.7.a.161.rf')
VUDS_ZFOURGE = ascii.read('data/catalogue_crossmatch/VUDS_COSMOS.dat')
VUDS_extra = ascii.read('data/vuds/cesam_vuds_spectra_dr1_cosmos_catalog_additional_info.txt')
#ZFIRE and VUDS colours
U_ZV = ascii.read('data/zfourge/uvj/specz_vuds/cosmos.v0.10.7.a.153.rf')
V_ZV = ascii.read('data/zfourge/uvj/specz_vuds/cosmos.v0.10.7.a.155.rf')
J_ZV = ascii.read('data/zfourge/uvj/specz_vuds/cosmos.v0.10.7.a.161.rf')
return COSMOS_mastertable, ZF_cat, ZF_EAZY, ZF_FAST, U_spec, V_spec, J_spec,\
U_photo, V_photo, J_photo, UV_lee, VJ_lee, UV_IR_SFRs, MOSDEF_ZFOURGE,\
U_ZM,V_ZM, J_ZM, VUDS_ZFOURGE, VUDS_extra, U_ZV, V_ZV, J_ZV | 229aa967dce5faaf42b488ebf2768b280ced9359 | 3,654,730 |
import numpy
def convert_image_points_to_points(image_positions, distances):
"""Convert image points to 3d points.
Returns:
positions
"""
hypotenuse_small = numpy.sqrt(
image_positions[:, 0]**2 +
image_positions[:, 1]**2 + 1.0)
ratio = distances / hypotenuse_small
n = image_positions.shape[0]
positions = numpy.zeros([n, 3])
positions[:, 0] = -image_positions[:, 0] * ratio
positions[:, 1] = ratio
positions[:, 2] = -image_positions[:, 1] * ratio
return positions | 3680a02997cf1109fd08f61c6642b29ea3433f1d | 3,654,731 |
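A worked numeric check of the geometry above: an image point on the optical axis at distance 5 maps straight ahead along +y, and an off-axis point is scaled by its hypotenuse.
import numpy

image_positions = numpy.array([[0.0, 0.0], [1.0, 0.0]])
distances = numpy.array([5.0, numpy.sqrt(2.0)])

points = convert_image_points_to_points(image_positions, distances)
# first point lies on the camera axis: (0, 5, 0)
assert numpy.allclose(points[0], [0.0, 5.0, 0.0])
# second point: hypotenuse = sqrt(1 + 0 + 1), ratio = 1 -> (-1, 1, 0)
assert numpy.allclose(points[1], [-1.0, 1.0, 0.0])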
def W(i, j):
"""The Wilson functions.
:func:`W` corresponds to formula (2) on page 16 in `the technical paper`_
defined as:
.. math::
W(t, u_j)= \\
e^{-UFR\cdot (t+u_j)}\cdot \\
\left\{ \\
\\alpha\cdot\min(t, u_j) \\
-0.5\cdot e^{-\\alpha\cdot\max(t, u_j)}\cdot( \\
e^{\\alpha\cdot\min(t, u_j)} \\
-e^{-\\alpha\cdot\min(t, u_j)} \\
) \\
\\right\}
where :math:`t = u_i`.
Args:
i(int): Time index (1, 2, ..., :attr:`N`)
j(int): Time index (1, 2, ..., :attr:`N`)
"""
t = u[i]
uj = u[j]
return exp(-UFR * (t+uj)) * (
alpha * min(t, uj) - 0.5 * exp(-alpha * max(t, uj)) * (
exp(alpha*min(t, uj)) - exp(-alpha*min(t, uj))
)) | 37266db68fb51a87f15290edae06eb6397796b6f | 3,654,732 |
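The function above relies on module-level `u`, `UFR` and `alpha` (the Smith-Wilson maturities, ultimate forward rate and convergence speed). Below is a self-contained sketch of the same kernel with those quantities passed explicitly, useful for spot checks; the default parameter values are illustrative placeholders, not the module's actual settings.
from math import exp

def wilson_kernel(t, uj, ufr=0.029, alpha=0.128):
    """Smith-Wilson kernel W(t, u_j) with explicit parameters (illustrative defaults)."""
    lo, hi = min(t, uj), max(t, uj)
    return exp(-ufr * (t + uj)) * (
        alpha * lo - 0.5 * exp(-alpha * hi) * (exp(alpha * lo) - exp(-alpha * lo))
    )

# symmetric in its arguments, as the formula suggests
assert abs(wilson_kernel(1.0, 5.0) - wilson_kernel(5.0, 1.0)) < 1e-12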
from scipy.interpolate import interp1d
def reddening_fm(wave, ebv=None, a_v=None, r_v=3.1, model='f99'):
"""Determines a Fitzpatrick & Massa reddening curve.
Parameters
----------
wave: ~numpy.ndarray
wavelength in Angstroms
ebv: float
E(B-V) differential extinction; specify either this or a_v.
a_v: float
A(V) extinction; specify either this or ebv.
r_v: float, optional
defaults to standard Milky Way average of 3.1
model: {'f99', 'fm07'}, optional
* 'f99' is the default Fitzpatrick (1999) [1]_
* 'fm07' is Fitzpatrick & Massa (2007) [2]_. Currently not R dependent.
Returns
-------
reddening_curve: ~numpy.ndarray
Multiply to deredden flux, divide to redden.
Notes
-----
Uses Fitzpatrick (1999) [1]_ by default, which relies on the UV
parametrization of Fitzpatrick & Massa (1990) [2]_ and spline fitting in the
optical and IR. This function is defined from 910 A to 6 microns, but note
the claimed validity goes down only to 1150 A. The optical spline points are
not taken from F99 Table 4, but rather updated versions from E. Fitzpatrick
(this matches the Goddard IDL astrolib routine FM_UNRED).
The fm07 model uses the Fitzpatrick & Massa (2007) [3]_ parametrization,
which has a slightly different functional form. That paper claims it
    preferable, although it is unclear if significantly (Gordon et al. 2009)
[4]_. It is not the literature standard, so not default here.
References
----------
[1] Fitzpatrick, E. L. 1999, PASP, 111, 63
    [2] Fitzpatrick, E. L. & Massa, D. 1990, ApJS, 72, 163
    [3] Fitzpatrick, E. L. & Massa, D. 2007, ApJ, 663, 320
[4] Gordon, K. D., Cartledge, S., & Clayton, G. C. 2009, ApJ, 705, 1320
"""
model = model.lower()
if model not in ['f99','fm07']:
raise ValueError('model must be f99 or fm07')
if (a_v is None) and (ebv is None):
raise ValueError('Must specify either a_v or ebv')
if (a_v is not None) and (ebv is not None):
raise ValueError('Cannot specify both a_v and ebv')
if a_v is not None:
ebv = a_v / r_v
if model == 'fm07':
raise ValueError('TEMPORARY: fm07 currently not properly R dependent')
x = 1e4 / wave # inverse microns
k = np.zeros(x.size)
if any(x < 0.167) or any(x > 11):
raise ValueError('fm_dered valid only for wavelengths from 910 A to '+
'6 microns')
# UV region
uvsplit = 10000. / 2700. # Turn 2700A split into inverse microns.
uv_region = (x >= uvsplit)
y = x[uv_region]
k_uv = np.zeros(y.size)
# Fitzpatrick (1999) model
if model == 'f99':
x0, gamma = 4.596, 0.99
c3, c4 = 3.23, 0.41
c2 = -0.824 + 4.717 / r_v
c1 = 2.030 - 3.007 * c2
D = y**2 / ((y**2-x0**2)**2 + y**2 * gamma**2)
F = np.zeros(y.size)
valid = (y >= 5.9)
F[valid] = 0.5392 * (y[valid]-5.9)**2 + 0.05644 * (y[valid]-5.9)**3
k_uv = c1 + c2*y + c3*D + c4*F
# Fitzpatrick & Massa (2007) model
if model == 'fm07':
x0, gamma = 4.592, 0.922
c1, c2, c3, c4, c5 = -0.175, 0.807, 2.991, 0.319, 6.097
D = y**2 / ((y**2-x0**2)**2 + y**2 * gamma**2)
valid = (y <= c5)
k_uv[valid] = c1 + c2*y[valid] + c3*D[valid]
valid = (y > c5)
k_uv[valid] = c1 + c2*y[valid] + c3*D[valid] + c4*(y[valid]-c5)**2
k[uv_region] = k_uv
# Calculate values for UV spline points to anchor OIR fit
x_uv_spline = 10000. / np.array([2700., 2600.])
D = x_uv_spline**2 / ((x_uv_spline**2-x0**2)**2 + x_uv_spline**2 * gamma**2)
k_uv_spline = c1 + c2*x_uv_spline +c3*D
# Optical / IR
OIR_region = (x < uvsplit)
y = x[OIR_region]
k_OIR = np.zeros(y.size)
# Fitzpatrick (1999) model
if model == 'f99':
# The OIR anchors are up from IDL astrolib, not F99.
anchors_extinction = np.array([0, 0.26469*r_v/3.1, 0.82925*r_v/3.1, # IR
-0.422809 + 1.00270*r_v + 2.13572e-04*r_v**2, # optical
-5.13540e-02 + 1.00216*r_v - 7.35778e-05*r_v**2,
0.700127 + 1.00184*r_v - 3.32598e-05*r_v**2,
(1.19456 + 1.01707*r_v - 5.46959e-03*r_v**2 + 7.97809e-04*r_v**3 +
-4.45636e-05*r_v**4)])
anchors_k = np.append(anchors_extinction-r_v, k_uv_spline)
# Note that interp1d requires that the input abscissa is monotonically
# _increasing_. This is opposite the usual ordering of a spectrum, but
# fortunately the _output_ abscissa does not have the same requirement.
anchors_x = 1e4 / np.array([26500., 12200., 6000., 5470., 4670., 4110.])
anchors_x = np.append(0., anchors_x) # For well-behaved spline.
anchors_x = np.append(anchors_x, x_uv_spline)
OIR_spline = interp1d(anchors_x, anchors_k, kind='cubic')
k_OIR = OIR_spline(y)
# Fitzpatrick & Massa (2007) model
if model == 'fm07':
anchors_k_opt = np.array([0., 1.322, 2.055])
IR_wave = np.array([float('inf'), 4., 2., 1.333, 1.])
anchors_k_IR = (-0.83 + 0.63*r_v) * IR_wave**-1.84 - r_v
anchors_k = np.append(anchors_k_IR, anchors_k_opt)
anchors_k = np.append(anchors_k, k_uv_spline)
anchors_x = np.array([0., 0.25, 0.50, 0.75, 1.]) # IR
opt_x = 1e4 / np.array([5530., 4000., 3300.]) # optical
anchors_x = np.append(anchors_x, opt_x)
anchors_x = np.append(anchors_x, x_uv_spline)
OIR_spline = interp1d(anchors_x, anchors_k, kind='cubic')
k_OIR = OIR_spline(y)
k[OIR_region] = k_OIR
reddening_curve = 10**(0.4 * ebv * (k+r_v))
return reddening_curve | 1f47b360044613c9bbb18bf3446bcd7e3ad20344 | 3,654,733 |
def list_registered_stateful_ops_without_inputs():
"""Returns set of registered stateful ops that do not expect inputs.
This list is used to identify the ops to be included in the state-graph and
that are subsequently fed into the apply-graphs.
Returns:
A set of strings.
"""
return set([
name
for name, op in op_def_registry.get_registered_ops().items()
if op.is_stateful and not op.input_arg
]) | aa089bc4157c6a3c36121c6e880ffbd546723f0e | 3,654,734 |
def load_frame_from_video(path: str, frame_index: int) -> np.ndarray:
"""load a full trajectory video file and return a single frame from it"""
vid = load_video(path)
img = vid[frame_index]
return img | 7b8747df38dfcf1f2244166002126d6d25170506 | 3,654,735 |
from typing import Dict
from typing import List
def get_settings_patterns(project_id: int) -> Dict[str, str]:
"""Returning project patterns settings"""
track_patterns: List[Dict[str, str]] = ProjectSettings.objects.get(project_id=project_id).trackPatterns
return {pattern['pattern']: pattern['regex'] for pattern in track_patterns} | d566ad5ec2fd72e2384fea90aa9cae9d99d9f441 | 3,654,736 |
def video_to_array(filepath):
"""Process the video into an array."""
cap = cv2.VideoCapture(filepath)
num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
channel = 3
frame_buffer = np.empty((num_frames, height, width, channel), dtype=np.float32)
frame_num = 0
returned = True
while (frame_num < num_frames and returned):
returned, frame = cap.read()
if frame is not None:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = frame.astype(np.float32)
frame = frame / 255.0
if np.sum(frame) > 0.0:
frame_buffer[frame_num] = frame
frame_num += 1
cap.release()
return frame_buffer | 2034ce56c7ca4fe61d0e0eb443c6fa9910d8a232 | 3,654,737 |
from unittest.mock import Mock
async def test_10_request(requests_mock: Mock) -> None:
"""Test `async request()`."""
result = {"result": "the result"}
rpc = RestClient("http://test", "passkey", timeout=0.1)
def response(req: PreparedRequest, ctx: object) -> bytes: # pylint: disable=W0613
assert req.body is not None
_ = json_decode(req.body)
return json_encode(result).encode("utf-8")
requests_mock.post("/test", content=response)
ret = await rpc.request("POST", "test", {})
assert requests_mock.called
auth_parts = requests_mock.last_request.headers['Authorization'].split(' ', 1)
assert auth_parts[0].lower() == 'bearer'
assert auth_parts[1] == 'passkey'
assert ret == result
result2 = {"result2": "the result 2"}
def response2(req: PreparedRequest, ctx: object) -> bytes: # pylint: disable=W0613
assert req.body is not None
_ = json_decode(req.body)
return json_encode(result2).encode("utf-8")
requests_mock.post("/test2", content=response2)
ret = await rpc.request("POST", "/test2")
assert requests_mock.called
assert ret == result2 | fa9e03d5b3f5a4f594db29eae057607f790e158c | 3,654,738 |
def entmax15(X, axis=-1, k=None):
"""1.5-entmax: normalizing sparse transform (a la softmax).
Solves the optimization problem:
max_p <x, p> - H_1.5(p) s.t. p >= 0, sum(p) == 1.
where H_1.5(p) is the Tsallis alpha-entropy with alpha=1.5.
Parameters
----------
X : paddle.Tensor
The input tensor.
    axis : int
        The dimension along which to apply 1.5-entmax (must be the last axis).
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
Returns
-------
P : paddle tensor, same shape as X
The projection result, such that P.sum(axis=axis) == 1 elementwise.
"""
assert axis in [-1, X.ndim - 1]
return Entmax15Function.apply(X, axis, k) | 08887ec5aff323077ea6ea99bf6bd2b83bb4cc19 | 3,654,739 |
def get_dependency_graph(node, targets=None):
"""Returns the dependent nodes and the edges for the passed in node.
:param str node: The node to get dependencies for.
:param list targets: A list with the modules that are used as targets.
:return: The dependency graph info.
:rtype: GraphInfo
"""
g = _make_graph()
edges, direct_dependencies = _all_dependencies(node, g)
if targets:
targets = set(targets)
affected_targets = []
if not edges:
return graph_info.GraphInfo(
graph=g,
nodes=[],
edges=[],
direct_dependencies=[],
affected_targets=[]
)
all_nodes = set()
for n1, n2 in edges:
all_nodes.add(n1)
all_nodes.add(n2)
node_to_info = {}
for index, node_name in enumerate(all_nodes):
if node_name not in node_to_info:
node_id = index + 1
node_to_info[node_name] = {
"id": node_id,
"label": "",
"title": node_name,
"value": 1,
"color": "blue"
}
if targets and node_name in targets:
node_to_info[node_name]["color"] = 'orange'
node_to_info[node_name]["value"] = 3
affected_targets.append(node_name)
node_to_info[node]['color'] = 'red'
node_to_info[node]['value'] = 3
edges_representation = []
for n1, n2 in edges:
index1 = node_to_info[n1]["id"]
index2 = node_to_info[n2]["id"]
edge_color = 'gray'
value = 1
if n1 == node:
node_to_info[n2]['color'] = 'green'
node_to_info[n2]['value'] = 2
edge_color = 'green'
value = 2
if n2 == node:
node_to_info[n1]['color'] = 'green'
node_to_info[n1]['value'] = 2
edge_color = 'green'
value = 2
edges_representation.append(
{
"from": index1,
"to": index2,
"color": edge_color,
"value": value
},
)
info = graph_info.GraphInfo(
graph=g,
nodes=list(node_to_info.values()),
edges=edges_representation,
direct_dependencies=sorted(direct_dependencies),
affected_targets=affected_targets
)
return info | 39667e034379477086062a9032f5007c12aba30e | 3,654,740 |
from pathlib import Path
def is_submodule_repo(p: Path) -> bool:
"""
"""
if p.is_file() and '.git/modules' in p.read_text():
return True
return False | 26675ee25e431778325081ec80d45ff3d72c2046 | 3,654,741 |
def shift_contig(df2, remove):
"""
    The function appends the shifted fragments produced by the
    sort_cluster_seq function.
    Parameters
    ----------
    df2 : pandas DataFrame
        DataFrame with the NRPS cluster fragment.
    remove : list
        List of cluster fragments which should be removed.
Returns
-------
df2 : pandas DataFrame
Corrected DataFrame with NRPS meta information.
"""
for gen in remove:
df2 = df2.append(gen)
return df2 | 7df891785fc58d818af5b423c7fdbc3c4382951f | 3,654,742 |
import itertools as it
import matplotlib as mpl
def get_color_cycle(n=None):
"""Return the matplotlib color cycle.
:param Optional[int] n:
if given, return a list with exactly n elements formed by repeating
the color cycle as necessary.
Usage::
blue, green, red = get_color_cycle(3)
"""
cycle = mpl.rcParams["axes.prop_cycle"].by_key()["color"]
if n is None:
return it.cycle(cycle)
return list(it.islice(it.cycle(cycle), n)) | f19393d9a5c61ab158517261c258cc46b7a9701b | 3,654,743 |
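Example usage of the helper above; with `n=None` the result is an endless iterator, so only the sized form can be unpacked into a list.
colors = get_color_cycle(3)      # e.g. ['#1f77b4', '#ff7f0e', '#2ca02c'] with the default rcParams
assert len(colors) == 3
endless = get_color_cycle()      # an itertools.cycle over the rc colour cycle; call next() as needed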
def _bocs_consistency_mapping(x):
"""
This is for the comparison with BOCS implementation
:param x:
:return:
"""
horizontal_ind = [0, 2, 4, 7, 9, 11, 14, 16, 18, 21, 22, 23]
vertical_ind = sorted([elm for elm in range(24) if elm not in horizontal_ind])
return x[horizontal_ind].reshape((ISING_GRID_H, ISING_GRID_W - 1)), x[vertical_ind].reshape((ISING_GRID_H - 1, ISING_GRID_W)) | bd8fe5261e024f5d5cdf1a2d77229dd564d947bf | 3,654,744 |
def get_document(name, key):
"""Get document from Database"""
constructor = Constructor()
inst_coll = constructor.factory(kind='Collection', name=name)
inst_doc = Document(inst_coll)
doc = inst_doc.get_document(key)
return doc | acd4e8117c0002d323a4fad79704a33437481657 | 3,654,745 |
from datetime import datetime
import json
def predict() -> str:
"""predict the movie genres based on the request data"""
cur = db_connection.cursor()
try:
input_params = __process_input(request.data)
input_vec = vectorizer.transform(input_params)
prediction = classifier.predict(input_vec)
predictions = binarizer.inverse_transform(prediction)
for count, i in enumerate(input_params):
pred = ", ".join(predictions[count])
cur.execute(
f"INSERT INTO prediction(input, output, time) VALUES('{i}', '{pred}', '{datetime.datetime.now()}' )"
)
db_connection.commit()
except Exception as e:
response = app.response_class(
response=json.dumps({"error": f"{e.__class__} occured"}), status=400
)
return response
response = app.response_class(
response=json.dumps({"predictions:": binarizer.inverse_transform(prediction)}),
status=200,
)
return response | 0ae49a8ab05d1df1c0beb07f322262a7a7ac8ee2 | 3,654,746 |
def SignificanceWeights(serializer, decay):
"""Multiplies a binary mask with a symbol significance mask."""
def significance_weights(mask):
# (repr,) -> (batch, length, repr)
# significance = [0, 1, 2]
significance = serializer.significance_map
assert significance.shape[0] == mask.shape[2]
# significance = batch_size * [0, 1, 2]
significance = jnp.repeat(
significance[np.newaxis, ...], repeats=mask.shape[0], axis=0)
# significance = batch_size * [0, 1, 2] * mask.shape[1]
significance = jnp.repeat(
significance[..., jnp.newaxis], repeats=mask.shape[1], axis=2)
# significance = batch_size * mask.shape[1] * [0, 1, 2]
significance = jnp.swapaxes(significance, 1, 2)
assert significance.shape == mask.shape
sig_weights = mask * decay ** significance
return sig_weights
return tl.Fn('SignificanceWeights', significance_weights) | 545ac45149b8653f502d2dd864f92a40ee5919cb | 3,654,747 |
def check_fun_inter_allocation(fun_inter, data, **kwargs):
"""Check allocation rules for fun_inter then returns objects if check"""
out = None
check_allocation_fun_inter = get_allocation_object(data, kwargs['xml_fun_inter_list'])
if check_allocation_fun_inter is None:
check_fe = check_fun_elem_data_consumption(
data, fun_inter,
kwargs['xml_fun_elem_list'],
kwargs['xml_function_list'],
kwargs['xml_consumer_function_list'],
kwargs['xml_producer_function_list'])
if all(i for i in check_fe):
out = [fun_inter, data]
fun_inter.add_allocated_data(data.id)
elif True in check_fe:
if check_fe[0] is True:
print(f"Data {data.name} has only consumer(s) "
f"allocated to a functional element exposing "
f"{fun_inter.name}, {data.name} not "
f"allocated to {fun_inter.name}")
elif check_fe[1] is True:
print(f"Data {data.name} has only producer(s) "
f"allocated to a functional element exposing "
f"{fun_inter.name}, {data.name} not "
f"allocated to {fun_inter.name}")
else:
print(f"Data {data.name} has no producer(s) nor "
f"consumer(s) allocated to functional elements "
f"exposing {fun_inter.name}, {data.name} not "
f"allocated to {fun_inter.name}")
return out | 61f17844953f3260a23aff35a2f090a028dd9212 | 3,654,748 |
from typing import Optional
def kernel_bw_lookup(
compute_device: str,
compute_kernel: str,
caching_ratio: Optional[float] = None,
) -> Optional[float]:
"""
Calculates the device bandwidth based on given compute device, compute kernel, and
caching ratio.
Args:
compute_kernel (str): compute kernel.
compute_device (str): compute device.
caching_ratio (Optional[float]): caching ratio used to determine device bandwidth
if UVM caching is enabled.
Returns:
float: the device bandwidth.
"""
caching_ratio = caching_ratio if caching_ratio else UVM_CACHING_RATIO
lookup = {
# CPU
("cpu", EmbeddingComputeKernel.DENSE.value): 0.35 * DDR_MEM_BW,
("cpu", EmbeddingComputeKernel.SPARSE.value): 0.35 * DDR_MEM_BW,
("cpu", EmbeddingComputeKernel.BATCHED_DENSE.value): 0.5 * DDR_MEM_BW,
("cpu", EmbeddingComputeKernel.BATCHED_FUSED.value): 1 * DDR_MEM_BW,
("cpu", EmbeddingComputeKernel.BATCHED_QUANT.value): 1 * DDR_MEM_BW,
# CUDA
("cuda", EmbeddingComputeKernel.DENSE.value): 0.35 * HBM_MEM_BW,
("cuda", EmbeddingComputeKernel.SPARSE.value): 0.35 * HBM_MEM_BW,
("cuda", EmbeddingComputeKernel.BATCHED_DENSE.value): 0.5 * HBM_MEM_BW,
("cuda", EmbeddingComputeKernel.BATCHED_FUSED.value): 1 * HBM_MEM_BW,
("cuda", EmbeddingComputeKernel.BATCHED_FUSED_UVM.value): DDR_MEM_BW / 10,
("cuda", EmbeddingComputeKernel.BATCHED_FUSED_UVM_CACHING.value): (
caching_ratio * HBM_MEM_BW + (1 - caching_ratio) * DDR_MEM_BW
)
/ 10,
("cuda", EmbeddingComputeKernel.BATCHED_QUANT.value): 1 * HBM_MEM_BW,
("cuda", EmbeddingComputeKernel.BATCHED_QUANT_UVM.value): DDR_MEM_BW / 10,
("cuda", EmbeddingComputeKernel.BATCHED_QUANT_UVM_CACHING.value): (
caching_ratio * HBM_MEM_BW + (1 - caching_ratio) * DDR_MEM_BW
)
/ 10,
}
return lookup.get((compute_device, compute_kernel)) | efd70d5c2e5fc9295bccbfb05113474ac40ff1c9 | 3,654,749 |
import ctypes
def spkltc(targ, et, ref, abcorr, stobs):
"""
Return the state (position and velocity) of a target body
relative to an observer, optionally corrected for light time,
expressed relative to an inertial reference frame.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkltc_c.html
:param targ: Target body.
:type targ: int
:param et: Observer epoch.
:type et: float
:param ref: Inertial reference frame of output state.
:type ref: str
:param abcorr: Aberration correction flag.
:type abcorr: str
:param stobs: State of the observer relative to the SSB.
:type stobs: 6-Element Array of floats
:return:
One way light time between observer and target,
Derivative of light time with respect to time
:rtype: tuple
"""
assert len(stobs) == 6
    targ = ctypes.c_int(targ)
et = ctypes.c_double(et)
ref = stypes.stringToCharP(ref)
abcorr = stypes.stringToCharP(abcorr)
stobs = stypes.toDoubleVector(stobs)
starg = stypes.emptyDoubleVector(6)
lt = ctypes.c_double()
dlt = ctypes.c_double()
libspice.spkltc_c(targ, et, ref, abcorr, stobs, starg, ctypes.byref(lt),
ctypes.byref(dlt))
return stypes.cVectorToPython(starg), lt.value, dlt.value | e3c701e7e9b2c15d5b182bd4fff395a2cbf5d849 | 3,654,750 |
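# Usage sketch (assumes a SpiceyPy-style setup with the CSPICE library loaded and the relevant
# SPK kernels furnished; the observer state below is purely illustrative):
et_example = 0.0  # seconds past J2000
stobs_example = [1.0e8, 0.0, 0.0, 0.0, 10.0, 0.0]  # observer state w.r.t. SSB, km and km/s
starg, lt, dlt = spkltc(499, et_example, "J2000", "LT+S", stobs_example)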
def create_container(
container_image: str,
name: str = None,
volumes: t.List[str] = None,
) -> str:
"""Create a new working container from provided container image.
Args:
container_image (str): The container image to start from.
name (str, optional): The container name.
volumes (t.List[str], optional): Any volumes to bind into the container.
Returns:
str: The container name/id used for further manipulation.
"""
args = []
if name:
args.extend(["--name", name])
if volumes:
args.extend(_unwind_list("--volume", volumes))
command = ["buildah", "from"] + args + [container_image]
result = platform_utils.run_command(command, capture_stdout=True)
container = result.stdout.strip()
logger.success(f"Created '{container}' from image '{container_image}'")
return container | be50e84169e5d3df5dfd9730493d7daa9788049b | 3,654,751 |
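# Usage sketch (requires buildah on the host; image name and volume mapping are hypothetical):
container = create_container(
    "docker.io/library/alpine:latest",
    name="builder",
    volumes=["/tmp/build-context:/mnt/context"],
)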
def single_data_path(client, node_id):
"""
In order for a shrink to work, it should be on a single filesystem, as
shards cannot span filesystems. Return `True` if the node has a single
filesystem, and `False` otherwise.
:arg client: An :class:`elasticsearch.Elasticsearch` client object
:rtype: bool
"""
return len(client.nodes.stats()['nodes'][node_id]['fs']['data']) == 1 | ae0b34f82acb6d12faf525f0270250cdf471a6f8 | 3,654,752 |
def sortorder(obj):
"""
Trys to smartly determine the sort order for this object ``obj``
"""
if hasattr(obj, 'last'):
return obj.last.timestamp()
if isinstance(obj, str):
# First assume pure numeric
try:
return float(obj)
except ValueError:
pass
# Assume it is of the form
# AB [N.M] PPP words'
try:
return float(obj.split('[')[1].split(']')[0])
except (IndexError, ValueError):
return strip_tags(obj).strip()
return None | 674ee77a87ccd7a0bd89a88b88a2682926a1135e | 3,654,753 |
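# Usage sketch: sortorder is meant as a sort key, e.g. for strings that are either plain
# numbers or of the "AB [N.M] words" form (every key below resolves to a float):
items = ["10", "2", "AB [1.5] note"]
ordered = sorted(items, key=sortorder)  # ["AB [1.5] note", "2", "10"]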
import json
import re
def get_more_details_of_post(post_url: str) -> json:
"""
:param post_url: the url of an imgur post
:return: Details like Virality-score, username etc in JSON format
"""
details = {}
try:
request = HTMLSession().get(post_url)
        # sometimes the request isn't properly made, hence call again
        if len(request.html.find('script')) < 18:
            request = HTMLSession().get(post_url)
            # handle when it's not there at all, even after the retry
            if len(request.html.find('script')) < 18:
                return details
regex = 'item: ({.+} )' # regex to isolate the `item` dict.
# 18th script tag has the `item` dict. this is tested on more than 1500 links.
matched = re.search(regex, request.html.find(
'script')[18].text).group(0)
item = json.loads(matched[5:])
details['username'] = item['account_url']
details['comment_count'] = item['comment_count']
details['downs'] = item['downs']
details['ups'] = item['ups']
details['points'] = item['points']
details['score'] = item['score']
details['timestamp'] = item['timestamp']
details['views'] = item['views']
details['favorite_count'] = item['favorite_count']
details['hot_datetime'] = item['hot_datetime']
details['nsfw'] = item['nsfw']
        details['platform'] = 'Not Detected' if item['platform'] is None else item['platform']
details['virality'] = item['virality']
except Exception as e:
print(e)
return details | dd3d622c8a7e8f61daf24c2d0cc6752323d4693e | 3,654,754 |
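# Usage sketch (hypothetical gallery URL; requires requests_html's HTMLSession in scope):
details = get_more_details_of_post("https://imgur.com/gallery/abc123")
print(details.get("ups"), details.get("virality"))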
import struct
import hmac
import hashlib
def subkey_public_pair_chain_code_pair(public_pair, chain_code_bytes, i):
"""
Yield info for a child node for this node.
public_pair:
base public pair
chain_code:
base chain code
i:
the index for this node.
Returns a pair (new_public_pair, new_chain_code)
"""
i_as_bytes = struct.pack(">l", i)
sec = public_pair_to_sec(public_pair, compressed=True)
data = sec + i_as_bytes
I64 = hmac.HMAC(key=chain_code_bytes, msg=data, digestmod=hashlib.sha512).digest()
I_left_as_exponent = from_bytes_32(I64[:32])
x, y = public_pair
the_point = I_left_as_exponent * ecdsa.generator_secp256k1 + \
ecdsa.Point(ecdsa.generator_secp256k1.curve(), x, y, ORDER)
if the_point == INFINITY:
logger.critical(_SUBKEY_VALIDATION_LOG_ERR_FMT)
raise DerivationError('K_{} == {}'.format(i, the_point))
I_left_as_exponent = from_bytes_32(I64[:32])
if I_left_as_exponent >= ORDER:
logger.critical(_SUBKEY_VALIDATION_LOG_ERR_FMT)
raise DerivationError('I_L >= {}'.format(ORDER))
new_public_pair = the_point.pair()
new_chain_code = I64[32:]
return new_public_pair, new_chain_code | 8f31eb0ae3b063964ff46bcf6c78431d39d0e2ba | 3,654,755 |
from typing import Optional
def get_registry_description(metaprefix: str) -> Optional[str]:
"""Get the description for the registry, if available.
:param metaprefix: The metaprefix of the registry
:return: The description for the registry, if available, otherwise ``None``.
>>> get_registry_description('prefixcommons')
'A registry of commonly used prefixes in the life sciences and linked data'
>>> get_registry_description('missing')
None
"""
registry = get_registry(metaprefix)
if registry is None:
return None
return registry.description | 12b7aac7f880d6699ca85add1065eca49a06d278 | 3,654,756 |
import numpy as np
import tqdm
def evaluate(model, valid_exe, valid_ds, valid_prog, dev_count, metric):
"""evaluate """
acc_loss = 0
acc_top1 = 0
cc = 0
for feed_dict in tqdm.tqdm(
multi_device(valid_ds.generator(), dev_count), desc='evaluating'):
if dev_count > 1:
loss, top1 = valid_exe.run(
feed=feed_dict,
fetch_list=[model.metrics[0].name, model.metrics[1].name])
loss = np.mean(loss)
top1 = np.mean(top1)
else:
loss, top1 = valid_exe.run(
valid_prog,
feed=feed_dict,
fetch_list=[model.metrics[0].name, model.metrics[1].name])
acc_loss += loss
acc_top1 += top1
cc += 1
ret = {"loss": float(acc_loss / cc), "top1": float(acc_top1 / cc)}
return ret | 7b228e7cadd71ec1ac31436767b92c4dadb5ec53 | 3,654,757 |
def _get_rank(player):
"""Get the rank of a player"""
cursor = _DB.cursor()
try:
cursor.execute("SELECT score FROM scores WHERE player = ?", (player.lower(),))
rows = cursor.fetchall()
if not rows:
return 0
ps = rows[0][0]
cursor.execute("SELECT count(*) FROM scores WHERE score > ?", (ps,))
rows = cursor.fetchall()
return 1+rows[0][0]
finally:
cursor.close() | e556b9fb75f6b40c8c1be8759255dfc5953a1e9a | 3,654,758 |
import os
def getDataFromFileList(filedir):
"""
Reads all data from each file to one big data set ordered as:
[[info],[[residue],[data]]]
"""
data = []
    filelist = os.listdir(filedir)
print("Loading data from data dir\n")
if len(filelist)>0:
print("DataFiles included:\n ----------------------------------")
else:
print("No data files found\n")
return data
for f in filelist:
if "cpmg" not in f :
continue
d1 ={}
d1["info"]= getFileInfo(filedir+f)
if not d1["info"]:
return data
if not d1["info"]["type"] =="fit" and not d1["info"]["type"] == "exp": #This is to avoid additional files that have the same name
continue
else:
print("| "+f+" |")
d1["resdata"]=readExpFile(filedir+f,d1["info"]["type"])
data.append(d1)
print(" ----------------------------------\n")
return data | 7314aee3defcd72a6b7e7edbdd8be57393de39c5 | 3,654,759 |
import pydoc
def spec(func):
"""return a string with Python function specification"""
doc = pydoc.plain(pydoc.render_doc(func))
return doc.splitlines()[2] | 00b96364f77141fedd7d50396946fd4e29cc5d02 | 3,654,760 |
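# Example: the rendered signature line of a builtin (exact text varies by Python version):
print(spec(len))  # e.g. "len(obj, /)"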
def brm_weights(P, r, Γ, X):
"""Bellman-residual minimization fixed-point weights.
TODO: Need to actually go through the details to make sure this is right
"""
assert linalg.is_stochastic(P)
assert X.ndim == 2
assert len(X) == len(P)
ns = len(P)
Γ = as_diag(Γ, ns)
# Calculate intermediate quantities
I = np.eye(ns)
dist = linalg.stationary(P)
D = np.diag(dist)
Π = X @ pinv(X.T @ D @ X) @ X.T @ D
A = (X - P @ Γ @ Π @ X).T @ (X - P @ Γ @ Π @ X)
b = (X - P @ Γ @ Π @ X).T @ r
return pinv(A) @ b | a4392f9d33a2cdddd2af5b0002af3a750fd64ab8 | 3,654,761 |
import os
import time
def file_age(file_name):
"""
Returns the age of a file in seconds from now. -1 if the file does not exist.
:param file_name: file name
.. versionadded:: 9.3.1
"""
if not os.path.exists(file_name):
return -1
return time.time() - os.path.getmtime(file_name) | 9cefc1da2f7ab1c44fbe9dc4f63a5d51bc088ab8 | 3,654,762 |
import posixpath
def IsVirus(mi, log):
"""Test: a virus is any message with an attached executable
    I've also noticed the viruses come in as wav and midi attachments
so I trigger on those as well.
This is a very paranoid detector, since someone might send me a
binary for valid reasons. I white-list everyone who's sent me
email before so it doesn't affect me.
"""
for part in mi.msg.walk():
if part.get_main_type() == 'multipart':
continue
filename = part.get_filename()
if filename is None:
if part.get_type() in ["application/x-msdownload",
"audio/x-wav", "audio/x-midi"]:
# Only viruses send messages to me with these types
log.pass_test(VIRUS)
return ("it has a virus-like content-type (%s)" %
part.get_type())
else:
extensions = "bat com exe pif ref scr vbs wsh".split()
base, ext = posixpath.splitext(filename)
if ext[1:].lower() in extensions:
log.pass_test(VIRUS)
return "it has a virus-like attachment (%s)" % ext[1:]
return False | e30e91951ad49395d87bef07926cfdff4d15b3e2 | 3,654,763 |
def to_curl(request, compressed=False, verify=True):
"""
    Returns a string with the equivalent curl command for the provided request object
    Parameters
    ----------
    compressed : bool
        If `True` then `--compressed` argument will be added to result
    verify : bool
        If `False` then `--insecure` argument will be added to result
    """
parts = [
('curl', None),
('-X', request.method),
]
for k, v in sorted(request.headers.items()):
parts += [('-H', '{0}: {1}'.format(k, v))]
if request.body:
body = request.body
if isinstance(body, bytes):
body = body.decode('utf-8')
parts += [('-d', body)]
if compressed:
parts += [('--compressed', None)]
if not verify:
parts += [('--insecure', None)]
parts += [(None, request.url)]
flat_parts = []
for k, v in parts:
if k:
flat_parts.append(quote(k))
if v:
flat_parts.append(quote(v).replace("\n", "\\n"))
return ' '.join(flat_parts) | b462f62031f4fe757bb7a45b50ced9bc2ea6a9b5 | 3,654,764 |
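# Usage sketch (assumes the `requests` library; to_curl only needs an object exposing
# .method, .headers, .body and .url, which a PreparedRequest provides):
import requests
prepared = requests.Request("POST", "https://httpbin.org/post", json={"key": "value"}).prepare()
print(to_curl(prepared, compressed=True, verify=False))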
def prox_trace_indicator(a, lamda):
"""Time-varying latent variable graphical lasso prox."""
es, Q = np.linalg.eigh(a)
xi = np.maximum(es - lamda, 0)
return np.linalg.multi_dot((Q, np.diag(xi), Q.T)) | 85d6cb26c7a35dbab771e0a9f9c8979fba90e680 | 3,654,765 |
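# Numeric sketch: soft-threshold the eigenvalues of a small symmetric matrix at lamda=0.1.
import numpy as np
a_example = np.array([[2.0, 0.5], [0.5, 1.0]])
shrunk = prox_trace_indicator(a_example, lamda=0.1)  # eigenvalues reduced by 0.1, floored at 0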
def get_gamma_non_jitted(esys):
"""Get log gamma
Returns
-------
float[:]
"""
if isinstance(esys.species[0].logc, float):
v = np.empty(len(esys.species))
else:
v = np.empty(len(esys.species), dtype=object)
for i, sp in enumerate(esys.species):
v[i] = 10.0 ** (sp.logg)
return v
# return np.array([10.0**(sp.logg) for sp in self.species])
# v = np.empty(len(self.species))
# for i, sp in enumerate(self.species):
# v[i] = 10.0**(sp.logg)
# return v | f3d7f4b96676a10b7065196aac247006019da31e | 3,654,766 |
def active_matrices_from_extrinsic_euler_angles(
basis1, basis2, basis3, e, out=None):
"""Compute active rotation matrices from extrinsic Euler angles.
Parameters
----------
basis1 : int
Basis vector of first rotation. 0 corresponds to x axis, 1 to y axis,
and 2 to z axis.
basis2 : int
Basis vector of second rotation. 0 corresponds to x axis, 1 to y axis,
and 2 to z axis.
basis3 : int
Basis vector of third rotation. 0 corresponds to x axis, 1 to y axis,
and 2 to z axis.
e : array-like, shape (..., 3)
Euler angles
out : array, shape (..., 3, 3), optional (default: new array)
Output array to which we write the result
Returns
-------
Rs : array, shape (..., 3, 3)
Rotation matrices
"""
e = np.asarray(e)
R_shape = e.shape + (3,)
R_alpha = active_matrices_from_angles(basis1, e[..., 0].flat)
R_beta = active_matrices_from_angles(basis2, e[..., 1].flat)
R_gamma = active_matrices_from_angles(basis3, e[..., 2].flat)
if out is None:
out = np.empty(R_shape)
out[:] = np.einsum(
"nij,njk->nik", np.einsum("nij,njk->nik", R_gamma, R_beta),
R_alpha).reshape(R_shape)
return out | 50218d9ce2296e3c4952cc77fe64e30c19e03f77 | 3,654,767 |
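# Sketch: a batch of extrinsic x-y-z Euler angles -> rotation matrices (relies on the module's
# active_matrices_from_angles helper being in scope):
import numpy as np
e_example = np.array([[0.1, 0.2, 0.3], [0.0, np.pi / 2, 0.0]])
Rs = active_matrices_from_extrinsic_euler_angles(0, 1, 2, e_example)  # shape (2, 3, 3)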
import sys
import logging
import traceback
import wx
import medipy.io
def save(image, parent=None):
""" Save an image with appropriate dialogs (file selector)
Return the chosen save path or None
parent : parent wx Window for dialogs
"""
dialog = ImageFileDialog(parent, style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
if dialog.ShowModal() != wx.ID_OK :
return None
path = dialog.GetPath()
if isinstance(image, list) :
periodic_progress_dialog = PeriodicProgressDialog(0.2, "Saving files", "Saving ...")
worker_thread = WorkerThread(periodic_progress_dialog,target=medipy.io.save_serie, args=(image,path,))
worker_thread.start()
periodic_progress_dialog.start()
worker_thread.join()
periodic_progress_dialog.Destroy()
else :
try :
medipy.io.save(image, path)
return path
except :
# Try by adding a ".nii" suffix
try :
path += ".nii"
medipy.io.save(image, path)
return path
except :
exc_info = sys.exc_info()
logging.error("".join(traceback.format_exception(*exc_info)))
wx.MessageBox("Could not save image to %s: %s"%(path, exc_info[1]),
"Could not save image", wx.OK|wx.ICON_ERROR) | 83bc917f8fe21442712b0a8979bb9e8ddfdd80a7 | 3,654,768 |
def runQuery(scenarioID):
"""
    Run a query that acquires the data from the lrs for one specific dialoguetrainer scenario
\n
:param scenarioID: The id of the scenario to request the data from \t
:type scenarioID: int \n
:returns: The data for that scenario or error \t
:rtype: [Dict<string, mixed>] | {error} \n
"""
return (
lrs.Query()
.where(lrs.Attr.ACTIVITY, lrs.IS, f"https://en.dialoguetrainer.app/scenario/play/{scenarioID}")
.where(lrs.Attr.VERB, lrs.IS, "https://adlnet.gov/expapi/verbs/completed")
.select(lrs.Attr.ACTOR, lrs.Attr.RESULT)
.execute()
) | 0f57a4468354680b315a65263593979149bdb186 | 3,654,769 |
def is_spaceafter_yes(line):
"""
SpaceAfter="Yes" extracted from line
"""
if line[-1] == "_":
return False
for ddd in line[MISC].split("|"):
kkk, vvv = ddd.split("=")
if kkk == "SpaceAfter":
return vvv == "Yes"
raise ValueError | 5693c8874ec9676bf19d9b1cb7ead5c1772a3f0b | 3,654,770 |
def linear_scheduler(optimizer, warmup_steps, training_steps, last_epoch=-1):
"""linear_scheduler with warmup from huggingface"""
def lr_lambda(current_step):
if current_step < warmup_steps:
return float(current_step) / float(max(1, warmup_steps))
return max(
0.0,
float(training_steps - current_step)
/ float(max(1, training_steps - warmup_steps)),
)
return LambdaLR(optimizer, lr_lambda, last_epoch) | d9446ede5be0ed981ae00b0bccd494017057d834 | 3,654,771 |
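# Usage sketch (assumes a PyTorch optimizer named `optimizer` already exists; LambdaLR comes
# from torch.optim.lr_scheduler): warm up for 100 steps, then decay linearly to 0 by step 1000.
scheduler = linear_scheduler(optimizer, warmup_steps=100, training_steps=1000)
for _ in range(1000):
    ...  # forward / backward / optimizer.step()
    scheduler.step()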
from functools import reduce
import operator
# Note: X, Id, MEASA and the TDM helpers used below are expected to come from the QGL
# package; they are not imported in this snippet.
def MajorityVoteN(qubits,
nrounds,
prep=[],
meas_delay=1e-6,
add_cals=False,
calRepeats=2):
"""
Majority vote across multiple measurement results (same or different qubits)
Parameters
----------
qubits : Channels.LogicalChannel tuple
A hashable (immutable) tuple of qubits for majority vote
nrounds: int
Number of consecutive measurements
prep : boolean iterable, optional
Array of binary values mapping X(q) pulses to the list of qubits
proivided. Ex: (q1,q2), prep=(1,0) -> would apply a pi pulse to q1
before the majority vote measurement. Default = []
measDelay : int/float, optional
Delay between syndrome check rounds (seconds)
add_cals : boolean, optional
Whether to append calibration pulses to the end of the sequence
calRepeats : int, optional
How many times to repeat calibration scalings (default 2)
Returns
-------
metafile : string
Path to a json metafile with details about the sequences and paths to
compiled machine files
Examples
--------
>>> mf = MajorityVoteN((q1, q2, q3), 10);
Compiled 1 sequences.
o INVALIDATE(channel=None, addr=0x1, mask=0x0)
o WRITEADDR(channel=None, addr=0x1, value=0xfffff)
MAJORITYMASK(in_addr=1, out_addr=0)
o INVALIDATE(channel=None, addr=0xa, mask=0xfffff)
o INVALIDATE(channel=None, addr=0xb, mask=0x1)
MAJORITY(in_addr=a, out_addr=b)
>>> mf
'/path/to/exp/exp-meta.json'
"""
nqubits = len(qubits)
seqs = [MajorityMask(1, 0, nrounds*nqubits),
Invalidate(10, nrounds*nqubits),
Invalidate(11, 1)]
if prep:
seqs += [reduce(operator.mul,
[X(q) for n,q in enumerate(qubits) if prep[n]])]
for n in range(nrounds):
seqs += [reduce(operator.mul,
[MEASA(q, (10, nqubits*n+m)) for m,q in enumerate(qubits)]),
Id(qubits[0],meas_delay)]
seqs+=MajorityVote(10,11, nrounds*nqubits)
seqs+=qwait("RAM", 11)
seqs+=[Id(qubits[0],100e-9)]
seqs+=qif(1,[X(qubits[0])]) # placeholder for any conditional operation
seqs=[seqs]
if add_cals:
seqs += create_cal_seqs(qubits,
calRepeats)
metafile = compile_to_hardware(seqs,
'MajorityVote/MajorityVote',
tdm_seq=True)
return metafile | 7bc3b6161d5224ed7adf9248b32b0bd283f50c70 | 3,654,772 |
def getRatios(vect1, vect2):
"""Assumes: vect1 and vect2 are equal length lists of numbers
Returns: a list containing the meaningful values of
vect1[i]/vect2[i]"""
ratios = []
for index in range(len(vect1)):
try:
ratios.append(vect1[index]/vect2[index])
except ZeroDivisionError:
ratios.append(float('nan')) #nan = Not a Number
except:
raise ValueError('getRatios called with bad arguments')
return ratios | e28f871986ab2b1b87cc3671b1c27ad14a0aadf8 | 3,654,773 |
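# Example: element-wise ratios, with nan where the denominator is zero.
print(getRatios([1.0, 2.0, 3.0], [1.0, 0.0, 2.0]))  # [1.0, nan, 1.5]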
def sampleset():
"""Return list with 50 positive and 10 negative samples"""
pos = [(0, i) for i in range(50)]
neg = [(1, i) for i in range(10)]
return pos + neg | 77e5a0ca3ad8757f0ded2aec9d73312a66ac9044 | 3,654,774 |
def recognize_emotion(name, mode, dataset):
"""
The main program for building the system. And we support following kinds of model:
1. Convolutional Neural Network (CNN)
2. Support Vector Machine (SVM)
3. Adaboost
4. Multilayer Perceptron (MLP)
Args:
name: path of the photo for recognizing
        mode: mode used for face detection, 'auto' or 'manual'
dataset: dataset used for face recognition, 'CK+48' or 'fer2013'
Returns:
predicted: emotion prediction (numerical) of detected faces using cnn and fisherfaces
recognition: emotion recognition (categorical) of detected faces using cnn and fisherfaces
Note: result will be printed to standard output, accuracy needs to be improved.
"""
# Load the dataset into a shuffled list of tuples
dataset_tuple_list = dp.load_dataset(dataset)
# Split the dataset into train, test, validation and their corresponding labels
img_train, img_train_label, img_validation, img_validation_label, img_test, img_test_label, le = \
dp.split_data(dataset_tuple_list)
# Fisherfaces: Get the fisherfaces_train and fisherfaces_test feature vectors for further training and predicting
fisher_train, fisher_test, fisher_validation, pca, lda = fe.fisherfaces(img_train, img_test, img_validation,
img_train_label, le)
# Construct and train the selected model with the input train and validation datasets
model_trained = mc.train_model('cnn', fisher_train, img_train_label, fisher_validation,
img_validation_label, 'fisherfaces')
# detect faces in photo and get coordinates of them
face_coordinates, resized_list = fd.detect_face(name, mode)
# project faces to fisherfaces
face_column_matrix = fe.constructRowMatrix(np.array(resized_list))
pca_face = pca.transform(face_column_matrix)
fisherfaces_face = lda.transform(pca_face)
# use trained cnn to recognize emotions
fisherfaces_face = fisherfaces_face.reshape(-1, 1, 6)
prediction = model_trained.predict(fisherfaces_face)
recognized = np.argmax(prediction, axis=1)
print(f'\nprediction:\n{prediction}\nrecognized:\n{recognized}')
return prediction, recognized | eae092866f5190a637bbdb08c4ad7188b9cb88f3 | 3,654,775 |
def feedback(request):
"""
Feedback page. Here one can send feedback to improve the
website further.
"""
return render(request, "groundfloor/common/feedback.html", context = None) | 8dd9f8ae57ca49629820c58b54c7d98d705597bb | 3,654,776 |
from typing import Tuple
def fill_nodata_image(dataset: xr.Dataset) -> Tuple[np.ndarray, np.ndarray]:
"""
Interpolate no data values in image. If no mask was given, create all valid masks
:param dataset: Dataset image
:type dataset: xarray.Dataset containing :
- im : 2D (row, col) xarray.DataArray
:return: a Tuple that contains the filled image and mask
:rtype: Tuple of np.ndarray
"""
if 'msk' in dataset:
img, msk = interpolate_nodata_sgm(dataset['im'].data, dataset['msk'].data)
else:
msk = np.full((dataset['im'].data.shape[0], dataset['im'].data.shape[1]), int(dataset.attrs['valid_pixels']))
img = dataset['im'].data
return img, msk | c245f0cbfbb79737fb85b9b8fb8381aad6373926 | 3,654,777 |
def find(value, a_list):
"""
TestCase for find
>>> find(26, [12,14])
True
>>> find(40, [14, 15, 16, 4, 6, 5])
False
>>> find(1, [1])
False
>>> find(1, [])
False
>>> find(4, [2, 3, 2])
True
"""
    # First turn the list into a <value, count> dictionary
    if a_list is None or len(a_list) < 2:
        return False
    d = {}
    for i in range(len(a_list)):
        if a_list[i] in d:
            d[a_list[i]] = d[a_list[i]] + 1
        else:
            d[a_list[i]] = 1
    # Second pass: look for the complement of each element
    for i in a_list:
        if (value - i) in d:
            # exclude pairing an element with itself
            x = value == i * 2
            if not (x and d[i] == 1):
return True
return False | dd466a8ffa0c760ed0af9ad109b5f4e3b85a62db | 3,654,778 |
def transform_bbox(
bbox, source_epsg_code, target_epsg_code, all_coords=False
):
"""
Transform bbox from source_epsg_code to target_epsg_code,
if necessary
:returns np.array of shape 4 which represent the two coordinates:
left, bottom and right, top.
When `all_coords` is set to `True`, a np.array of shape 8 is given
which represents coords of the bbox in the following order:
left top, right top, right bottom, left bottom
"""
if source_epsg_code != target_epsg_code:
# XXX: Not entirely sure whether transformations between two projected
# coordinate systems always do retain the rectangular shape of a bbox.
# Transformations between an unprojected system (e.g. WGS84) and a
# projected system (e.g. RDNEW) will experience distortion: the
# resulting shape cannot be accurately represented by top left
# and bottom right.
source_srs = get_spatial_reference(source_epsg_code)
target_srs = get_spatial_reference(target_epsg_code)
if source_srs.IsProjected() != target_srs.IsProjected():
msg = "Transforming a bbox from %s to %s is inaccurate."
logger.warning(msg, source_epsg_code, target_epsg_code)
# Transform to [[left, right],[top, bottom]]
input_x = [bbox[BBOX_LEFT], bbox[BBOX_RIGHT]]
input_y = [bbox[BBOX_TOP], bbox[BBOX_BOTTOM]]
if all_coords:
input_x += [bbox[BBOX_RIGHT], bbox[BBOX_LEFT]]
input_y += [bbox[BBOX_TOP], bbox[BBOX_BOTTOM]]
bbox_trans = np.array(
transform_xys(
np.array(input_x), np.array(input_y),
source_epsg_code, target_epsg_code
)
)
if all_coords:
bbox = np.array([
bbox_trans[0][0], bbox_trans[1][0], # left_top
bbox_trans[0][2], bbox_trans[1][2], # right_top
bbox_trans[0][1], bbox_trans[1][1], # right_bottom
bbox_trans[0][3], bbox_trans[1][3] # left_bottom
])
else:
# Transform back to [left,bottom,right,top]
bbox = np.array(
[min(bbox_trans[0]), min(bbox_trans[1]), # left_bottom
max(bbox_trans[0]), max(bbox_trans[1]) # right_top
]
)
return bbox | cd6938b2dfcc02fe9c2a323e2b60339de216dd26 | 3,654,779 |
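# Usage sketch (relies on the module's BBOX_* index constants, get_spatial_reference and
# transform_xys helpers, i.e. a working GDAL/OGR setup); reproject a WGS84 bbox to RD New:
import numpy as np
bbox_wgs84 = np.array([4.8, 52.3, 5.0, 52.4])  # left, bottom, right, top (assumed BBOX_* order)
bbox_rd = transform_bbox(bbox_wgs84, 4326, 28992)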
def distance_metric(vector1, vector2):
""" Returns a score value using Jaccard distance
Args:
vector1 (np.array): first vector with minHash values
vector2 (np.array): second vector with minHash values
Returns:
float: Jaccard similarity
"""
return distance.pdist(np.array([vector1,vector2]), 'jaccard').sum() | e1acbc9eff7ee8bc78be0307acacbca9e9d69265 | 3,654,780 |
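# Example: Jaccard distance between two MinHash signature vectors (0.0 for identical
# signatures, 1.0 for completely disjoint ones).
import numpy as np
sig1 = np.array([1, 2, 3, 4])
sig2 = np.array([1, 2, 5, 6])
d = distance_metric(sig1, sig2)  # 0.5 here: half of the signature positions disagree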
import datetime
def update_calendar(request):
"""
    Update an entry in the academic calendar.
    @param:
        request - contains metadata about the requested page.
    @variables:
        from_date - The starting date for the academic calendar event.
        to_date - The ending date for the academic calendar event.
desc - Description for the academic calendar event.
prev_desc - Description for the previous event which is to be updated.
get_calendar_details = Get the object of the calendar instance from the database for the previous Description.
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
calendar = Calendar.objects.all()
context= {
'academic_calendar' :calendar,
'tab_id' :['4','1']
}
if request.method == "POST":
try:
from_date = request.POST.getlist('from_date')
to_date = request.POST.getlist('to_date')
desc = request.POST.getlist('description')[0]
prev_desc = request.POST.getlist('prev_desc')[0]
from_date = from_date[0].split('-')
from_date = [int(i) for i in from_date]
from_date = datetime.datetime(*from_date).date()
to_date = to_date[0].split('-')
to_date = [int(i) for i in to_date]
to_date = datetime.datetime(*to_date).date()
get_calendar_details = Calendar.objects.all().filter(description=prev_desc).first()
get_calendar_details.description = desc
get_calendar_details.from_date = from_date
get_calendar_details.to_date = to_date
get_calendar_details.save()
except Exception as e:
from_date=""
to_date=""
desc=""
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context) | 804f3f0443d192c0c18a501c40808f2406596491 | 3,654,781 |
def get_section(entry: LogEntry) -> str:
"""returns the section of the request (/twiki/bin/edit/Main -> /twiki)"""
section = entry.request.split('/')[:2]
return '/'.join(section) | dee463b5a662846da01fc2ef8d1c72c5b582e7e5 | 3,654,782 |
def reverse_lookup(d, v):
"""
Reverse lookup all corresponding keys of a given value.
Return a lisy containing all the keys.
Raise and exception if the list is empty.
"""
l = []
for k in d:
if d[k] == v:
l.append(k)
if l == []:
raise ValueError
else:
return l | d68f437aec47df964905779f99d58be84515fb72 | 3,654,783 |
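# Example: all keys that map to the value 2.
reverse_lookup({"a": 1, "b": 2, "c": 2}, 2)  # ['b', 'c']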
def compile_channels_table(*, channels_meta, sources, detectors, wavelengths):
"""Compiles a NIRSChannelsTable given the details about the channels, sources,
detectors, and wavelengths.
"""
table = NIRSChannelsTable()
for channel_id, channel in channels_meta.items():
source_label = sources.label[channel["source_idx"]]
detector_label = detectors.label[channel["detector_idx"]]
source_wavelength = wavelengths[channel["wavelength_idx"]]
table.add_row(
label=f"{source_label}_{detector_label} {source_wavelength:.0f}",
source=channel["source_idx"],
detector=channel["detector_idx"],
source_wavelength=source_wavelength,
)
table.source.table = sources
table.detector.table = detectors
return table | ac3099ef0440962b3fbfeec36f01ae92061b5693 | 3,654,784 |
from pathlib import Path
def cpe2pkg_tool():
"""Unsupported ecosystem CVE fixture."""
bin = Path(__file__).parent.parent / Path('tools/bin/cpe2pkg.jar')
if bin.exists():
return str(bin)
else:
        raise RuntimeError('`cpe2pkg.jar` is not available, please run `make build-cpe2pkg` once.')
import threading
def spawn_thread(func, *args, **kwds):
"""
Utility function for creating and starting a daemonic thread.
"""
thr = threading.Thread(target=func, args=args, kwargs=kwds)
thr.setDaemon(True)
thr.start()
return thr | afaace7e02870390acb297106ac9d35c9a931a59 | 3,654,786 |
import sys
def decision(question):
"""Asks user for a question returning True/False answed"""
if sys.version_info[0] < 3:
if raw_input("\n%s [Y/n] " % question) in ["", "y", "Y"]:
return True
else:
if input("\n%s [Y/n] " % question) in ["", "y", "Y"]:
return True
return False | 8d31e2f11ad9aa2d0d35f35078ffb46ca0718f09 | 3,654,787 |
import uuid
def get_thread_replies(parent_id):
"""
Get all replies to a thread
If the thread does not exist, return an empty list
:param parent_id: Thread ID
:return: replies to thread
"""
assert type(parent_id) is uuid.UUID, """parent_id is not correct type"""
reply_query = Query()
results = db.search(reply_query.parent == str(parent_id))
return results | 1b167dcc4ab09d50cda9feb478c8f1a4d0399e96 | 3,654,788 |
import torch
def compute_acc(pred, labels):
"""
Compute the accuracy of prediction given the labels.
"""
return (torch.argmax(pred, dim=1) == labels).float().sum() / len(pred) | 1b1ad83b9b4ae06f2bc80209e4e7339a421a39f3 | 3,654,789 |
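# Example: accuracy of class scores against integer labels (2 of 3 argmax predictions match).
pred_example = torch.tensor([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
labels_example = torch.tensor([0, 1, 1])
acc = compute_acc(pred_example, labels_example)  # tensor(0.6667)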
async def read_update_status() -> str:
"""Read update status."""
return (
await cache.get(Config.update_status_id())
if await cache.exists(Config.update_status_id())
else "ready_to_update"
) | 0e80f7065665dbe1a41e59fe7c65904e58bb6d8f | 3,654,790 |
def _login_and_select_first_active_device(api):
"""Login Erie Connect and select first active device"""
# These do i/o
_LOGGER.debug(f'{DOMAIN}: erie_connect.login()')
api.login()
_LOGGER.debug(f'{DOMAIN}: erie_connect.select_first_active_device()')
api.select_first_active_device()
if (
api.device is None
or api.auth is None
):
raise InvalidData
return api.device.id | 1733db73978b8a2c92400946b6c044ab3bb4ab23 | 3,654,791 |
def PCopy (inFA, err):
"""
    Make a copy of a GPUFArray
returns copy
* inFA = input Python GPUFArray
* err = Python Obit Error/message stack
"""
################################################################
# Checks
if not PIsA(inFA):
print("Actually ",inFA.__class__)
raise TypeError("inFA MUST be a Python Obit GPUFArray")
outFA = GPUFArray("None")
outFA.me = Obit.GPUFArrayCopy (inFA.me, outFA.me, err.me);
if err.isErr:
OErr.printErrMsg(err, "Error copying GPUFArray")
return outFA
# end PCopy | df1047dc143fb5f8d8f4fd88a2b1ebc0904620a2 | 3,654,792 |
def _get_statuses(policy_type_id, policy_instance_id):
"""
shared helper to get statuses for an instance
"""
_instance_is_valid(policy_type_id, policy_instance_id)
prefixes_for_handler = "{0}{1}.{2}.".format(HANDLER_PREFIX, policy_type_id, policy_instance_id)
return list(SDL.find_and_get(A1NS, prefixes_for_handler).values()) | fdddc26d3c2b65834d4b047a5565894b0d965f9d | 3,654,793 |
def phase_lines(graph):
""" Determines the phase lines of a graph.
:param graph: Graph
:return: dictionary with node id : phase in cut.
"""
if has_cycles(graph):
raise ValueError("a cyclic graph will not have phaselines.")
phases = {n: 0 for n in graph.nodes()}
q = graph.nodes(in_degree=0)
while q:
n = q.pop(0)
level = phases[n]
children = graph.nodes(from_node=n)
for c in children:
if phases[c] <= level:
phases[c] = level + 1
q.append(c)
return phases | 9f1aab9e487bd258c88b0f149bcf613341945879 | 3,654,794 |
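# Usage sketch (assumes the graph-theory package's Graph class this helper is written for):
g = Graph(from_list=[(1, 2, 1), (1, 3, 1), (2, 4, 1), (3, 4, 1)])
phase_lines(g)  # {1: 0, 2: 1, 3: 1, 4: 2}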
import os
import webbrowser
import requests
def creds() -> Account:
"""Load or obtain credentials for user."""
credentials = "8da780f3-5ea0-4d97-ab13-9e7976370624"
protocol = MSGraphProtocol(timezone="Europe/Stockholm")
scopes = protocol.get_scopes_for(SCOPES)
token_backend = FileSystemTokenBackend(
token_path=os.path.dirname(__file__), token_filename="o365_token.txt"
)
connection = Connection(
credentials, auth_flow_type="public", token_backend=token_backend
)
account = Account(
credentials,
auth_flow_type="public",
protocol=protocol,
token_backend=token_backend,
)
if (
not os.path.exists("kronoxToGCalendar/logic/o365_token.txt")
and not account.is_authenticated
):
print("AUTH TRIGGERED")
auth_url = connection.get_authorization_url(
requested_scopes=scopes,
redirect_uri="https://kronox-client-api.herokuapp.com/return_token_url",
)
webbrowser.open_new(auth_url[0])
token_req = lambda: requests.get(
"https://kronox-client-api.herokuapp.com/get_token_url"
)
while token_req().text == "None":
continue
token_res_arr = token_req().text.split("&")
print(token_res_arr)
token_code = token_res_arr[0].split("?")[1][5:]
token_state = token_res_arr[1][6:]
token_url = (
"https://login.microsoftonline.com/common/oauth2/nativeclient?code="
+ token_code
+ "&state="
+ token_state
)
connection.request_token(token_url)
print("AUTH PASSED")
account.is_authenticated
return account | d8e1e547d1db443723a58bb583584ede03fe4bd5 | 3,654,795 |
import base64
def b64decode_str(b64string):
"""
Decodes an arbitrary string from a base 64 ASCII string
"""
output = base64.b64decode(b64string).decode("UTF-8")
logger.debug("Decoded %s as %s", b64string, output)
return output | 26475a5380e0a535a9ec12146d0f6e4236d20495 | 3,654,796 |
def BCELossConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Creates a criterion that measures the Binary Cross Entropy
between the target and the output:
The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:
.. math::
\\ell(x, y) = L = \\{l_1,\\dots,l_N\\}^\\top, \\quad
l_n = - w_n \\left[ y_n \\cdot \\log x_n + (1 - y_n) \\cdot \\log (1 - x_n) \\right],
where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
(default ``'mean'``), then
.. math::
\\ell(x, y) = \\begin{cases}
\\operatorname{mean}(L), & \\text{if reduction} = \\text{`mean';}\\\\
\\operatorname{sum}(L), & \\text{if reduction} = \\text{`sum'.}
\\end{cases}
This is used for measuring the error of a reconstruction in for example
an auto-encoder. Note that the targets :math:`y` should be numbers
between 0 and 1.
Notice that if :math:`x_n` is either 0 or 1, one of the log terms would be
mathematically undefined in the above loss equation. PyTorch chooses to set
:math:`\\log (0) = -\\infty`, since :math:`\\lim_{x\\to 0} \\log (x) = -\\infty`.
However, an infinite term in the loss equation is not desirable for several reasons.
For one, if either :math:`y_n = 0` or :math:`(1 - y_n) = 0`, then we would be
multiplying 0 with infinity. Secondly, if we have an infinite loss value, then
we would also have an infinite term in our gradient, since
:math:`\\lim_{x\\to 0} \\frac{d}{dx} \\log (x) = \\infty`.
This would make BCELoss's backward method nonlinear with respect to :math:`x_n`,
and using it for things like linear regression would not be straight-forward.
Our solution is that BCELoss clamps its log function outputs to be greater than
or equal to -100. This way, we can always have a finite loss value and a linear
backward method.
Shape:
- Input: :math:`(N, *)` where :math:`*` means, any number of additional
dimensions
- Target: :math:`(N, *)`, same shape as the input
- Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N, *)`, same
shape as input.
Examples::
>>> m = nn.Sigmoid()
>>> loss = nn.BCELoss()
>>> input = torch.randn(3, requires_grad=True)
>>> target = torch.empty(3).random_(2)
>>> output = loss(m(input), target)
>>> output.backward()"""
argument_parser.add_argument(
"--weight",
help="""a manual rescaling weight given to the loss of each batch element. If given, has to be a Tensor of
size `nbatch`.""",
required=True,
)
argument_parser.add_argument(
"--size_average",
type=bool,
help="""Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in
the batch. Note that for some losses, there are multiple elements per sample. If the field
:attr:`size_average` is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``.""",
default=True,
)
argument_parser.add_argument(
"--reduce",
type=bool,
help="""Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations
for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a
loss per batch element instead and ignores :attr:`size_average`.""",
default=True,
)
argument_parser.add_argument(
"--reduction",
help="""Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no
reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` and
:attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of
those two args will override :attr:`reduction`.""",
required=True,
default="mean",
)
argument_parser.add_argument(
"--__constants__", type=str, action="append", required=True, default="reduction"
)
return argument_parser | d0108459bdad9b2f6fad438bff542624b482ef7d | 3,654,797 |
def gen_cities_avg(climate, multi_cities, years):
"""
Compute the average annual temperature over multiple cities.
Args:
climate: instance of Climate
multi_cities: the names of cities we want to average over (list of str)
years: the range of years of the yearly averaged temperature (list of
int)
Returns:
a pylab 1-d array of floats with length = len(years). Each element in
this array corresponds to the average annual temperature over the given
cities for a given year.
"""
# MY_CODE
return np.array([np.mean([np.mean(climate.get_yearly_temp(city, year))
for city in multi_cities]) for year in years]) | 9609add6a1514d09b42e2494e56c84522d3cb364 | 3,654,798 |
def tangentVectorsOnSphere( points, northPole = np.array([0.0,0.0,1.0]) ):
"""
Acquire a basis for the tangent space at given points on the surface of the unit sphere.
:param points: N x 3 array of N points at which to acquire basis of tangent space.
:param northPole: 3 array of point corresponding to the north pole.
    :return: An N x 3 x 3 array. Each point has three orthogonal tangent vectors of unit length.
    They are constructed such that the first vector points towards the 'northPole'.
    The second vector is orthogonal to both the first vector and the vector from the origin to the point of interest.
    The third vector is equal to the vector between the origin and the point of interest.
    The last dimension represents the elements of the vectors. The next-to-last dimension indexes the vectors.
"""
vectors = np.zeros( (points.shape[0], 3,3) )
# Get third vector
vectors[:, 2, :] = points / np.linalg.norm(points, axis= 1).reshape((-1,1))
# Get second vector
vectors[:, 1, :] = np.cross( northPole.reshape( (1,3) ), vectors[:,2,:] )
# Get first vector
vectors[:, 0, :] = np.cross( vectors[:,2,:], vectors[:,1,:] )
# Normalize vectors
lengths = np.linalg.norm( vectors, axis=2 ).reshape((-1, 3))
inds = np.any( lengths == 0.0, axis=1 )
vectors[inds, :, : ] = np.nan
vectors[~inds, :, :] = vectors[~inds, :, :] / lengths[~inds, :].reshape( (-1,3,1) )
return vectors | bfa23a393ac4d1b38c6c2b19207520db1bd83e03 | 3,654,799 |
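# Example: orthonormal tangent bases at two points on the unit sphere (points at the north
# pole itself degenerate to NaN rows, so they are avoided here).
import numpy as np
pts = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
bases = tangentVectorsOnSphere(pts)  # shape (2, 3, 3); bases[:, 2, :] are the unit position vectors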