max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
sketchgraphs_models/nn/distributed.py | applied-exploration/contraint-based-parametric-modelling | 204 | 12659417 | """Utility modules for distributed and parallel training. """
import torch
class SingleDeviceDistributedParallel(torch.nn.parallel.distributed.DistributedDataParallel):
"""This module implements a module similar to `DistributedDataParallel`, but it accepts
inputs of any shape, and only supports a single device per instance.
"""
def __init__(self, module, device_id, find_unused_parameters=False):
super(SingleDeviceDistributedParallel, self).__init__(
module, [device_id], find_unused_parameters=find_unused_parameters)
def forward(self, *inputs, **kwargs):
if self.require_forward_param_sync:
self._sync_params()
output = self.module(*inputs, **kwargs)
if torch.is_grad_enabled() and self.require_backward_grad_sync:
self.require_forward_param_sync = True
if self.find_unused_parameters:
self.reducer.prepare_for_backward(list(torch.nn.parallel.distributed._find_tensors(output)))
else:
self.reducer.prepare_for_backward([])
return output
def state_dict(self, destination=None, prefix='', keep_vars=False):
return self.module.state_dict(destination, prefix, keep_vars)
def load_state_dict(self, state_dict, strict=True):
return self.module.load_state_dict(state_dict, strict)
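
# --- Illustrative usage sketch (not part of the original module) ---
# Assumes the default process group has already been initialized elsewhere, e.g. via
# torch.distributed.init_process_group(backend='nccl', init_method='env://'); the names
# `MyModel`, `batch` and `device_id` below are placeholders supplied by the training script.
#
#   device_id = 0
#   model = MyModel().cuda(device_id)
#   model = SingleDeviceDistributedParallel(model, device_id)
#   loss = model(batch).sum()
#   loss.backward()                                    # gradients are reduced across ranks
#   torch.save(model.state_dict(), 'checkpoint.pt')    # saved keys match the wrapped module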
|
Python/Numba/Ufunc/julia_ufunc.py | Gjacquenot/training-material | 115 | 12659435 | '''Module containing function for computing Julia sets'''
from numba import guvectorize, void, complex128, int32, float64
@guvectorize([void(complex128[:], float64[:], int32[:], int32[:])],
'(n),(),()->(n)')
def julia_set(domain, max_norm, max_iters, iterations):
for i, z in enumerate(domain):
iterations[i] = 0
while (iterations[i] <= max_iters[0] and
z.real*z.real + z.imag*z.imag <= max_norm[0]*max_norm[0]):
z = z**2 - 0.622772 + 0.42193j
iterations[i] += 1
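

if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original module): evaluate the ufunc on a
    # 2D grid of complex points; the grid bounds, resolution and iteration cap are arbitrary.
    import numpy as np
    xs = np.linspace(-1.8, 1.8, 512)
    ys = np.linspace(-1.8, 1.8, 512)
    domain = xs[np.newaxis, :] + 1j * ys[:, np.newaxis]
    iterations = julia_set(domain, 2.0, np.int32(255))
    print(iterations.shape, iterations.dtype)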
|
apps/forms-flow-ai/forms-flow-api/migrations/versions/0b8739ab2097_cleanup_of_submission_id.py | saravanpa-aot/SBC_DivApps | 132 | 12659467 | """cleanup of submission id
Revision ID: 0b8739ab2097
Revises: <KEY>
Create Date: 2020-09-03 16:19:38.703377
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0b8739ab2097'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('application', 'form_submission_id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('application', sa.Column('form_submission_id', sa.VARCHAR(length=100), autoincrement=False, nullable=False))
# ### end Alembic commands ###
|
deprecated/scripts/data/diff_vec_clustering_attempt/diff_explore.py | grahamwhiteuk/neutralizing-bias | 169 | 12659474 | import sys
import numpy as np
from sklearn.cluster import DBSCAN
import time
import matplotlib.pyplot as plt
def read_vecs(path):
out = []
for l in open(path):
out.append([float(x) for x in l.strip().split()])
return np.array(out)
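# Illustrative note (not part of the original script): read_vecs expects one
# whitespace-separated vector per line, so a file containing
#   0.12 -0.03 0.88
#   0.10 -0.01 0.91
# yields an array of shape (2, 3).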
difs = read_vecs(sys.argv[1])
mu = np.mean(difs)
sd = np.std(difs)
print(mu)
print(sd)
eps = sd * 0.01
min_samples = 4 #? total guess...do better?
print('dbscanning...')
start = time.time()
clustering = DBSCAN(eps=eps, min_samples=min_samples, n_jobs=4).fit(difs)
print('done! took ', time.time() - start)
labels = list(clustering.labels_)
print(labels)
plt.hist(labels)
plt.title('labels')
plt.show()
|
lib/indicators.py | CanadaMontreal/trading-with-python | 318 | 12659496 | '''
Created on Jul 3, 2014
author: <NAME>
License: BSD
Description: Module containing some (technical) indicators
'''
import pandas as pd
def rsi(price, n=14):
''' rsi indicator '''
    gain = price.diff().fillna(0)  # difference between day n and n-1, replace nan (first value) with 0
def rsiCalc(p):
# subfunction for calculating rsi for one lookback period
avgGain = p[p>0].sum()/n
avgLoss = -p[p<0].sum()/n
rs = avgGain/avgLoss
return 100 - 100/(1+rs)
    # run for all periods with a rolling apply
    return gain.rolling(n).apply(rsiCalc, raw=False)  # pd.rolling_apply was removed in pandas >= 0.23
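

if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original module): compute the RSI of a
    # synthetic random-walk price series; the length and seed are arbitrary.
    import numpy as np
    np.random.seed(0)
    price = pd.Series(100 + np.random.randn(250).cumsum())
    print(rsi(price, n=14).tail())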
|
unittest/scripts/auto/py_devapi/validation/mysqlx_type_norecord.py | mueller/mysql-shell | 119 | 12659505 | #@<OUT> Help on Type
NAME
Type - Data type constants.
SYNTAX
mysqlx.Type
DESCRIPTION
The data type constants assigned to a Column object retrieved through
RowResult.get_columns().
PROPERTIES
BIGINT
A large integer.
BIT
A bit-value type.
BYTES
A binary string.
DATE
A date.
DATETIME
A date and time combination.
DECIMAL
A packed "exact" fixed-point number.
ENUM
An enumeration.
FLOAT
A floating-point number.
GEOMETRY
A geometry type.
INT
A normal-size integer.
JSON
A JSON-format string.
MEDIUMINT
A medium-sized integer.
SET
A set.
SMALLINT
A small integer.
STRING
A character string.
TIME
A time.
TINYINT
A very small integer.
FUNCTIONS
help([member])
Provides help about this class and it's members
#@<OUT> Help on Type.BIGINT
NAME
BIGINT - A large integer.
SYNTAX
mysqlx.Type.BIGINT
#@<OUT> Help on Type.BIT
NAME
BIT - A bit-value type.
SYNTAX
mysqlx.Type.BIT
#@<OUT> Help on Type.BYTES
NAME
BYTES - A binary string.
SYNTAX
mysqlx.Type.BYTES
#@<OUT> Help on Type.DATE
NAME
DATE - A date.
SYNTAX
mysqlx.Type.DATE
#@<OUT> Help on Type.DATETIME
NAME
DATETIME - A date and time combination.
SYNTAX
mysqlx.Type.DATETIME
#@<OUT> Help on Type.DECIMAL
NAME
DECIMAL - A packed "exact" fixed-point number.
SYNTAX
mysqlx.Type.DECIMAL
#@<OUT> Help on Type.ENUM
NAME
ENUM - An enumeration.
SYNTAX
mysqlx.Type.ENUM
#@<OUT> Help on Type.FLOAT
NAME
FLOAT - A floating-point number.
SYNTAX
mysqlx.Type.FLOAT
#@<OUT> Help on Type.GEOMETRY
NAME
GEOMETRY - A geometry type.
SYNTAX
mysqlx.Type.GEOMETRY
#@<OUT> Help on Type.INT
NAME
INT - A normal-size integer.
SYNTAX
mysqlx.Type.INT
#@<OUT> Help on Type.JSON
NAME
JSON - A JSON-format string.
SYNTAX
mysqlx.Type.JSON
#@<OUT> Help on Type.MEDIUMINT
NAME
MEDIUMINT - A medium-sized integer.
SYNTAX
mysqlx.Type.MEDIUMINT
#@<OUT> Help on Type.SET
NAME
SET - A set.
SYNTAX
mysqlx.Type.SET
#@<OUT> Help on Type.SMALLINT
NAME
SMALLINT - A small integer.
SYNTAX
mysqlx.Type.SMALLINT
#@<OUT> Help on Type.STRING
NAME
STRING - A character string.
SYNTAX
mysqlx.Type.STRING
#@<OUT> Help on Type.TIME
NAME
TIME - A time.
SYNTAX
mysqlx.Type.TIME
#@<OUT> Help on Type.TINYINT
NAME
TINYINT - A very small integer.
SYNTAX
mysqlx.Type.TINYINT
#@<OUT> Help on Type.help
NAME
help - Provides help about this class and it's members
SYNTAX
mysqlx.Type.help([member])
WHERE
member: If specified, provides detailed information on the given member.
|
pthflops/utils.py | thiagolermen/pytorch-estimate-flops | 194 | 12659517 | import functools
import inspect
import warnings
from typing import Iterable
import torch
def print_table(rows, header=['Operation', 'OPS']):
r"""Simple helper function to print a list of lists as a table
:param rows: a :class:`list` of :class:`list` containing the data to be printed. Each entry in the list
represents an individual row
    :param header: (optional) a :class:`list` containing the header of the table
"""
if len(rows) == 0:
return
col_max = [max([len(str(val[i])) for val in rows]) + 3 for i in range(len(rows[0]))]
row_format = ''.join(["{:<" + str(length) + "}" for length in col_max])
if len(header) > 0:
print(row_format.format(*header))
print(row_format.format(*['-' * (val - 2) for val in col_max]))
for row in rows:
print(row_format.format(*row))
print(row_format.format(*['-' * (val - 3) for val in col_max]))
def same_device(model, input):
# Remove dataparallel wrapper if present
if isinstance(model, torch.nn.DataParallel):
model = model.module
# Make sure that the input is on the same device as the model
if len(list(model.parameters())):
input_device = input.device if not isinstance(input, Iterable) else input[0].device
if next(model.parameters()).device != input_device:
            if isinstance(input, Iterable):
                # .to() is not in-place for tensors, so rebuild the collection with moved tensors
                input = [inp.to(next(model.parameters()).device) for inp in input]
            else:
                input = input.to(next(model.parameters()).device)
return model, input
# Workaround for scopename in pytorch 1.4 and newer
# see: https://github.com/pytorch/pytorch/issues/33463
class scope_name_workaround(object):
def __init__(self):
self.backup = None
def __enter__(self):
def _tracing_name(self_, tracing_state):
if not tracing_state._traced_module_stack:
return None
module = tracing_state._traced_module_stack[-1]
for name, child in module.named_children():
if child is self_:
return name
return None
def _slow_forward(self_, *input, **kwargs):
tracing_state = torch._C._get_tracing_state()
if not tracing_state or isinstance(self_.forward, torch._C.ScriptMethod):
return self_.forward(*input, **kwargs)
if not hasattr(tracing_state, '_traced_module_stack'):
tracing_state._traced_module_stack = []
name = _tracing_name(self_, tracing_state)
if name:
tracing_state.push_scope('%s[%s]' % (self_._get_name(), name))
else:
tracing_state.push_scope(self_._get_name())
tracing_state._traced_module_stack.append(self_)
try:
result = self_.forward(*input, **kwargs)
finally:
tracing_state.pop_scope()
tracing_state._traced_module_stack.pop()
return result
self.backup = torch.nn.Module._slow_forward
setattr(torch.nn.Module, '_slow_forward', _slow_forward)
def __exit__(self, type, value, tb):
setattr(torch.nn.Module, '_slow_forward', self.backup)
# Source: https://stackoverflow.com/questions/2536307/decorators-in-the-python-standard-lib-deprecated-specifically
string_types = (type(b''), type(u''))
def deprecated(reason):
"""
This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
"""
if isinstance(reason, string_types):
# The @deprecated is used with a 'reason'.
#
# .. code-block:: python
#
# @deprecated("please, use another function")
# def old_function(x, y):
# pass
def decorator(func1):
if inspect.isclass(func1):
fmt1 = "Call to deprecated class {name} ({reason})."
else:
fmt1 = "Call to deprecated function {name} ({reason})."
@functools.wraps(func1)
def new_func1(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(
fmt1.format(name=func1.__name__, reason=reason),
category=DeprecationWarning,
stacklevel=2
)
warnings.simplefilter('default', DeprecationWarning)
return func1(*args, **kwargs)
return new_func1
return decorator
elif inspect.isclass(reason) or inspect.isfunction(reason):
# The @deprecated is used without any 'reason'.
#
# .. code-block:: python
#
# @deprecated
# def old_function(x, y):
# pass
func2 = reason
if inspect.isclass(func2):
fmt2 = "Call to deprecated class {name}."
else:
fmt2 = "Call to deprecated function {name}."
@functools.wraps(func2)
def new_func2(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(
fmt2.format(name=func2.__name__),
category=DeprecationWarning,
stacklevel=2
)
warnings.simplefilter('default', DeprecationWarning)
return func2(*args, **kwargs)
return new_func2
else:
raise TypeError(repr(type(reason)))
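

if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): exercise the helpers above.
    # The function name `old_function` is a placeholder.
    @deprecated("please use another function")
    def old_function(x, y):
        return x + y

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        result = old_function(1, 2)

    print_table([['old_function(1, 2) -> {}'.format(result), str(len(caught))]],
                header=['Operation', 'Warnings'])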
|
data/wsj/recover_whitespace.py | thomaslu2000/Incremental-Parsing-Representations | 723 | 12659546 | import glob
import os
from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader
import nltk
import tokenizations # pip install pytokenizations==0.7.2
TOKEN_MAPPING = {
"-LRB-": "(",
"-RRB-": ")",
"-LCB-": "{",
"-RCB-": "}",
"-LSB-": "[",
"-RSB-": "]",
"``": '"',
"''": '"',
"`": "'",
'«': '"',
'»': '"',
    '‘': "'",
    '’': "'",
    '“': '"',
    '”': '"',
'„': '"',
'‹': "'",
'›': "'",
"\u2013": "--", # en dash
"\u2014": "--", # em dash
}
train_splits = ["0" + str(i) for i in range(2, 10)] + [str(i) for i in range(10, 22)]
test_splits = ["23"]
dev_22_splits = ["22"]
dev_24_splits = ["24"]
def glob_raw_files(treebank_root, splits):
# Get all applicable raw files
results = [fname for split in splits for fname in sorted(
glob.glob(os.path.join(treebank_root, 'raw', 'wsj', split, "wsj_????")))]
# Exclude raw files with no corresponding parse
mrg_results = [fname.replace('parsed/mrg/wsj', 'raw/wsj').replace('.mrg', '')
for split in splits for fname in sorted(
glob.glob(os.path.join(treebank_root, 'parsed', 'mrg', 'wsj', split, "wsj_????.mrg")))]
return [fname for fname in results if fname in mrg_results]
def glob_tree_files(target_root, splits):
return [fname for split in splits for fname in sorted(
glob.glob(os.path.join(target_root, split, "wsj_????.tree"))
+ glob.glob(os.path.join(target_root, 'parsed', 'mrg', 'wsj', split, "wsj_????.mrg")))]
def standardize_form(word):
word = word.replace('\\/', '/').replace('\\*', '*')
# Mid-token punctuation occurs in biomedical text
word = word.replace('-LSB-', '[').replace('-RSB-', ']')
word = word.replace('-LRB-', '(').replace('-RRB-', ')')
word = word.replace('-LCB-', '{').replace('-RCB-', '}')
word = TOKEN_MAPPING.get(word, word)
return word
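# Illustrative note (not part of the original script): standardize_form maps PTB escape
# sequences back to surface forms, e.g.
#   standardize_form('-LRB-') == '('
#   standardize_form('``') == '"'
#   standardize_form('\\/') == '/'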
def get_raw_text_for_trees(treebank_root, splits, tree_files):
lines = []
for fname in glob_raw_files(treebank_root, splits):
with open(fname, 'r', encoding="windows-1252") as f:
for line in f:
if line.strip() and not line.startswith('.START'):
                    # Delete invalid characters caused by encoding issues
line = line.replace("Õ", "").replace("å", "")
lines.append(line)
reader = BracketParseCorpusReader('.', tree_files)
target_sents = reader.sents()
line_iter = iter(lines)
line = ""
pairs = []
for target_sent in target_sents:
if not line.strip():
line = next(line_iter)
# Handle PTB-style escaping mismatches
target_sent = [standardize_form(word) for word in target_sent]
# Handle transpositions: sometimes the raw text transposes punctuation,
# while the parsed version cleans up this transposition
if 'U.S..' in ''.join(target_sent):
target_sent = [x.replace('U.S.', 'U.S') for x in target_sent]
if 'Co.,' in ''.join(target_sent) and 'Co,.' in line:
target_sent = [x.replace('Co.', 'Co') for x in target_sent]
if "But that 's" in ' '.join(target_sent) and "But's that" in line:
target_sent = [x.replace("that", "tha") for x in target_sent]
target_sent = [x.replace("'s", "t") for x in target_sent]
if ('-- Freshman football player' in line
or '-- Sophomore football player' in line
or '-- Junior football player' in line
or '-- Senior football player' in line
or '-- Graduate-student football player' in line
or '-- Football player' in line
or '-- Freshman basketball player' in line
or '-- Sophomore basketball player' in line
or '-- Junior basketball player' in line
or '-- Senior basketball player' in line
or '-- Basketball player' in line) and (
'" .' in ' '.join(target_sent)
and target_sent[-1] == '.'):
target_sent = target_sent[:-1]
# Attempt to align raw and parsed text
r2p, p2r = tokenizations.get_alignments(line.replace("`", "'"), target_sent)
# Handle skips: some lines in the raw data are not parsed
while not all(p2r):
go_next = False
if line.startswith('(See') and '-- WSJ' in line:
go_next = True
elif line == 'San Diego ':
go_next = True
elif line == '" ':
go_next = True
if go_next:
line = next(line_iter)
r2p, p2r = tokenizations.get_alignments(line.replace("`", "'"), target_sent)
else:
break
# Handle line breaks in raw format that come in the middle of the sentence
# (such as mid-sentence line breaks in poems)
for _ in range(12): # Loop limit is to aid in debugging
if not all(p2r):
line = line + next(line_iter)
r2p, p2r = tokenizations.get_alignments(line.replace("`", "'"), target_sent)
assert all(p2r)
end = max([max(x) for x in p2r]) + 1
# Trim excess raw text at the start
line_to_save = line[:end]
r2p, p2r = tokenizations.get_alignments(line_to_save.replace("`", "'"), target_sent)
while True:
_, alt_p2r = tokenizations.get_alignments(
'\n'.join(line_to_save.replace("`", "'").splitlines()[1:]), target_sent)
if sum([len(x) for x in p2r]) == sum([len(x) for x in alt_p2r]):
line_to_save = '\n'.join(line_to_save.splitlines()[1:])
else:
break
pairs.append((line_to_save, target_sent))
line = line[end:]
assert len(pairs) == len(target_sents)
return [line for (line, target_sent) in pairs]
def get_words_and_whitespace(treebank_root, splits, tree_files):
reader = BracketParseCorpusReader('.', tree_files)
target_sents = reader.sents()
raw_sents = get_raw_text_for_trees(treebank_root, splits, tree_files)
pairs = []
for line, target_sent in zip(raw_sents, target_sents):
# Fix some errors in the raw text that are also fixed in the parsed trees
if "But's that just" in line:
line = line.replace("But's that just", "But that's just")
if 'Co,.' in line:
line = line.replace('Co,.', 'Co.,')
if 'U.S..' in ''.join(target_sent):
            # Address cases where the underlying "U.S." got tokenized as "U.S." "."
# This is expected in the sentence-final position, but it seems to
# occur in other places, too.
line = line.replace('U.S.', 'U.S..').replace(
'U.S.. market', 'U.S. market').replace(
'U.S.. agenda', 'U.S. agenda').replace(
'U.S.. even', 'U.S. even').replace(
'U.S.. counterpart', 'U.S. counterpart').replace(
'U.S.. unit', 'U.S. unit').replace(
'U.S..,', 'U.S.,')
words = target_sent[:]
target_sent = [standardize_form(word).replace("``", '"') for word in target_sent]
r2p, p2r = tokenizations.get_alignments(line.replace("`", "'"), target_sent)
last_char_for_parsed = [max(x) if x else None for x in p2r]
have_space_after = [None] * len(words)
for i, word in enumerate(target_sent):
if last_char_for_parsed[i] is None:
continue
char_after_word = line[last_char_for_parsed[i]+1:last_char_for_parsed[i]+2]
have_space_after[i] = (char_after_word != char_after_word.lstrip())
# Fix the few cases where the word form in the parsed data is incorrect
if word == "'T-" and target_sent[i+1] == 'is':
target_sent[i] = "'T"
if word == "16" and target_sent[i+1:i+5] == ['64', '-', 'inch', 'opening']:
# This error occurs in the test set, and moreover would affect
# tokenization by introducing an extra '/', so we don't fix it.
# target_sent[i] = "16/"
have_space_after[i] = True
            if word == "Gaming" and target_sent[i-1:i+2] == ['and', 'Gaming', 'company']:
target_sent[i] = "gaming"
pairs.append((target_sent, have_space_after))
# For each token in the treebank, we have now queried the raw string to
# determine if the token should have whitespace following it. The lines
# below are a sanity check that the reconstructed text matches the raw
# version as closely as possible.
to_delete = set()
for indices in p2r:
if not indices:
continue
to_delete |= set(range(min(indices), max(indices)+1)) - set(indices)
raw = list(line)
for i in sorted(to_delete, reverse=True):
del raw[i]
raw = "".join(raw)
raw = " ".join(x.strip() for x in raw.split())
guess = "".join(
[w + (" " if sp else "") for (w, sp) in zip(target_sent, have_space_after)])
if "filings policy-making" in guess:
# The parsed version of this sentence drops an entire span from the raw
# text. Maybe we shouldn't be training on this bad example, but for now
# we'll just skip validating it.
continue
# Fix some issues with the raw text that are corrected in the parsed version
raw = raw.replace("`", "'")
raw = raw.replace("and <Tourism", "and Tourism")
raw = raw.replace("staf reporter", "staff reporter")
if " S$" in raw and " S$" not in guess:
raw = raw.replace(" S$", " US$")
raw = raw.replace("16/ 64-inch opening", "16 64-inch opening")
if raw != guess and raw.replace('."', '".') == guess:
raw = raw.replace('."', '".')
# assert raw == guess
if raw != guess:
print(raw)
print(guess)
print()
return pairs
def get_id_list(target_root, splits):
res = []
for fname in glob_tree_files(target_root, splits):
reader = BracketParseCorpusReader('.', [fname])
num_sents = len(reader.parsed_sents())
doc_id = os.path.splitext(os.path.split(fname)[-1])[0]
for sent_id in range(num_sents):
sent_id = "{}_{:03}".format(doc_id, sent_id)
res.append((doc_id, sent_id))
return res
def write_to_file(treebank3_root, target_root, splits, tree_file, outfile):
words_and_whitespace = get_words_and_whitespace(treebank3_root, splits, [tree_file])
doc_and_sent_ids = get_id_list(target_root, splits)
# print(len(words_and_whitespace), len(doc_and_sent_ids))
assert len(words_and_whitespace) == len(doc_and_sent_ids)
with open(outfile, 'w') as f:
old_doc_id = None
for (doc_id, sent_id), (words, have_space_after) in zip(
doc_and_sent_ids, words_and_whitespace):
if doc_id != old_doc_id:
old_doc_id = doc_id
f.write("# newdoc_id = {}\n".format(doc_id))
f.write("# sent_id = {}\n".format(sent_id))
text = "".join(
[w + (" " if sp else "") for w, sp in zip(words, have_space_after)])
f.write("# text = {}\n".format(text))
for word_id, (w, sp) in enumerate(zip(words, have_space_after), start=1):
if sp:
misc = "_"
else:
misc = "SpaceAfter=No"
f.write("{}\t{}\t{}\n".format(word_id, w, misc))
f.write("\n")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--treebank3_root", required=True)
parser.add_argument("--revised_root")
args = parser.parse_args()
write_to_file(args.treebank3_root, args.treebank3_root, train_splits, 'train_02-21.LDC99T42', 'train_02-21.LDC99T42.text')
write_to_file(args.treebank3_root, args.treebank3_root, test_splits, 'test_23.LDC99T42', 'test_23.LDC99T42.text')
write_to_file(args.treebank3_root, args.treebank3_root, dev_22_splits, 'dev_22.LDC99T42', 'dev_22.LDC99T42.text')
if args.revised_root is not None:
write_to_file(args.treebank3_root, args.revised_root, train_splits, 'train_02-21.LDC2015T13', 'train_02-21.LDC2015T13.text')
write_to_file(args.treebank3_root, args.revised_root, test_splits, 'test_23.LDC2015T13', 'test_23.LDC2015T13.text')
write_to_file(args.treebank3_root, args.revised_root, dev_22_splits, 'dev_22.LDC2015T13', 'dev_22.LDC2015T13.text')
|
rdkit/ML/Cluster/murtagh_test.py | kazuyaujihara/rdkit | 1,609 | 12659555 |
import numpy
from rdkit.ML.Cluster import Murtagh
print('1')
d = numpy.array([[10.0, 5.0], [20.0, 20.0], [30.0, 10.0], [30.0, 15.0], [5.0, 10.0]], numpy.float64)  # numpy.float was removed in recent NumPy
print('2')
# clusters = Murtagh.ClusterData(d,len(d),Murtagh.WARDS)
# for i in range(len(clusters)):
# clusters[i].Print()
# print('3')
dists = []
for i in range(len(d)):
for j in range(i):
dist = sum((d[i] - d[j])**2)
dists.append(dist)
dists = numpy.array(dists)
print('Wards:')
clusters = Murtagh.ClusterData(dists, len(d), Murtagh.WARDS, isDistData=1)
clusters[0].Print()
print('SLINK:')
clusters = Murtagh.ClusterData(dists, len(d), Murtagh.SLINK, isDistData=1)
clusters[0].Print()
print('CLINK:')
clusters = Murtagh.ClusterData(dists, len(d), Murtagh.CLINK, isDistData=1)
clusters[0].Print()
print('UPGMA:')
clusters = Murtagh.ClusterData(dists, len(d), Murtagh.UPGMA, isDistData=1)
clusters[0].Print()
|
unittest/scripts/auto/py_mixed_versions/validation/cluster_multiple_server_versions.py | mueller/mysql-shell | 119 | 12659567 | #@<OUT> get cluster status
{
"clusterName": "testCluster",
"defaultReplicaSet": {
"name": "default",
"topology": [
{
"address": "<<<hostname>>>:<<<__mysql_sandbox_port2>>>",
"label": "<<<hostname>>>:<<<__mysql_sandbox_port2>>>",
"role": "HA"
},
{
"address": "<<<hostname>>>:<<<__mysql_sandbox_port1>>>",
"label": "<<<hostname>>>:<<<__mysql_sandbox_port1>>>",
"role": "HA"
}
],
"topologyMode": "Single-Primary"
}
}
|
PR_BCI_team/Team_StarLab/DKHan/examples/giga_cnn/main_triplet.py | PatternRecognition/OpenBMI | 217 | 12659589 | from __future__ import print_function
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
import pickle
from models.model_resnet import *
from models.model_openbmi import *
from models.model_3dcnn import *
import matplotlib.pyplot as plt
from scipy import signal
cuda = torch.cuda.is_available()
import matplotlib
import matplotlib.pyplot as plt
giga_classes = ['right', 'left']
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
'#bcbd22', '#17becf']
def plot_embeddings(embeddings, targets, xlim=None, ylim=None):
plt.figure(figsize=(10,10))
for i in range(10):
inds = np.where(targets==i)[0]
plt.scatter(embeddings[inds,0], embeddings[inds,1], alpha=0.5, color=colors[i])
if xlim:
plt.xlim(xlim[0], xlim[1])
if ylim:
plt.ylim(ylim[0], ylim[1])
plt.legend(giga_classes)
def extract_embeddings(dataloader, model, num_ftrs=2):
with torch.no_grad():
model.eval()
# num_ftrs = model.embedding_net.fc.out_features
embeddings = np.zeros((len(dataloader.dataset), num_ftrs))
labels = np.zeros(len(dataloader.dataset))
k = 0
for images, target in dataloader:
if cuda:
images = images.cuda()
embeddings[k:k+len(images)] = model.get_embedding(images).data.cpu().numpy()
labels[k:k+len(images)] = target.numpy()
k += len(images)
return embeddings, labels
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
#data = data.view(-1,1,62,301)
target = target.view(-1)
#data = nn.functional.interpolate(data,size=[300,300])
optimizer.zero_grad()
output = model(data)
#output = nn.CrossEntropyLoss(output)
# output = F.log_softmax(output, dim=1)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
def eval(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
#data = data.view(-1,1,62,data.shape[4])
output = model(data)
#output = nn.CrossEntropyLoss(output)
#output = F.log_softmax(output, dim=1)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
#print('{:.0f}'.format(100. * correct / len(test_loader.dataset)))
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return test_loss, correct
def windows(data, size, step):
start = 0
while ((start+size) < data.shape[0]):
yield int(start), int(start + size)
start += step
def segment_signal_without_transition(data, window_size, step):
segments = []
for (start, end) in windows(data, window_size, step):
if(len(data[start:end]) == window_size):
segments = segments + [data[start:end]]
return np.array(segments)
def segment_dataset(X, window_size, step):
win_x = []
for i in range(X.shape[0]):
win_x = win_x + [segment_signal_without_transition(X[i], window_size, step)]
win_x = np.array(win_x)
return win_x
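# Illustrative note (not part of the original script): segment_dataset slides a window of
# `window_size` samples with stride `step` over the time axis of every trial, e.g.
#   X = np.random.randn(4, 1000, 62)            # (trials, samples, channels)
#   win_x = segment_dataset(X, 500, 250)
#   win_x.shape == (4, 2, 500, 62)              # two windows fit: [0, 500) and [250, 750)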
#%%
class TripletGiga(Dataset):
def __init__(self,x,y, valtype, transform=None,istrain = True, sess=1,subj=None):
self.transform = transform
self.istrain = istrain
x_data = x.copy()
y_data = y.copy()
x_data = x_data.reshape(108,-1,1,62,500)
y_data = y_data.reshape(108,-1)
if valtype =='sess':
if istrain:
x_data = x_data[np.s_[0:54],:,:,:,:]
y_data = y_data[np.s_[0:54],:]
else:
x_data = x_data[np.s_[0+54:54+54],100:200,:,:,:] #tests sess2 online
y_data = y_data[np.s_[0+54:54+54],100:200]
elif valtype == 'loso':
            if subj is None:
raise AssertionError()
if istrain:
x_data = np.delete(x_data,np.s_[subj,subj+54],0) #leave one subj
y_data = np.delete(y_data,np.s_[subj,subj+54],0)
else:
x_data = x_data[np.s_[subj+54], 100:200, :, :, :] # tests sess2 online
y_data = y_data[np.s_[subj+54], 100:200]
elif valtype == 'subj':
            if subj is None:
raise AssertionError()
if istrain:
x_data = x_data[subj, :, :, :, :]
y_data = y_data[subj, :]
else:
x_data = x_data[subj, 100:200, :, :, :] # tests sess2 online
y_data = y_data[subj, 100:200]
else:
raise AssertionError()
self.x_data = x_data.reshape(-1, 1, 62, 500)
self.y_data = y_data.reshape(-1)
self.len = self.y_data.shape[0]
self.label_to_indices = {label: np.where(self.y_data == label)[0]
for label in self.y_data}
random_state = np.random.RandomState(29)
if not istrain:
self.labels_set = set(self.y_data)
self.label_to_indices = {label: np.where(self.y_data == label)[0]
for label in self.labels_set}
triplets = [[i,
random_state.choice(self.label_to_indices[self.y_data[i].item()]),
random_state.choice(self.label_to_indices[
np.random.choice(
list(self.labels_set - set([self.y_data[i].item()]))
)
])
]
for i in range(len(self.x_data))]
self.test_triplets = triplets
def __getitem__(self, index):
if self.istrain:
img1 = self.x_data[index,:,:,100:500]
y1 = self.y_data[index]
positive_index = index
while positive_index == index:
positive_index = np.random.choice(self.label_to_indices[y1])
if y1 == 1:
negative_index = np.random.choice(self.label_to_indices[0])
else:
negative_index = np.random.choice(self.label_to_indices[1])
img2 = self.x_data[positive_index,:,:,100:500]
img3 = self.x_data[negative_index, :, :, 100:500]
y2 = self.y_data[positive_index]
y3 = self.y_data[negative_index]
else:
img1 = self.x_data[self.test_triplets[index][0],:,:,100:500]
img2 = self.x_data[self.test_triplets[index][1],:,:,100:500]
img3 = self.x_data[self.test_triplets[index][2],:,:,100:500]
y1 = self.y_data[self.test_triplets[index][0]]
y2 = self.y_data[self.test_triplets[index][1]]
y3 = self.y_data[self.test_triplets[index][2]]
img1 = torch.from_numpy(img1).type(torch.FloatTensor)
img2 = torch.from_numpy(img2).type(torch.FloatTensor)
img3 = torch.from_numpy(img3).type(torch.FloatTensor)
if self.transform is not None:
img1 = self.transform(img1)
img2 = self.transform(img2)
img3 = self.transform(img3)
return (img1, img2, img3), []
def __len__(self):
return self.len
#%%
class GigaDataset(Dataset):
def __init__(self,x,y, valtype, transform=None,istrain = True, sess=1,subj=None):
self.transform = transform
self.istrain = istrain
x_data = x.copy()
y_data = y.copy()
x_data = x_data.reshape(108,-1,1,62,500)
y_data = y_data.reshape(108,-1)
if valtype =='sess':
if istrain:
x_data = x_data[np.s_[0:54],:,:,:,:]
y_data = y_data[np.s_[0:54],:]
else:
x_data = x_data[np.s_[0+54:54+54],100:200,:,:,:] #tests sess2 online
y_data = y_data[np.s_[0+54:54+54],100:200]
elif valtype == 'loso':
            if subj is None:
raise AssertionError()
if istrain:
x_data = np.delete(x_data,np.s_[subj,subj+54],0) #leave one subj
y_data = np.delete(y_data,np.s_[subj,subj+54],0)
else:
x_data = x_data[np.s_[subj+54], 100:200, :, :, :] # tests sess2 online
y_data = y_data[np.s_[subj+54], 100:200]
elif valtype == 'subj':
            if subj is None:
raise AssertionError()
if istrain:
x_data = x_data[subj, :, :, :, :]
y_data = y_data[subj, :]
else:
x_data = x_data[subj, 100:200, :, :, :] # tests sess2 online
y_data = y_data[subj, 100:200]
else:
raise AssertionError()
x_data = x_data.reshape(-1, 1, 62, 500)
y_data = y_data.reshape(-1)
self.len = y_data.shape[0]
x_data = torch.from_numpy(x_data)
self.x_data = x_data.type(torch.FloatTensor)
y_data = torch.from_numpy(y_data)
self.y_data = y_data.long()
def __getitem__(self, index):
x = self.x_data[index,:,:,100:500]
y = self.y_data[index]
# fs =100
# N = 400
# import librosa
# import librosa.display
#
# xtemp = x.clone().view(-1)
# f, t, Zxx = signal.spectrogram(xtemp,fs=fs,mode='psd')
#
# D = np.abs(librosa.stft(xtemp.numpy(),n_fft=30,center=False))
#
# librosa.display.specshow(librosa.amplitude_to_db(D,ref=np.max),y_axis='log', x_axis='time')
# f, t, Zxx = signal.spectrogram(x[0,:,:],fs=fs,nperseg=60,noverlap=49,mode='psd')
#
# plt.pcolormesh(t, f,Zxx)
# plt.title('STFT Magnitude')
# plt.ylabel('Frequency [Hz]')
# plt.xlabel('Time [sec]')
# plt.show()
# x = torch.from_numpy(Zxx)
# Normalize your data here
if self.transform:
x = self.transform(x)
return x, y
def __len__(self):
return self.len
def load_smt(path='C:/Users/dk/PycharmProjects/data/giga'):
with open(path+'/epoch_labels.pkl', 'rb') as f:
y_data = pickle.load(f)
with open(path+'/smt1_scale.pkl', 'rb') as f:
x_data1 = pickle.load(f)
with open(path+'/smt2_scale.pkl', 'rb') as f:
x_data2 = pickle.load(f)
x_data = np.concatenate([x_data1, x_data2])
x_data = np.expand_dims(x_data, axis=1)
return x_data,y_data
def main():
import torch
from torch.optim import lr_scheduler
import torch.optim as optim
from torch.autograd import Variable
from trainer import fit
import numpy as np
cuda = torch.cuda.is_available()
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=100, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=100, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=True,
help='For Saving the current Model')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
from datetime import datetime
import os
loging = False
ismultitask = False
loso = False
if (args.save_model):
model_save_path = 'model/triplet/'
if not os.path.isdir(model_save_path):
os.makedirs(model_save_path)
if loging:
fname = model_save_path + datetime.today().strftime("%m_%d_%H_%M") + ".txt"
f = open(fname, 'w')
x_data, y_data = load_smt()
# nonbciilli = np.s_[0,1,2,4,5,8,16,17,18,20,21,27,28,29,30,32,35,36,38,42,43,44,51]
valtype = 'sess'
if valtype == 'loso':
for subj in range(0,54):
model = Deep4CNN(ismult=ismultitask).to(device)
#model.load_state_dict(torch.load(model_save_path+ "J_" + str(subj) + 'basecnn.pt'))
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
optimizer_fine = optim.SGD(model.parameters(), lr=0.005, momentum=args.momentum)
dataset_train = GigaDataset(x=x_data, y=y_data, valtype=valtype, istrain=True, sess=1, subj=subj)
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=args.batch_size, shuffle=True, **kwargs)
dataset_test = GigaDataset(x=x_data, y=y_data, valtype=valtype, istrain=False, sess=2, subj=subj)
test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=args.batch_size, shuffle=False,**kwargs)
# dataset_fine = GigaDataset_LOSO(x=x_data, y=y_data, fine=True, istrain=True, sess=2, subj=subj)
# fine_loader = torch.utils.data.DataLoader(dataset_fine, batch_size=args.batch_size, shuffle=True, **kwargs)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
print("joint-train")
#LOSO joint training
j_loss, j_score = eval(args, model, device, test_loader)
if epoch > 30:
if (args.save_model):
torch.save(model.state_dict(), model_save_path+ "model_" +str(subj) + "_" +str(epoch) + '.pt')
# #fine tuning
# for epoch in range(1, 10):
# train_mt(args, model, device, fine_loader, optimizer_fine, epoch)
#
# print("fine-tuning")
# f_loss, f_score = eval(args, model, device, test_loader)
if (args.save_model):
torch.save(model.state_dict(), model_save_path+"F_" + str(subj) + 'basecnn.pt')
if loging:
f = open(fname, 'a')
f.write(str(subj)+" "+"jl : "+ str(j_loss) + " " + str(j_score) + '\n')
f.close()
elif valtype == 'sess':
from networks import EmbeddingDeep4CNN, TripletNet, FineShallowCNN, EmbeddingDeepCNN
from losses import TripletLoss
margin = 1
embedding_net = EmbeddingDeep4CNN()
print(embedding_net)
model = TripletNet(embedding_net)
if cuda:
model.cuda()
loss_fn = TripletLoss(margin)
lr = 1e-3
#optimizer = optim.Adam(model.parameters(), lr=lr)
n_epochs = 5
#%%
log_interval = 10
if n_epochs == 0:
pass
#model.load_state_dict(torch.load('triplet_deep4_1000_2.pt'))
        else:  # train the triplet network
# For classification
dataset_train = GigaDataset(x=x_data, y=y_data, valtype=valtype, istrain=True, sess=1)
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=args.batch_size, shuffle=True,
**kwargs)
dataset_test = GigaDataset(x=x_data, y=y_data, valtype=valtype, istrain=False, sess=2, subj=-1)
test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=args.batch_size, shuffle=False,
**kwargs)
triplet_dataset_train = TripletGiga(x=x_data, y=y_data,valtype=valtype, istrain=True, sess=1)
triplet_train_loader = torch.utils.data.DataLoader(triplet_dataset_train, batch_size=args.batch_size, shuffle=True, **kwargs)
triplet_dataset_test = TripletGiga(x=x_data, y=y_data,valtype=valtype, istrain=False, sess=2, subj=-1)
triplet_test_loader = torch.utils.data.DataLoader(triplet_dataset_test, batch_size=args.batch_size, shuffle=False, **kwargs)
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=1, last_epoch=-1)
from trainer import fit
fit(triplet_train_loader, triplet_test_loader, model, loss_fn, optimizer, scheduler, n_epochs, cuda,
log_interval)
#%%
train_embeddings_tl, train_labels_tl = extract_embeddings(train_loader, embedding_net,1000)
# plot_embeddings(train_embeddings_tl, train_labels_tl)
val_embeddings_tl, val_labels_tl = extract_embeddings(test_loader, embedding_net,1000)
# plot_embeddings(val_embeddings_tl, val_labels_tl)
# #
from sklearn.pipeline import Pipeline
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import ShuffleSplit, cross_val_score
lda = LinearDiscriminantAnalysis()
lda.fit(train_embeddings_tl,train_labels_tl)
print(lda.score(val_embeddings_tl, val_labels_tl))
# from torchvision import datasets, models, transforms
# temp = model.embedding_net.children()
# newmodel = torch.nn.Sequential(*(list(model.embedding_net.children())[:]))
# for param in model.embedding_net.parameters():
# param.requires_grad = True
#newembedding_net = torch.nn.Sequential(*(list(model.embedding_net.children())[:]))
#
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2,perplexity=30)
train_tsne = tsne.fit_transform(val_embeddings_tl)
plot_embeddings(train_tsne, val_labels_tl)
for param in model.embedding_net.parameters():
param.requires_grad = True
#embedding_net2 = EmbeddingDeep4CNN()
newmodel = nn.Sequential(model.embedding_net,
nn.Linear(1000, 2),
nn.LogSoftmax(dim=1)
).to(device)
print(newmodel)
#newmodel.fc_lr = nn.Linear(1000,2)
newmodel.to(device)
optimizer = optim.SGD(newmodel.parameters(), lr=0.01, momentum=0.9)
#optimizer = optim.Adam(newmodel.parameters())
for epoch in range(1, 20):
train(args, newmodel, device, train_loader, optimizer, epoch)
j_loss, j_score = eval(args, newmodel, device, test_loader)
if args.save_model:
torch.save(model.state_dict(),'triplet_deep4_1000_2.pt')
class FineNet(nn.Module): # shallowconv
def __init__(self,EmbeddingNet):
super(FineNet, self).__init__()
self.EmbeddingNet = EmbeddingNet
self.fc_lr = nn.Linear(2000, 2)
def forward(self, x):
x = self.EmbeddingNet(x)
#x = x.view(x.size()[0], -1)
x = self.fc_lr(x)
x = F.dropout(x, training=self.training, p=0.5)
x = F.log_softmax(x, dim=1)
return x
def get_embedding(self, x):
return self.forward(x)
if __name__ == '__main__':
main()
|
bonobo/util/errors.py | Playfloor/bonobo | 1,573 | 12659614 | import logging
import re
from contextlib import contextmanager
from sys import exc_info
from mondrian import term
logger = logging.getLogger(__name__)
@contextmanager
def sweeten_errors():
try:
yield
except Exception as exc:
SPACES = 2
w = term.white
prefix = w("║" + " " * (SPACES - 1))
suffix = w(" " * (SPACES - 1) + "║")
pre_re = re.compile("([^`]*)`([^`]*)`([^`]*)")
def format_arg(arg):
length = len(pre_re.sub("\\1\\2\\3", arg))
arg = pre_re.sub(w("\\1") + term.bold("\\2") + w("\\3"), arg)
arg = re.sub(r"^ \$ (.*)", term.lightblack(" $ ") + term.reset("\\1"), arg)
return (arg, length)
def f(*args):
return "".join(args)
term_width, term_height = term.get_size()
line_length = min(80, term_width)
for arg in exc.args:
line_length = max(min(line_length, len(arg) + 2 * SPACES), 120)
print(f(w("╔" + "═" * (line_length - 2) + "╗")))
for i, arg in enumerate(exc.args):
if i == 1:
print(f(prefix, " " * (line_length - 2 * SPACES), suffix))
arg_formatted, arg_length = format_arg(arg)
if not i:
# first line
print(
f(
prefix,
term.red_bg(term.bold(" " + type(exc).__name__ + " ")),
" ",
w(arg_formatted),
" " * (line_length - (arg_length + 3 + len(type(exc).__name__) + 2 * SPACES)),
suffix,
)
)
else:
# other lines
print(f(prefix, arg_formatted + " " * (line_length - arg_length - 2 * SPACES), suffix))
print(f(w("╚" + "═" * (line_length - 2) + "╝")))
logging.getLogger().debug("This error was caused by the following exception chain.", exc_info=exc_info())
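
# --- Illustrative usage sketch (not part of the original module) ---
# Any exception raised inside the context manager is caught and rendered in a boxed,
# colourised form; `backtick`-quoted spans in the message are emphasised.
#
#   with sweeten_errors():
#       raise RuntimeError('Something went wrong.', 'Try running `bonobo run` again.')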
|
scripts/test_wandb.py | mariatippler/haven-ai | 145 | 12659629 | from haven import haven_wizard as hw
import wandb
import sys
import os
import pprint
path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, path)
if __name__ == "__main__":
# first way
score_dict = {"loss": loss}
wandb.send(score_dict)
# second way
chk = load_checkpoint(savedir)
hw.save_checkpoint(savedir, score_dict=score_dict, wandb_config={})
|
menpo/model/vectorizable.py | apapaion/menpo | 311 | 12659632 | class VectorizableBackedModel(object):
r"""
Mixin for models constructed from a set of :map:`Vectorizable` objects.
Supports models for which visualizing the meaning of a set of components
is trivial.
Requires that the following methods are implemented:
1. `component_vector(index)`
2. `instance_vector(weights)`
3. `project_vector(vector)`
4. `reconstruct_vector(vectors)`
5. `project_out_vector(vector)`
The constructor takes an instance of :map:`Vectorizable`. This is used for
all conversions to and from numpy vectors and instances.
Parameters
----------
template_instance : :map:`Vectorizable`
The template instance.
"""
def __init__(self, template_instance):
self.template_instance = template_instance
def component_vector(self, index):
r"""
A particular component of the model, in vectorized form.
Parameters
----------
index : `int`
The component that is to be returned.
Returns
-------
component_vector : `ndarray`
The component vector.
"""
raise NotImplementedError()
def component(self, index):
r"""
A particular component of the model.
Parameters
----------
index : `int`
The component that is to be returned.
Returns
-------
component : `type(self.template_instance)`
The component instance.
"""
return self.template_instance.from_vector(self.component_vector(index))
def instance_vector(self, weights):
"""
Creates a new vector instance of the model using the first ``len(weights)``
components.
Parameters
----------
weights : ``(n_weights,)`` `ndarray` or `list`
``weights[i]`` is the linear contribution of the i'th component
to the instance vector.
Raises
------
ValueError
If n_weights > n_components
Returns
-------
instance_vector : `ndarray`
An instance of the model, in vectorized form.
"""
raise NotImplementedError()
def instance(self, weights):
"""
Creates a new instance of the model using the first ``len(weights)``
components.
Parameters
----------
weights : ``(n_weights,)`` `ndarray` or `list`
``weights[i]`` is the linear contribution of the i'th component
to the instance vector.
Raises
------
ValueError
If n_weights > n_components
Returns
-------
instance : `type(self.template_instance)`
An instance of the model.
"""
return self.template_instance.from_vector(self.instance_vector(weights))
def project_vector(self, instance_vector):
"""
Projects the `instance_vector` onto the model, retrieving the optimal
linear weightings.
Parameters
----------
instance_vector : `ndarray`
A novel instance vector.
Returns
-------
projected_vector : ``(n_components,)`` `ndarray`
A vector of optimal linear weightings.
"""
raise NotImplementedError()
def project(self, instance):
"""
Projects the `instance` onto the model, retrieving the optimal
linear weightings.
Parameters
----------
instance : :map:`Vectorizable`
A novel instance.
Returns
-------
projected : ``(n_components,)`` `ndarray`
A vector of optimal linear weightings.
"""
return self.project_vector(instance.as_vector())
def reconstruct_vector(self, instance_vector):
"""
Projects an `instance_vector` onto the linear space and rebuilds from the
weights found.
Syntactic sugar for: ::
instance_vector(project_vector(instance_vector))
but faster, as it avoids the conversion that takes place each time.
Parameters
----------
instance_vector : `ndarray`
A novel instance vector.
Returns
-------
reconstructed_vector : `ndarray`
The reconstructed vector.
"""
raise NotImplementedError()
def reconstruct(self, instance):
"""
Projects a `instance` onto the linear space and rebuilds from the
weights found.
Syntactic sugar for: ::
instance(project(instance))
but faster, as it avoids the conversion that takes place each time.
Parameters
----------
instance : :class:`Vectorizable`
A novel instance of :class:`Vectorizable`.
Returns
-------
reconstructed : `self.instance_class`
The reconstructed object.
"""
reconstruction_vector = self.reconstruct_vector(instance.as_vector())
return instance.from_vector(reconstruction_vector)
def project_out_vector(self, instance_vector):
"""
Returns a version of `instance_vector` where all the basis of the model
have been projected out.
Parameters
----------
instance_vector : `ndarray`
A novel instance vector.
Returns
-------
projected_out_vector : `ndarray`
A copy of `instance_vector`, with all bases of the model projected out.
"""
raise NotImplementedError()
def project_out(self, instance):
"""
Returns a version of `instance` where all the basis of the model
have been projected out.
Parameters
----------
instance : :class:`Vectorizable`
A novel instance of :class:`Vectorizable`.
Returns
-------
projected_out : `self.instance_class`
A copy of `instance`, with all basis of the model projected out.
"""
vector_instance = self.project_out_vector(instance.as_vector())
return instance.from_vector(vector_instance)
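
# --- Illustrative sketch (not part of the original module) ---
# A minimal concrete subclass, assuming a hypothetical linear model whose rows of
# `components` form an orthonormal basis around a mean vector; it only shows how the
# abstract *_vector methods could be wired up.
#
#   import numpy as np
#
#   class OrthoLinearModel(VectorizableBackedModel):
#       def __init__(self, template_instance, components, mean):
#           super(OrthoLinearModel, self).__init__(template_instance)
#           self.components = components   # (n_components, n_features), orthonormal rows
#           self.mean = mean               # (n_features,)
#
#       def component_vector(self, index):
#           return self.components[index]
#
#       def project_vector(self, instance_vector):
#           return self.components.dot(instance_vector - self.mean)
#
#       def instance_vector(self, weights):
#           weights = np.asarray(weights)
#           return self.mean + weights.dot(self.components[:len(weights)])
#
#       def reconstruct_vector(self, instance_vector):
#           return self.instance_vector(self.project_vector(instance_vector))
#
#       def project_out_vector(self, instance_vector):
#           centred = instance_vector - self.mean
#           return instance_vector - self.components.T.dot(self.components.dot(centred))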
|
gitlab/tests/test_e2e.py | vbarbaresi/integrations-core | 663 | 12659686 | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.dev.utils import get_metadata_metrics
from .common import ALLOWED_METRICS, CONFIG, LEGACY_CONFIG, METRICS_TO_TEST, assert_check
pytestmark = pytest.mark.e2e
def test_e2e_legacy(dd_agent_check):
aggregator = dd_agent_check(LEGACY_CONFIG, rate=True)
assert_check(aggregator, ALLOWED_METRICS)
aggregator.assert_metrics_using_metadata(get_metadata_metrics())
def test_e2e(dd_agent_check):
aggregator = dd_agent_check(CONFIG, rate=True)
assert_check(aggregator, METRICS_TO_TEST)
# Excluding gitlab.rack.http_requests_total because it is a distribution metric
# (its sum and count metrics are in the metadata)
aggregator.assert_metrics_using_metadata(get_metadata_metrics(), exclude=["gitlab.rack.http_requests_total"])
|
unittests/resources/checks_unlisted/fixtures_simple.py | CLIP-HPC/reframe | 167 | 12659697 | # Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import reframe as rfm
import reframe.utility.sanity as sn
import os
class HelloFixture(rfm.RunOnlyRegressionTest):
executable = 'echo hello from fixture'
@sanity_function
def assert_output(self):
return sn.assert_found(r'hello from fixture', self.stdout)
@rfm.simple_test
class HelloTest(HelloFixture):
valid_systems = ['*']
valid_prog_environs = ['*']
@rfm.simple_test
class TestA(rfm.RunOnlyRegressionTest):
valid_systems = ['*']
valid_prog_environs = ['*']
executable = '/bin/true'
# Declare the fixture
f = fixture(HelloFixture, scope='session')
@sanity_function
def inspect_fixture(self):
return sn.assert_found(
r'hello from fixture',
os.path.join(self.f.stagedir, self.f.stdout.evaluate())
)
@rfm.simple_test
class TestB(TestA):
'''Use a test as a fixture'''
ff = fixture(HelloTest, scope='session')
|
tick/robust/tests/serializing_test.py | sumau/tick | 411 | 12659722 | # License: BSD 3 clause
import io, unittest
import numpy as np
import pickle
from scipy.sparse import csr_matrix
from tick.base_model.tests.generalized_linear_model import TestGLM
from tick.prox import ProxL1
from tick.linear_model import ModelLinReg, SimuLinReg
from tick.linear_model import ModelLogReg, SimuLogReg
from tick.linear_model import ModelPoisReg, SimuPoisReg
from tick.linear_model import ModelHinge, ModelQuadraticHinge, ModelSmoothedHinge
from tick.robust import ModelAbsoluteRegression, ModelEpsilonInsensitive, ModelHuber, \
ModelLinRegWithIntercepts, ModelModifiedHuber
from tick.simulation import weights_sparse_gauss
class Test(TestGLM):
def test_robust_model_serialization(self):
"""...Test serialization of robust models
"""
model_map = {
ModelAbsoluteRegression: SimuLinReg,
ModelEpsilonInsensitive: SimuLinReg,
ModelHuber: SimuLinReg,
ModelLinRegWithIntercepts: SimuLinReg,
ModelModifiedHuber: SimuLogReg
}
for mod in model_map:
np.random.seed(12)
n_samples, n_features = 100, 5
w0 = np.random.randn(n_features)
intercept0 = 50 * weights_sparse_gauss(n_weights=n_samples, nnz=30)
c0 = None
X, y = SimuLinReg(w0, c0, n_samples=n_samples, verbose=False,
seed=2038).simulate()
if mod == ModelLinRegWithIntercepts:
y += intercept0
model = mod(fit_intercept=False).fit(X, y)
pickled = pickle.loads(pickle.dumps(model))
self.assertTrue(model._model.compare(pickled._model))
if mod == ModelLinRegWithIntercepts:
test_vector = np.hstack((X[0], np.ones(n_samples)))
self.assertEqual(
model.loss(test_vector), pickled.loss(test_vector))
else:
self.assertEqual(model.loss(X[0]), pickled.loss(X[0]))
if __name__ == "__main__":
unittest.main()
|
test/python/isolationtest/isolationTestHandler.py | faizol/babelfish_extensions | 115 | 12659745 | import traceback
from antlr4 import *
from .parser.specLexer import specLexer
from .parser.specParser import specParser
from .specParserVisitorImpl import *
def isolationTestHandler(testFile, fileWriter, logger):
testName = testFile.name.split('.')[0]
try:
logger.info("Starting : {}".format(testName))
try:
testSpec = parseSpecInput(str(testFile))
if(testSpec is None):
raise Exception("TestSpec object is not generated")
else:
print(testSpec)
logger.info("Successfully parsed")
except Exception as e:
logger.error("Error while parsing : {}".format(str(e)))
return False
testSpec.logger = logger
testSpec.fileWriter = fileWriter
testSpec.initTestRun()
logger.info("Completed : {}".format(testName))
return True
except Exception as e:
logger.error(str(e))
traceback.print_exc()
return False
def parseSpecInput(filename):
input_stream = FileStream(filename)
lexer = specLexer(input_stream)
token_stream = CommonTokenStream(lexer)
parser = specParser(token_stream)
tree = parser.parse()
visitor = specParserVisitorImpl()
visitor.visit(tree)
return visitor.testSpec
|
flatdata-generator/tests/generators/test_go_generator.py | gferon/flatdata | 140 | 12659746 | '''
Copyright (c) 2017 HERE Europe B.V.
See the LICENSE file in the root of this project for license details.
'''
import glob
from flatdata.generator.generators.go import GoGenerator
from .assertions import generate_and_assert_in
from .schemas import schemas_and_expectations
from nose.plugins.skip import SkipTest
def generate_and_compare(test_case):
with open(test_case[0], 'r') as test_file:
test = test_file.read()
expectations = list()
for file in glob.glob(test_case[1] + '*'):
with open(file, 'r') as expectation_file:
expectations.append(expectation_file.read())
generate_and_assert_in(test, GoGenerator, *expectations)
def skip(test_case):
raise SkipTest("Test %s is skipped" % test_case[0])
def test_against_expectations():
for x in schemas_and_expectations(generator='go', extension='go'):
# Go does not yet support namespaces, enums, ranges, or constants, skip those tests
if "enums" not in x[0] and "constants" not in x[0] and "namespaces" not in x[0] and "ranges" not in x[0]:
yield generate_and_compare, x
else:
yield skip, x
|
stock_model.py | murilobd/Deep-Convolution-Stock-Technical-Analysis | 312 | 12659778 | <filename>stock_model.py
import argparse
import sys
import tensorflow as tf
import functools
from ops import *
from loader import *
def doublewrap(function):
@functools.wraps(function)
def decorator(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return function(args[0])
else:
return lambda wrapee: function(wrapee, *args, **kwargs)
return decorator
@doublewrap
def define_scope(function, scope=None, *args, **kwargs):
"""
A decorator for functions that define TensorFlow operations. The wrapped
function will only be executed once. Subsequent calls to it will directly
return the result so that operations are added to the graph only once.
The operations added by the function live within a tf.variable_scope(). If
this decorator is used with arguments, they will be forwarded to the
variable scope. The scope name defaults to the name of the wrapped
function.
"""
attribute = '_cache_' + function.__name__
name = scope or function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
with tf.variable_scope(name, *args, **kwargs):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator
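# Illustrative note (not part of the original script): @define_scope memoises a graph-building
# method behind a property, so the TensorFlow ops are added to the graph only once no matter
# how often the attribute is read, e.g.
#
#   class Toy:
#       def __init__(self, x):
#           self.x = x
#           self.double          # ops are created here, inside tf.variable_scope('double')
#       @define_scope
#       def double(self):
#           return 2 * self.x
#
#   t = Toy(tf.constant(3.0))
#   t.double is t.double         # True: the cached op is returned on later accesses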
class Model:
def __init__(self,
image,
label,
dropout=0.5,
conv_size=9,
conv_stride=1,
ksize=2,
pool_stride=2,
filter_num=128,
padding="SAME"):
self.image = image
self.label = label
self.dropout = dropout
self.conv_size = conv_size
self.conv_stride = conv_stride
self.ksize = ksize
self.pool_stride = pool_stride
self.padding = padding
self.filter_num = filter_num
self.prediction
self.optimize
self.accuracy
@define_scope
def prediction(self):
with tf.variable_scope("model") as scope:
#input image
input_image = self.image
layers = []
# conv_1 [batch, ngf, 5] => [batch, 64, ngf]
with tf.variable_scope("conv_1"):
output = relu(conv1d(input_image, self.filter_num, name='conv_1'))
layers.append(output)
# conv_2 - conv_6
layer_specs = [
(self.filter_num * 2, 0.5), # conv_2: [batch, 64, ngf] => [batch, 32, ngf * 2]
(self.filter_num * 4, 0.5), # conv_3: [batch, 32, ngf * 2] => [batch, 16, ngf * 4]
(self.filter_num * 8, 0.5), # conv_4: [batch, 16, ngf * 4] => [batch, 8, ngf * 8]
(self.filter_num * 8, 0.5), # conv_5: [batch, 8, ngf * 8] => [batch, 4, ngf * 8]
(self.filter_num * 8, 0.5) # conv_6: [batch, 4, ngf * 8] => [batch, 2, ngf * 8]
]
# adding layers
for _, (out_channels, dropout) in enumerate(layer_specs):
with tf.variable_scope("conv_%d" % (len(layers) + 1)):
rectified = lrelu(layers[-1], 0.2)
# [batch, in_width, in_channels] => [batch, in_width/2, out_channels]
convolved = conv1d(rectified, out_channels)
# batchnormalize convolved
output = batchnorm(convolved, is_2d=False)
# dropout
if dropout > 0.0:
output = tf.nn.dropout(output, keep_prob=1 - dropout)
layers.append(output)
#fc1
h_fc1 = relu(fully_connected(layers[-1], 256, name='fc1'))
#dropout
h_fc1_drop = tf.nn.dropout(h_fc1, self.dropout)
#fc2
result = tf.sigmoid(fully_connected(h_fc1_drop, 2, name='fc2'))
return result
@define_scope
def optimize(self):
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.label,
logits=self.prediction))
return tf.train.AdamOptimizer(0.0001).minimize(cross_entropy)
@define_scope
def accuracy(self):
correct_prediction = tf.equal(tf.argmax(self.label, 1), tf.argmax(self.prediction, 1))
return tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# @define_scope
# def optimize(self):
# with tf.name_scope("loss"):
# loss = tf.reduce_mean(tf.abs(self.p_loss))
# tvars = tf.trainable_variables()
# optim = tf.train.AdamOptimizer(0.0001)
# grads_and_vars = optim.compute_gradients(loss, var_list=tvars)
# print(grads_and_vars)
# train = optim.apply_gradients(grads_and_vars)
# @define_scope
# def p_loss(self):
# outputs = self.prediction
# loss = []
# for i in range(len(outputs.get_shape().as_list())):
# weights = tf.matmul(outputs[i], label[i])
# def if_up():
# return weights[0]
# def if_down():
# return weights[1]
# result = tf.cond(pred, if_true, if_false)
# if (outputs[i][0] > outputs[i][1]):
# if (label[i][0] > 0):
# loss.append(outputs[i][1] * label[i][0])
# else:
# loss.append(outputs[i][0] * label[i][0])
# else:
# if (label[i][0] < 0):
# loss.append(outputs[i][0] * label[i][0])
# else:
# loss.append(outputs[i][1] * label[i][0])
# loss = tf.cast(loss, tf.float32)
# loss = tf.abs(loss)
# return loss
def main():
# Import data
db = load_stock_data("data/aapl/")
# Construct graph
image = tf.placeholder(tf.float32, [None, 128, 5])
label = tf.placeholder(tf.float32, [None, 2])
dropout = tf.placeholder(tf.float32)
model = Model(image, label, dropout=dropout)
# Saver
saver = tf.train.Saver()
# Session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
for i in range(500000):
images, labels = db.train.next_batch(10)
if i % 100 == 0:
images_eval, labels_eval = db.test.next_batch(1000)
accuracy = sess.run(model.accuracy, {image: images_eval, label: labels_eval, dropout: 1.0})
print('step %d, accuracy %g' % (i, accuracy))
sess.run(model.optimize, {image: images, label: labels, dropout: 0.5})
if i % 10000 == 0:
save_path = 'checkpoints/'
model_name = 'stocks_model.ckpt'
if not os.path.exists(save_path):
os.makedirs(save_path)
save_path_full = os.path.join(save_path, model_name)
saver.save(sess, save_path_full, global_step=i+1)
images_eval, labels_eval = db.test.next_batch(1000)
accuracy = sess.run(model.accuracy, {image: images_eval, label: labels_eval, dropout: 1.0})
print('final accuracy on testing set: %g' % (accuracy))
print("finished")
if __name__ == '__main__':
main()
|
sdk/python/feast/infra/online_stores/contrib/hbase_repo_configuration.py | ibnummuhammad/feast | 810 | 12659808 | from tests.integration.feature_repos.integration_test_repo_config import (
IntegrationTestRepoConfig,
)
from tests.integration.feature_repos.universal.online_store.hbase import (
HbaseOnlineStoreCreator,
)
FULL_REPO_CONFIGS = [
IntegrationTestRepoConfig(online_store_creator=HbaseOnlineStoreCreator),
]
|
alipay/aop/api/domain/DiscountModel.py | snowxmas/alipay-sdk-python-all | 213 | 12659827 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class DiscountModel(object):
def __init__(self):
self._term_discount = None
self._term_no = None
@property
def term_discount(self):
return self._term_discount
@term_discount.setter
def term_discount(self, value):
self._term_discount = value
@property
def term_no(self):
return self._term_no
@term_no.setter
def term_no(self, value):
self._term_no = value
def to_alipay_dict(self):
params = dict()
if self.term_discount:
if hasattr(self.term_discount, 'to_alipay_dict'):
params['term_discount'] = self.term_discount.to_alipay_dict()
else:
params['term_discount'] = self.term_discount
if self.term_no:
if hasattr(self.term_no, 'to_alipay_dict'):
params['term_no'] = self.term_no.to_alipay_dict()
else:
params['term_no'] = self.term_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = DiscountModel()
if 'term_discount' in d:
o.term_discount = d['term_discount']
if 'term_no' in d:
o.term_no = d['term_no']
return o
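# A short round-trip sketch (illustrative values, not part of the SDK):
def _discount_model_example():
    m = DiscountModel()
    m.term_no = 12
    m.term_discount = '0.95'
    d = m.to_alipay_dict()  # {'term_no': 12, 'term_discount': '0.95'}
    restored = DiscountModel.from_alipay_dict(d)
    return restored.term_no, restored.term_discount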
|
pixyz/models/vi.py | MokkeMeguru/pixyz-test | 453 | 12659839 | <reponame>MokkeMeguru/pixyz-test
from torch import optim
from ..models.model import Model
from ..utils import tolist
from ..losses import ELBO
class VI(Model):
"""
Variational Inference (Amortized inference)
The ELBO for given distributions (p, approximate_dist) is set as the loss class of this model.
"""
def __init__(self, p, approximate_dist,
other_distributions=[],
optimizer=optim.Adam,
optimizer_params={},
clip_grad_norm=None,
clip_grad_value=None):
"""
Parameters
----------
p : torch.distributions.Distribution
Generative model (distribution).
approximate_dist : torch.distributions.Distribution
Approximate posterior distribution.
optimizer : torch.optim
Optimization algorithm.
optimizer_params : dict
Parameters of optimizer
clip_grad_norm : float or int
Maximum allowed norm of the gradients.
clip_grad_value : float or int
Maximum allowed value of the gradients.
"""
# set distributions (for training)
distributions = [p, approximate_dist] + tolist(other_distributions)
# set losses
elbo = ELBO(p, approximate_dist)
loss = -elbo.mean()
super().__init__(loss, test_loss=loss,
distributions=distributions,
optimizer=optimizer, optimizer_params=optimizer_params,
clip_grad_norm=clip_grad_norm, clip_grad_value=clip_grad_value)
def train(self, train_x_dict={}, **kwargs):
return super().train(train_x_dict, **kwargs)
def test(self, test_x_dict={}, **kwargs):
return super().test(test_x_dict, **kwargs)
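# A minimal usage sketch; `p` and `q` are assumed to be pixyz distributions defined
# elsewhere (e.g. a generative model p(x,z) and an approximate posterior q(z|x)), and
# "x" is assumed to be the observed variable name -- both are assumptions here.
def _vi_example(p, q, x_batch):
    model = VI(p, q, optimizer_params={"lr": 1e-3})
    train_loss = model.train({"x": x_batch})  # one update step on the negative ELBO
    test_loss = model.test({"x": x_batch})
    return train_loss, test_loss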
|
setup.py | EliFinkelshteyn/alphabet-detector | 152 | 12659850 | <reponame>EliFinkelshteyn/alphabet-detector<gh_stars>100-1000
from distutils.core import setup
setup(
name='alphabet-detector',
packages=['alphabet_detector'],
version='0.0.7',
description='A library to detect what alphabet something is written in.',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/EliFinkelshteyn/alphabet-detector',
download_url='https://github.com/EliFinkelshteyn/'
'alphabet-detector/tarball/0.0.7',
keywords=['alphabet', 'charset', 'detect', 'islatin'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
ida_plugin/uefi_analyser/utils.py | fengjixuchui/UEFI_RETool | 240 | 12659870 | # SPDX-License-Identifier: MIT
import os
import ida_bytes
import idaapi
import idc
# definitions from PE file structure
IMAGE_FILE_MACHINE_IA64 = 0x8664
IMAGE_FILE_MACHINE_I386 = 0x014C
PE_OFFSET = 0x3C
IMAGE_SUBSYSTEM_EFI_APPLICATION = 0xA
IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER = 0xB
IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER = 0xC
class Table:
"""build table from array"""
def __init__(self, table_data):
self.table_data = table_data
self.max_sizes = self._get_max_sizes()
self.angle = "+"
self.gl = "-"
self.vl = "|"
def _get_max_sizes(self):
num = len(self.table_data[0])
sizes = [0 for _ in range(num)]
for i in range(len(self.table_data[0])):
for j in range(len(self.table_data)):
if len(self.table_data[j][i]) > sizes[i]:
sizes[i] = len(self.table_data[j][i])
return sizes
@classmethod
def display(cls, table_data):
cls = Table(table_data)
table = cls.angle + f"{cls.angle}".join(
[((cls.gl * (size + 2))) for size in cls.max_sizes]
)
table += f"{cls.angle}\n{cls.vl} "
table += f"{cls.vl} ".join(
[
f"{cls.table_data[0][i]}{' ' * (cls.max_sizes[i] - len(cls.table_data[0][i]) + 1)}"
for i in range(len(cls.table_data[0]))
]
)
table += f"{cls.vl}\n{cls.angle}"
table += f"{cls.angle}".join(
[((cls.gl * (size + 2))) for size in cls.max_sizes]
)
table += f"{cls.angle}\n"
for j in range(1, len(cls.table_data)):
table += f"{cls.vl} "
table += f"{cls.vl} ".join(
[
f"{cls.table_data[j][i]}{' ' * (cls.max_sizes[i] - len(cls.table_data[j][i]) + 1)}"
for i in range(len(cls.table_data[j]))
]
)
table += f"{cls.vl}\n"
table += cls.angle
table += f"{cls.angle}".join(
[((cls.gl * (size + 2))) for size in cls.max_sizes]
)
table += f"{cls.angle}"
return table
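# A minimal usage sketch (illustrative rows; the first row is treated as the header):
def _table_example():
    rows = [
        ["GUID", "Service"],
        ["AABBCCDD-1122-3344-5566778899AABBCC", "LocateProtocol"],
    ]
    return Table.display(rows)  # returns an ASCII table drawn with '+', '-' and '|'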
def set_hexrays_comment(address, text):
"""set comment in decompiled code"""
cfunc = idaapi.decompile(address)
tl = idaapi.treeloc_t()
tl.ea = address
tl.itp = idaapi.ITP_SEMI
cfunc.set_user_cmt(tl, text)
cfunc.save_user_cmts()
def check_guid(address):
"""correctness is determined based on the number of unique bytes"""
return len(set(ida_bytes.get_bytes(address, 16))) > 8
def get_guid(address):
"""get GUID located by address"""
guid = list()
guid.append(idc.get_wide_dword(address))
guid.append(idc.get_wide_word(address + 4))
guid.append(idc.get_wide_word(address + 6))
for addr in range(address + 8, address + 16, 1):
guid.append(idc.get_wide_byte(addr))
return guid
def get_guid_str(guid_struct):
guid = f"{guid_struct[0]:08X}-"
guid += f"{guid_struct[1]:04X}-"
guid += f"{guid_struct[2]:04X}-"
guid += "".join([f"{guid_struct[i]:02X}" for i in range(3, 11)])
return guid
def get_num_le(bytearr):
"""translate a set of bytes into a number in the little endian format"""
num_le = 0
for i in range(len(bytearr)):
num_le += bytearr[i] * pow(256, i)
return num_le
def rev_endian(num):
"""reorders bytes in number"""
num_str = f"{num:x}"
# yapf: disable
num_ba = ([int('0x' + num_str[i:i + 2], 16) for i in range(0, len(num_str) - 1, 2)])
# yapf: enable
return get_num_le(num_ba)
def get_machine_type(header):
"""get the architecture of the investigated file"""
if len(header) < PE_OFFSET + 1:
return "unknown"
PE_POINTER = header[PE_OFFSET]
FH_POINTER = PE_POINTER + 4
if len(header) < FH_POINTER + 3:
return "unknown"
machine_type = header[FH_POINTER : FH_POINTER + 2 :]
type_value = get_num_le(machine_type)
if type_value == IMAGE_FILE_MACHINE_I386:
return "x86"
if type_value == IMAGE_FILE_MACHINE_IA64:
return "x64"
return "unknown"
def check_subsystem(header):
"""get the subsystem of the investigated file"""
if len(header) < PE_OFFSET + 1:
return False
PE_POINTER = header[PE_OFFSET]
if len(header) < PE_POINTER + 0x5D:
return False
subsystem = header[PE_POINTER + 0x5C]
return (
subsystem == IMAGE_SUBSYSTEM_EFI_APPLICATION
or subsystem == IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER
or subsystem == IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER
)
def get_header_idb():
"""get file header from idb"""
if idc.get_segm_name(0) == "HEADER":
header = bytearray(
[idc.get_wide_byte(ea) for ea in range(0, idc.get_segm_end(0))]
)
return header
return bytearray(b"")
def get_header_file():
"""get file header from analysing file"""
buf = bytes()
if os.path.isfile(idaapi.get_input_file_path()):
with open(idaapi.get_input_file_path(), "rb") as f:
buf = f.read(512)
return bytearray(buf)
def get_dep_json(res_json):
"""get json for dependency browser and dependency graph"""
CLIENT_PROTOCOL_SERVICES = ("LocateProtocol", "OpenProtocol")
dep_json = list()
for module_info in res_json:
for protocol in module_info["protocols"]:
if (
protocol["service"] == "InstallProtocolInterface"
or protocol["service"] == "InstallMultipleProtocolInterfaces"
):
dep_json_item = {
"module_name": module_info["module_name"],
"protocol_name": protocol["protocol_name"],
"guid": protocol["guid"],
"service": protocol["service"],
}
dep_json_item["used_by"] = list()
for module_info in res_json:
for protocol in module_info["protocols"]:
if (
protocol["service"] in CLIENT_PROTOCOL_SERVICES
and protocol["guid"] == dep_json_item["guid"]
):
dep_json_item["used_by"].append(module_info["module_name"])
dep_json.append(dep_json_item)
return dep_json
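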
|
dataset/convert_coco_to_tfrecords.py | rickyHong/Light-Head-RCNN-enhanced-Xdetector | 116 | 12659877 | <gh_stars>100-1000
from pycocotools.coco import COCO
import os
import sys
import random
import numpy as np
import skimage.io as io
import scipy
import tensorflow as tf
from dataset_utils import int64_feature, float_feature, bytes_feature
# TFRecords conversion parameters.
SAMPLES_PER_FILES = 5000
class CoCoDataset(object):
def __init__(self, dataset_dir, image_set='val2017'):
super(CoCoDataset, self).__init__()
self._image_set = image_set
self._ann_file = self.get_ann_file(dataset_dir, self._image_set)
self._filename_pattern = self.get_image_file_pattern(dataset_dir, self._image_set) + '{}'
self._coco = COCO(self._ann_file)
self._cats = self._coco.loadCats(self._coco.getCatIds())
self._classes = tuple(['none'] + [c['name'] for c in self._cats])
self._num_classes = len(self._classes)
self._class_to_ind = dict(zip(self._classes, list(range(self._num_classes))))
self._ind_to_class = dict(zip(list(range(self._num_classes)), self._classes))
self._super_classes = tuple(['background'] + [c['supercategory'] for c in self._cats])
self._class_to_coco_cat_id = dict(zip([c['name'] for c in self._cats], self._coco.getCatIds()))
self._labels = {'none': (0, 'background'),}
for ind, cls in enumerate(self._classes[1:]):
self._labels[cls] = (self._class_to_ind[cls], self._super_classes[ind + 1])
self._image_index = self._coco.getImgIds()
self._num_examples = len(self._image_index)
def get_ann_file(self, dataset_dir, data_type):
return '{}/annotations/instances_{}.json'.format(dataset_dir, data_type)
def get_image_file_pattern(self, dataset_dir, data_type):
return '{}/{}/'.format(dataset_dir, data_type)
def validate_boxes(self, boxes, width=0, height=0):
"""Check that a set of boxes are valid."""
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
assert (x1 >= 0).all()
assert (y1 >= 0).all()
assert (x2 >= x1).all()
assert (y2 >= y1).all()
assert (x2 < width).all()
assert (y2 < height).all()
def _load_coco_annotation(self, index):
"""
        Loads COCO bounding-box instance annotations. Crowd instances are
        skipped for the training and validation splits so that they are
        excluded from training.
"""
im_ann = self._coco.loadImgs(index)[0]
filaname = im_ann['file_name']
width = im_ann['width']
height = im_ann['height']
annIds = self._coco.getAnnIds(imgIds=index, iscrowd=None)
objs = self._coco.loadAnns(annIds)
# Sanitize bboxes -- some are invalid
valid_objs = []
for obj in objs:
x1 = np.max((0, obj['bbox'][0]))
y1 = np.max((0, obj['bbox'][1]))
x2 = np.min((width - 1, x1 + np.max((0, obj['bbox'][2] - 1))))
y2 = np.min((height - 1, y1 + np.max((0, obj['bbox'][3] - 1))))
if obj['iscrowd'] and (self._image_set == 'train2017' or self._image_set == 'val2017'):
continue
if obj['area'] > 0 and x2 >= x1 and y2 >= y1:
#obj['clean_bbox'] = [x1, y1, x2, y2]
obj['clean_bbox'] = [y1/height, x1/width, y2/height, x2/width]
valid_objs.append(obj)
objs = valid_objs
num_objs = len(objs)
has_boxes = 1
if num_objs == 0:
has_boxes = 0
boxes = np.zeros((num_objs, 4), dtype=np.float32)
gt_classes = np.zeros((num_objs), dtype=np.int32)
gt_iscrowd = np.zeros((num_objs), dtype=np.int32)
seg_areas = np.zeros((num_objs), dtype=np.float32)
# Lookup table to map from COCO category ids to our internal class
# indices
coco_cat_id_to_class_ind = dict([(self._class_to_coco_cat_id[cls],
self._class_to_ind[cls])
for cls in self._classes[1:]])
for ix, obj in enumerate(objs):
cls = coco_cat_id_to_class_ind[obj['category_id']]
boxes[ix, :] = obj['clean_bbox']
gt_classes[ix] = cls
seg_areas[ix] = obj['area']
if obj['iscrowd']:
gt_iscrowd[ix] = 1
self.validate_boxes(boxes, width=width, height=height)
return {'filaname' : filaname,
'boxes' : boxes,
'shape' : (height, width),
'gt_classes': gt_classes,
'gt_iscrowd' : gt_iscrowd,
'has_boxes': has_boxes}
def _get_statistic(self):
class_name_list = ['none', 'total']
class_name_list.extend([_ for _ in self._classes[1:]])
stat_by_obj = dict(zip(class_name_list, [0]*len(class_name_list)))
stat_by_image = dict(zip(class_name_list, [0]*len(class_name_list)))
for index in self._image_index:
im_ann = self._coco.loadImgs(index)[0]
width = im_ann['width']
height = im_ann['height']
annIds = self._coco.getAnnIds(imgIds=index, iscrowd=None)
objs = self._coco.loadAnns(annIds)
# Sanitize bboxes -- some are invalid
valid_objs = []
for obj in objs:
x1 = np.max((0, obj['bbox'][0]))
y1 = np.max((0, obj['bbox'][1]))
x2 = np.min((width - 1, x1 + np.max((0, obj['bbox'][2] - 1))))
y2 = np.min((height - 1, y1 + np.max((0, obj['bbox'][3] - 1))))
if obj['iscrowd'] and (self._image_set == 'train' or self._image_set == 'trainval'):
continue
if obj['area'] > 0 and x2 >= x1 and y2 >= y1:
valid_objs.append(obj)
objs = valid_objs
num_objs = len(objs)
coco_cat_id_to_name = dict(zip(self._coco.getCatIds(), [c['name'] for c in self._cats]))
cls_in_image_list = {}
for ix, obj in enumerate(objs):
cls = coco_cat_id_to_name[obj['category_id']]
stat_by_obj[cls] += 1
stat_by_obj['total'] += 1
cls_in_image_list[cls] = 0
            for key in cls_in_image_list.keys():
                # each class present in the image is counted once per image
                stat_by_image[key] += 1
                stat_by_image['total'] += 1
statistics = dict(zip(class_name_list, [(stat_by_image[cls_name], stat_by_obj[cls_name]) for cls_name in class_name_list]))
return statistics
# d = CoCoDataset(dataDir, dataType)
# STS = d._get_statistic()
# for k, v in STS.items():
# print('"%s": '%k, v, ',')
# print('ok')
# for k, v in d._labels.items():
# print('"%s": '%k, v, ',')
#print(len(d._image_index))
#print([d._load_coco_annotation(index) for index in d._image_index])
# {'filaname' : filaname,
# 'boxes' : boxes,
# 'shape' : (height, width),
# 'gt_classes': gt_classes,
# 'gt_iscrowd' : gt_iscrowd,
# 'has_boxes': has_boxes}
# boxes = np.zeros((num_objs, 4), dtype=np.float32)
# gt_classes = np.zeros((num_objs), dtype=np.int32)
# gt_iscrowd = np.zeros((num_objs), dtype=np.int32)
# seg_areas = np.zeros((num_objs), dtype=np.float32)
def _process_image(filename_pattern, ann_dict):
"""Process a image and annotation file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
filename = filename_pattern.format(ann_dict['filaname'])
image_data = tf.gfile.FastGFile(filename, 'rb').read()
# Find annotations.
bboxes = []
labels = []
iscrowd = []
for index in range(ann_dict['boxes'].shape[0]):
labels.append(int(ann_dict['gt_classes'][index]))
iscrowd.append(int(ann_dict['gt_iscrowd'][index]))
        bboxes.append((ann_dict['boxes'][index, 0], ann_dict['boxes'][index, 1],
                       ann_dict['boxes'][index, 2], ann_dict['boxes'][index, 3]))
return image_data, ann_dict['shape'], bboxes, labels, iscrowd
def _convert_to_example(image_data, labels, bboxes, shape, iscrowd):
"""Build an Example proto for an image example.
Args:
image_data: string, JPEG encoding of RGB image;
labels: list of integers, identifier for the ground truth;
        bboxes: list of bounding boxes; each box is a tuple of normalized floats
            specifying [ymin, xmin, ymax, xmax], matching the label at the same index.
shape: 3 integers, image shapes in pixels.
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bboxes:
assert len(b) == 4
# pylint: disable=expression-not-assigned
# [(ymin_0, xmin_0, ymax_0, xmax_0), (ymin_1, xmin_1, ymax_1, xmax_1), ....]
# |
# [ymin_0, ymin_1, ...], [xmin_0, xmin_1, ...], [ymax_0, ymax_1, ...], [xmax_0, xmax_1, ...]
[l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]
# pylint: enable=expression-not-assigned
image_format = b'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': int64_feature(shape[0]),
'image/width': int64_feature(shape[1]),
'image/channels': int64_feature(3),
'image/shape': int64_feature([shape[0], shape[1], 3]),
'image/object/bbox/xmin': float_feature(xmin),
'image/object/bbox/xmax': float_feature(xmax),
'image/object/bbox/ymin': float_feature(ymin),
'image/object/bbox/ymax': float_feature(ymax),
'image/object/bbox/label': int64_feature(labels),
'image/object/bbox/iscrowd': int64_feature(iscrowd),
'image/format': bytes_feature(image_format),
'image/encoded': bytes_feature(image_data)}))
return example
def _add_to_tfrecord(filename_pattern, ann_dict, tfrecord_writer):
image_data, shape, bboxes, labels, iscrowd = _process_image(filename_pattern, ann_dict)
example = _convert_to_example(image_data, labels, bboxes, shape, iscrowd)
tfrecord_writer.write(example.SerializeToString())
def _get_output_filename(output_dir, name, idx):
return os.path.join(output_dir, '%s_%03d.tfrecord' % (name, idx))
def run(dataset_dir, output_dir, output_name, name='train2017'):
coco_dataset = CoCoDataset(dataset_dir, name)
num_examples = coco_dataset._num_examples
# Process dataset files.
i = 0
fidx = 0
while True:
# Open new TFRecord file.
tf_filename = _get_output_filename(output_dir, output_name, fidx)
with tf.python_io.TFRecordWriter(tf_filename) as tfrecord_writer:
j = 0
while i < num_examples and j < SAMPLES_PER_FILES:
sys.stdout.write('\r>> Converting image %d/%d' % (i+1, num_examples))
sys.stdout.flush()
ann_dict = coco_dataset._load_coco_annotation(coco_dataset._image_index[i])
_add_to_tfrecord(coco_dataset._filename_pattern, ann_dict, tfrecord_writer)
i += 1
j += 1
fidx += 1
if not i < num_examples:
break
print('\nFinished converting the CoCo dataset!')
if __name__ == '__main__':
split_name = 'train2017' # 'train2017' or 'val2017'
output_name = 'coco_{}'.format(split_name)
dataset_dir = '/media/rs/7A0EE8880EE83EAF/Detections/CoCo'
output_dir = '../CoCo/tfrecords/{}/'.format(split_name)
run(dataset_dir, output_dir, output_name, split_name)
split_name = 'val2017' # 'train2017' or 'val2017'
output_name = 'coco_{}'.format(split_name)
dataset_dir = '/media/rs/7A0EE8880EE83EAF/Detections/CoCo'
output_dir = '../CoCo/tfrecords/{}/'.format(split_name)
run(dataset_dir, output_dir, output_name, split_name)
|
commons/validation.py | Bermuhz/DataMiningCompetitionFirstPrize | 128 | 12659884 | <gh_stars>100-1000
from commons import variables
def validate(prediction_y_list, actual_y_list):
right_num_dict = {}
prediction_num_dict = {}
actual_num_dict = {}
for (p_y, a_y) in zip(prediction_y_list, actual_y_list):
if not prediction_num_dict.has_key(p_y):
prediction_num_dict[p_y] = 0
prediction_num_dict[p_y] += 1
if not actual_num_dict.has_key(a_y):
actual_num_dict[a_y] = 0
actual_num_dict[a_y] += 1
if p_y == a_y:
if not right_num_dict.has_key(p_y):
right_num_dict[p_y] = 0
right_num_dict[p_y] += 1
return right_num_dict,prediction_num_dict,actual_num_dict
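# A follow-up sketch (not part of the original module) showing how the three returned
# dictionaries are typically combined into per-class precision and recall:
def precision_recall(right_num_dict, prediction_num_dict, actual_num_dict):
    scores = {}
    for label in actual_num_dict:
        right = right_num_dict.get(label, 0)
        precision = float(right) / max(prediction_num_dict.get(label, 0), 1)
        recall = float(right) / max(actual_num_dict[label], 1)
        scores[label] = (precision, recall)
    return scores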
|
seleniumbase/translate/__init__.py | AndriiMykytiuk/SeleniumBase | 2,745 | 12659893 | <reponame>AndriiMykytiuk/SeleniumBase
from seleniumbase.translate import chinese # noqa
from seleniumbase.translate import dutch # noqa
from seleniumbase.translate import french # noqa
from seleniumbase.translate import italian # noqa
from seleniumbase.translate import japanese # noqa
from seleniumbase.translate import korean # noqa
from seleniumbase.translate import portuguese # noqa
from seleniumbase.translate import russian # noqa
from seleniumbase.translate import spanish # noqa
|
extend/upload2oss.py | fengzhongye/darknet_captcha | 348 | 12659930 | <gh_stars>100-1000
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import oss2
import os
import fire
class OSSHandle(object):
def __init__(self):
        # Get the access credentials from environment variables
AccessKeyId = os.getenv("AccessKeyId")
AccessKeySecret = os.getenv("AccessKeySecret")
BucketName = os.getenv("BucketName")
        # Endpoint of the region where the bucket is located
endpoint = 'oss-cn-shenzhen.aliyuncs.com'
        # Create the auth and bucket objects
auth = oss2.Auth(AccessKeyId, AccessKeySecret)
self.bucket = oss2.Bucket(auth, endpoint, BucketName)
    # The object name (key) in the bucket, e.g. story.txt
def upload_by_path(self, key):
        # Upload
with open(key, 'rb') as f:
file_name = key.split("/")[-1]
print(file_name)
self.bucket.put_object(file_name, f)
def upload_by_bytes(self, key, content):
        # Upload
self.bucket.put_object(key, content)
def download_file(self, key):
        # Download
self.bucket.get_object(key).read()
def delete_file(self, key):
        # Delete
self.bucket.delete_object(key)
def list_file(self):
        # Iterate over all objects in the bucket
for object_info in oss2.ObjectIterator(self.bucket):
print(object_info.key)
def main(path):
oss = OSSHandle()
oss.upload_by_path(path)
if __name__ == '__main__':
fire.Fire(main)
|
droidlet/interpreter/robot/__init__.py | ali-senguel/fairo | 669 | 12659974 | <reponame>ali-senguel/fairo
from .loco_interpreter import LocoInterpreter
from .get_memory_handler import LocoGetMemoryHandler
from .put_memory_handler import PutMemoryHandler
__all__ = [LocoGetMemoryHandler, PutMemoryHandler, LocoInterpreter]
|
prjxray/node_model.py | common-config-bot/prjxray | 583 | 12659991 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
class NodeModel():
""" Node lookup model
Terminology:
Wire - A segment of metal in a tile
Node - A connected set of wires
This class can provide a list of nodes, the wires in a node and the node
    that a wire belongs to.
The name of node is always the name of one wire in the node.
It is recommended that this class be constructed by calling
Database.node_model rather than constructing this class directly.
"""
def __init__(
self, grid, connections, tile_wires, node_wires, progressbar=None):
self.grid = grid
self.connections = connections
self.tile_wires = tile_wires
self.specific_node_wires = set(node_wires['specific_node_wires'])
node_pattern_wires = node_wires['node_pattern_wires']
self.node_pattern_wires = {}
for tile_type in node_pattern_wires:
assert tile_type not in self.node_pattern_wires
self.node_pattern_wires[tile_type] = set(
node_pattern_wires[tile_type])
for tile_type in self.tile_wires:
if tile_type not in self.node_pattern_wires:
self.node_pattern_wires[tile_type] = set()
self.nodes = None
self.wire_to_node_map = None
if progressbar is None:
self.progressbar = lambda x: x
else:
self.progressbar = progressbar
def _build_nodes(self):
tile_wire_map = {}
wires = {}
flat_wires = []
for tile in self.progressbar(self.grid.tiles()):
gridinfo = self.grid.gridinfo_at_tilename(tile)
tile_type = gridinfo.tile_type
for wire in self.tile_wires[tile_type]:
wire_pkey = len(flat_wires)
tile_wire_map[(tile, wire)] = wire_pkey
flat_wires.append((tile, wire))
wires[wire_pkey] = None
for connection in self.progressbar(self.connections.get_connections()):
a_pkey = tile_wire_map[(
connection.wire_a.tile, connection.wire_a.wire)]
b_pkey = tile_wire_map[(
connection.wire_b.tile, connection.wire_b.wire)]
a_node = wires[a_pkey]
b_node = wires[b_pkey]
if a_node is None:
a_node = set((a_pkey, ))
if b_node is None:
b_node = set((b_pkey, ))
if a_node is not b_node:
a_node |= b_node
for wire in a_node:
wires[wire] = a_node
nodes = {}
for wire_pkey, node in self.progressbar(wires.items()):
if node is None:
node = set((wire_pkey, ))
assert wire_pkey in node
nodes[id(node)] = node
def get_node_wire_for_wires(wire_pkeys):
if len(wire_pkeys) == 1:
for wire_pkey in wire_pkeys:
return flat_wires[wire_pkey]
for wire_pkey in wire_pkeys:
tile, wire = flat_wires[wire_pkey]
if '{}/{}'.format(tile, wire) in self.specific_node_wires:
return tile, wire
for wire_pkey in wire_pkeys:
tile, wire = flat_wires[wire_pkey]
gridinfo = self.grid.gridinfo_at_tilename(tile)
if wire in self.node_pattern_wires[gridinfo.tile_type]:
return tile, wire
return None
self.nodes = {}
for node_wire_pkeys in self.progressbar(nodes.values()):
node_wire = get_node_wire_for_wires(node_wire_pkeys)
if node_wire is None:
continue
self.nodes[node_wire] = [
flat_wires[wire_pkey] for wire_pkey in node_wire_pkeys
]
def get_nodes(self):
""" Return a set of node names. """
if self.nodes is None:
self._build_nodes()
return self.nodes.keys()
def get_wires_for_node(self, tile, wire):
""" Get wires in node named for specified tile and wire. """
if self.nodes is None:
self._build_nodes()
return self.nodes[tile, wire]
def _build_wire_to_node_map(self):
self.wire_to_node_map = {}
if self.nodes is None:
self._build_nodes()
for node, wires in self.nodes.items():
for tile_wire in wires:
assert tile_wire not in self.wire_to_node_map
self.wire_to_node_map[tile_wire] = node
def get_node_for_wire(self, tile, wire):
""" Get node for specified tile and wire. """
if self.wire_to_node_map is None:
self._build_wire_to_node_map()
return self.wire_to_node_map[tile, wire]
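# A minimal usage sketch; `node_model` is assumed to come from Database.node_model()
# as recommended in the class docstring (the factory's exact signature is not shown here).
def _node_model_example(node_model):
    for tile, wire in node_model.get_nodes():
        wires = node_model.get_wires_for_node(tile, wire)
        assert (tile, wire) in wires
        assert node_model.get_node_for_wire(tile, wire) == (tile, wire)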
|
lib/termineter/modules/set_meter_id.py | jayaram24/Termineter-Modified | 185 | 12660019 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# termineter/modules/set_meter_id.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import unicode_literals
from c1219.access.general import C1219GeneralAccess
from termineter.module import TermineterModuleOptical
class Module(TermineterModuleOptical):
def __init__(self, *args, **kwargs):
TermineterModuleOptical.__init__(self, *args, **kwargs)
self.author = ['<NAME>']
self.description = 'Set The Meter\'s I.D.'
        self.detailed_description = 'This module will overwrite the Smart Meter\'s device ID with the new value specified in METER_ID.'
self.options.add_string('METER_ID', 'value to set the meter id to', True)
def run(self):
conn = self.frmwk.serial_connection
logger = self.logger
meter_id = self.options['METER_ID']
gen_ctl = C1219GeneralAccess(conn)
if gen_ctl.id_form == 0:
logger.info('device id stored in 20 byte string')
if len(meter_id) > 20:
self.frmwk.print_error('METER_ID length exceeds the allowed 20 bytes')
return
else:
logger.info('device id stored in BCD(10)')
if len(meter_id) > 10:
self.frmwk.print_error('METER_ID length exceeds the allowed 10 bytes')
return
if gen_ctl.set_device_id(meter_id):
self.frmwk.print_error('Could not set the Meter\'s ID')
else:
self.frmwk.print_status('Successfully updated the Meter\'s ID to: ' + meter_id)
|
mailthon/response.py | seantis/mailthon | 230 | 12660032 | <reponame>seantis/mailthon
"""
mailthon.response
~~~~~~~~~~~~~~~~~
Response objects encapsulate responses returned
by SMTP servers.
:copyright: (c) 2015 by <NAME>
:license: MIT, see LICENSE for details.
"""
from collections import namedtuple
_ResponseBase = namedtuple('Response', ['status_code', 'reason'])
class Response(_ResponseBase):
"""
    Encapsulates a (status_code, reason) tuple
returned by a server when the ``NOOP``
command is called.
:param status_code: status code returned by server.
    :param reason: error/success message.
"""
@property
def ok(self):
"""
Returns true if the status code is 250, false
otherwise.
"""
return self.status_code == 250
class SendmailResponse:
"""
Encapsulates a (status_code, reason) tuple
as well as a mapping of email-address to
(status_code, reason) tuples that can be
attained by the NOOP and the SENDMAIL
command.
    :param status_code: The status code of the overall response.
    :param reason: The reason/message of the overall response.
:param rejected: Dictionary of rejected
addresses to status-code reason pairs.
"""
def __init__(self, status_code, reason, rejected):
self.res = Response(status_code, reason)
self.rejected = {}
for addr, pair in rejected.items():
self.rejected[addr] = Response(*pair)
@property
def ok(self):
"""
Returns True only if no addresses were
rejected and if the status code is 250.
"""
return self.res.ok and not self.rejected
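# A short usage sketch (illustrative status codes and addresses):
def _response_example():
    ok = Response(250, b'2.0.0 OK')
    assert ok.ok
    sent = SendmailResponse(250, b'2.0.0 OK',
                            rejected={'bad@example.com': (550, b'mailbox unavailable')})
    assert not sent.ok  # any rejected recipient makes the overall result not ok
    return sent.rejected['bad@example.com'].status_code  # 550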
|
terrascript/provider/clc.py | hugovk/python-terrascript | 507 | 12660036 | <gh_stars>100-1000
# terrascript/provider/clc.py
import terrascript
class clc(terrascript.Provider):
pass
__all__ = ["clc"]
|
metadata-ingestion/src/datahub/ingestion/source/iceberg/iceberg_profiler.py | ShubhamThakre/datahub | 1,603 | 12660039 | <reponame>ShubhamThakre/datahub
from datetime import datetime, timedelta
from typing import Any, Callable, Dict, Iterable, Union, cast
from iceberg.api import types as IcebergTypes
from iceberg.api.data_file import DataFile
from iceberg.api.manifest_file import ManifestFile
from iceberg.api.schema import Schema
from iceberg.api.snapshot import Snapshot
from iceberg.api.table import Table
from iceberg.api.types import Conversions, NestedField, Type, TypeID
from iceberg.core.base_table import BaseTable
from iceberg.core.filesystem import FileSystemInputFile
from iceberg.core.manifest_reader import ManifestReader
from iceberg.exceptions.exceptions import FileSystemNotFound
from datahub.emitter.mce_builder import get_sys_time
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.ingestion.api.workunit import MetadataWorkUnit
from datahub.ingestion.source.iceberg.iceberg_common import (
IcebergProfilingConfig,
IcebergSourceReport,
)
from datahub.metadata.schema_classes import (
ChangeTypeClass,
DatasetFieldProfileClass,
DatasetProfileClass,
)
class IcebergProfiler:
def __init__(
self,
report: IcebergSourceReport,
config: IcebergProfilingConfig,
) -> None:
self.report: IcebergSourceReport = report
self.config: IcebergProfilingConfig = config
self.platform: str = "iceberg"
def _aggregate_counts(
self,
aggregated_count: Dict[int, int],
manifest_counts: Dict[int, int],
) -> Dict[int, int]:
return {
k: aggregated_count.get(k, 0) + manifest_counts.get(k, 0)
for k in set(aggregated_count) | set(manifest_counts)
}
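        # e.g. merging {1: 4, 2: 1} with {1: 3, 5: 2} gives {1: 7, 2: 1, 5: 2}: counts for
        # field ids present in both dicts are summed, all other field ids are carried over.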
def _aggregate_bounds(
self,
schema: Schema,
aggregator: Callable,
aggregated_values: Dict[int, Any],
manifest_values: Dict[int, Any],
) -> None:
for field_id, value_encoded in manifest_values.items(): # type: int, Any
field: NestedField = schema.find_field(field_id)
# Bounds in manifests can reference historical field IDs that are not part of the current schema.
            # We simply do not profile those since we only care about the current snapshot.
if field and IcebergProfiler._is_numeric_type(field.type):
value_decoded = Conversions.from_byte_buffer(field.type, value_encoded)
if value_decoded:
agg_value = aggregated_values.get(field_id)
aggregated_values[field_id] = (
aggregator(agg_value, value_decoded)
if agg_value
else value_decoded
)
def profile_table(
self,
dataset_name: str,
dataset_urn: str,
table: Table,
) -> Iterable[MetadataWorkUnit]:
"""This method will profile the supplied Iceberg table by looking at the table's manifest.
The overall profile of the table is aggregated from the individual manifest files.
We can extract the following from those manifests:
- "field minimum values"
- "field maximum values"
- "field null occurences"
"field distinct value occurences" cannot be computed since the 'value_counts' only apply for
a manifest, making those values innacurate. For example, if manifest A has 2 unique values
and manifest B has 1, it is possible that the value in B is also in A, hence making the total
number of unique values 2 and not 3.
Args:
dataset_name (str): dataset name of the table to profile, mainly used in error reporting
dataset_urn (str): dataset urn of the table to profile
table (Table): Iceberg table to profile.
Raises:
Exception: Occurs when a table manifest cannot be loaded.
Yields:
Iterator[Iterable[MetadataWorkUnit]]: Workunits related to datasetProfile.
"""
if not table.snapshots() or not isinstance(table, BaseTable):
# Table has no data, cannot profile, or we can't get current_snapshot.
return
row_count: int = int(table.current_snapshot().summary["total-records"])
column_count: int = len(table.schema()._id_to_name)
dataset_profile = DatasetProfileClass(
timestampMillis=get_sys_time(),
rowCount=row_count,
columnCount=column_count,
)
dataset_profile.fieldProfiles = []
field_paths: Dict[int, str] = table.schema()._id_to_name
current_snapshot: Snapshot = table.current_snapshot()
total_count: int = 0
null_counts: Dict[int, int] = {}
min_bounds: Dict[int, Any] = {}
max_bounds: Dict[int, Any] = {}
manifest: ManifestFile
try:
for manifest in current_snapshot.manifests:
manifest_input_file = FileSystemInputFile.from_location(
manifest.manifest_path, table.ops.conf
)
manifest_reader = ManifestReader.read(manifest_input_file)
data_file: DataFile
for data_file in manifest_reader.iterator():
if self.config.include_field_null_count:
null_counts = self._aggregate_counts(
null_counts, data_file.null_value_counts()
)
if self.config.include_field_min_value:
self._aggregate_bounds(
table.schema(),
min,
min_bounds,
data_file.lower_bounds(),
)
if self.config.include_field_max_value:
self._aggregate_bounds(
table.schema(),
max,
max_bounds,
data_file.upper_bounds(),
)
total_count += data_file.record_count()
# TODO Work on error handling to provide better feedback. Iceberg exceptions are weak...
except FileSystemNotFound as e:
raise Exception("Error loading table manifests") from e
if row_count:
# Iterating through fieldPaths introduces unwanted stats for list element fields...
for field_id, field_path in field_paths.items():
field: NestedField = table.schema().find_field(field_id)
column_profile = DatasetFieldProfileClass(fieldPath=field_path)
if self.config.include_field_null_count:
column_profile.nullCount = cast(int, null_counts.get(field_id, 0))
column_profile.nullProportion = float(
column_profile.nullCount / row_count
)
if self.config.include_field_min_value:
column_profile.min = (
self._renderValue(
dataset_name, field.type, min_bounds.get(field_id)
)
if field_id in min_bounds
else None
)
if self.config.include_field_max_value:
column_profile.max = (
self._renderValue(
dataset_name, field.type, max_bounds.get(field_id)
)
if field_id in max_bounds
else None
)
dataset_profile.fieldProfiles.append(column_profile)
# https://github.com/linkedin/datahub/blob/599edd22aeb6b17c71e863587f606c73b87e3b58/metadata-ingestion/src/datahub/ingestion/source/sql/sql_common.py#L829
mcp = MetadataChangeProposalWrapper(
entityType="dataset",
entityUrn=dataset_urn,
changeType=ChangeTypeClass.UPSERT,
aspectName="datasetProfile",
aspect=dataset_profile,
)
wu = MetadataWorkUnit(id=f"profile-{dataset_name}", mcp=mcp)
self.report.report_workunit(wu)
self.report.report_entity_profiled(dataset_name)
yield wu
# The following will eventually be done by the Iceberg API (in the new Python refactored API).
def _renderValue(
self, dataset_name: str, value_type: Type, value: Any
) -> Union[str, None]:
try:
if value_type.type_id == TypeID.TIMESTAMP:
if value_type.adjust_to_utc:
# TODO Deal with utc when required
microsecond_unix_ts = value
else:
microsecond_unix_ts = value
return datetime.fromtimestamp(microsecond_unix_ts / 1000000.0).strftime(
"%Y-%m-%d %H:%M:%S"
)
elif value_type.type_id == TypeID.DATE:
return (datetime(1970, 1, 1, 0, 0) + timedelta(value - 1)).strftime(
"%Y-%m-%d"
)
return str(value)
except Exception as e:
self.report.report_warning(
"profiling",
f"Error in dataset {dataset_name} when profiling a {value_type} field with value {value}: {e}",
)
return None
@staticmethod
def _is_numeric_type(type: Type) -> bool:
return isinstance(
type,
(
IcebergTypes.DateType,
IcebergTypes.DecimalType,
IcebergTypes.DoubleType,
IcebergTypes.FloatType,
IcebergTypes.IntegerType,
IcebergTypes.LongType,
IcebergTypes.TimestampType,
IcebergTypes.TimeType,
),
)
|
src/ploomber/tasks/_params.py | rehman000/ploomber | 2,141 | 12660040 | <gh_stars>1000+
import copy as copy_module
from collections import abc
class Params(abc.MutableMapping):
"""
Read-only mapping to represent params passed in Task constructor. It
initializes with a copy of the passed dictionary. It verifies that the
dictionary does not have a key "upstream" nor "product" because they'd
clash with the ones added upon Task rendering
"""
def __init__(self, params=None):
if params is None:
self._dict = {}
else:
if not isinstance(params, abc.Mapping):
raise TypeError('Params must be initialized '
f'with a mapping, got: {params!r} '
f'({type(params).__name__!r})')
if 'upstream' in params:
raise ValueError('Task params cannot be initialized with an '
'"upstream" key as it automatically added '
'upon rendering')
if 'product' in params:
                raise ValueError('Task params cannot be initialized with a '
                                 '"product" key as it is automatically added '
'upon rendering')
self._dict = copy_module.copy(params)
@classmethod
def _from_dict(cls, params, copy=True):
"""
Private API for initializing Params objects with arbitrary dictionary
"""
obj = cls(params=None)
if copy:
obj._dict = copy_module.copy(params)
else:
obj._dict = params
return obj
def _setitem(self, key, value):
"""Private method for updating the underlying data
"""
self._dict[key] = value
def to_dict(self):
# NOTE: do we need this?
return copy_module.copy(self._dict)
def to_json_serializable(self, params_only=False):
"""
Converts params into a dictionary
Parameters
----------
params_only : bool, default=False
If True, it only returns user params, excluding 'upstream' and
'product'
"""
out = self.to_dict()
if params_only:
out.pop('product', None)
out.pop('upstream', None)
elif 'upstream' in out:
out['upstream'] = out['upstream'].to_json_serializable()
return out
def __getitem__(self, key):
try:
return self._dict[key]
except KeyError:
raise KeyError('Cannot obtain Task param named '
'"{}", declared params are: {}'.format(
key, list(self._dict.keys())))
def __setitem__(self, key, value):
raise RuntimeError('Task params are read-only, if you need a copy'
' use Params.to_dict() (returns a shallow copy)'
' of the underlying dictionary')
def __iter__(self):
for name in self._dict.keys():
yield name
def __len__(self):
return len(self._dict)
def __str__(self):
return str(self._dict)
def __repr__(self):
return 'Params({})'.format(repr(self._dict))
def get(self, key):
return self._dict.get(key)
def __delitem__(self, key):
del self._dict[key]
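# A short usage sketch (illustrative values only):
def _params_example():
    params = Params({'path': 'data.csv'})
    assert params['path'] == 'data.csv'
    assert params.to_dict() == {'path': 'data.csv'}
    try:
        params['path'] = 'other.csv'  # Params is read-only, so item assignment raises
    except RuntimeError:
        pass
    try:
        Params({'upstream': None})  # reserved keys are rejected at construction time
    except ValueError:
        pass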
|
src/classifier/model_lib/char_cnn/char_cnn_keras.py | LeslieLeung/2c | 236 | 12660056 | <gh_stars>100-1000
#!/usr/bin/env python
"""
Created by howie.hu at 2021/4/25.
    Description: model implementation
Changelog: all notable changes to this file will be documented
"""
from keras import layers
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.models import Sequential
from src.classifier.model_lib.char_cnn.keras_utils import FitCallback
class CharCNN:
def __init__(
self,
*,
conv_layers: list = None,
fully_layers: list = None,
input_size: int = 1014,
alphabet_size: int = 69,
num_of_classes: int = 4,
dropout_p: float = 0.5,
threshold: float = 1e-6,
loss="categorical_crossentropy",
optimizer="adam",
):
"""
        Character-level convolutional neural network built on Keras
        :param conv_layers: convolutional layer configuration
        :param fully_layers: fully connected layer configuration
        :param input_size: input length, 1014 in the paper
        :param alphabet_size: size of the alphabet
        :param num_of_classes: number of classes
        :param dropout_p: dropout probability
        :param threshold: threshold value
        :param loss: loss function
        :param optimizer: optimizer
"""
        # Convolutional layer definitions
if conv_layers is None:
self.conv_layers = [
[256, 7, 3],
[256, 7, 3],
[256, 3, None],
[256, 3, None],
[256, 3, None],
[256, 3, 3],
]
else:
self.conv_layers = conv_layers
        # Fully connected layers
if fully_layers is None:
self.fully_layers = [1024, 1024]
else:
self.fully_layers = fully_layers
self.alphabet_size = alphabet_size
self.input_size = input_size
self.num_of_classes = num_of_classes
self.dropout_p = dropout_p
self.threshold = threshold
self.loss = loss
self.optimizer = optimizer
self.shape = (input_size, alphabet_size, 1)
self.model = self._build_model()
def _build_model(self):
"""
        Model architecture as described in the paper
:return:
"""
model = Sequential()
        # Embedding layer
model.add(
layers.Embedding(self.alphabet_size + 1, 128, input_length=self.input_size)
)
        # Convolutional layers
for cl in self.conv_layers:
model.add(layers.Conv1D(filters=cl[0], kernel_size=cl[1]))
model.add(layers.ThresholdedReLU(self.threshold))
if cl[-1] is not None:
model.add(layers.MaxPool1D(pool_size=cl[-1]))
model.add(layers.Flatten())
        # Fully connected layers
for fl in self.fully_layers:
# model.add(layers.Dense(fl, activity_regularizer=regularizers.l2(0.01)))
model.add(layers.Dense(fl))
model.add(layers.ThresholdedReLU(self.threshold))
model.add(layers.Dropout(self.dropout_p))
        # Output layer
model.add(layers.Dense(self.num_of_classes, activation="softmax"))
model.compile(optimizer=self.optimizer, loss=self.loss, metrics=["accuracy"])
print("CharCNN model built success")
model.summary()
return model
def train(
self,
*,
training_inputs,
training_labels,
validation_inputs,
validation_labels,
epochs,
batch_size,
model_file_path,
verbose=2,
checkpoint_every=100,
evaluate_every=100,
):
"""
对模型进项训练
:param training_inputs: 训练实例
:param training_labels: 训练标签
:param validation_inputs: 验证实例
:param validation_labels: 验证标签
:param epochs: 迭代周期
:param batch_size: 每次批大小
:param model_file_path:模型保存路径
:param verbose: Integer. 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per epoch.
:param checkpoint_every: 每多少次进行 checkpoint
:param evaluate_every: 每多少次进行 evaluate
:return:
"""
tensorboard = TensorBoard(
log_dir="./logs",
histogram_freq=checkpoint_every,
batch_size=batch_size,
write_graph=True,
write_grads=True,
write_images=True,
embeddings_freq=0,
embeddings_layer_names=None,
)
fit_callback = FitCallback(
test_data=(validation_inputs, validation_labels),
evaluate_every=evaluate_every,
)
checkpoint = ModelCheckpoint(
model_file_path,
monitor="val_loss",
verbose=1,
save_best_only=True,
mode="min",
)
        # Start training
print("Training Started ===>")
self.model.fit(
training_inputs,
training_labels,
validation_data=(validation_inputs, validation_labels),
epochs=epochs,
batch_size=batch_size,
verbose=verbose,
callbacks=[tensorboard, fit_callback, checkpoint],
)
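# A hypothetical training sketch (random data; file name and shapes are assumptions):
# inputs are integer-encoded characters of shape (num_samples, input_size) and labels
# are one-hot vectors of shape (num_samples, num_of_classes).
def _train_example():
    import numpy as np
    model = CharCNN(input_size=1014, alphabet_size=69, num_of_classes=4)
    x = np.random.randint(0, 70, size=(32, 1014))
    y = np.eye(4)[np.random.randint(0, 4, size=32)]
    model.train(
        training_inputs=x,
        training_labels=y,
        validation_inputs=x,
        validation_labels=y,
        epochs=1,
        batch_size=8,
        model_file_path="char_cnn.h5",
    )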
if __name__ == "__main__":
char_cnn_model = CharCNN()
|
Algo and DSA/LeetCode-Solutions-master/Python/generate-parentheses.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 12660127 | <reponame>Sourav692/FAANG-Interview-Preparation
# Time: O(4^n / n^(3/2)) ~= Catalan numbers
# Space: O(n)
# iterative solution
class Solution(object):
def generateParenthesis(self, n):
"""
:type n: int
:rtype: List[str]
"""
result, curr = [], []
stk = [(1, (n, n))]
while stk:
step, args = stk.pop()
if step == 1:
left, right = args
if left == 0 and right == 0:
result.append("".join(curr))
if left < right:
stk.append((3, tuple()))
stk.append((1, (left, right-1)))
stk.append((2, (')')))
if left > 0:
stk.append((3, tuple()))
stk.append((1, (left-1, right)))
stk.append((2, ('(')))
elif step == 2:
curr.append(args[0])
elif step == 3:
curr.pop()
return result
# Time: O(4^n / n^(3/2)) ~= Catalan numbers
# Space: O(n)
# recursive solution
class Solution2(object):
def generateParenthesis(self, n):
"""
:type n: int
:rtype: List[str]
"""
def generateParenthesisRecu(left, right, curr, result):
if left == 0 and right == 0:
result.append("".join(curr))
if left > 0:
curr.append('(')
generateParenthesisRecu(left-1, right, curr, result)
curr.pop()
if left < right:
curr.append(')')
generateParenthesisRecu(left, right-1, curr, result)
curr.pop()
result = []
generateParenthesisRecu(n, n, [], result)
return result
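# A quick usage sketch; the expected output order below is produced by the recursive
# solution and may differ for the iterative one.
if __name__ == "__main__":
    print(Solution2().generateParenthesis(3))  # ['((()))', '(()())', '(())()', '()(())', '()()()']
    print(Solution().generateParenthesis(3))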
|
tcfcli/cmds/configure/get/cli.py | tencentyun/scfcli | 103 | 12660146 | <reponame>tencentyun/scfcli
# -*- coding: utf-8 -*-
import click
import platform
import tcfcli.common.base_infor as infor
from tcfcli.help.message import ConfigureHelp as help
from tcfcli.common.user_config import UserConfig
from tcfcli.common.operation_msg import Operation
version = platform.python_version()
if version >= '3':
from functools import reduce
def report_info():
pass
REGIONS = infor.REGIONS
@click.command(short_help=help.GET_SHORT_HELP)
@click.option('--secret-id', '-si', is_flag=True, help=help.GET_SECRET_ID)
@click.option('--secret-key', '-sk', is_flag=True, help=help.GET_SECRET_KEY)
@click.option('--region', '-r', is_flag=True, help=help.GET_REGION)
@click.option('--appid', '-a', is_flag=True, help=help.GET_APPID)
@click.option('--using-cos', '-uc', is_flag=True, help=help.GET_USING_COS)
@click.option('--python2-path', '-p2p', is_flag=True, help=help.GET_PATHON_PATH)
@click.option('--python3-path', '-p3p', is_flag=True, help=help.GET_PATHON_PATH)
@click.option('--no-color', '-nc', is_flag=True, default=False, help=help.NOCOLOR)
def get(**kwargs):
'''
\b
Get your account parameters.
\b
Common usage:
\b
* Get the configured information
$ scf configure get
'''
uc = UserConfig()
def set_true(k):
kwargs[k] = True
Operation(uc._get_curr_user_section()).process()
bools = [v for k, v in kwargs.items()]
if not reduce(lambda x, y: bool(x or y), bools):
list(map(set_true, kwargs))
attrs = uc.get_attrs(kwargs)
#msg = "Config" #"{} config:".format(UserConfig.API)
# for section in uc.SECTION_LIST:
# for attr in attrs:
# if attr.replace("-", "_") in list(uc.section_map[section].keys()):
# attr_value = attrs[attr]
# if attr == "secret-id":
# attr_value = "*" * 32 + attr_value[32:]
# elif attr == "secret-key":
# attr_value = "*" * 28 + attr_value[28:]
# Operation("{} = {}".format(attr, attr_value), fg="cyan").process()
for section in uc.SECTION_LIST:
for key in sorted(list(uc.section_map[section].keys())):
if key in list(attrs.keys()):
attr_value = attrs[key]
if key == "secret_id":
attr_value = "*" * 32 + attr_value[32:]
elif key == "secret_key":
attr_value = "*" * 28 + attr_value[28:]
Operation("{} = {}".format(key.replace('_', '-'), attr_value), fg="cyan").process()
# for attr in sorted(attrs):
# attr_value = attrs[attr]
# if attr == "secret-id":
# attr_value = "*" * 32 + attr_value[32:]
# elif attr == "secret-key":
# attr_value = "*" * 28 + attr_value[28:]
# msg += Operation("\n[-] ", fg="cyan").style() + Operation("{} = {}".format(attr, attr_value), fg="cyan").style()
# Operation(msg.strip()).process()
|
env/lib/python3.6/site-packages/pip/_vendor/certifi/__main__.py | amogh-gulati/corona_dashboard | 9,953 | 12660148 | <filename>env/lib/python3.6/site-packages/pip/_vendor/certifi/__main__.py
from pip._vendor.certifi import where
print(where())
|
tests/test_weighted_search_vector.py | nitros12/sqlalchemy-searchable | 217 | 12660165 | <filename>tests/test_weighted_search_vector.py
import re
import sqlalchemy as sa
from sqlalchemy_utils import TSVectorType
from sqlalchemy_searchable import search
from tests import SchemaTestCase, TestCase
class WeightedBase(object):
def create_models(self):
class WeightedTextItem(self.Base):
__tablename__ = 'textitem'
id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
name = sa.Column(sa.Unicode(255))
content = sa.Column(sa.UnicodeText)
search_vector = sa.Column(
TSVectorType(
'name',
'content',
weights={'name': 'A', 'content': 'B'}
)
)
self.WeightedTextItem = WeightedTextItem
class TestCreateWeightedSearchVector(WeightedBase, SchemaTestCase):
should_create_indexes = [u'ix_textitem_search_vector']
should_create_triggers = [u'textitem_search_vector_trigger']
def test_search_function_weights(self):
func_name = 'textitem_search_vector_update'
sql = """SELECT proname,prosrc FROM pg_proc
WHERE proname='{name}';"""
name, src = self.session.execute(sql.format(name=func_name)).fetchone()
pattern = (r"setweight\(to_tsvector\(.+?"
r"coalesce\(NEW.(\w+).+?"
r"\)\), '([A-D])'\)")
first, second = (match.groups() for match in re.finditer(pattern, src))
assert first == ('name', 'A')
assert second == ('content', 'B')
class TestWeightedSearchFunction(WeightedBase, TestCase):
def setup_method(self, method):
TestCase.setup_method(self, method)
self.session.add(
self.WeightedTextItem(name=u'Gort', content=u'Klaatu barada nikto')
)
self.session.add(
self.WeightedTextItem(name=u'Klaatu', content=u'barada nikto')
)
self.session.commit()
def test_weighted_search_results(self):
query = self.session.query(self.WeightedTextItem)
first, second = search(query, 'klaatu', sort=True).all()
assert first.search_vector == "'barada':2B 'klaatu':1A 'nikto':3B"
assert (
second.search_vector ==
"'barada':3B 'gort':1A 'klaatu':2B 'nikto':4B"
)
|
src/genie/libs/parser/iosxe/tests/ShowStandbyInternal/cli/equal/golden_output_expected.py | balmasea/genieparser | 204 | 12660171 | <reponame>balmasea/genieparser
expected_output = {
"hsrp_common_process_state": "not running",
"hsrp_ha_state": "capable",
"hsrp_ipv4_process_state": "not running",
"hsrp_ipv6_process_state": "not running",
"hsrp_timer_wheel_state": "running",
"mac_address_table": {
166: {"group": 10, "interface": "gi2/0/3", "mac_address": "0000.0cff.b311"},
169: {"group": 5, "interface": "gi1/0/1", "mac_address": "0000.0cff.b30c"},
172: {"group": 0, "interface": "gi2/0/3", "mac_address": "0000.0cff.b307"},
173: {"group": 1, "interface": "gi2/0/3", "mac_address": "0000.0cff.b308"},
},
"msgQ_max_size": 0,
"msgQ_size": 0,
"v3_to_v4_transform": "disabled",
"virtual_ip_hash_table": {
"ipv6": {78: {"group": 20, "interface": "gi1", "ip": "2001:DB8:10:1:1::254"}},
"ipv4": {
103: {"group": 0, "interface": "gi1/0/1", "ip": "192.168.1.254"},
106: {"group": 10, "interface": "gi1/0/2", "ip": "192.168.2.254"},
},
},
}
|
tests/PySys/misc_features/mqtt_port_change_connection_fails/run.py | PradeepKiruvale/localworkflow | 102 | 12660173 | import sys
import time
from pysys.basetest import BaseTest
"""
Validate changing the mqtt port using the tedge command that fails without restarting the mqtt server
Given a configured system, that is configured with certificate created and registered in a cloud
When `tedge mqtt.port set` with `sudo`
When the `sudo tedge mqtt sub` tries to subscribe for a topic and fails to connect to mqtt server
When the `sudo tedge mqtt pub` tries to publish a message and fails to connect to mqtt server
"""
class MqttPortChangeConnectionFails(BaseTest):
def setup(self):
self.tedge = "/usr/bin/tedge"
self.sudo = "/usr/bin/sudo"
self.addCleanupFunction(self.mqtt_cleanup)
def execute(self):
# set a new mqtt port for local communication
mqtt_port = self.startProcess(
command=self.sudo,
arguments=[self.tedge, "config", "set", "mqtt.port", "8880"],
stdouterr="mqtt_port_set",
)
# publish a message
mqtt_pub = self.startProcess(
command=self.sudo,
arguments=[
self.tedge,
"mqtt",
"pub",
"tedge/measurements",
'{ "temperature": 25 }',
],
stdouterr="mqtt_pub",
# dont exit test if status is 1, as the error messages are needed for validation
expectedExitStatus="==1",
)
def validate(self):
self.assertGrep(
"mqtt_pub.err", "ERROR: the message has not been published", contains=True
)
self.assertGrep(
"mqtt_pub.err", "Error: failed to publish the message", contains=True
)
def mqtt_cleanup(self):
# unset a new mqtt port, falls back to default port (1883)
mqtt_port = self.startProcess(
command=self.sudo,
arguments=[self.tedge, "config", "unset", "mqtt.port"],
stdouterr="mqtt_port_unset",
)
|
dart_fss/fs/__init__.py | dveamer/dart-fss | 243 | 12660285 | <reponame>dveamer/dart-fss
# -*- coding: utf-8 -*-
from dart_fss.fs.extract import extract
from dart_fss.fs.fs import FinancialStatement
__all__ = ['extract', 'FinancialStatement'] |
qucumber/rbm/purification_rbm.py | ZvonimirBandic/QuCumber | 163 | 12660296 | # Copyright 2019 PIQuIL - All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.utils import parameters_to_vector
from qucumber.utils import cplx, auto_unsqueeze_args
from qucumber import _warn_on_missing_gpu
class PurificationRBM(nn.Module):
r"""An RBM with a hidden and "auxiliary" layer, each separately connected to the visible units
:param num_visible: The number of visible units, i.e. the size of the system
:type num_visible: int
:param num_hidden: The number of units in the hidden layer
:type num_hidden: int
:param num_aux: The number of units in the auxiliary purification layer
:type num_aux: int
:param zero_weights: Whether or not to initialize the weights to zero
:type zero_weights: bool
:param gpu: Whether to perform computations on the default gpu.
:type gpu: bool
"""
def __init__(
self, num_visible, num_hidden=None, num_aux=None, zero_weights=False, gpu=False
):
super().__init__()
self.num_visible = int(num_visible)
self.num_hidden = (
int(num_hidden) if num_hidden is not None else self.num_visible
)
self.num_aux = int(num_aux) if num_aux is not None else self.num_visible
# Parameters are:
# W: The weights of the visible-hidden edges
# U: The weights of the visible-auxiliary edges
# b: The biases of the visible nodes
        # c: The biases of the hidden nodes
# d: The biases of the auxiliary nodes
# The auxiliary bias of the phase RBM is always zero
self.num_pars = (
(self.num_visible * self.num_hidden)
+ (self.num_visible * self.num_aux)
+ self.num_visible
+ self.num_hidden
+ self.num_aux
)
_warn_on_missing_gpu(gpu)
self.gpu = gpu and torch.cuda.is_available()
self.device = torch.device("cuda") if self.gpu else torch.device("cpu")
self.initialize_parameters(zero_weights=zero_weights)
def __repr__(self):
return (
f"PurificationBinaryRBM(num_visible={self.num_visible}, "
f"num_hidden={self.num_hidden}, num_aux={self.num_aux}, gpu={self.gpu})"
)
def initialize_parameters(self, zero_weights=False):
r"""Initialize the parameters of the RBM
:param zero_weights: Whether or not to initialize the weights to zero
:type zero_weights: bool
"""
gen_tensor = torch.zeros if zero_weights else torch.randn
self.weights_W = nn.Parameter(
(
gen_tensor(
self.num_hidden,
self.num_visible,
dtype=torch.double,
device=self.device,
)
/ np.sqrt(self.num_visible)
),
requires_grad=False,
)
self.weights_U = nn.Parameter(
(
gen_tensor(
self.num_aux,
self.num_visible,
dtype=torch.double,
device=self.device,
)
/ np.sqrt(self.num_visible)
),
requires_grad=False,
)
self.visible_bias = nn.Parameter(
torch.zeros(self.num_visible, dtype=torch.double, device=self.device),
requires_grad=False,
)
self.hidden_bias = nn.Parameter(
torch.zeros(self.num_hidden, dtype=torch.double, device=self.device),
requires_grad=False,
)
self.aux_bias = nn.Parameter(
torch.zeros(self.num_aux, dtype=torch.double, device=self.device),
requires_grad=False,
)
@auto_unsqueeze_args()
def effective_energy(self, v, a=None):
r"""Computes the equivalent of the "effective energy" for the RBM. If
`a` is `None`, will analytically trace out the auxiliary units.
:param v: The current state of the visible units. Shape (b, n_v) or (n_v,).
:type v: torch.Tensor
:param a: The current state of the auxiliary units. Shape (b, n_a) or (n_a,).
:type a: torch.Tensor or None
:returns: The "effective energy" of the RBM. Shape (b,) or (1,).
:rtype: torch.Tensor
"""
v = v.to(self.weights_W)
vis_term = torch.matmul(v, self.visible_bias) + F.softplus(
F.linear(v, self.weights_W, self.hidden_bias)
).sum(-1)
if a is not None:
a = (a.unsqueeze(0) if a.dim() < 2 else a).to(self.weights_W)
aux_term = torch.matmul(a, self.aux_bias)
mix_term = torch.einsum("...v,av,...a->...", v, self.weights_U.data, a)
return -(vis_term + aux_term + mix_term)
else:
aux_term = F.softplus(F.linear(v, self.weights_U, self.aux_bias)).sum(-1)
return -(vis_term + aux_term)
def effective_energy_gradient(self, v, reduce=True):
"""The gradients of the effective energies for the given visible states.
:param v: The visible states.
:type v: torch.Tensor
:param reduce: If `True`, will sum over the gradients resulting from
each visible state. Otherwise will return a batch of
gradient vectors.
:type reduce: bool
:returns: Will return a vector (or matrix if `reduce=False` and multiple
visible states were given as a matrix) containing the gradients
for all parameters (computed on the given visible states v).
:rtype: torch.Tensor
"""
v = (v.unsqueeze(0) if v.dim() < 2 else v).to(self.weights_W)
ph = self.prob_h_given_v(v)
pa = self.prob_a_given_v(v)
if reduce:
W_grad = -torch.matmul(ph.transpose(0, -1), v)
U_grad = -torch.matmul(pa.transpose(0, -1), v)
vb_grad = -torch.sum(v, 0)
hb_grad = -torch.sum(ph, 0)
ab_grad = -torch.sum(pa, 0)
return parameters_to_vector([W_grad, U_grad, vb_grad, hb_grad, ab_grad])
else:
W_grad = -torch.einsum("...j,...k->...jk", ph, v).view(*v.shape[:-1], -1)
U_grad = -torch.einsum("...j,...k->...jk", pa, v).view(*v.shape[:-1], -1)
vb_grad = -v
hb_grad = -ph
ab_grad = -pa
vec = [W_grad, U_grad, vb_grad, hb_grad, ab_grad]
return torch.cat(vec, dim=-1)
@auto_unsqueeze_args()
def prob_h_given_v(self, v, out=None):
r"""Given a visible unit configuration, compute the probability
vector of the hidden units being on
:param v: The visible units
:type v: torch.Tensor
:param out: The output tensor to write to
:type out: torch.Tensor
:returns: The probability of the hidden units being active
given the visible state
        :rtype: torch.Tensor
"""
return (
torch.matmul(v, self.weights_W.data.t(), out=out)
.add_(self.hidden_bias.data)
.sigmoid_()
.clamp_(min=0, max=1)
)
@auto_unsqueeze_args()
def prob_a_given_v(self, v, out=None):
r"""Given a visible unit configuration, compute the probability
vector of the auxiliary units being on
:param v: The visible units
:type v: torch.Tensor
:param out: The output tensor to write to
:type out: torch.Tensor
:returns: The probability of the auxiliary units being active
given the visible state
        :rtype: torch.Tensor
"""
return (
torch.matmul(v, self.weights_U.data.t(), out=out)
.add_(self.aux_bias.data)
.sigmoid_()
.clamp_(min=0, max=1)
)
@auto_unsqueeze_args(1, 2)
def prob_v_given_ha(self, h, a, out=None):
r"""Given a hidden and auxiliary unit configuration, compute
the probability vector of the hidden units being on
:param h: The hidden units
:type h: torch.Tensor
:param a: The auxiliary units
:type a: torch.Tensor
:param out: The output tensor to write to
:type out: torch.Tensor
:returns: The probability of the visible units being
active given the hidden and auxiliary states
        :rtype: torch.Tensor
"""
return (
torch.matmul(h, self.weights_W.data, out=out)
.add_(self.visible_bias.data)
.add_(torch.matmul(a, self.weights_U.data))
.sigmoid_()
.clamp_(min=0, max=1)
)
def sample_a_given_v(self, v, out=None):
r"""Sample/generate an auxiliary state given a visible state
:param v: The visible state
:type v: torch.Tensor
:param out: The output tensor to write to
:type out: torch.Tensor
:returns: The sampled auxiliary state
:rtype: torch.Tensor
"""
a = self.prob_a_given_v(v, out=out)
a = torch.bernoulli(a, out=out)
return a
def sample_h_given_v(self, v, out=None):
r"""Sample/generate a hidden state given a visible state
:param v: The visible state
:type v: torch.Tensor
:param out: The output tensor to write to
:type out: torch.Tensor
:returns: The sampled hidden state
:rtype: torch.Tensor
"""
h = self.prob_h_given_v(v, out=out)
h = torch.bernoulli(h, out=out)
return h
def sample_v_given_ha(self, h, a, out=None):
r"""Sample/generate a visible state given the
hidden and auxiliary states
:param h: The hidden state
:type h: torch.Tensor
:param a: The auxiliary state
:type a: torch.Tensor
:param out: The output tensor to write to
:type out: torch.Tensor
:returns: The sampled visible state
:rtype: torch.Tensor
"""
v = self.prob_v_given_ha(h, a, out=out)
v = torch.bernoulli(v, out=out)
return v
def gibbs_steps(self, k, initial_state, overwrite=False):
r"""Perform k steps of Block Gibbs sampling. One step consists of
sampling the hidden and auxiliary states from the visible state, and
then sampling the visible state from the hidden and auxiliary states
:param k: The number of Block Gibbs steps
:type k: int
:param initial_state: The initial visible state
:type initial_state: torch.Tensor
:param overwrite: Whether to overwrite the initial_state tensor.
Exception: If initial_state is not on the same device
as the RBM, it will NOT be overwritten.
:type overwrite: bool
:returns: Returns the visible states after k steps of
Block Gibbs sampling
:rtype: torch.Tensor
"""
v = (initial_state if overwrite else initial_state.clone()).to(self.weights_W)
h = torch.zeros(*v.shape[:-1], self.num_hidden).to(self.weights_W)
a = torch.zeros(*v.shape[:-1], self.num_aux).to(self.weights_W)
for _ in range(k):
self.sample_h_given_v(v, out=h)
self.sample_a_given_v(v, out=a)
self.sample_v_given_ha(h, a, out=v)
return v
@auto_unsqueeze_args()
def mixing_term(self, v):
r"""Describes the extent of mixing in the system,
:math:`V_\theta = \frac{1}{2}U_\theta \bm{\sigma} + d_\theta`
:param v: The visible state of the system
:type v: torch.Tensor
:returns: The term describing the mixing of the system
:rtype: torch.Tensor
"""
return F.linear(v, 0.5 * self.weights_U, self.aux_bias)
def gamma(self, v, vp, eta=1, expand=True):
r"""Calculates elements of the :math:`\Gamma^{(\eta)}` matrix,
where :math:`\eta = \pm`.
If `expand` is `True`, will return a complex matrix
:math:`A_{ij} = \langle\sigma_i|\Gamma^{(\eta)}|\sigma'_j\rangle`.
Otherwise will return a complex vector
:math:`A_{i} = \langle\sigma_i|\Gamma^{(\eta)}|\sigma'_i\rangle`.
:param v: A batch of visible states, :math:`\sigma`.
:type v: torch.Tensor
:param vp: The other batch of visible states, :math:`\sigma'`.
:type vp: torch.Tensor
:param eta: Determines which gamma matrix elements to compute.
:type eta: int
:param expand: Whether to return a matrix (`True`) or a vector (`False`).
Ignored if both inputs are vectors, in which case, a
scalar is returned.
:type expand: bool
:returns: The matrix element given by
:math:`\langle\sigma|\Gamma^{(\eta)}|\sigma'\rangle`
:rtype: torch.Tensor
"""
sign = np.sign(eta)
if v.dim() < 2 and vp.dim() < 2:
temp = torch.dot(v + sign * vp, self.visible_bias)
temp += F.softplus(F.linear(v, self.weights_W, self.hidden_bias)).sum()
temp += (
sign * F.softplus(F.linear(vp, self.weights_W, self.hidden_bias)).sum()
)
else:
temp1 = torch.matmul(v, self.visible_bias) + (
F.softplus(F.linear(v, self.weights_W, self.hidden_bias)).sum(-1)
)
temp2 = torch.matmul(vp, self.visible_bias) + (
F.softplus(F.linear(vp, self.weights_W, self.hidden_bias)).sum(-1)
)
if expand:
temp = temp1.unsqueeze_(1) + (sign * temp2.unsqueeze_(0))
else:
temp = temp1 + (sign * temp2)
return 0.5 * temp
def gamma_grad(self, v, vp, eta=1, expand=False):
r"""Calculates elements of the gradient of
the :math:`\Gamma^{(\eta)}` matrix, where :math:`\eta = \pm`.
:param v: A batch of visible states, :math:`\sigma`
:type v: torch.Tensor
:param vp: The other batch of visible states, :math:`\sigma'`
:type vp: torch.Tensor
:param eta: Determines which gamma matrix elements to compute.
:type eta: int
:param expand: Whether to return a rank-3 tensor (`True`) or a matrix (`False`).
:type expand: bool
:returns: The matrix element given by
:math:`\langle\sigma|\nabla_\lambda\Gamma^{(\eta)}|\sigma'\rangle`
:rtype: torch.Tensor
"""
sign = np.sign(eta)
unsqueezed = v.dim() < 2 or vp.dim() < 2
v = (v.unsqueeze(0) if v.dim() < 2 else v).to(self.weights_W)
vp = (vp.unsqueeze(0) if vp.dim() < 2 else vp).to(self.weights_W)
prob_h = self.prob_h_given_v(v)
prob_hp = self.prob_h_given_v(vp)
W_grad_ = torch.einsum("...j,...k->...jk", prob_h, v)
W_grad_p = torch.einsum("...j,...k->...jk", prob_hp, vp)
if expand:
W_grad = 0.5 * (W_grad_.unsqueeze_(1) + sign * W_grad_p.unsqueeze_(0))
vb_grad = 0.5 * (v.unsqueeze(1) + sign * vp.unsqueeze(0))
hb_grad = 0.5 * (prob_h.unsqueeze_(1) + sign * prob_hp.unsqueeze_(0))
else:
W_grad = 0.5 * (W_grad_ + sign * W_grad_p)
vb_grad = 0.5 * (v + sign * vp)
hb_grad = 0.5 * (prob_h + sign * prob_hp)
batch_sizes = (
(v.shape[0], vp.shape[0], *v.shape[1:-1]) if expand else (*v.shape[:-1],)
)
U_grad = torch.zeros_like(self.weights_U).expand(*batch_sizes, -1, -1)
ab_grad = torch.zeros_like(self.aux_bias).expand(*batch_sizes, -1)
vec = [
W_grad.view(*batch_sizes, -1),
U_grad.view(*batch_sizes, -1),
vb_grad,
hb_grad,
ab_grad,
]
if unsqueezed and not expand:
vec = [grad.squeeze_(0) for grad in vec]
return cplx.make_complex(torch.cat(vec, dim=-1))
def partition(self, space):
r"""Computes the partition function
:param space: The Hilbert space of the visible units
:type space: torch.Tensor
:returns: The partition function
:rtype: torch.Tensor
"""
logZ = (-self.effective_energy(space)).logsumexp(0)
return logZ.exp()
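# Minimal usage sketch (illustrative only, not from the original module); the
# layer sizes, batch size and Gibbs step count below are arbitrary assumptions.
if __name__ == "__main__":
    rbm = PurificationRBM(num_visible=3, num_hidden=3, num_aux=3)
    v0 = torch.zeros(5, 3, dtype=torch.double)  # batch of 5 visible configurations
    v = rbm.gibbs_steps(10, v0)  # 10 steps of block Gibbs sampling
    space = torch.tensor(
        [[int(b) for b in format(i, "03b")] for i in range(8)], dtype=torch.double
    )  # all 2**3 visible configurations
    print(rbm.partition(space))  # partition function summed over that space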
|
experimental/rien/rk4_example/setup.py | mindThomas/acados | 322 | 12660303 | #!/usr/bin/env python
"""
setup.py file for SWIG example
"""
from distutils.core import setup, Extension
erk_integrator_module = Extension('_erk_integrator',
sources=['erk_integrator_wrap.c', 'erk_integrator.c', 'auxiliary_functions.c', 'model.c', 'timing_functions.c'],
)
setup (name = 'erk_integrator',
version = '0.1',
author = "SWIG Docs",
description = """Simple swig example from docs""",
ext_modules = [erk_integrator_module],
py_modules = ["erk_integrator"],
)
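# Illustrative build steps (not from the original file), assuming the SWIG
# interface file erk_integrator.i and the listed C sources are present:
#   swig -python erk_integrator.i
#   python setup.py build_ext --inplace
#   python -c "import erk_integrator"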
|
data/transcoder_evaluation_gfg/python/COUNT_NUMBER_WAYS_REACH_GIVEN_SCORE_GAME.py | mxl1n/CodeGen | 241 | 12660305 | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( n ) :
table = [ 0 for i in range ( n + 1 ) ]
table [ 0 ] = 1
for i in range ( 3 , n + 1 ) :
table [ i ] += table [ i - 3 ]
for i in range ( 5 , n + 1 ) :
table [ i ] += table [ i - 5 ]
for i in range ( 10 , n + 1 ) :
table [ i ] += table [ i - 10 ]
return table [ n ]
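# Illustrative check (not from the original file): with plays worth 3, 5 and 10
# points, f_gold(13) == 2, counting the combinations {3, 10} and {3, 5, 5}.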
#TOFILL
if __name__ == '__main__':
param = [
(83,),
(29,),
(17,),
(12,),
(93,),
(55,),
(97,),
(75,),
(22,),
(52,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) |
ferminet/utils/scf.py | llxlr/ferminet | 469 | 12660315 |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interaction with Hartree-Fock solver in pyscf."""
# Abbreviations used:
# SCF: self-consistent field (method). Another name for Hartree-Fock
# HF: Hartree-Fock method.
# RHF: restricted Hartree-Fock. Requires the molecular orbitals for the i-th alpha-spin
# and i-th beta-spin electrons to have the same spatial component.
# ROHF: restricted open-shell Hartree-Fock. Same as RHF except allows the number
# of alpha and beta electrons to differ.
# UHF: unrestricted Hartree-Fock. Permits breaking of spin symmetry and hence
# alpha and beta electrons to have different spatial components.
# AO: Atomic orbital. Underlying basis set (typically Gaussian-type orbitals and
# built into pyscf).
# MO: molecular orbitals/Hartree-Fock orbitals. Single-particle orbitals which
# are solutions to the Hartree-Fock equations.
from typing import Sequence, Tuple, Optional
from absl import logging
from ferminet.utils import system
import numpy as np
import pyscf
class Scf:
"""Helper class for running Hartree-Fock (self-consistent field) with pyscf.
Attributes:
molecule: list of system.Atom objects giving the atoms in the
molecule and their positions.
nelectrons: Tuple with number of alpha electrons and beta
electrons.
basis: Basis set to use, best specified with the relevant string
for a built-in basis set in pyscf. A user-defined basis set can be used
(advanced). See https://sunqm.github.io/pyscf/gto.html#input-basis for
more details.
pyscf_mol: the PySCF 'Molecule'. If this is passed to the init,
the molecule, nelectrons, and basis will not be used, and the
calculations will be performed on the existing pyscf_mol
    restricted: If true, use the restricted Hartree-Fock method, otherwise use
the unrestricted Hartree-Fock method.
"""
def __init__(self,
molecule: Optional[Sequence[system.Atom]] = None,
nelectrons: Optional[Tuple[int, int]] = None,
basis: Optional[str] = 'cc-pVTZ',
pyscf_mol: Optional[pyscf.gto.Mole] = None,
restricted: bool = True):
if pyscf_mol:
self._mol = pyscf_mol
else:
self.molecule = molecule
self.nelectrons = nelectrons
self.basis = basis
self._spin = nelectrons[0] - nelectrons[1]
self._mol = None
self.restricted = restricted
self._mean_field = None
pyscf.lib.param.TMPDIR = None
def run(self):
"""Runs the Hartree-Fock calculation.
Returns:
A pyscf scf object (i.e. pyscf.scf.rhf.RHF, pyscf.scf.uhf.UHF or
pyscf.scf.rohf.ROHF depending on the spin and restricted settings).
Raises:
RuntimeError: If the number of electrons in the PySCF molecule is not
consistent with self.nelectrons.
"""
# If not passed a pyscf molecule, create one
if not self._mol:
if any(atom.atomic_number - atom.charge > 1.e-8
for atom in self.molecule):
logging.info(
'Fractional nuclear charge detected. '
'Running SCF on atoms with integer charge.'
)
nuclear_charge = sum(atom.atomic_number for atom in self.molecule)
charge = nuclear_charge - sum(self.nelectrons)
self._mol = pyscf.gto.Mole(
atom=[[atom.symbol, atom.coords] for atom in self.molecule],
unit='bohr')
self._mol.basis = self.basis
self._mol.spin = self._spin
self._mol.charge = charge
self._mol.build()
if self._mol.nelectron != sum(self.nelectrons):
raise RuntimeError('PySCF molecule not consistent with QMC molecule.')
if self.restricted:
self._mean_field = pyscf.scf.RHF(self._mol)
else:
self._mean_field = pyscf.scf.UHF(self._mol)
self._mean_field.init_guess = 'atom'
self._mean_field.kernel()
return self._mean_field
def eval_mos(self, positions: np.ndarray,
deriv: bool = False) -> Tuple[np.ndarray, np.ndarray]:
"""Evaluates the Hartree-Fock single-particle orbitals at a set of points.
Args:
positions: numpy array of shape (N, 3) of the positions in space at which
to evaluate the Hartree-Fock orbitals.
deriv: If True, also calculate the first derivatives of the
single-particle orbitals.
Returns:
Pair of numpy float64 arrays of shape (N, M) (deriv=False) or (4, N, M)
(deriv=True), where 2M is the number of Hartree-Fock orbitals. The (i-th,
j-th) element in the first (second) array gives the value of the j-th
alpha (beta) Hartree-Fock orbital at the i-th electron position in
positions. For restricted (RHF, ROHF) calculations, the two arrays will be
identical.
If deriv=True, the first index contains [value, x derivative, y
derivative, z derivative].
Raises:
RuntimeError: If Hartree-Fock calculation has not been performed using
`run`.
NotImplementedError: If Hartree-Fock calculation used Cartesian
Gaussian-type orbitals as the underlying basis set.
"""
if self._mean_field is None:
raise RuntimeError('Mean-field calculation has not been run.')
if self.restricted:
coeffs = (self._mean_field.mo_coeff,)
else:
coeffs = self._mean_field.mo_coeff
# Assumes self._mol.cart (use of Cartesian Gaussian-type orbitals and
# integrals) is False (default behaviour of pyscf).
if self._mol.cart:
raise NotImplementedError(
'Evaluation of molecular orbitals using cartesian GTOs.')
# Note sph refers to the use of spherical GTO basis sets rather than
# Cartesian GO basis sets. The coordinate system used for the electron
# positions is Cartesian in both cases.
gto_op = 'GTOval_sph_deriv1' if deriv else 'GTOval_sph'
ao_values = self._mol.eval_gto(gto_op, positions)
mo_values = tuple(np.matmul(ao_values, coeff) for coeff in coeffs)
if self.restricted:
# duplicate for beta electrons.
mo_values *= 2
return mo_values
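# Minimal usage sketch (illustrative only, not from the original module). It
# assumes system.Atom accepts `symbol` and `coords` keyword arguments, that
# pyscf is installed, and uses an H2 geometry of ~1.4 bohr as an example.
if __name__ == '__main__':
  h2 = [system.Atom(symbol='H', coords=(0.0, 0.0, -0.7)),
        system.Atom(symbol='H', coords=(0.0, 0.0, 0.7))]
  hf = Scf(molecule=h2, nelectrons=(1, 1), basis='sto-3g')
  hf.run()
  mos = hf.eval_mos(np.zeros((1, 3)))  # orbital values at the origin
  print(mos[0].shape, mos[1].shape)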
|
pipeline/python/ion/reports/wells_beadogram.py | konradotto/TS | 125 | 12660320 | #!/usr/bin/python
# Copyright (C) 2012 Ion Torrent Systems, Inc. All Rights Reserved
import os
import json
import logging
import ConfigParser
from matplotlib import use
use("Agg", warn=False)
from matplotlib import pyplot
from matplotlib.ticker import FuncFormatter, LinearLocator
from matplotlib import transforms
logger = logging.getLogger(__name__)
def load_ini(report, filename, namespace="global"):
parse = ConfigParser.ConfigParser()
path = os.path.join(report, filename)
    # TODO preserve the case
try:
parse.read(path)
parse = parse._sections.copy()
return parse[namespace]
except Exception as err:
logger.error(
"Wells Beadogram generation failed parsing %s: %s" % (path, str(err))
)
raise
def load_json(report, filename):
"""shortcut to load the json"""
path = os.path.join(report, filename)
try:
with open(path) as f:
return json.loads(f.read())
except Exception as err:
logger.error(
"Wells Beadogram generation failed parsing %s: %s" % (path, str(err))
)
raise
def generate_wells_beadogram2(basecaller, sigproc, beadogram_path=None):
beadogram_path = beadogram_path or os.path.join(basecaller, "wells_beadogram.png")
basecaller = load_json(basecaller, "BaseCaller.json")
beadfind = load_ini(sigproc, "analysis.bfmask.stats")
isp_labels, isp_counts = zip(
*[
("Have ISPs", int(beadfind["bead wells"])),
("Live ISPs", int(beadfind["live beads"])),
("Library ISPs", int(beadfind["library beads"])),
]
)
library_labels, library_counts = zip(
*[
(
"Polyclonal",
int(basecaller["Filtering"]["LibraryReport"]["filtered_polyclonal"]),
),
(
"Low Quality",
int(basecaller["Filtering"]["LibraryReport"]["filtered_low_quality"]),
),
(
"<NAME>",
int(basecaller["Filtering"]["LibraryReport"]["filtered_primer_dimer"]),
),
(
"Final Library",
int(basecaller["Filtering"]["LibraryReport"]["final_library_reads"]),
),
]
)
fig = pyplot.figure()
wells_ax = fig.add_subplot(121)
lib_ax = fig.add_subplot(122)
if "adjusted addressable wells" in beadfind:
available_wells = int(beadfind["adjusted addressable wells"])
else:
available_wells = int(beadfind["total wells"]) - int(beadfind["excluded wells"])
suffixes = ("k", "M", "G", "T", "P", "E", "Z", "Y")
def formatter(major, minor):
base = 1000
if major < base:
return "%d" % major
for i, s in enumerate(suffixes):
unit = base ** (i + 2)
if major < unit:
return "%.1f %s" % ((base * major / unit), s)
return "%.1f %s" % ((base * major / unit), s)
mils_format = FuncFormatter(formatter)
wells_ax.bar(range(len(isp_counts)), isp_counts, width=0.5)
wells_ax.set_ylim(0, available_wells)
wells_ax.set_xticks(range(len(isp_counts)))
wells_ax.set_xticklabels(isp_labels, rotation=20)
wells_ax.yaxis.set_major_locator(LinearLocator(5))
wells_ax.yaxis.set_major_formatter(mils_format)
lib_ax.bar(
range(len(library_counts)),
library_counts,
color=("r", "r", "r", "g"),
width=0.5,
)
lib_ax.set_ylim(0, int(beadfind["library beads"]))
lib_ax.set_xticks(range(len(library_counts)))
lib_ax.set_xticklabels(library_labels, rotation=20)
lib_ax.yaxis.set_major_locator(LinearLocator(5))
lib_ax.yaxis.set_major_formatter(mils_format)
wells_ax.set_ylabel("Number of ISPs")
fig.suptitle("Ion Sphere Particle Summary")
fig.subplots_adjust(wspace=0.3)
fig.patch.set_alpha(0.0)
pyplot.savefig(beadogram_path)
def generate_wells_beadogram(basecaller, sigproc, beadogram_path=None):
beadogram_path = beadogram_path or os.path.join(basecaller, "wells_beadogram.png")
generate_wells_beadogram_all_or_basic(basecaller, sigproc, beadogram_path, True)
def generate_wells_beadogram_all_or_basic(
basecaller, sigproc, beadogram_path, is_full_details=True
):
basecaller = load_json(basecaller, "BaseCaller.json")
beadfind = load_ini(sigproc, "analysis.bfmask.stats")
def intWithCommas(x):
if not isinstance(x, int):
raise TypeError("Parameter must be an integer.")
if x < 0:
return "-" + intWithCommas(-x)
result = ""
while x >= 1000:
x, r = divmod(x, 1000)
result = ",%03d%s" % (r, result)
return "%d%s" % (x, result)
if "adjusted addressable wells" in beadfind:
available_wells = int(beadfind["adjusted addressable wells"])
else:
available_wells = int(beadfind["total wells"]) - int(beadfind["excluded wells"])
# Row 1: Loading
loaded_wells = int(beadfind["bead wells"])
empty_wells = available_wells - loaded_wells
if available_wells > 0:
loaded_percent = int(
round(100.0 * float(loaded_wells) / float(available_wells))
)
empty_percent = 100 - loaded_percent
else:
loaded_percent = 0.0
empty_percent = 0.0
# Row 2: Enrichment
enriched_wells = int(beadfind["live beads"])
unenriched_wells = loaded_wells - enriched_wells
if loaded_wells > 0:
enriched_percent = int(
round(100.0 * float(enriched_wells) / float(loaded_wells))
)
unenriched_percent = 100 - enriched_percent
else:
enriched_percent = 0.0
unenriched_percent = 0.0
# Row 3: Clonality
polyclonal_wells = int(
basecaller["Filtering"]["LibraryReport"]["filtered_polyclonal"]
)
clonal_wells = enriched_wells - polyclonal_wells
if enriched_wells > 0:
clonal_percent = int(round(100.0 * float(clonal_wells) / float(enriched_wells)))
polyclonal_percent = 100 - clonal_percent
else:
clonal_percent = 0.0
polyclonal_percent = 0.0
# Row 4: Filtering
final_library_wells = int(
basecaller["Filtering"]["LibraryReport"]["final_library_reads"]
)
final_tf_wells = int(basecaller["Filtering"]["ReadDetails"]["tf"]["valid"])
dimer_wells = int(basecaller["Filtering"]["LibraryReport"]["filtered_primer_dimer"])
low_quality_wells = (
clonal_wells - final_library_wells - final_tf_wells - dimer_wells
)
if not is_full_details:
low_quality_wells += polyclonal_wells
if clonal_wells > 0:
final_library_percent = int(
round(100.0 * float(final_library_wells) / float(clonal_wells))
)
final_tf_percent = int(
round(100.0 * float(final_tf_wells) / float(clonal_wells))
)
dimer_percent = int(round(100.0 * float(dimer_wells) / float(clonal_wells)))
low_quality_percent = (
100 - final_library_percent - final_tf_percent - dimer_percent
)
if not is_full_details:
low_quality_percent = int(
round(100.0 * float(low_quality_wells) / float(clonal_wells))
)
else:
final_library_percent = 0.0
final_tf_percent = 0.0
dimer_percent = 0.0
low_quality_percent = 0.0
color_blue = "#2D4782"
color_gray = "#808080"
fontsize_big = 22
fontsize_small = 12
fontsize_medium = 16
fig = pyplot.figure(figsize=(6, 4), dpi=100)
# "111" means "1x1 grid, first subplot"
ax = fig.add_subplot(
111, frame_on=False, xticks=[], yticks=[], position=[0, 0, 1, 1]
)
# horizontal bar plot
# http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.barh
# matplotlib.pyplot.barh(bottom, width, height=0.8, left=None, hold=None, **kwargs)
# bottom : scalar or array-like; the y coordinate(s) of the bars
# width : scalar or array-like; the width(s) of the bars
# height : sequence of scalars, optional, default: 0.8; the heights of the bars
# left : sequence of scalars; the x coordinates of the left sides of the bars
if is_full_details:
ax.barh(
bottom=[3, 2, 1, 0],
left=[0, 0, 0, 0],
width=[
loaded_wells / float(available_wells),
enriched_wells / float(available_wells),
clonal_wells / float(available_wells),
final_library_wells / float(available_wells),
],
height=0.8,
color=color_blue,
linewidth=0,
zorder=1,
align="edge",
)
ax.barh(
bottom=[3, 2, 1, 0],
left=[
loaded_wells / float(available_wells),
enriched_wells / float(available_wells),
clonal_wells / float(available_wells),
final_library_wells / float(available_wells),
],
width=[
empty_wells / float(available_wells),
unenriched_wells / float(available_wells),
polyclonal_wells / float(available_wells),
(final_tf_wells + dimer_wells + low_quality_wells)
/ float(available_wells),
],
height=0.8,
color=color_gray,
linewidth=0,
zorder=1,
align="edge",
)
else:
ax.barh(
bottom=[3, 2, 1],
left=[0, 0, 0],
width=[
loaded_wells / float(available_wells),
enriched_wells / float(available_wells),
final_library_wells / float(available_wells),
],
height=0.8,
color=color_blue,
linewidth=0,
zorder=1,
align="edge",
)
ax.barh(
bottom=[3, 2, 1],
left=[
loaded_wells / float(available_wells),
enriched_wells / float(available_wells),
final_library_wells / float(available_wells),
],
width=[
empty_wells / float(available_wells),
unenriched_wells / float(available_wells),
(final_tf_wells + dimer_wells + low_quality_wells)
/ float(available_wells),
],
height=0.8,
color=color_gray,
linewidth=0,
zorder=1,
align="edge",
)
# http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.text
# matplotlib.pyplot.text(x, y, s, fontdict=None, withdash=False, **kwargs)
# x, y : scalars; data coordinates
# s : string; text
# fontdict : dictionary, optional, default: None; A dictionary to override the default text properties. If fontdict is None, the defaults are determined by your rc parameters.
# withdash : boolean, optional, default: False; Creates a TextWithDash instance instead of a Text instance.
ax.text(
-0.21,
3.1,
"Loading",
horizontalalignment="center",
verticalalignment="center",
fontsize=fontsize_small,
zorder=2,
color=color_blue,
weight="bold",
stretch="condensed",
)
ax.text(
-0.21,
3.4,
" %d%%" % loaded_percent,
horizontalalignment="center",
verticalalignment="center",
fontsize=fontsize_big,
zorder=2,
color=color_blue,
weight="bold",
stretch="condensed",
)
ax.text(
1.21,
3.1,
"Empty Wells",
horizontalalignment="center",
verticalalignment="center",
fontsize=fontsize_small,
zorder=2,
color=color_gray,
weight="bold",
stretch="condensed",
)
ax.text(
1.21,
3.4,
" %d%%" % empty_percent,
horizontalalignment="center",
verticalalignment="center",
fontsize=fontsize_big,
zorder=2,
color=color_gray,
weight="bold",
stretch="condensed",
)
ax.text(
0.04,
3.4,
intWithCommas(loaded_wells),
horizontalalignment="left",
verticalalignment="center",
fontsize=fontsize_medium,
zorder=2,
color="white",
weight="bold",
stretch="condensed",
alpha=0.7,
)
ax.text(
-0.21,
2.1,
"Enrichment",
horizontalalignment="center",
verticalalignment="center",
fontsize=fontsize_small,
zorder=2,
color=color_blue,
weight="bold",
stretch="condensed",
)
ax.text(
-0.21,
2.4,
" %d%%" % enriched_percent,
horizontalalignment="center",
verticalalignment="center",
fontsize=fontsize_big,
zorder=2,
color=color_blue,
weight="bold",
stretch="condensed",
)
ax.text(
max(0.6, 0.21 + loaded_wells / float(available_wells)),
2.1,
"No Template",
horizontalalignment="center",
verticalalignment="center",
fontsize=fontsize_small,
zorder=2,
color=color_gray,
weight="bold",
stretch="condensed",
)
ax.text(
max(0.6, 0.21 + loaded_wells / float(available_wells)),
2.4,
" %d%%" % unenriched_percent,
horizontalalignment="center",
verticalalignment="center",
fontsize=fontsize_big,
zorder=2,
color=color_gray,
weight="bold",
stretch="condensed",
)
ax.text(
0.04,
2.4,
intWithCommas(enriched_wells),
horizontalalignment="left",
verticalalignment="center",
fontsize=fontsize_medium,
zorder=2,
color="white",
weight="bold",
stretch="condensed",
alpha=0.7,
)
ax.text(
0.04,
2.4,
intWithCommas(enriched_wells),
horizontalalignment="left",
verticalalignment="center",
fontsize=fontsize_medium,
zorder=0.5,
color="black",
weight="bold",
stretch="condensed",
)
bottom_bar_y1 = 0.1
bottom_bar_y2 = 0.4
bottom_text_y1 = 0.65
bottom_text_y2 = 0.4
bottom_text_y3 = 0.15
if is_full_details:
ax.text(
-0.21,
1.1,
"Clonal",
horizontalalignment="center",
verticalalignment="center",
fontsize=fontsize_small,
zorder=2,
color=color_blue,
weight="bold",
stretch="condensed",
)
ax.text(
-0.21,
1.4,
" %d%%" % clonal_percent,
horizontalalignment="center",
verticalalignment="center",
fontsize=fontsize_big,
zorder=2,
color=color_blue,
weight="bold",
stretch="condensed",
)
ax.text(
max(0.6, 0.21 + enriched_wells / float(available_wells)),
1.1,
"Polyclonal",
horizontalalignment="center",
verticalalignment="center",
fontsize=fontsize_small,
zorder=2,
color=color_gray,
weight="bold",
stretch="condensed",
)
ax.text(
max(0.6, 0.21 + enriched_wells / float(available_wells)),
1.4,
" %d%%" % polyclonal_percent,
horizontalalignment="center",
verticalalignment="center",
fontsize=fontsize_big,
zorder=2,
color=color_gray,
weight="bold",
stretch="condensed",
)
ax.text(
0.04,
1.4,
intWithCommas(clonal_wells),
horizontalalignment="left",
verticalalignment="center",
fontsize=fontsize_medium,
zorder=2,
color="white",
weight="bold",
stretch="condensed",
alpha=0.7,
)
ax.text(
0.04,
1.4,
intWithCommas(clonal_wells),
horizontalalignment="left",
verticalalignment="center",
fontsize=fontsize_medium,
zorder=0.5,
color="black",
weight="bold",
stretch="condensed",
)
else:
bottom_bar_y1 = 1.1
bottom_bar_y2 = 1.4
bottom_text_y1 = 1.65
bottom_text_y2 = 1.4
bottom_text_y3 = 1.15
ax.text(
-0.21,
bottom_bar_y1,
"Final Library",
horizontalalignment="center",
verticalalignment="center",
fontsize=fontsize_small,
zorder=2,
color=color_blue,
weight="bold",
stretch="condensed",
)
ax.text(
-0.21,
bottom_bar_y2,
" %d%%" % final_library_percent,
horizontalalignment="center",
verticalalignment="center",
fontsize=fontsize_big,
zorder=2,
color=color_blue,
weight="bold",
stretch="condensed",
)
ax.text(
max(0.90, 0.05 + clonal_wells / float(available_wells)),
bottom_text_y1,
"% 2d%% Test Fragments" % final_tf_percent,
horizontalalignment="left",
verticalalignment="center",
fontsize=fontsize_small,
zorder=2,
color=color_gray,
weight="bold",
stretch="condensed",
)
ax.text(
max(0.90, 0.05 + clonal_wells / float(available_wells)),
bottom_text_y2,
"% 2d%% Adapter Dimer" % dimer_percent,
horizontalalignment="left",
verticalalignment="center",
fontsize=fontsize_small,
zorder=2,
color=color_gray,
weight="bold",
stretch="condensed",
)
ax.text(
max(0.90, 0.05 + clonal_wells / float(available_wells)),
bottom_text_y3,
"% 2d%% Low Quality" % low_quality_percent,
horizontalalignment="left",
verticalalignment="center",
fontsize=fontsize_small,
zorder=2,
color=color_gray,
weight="bold",
stretch="condensed",
)
ax.text(
0.04,
bottom_bar_y2,
intWithCommas(final_library_wells),
horizontalalignment="left",
verticalalignment="center",
fontsize=fontsize_medium,
zorder=2,
color="white",
weight="bold",
stretch="condensed",
alpha=0.7,
)
ax.text(
0.04,
bottom_bar_y2,
intWithCommas(final_library_wells),
horizontalalignment="left",
verticalalignment="center",
fontsize=fontsize_medium,
zorder=0.5,
color="#000000",
weight="black",
stretch="condensed",
)
ax.set_xlim(-0.42, 1.42)
fig.patch.set_alpha(0.0)
pyplot.savefig(beadogram_path)
def generate_wells_beadogram_basic(basecaller, sigproc, beadogram_path=None):
beadogram_path = beadogram_path or os.path.join(
basecaller, "wells_beadogram_basic.png"
)
generate_wells_beadogram_all_or_basic(
basecaller, sigproc, beadogram_path, is_full_details=False
)
if __name__ == "__main__":
import sys
basecaller = (
os.path.join(os.getcwd(), "basecaller_results")
if len(sys.argv) <= 1
else sys.argv[1]
)
sigproc = (
os.path.join(os.getcwd(), "sigproc_results")
if len(sys.argv) <= 2
else sys.argv[2]
)
generate_wells_beadogram(basecaller, sigproc)
|
tests/test_util.py | adger-me/you-get | 46,956 | 12660322 |
#!/usr/bin/env python
import unittest
from you_get.util.fs import *
class TestUtil(unittest.TestCase):
def test_legitimize(self):
self.assertEqual(legitimize("1*2", os="linux"), "1*2")
self.assertEqual(legitimize("1*2", os="mac"), "1*2")
self.assertEqual(legitimize("1*2", os="windows"), "1-2")
self.assertEqual(legitimize("1*2", os="wsl"), "1-2")
|
neurogym/envs/probabilisticreasoning.py | ruyuanzhang/neurogym | 112 | 12660332 |
"""Random dot motion task."""
import numpy as np
import neurogym as ngym
from neurogym import spaces
class ProbabilisticReasoning(ngym.TrialEnv):
"""Probabilistic reasoning.
The agent is shown a sequence of stimuli. Each stimulus is associated
with a certain log-likelihood of the correct response being one choice
versus the other. The final log-likelihood of the target response being,
    for example, option 1, is the sum of all log-likelihoods associated with
    the presented stimuli. A delay period separates each stimulus, so the
    agent is encouraged to learn the log-likelihood association and integrate
these values over time within a trial.
Args:
shape_weight: array-like, evidence weight of each shape
        n_loc: int, number of locations at which shapes are shown
"""
metadata = {
'paper_link': 'https://www.nature.com/articles/nature05852',
'paper_name': 'Probabilistic reasoning by neurons',
'tags': ['perceptual', 'two-alternative', 'supervised']
}
def __init__(self, dt=100, rewards=None, timing=None, shape_weight=None,
n_loc=4):
super().__init__(dt=dt)
# The evidence weight of each stimulus
if shape_weight is not None:
self.shape_weight = shape_weight
else:
self.shape_weight = [-10, -0.9, -0.7, -0.5, -0.3,
0.3, 0.5, 0.7, 0.9, 10]
self.n_shape = len(self.shape_weight)
dim_shape = self.n_shape
# Shape representation needs to be fixed cross-platform
self.shapes = np.eye(self.n_shape, dim_shape)
self.n_loc = n_loc
# Rewards
self.rewards = {'abort': -0.1, 'correct': +1., 'fail': 0.}
if rewards:
self.rewards.update(rewards)
self.timing = {'fixation': 500,
'delay': lambda: self.rng.uniform(450, 550),
'decision': 500
}
for i_loc in range(n_loc):
self.timing['stimulus'+str(i_loc)] = 500
if timing:
self.timing.update(timing)
self.abort = False
name = {'fixation': 0}
start = 1
for i_loc in range(n_loc):
name['loc' + str(i_loc)] = range(start, start + dim_shape)
start += dim_shape
self.observation_space = spaces.Box(
-np.inf, np.inf, shape=(1 + dim_shape*n_loc,),
dtype=np.float32, name=name)
name = {'fixation': 0, 'choice': [1, 2]}
self.action_space = spaces.Discrete(3, name=name)
def _new_trial(self, **kwargs):
# Trial info
trial = {
'locs': self.rng.choice(range(self.n_loc),
size=self.n_loc, replace=False),
'shapes': self.rng.choice(range(self.n_shape),
size=self.n_loc, replace=True),
}
trial.update(kwargs)
locs = trial['locs']
shapes = trial['shapes']
log_odd = sum([self.shape_weight[shape] for shape in shapes])
p = 1. / (10**(-log_odd) + 1.)
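        # Note (illustrative): log_odd is a base-10 log-odds, so e.g. log_odd = 1
        # gives p = 1 / (10**-1 + 1) ~ 0.91, the probability that ground_truth below is 1.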
ground_truth = int(self.rng.rand() < p)
trial['log_odd'] = log_odd
trial['ground_truth'] = ground_truth
# Periods
periods = ['fixation']
periods += ['stimulus'+str(i) for i in range(self.n_loc)]
periods += ['delay', 'decision']
self.add_period(periods)
# Observations
self.add_ob(1, where='fixation')
self.set_ob(0, 'decision', where='fixation')
for i_loc in range(self.n_loc):
loc = locs[i_loc]
shape = shapes[i_loc]
periods = ['stimulus'+str(j) for j in range(i_loc, self.n_loc)]
self.add_ob(self.shapes[shape], periods, where='loc'+str(loc))
# Ground truth
self.set_groundtruth(ground_truth, period='decision', where='choice')
return trial
def _step(self, action):
new_trial = False
# rewards
reward = 0
gt = self.gt_now
# observations
if self.in_period('decision'):
if action != 0:
new_trial = True
if action == gt:
reward += self.rewards['correct']
self.performance = 1
else:
reward += self.rewards['fail']
else:
if action != 0: # action = 0 means fixating
new_trial = self.abort
reward += self.rewards['abort']
return self.ob_now, reward, False, {'new_trial': new_trial, 'gt': gt}
|
data/tracking/post_processor/response_map.py | zhangzhengde0225/SwinTrack | 143 | 12660337 | import torch
class ResponseMapTrackingPostProcessing:
def __init__(self, enable_gaussian_score_map_penalty, search_feat_size, window_penalty_ratio=None):
self.enable_gaussian_score_map_penalty = enable_gaussian_score_map_penalty
self.search_feat_size = search_feat_size
if enable_gaussian_score_map_penalty:
self.window = torch.flatten(torch.outer(torch.hann_window(search_feat_size[1], periodic=False),
torch.hann_window(search_feat_size[0], periodic=False)))
self.window_penalty_ratio = window_penalty_ratio
def __call__(self, network_output):
if network_output is None:
return None
class_score_map, predicted_bbox = network_output['class_score'], network_output['bbox'] # shape: (N, 1, H, W), (N, H, W, 4)
N, C, H, W = class_score_map.shape
assert C == 1
class_score_map = class_score_map.view(N, H * W)
if self.enable_gaussian_score_map_penalty:
# window penalty
class_score_map = class_score_map * (1 - self.window_penalty_ratio) + \
self.window.view(1, H * W) * self.window_penalty_ratio
confidence_score, best_idx = torch.max(class_score_map, 1)
predicted_bbox = predicted_bbox.view(N, H * W, 4)
bounding_box = predicted_bbox[torch.arange(len(predicted_bbox)), best_idx, :]
processor_outputs = {'bbox': bounding_box, 'conf': confidence_score}
for k, v in network_output.items():
if k not in ('class_score', 'bbox'):
processor_outputs[k] = v
return processor_outputs
def to(self, device):
if self.enable_gaussian_score_map_penalty:
self.window = self.window.to(device)
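# Minimal usage sketch (illustrative only, not from the original module); the
# batch size, 16x16 response map and penalty ratio below are arbitrary assumptions.
if __name__ == '__main__':
    post = ResponseMapTrackingPostProcessing(
        enable_gaussian_score_map_penalty=True, search_feat_size=(16, 16),
        window_penalty_ratio=0.49)
    fake_output = {'class_score': torch.rand(2, 1, 16, 16),
                   'bbox': torch.rand(2, 16, 16, 4)}
    result = post(fake_output)
    print(result['bbox'].shape, result['conf'].shape)  # (2, 4) and (2,)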
|
Algo and DSA/LeetCode-Solutions-master/Python/best-position-for-a-service-centre.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 12660338 |
# Time: O(n * iter), iter is the number of iterations
# Space: O(1)
# see reference:
# - https://en.wikipedia.org/wiki/Geometric_median
# - https://wikimedia.org/api/rest_v1/media/math/render/svg/b3fb215363358f12687100710caff0e86cd9d26b
# Weiszfeld's algorithm
class Solution(object):
def getMinDistSum(self, positions):
"""
:type positions: List[List[int]]
:rtype: float
"""
EPS = 1e-6
def norm(p1, p2):
return ((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)**0.5
def geometry_median(positions, median):
numerator, denominator = [0.0, 0.0], 0.0
for p in positions:
l = norm(median, p)
if not l:
continue
numerator[0] += p[0]/l
numerator[1] += p[1]/l
denominator += 1/l
if denominator == 0.0:
return True, None
return False, [numerator[0]/denominator, numerator[1]/denominator]
median = [float(sum(p[0] for p in positions))/len(positions),
float(sum(p[1] for p in positions))/len(positions)]
prev_median = [float("-inf"), float("-inf")]
while norm(median, prev_median)*len(positions) > EPS:
stopped, new_median = geometry_median(positions, median)
if stopped:
break
median, prev_median = new_median, median
return sum(norm(median, p) for p in positions)
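# Illustrative example (added note): positions = [[0, 1], [1, 0], [1, 2], [2, 1]]
# converges to the geometric median (1, 1), giving a total distance of 4.0.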
# Time: O(n * iter), iter is the number of iterations
# Space: O(1)
class Solution2(object):
def getMinDistSum(self, positions):
"""
:type positions: List[List[int]]
:rtype: float
"""
DIRECTIONS = [(0, 1), (1, 0), (0, -1), (-1, 0)]
EPS = 1e-6
def dist(positions, p):
return sum(((p[0]-x)**2 + (p[1]-y)**2)**0.5 for x, y in positions)
median = [0.0, 0.0]
median[0] = float(sum(x for x, _ in positions))/len(positions)
median[1] = float(sum(y for _, y in positions))/len(positions)
result = dist(positions, median)
delta = float(max(max(positions, key=lambda x: x[0])[0],
max(positions, key=lambda x: x[1])[1])) - \
float(min(min(positions, key=lambda x: x[0])[0],
min(positions, key=lambda x: x[1])[1]))
while delta > EPS:
for dx, dy in DIRECTIONS:
new_median = [median[0] + delta*dx, median[1] + delta*dy]
nd = dist(positions, new_median)
if nd < result:
result = nd
median = new_median
break
else:
delta /= 2.0
return result
|
napari/plugins/_tests/test_builtin_get_writer.py | MaksHess/napari | 1,345 | 12660448 | import os
import pytest
from napari_plugin_engine import PluginCallError
from napari.plugins import _builtins
# test_plugin_manager fixture is provided by napari_plugin_engine._testsupport
def test_get_writer_succeeds(
napari_plugin_manager, tmpdir, layer_data_and_types
):
"""Test writing layers data."""
_, layer_data, layer_types, filenames = layer_data_and_types
path = os.path.join(tmpdir, 'layers_folder')
writer = napari_plugin_manager.hook.napari_get_writer(
path=path, layer_types=layer_types
)
# Write data
assert writer == _builtins.write_layer_data_with_plugins
assert writer(path, layer_data, plugin_name=None)
# Check folder and files exist
assert os.path.isdir(path)
for f in filenames:
assert os.path.isfile(os.path.join(path, f))
assert set(os.listdir(path)) == set(filenames)
assert set(os.listdir(tmpdir)) == {'layers_folder'}
# the layer_data_and_types fixture is defined in napari/conftest.py
# test_plugin_manager fixture is provided by napari_plugin_engine._testsupport
def test_get_writer_bad_plugin(
napari_plugin_manager, tmpdir, layer_data_and_types
):
"""Test cleanup when get_writer has an exception."""
from napari_plugin_engine import napari_hook_implementation
class bad_plugin:
@napari_hook_implementation
def napari_write_points(path, data, meta):
raise ValueError("shoot!")
_, layer_data, layer_types, filenames = layer_data_and_types
napari_plugin_manager.register(bad_plugin)
# this time we try writing directly to the tmpdir (which already exists)
writer = _builtins.napari_get_writer(tmpdir, layer_types)
# call writer with a bad hook implementation inserted
with pytest.raises(PluginCallError):
writer(tmpdir, layer_data, plugin_name=None)
# should have deleted all new files, but not the tmpdir
assert os.path.isdir(tmpdir)
for f in filenames:
assert not os.path.isfile(os.path.join(tmpdir, f))
# now try writing to a nested folder inside of tmpdir
path = os.path.join(tmpdir, 'layers_folder')
writer = _builtins.napari_get_writer(path, layer_types)
# call writer with a bad hook implementation inserted
with pytest.raises(PluginCallError):
writer(tmpdir, layer_data, plugin_name=None)
# should have deleted the new nested folder, but not the tmpdir
assert os.path.isdir(tmpdir)
assert not os.path.exists(path)
|
clist/migrations/0039_auto_20200528_2349.py | horacexd/clist | 166 | 12660450 | # Generated by Django 2.2.10 on 2020-05-28 23:49
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('clist', '0038_problem_division'),
]
operations = [
migrations.RenameField(
model_name='problem',
old_name='division',
new_name='divisions',
),
]
|
Chapter10/service/libs/storage/src/storage/client.py | TranQuangDuc/Clean-Code-in-Python | 402 | 12660453 |
"""Abstraction to the database.
Provide a client to connect to the database and expose a custom API, at the
convenience of the application.
"""
import os
import asyncpg
def _extract_from_env(variable, *, default=None):
try:
return os.environ[variable]
except KeyError as e:
if default is not None:
return default
raise RuntimeError(f"Environment variable {variable} not set") from e
DBUSER = _extract_from_env("DBUSER")
DBPASSWORD = _extract_from_env("DBPASSWORD")
DBNAME = _extract_from_env("DBNAME")
DBHOST = _extract_from_env("DBHOST", default="127.0.0.1")
DBPORT = _extract_from_env("DBPORT", default=5432)
async def DBClient():
return await asyncpg.connect(
user=DBUSER,
        password=DBPASSWORD,
database=DBNAME,
host=DBHOST,
port=DBPORT,
)
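# Minimal usage sketch (illustrative only, not from the original module); it
# assumes the DB* environment variables above point at a reachable PostgreSQL
# server.
if __name__ == "__main__":
    import asyncio
    async def _demo():
        conn = await DBClient()
        print(await conn.fetchval("SELECT 1"))
        await conn.close()
    asyncio.run(_demo())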
|
blender/addons/2.8/mira_tools/mi_inputs.py | belzecue/mifthtools | 730 | 12660457 |
# ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
import bpy
pass_keys = ['NUMPAD_0', 'NUMPAD_1', 'NUMPAD_3', 'NUMPAD_4',
'NUMPAD_5', 'NUMPAD_6', 'NUMPAD_7', 'NUMPAD_8',
'NUMPAD_9', 'MIDDLEMOUSE', 'WHEELUPMOUSE', 'WHEELDOWNMOUSE',
'MOUSEMOVE']
def get_input_pass(pass_keys, key_inputs, event):
if event.type in pass_keys:
return True
if key_inputs == 'Maya':
if event.type in {'RIGHTMOUSE', 'LEFTMOUSE'} and event.alt and not event.shift and not event.ctrl:
return True
return False
|
adapters/tuya/TS0013.py | russdan/domoticz-zigbee2mqtt-plugin | 146 | 12660471 |
from adapters.tuya.TS0012 import TS0012
from devices.switch.on_off_switch import OnOffSwitch
class TS0013(TS0012):
def __init__(self):
super().__init__()
self.devices.append(OnOffSwitch('center', 'state_center'))
|
src/convmlp.py | dumpmemory/Convolutional-MLPs | 117 | 12660488 |
from torch.hub import load_state_dict_from_url
import torch.nn as nn
from .utils.tokenizer import ConvTokenizer
from .utils.modules import ConvStage, BasicStage
__all__ = ['ConvMLP', 'convmlp_s', 'convmlp_m', 'convmlp_l']
model_urls = {
'convmlp_s': 'http://ix.cs.uoregon.edu/~alih/conv-mlp/checkpoints/convmlp_s_imagenet.pth',
'convmlp_m': 'http://ix.cs.uoregon.edu/~alih/conv-mlp/checkpoints/convmlp_m_imagenet.pth',
'convmlp_l': 'http://ix.cs.uoregon.edu/~alih/conv-mlp/checkpoints/convmlp_l_imagenet.pth',
}
class ConvMLP(nn.Module):
def __init__(self,
blocks,
dims,
mlp_ratios,
channels=64,
n_conv_blocks=3,
classifier_head=True,
num_classes=1000,
*args, **kwargs):
super(ConvMLP, self).__init__()
assert len(blocks) == len(dims) == len(mlp_ratios), \
f"blocks, dims and mlp_ratios must agree in size, {len(blocks)}, {len(dims)} and {len(mlp_ratios)} passed."
self.tokenizer = ConvTokenizer(embedding_dim=channels)
self.conv_stages = ConvStage(n_conv_blocks,
embedding_dim_in=channels,
hidden_dim=dims[0],
embedding_dim_out=dims[0])
self.stages = nn.ModuleList()
for i in range(0, len(blocks)):
stage = BasicStage(num_blocks=blocks[i],
embedding_dims=dims[i:i + 2],
mlp_ratio=mlp_ratios[i],
stochastic_depth_rate=0.1,
downsample=(i + 1 < len(blocks)))
self.stages.append(stage)
if classifier_head:
self.norm = nn.LayerNorm(dims[-1])
self.head = nn.Linear(dims[-1], num_classes)
else:
self.head = None
self.apply(self.init_weight)
def forward(self, x):
x = self.tokenizer(x)
x = self.conv_stages(x)
x = x.permute(0, 2, 3, 1)
for stage in self.stages:
x = stage(x)
if self.head is None:
return x
B, _, _, C = x.shape
x = x.reshape(B, -1, C)
x = self.norm(x)
x = x.mean(dim=1)
x = self.head(x)
return x
@staticmethod
def init_weight(m):
if isinstance(m, (nn.Linear, nn.Conv1d)):
nn.init.trunc_normal_(m.weight, std=.02)
if isinstance(m, (nn.Linear, nn.Conv1d)) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1.)
nn.init.constant_(m.bias, 0.)
def _convmlp(arch, pretrained, progress, classifier_head, blocks, dims, mlp_ratios, *args, **kwargs):
model = ConvMLP(blocks=blocks, dims=dims, mlp_ratios=mlp_ratios,
classifier_head=classifier_head, *args, **kwargs)
if pretrained and arch in model_urls:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def convmlp_s(pretrained=False, progress=False, classifier_head=True, *args, **kwargs):
return _convmlp('convmlp_s', pretrained=pretrained, progress=progress,
blocks=[2, 4, 2], mlp_ratios=[2, 2, 2], dims=[128, 256, 512],
channels=64, n_conv_blocks=2, classifier_head=classifier_head,
*args, **kwargs)
def convmlp_m(pretrained=False, progress=False, classifier_head=True, *args, **kwargs):
return _convmlp('convmlp_m', pretrained=pretrained, progress=progress,
blocks=[3, 6, 3], mlp_ratios=[3, 3, 3], dims=[128, 256, 512],
channels=64, n_conv_blocks=3, classifier_head=classifier_head,
*args, **kwargs)
def convmlp_l(pretrained=False, progress=False, classifier_head=True, *args, **kwargs):
return _convmlp('convmlp_l', pretrained=pretrained, progress=progress,
blocks=[4, 8, 3], mlp_ratios=[3, 3, 3], dims=[192, 384, 768],
channels=96, n_conv_blocks=3, classifier_head=classifier_head,
*args, **kwargs)
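# Minimal usage sketch (illustrative only, not from the original module); run it
# with `python -m src.convmlp` so the relative imports resolve. The 224x224
# input size is an assumption matching the ImageNet configuration.
if __name__ == "__main__":
    import torch
    model = convmlp_s(pretrained=False)
    logits = model(torch.rand(1, 3, 224, 224))
    print(logits.shape)  # expected: torch.Size([1, 1000])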
|
src/Simulation/Native/parseLog.py | Bradben/qsharp-runtime | 260 | 12660491 |
import re
import sys
import numpy as np
logName = sys.argv[1]
reSched = re.compile(r"^==== sched:\s+(\S+)")
reFN = re.compile(r"^(\S+)\.")
reNQs = re.compile(r"nQs=(\d+) .*range=(\d+).*prb=(\d+)")
reSim = re.compile(' (Generic|AVX|AVX2|AVX512)$')
rePars = re.compile(r'OMP_NUM_THREADS=(\d+) fusedSpan=(\d) fusedDepth=(\d+) wfnCapacity=(\d+)')
reInfo = re.compile(r'sz=([.\d]+) nQs=([.\d]+) nCs=([.\d]+) flsh= *([.\de+-]+).*gts= *([.\de+-]+).*elap= *(\d+).*(.)gps= *([.\de+-]+).*fus= *([.\d]+).*ker= *([.\d]+)')
found = reFN.search(logName)
env = found.group(1)
fp = open(logName,'r')
gpss = []
print(f'"env","test","typ","sim","qs","threads","span","sz","gps"')
sim = ""
totalQs = -1
threads = -1
span = -1
sz = -1
rng = 1
prb = -1
sched = "???"
prbs = [
"ladder" ,
"ladder" ,
"shor_4" ,
"shor_6" ,
"shor_8" ,
"shor_10" ,
"shor_12" ,
"suprem_44",
"suprem_55",
"suprem_56",
"qulacs_5",
"qulacs_10",
"qulacs_15",
"qulacs_20",
"qulacs_25"
]
def dumpGpss():
global gpss,env,sim,totalQs,threads,span,sz,rng,prb,sched
if len(gpss) > 0:
gpsMed = np.median(gpss)
cnt = 0.0
tot = 0.0
for gps in gpss:
if gps > gpsMed/2.0 and gps < gpsMed*1.5:
cnt += 1.0
tot += gps
if cnt > 0: gps = tot/cnt
        else: gps = float(np.mean(gpss))  # nothing survived the median filter; use the plain average
nam = prbs[prb]
if rng == 0: nam = f'{env},{nam}L'
elif rng == 2: nam = f'{env},{nam}H'
else: nam = f'{env},{nam}'
print(f"{nam},{sched},{sim},{totalQs},{threads},{span},{sz},{gps:.1f}")
gpss = []
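# Illustrative example (added note): dumpGpss() reports a trimmed mean of the
# gps samples -- e.g. gpss = [10, 11, 100] has median 11, only 10 and 11 fall
# within (5.5, 16.5), so the reported gps is 10.5.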
while True:
inp = fp.readline()
if inp == "":
dumpGpss()
break
found = reSched.search(inp)
if found:
dumpGpss()
sched = found.group(1)
continue
found = reNQs.search(inp)
if found:
dumpGpss()
totalQs = found.group(1)
rng = int(found.group(2))
prb = int(found.group(3))
continue
found = reSim.search(inp)
if found:
dumpGpss()
sim = found.group(1)
continue
found = rePars.search(inp)
if found:
threads = found.group(1)
span = found.group(2)
limit = found.group(3)
wfnSiz = found.group(4)
continue
found = reInfo.search(inp)
if found:
sz = found.group(1)
nQs = float(found.group(2))
nCs = float(found.group(3))
flushes = found.group(4)
gates = found.group(5)
elap = found.group(6)
if (found.group(7) == 'k'): mul = 1000.0
else: mul = 1.0
gps = float(found.group(8)) * mul
fusions = found.group(9)
kernel = found.group(10)
gpss.append(gps)
continue
fp.close()
|
spytest/spytest/tgen/scapy/dicts.py | shubav/sonic-mgmt | 132 | 12660523 | from collections import OrderedDict
class SpyTestDict(OrderedDict):
"""
todo: Update Documentation
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
if not name.startswith('_OrderedDict__'):
self[name] = value
else:
OrderedDict.__setattr__(self, name, value)
def __delattr__(self, name):
try:
self.pop(name)
except KeyError:
OrderedDict.__delattr__(self, name)
# compare
def __eq__(self, other):
return dict.__eq__(self, other)
# stringify
def __str__(self):
return '{%s}' % ', '.join('%r: %r' % item for item in self.items())
# for PrettyPrinter
__repr__ = OrderedDict.__repr__
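# Minimal usage sketch (illustrative only, not from the original module).
if __name__ == "__main__":
    d = SpyTestDict(a=1)
    d.b = 2  # attribute assignment maps to item assignment
    print(d.a, d["b"], d)  # -> 1 2 {'a': 1, 'b': 2}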
|
tributary/lazy/node.py | ayakubov/tributary | 357 | 12660529 |
import inspect
from collections import namedtuple
from ..base import TributaryException
# from boltons.funcutils import wraps
from ..utils import _compare, _either_type, _ismethod
from .dd3 import _DagreD3Mixin
class Node(_DagreD3Mixin):
"""Class to represent an operation that is lazy"""
_id_ref = 0
def __init__(
self,
value=None,
name="?",
derived=False,
callable=None,
callable_args=None,
callable_kwargs=None,
dynamic=False,
**kwargs,
):
"""Construct a new lazy node, wrapping a callable or a value
Args:
name (str): name to use to represent the node
derived (bool): node is note instantiated directly,
e.g. via n + 10 where n is a preexisting node.
These default to dirty state
value (any): initial value of the node
callable (callable): function or other callable that the node is wrapping
callable_args (tuple): args for the wrapped callable
callable_kwargs (dict): kwargs for the wrapped callable
dynamic (bool): node should not be lazy - always access underlying value
"""
# Instances get an id but one id tracker for all nodes so we can
# uniquely identify them
# TODO different scheme
self._id = Node._id_ref
Node._id_ref += 1
# Every node gets a name so it can be uniquely identified in the graph
self._name = "{}#{}".format(
name
or (callable.__name__ if callable else None)
or self.__class__.__name__,
self._id,
)
if isinstance(value, Node):
raise TributaryException("Cannot set value to be itself a node")
# Graphviz shape
self._graphvizshape = kwargs.get("graphvizshape", "box")
# default is box instead of ellipse
# because all lazy nodes are i/o nodes
# by default
# if using dagre-d3, this will be set
self._dd3g = None
# starting value
self._values = []
# use dual number operators
self._use_dual = kwargs.get("use_dual", False)
# threshold for calculating difference
self._compare = _compare
# callable and args
self._callable_is_method = _ismethod(callable)
self._callable = callable
# map arguments of callable to nodes
self._callable_args = callable_args or []
self._callable_kwargs = callable_kwargs or {}
# callable_args_mapping maps the wrapped functions'
# arguments to nodes. It does so in 2 ways, either
# via the argument node's name, or the wrapped
# function's argument name
#
# e.g. if i have lambda x, y: x + y
# where x is set to a Node(name="One")
# and y is set to a Node(name="Two"),
# callable_args_mapping looks like:
# {0: {"node": "One", "arg": "x"},
# 1: {"node": "Two", "arg": "y"}}
#
# this way i can pass (x=5) or (One=5)
# to modify the node's value
self._callable_args_mapping = {}
# map positional to kw
if callable is not None and not inspect.isgeneratorfunction(callable):
# wrap args and kwargs of function to node
try:
signature = inspect.signature(callable)
except ValueError:
# https://bugs.python.org/issue20189
signature = namedtuple("Signature", ["parameters"])({})
parameters = [
p
for p in signature.parameters.values()
if p.kind
not in (
inspect._ParameterKind.VAR_POSITIONAL,
inspect._ParameterKind.VAR_KEYWORD,
)
]
# map argument index to name of argument, for later use
self._callable_args_mapping = {
i: {"arg": arg.name} for i, arg in enumerate(parameters)
}
# first, iterate through callable_args and callable_kwargs and convert to nodes
for i, arg in enumerate(self._callable_args):
# promote all args to nodes
if not isinstance(arg, Node):
# see if arg in argspec
if i < len(parameters):
name = parameters[i].name
else:
name = "vararg"
self._callable_args[i] = Node(name=name, value=arg)
# ensure arg can be passed by either node name, or arg name
if i not in self._callable_args_mapping:
# varargs, disallow by arg
self._callable_args_mapping[i] = {}
self._callable_args_mapping[i]["node"] = self._callable_args[
i
]._name_no_id()
# first, iterate through callable_args and callable_kwargs and convert to nodes
for name, kwarg in self._callable_kwargs.items():
if not isinstance(kwarg, Node):
self._callable_kwargs[name] = Node(name=name, value=kwarg)
# now iterate through callable's args and ensure
# everything is matched up
for i, arg in enumerate(parameters):
if arg.name == "self":
# skip
continue
# passed in as arg
if i < len(self._callable_args) or arg.name in self._callable_kwargs:
# arg is passed in args/kwargs, continue
continue
# arg not accounted for, see if it has a default in the callable
# convert to node
node = Node(name=arg.name, derived=True, value=arg.default)
# set in kwargs
self._callable_kwargs[arg.name] = node
# compare filtered parameters to original
if len(parameters) != len(signature.parameters):
# if varargs, can have more callable_args + callable_kwargs than listed arguments
failif = len([arg for arg in parameters if arg.name != "self"]) > (
len(self._callable_args) + len(self._callable_kwargs)
)
else:
# should be exactly equal
failif = len([arg for arg in parameters if arg.name != "self"]) != (
len(self._callable_args) + len(self._callable_kwargs)
)
if failif:
# something bad happened trying to align
# callable's args/kwargs with the provided
# callable_args and callable_kwargs, and we're
# now in an unrecoverable state.
raise TributaryException(
"Missing args (call or preprocessing error has occurred)"
)
try:
self._callable._node_wrapper = None # not known until program start
except AttributeError:
# can't set attributes on certain functions, so wrap with lambda
if self._callable_is_method:
self._callable = lambda self, *args, **kwargs: callable(
*args, **kwargs
)
else:
self._callable = lambda *args, **kwargs: callable(*args, **kwargs)
self._callable._node_wrapper = None # not known until program start
elif callable is not None:
self._callable_args = callable_args or []
self._callable_kwargs = callable_kwargs or {}
            # FIXME this won't work for attribute inputs
def _callable(gen=callable(*self._callable_args, **self._callable_kwargs)):
try:
ret = next(gen)
return ret
except StopIteration:
self._dynamic = False
self._dirty = False
return self.value()
self._callable = _callable
# list out all upstream nodes
self._upstream = list(self._callable_args) + list(
self._callable_kwargs.values()
)
# if always dirty, always reevaluate
# self._dynamic = dynamic # or self._callable is not None
self._dynamic = (
dynamic
or (self._callable and inspect.isgeneratorfunction(callable))
or False
)
# parent nodes in graph
self._parents = []
# self reference for method calls
self._self_reference = self
# cache node operations that have already been done
self._node_op_cache = {}
# tweaks
self._tweaks = None
# dependencies can be nodes
if self._callable:
self._dependencies = {
self._callable: (self._callable_args, self._callable_kwargs)
}
else:
self._dependencies = {}
# insert initial value
self._setValue(value)
# use this variable when manually overriding
# a callable to have a fixed value
self._dependencies_stashed = {}
# if derived node, default to dirty to start
if derived or self._callable is not None:
self._dirty = True
else:
self._dirty = False
def inputs(self, name=""):
"""get node inputs, optionally by name"""
dat = {n._name_no_id(): n for n in self._upstream}
return dat if not name else dat.get(name)
def _get_dirty(self):
return self._is_dirty
def _set_dirty(self, val):
self._reddd3g() if val else self._whited3g()
self._is_dirty = val
_dirty = property(_get_dirty, _set_dirty)
def _name_no_id(self):
return self._name.rsplit("#", 1)[0]
def _install_args(self, *args):
"""set arguments' values to those given. this is a permanent operation"""
kwargs = []
for i, arg in enumerate(args):
if (
i < len(self._callable_args)
and self._callable_args[i]._name_no_id()
in self._callable_args_mapping[i].values()
):
self._callable_args[i].setValue(arg)
else:
kwargs.append((self._callable_args_mapping[i]["node"], arg))
for k, v in kwargs:
self._callable_kwargs[k].setValue(v)
def _install_kwargs(self, **kwargs):
"""set arguments' values to those given. this is a permanent operation"""
for k, v in kwargs.items():
self._callable_kwargs[k].setValue(v)
def _get_arg(self, i):
return self._callable_args[i]
def _get_kwarg(self, keyword):
return self._callable_kwargs[keyword]
def _bind(self, other_self=None, *args, **kwargs):
if other_self is not None:
self._self_reference = other_self
self._install_args(*args)
self._install_kwargs(**kwargs)
return self
def _tweak(self, node_tweaks):
# TODO context manager
self._tweaks = node_tweaks
def _untweak(self):
# TODO context manager
self._tweaks = None
        # mark myself as dirty for tweak side-effects
# TODO another way of doing this?
self._dirty = True
def _compute_from_dependencies(self, node_tweaks):
"""recompute node's value from its dependencies, applying any temporary tweaks as necessary"""
# if i'm the one being tweaked, just return tweaked value
if self in node_tweaks:
return node_tweaks[self]
# if i have upstream dependencies
if self._dependencies:
# mark graph as calculating
self._greendd3g()
# iterate through upstream deps
for deps in self._dependencies.values():
# recompute args
for arg in deps[0]:
# recompute
arg._recompute(node_tweaks)
# Set yourself as parent if not set
if self not in arg._parents:
arg._parents.append(self)
# mark as tweaking
if node_tweaks:
arg._tweak(node_tweaks)
# recompute kwargs
for kwarg in deps[1].values():
# recompute
kwarg._recompute(node_tweaks)
# Set yourself as parent if not set
if self not in kwarg._parents:
kwarg._parents.append(self)
# mark as tweaking
if node_tweaks:
kwarg._tweak(node_tweaks)
# fetch the callable
kallable = list(self._dependencies.keys())[0]
if self._callable_is_method:
# if the callable is a method,
# pass this node as self
new_value = kallable(
self._self_reference,
*self._dependencies[kallable][0],
**self._dependencies[kallable][1],
)
else:
# else just call on deps
new_value = kallable(
*self._dependencies[kallable][0], **self._dependencies[kallable][1]
)
if isinstance(new_value, Node):
# extract numerical value from node, if it is a node
kallable._node_wrapper = new_value
new_value = new_value() # get value
if isinstance(new_value, Node):
raise TributaryException("Value should not itself be a node!")
# set my value as new value if not tweaking
if not node_tweaks:
self._setValue(new_value)
else:
# set value in tweak dict
node_tweaks[self] = new_value
# iterate through upstream deps and unset tweak
for deps in self._dependencies.values():
for arg in deps[0]:
arg._untweak()
for kwarg in deps[1].values():
kwarg._untweak()
else:
# if i don't have upstream dependencies, my value is fixed
new_value = self.value()
# mark calculation complete
self._whited3g()
# return my value
return self.value()
def _subtree_dirty(self, node_tweaks):
for call, deps in self._dependencies.items():
# callable node
if hasattr(call, "_node_wrapper") and call._node_wrapper is not None:
if call._node_wrapper.isDirty(node_tweaks):
# CRITICAL
# always set self to be dirty if subtree is dirty
self._dirty = True
return True
# check args
for arg in deps[0]:
if arg.isDirty(node_tweaks):
# CRITICAL
# always set self to be dirty if subtree is dirty
self._dirty = True
return True
# check kwargs
for kwarg in deps[1].values():
if kwarg.isDirty(node_tweaks):
# CRITICAL
# always set self to be dirty if subtree is dirty
self._dirty = True
return True
return False
def isDirty(self, node_tweaks=None):
"""Node needs to be re-evaluated, either because its value has changed
or because its value *could* change
Note that in evaluating if a node is dirty, you will have a side effect
of updating that node's status to be dirty or not.
"""
node_tweaks = node_tweaks or {}
if self in node_tweaks:
# return dirty but don't set
return _compare(node_tweaks[self], self.value())
self._dirty = self._dirty or self._subtree_dirty(node_tweaks) or self._dynamic
return self._dirty
def isDynamic(self):
"""Node isnt necessarily dirty, but needs to be reevaluated"""
return self._dynamic
def _recompute(self, node_tweaks):
"""returns result of computation"""
# check if self or upstream dirty
self.isDirty(node_tweaks)
# if i'm dirty, recompute my value
if self._dirty:
# compute upstream and then apply to self
new_value = self._compute_from_dependencies(node_tweaks)
# if my new value is not equal to my old value,
# make sure to indicate that i was really dirty
if self._compare(new_value, self.value()):
# mark my parents as dirty
if self._parents:
for parent in self._parents:
# let your parents know you were dirty!
parent._dirty = True
# set my value if not tweaking
if not node_tweaks:
self._setValue(new_value)
else:
new_value = self.value()
# mark myself as no longer dirty
self._dirty = False
# return result of computation
return new_value
def _gennode(self, name, foo, foo_args, **kwargs):
if name not in self._node_op_cache:
self._node_op_cache[name] = Node(
name=name,
derived=True,
callable=foo,
callable_args=foo_args,
override_callable_dirty=True,
**kwargs,
)
return self._node_op_cache[name]
def _tonode(self, other):
if isinstance(other, Node):
return other
if str(other) not in self._node_op_cache:
self._node_op_cache[str(other)] = Node(
name="var(" + str(other)[:5] + ")", derived=True, value=other
)
return self._node_op_cache[str(other)]
def setValue(self, value):
"""set the node's value, marking it as dirty as appropriate.
this operation is permanent"""
if self._compare(value, self.value()):
# if callable, stash and force a fixed value
if self._dependencies:
# stash dependency tree for later
self._dependencies_stashed = self._dependencies
# reset to empty
self._dependencies = {}
# mark as not dynamic anymore
self._dynamic = False
# set the value
self._setValue(value) # leave for dagre
# mark as dirty
self._dirty = True
def unlock(self):
"""if node has been set to a fixed value, reset to callable"""
# no-op if not previously stashed
if self._dependencies_stashed:
# restore dependency tree
self._dependencies = self._dependencies_stashed
# clear out stashed
self._dependencies_stashed = {}
# mark as dynamic again
self._dynamic = True
def _setValue(self, value):
"""internal method to set value. this is a permanent operation"""
# if value != self.value():
self._values.append(value)
def append(self, value):
# TODO is this better or worse than
# lst = []
# n = Node(value=lst)
# lst.append(x)
# n._dirty = True
iter(self.value())
self.value().append(value)
self._dirty = True
def get(self, **kwargs):
for k, v in kwargs.items():
for deps in self._dependencies.values():
# try to set args
for i, arg in enumerate(deps[0]):
if arg._name_no_id() == k:
return arg
# try to set kwargs
for key, kwarg in deps[1].items():
if kwarg._name_no_id() == k:
return kwarg
def set(self, **kwargs):
"""this method sets upstream dependencys' values to those given"""
for k, v in kwargs.items():
_set = False
for deps in self._dependencies.values():
# try to set args
for i, arg in enumerate(deps[0]):
if arg._name_no_id() == k:
if isinstance(v, Node):
# overwrite node
deps[0][i] = v
else:
arg._dirty = arg.value() != v
arg.setValue(v)
_set = True
break
if _set:
continue
# try to set kwargs
for key, kwarg in deps[1].items():
if kwarg._name_no_id() == k:
if isinstance(v, Node):
# overwrite node
deps[1][key] = v
else:
kwarg._dirty = kwarg.value() != v
kwarg._setValue(v)
# _set = True
break
def getValue(self):
return self.value()
def value(self):
# if tweaking, return my tweaked value
if self._tweaks and self in self._tweaks:
return self._tweaks[self]
# otherwise return my latest value
return self._values[-1] if self._values else None
def __call__(self, node_tweaks=None, *positional_tweaks, **keyword_tweaks):
"""Lazily re-evaluate the node
Args:
node_tweaks (dict): A dict mapping node to tweaked value
positional_tweaks (VAR_POSITIONAL): A tuple of positional tweaks to apply
keyword_tweaks (VAR_KEYWORD): A dict of keyword tweaks to apply
How it works: The "original caller" is the node being evaluted w/ tweaks.
It will consume the positional_tweaks` and `keyword_tweaks`, which look like:
(1, 2,) , {"a": 5, "b": 10}
and join them with `node_tweaks` in a dict mapping node->tweaked value, e.g.
{Node1: 1, Node2: 2, NodeA: 5, NodeB: 10}
and pass this dict up the call tree in `node_tweaks`.
This dict is carried through all node operations through the entire call tree.
If a node is being evaluated and is in `node_tweaks`, it ignores recalculation
and returns the tweaked value.
Returns:
Any: the value, either via re-evaluation (if self or upstream dirty),
or the previously computed value
"""
node_tweaks = node_tweaks or {}
if not isinstance(node_tweaks, dict):
# treat node_tweak argument as positional tweak
positional_tweaks = list(positional_tweaks) + [node_tweaks]
node_tweaks = {}
# instantiate tweaks
tweaks = {}
# update with provided
tweaks.update(node_tweaks)
for i, positional_tweak in enumerate(positional_tweaks):
tweaks[self._get_arg(i)] = positional_tweak
for k, keyword_tweak in keyword_tweaks.items():
tweaks[self._get_kwarg(k)] = keyword_tweak
# tweak self
if tweaks:
self._tweak(tweaks)
# calculate new value
computed = self._recompute(tweaks)
if tweaks:
# untweak self
self._untweak()
# return the calculation result, not my current value
return computed
# otherwise return my permanent value, should equal computed
# assert self.value() == computed
return self.value()
def evaluate(self, node_tweaks=None, *positional_tweaks, **keyword_tweaks):
return self(node_tweaks, *positional_tweaks, **keyword_tweaks)
def eval(self, node_tweaks=None, *positional_tweaks, **keyword_tweaks):
return self(node_tweaks, *positional_tweaks, **keyword_tweaks)
def __repr__(self):
return self._name
@_either_type
def node(meth, dynamic=True, **default_attrs):
"""Convert a method into a lazy node
Since `self` is not defined at the point of method creation, you can pass in
extra kwargs which represent attributes of the future `self`. These will be
converted to node args during instantiation
The format is:
@node(my_existing_attr_as_an_arg="_attribute_name"):
def my_method(self):
pass
this will be converted into a graph of the form:
self._attribute_name -> my_method
e.g. as if self._attribute_name was passed as an argument to my_method, and converted to a node in the usual manner
"""
signature = inspect.signature(meth)
parameters = [
p
for p in signature.parameters.values()
if p.kind
not in (
inspect._ParameterKind.VAR_POSITIONAL,
inspect._ParameterKind.VAR_KEYWORD,
)
]
# don't handle varargs yet
if len(parameters) != len(signature.parameters):
raise TributaryException("varargs not supported yet!")
if inspect.isgeneratorfunction(meth) and default_attrs:
raise TributaryException("Not a supported pattern yet!")
node_args = []
node_kwargs = {}
is_method = _ismethod(meth)
# iterate through method's args and convert them to nodes
for i, arg in enumerate(parameters):
if arg.name == "self":
continue
node_kwargs[arg.name] = Node(name=arg.name, derived=True, value=arg.default)
# add all attribute args to the argspec
# see the docstring for more details
# argspec.args.extend(list(default_attrs.keys()))
node_kwargs.update(default_attrs)
if (len(parameters) - 1 if is_method else len(parameters)) != (
len(node_args) + len(node_kwargs)
):
raise TributaryException(
"Missing args (call or preprocessing error has occurred)"
)
def meth_wrapper(self, *args, **kwargs):
if is_method:
# val = meth(self, *(arg.value() if isinstance(arg, Node) else getattr(self, arg).value() for arg in args if arg not in default_attrs), **
# {k: v.value() if isinstance(v, Node) else getattr(self, v).value() for k, v in kwargs.items() if k not in default_attrs})
val = meth(
self,
*(
arg.value() if isinstance(arg, Node) else getattr(self, arg).value()
for arg in args
),
**{
k: v.value() if isinstance(v, Node) else getattr(self, v).value()
for k, v in kwargs.items()
},
)
else:
val = meth(
*(
arg.value() if isinstance(arg, Node) else getattr(self, arg).value()
for arg in args
),
**{
k: v.value() if isinstance(v, Node) else getattr(self, v).value()
for k, v in kwargs.items()
},
)
return val
new_node = Node(
name=meth.__name__,
derived=True,
callable=meth_wrapper,
callable_args=node_args,
callable_kwargs=node_kwargs,
dynamic=dynamic,
)
if is_method:
ret = lambda self, *args, **kwargs: new_node._bind( # noqa: E731
self, *args, **kwargs
)
else:
ret = lambda *args, **kwargs: new_node._bind( # noqa: E731
None, *args, **kwargs
)
ret._node_wrapper = new_node
# ret = wraps(meth)(ret)
return ret
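# A minimal usage sketch (an illustrative guard block, not part of the library's
# public surface): wire two value nodes into a derived node, then show lazy
# re-evaluation and temporary tweaks. The wrapped lambda receives the upstream
# Node objects themselves, so it reads their current values via .value().
if __name__ == "__main__":  # pragma: no cover
    x = Node(name="x", value=1)
    y = Node(name="y", value=2)
    total = Node(
        name="total",
        callable=lambda x, y: x.value() + y.value(),
        callable_kwargs={"x": x, "y": y},
    )
    print(total())       # 3  -- evaluated lazily from x and y
    x.setValue(10)       # permanently changes x and dirties the graph
    print(total())       # 12 -- recomputed because an upstream node changed
    print(total(x=100))  # 102 -- keyword tweak, applied only for this call
    print(total())       # 12 -- the permanent value is unaffected by the tweak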
|
python/rootba/run.py | zeta1999/rootba | 139 | 12660556 | <reponame>zeta1999/rootba<filename>python/rootba/run.py<gh_stars>100-1000
#
# BSD 3-Clause License
#
# This file is part of the RootBA project.
# https://github.com/NikolausDemmel/rootba
#
# Copyright (c) 2021, <NAME>.
# All rights reserved.
#
import os
import re
from collections.abc import Mapping
from .log import load_ba_log
from .util import load_toml_if_exists
from .util import load_text_if_exists
class Run:
"""Loads files from a single run of an experiment from a folder (config, status, output, log, ...)
A single run is one invocation of a solver with a specific config on a specific problem.
This is meant to be used on directories created with the 'generate-batch-configs' and 'run-all-in' scripts.
It's best-effort, loading as many of the files as are present.
"""
def __init__(self, dirpath, seq_name_mapping):
self.dirpath = dirpath
self.config = load_toml_if_exists(os.path.join(dirpath, 'rootba_config.toml'))
self.status = load_text_if_exists(os.path.join(dirpath, 'status.log'))
self.output = load_text_if_exists(os.path.join(dirpath, 'slurm-output.log'))
# if we have slurm output, it already contains the program output, so loading it would be redundant
if self.output is None:
self.output = load_text_if_exists(os.path.join(dirpath, 'output.log'))
# backwards compatibility to older runs that had rootba-output.log instead of output.log
if self.output is None:
self.output = load_text_if_exists(os.path.join(dirpath, 'rootba-output.log'))
self.log = load_ba_log(dirpath)
self.seq_name = self._infer_sequence_name(self.log, dirpath, seq_name_mapping)
print("loaded {} from '{}'".format(self.seq_name, dirpath))
def is_ceres(self):
return self.log.is_ceres()
def is_failed(self):
if self.log is None:
return True
if "Completed" not in self.status:
return True
return False
def failure_str(self):
if not self.is_failed():
return ""
if self.output:
if "Some of your processes may have been killed by the cgroup out-of-memory handler" in self.output:
return "OOM"
if "DUE TO TIME LIMIT" in self.output:
return "OOT"
return "x"
@staticmethod
def _infer_sequence_name(log, dirpath, name_mapping):
"""Tries to infer the problem name from the log, or falls back to the parent folder name"""
seq_name = ""
try:
path = log._static.problem_info.input_path
m = re.match(r".*/bal/([^/]+)/problem-([0-9]+)-[^/]+.txt", path)
if m:
seq_name = "{}{}".format(m.group(1), m.group(2))
        except Exception:
pass
# Fallback to detecting the sequence name base on the last component of the parent folder. This is intended
# to work for run folders created with the 'generate-batch-configs' script, assuming the sequence is the
# last component in '_batch.combinations'.
if seq_name == "":
seq_name = os.path.basename(dirpath).split("_")[-1]
# optionally remap the sequence name to something else as defined in the experiments config
if isinstance(name_mapping, Mapping) and seq_name in name_mapping:
seq_name = name_mapping[seq_name]
return seq_name
@staticmethod
def is_run_dir(dirpath):
"""Returns True if the folder may be a run directory, based on the present files
This is intended to be used for auto-detecting run directories in a file tree.
"""
files = ['status.log', 'slurm-output.log', 'output.log', 'ba_log.ubjson', 'ba_log.json']
for f in files:
if os.path.isfile(os.path.join(dirpath, f)):
return True
return False
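# A minimal usage sketch (assumes a results tree produced by the batch scripts
# mentioned in the class docstring): walk a directory, load every folder that
# looks like a run directory, and print each run's sequence name and failure code.
if __name__ == "__main__":
    import sys
    root = sys.argv[1] if len(sys.argv) > 1 else "."
    for dirpath, _, _ in os.walk(root):
        if Run.is_run_dir(dirpath):
            run = Run(dirpath, seq_name_mapping=None)
            print("{:40s} {}".format(run.seq_name, run.failure_str() or "ok"))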
|
pymoo/util/termination/f_tol_single.py | gabicavalcante/pymoo | 762 | 12660591 | <reponame>gabicavalcante/pymoo
from pymoo.util.misc import to_numpy
from pymoo.util.termination.sliding_window_termination import SlidingWindowTermination
class SingleObjectiveSpaceToleranceTermination(SlidingWindowTermination):
def __init__(self,
tol=1e-6,
n_last=20,
nth_gen=1,
n_max_gen=None,
n_max_evals=None,
**kwargs) -> None:
super().__init__(metric_window_size=n_last,
data_window_size=2,
min_data_for_metric=2,
nth_gen=nth_gen,
n_max_gen=n_max_gen,
n_max_evals=n_max_evals,
**kwargs)
self.tol = tol
def _store(self, algorithm):
return algorithm.opt.get("F").min()
def _metric(self, data):
last, current = data[-2], data[-1]
return last - current
def _decide(self, metrics):
delta_f = to_numpy(metrics)
return delta_f.max() > self.tol
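# A usage sketch (assumes the pymoo 0.4-era GA / get_problem / minimize API):
# the run stops once no improvement of the best objective value larger than tol
# has been observed within the sliding window of the last n_last checks.
if __name__ == "__main__":
    from pymoo.algorithms.so_genetic_algorithm import GA
    from pymoo.factory import get_problem
    from pymoo.optimize import minimize
    termination = SingleObjectiveSpaceToleranceTermination(tol=1e-6, n_last=20, n_max_gen=1000)
    res = minimize(get_problem("sphere"), GA(pop_size=50), termination, seed=1, verbose=False)
    print("best f:", res.F)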
|
tests/test_COCOInstanceAPI.py | mintar/mseg-api | 213 | 12660607 | <reponame>mintar/mseg-api
#!/usr/bin/python3
from pathlib import Path
import numpy as np
from mseg.dataset_apis.COCOInstanceAPI import COCOInstanceAPI
_TEST_DIR = Path(__file__).parent
def test_constructor() -> None:
""" """
coco_dataroot = f"{_TEST_DIR}/test_data/COCOPanoptic_test_data"
c_api = COCOInstanceAPI(coco_dataroot)
assert len(c_api.instance_img_fpaths_splitdict["train"]) == 1
assert len(c_api.instance_img_fpaths_splitdict["val"]) == 1
assert (
Path(c_api.instance_img_fpaths_splitdict["train"][0]).name == "000000000009.png"
)
assert (
Path(c_api.instance_img_fpaths_splitdict["val"][0]).name == "000000000139.png"
)
assert len(c_api.fname_to_instanceimgfpath_dict.keys()) == 2
assert (
Path(c_api.fname_to_instanceimgfpath_dict["000000000009"]).name
== "000000000009.png"
)
assert (
Path(c_api.fname_to_instanceimgfpath_dict["000000000139"]).name
== "000000000139.png"
)
def test_get_instance_img_fpaths() -> None:
""" """
coco_dataroot = f"{_TEST_DIR}/test_data/COCOPanoptic_test_data"
c_api = COCOInstanceAPI(coco_dataroot)
split = "val"
fpaths = c_api.get_instance_img_fpaths(split)
assert len(fpaths) == 1
assert Path(fpaths[0]).name == "000000000139.png"
def test_get_instance_id_img() -> None:
""" """
coco_dataroot = f"{_TEST_DIR}/test_data/COCOPanoptic_test_data"
c_api = COCOInstanceAPI(coco_dataroot)
split = "train"
fname_stem = "000000000009"
instance_id_img = c_api.get_instance_id_img(split, fname_stem)
assert np.amax(instance_id_img) == 8922372
assert np.amin(instance_id_img) == 0
assert np.sum(instance_id_img) == 1451563332418
if __name__ == "__main__":
# test_constructor()
# test_get_instance_img_fpaths()
test_get_instance_id_img()
|
tests/utils/test_audio.py | CostanzoPablo/audiomate | 133 | 12660634 | <filename>tests/utils/test_audio.py
import os
import numpy as np
import librosa
from audiomate import tracks
from audiomate.utils import audio
def test_read_blocks(tmpdir):
wav_path = os.path.join(tmpdir.strpath, 'file.wav')
wav_content = np.random.random(10000)
librosa.output.write_wav(wav_path, wav_content, 16000)
blocks = list(audio.read_blocks(wav_path, buffer_size=1000))
assert np.allclose(np.concatenate(blocks), wav_content, atol=0.0001)
assert np.concatenate(blocks).dtype == np.float32
def test_read_blocks_with_start_end(tmpdir):
wav_path = os.path.join(tmpdir.strpath, 'file.wav')
wav_content = np.random.random(10000)
librosa.output.write_wav(wav_path, wav_content, 16000)
blocks = list(audio.read_blocks(wav_path, start=0.1, end=0.3, buffer_size=1000))
assert np.concatenate(blocks).dtype == np.float32
assert np.allclose(np.concatenate(blocks), wav_content[1600:4800], atol=0.0001)
def test_read_frames(tmpdir):
wav_path = os.path.join(tmpdir.strpath, 'file.wav')
wav_content = np.random.random(10044)
librosa.output.write_wav(wav_path, wav_content, 16000)
data = list(audio.read_frames(wav_path, frame_size=400, hop_size=160))
frames = np.array([x[0] for x in data])
last = [x[1] for x in data]
assert frames.shape == (62, 400)
assert frames.dtype == np.float32
assert np.allclose(frames[0], wav_content[:400], atol=0.0001)
assert np.allclose(frames[61], np.pad(wav_content[9760:], (0, 116), mode='constant'), atol=0.0001)
assert last[:-1] == [False] * (len(data) - 1)
assert last[-1]
def test_read_frames_matches_length(tmpdir):
wav_path = os.path.join(tmpdir.strpath, 'file.wav')
wav_content = np.random.random(10000)
librosa.output.write_wav(wav_path, wav_content, 16000)
data = list(audio.read_frames(wav_path, frame_size=400, hop_size=160))
frames = np.array([x[0] for x in data])
last = [x[1] for x in data]
assert frames.shape == (61, 400)
assert frames.dtype == np.float32
assert np.allclose(frames[0], wav_content[:400], atol=0.0001)
assert np.allclose(frames[60], wav_content[9600:], atol=0.0001)
assert last[:-1] == [False] * (len(data) - 1)
assert last[-1]
def test_write_wav(tmpdir):
samples = np.random.random(50000)
sr = 16000
path = os.path.join(tmpdir.strpath, 'audio.wav')
audio.write_wav(path, samples, sr=sr)
assert os.path.isfile(path)
track = tracks.FileTrack('idx', path)
assert np.allclose(
samples,
track.read_samples(),
atol=1.e-04
)
|
library/oci_cross_connect_group.py | slmjy/oci-ansible-modules | 106 | 12660658 | #!/usr/bin/python
# Copyright (c) 2019, Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_cross_connect_group
short_description: Create, update and delete OCI cross-connect groups
description:
- Create an OCI cross-connect group to use with Oracle Cloud Infrastructure FastConnect
- Update an OCI cross-connect group, if present, with a new display name
- Delete an OCI cross-connect group, if present.
version_added: "2.5"
options:
compartment_id:
description: Identifier of the compartment under which this cross-connect group
would be created. Mandatory for create operation.
required: false
display_name:
description: A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
required: false
aliases: ['name']
cross_connect_group_id:
description: Identifier of the cross-connect group. Mandatory for update and delete.
required: false
aliases: ['id']
state:
        description: Create, update or delete a cross-connect group. For I(state=present), if it
                     does not exist, it gets created. If it exists, it gets updated.
required: false
default: 'present'
choices: ['present','absent']
author:
- "<NAME>(@debayan_gupta)"
extends_documentation_fragment: [ oracle, oracle_wait_options, oracle_creatable_resource ]
"""
EXAMPLES = """
# Note: These examples do not set authentication details.
# Create cross-connect group
- name: Create cross-connect group
oci_cross_connect_group:
compartment_id: 'ocid1.compartment..xxxxxEXAMPLExxxxx'
display_name: 'ansible-cross-connect-group'
state: 'present'
# Update cross-connect group's Display Name
- name: Update cross-connect group's Display Name
oci_cross_connect_group:
    cross_connect_group_id: 'ocid1.crossconnectgroup..xxxxxEXAMPLExxxxx'
display_name: 'cross-connect-group-updated'
state: 'present'
# Delete cross-connect group
- name: Delete cross-connect group
oci_cross_connect_group:
    cross_connect_group_id: 'ocid1.crossconnectgroup..xxxxxEXAMPLExxxxx'
state: 'absent'
"""
RETURN = """
oci_cross_connect_group:
description: Attributes of the cross-connect group.
returned: success
type: complex
contains:
compartment_id:
description: The OCID of the compartment containing the cross-connect group.
returned: always
type: string
sample: ocid1.compartment.oc1.iad.xxxxxEXAMPLExxxxx
display_name:
description: A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
returned: always
type: string
sample: ansible-cross-connect-group
id:
description: Identifier of the cross-connect group.
returned: always
type: string
sample: ocid1.crossconnectgroup.oc1.iad.xxxxxEXAMPLExxxxx
time_created:
description: Date and time when the cross-connect group was created, in
the format defined by RFC3339
returned: always
type: datetime
sample: 2016-08-25T21:10:29.600Z
lifecycle_state:
description: The current state of the cross-connect group.
returned: always
type: string
sample: PROVISIONED
sample: {
"compartment_id":"ocid1.compartment.oc1.iad.xxxxxEXAMPLExxxxx",
"display_name":"ansible-cross-connect-group",
"id":"ocid1.crossconnectgroup.oc1.iad.xxxxxEXAMPLExxxxx",
"lifecycle_state":"PROVISIONING",
"time_created":"2018-03-03T06:55:49.463000+00:00"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.oracle import oci_utils
try:
from oci.core import VirtualNetworkClient
from oci.exceptions import ServiceError, MaximumWaitTimeExceeded
from oci.core.models import (
CreateCrossConnectGroupDetails,
UpdateCrossConnectGroupDetails,
)
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
def create_or_update_cross_connect_group(virtual_network_client, module):
result = dict(changed=False, cross_connect_group="")
cross_connect_group_id = module.params.get("cross_connect_group_id")
exclude_attributes = {"display_name": True}
try:
if cross_connect_group_id:
existing_cross_connect_group = oci_utils.get_existing_resource(
virtual_network_client.get_cross_connect_group,
module,
cross_connect_group_id=cross_connect_group_id,
)
result = update_cross_connect_group(
virtual_network_client, existing_cross_connect_group, module
)
else:
result = oci_utils.check_and_create_resource(
resource_type="cross_connect_group",
create_fn=create_cross_connect_group,
kwargs_create={
"virtual_network_client": virtual_network_client,
"module": module,
},
list_fn=virtual_network_client.list_cross_connect_groups,
kwargs_list={"compartment_id": module.params.get("compartment_id")},
module=module,
exclude_attributes=exclude_attributes,
model=CreateCrossConnectGroupDetails(),
)
except ServiceError as ex:
module.fail_json(msg=ex.message)
except MaximumWaitTimeExceeded as ex:
module.fail_json(msg=str(ex))
return result
def create_cross_connect_group(virtual_network_client, module):
create_cross_connect_group_details = CreateCrossConnectGroupDetails()
for attribute in create_cross_connect_group_details.attribute_map:
create_cross_connect_group_details.__setattr__(
attribute, module.params.get(attribute)
)
result = oci_utils.create_and_wait(
resource_type="cross_connect_group",
create_fn=virtual_network_client.create_cross_connect_group,
kwargs_create={
"create_cross_connect_group_details": create_cross_connect_group_details
},
client=virtual_network_client,
get_fn=virtual_network_client.get_cross_connect_group,
get_param="cross_connect_group_id",
module=module,
states=["INACTIVE", "PROVISIONED"],
)
return result
def update_cross_connect_group(
virtual_network_client, existing_cross_connect_group, module
):
result = oci_utils.check_and_update_resource(
resource_type="cross_connect_group",
get_fn=virtual_network_client.get_cross_connect_group,
kwargs_get={"cross_connect_group_id": module.params["cross_connect_group_id"]},
update_fn=virtual_network_client.update_cross_connect_group,
primitive_params_update=["cross_connect_group_id"],
kwargs_non_primitive_update={
UpdateCrossConnectGroupDetails: "update_cross_connect_group_details"
},
module=module,
client=virtual_network_client,
update_attributes=UpdateCrossConnectGroupDetails().attribute_map.keys(),
states=["INACTIVE", "PROVISIONED"],
)
return result
def delete_cross_connect_group(virtual_network_client, module):
return oci_utils.delete_and_wait(
resource_type="cross_connect_group",
client=virtual_network_client,
get_fn=virtual_network_client.get_cross_connect_group,
kwargs_get={"cross_connect_group_id": module.params["cross_connect_group_id"]},
delete_fn=virtual_network_client.delete_cross_connect_group,
kwargs_delete={
"cross_connect_group_id": module.params["cross_connect_group_id"]
},
module=module,
)
def main():
module_args = oci_utils.get_common_arg_spec(
supports_create=True, supports_wait=True
)
module_args.update(
compartment_id=dict(type="str", required=False),
cross_connect_group_id=dict(type="str", required=False, aliases=["id"]),
display_name=dict(type="str", required=False, aliases=["name"]),
state=dict(
type="str", required=False, default="present", choices=["present", "absent"]
),
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module")
virtual_network_client = oci_utils.create_service_client(
module, VirtualNetworkClient
)
state = module.params["state"]
if state == "present":
result = create_or_update_cross_connect_group(virtual_network_client, module)
elif state == "absent":
result = delete_cross_connect_group(virtual_network_client, module)
module.exit_json(**result)
if __name__ == "__main__":
main()
|
tests/integration/test_actions.py | zhuhaow/aws-lambda-builders | 180 | 12660662 | import os
from pathlib import Path
import tempfile
from unittest import TestCase
from parameterized import parameterized
from aws_lambda_builders.actions import CopyDependenciesAction, MoveDependenciesAction
from aws_lambda_builders.utils import copytree
class TestCopyDependenciesAction(TestCase):
@parameterized.expand(
[
("single_file",),
("multiple_files",),
("empty_subfolders",),
]
)
def test_copy_dependencies_action(self, source_folder):
curr_dir = Path(__file__).resolve().parent
test_folder = os.path.join(curr_dir, "testdata", source_folder)
with tempfile.TemporaryDirectory() as tmpdir:
empty_source = os.path.join(tmpdir, "empty_source")
target = os.path.join(tmpdir, "target")
os.mkdir(empty_source)
copy_dependencies_action = CopyDependenciesAction(empty_source, test_folder, target)
copy_dependencies_action.execute()
self.assertEqual(os.listdir(test_folder), os.listdir(target))
class TestMoveDependenciesAction(TestCase):
@parameterized.expand(
[
("single_file",),
("multiple_files",),
("empty_subfolders",),
]
)
def test_move_dependencies_action(self, source_folder):
curr_dir = Path(__file__).resolve().parent
test_folder = os.path.join(curr_dir, "testdata", source_folder)
with tempfile.TemporaryDirectory() as tmpdir:
test_source = os.path.join(tmpdir, "test_source")
empty_source = os.path.join(tmpdir, "empty_source")
target = os.path.join(tmpdir, "target")
os.mkdir(test_source)
os.mkdir(empty_source)
copytree(test_folder, test_source)
move_dependencies_action = MoveDependenciesAction(empty_source, test_source, target)
move_dependencies_action.execute()
self.assertEqual(os.listdir(test_folder), os.listdir(target))
|
notebooks-text-format/funnel_pymc3.py | arpitvaghela/probml-notebooks | 166 | 12660726 | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/funnel_pymc3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="iPLM5TwcMcTe"
# In this notebook, we explore the "funnel of hell". This refers to a posterior in which
# the mean and variance of a variable are highly correlated, and have a funnel
# shape. (The term "funnel of hell" is from [this blog post](https://twiecki.io/blog/2014/03/17/bayesian-glms-3/) by <NAME>.)
#
# We illustrate this using a hierarchical Bayesian model for inferring Gaussian means, fit to synthetic data, similar to 8 schools (except we vary the sample size and fix the variance). This code is based on [this notebook](http://bebi103.caltech.edu.s3-website-us-east-1.amazonaws.com/2017/tutorials/aux8_mcmc_tips.html) from <NAME>.
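# + [markdown]
# Concretely, the centered model draws $\theta_i \sim \mathcal{N}(\mu, \tau)$ directly, so the scale of
# every $\theta_i$ is tied to $\tau$ and the sampler has to navigate the resulting funnel. The
# non-centered model below instead draws $\tilde{\theta}_i \sim \mathcal{N}(0, 1)$ and sets
# $\theta_i = \mu + \tau\,\tilde{\theta}_i$ (the `var_theta` variable in the code), which encodes the
# same prior but decouples $\theta$ from $\tau$ and gives HMC/NUTS much friendlier geometry.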
# + id="-sWa3BStE4ov"
# %matplotlib inline
import sklearn
import scipy.stats as stats
import scipy.optimize
import matplotlib.pyplot as plt
import seaborn as sns
import time
import numpy as np
import os
import pandas as pd
# + id="1UEFiUi-qZA1" colab={"base_uri": "https://localhost:8080/"} outputId="1a20ff5d-68e6-4f60-81e0-1456bfa83b5f"
# !pip install -U pymc3>=3.8
import pymc3 as pm
print(pm.__version__)
import arviz as az
print(az.__version__)
# + id="SS-lUcY9ovUd"
import math
import pickle
import numpy as np
import pandas as pd
import scipy.stats as st
import theano.tensor as tt
import theano
# + id="H4iJ8eTAr3yF" colab={"base_uri": "https://localhost:8080/"} outputId="23291ee5-7822-41fb-d3ca-c829cd0891f5"
np.random.seed(0)
# Specify parameters for random data
mu_val = 8
tau_val = 3
sigma_val = 10
n_groups = 10
# Generate number of replicates for each repeat
n = np.random.randint(low=3, high=10, size=n_groups, dtype=int)
print(n)
print(sum(n))
# + id="oyyDYNGfsmUa" colab={"base_uri": "https://localhost:8080/"} outputId="f8d2cf60-fbbd-4a29-fcd6-747cd2e18870"
# Generate data set
mus = np.zeros(n_groups)
x = np.array([])
for i in range(n_groups):
mus[i] = np.random.normal(mu_val, tau_val)
samples = np.random.normal(mus[i], sigma_val, size=n[i])
x = np.append(x, samples)
print(x.shape)
group_ind = np.concatenate([[i]*n_val for i, n_val in enumerate(n)])
# + id="Vz-gdn-zuCcx" colab={"base_uri": "https://localhost:8080/", "height": 692} outputId="19b32b08-cffc-4800-9667-5ff22df6f387"
with pm.Model() as centered_model:
# Hyperpriors
mu = pm.Normal('mu', mu=0, sd=5)
tau = pm.HalfCauchy('tau', beta=2.5)
log_tau = pm.Deterministic('log_tau', tt.log(tau))
# Prior on theta
theta = pm.Normal('theta', mu=mu, sd=tau, shape=n_groups)
# Likelihood
x_obs = pm.Normal('x_obs',
mu=theta[group_ind],
sd=sigma_val,
observed=x)
np.random.seed(0)
with centered_model:
centered_trace = pm.sample(10000, chains=2)
pm.summary(centered_trace).round(2)
# + id="UMLPIRMPsgej" colab={"base_uri": "https://localhost:8080/", "height": 963} outputId="3227aaef-1030-490f-8605-5744d27f269c"
with pm.Model() as noncentered_model:
# Hyperpriors
mu = pm.Normal('mu', mu=0, sd=5)
tau = pm.HalfCauchy('tau', beta=2.5)
log_tau = pm.Deterministic('log_tau', tt.log(tau))
# Prior on theta
#theta = pm.Normal('theta', mu=mu, sd=tau, shape=n_trials)
var_theta = pm.Normal('var_theta', mu=0, sd=1, shape=n_groups)
theta = pm.Deterministic('theta', mu + var_theta * tau)
# Likelihood
x_obs = pm.Normal('x_obs',
mu=theta[group_ind],
sd=sigma_val,
observed=x)
np.random.seed(0)
with noncentered_model:
noncentered_trace = pm.sample(1000, chains=2)
pm.summary(noncentered_trace).round(2)
# + id="XqQQUavXvFWT" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="88b33782-8b68-4057-e1c9-b582e6db8cc1"
fig, axs = plt.subplots(ncols=2, sharex=True, sharey=True)
x = pd.Series(centered_trace['mu'], name='mu')
y = pd.Series(centered_trace['tau'], name='tau')
axs[0].plot(x, y, '.');
axs[0].set(title='Centered', xlabel='µ', ylabel='τ');
axs[0].axhline(0.01)
x = pd.Series(noncentered_trace['mu'], name='mu')
y = pd.Series(noncentered_trace['tau'], name='tau')
axs[1].plot(x, y, '.');
axs[1].set(title='NonCentered', xlabel='µ', ylabel='τ');
axs[1].axhline(0.01)
xlim = axs[0].get_xlim()
ylim = axs[0].get_ylim()
# + id="--jgSNVBLadC" colab={"base_uri": "https://localhost:8080/", "height": 495} outputId="6cf32ae5-ee7b-4abe-bf8f-b51450bb02d1"
x = pd.Series(centered_trace['mu'], name='mu')
y = pd.Series(centered_trace['tau'], name='tau')
g = sns.jointplot(x, y, xlim=xlim, ylim=ylim)
plt.suptitle('centered')
plt.show()
# + id="tEfEJ8JuLX43" colab={"base_uri": "https://localhost:8080/", "height": 495} outputId="4869fb30-3d07-4e0c-a6da-03c1014923b3"
x = pd.Series(noncentered_trace['mu'], name='mu')
y = pd.Series(noncentered_trace['tau'], name='tau')
g = sns.jointplot(x, y, xlim=xlim, ylim=ylim)
plt.suptitle('noncentered')
plt.show()
# + id="1-FQqDkTFEqy" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="b9804230-dc6c-4586-9a5a-1ad38a9cab82"
fig, axs = plt.subplots(ncols=2, sharex=True, sharey=True)
x = pd.Series(centered_trace['mu'], name='mu')
y = pd.Series(centered_trace['log_tau'], name='log_tau')
axs[0].plot(x, y, '.');
axs[0].set(title='Centered', xlabel='µ', ylabel='log(τ)');
x = pd.Series(noncentered_trace['mu'], name='mu')
y = pd.Series(noncentered_trace['log_tau'], name='log_tau')
axs[1].plot(x, y, '.');
axs[1].set(title='NonCentered', xlabel='µ', ylabel='log(τ)');
xlim = axs[0].get_xlim()
ylim = axs[0].get_ylim()
# + id="5QqP9pOLHJR5" colab={"base_uri": "https://localhost:8080/", "height": 495} outputId="34dfd8db-fc63-44bb-c203-5b2c64cf9d3c"
#https://seaborn.pydata.org/generated/seaborn.jointplot.html
x = pd.Series(centered_trace['mu'], name='mu')
y = pd.Series(centered_trace['log_tau'], name='log_tau')
g = sns.jointplot(x, y, xlim=xlim, ylim=ylim)
plt.suptitle('centered')
plt.show()
# + id="7jK4o4idIw_u" colab={"base_uri": "https://localhost:8080/", "height": 495} outputId="784cde75-c370-457f-e4df-5bb51595246a"
x = pd.Series(noncentered_trace['mu'], name='mu')
y = pd.Series(noncentered_trace['log_tau'], name='log_tau')
g = sns.jointplot(x, y, xlim=xlim, ylim=ylim)
plt.suptitle('noncentered')
plt.show()
# + id="KNam0ZuYYhxw" colab={"base_uri": "https://localhost:8080/", "height": 581} outputId="6a73f609-35a5-433f-bb22-09509881998e"
az.plot_forest([centered_trace, noncentered_trace], model_names=['centered', 'noncentered'],
var_names="theta",
combined=True, hdi_prob=0.95);
# + id="sizu9bNdT4K0"
|
loaner/deployments/lib/app_constants.py | gng-demo/travisfix | 175 | 12660735 | <reponame>gng-demo/travisfix
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Grab n Go environment specific constants.
Each one of the constants defined below is an environment specific constant.
A unique value will be required for every unique Google Cloud Project. These
will be stored in Google Cloud Storage in the bucket configured in the
loaner/deployments/config.yaml file for this project.
When adding a configurable project level constant the following procedure must
be used:
1. Add the name of the constant below, the value must be the name that is used
for the flag.
2. Create the flag with a default, no flag should be marked as required using
the `flags` package.
3. Add the name of the constant to the loaner/web_app/constants.py file.
4. (Optional) add a `Parser` object for the name in the `_PARSERS` dictionary.
The `parse` method on the `Parser` object will be used to validate the
current value of the constant, whether the default or a user provided
value. If the value is invalid, a ValueError is raised and the flag message
is used to prompt the user, only ever accepting a value that passes through
the `parse` method. If the manager is run in scripted mode an invalid value
for any constant defined below will cause an error and the script will
exit.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from absl import flags
from loaner.deployments.lib import utils
FLAGS = flags.FLAGS
# Constant Names #
# These are the configurable constants, the name of the constant matches the
# name in the constants.py file and the value is used as the name of the flag
# and used as the key for getting the respective `utils.Parser` object in the
# `_PARSERS` dictionary below.
APP_DOMAINS = 'app_domains'
CHROME_CLIENT_ID = 'chrome_client_id'
WEB_CLIENT_ID = 'web_client_id'
ADMIN_EMAIL = 'admin_email'
SEND_EMAIL_AS = 'send_email_as'
SUPERADMINS_GROUP = 'superadmins_group'
CUSTOMER_ID = 'customer_id'
# Required to be provided either by flag or by prompt.
flags.DEFINE_list(
APP_DOMAINS, [],
'A comma separated list of second-level domains that will be authorized to '
'access the application. Only add domain names that you want to have access'
' to the web application. Domains should be in the following format: '
"'example.com'"
)
flags.DEFINE_string(
CHROME_CLIENT_ID, '',
'The Chrome App OAuth2 Client ID.\n'
'In order for the Chrome companion application to be able to make API calls'
' to the management backend an OAuth2 Client ID must be provided. This can '
'be created in the Google Cloud Console at: '
"https://console.cloud.google.com/apis/credentials. The 'Application Type' "
"for this credential is 'Chrome App'.\n"
'Further instructions can be found here: https://support.google.com'
'/cloud/answer/6158849?hl=en#installedapplications&chrome'
)
flags.DEFINE_string(
WEB_CLIENT_ID, '',
'The Web App OAuth2 Client ID.\n'
'In order for the Web application to be able to make API calls to the '
'management backend an OAuth2 Client ID must be provided. This can '
'be created in the Google Cloud Console at: '
"https://console.cloud.google.com/apis/credentials. The 'Application Type' "
"for this credential is 'Web Application'.\n"
'Further instructions can be found here: https://support.google.com'
'/cloud/answer/6158849?hl=en'
)
flags.DEFINE_string(
ADMIN_EMAIL, '',
'The email address to use to access the Google Admin SDK Directory API.\n'
'If this address does not exist we will attempt to create it with a strong '
'password, which we will provide you. In order to create this account '
'programmatically you will need to be a Super Admin in the G Suite domain '
'this account is being created in.\nTo create this manually see the '
'setup_guide in the Grab n Go documentation: '
'https://github.com/google/loaner/blob/master/docs/setup_guide.md'
)
flags.DEFINE_string(
SEND_EMAIL_AS, '',
'The email address from which application related emails will come from. '
'Often a noreply address is used, e.g. <EMAIL>'
)
flags.DEFINE_string(
SUPERADMINS_GROUP, '',
'The name of the group for whom to grant super admin privileges to. '
'This should include anyone you want to be able to administer Grab n Go '
'from the web application. This gives access to all in app data.'
)
# Not required to be provided either by flag or by prompt.
flags.DEFINE_string(
CUSTOMER_ID, 'my_customer',
'The G Suite customer ID.\nIf you are an administrator of the organization '
'this application is running in leave the default. If you are a reseller '
'you can get the customer ID by making a get user request: '
'https://developers.google.com/admin-sdk/directory/v1/guides/manage-users'
'.html#get_user'
)
# Dictionary where the flag name is the key and the value is a parser, an object
# that has `parse` as a public instance method. A parser is not required,
# without one any value will be accepted.
_PARSERS = {
APP_DOMAINS: utils.ListParser(allow_empty_list=False),
CHROME_CLIENT_ID: utils.ClientIDParser(),
WEB_CLIENT_ID: utils.ClientIDParser(),
ADMIN_EMAIL: utils.EmailParser(),
SEND_EMAIL_AS: utils.EmailParser(),
SUPERADMINS_GROUP: utils.StringParser(allow_empty_string=False),
CUSTOMER_ID: utils.StringParser(allow_empty_string=False),
}
def get_constants_from_flags(module=__name__):
"""Returns a dictionary of all constants from flags.
This should only be used when skipping user validation (e.g. scripting) since
it does not validate the provided values with the custom parsers until the
value is requested. If the flag provided does not meet the `Parser`
requirements an error will be raised when attempting to retrieve the value.
Args:
module: str, the name of the module to get the constants from.
Returns:
A dictionary of all constants with the flag value as the constant value.
The key for each constant is the name of the constant.
Raises:
ValueError: when any of the flag values does not meet the parsing
requirements.
"""
def _from_flag(name):
"""Gets the value of a flag given the name.
If flags have not been parsed, the default value will be used.
Args:
name: str, the name of the flag.
Returns:
The value of the flag.
"""
if FLAGS.is_parsed():
return getattr(FLAGS, name)
return FLAGS[name].default
return _get_all_constants(module=module, func=_from_flag)
def get_default_constants(module=__name__):
"""Returns a dictionary of all constants with the default flag value.
This is used to initialize project level constants for a new project from
user prompts.
Args:
module: str, the name of the module to get the constants from.
Returns:
A dictionary of all constants with the default flag value as the constant
value. The key for each constant is the name of the constant.
"""
return _get_all_constants(module=module, func=None)
def _get_all_constants(module=__name__, func=None):
"""Returns a dictionary of all constants.
This function will return all of the flags configured above as `Constant`
objects. By default, the default value of the flag will be used.
Args:
module: str, the name of the module to get the constants from.
func: Callable, a function that returns the value of each constant given the
name of the flag.
Returns:
A dictionary of all key flags in this module represented as Constants,
keyed by the name of the constant.
"""
constants = {}
for flag in FLAGS.get_key_flags_for_module(sys.modules[module]):
value = FLAGS[flag.name].default
if func:
value = func(flag.name)
constants[flag.name] = Constant(
flag.name, flag.help, value, _PARSERS.get(flag.name))
return constants
class Constant(object):
"""Grab n Go project level constant.
Attributes:
name: str, the unique key to reference this constant by (this is identical
to the name of the flag above).
message: str, the message shown to the user when they are being prompted
to provide the value of this constant (this is identical to the help
message for the flag).
valid: bool, whether or not the current value is valid.
value: Any, the value of this constant.
"""
def __init__(self, name, message, default, parser=None):
"""Initializes the constant.
Args:
name: str, the unique key to reference this constant by (this should be
identical to the name of the flag above).
message: str, the message shown to the user when they are being prompted
to provide the value of this constant (this is identical to the help
message for the flag).
default: Any, the default value of this constant.
parser: Callable, an object to validate and parse the provided input.
A parser must meet the following requirements:
1) The object must have a parse() method that accepts a single
string as input and returns the parsed output.
2) Any error that occurs during parse() should raise a ValueError to
indicate bad user input with a helpful error message.
An example can be found in the utils module in this package.
"""
self._name = name
self._message = message
self._value = default
self._parser = parser
def __str__(self):
return '{}: {}'.format(self.name, self._value)
def __repr__(self):
return '<{0}({1!r}, {2!r}, {3!r}, {4!r})>'.format(
self.__class__.__name__, self.name, self.message, self._value,
self._parser)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
@property
def name(self):
"""Getter for the name."""
return self._name
@property
def message(self):
"""Getter for the user message."""
return self._message
@property
def value(self):
"""Getter for the current value."""
return self._value
@value.setter
def value(self, value):
"""Setter for the current value."""
self._value = value if self._parser is None else self._parser.parse(value)
@property
def valid(self):
"""Getter for whether or not the current value is valid."""
if self._parser is None:
return True
try:
self._parser.parse(self.value)
except ValueError:
return False
return True
def prompt(self):
"""Prompts the user for a new value."""
self.value = utils.prompt(
self.message, default=self.value, parser=self._parser)
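# An illustrative sketch (the constant name and message below are made up, and it
# assumes utils.StringParser rejects empty strings when allow_empty_string=False):
# a Constant pairs a value with a Parser, so an empty default reports as invalid
# until a value that satisfies the parser is assigned. Real constants are added via
# the flag/_PARSERS procedure described in the module docstring.
if __name__ == '__main__':
  example = Constant(
      'example_group', 'The name of an example group.', '',
      utils.StringParser(allow_empty_string=False))
  print(example, 'valid:', example.valid)  # empty default -> invalid
  example.value = 'grab-n-go-superadmins'
  print(example, 'valid:', example.valid)  # parsed value -> valid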
|
pyclustering/nnet/tests/unit/ut_fsync.py | JosephChataignon/pyclustering | 1,013 | 12660783 | """!
@brief Unit-tests for Oscillatory Neural Network based on Kuramoto model and Landau-Stuart.
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
"""
import unittest;
# Generate images without having a window appear.
import matplotlib;
matplotlib.use('Agg');
from pyclustering.nnet import conn_type, conn_represent;
from pyclustering.nnet.fsync import fsync_network, fsync_visualizer;
class FsyncUnitTest(unittest.TestCase):
def templateCreateNetwork(self, network_size):
oscillatory_network = fsync_network(network_size);
assert len(oscillatory_network) == network_size;
def testCreateNetworkSize1(self):
self.templateCreateNetwork(1);
def testCreateNetworkSize20(self):
self.templateCreateNetwork(20);
def testCreateNetworkSize100(self):
self.templateCreateNetwork(100);
def templateSimulateStaticOutputDynamic(self, num_osc, steps, time, collect_dynamic):
oscillatory_network = fsync_network(num_osc);
output_dynamic = oscillatory_network.simulate(steps, time, collect_dynamic);
if (collect_dynamic is True):
assert len(output_dynamic) == steps + 1;
assert output_dynamic.time[0] == 0;
else:
assert len(output_dynamic) == 1;
assert output_dynamic.time[len(output_dynamic) - 1] == time;
def testSimulateStatic10StepsTime10(self):
self.templateSimulateStaticOutputDynamic(10, 10, 10, True);
def testSimulateStatic100StepsTime10(self):
self.templateSimulateStaticOutputDynamic(3, 100, 10, True);
def testSimulateStatic100StepsTime1(self):
self.templateSimulateStaticOutputDynamic(3, 100, 1, True);
def testSimulateStatic50StepsTime10WithoutCollecting(self):
self.templateSimulateStaticOutputDynamic(3, 50, 10, False);
def testSimulateStatic100StepsTime10WithoutCollecting(self):
self.templateSimulateStaticOutputDynamic(1, 100, 10, False);
def templateGlobalSynchronization(self, size, steps, time, frequency, radius, coupling, amplitude_threshold, connections, representation):
oscillatory_network = fsync_network(size, frequency, radius, coupling, connections, representation);
output_dynamic = oscillatory_network.simulate(steps, time, True);
for index_oscillator in range(len(oscillatory_network)):
assert output_dynamic.extract_number_oscillations(index_oscillator, amplitude_threshold) > 0;
sync_ensembles = output_dynamic.allocate_sync_ensembles(amplitude_threshold);
assert len(sync_ensembles) == 1;
assert len(sync_ensembles[0]) == size;
def testGlobalSyncOneOscillatorAllToAll(self):
self.templateGlobalSynchronization(1, 50, 10, 1.0, 1.0, 1.0, 0.8, conn_type.ALL_TO_ALL, conn_represent.MATRIX);
def testGlobalSyncGroupOscillatorAllToAll(self):
self.templateGlobalSynchronization(5, 50, 10, 1.0, 1.0, 1.0, 0.8, conn_type.ALL_TO_ALL, conn_represent.MATRIX);
def testGlobalSyncOneOscillatorGridFour(self):
self.templateGlobalSynchronization(1, 50, 10, 1.0, 1.0, 1.0, 0.8, conn_type.GRID_FOUR, conn_represent.MATRIX);
def testGlobalSyncGroupOscillatorGridFour(self):
self.templateGlobalSynchronization(9, 50, 10, 1.0, 1.0, 1.0, 0.8, conn_type.GRID_FOUR, conn_represent.MATRIX);
def testGlobalSyncOneOscillatorGridEight(self):
self.templateGlobalSynchronization(1, 50, 10, 1.0, 1.0, 1.0, 0.8, conn_type.GRID_EIGHT, conn_represent.MATRIX);
def testGlobalSyncGroupOscillatorGridEight(self):
self.templateGlobalSynchronization(9, 50, 10, 1.0, 1.0, 1.0, 0.8, conn_type.GRID_EIGHT, conn_represent.MATRIX);
def testGlobalSyncOneOscillatorBidir(self):
self.templateGlobalSynchronization(1, 50, 10, 1.0, 1.0, 1.0, 0.8, conn_type.LIST_BIDIR, conn_represent.MATRIX);
def testGlobalSyncGroupOscillatorBidir(self):
self.templateGlobalSynchronization(5, 50, 10, 1.0, 1.0, 1.0, 0.8, conn_type.LIST_BIDIR, conn_represent.MATRIX);
def testGlobalSyncOneOscillatorDifferentFrequency(self):
self.templateGlobalSynchronization(1, 50, 10, [ 1.0 ], 1.0, 1.0, 0.8, conn_type.ALL_TO_ALL, conn_represent.MATRIX);
def testGlobalSyncGroupOscillatorDifferentFrequency(self):
self.templateGlobalSynchronization(5, 100, 20, [ 1.0, 1.1, 1.1, 1.2, 1.15 ], 1.0, 1.0, 0.8, conn_type.ALL_TO_ALL, conn_represent.MATRIX);
def testGlobalSyncOneOscillatorDifferentRadius(self):
self.templateGlobalSynchronization(1, 50, 10, 1.0, [ 1.0 ], 1.0, 0.8, conn_type.ALL_TO_ALL, conn_represent.MATRIX);
def testGlobalSyncGroupOscillatorDifferentRadius(self):
self.templateGlobalSynchronization(5, 50, 10, 1.0, [ 1.0, 2.0, 3.0, 4.0, 5.0 ], 1.0, 0.8, conn_type.ALL_TO_ALL, conn_represent.MATRIX);
def testGlobalSyncOneOscillatorDifferentProperty(self):
self.templateGlobalSynchronization(1, 50, 10, [ 1.0 ], [ 1.0 ], 1.0, 0.8, conn_type.ALL_TO_ALL, conn_represent.MATRIX);
def testGlobalSyncGroupOscillatorDifferentProperty(self):
self.templateGlobalSynchronization(5, 100, 20, [ 1.0, 1.1, 1.1, 1.2, 1.15 ], [ 1.0, 2.0, 3.0, 4.0, 5.0 ], 1.0, 0.8, conn_type.ALL_TO_ALL, conn_represent.MATRIX);
def templateNoOscillations(self, size, steps, time, frequency, radius, amplitude_threshold):
oscillatory_network = fsync_network(size, frequency, radius)
output_dynamic = oscillatory_network.simulate(steps, time, True)
for index_oscillator in range(len(oscillatory_network)):
assert output_dynamic.extract_number_oscillations(index_oscillator, amplitude_threshold) == 0;
def testNoOscillationsZeroFrequency(self):
self.templateNoOscillations(5, 50, 10, 0.0, 1.0, 0.5)
def testNoOscillationsZeroRadius(self):
self.templateNoOscillations(5, 50, 10, 1.0, 0.0, 0.5)
def testLackCrashGraphics(self):
oscillatory_network = fsync_network(5)
output_dynamic = oscillatory_network.simulate(50, 10, True)
fsync_visualizer.show_output_dynamic(output_dynamic)
fsync_visualizer.show_output_dynamics([output_dynamic])
def testLackCrashGraphicsDynamicSet(self):
oscillatory_network_1 = fsync_network(2)
oscillatory_network_2 = fsync_network(3)
output_dynamic_1 = oscillatory_network_1.simulate(50, 10, True)
output_dynamic_2 = oscillatory_network_2.simulate(50, 10, True)
fsync_visualizer.show_output_dynamics([output_dynamic_1, output_dynamic_2])
|
helper.py | chenqifeng22/PhotographicImageSynthesis | 1,359 | 12660812 | <gh_stars>1000+
import os,numpy as np
from os.path import dirname, exists, join, splitext
import json,scipy
class Dataset(object):
def __init__(self, dataset_name):
self.work_dir = dirname(os.path.realpath('__file__'))
info_path = join(self.work_dir, 'datasets', dataset_name + '.json')
with open(info_path, 'r') as fp:
info = json.load(fp)
self.palette = np.array(info['palette'], dtype=np.uint8)
def get_semantic_map(path):
dataset=Dataset('cityscapes')
semantic=scipy.misc.imread(path)
tmp=np.zeros((semantic.shape[0],semantic.shape[1],dataset.palette.shape[0]),dtype=np.float32)
for k in range(dataset.palette.shape[0]):
tmp[:,:,k]=np.float32((semantic[:,:,0]==dataset.palette[k,0])&(semantic[:,:,1]==dataset.palette[k,1])&(semantic[:,:,2]==dataset.palette[k,2]))
return tmp.reshape((1,)+tmp.shape)
def print_semantic_map(semantic,path):
dataset=Dataset('cityscapes')
semantic=semantic.transpose([1,2,3,0])
prediction=np.argmax(semantic,axis=2)
color_image=dataset.palette[prediction.ravel()].reshape((prediction.shape[0],prediction.shape[1],3))
row,col,dump=np.where(np.sum(semantic,axis=2)==0)
color_image[row,col,:]=0
scipy.misc.imsave(path,color_image)
|
tests/ut/python/dataset/test_biquad.py | PowerOlive/mindspore | 3,200 | 12660835 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.audio.transforms as audio
from mindspore import log as logger
def count_unequal_element(data_expected, data_me, rtol, atol):
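    # An element counts as mismatched when |expected - actual| > atol + rtol * |expected|;
    # the check passes as long as the mismatch ratio stays below rtol.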
assert data_expected.shape == data_me.shape
total_count = len(data_expected.flatten())
error = np.abs(data_expected - data_me)
greater = np.greater(error, atol + np.abs(data_expected) * rtol)
loss_count = np.count_nonzero(greater)
assert (loss_count / total_count) < rtol, \
"\ndata_expected_std:{0}\ndata_me_error:{1}\nloss:{2}". \
format(data_expected[greater], data_me[greater], error[greater])
def test_func_biquad_eager():
""" mindspore eager mode normal testcase:biquad op"""
# Original waveform
waveform = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64)
    # Expected waveform
expect_waveform = np.array([[0.0100, 0.0388, 0.1923],
[0.0400, 0.1252, 0.6530]], dtype=np.float64)
biquad_op = audio.Biquad(0.01, 0.02, 0.13, 1, 0.12, 0.3)
# Filtered waveform by biquad
output = biquad_op(waveform)
count_unequal_element(expect_waveform, output, 0.0001, 0.0001)
def test_func_biquad_pipeline():
""" mindspore pipeline mode normal testcase:biquad op"""
# Original waveform
waveform = np.array([[3.2, 2.1, 1.3], [6.2, 5.3, 6]], dtype=np.float64)
    # Expected waveform
expect_waveform = np.array([[1.0000, 1.0000, 0.5844],
[1.0000, 1.0000, 1.0000]], dtype=np.float64)
dataset = ds.NumpySlicesDataset(waveform, ["audio"], shuffle=False)
biquad_op = audio.Biquad(1, 0.02, 0.13, 1, 0.12, 0.3)
# Filtered waveform by biquad
dataset = dataset.map(input_columns=["audio"], operations=biquad_op, num_parallel_workers=8)
i = 0
for item in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
count_unequal_element(expect_waveform[i, :],
item['audio'], 0.0001, 0.0001)
i += 1
def test_biquad_invalid_input():
def test_invalid_input(test_name, b0, b1, b2, a0, a1, a2, error, error_msg):
logger.info("Test Biquad with bad input: {0}".format(test_name))
with pytest.raises(error) as error_info:
audio.Biquad(b0, b1, b2, a0, a1, a2)
assert error_msg in str(error_info.value)
test_invalid_input("invalid b0 parameter type as a String", "0.01", 0.02, 0.13, 1, 0.12, 0.3, TypeError,
"Argument b0 with value 0.01 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid b0 parameter value", 441324343243242342345300, 0.02, 0.13, 1, 0.12, 0.3, ValueError,
"Input b0 is not within the required interval of [-16777216, 16777216].")
test_invalid_input("invalid b1 parameter type as a String", 0.01, "0.02", 0.13, 0, 0.12, 0.3, TypeError,
"Argument b1 with value 0.02 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid b1 parameter value", 0.01, 441324343243242342345300, 0.13, 1, 0.12, 0.3, ValueError,
"Input b1 is not within the required interval of [-16777216, 16777216].")
test_invalid_input("invalid b2 parameter type as a String", 0.01, 0.02, "0.13", 0, 0.12, 0.3, TypeError,
"Argument b2 with value 0.13 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid b2 parameter value", 0.01, 0.02, 441324343243242342345300, 1, 0.12, 0.3, ValueError,
"Input b2 is not within the required interval of [-16777216, 16777216].")
test_invalid_input("invalid a0 parameter type as a String", 0.01, 0.02, 0.13, '1', 0.12, 0.3, TypeError,
"Argument a0 with value 1 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid a0 parameter value", 0.01, 0.02, 0.13, 0, 0.12, 0.3, ValueError,
"Input a0 is not within the required interval of [-16777216, 0) and (0, 16777216].")
test_invalid_input("invalid a0 parameter value", 0.01, 0.02, 0.13, 441324343243242342345300, 0.12, 0.3, ValueError,
"Input a0 is not within the required interval of [-16777216, 0) and (0, 16777216].")
test_invalid_input("invalid a1 parameter type as a String", 0.01, 0.02, 0.13, 1, '0.12', 0.3, TypeError,
"Argument a1 with value 0.12 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid a1 parameter value", 0.01, 0.02, 0.13, 1, 441324343243242342345300, 0.3, ValueError,
"Input a1 is not within the required interval of [-16777216, 16777216].")
test_invalid_input("invalid a2 parameter type as a String", 0.01, 0.02, 0.13, 1, 0.12, '0.3', TypeError,
"Argument a2 with value 0.3 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid a1 parameter value", 0.01, 0.02, 0.13, 1, 0.12, 441324343243242342345300, ValueError,
"Input a2 is not within the required interval of [-16777216, 16777216].")
if __name__ == '__main__':
test_func_biquad_eager()
test_func_biquad_pipeline()
test_biquad_invalid_input()
|
gan2shape/stylegan2/stylegan2-pytorch/generate.py | PeterouZh/GAN2Shape | 421 | 12660847 | import os
import argparse
import torch
import torch.nn.functional as F
from torchvision import utils
from model import Generator
from tqdm import tqdm
def generate(args, g_ema, device, mean_latent):
with torch.no_grad():
g_ema.eval()
count = 0
for i in tqdm(range(args.pics)):
sample_z = torch.randn(args.sample, args.latent, device=device)
sample_w = g_ema.style_forward(sample_z)
sample, _ = g_ema([sample_w], truncation=args.truncation, truncation_latent=mean_latent, input_is_w=True)
sample_w = mean_latent + args.truncation * (sample_w - mean_latent)
for j in range(args.sample):
utils.save_image(
sample[j],
f'{args.save_path}/{str(count).zfill(6)}.png',
nrow=1,
normalize=True,
range=(-1, 1),
)
torch.save(sample_w[j], f'{args.save_path}/latents/{str(count).zfill(6)}.pt')
count += 1
if __name__ == '__main__':
device = 'cuda'
parser = argparse.ArgumentParser()
parser.add_argument('--size', type=int, default=1024)
parser.add_argument('--sample', type=int, default=1)
parser.add_argument('--pics', type=int, default=20)
parser.add_argument('--truncation', type=float, default=0.7)
parser.add_argument('--truncation_mean', type=int, default=4096)
parser.add_argument('--ckpt', type=str, default="stylegan2-ffhq-config-f.pt")
parser.add_argument('--channel_multiplier', type=int, default=2)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--save_path', type=str, default='sample')
args = parser.parse_args()
args.latent = 512
args.n_mlp = 8
torch.manual_seed(args.seed) # also sets cuda seeds
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
os.makedirs(args.save_path + '/latents')
g_ema = Generator(
args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier
).to(device)
checkpoint = torch.load(args.ckpt)
g_ema.load_state_dict(checkpoint['g_ema'], strict=False)
if args.truncation < 1:
with torch.no_grad():
mean_latent = g_ema.mean_latent(args.truncation_mean)
else:
mean_latent = None
generate(args, g_ema, device, mean_latent)
|
projects/perception/object_detection_3d/demos/voxel_object_detection_3d/rplidar_processor.py | ad-daniel/opendr | 217 | 12660875 | <filename>projects/perception/object_detection_3d/demos/voxel_object_detection_3d/rplidar_processor.py
# Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rplidar import RPLidar as RPLidarAPI
import numpy as np
import math
from opendr.engine.data import PointCloud
import threading
import atexit
def create_point_cloud(scan, z=0):
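    # Each scan entry is (quality, angle in degrees, distance in mm); convert it to
    # Cartesian metres at height z and keep quality/16 as a fourth intensity-like value.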
points = np.empty((len(scan), 4), dtype=np.float32)
for i, s in enumerate(scan):
r, angle_degrees, distance_mm = s
angle_rad = angle_degrees * math.pi / 180
y = math.sin(angle_rad) * distance_mm / 1000
x = math.cos(angle_rad) * distance_mm / 1000
points[i] = [x, y, z, r / 16]
return points
class RPLidar:
def __init__(self, port, baudrate=115200, timeout=1):
lidar = RPLidarAPI(port=port, baudrate=baudrate, timeout=timeout)
self.lidar = lidar
lidar.clean_input()
info = lidar.get_info()
print(info)
health = lidar.get_health()
print(health)
        self.lock = threading.Lock()
        self.last_point_cloud = np.zeros((0, 4), dtype=np.float32)
        self.running = True
        # Start the reader thread only after the shared state above exists,
        # so __iterate_scans never touches attributes that are not yet set.
        self.iterate_thread = threading.Thread(target=self.__iterate_scans)
        self.iterate_thread.start()
atexit.register(self.stop)
    def __iterate_scans(self):
for scan in self.lidar.iter_scans(min_len=1):
pc = create_point_cloud(scan)
with self.lock:
self.last_point_cloud = pc
if not self.running:
return
def next(self):
with self.lock:
return PointCloud(self.last_point_cloud)
def stop(self):
self.running = False
self.iterate_thread.join()
self.lidar.stop()
self.lidar.stop_motor()
self.lidar.disconnect()
|
Tree/144. Binary Tree Preorder Traversal.py | beckswu/Leetcode | 138 | 12660879 | <reponame>beckswu/Leetcode
"""
144. Binary Tree Preorder Traversal
Given a binary tree, return the preorder traversal of its nodes' values.
Example:
Input: [1,null,2,3]
1
\
2
/
3
Output: [1,2,3]
Follow up: Recursive solution is trivial, could you do it iteratively?
"""
class Solution:
def preorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
res = []
cur = root
while cur:
if not cur.left:
res.append(cur.val)
cur = cur.right
else:
pre = cur.left
while pre.right and pre.right != cur:
pre = pre.right
if not pre.right:
                    res.append(cur.val) # first pass over this node: record the value before threading
pre.right = cur
cur = cur.left
else:
                    pre.right = None # revisiting the parent via the thread: remove it, value already recorded
cur = cur.right
return res
class Solution:
def preorderTraversal(self, root):
res = []
stack = []
cur = root
while cur or stack:
if cur:
res.append(cur.val)
stack.append(cur)
cur = cur.left
else:
cur = stack.pop()
cur = cur.right
return res
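# Variant with a stack of (node, visited) pairs: push right, then left, then the node
# marked visited, so each value is emitted before its subtrees (preorder).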
class Solution:
def preorderTraversal(self, root):
res = []
stack = [(root, False)]
cur = root
while stack:
node, visited = stack.pop()
if node:
if visited:
res.append(node.val)
else:
stack.append((node.right,False))
stack.append((node.left, False))
stack.append((node,True))
return res
class Solution:
def preorderTraversal(self, root):
ret = []
stack = [root]
# iteratively through the stack
while stack:
node = stack.pop()
if node:
ret.append(node.val)
stack.append(node.right)
stack.append(node.left)
return ret
class Solution:
def preorderTraversal(self, root):
return [] if root is None else [root.val]+self.preorderTraversal(root.left)+self.preorderTraversal(root.right) |
tests/tensortrade/unit/feed/api/float/test_utils.py | nicomon24/tensortrade | 3,081 | 12660884 |
import numpy as np
import pandas as pd
from itertools import product
from tensortrade.feed import Stream
from tests.utils.ops import assert_op
arrays = [
[-1.5, 2.2, -3.3, 4.7, -5.1, 7.45, 8.8],
[-1.2, 2.3, np.nan, 4.4, -5.5, np.nan, np.nan],
]
def test_ceil():
for array in arrays:
s = Stream.source(array, dtype="float")
w = s.ceil().rename("w")
expected = list(pd.Series(array).apply(np.ceil))
assert_op([w], expected)
def test_floor():
for array in arrays:
s = Stream.source(array, dtype="float")
w = s.floor().rename("w")
expected = list(pd.Series(array).apply(np.floor))
assert_op([w], expected)
def test_sqrt():
for array in arrays:
s = Stream.source(array, dtype="float")
w = s.sqrt().rename("w")
expected = list(pd.Series(array).apply(np.sqrt))
assert_op([w], expected)
def test_square():
for array in arrays:
s = Stream.source(array, dtype="float")
w = s.square().rename("w")
expected = list(pd.Series(array).apply(np.square))
assert_op([w], expected)
def test_log():
for array in arrays:
s = Stream.source(array, dtype="float")
w = s.log().rename("w")
expected = list(pd.Series(array).apply(np.log))
assert_op([w], expected)
def test_pct_change():
configs = [
{"periods": 1, "fill_method": None},
{"periods": 1, "fill_method": "pad"},
{"periods": 1, "fill_method": "ffill"},
{"periods": 2, "fill_method": None},
{"periods": 2, "fill_method": "pad"},
{"periods": 2, "fill_method": "ffill"},
]
for array, config in product(arrays, configs):
s = Stream.source(array, dtype="float")
w = s.pct_change(**config).rename("w")
expected = list(pd.Series(array).pct_change(**config))
print(config)
assert_op([w], expected)
def test_diff():
for array in arrays:
s = Stream.source(array, dtype="float")
w = s.diff(periods=1).rename("w")
expected = list(pd.Series(array).diff(periods=1))
assert_op([w], expected)
for array in arrays:
s = Stream.source(array, dtype="float")
w = s.diff(periods=2).rename("w")
expected = list(pd.Series(array).diff(periods=2))
assert_op([w], expected)
|
redis-py/run_test.py | nikicc/anaconda-recipes | 130 | 12660992 | import redis
#r = redis.StrictRedis(host='localhost', port=6379, db=0)
#r.set('foo', 'bar')
#assert r.get('foo') == 'bar'
|
test/tests/bootstrap/test_api20_os_bootstrap_parallel_local.py | arunrordell/RackHD | 451 | 12661014 | '''
Copyright 2017 Dell Inc. or its subsidiaries. All Rights Reserved.
Author(s):
<NAME>
This script tests minimum payload base case of the RackHD API 2.0 OS bootstrap workflows using NFS mount or local repo method.
This routine runs OS bootstrap jobs simultaneously on multiple nodes.
For 12 tests to run, 12 nodes are required in the stack. If there are fewer than that, tests will be skipped.
This test takes 15-20 minutes to run.
OS bootstrap tests require the following entries in config/install_default.json.
If an entry is missing, then that test will be skipped.
The order of entries determines the priority of the test. First one runs on first available node, etc.
"os-install": [
{
"kvm": false,
"path": "/repo/esxi/5.5",
"version": "5.5",
"workflow": "Graph.InstallESXi"
},
{
"kvm": false,
"path": "/repo/esxi/6.0",
"version": "6.0",
"workflow": "Graph.InstallESXi"
},
{
"kvm": false,
"path": "/repo/centos/6.5",
"version": "6.5",
"workflow": "Graph.InstallCentOS"
},
{
"kvm": false,
"path": "/repo/centos/7.0",
"version": "7.0",
"workflow": "Graph.InstallCentOS"
},
{
"kvm": false,
"path": "/repo/rhel/7.0",
"version": "7.0",
"workflow": "Graph.InstallRHEL"
},
{
"kvm": false,
"path": "/repo/suse/42.1",
"version": "42.1",
"workflow": "Graph.InstallSUSE"
},
{
"kvm": false,
"path": "/repo/ubuntu",
"version": "trusty",
"workflow": "Graph.InstallUbuntu"
},
{
"kvm": false,
"path": "/repo/coreos",
"version": "899.17.0",
"workflow": "Graph.InstallCoreOS"
},
{
"kvm": true,
"path": "/repo/rhel/7.0",
"version": "7.0",
"workflow": "Graph.InstallRHEL"
},
{
"kvm": true,
"path": "/repo/centos/6.5",
"version": "6.5",
"workflow": "Graph.InstallCentOS"
},
{
"kvm": false,
"path": "/repo/winpe",
"productkey": "<KEY>",
"smbPassword": "<PASSWORD>",
"smbRepo": "\\windowsServer2012",
"smbUser": "onrack",
"version": "2012",
"workflow": "Graph.InstallWindowsServer"
}
],
The OS repos are to be installed under 'on-http/static/http' directory reflecting the paths above.
These can be files, links, or nfs mounts to remote repos in the following dirs:
on-http/static/http/windowsServer2012 -- requires Samba share on RackHD server
on-http/static/http/repo/centos/6.5
on-http/static/http/repo/centos/7.0
on-http/static/http/repo/rhel/7.0
on-http/static/http/repo/suse/42.1
on-http/static/http/repo/esxi/5.5
on-http/static/http/repo/esxi/6.0
on-http/static/http/repo/winpe
on-http/static/http/repo/coreos/899.17.0
'''
import fit_path # NOQA: unused import
from nose.plugins.attrib import attr
import fit_common
import flogging
import sys
log = flogging.get_loggers()
# This gets the list of nodes
NODECATALOG = fit_common.node_select()
# dict containing bootstrap workflow IDs and states
NODE_STATUS = {}
# global timer
START_TIME = fit_common.time.time()
# collect repo information from config files
OSLIST = fit_common.fitcfg()["install-config"]["os-install"]
# download RackHD config from host
rackhdresult = fit_common.rackhdapi('/api/2.0/config')
if rackhdresult['status'] != 200:
log.error(" Unable to contact host, exiting. ")
sys.exit(255)
rackhdconfig = rackhdresult['json']
statichost = "http://" + str(rackhdconfig['fileServerAddress']) + ":" + str(rackhdconfig['fileServerPort'])
# this routine polls a workflow task ID for completion
def wait_for_workflow_complete(taskid):
result = None
while fit_common.time.time() - START_TIME < 1800 or result is None: # limit test to 30 minutes
result = fit_common.rackhdapi("/api/2.0/workflows/" + taskid)
if result['status'] != 200:
log.error(" HTTP error: " + result['text'])
return False
if result['json']['status'] == 'running' or result['json']['status'] == 'pending':
log.info_5("{} workflow status: {}".format(result['json']['injectableName'], result['json']['status']))
fit_common.time.sleep(30)
elif result['json']['status'] == 'succeeded':
log.info_5("{} workflow status: {}".format(result['json']['injectableName'], result['json']['status']))
return True
else:
log.error(" Workflow failed: " + result['text'])
return False
log.error(" Workflow Timeout: " + result['text'])
return False
# helper routine to return the task ID associated with the running bootstrap workflow
def node_taskid(workflow, version, kvm):
for entry in NODE_STATUS:
if NODE_STATUS[entry]['workflow'] == workflow \
and str(version) in NODE_STATUS[entry]['version'] \
and NODE_STATUS[entry]['kvm'] == kvm:
return NODE_STATUS[entry]['id']
return ""
# Match up tests to node IDs to feed skip decorators
index = 0 # node index
for item in OSLIST:
if index < len(NODECATALOG):
NODE_STATUS[NODECATALOG[index]] = \
{"workflow": item['workflow'], "version": item['version'], "kvm": item['kvm'], "id": "Pending"}
index += 1
# ------------------------ Tests -------------------------------------
@attr(all=False)
class api20_bootstrap_base(fit_common.unittest.TestCase):
@classmethod
def setUpClass(cls):
# run all OS install workflows first
nodeindex = 0
for item in OSLIST:
# if OS proxy entry exists in RackHD config, run bootstrap against selected node
if nodeindex < len(NODECATALOG):
# delete active workflows for specified node
fit_common.cancel_active_workflows(NODECATALOG[nodeindex])
# base payload common to all Linux
payload_data = {"options": {"defaults": {
"version": item['version'],
"kvm": item['kvm'],
"repo": statichost + item['path'],
"rootPassword": "<PASSWORD>",
"hostname": "rackhdnode",
"users": [{"name": "rackhduser",
"password": "<PASSWORD>!",
"uid": 1010}]}}}
# OS specific payload requirements
if item['workflow'] == "Graph.InstallUbuntu":
payload_data["options"]["defaults"]["baseUrl"] = "install/netboot/ubuntu-installer/amd64"
payload_data["options"]["defaults"]["kargs"] = {"live-installer/net-image": statichost +
item['path'] + "/ubuntu/install/filesystem.squashfs"}
if item['workflow'] == "Graph.InstallWindowsServer":
payload_data["options"]["defaults"]["productkey"] = item['productkey']
payload_data["options"]["defaults"]["smbUser"] = item['smbUser']
payload_data["options"]["defaults"]["smbPassword"] = item['smbPassword']
payload_data["options"]["defaults"]["smbRepo"] = "\\\\" + str(rackhdconfig['apiServerAddress']) + \
item['smbRepo']
payload_data["options"]["defaults"]["username"] = "rackhduser"
payload_data["options"]["defaults"]["password"] = "RackHDRocks!"
payload_data["options"]["defaults"].pop('rootPassword', None)
payload_data["options"]["defaults"].pop('users', None)
payload_data["options"]["defaults"].pop('kvm', None)
payload_data["options"]["defaults"].pop('version', None)
# run workflow
result = fit_common.rackhdapi('/api/2.0/nodes/' +
NODECATALOG[nodeindex] +
'/workflows?name=' + item['workflow'],
action='post', payload=payload_data)
if result['status'] == 201:
# this saves the task and node IDs
NODE_STATUS[NODECATALOG[nodeindex]] = \
{"workflow": item['workflow'],
"version": item['version'],
"kvm": item['kvm'],
"id": result['json']['instanceId']}
log.info_5(" TaskID: " + result['json']['instanceId'])
log.info_5(" Payload: " + fit_common.json.dumps(payload_data))
else:
# if no task ID is returned put 'failed' in ID field
NODE_STATUS[NODECATALOG[nodeindex]] = \
{"workflow": item['workflow'],
"version": item['version'],
"kvm": item['kvm'],
'id': "failed"}
log.error(" OS install " + item['workflow'] + " on node " + NODECATALOG[nodeindex] + " failed! ")
log.error(" Error text: " + result['text'])
log.error(" Payload: " + fit_common.json.dumps(payload_data))
# increment node index to run next bootstrap
nodeindex += 1
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallESXi", "5.", False) != '',
"Skipping ESXi5.5, repo not configured or node unavailable")
def test_api20_bootstrap_esxi5(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallESXi", "5.", False)), "ESXi5.5 failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallESXi", "6.", False) != '',
"Skipping ESXi6.0, repo not configured or node unavailable")
def test_api20_bootstrap_esxi6(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallESXi", "6.", False)), "ESXi6.0 failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallCentOS", "6.", False) != '',
"Skipping Centos 6.5, repo not configured or node unavailable")
def test_api20_bootstrap_centos6(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallCentOS", "6.", False)), "Centos 6.5 failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallCentOS", "6.", True) != '',
"Skipping Centos 6.5 KVM, repo not configured or node unavailable")
def test_api20_bootstrap_centos6_kvm(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallCentOS", "6.", True)), "Centos 6.5 KVM failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallCentOS", "7.", False) != '',
"Skipping Centos 7.0, repo not configured or node unavailable")
def test_api20_bootstrap_centos7(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallCentOS", "7.", False)), "Centos 7.0 failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallCentOS", "7.", True) != '',
"Skipping Centos 7.0 KVM, repo not configured or node unavailable")
def test_api20_bootstrap_centos7_kvm(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallCentOS", "7.", True)), "Centos 7.0 KVM failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallRHEL", "7.", False) != '',
"Skipping Redhat 7.0, repo not configured or node unavailable")
def test_api20_bootstrap_rhel7(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallRHEL", "7.", False)), "RHEL 7.0 failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallRHEL", "7.", True) != '',
"Skipping Redhat 7.0 KVM, repo not configured or node unavailable")
def test_api20_bootstrap_rhel7_kvm(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallRHEL", "7.", True)), "RHEL 7.0 KVM failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallUbuntu", "trusty", False) != '',
"Skipping Ubuntu 14, repo not configured or node unavailable")
def test_api20_bootstrap_ubuntu14(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallUbuntu", "trusty", False)), "Ubuntu 14 failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallCoreOS", "899.", False) != '',
"Skipping CoreOS 899.17.0, repo not configured or node unavailable")
def test_api20_bootstrap_coreos899(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallCoreOS", "899.", False)), "CoreOS 899.17 failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallSUSE", "42.", False) != '',
"Skipping SuSe 42, repo not configured or node unavailable")
def test_api20_bootstrap_suse(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallSUSE", "42.", False)), "SuSe 42 failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallWindowsServer", "2012", False) != '',
"Skipping Windows 2012, repo not configured or node unavailable")
def test_api20_bootstrap_windows(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallWindowsServer", "2012", False)), "Win2012 failed.")
if __name__ == '__main__':
fit_common.unittest.main()
|
depthnet-pytorch/prepare_dataset.py | dingyanna/DepthNets | 114 | 12661043 | from util import (get_data_from_id,
read_kpt_file)
import glob
import os
import numpy as np
from skimage.io import (imread,
imsave)
from skimage.transform import resize
root_dir = os.environ['DIR_3DFAW']
def prepare_train():
ids = glob.glob("%s/train_img/*.jpg" % root_dir)
ids = [os.path.basename(id_).replace(".jpg","") for id_ in ids ]
y_keypts, z_keypts = get_keypts_from_ids(ids, "train")
np.savez(file="%s/train" % root_dir,
y_keypts=y_keypts,
z_keypts=z_keypts)
def get_keypts_from_ids(ids, mode):
y_keypts = []
z_keypts = []
x_keypts = []
meta = []
for k, id_ in enumerate(ids):
print("%i / %i" % (k, len(ids)))
_,b,c = get_data_from_id(root=root_dir, mode=mode, id_=id_)
# a is f64, let's make it uint8 to save some space.
#a = (a*256.).astype("uint8")
#imgs.append(a)
y_keypts.append(b.astype("float32"))
z_keypts.append(c.astype("float32"))
#imgs = np.asarray(imgs)
y_keypts = np.asarray(y_keypts)
z_keypts = np.asarray(z_keypts)
return y_keypts, z_keypts
def prepare_valid():
ids = []
with open("%s/list_valid_test.txt" % root_dir) as f:
for line in f:
line = line.rstrip().split(",")
if line[1] == "valid":
ids.append(line[0])
y_keypts, z_keypts = get_keypts_from_ids(ids, "valid")
np.savez(file="%s/valid" % root_dir,
y_keypts=y_keypts,
z_keypts=z_keypts,
ids=ids)
def prepare_test():
ids = []
orientations = []
with open("%s/list_valid_test.txt" % root_dir) as f:
for line in f:
line = line.rstrip().split(",")
if line[1] == "test":
ids.append(line[0])
orientations.append(line[2])
y_keypts, z_keypts = get_keypts_from_ids(ids, "valid") # yes, valid
np.savez(file="%s/test" % root_dir,
y_keypts=y_keypts,
z_keypts=z_keypts,
ids=ids,
orientations=orientations)
def prepare_valid_imgs_downsized():
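    # Crop each validation image to the bounding box of its 2D landmarks,
    # resize to 80x80 and write it to valid_img_cropped_80x80.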
ids = glob.glob("%s/valid_img/*.jpg" % root_dir)
ids = [os.path.basename(id_).replace(".jpg","") for id_ in ids]
output_folder = "%s/valid_img_cropped_80x80" % root_dir
if not os.path.exists(output_folder):
os.makedirs(output_folder)
for id_ in ids:
kpts = read_kpt_file("%s/valid_lm/%s_lm.csv" % (root_dir, id_))
img = imread("%s/valid_img/%s.jpg" % (root_dir, id_))
img = img[ int(np.min(kpts[:,1])):int(np.max(kpts[:,1])),
int(np.min(kpts[:,0])):int(np.max(kpts[:,0]))]
img = resize(img, (80, 80))
imsave(arr=img, fname="%s/%s.jpg" % (output_folder, id_))
if __name__ == '__main__':
prepare_train()
prepare_valid()
prepare_test()
prepare_valid_imgs_downsized()
|
datadog_checks_base/tests/test_log.py | vbarbaresi/integrations-core | 663 | 12661055 | # -*- coding: utf-8 -*-
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import logging
import warnings
import mock
from datadog_checks import log
from datadog_checks.base import AgentCheck
from datadog_checks.base.log import DEFAULT_FALLBACK_LOGGER, get_check_logger, init_logging
def test_get_py_loglevel():
# default value for invalid input
assert log._get_py_loglevel(None) == logging.INFO
# default value for valid unicode input encoding into an invalid key
assert log._get_py_loglevel(u'dèbùg') == logging.INFO
# check unicode works
assert log._get_py_loglevel(u'crit') == logging.CRITICAL
# check string works
assert log._get_py_loglevel('crit') == logging.CRITICAL
def test_logging_capture_warnings():
with mock.patch('logging.Logger.warning') as log_warning:
warnings.warn("hello-world")
log_warning.assert_not_called() # warnings are NOT yet captured
init_logging() # from here warnings are captured as logs
warnings.warn("hello-world")
assert log_warning.call_count == 1
msg = log_warning.mock_calls[0].args[1]
assert "hello-world" in msg
def test_get_check_logger(caplog):
class FooConfig(object):
def __init__(self):
self.log = get_check_logger()
def do_something(self):
self.log.warning("This is a warning")
class MyCheck(AgentCheck):
def __init__(self, *args, **kwargs):
super(MyCheck, self).__init__(*args, **kwargs)
self._config = FooConfig()
def check(self, _):
self._config.do_something()
check = MyCheck()
check.check({})
assert check.log is check._config.log
assert "This is a warning" in caplog.text
def test_get_check_logger_fallback(caplog):
log = get_check_logger()
log.warning("This is a warning")
assert log is DEFAULT_FALLBACK_LOGGER
assert "This is a warning" in caplog.text
def test_get_check_logger_argument_fallback(caplog):
logger = logging.getLogger()
log = get_check_logger(default_logger=logger)
log.warning("This is a warning")
assert log is logger
assert "This is a warning" in caplog.text
|
2020_09_23/dojo_test.py | devppjr/dojo | 114 | 12661070 | import unittest
from dojo import remove_word, main
class DojoTest(unittest.TestCase):
def test_remove_word_1(self):
self.assertEqual(remove_word("bananauva", "banana"), "uva")
def test_remove_word_2(self):
self.assertEqual(remove_word("catdog", "dog"), "catdog")
def test_remove_word_3(self):
self.assertEqual(remove_word("pão", "pão"), "")
def test_main_1(self):
words = [
"leet",
"code"
]
self.assertEqual(main("leetcode", words), True)
def test_main_2(self):
words = [
"leet",
"code",
"apple",
]
self.assertEqual(main("leetcodeapple", words), True)
if __name__ == '__main__':
unittest.main()
# Sami - Elen - Allan - Tiago - Mateus - Juan
# s = "leetcode", wordDict = ["leet", "code"]
# FncOne(s,oneWord)
# "bananaaçaimaça" = ["banana", "açai", "maça"]
# "maçabanana"
# {
# banana
# acai
# maca
# }
# bananaaçaimaça
# naaçaimaça { - - - } , açaimaça { - - - }
# solbabanana / [uva,sol, solba, banana]
#
# sol_babanana
|
src/segmentation.py | Mohamed-S-Helal/Arabic-OCR | 117 | 12661074 | <filename>src/segmentation.py<gh_stars>100-1000
import numpy as np
import cv2 as cv
from preprocessing import binary_otsus, deskew
from utilities import projection, save_image
from glob import glob
def preprocess(image):
# Maybe we end up using only gray level image.
gray_img = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
gray_img = cv.bitwise_not(gray_img)
binary_img = binary_otsus(gray_img, 0)
# cv.imwrite('origin.png', gray_img)
# deskewed_img = deskew(binary_img)
deskewed_img = deskew(binary_img)
# cv.imwrite('output.png', deskewed_img)
# binary_img = binary_otsus(deskewed_img, 0)
# breakpoint()
# Visualize
# breakpoint()
return deskewed_img
def projection_segmentation(clean_img, axis, cut=3):
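    # Walk the projection profile along the given axis and end a segment once at
    # least 'cut' consecutive empty bins are seen.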
segments = []
start = -1
cnt = 0
projection_bins = projection(clean_img, axis)
for idx, projection_bin in enumerate(projection_bins):
if projection_bin != 0:
cnt = 0
if projection_bin != 0 and start == -1:
start = idx
if projection_bin == 0 and start != -1:
cnt += 1
if cnt >= cut:
if axis == 'horizontal':
segments.append(clean_img[max(start-1, 0):idx, :])
elif axis == 'vertical':
segments.append(clean_img[:, max(start-1, 0):idx])
cnt = 0
start = -1
return segments
# Line Segmentation
#----------------------------------------------------------------------------------------
def line_horizontal_projection(image, cut=3):
# Preprocess input image
clean_img = preprocess(image)
# Segmentation
lines = projection_segmentation(clean_img, axis='horizontal', cut=cut)
return lines
# Word Segmentation
#----------------------------------------------------------------------------------------
def word_vertical_projection(line_image, cut=3):
line_words = projection_segmentation(line_image, axis='vertical', cut=cut)
line_words.reverse()
return line_words
def extract_words(img, visual=0):
lines = line_horizontal_projection(img)
words = []
for idx, line in enumerate(lines):
if visual:
save_image(line, 'lines', f'line{idx}')
line_words = word_vertical_projection(line)
for w in line_words:
# if len(words) == 585:
# print(idx)
words.append((w, line))
# words.extend(line_words)
# breakpoint()
if visual:
for idx, word in enumerate(words):
save_image(word[0], 'words', f'word{idx}')
return words
if __name__ == "__main__":
img = cv.imread('../Dataset/scanned/capr196.png')
extract_words(img, 1) |
tests/__init__.py | sobolevn/py-enumerable | 144 | 12661150 | _empty = []
_simple = [1, 2, 3]
_complex = [{"value": 1}, {"value": 2}, {"value": 3}]
_locations = [
("Scotland", "Edinburgh", "Branch1", 20000),
("Scotland", "Glasgow", "Branch1", 12500),
("Scotland", "Glasgow", "Branch2", 12000),
("Wales", "Cardiff", "Branch1", 29700),
("Wales", "Cardiff", "Branch2", 30000),
("Wales", "Bangor", "Branch1", 12800),
("England", "London", "Branch1", 90000),
("England", "London", "Branch2", 80000),
("England", "London", "Branch3", 70000),
("England", "Manchester", "Branch1", 45600),
("England", "Manchester", "Branch2", 50000),
("England", "Liverpool", "Branch1", 29700),
("England", "Liverpool", "Branch2", 25000),
]
|
app/iclass/views/dashboard.py | edisonlz/fastor | 285 | 12661151 | # coding=utf-8
import json
from django.contrib import messages
from django.shortcuts import render, get_object_or_404
from wi_model_util.imodel import get_object_or_none
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from app.iclass.utils import redefine_item_pos
from app.iclass.models import *
from base.core.dateutils import *
import datetime
import random
@login_required
def dashboard_index(request):
"""
    Displays the system's application statistics.
"""
qd = request.GET
current_user = request.user
now = datetime.datetime.now()
start_time = qd.get("start_time", days_ago(30).strftime("%Y-%m-%d %H:%M"))
end_time = qd.get("end_time", zero_date().strftime("%Y-%m-%d %H:%M"))
if type(start_time) == str or type(start_time) == unicode:
start_time = datetime.datetime.strptime(start_time,'%Y-%m-%d %H:%M')
if type(end_time) == str or type(end_time) == unicode:
end_time = datetime.datetime.strptime(end_time,'%Y-%m-%d %H:%M')
_start_time = datetime_to_timestamp(start_time)
_end_time = datetime_to_timestamp(end_time)
context = {
"course": {
"course_count":random.randint(10,100),
"course_guake_count":random.randint(10,100),
},
"subject":{
"subject_count":random.randint(10,100),
"mock_page_count":random.randint(10,100)
},
"paper":{
"paper_count":random.randint(10,100),
},
"question":{
"question_count":random.randint(10,100),
},
"app":'fastor',
}
context['user_incr_datas'] = ((u'11-23', 238L), (u'11-24', 747L), (u'11-25', 632L), (u'11-26', 470L), (u'11-27', 408L), (u'11-28', 408L), (u'11-29', 318L), (u'11-30', 248L), (u'12-01', 269L), (u'12-02', 358L), (u'12-03', 401L), (u'12-04', 343L), (u'12-05', 422L), (u'12-06', 299L), (u'12-07', 236L), (u'12-08', 317L), (u'12-09', 436L), (u'12-10', 484L), (u'12-11', 351L), (u'12-12', 287L), (u'12-13', 279L), (u'12-14', 301L), (u'12-15', 266L), (u'12-16', 336L), (u'12-17', 374L), (u'12-18', 404L), (u'12-19', 357L), (u'12-20', 279L), (u'12-21', 218L), (u'12-22', 264L))
context['user_incr_success_datas'] = ((u'11-23', 238L), (u'11-24', 747L), (u'11-25', 632L), (u'11-26', 470L), (u'11-27', 408L), (u'11-28', 408L), (u'11-29', 318L), (u'11-30', 248L), (u'12-01', 269L), (u'12-02', 357L), (u'12-03', 401L), (u'12-04', 343L), (u'12-05', 422L), (u'12-06', 299L), (u'12-07', 235L), (u'12-08', 317L), (u'12-09', 436L), (u'12-10', 484L), (u'12-11', 351L), (u'12-12', 287L), (u'12-13', 279L), (u'12-14', 301L), (u'12-15', 266L), (u'12-16', 336L), (u'12-17', 374L), (u'12-18', 404L), (u'12-19', 357L), (u'12-20', 279L), (u'12-21', 218L), (u'12-22', 264L))
context["sql_area_count"] = ((u'\u5e7f\u4e1c\u7701', 387L), (u'\u5317\u4eac', 376L), (u'\u6c5f\u82cf\u7701', 316L), (u'\u9ed1\u9f99\u6c5f\u7701', 310L), (u'\u5e7f\u4e1c', 300L), (u'\u6d59\u6c5f', 282L))
context["order_time_datas"] = ((u'00', 35L), (u'01', 10L), (u'02', 8L), (u'05', 2L), (u'06', 8L), (u'07', 18L), (u'08', 47L), (u'09', 35L), (u'10', 108L), (u'11', 65L), (u'12', 61L), (u'13', 50L), (u'14', 54L), (u'15', 65L), (u'16', 39L), (u'17', 43L), (u'18', 20L), (u'19', 43L), (u'20', 48L), (u'21', 77L), (u'22', 34L), (u'23', 34L))
context["start_time"] = start_time
context["end_time"] = end_time
context["now"] = now.strftime("%Y-%m-%d")
context["device_data"] = ((u'iPhon', 78425L), (u'phone', 69710L), (u'HUAWE', 30187L), (u'Xiaom', 17106L), (u'OPPO-', 16214L), (u'vivo-', 16134L), (u'iPad1', 13548L), (u'Meizu', 4509L), (u'samsu', 3361L), (u'OnePl', 1110L))
return render(request, 'cms_index/basecontent.html', context)
|
ChasingTrainFramework_GeneralOneClassDetection/data_iterator_base/data_batch.py | CNN-NISER/lffd-pytorch | 220 | 12661169 | <filename>ChasingTrainFramework_GeneralOneClassDetection/data_iterator_base/data_batch.py
# coding: utf-8
class DataBatch:
def __init__(self, torch_module):
self._data = []
self._label = []
self.torch_module = torch_module
def append_data(self, new_data):
self._data.append(self.__as_tensor(new_data))
def append_label(self, new_label):
self._label.append(self.__as_tensor(new_label))
def __as_tensor(self, in_data):
return self.torch_module.from_numpy(in_data)
@property
def data(self):
return self._data
@property
def label(self):
return self._label
|
src/debugging/XcodeExample/PythonSubclassList/PythonSubclassList/test_sclist.py | joelwhitehouse/PythonExtensionPatterns | 171 | 12661173 | <gh_stars>100-1000
""" Usage:
python3 setup.py build
Created on Apr 19, 2016
@author: paulross
"""
import ScList
def test():
s = ScList.ScList()
s.append(8)
print(s.appends)
print(s)
|
custom_components/garbage_collection/__init__.py | Nag94/HomeAssistantConfig | 271 | 12661213 | <reponame>Nag94/HomeAssistantConfig
"""Component to integrate with garbage_colection."""
import logging
from datetime import timedelta
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_ENTITY_ID, CONF_NAME
from homeassistant.helpers import discovery
from .const import (
ATTR_LAST_COLLECTION,
CONF_FREQUENCY,
CONF_SENSORS,
DOMAIN,
SENSOR_PLATFORM,
configuration,
)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
_LOGGER = logging.getLogger(__name__)
config_definition = configuration()
SENSOR_SCHEMA = vol.Schema(config_definition.compile_schema())
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{vol.Optional(CONF_SENSORS): vol.All(cv.ensure_list, [SENSOR_SCHEMA])}
)
},
extra=vol.ALLOW_EXTRA,
)
COLLECT_NOW_SCHEMA = vol.Schema(
{
vol.Required(CONF_ENTITY_ID): cv.string,
vol.Optional(ATTR_LAST_COLLECTION): cv.datetime,
}
)
async def async_setup(hass, config):
"""Set up this component using YAML."""
def handle_collect_garbage(call):
"""Handle the service call."""
entity_id = call.data.get(CONF_ENTITY_ID)
last_collection = call.data.get(ATTR_LAST_COLLECTION)
_LOGGER.debug("called collect_garbage for %s", entity_id)
try:
entity = hass.data[DOMAIN][SENSOR_PLATFORM][entity_id]
if last_collection is None:
entity.last_collection = dt_util.now()
else:
entity.last_collection = dt_util.as_local(last_collection)
except Exception as err:
_LOGGER.error("Failed setting last collection for %s - %s", entity_id, err)
hass.services.call("homeassistant", "update_entity", {"entity_id": entity_id})
if DOMAIN not in hass.services.async_services():
hass.services.async_register(
DOMAIN, "collect_garbage", handle_collect_garbage, schema=COLLECT_NOW_SCHEMA
)
else:
_LOGGER.debug("Service already registered")
if config.get(DOMAIN) is None:
# We get here if the integration is set up using config flow
return True
platform_config = config[DOMAIN].get(CONF_SENSORS, {})
# If platform is not enabled, skip.
if not platform_config:
return False
for entry in platform_config:
_LOGGER.debug(
"Setting %s(%s) from YAML configuration",
entry[CONF_NAME],
entry[CONF_FREQUENCY],
)
# If entry is not enabled, skip.
# if not entry[CONF_ENABLED]:
# continue
hass.async_create_task(
discovery.async_load_platform(hass, SENSOR_PLATFORM, DOMAIN, entry, config)
)
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data={}
)
)
return True
async def async_setup_entry(hass, config_entry):
"""Set up this integration using UI."""
if config_entry.source == config_entries.SOURCE_IMPORT:
# We get here if the integration is set up using YAML
hass.async_create_task(hass.config_entries.async_remove(config_entry.entry_id))
return False
_LOGGER.debug(
"Setting %s (%s) from ConfigFlow",
config_entry.title,
config_entry.data[CONF_FREQUENCY],
)
# Backward compatibility - clean-up (can be removed later?)
config_entry.options = {}
config_entry.add_update_listener(update_listener)
# Add sensor
hass.async_add_job(
hass.config_entries.async_forward_entry_setup(config_entry, SENSOR_PLATFORM)
)
return True
async def async_remove_entry(hass, config_entry):
"""Handle removal of an entry."""
try:
await hass.config_entries.async_forward_entry_unload(
config_entry, SENSOR_PLATFORM
)
_LOGGER.info(
"Successfully removed sensor from the garbage_collection integration"
)
except ValueError:
pass
async def update_listener(hass, entry):
"""Update listener."""
# The OptionsFlow saves data to options.
# Move them back to data and clean options (dirty, but not sure how else to do that)
if len(entry.options) > 0:
entry.data = entry.options
entry.options = {}
await hass.config_entries.async_forward_entry_unload(entry, SENSOR_PLATFORM)
hass.async_add_job(
hass.config_entries.async_forward_entry_setup(entry, SENSOR_PLATFORM)
)
|
server/realms/migrations/0001_initial.py | arubdesu/zentral | 634 | 12661240 | <reponame>arubdesu/zentral<gh_stars>100-1000
# Generated by Django 2.2.9 on 2020-02-26 14:33
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Realm',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=255)),
('enabled_for_login', models.BooleanField(default=False)),
('backend', models.CharField(editable=False, max_length=255)),
('config', django.contrib.postgres.fields.jsonb.JSONField(default=dict, editable=False)),
('username_claim', models.CharField(max_length=255)),
('email_claim', models.CharField(blank=True, max_length=255)),
('first_name_claim', models.CharField(blank=True, max_length=255)),
('last_name_claim', models.CharField(blank=True, max_length=255)),
('full_name_claim', models.CharField(blank=True, max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='RealmUser',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
('claims', django.contrib.postgres.fields.jsonb.JSONField(default=dict)),
('username', models.CharField(max_length=255)),
('email', models.EmailField(blank=True, max_length=254)),
('first_name', models.CharField(blank=True, max_length=255)),
('last_name', models.CharField(blank=True, max_length=255)),
('full_name', models.CharField(blank=True, max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('realm', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='realms.Realm')),
],
options={
'unique_together': {('realm', 'username')},
},
),
migrations.CreateModel(
name='RealmAuthenticationSession',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
('callback', models.CharField(max_length=255)),
('callback_kwargs', django.contrib.postgres.fields.jsonb.JSONField(default=dict)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('realm', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='realms.Realm')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='realms.RealmUser')),
],
),
]
|
Configuration/Eras/python/Modifier_tracker_apv_vfp30_2016_cff.py | ckamtsikis/cmssw | 852 | 12661246 | import FWCore.ParameterSet.Config as cms
tracker_apv_vfp30_2016 = cms.Modifier()
|
test/slicing/test_monitor.py | melonwater211/snorkel | 2,906 | 12661264 | <reponame>melonwater211/snorkel<gh_stars>1000+
import unittest
import pandas as pd
from snorkel.slicing import slicing_function
from snorkel.slicing.monitor import slice_dataframe
DATA = [5, 10, 19, 22, 25]
@slicing_function()
def sf(x):
return x.num < 20
class PandasSlicerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.df = pd.DataFrame(dict(num=DATA))
def test_slice(self):
self.assertEqual(len(self.df), 5)
# Should return a subset
sliced_df = slice_dataframe(self.df, sf)
self.assertEqual(len(sliced_df), 3)
|
test/cpp/api/init_baseline.py | Hacky-DH/pytorch | 60,067 | 12661292 | """Script to generate baseline values from PyTorch initialization algorithms"""
import sys
import torch
HEADER = """
#include <torch/types.h>
#include <vector>
namespace expected_parameters {
"""
FOOTER = "} // namespace expected_parameters"
PARAMETERS = "inline std::vector<std::vector<torch::Tensor>> {}() {{"
INITIALIZERS = {
"Xavier_Uniform": lambda w: torch.nn.init.xavier_uniform(w),
"Xavier_Normal": lambda w: torch.nn.init.xavier_normal(w),
"Kaiming_Normal": lambda w: torch.nn.init.kaiming_normal(w),
"Kaiming_Uniform": lambda w: torch.nn.init.kaiming_uniform(w)
}
def emit(initializer_parameter_map):
# Don't write generated with an @ in front, else this file is recognized as generated.
print("// @{} from {}".format('generated', __file__))
print(HEADER)
for initializer_name, weights in initializer_parameter_map.items():
print(PARAMETERS.format(initializer_name))
print(" return {")
for sample in weights:
print(" {")
for parameter in sample:
parameter_values = "{{{}}}".format(", ".join(map(str, parameter)))
print(" torch::tensor({}),".format(parameter_values))
print(" },")
print(" };")
print("}\n")
print(FOOTER)
def run(initializer):
torch.manual_seed(0)
layer1 = torch.nn.Linear(7, 15)
INITIALIZERS[initializer](layer1.weight)
layer2 = torch.nn.Linear(15, 15)
INITIALIZERS[initializer](layer2.weight)
layer3 = torch.nn.Linear(15, 2)
INITIALIZERS[initializer](layer3.weight)
weight1 = layer1.weight.data.numpy()
weight2 = layer2.weight.data.numpy()
weight3 = layer3.weight.data.numpy()
return [weight1, weight2, weight3]
def main():
initializer_parameter_map = {}
for initializer in INITIALIZERS.keys():
sys.stderr.write('Evaluating {} ...\n'.format(initializer))
initializer_parameter_map[initializer] = run(initializer)
emit(initializer_parameter_map)
if __name__ == "__main__":
main()
|
src/ResNeXt/concateFeature.py | willyspinner/High-Performance-Face-Recognition | 300 | 12661340 | import scipy.io as sio
import pickle
import numpy as np
import os
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from scipy import spatial
from sklearn.externals import joblib
import time
reducedDim = 2048
pca = PCA(n_components = reducedDim, whiten = True)
path = "/media/zhaojian/6TB/data/extra_general_model_feature/"
with open(path + "NovelSet_List/NovelSet_1.txt", 'r') as f:
lines = f.readlines()
vggFeatures = np.loadtxt(path + 'NovelSet_Fea/VGG_NOVELSET_1.txt')
print "vggFeatures.shape: ", vggFeatures.shape
inputFeaturePath = "extracted_feature/NovelSet_1IdentityFeature/"
outputFeaturePath = "extracted_feature/NovelSet_1IdentityFeaturePCA2048/"
features = []
labelList = []
for index in range(len(lines)):
print index
line = lines[index]
ID = line.split("/")[-2]
print ID
labelList.append(ID)
vggFeature = feature = vggFeatures[index].flatten()
print "vggFeature.shape", vggFeature.shape
# caffeFeature = sio.loadmat(inputFeaturePath + ID + ".mat")["identityFeature"].flatten()
# print "caffeFeature.shape", caffeFeature.shape
#
# identityFeature = np.concatenate((caffeFeature, vggFeature), axis = 0)
# print "identityFeature.shape: ", identityFeature.shape
identityFeature = vggFeature
features.append(identityFeature)
features = np.asarray(features)
print "features..shape: ", features.shape
# sio.savemat("concatenateFeatures", {"identityFeature": features})
# sio.savemat("vggNovelSet_1_Features", {"identityFeature": features})
features = sio.loadmat("vggNovelSet_1_Features")['identityFeature']
#
# features = pca.fit_transform(features)
#
print "features..shape: ", features.shape
#
#
for index in range(len(features)):
identityFeature = features[index]
print "identityFeature.shape: ", identityFeature.shape
label = labelList[index]
# print index
# print label
sio.savemat(outputFeaturePath + label, {"identityFeature": identityFeature})
|
recipes/Python/426543_oneliner_Multichop_data/recipe-426543.py | tdiprima/code | 2,023 | 12661377 | <reponame>tdiprima/code
>>> a,bite = "supercalifragalisticexpialidocious",3
>>> [(a[d:d+bite]) for d in range(len(a)-bite) if d%bite==0]
[('s', 'u', 'p'), ('e', 'r', 'c'), ('a', 'l', 'i'), ('f', 'r', 'a'), ('g', 'a', 'l'), ('i', 's', 't'), ('i', 'c', 'e'), ('x', 'p', 'i'), ('a', 'l', 'i'), ('d', 'o', 'c'), ('i', 'o', 'u')]
>>> # or on a list
>>> b =['sup', 'erc', 'ali', 'fra', 'gal', 'ist', 'ice', 'xpi', 'ali', 'doc', 'iou']
>>>
>>> [(b[d:d+bite]) for d in range(len(b)-bite) if d%bite==0]
[['sup', 'erc', 'ali'], ['fra', 'gal', 'ist'], ['ice', 'xpi', 'ali']]
|
aat/__main__.py | mthomascarcamo/aat | 305 | 12661380 | from .config import parseConfig
from .engine import TradingEngine
def main() -> None:
# Parse the command line config
config = parseConfig()
# Instantiate trading engine
#
# The engine is responsible for managing the different components,
# including the strategies, the bank/risk engine, and the
# exchange/backtest engine.
engine = TradingEngine(**config)
# Run the live trading engine
engine.start()
if __name__ == "__main__":
main()
|
test/cli/conftest.py | gluhar2006/schemathesis | 659 | 12661403 | import pytest
@pytest.fixture(params=["real", "wsgi"])
def app_type(request):
return request.param
@pytest.fixture
def cli_args(request, openapi_version, app_type):
if app_type == "real":
schema_url = request.getfixturevalue("schema_url")
args = (schema_url,)
else:
app_path = request.getfixturevalue("loadable_flask_app")
args = (f"--app={app_path}", "/schema.yaml")
return args
|
python/eggroll/core/io/io_utils.py | liszekei/eggroll | 209 | 12661415 | <reponame>liszekei/eggroll
# Copyright (c) 2019 - now, Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from eggroll.core.meta_model import ErPartition
def get_db_path(partition: ErPartition):
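    # Partition files are laid out as /tmp/eggroll/<store_type>/<namespace>/<name>/<partition_id>.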
store_locator = partition._store_locator
db_path_prefix = '/tmp/eggroll/'
return db_path_prefix + "/".join(
[store_locator._store_type, store_locator._namespace, store_locator._name,
str(partition._id)]) |
use-cases/customer_churn/dataflow.py | jerrypeng7773/amazon-sagemaker-examples | 2,610 | 12661433 | import pandas as pd
## convert to time
df["date"] = pd.to_datetime(df["ts"], unit="ms")
df["ts_year"] = df["date"].dt.year
df["ts_month"] = df["date"].dt.month
df["ts_week"] = df["date"].dt.week
df["ts_day"] = df["date"].dt.day
df["ts_dow"] = df["date"].dt.weekday
df["ts_hour"] = df["date"].dt.hour
df["ts_date_day"] = df["date"].dt.date
df["ts_is_weekday"] = [1 if x in [0, 1, 2, 3, 4] else 0 for x in df["ts_dow"]]
df["registration_ts"] = pd.to_datetime(df["registration"], unit="ms").dt.date
##create label
df["churned_event"] = [1 if x == "Cancellation Confirmation" else 0 for x in df["page"]]
df["user_churned"] = df.groupby("userId")["churned_event"].transform("max")
## convert categorical page features
events_list = [
"NextSong",
"Thumbs Down",
"Thumbs Up",
"Add to Playlist",
"Roll Advert",
"Add Friend",
"Downgrade",
"Upgrade",
"Error",
]
usage_column_name = []
for event in events_list:
event_name = "_".join(event.split()).lower()
usage_column_name.append(event_name)
df[event_name] = [1 if x == event else 0 for x in df["page"]]
## feature engineering
base_df = (
df.groupby(["userId", "ts_date_day", "ts_is_weekday"])
.agg({"page": "count"})
.groupby(["userId", "ts_is_weekday"])["page"]
.mean()
.unstack(fill_value=0)
.reset_index()
.rename(columns={0: "average_events_weekend", 1: "average_events_weekday"})
)
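# base_df: one row per user with the average number of daily events on weekends vs weekdays.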
# num_ads_7d, num_songs_played_7d, num_songs_played_30d, num_songs_played_90d
base_df_daily = (
df.groupby(["userId", "ts_date_day"])
.agg({"page": "count", "nextsong": "sum", "roll_advert": "sum", "error": "sum"})
.reset_index()
)
feature34 = (
base_df_daily.groupby(["userId", "ts_date_day"])
.tail(7)
.groupby(["userId"])
.agg({"nextsong": "sum", "roll_advert": "sum", "error": "sum"})
.reset_index()
.rename(
columns={
"nextsong": "num_songs_played_7d",
"roll_advert": "num_ads_7d",
"error": "num_error_7d",
}
)
)
feature5 = (
base_df_daily.groupby(["userId", "ts_date_day"])
.tail(30)
.groupby(["userId"])
.agg({"nextsong": "sum"})
.reset_index()
.rename(columns={"nextsong": "num_songs_played_30d"})
)
feature6 = (
base_df_daily.groupby(["userId", "ts_date_day"])
.tail(90)
.groupby(["userId"])
.agg({"nextsong": "sum"})
.reset_index()
.rename(columns={"nextsong": "num_songs_played_90d"})
)
# num_artists, num_songs, num_ads, num_thumbsup, num_thumbsdown, num_playlist, num_addfriend, num_error, user_downgrade,
# user_upgrade, percentage_ad, days_since_active
base_df_user = (
df.groupby(["userId"])
.agg(
{
"page": "count",
"nextsong": "sum",
"artist": "nunique",
"song": "nunique",
"thumbs_down": "sum",
"thumbs_up": "sum",
"add_to_playlist": "sum",
"roll_advert": "sum",
"add_friend": "sum",
"downgrade": "max",
"upgrade": "max",
"error": "sum",
"ts_date_day": "max",
"registration_ts": "min",
"user_churned": "max",
}
)
.reset_index()
)
base_df_user["percentage_ad"] = base_df_user["roll_advert"] / base_df_user["page"]
base_df_user["days_since_active"] = (
base_df_user["ts_date_day"] - base_df_user["registration_ts"]
).dt.days
# repeats ratio
base_df_user["repeats_ratio"] = 1 - base_df_user["song"] / base_df_user["nextsong"]
# num_sessions, avg_time_per_session, avg_events_per_session,
base_df_session = (
df.groupby(["userId", "sessionId"])
.agg({"length": "sum", "page": "count", "date": "min"})
.reset_index()
)
base_df_session["prev_session_ts"] = base_df_session.groupby(["userId"])["date"].shift(1)
base_df_session["gap_session"] = (
base_df_session["date"] - base_df_session["prev_session_ts"]
).dt.days
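# shift(1) pairs each session with the start time of the user's previous
# session, so gap_session is the number of days between consecutive sessions.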
user_sessions = (
base_df_session.groupby("userId")
.agg({"sessionId": "count", "length": "mean", "page": "mean", "gap_session": "mean"})
.reset_index()
.rename(
columns={
"sessionId": "num_sessions",
"length": "avg_time_per_session",
"page": "avg_events_per_session",
"gap_session": "avg_gap_between_session",
}
)
)
# merge features together
base_df["userId"] = base_df["userId"].astype("int")
final_feature_df = base_df.merge(feature34, how="left", on="userId")
final_feature_df = final_feature_df.merge(feature5, how="left", on="userId")
final_feature_df = final_feature_df.merge(feature6, how="left", on="userId")
final_feature_df = final_feature_df.merge(user_sessions, how="left", on="userId")
df = final_feature_df.merge(base_df_user, how="left", on="userId")
df = df.fillna(0)
df.columns = [
"userId",
"average_events_weekend",
"average_events_weekday",
"num_songs_played_7d",
"num_ads_7d",
"num_error_7d",
"num_songs_played_30d",
"num_songs_played_90d",
"num_sessions",
"avg_time_per_session",
"avg_events_per_session",
"avg_gap_between_session",
"num_events",
"num_songs",
"num_artists",
"num_unique_songs",
"num_thumbs_down",
"num_thumbs_up",
"num_add_to_playlist",
"num_ads",
"num_add_friend",
"num_downgrade",
"num_upgrade",
"num_error",
"ts_date_day",
"registration_ts",
"user_churned",
"percentage_ad",
"days_since_active",
"repeats_ratio",
]
df = df[
[
"userId",
"user_churned",
"average_events_weekend",
"average_events_weekday",
"num_songs_played_7d",
"num_ads_7d",
"num_error_7d",
"num_songs_played_30d",
"num_songs_played_90d",
"num_sessions",
"avg_time_per_session",
"avg_events_per_session",
"avg_gap_between_session",
"num_events",
"num_songs",
"num_artists",
"num_thumbs_down",
"num_thumbs_up",
"num_add_to_playlist",
"num_ads",
"num_add_friend",
"num_downgrade",
"num_upgrade",
"num_error",
"percentage_ad",
"days_since_active",
"repeats_ratio",
]
]
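# A possible final step (not part of the original flow): persist the engineered
# features for downstream training, e.g.
#     df.to_csv("churn_features.csv", index=False)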
|
warp/stubs.py | NVIDIA/warp | 306 | 12661453 | # Autogenerated file, do not edit, this file provides stubs for builtins autocomplete in VSCode, PyCharm, etc
from typing import Any
from typing import Tuple
from typing import Callable
from typing import overload
from warp.types import array, array2d, array3d, array4d, constant
from warp.types import int8, uint8, int16, uint16, int32, uint32, int64, uint64, float32, float64
from warp.types import vec2, vec3, vec4, mat22, mat33, mat44, quat, transform, spatial_vector, spatial_matrix
from warp.types import mesh_query_aabb_t, hash_grid_query_t
@overload
def min(x: int32, y: int32) -> int:
"""
Return the minimum of two integers.
"""
...
@overload
def min(x: float32, y: float32) -> float:
"""
Return the minimum of two floats.
"""
...
@overload
def max(x: int32, y: int32) -> int:
"""
Return the maximum of two integers.
"""
...
@overload
def max(x: float32, y: float32) -> float:
"""
Return the maximum of two floats.
"""
...
@overload
def clamp(x: int32, a: int32, b: int32) -> int:
"""
Clamp the value of x to the range [a, b].
"""
...
@overload
def clamp(x: float32, a: float32, b: float32) -> float:
"""
Clamp the value of x to the range [a, b].
"""
...
@overload
def abs(x: int32) -> int:
"""
Return the absolute value of x.
"""
...
@overload
def abs(x: float32) -> float:
"""
Return the absolute value of x.
"""
...
@overload
def sign(x: int32) -> int:
"""
Return -1 if x < 0, return 1 otherwise.
"""
...
@overload
def sign(x: float32) -> float:
"""
Return -1.0 if x < 0.0, return 1.0 otherwise.
"""
...
@overload
def step(x: float32) -> float:
"""
Return 1.0 if x < 0.0, return 0.0 otherwise.
"""
...
@overload
def nonzero(x: float32) -> float:
"""
Return 1.0 if x is not equal to zero, return 0.0 otherwise.
"""
...
@overload
def sin(x: float32) -> float:
"""
Return the sine of x in radians.
"""
...
@overload
def cos(x: float32) -> float:
"""
Return the cosine of x in radians.
"""
...
@overload
def acos(x: float32) -> float:
"""
Return arccos of x in radians. Inputs are automatically clamped to [-1.0, 1.0].
"""
...
@overload
def asin(x: float32) -> float:
"""
Return arcsin of x in radians. Inputs are automatically clamped to [-1.0, 1.0].
"""
...
@overload
def sqrt(x: float32) -> float:
"""
Return the sqrt of x, where x is positive.
"""
...
@overload
def tan(x: float32) -> float:
"""
Return tangent of x in radians.
"""
...
@overload
def atan(x: float32) -> float:
"""
Return arctan of x.
"""
...
@overload
def atan2(y: float32, x: float32) -> float:
"""
Return atan2 of x.
"""
...
@overload
def sinh(x: float32) -> float:
"""
Return the sinh of x.
"""
...
@overload
def cosh(x: float32) -> float:
"""
Return the cosh of x.
"""
...
@overload
def tanh(x: float32) -> float:
"""
Return the tanh of x.
"""
...
@overload
def log(x: float32) -> float:
"""
Return the natural log (base-e) of x, where x is positive.
"""
...
@overload
def exp(x: float32) -> float:
"""
Return base-e exponential, e^x.
"""
...
@overload
def pow(x: float32, y: float32) -> float:
"""
Return the result of x raised to power of y.
"""
...
@overload
def round(x: float32) -> float:
"""
Calculate the nearest integer value, rounding halfway cases away from zero.
This is the most intuitive form of rounding in the colloquial sense, but can be slower than other options like ``warp.rint()``.
Differs from ``numpy.round()``, which behaves the same way as ``numpy.rint()``.
"""
...
@overload
def rint(x: float32) -> float:
"""
Calculate the nearest integer value, rounding halfway cases to nearest even integer.
It is generally faster than ``warp.round()``.
Equivalent to ``numpy.rint()``.
"""
...
@overload
def trunc(x: float32) -> float:
"""
Calculate the nearest integer that is closer to zero than x.
In other words, it discards the fractional part of x.
It is similar to casting ``float(int(x))``, but preserves the negative sign when x is in the range [-0.0, -1.0).
Equivalent to ``numpy.trunc()`` and ``numpy.fix()``.
"""
...
@overload
def floor(x: float32) -> float:
"""
Calculate the largest integer that is less than or equal to x.
"""
...
@overload
def ceil(x: float32) -> float:
"""
Calculate the smallest integer that is greater than or equal to x.
"""
...
@overload
def dot(x: vec2, y: vec2) -> float:
"""
Compute the dot product between two 2d vectors.
"""
...
@overload
def dot(x: vec3, y: vec3) -> float:
"""
Compute the dot product between two 3d vectors.
"""
...
@overload
def dot(x: vec4, y: vec4) -> float:
"""
Compute the dot product between two 4d vectors.
"""
...
@overload
def dot(x: quat, y: quat) -> float:
"""
Compute the dot product between two quaternions.
"""
...
@overload
def outer(x: vec2, y: vec2) -> mat22:
"""
Compute the outer product x*y^T for two vec2 objects.
"""
...
@overload
def outer(x: vec3, y: vec3) -> mat33:
"""
Compute the outer product x*y^T for two vec3 objects.
"""
...
@overload
def cross(x: vec3, y: vec3) -> vec3:
"""
Compute the cross product of two 3d vectors.
"""
...
@overload
def skew(x: vec3) -> mat33:
"""
Compute the skew symmetric matrix for a 3d vector.
"""
...
@overload
def length(x: vec2) -> float:
"""
Compute the length of a 2d vector.
"""
...
@overload
def length(x: vec3) -> float:
"""
Compute the length of a 3d vector.
"""
...
@overload
def length(x: vec4) -> float:
"""
Compute the length of a 4d vector.
"""
...
@overload
def normalize(x: vec2) -> vec2:
"""
Compute the normalized value of x, if length(x) is 0 then the zero vector is returned.
"""
...
@overload
def normalize(x: vec3) -> vec3:
"""
Compute the normalized value of x, if length(x) is 0 then the zero vector is returned.
"""
...
@overload
def normalize(x: vec4) -> vec4:
"""
Compute the normalized value of x, if length(x) is 0 then the zero vector is returned.
"""
...
@overload
def normalize(x: quat) -> quat:
"""
Compute the normalized value of x, if length(x) is 0 then the zero quat is returned.
"""
...
@overload
def transpose(m: mat22) -> mat22:
"""
Return the transpose of the matrix m
"""
...
@overload
def transpose(m: mat33) -> mat33:
"""
Return the transpose of the matrix m
"""
...
@overload
def transpose(m: mat44) -> mat44:
"""
Return the transpose of the matrix m
"""
...
@overload
def transpose(m: spatial_matrix) -> spatial_matrix:
"""
Return the transpose of the matrix m
"""
...
@overload
def inverse(m: mat22) -> mat22:
"""
Return the inverse of the matrix m
"""
...
@overload
def inverse(m: mat33) -> mat33:
"""
Return the inverse of the matrix m
"""
...
@overload
def inverse(m: mat44) -> mat44:
"""
Return the inverse of the matrix m
"""
...
@overload
def determinant(m: mat22) -> float:
"""
Return the determinant of the matrix m
"""
...
@overload
def determinant(m: mat33) -> float:
"""
Return the determinant of the matrix m
"""
...
@overload
def determinant(m: mat44) -> float:
"""
Return the determinant of the matrix m
"""
...
@overload
def diag(d: vec2) -> mat22:
"""
Returns a matrix with the components of the vector d on the diagonal
"""
...
@overload
def diag(d: vec3) -> mat33:
"""
Returns a matrix with the components of the vector d on the diagonal
"""
...
@overload
def diag(d: vec4) -> mat44:
"""
Returns a matrix with the components of the vector d on the diagonal
"""
...
@overload
def cw_mul(x: vec2, y: vec2) -> vec2:
"""
Component wise multiply of two 2d vectors.
"""
...
@overload
def cw_mul(x: vec3, y: vec3) -> vec3:
"""
Component wise multiply of two 3d vectors.
"""
...
@overload
def cw_mul(x: vec4, y: vec4) -> vec4:
"""
Component wise multiply of two 4d vectors.
"""
...
@overload
def cw_div(x: vec2, y: vec2) -> vec2:
"""
Component wise division of two 2d vectors.
"""
...
@overload
def cw_div(x: vec3, y: vec3) -> vec3:
"""
Component wise division of two 3d vectors.
"""
...
@overload
def cw_div(x: vec4, y: vec4) -> vec4:
"""
Component wise division of two 4d vectors.
"""
...
@overload
def svd3(A: mat33, U: mat33, sigma: vec3, V: mat33):
"""
Compute the SVD of a 3x3 matrix. The singular values are returned in sigma,
while the left and right basis vectors are returned in U and V.
"""
...
@overload
def quat_identity() -> quat:
"""
Construct an identity quaternion with zero imaginary part and real part of 1.0
"""
...
@overload
def quat_from_axis_angle(axis: vec3, angle: float32) -> quat:
"""
Construct a quaternion representing a rotation of angle radians around the given axis.
"""
...
@overload
def quat_from_matrix(m: mat33) -> quat:
"""
Construct a quaternion from a 3x3 matrix.
"""
...
@overload
def quat_rpy(roll: float32, pitch: float32, yaw: float32) -> quat:
"""
    Construct a quaternion representing combined roll (z), pitch (x), and yaw (y) rotations in radians.
"""
...
@overload
def quat_inverse(q: quat) -> quat:
"""
Compute quaternion conjugate.
"""
...
@overload
def quat_rotate(q: quat, p: vec3) -> vec3:
"""
Rotate a vector by a quaternion.
"""
...
@overload
def quat_rotate_inv(q: quat, p: vec3) -> vec3:
"""
    Rotate a vector by the inverse of a quaternion.
"""
...
@overload
def quat_to_matrix(q: quat) -> mat33:
"""
Convert a quaternion to a 3x3 rotation matrix.
"""
...
@overload
def transform_identity() -> transform:
"""
Construct an identity transform with zero translation and identity rotation.
"""
...
@overload
def transform_get_translation(t: transform) -> vec3:
"""
Return the translational part of a transform.
"""
...
@overload
def transform_get_rotation(t: transform) -> quat:
"""
Return the rotational part of a transform.
"""
...
@overload
def transform_multiply(a: transform, b: transform) -> transform:
"""
Multiply two rigid body transformations together.
"""
...
@overload
def transform_point(t: transform, p: vec3) -> vec3:
"""
    Apply the transform to a point p treating the homogeneous coordinate as w=1 (translation and rotation).
"""
...
@overload
def transform_point(m: mat44, p: vec3) -> vec3:
"""
    Apply the transform to a point ``p`` treating the homogeneous coordinate as w=1. The transformation is applied treating ``p`` as a column vector, e.g.: ``y = M*p``.
    Note that this is in contrast to some libraries, notably USD, which apply transforms to row vectors, ``y^T = p^T*M^T``. If the transform is coming from a library that uses row vectors,
    then users should transpose the transformation matrix before calling this method.
"""
...
@overload
def transform_vector(t: transform, v: vec3) -> vec3:
"""
    Apply the transform to a vector v treating the homogeneous coordinate as w=0 (rotation only).
"""
...
@overload
def transform_vector(m: mat44, v: vec3) -> vec3:
"""
    Apply the transform to a vector ``v`` treating the homogeneous coordinate as w=0. The transformation is applied treating ``v`` as a column vector, e.g.: ``y = M*v``.
    Note that this is in contrast to some libraries, notably USD, which apply transforms to row vectors, ``y^T = v^T*M^T``. If the transform is coming from a library that uses row vectors,
    then users should transpose the transformation matrix before calling this method.
"""
...
@overload
def transform_inverse(t: transform) -> transform:
"""
Compute the inverse of the transform.
"""
...
@overload
def spatial_dot(a: spatial_vector, b: spatial_vector) -> float:
"""
Compute the dot product of two 6d screw vectors.
"""
...
@overload
def spatial_cross(a: spatial_vector, b: spatial_vector) -> spatial_vector:
"""
Compute the cross-product of two 6d screw vectors.
"""
...
@overload
def spatial_cross_dual(a: spatial_vector, b: spatial_vector) -> spatial_vector:
"""
Compute the dual cross-product of two 6d screw vectors.
"""
...
@overload
def spatial_top(a: spatial_vector) -> vec3:
"""
Return the top (first) part of a 6d screw vector.
"""
...
@overload
def spatial_bottom(a: spatial_vector) -> vec3:
"""
Return the bottom (second) part of a 6d screw vector.
"""
...
@overload
def spatial_jacobian(S: array[spatial_vector], joint_parents: array[int32], joint_qd_start: array[int32], joint_start: int32, joint_count: int32, J_start: int32, J_out: array[float32]):
"""
"""
...
@overload
def spatial_mass(I_s: array[spatial_matrix], joint_start: int32, joint_count: int32, M_start: int32, M: array[float32]):
"""
"""
...
@overload
def mlp(weights: array[float32], bias: array[float32], activation: Callable, index: int32, x: array[float32], out: array[float32]):
"""
Evaluate a multi-layer perceptron (MLP) layer in the form: ``out = act(weights*x + bias)``.
:param weights: A layer's network weights with dimensions ``(m, n)``.
:param bias: An array with dimensions ``(n)``.
:param activation: A ``wp.func`` function that takes a single scalar float as input and returns a scalar float as output
:param index: The batch item to process, typically each thread will process 1 item in the batch, in this case index should be ``wp.tid()``
:param x: The feature matrix with dimensions ``(n, b)``
:param out: The network output with dimensions ``(m, b)``
:note: Feature and output matrices are transposed compared to some other frameworks such as PyTorch. All matrices are assumed to be stored in flattened row-major memory layout (NumPy default).
"""
...
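# Hedged usage sketch (not part of the generated stubs): inside a kernel, each
# thread typically evaluates one batch item, e.g.
#
#     i = wp.tid()
#     wp.mlp(weights, bias, activation, i, x, out)
#
# where `activation` is a user-defined @wp.func (an assumption here).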
@overload
def mesh_query_point(id: uint64, point: vec3, max_dist: float32, inside: float32, face: int32, bary_u: float32, bary_v: float32) -> bool:
"""
    Computes the closest point on the mesh with identifier `id` to the given point in space. Returns ``True`` if a point closer than ``max_dist`` is found.
:param id: The mesh identifier
:param point: The point in space to query
:param max_dist: Mesh faces above this distance will not be considered by the query
:param inside: Returns a value < 0 if query point is inside the mesh, >=0 otherwise. Note that mesh must be watertight for this to be robust
:param face: Returns the index of the closest face
:param bary_u: Returns the barycentric u coordinate of the closest point
    :param bary_v: Returns the barycentric v coordinate of the closest point
"""
...
@overload
def mesh_query_ray(id: uint64, start: vec3, dir: vec3, max_t: float32, t: float32, bary_u: float32, bary_v: float32, sign: float32, normal: vec3, face: int32) -> bool:
"""
    Computes the closest ray hit on the mesh with identifier `id`. Returns ``True`` if a hit closer than ``max_t`` is found.
:param id: The mesh identifier
:param start: The start point of the ray
:param dir: The ray direction (should be normalized)
:param max_t: The maximum distance along the ray to check for intersections
:param t: Returns the distance of the closest hit along the ray
:param bary_u: Returns the barycentric u coordinate of the closest hit
:param bary_v: Returns the barycentric v coordinate of the closest hit
    :param sign: Returns a value > 0 if the ray hit the front of the face, < 0 otherwise
:param normal: Returns the face normal
:param face: Returns the index of the hit face
"""
...
@overload
def mesh_query_aabb(id: uint64, lower: vec3, upper: vec3) -> mesh_query_aabb_t:
"""
Construct an axis-aligned bounding box query against a mesh object. This query can be used to iterate over all triangles
inside a volume. Returns an object that is used to track state during mesh traversal.
:param id: The mesh identifier
:param lower: The lower bound of the bounding box in mesh space
:param upper: The upper bound of the bounding box in mesh space
"""
...
@overload
def mesh_query_aabb_next(query: mesh_query_aabb_t, index: int32) -> bool:
"""
Move to the next triangle overlapping the query bounding box. The index of the current face is stored in ``index``, returns ``False``
if there are no more overlapping triangles.
"""
...
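# Hedged traversal sketch (assumed kernel context, not part of the generated
# stubs): iterate over triangles whose bounds overlap the query box.
#
#     query = wp.mesh_query_aabb(mesh_id, lower, upper)
#     face = int(0)
#     while wp.mesh_query_aabb_next(query, face):
#         p = wp.mesh_eval_position(mesh_id, face, 0.5, 0.5)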
@overload
def mesh_eval_position(id: uint64, face: int32, bary_u: float32, bary_v: float32) -> vec3:
"""
Evaluates the position on the mesh given a face index, and barycentric coordinates.
"""
...
@overload
def mesh_eval_velocity(id: uint64, face: int32, bary_u: float32, bary_v: float32) -> vec3:
"""
Evaluates the velocity on the mesh given a face index, and barycentric coordinates.
"""
...
@overload
def hash_grid_query(id: uint64, point: vec3, max_dist: float32) -> hash_grid_query_t:
"""
    Construct a point query against a hash grid. This query can be used to iterate over all neighboring points within a
fixed radius from the query point. Returns an object that is used to track state during neighbor traversal.
"""
...
@overload
def hash_grid_query_next(query: hash_grid_query_t, index: int32) -> bool:
"""
Move to the next point in the hash grid query. The index of the current neighbor is stored in ``index``, returns ``False``
if there are no more neighbors.
"""
...
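# Hedged traversal sketch (assumed kernel context, not part of the generated
# stubs): visit all points within `radius` of the query point `p`.
#
#     query = wp.hash_grid_query(grid_id, p, radius)
#     index = int(0)
#     while wp.hash_grid_query_next(query, index):
#         neighbor = points[index]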
@overload
def hash_grid_point_id(id: uint64, index: int32) -> int:
"""
    Return the index of a point in the grid; this can be used to re-order threads such that grid
    traversal occurs in a spatially coherent order.
"""
...
@overload
def intersect_tri_tri(v0: vec3, v1: vec3, v2: vec3, u0: vec3, u1: vec3, u2: vec3) -> int:
"""
Tests for intersection between two triangles (v0, v1, v2) and (u0, u1, u2) using Moller's method. Returns > 0 if triangles intersect.
"""
...
@overload
def mesh_eval_face_normal(id: uint64, face: int32) -> vec3:
"""
    Evaluates the face normal of the mesh given a face index.
"""
...
@overload
def mesh_get_point(id: uint64, index: int32) -> vec3:
"""
    Returns the point of the mesh given an index.
"""
...
@overload
def mesh_get_velocity(id: uint64, index: int32) -> vec3:
"""
    Returns the velocity of the mesh given an index.
"""
...
@overload
def mesh_get_index(id: uint64, index: int32) -> int:
"""
Returns the point-index of the mesh given a face-vertex index.
"""
...
@overload
def volume_sample_f(id: uint64, uvw: vec3, sampling_mode: int32) -> float:
"""
    Sample the volume given by ``id`` at the volume local-space point ``uvw``. Interpolation should be ``wp.Volume.CLOSEST`` or ``wp.Volume.LINEAR``.
"""
...
@overload
def volume_lookup_f(id: uint64, i: int32, j: int32, k: int32) -> float:
"""
Returns the value of voxel with coordinates ``i``, ``j``, ``k``, if the voxel at this index does not exist this function returns the background value
"""
...
@overload
def volume_sample_v(id: uint64, uvw: vec3, sampling_mode: int32) -> vec3:
"""
    Sample the vector volume given by ``id`` at the volume local-space point ``uvw``. Interpolation should be ``wp.Volume.CLOSEST`` or ``wp.Volume.LINEAR``.
"""
...
@overload
def volume_lookup_v(id: uint64, i: int32, j: int32, k: int32) -> vec3:
"""
Returns the vector value of voxel with coordinates ``i``, ``j``, ``k``, if the voxel at this index does not exist this function returns the background value
"""
...
@overload
def volume_sample_i(id: uint64, uvw: vec3) -> int:
"""
Sample the int32 volume given by ``id`` at the volume local-space point ``uvw``.
"""
...
@overload
def volume_lookup_i(id: uint64, i: int32, j: int32, k: int32) -> int:
"""
Returns the int32 value of voxel with coordinates ``i``, ``j``, ``k``, if the voxel at this index does not exist this function returns the background value
"""
...
@overload
def volume_index_to_world(id: uint64, uvw: vec3) -> vec3:
"""
Transform a point defined in volume index space to world space given the volume's intrinsic affine transformation.
"""
...
@overload
def volume_world_to_index(id: uint64, xyz: vec3) -> vec3:
"""
Transform a point defined in volume world space to the volume's index space, given the volume's intrinsic affine transformation.
"""
...
@overload
def volume_index_to_world_dir(id: uint64, uvw: vec3) -> vec3:
"""
Transform a direction defined in volume index space to world space given the volume's intrinsic affine transformation.
"""
...
@overload
def volume_world_to_index_dir(id: uint64, xyz: vec3) -> vec3:
"""
Transform a direction defined in volume world space to the volume's index space, given the volume's intrinsic affine transformation.
"""
...
@overload
def rand_init(seed: int32) -> uint32:
"""
Initialize a new random number generator given a user-defined seed. Returns a 32-bit integer representing the RNG state.
"""
...
@overload
def rand_init(seed: int32, offset: int32) -> uint32:
"""
Initialize a new random number generator given a user-defined seed and an offset.
This alternative constructor can be useful in parallel programs, where a kernel as a whole should share a seed,
but each thread should generate uncorrelated values. In this case usage should be ``r = rand_init(seed, tid)``
"""
...
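# Hedged example of the per-thread pattern described above (not part of the
# generated stubs): the kernel shares one seed while each thread draws from an
# uncorrelated stream.
#
#     tid = wp.tid()
#     state = wp.rand_init(seed, tid)
#     sample = wp.randf(state)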
@overload
def randi(state: uint32) -> int:
"""
Return a random integer between [0, 2^32)
"""
...
@overload
def randi(state: uint32, min: int32, max: int32) -> int:
"""
Return a random integer between [min, max)
"""
...
@overload
def randf(state: uint32) -> float:
"""
Return a random float between [0.0, 1.0)
"""
...
@overload
def randf(state: uint32, min: float32, max: float32) -> float:
"""
Return a random float between [min, max)
"""
...
@overload
def randn(state: uint32) -> float:
"""
Sample a normal distribution
"""
...
@overload
def noise(state: uint32, x: float32) -> float:
"""
Non-periodic Perlin-style noise in 1d.
"""
...
@overload
def noise(state: uint32, xy: vec2) -> float:
"""
Non-periodic Perlin-style noise in 2d.
"""
...
@overload
def noise(state: uint32, xyz: vec3) -> float:
"""
Non-periodic Perlin-style noise in 3d.
"""
...
@overload
def noise(state: uint32, xyzt: vec4) -> float:
"""
Non-periodic Perlin-style noise in 4d.
"""
...
@overload
def pnoise(state: uint32, x: float32, px: int32) -> float:
"""
Periodic Perlin-style noise in 1d.
"""
...
@overload
def pnoise(state: uint32, xy: vec2, px: int32, py: int32) -> float:
"""
Periodic Perlin-style noise in 2d.
"""
...
@overload
def pnoise(state: uint32, xyz: vec3, px: int32, py: int32, pz: int32) -> float:
"""
Periodic Perlin-style noise in 3d.
"""
...
@overload
def pnoise(state: uint32, xyzt: vec4, px: int32, py: int32, pz: int32, pt: int32) -> float:
"""
Periodic Perlin-style noise in 4d.
"""
...
@overload
def curlnoise(state: uint32, xy: vec2) -> vec2:
"""
Divergence-free vector field based on the gradient of a Perlin noise function.
"""
...
@overload
def curlnoise(state: uint32, xyz: vec3) -> vec3:
"""
Divergence-free vector field based on the curl of three Perlin noise functions.
"""
...
@overload
def curlnoise(state: uint32, xyzt: vec4) -> vec3:
"""
Divergence-free vector field based on the curl of three Perlin noise functions.
"""
...
@overload
def printf():
"""
Allows printing formatted strings, using C-style format specifiers.
"""
...
@overload
def tid() -> int:
"""
Return the current thread index. Note that this is the *global* index of the thread in the range [0, dim)
where dim is the parameter passed to kernel launch.
"""
...
@overload
def tid() -> Tuple[int, int]:
"""
Return the current thread indices for a 2d kernel launch. Use ``i,j = wp.tid()`` syntax to retrieve the coordinates inside the kernel thread grid.
"""
...
@overload
def tid() -> Tuple[int, int, int]:
"""
Return the current thread indices for a 3d kernel launch. Use ``i,j,k = wp.tid()`` syntax to retrieve the coordinates inside the kernel thread grid.
"""
...
@overload
def tid() -> Tuple[int, int, int, int]:
"""
Return the current thread indices for a 4d kernel launch. Use ``i,j,k,l = wp.tid()`` syntax to retrieve the coordinates inside the kernel thread grid.
"""
...
@overload
def select(cond: bool, arg1: Any, arg2: Any):
"""
    Select between two arguments: if cond is false return ``arg1``, otherwise return ``arg2``
"""
...
@overload
def atomic_add(a: array[Any], i: int32, value: Any):
"""
Atomically add ``value`` onto the array at location given by index.
"""
...
@overload
def atomic_add(a: array[Any], i: int32, j: int32, value: Any):
"""
Atomically add ``value`` onto the array at location given by indices.
"""
...
@overload
def atomic_add(a: array[Any], i: int32, j: int32, k: int32, value: Any):
"""
Atomically add ``value`` onto the array at location given by indices.
"""
...
@overload
def atomic_add(a: array[Any], i: int32, j: int32, k: int32, l: int32, value: Any):
"""
Atomically add ``value`` onto the array at location given by indices.
"""
...
@overload
def atomic_sub(a: array[Any], i: int32, value: Any):
"""
    Atomically subtract ``value`` from the array at location given by index.
"""
...
@overload
def atomic_sub(a: array[Any], i: int32, j: int32, value: Any):
"""
    Atomically subtract ``value`` from the array at location given by indices.
"""
...
@overload
def atomic_sub(a: array[Any], i: int32, j: int32, k: int32, value: Any):
"""
    Atomically subtract ``value`` from the array at location given by indices.
"""
...
@overload
def atomic_sub(a: array[Any], i: int32, j: int32, k: int32, l: int32, value: Any):
"""
    Atomically subtract ``value`` from the array at location given by indices.
"""
...
@overload
def expect_eq(arg1: int8, arg2: int8):
"""
Prints an error to stdout if arg1 and arg2 are not equal
"""
...
@overload
def expect_eq(arg1: uint8, arg2: uint8):
"""
Prints an error to stdout if arg1 and arg2 are not equal
"""
...
@overload
def expect_eq(arg1: int16, arg2: int16):
"""
Prints an error to stdout if arg1 and arg2 are not equal
"""
...
@overload
def expect_eq(arg1: uint16, arg2: uint16):
"""
Prints an error to stdout if arg1 and arg2 are not equal
"""
...
@overload
def expect_eq(arg1: int32, arg2: int32):
"""
Prints an error to stdout if arg1 and arg2 are not equal
"""
...
@overload
def expect_eq(arg1: uint32, arg2: uint32):
"""
Prints an error to stdout if arg1 and arg2 are not equal
"""
...
@overload
def expect_eq(arg1: int64, arg2: int64):
"""
Prints an error to stdout if arg1 and arg2 are not equal
"""
...
@overload
def expect_eq(arg1: uint64, arg2: uint64):
"""
Prints an error to stdout if arg1 and arg2 are not equal
"""
...
@overload
def expect_eq(arg1: float32, arg2: float32):
"""
Prints an error to stdout if arg1 and arg2 are not equal
"""
...
@overload
def expect_eq(arg1: float64, arg2: float64):
"""
Prints an error to stdout if arg1 and arg2 are not equal
"""
...
@overload
def expect_eq(arg1: vec2, arg2: vec2):
"""
Prints an error to stdout if arg1 and arg2 are not equal
"""
...
@overload
def expect_eq(arg1: vec3, arg2: vec3):
"""
Prints an error to stdout if arg1 and arg2 are not equal
"""
...
@overload
def expect_eq(arg1: vec4, arg2: vec4):
"""
Prints an error to stdout if arg1 and arg2 are not equal
"""
...
@overload
def expect_eq(arg1: mat22, arg2: mat22):
"""
Prints an error to stdout if arg1 and arg2 are not equal
"""
...
@overload
def expect_eq(arg1: mat33, arg2: mat33):
"""
Prints an error to stdout if arg1 and arg2 are not equal
"""
...
@overload
def expect_eq(arg1: mat44, arg2: mat44):
"""
Prints an error to stdout if arg1 and arg2 are not equal
"""
...
@overload
def expect_eq(arg1: quat, arg2: quat):
"""
Prints an error to stdout if arg1 and arg2 are not equal
"""
...
@overload
def expect_eq(arg1: transform, arg2: transform):
"""
Prints an error to stdout if arg1 and arg2 are not equal
"""
...
@overload
def expect_eq(arg1: spatial_vector, arg2: spatial_vector):
"""
Prints an error to stdout if arg1 and arg2 are not equal
"""
...
@overload
def expect_eq(arg1: spatial_matrix, arg2: spatial_matrix):
"""
Prints an error to stdout if arg1 and arg2 are not equal
"""
...
@overload
def lerp(a: float32, b: float32, t: float32) -> float32:
"""
Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``
"""
...
@overload
def lerp(a: float64, b: float64, t: float32) -> float64:
"""
Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``
"""
...
@overload
def lerp(a: vec2, b: vec2, t: float32) -> vec2:
"""
Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``
"""
...
@overload
def lerp(a: vec3, b: vec3, t: float32) -> vec3:
"""
Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``
"""
...
@overload
def lerp(a: vec4, b: vec4, t: float32) -> vec4:
"""
Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``
"""
...
@overload
def lerp(a: mat22, b: mat22, t: float32) -> mat22:
"""
Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``
"""
...
@overload
def lerp(a: mat33, b: mat33, t: float32) -> mat33:
"""
Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``
"""
...
@overload
def lerp(a: mat44, b: mat44, t: float32) -> mat44:
"""
Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``
"""
...
@overload
def lerp(a: quat, b: quat, t: float32) -> quat:
"""
Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``
"""
...
@overload
def lerp(a: transform, b: transform, t: float32) -> transform:
"""
Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``
"""
...
@overload
def lerp(a: spatial_vector, b: spatial_vector, t: float32) -> spatial_vector:
"""
Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``
"""
...
@overload
def lerp(a: spatial_matrix, b: spatial_matrix, t: float32) -> spatial_matrix:
"""
Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``
"""
...
@overload
def expect_near(arg1: float32, arg2: float32, tolerance: float32):
"""
Prints an error to stdout if arg1 and arg2 are not closer than tolerance in magnitude
"""
...
@overload
def expect_near(arg1: vec3, arg2: vec3, tolerance: float32):
"""
Prints an error to stdout if any element of arg1 and arg2 are not closer than tolerance in magnitude
"""
...
@overload
def add(x: int32, y: int32) -> int:
"""
"""
...
@overload
def add(x: float32, y: float32) -> float:
"""
"""
...
@overload
def add(x: vec2, y: vec2) -> vec2:
"""
"""
...
@overload
def add(x: vec3, y: vec3) -> vec3:
"""
"""
...
@overload
def add(x: vec4, y: vec4) -> vec4:
"""
"""
...
@overload
def add(x: quat, y: quat) -> quat:
"""
"""
...
@overload
def add(x: mat22, y: mat22) -> mat22:
"""
"""
...
@overload
def add(x: mat33, y: mat33) -> mat33:
"""
"""
...
@overload
def add(x: mat44, y: mat44) -> mat44:
"""
"""
...
@overload
def add(x: spatial_vector, y: spatial_vector) -> spatial_vector:
"""
"""
...
@overload
def add(x: spatial_matrix, y: spatial_matrix) -> spatial_matrix:
"""
"""
...
@overload
def sub(x: int32, y: int32) -> int:
"""
"""
...
@overload
def sub(x: float32, y: float32) -> float:
"""
"""
...
@overload
def sub(x: vec2, y: vec2) -> vec2:
"""
"""
...
@overload
def sub(x: vec3, y: vec3) -> vec3:
"""
"""
...
@overload
def sub(x: vec4, y: vec4) -> vec4:
"""
"""
...
@overload
def sub(x: mat22, y: mat22) -> mat22:
"""
"""
...
@overload
def sub(x: mat33, y: mat33) -> mat33:
"""
"""
...
@overload
def sub(x: mat44, y: mat44) -> mat44:
"""
"""
...
@overload
def sub(x: spatial_vector, y: spatial_vector) -> spatial_vector:
"""
"""
...
@overload
def sub(x: spatial_matrix, y: spatial_matrix) -> spatial_matrix:
"""
"""
...
@overload
def mul(x: int32, y: int32) -> int:
"""
"""
...
@overload
def mul(x: float32, y: float32) -> float:
"""
"""
...
@overload
def mul(x: float32, y: vec2) -> vec2:
"""
"""
...
@overload
def mul(x: float32, y: vec3) -> vec3:
"""
"""
...
@overload
def mul(x: float32, y: vec4) -> vec4:
"""
"""
...
@overload
def mul(x: float32, y: quat) -> quat:
"""
"""
...
@overload
def mul(x: vec2, y: float32) -> vec2:
"""
"""
...
@overload
def mul(x: vec3, y: float32) -> vec3:
"""
"""
...
@overload
def mul(x: vec4, y: float32) -> vec4:
"""
"""
...
@overload
def mul(x: quat, y: float32) -> quat:
"""
"""
...
@overload
def mul(x: quat, y: quat) -> quat:
"""
"""
...
@overload
def mul(x: mat22, y: float32) -> mat22:
"""
"""
...
@overload
def mul(x: mat22, y: vec2) -> vec2:
"""
"""
...
@overload
def mul(x: mat22, y: mat22) -> mat22:
"""
"""
...
@overload
def mul(x: mat33, y: float32) -> mat33:
"""
"""
...
@overload
def mul(x: mat33, y: vec3) -> vec3:
"""
"""
...
@overload
def mul(x: mat33, y: mat33) -> mat33:
"""
"""
...
@overload
def mul(x: mat44, y: float32) -> mat44:
"""
"""
...
@overload
def mul(x: mat44, y: vec4) -> vec4:
"""
"""
...
@overload
def mul(x: mat44, y: mat44) -> mat44:
"""
"""
...
@overload
def mul(x: spatial_vector, y: float32) -> spatial_vector:
"""
"""
...
@overload
def mul(x: spatial_matrix, y: spatial_matrix) -> spatial_matrix:
"""
"""
...
@overload
def mul(x: spatial_matrix, y: spatial_vector) -> spatial_vector:
"""
"""
...
@overload
def mul(x: transform, y: transform) -> transform:
"""
"""
...
@overload
def mod(x: int32, y: int32) -> int:
"""
"""
...
@overload
def mod(x: float32, y: float32) -> float:
"""
"""
...
@overload
def div(x: int32, y: int32) -> int:
"""
"""
...
@overload
def div(x: float32, y: float32) -> float:
"""
"""
...
@overload
def div(x: vec2, y: float32) -> vec2:
"""
"""
...
@overload
def div(x: vec3, y: float32) -> vec3:
"""
"""
...
@overload
def div(x: vec4, y: float32) -> vec4:
"""
"""
...
@overload
def floordiv(x: int32, y: int32) -> int:
"""
"""
...
@overload
def floordiv(x: float32, y: float32) -> float:
"""
"""
...
@overload
def neg(x: int32) -> int:
"""
"""
...
@overload
def neg(x: float32) -> float:
"""
"""
...
@overload
def neg(x: vec2) -> vec2:
"""
"""
...
@overload
def neg(x: vec3) -> vec3:
"""
"""
...
@overload
def neg(x: vec4) -> vec4:
"""
"""
...
@overload
def neg(x: quat) -> quat:
"""
"""
...
@overload
def neg(x: mat33) -> mat33:
"""
"""
...
@overload
def neg(x: mat44) -> mat44:
"""
"""
...
@overload
def unot(b: bool) -> bool:
"""
"""
...
|
src/python/nimbusml/internal/entrypoints/models_schema.py | montehoover/NimbusML | 134 | 12661454 | # - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
Models.Schema
"""
from ..utils.entrypoints import EntryPoint
from ..utils.utils import try_set, unlist
def models_schema(
model,
schema=None,
**params):
"""
**Description**
Retrieve output model schema
:param model: The transform model. (inputs).
:param schema: The model schema (outputs).
"""
entrypoint_name = 'Models.Schema'
inputs = {}
outputs = {}
if model is not None:
inputs['Model'] = try_set(
obj=model,
none_acceptable=False,
is_of_type=str)
if schema is not None:
outputs['Schema'] = try_set(
obj=schema,
none_acceptable=False,
is_of_type=str)
input_variables = {
x for x in unlist(inputs.values())
if isinstance(x, str) and x.startswith("$")}
output_variables = {
x for x in unlist(outputs.values())
if isinstance(x, str) and x.startswith("$")}
entrypoint = EntryPoint(
name=entrypoint_name, inputs=inputs, outputs=outputs,
input_variables=input_variables,
output_variables=output_variables)
return entrypoint
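# Hedged usage sketch (variable names are assumptions, not part of this module):
#     ep = models_schema(model="$model", schema="$schema")
# The returned EntryPoint can then be placed into an entrypoint graph and
# executed by the surrounding NimbusML pipeline machinery.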
|