max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
async.py | nakamoo/a3c_PredNet | 443 | 11179958 | <reponame>nakamoo/a3c_PredNet<filename>async.py
import multiprocessing as mp
import os
import random
import chainer
import numpy as np
import random_seed
def set_shared_params(a, b):
"""
Args:
a (chainer.Link): link whose params are to be replaced
b (dict): dict that consists of (param_name, multiprocessing.Array)
"""
assert isinstance(a, chainer.Link)
for param_name, param in a.namedparams():
if param_name in b:
shared_param = b[param_name]
param.data = np.frombuffer(
shared_param, dtype=param.data.dtype).reshape(param.data.shape)
def set_shared_states(a, b):
assert isinstance(a, chainer.Optimizer)
assert hasattr(a, 'target'), 'Optimizer.setup must be called first'
for state_name, shared_state in b.items():
for param_name, param in shared_state.items():
old_param = a._states[state_name][param_name]
a._states[state_name][param_name] = np.frombuffer(
param,
dtype=old_param.dtype).reshape(old_param.shape)
def extract_params_as_shared_arrays(link):
assert isinstance(link, chainer.Link)
shared_arrays = {}
for param_name, param in link.namedparams():
shared_arrays[param_name] = mp.RawArray('f', param.data.ravel())
return shared_arrays
def share_params_as_shared_arrays(link):
shared_arrays = extract_params_as_shared_arrays(link)
set_shared_params(link, shared_arrays)
return shared_arrays
def share_states_as_shared_arrays(link):
shared_arrays = extract_states_as_shared_arrays(link)
set_shared_states(link, shared_arrays)
return shared_arrays
def extract_states_as_shared_arrays(optimizer):
assert isinstance(optimizer, chainer.Optimizer)
assert hasattr(optimizer, 'target'), 'Optimizer.setup must be called first'
shared_arrays = {}
for state_name, state in optimizer._states.items():
shared_arrays[state_name] = {}
for param_name, param in state.items():
shared_arrays[state_name][
param_name] = mp.RawArray('f', param.ravel())
return shared_arrays
def run_async(n_process, run_func):
"""Run experiments asynchronously.
Args:
n_process (int): number of processes
run_func: function that will be run in parallel
"""
processes = []
def set_seed_and_run(process_idx, run_func):
random_seed.set_random_seed(np.random.randint(0, 2 ** 32))
run_func(process_idx)
for process_idx in range(n_process):
processes.append(mp.Process(target=set_seed_and_run, args=(
process_idx, run_func)))
for p in processes:
p.start()
for p in processes:
p.join()
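# Illustrative usage sketch (not part of the original module): `_example_worker`
# is a hypothetical run function; run_async starts one process per index and
# joins them all before returning.
def _run_async_demo():
    def _example_worker(process_idx):
        print('worker %d running' % process_idx)
    run_async(2, _example_worker)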
|
filebeat/tests/system/test_container.py | tetianakravchenko/beats | 9,729 | 11179968 | <reponame>tetianakravchenko/beats
from filebeat import BaseTest
import socket
import os
class Test(BaseTest):
"""
Test filebeat with the container input
"""
def test_container_input(self):
"""
Test container input
"""
input_raw = """
- type: container
paths:
- {}/logs/*.log
"""
self.render_config_template(
input_raw=input_raw.format(os.path.abspath(self.working_dir)),
inputs=False,
)
os.mkdir(self.working_dir + "/logs/")
self.copy_files(["logs/docker.log"],
target_dir="logs")
filebeat = self.start_beat()
self.wait_until(lambda: self.output_has(lines=21))
filebeat.check_kill_and_wait()
output = self.read_output()
assert len(output) == 21
assert output[0]["message"] == "Fetching main repository github.com/elastic/beats..."
for o in output:
assert o["stream"] == "stdout"
def test_container_input_cri(self):
"""
Test container input with CRI format
"""
input_raw = """
- type: container
paths:
- {}/logs/*.log
"""
self.render_config_template(
input_raw=input_raw.format(os.path.abspath(self.working_dir)),
inputs=False,
)
os.mkdir(self.working_dir + "/logs/")
self.copy_files(["logs/cri.log"],
target_dir="logs")
filebeat = self.start_beat()
self.wait_until(lambda: self.output_count(lambda x: x >= 1))
self.wait_until(lambda: self.log_contains("End of file reached"))
filebeat.check_kill_and_wait()
output = self.read_output()
assert len(output) == 1
assert output[0]["stream"] == "stdout"
def test_container_input_registry_for_unparsable_lines(self):
"""
Test container input properly updates registry offset in case
of unparsable lines
"""
input_raw = """
- type: container
paths:
- {}/logs/*.log
"""
self.render_config_template(
input_raw=input_raw.format(os.path.abspath(self.working_dir)),
inputs=False,
)
os.mkdir(self.working_dir + "/logs/")
self.copy_files(["logs/docker_corrupted.log"],
target_dir="logs")
filebeat = self.start_beat()
self.wait_until(lambda: self.output_has(lines=20))
filebeat.check_kill_and_wait()
output = self.read_output()
assert len(output) == 20
assert output[19]["message"] == "Moving binaries to host..."
for o in output:
assert o["stream"] == "stdout"
# Check that file exist
data = self.get_registry()
logs = self.log_access()
assert logs.contains("Parse line error") == True
# bytes of healthy file are 2244 so for the corrupted one should
# be 2244-1=2243 since we removed one character
assert data[0]["offset"] == 2243
|
grammarinator/runtime/dispatching_listener.py | 38b394ce01/grammarinator | 228 | 11179981 | # Copyright (c) 2020 <NAME>, <NAME>.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
from .default_listener import DefaultListener
class DispatchingListener(DefaultListener):
def enter_rule(self, node):
fn = 'enter_' + node.name
if hasattr(self, fn):
getattr(self, fn)(node)
def exit_rule(self, node):
fn = 'exit_' + node.name
if hasattr(self, fn):
getattr(self, fn)(node)
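# Illustrative sketch (not part of the original file): `ExamplePrintListener` is a
# hypothetical subclass; DispatchingListener routes enter_rule/exit_rule calls to
# methods named after the rule, so a node named 'start' is dispatched to enter_start.
class ExamplePrintListener(DispatchingListener):
    def enter_start(self, node):
        print('entering rule:', node.name)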
|
sonarqube/community/settings.py | ckho-wkcda/python-sonarqube-api | 113 | 11179982 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: <NAME>
from sonarqube.utils.rest_client import RestClient
from sonarqube.utils.config import (
API_SETTINGS_SET_ENDPOINT,
API_SETTINGS_RESET_ENDPOINT,
API_SETTINGS_VALUES_ENDPOINT,
API_SETTINGS_LIST_DEFINITIONS_ENDPOINT,
)
from sonarqube.utils.common import GET, POST
class SonarQubeSettings(RestClient):
"""
SonarQube settings Operations
"""
def __init__(self, **kwargs):
"""
:param kwargs:
"""
super(SonarQubeSettings, self).__init__(**kwargs)
@POST(API_SETTINGS_SET_ENDPOINT)
def update_setting_value(self, key, value, component=None, fieldValues=None):
"""
SINCE 6.1
Update a setting value.
The settings defined in conf/sonar.properties are read-only and can't be changed.
:param key: Setting key
:param value: Setting value. To reset a value, please use the reset web service.
:param component: Component key
:param fieldValues: Setting field values. To set several values, the parameter must be called once for
each value.
:return:
"""
@POST(API_SETTINGS_RESET_ENDPOINT)
def remove_setting_value(self, keys, component=None):
"""
SINCE 6.1
Remove a setting value.
The settings defined in conf/sonar.properties are read-only and can't be changed.
:param keys: Comma-separated list of keys
:param component: Component key
:return:
"""
@GET(API_SETTINGS_VALUES_ENDPOINT)
def get_settings_values(self, component=None, keys=None):
"""
SINCE 6.3
List settings values.
If no value has been set for a setting, then the default value is returned.
The settings from conf/sonar.properties are excluded from results.
:param component: Component key
:param keys: List of setting keys
:return:
"""
@GET(API_SETTINGS_LIST_DEFINITIONS_ENDPOINT)
def get_settings_definitions(self, component=None):
"""
SINCE 6.3
List settings definitions.
:param component: Component key
:return:
"""
|
mac/pyobjc-framework-Quartz/PyObjCTest/test_civector.py | albertz/music-player | 132 | 11180012 | <filename>mac/pyobjc-framework-Quartz/PyObjCTest/test_civector.py<gh_stars>100-1000
from PyObjCTools.TestSupport import *
from Quartz.QuartzCore import *
class TestCIVector (TestCase):
def testMethods(self):
self.assertArgIsIn(CIVector.vectorWithValues_count_, 0)
self.assertArgSizeInArg(CIVector.vectorWithValues_count_, 0, 1)
self.assertArgIsIn(CIVector.initWithValues_count_, 0)
self.assertArgSizeInArg(CIVector.initWithValues_count_, 0, 1)
if __name__ == "__main__":
main()
|
pythainlp/wangchanberta/__init__.py | Gorlph/pythainlp | 569 | 11180076 | # -*- coding: utf-8 -*-
__all__ = [
"ThaiNameTagger",
"pos_tag",
"segment",
]
from pythainlp.wangchanberta.core import ThaiNameTagger, segment
from pythainlp.wangchanberta.postag import pos_tag
|
src/utils.py | lingluodlut/Att-ChemdNER | 126 | 11180086 | <filename>src/utils.py<gh_stars>100-1000
import os
import re
import codecs
import numpy as np
import six
import theano
models_path = "./models"
eval_path = "./evaluation"
eval_temp = os.path.join(eval_path, "temp")
eval_script = os.path.join(eval_path, "conlleval")
class EarlyStopping(object):
#{{{
'''Stop training when a monitored quantity has stopped improving.
# Arguments
monitor: quantity to be monitored.
min_delta: minimum change in the monitored quantity
to qualify as an improvement, i.e. an absolute
change of less than min_delta, will count as no
improvement.
patience: number of epochs with no improvement
after which training will be stopped.
'''
def __init__(self, monitor='val_loss',
min_delta=1e-6, patience=5,mode='min'):
#{{{
super(EarlyStopping, self).__init__()
self.monitor = monitor
self.patience = patience
self.min_delta = min_delta
self.wait = 0
self.stopped_epoch = 0
self.stop_training=False;
if mode =="min":
self.monitor_op = np.less;
elif mode == "max":
self.monitor_op = np.greater;
else:
assert 0,"unknown early stop mode:";
self.min_delta *= -1
#}}}
def on_train_begin(self):
self.wait = 0 # Allow instances to be re-used
self.best = np.Inf if self.monitor_op == np.less else -np.Inf
def on_epoch_end(self, epoch, loss):
#{{{
current = loss
if self.monitor_op(current - self.min_delta, self.best):
self.best = current
self.wait = 0
else:
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.stop_training = True
self.wait += 1
#}}}
def on_train_end(self, logs={}):
if self.stopped_epoch > 0 :
print('Epoch %05d: early stopping' % (self.stopped_epoch))
#}}}
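# Illustrative sketch (not part of the original file): driving the EarlyStopping
# callback by hand with made-up loss values; training stops once the loss has not
# improved for `patience` consecutive epochs.
def _early_stopping_demo():
    stopper = EarlyStopping(monitor='val_loss', min_delta=1e-6,
                            patience=2, mode='min')
    stopper.on_train_begin()
    for epoch, loss in enumerate([1.0, 0.9, 0.91, 0.92, 0.93]):
        stopper.on_epoch_end(epoch, loss)
        if stopper.stop_training:
            break
    stopper.on_train_end()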
def get_from_module(identifier, module_params, module_name,
instantiate=False, kwargs=None):
#{{{
if isinstance(identifier, six.string_types):
res = module_params.get(identifier)
if not res:
raise ValueError('Invalid ' + str(module_name) + ': ' +
str(identifier))
if instantiate and not kwargs:
return res()
elif instantiate and kwargs:
return res(**kwargs)
else:
return res
elif isinstance(identifier, dict):
name = identifier.pop('name')
res = module_params.get(name)
if res:
return res(**identifier)
else:
raise ValueError('Invalid ' + str(module_name) + ': ' +
str(identifier))
return identifier
#}}}
def findNotSame(fNameX,fNameY):
#{{{
"""
    Check whether two files match by comparing the first character of each
    non-empty line, skipping blank lines, and report the first mismatch.
"""
space='space';
def loadFile(fName):
word=[];
import codecs;
for line in codecs.open(fName,'r','utf8'):
line=line.rstrip();
if len(line)>0:
word.append(line[0]);
else:
word.append(space);
return word;
word1=loadFile(fNameX);
word2=loadFile(fNameY);
i=0;
j=0;
while i<len(word1) and j<len(word2):
if word1[i]==word2[j]:
i+=1;
j+=1;
continue;
elif word1[i] ==space:
i+=1;
elif word2[j]==space:
j+=1;
else:
print "not same,X:",word1[i],",line:",i,',Y:',word2[j],',line:',j;
break;
#}}}
def generateDocSentLen(fNameX,fNameY):
#{{{
"""
    For each document, collect the lengths of the sentences it contains.
"""
from loader import load_sentences;
doc=load_sentences(fNameX,False,False);
sent=load_sentences(fNameY,False,False);
assert len(doc) < len(sent);
res=[];
i=0;
for elem in doc:
docLen=[];
count=0;
while count<len(elem):
docLen.append(len(sent[i]));
count+=len(sent[i]);
i+=1;
if count!=len(elem):
print "two file len not same";
assert 0;
res.append(docLen)
return res;
#}}}
def get_name(parameters):
#{{{
"""
Generate a model name from its parameters.
"""
l = []
for k, v in parameters.items():
if type(v) is str and "/" in v:
l.append((k, v[::-1][:v[::-1].index('/')][::-1]))
else:
l.append((k, v))
name = ",".join(["%s=%s" % (k, str(v).replace(',', '')) for k, v in l])
return "".join(i for i in name if i not in "\/:*?<>|")
#}}}
def set_values(name, param, pretrained):
#{{{
"""
Initialize a network parameter with pretrained values.
We check that sizes are compatible.
"""
param_value = param.get_value()
if pretrained.size != param_value.size:
raise Exception(
"Size mismatch for parameter %s. Expected %i, found %i."
% (name, param_value.size, pretrained.size)
)
param.set_value(np.reshape(
pretrained, param_value.shape
).astype(np.float32))
#}}}
import initializations;
def shared(shape, name):
#{{{
"""
Create a shared object of a numpy array.
"""
init=initializations.get('glorot_uniform');
if len(shape) == 1:
value = np.zeros(shape) # bias are initialized with zeros
return theano.shared(value=value.astype(theano.config.floatX), name=name)
else:
drange = np.sqrt(6. / (np.sum(shape)))
value = drange * np.random.uniform(low=-1.0, high=1.0, size=shape)
return init(shape=shape,name=name);
#}}}
def create_dico(item_list):
#{{{
"""
Create a dictionary of items from a list of list of items.
"""
assert type(item_list) is list
dico = {}
for items in item_list:
for item in items:
if item not in dico:
dico[item] = 1
else:
dico[item] += 1
return dico
#}}}
def create_mapping(dico):
#{{{
"""
Create a mapping (item to ID / ID to item) from a dictionary.
Items are ordered by decreasing frequency.
"""
sorted_items = sorted(dico.items(), key=lambda x: (-x[1], x[0]))
id_to_item = {i: v[0] for i, v in enumerate(sorted_items)}
item_to_id = {v: k for k, v in id_to_item.items()}
return item_to_id, id_to_item
#}}}
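# Illustrative example (not part of the original file): build a frequency
# dictionary from a toy corpus, then map items to ids by decreasing frequency.
def _mapping_demo():
    dico = create_dico([['a', 'b', 'a'], ['a', 'c']])
    assert dico == {'a': 3, 'b': 1, 'c': 1}
    item_to_id, id_to_item = create_mapping(dico)
    assert item_to_id['a'] == 0 and id_to_item[0] == 'a'
    return item_to_id, id_to_item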
def zero_digits(s):
#{{{
"""
Replace every digit in a string by a zero.
"""
return re.sub('\d', '0', s)
#}}}
def iob2(tags):
#{{{
"""
Check that tags have a valid IOB format.
Tags in IOB1 format are converted to IOB2.
"""
for i, tag in enumerate(tags):
if tag == 'O':
continue
split = tag.split('-')
if split[0] not in ['I', 'B']:
#if len(split) != 2 or split[0] not in ['I', 'B']:
return False
if split[0] == 'B':
continue
elif i == 0 or tags[i - 1] == 'O': # conversion IOB1 to IOB2
tags[i] = 'B' + tag[1:]
elif tags[i - 1][1:] == tag[1:]:
continue
else: # conversion IOB1 to IOB2
tags[i] = 'B' + tag[1:]
return True
#}}}
def iob_iobes(tags):
#{{{
"""
IOB -> IOBES
"""
new_tags = []
for i, tag in enumerate(tags):
if tag == 'O':
new_tags.append(tag)
elif tag.split('-')[0] == 'B':
if i + 1 != len(tags) and \
tags[i + 1].split('-')[0] == 'I':
new_tags.append(tag)
else:
new_tags.append(tag.replace('B-', 'S-'))
elif tag.split('-')[0] == 'I':
if i + 1 < len(tags) and \
tags[i + 1].split('-')[0] == 'I':
new_tags.append(tag)
else:
new_tags.append(tag.replace('I-', 'E-'))
else:
raise Exception('Invalid IOB format!')
return new_tags
#}}}
def iobes_iob(tags):
#{{{
"""
IOBES -> IOB
"""
new_tags = []
for i, tag in enumerate(tags):
if tag.split('-')[0] == 'B':
new_tags.append(tag)
elif tag.split('-')[0] == 'I':
new_tags.append(tag)
elif tag.split('-')[0] == 'S':
new_tags.append(tag.replace('S-', 'B-'))
elif tag.split('-')[0] == 'E':
new_tags.append(tag.replace('E-', 'I-'))
elif tag.split('-')[0] == 'O':
new_tags.append(tag)
else:
raise Exception('Invalid format!')
return new_tags
#}}}
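# Illustrative round trip (not part of the original file): IOB2 tags converted to
# IOBES and back; single-token entities become S-* and entity ends become E-*.
def _tag_scheme_demo():
    iob_tags = ['B-PER', 'I-PER', 'O', 'B-LOC']
    iobes_tags = iob_iobes(iob_tags)
    assert iobes_tags == ['B-PER', 'E-PER', 'O', 'S-LOC']
    assert iobes_iob(iobes_tags) == iob_tags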
def insert_singletons(words, singletons, p=0.5):
#{{{
"""
Replace singletons by the unknown word with a probability p.
"""
new_words = []
for word in words:
if word in singletons and np.random.uniform() < p:
new_words.append(0)
else:
new_words.append(word)
return new_words
#}}}
def pad_word_chars(words):
#{{{
"""
Pad the characters of the words in a sentence.
Input:
- list of lists of ints (list of words, a word being a list of char indexes)
Output:
- padded list of lists of ints
- padded list of lists of ints (where chars are reversed)
- list of ints corresponding to the index of the last character of each word
"""
max_length = max([len(word) for word in words])
char_for = []
char_rev = []
char_pos = []
for word in words:
padding = [0] * (max_length - len(word))
char_for.append(word + padding)
char_rev.append(word[::-1] + padding)
char_pos.append(len(word) - 1)
return char_for, char_rev, char_pos
#}}}
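# Illustrative example (not part of the original file): two words of char ids; the
# shorter word is zero-padded and the index of its last character is recorded.
def _pad_word_chars_demo():
    char_for, char_rev, char_pos = pad_word_chars([[3, 4, 5], [7]])
    assert char_for == [[3, 4, 5], [7, 0, 0]]
    assert char_rev == [[5, 4, 3], [7, 0, 0]]
    assert char_pos == [2, 0]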
def create_input(data, parameters, add_label, singletons=None,
useAttend=True):
#{{{
"""
Take sentence data and return an input for
the training or the evaluation function.
"""
words = data['words']
wordsTrue=data['words'];
chars = data['chars']
if singletons is not None:
words = insert_singletons(words, singletons)
if parameters['cap_dim']:
caps = data['caps']
char_for, char_rev, char_pos = pad_word_chars(chars)
input = []
if parameters['word_dim']:
input.append(words)
if parameters['char_dim']:
input.append(char_for)
if parameters['char_bidirect']:
input.append(char_rev)
input.append(char_pos)
if parameters['cap_dim']:
input.append(caps)
if useAttend:
input.append(wordsTrue);
if parameters.has_key('sentencesLevelLoss') \
and parameters['sentencesLevelLoss']:
input.append(data['lens']) ;
#add features
if parameters.has_key('features'):
features=parameters['features'];
else:
features=None;
if features is not None and features['lemma']['isUsed']:
input.append(data['lemma']);
if features is not None and features['pos']['isUsed']:
input.append(data['pos']);
if features is not None and features['chunk']['isUsed']:
input.append(data['chunk']);
if features is not None and features['dic']['isUsed']:
input.append(data['dic']);
if add_label:
input.append(data['tags'])
return input
#}}}
from os.path import isfile
from os import chmod
import stat
import subprocess
PREFIX = './evaluation/'
def get_perf(filename):
''' run conlleval.pl perl script to obtain
precision/recall and F1 score '''
_conlleval = PREFIX + 'conlleval'
if not isfile(_conlleval):
#download('http://www-etud.iro.umontreal.ca/~mesnilgr/atis/conlleval.pl')
os.system('wget https://www.comp.nus.edu.sg/%7Ekanmy/courses/practicalNLP_2008/packages/conlleval.pl')
chmod('conlleval.pl', stat.S_IRWXU) # give the execute permissions
out = []
proc = subprocess.Popen(["perl", _conlleval], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
stdout, _ = proc.communicate(open(filename).read())
for line in stdout.split('\n'):
if 'accuracy' in line:
out = line.split()
break
# out = ['accuracy:', '16.26%;', 'precision:', '0.00%;', 'recall:', '0.00%;', 'FB1:', '0.00']
precision = float(out[3][:-2])
recall = float(out[5][:-2])
f1score = float(out[7])
return {'p':precision, 'r':recall, 'f1':f1score}
def evaluate(parameters, f_eval, raw_sentences, parsed_sentences,
id_to_tag, dictionary_tags,filename,
useAttend=True):
#{{{
"""
Evaluate current model using CoNLL script.
"""
n_tags = len(id_to_tag)
predictions = []
count = np.zeros((n_tags, n_tags), dtype=np.int32)
for raw_sentence, data in zip(raw_sentences, parsed_sentences):
input = create_input(data, parameters, False,useAttend=useAttend)
if parameters['crf']:
y_preds = np.array(f_eval(*input))
else:
y_preds = f_eval(*input).argmax(axis=1)
y_reals = np.array(data['tags']).astype(np.int32)
assert len(y_preds) == len(y_reals)
p_tags = [id_to_tag[y_pred] for y_pred in y_preds]
r_tags = [id_to_tag[y_real] for y_real in y_reals]
if parameters['tag_scheme'] == 'iobes':
p_tags = iobes_iob(p_tags)
r_tags = iobes_iob(r_tags)
for i, (y_pred, y_real) in enumerate(zip(y_preds, y_reals)):
new_line = " ".join(raw_sentence[i][:-1] + [r_tags[i], p_tags[i]])
predictions.append(new_line)
count[y_real, y_pred] += 1
predictions.append("")
#write to file
with codecs.open(filename, 'w', 'utf8') as f:
f.write("\n".join(predictions))
return get_perf(filename)
#}}}
|
src/models/audiounet.py | TECHENGINESSRL/audio-super-res | 712 | 11180090 | <gh_stars>100-1000
import numpy as np
import tensorflow as tf
from scipy import interpolate
from .model import Model, default_opt
from .layers.subpixel import SubPixel1D, SubPixel1D_v2
from tensorflow.python.keras import backend as K
from keras.layers import merge
from keras.layers.core import Activation, Dropout
from keras.layers import Conv1D
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
from keras.initializers import RandomNormal, Orthogonal
# ----------------------------------------------------------------------------
class AudioUNet(Model):
"""Generic tensorflow model training code"""
def __init__(self, from_ckpt=False, n_dim=None, r=2,
opt_params=default_opt, log_prefix='./run'):
# perform the usual initialization
self.r = r
Model.__init__(self, from_ckpt=from_ckpt, n_dim=n_dim, r=r,
opt_params=opt_params, log_prefix=log_prefix)
def create_model(self, n_dim, r):
# load inputs
X, _, _ = self.inputs
K.set_session(self.sess)
with tf.compat.v1.name_scope('generator'):
x = X
L = self.layers
# dim/layer: 4096, 2048, 1024, 512, 256, 128, 64, 32,
n_filters = [128, 384, 512, 512, 512, 512, 512, 512]
n_filtersizes = [65, 33, 17, 9, 9, 9, 9, 9, 9]
downsampling_l = []
print('building model...')
# downsampling layers
for l, nf, fs in zip(list(range(L)), n_filters, n_filtersizes):
with tf.compat.v1.name_scope('downsc_conv%d' % l):
x = (Conv1D(filters=nf, kernel_size=fs,
activation=None, padding='same', init=Orthogonal(),
subsample_length=2))(x)
# if l > 0: x = BatchNormalization(mode=2)(x)
x = LeakyReLU(0.2)(x)
print('D-Block: ', x.get_shape())
downsampling_l.append(x)
# bottleneck layer
with tf.compat.v1.name_scope('bottleneck_conv'):
x = (Conv1D(filters=n_filters[-1], kernel_size=n_filtersizes[-1],
activation=None, padding='same', init=Orthogonal(),
subsample_length=2))(x)
x = Dropout(rate=0.5)(x)
x = LeakyReLU(0.2)(x)
# upsampling layers
for l, nf, fs, l_in in reversed(list(zip(list(range(L)), n_filters, n_filtersizes, downsampling_l))):
with tf.compat.v1.name_scope('upsc_conv%d' % l):
# (-1, n/2, 2f)
x = (Conv1D(filters=2*nf, kernel_size=fs,
activation=None, padding='same', init=Orthogonal()))(x)
x = Dropout(rate=0.5)(x)
x = Activation('relu')(x)
# (-1, n, f)
x = SubPixel1D(x, r=2)
# (-1, n, 2f)
x = K.concatenate(tensors=[x, l_in], axis=2)
print('U-Block: ', x.get_shape())
# final conv layer
with tf.compat.v1.name_scope('lastconv'):
x = Convolution1D(filters=2, kernel_size=9,
activation=None, padding='same', init=RandomNormal(stdev=1e-3))(x)
x = SubPixel1D(x, r=2)
print(x.get_shape())
g = merge([x, X], mode='sum')
return g
def predict(self, X):
print("predicting")
assert len(X) == 1
x_sp = spline_up(X, self.r)
x_sp = x_sp[:len(x_sp) - (len(x_sp) % (2**(self.layers+1)))]
X = x_sp.reshape((1,len(x_sp),1))
print((X.shape))
feed_dict = self.load_batch((X,X), train=False)
return self.sess.run(self.predictions, feed_dict=feed_dict)
# ----------------------------------------------------------------------------
# helpers
def spline_up(x_lr, r):
x_lr = x_lr.flatten()
x_hr_len = len(x_lr) * r
x_sp = np.zeros(x_hr_len)
i_lr = np.arange(x_hr_len, step=r)
i_hr = np.arange(x_hr_len)
f = interpolate.splrep(i_lr, x_lr)
x_sp = interpolate.splev(i_hr, f)
return x_sp
|
xhr/resources/get-set-cookie.py | meyerweb/wpt | 14,668 | 11180091 | import datetime
def main(request, response):
response.headers.set(b"Content-type", b"text/plain")
# By default use a session cookie.
expiration = None
if request.GET.get(b"clear"):
# If deleting, expire yesterday.
expiration = -datetime.timedelta(days=1)
response.set_cookie(b"WK-test", b"1", expires=expiration)
response.set_cookie(b"WK-test-secure", b"1", secure=True,
expires=expiration)
content = b""
for cookie in request.cookies:
content = content + b" " + cookie + b"=" + request.cookies.get(cookie).value
response.content = content
|
step07_appsync_with_subscriptions_using_amplify/aws-cdk/python/lambda/main.py | fullstackwebdev/full-stack-serverless-cdk | 192 | 11180102 | <filename>step07_appsync_with_subscriptions_using_amplify/aws-cdk/python/lambda/main.py
from __future__ import print_function
from addTodo import addTodoItem
from getTodo import getItem
from deleteTodo import deleteItem
from updateTodo import updateItem
import os
import boto3
dynamodb = boto3.resource('dynamodb')
def handler(event, context):
field = event['info']['fieldName']
if field == "addTodo":
todo = event['arguments']['todo']
return addTodoItem(todo)
if field == "getTodos":
return getItem() |
tests/testapp/lookups.py | StreetHawkInc/django-rest-framework-filters | 743 | 11180116 | <gh_stars>100-1000
from django.db.models import Transform
# This is a copy of the `Unaccent` transform from `django.contrib.postgres`.
# This is necessary as the postgres app requires psycopg2 to be installed.
class Unaccent(Transform):
bilateral = True
lookup_name = 'unaccent'
function = 'UNACCENT'
|
lite/tests/unittest_py/op/test_lod_reset_op.py | 714627034/Paddle-Lite | 808 | 11180137 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
import argparse
import numpy as np
from functools import partial
class TestLodResetOp(AutoScanTest):
def __init__(self, *args, **kwargs):
AutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(
TargetType.Host,
PrecisionType.FP32,
DataLayoutType.NCHW,
thread=[1, 2])
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
return True
def sample_program_configs(self, draw):
in_shape = draw(
st.lists(
st.integers(
min_value=10, max_value=20),
min_size=4,
max_size=4))
lod_data = draw(
st.sampled_from([[0, 3, 5, in_shape[0]], [0, 4, 7, in_shape[0]],
[0, 4, in_shape[0]], [0, 7, in_shape[0]]]))
lod_data1 = draw(
st.sampled_from([[0, 3, 5, in_shape[0]], [0, 4, 7, in_shape[0]],
[0, 4, in_shape[0]], [0, 7, in_shape[0]]]))
lod_data2 = draw(
st.sampled_from([[0, 3, 5, in_shape[0]], [0, 4, 7, in_shape[0]],
[0, 4, in_shape[0]], [0, 7, in_shape[0]]]))
case_num = draw(st.sampled_from([0, 1]))
def generate_input_x(*args, **kwargs):
return np.random.random(in_shape).astype(np.float32)
def generate_input_y(*args, **kwargs):
return np.array(lod_data1).astype(np.int32)
if case_num == 0:
build_ops = OpConfig(
type="lod_reset",
inputs={"X": ["input_data_x"],
"Y": []},
outputs={"Out": ["output_data"], },
attrs={"target_lod": lod_data,
'append': True})
program_config = ProgramConfig(
ops=[build_ops],
weights={},
inputs={
"input_data_x": TensorConfig(data_gen=partial(
generate_input_x, lod=list(lod_data2))),
},
outputs=["output_data"])
elif case_num == 1:
build_ops = OpConfig(
type="lod_reset",
inputs={"X": ["input_data_x"],
"Y": ["input_data_y"]},
outputs={"Out": ["output_data"], },
attrs={"target_lod": [],
'append': True})
program_config = ProgramConfig(
ops=[build_ops],
weights={},
inputs={
"input_data_x": TensorConfig(data_gen=partial(
generate_input_x, lod=list(lod_data2))),
"input_data_y":
TensorConfig(data_gen=partial(generate_input_y)),
},
outputs=["output_data"])
return program_config
def sample_predictor_configs(self):
return self.get_predictor_configs(), ["lod_reset"], (1e-5, 1e-5)
def add_ignore_pass_case(self):
pass
def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=25)
if __name__ == "__main__":
unittest.main(argv=[''])
|
backend/pubsub/store.py | restato/bunnybook | 131 | 11180153 | <reponame>restato/bunnybook<filename>backend/pubsub/store.py
import datetime as dt
from typing import Union, List
from uuid import UUID
from injector import singleton, inject
from common.injection import PubSubStore
from common.schemas import dt_to_iso8601z
@singleton
class WebSocketsStore:
ONLINE_STATUS_EX: int = int(dt.timedelta(seconds=11).total_seconds())
@inject
def __init__(self, store: PubSubStore):
self._store = store
async def renew_online_status(self, profile_id: Union[str, UUID]):
"""Refresh online status for profile_id."""
await self._store.set(f"websockets:{profile_id}",
dt_to_iso8601z(dt.datetime.now(dt.timezone.utc)),
expire=WebSocketsStore.ONLINE_STATUS_EX)
async def get_online_statuses(self, profile_ids: List[Union[str, UUID]]) \
-> List[str]:
"""Return list of online profile ids."""
if not profile_ids:
return []
result = await self._store.mget(*[f"websockets:{profile_id}"
for profile_id in profile_ids])
return [str(friend_id) for friend_id, is_online
in zip(profile_ids, result) if is_online]
|
thirdparty/nsiqcppstyle/rules/RULE_9_2_D_use_reentrant_function.py | cfsengineering/tigl | 171 | 11180201 | """
Use reentrant functions. Do not use non-reentrant functions (ctime, strtok, toupper).
== Violation ==
void A() {
    k = ctime(); <== Violation. ctime() is not a reentrant function.
    j = strtok(blar blar); <== Violation. strtok() is not a reentrant function.
}
== Good ==
void A() {
k = t.ctime(); <== Correct. It may be the reentrant function.
}
void A() {
k = ctime; <== Correct. It may be the reentrant function.
}
"""
from nsiqcppstyle_rulehelper import *
from nsiqcppstyle_reporter import *
from nsiqcppstyle_rulemanager import *
no_reenterant_functions = (
'ctime',
'strtok',
'toupper',
)
def RunRule(lexer, contextStack) :
"""
    Use reentrant keyword.
"""
t = lexer.GetCurToken()
if t.type == "ID" :
if t.value in no_reenterant_functions :
t2 = lexer.PeekNextTokenSkipWhiteSpaceAndComment()
t3 = lexer.PeekPrevTokenSkipWhiteSpaceAndComment()
if t2 != None and t2.type == "LPAREN" :
if t3 == None or t3.type != "PERIOD" :
if t.value == "toupper" and nsiqcppstyle_state._nsiqcppstyle_state.GetVar("ignore_toupper", "false") == "true" :
return
nsiqcppstyle_reporter.Error(t, __name__,
"Do not use not reentrant function(%s)." % t.value)
ruleManager.AddFunctionScopeRule(RunRule)
###########################################################################################
# Unit Test
###########################################################################################
from nsiqunittest.nsiqcppstyle_unittestbase import *
class testRule(nct):
def setUpRule(self):
ruleManager.AddFunctionScopeRule(RunRule)
def test1(self):
self.Analyze("thisfile.c",
"""
void func1()
{
k = ctime()
}
""")
assert CheckErrorContent(__name__)
def test2(self):
self.Analyze("thisfile.c",
"""
void func1() {
#define ctime() k
}
""")
assert not CheckErrorContent(__name__)
def test3(self):
self.Analyze("thisfile.c",
"""
void ctime() {
}
""")
assert not CheckErrorContent(__name__)
def test4(self):
self.Analyze("thisfile.c",
"""
void ctime () {
}
""")
assert not CheckErrorContent(__name__)
def test5(self):
self.Analyze("thisfile.c",
"""
void func1()
{
k = help.ctime ()
}
""")
assert not CheckErrorContent(__name__)
def test6(self):
self.Analyze("thisfile.c",
"""
void func1()
{
k = toupper()
}
""")
assert CheckErrorContent(__name__)
def test7(self):
nsiqcppstyle_state._nsiqcppstyle_state.varMap["ignore_toupper"] = "true"
self.Analyze("thisfile.c",
"""
void func1()
{
k = toupper()
}
""")
assert not CheckErrorContent(__name__)
|
benchmarks/driver/runner.py | SymbioticLab/Salus | 104 | 11180207 | <reponame>SymbioticLab/Salus
# -*- coding: future_fstrings -*-
#
# Copyright 2019 <NAME> <<EMAIL>>
#
# This file is part of Salus
# (see https://github.com/SymbioticLab/Salus).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, print_function, division, unicode_literals
from builtins import super, str
from future.utils import with_metaclass
import logging
from absl import flags
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from enum import Enum
from typing import Iterable, Tuple, Union, Any, Dict
from .server import SalusServer
from .tfserver import TFDistServer
from .utils import Popen, execute, snake_to_pascal, str2bool, remove_suffix
from .utils.compatiblity import pathlib, subprocess as sp
Path = pathlib.Path
FLAGS = flags.FLAGS
logger = logging.getLogger(__name__)
flags.DEFINE_string('tfbench_base', '../tf_benchmarks', 'Base dir of TFBenchmark based workloads')
flags.DEFINE_string('unit_base', 'tests', 'Base dir of unittest based workloads')
flags.DEFINE_string('fathom_base', '../fathom', 'Base dir of Fathom based workloads')
flags.DEFINE_string('tfweb_base', '../tfweb', 'Base dir of TFWeb based workloads')
flags.DEFINE_string('tfweb_saved_model_dir', '~/../symbiotic/peifeng/tf_cnn_benchmarks_models/saved_models',
'SavedModel dir of TFWeb based workloads')
flags.DEFINE_string('tfweb_request_body_dir', '~/../symbiotic/peifeng/tf_cnn_benchmarks_models/reqeusts',
'Predefined request body dir for TFWeb based workloads')
flags.DEFINE_boolean('no_capture', False, 'Do not capture workload outputs')
RunConfig = namedtuple('RunConfig', [
'batch_size',
'batch_num',
'cfgname',
])
class Executor(Enum):
Salus = "salus"
TF = "tf"
TFDist = "tfdist"
def enumerate_rcfgs(batch_sizes, batch_nums):
# type: (Iterable[Union[int, str]], Iterable[int]) -> Iterable[RunConfig]
"""Convenient method to generate a list of RunConfig"""
return [
RunConfig(batch_size, batch_num, None)
for batch_size in batch_sizes
for batch_num in batch_nums
]
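# Illustrative example (not part of the original module): three batch sizes crossed
# with two batch counts give six RunConfig entries, with cfgname left unset.
def _enumerate_rcfgs_demo():
    rcfgs = enumerate_rcfgs([25, 50, 100], [10, 20])
    assert len(rcfgs) == 6
    assert rcfgs[0] == RunConfig(batch_size=25, batch_num=10, cfgname=None)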
class Runner(with_metaclass(ABCMeta, object)):
"""A runner knows how to run a given workload"""
def __init__(self, wl):
# type: (Any) -> None
super().__init__()
self.wl = wl
self.env = wl.env.copy() # type: Dict[str, str]
def set_default(d, key, defval):
if key not in d:
d[key] = defval
else:
logger.info(f'Using custom value {key}={d[key]}')
set_default(self.env, 'CUDA_VISIBLE_DEVICES', '0')
set_default(self.env, 'TF_CPP_MIN_LOG_LEVEL', '2')
@abstractmethod
def __call__(self, executor, output_file):
# type: (Executor, Path) -> Popen
pass
class TFBenchmarkRunner(Runner):
"""Run a tf benchmark job"""
def __init__(self, wl, base_dir=None):
# type: (Any, Path) -> None
super().__init__(wl)
self.base_dir = base_dir
if self.base_dir is None:
self.base_dir = Path(FLAGS.tfbench_base)
def __call__(self, executor, output_file):
# type: (Executor, Path) -> Popen
cwd = self.base_dir / 'scripts' / 'tf_cnn_benchmarks'
cmd = [
'stdbuf', '-o0', '-e0', '--',
'python', 'tf_cnn_benchmarks.py',
'--display_every=1',
'--num_gpus=1',
'--variable_update=parameter_server',
'--nodistortions',
'--executor={}'.format(executor.value),
'--num_batches={}'.format(self.wl.batch_num),
'--batch_size={}'.format(self.wl.batch_size),
]
eval_interval = self.wl.env.pop('SALUS_TFBENCH_EVAL_INTERVAL', None)
eval_rand_factor = self.wl.env.pop('SALUS_TFBENCH_EVAL_RAND_FACTOR', None)
eval_block = self.wl.env.pop('SALUS_TFBENCH_EVAL_BLOCK', 'true')
eval_model_dir = self.wl.env.pop('SALUS_TFBENCH_EVAL_MODEL_DIR', 'models')
eval_model_dir = str(Path(eval_model_dir).joinpath(remove_suffix(self.wl.name, 'eval')))
eval_saved_model_dir = self.wl.env.pop('SALUS_TFBENCH_EVAL_SAVED_MODEL_DIR', None)
if eval_saved_model_dir is not None:
eval_saved_model_dir = str(Path(eval_saved_model_dir).joinpath(remove_suffix(self.wl.name, 'eval')))
num_seconds = self.wl.env.pop('SALUS_ITER_SECONDS', None)
if num_seconds is not None:
cmd += [
'--num_seconds={}'.format(num_seconds)
]
wait_for_signal = self.wl.env.pop('SALUS_WAIT_FOR_SIGNAL', None)
if wait_for_signal is not None:
cmd += [
'--wait_for_signal={}'.format(wait_for_signal)
]
if self.wl.name.endswith('eval'):
model_name = remove_suffix(self.wl.name, 'eval')
cmd += [
'--model_dir=' + eval_model_dir,
'--model={}'.format(model_name),
'--eval_block={}'.format(eval_block),
'--eval'
]
if eval_interval is not None:
cmd += [
'--eval_interval_secs={}'.format(eval_interval),
]
if eval_rand_factor is not None:
cmd += [
'--eval_interval_random_factor={}'.format(eval_rand_factor),
]
if eval_saved_model_dir is not None:
cmd += [
'--saved_model_dir=' + eval_saved_model_dir
]
else:
cmd += [
'--model={}'.format(self.wl.name),
]
if str2bool(self.wl.env.pop('SALUS_SAVE_MODEL', '')):
cmd += [
'--model_dir=' + eval_model_dir,
]
cmd += self.wl.extra_args
logger.info(f'Starting workload with cmd: {cmd}')
if FLAGS.no_capture:
return execute(cmd, cwd=str(cwd), env=self.env)
else:
output_file.parent.mkdir(exist_ok=True, parents=True)
with output_file.open('w') as f:
return execute(cmd, cwd=str(cwd), env=self.env, stdout=f, stderr=sp.STDOUT)
class UnittestRunner(Runner):
"""Run a unittest job"""
def __init__(self, wl, base_dir=None):
# type: (Any, Path) -> None
super().__init__(wl)
self.base_dir = base_dir
if self.base_dir is None:
self.base_dir = Path(FLAGS.unit_base)
def __call__(self, executor, output_file):
# type: (Executor, Path) -> Popen
env = self.env.copy()
env['EXEC_ITER_NUMBER'] = str(self.wl.batch_num)
env['SALUS_BATCH_SIZE'] = str(self.wl.batch_size)
if executor == Executor.TFDist:
env['SALUS_TFDIST_ENDPOINT'] = TFDistServer.current_server().endpoint
cwd = self.base_dir
pkg, method = self._construct_test_name(executor)
cmd = [
'stdbuf', '-o0', '-e0', '--',
'python', '-m', pkg, method,
]
cmd += self.wl.extra_args
logger.info(f'Starting workload with cmd: {cmd}')
if FLAGS.no_capture:
return execute(cmd, cwd=str(cwd), env=self.env)
else:
output_file.parent.mkdir(exist_ok=True, parents=True)
with output_file.open('w') as f:
# return execute(cmd, cwd=str(cwd), env=env, stdout=f, stderr=sp.STDOUT)
return execute(cmd, cwd=str(cwd), env=env, stdout=f, stderr=None)
def _construct_test_name(self, executor):
# type: (Executor) -> Tuple[str, str]
"""Construct test class and name from RunConfig"""
supported_model = {
'seq2seq': ('test_tf.test_seq', 'TestSeqPtb', {
'small': '0_small',
'medium': '1_medium',
'large': '2_large',
}),
'mnistsf': ('test_tf.test_mnist_tf', 'TestMnistSoftmax', {
25: '0', 50: '1', 100: '2'
}),
'mnistcv': ('test_tf.test_mnist_tf', 'TestMnistConv', {
25: '0', 50: '1', 100: '2'
}),
'mnistlg': ('test_tf.test_mnist_tf', 'TestMnistLarge', {
25: '0', 50: '1', 100: '2'
}),
'superres': ('test_tf.test_super_res', 'TestSuperRes', {
32: '0', 64: '1', 128: '2',
1: '0', 5: '1', 10: '2',
})
}
variable_batch_size_models = {'vae', 'superres', 'seq2seq', 'mnistsf', 'mnistcv', 'mnistlg'}
if remove_suffix(self.wl.name, 'eval') not in variable_batch_size_models:
if self.wl.batch_size not in self.wl.wtl.available_batch_sizes():
raise ValueError(f"Batch size `{self.wl.batch_size}' is not supported for {self.wl.name},"
f" available ones: {self.wl.wtl.available_batch_sizes()}")
if executor == Executor.Salus:
prefix = 'test_rpc_'
elif executor == Executor.TF:
prefix = 'test_gpu_'
elif executor == Executor.TFDist:
prefix = 'test_distributed_'
else:
raise ValueError(f'Unknown executor: {executor}')
if self.wl.name.endswith('eval'):
prefix += 'eval_'
model_name = remove_suffix(self.wl.name, 'eval')
if model_name in supported_model:
pkg, cls, names = supported_model[model_name]
else:
# fallback to guessing
pkg = f'test_tf.test_{model_name}'
cls = f'Test{snake_to_pascal(model_name)}'
# get method name
names = {
s: str(idx)
for idx, s in enumerate(self.wl.wtl.available_batch_sizes())
}
postfix = names.get(self.wl.batch_size, '0')
if model_name == 'seq2seq' and postfix == '0':
postfix = '2_large'
method = f'{cls}.{prefix}{postfix}'
return pkg, method
class FathomRunner(Runner):
"""Run a fathom job"""
def __init__(self, wl, base_dir=None):
super().__init__(wl)
self.base_dir = base_dir
if self.base_dir is None:
self.base_dir = FLAGS.fathom_base
def __call__(self, executor, output_file):
# type: (Executor, Path) -> Popen
cwd = self.base_dir
cmd = [
'stdbuf', '-o0', '-e0', '--',
'python', '-m', 'fathom.cli',
'--workload', remove_suffix(self.wl.name, 'eval'),
'--action', 'test' if self.wl.name.endswith('eval') else 'train',
'--num_iters', str(self.wl.batch_num),
'--batch_size', str(self.wl.batch_size),
]
if executor == Executor.Salus:
cmd += [
'--target', SalusServer.current_server().endpoint,
'--dev', '/gpu:0',
]
elif executor == Executor.TF:
cmd += [
'--dev', '/gpu:0',
]
elif executor == Executor.TFDist:
cmd += [
'--target', TFDistServer.current_server().endpoint,
'--dev', '/job:tfworker/gpu:0',
]
else:
raise ValueError(f'Unknown executor: {executor}')
cmd += self.wl.extra_args
logger.info(f'Starting workload with cmd: {cmd}')
if FLAGS.no_capture:
return execute(cmd, cwd=str(cwd), env=self.env)
else:
output_file.parent.mkdir(exist_ok=True, parents=True)
with output_file.open('w') as f:
return execute(cmd, cwd=str(cwd), env=self.env, stdout=f, stderr=sp.STDOUT)
class TFWebDirectRunner(Runner):
"""Using TFWeb's load infrastructure to directly run"""
def __init__(self, wl, base_dir=None):
super().__init__(wl)
self.base_dir = base_dir
if self.base_dir is None:
self.base_dir = FLAGS.tfweb_base
def __call__(self, executor, output_file):
model_name = remove_suffix(self.wl.name, 'eval')
cwd = self.base_dir
cmd = [
'stdbuf', '-o0', '-e0', '--',
'examples/direct/client',
'--model="{}"'.format(str(Path(FLAGS.tfweb_saved_model_dir).joinpath(model_name))),
'--batch_size={}'.format(self.wl.batch_size),
'--batch_num={}'.format(self.wl.batch_num),
]
if executor == Executor.Salus:
cmd += [
'--sess_target', SalusServer.current_server().endpoint,
]
elif executor == Executor.TF:
cmd += [
'--sess_target', '""',
]
elif executor == Executor.TFDist:
cmd += [
'--sess_target', TFDistServer.current_server().endpoint,
]
else:
raise ValueError(f'Unknown executor: {executor}')
cmd += self.wl.extra_args
logger.info(f'Starting workload with cmd: {cmd}')
if FLAGS.no_capture:
return execute(cmd, cwd=str(cwd), env=self.env)
else:
output_file.parent.mkdir(exist_ok=True, parents=True)
with output_file.open('w') as f:
return execute(cmd, cwd=str(cwd), env=self.env, stdout=f, stderr=sp.STDOUT)
class TFWebRunner(Runner):
"""
Run a TFWeb based inference job
We start several servers and a balancer on the same node.
The server commandline: tfweb --model=path/to/saved_model/network --sess_target=...
The client commandline: gobetween from-file xxx.toml
"""
def __init__(self, wl, base_dir=None):
super().__init__(wl)
self.base_dir = base_dir
if self.base_dir is None:
self.base_dir = FLAGS.tfweb_base
def __call__(self, executor, output_file):
# type: (Executor, Path) -> Popen
model_name = remove_suffix(self.wl.name, 'web')
cwd = self.base_dir
cmd = [
'stdbuf', '-o0', '-e0', '--',
'examples/cluster/start_cluster',
'--model="{}"'.format(str(Path(FLAGS.tfweb_saved_model_dir).joinpath(model_name))),
]
if executor == Executor.Salus:
cmd += [
'--sess_target', SalusServer.current_server().endpoint,
]
elif executor == Executor.TF:
cmd += [
'--sess_target', '""',
]
elif executor == Executor.TFDist:
cmd += [
'--sess_target', TFDistServer.current_server().endpoint,
]
else:
raise ValueError(f'Unknown executor: {executor}')
num_replicas = self.wl.env.pop('SALUS_TFWEB_REPLICAS', '1')
cmd += [
'--num_replicas', num_replicas
]
cmd += self.wl.extra_args
logger.info(f'Starting workload with cmd: {cmd}')
if FLAGS.no_capture:
return execute(cmd, cwd=str(cwd), env=self.env)
else:
output_file.parent.mkdir(exist_ok=True, parents=True)
with output_file.open('w') as f:
return execute(cmd, cwd=str(cwd), env=self.env, stdout=f, stderr=sp.STDOUT)
class TFWebClientRunner(Runner):
"""
Run a tfweb client attacker.
Command: examples/cluster/tfweb-client TARGET REQ_BODY PLANTXT
"""
def __init__(self, wl, base_dir=None):
super().__init__(wl)
self.base_dir = base_dir
if self.base_dir is None:
self.base_dir = FLAGS.tfweb_base
def __call__(self, executor, output_file):
# type: (Executor, Path) -> Popen
model_name = remove_suffix(self.wl.name, 'client')
cwd = self.base_dir
cmd = [
'stdbuf', '-o0', '-e0', '--',
'examples/tfweb-client',
'-output', str(output_file),
self.wl.target,
# request body
str(Path(FLAGS.tfweb_request_body_dir).joinpath(model_name).with_suffix('.txt')),
# always write plan to stdin
'-',
]
cmd += self.wl.extra_args
logger.info(f'Starting workload with cmd: {cmd}')
proc = execute(cmd, cwd=str(cwd), env=self.env, stdin=sp.PIPE)
proc.stdin.write(self._plan_to_bytes())
proc.stdin.close()
return proc
def _plan_to_bytes(self):
return ' '.join(self.wl.plan).encode('utf-8')
|
model/resnet_cifar10.py | cmu-enyac/LeGR | 106 | 11180218 | <filename>model/resnet_cifar10.py
'''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] <NAME>, <NAME>, <NAME>, <NAME>
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
class DownsampleA(nn.Module):
def __init__(self, nIn, nOut, stride):
super(DownsampleA, self).__init__()
assert stride == 2
self.out_channels = nOut
self.avg = nn.AvgPool2d(kernel_size=1, stride=stride)
def forward(self, x):
x = self.avg(x)
if self.out_channels-x.size(1) > 0:
return torch.cat((x, torch.zeros(x.size(0), self.out_channels-x.size(1), x.size(2), x.size(3), device='cuda')), 1)
else:
return x
def conv_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True)
)
class BasicBlock(nn.Module):
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(planes),
nn.ReLU(inplace=True),
nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(planes),
)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
self.shortcut = DownsampleA(in_planes, planes, stride)
#self.shortcut = nn.Sequential(
# nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
# nn.BatchNorm2d(planes)
#)
def forward(self, x):
x = F.relu(self.shortcut(x) + self.conv(x))
return x
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 16
self.features = [conv_bn(3, 16, 1)]
self.features.append(self._make_layer(block, 16, num_blocks[0], stride=1))
self.features.append(self._make_layer(block, 32, num_blocks[1], stride=2))
self.features.append(self._make_layer(block, 64, num_blocks[2], stride=2))
self.features.append(nn.AvgPool2d(8))
self.features = nn.Sequential(*self.features)
self.classifier = nn.Sequential(nn.Linear(64, num_classes))
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if type(m) in [nn.Conv2d, nn.Linear, nn.BatchNorm2d]:
m.reset_parameters()
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes
return nn.Sequential(*layers)
def forward(self, x):
x = self.features(x)
x = x.view(-1, self.classifier[0].in_features)
x = self.classifier(x)
return x
def ResNet20(num_classes=10):
return ResNet(BasicBlock, [3,3,3], num_classes=num_classes)
def ResNet32(num_classes=10):
return ResNet(BasicBlock, [5,5,5], num_classes=num_classes)
def ResNet44(num_classes=10):
return ResNet(BasicBlock, [7,7,7], num_classes=num_classes)
def ResNet56(num_classes=10):
return ResNet(BasicBlock, [9,9,9], num_classes=num_classes)
def test():
net = ResNet20()
y = net(torch.randn(1,3,32,32))
print(y.size())
# test()
|
lib/pymedphys/_experimental/wlutz/interppoints.py | ethanio12345/pymedphys | 207 | 11180222 | # Copyright (C) 2019 Cancer Care Associates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pymedphys._imports import numpy as np
import pymedphys._utilities.createshells
from . import transformation as _transformation
def transform_penumbra_points(points_at_origin, centre, rotation):
transform = _transformation.rotate_and_translate_transform(centre, rotation)
xx_left_right, yy_left_right, xx_top_bot, yy_top_bot = points_at_origin
(
xx_left_right_transformed,
yy_left_right_transformed,
) = _transformation.apply_transform(xx_left_right, yy_left_right, transform)
xx_top_bot_transformed, yy_top_bot_transformed = _transformation.apply_transform(
xx_top_bot, yy_top_bot, transform
)
return (
xx_left_right_transformed,
yy_left_right_transformed,
xx_top_bot_transformed,
yy_top_bot_transformed,
)
def define_penumbra_points_at_origin(edge_lengths, penumbra):
penumbra_range = np.linspace(-penumbra / 2, penumbra / 2, 11)
def _each_edge(current_edge_length, orthogonal_edge_length):
half_field_range = np.linspace(
-orthogonal_edge_length / 4, orthogonal_edge_length / 4, 51
)
a_side_lookup = -current_edge_length / 2 + penumbra_range
b_side_lookup = current_edge_length / 2 + penumbra_range
current_axis_lookup = np.concatenate([a_side_lookup, b_side_lookup])
return current_axis_lookup, half_field_range
edge_points_left_right = _each_edge(edge_lengths[0], edge_lengths[1])
edge_points_top_bot = _each_edge(edge_lengths[1], edge_lengths[0])
xx_left_right, yy_left_right = np.meshgrid(*edge_points_left_right)
xx_top_bot, yy_top_bot = np.meshgrid(*edge_points_top_bot[::-1])
return xx_left_right, yy_left_right, xx_top_bot, yy_top_bot
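# Illustrative sketch (not part of the original module): penumbra sampling points at
# the origin for a hypothetical 20 mm x 20 mm field with a 3 mm penumbra; each edge
# is sampled across the penumbra and along half of the field width.
def _penumbra_points_demo():
    xx_lr, yy_lr, xx_tb, yy_tb = define_penumbra_points_at_origin(
        edge_lengths=(20, 20), penumbra=3
    )
    assert xx_lr.shape == yy_lr.shape
    assert xx_tb.shape == yy_tb.shape
    return xx_lr, yy_lr, xx_tb, yy_tb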
def create_bb_points_function(bb_diameter):
max_distance = bb_diameter * 0.5
min_distance = 0
num_steps = 11
min_dist_between_points = (max_distance - min_distance) / num_steps
distances = np.arange(
min_distance, max_distance + min_dist_between_points, min_dist_between_points
)
x = []
y = []
dist = []
for _, distance in enumerate(distances):
(
new_x,
new_y,
) = pymedphys._utilities.createshells.calculate_coordinates_shell_2d( # pylint: disable = protected-access
distance, min_dist_between_points
)
x.append(new_x)
y.append(new_y)
dist.append(distance * np.ones_like(new_x))
x = np.concatenate(x)
y = np.concatenate(y)
dist = np.concatenate(dist)
def points_to_check(bb_centre):
x_shifted = x + bb_centre[0]
y_shifted = y + bb_centre[1]
return x_shifted, y_shifted
return points_to_check, dist
|
Hamming_Distance/Python/jcla1/hamming.py | Mynogs/Algorithm-Implementations | 1,184 | 11180237 | <gh_stars>1000+
def hamming_distance(p, q):
"""Return the Hamming distance between equal-length sequences"""
if len(p) != len(q): raise ValueError("Undefined for sequences of unequal length")
return sum(ch1 != ch2 for ch1, ch2 in zip(p, q)) |
tbx/work/migrations/0009_remove_play_fields.py | elviva404/wagtail-torchbox | 103 | 11180248 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-01-21 13:52
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("work", "0008_remove_workpage_body"),
]
operations = [
migrations.RemoveField(model_name="workindexpage", name="show_in_play_menu",),
migrations.RemoveField(model_name="workpage", name="show_in_play_menu",),
]
|
atest/testdata/test_libraries/MyLibDir/ClassLib.py | rdagum/robotframework | 7,073 | 11180254 | <filename>atest/testdata/test_libraries/MyLibDir/ClassLib.py
class ClassLib:
def keyword_in_mylibdir_classlib(self):
pass
|
matchzoo/utils/timer.py | ChrisRBXiong/MatchZoo-py | 468 | 11180257 | """Timer."""
import time
class Timer(object):
"""Computes elapsed time."""
def __init__(self):
"""Timer constructor."""
self.reset()
def reset(self):
"""Reset timer."""
self.running = True
self.total = 0
self.start = time.time()
def resume(self):
"""Resume."""
if not self.running:
self.running = True
self.start = time.time()
return self
def stop(self):
"""Stop."""
if self.running:
self.running = False
self.total += time.time() - self.start
return self
@property
def time(self):
"""Return time."""
if self.running:
return self.total + time.time() - self.start
return self.total
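# Illustrative usage (not part of the original module): time a short sleep; `time`
# here is the stdlib module imported at the top of this file.
def _timer_demo():
    timer = Timer()
    time.sleep(0.1)
    timer.stop()
    return timer.time  # roughly 0.1 seconds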
|
applications/FluidDynamicsApplication/python_scripts/flow_output_process.py | lkusch/Kratos | 778 | 11180268 | # Importing the Kratos Library
import KratosMultiphysics
import KratosMultiphysics.FluidDynamicsApplication as KratosCFD
# other imports
from KratosMultiphysics.time_based_ascii_file_writer_utility import TimeBasedAsciiFileWriterUtility
def Factory(settings, Model):
if(type(settings) != KratosMultiphysics.Parameters):
raise Exception("expected input shall be a Parameters object, encapsulating a json string")
return FlowOutputProcess(Model, settings["Parameters"])
class FlowOutputProcess(KratosMultiphysics.Process):
"""This process calculates(using c++ utilities) and writes the flow through a given list of (sub)model parts.
In 3D use a surface eg. Inlet, Outlet
In 2D use a line eg. Inlet, Outlet
This process works in MPI as well as with restarts
"""
def __init__(self, model, params):
KratosMultiphysics.Process.__init__(self)
default_settings = KratosMultiphysics.Parameters('''{
"help" : "This process calculates(using c++ utilities) and writes the flow through a given list of (sub)model parts.",
"model_part_name_list" : [],
"print_format" : "",
"output_file_settings": {}
}''')
self.model = model
self.params = params
self.params.ValidateAndAssignDefaults(default_settings)
self.output_file = None
self.format = self.params["print_format"].GetString()
def ExecuteInitialize(self):
# getting the ModelPart from the Model
model_part_name_list = self.params["model_part_name_list"]
if model_part_name_list.size() == 0:
raise Exception('No model parts are specified!')
self.model_part_for_time = self.model[model_part_name_list[0].GetString()]
# Only rank 0 writes in MPI
my_rank = 0
comm = self.model_part_for_time.GetCommunicator().GetDataCommunicator()
self.is_writing_rank = my_rank == comm.Rank()
if self.is_writing_rank:
file_handler_params = KratosMultiphysics.Parameters(self.params["output_file_settings"])
file_header = self.GetFileHeader()
self.output_file = TimeBasedAsciiFileWriterUtility(self.model_part_for_time, file_handler_params, file_header).file
def ExecuteFinalizeSolutionStep(self):
time = self.model_part_for_time.ProcessInfo[KratosMultiphysics.TIME]
model_part_name_list = self.params["model_part_name_list"]
out = str(time)
for model_part_name_param in model_part_name_list:
model_part_name = model_part_name_param.GetString()
model_part = self.model[model_part_name]
flow_value = self.CalculateFlow(model_part)
out += " " + format(flow_value,self.format)
out += "\n"
if self.is_writing_rank:
self.output_file.write(out)
def ExecuteFinalize(self):
if self.is_writing_rank:
self.output_file.close()
def GetFileHeader(self):
model_part_name_list = self.params["model_part_name_list"]
header = '# Flow results ' + '\n'
header += '# time '
for model_part_name_param in model_part_name_list:
model_part_name = model_part_name_param.GetString()
model_part = self.model[model_part_name]
header += model_part.Name
header += ' '
header += "\n"
return header
def CalculateFlow(self, model_part):
flow_value = KratosCFD.FluidAuxiliaryUtilities.CalculateFlowRate(model_part)
return flow_value
|
Sources/Workflows/Monkey-Patch/alfred/util.py | yagosys/AlfredWorkflow.com | 2,177 | 11180319 | # -*- coding: utf-8 -*-
import hashlib, random
import core
hashDigest = lambda s: hashlib.md5(s).hexdigest()
uid = lambda: '{0}.{1}'.format(core.bundleID(), random.getrandbits(25)) |
xrspatial/tests/test_focal.py | g2giovanni/xarray-spatial | 547 | 11180320 | <filename>xrspatial/tests/test_focal.py
import pytest
import xarray as xr
import numpy as np
import dask.array as da
from xrspatial.utils import doesnt_have_cuda
from xrspatial.utils import ngjit
from xrspatial import mean
from xrspatial.focal import hotspots, apply, focal_stats
from xrspatial.convolution import (
convolve_2d, calc_cellsize, circle_kernel, annulus_kernel
)
def _do_sparse_array(data_array):
import random
indx = list(zip(*np.where(data_array)))
pos = random.sample(range(data_array.size), data_array.size//2)
indx = np.asarray(indx)[pos]
r = indx[:, 0]
c = indx[:, 1]
data_half = data_array.copy()
data_half[r, c] = 0
return data_half
def _do_gaussian_array():
_x = np.linspace(0, 50, 101)
_y = _x.copy()
_mean = 25
_sdev = 5
X, Y = np.meshgrid(_x, _y, sparse=True)
x_fac = -np.power(X-_mean, 2)
y_fac = -np.power(Y-_mean, 2)
gaussian = np.exp((x_fac+y_fac)/(2*_sdev**2)) / (2.5*_sdev)
return gaussian
data_random = np.random.random_sample((100, 100))
data_random_sparse = _do_sparse_array(data_random)
data_gaussian = _do_gaussian_array()
def test_mean_transfer_function_cpu():
# numpy case
numpy_agg = xr.DataArray(data_random)
numpy_mean = mean(numpy_agg)
assert isinstance(numpy_mean.data, np.ndarray)
# dask + numpy case
dask_numpy_agg = xr.DataArray(da.from_array(data_random, chunks=(3, 3)))
dask_numpy_mean = mean(dask_numpy_agg)
assert isinstance(dask_numpy_mean.data, da.Array)
# both output same results
assert np.isclose(
numpy_mean, dask_numpy_mean.compute(), equal_nan=True
).all()
assert numpy_agg.shape == numpy_mean.shape
@pytest.mark.skipif(doesnt_have_cuda(), reason="CUDA Device not Available")
def test_mean_transfer_function_gpu_equals_cpu():
import cupy
# cupy case
cupy_agg = xr.DataArray(cupy.asarray(data_random))
cupy_mean = mean(cupy_agg)
assert isinstance(cupy_mean.data, cupy.ndarray)
# numpy case
numpy_agg = xr.DataArray(data_random)
numpy_mean = mean(numpy_agg)
assert np.isclose(numpy_mean, cupy_mean.data.get(), equal_nan=True).all()
# dask + cupy case not implemented
dask_cupy_agg = xr.DataArray(
da.from_array(cupy.asarray(data_random), chunks=(3, 3))
)
with pytest.raises(NotImplementedError) as e_info:
mean(dask_cupy_agg)
assert e_info
convolve_2d_data = np.array([[0., 1., 1., 1., 1., 1.],
[1., 0., 1., 1., 1., 1.],
[1., 1., 0., 1., 1., 1.],
[1., 1., 1., np.nan, 1., 1.],
[1., 1., 1., 1., 0., 1.],
[1., 1., 1., 1., 1., 0.]])
def test_kernel():
data = convolve_2d_data
m, n = data.shape
agg = xr.DataArray(data, dims=['y', 'x'])
agg['x'] = np.linspace(0, n, n)
agg['y'] = np.linspace(0, m, m)
cellsize_x, cellsize_y = calc_cellsize(agg)
kernel1 = circle_kernel(cellsize_x, cellsize_y, 2)
expected_kernel1 = np.array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
assert isinstance(kernel1, np.ndarray)
assert np.isclose(kernel1, expected_kernel1, equal_nan=True).all()
kernel2 = annulus_kernel(cellsize_x, cellsize_y, 2, 0.5)
expected_kernel2 = np.array([[0, 1, 0],
[1, 0, 1],
[0, 1, 0]])
assert isinstance(kernel2, np.ndarray)
assert np.isclose(kernel2, expected_kernel2, equal_nan=True).all()
def test_convolution():
data = convolve_2d_data
dask_data = da.from_array(data, chunks=(3, 3))
kernel1 = np.ones((1, 1))
numpy_output_1 = convolve_2d(data, kernel1)
expected_output_1 = np.array([[0., 1., 1., 1., 1., 1.],
[1., 0., 1., 1., 1., 1.],
[1., 1., 0., 1., 1., 1.],
[1., 1., 1., np.nan, 1., 1.],
[1., 1., 1., 1., 0., 1.],
[1., 1., 1., 1., 1., 0.]])
assert isinstance(numpy_output_1, np.ndarray)
assert np.isclose(numpy_output_1, expected_output_1, equal_nan=True).all()
dask_output_1 = convolve_2d(dask_data, kernel1)
assert isinstance(dask_output_1, da.Array)
assert np.isclose(
dask_output_1.compute(), expected_output_1, equal_nan=True
).all()
kernel2 = np.array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
numpy_output_2 = convolve_2d(data, kernel2)
expected_output_2 = np.array([
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, 4., 3., 5., 5., np.nan],
[np.nan, 3., np.nan, np.nan, np.nan, np.nan],
[np.nan, 5., np.nan, np.nan, np.nan, np.nan],
[np.nan, 5., np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]
])
# kernel2 is of 3x3, thus the border edge is 1 cell long.
# currently, ignoring border edge (i.e values in edges are all nans)
assert isinstance(numpy_output_2, np.ndarray)
assert np.isclose(
numpy_output_2, expected_output_2, equal_nan=True
).all()
dask_output_2 = convolve_2d(dask_data, kernel2)
assert isinstance(dask_output_2, da.Array)
assert np.isclose(
dask_output_2.compute(), expected_output_2, equal_nan=True
).all()
kernel3 = np.array([[0, 1, 0],
[1, 0, 1],
[0, 1, 0]])
numpy_output_3 = convolve_2d(data, kernel3)
expected_output_3 = np.array([
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, 4., 2., 4., 4., np.nan],
[np.nan, 2., np.nan, np.nan, np.nan, np.nan],
[np.nan, 4., np.nan, np.nan, np.nan, np.nan],
[np.nan, 4., np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]
])
# kernel3 is of 3x3, thus the border edge is 1 cell long.
# currently, ignoring border edge (i.e values in edges are all nans)
assert isinstance(numpy_output_3, np.ndarray)
assert np.isclose(numpy_output_3, expected_output_3, equal_nan=True).all()
dask_output_3 = convolve_2d(dask_data, kernel3)
assert isinstance(dask_output_3, da.Array)
assert np.isclose(
dask_output_3.compute(), expected_output_3, equal_nan=True
).all()
@pytest.mark.skipif(doesnt_have_cuda(), reason="CUDA Device not Available")
def test_2d_convolution_gpu_equals_cpu():
import cupy
data = convolve_2d_data
numpy_agg = xr.DataArray(data)
cupy_agg = xr.DataArray(cupy.asarray(data))
kernel1 = np.ones((1, 1))
output_numpy1 = convolve_2d(numpy_agg.data, kernel1)
output_cupy1 = convolve_2d(cupy_agg.data, kernel1)
assert isinstance(output_cupy1, cupy.ndarray)
assert np.isclose(output_numpy1, output_cupy1.get(), equal_nan=True).all()
kernel2 = np.array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
output_numpy2 = convolve_2d(numpy_agg.data, kernel2)
output_cupy2 = convolve_2d(cupy_agg.data, kernel2)
assert isinstance(output_cupy2, cupy.ndarray)
assert np.isclose(output_numpy2, output_cupy2.get(), equal_nan=True).all()
kernel3 = np.array([[0, 1, 0],
[1, 0, 1],
[0, 1, 0]])
output_numpy3 = convolve_2d(numpy_agg.data, kernel3)
output_cupy3 = convolve_2d(cupy_agg.data, kernel3)
assert isinstance(output_cupy3, cupy.ndarray)
assert np.isclose(output_numpy3, output_cupy3.get(), equal_nan=True).all()
# dask + cupy case not implemented
dask_cupy_agg = xr.DataArray(
da.from_array(cupy.asarray(data), chunks=(3, 3))
)
with pytest.raises(NotImplementedError) as e_info:
convolve_2d(dask_cupy_agg.data, kernel3)
assert e_info
data_apply = np.array([[0, 1, 2, 3, 4, 5],
[6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17],
[18, 19, 20, 21, 22, 23]])
kernel_apply = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]])
def test_apply_cpu():
@ngjit
def func_zero_cpu(x):
return 0
# numpy case
numpy_agg = xr.DataArray(data_apply)
numpy_apply = apply(numpy_agg, kernel_apply, func_zero_cpu)
assert isinstance(numpy_apply.data, np.ndarray)
assert numpy_agg.shape == numpy_apply.shape
assert np.count_nonzero(numpy_apply.data) == 0
# dask + numpy case
dask_numpy_agg = xr.DataArray(da.from_array(data_apply, chunks=(3, 3)))
dask_numpy_apply = apply(dask_numpy_agg, kernel_apply, func_zero_cpu)
assert isinstance(dask_numpy_apply.data, da.Array)
# both output same results
assert np.isclose(
numpy_apply, dask_numpy_apply.compute(), equal_nan=True
).all()
@pytest.mark.skipif(doesnt_have_cuda(), reason="CUDA Device not Available")
def test_apply_gpu_equals_cpu():
def func_zero(x):
return 0
@ngjit
def func_zero_cpu(x):
return 0
# cupy case
import cupy
cupy_agg = xr.DataArray(cupy.asarray(data_apply))
cupy_apply = apply(cupy_agg, kernel_apply, func_zero)
assert isinstance(cupy_apply.data, cupy.ndarray)
# numpy case
numpy_agg = xr.DataArray(data_apply)
numpy_apply = apply(numpy_agg, kernel_apply, func_zero_cpu)
assert np.isclose(numpy_apply, cupy_apply.data.get(), equal_nan=True).all()
# dask + cupy case not implemented
dask_cupy_agg = xr.DataArray(
da.from_array(cupy.asarray(data_apply), chunks=(3, 3))
)
with pytest.raises(NotImplementedError) as e_info:
apply(dask_cupy_agg, kernel_apply, func_zero)
assert e_info
def test_focal_stats_cpu():
data = np.arange(16).reshape(4, 4)
numpy_agg = xr.DataArray(data)
dask_numpy_agg = xr.DataArray(da.from_array(data, chunks=(3, 3)))
cellsize = (1, 1)
kernel = circle_kernel(*cellsize, 1.5)
numpy_focalstats = focal_stats(numpy_agg, kernel)
assert isinstance(numpy_focalstats.data, np.ndarray)
assert numpy_focalstats.ndim == 3
assert numpy_agg.shape == numpy_focalstats.shape[1:]
dask_numpy_focalstats = focal_stats(dask_numpy_agg, kernel)
assert isinstance(dask_numpy_focalstats.data, da.Array)
assert np.isclose(
numpy_focalstats, dask_numpy_focalstats.compute(), equal_nan=True
).all()
def test_hotspot():
n, m = 10, 10
data = np.zeros((n, m), dtype=float)
all_idx = zip(*np.where(data == 0))
nan_cells = [(i, i) for i in range(m)]
for cell in nan_cells:
data[cell[0], cell[1]] = np.nan
# add some extreme values
hot_region = [(1, 1), (1, 2), (1, 3),
(2, 1), (2, 2), (2, 3),
(3, 1), (3, 2), (3, 3)]
cold_region = [(7, 7), (7, 8), (7, 9),
(8, 7), (8, 8), (8, 9),
(9, 7), (9, 8), (9, 9)]
for p in hot_region:
data[p[0], p[1]] = 10000
for p in cold_region:
data[p[0], p[1]] = -10000
numpy_agg = xr.DataArray(data, dims=['y', 'x'])
numpy_agg['x'] = np.linspace(0, n, n)
numpy_agg['y'] = np.linspace(0, m, m)
cellsize_x, cellsize_y = calc_cellsize(numpy_agg)
kernel = circle_kernel(cellsize_x, cellsize_y, 2.0)
no_significant_region = [id for id in all_idx if id not in hot_region and
id not in cold_region]
# numpy case
numpy_hotspots = hotspots(numpy_agg, kernel)
# dask + numpy
dask_numpy_agg = xr.DataArray(da.from_array(data, chunks=(3, 3)))
dask_numpy_hotspots = hotspots(dask_numpy_agg, kernel)
assert isinstance(dask_numpy_hotspots.data, da.Array)
# both output same results
assert np.isclose(numpy_hotspots.data, dask_numpy_hotspots.data.compute(),
equal_nan=True).all()
# check output's properties
# output must be an xarray DataArray
assert isinstance(numpy_hotspots, xr.DataArray)
assert isinstance(numpy_hotspots.values, np.ndarray)
assert issubclass(numpy_hotspots.values.dtype.type, np.int8)
# shape, dims, coords, attr preserved
assert numpy_agg.shape == numpy_hotspots.shape
assert numpy_agg.dims == numpy_hotspots.dims
assert numpy_agg.attrs == numpy_hotspots.attrs
for coord in numpy_agg.coords:
assert np.all(numpy_agg[coord] == numpy_hotspots[coord])
# no nan in output
assert not np.isnan(np.min(numpy_hotspots))
# output of extreme regions are non-zeros
# hot spots
hot_spot = np.asarray([numpy_hotspots[p] for p in hot_region])
assert np.all(hot_spot >= 0)
assert np.sum(hot_spot) > 0
# cold spots
cold_spot = np.asarray([numpy_hotspots[p] for p in cold_region])
assert np.all(cold_spot <= 0)
assert np.sum(cold_spot) < 0
# output of no significant regions are 0s
no_sign = np.asarray([numpy_hotspots[p] for p in no_significant_region])
assert np.all(no_sign == 0)
@pytest.mark.skipif(doesnt_have_cuda(), reason="CUDA Device not Available")
def test_hotspot_gpu_equals_cpu():
n, m = 10, 10
data = np.zeros((n, m), dtype=float)
nan_cells = [(i, i) for i in range(m)]
for cell in nan_cells:
data[cell[0], cell[1]] = np.nan
# add some extreme values
hot_region = [(1, 1), (1, 2), (1, 3),
(2, 1), (2, 2), (2, 3),
(3, 1), (3, 2), (3, 3)]
cold_region = [(7, 7), (7, 8), (7, 9),
(8, 7), (8, 8), (8, 9),
(9, 7), (9, 8), (9, 9)]
for p in hot_region:
data[p[0], p[1]] = 10000
for p in cold_region:
data[p[0], p[1]] = -10000
numpy_agg = xr.DataArray(data, dims=['y', 'x'])
numpy_agg['x'] = np.linspace(0, n, n)
numpy_agg['y'] = np.linspace(0, m, m)
cellsize_x, cellsize_y = calc_cellsize(numpy_agg)
kernel = circle_kernel(cellsize_x, cellsize_y, 2.0)
# numpy case
numpy_hotspots = hotspots(numpy_agg, kernel)
# cupy case
import cupy
cupy_agg = xr.DataArray(cupy.asarray(data))
cupy_hotspots = hotspots(cupy_agg, kernel)
assert isinstance(cupy_hotspots.data, cupy.ndarray)
assert np.isclose(
numpy_hotspots, cupy_hotspots.data.get(), equal_nan=True
).all()
# dask + cupy case not implemented
dask_cupy_agg = xr.DataArray(
da.from_array(cupy.asarray(data), chunks=(3, 3))
)
with pytest.raises(NotImplementedError) as e_info:
hotspots(dask_cupy_agg, kernel)
assert e_info
|
tools/pot/openvino/tools/pot/configs/hardware_config.py | ryanloney/openvino-1 | 1,127 | 11180332 | <reponame>ryanloney/openvino-1
# Copyright (C) 2020-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
try:
import jstyleson as json
except ImportError:
import json
from collections import OrderedDict
from addict import Dict
from .utils import product_dict
class HardwareConfig(list):
def get(self, op_type, attr):
def match_attrs(op_config, attr):
for attr_name, attr_value in attr.items():
if attr_name in op_config:
equal = attr_value in op_config[attr_name] if isinstance(op_config[attr_name], list) \
else attr_value == op_config[attr_name]
if not equal:
return False
return True
config = None
for op_config in self:
if op_config.type == op_type and match_attrs(op_config, attr):
if config is not None:
raise RuntimeError('Several hardware configs were defined for operation with type {}, attr {}\n '
'Specify the operation uniquely'.format(op_type, attr))
config = op_config
return config
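
    # Illustrative lookup (the operation type and attributes are hypothetical
    # and depend on the loaded hardware config file):
    #   hw_config = HardwareConfig.from_json('cpu.json')
    #   op_config = hw_config.get('Convolution', {'group': 1})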
@classmethod
def from_json(cls, path):
with open(path) as f:
json_config = json.load(f, object_pairs_hook=OrderedDict)
hw_config = cls()
hw_config.append(Dict(('target_device', json_config['target_device'])))
configs = {}
for algorithm_name, algorithm_config in json_config.get('config', {}).items():
configs[algorithm_name] = {}
for config_name, config in algorithm_config.items():
for key, val in config.items():
if not isinstance(val, list):
config[key] = [val]
configs[algorithm_name][config_name] = list(product_dict(config))
for op_config in json_config.get('operations', []):
for algorithm_name in op_config:
if algorithm_name not in configs:
continue
tmp_config = {}
for name, algorithm_config in op_config[algorithm_name].items():
if not isinstance(algorithm_config, list):
algorithm_config = [algorithm_config]
tmp_config[name] = []
for config_item in algorithm_config:
if isinstance(config_item, str):
tmp_config[name].extend(configs[algorithm_name][config_item])
else:
for key, val in config_item.items():
if not isinstance(val, list):
config_item[key] = [val]
tmp_config[name].extend(list(product_dict(config_item)))
op_config[algorithm_name] = tmp_config
hw_config.append(Dict(op_config))
return hw_config
|
sdklab/meantimerecovery/mean_time_recover_with_docker.py | dominicbetts/azure-iot-sdk-python | 366 | 11180340 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import docker
import asyncio
import uuid
from azure.iot.device.aio import IoTHubDeviceClient
from azure.iot.device import Message
from time import perf_counter
import threading
from multiprocessing import Value, Process
# Scenario based values
KEEP_ALIVE = 18
FACTOR_OF_KEEP_ALIVE = 0.5
KEEP_RUNNING = 10
KEEP_DEAD = int(KEEP_ALIVE * FACTOR_OF_KEEP_ALIVE)
MQTT_BROKER_RESTART_COUNT = 5
CONTAINER_NAME = "leaky-cauldron"
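
# Scenario sketch: the broker container is started, kept alive for KEEP_RUNNING
# seconds, stopped and kept dead for KEEP_DEAD seconds, then restarted; this
# cycle repeats MQTT_BROKER_RESTART_COUNT times while the telemetry task keeps
# sending messages and records how long each send takes (mean time to recover).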
elapsed_times = []
container = None
def control_container(
container_name, keep_running, keep_dead, restart_count, signal_to_quit, should_be_restarted=True
):
global container
print("Container started.")
client = docker.from_env()
container = client.containers.run(
"mqtt-broker", detach=True, name=container_name, ports={"8883/tcp": 8883}
)
if should_be_restarted:
kill_and_restart_container(
keep_running=keep_running,
keep_dead=keep_dead,
restart_count=restart_count,
signal_to_quit=signal_to_quit,
)
else:
# This may need to be varied so that the last message can be SENT without an async task cancellation error.
kill_container(keep_running=5)
signal_to_quit.value = 1
def kill_and_restart_container(keep_running, keep_dead, restart_count, signal_to_quit):
kill_container(keep_running)
print("Container stopped.")
start_timer(duration=keep_dead, restart_count=restart_count, signal_to_quit=signal_to_quit)
def kill_container(keep_running):
print("Container will run for {} secs.".format(keep_running))
container.stop(timeout=keep_running)
container.remove()
def quitting_listener(quit_signal):
while True:
sig_val = quit_signal.value
if sig_val == 1:
print("Quitting...")
break
async def send_test_message(device_client, restart_count):
i = 0
while True:
print("sending message #" + str(i))
msg = Message("test wind speed " + str(i))
msg.message_id = uuid.uuid4()
t_start = perf_counter()
await device_client.send_message(msg)
t_stop = perf_counter()
elapsed_time = t_stop - t_start
elapsed_times.append(elapsed_time)
print("done sending message #" + str(i))
i = i + 1
await asyncio.sleep(3)
val = restart_count.value
if val >= MQTT_BROKER_RESTART_COUNT:
print(
"Executed container restarts with telemetry {} times. Quitting telemetry task.".format(
val
)
)
break
def start_timer(duration, restart_count, signal_to_quit):
def timer_done():
timer.cancel()
print("{} secs is up. Cancelled timer. Container will be restarted again.".format(duration))
restart_count.value = restart_count.value + 1
# signal_to_quit.value = 0
needs_restart = True
if restart_count.value >= MQTT_BROKER_RESTART_COUNT:
print(
"Executed container restarts {} times. Container will not be restarted after the current loop. Quitting any future loop.".format(
restart_count.value
)
)
# signal_to_quit.value = 1
needs_restart = False
control_container(
CONTAINER_NAME,
keep_running=KEEP_RUNNING,
keep_dead=duration,
restart_count=restart_count,
signal_to_quit=signal_to_quit,
should_be_restarted=needs_restart,
)
print("Container will be dead for {} secs.".format(duration))
timer = threading.Timer(duration, timer_done)
timer.start()
async def main():
ca_cert = "self_cert_localhost.pem"
    with open(ca_cert) as certfile:
        root_ca_cert = certfile.read()
# Inter process values
times_container_restart = Value("i", 0)
signal_to_quit = Value("i", 0)
process_docker = Process(
target=control_container,
args=(CONTAINER_NAME, KEEP_RUNNING, KEEP_DEAD, times_container_restart, signal_to_quit),
)
process_docker.start()
# Do not delete sleep from here. Server needs some time to start.
await asyncio.sleep(5)
conn_str = "HostName=localhost;DeviceId=devicemtr;SharedAccessKey=<KEY>"
device_client = IoTHubDeviceClient.create_from_connection_string(
conn_str, keep_alive=KEEP_ALIVE, server_verification_cert=root_ca_cert
)
await device_client.connect()
send_message_task = asyncio.create_task(
send_test_message(device_client, times_container_restart)
)
# Run the listener in the event loop
# This can be a STDIN listener as well for user to indicate quitting.
loop = asyncio.get_running_loop()
finished_loops = loop.run_in_executor(None, quitting_listener, signal_to_quit)
# Wait for count times to reach a certain number indicative of completion
await finished_loops
print("Count is " + str(times_container_restart.value))
print(elapsed_times)
process_docker.terminate()
try:
send_message_task.cancel()
except asyncio.CancelledError:
print("send message task is cancelled now")
await device_client.disconnect()
if __name__ == "__main__":
asyncio.run(main())
|
api/tests/unit/features/test_unit_features_views.py | SolidStateGroup/Bullet-Train-API | 126 | 11180348 | from django.urls import reverse
from rest_framework import status
from environments.models import Environment
from features.feature_types import MULTIVARIATE
from features.models import Feature
from features.multivariate.models import MultivariateFeatureOption
from organisations.models import Organisation
from projects.models import Project, UserProjectPermission
def test_list_feature_states_from_simple_view_set(
environment, feature, admin_user, admin_client, django_assert_num_queries
):
# Given
base_url = reverse("api-v1:features:featurestates-list")
url = f"{base_url}?environment={environment.id}"
# add another feature
Feature.objects.create(name="another_feature", project=environment.project)
# add another organisation with a project, environment and feature (which should be
# excluded)
another_organisation = Organisation.objects.create(name="another_organisation")
admin_user.add_organisation(another_organisation)
another_project = Project.objects.create(
name="another_project", organisation=another_organisation
)
Environment.objects.create(name="another_environment", project=another_project)
Feature.objects.create(project=another_project, name="another_projects_feature")
UserProjectPermission.objects.create(
user=admin_user, project=another_project, admin=True
)
# add another feature with multivariate options
mv_feature = Feature.objects.create(
name="mv_feature", project=environment.project, type=MULTIVARIATE
)
MultivariateFeatureOption.objects.create(
feature=mv_feature,
default_percentage_allocation=10,
type="unicode",
string_value="foo",
)
# When
with django_assert_num_queries(7):
response = admin_client.get(url)
# Then
assert response.status_code == status.HTTP_200_OK
response_json = response.json()
assert response_json["count"] == 3
def test_list_feature_states_nested_environment_view_set(
environment, project, feature, admin_client, django_assert_num_queries
):
# Given
base_url = reverse(
"api-v1:environments:environment-featurestates-list",
args=[environment.api_key],
)
# Add an MV feature
mv_feature = Feature.objects.create(
name="mv_feature", project=project, type=MULTIVARIATE
)
MultivariateFeatureOption.objects.create(
feature=mv_feature,
default_percentage_allocation=10,
type="unicode",
string_value="foo",
)
# Add another feature
Feature.objects.create(name="another_feature", project=project)
# When
with django_assert_num_queries(6):
response = admin_client.get(base_url)
# Then
assert response.status_code == status.HTTP_200_OK
response_json = response.json()
assert response_json["count"] == 3
|
RecoTracker/MkFit/python/mkFitHitConverter_cfi.py | Purva-Chaudhari/cmssw | 852 | 11180361 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
from RecoTracker.MkFit.mkFitHitConverterDefault_cfi import mkFitHitConverterDefault as _mkFitHitConverterDefault
from RecoLocalTracker.SiStripClusterizer.SiStripClusterChargeCut_cfi import *
mkFitHitConverter = _mkFitHitConverterDefault.clone(
minGoodStripCharge = cms.PSet(
refToPSet_ = cms.string('SiStripClusterChargeCutLoose'))
)
|
h2o-py/tests/testdir_algos/isoforextended/pyunit_isoforextended_saveload.py | vishalbelsare/h2o-3 | 6,098 | 11180418 | from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.extended_isolation_forest import H2OExtendedIsolationForestEstimator
def extended_isolation_forest_save_and_load():
print("Extended Isolation Forest Save Load Test")
train = h2o.import_file(pyunit_utils.locate("smalldata/anomaly/single_blob.csv"))
eif_model = H2OExtendedIsolationForestEstimator(ntrees=7, seed=12, sample_size=5)
eif_model.train(training_frame=train)
anomaly_score = eif_model.predict(train)
anomaly = anomaly_score['anomaly_score'].as_data_frame(use_pandas=True)["anomaly_score"]
path = pyunit_utils.locate("results")
assert os.path.isdir(path), "Expected save directory {0} to exist, but it does not.".format(path)
model_path = h2o.save_model(eif_model, path=path, force=True)
assert os.path.isfile(model_path), "Expected load file {0} to exist, but it does not.".format(model_path)
reloaded = h2o.load_model(model_path)
anomaly_score_reloaded = reloaded.predict(train)
anomaly_reloaded = anomaly_score_reloaded['anomaly_score'].as_data_frame(use_pandas=True)["anomaly_score"]
assert isinstance(reloaded,
H2OExtendedIsolationForestEstimator), \
"Expected and H2OExtendedIsolationForestEstimator, but got {0}"\
.format(reloaded)
assert (anomaly[0] == anomaly_reloaded[0]), "Output is not the same after reload"
assert anomaly[5] == anomaly_reloaded[5], "Output is not the same after reload"
assert anomaly[33] == anomaly_reloaded[33], "Output is not the same after reload"
assert anomaly[256] == anomaly_reloaded[256], "Output is not the same after reload"
assert anomaly[499] == anomaly_reloaded[499], "Output is not the same after reload"
if __name__ == "__main__":
pyunit_utils.standalone_test(extended_isolation_forest_save_and_load)
else:
extended_isolation_forest_save_and_load()
|
examples/d2/white.py | manu-mannattil/nolitsa | 118 | 11180426 | <reponame>manu-mannattil/nolitsa
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""D2 for white noise.
D2 is (theoretically) equal to the embedding dimension for white noise.
"""
import numpy as np
import matplotlib.pyplot as plt
from nolitsa import d2, utils
x = np.random.random(5 * 1000)
dim = np.arange(1, 10 + 1)
tau = 1
plt.title('Local $D_2$ vs $r$ for white noise')
plt.xlabel(r'Distance $r$')
plt.ylabel(r'Local $D_2$')
for r, c in d2.c2_embed(x, tau=tau, dim=dim, window=2,
r=utils.gprange(0.001, 1.0, 100)):
plt.semilogx(r[1:-1], d2.d2(r, c, hwin=1), color='#4682B4')
plt.show()
|
mmdet/utils/general_utils.py | Yibin122/conditional-lane-detection | 232 | 11180447 | import os
import time
import numpy as np
import cv2
import mmcv
def getPathList(path, suffix='png'):
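    """Recursively collect all file paths under ``path`` whose names end with ``suffix``."""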
    if (path[-1] != '/') and (path[-1] != '\\'):
path = path + '/'
pathlist = list()
g = os.walk(path)
for p, d, filelist in g:
for filename in filelist:
if filename.endswith(suffix):
pathlist.append(os.path.join(p, filename))
return pathlist
def mkdir(path):
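    """Recursively create ``path`` and any missing parent directories (similar to ``os.makedirs``)."""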
if not os.path.isdir(path):
mkdir(os.path.split(path)[0])
else:
return
if not os.path.isdir(path):
os.mkdir(path)
def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
num_imgs = tensor.size(0)
mean = np.array(mean, dtype=np.float32)
std = np.array(std, dtype=np.float32)
imgs = []
for img_id in range(num_imgs):
img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0)
img = (mmcv.imdenormalize(
img, mean, std, to_bgr=to_rgb))
img = (np.clip(img * 255, a_min=0, a_max=255)).astype(np.uint8)
imgs.append(np.ascontiguousarray(img))
return imgs
def path_join(root, name):
if root == '':
return name
if name[0] == '/':
return os.path.join(root, name[1:])
else:
return os.path.join(root, name)
class Timer:
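    """Context manager that prints the wall-clock time spent in its block.

    ``msg`` must contain one %-style float placeholder. Illustrative usage
    (``do_work`` is a placeholder):

        with Timer('step took %.3f s'):
            do_work()
    """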
def __init__(self, msg):
self.msg = msg
self.start_time = None
def __enter__(self):
self.start_time = time.time()
def __exit__(self, exc_type, exc_value, exc_tb):
print(self.msg % (time.time() - self.start_time)) |
tool/taint_analysis/coretaint.py | cpbscholten/karonte | 294 | 11180452 | <reponame>cpbscholten/karonte<filename>tool/taint_analysis/coretaint.py
import os
import claripy
import logging
import random
import signal
from random import shuffle
import angr
from angr import BP, SimValueError
from angr.procedures.stubs.ReturnUnconstrained import ReturnUnconstrained
from taint_analysis.utils import *
logging.basicConfig()
log = logging.getLogger("CoreTaint")
log.setLevel("DEBUG")
GLOB_TAINT_DEP_KEY = 'taint_deps'
UNTAINT_DATA = 'untainted_data'
UNTAINTED_VARS = 'untainted_vars'
SEEN_MASTERS = 'seen_masters'
class MyFileHandler(object):
def __init__(self, filename, handler_factory, **kw):
kw['filename'] = filename
self._handler = handler_factory(**kw)
def __getattr__(self, n):
if hasattr(self._handler, n):
return getattr(self._handler, n)
raise AttributeError(n)
class TimeOutException(Exception):
def __init__(self, message):
super(TimeOutException, self).__init__(message)
class UnSATException(Exception):
def __init__(self, message):
super(UnSATException, self).__init__(message)
class CoreTaint:
"""
Perform a symbolic-execution-based taint analysis on a given binary to find whether
    there exists a tainted path between a source and a sink.
"""
def __init__(self, p, interfunction_level=0, log_path='/tmp/coretaint.out', exploration_strategy=None,
smart_call=True, follow_unsat=False, try_thumb=False, white_calls=[], black_calls=[],
not_follow_any_calls=False, default_log=True, exit_on_decode_error=True, concretization_strategy=None,
force_paths=False, reverse_sat=False, only_tracker=False, shuffle_sat=False,
taint_returns_unfollowed_calls=False, taint_arguments_unfollowed_calls=False, allow_untaint=True,
logger_obj=None):
"""
        Initialization function
:param p: angr project
:param interfunction_level: interfunction level
:param log_path: path where the analysis' log is created
:param smart_call: if True a call is followed only if at least one of its parameters is tainted
:param follow_unsat: if true unsat successors are also considered during path exploration. In this case
the collected constraints up to that point will be dropped.
:param try_thumb: try to force thumb mode if some decoding error occurred
:param white_calls: calls to follow in any case
:param default_log: log info by default
:param exit_on_decode_error: terminate the analysis in case of error
:param concretization_strategy: concretization strategy callback
:param force_paths: force a path to be followed even when some decode errors were found
        :param allow_untaint: allow untainting variables.
"""
global log
self._old_signal_handler = None
self._old_timer = 0
self._count_var = 0
self._use_smart_concretization = False
self._back_jumps = {}
self._N = 1
self._keep_run = True
self._timeout_triggered = False
self._timer = 0
self._force_exit_after = -1
self._p = p
self._taint_buf = "taint_buf"
self._taint_applied = False
self._taint_buf_size = 4096
self._bogus_return = 0x41414141
self._fully_taint_guard = []
self._white_calls = white_calls
self._black_calls = black_calls
self._taint_returns_unfollowed_calls = taint_returns_unfollowed_calls
self._taint_arguments_unfollowed_calls = taint_arguments_unfollowed_calls
self._allow_untaint = allow_untaint
self._not_follow_any_calls = not_follow_any_calls
self._reverse_sat = reverse_sat
self._shuffle_sat = shuffle_sat
self._exploration_strategy = self._base_exploration_strategy if \
exploration_strategy is None else exploration_strategy
self._only_tracker = only_tracker
self._try_to_avoid_z3 = 3
if exploration_strategy is not None and (shuffle_sat or reverse_sat):
log.warning("Exploration strategy takes precedence over state shuffling/reversing")
self._deref_taint_address = False
self._deref_instruction = None
self._deref_addr_expr = None
self._deref = (None, None)
self._old_deref = self._deref
self._old_deref_taint_address = self._deref_taint_address
self._old_deref_addr_expr = self._deref_addr_expr
self._interfunction_level = interfunction_level
self._smart_call = smart_call
self._follow_unsat = follow_unsat
self._concretizations = {}
self._summarized_f = {}
self._fp = open(log_path, 'w')
self._interesing_path = {'sink': [], 'deref': [], 'loop': []}
self._try_thumb = try_thumb
self._force_paths = force_paths
self._default_log = default_log
self._exit_on_decode_error = exit_on_decode_error
self._concretization_strategy = self._default_concretization_strategy if concretization_strategy is None \
else concretization_strategy
self._hooked_addrs = []
# stats
self._new_path = True
self._n_paths = 0
if logger_obj:
log = logger_obj
if type(log) == logging.Logger:
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fileh = MyFileHandler(log_path + '._log', logging.FileHandler)
fileh.setFormatter(formatter)
log.addHandler(fileh)
def triggered_to(self):
return self._timeout_triggered
def handler(self, _, frame):
"""
Timeout handler
:param _: signal number
:param frame: frame
:return:
"""
log.info(f"Timeout triggered, {str(self._force_exit_after)} left....")
self._keep_run = False
self._timeout_triggered = True
self._force_exit_after -= 1
signal.alarm(self._timer)
if self._force_exit_after <= 0 and not self._keep_run:
# raising an exception while the execution is in z3 might crash the program
if 'z3' in frame.f_code.co_filename:
log.info("Hard Timeout triggered, but we are in z3, trying again in 30 seconds")
signal.alarm(30)
else:
log.info(f"Hard Timeout triggered, {str(self._force_exit_after)} left....")
raise TimeOutException("Hard timeout triggered")
def _get_bb(self, addr):
"""
Get a basic block of an address
:param addr: address
:return: the basic block
"""
try:
bl = self._p.factory.block(addr)
except:
bl = None
if bl is None or bl.vex.jumpkind == 'Ijk_NoDecode':
try:
bl = self._p.factory.block(addr, thumb=True)
except:
bl = None
return bl
def _save_taint_flag(self):
"""
Save the tainting related flags
:return:
"""
self._old_deref = self._deref
self._old_deref_taint_address = self._deref_taint_address
self._old_deref_addr_expr = self._deref_addr_expr
def _restore_taint_flags(self):
"""
        Restore the tainting related flags
:return:
"""
self._deref = self._old_deref
self._deref_taint_address = self._old_deref_taint_address
self._deref_addr_expr = self._old_deref_addr_expr
@property
def n_paths(self):
return self._n_paths
@property
def bogus_return(self):
return self._bogus_return
@property
def taint_buf(self):
return self._taint_buf
@property
def taint_buf_size(self):
return self._taint_buf_size
@property
def taint_applied(self):
return self._taint_applied
@property
def p(self):
return self._p
def estimate_mem_buf_size(self, state, addr, max_size=None):
"""
Estimate the size allocated in a buffer
:param state: the current state
:param addr: addr of the buffer
:param max_size: the maximum size to load
:return: the estimated allocated size
"""
if not max_size:
max_size = self.taint_buf_size
try:
# estimate the size of the buffer by looking at the buffer contents in memory
temp_load = state.memory.load(addr, max_size)
if self._taint_buf in str(temp_load.args[0]):
# when there is only one thing to load
if isinstance(temp_load.args[0], str):
return temp_load.length
# tainted
size = 0
for arg in temp_load.args:
if self._taint_buf in str(arg):
size += arg.length
else:
break
else:
# not tainted
if isinstance(temp_load.args[0], (str, int)):
return temp_load.length
size = temp_load.args[0].length
if not size:
# TODO solve when there is a conditional in the data
log.error("Should debug. Encountered something in estimate buffer size that should not happen")
size = temp_load.length
return size
except Exception as e:
# The size may be too long and collide with the heap. Try a smaller size. Stop when size smaller than 1
# This is a bug in angr that may be fixed at a later time, since there are not enough stack pages allocated
new_max_size = int(max_size / 2)
if new_max_size > 1:
return self.estimate_mem_buf_size(state, addr, new_max_size)
return 1
def safe_load(self, path, addr, size=None, unconstrained=False, estimate_size=False):
"""
Loads bytes from memory, saving and restoring taint info
:param path: path
:param addr: address
:return: the content in memory at address addr
"""
self._save_taint_flag()
state = path.active[0] if not unconstrained else path.unconstrained[0]
if not size and not estimate_size:
size = self._p.arch.bytes
elif not size and estimate_size:
size = self.estimate_mem_buf_size(state, addr) / 8
# convert to int to prevent errors, since it requires an int not float
size = int(size)
mem_cnt = state.memory.load(addr, size)
self._restore_taint_flags()
return mem_cnt
def safe_store(self, path, addr, thing):
"""
Stores bytes in memory, saving and restoring taint info
:param path: path
:param addr: address
:param thing: thing to store
:return:
"""
self._save_taint_flag()
path.active[0].memory.store(addr, thing)
self._restore_taint_flags()
def get_sym_val(self, **args):
return self._get_sym_val(**args)
def _set_deref_bounds(self, ast_node):
"""
        Check an ast node and, if it contains a dereferenced address, set
        its bounds
:param ast_node: ast node
:return: None
"""
lb = self._deref[0]
ub = self._deref[1]
if hasattr(ast_node, 'op') and ast_node.op == 'Extract' \
and self.is_tainted(ast_node.args[2]):
m = min(ast_node.args[0], ast_node.args[1])
lb = m if lb is None or m < lb else lb
m = max(ast_node.args[0], ast_node.args[1])
ub = m if ub is None or m > ub else ub
self._deref = (lb, ub)
elif hasattr(ast_node, 'args'):
for a in ast_node.args:
self._set_deref_bounds(a)
elif self.is_tainted(ast_node):
self._deref = (0, 0)
def addr_concrete_after(self, state):
"""
Hook for address concretization
:param state: Program state
"""
addr_expr = state.inspect.address_concretization_expr
if self._use_smart_concretization:
state.inspect.address_concretization_result = [self._get_target_concretization(addr_expr, state)]
else:
if state.inspect.address_concretization_result is None:
# current angr strategy didn't give result, trying next one
return None
# a tainted buffer's location is used as address
if self.is_tainted(addr_expr, state=state):
self._set_deref_bounds(addr_expr)
self._deref_taint_address = True
self._deref_addr_expr = addr_expr
self._deref_instruction = state.ip.args[0]
if state.inspect.address_concretization_action == 'load':
# new fresh var
name = f"cnt_pt_by({self._taint_buf}[{str(self._deref[0])}, {str(self._deref[1])}])"
for conc_addr in state.inspect.address_concretization_result:
old_val = state.memory.load(conc_addr, self._p.arch.bytes)
# we do not apply any extra constraints if there is already taint at this location
if self.is_tainted(old_val):
continue
if self._only_tracker:
try:
state.solver.eval_atleast(old_val, 2)
except SimValueError:
# todo, find real bitsize
var = self._get_sym_val(name=name, bits=self._p.arch.bits)
state.memory.store(conc_addr, var)
val = state.solver.eval(old_val)
state.add_constraints(var == val)
def _default_concretization_strategy(self, state, cnt):
"""
Default concretization strategy
:param state: angr state
:param cnt: variable to concretize
:return: concretization value for the variable
"""
extra_constraints = state.inspect.added_constraints
if not extra_constraints:
extra_constraints = tuple()
concs = state.solver.eval_upto(cnt, 50, extra_constraints=extra_constraints)
return random.choice(concs)
def _get_target_concretization(self, var, state):
"""
Concretization must be done carefully in order to perform
a precise taint analysis. We concretize according the following
strategy:
* every symbolic leaf of an ast node is concretized to unique value, according on its name.
In this way we obtain the following advantages:
a = get_pts();
b = a
c = a + 2
d = b + 1 + 1
d = get_pts()
conc(a) = conc(b)
conc(c) = conc(d)
conc(d) != any other concretizations
:param var: ast node
:param state: current state
:return: concretization value
"""
def get_key_cnt(x):
            # angr by default creates a unique id for every new symbolic variable.
# as in karonte we often have to copy the state, step and check some
# quantities before step() with the current state, two identical variables might assume
# two different names. Therefore, we should not consider the unique _id_ added to symbolic variables
# created by angr
ret = str(x)
if '_' in str(x) and not self.is_tainted(x):
splits = str(x).split('_')
idx = splits[-2]
if not idx.isdigit():
log.error(f"get_key_cnt: Symbolic ID parsing failed, using the whole id: {ret}")
return ret
ret = '_'.join(splits[:-2]) + '_'
ret += '_'.join(splits[-1:])
return ret
        # check if unconstrained
state_cp = state.copy()
se = state_cp.solver
leafs = [l for l in var.recursive_leaf_asts]
if not leafs:
conc = self._concretization_strategy(state_cp, var)
if not se.solution(var, conc):
conc = se.eval(var)
key_cnt = get_key_cnt(var)
self._concretizations[key_cnt] = conc
return conc
# todo why is this constraining a copied state?
for cnt in leafs:
key_cnt = get_key_cnt(cnt)
# concretize all unconstrained children
if cnt.symbolic:
# first check whether the value is already constrained
if key_cnt in self._concretizations.keys():
conc = self._concretizations[key_cnt]
if state_cp.solver.solution(cnt, conc):
state_cp.add_constraints(cnt == conc)
continue
conc = self._concretization_strategy(state_cp, cnt)
self._concretizations[key_cnt] = conc
state_cp.add_constraints(cnt == conc)
val = state_cp.solver.eval(var)
return val
def is_tainted(self, var, path=None, state=None, unconstrained=False):
"""
Checks if a variable is tainted
:param var: variable
:param path: angr path
:param state: state
:param unconstrained: check unconstrained states
:return:
"""
def is_untaint_constraint_present(v, un_vars):
for u in un_vars:
# get argument name
if v.args[0] in u:
# variable is untainted
return True
# no untaint found, var is tainted!
return False
# Nothing is tainted
if self._taint_buf not in str(var):
return False
#
# something is tainted
#
if not self._allow_untaint or (not path and not state):
return True
        # get constraints
if path:
state = path.active[0] if not unconstrained else path.unconstrained[0]
untaint_var_strs = state.globals[UNTAINT_DATA][UNTAINTED_VARS]
if not untaint_var_strs:
return True
taint_leafs = list(set([l for l in var.recursive_leaf_asts if self._taint_buf in str(l)]))
taints = set()
for l in taint_leafs:
if l in taints:
continue
# search an untaint constraint for this taint variable
if not is_untaint_constraint_present(l, untaint_var_strs):
return True
taints.add(l)
return False
def add_taint_glob_dep(self, master, slave, path):
"""
Add a taint dependency: if master gets untainted, slave should be untainted
:param master: master expression
:param slave: slave expression
:param path: path
:return:
"""
if not self.is_tainted(master):
return
leafs = list(set([l for l in master.recursive_leaf_asts if self.is_tainted(l)]))
key = tuple(map(str, leafs))
if key not in self.get_state(path).globals[GLOB_TAINT_DEP_KEY]:
self.get_state(path).globals[GLOB_TAINT_DEP_KEY][key] = []
self.get_state(path).globals[GLOB_TAINT_DEP_KEY][key].append(slave)
def _do_recursive_untaint_core(self, dst, path):
"""
Given an expression to untaint, we untaint every single tainted variable in it.
        E.g., given (taint_x + taint_y) to untaint, both variables get untainted as
        they can no longer assume arbitrary values down this path.
:param dst: expression to untaint
:param path: angr path
:return:
"""
if not self._allow_untaint:
return
state = self.get_state(path)
leafs = list(set([l for l in dst.recursive_leaf_asts if self.is_tainted(l)]))
# then we use the collected untainted variables
# and check whether we should untaint some other variables
state.globals[UNTAINT_DATA][UNTAINTED_VARS] += map(str, leafs)
deps = dict(state.globals[GLOB_TAINT_DEP_KEY])
for master, slave in deps.items():
# if not already untainted, let's consider it
if master not in state.globals[UNTAINT_DATA][SEEN_MASTERS]:
untainted_vars = set(state.globals[UNTAINT_DATA][UNTAINTED_VARS])
set_master = set(master)
# we can not untaint it
if set_master.intersection(untainted_vars) == set_master:
state.globals[UNTAINT_DATA][SEEN_MASTERS].append(master)
for entry in deps[master]:
self._do_recursive_untaint_core(entry, path)
# restart!
continue
def do_recursive_untaint(self, dst, path):
"""
Perform the untaint operation (see do_recursive_untaint_core)
:param dst: variable to untaint
:param path: angr path
:return:
"""
return self._do_recursive_untaint_core(dst, path)
def apply_taint(self, current_path, addr, taint_id, bit_size=None):
"""
Applies the taint to an address addr
:param current_path: angr current path
:param addr: address to taint
:param taint_id: taint identification
        :param bit_size: number of bits
:return: tainted variable
"""
self._save_taint_flag()
bit_size = bit_size if bit_size else self.estimate_mem_buf_size(self.get_state(current_path), addr)
# todo check endianness, since now it is always LE
t = self._get_sym_val(name=self._taint_buf + '_' + taint_id + '_', bits=bit_size).reversed
self.get_state(current_path).memory.store(addr, t)
self._restore_taint_flags()
self._taint_applied = True
return t
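
    # Illustrative call (path, address and taint id are placeholders):
    #   t = core_taint.apply_taint(current_path, buf_addr, 'user_input')
    # 't' is the symbolic taint variable that is now stored at buf_addr.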
def _get_sym_val(self, name='x_', bits=None, inc=True, explicit=False):
"""
Creates a fresh symbolic variable
:param name: variable name
:param bits: number of bits
:param inc: increment the global counter
:param explicit: name should be exactly as reported (True, False)
:return: a symbolic variable
"""
if bits is None:
bits = self._p.arch.bits
if explicit:
var = claripy.BVS(name=name, size=bits, explicit_name=True)
else:
var = claripy.BVS(name=(name + '_' + str(self._count_var) + '_' + str(self._p.arch.bits)), size=bits,
explicit_name=True)
if inc:
self._count_var += 1
return var
def get_addr(self, path):
"""
Gets the path current address
:param path: angr path
:return: path current address
"""
return path.active[0].ip.args[0]
def get_state(self, path):
"""
Gets the state from a path
:param path: path
:return: angr state
"""
return path.active[0]
def is_or_points_to_tainted_data(self, x, path, unconstrained=False):
"""
Checks if a symbolic variable is or points to tainted data
:param x: variable
:param path: angr current path
:param unconstrained: consider unconstrained data
:return:
"""
return self.is_tainted(x, path=path, unconstrained=unconstrained) or \
self.is_tainted(self.safe_load(path, x, unconstrained=unconstrained), path=path,
unconstrained=unconstrained)
def _is_summarized(self, prev_path, suc_path, *_):
"""
Check if function is summarized, and execute it if so.
:param prev_path: previous path
:param suc_path: successor path
:return:
"""
# first check if function is summarized
addr = self.get_addr(suc_path)
if self._summarized_f:
for s_addr in self._summarized_f.keys():
if addr == s_addr:
# execute and store possible new symbolic variables (get_env etc)
self._summarized_f[s_addr](self, prev_path, suc_path)
return True
return False
def _follow_call(self, prev_path, suc_path, current_depth):
"""
        Checks whether a call should be followed: it is followed if any of its parameters is tainted
        and the current depth of the transitive closure allows it; otherwise it is not.
:param prev_path: previous path
:param suc_path: successive path
:param current_depth: current depth of transitive closure
:return: True if call should be followed, false otherwise
"""
if self._not_follow_any_calls:
return False
# first check if function is summarized
addr = self.get_addr(suc_path)
if addr in self._black_calls:
return False
# check if call falls within bound binary
if addr > self._p.loader.max_addr or addr < self._p.loader.min_addr:
return False
# if the function is summarized by angr, we follow it
if addr in self._summarized_f.keys():
            # consider also the next addr in case the current one is a trampoline (e.g., plt)
trp = suc_path.copy(deep=True)
trp.step()
trp_addr = self.get_addr(trp)
if self._p.is_hooked(addr) or self._p.is_hooked(trp_addr):
return True
# remove the copied state to prevent state explosion
for state in trp.active + trp.unconstrained:
state.history.trim()
state.downsize()
state.release_plugin('solver')
if addr in self._white_calls:
return True
if current_depth <= 0:
return False
if not self._smart_call:
return True
if not self._taint_applied:
return False
bl = self._get_bb(self.get_addr(prev_path))
puts = [s for s in bl.vex.statements if s.tag == 'Ist_Put']
expected = 0
index = 0
set_regs = []
# type of regs we are looking for
reg_ty = 'r' if self._p.arch.bits == 32 else 'x'
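        # scan the caller's Put statements for consecutive argument registers
        # (r0, r1, ... on 32-bit, x0, x1, ... on 64-bit) set before the call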
while True:
if index >= len(puts):
break
p = puts[index]
if self._p.arch.register_names[p.offset] == reg_ty + str(expected):
set_regs.append(reg_ty + str(expected))
expected += 1
index = 0
continue
index += 1
self._save_taint_flag()
for r in set_regs:
reg_cnt = getattr(self.get_state(suc_path).regs, r)
# check if it is pointing to a tainted location
tmp_s = self.get_state(suc_path)
try:
# estimate the size first, so we are not loading to much data. limit it at the taint_buf_size
size = min(self.estimate_mem_buf_size(tmp_s, reg_cnt), self.taint_buf_size)
mem_cnt = tmp_s.memory.load(reg_cnt, size)
except TimeOutException as t:
raise t
except KeyError as e:
# state is unconstrained
log.warning("Tried to dereference a non pointer!")
continue
# we might have dereferenced wrongly a tainted variable during the tests before
if (self.is_tainted(reg_cnt) or self.is_tainted(mem_cnt)) and current_depth > 0:
self._restore_taint_flags()
return True
self._restore_taint_flags()
return False
def _follow_back_jump(self, current_path, next_path, guards_info):
"""
Check if a back jump (probably a loop) should be followed.
:param current_path: current path
:param next_path: next path
:param guards_info: guards information
:return: True if should back jump, False otherwise
"""
key = hash(''.join(sorted(list(set([x[0] for x in guards_info])))))
bj = (key, self.get_addr(next_path), self.get_addr(current_path))
if bj not in self._back_jumps.keys():
self._back_jumps[bj] = 1
elif self._back_jumps[bj] > self._N:
# we do not want to follow the same back jump infinite times
return False
else:
self._back_jumps[bj] += 1
return True
@staticmethod
def _check_sat_state(current_path):
"""
Check whether the state is SAT
:param current_path: angr current path
        :return: True if the state is SAT, False otherwise
"""
return current_path.active[0].solver.satisfiable()
def _drop_constraints(self, path):
"""
Drop all the constraints within the symbolic engine
:param path: angr current path
:return: None
"""
self.get_state(path).release_plugin('solver')
self.get_state(path).downsize()
self.get_state(path).history.trim()
    # FIXME: change offset according to arch.
def _next_inst(self, bl):
"""
Get next instruction (sometimes angr messes up)
:param bl: basic block
:return:
"""
return bl.instruction_addrs[-1] + 4
def _base_exploration_strategy(self, _, next_states):
"""
Base exploration strategy
:param current_path: angr current path
:param next_states: next states
:return:
"""
if self._reverse_sat:
next_states.reverse()
elif self._shuffle_sat:
shuffle(next_states)
return next_states
def _flat_explore(self, current_path, check_path_fun, guards_info, current_depth, **kwargs):
"""
Performs the symbolic-based exploration
:param current_path: current path
:param check_path_fun: function to call for every block in the path
:param guards_info: current info about the guards in the current path
:param kwargs: additional arguments to pass to check_path_fun
:return: the tainted path between the source and the sink, if any
"""
if not self._keep_run:
log.debug("Backtracking due to stop")
return
current_path_addr = self.get_addr(current_path)
log.debug(f"{os.path.basename(self._p.filename)}: Analyzing block {hex(current_path_addr)}")
if not CoreTaint._check_sat_state(current_path) and not self._timeout_triggered:
log.error("State got messed up!")
raise UnSATException("State became UNSAT")
# check whether we reached a sink
# todo add back in
try:
check_path_fun(current_path, guards_info, current_depth, **kwargs)
except Exception as e:
if not self._keep_run:
return
log.error(f"'Function check path errored out: {str(e)}")
try:
succ_path = current_path.copy().step()
except Exception as e:
log.error(f"ERROR: {str(e)}")
return
# try thumb
if succ_path and succ_path.errored and self._try_thumb and not self._force_paths:
succ_path = current_path.copy().step(thumb=True)
if succ_path and succ_path.errored and self._try_thumb and not self._force_paths:
if self._exit_on_decode_error:
self._keep_run = False
return
#
succ_states_unsat = succ_path.unsat if self._follow_unsat else []
succ_states_sat = succ_path.active
if succ_path.deadended and not succ_states_sat and not succ_states_unsat:
log.debug("Backtracking from dead path")
return
if not succ_states_sat:
# check if it was un unconstrained call.
# sometimes angr fucks it up
bl = self._get_bb(current_path_addr)
if not bl:
return
if bl.vex.jumpkind == 'Ijk_Call':
                # create a fake successor
# which should have been created
# before.
if not succ_path.unconstrained:
return
log.error("Unconstrained call. Fix This. Not ported yet :-(")
# raise NotImplementedError("Unconstrained call. Fix This")
# FIXME: I should use get_below_block
# but as of now I don;t want to use CFG
# unc_state = succ_path.unconstrained[0]
# ret_addr = self._next_inst(bl)
# # only do this when there is a link register in the current arch
# if link_regs[self._p.arch.name]:
# link_reg = self._p.arch.register_names[link_regs[self._p.arch.name]]
# ret_func = getattr(self.get_state(current_path).regs, link_reg)
# tmp_path = self._set_fake_ret_succ(current_path, unc_state, ret_addr, ret_func)
# else:
# tmp_path = self._set_fake_ret_succ(current_path, unc_state)
# succ_states_sat = [self.get_state(tmp_path)]
# register sat and unsat information so that later we can drop the constraints
for s in succ_states_sat:
s.sat = True
for s in succ_states_unsat:
s.sat = False
# collect and prepare the successors to be analyzed
succ_states_sat = self._exploration_strategy(current_path, succ_states_sat)
succ_states = succ_states_sat + succ_states_unsat
for next_state in succ_states:
if self._new_path:
self._n_paths += 1
self._new_path = False
if hasattr(next_state.ip, 'symbolic') and next_state.ip.symbolic:
if next_state.sat:
log.error("Next state UNSAT")
log.warning("Got a symbolic IP, perhaps a non-handled switch statement? FIX ME... ")
continue
# create a new path state with only the next state to continue from
next_path = self._p.factory.simgr(next_state.copy(), save_unconstrained=True, save_unsat=True)
if self._p.is_hooked(next_state.addr) and next_state.addr in self._hooked_addrs:
self._p.unhook(next_state.addr)
self._hooked_addrs.remove(next_state.addr)
if not next_state.solver.satisfiable():
# unsat successors, drop the constraints and continue with other states
self._drop_constraints(next_path)
continue
next_depth = current_depth
# First, let's see if we can follow the calls
try:
if self.get_state(next_path).history.jumpkind == 'Ijk_Call':
if not self._is_summarized(current_path, next_path, current_depth):
if not self._follow_call(current_path, next_path, current_depth):
# we add a hook with the return unconstrained on the call
self._p.hook(next_state.addr, ReturnUnconstrained())
self._hooked_addrs.append(next_state.addr)
else:
log.debug(f"Following function call to {hex(self.get_addr(next_path))}")
next_depth = current_depth - 1
# todo add back in
except Exception as e:
log.error(f"Following call coretaint: {str(e)}")
self._drop_constraints(next_path)
continue
try:
if self.get_state(next_path).history.jumpkind == 'Ijk_Ret':
next_depth = current_depth + 1
except:
self._drop_constraints(next_path)
continue
# we have a back jump
if self.get_state(next_path).history.jumpkind == 'Ijk_Boring' and \
self.get_addr(next_path) <= self.get_addr(current_path) and \
not self._follow_back_jump(current_path, next_path, guards_info):
log.debug("breaking loop")
self._new_path = True
self._drop_constraints(next_path)
continue
# the successor leads out of the function, we do not want to follow it
if self.get_addr(next_path) == self._bogus_return:
log.debug("hit a return")
self._new_path = True
self._drop_constraints(next_path)
continue
# save the info about the guards of this path
new_guards_info = list(guards_info)
current_guards = [g for g in self.get_state(next_path).history.jump_guards]
if current_guards and len(new_guards_info) < len(current_guards):
new_guards_info.append([hex(self.get_addr(current_path)), current_guards[-1]])
# next step!
self._flat_explore(next_path, check_path_fun, new_guards_info, next_depth, **kwargs)
log.debug(f"Back to block {hex(self.get_addr(current_path))}")
self._new_path = True
# information about this state is not needed anymore. Drop constraints to free up lots of memory
self._drop_constraints(current_path)
log.debug("Backtracking")
def set_project(self, p):
"""
Set the project
:param p: angr project
:return:
"""
self._p = p
def stop_run(self):
"""
Stop the taint analysis
:return: None
"""
self._keep_run = False
def flat_explore(self, state, check_path_fun, guards_info, force_thumb=False, **kwargs):
"""
Run a symbolic-based exploration
:param state: state
:param check_path_fun: function to call for each visited basic block
:param guards_info: guards ITE info
:param force_thumb: start with thumb mode ON
:param kwargs: kwargs
:return: None
"""
self._keep_run = True
initial_path = self._p.factory.simgr(state, save_unconstrained=True, save_unsat=True)
current_depth = self._interfunction_level
if force_thumb:
# set thumb mode
initial_path = initial_path.step(thumb=True)[0]
self._flat_explore(initial_path, check_path_fun, guards_info, current_depth, **kwargs)
def _init_bss(self, state):
"""
        Initialize the bss section with symbolic data (might be slow!).
:param state: angr state
:return:
"""
bss = [s for s in self._p.loader.main_object.sections if s.name == '.bss']
if not bss:
return
bss = bss[0]
min_addr = bss.min_addr
max_addr = bss.max_addr
for a in range(min_addr, max_addr + 1):
var = self._get_sym_val(name="bss_", bits=8)
state.memory.store(a, var)
def set_alarm(self, timer, n_tries=0):
"""
Set the alarm to interrupt the analysis
:param timer: timer
:param n_tries: number of tries to stop the analysis gracefully
        :return: None
"""
if self._old_signal_handler is None:
handler = signal.getsignal(signal.SIGALRM)
assert handler != signal.SIG_IGN, "The coretaint alarm handler should never be SIG_IGN"
self._old_signal_handler = handler
# TODO save the time left by the previous analysis
# and restore it
signal.signal(signal.SIGALRM, self.handler)
self._old_timer = signal.alarm(timer)
self._force_exit_after = n_tries
self._timer = timer
def unset_alarm(self):
signal.alarm(0)
def restore_signal_handler(self):
"""
Restore the signal handler
:return: None
"""
if self._old_signal_handler is not None:
signal.signal(signal.SIGALRM, self._old_signal_handler)
if self._old_timer != 0:
# someone else was looking at this time
# let's restore it
signal.alarm(self._old_timer)
def run(self, state, sinks_info, sources_info, summarized_f=None, init_bss=True,
check_func=None, force_thumb=False, use_smart_concretization=True):
"""
Run the static taint engine
:param state: initial state
:param sinks_info: sinks info
:param sources_info: sources info
:param summarized_f: function summaries
        :param init_bss: initialize bss flag
:param check_func: function to execute for each explored basic block
:param force_thumb: start analysis in thumb mode
:param use_smart_concretization: use smart concretization attempts to decrease imprecision due to spurious
pointer aliasing.
:return: None
"""
def null_fun(*_, **__):
return None
if summarized_f is None:
summarized_f = {}
self._use_smart_concretization = use_smart_concretization
state.inspect.add_breakpoint(
'address_concretization',
BP(when=angr.BP_AFTER, action=self.addr_concrete_after)
)
state.globals[GLOB_TAINT_DEP_KEY] = {}
state.globals[UNTAINT_DATA] = {UNTAINTED_VARS: [], SEEN_MASTERS: []}
self._count_var = 0
self._n_paths = 0
self._new_path = True
self._back_jumps = {}
self._keep_run = True
self._taint_applied = False
self._fully_taint_guard = []
self._deref_taint_address = False
self._deref_addr_expr = None
self._deref = (None, None)
self._old_deref = self._deref
self._old_deref_taint_address = self._deref_taint_address
self._old_deref_addr_expr = self._deref_addr_expr
self._concretizations = {}
self._summarized_f = summarized_f
self._timeout_triggered = False
check_func = null_fun if check_func is None else check_func
if init_bss:
log.info("init .bss")
self._init_bss(state)
try:
self.flat_explore(state, check_func, [], force_thumb=force_thumb, sinks_info=sinks_info,
sources_info=sources_info)
except TimeOutException:
log.warning("Hard timeout triggered")
if self._timeout_triggered:
log.debug("Timeout triggered")
|
compiler_opt/rl/feature_ops.py | google/ml-compiler-opt | 130 | 11180476 | <reponame>google/ml-compiler-opt
# coding=utf-8
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operations to transform features (observations)."""
import os
import re
from typing import List, Callable, Optional
import numpy as np
import tensorflow.compat.v2 as tf
from tf_agents.typing import types
def build_quantile_map(quantile_file_dir: str):
"""build feature quantile map by reading from files in quantile_file_dir."""
quantile_map = {}
pattern = os.path.join(quantile_file_dir, '(.*).buckets')
for quantile_file_path in tf.io.gfile.glob(
os.path.join(quantile_file_dir, '*.buckets')):
m = re.fullmatch(pattern, quantile_file_path)
assert m
feature_name = m.group(1)
with tf.io.gfile.GFile(quantile_file_path, 'r') as quantile_file:
raw_quantiles = [float(x) for x in quantile_file]
quantile_map[feature_name] = raw_quantiles
return quantile_map
def discard_fn(obs: types.Float):
"""discard the input feature by setting it to 0."""
return tf.zeros(shape=obs.shape + [0], dtype=tf.float32)
def identity_fn(obs: types.Float):
"""Return the same value with expanding the last dimension."""
return tf.cast(tf.expand_dims(obs, -1), tf.float32)
def get_normalize_fn(quantile: List[float],
with_sqrt: bool,
with_z_score_normalization: bool,
eps: float = 1e-8,
preprocessing_fn: Optional[Callable[[types.Tensor],
types.Float]] = None):
"""Return a normalization function to normalize the input feature."""
if not preprocessing_fn:
preprocessing_fn = lambda x: x
processed_quantile = [preprocessing_fn(x) for x in quantile]
mean = np.mean(processed_quantile)
std = np.std(processed_quantile)
def normalize(obs: types.Float):
obs = tf.expand_dims(obs, -1)
x = tf.cast(
tf.raw_ops.Bucketize(input=obs, boundaries=quantile),
tf.float32) / len(quantile)
features = [x, x * x]
if with_sqrt:
features.append(tf.sqrt(x))
if with_z_score_normalization:
y = preprocessing_fn(tf.cast(obs, tf.float32))
y = (y - mean) / (std + eps)
features.append(y)
return tf.concat(features, axis=-1)
return normalize
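# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# The directory path and feature key below are placeholders; the call pattern only
# uses the functions defined above.
#
#   quantile_map = build_quantile_map("/path/to/quantiles")            # hypothetical dir
#   normalize = get_normalize_fn(quantile=quantile_map["edge_count"],  # assumed key
#                                with_sqrt=True,
#                                with_z_score_normalization=False)
#   normalized = normalize(tf.constant([3.0, 17.0]))  # -> shape [2, 3]: x, x^2, sqrt(x)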
|
hv6/hv6/spec/kernel/spec/helpers.py | ProKil/OS2018spring-projects-g10 | 132 | 11180481 | <reponame>ProKil/OS2018spring-projects-g10
#
# Copyright 2017 Hyperkernel Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import z3
from libirpy import util
import hv6py.kernel.spec.datatypes as dt
__all__ = ['is_pid_bounded', 'is_pid_valid', 'is_pn_valid', 'is_dmapn_valid',
'is_fn_valid', 'is_fd_valid', 'is_pcipn_valid', 'is_page_table_type',
'is_iommu_page_table_type', 'is_status_live', 'is_va_valid',
'get_sub_type', 'get_iommu_sub_type', 'pn_has_owner_and_type',
'pgentry2pfn', 'proc_field_equiv', 'page_walk']
def is_pid_bounded(pid):
return z3.ULT(pid, dt.NPROC)
def is_pid_valid(pid):
return z3.And(pid > 0, pid < dt.NPROC)
def is_pn_valid(pn):
return z3.ULT(pn, dt.NPAGE)
def is_dmapn_valid(pn):
return z3.ULT(pn, dt.NDMAPAGE)
def is_pcipn_valid(pci):
return z3.ULT(pci, dt.NPCIPAGE)
def is_fn_valid(fn):
return z3.And(z3.UGT(fn, 0), z3.ULT(fn, dt.NFILE))
def is_fd_valid(fd):
return z3.And(fd >= 0, fd < dt.NOFILE)
def is_intremap_valid(index):
return z3.ULT(index, dt.NINTREMAP)
def is_page_table_type(type):
return util.In(type, [
dt.page_type.PAGE_TYPE_X86_PML4,
dt.page_type.PAGE_TYPE_X86_PDPT,
dt.page_type.PAGE_TYPE_X86_PD,
dt.page_type.PAGE_TYPE_X86_PT,
])
def is_iommu_page_table_type(type):
return util.In(type, [
dt.page_type.PAGE_TYPE_IOMMU_PML4,
dt.page_type.PAGE_TYPE_IOMMU_PDPT,
dt.page_type.PAGE_TYPE_IOMMU_PD,
dt.page_type.PAGE_TYPE_IOMMU_PT,
])
def is_status_live(status):
return util.In(status, [
dt.proc_state.PROC_EMBRYO,
dt.proc_state.PROC_RUNNABLE,
dt.proc_state.PROC_RUNNING,
dt.proc_state.PROC_SLEEPING,
])
def is_va_valid(va):
return z3.And(*[z3.ULT(idx, 512) for idx in va])
def get_sub_type(type):
return util.If(type == dt.page_type.PAGE_TYPE_X86_PML4, dt.page_type.PAGE_TYPE_X86_PDPT,
util.If(type == dt.page_type.PAGE_TYPE_X86_PDPT, dt.page_type.PAGE_TYPE_X86_PD,
util.If(type == dt.page_type.PAGE_TYPE_X86_PD, dt.page_type.PAGE_TYPE_X86_PT,
util.If(type == dt.page_type.PAGE_TYPE_X86_PT, dt.page_type.PAGE_TYPE_FRAME,
util.FreshBitVec('invalid', dt.page_type.PAGE_TYPE_FRAME.size())))))
def get_iommu_sub_type(type):
return util.If(type == dt.page_type.PAGE_TYPE_IOMMU_PML4, dt.page_type.PAGE_TYPE_IOMMU_PDPT,
util.If(type == dt.page_type.PAGE_TYPE_IOMMU_PDPT, dt.page_type.PAGE_TYPE_IOMMU_PD,
util.If(type == dt.page_type.PAGE_TYPE_IOMMU_PD, dt.page_type.PAGE_TYPE_IOMMU_PT,
util.If(type == dt.page_type.PAGE_TYPE_IOMMU_PT, dt.page_type.PAGE_TYPE_IOMMU_FRAME,
util.FreshBitVec('invalid', dt.page_type.PAGE_TYPE_IOMMU_FRAME.size())))))
def pn_has_owner_and_type(ctx, pn, pid, type):
return z3.And(is_pn_valid(pn),
util.global_field_element(
ctx, '@page_desc_table', 'pid', pn) == pid,
util.global_field_element(ctx, '@page_desc_table', 'type', pn) == type)
def pgentry2pfn(ks, off, perm, type):
res = util.i64(0)
res = util.If(type == dt.PGTYPE_PCIPAGE, util.i64(dt.PCI_START), res)
res = util.If(type == dt.PGTYPE_IOMMU_FRAME, ks.dmapages_ptr_to_int, res)
res = util.If(type == dt.PGTYPE_DEVICES, ks.devices_ptr_to_int, res)
res = util.If(type == dt.PGTYPE_FILE_TABLE, ks.file_table_ptr_to_int, res)
res = util.If(type == dt.PGTYPE_PAGE_DESC, ks.page_desc_table_ptr_to_int, res)
res = util.If(type == dt.PGTYPE_PROC, ks.proc_table_ptr_to_int, res)
res = util.If(type == dt.PGTYPE_PAGE, ks.pages_ptr_to_int, res)
return ((z3.UDiv(res, util.i64(dt.PAGE_SIZE)) + off) << dt.PTE_PFN_SHIFT) | perm
def proc_field_equiv(conj, pid, ctx, kernelstate, field_name):
conj.append(z3.ForAll([pid], z3.Implies(is_pid_valid(pid),
util.global_field_element(ctx, '@proc_table', field_name, pid) ==
getattr(kernelstate.procs[pid], field_name))))
def page_walk(state, pml4, idx1, idx2, idx3, idx4):
pdpt = state.pages[pml4].pgtable_pn(idx1)
pd = state.pages[pdpt].pgtable_pn(idx2)
pt = state.pages[pd].pgtable_pn(idx3)
frame = state.pages[pt].pgtable_pn(idx4)
present = z3.And(
# P bit is set on each level down.
dt.has_bit(state.pages[pml4].data(idx1), dt.PTE_P),
dt.has_bit(state.pages[pdpt].data(idx2), dt.PTE_P),
dt.has_bit(state.pages[pd].data(idx3), dt.PTE_P),
dt.has_bit(state.pages[pt].data(idx4), dt.PTE_P),
# The last level is of type page
state.pages[pt].pgtable_type(idx4) == dt.PGTYPE_PAGE)
writable = z3.And(
# W bit is set on each level down.
dt.has_bit(state.pages[pml4].data(idx1), dt.PTE_W),
dt.has_bit(state.pages[pdpt].data(idx2), dt.PTE_W),
dt.has_bit(state.pages[pd].data(idx3), dt.PTE_W),
dt.has_bit(state.pages[pt].data(idx4), dt.PTE_W),
# The last level is of type page
state.pages[pt].pgtable_type(idx4) == dt.PGTYPE_PAGE
)
return frame, writable, present
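# --- Hedged illustration (added; not part of the original module) ---
# page_walk conjoins per-level present/writable bit checks; a self-contained z3
# analogue of that bit-test idiom, independent of the hv6 datatypes:
#
#   import z3
#   entry = z3.BitVec('entry', 64)
#   PTE_P, PTE_W = 1 << 0, 1 << 1        # assumed bit positions (x86 convention)
#   z3.solve(z3.And(entry & PTE_P != 0, entry & PTE_W != 0))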
|
tests/test_resource_tracker/test_api/test_resource_pool_api_views/test_rp_attribute_definition_api_views/test_delete.py | LaudateCorpus1/squest | 112 | 11180494 | <filename>tests/test_resource_tracker/test_api/test_resource_pool_api_views/test_rp_attribute_definition_api_views/test_delete.py
from rest_framework import status
from rest_framework.reverse import reverse
from resource_tracker.models import ResourcePoolAttributeDefinition
from tests.test_resource_tracker.test_api.base_test_api import BaseTestAPI
class TestResourcePoolAttributeDefinitionDelete(BaseTestAPI):
def setUp(self):
super(TestResourcePoolAttributeDefinitionDelete, self).setUp()
self.to_be_deleted_id = self.rp_vcenter_vcpu_attribute.id
self.url = reverse('api_resource_pool_attribute_definition_retrieve_update_delete',
args=[self.rp_vcenter.id,
self.rp_vcenter_vcpu_attribute.id])
def test_attribute_definition_delete(self):
self.assertTrue(ResourcePoolAttributeDefinition.objects.filter(id=self.to_be_deleted_id).exists())
response = self.client.delete(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertFalse(ResourcePoolAttributeDefinition.objects.filter(id=self.to_be_deleted_id).exists())
def test_cannot_delete_attribute_definition_when_wrong_rg(self):
url = reverse('api_resource_pool_attribute_definition_retrieve_update_delete',
args=[self.rp_ocp_workers.id,
self.rp_vcenter_vcpu_attribute.id])
response = self.client.delete(url, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
|
dev_utils/memory.py | e-ntro-py/desktop-app | 518 | 11180503 | <gh_stars>100-1000
import psutil
import sys
import pandas as pd
import math
pid = int(sys.argv[1])
proc = psutil.Process(pid)
processes = [proc, ] +proc.children()
data = [ {
'name': p.name(),
'pid': p.pid,
'rss': p.memory_info().rss,
'vms': p.memory_info().vms
} for p in processes]
df = pd.DataFrame(data);
df['rss (MB)'] = df['rss'] / math.pow(10, 6)
df['vms (MB)'] = df['vms'] / math.pow(10, 6)
print(df)
print(df.sum())
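# Hedged usage note (added): run against a process id, e.g.
#   python memory.py 1234          # 1234 is a placeholder PID
# to print RSS/VMS (bytes and MB) for the process and its direct children,
# plus the column sums.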
|
support/Lab03 - Blockchain4Students/blockchain/consensus.py | RafaelAPB/university-course | 125 | 11180570 | <gh_stars>100-1000
from abc import ABC, abstractmethod
class Consensus(ABC):
def __init__(self, block):
self.block = block
super().__init__()
@abstractmethod
def mine_block(self):
pass
class ProofOfWork(Consensus):
def __init__(self, block, difficulty):
self.difficulty = difficulty
super().__init__(block)
def mine_block(self):
compare_str = "".join((["0"] * self.difficulty))
while self.block.currentHash[0:self.difficulty] != compare_str:
self.block.nonce += 1
            self.block.currentHash = self.block.calculate_hash()
|
hdf_compass/compass_viewer/frame.py | HDFGroup/hdf-compass | 124 | 11180576 | ##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of the HDF Compass Viewer. The full HDF Compass #
# copyright notice, including terms governing use, modification, and #
# redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from <EMAIL>. #
##############################################################################
"""
Defines wx.Frame subclasses which are the foundation of the various windows
displayed by HDFCompass.
Much of the common functionality (e.g. "Open File..." menu item) is implemented
here.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
from datetime import date
import wx
import wx.richtext as rtc
from wx.lib.pubsub import pub
log = logging.getLogger(__name__)
from .info import InfoPanel
ID_OPEN_RESOURCE = wx.NewId()
ID_CLOSE_FILE = wx.NewId()
ID_PLUGIN_INFO = wx.NewId()
MAX_RECENT_FILES = 8
from hdf_compass import compass_model
from hdf_compass.utils import __version__, is_darwin, path2url
from .events import CompassOpenEvent
open_frames = 0 # count the open frames
class BaseFrame(wx.Frame):
"""
Base class for all frames used in HDF Compass.
Implements common menus including File and Help, and handles their
events.
When implementing a new viewer window, you should instead inherit from
BaseFrame (below), which adds a left-hand side information panel, and
participates in the reference counting that automatically shows the
initial window when all other frames are closed.
"""
icon_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), 'icons'))
open_frames = 0 # count the number of frames
last_open_path = os.getcwd()
def __init__(self, **kwds):
""" Constructor; any keywords are passed on to wx.Frame.
"""
wx.Frame.__init__(self, None, **kwds)
BaseFrame.open_frames += 1
log.debug("new frame -> open frames: %s" % BaseFrame.open_frames)
# Frame icon
ib = wx.IconBundle()
icon_32 = wx.EmptyIcon()
icon_32.CopyFromBitmap(wx.Bitmap(os.path.join(self.icon_folder, "favicon_32.png"), wx.BITMAP_TYPE_ANY))
ib.AddIcon(icon_32)
icon_48 = wx.EmptyIcon()
icon_48.CopyFromBitmap(wx.Bitmap(os.path.join(self.icon_folder, "favicon_48.png"), wx.BITMAP_TYPE_ANY))
ib.AddIcon(icon_48)
self.SetIcons(ib)
# This is needed to display the app icon on the taskbar on Windows 7
if os.name == 'nt':
import ctypes
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID('HDFCompass')
self.urlhistory = wx.FileHistory(MAX_RECENT_FILES)
self.config = wx.Config("HDFCompass", style=wx.CONFIG_USE_LOCAL_FILE)
self.urlhistory.Load(self.config)
menubar = wx.MenuBar()
# File menu
fm = wx.Menu()
# Open Recent Menu
recent = wx.Menu()
self.urlhistory.UseMenu(recent)
self.urlhistory.AddFilesToMenu()
fm.Append(wx.ID_OPEN, "&Open...\tCtrl-O")
fm.Append(ID_OPEN_RESOURCE, "Open &Resource...\tCtrl-R")
fm.AppendMenu(wx.ID_ANY, "O&pen Recent", recent)
fm.AppendSeparator()
fm.Append(wx.ID_CLOSE, "&Close Window\tCtrl-W")
fm.Append(ID_CLOSE_FILE, "Close &File\tShift-Ctrl-W")
fm.Enable(ID_CLOSE_FILE, False)
fm.Append(wx.ID_EXIT, "E&xit", " Terminate the program")
menubar.Append(fm, "&File")
# Help menu; note that on the Mac, the About entry is automatically
# moved to the main application menu by wxPython.
help_menu = wx.Menu()
help_menu.Append(wx.ID_HELP, "Online &Manual", "Open online documentation")
help_menu.Append(ID_PLUGIN_INFO, "&Plugin Info", "Information about the available plugins")
help_menu.Append(wx.ID_ABOUT, "&About HDFCompass", "Information about this program")
menubar.Append(help_menu, "&Help")
self.SetMenuBar(menubar)
self.Bind(wx.EVT_MENU, self.on_file_open, id=wx.ID_OPEN)
self.Bind(wx.EVT_MENU, self.on_resource_open, id=ID_OPEN_RESOURCE)
self.Bind(wx.EVT_MENU, self.on_manual, id=wx.ID_HELP)
self.Bind(wx.EVT_MENU, self.on_plugin_info, id=ID_PLUGIN_INFO)
self.Bind(wx.EVT_MENU, self.on_about, id=wx.ID_ABOUT)
self.Bind(wx.EVT_MENU, self.on_exit, id=wx.ID_EXIT)
self.Bind(wx.EVT_MENU, self.on_close, id=wx.ID_CLOSE)
self.Bind(wx.EVT_CLOSE, self.on_close)
self.Bind(wx.EVT_MENU_RANGE, self.on_url_history, id=wx.ID_FILE1, id2=wx.ID_FILE9)
def on_close(self, evt):
""" Called on frame closing """
BaseFrame.open_frames -= 1
log.debug("close frame -> open frames: %s" % BaseFrame.open_frames)
self.Destroy()
if isinstance(self, InitFrame):
self.on_exit(evt)
def on_exit(self, evt):
""" Called on "exit" event from the menu """
log.debug("exit app -> closing all open frames: %s" % BaseFrame.open_frames)
wx.GetApp().Exit()
def on_manual(self, evt):
""" Open the url with the online documentation """
import webbrowser
webbrowser.open('http://hdf-compass.readthedocs.org/en/stable/')
def on_plugin_info(self, evt):
""" Open a tabs frame with info about the available plugins """
plug_info = PluginInfoFrame(self)
plug_info.Show()
def on_about(self, evt):
""" Display an "About" dialog """
info = wx.AboutDialogInfo()
info.Name = "HDF Compass"
info.Version = __version__
info.Copyright = "(c) 2014-%d The HDF Group" % date.today().year
icon_48 = wx.EmptyIcon()
icon_48.CopyFromBitmap(wx.Bitmap(os.path.join(self.icon_folder, "favicon_48.png"), wx.BITMAP_TYPE_ANY))
info.SetIcon(icon_48)
wx.AboutBox(info)
def on_file_open(self, evt):
""" Request to open a file via the Open entry in the File menu """
def make_filter_string():
""" Make a wxPython dialog filter string segment from dict """
filter_string = []
hdf_filter_string = [] # put HDF filters in the front
for store in compass_model.get_stores():
if len(store.file_extensions) == 0:
continue
for key in store.file_extensions:
s = "{name} ({pattern_c})|{pattern_sc}".format(
name=key,
pattern_c=",".join(store.file_extensions[key]),
pattern_sc=";".join(store.file_extensions[key]))
if s.startswith("HDF"):
hdf_filter_string.append(s)
else:
filter_string.append(s)
filter_string = hdf_filter_string + filter_string
filter_string.append('All Files (*.*)|*.*')
pipe = "|"
return pipe.join(filter_string)
wc_string = make_filter_string()
dlg = wx.FileDialog(self, "Open Local File", wildcard=wc_string, defaultDir=BaseFrame.last_open_path,
style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
if dlg.ShowModal() != wx.ID_OK:
return
path = dlg.GetPath()
BaseFrame.last_open_path = os.path.dirname(path)
url = path2url(path)
self.open_url(url)
def on_url_history(self, evt):
""" Opens url from history """
fileNum = evt.GetId() - wx.ID_FILE1
url = self.urlhistory.GetHistoryFile(fileNum)
self.open_url(url, fileNum)
def on_resource_open(self, evt):
""" Request to open a URL via the File menu """
dlg = wx.TextEntryDialog(self, 'Enter resource URL:')
if dlg.ShowModal() != wx.ID_OK or dlg.GetValue() == "":
dlg.Destroy()
return
url = dlg.GetValue()
url = url.strip() # remove any new lines
dlg.Destroy()
self.open_url(url)
def open_url(self, url, file_num=-1):
""" Opens url and saves it to history """
from . import can_open_store, open_store
if can_open_store(url):
self.urlhistory.AddFileToHistory(url) # add url to top of list
self.urlhistory.Save(self.config)
self.config.Flush()
open_store(url)
else:
if (file_num >= 0) and (file_num < MAX_RECENT_FILES):
self.urlhistory.RemoveFileFromHistory(file_num)
self.urlhistory.Save(self.config)
self.config.Flush()
dlg = wx.MessageDialog(self, 'The following url could not be opened:\n\n%s' % url,
'No handler for url', wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def add_menu(self, menu, title):
""" Add a menu at the appropriate place in the menubar """
mb = self.GetMenuBar()
mb.Insert(1, menu, title)
class InitFrame(BaseFrame):
""" Frame displayed when the application starts up.
This includes the menu bar provided by TopFrame. On the Mac, although it
still exists (to prevent the application from exiting), the frame
is typically not shown.
"""
def __init__(self):
style = wx.DEFAULT_FRAME_STYLE & (~wx.RESIZE_BORDER) & (~wx.MAXIMIZE_BOX)
title = "HDF Compass"
super(InitFrame, self).__init__(size=(552, 247), title=title, style=style)
data = wx.Bitmap(os.path.join(self.icon_folder, "logo.png"), wx.BITMAP_TYPE_ANY)
bmp = wx.StaticBitmap(self, wx.ID_ANY, data)
# The init frame isn't visible on Mac, so there shouldn't be an
# option to close it. "Quit" does the same thing.
if is_darwin:
mb = self.GetMenuBar()
mu = mb.GetMenu(0)
mu.Enable(wx.ID_CLOSE, False)
self.Center()
class NodeFrame(BaseFrame):
""" Base class for any frame which displays a Node instance.
Provides a "Close file" menu item and manages open data stores.
Has three attributes of note:
.node: Settable Node instance to display
.info: Read-only InfoPanel instance (left-hand sidebar)
.view: Settable wx.Panel instance for the right-hand view.
In order to coordinate file-close events across multiple frames,
a reference-counting system is used. When a new frame that uses a store
is created, that store's reference count (in cls._stores) is incremented.
When the frame is closed, the store's count is decremented.
When the reference count reaches 0 or the "Close File" is selected from the
menu, the store is closed and a pubsub notification is sent out to all
other frames. They check to see if their .node.store's are valid, and
if not, close themselves.
"""
# --- Store reference-counting methods ------------------------------------
_stores = {}
@classmethod
def _incref(cls, store):
""" Record that a client is using the specified store. """
try:
cls._stores[store] += 1
except KeyError:
cls._stores[store] = 1
@classmethod
def _decref(cls, store):
""" Record that a client is finished using the specified store. """
try:
val = cls._stores[store]
if val == 1:
cls._close(store)
del cls._stores[store]
else:
cls._stores[store] = val - 1
except KeyError:
pass
@classmethod
def _close(cls, store):
""" Manually close the store, and broadcast a pubsub notification. """
cls._stores.pop(store, None)
store.close()
pub.sendMessage('store.close')
# --- End store reference-counting ----------------------------------------
@property
def info(self):
""" The InfoPanel object used for the left-hand sidebar. """
return self.__info
@property
def node(self):
""" Node instance displayed by the frame. """
return self.__node
@node.setter
def node(self, newnode):
self.__node = newnode
@property
def view(self):
""" Right-hand view """
return self.__view
@view.setter
def view(self, window):
if self.__view is None:
self.__sizer.Add(window, 1, wx.EXPAND)
else:
self.__sizer.Remove(self.__view)
self.__view.Destroy()
self.__sizer.Add(window, 1, wx.EXPAND)
self.__view = window
self.Layout()
def __init__(self, node, **kwds):
""" Constructor. Keywords are passed on to wx.Frame.
node: The compass_model.Node instance to display.
"""
super(NodeFrame, self).__init__(**kwds)
# Enable the "Close File" menu entry
fm = self.GetMenuBar().GetMenu(0)
fm.Enable(ID_CLOSE_FILE, True)
# Create the "window" menu to hold "Reopen As" items.
wm = wx.Menu()
# Determine a list of handlers which can understand this object.
# We exclude the default handler, "Unknown", as it can't do anything.
# See also container/list.py.
handlers = [x for x in node.store.gethandlers(node.key) if x != compass_model.Unknown]
# This will map menu IDs -> Node subclass handlers
self._menu_handlers = {}
# Note there's guaranteed to be at least one entry: the class
# being used for the current frame!
for h in handlers:
id_ = wx.NewId()
self._menu_handlers[id_] = h
wm.Append(id_, "Reopen as " + h.class_kind)
self.Bind(wx.EVT_MENU, self.on_menu_reopen, id=id_)
self.GetMenuBar().Insert(1, wm, "&Window")
self.__node = node
self.__view = None
self.__info = InfoPanel(self)
self.__sizer = wx.BoxSizer(wx.HORIZONTAL)
self.__sizer.Add(self.__info, 0, wx.EXPAND)
self.SetSizer(self.__sizer)
self.info.display(node)
self.Bind(wx.EVT_CLOSE, self.on_close_evt)
self.Bind(wx.EVT_MENU, self.on_menu_closefile, id=ID_CLOSE_FILE)
self._incref(node.store)
pub.subscribe(self.on_notification_closefile, 'store.close')
def on_notification_closefile(self):
""" Pubsub notification that a file (any file) has been closed """
if not self.node.store.valid:
self.Destroy()
def on_close_evt(self, evt):
""" Window is about to be closed """
self._decref(self.node.store)
evt.Skip()
def on_menu_closefile(self, evt):
""" "Close File" menu item activated.
Note we rely on the pubsub message (above) to actually close the frame.
"""
self._close(self.node.store)
def on_menu_reopen(self, evt):
""" Called when one of the "Reopen As" menu items is clicked """
# The "Reopen As" submenu ID
id_ = evt.GetId()
# Present node
node_being_opened = self.node
# The requested Node subclass to instantiate.
h = self._menu_handlers[id_]
log.debug('opening: %s %s' % (node_being_opened.store, node_being_opened.key))
# Brand new Node instance of the requested type
node_new = h(node_being_opened.store, node_being_opened.key)
# Send off a request for it to be opened in the appropriate viewer
# Post it directly to the App, or Container will intercept it!
pos = wx.GetTopLevelParent(self).GetPosition()
wx.PostEvent(wx.GetApp(), CompassOpenEvent(node_new, pos=pos))
class PluginInfoFrame(wx.Frame):
icon_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), 'icons'))
def __init__(self, parent):
# make that the plugin info is displayed in the middle of the screen
frame_w = 320
frame_h = 250
x = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_X) // 2 - frame_w // 2
y = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_Y) // 2 - frame_h // 2
wx.Frame.__init__(self, parent, title="Plugin Info", pos=(x, y), size=(frame_w, frame_h))
# Frame icon
ib = wx.IconBundle()
icon_32 = wx.EmptyIcon()
icon_32.CopyFromBitmap(wx.Bitmap(os.path.join(self.icon_folder, "favicon_32.png"), wx.BITMAP_TYPE_ANY))
ib.AddIcon(icon_32)
icon_48 = wx.EmptyIcon()
icon_48.CopyFromBitmap(wx.Bitmap(os.path.join(self.icon_folder, "favicon_48.png"), wx.BITMAP_TYPE_ANY))
ib.AddIcon(icon_48)
self.SetIcons(ib)
p = wx.Panel(self)
nb = wx.Notebook(p)
for store in compass_model.get_stores():
try:
# log.debug(store.plugin_name())
# log.debug(store.plugin_description())
pnl = wx.Panel(nb)
t = rtc.RichTextCtrl(pnl, -1, style=wx.TE_READONLY)
t.BeginFontSize(9)
t.BeginAlignment(wx.TEXT_ALIGNMENT_CENTRE)
t.BeginBold()
t.WriteText("Name: ")
t.EndBold()
t.BeginItalic()
t.WriteText(store.plugin_name())
t.EndItalic()
t.Newline()
t.Newline()
t.BeginBold()
t.WriteText("Description")
t.EndBold()
t.Newline()
t.BeginItalic()
t.WriteText(store.plugin_description())
t.EndItalic()
t.Newline()
# store.plugin_description(), style=wx.TE_MULTILINE|wx.TE_READONLY|wx.TE_CENTER)
szr = wx.BoxSizer()
szr.Add(t, 1, wx.ALL | wx.EXPAND, 5)
pnl.SetSizer(szr)
nb.AddPage(pnl, store.plugin_name())
except NotImplementedError:
# skip not implemented plugin name/description
log.debug("Not implemented name/description for %s" % store)
sizer = wx.BoxSizer()
sizer.Add(nb, 1, wx.ALL | wx.EXPAND, 3)
p.SetSizer(sizer)
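# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A minimal way to show the start-up window defined above, assuming wxPython and the
# rest of the hdf_compass package are importable:
#
#   import wx
#   app = wx.App(False)
#   frame = InitFrame()      # menu bar + splash-style initial window
#   frame.Show()
#   app.MainLoop()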
|
contrib/demUtils/test/testCorrect_geoid_i2_srtm.py | vincentschut/isce2 | 1,133 | 11180593 | <reponame>vincentschut/isce2
#!/usr/bin/env python3
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2012 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: <NAME>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from __future__ import print_function
import sys
import os
import math
from iscesys.Compatibility import Compatibility
Compatibility.checkPythonVersion()
from contrib.demUtils.Correct_geoid_i2_srtm import Correct_geoid_i2_srtm
def main():
from iscesys.StdOEL.StdOELPy import StdOEL as ST
stdWriter = ST()
stdWriter.createWriters()
stdWriter.configWriter("log","",True,"insar.log")
stdWriter.init()
obj = Correct_geoid_i2_srtm()
obj.setInputFilename(sys.argv[1])
#if outputFilenmae not specified the input one is overwritten
obj.setOutputFilename(sys.argv[1] + '.id')
obj.setStdWriter(stdWriter)
obj.setWidth(int(sys.argv[2]))
obj.setStartLatitude(float(sys.argv[3]))
obj.setStartLongitude(float(sys.argv[4]))
obj.setDeltaLatitude(float(sys.argv[5]))
obj.setDeltaLongitude(float(sys.argv[6]))
# -1 EGM96 -> WGS84, 1 WGS84 -> EGM96
obj.setConversionType(int(sys.argv[7]))
obj.correct_geoid_i2_srtm()
if __name__ == "__main__":
sys.exit(main())
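# Hedged usage note (added): the positional arguments map to the setters above, e.g.
#   ./testCorrect_geoid_i2_srtm.py dem.i2 3600 34.0 -118.0 -0.000277 0.000277 -1
# (all values are placeholders); the trailing -1 converts EGM96 heights to WGS84.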
|
skbayes/decomposition_models/__init__.py | Habush/sklearn-bayes | 478 | 11180614 | <reponame>Habush/sklearn-bayes<filename>skbayes/decomposition_models/__init__.py<gh_stars>100-1000
from .gibbs_lda_cython import GibbsLDA
from .rbm import BernoulliRBM
__all__ = ['GibbsLDA','BernoulliRBM']
|
libraries/botframework-streaming/botframework/streaming/transport/disconnected_event_args.py | andreikop/botbuilder-python | 388 | 11180626 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
class DisconnectedEventArgs:
def __init__(self, *, reason: str = None):
self.reason = reason
DisconnectedEventArgs.empty = DisconnectedEventArgs()
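# Added note: DisconnectedEventArgs.empty is a shared default/sentinel instance
# (reason=None), so callers can report a disconnect without allocating a new object.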
|
autodist/utils/network.py | Ezra-H/autodist | 127 | 11180630 | <reponame>Ezra-H/autodist
# Copyright 2020 Petuum, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Network utility functions."""
from ipaddress import ip_address
import netifaces
def is_loopback_address(address):
"""
Determine whether an address is a loopback address (e.g. 127.0.0.1).
Args:
address (str): Address (can be IP or IP:port)
Returns:
Boolean
"""
ip = _get_ip_from_address(address)
return ip.is_loopback
def is_local_address(address):
"""
Determine whether an address is a local (including loopback) IP address.
Adapted from stackoverflow.com/questions/166506.
Args:
address (str): Address (can be IP or IP:port)
Returns:
Boolean
"""
ip = _get_ip_from_address(address)
# Get all addresses
addresses = set()
for iface_name in netifaces.interfaces():
for i in netifaces.ifaddresses(iface_name).setdefault(netifaces.AF_INET, [{'addr': None}]):
if i['addr']:
addresses.add(ip_address(i['addr']))
return ip in addresses
def _get_ip_from_address(address):
"""
Extract an IP Address object from an address string.
Args:
address (str): Address (can be IP or IP:port)
Returns:
An IPv4Address or IPv6Address object.
"""
ip, _, _ = address.rpartition(':')
ip = ip or address # If there was no separation, ip will be empty so use original string
if ip == 'localhost':
# These should be equivalent
# `ip_address` will throw an error if given localhost
ip = '127.0.0.1'
return ip_address(ip.strip("[]")) # IPv6 addresses might contain [] to separate address and port
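# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Expected behaviour of the helpers above (addresses are examples only):
#
#   is_loopback_address("127.0.0.1:8000")   # True  -- the port is stripped first
#   is_loopback_address("[::1]:50051")      # True  -- IPv6 brackets are stripped
#   is_local_address("8.8.8.8")             # False on a typical machine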
|
ghostwriter/commandcenter/migrations/0009_cloudservicesconfiguration_notification_delay.py | bbhunter/Ghostwriter | 601 | 11180677 | # Generated by Django 3.0.10 on 2021-09-20 21:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('commandcenter', '0008_remove_namecheapconfiguration_reset_dns'),
]
operations = [
migrations.AddField(
model_name='cloudservicesconfiguration',
name='notification_delay',
field=models.IntegerField(default=7, help_text='Number of days to delay cloud monitoring notifications for teardown', verbose_name='Notification Delay'),
),
]
|
setup.py | aicentral/torchbiomed | 106 | 11180681 | #!/usr/bin/env python
import os
import shutil
import sys
from setuptools import setup, find_packages
readme = open('README.md').read()
VERSION = '0.0.1'
# same as ./requirements.txt
requirements = [
'numpy',
'torch',
'torchvision',
]
setup(
# Metadata
name='torchbiomed',
version=VERSION,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/mattmacy/torchbiomed',
description='biomedical image datasets, transforms, utilities, and models for torch deep learning',
long_description=readme,
license='BSD',
# Package info
packages=find_packages(exclude=('test',)),
zip_safe=True,
install_requires=requirements,
)
|
ai/sample-model2.py | dekimir/RamFuzz | 310 | 11180691 | #!/usr/bin/env python
# Copyright 2016-2018 The RamFuzz contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A sample Keras model trainable on the output of ./gencorp.py. It tries to
predict the test success or failure based on the logged RamFuzz values during
the test run. It consists of N dense layers in parallel whose outputs are
multiplied. This is interesting because we know how to translate a fully
trained network like this into a feedback mechanism to the RamFuzz generator --
see ./solver.py.
Unfortunately, this model can currently only reach ~56% accuracy.
Usage: $0 [epochs] [batch_size] [N]
Defaults: epochs=1, batch_size=50, N=50
Expects a train/ subdirectory containing the output of ./gencorp.py.
"""
from keras.constraints import min_max_norm
from keras.layers import BatchNormalization, Dense, Dropout, Embedding, Flatten
from keras.layers import Input
from keras.layers.merge import concatenate, multiply
from keras.metrics import mse
from keras.models import Model
from keras.optimizers import Adam
import glob
import keras.backend as K
import os.path
import rfutils
import sys
gl = glob.glob(os.path.join('train', '*.[sf]'))
poscount, locidx = rfutils.count_locpos(gl)
embedding_dim = 4
dropout_prob = 0.4
dense_count = int(sys.argv[3]) if len(sys.argv) > 3 else 50
optr = Adam(lr=0.03)
K.set_floatx('float64')
in_vals = Input((poscount, 1), name='vals', dtype='float64')
normd = BatchNormalization(
axis=1, gamma_constraint=min_max_norm(),
beta_constraint=min_max_norm())(in_vals)
in_locs = Input((poscount, ), name='locs', dtype='uint64')
embed_locs = Embedding(
locidx.watermark, embedding_dim, input_length=poscount)(in_locs)
merged = concatenate([embed_locs, normd])
dense_list = []
for i in range(dense_count):
dense_list.append(
Dropout(dropout_prob)(Dense(1, activation='sigmoid')(Flatten()(
merged))))
mult = multiply(dense_list)
ml = Model(inputs=[in_locs, in_vals], outputs=mult)
ml.compile(optr, metrics=['acc'], loss=mse)
locs, vals, labels = rfutils.read_data(gl, poscount, locidx)
def fit(
eps=int(sys.argv[1]) if len(sys.argv) > 1 else 1,
# Large batches tend to cause NaNs in batch normalization.
bsz=int(sys.argv[2]) if len(sys.argv) > 2 else 50):
ml.fit([locs, vals], labels, batch_size=bsz, epochs=eps)
fit()
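# Hedged usage note (added): per the module docstring, e.g.
#   ./sample-model2.py 5 64 100
# trains for 5 epochs with batch size 64 and 100 parallel dense layers, reading
# corpus files from ./train (as produced by ./gencorp.py).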
|
lintcode/92.backpack.py | geemaple/algorithm | 177 | 11180708 | class Solution:
"""
@param m: An integer m denotes the size of a backpack
@param A: Given n items with size A[i]
@return: The maximum size
"""
def backPack(self, m, A):
# write your code here
table = [False for _ in range(m + 1)]
table[0] = True
for i in range(len(A)):
for w in range(m, -1, -1):
value = table[w]
if w - A[i] >= 0:
value = value or table[w - A[i]]
table[w] = value
maxsize = 0
for i in range(m, -1, -1):
if table[i]:
maxsize = i
break
        return maxsize
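# --- Hedged usage example (added for illustration) ---
#   print(Solution().backPack(10, [3, 4, 8, 5]))   # -> 9 (items of size 4 and 5)
|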
tensorflow_constrained_optimization/__init__.py | RMKruse/tensorflow_constrained_optimization | 276 | 11180726 | <gh_stars>100-1000
# Copyright 2018 The TensorFlow Constrained Optimization Authors. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""A library for performing constrained optimization in TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_constrained_optimization.python.candidates import find_best_candidate_distribution
from tensorflow_constrained_optimization.python.candidates import find_best_candidate_index
from tensorflow_constrained_optimization.python.constrained_minimization_problem import ConstrainedMinimizationProblem
from tensorflow_constrained_optimization.python.rates.binary_rates import inverse_precision_at_recall
from tensorflow_constrained_optimization.python.rates.binary_rates import pr_auc
from tensorflow_constrained_optimization.python.rates.binary_rates import precision_at_recall
from tensorflow_constrained_optimization.python.rates.binary_rates import recall_at_precision
from tensorflow_constrained_optimization.python.rates.binary_rates import roc_auc
from tensorflow_constrained_optimization.python.rates.estimator_head import HeadV2
from tensorflow_constrained_optimization.python.rates.general_rates import accuracy_rate
from tensorflow_constrained_optimization.python.rates.general_rates import error_rate
from tensorflow_constrained_optimization.python.rates.general_rates import f_score
from tensorflow_constrained_optimization.python.rates.general_rates import f_score_ratio
from tensorflow_constrained_optimization.python.rates.general_rates import false_negative_proportion
from tensorflow_constrained_optimization.python.rates.general_rates import false_negative_rate
from tensorflow_constrained_optimization.python.rates.general_rates import false_positive_proportion
from tensorflow_constrained_optimization.python.rates.general_rates import false_positive_rate
from tensorflow_constrained_optimization.python.rates.general_rates import negative_prediction_rate
from tensorflow_constrained_optimization.python.rates.general_rates import positive_prediction_rate
from tensorflow_constrained_optimization.python.rates.general_rates import precision
from tensorflow_constrained_optimization.python.rates.general_rates import precision_ratio
from tensorflow_constrained_optimization.python.rates.general_rates import true_negative_proportion
from tensorflow_constrained_optimization.python.rates.general_rates import true_negative_rate
from tensorflow_constrained_optimization.python.rates.general_rates import true_positive_proportion
from tensorflow_constrained_optimization.python.rates.general_rates import true_positive_rate
from tensorflow_constrained_optimization.python.rates.keras import KerasLayer
from tensorflow_constrained_optimization.python.rates.keras import KerasMetricWrapper
from tensorflow_constrained_optimization.python.rates.keras import KerasPlaceholder
from tensorflow_constrained_optimization.python.rates.loss import BinaryClassificationLoss
from tensorflow_constrained_optimization.python.rates.loss import HingeLoss
from tensorflow_constrained_optimization.python.rates.loss import Loss
from tensorflow_constrained_optimization.python.rates.loss import MulticlassLoss
from tensorflow_constrained_optimization.python.rates.loss import SoftmaxCrossEntropyLoss
from tensorflow_constrained_optimization.python.rates.loss import SoftmaxLoss
from tensorflow_constrained_optimization.python.rates.loss import ZeroOneLoss
from tensorflow_constrained_optimization.python.rates.operations import lower_bound
from tensorflow_constrained_optimization.python.rates.operations import upper_bound
from tensorflow_constrained_optimization.python.rates.operations import wrap_rate
from tensorflow_constrained_optimization.python.rates.rate_minimization_problem import RateMinimizationProblem
from tensorflow_constrained_optimization.python.rates.subsettable_context import multiclass_rate_context
from tensorflow_constrained_optimization.python.rates.subsettable_context import multiclass_split_rate_context
from tensorflow_constrained_optimization.python.rates.subsettable_context import rate_context
from tensorflow_constrained_optimization.python.rates.subsettable_context import split_rate_context
from tensorflow_constrained_optimization.python.train.constrained_optimizer import ConstrainedOptimizerV1
from tensorflow_constrained_optimization.python.train.constrained_optimizer import ConstrainedOptimizerV2
from tensorflow_constrained_optimization.python.train.lagrangian_model_optimizer import create_lagrangian_model_loss
from tensorflow_constrained_optimization.python.train.lagrangian_optimizer import create_lagrangian_loss
from tensorflow_constrained_optimization.python.train.lagrangian_optimizer import LagrangianOptimizerV1
from tensorflow_constrained_optimization.python.train.lagrangian_optimizer import LagrangianOptimizerV2
from tensorflow_constrained_optimization.python.train.proxy_lagrangian_optimizer import create_proxy_lagrangian_loss
from tensorflow_constrained_optimization.python.train.proxy_lagrangian_optimizer import ProxyLagrangianOptimizerV1
from tensorflow_constrained_optimization.python.train.proxy_lagrangian_optimizer import ProxyLagrangianOptimizerV2
# The "true positive rate" is the same thing as the "recall", so we allow it to
# be accessed by either name.
recall = true_positive_rate
# By default, we use V2 optimizers. These aliases are purely for convenience: in
# general, you should prefer to explicitly specify either a V1 or a V2 optimizer
# (in case there's ever a V3, we'll update these aliases).
ConstrainedOptimizer = ConstrainedOptimizerV2
LagrangianOptimizer = LagrangianOptimizerV2
ProxyLagrangianOptimizer = ProxyLagrangianOptimizerV2
# We allow "HeadV2" to be also accessed by "Head". If there's a V3 at some
# point, we'll update this alias accordingly.
Head = HeadV2
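# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Typical rate-constrained setup using only names exported above; exact argument
# names and training-loop details are assumptions, not taken from this file:
#
#   context = rate_context(predictions, labels)
#   problem = RateMinimizationProblem(error_rate(context),
#                                     [false_positive_rate(context) <= 0.1])
#   optimizer = LagrangianOptimizer(tf.keras.optimizers.Adagrad(0.1),
#                                   num_constraints=problem.num_constraints)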
|
python/tests/json/json_dictionary_test.py | vvucetic/keyvi | 199 | 11180733 | <filename>python/tests/json/json_dictionary_test.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Usage: py.test tests
import sys
import os
from keyvi.compiler import JsonDictionaryCompiler
root = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(root, "../"))
from test_tools import tmp_dictionary
def test_simple():
c = JsonDictionaryCompiler({"memory_limit_mb":"10"})
c.Add("abc", '{"a" : 2}')
c.Add("abd", '{"a" : 3}')
# use python syntax ala __setitem__
c["abd"] = '{"a" : 3}'
with tmp_dictionary(c, 'simple_json.kv') as d:
assert len(d) == 2
assert d["abc"].GetValueAsString() == '{"a":2}'
assert d["abd"].GetValueAsString() == '{"a":3}'
def test_simple_zlib():
c = JsonDictionaryCompiler({"memory_limit_mb":"10", 'compression': 'z', 'compression_threshold': '0'})
c.Add("abc", '{"a" : 2}')
c.Add("abd", '{"a" : 3}')
with tmp_dictionary(c, 'simple_json_z.kv') as d:
assert len(d) == 2
assert d["abc"].GetValueAsString() == '{"a":2}'
assert d["abd"].GetValueAsString() == '{"a":3}'
m = d.GetStatistics()['Value Store']
assert m['__compression'] == "zlib"
def test_simple_snappy():
c = JsonDictionaryCompiler({"memory_limit_mb":"10", 'compression': 'snappy', 'compression_threshold': '0'})
c.Add("abc", '{"a" : 2}')
c.Add("abd", '{"a" : 3}')
with tmp_dictionary(c, 'simple_json_snappy.kv') as d:
assert len(d) == 2
assert d["abc"].GetValueAsString() == '{"a":2}'
assert d["abd"].GetValueAsString() == '{"a":3}'
m = d.GetStatistics()['Value Store']
assert m['__compression'] == "snappy"
def test_unicode_compile():
c = JsonDictionaryCompiler({"memory_limit_mb":"10"})
c.Add("üöä", '{"y" : 2}')
c.Add("üüüüüüabd", '{"a" : 3}')
c.Add(u"ääääädäd", '{"b" : 33}')
with tmp_dictionary(c, 'simple_json.kv') as d:
assert len(d) == 3
assert d["üöä"].GetValueAsString() == '{"y":2}'
assert d[u"üöä"].GetValueAsString() == '{"y":2}'
assert d["üüüüüüabd"].GetValueAsString() == '{"a":3}'
assert d["ääääädäd"].GetValueAsString() == '{"b":33}'
def test_float_compaction():
cs = JsonDictionaryCompiler({"memory_limit_mb":"10", 'floating_point_precision': 'single'})
cd = JsonDictionaryCompiler({"memory_limit_mb":"10"})
# add a couple of floats to both
cs.Add('aa', '[1.7008715758978892, 1.8094465532317732, 1.6098250864350536, 1.6369107966501981, 1.7736887965234107, 1.606682751740542, 1.6186427703265525, 1.7939763843449683, 1.5973550162469434, 1.6799721708726192, 1.8199786239525833, 1.7956178070065245, 1.7269879953863045]')
cd.Add('aa', '[1.7008715758978892, 1.8094465532317732, 1.6098250864350536, 1.6369107966501981, 1.7736887965234107, 1.606682751740542, 1.6186427703265525, 1.7939763843449683, 1.5973550162469434, 1.6799721708726192, 1.8199786239525833, 1.7956178070065245, 1.7269879953863045]')
with tmp_dictionary(cs, 'json_single_precision_float.kv') as ds:
with tmp_dictionary(cd, 'json_double_precision_float.kv') as dd:
# first some basic checks
assert len(ds) == 1
assert len(dd) == 1
# simple test the length of the value store which shall be smaller for single floats
stats_s = ds.GetStatistics()
stats_d = dd.GetStatistics()
assert int(stats_s['Value Store']['size']) < int(stats_d['Value Store']['size'])
|
draft-3/salad/setup.py | hmenager/common-workflow-language | 1,365 | 11180738 | #!/usr/bin/env python
import os
import sys
import setuptools.command.egg_info as egg_info_cmd
import shutil
from setuptools import setup, find_packages
SETUP_DIR = os.path.dirname(__file__)
README = os.path.join(SETUP_DIR, 'README.rst')
try:
import gittaggers
tagger = gittaggers.EggInfoFromGit
except ImportError:
tagger = egg_info_cmd.egg_info
setup(name='schema-salad',
version='1.5',
description='Schema Annotations for Linked Avro Data (SALAD)',
long_description=open(README).read(),
author='Common workflow language working group',
author_email='<EMAIL>',
url="https://github.com/common-workflow-language/common-workflow-language",
download_url="https://github.com/common-workflow-language/common-workflow-language",
license='Apache 2.0',
packages=["schema_salad"],
package_data={'schema_salad': ['metaschema/*']},
install_requires=[
'requests',
'PyYAML',
'avro',
'rdflib >= 4.2.0',
'rdflib-jsonld >= 0.3.0',
'mistune'
],
test_suite='tests',
tests_require=[],
entry_points={
'console_scripts': [ "schema-salad-tool=schema_salad.main:main" ]
},
zip_safe=True,
cmdclass={'egg_info': tagger},
)
|
concern/__init__.py | CelineWang1027/PSGAN | 570 | 11180746 | <gh_stars>100-1000
from .image import load_image
|
k-distribution/tests/pyk/unit-test.py | nrdxp/k | 404 | 11180778 | <reponame>nrdxp/k<filename>k-distribution/tests/pyk/unit-test.py
#!/usr/bin/env python3
import sys
import unittest
from functools import reduce
# From K's pyk-library
from pyk import *
class TestPyk(unittest.TestCase):
def test_newLines(self):
self.assertEqual(newLines(['aaa', 'bbb']), 'aaa\nbbb')
self.assertEqual(newLines(['aaa']), 'aaa')
def test_splitConfigFrom(self):
k_cell = KSequence([KConstant('foo'), KConstant('bar')])
term = KApply('<k>', [k_cell])
(config, subst) = splitConfigFrom(term)
self.assertEqual(config, KApply('<k>', [KVariable('K_CELL')]))
self.assertEqual(subst, {'K_CELL': k_cell})
map_item_cell = KApply('<mapItem>', [KConstant('foo')])
map_cell = KApply('<mapCell>', [KApply('map_join', [map_item_cell, map_item_cell])])
(config, subst) = splitConfigFrom(map_cell)
self.assertEqual(config, KApply('<mapCell>', [KVariable('MAPCELL_CELL')]))
self.assertEqual(subst, {'MAPCELL_CELL': KApply('map_join', [map_item_cell, map_item_cell])})
if __name__ == '__main__':
unittest.main()
|
examples/t_entries.py | tgolsson/appJar | 666 | 11180793 | import sys
sys.path.append("../")
from appJar import gui
def changer(btn):
print(btn, app.entry(btn), "changed")
def submit(btn):
print(btn, app.entry(btn), "submitted")
def validater(btn):
if btn == "A":
app.setEntryBg("e1", "red")
app.setEntryBg("le1", "red")
app.setEntryBg("ld1", "red")
app.setEntryBg("lv1", "red")
app.setLabelBg("le1", "red")
app.setEntryValid("v1")
app.setEntryValid("lv1")
app.setEntryValid("lsv1")
app.setLabelBg("lsv1", "red")
app.setEntryValid("sv1")
elif btn == "B":
app.setEntryBg("e1", "green")
app.setEntryBg("le1", "green")
app.setEntryBg("ld1", "green")
app.setEntryBg("lv1", "green")
app.setEntryInvalid("v1")
app.setEntryInvalid("lv1")
app.setEntryInvalid("lsv1")
app.setEntryInvalid("sv1")
elif btn == "C":
app.setEntryBg("e1", "orange")
app.setEntryBg("le1", "orange")
app.setEntryBg("ld1", "orange")
app.setEntryBg("lv1", "orange")
app.setEntryWaitingValidation("v1")
app.setEntryWaitingValidation("lv1")
app.setEntryWaitingValidation("lsv1")
app.setEntryWaitingValidation("sv1")
elif btn == "D":
app.hideEntry("e1")
app.hideEntry("le1")
app.hideEntry("d1")
app.hideEntry("ld1")
app.hideEntry("v1")
app.hideEntry("lv1")
elif btn == "E":
app.showEntry("e1")
app.showEntry("le1")
app.showEntry("d1")
app.showEntry("ld1")
app.showEntry("v1")
app.showEntry("lv1")
elif btn == "F":
app.removeEntry("e1")
app.removeEntry("f1")
app.removeEntry("le1")
app.removeEntry("d1")
app.removeEntry("ld1")
app.removeEntry("v1")
app.removeEntry("lv1")
elif btn == "G":
app.debug("Adding entry: e1")
app.addEntry("e1", row=1)
app.debug("Adding val entry: v1")
app.addValidationEntry("v1",row=2)
app.debug("Adding file entry: f1")
app.addFileEntry("f1", row=3)
app.debug("Adding dir entry: d1")
app.addDirectoryEntry("d1", row=4)
app.debug("Adding lab entry: l1")
app.addLabelEntry("le1", row=9)
app.debug("Adding lab dir entry: ld1")
app.addLabelDirectoryEntry("ld1", row=11)
app.debug("Adding lab val entry: lv1")
app.addLabelValidationEntry("lv1", row=15)
elif btn == "0":
app.addLabelEntry("long", row=16)
app=gui()
app.addButtons(["0", "A", "B", "C", "D", "E", "F", "G"], validater, colspan=2)
app.setBg("green")
app.setLabelFont(size=20)
app.addEntry("e1")
app.addValidationEntry("v1")
app.addFileEntry("f1")
app.addDirectoryEntry("d1")
app.addNumericEntry("n1")
app.addAutoEntry("a1", ["a", "b", "bb", "bbb"])
app.addSecretEntry("s1")
app.separator(colspan=2)
app.addLabelEntry("le1")
app.setEntryDefault("le1", "aaa")
app.setEntryDefault("le1", "bbb")
app.addLabelFileEntry("lf1")
app.addLabelDirectoryEntry("ld1")
app.addLabelNumericEntry("ln1")
app.addLabelAutoEntry("la1", ["a", "b", "bb", "bbb"])
app.addLabelSecretEntry("ls1")
app.addLabelValidationEntry("lv1")
app.entry("sv1", row=1, column=1, kind="validation", default="validation", submit=submit, change=changer, limit=5, case="upper", rows=3)
app.entry("se1", row=2, column=1, default="standard", submit=submit, change=changer, limit=5, case="upper", rows=3)
app.entry("sf1", row=3, column=1, kind="file", default="file", submit=submit, change=changer, limit=5, case="upper", rows=3)
app.entry("sd1", row=4, column=1, kind="directory", default="directory", submit=submit, change=changer, limit=5, case="upper", rows=3)
app.entry("sn1", row=5, column=1, kind="numeric", default="numeric", submit=submit, change=changer, limit=5, case="upper", rows=3)
app.entry("sa1", ["a", "b", "bb", "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb:bbb"], row=6, column=1, kind="auto", submit=submit, change=changer, limit=5, case="upper", rows=3, default="big auto")
app.entry("ss1", row=7, column=1, secret=True, default="secret", submit=submit, change=changer, limit=5, case="upper", rows=3)
app.entry("lse1", row=9, column=1,label="lText", default="standerder")
app.entry("lsf1", row=10, column=1, kind="file",label="lText", default="filer")
app.entry("lsd1", row=11, column=1, kind="directory",label="lText", default="directoryer")
app.entry("lsn1", row=12, column=1, kind="numeric",label="lText", default="numericer")
app.entry("lsa1", ["a", "b", "bb", "bbb"], row=13, column=1, kind="auto",label=True, default="autoer")
app.entry("lss1", row=14, column=1, secret=True,label=True, default="secreter")
app.entry("lsv1", row=15, column=1, kind="validation",label=True, default="validationer")
app.grip()
app.setBg("blue")
app.setBg("yellow")
#app.ttkStyle.configure("BW.TEntry", foreground="yellow", background="pink")
#app.ttkStyle.configure("TButton", foreground="pink", background="yellow")
#app.ttkStyle.configure("TLabel", background="blue")
#app.ttkStyle.configure("TFrame", background="blue")
#app.setEntryStyle("e1", "BW.TEntry")
app.go()
|
bookwyrm/tests/models/test_import_model.py | mouse-reeve/fedireads | 270 | 11180801 | <filename>bookwyrm/tests/models/test_import_model.py
""" testing models """
import datetime
import json
import pathlib
from unittest.mock import patch
from django.utils import timezone
from django.test import TestCase
import responses
from bookwyrm import models
from bookwyrm.book_search import SearchResult
from bookwyrm.connectors import connector_manager
class ImportJob(TestCase):
"""this is a fancy one!!!"""
def setUp(self):
"""data is from a goodreads export of The Raven Tower"""
with patch("bookwyrm.suggested_users.rerank_suggestions_task.delay"), patch(
"bookwyrm.activitystreams.populate_stream_task.delay"
), patch("bookwyrm.lists_stream.populate_lists_task.delay"):
self.local_user = models.User.objects.create_user(
"mouse", "<EMAIL>", "password", local=True
)
self.job = models.ImportJob.objects.create(user=self.local_user, mappings={})
def test_isbn(self):
"""it unquotes the isbn13 field from data"""
item = models.ImportItem.objects.create(
index=1,
job=self.job,
data={},
normalized_data={
"isbn_13": '="9780356506999"',
},
)
self.assertEqual(item.isbn, "9780356506999")
def test_shelf(self):
"""converts to the local shelf typology"""
item = models.ImportItem.objects.create(
index=1,
job=self.job,
data={},
normalized_data={
"isbn_13": '="9780356506999"',
"shelf": "reading",
},
)
self.assertEqual(item.shelf, "reading")
def test_date_added(self):
"""converts to the local shelf typology"""
expected = datetime.datetime(2019, 4, 9, 0, 0, tzinfo=timezone.utc)
item = models.ImportItem.objects.create(
index=1,
job=self.job,
data={},
normalized_data={
"isbn_13": '="9780356506999"',
"shelf": "reading",
"date_added": "2019/04/09",
},
)
self.assertEqual(item.date_added, expected)
def test_date_read(self):
"""converts to the local shelf typology"""
expected = datetime.datetime(2019, 4, 12, 0, 0, tzinfo=timezone.utc)
item = models.ImportItem.objects.create(
index=1,
job=self.job,
data={},
normalized_data={
"isbn_13": '="9780356506999"',
"shelf": "reading",
"date_added": "2019/04/09",
"date_finished": "2019/04/12",
},
)
self.assertEqual(item.date_read, expected)
def test_currently_reading_reads(self):
"""infer currently reading dates where available"""
expected = [
models.ReadThrough(
start_date=datetime.datetime(2019, 4, 9, 0, 0, tzinfo=timezone.utc)
)
]
item = models.ImportItem.objects.create(
index=1,
job=self.job,
data={},
normalized_data={
"isbn_13": '="9780356506999"',
"shelf": "reading",
"date_added": "2019/04/09",
},
)
self.assertEqual(item.reads[0].start_date, expected[0].start_date)
self.assertIsNone(item.reads[0].finish_date)
def test_read_reads(self):
"""infer read dates where available"""
item = models.ImportItem.objects.create(
index=1,
job=self.job,
data={},
normalized_data={
"isbn_13": '="9780356506999"',
"shelf": "reading",
"date_added": "2019/04/09",
"date_finished": "2019/04/12",
},
)
self.assertEqual(
item.reads[0].start_date,
datetime.datetime(2019, 4, 9, 0, 0, tzinfo=timezone.utc),
)
self.assertEqual(
item.reads[0].finish_date,
datetime.datetime(2019, 4, 12, 0, 0, tzinfo=timezone.utc),
)
def test_unread_reads(self):
"""handle books with no read dates"""
expected = []
item = models.ImportItem.objects.create(
index=1,
job=self.job,
data={},
normalized_data={
"isbn_13": '="9780356506999"',
"shelf": "reading",
},
)
self.assertEqual(item.reads, expected)
@responses.activate
def test_get_book_from_identifier(self):
"""search and load books by isbn (9780356506999)"""
item = models.ImportItem.objects.create(
index=1,
job=self.job,
data={},
normalized_data={
"isbn_13": '="9780356506999"',
},
)
connector_info = models.Connector.objects.create(
identifier="openlibrary.org",
name="OpenLibrary",
connector_file="openlibrary",
base_url="https://openlibrary.org",
books_url="https://openlibrary.org",
covers_url="https://covers.openlibrary.org",
search_url="https://openlibrary.org/search?q=",
priority=3,
)
connector = connector_manager.load_connector(connector_info)
result = SearchResult(
title="Test Result",
key="https://openlibrary.org/works/OL1234W",
author="<NAME>",
year="1980",
connector=connector,
)
datafile = pathlib.Path(__file__).parent.joinpath("../data/ol_edition.json")
bookdata = json.loads(datafile.read_bytes())
responses.add(
responses.GET,
"https://openlibrary.org/works/OL1234W",
json=bookdata,
status=200,
)
responses.add(
responses.GET,
"https://openlibrary.org/works/OL15832982W",
json=bookdata,
status=200,
)
responses.add(
responses.GET,
"https://openlibrary.org/authors/OL382982A",
json={"name": "<NAME>"},
status=200,
)
with patch("bookwyrm.connectors.abstract_connector.load_more_data.delay"):
with patch(
"bookwyrm.connectors.connector_manager.first_search_result"
) as search:
search.return_value = result
with patch(
"bookwyrm.connectors.openlibrary.Connector.get_authors_from_data"
):
book = item.get_book_from_identifier()
self.assertEqual(book.title, "Sabriel")
|
src/datashare/azext_datashare/vendored_sdks/datashare/aio/_data_share_management_client_async.py | Mannan2812/azure-cli-extensions | 207 | 11180808 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional
from azure.core import AsyncPipelineClient
from msrest import Deserializer, Serializer
from ._configuration_async import DataShareManagementClientConfiguration
from .operations_async import AccountOperations
from .operations_async import ConsumerInvitationOperations
from .operations_async import DataSetOperations
from .operations_async import DataSetMappingOperations
from .operations_async import InvitationOperations
from .operations_async import OperationOperations
from .operations_async import ShareOperations
from .operations_async import ProviderShareSubscriptionOperations
from .operations_async import ShareSubscriptionOperations
from .operations_async import ConsumerSourceDataSetOperations
from .operations_async import SynchronizationSettingOperations
from .operations_async import TriggerOperations
from .. import models
class DataShareManagementClient(object):
"""Creates a Microsoft.DataShare management client.
:ivar account: AccountOperations operations
:vartype account: data_share_management_client.aio.operations_async.AccountOperations
:ivar consumer_invitation: ConsumerInvitationOperations operations
:vartype consumer_invitation: data_share_management_client.aio.operations_async.ConsumerInvitationOperations
:ivar data_set: DataSetOperations operations
:vartype data_set: data_share_management_client.aio.operations_async.DataSetOperations
:ivar data_set_mapping: DataSetMappingOperations operations
:vartype data_set_mapping: data_share_management_client.aio.operations_async.DataSetMappingOperations
:ivar invitation: InvitationOperations operations
:vartype invitation: data_share_management_client.aio.operations_async.InvitationOperations
:ivar operation: OperationOperations operations
:vartype operation: data_share_management_client.aio.operations_async.OperationOperations
:ivar share: ShareOperations operations
:vartype share: data_share_management_client.aio.operations_async.ShareOperations
:ivar provider_share_subscription: ProviderShareSubscriptionOperations operations
:vartype provider_share_subscription: data_share_management_client.aio.operations_async.ProviderShareSubscriptionOperations
:ivar share_subscription: ShareSubscriptionOperations operations
:vartype share_subscription: data_share_management_client.aio.operations_async.ShareSubscriptionOperations
:ivar consumer_source_data_set: ConsumerSourceDataSetOperations operations
:vartype consumer_source_data_set: data_share_management_client.aio.operations_async.ConsumerSourceDataSetOperations
:ivar synchronization_setting: SynchronizationSettingOperations operations
:vartype synchronization_setting: data_share_management_client.aio.operations_async.SynchronizationSettingOperations
:ivar trigger: TriggerOperations operations
:vartype trigger: data_share_management_client.aio.operations_async.TriggerOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: azure.core.credentials.TokenCredential
:param subscription_id: The subscription identifier.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = DataShareManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.account = AccountOperations(
self._client, self._config, self._serialize, self._deserialize)
self.consumer_invitation = ConsumerInvitationOperations(
self._client, self._config, self._serialize, self._deserialize)
self.data_set = DataSetOperations(
self._client, self._config, self._serialize, self._deserialize)
self.data_set_mapping = DataSetMappingOperations(
self._client, self._config, self._serialize, self._deserialize)
self.invitation = InvitationOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operation = OperationOperations(
self._client, self._config, self._serialize, self._deserialize)
self.share = ShareOperations(
self._client, self._config, self._serialize, self._deserialize)
self.provider_share_subscription = ProviderShareSubscriptionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.share_subscription = ShareSubscriptionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.consumer_source_data_set = ConsumerSourceDataSetOperations(
self._client, self._config, self._serialize, self._deserialize)
self.synchronization_setting = SynchronizationSettingOperations(
self._client, self._config, self._serialize, self._deserialize)
self.trigger = TriggerOperations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "DataShareManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
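# Usage sketch (not part of the generated client). The async credential class comes from
# azure-identity; the specific operation-group methods to call are intentionally left open
# here, since only the operation-group attribute names documented above are known:
#
#   from azure.identity.aio import DefaultAzureCredential
#
#   async def example():
#       async with DataShareManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#           # operation groups such as client.account, client.share or client.trigger are
#           # available on the client; call whichever operation your scenario needs
#           ...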
|
thespian/test/test_HAConvention.py | godaddy/Thespian | 210 | 11180810 |
from thespian.test import *
from time import sleep
import pytest
from thespian.actors import *
from datetime import timedelta
class PreRegActor(ActorTypeDispatcher):
def receiveMsg_str(self, regaddr, sender):
self.preRegisterRemoteSystem(regaddr, {})
self.send(sender, 'Registered')
@pytest.fixture(params=['simpleSystemBase',
'multiprocQueueBase',
'multiprocUDPBase',
'multiprocTCPBase',
'multiprocTCPBase-AdminRouting',
'multiprocTCPBase-AdminRoutingTXOnly',
])
def testsystems(request):
sysbase = request.param.partition('-')[0]
adminRouting = request.param.endswith('-AdminRouting')
txOnly = request.param.endswith('-AdminRoutingTXOnly')
victoria_port = get_free_admin_port()
leicester_port = get_free_admin_port()
picadilly_port = get_free_admin_port()
tottenham_port = get_free_admin_port()
convaddrs = [ 'localhost:%d' % victoria_port,
'localhost:%d' % leicester_port,
'localhost:%d' % picadilly_port,
# tottenham cannot be a leader
]
basecaps = { 'Convention Address.IPv4': convaddrs,
'Admin Routing': adminRouting,
}
victoria_caps = basecaps.copy()
victoria_caps.update({ 'Cyan': 19,
'Yellow': 11,
'Green': 11,
'Admin Port': victoria_port,
})
leicester_caps = basecaps.copy()
leicester_caps.update({ 'Blue': 4,
'Black': 8,
'Admin Port': leicester_port,
})
picadilly_caps = basecaps.copy()
picadilly_caps.update({ 'Blue': 6,
'Brown': 12,
'Admin Port': picadilly_port,
})
tottenham_caps = basecaps.copy()
tottenham_caps.update({ 'Brown': 7, 'Red': 10,
'Admin Port': tottenham_port,
})
victoria = ActorSystem(systemBase=sysbase,
transientUnique=True,
logDefs=simpleActorTestLogging(),
capabilities=victoria_caps)
victoria.base_name = request.param
victoria.port_num = victoria_port
leicester = ActorSystem(systemBase=sysbase,
transientUnique=True,
logDefs=simpleActorTestLogging(),
capabilities=leicester_caps)
leicester.base_name = request.param
leicester.port_num = leicester_port
picadilly = ActorSystem(systemBase=sysbase,
transientUnique=True,
logDefs=simpleActorTestLogging(),
capabilities=picadilly_caps)
picadilly.base_name = request.param
picadilly.port_num = picadilly_port
tottenham = ActorSystem(systemBase=sysbase,
transientUnique=True,
logDefs=simpleActorTestLogging(),
capabilities=tottenham_caps)
tottenham.base_name = request.param
tottenham.port_num = tottenham_port
request.addfinalizer(lambda victoria=victoria, leicester=leicester,
picadilly=picadilly, tottenham=tottenham:
tottenham.shutdown() or
leicester.shutdown() or
picadilly.shutdown() or
victoria.shutdown())
if txOnly:
assert 'Registered' == victoria.ask(victoria.createActor(PreRegActor),
'localhost:%d'%victoria.port_num,
timedelta(seconds=3))
assert 'Registered' == leicester.ask(leicester.createActor(PreRegActor),
'localhost:%d'%leicester.port_num,
timedelta(seconds=3))
assert 'Registered' == picadilly.ask(picadilly.createActor(PreRegActor),
'localhost:%d'%picadilly.port_num,
timedelta(seconds=3))
assert 'Registered' == tottenham.ask(tottenham.createActor(PreRegActor),
'localhost:%d'%tottenham.port_num,
timedelta(seconds=3))
sleep(1.25) # allow all systems to join the Convention
return convaddrs, victoria, leicester, picadilly, tottenham
class Sean(Actor):
@staticmethod
def actorSystemCapabilityCheck(capabilities, actorRequirements):
return (capabilities.get('Blue', 0) +
capabilities.get('Green', 0)) > 3;
def receiveMessage(self, message, sender):
if isinstance(message, str):
self.send(sender, '%s is not enough' % message)
class Roger(Actor):
@staticmethod
def actorSystemCapabilityCheck(capabilities, actorRequirements):
return capabilities.get('Cyan', 0) > 0
def receiveMessage(self, message, sender):
if isinstance(message, str):
self.send(sender, "Don't count on it, %s" % message)
class M(Actor):
@staticmethod
def actorSystemCapabilityCheck(capabilities, actorRequirements):
return capabilities.get('Red', 0) > 0
def receiveMessage(self, message, sender):
if isinstance(message, str):
if message == 'Sean':
self.send(sender, self.createActor(Sean))
if message == 'Roger':
self.send(sender, self.createActor(Roger))
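# Note added for clarity (not part of the original test module): Thespian calls each actor
# class's actorSystemCapabilityCheck() to decide which ActorSystem in the convention may host
# it. In this fixture Sean needs Blue+Green > 3, Roger needs Cyan, and M needs Red, so M can
# only be created on the tottenham system, the only one advertising a 'Red' capability.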
class TestFuncHAConvention():
def test01_systems_can_start(self, testsystems):
convaddrs, victoria, leicester, picadilly, tottenham = testsystems
actor_system_unsupported(victoria,
'simpleSystemBase', 'multiprocQueueBase')
pass
def test02_actors_can_start(self, testsystems):
convaddrs, victoria, leicester, picadilly, tottenham = testsystems
actor_system_unsupported(victoria,
'simpleSystemBase', 'multiprocQueueBase')
sean = victoria.createActor(Sean)
roger = victoria.createActor(Roger)
m = picadilly.createActor(M)
sleep(1) # wait for things to settle
r = victoria.ask(sean, "diamonds", 0.25)
assert r == "diamonds is not enough"
r = victoria.ask(roger, "zorin", 0.25)
assert r == "Don't count on it, zorin"
bond1 = leicester.ask(m, "Sean", 0.25)
assert bond1
r = leicester.ask(bond1, "forever", 0.25)
assert r == "forever is not enough"
bond2 = leicester.ask(m, "Roger", 0.25)
assert bond2
r = leicester.ask(bond2, "jaws", 0.25)
assert r == "Don't count on it, jaws"
def test03_actor_create_failure_on_leader_exit(self, testsystems):
convaddrs, victoria, leicester, picadilly, tottenham = testsystems
actor_system_unsupported(victoria,
'simpleSystemBase', 'multiprocQueueBase')
sean = victoria.createActor(Sean)
roger = victoria.createActor(Roger)
m = picadilly.createActor(M)
sleep(1) # wait for things to settle
bond1 = leicester.ask(m, "Sean", 0.25)
assert bond1
r = leicester.ask(bond1, "forever", 0.25)
assert r == "forever is not enough"
bond2 = leicester.ask(m, "Roger", 0.25)
assert bond2
r = leicester.ask(bond2, "jaws", 0.25)
assert r == "Don't count on it, jaws"
victoria.shutdown()
sleep(2)
bond3 = leicester.ask(m, "Sean", 0.25)
assert bond3
r = leicester.ask(bond3, "forever", 0.25)
assert r == "forever is not enough"
bond4 = leicester.ask(m, "Roger", 0.25)
assert (bond4 is None)
def test04_actor_create_on_leader_re_enter(self, testsystems):
convaddrs, victoria, leicester, picadilly, tottenham = testsystems
actor_system_unsupported(victoria,
'simpleSystemBase', 'multiprocQueueBase')
sean = victoria.createActor(Sean)
roger = victoria.createActor(Roger)
m = picadilly.createActor(M)
sleep(1) # wait for things to settle
bond1 = leicester.ask(m, "Sean", 0.25)
assert bond1
r = leicester.ask(bond1, "forever", 0.25)
assert r == "forever is not enough"
bond2 = leicester.ask(m, "Roger", 0.25)
assert bond2
r = leicester.ask(bond2, "jaws", 0.25)
assert r == "Don't count on it, jaws"
victoria.shutdown()
sleep(2)
bond3 = leicester.ask(m, "Sean", 0.25)
assert bond3
r = leicester.ask(bond3, "forever", 0.25)
assert r == "forever is not enough"
bond4 = leicester.ask(m, "Roger", 0.25)
assert (bond4 is None)
# --- same as test03 up to this point ---
victoria2 = ActorSystem(systemBase=victoria.base_name.partition('-')[0],
transientUnique=True,
logDefs=simpleActorTestLogging(),
capabilities={ 'Cyan': 12,
'Admin Port': victoria.port_num,
'Convention Address.IPv4': convaddrs
})
victoria2.base_name = victoria.base_name
victoria2.port_num = victoria.port_num
        sleep(2)  # wait for victoria2 to come back up and rejoin the convention as its leader
try:
bond5 = leicester.ask(m, "Sean", 0.25)
assert bond5
r = leicester.ask(bond5, "money", 0.25)
assert r == "money is not enough"
bond6 = leicester.ask(m, "Roger", 0.25)
assert bond6
r = leicester.ask(bond6, "sharks", 0.25)
assert r == "Don't count on it, sharks"
finally:
victoria2.shutdown()
|
hs_core/tests.py | tommac7/hydroshare | 178 | 11181010 |
"""Empty. See tests/ folder."""
|
tests/machines/merge_overlapping_intervals/merge_overlapping_intervals_test.py | ealter/vim_turing_machine | 149 | 11180844 | from unittest import mock
import pytest
import vim_turing_machine.machines.merge_overlapping_intervals.merge_overlapping_intervals
import vim_turing_machine.struct
import vim_turing_machine.turing_machine
from vim_turing_machine.constants import INITIAL_STATE
from vim_turing_machine.constants import NO_FINAL_STATE
from vim_turing_machine.constants import YES_FINAL_STATE
from vim_turing_machine.machines.merge_overlapping_intervals.decode_intervals import decode_intervals
from vim_turing_machine.machines.merge_overlapping_intervals.encode_intervals import encode_intervals
from vim_turing_machine.machines.merge_overlapping_intervals.merge_overlapping_intervals import invert_bit
from vim_turing_machine.machines.merge_overlapping_intervals.merge_overlapping_intervals import invert_direction
from vim_turing_machine.machines.merge_overlapping_intervals.merge_overlapping_intervals import MergeOverlappingIntervalsGenerator
from vim_turing_machine.struct import BACKWARDS
from vim_turing_machine.struct import FORWARDS
from vim_turing_machine.turing_machine import TuringMachine
@pytest.yield_fixture(autouse=True)
def mock_blank_character():
"""Change the blank character to be a space so that it's easier to write test cases."""
with mock.patch.object(
vim_turing_machine.turing_machine,
'BLANK_CHARACTER',
' ',
):
with mock.patch.object(
vim_turing_machine.struct,
'VALID_CHARACTERS',
('0', '1', ' '),
):
with mock.patch.object(
vim_turing_machine.machines.merge_overlapping_intervals.merge_overlapping_intervals,
'BLANK_CHARACTER',
' ',
):
with mock.patch.object(
vim_turing_machine.machines.merge_overlapping_intervals.merge_overlapping_intervals,
'VALID_CHARACTERS',
('0', '1', ' '),
):
yield
@pytest.fixture
def merger():
return MergeOverlappingIntervalsGenerator(num_bits=3)
def run_machine(transitions, tape, initial_position=0, assert_tape_not_changed=False):
machine = TuringMachine(list(transitions), quiet=True)
machine.run(tape[:], max_steps=10000, initial_cursor_position=initial_position)
if assert_tape_not_changed:
assert_tape(machine, tape)
return machine
def assert_cursor_at_end_of_output(machine):
end = len(machine.tape) - 1
while end > 0 and machine.tape[end] == ' ':
end -= 1
assert machine.cursor_position == end
def assert_cursor_is_at_beginning_of_input(machine):
i = 0
while i < len(machine.tape) and machine.tape[i] == ' ':
i += 1
assert machine.cursor_position == i
def assert_tape(machine, expected_tape):
# Ignore any blanks at the end
assert expected_tape == ''.join(machine.tape).rstrip()
def test_invert_bit():
assert invert_bit('0') == '1'
assert invert_bit('1') == '0'
with pytest.raises(AssertionError):
invert_bit('not_valid')
def test_invert_direction():
assert invert_direction(FORWARDS) == BACKWARDS
assert invert_direction(BACKWARDS) == FORWARDS
with pytest.raises(AssertionError):
invert_direction('not_valid')
def test_move_n_bits(merger):
machine = run_machine(
merger.move_n_bits(
initial_state=INITIAL_STATE,
direction=FORWARDS,
final_state=YES_FINAL_STATE,
num_bits=4,
),
tape='01010111',
assert_tape_not_changed=True,
)
assert machine.cursor_position == 4
def test_move_to_blank_spaces(merger):
machine = run_machine(
merger.move_to_blank_spaces(
initial_state=INITIAL_STATE,
direction=FORWARDS,
final_state=YES_FINAL_STATE,
final_character=' ',
final_direction=BACKWARDS,
num_blanks=2,
),
tape='01 1111 10',
assert_tape_not_changed=True,
)
assert machine.cursor_position == 6 # End of the 1111
def test_copy_bits_to_end_of_output(merger):
machine = run_machine(
merger.copy_bits_to_end_of_output(
initial_state=INITIAL_STATE,
num_bits=3,
final_state=YES_FINAL_STATE,
),
tape='10111 01',
)
assert_tape(machine, ' 11 01101')
assert_cursor_at_end_of_output(machine)
@pytest.mark.parametrize('tape, final_state', [
('101 100110', NO_FINAL_STATE),
('101 100100', YES_FINAL_STATE),
('101 111100', YES_FINAL_STATE),
])
def test_compare_two_sequential_numbers(merger, tape, final_state):
machine = run_machine(
merger.compare_two_sequential_numbers(
initial_state=INITIAL_STATE,
greater_than_or_equal_to_state=YES_FINAL_STATE,
less_than_state=NO_FINAL_STATE,
),
tape=tape,
initial_position=len(tape) - 1,
assert_tape_not_changed=True,
)
assert_cursor_at_end_of_output(machine)
assert machine.current_state == final_state
def test_erase_number(merger):
machine = run_machine(
merger.erase_number(
initial_state=INITIAL_STATE,
final_state=YES_FINAL_STATE,
),
tape='100101110',
initial_position=5, # end of 101
)
assert machine.cursor_position == 2
assert_tape(machine, '100 110')
def test_replace_number(merger):
tape = '100101110'
machine = run_machine(
merger.replace_number(
initial_state=INITIAL_STATE,
final_state=YES_FINAL_STATE,
),
tape=tape,
initial_position=len(tape) - 1,
)
assert_tape(machine, '100110')
assert_cursor_at_end_of_output(machine)
@pytest.mark.parametrize('tape, final_state', [
(' 100 101101', NO_FINAL_STATE),
(' 100101101', YES_FINAL_STATE),
])
def test_check_if_there_is_any_input_left(merger, tape, final_state):
machine = run_machine(
merger.check_if_there_is_any_input_left(
initial_state=INITIAL_STATE,
final_state=NO_FINAL_STATE, # The machine exits with Yes if there is no input left.
),
tape=tape,
initial_position=len(tape) - 1,
assert_tape_not_changed=True,
)
assert_cursor_is_at_beginning_of_input(machine)
assert machine.current_state == final_state
@pytest.mark.parametrize('initial_tape, final_tape', [
(' 100 001010001', ' 001100'), # 2nd pair's closing value is larger
(' 010 001110001', ' 001110'), # 2nd pair's closing value is smaller
(' 110 001110001', ' 001110'), # 2nd pair's closing value is equal
])
def test_copy_closing_value_and_merge(merger, initial_tape, final_tape):
machine = run_machine(
merger.copy_closing_value_and_merge(
initial_state=INITIAL_STATE,
final_state=YES_FINAL_STATE,
),
tape=initial_tape,
initial_position=len(initial_tape) - 1,
)
assert_cursor_at_end_of_output(machine)
assert_tape(machine, final_tape)
def test_copy_closing_value_without_merging(merger):
tape = ' 111 000010110'
machine = run_machine(
merger.copy_closing_value_without_merging(
initial_state=INITIAL_STATE,
final_state=YES_FINAL_STATE,
),
tape=tape,
initial_position=len(tape) - 1,
)
assert_cursor_at_end_of_output(machine)
assert_tape(machine, ' 000010110111')
@pytest.mark.parametrize(
'initial_intervals, final_intervals',
[
(
[[0, 1]],
[[0, 1]],
),
(
[[0, 1], [5, 6]],
[[0, 1], [5, 6]],
),
(
[[0, 5], [2, 3]],
[[0, 5]],
),
(
[[1, 3], [3, 4], [4, 5], [6, 7]],
[[1, 5], [6, 7]],
)
]
)
def test_merge_overlapping_intervals(merger, initial_intervals, final_intervals):
"""The true integration test!"""
tape = encode_intervals(initial_intervals, num_bits=3)
machine = run_machine(
merger.merge_overlapping_intervals_transitions(),
tape=tape,
)
assert final_intervals == decode_intervals(''.join(machine.tape), num_bits=3)
|
scripts/eval_mesh.py | isabella232/lasr | 128 | 11180847 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os
sys.path.insert(0,'third_party')
sys.path.append(os.path.dirname(os.path.dirname(sys.path[0])))
os.environ["PYOPENGL_PLATFORM"] = "egl" #opengl seems to only work with TPU
import chamfer3D.dist_chamfer_3D
import subprocess
import pytorch3d.ops
import pytorch3d.loss
import imageio
import torch
import glob
import matplotlib.pyplot as plt
import numpy as np
import torch
import cv2
import pdb
import soft_renderer as sr
import argparse
import trimesh
from nnutils.geom_utils import obj_to_cam, pinhole_cam
import pyrender
from pyrender import IntrinsicsCamera,Mesh, Node, Scene,OffscreenRenderer
import configparser
import matplotlib.pyplot as plt
import imageio
parser = argparse.ArgumentParser(description='BADJA')
parser.add_argument('--testdir', default='',
help='path to test dir')
parser.add_argument('--gtdir', default='',
help='path to gt dir')
parser.add_argument('--method', default='lasr',
help='method to evaluate')
args = parser.parse_args()
gt_meshes = [trimesh.load(i, process=False) for i in sorted( glob.glob('%s/*.obj'%(args.gtdir)) )]
if args.method=='vibe' or args.method=='pifuhd':
pred_meshes = [i for i in sorted( glob.glob('%s/*.obj'%(args.testdir)) )]
elif args.method=='lasr':
pred_meshes = [i for i in sorted( glob.glob('%s/pred*.ply'%(args.testdir)),key=lambda x: int(x.split('pred')[1].split('.ply')[0]) )]
elif args.method=='smplify-x':
pred_meshes = [i for i in sorted( glob.glob('%s/*/*.obj'%(args.testdir)) )]
elif args.method=='smalify':
pred_meshes = [i for i in sorted( glob.glob('%s/*/st10*.ply'%(args.testdir)) )]
else:exit()
assert(len(gt_meshes) == len(pred_meshes))
# pytorch3d
from pytorch3d.renderer.mesh import TexturesAtlas, TexturesUV, TexturesVertex
from pytorch3d.structures.meshes import Meshes
from pytorch3d.renderer.mesh.shader import (
BlendParams,
)
from pytorch3d.renderer import (
PointLights,
RasterizationSettings,
MeshRenderer,
MeshRasterizer,
SoftPhongShader,
SoftSilhouetteShader,
)
from pytorch3d.renderer.cameras import OrthographicCameras
device = torch.device("cuda:0")
cameras = OrthographicCameras(device = device)
lights = PointLights(
device=device,
ambient_color=((1.0, 1.0, 1.0),),
diffuse_color=((1.0, 1.0, 1.0),),
specular_color=((1.0, 1.0, 1.0),),
)
renderer_softtex = MeshRenderer(
rasterizer=MeshRasterizer(cameras=cameras, raster_settings=RasterizationSettings(image_size=512,cull_backfaces=True)),
shader=SoftPhongShader(device = device,cameras=cameras, lights=lights)
)
chamLoss = chamfer3D.dist_chamfer_3D.chamfer_3DDist()
cds = []
norms=[]
frames=[]
for i in range(len(gt_meshes)):
# remesh
mesh1 = trimesh.load(pred_meshes[i], process=False)
# load remeshed
if args.method=='lasr':
import subprocess
mesh1.export('tmp/input.obj')
print(subprocess.check_output(['Manifold/build/manifold', 'tmp/input.obj', 'tmp/output.obj', '10000']))
mesh1 = trimesh.load('tmp/output.obj')
mesh2 = gt_meshes[i]
trimesh.repair.fix_inversion(mesh1)
trimesh.repair.fix_inversion(mesh2)
X0 = torch.Tensor(mesh1.vertices[None] ).cuda()
Y0 = torch.Tensor(mesh2.vertices[None] ).cuda()
## top down view
#theta = -3*np.pi/9
#init_pose = torch.Tensor([[1,0,0],[0,np.cos(theta),-np.sin(theta)],[0,np.sin(theta),np.cos(theta)]]).cuda()
#X0[0] = X0.matmul(init_pose)
#Y0[0] = Y0.matmul(init_pose)
## rotateview
#theta = 9*np.pi/9
#init_pose = torch.Tensor([[np.cos(theta),0,-np.sin(theta)],[0,1,0],[np.sin(theta),0,np.cos(theta)]]).cuda()
#X0[0] = X0.matmul(init_pose)
if args.method=='lasr':
cam = np.loadtxt('%s/cam%d.txt'%(args.testdir,i))
Rmat = torch.Tensor(cam[None,:3,:3]).cuda()
X0 = X0.matmul(Rmat)
elif args.method=='smalify':
X0[:,:,1:] *= -1
X0[:,:,1:] *= -1
if 'sdog' in args.testdir or 'shorse' in args.testdir or 'spot' in args.testdir or 'sgolem' in args.testdir:
Y0[:,:,1:] *= -1
# normalize to have extent 10
Y0 = Y0 - Y0.mean(1,keepdims=True)
max_dis = (Y0 - Y0.permute(1,0,2)).norm(2,-1).max()
Y0 = 10* Y0 / max_dis
X0 = X0 - X0.mean(1,keepdims=True)
if args.method=='pifuhd' or args.method=='lasr':
meshtmp = pytorch3d.structures.meshes.Meshes(verts=X0, faces=torch.Tensor(mesh1.faces[None]).cuda())
Xtmp = pytorch3d.ops.sample_points_from_meshes(meshtmp, 10000)
max_dis = (Xtmp - Xtmp.permute(1,0,2)).norm(2,-1).max()
else:
max_dis = (X0 - X0.permute(1,0,2)).norm(2,-1).max()
X0 = 10* X0 / max_dis
meshx = pytorch3d.structures.meshes.Meshes(verts=X0, faces=torch.Tensor(mesh1.faces[None]).cuda())
meshy = pytorch3d.structures.meshes.Meshes(verts=Y0, faces=torch.Tensor(mesh2.faces[None]).cuda())
X = pytorch3d.ops.sample_points_from_meshes(meshx, 10000)
Y = pytorch3d.ops.sample_points_from_meshes(meshy, 10000)
sol1 = pytorch3d.ops.iterative_closest_point(X,Y,estimate_scale=False,max_iterations=10000)
#sol2 = pytorch3d.ops.iterative_closest_point(sol1.Xt,Y,estimate_scale=True,max_iterations=10000)
X0 = (sol1.RTs.s*X0).matmul(sol1.RTs.R)+sol1.RTs.T[:,None]
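    # Comment added for clarity: iterative_closest_point aligns the points sampled from the
    # prediction to the ground-truth samples, and the recovered similarity transform
    # (scale s, rotation R, translation T) is then applied to the full predicted vertex set X0
    # before the Chamfer-distance and normal metrics below are computed.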
# evaluation
meshx = pytorch3d.structures.meshes.Meshes(verts=X0, faces=torch.Tensor(mesh1.faces[None]).cuda())
meshy = pytorch3d.structures.meshes.Meshes(verts=Y0, faces=torch.Tensor(mesh2.faces[None]).cuda())
X, nx= pytorch3d.ops.sample_points_from_meshes(meshx, 10000,return_normals=True)
Y, ny= pytorch3d.ops.sample_points_from_meshes(meshy, 10000,return_normals=True)
cd,norm = pytorch3d.loss.chamfer_distance(X,Y, x_normals=nx,y_normals=ny)
raw_cd,_,_,_ = chamLoss(X,Y0) # this returns distance squared
# error render
cm = plt.get_cmap('plasma')
color_cd = torch.Tensor(cm(2*np.asarray(raw_cd.cpu()[0]))).cuda()[:,:3][None]
verts = Y0/(1.05*Y0.abs().max()); verts[:,:,0] *= -1; verts[:,:,-1] *= -1; verts[:,:,-1] -= (verts[:,:,-1].min()-1)
mesh = Meshes(verts=verts, faces=torch.Tensor(mesh2.faces[None]).cuda(),textures=TexturesVertex(verts_features=color_cd))
errimg = renderer_softtex(mesh)[0,:,:,:3]
# shape render
color_shape = torch.zeros_like(color_cd); color_shape += 0.5
mesh = Meshes(verts=verts, faces=torch.Tensor(mesh2.faces[None]).cuda(),textures=TexturesVertex(verts_features=color_shape))
imgy = renderer_softtex(mesh)[0,:,:,:3]
# shape render
color_shape = torch.zeros_like(X0); color_shape += 0.5
verts = X0/(1.05*Y0.abs().max()); verts[:,:,0] *= -1; verts[:,:,-1] *= -1; verts[:,:,-1] -= (verts[:,:,-1].min()-1)
mesh = Meshes(verts=verts, faces=torch.Tensor(mesh1.faces[None]).cuda(),textures=TexturesVertex(verts_features=color_shape))
imgx = renderer_softtex(mesh)[0,:,:,:3]
img = np.clip(255*np.asarray(torch.cat([imgy, imgx,errimg],1).cpu()),0,255).astype(np.uint8)
#cv2.imwrite('%s/cd-%06d.png'%(args.testdir,i),img[:,:,::-1])
cv2.imwrite('%s/gt-%06d.png'%(args.testdir,i),img[:,:512,::-1])
cv2.imwrite('%s/pd-%06d.png'%(args.testdir,i),img[:,512:1024,::-1])
cv2.imwrite('%s/cd-%06d.png'%(args.testdir,i),img[:,1024:,::-1])
#trimesh.Trimesh(vertices=np.asarray(Y0[0].cpu()/10), faces=mesh2.faces,vertex_colors=np.asarray(color_cd[0].cpu())).export('0.obj')
cds.append(np.asarray(cd.cpu()))
norms.append(np.asarray(norm.cpu()))
frames.append(img)
print('%04d: %.2f, %.2f'%(i, cd,1-norm))
print('ALL: %.2f, %.2f'%(np.mean(cds),1-np.mean(norms)))
imageio.mimsave('tmp/output.gif', frames, duration=5./len(frames))
|
tempest/tests/lib/services/identity/v2/test_tenants_client.py | mail2nsrajesh/tempest | 254 | 11180861 | # Copyright 2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.identity.v2 import tenants_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestTenantsClient(base.BaseServiceTest):
FAKE_TENANT_INFO = {
"tenant": {
"id": "1",
"name": "test",
"description": "test_description",
"enabled": True
}
}
FAKE_LIST_TENANTS = {
"tenants": [
{
"id": "1",
"name": "test",
"description": "test_description",
"enabled": True
},
{
"id": "2",
"name": "test2",
"description": "test2_description",
"enabled": True
}
]
}
def setUp(self):
super(TestTenantsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = tenants_client.TenantsClient(fake_auth,
'identity', 'regionOne')
def _test_create_tenant(self, bytes_body=False):
self.check_service_client_function(
self.client.create_tenant,
'tempest.lib.common.rest_client.RestClient.post',
self.FAKE_TENANT_INFO,
bytes_body,
name="test",
description="test_description")
def _test_show_tenant(self, bytes_body=False):
self.check_service_client_function(
self.client.show_tenant,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_TENANT_INFO,
bytes_body,
tenant_id="1")
def _test_update_tenant(self, bytes_body=False):
self.check_service_client_function(
self.client.update_tenant,
'tempest.lib.common.rest_client.RestClient.post',
self.FAKE_TENANT_INFO,
bytes_body,
tenant_id="1",
name="test",
description="test_description")
def _test_list_tenants(self, bytes_body=False):
self.check_service_client_function(
self.client.list_tenants,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIST_TENANTS,
bytes_body)
def _test_list_tenant_users(self, bytes_body=False):
self.check_service_client_function(
self.client.list_tenant_users,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIST_TENANTS,
bytes_body,
tenant_id="1")
def test_create_tenant_with_str_body(self):
self._test_create_tenant()
def test_create_tenant_with_bytes_body(self):
self._test_create_tenant(bytes_body=True)
def test_show_tenant_with_str_body(self):
self._test_show_tenant()
def test_show_tenant_with_bytes_body(self):
self._test_show_tenant(bytes_body=True)
def test_update_tenant_with_str_body(self):
self._test_update_tenant()
def test_update_tenant_with_bytes_body(self):
self._test_update_tenant(bytes_body=True)
def test_list_tenants_with_str_body(self):
self._test_list_tenants()
def test_list_tenants_with_bytes_body(self):
self._test_list_tenants(bytes_body=True)
def test_delete_tenant(self):
self.check_service_client_function(
self.client.delete_tenant,
'tempest.lib.common.rest_client.RestClient.delete',
{},
tenant_id="1",
status=204)
def test_list_tenant_users_with_str_body(self):
self._test_list_tenant_users()
def test_list_tenant_users_with_bytes_body(self):
self._test_list_tenant_users(bytes_body=True)
|
stores/apps/payments/taxes.py | diassor/CollectorCity-Market-Place | 135 | 11180890 | import decimal, logging
from django.db import models
from shops.models import Shop
from preferences.models import Preference, TaxState
class TaxCalculator():
@classmethod
def get_tax(cls, shop, state, city=None):
try:
tax_rate = TaxState.objects.filter(shop=shop).filter(state=state).get()
logging.debug("Shop %s have a tax rate of %s%% for state %s" % (shop, tax_rate.tax, state))
return tax_rate.tax / 100
except TaxState.DoesNotExist:
logging.debug("No tax specified for state %s on shop %s" % (state, shop))
return decimal.Decimal("0.0") |
tests/data/test_mixins.py | arvindmuralie77/gradsflow | 253 | 11180907 | # Copyright (c) 2021 GradsFlow. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from gradsflow.data.mixins import DataMixin
from gradsflow.utility import default_device
class DataTest(DataMixin):
device = default_device()
datamixin = DataTest()
def test_send_to_device():
# data as primitive
assert datamixin.send_to_device(1) == 1
assert datamixin.send_to_device(1.5) == 1.5
# data as Tensor
x = torch.randn(4, 1)
assert isinstance(datamixin.send_to_device(x), torch.Tensor)
# data as list
batch = torch.randn(4, 16), [1] * 4
assert datamixin.send_to_device(batch)
# data as dict
batch = {"inputs": torch.randn(4, 16), "targets": [1] * 4}
assert datamixin.send_to_device(batch)
# catch error
with pytest.raises(NotImplementedError):
datamixin.send_to_device(set(batch))
|
clients/python_client/simplest_client.py | jnclt/simple_tensorflow_serving | 771 | 11180925 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import requests
def main():
endpoint = "http://127.0.0.1:8500"
print("Request for raw model signature")
input_data = {"data": {"keys": [1, 2]}}
result = requests.post(endpoint, json=input_data)
print(result.text)
print("Request with preprocess")
input_data = {"preprocess": True, "data": {"keys": ["你好世界", "机器学习预处理模型"]}}
result = requests.post(endpoint, json=input_data)
print(result.text)
print("Request with preprocess and postprocess")
input_data = {
"preprocess": True,
"postprocess": True,
"data": {
"keys": ["你好世界", "机器学习预处理模型"]
}
}
result = requests.post(endpoint, json=input_data)
print(result.text)
if __name__ == "__main__":
main()
|
examples/swat-s1/plc3.py | pgaulon/minicps | 119 | 11180952 |
"""
swat-s1 plc3
"""
from minicps.devices import PLC
from utils import PLC3_DATA, STATE, PLC3_PROTOCOL
from utils import PLC_SAMPLES, PLC_PERIOD_SEC
from utils import IP
import time
PLC1_ADDR = IP['plc1']
PLC2_ADDR = IP['plc2']
PLC3_ADDR = IP['plc3']
LIT301_3 = ('LIT301', 3)
class SwatPLC3(PLC):
def pre_loop(self, sleep=0.1):
print 'DEBUG: swat-s1 plc3 enters pre_loop'
print
time.sleep(sleep)
def main_loop(self):
"""plc3 main loop.
- read UF tank level from the sensor
- update internal enip server
"""
print 'DEBUG: swat-s1 plc3 enters main_loop.'
print
count = 0
while(count <= PLC_SAMPLES):
lit301 = float(self.get(LIT301_3))
print "DEBUG PLC3 - get lit301: %f" % lit301
self.send(LIT301_3, lit301, PLC3_ADDR)
time.sleep(PLC_PERIOD_SEC)
count += 1
print 'DEBUG swat plc3 shutdown'
if __name__ == "__main__":
# notice that memory init is different form disk init
plc3 = SwatPLC3(
name='plc3',
state=STATE,
protocol=PLC3_PROTOCOL,
memory=PLC3_DATA,
disk=PLC3_DATA)
|
src/dataloaders/prepare/eeg/constants.py | dumpmemory/state-spaces | 513 | 11180981 | INCLUDED_CHANNELS = [
"EEG FP1",
"EEG FP2",
"EEG F3",
"EEG F4",
"EEG C3",
"EEG C4",
"EEG P3",
"EEG P4",
"EEG O1",
"EEG O2",
"EEG F7",
"EEG F8",
"EEG T3",
"EEG T4",
"EEG T5",
"EEG T6",
"EEG FZ",
"EEG CZ",
"EEG PZ",
]
INCLUDED_CHANNELS_STANFORD = [
"EEG Fp1",
"EEG Fp2",
"EEG F3",
"EEG F4",
"EEG C3",
"EEG C4",
"EEG P3",
"EEG P4",
"EEG O1",
"EEG O2",
"EEG F7",
"EEG F8",
"EEG T3",
"EEG T4",
"EEG T5",
"EEG T6",
"EEG Fz",
"EEG Cz",
"EEG Pz",
]
FREQUENCY = 200 |
perma_web/perma/tests/test_urls.py | rachelaus/perma | 317 | 11181010 | from django.urls import reverse
from perma.urls import urlpatterns
from .utils import PermaTestCase
class UrlsTestCase(PermaTestCase):
def test_url_status_codes(self):
"""
A really simple test for 500 errors. We test all views that don't
take parameters (it's not easy to guess what params they want).
"""
exclude = {
'archive_error': 'because it returns 500 by default'
}
for urlpattern in urlpatterns:
if '?P<' not in urlpattern.pattern._regex \
and urlpattern.name \
and urlpattern.name not in exclude:
response = self.client.get(reverse(urlpattern.name))
self.assertNotEqual(response.status_code, 500)
response = self.client.post(reverse(urlpattern.name))
self.assertNotEqual(response.status_code, 500)
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/a/assignment_from_no_return.py | ciskoinch8/vimrc | 463 | 11181032 | # pylint: disable=missing-docstring
def some_func():
pass
def decorate(func):
"""Decorate *fn* to return ``self`` to enable chained method calls."""
def wrapper(self, *args, **kw):
func(self, *args, **kw)
return 42
return wrapper
class Class:
def some_method(self):
pass
@decorate
def some_other_decorated_method(self):
pass
def some_other_method(self):
value = self.some_method() # [assignment-from-no-return]
other_value = self.some_other_decorated_method()
return value + other_value
VALUE = some_func() # [assignment-from-no-return]
class Parent:
"""Parent class"""
def compute(self):
"""This isn't supported by all child classes"""
# pylint: disable=no-self-use
raise ValueError('Not supported for this object')
def test(self):
"""Test"""
result = self.compute()
return result
class Child(Parent):
"""Child class"""
def compute(self):
"""This is supported for this child class"""
return 42
|
app/demo/countries/migrations/0018_person_time.py | sesostris/django-material-admin | 270 | 11181074 | # Generated by Django 3.0 on 2020-01-03 08:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('countries', '0017_country1_country2_country3_country4_country5_country6'),
]
operations = [
migrations.AddField(
model_name='person',
name='time',
field=models.TimeField(null=True, verbose_name='Birth Time'),
),
]
|
tools/mo/openvino/tools/mo/front/mxnet/crop_ext.py | ryanloney/openvino-1 | 1,127 | 11181087 | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from openvino.tools.mo.ops.crop import Crop
class CropFrontExtractor(FrontExtractorOp):
op = 'Crop'
enabled = True
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
offset = attrs.tuple("offset", int, ())
axis = attrs.int("num_args", 0)
node_attrs = {
'axis': axis,
'offset': list(offset),
'dim': None,
}
Crop.update_node_stat(node, node_attrs)
return cls.enabled
|
recommends/storages/mongodb/managers.py | coagulant/django-recommends | 142 | 11181109 |
from recommends.managers import DictStorageManager
class MongoStorageManager(DictStorageManager):
def filter_for_object(self, obj):
ctype_id = self.get_ctype_id_for_obj(obj)
return {'object_ctype': ctype_id, 'object_id': obj.id}
def filter_for_related_object(self, related_obj):
ctype_id = self.get_ctype_id_for_obj(related_obj)
return {'related_object_ctype': ctype_id, 'related_object_id': related_obj.id}
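# Illustrative sketch (hypothetical values): for a model instance with pk 42 whose content
# type id resolves to 7, filter_for_object(obj) would return
# {'object_ctype': 7, 'object_id': 42}, and filter_for_related_object() the analogous
# 'related_object_*' dict used to query the Mongo-backed similarity/suggestion collections.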
|
data_management/importers/idaho-camera-traps.py | dnarqq/WildHack | 402 | 11181141 | #
# idaho-camera-traps.py
#
# Prepare the Idaho Camera Traps dataset for release on LILA.
#
#%% Imports and constants
import json
import os
import numpy as np
import dateutil
import pandas as pd
import datetime
import shutil
from tqdm import tqdm
from bson import json_util
from collections import defaultdict
# Multi-threading for .csv file comparison and image existence validation
from multiprocessing.pool import Pool as Pool
from multiprocessing.pool import ThreadPool as ThreadPool
n_threads = 14
n_threads_file_copy = 20
input_base = r'i:\idfg-images'
output_base = r'h:\idaho-camera-traps'
output_image_base = r'j:\idaho-camera-traps-output'
assert os.path.isdir(input_base)
assert os.path.isdir(output_base)
assert os.path.isdir(output_image_base)
output_image_base_public = os.path.join(output_image_base,'public')
output_image_base_private = os.path.join(output_image_base,'private')
# We are going to map the original filenames/locations to obfuscated strings, but once
# we've done that, we will re-use the mappings every time we run this script.
force_generate_mappings = False
# This is the file to which mappings get saved
id_mapping_file = os.path.join(output_base,'id_mapping.json')
# The maximum time (in seconds) between images within which two images are considered the
# same sequence.
max_gap_within_sequence = 30
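# Illustrative sketch (not part of the original pipeline): the grouping rule applied in
# csv_to_sequences() below is equivalent to starting a new sequence whenever the gap to the
# previous image exceeds this threshold. For example, images timestamped 10:00:00, 10:00:20
# and 10:01:30 form two sequences ([img1, img2] and [img3]), because the 20-second gap is
# within max_gap_within_sequence while the 70-second gap is not.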
# This is a two-column file, where each line is [string in the original metadata],[category name we want to map it to]
category_mapping_file = os.path.join(output_base,'category_mapping.csv')
# The output file, using the original strings
output_json_original_strings = os.path.join(output_base,'idaho-camera-traps-original-strings.json')
# The output file, using obfuscated strings for everything but filenames
output_json_remapped_ids = os.path.join(output_base,'idaho-camera-traps-remapped-ids.json')
# The output file, using obfuscated strings and obfuscated filenames
output_json = os.path.join(output_base,'idaho-camera-traps.json')
# One time only, I ran MegaDetector on the whole dataset...
megadetector_results_file = r'H:\idaho-camera-traps\idfg-2021-07-26idaho-camera-traps_detections.json'
# ...then set aside any images that *may* have contained humans that had not already been
# annotated as such. Those went in this folder...
human_review_folder = os.path.join(output_base,'human_review')
# ...and the ones that *actually* had humans (identified via manual review) got
# copied to this folder...
human_review_selection_folder = os.path.join(output_base,'human_review_selections')
# ...which was enumerated to this text file, which is a manually-curated list of
# images that were flagged as human.
human_review_list = os.path.join(output_base,'human_flagged_images.txt')
# Unopinionated .json conversion of the .csv metadata
sequence_info_cache = os.path.join(output_base,'sequence_info.json')
valid_opstates = ['normal','maintenance','snow on lens','foggy lens','foggy weather',
'malfunction','misdirected','snow on lense','poop/slobber','sun','tilted','vegetation obstruction']
opstate_mappings = {'snow on lense':'snow on lens','poop/slobber':'lens obscured','maintenance':'human'}
survey_species_presence_columns = ['elkpresent','deerpresent','prongpresent']
presence_to_count_columns = {
'otherpresent':['MooseAntlerless','MooseCalf','MooseOther','MooseBull','MooseUnkn','BlackBearAdult','BlackBearCub','LionAdult',
'LionKitten','WolfAdult','WolfPup','CattleCow','CattleCalf','other'],
'elkpresent':['ElkSpike','ElkAntlerless','ElkCalf','ElkRaghorn','ElkMatBull','ElkUnkn','ElkPedNub'],
'deerpresent':['MDbuck','MDantlerless','MDfawn','WTDbuck','WTDantlerless','WTDfawn','WTDunkn','MDunkn'],
'prongpresent':['PronghornBuck','PronghornFawn','PHunkn']
}
required_columns = ['File','Folder','Date','Time','otherpresent','other','otherwhat','opstate']
expected_presence_columns = ['elkpresent','deerpresent','prongpresent','humanpresent','otherpresent']
expected_count_columns = set()
for presence_column in presence_to_count_columns.keys():
count_columns = presence_to_count_columns[presence_column]
for count_column in count_columns:
expected_count_columns.add(count_column)
def list_is_sorted(l):
return all(l[i] <= l[i+1] for i in range(len(l)-1))
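# e.g. list_is_sorted([1, 2, 2, 3]) -> True; list_is_sorted([2, 1]) -> False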
#%% List files (images + .csv)
def get_files():
all_files_list = os.path.join(output_base,'all_files.json')
force_file_enumeration = False
if (os.path.isfile(all_files_list) and (not force_file_enumeration)):
print('File list exists, bypassing enumeration')
with open(all_files_list,'r') as f:
all_files = json.load(f)
else:
from pathlib import Path
all_files = []
for path in Path(input_base).rglob('*.*'):
path = str(path)
path = os.path.relpath(path,input_base)
all_files.append(path)
with open(all_files_list,'w') as f:
json.dump(all_files,f,indent=1)
print('Enumerated {} files'.format(len(all_files)))
image_files = [s for s in all_files if (s.lower().endswith('.jpg') or s.lower().endswith('.jpeg'))]
csv_files = [s for s in all_files if (\
(s.lower().endswith('.csv')) and \
('Backups' not in s) and \
('Metadata.csv' not in s) and \
('ExportedDataFiles' not in s) and \
('CSV Files' not in s)
)]
print('{} image files, {} .csv files'.format(len(image_files),len(csv_files)))
# Ignore .csv files in folders with multiple .csv files
# ...which would require some extra work to decipher.
csv_files_to_ignore = []
folder_to_csv_files = defaultdict(list)
# fn = csv_files[0]
for fn in csv_files:
folder_name = os.path.dirname(fn)
folder_to_csv_files[folder_name].append(fn)
for folder_name in folder_to_csv_files.keys():
if len(folder_to_csv_files[folder_name]) > 1:
print('Multiple .csv files for {}:'.format(folder_name))
for csv_file in folder_to_csv_files[folder_name]:
print(csv_file)
csv_files_to_ignore.append(csv_file)
print('')
n_csv_original = len(csv_files)
csv_files = [s for s in csv_files if s not in csv_files_to_ignore]
print('Processing {} of {} csv files'.format(len(csv_files),n_csv_original))
return image_files,csv_files
#%% Parse each .csv file into sequences (function)
# csv_file = csv_files[-1]
def csv_to_sequences(csv_file):
print('Processing {}'.format(csv_file))
csv_file_absolute = os.path.join(input_base,csv_file)
# os.startfile(csv_file_absolute)
sequences = []
# survey = csv_file.split('\\')[0]
# Sample paths from which we need to derive locations:
#
# St.Joe_elk\AM99\Trip 1\100RECNX\TimelapseData.csv
# Beaverhead_elk\AM34\Trip 1\100RECNX\TimelapseData.csv
#
# ClearCreek_mustelids\Winter2015-16\FS-001-P\FS-001-P.csv
# ClearCreek_mustelids\Summer2015\FS-001\FS-001.csv
# ClearCreek_mustelids\Summer2016\IDFG-016\IDFG-016.csv
#
# I:\idfg-images\ClearCreek_mustelids\Summer2016\IDFG-017b
# I:\idfg-images\ClearCreek_mustelids\Summer2016\IDFG-017a
if 'St.Joe_elk' in csv_file or 'Beaverhead_elk' in csv_file:
location_name = '_'.join(csv_file.split('\\')[0:2]).replace(' ','')
else:
assert 'ClearCreek_mustelids' in csv_file
tokens = csv_file.split('\\')
assert 'FS-' in tokens[2] or 'IDFG-' in tokens[2]
location_name = '_'.join([tokens[0],tokens[2]]).replace('-P','')
if location_name.endswith('017a') or location_name.endswith('017b'):
location_name = location_name[:-1]
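    # For example, per the rules above, 'St.Joe_elk\AM99\Trip 1\100RECNX\TimelapseData.csv'
    # yields location 'St.Joe_elk_AM99', and 'ClearCreek_mustelids\Summer2015\FS-001\FS-001.csv'
    # yields 'ClearCreek_mustelids_FS-001'.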
# Load .csv file
df = pd.read_csv(csv_file_absolute)
df['datetime'] = None
df['seq_id'] = None
df['synthetic_frame_number'] = None
# Validate the opstate column
opstates = set(df['opstate'])
for s in opstates:
if isinstance(s,str):
s = s.strip()
if len(s) > 0:
assert s in valid_opstates,'Invalid opstate: {}'.format(s)
column_names = list(df.columns)
for s in required_columns:
assert s in column_names
count_columns = [s for s in column_names if s in expected_count_columns]
presence_columns = [s for s in column_names if s.endswith('present')]
for s in presence_columns:
if s not in expected_presence_columns:
assert 'Unexpected presence column {} in {}'.format(s,csv_file)
for s in expected_presence_columns:
if s not in presence_columns:
assert 'Missing presence column {} in {}'.format(s,csv_file)
if False:
for s in expected_count_columns:
if s not in count_columns:
print('Missing count column {} in {}'.format(s,csv_file))
## Create datetimes
# print('Creating datetimes')
# i_row = 0; row = df.iloc[i_row]
for i_row,row in df.iterrows():
date = row['Date']
time = row['Time']
datestring = date + ' ' + time
dt = dateutil.parser.parse(datestring)
assert dt.year >= 2015 and dt.year <= 2019
df.loc[i_row,'datetime'] = dt
# Make sure data are sorted chronologically
#
# In odd circumstances, they are not... so sort them first, but warn
datetimes = list(df['datetime'])
if not list_is_sorted(datetimes):
print('Datetimes not sorted for {}'.format(csv_file))
df = df.sort_values('datetime')
df.reset_index(drop=True, inplace=True)
datetimes = list(df['datetime'])
assert list_is_sorted(datetimes)
# Debugging when I was trying to see what was up with the unsorted dates
if False:
for i in range(0,len(datetimes)-1):
dt = datetimes[i+1]
prev_dt = datetimes[i]
delta = dt - prev_dt
assert delta >= datetime.timedelta(0)
## Parse into sequences
# print('Creating sequences')
current_sequence_id = None
next_frame_number = 0
previous_datetime = None
sequence_id_to_rows = defaultdict(list)
# i_row = 0; row = df.iloc[i_row]
for i_row,row in df.iterrows():
dt = row['datetime']
assert dt is not None and isinstance(dt,datetime.datetime)
        # Start a new sequence if:
        #
        # * We have no previous image timestamp (i.e., this is the first image), or
        # * The gap since the previous image exceeds max_gap_within_sequence
        #
if previous_datetime is None:
delta = None
else:
delta = (dt - previous_datetime).total_seconds()
# Start a new sequence if necessary
if delta is None or delta > max_gap_within_sequence:
next_frame_number = 0
current_sequence_id = location_name + '_seq_' + str(dt) # str(uuid.uuid1())
assert current_sequence_id is not None
sequence_id_to_rows[current_sequence_id].append(i_row)
df.loc[i_row,'seq_id'] = current_sequence_id
df.loc[i_row,'synthetic_frame_number'] = next_frame_number
next_frame_number = next_frame_number + 1
previous_datetime = dt
# ...for each row
location_sequences = list(set(list(df['seq_id'])))
location_sequences.sort()
inconsistent_sequences = []
## Parse labels for each sequence
# sequence_id = location_sequences[0]
for sequence_id in location_sequences:
sequence_row_indices = sequence_id_to_rows[sequence_id]
assert len(sequence_row_indices) > 0
# Row indices in a sequence should be adjacent
if len(sequence_row_indices) > 1:
d = np.diff(sequence_row_indices)
assert(all(d==1))
# sequence_df = df[df['seq_id']==sequence_id]
sequence_df = df.iloc[sequence_row_indices]
## Determine what's present
presence_columns_marked = []
survey_species = []
other_species = []
# Be conservative; assume humans are present in all maintenance images
opstates = set(sequence_df['opstate'])
assert all([ ( (isinstance(s,float)) or (len(s.strip())== 0) or (s.strip() in valid_opstates)) for s in opstates]),\
            'Invalid opstate in: {}'.format(' | '.join(opstates))
for presence_column in presence_columns:
presence_values = list(sequence_df[presence_column])
# The presence columns are *almost* always identical for all images in a sequence
single_presence_value = (len(set(presence_values)) == 1)
# assert single_presence_value
if not single_presence_value:
# print('Warning: presence value for {} is inconsistent for {}'.format(presence_column,sequence_id))
inconsistent_sequences.append(sequence_id)
if any(presence_values):
presence_columns_marked.append(presence_column)
# ...for each presence column
# Tally up the standard (survey) species
survey_species = [s.replace('present','') for s in presence_columns_marked if s != 'otherpresent']
for opstate in opstates:
if not isinstance(opstate,str):
continue
opstate = opstate.strip()
if len(opstate) == 0:
continue
if opstate in opstate_mappings:
opstate = opstate_mappings[opstate]
if (opstate != 'normal') and (opstate not in survey_species):
survey_species.append(opstate)
# If no presence columns are marked, all counts should be zero
if len(presence_columns_marked) == 0:
# count_column = count_columns[0]
for count_column in count_columns:
values = list(set(list(sequence_df[count_column])))
# Occasionally a count gets entered (correctly) without the presence column being marked
# assert len(values) == 1 and values[0] == 0, 'Non-zero counts with no presence columns marked for sequence {}'.format(sequence_id)
if (not(len(values) == 1 and values[0] == 0)):
print('Warning: presence and counts are inconsistent for {}'.format(sequence_id))
# Handle this by virtually checking the "right" box
for presence_column in presence_to_count_columns.keys():
count_columns_this_species = presence_to_count_columns[presence_column]
if count_column in count_columns_this_species:
if presence_column not in presence_columns_marked:
presence_columns_marked.append(presence_column)
# Make sure we found a match
assert len(presence_columns_marked) > 0
# Handle 'other' tags
if 'otherpresent' in presence_columns_marked:
sequence_otherwhats = set()
sequence_comments = set()
for i,r in sequence_df.iterrows():
otherwhat = r['otherwhat']
if isinstance(otherwhat,str):
otherwhat = otherwhat.strip()
if len(otherwhat) > 0:
sequence_otherwhats.add(otherwhat)
comment = r['comment']
if isinstance(comment,str):
comment = comment.strip()
if len(comment) > 0:
sequence_comments.add(comment)
freetext_species = []
for s in sequence_otherwhats:
freetext_species.append(s)
for s in sequence_comments:
freetext_species.append(s)
counted_species = []
otherpresent_columns = presence_to_count_columns['otherpresent']
# column_name = otherpresent_columns[0]
for column_name in otherpresent_columns:
if column_name in sequence_df and column_name != 'other':
column_counts = list(sequence_df[column_name])
column_count_positive = any([c > 0 for c in column_counts])
if column_count_positive:
# print('Found non-survey counted species column: {}'.format(column_name))
counted_species.append(column_name)
# ...for each non-empty presence column
# Very rarely, the "otherpresent" column is checked, but no more detail is available
if not ( (len(freetext_species) > 0) or (len(counted_species) > 0) ):
other_species.append('unknown')
other_species += freetext_species
other_species += counted_species
# ...handling non-survey species
all_species = other_species + survey_species
# Build the sequence data
images = []
# i_row = 0; row = sequence_df.iloc[i_row]
for i_row,row in sequence_df.iterrows():
im = {}
# Only one folder used a single .csv file for two subfolders
if ('RelativePath' in row) and (isinstance(row['RelativePath'],str)) and (len(row['RelativePath'].strip()) > 0):
assert 'IDFG-028' in location_name
im['file_name'] = os.path.join(row['RelativePath'],row['File'])
else:
im['file_name'] = row['File']
im['datetime'] = row['datetime']
images.append(im)
sequence = {}
sequence['csv_source'] = csv_file
sequence['sequence_id'] = sequence_id
sequence['images'] = images
sequence['species_present'] = all_species
sequence['location'] = location_name
sequences.append(sequence)
# ...for each sequence
return sequences
# ...def csv_to_sequences()
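# Shape of the sequence records returned above (reconstructed from the code; values are
# illustrative):
#
#   {
#     'csv_source': 'St.Joe_elk\\AM99\\Trip 1\\100RECNX\\TimelapseData.csv',
#     'sequence_id': 'St.Joe_elk_AM99_seq_2016-06-01 12:00:00',
#     'images': [{'file_name': 'IMG_0001.JPG', 'datetime': datetime.datetime(2016, 6, 1, 12, 0)}, ...],
#     'species_present': ['elk'],
#     'location': 'St.Joe_elk_AM99'
#   }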
#%% Parse each .csv file into sequences (loop)
if __name__ == "__main__":
#%%
import multiprocessing
multiprocessing.freeze_support()
image_files,csv_files = get_files()
#%%
if n_threads == 1:
# i_file = -1; csv_file = csv_files[i_file]
sequences_by_file = []
for i_file,csv_file in enumerate(csv_files):
print('Processing file {} of {}'.format(i_file,len(csv_files)))
sequences = csv_to_sequences(csv_file)
sequences_by_file.append(sequences)
else:
pool = Pool(n_threads)
sequences_by_file = list(pool.imap(csv_to_sequences,csv_files))
#%% Save sequence data
with open(sequence_info_cache,'w') as f:
json.dump(sequences_by_file,f,indent=2,default=json_util.default)
#%% Load sequence data
if False:
#%%
with open(sequence_info_cache,'r') as f:
sequences_by_file = json.load(f,object_hook=json_util.object_hook)
#%% Validate file mapping (based on the existing enumeration)
missing_images = []
image_files_set = set(image_files)
n_images_in_sequences = 0
sequence_ids = set()
# sequences = sequences_by_file[0]
for i_sequences,sequences in enumerate(tqdm(sequences_by_file)):
assert len(sequences) > 0
csv_source = sequences[0]['csv_source']
csv_file_absolute = os.path.join(input_base,csv_source)
csv_folder = os.path.dirname(csv_file_absolute)
assert os.path.isfile(csv_file_absolute)
# sequence = sequences[0]
for i_sequence,sequence in enumerate(sequences):
assert sequence['csv_source'] == csv_source
sequence_id = sequence['sequence_id']
if sequence_id in sequence_ids:
print('Warning: duplicate sequence for {}, creating new sequence'.format(sequence_id))
sequence['sequence_id'] = sequence['sequence_id'] + '_' + str(i_sequences) + '_' + str(i_sequence)
sequence_id = sequence['sequence_id']
assert sequence_id not in sequence_ids
sequence_ids.add(sequence_id)
species_present = sequence['species_present']
images = sequence['images']
for im in images:
n_images_in_sequences += 1
image_file_relative = im['file_name']
# Actually, one folder has relative paths
# assert '\\' not in image_file_relative and '/' not in image_file_relative
image_file_absolute = os.path.join(csv_folder,image_file_relative)
image_file_container_relative = os.path.relpath(image_file_absolute,input_base)
# os.startfile(csv_folder)
# assert os.path.isfile(image_file_absolute)
# found_file = os.path.isfile(image_file_absolute)
found_file = image_file_container_relative in image_files_set
if not found_file:
print('Warning: can\'t find image {}'.format(image_file_absolute))
missing_images.append(image_file_absolute)
# ...for each image
# ...for each sequence
# ...for each .csv file
print('{} of {} images missing ({} on disk)'.format(len(missing_images),n_images_in_sequences,
len(image_files)))
#%% Load manual category mappings
with open(category_mapping_file,'r') as f:
category_mapping_lines = f.readlines()
category_mapping_lines = [s.strip() for s in category_mapping_lines]
category_mappings = {}
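# Each mapping line is '<raw label>,<canonical label(s)>'. The value may be quoted and may
# combine two species; after stripping quotes and replacing commas, combinations use '+'.
# Hypothetical examples (not from the real mapping file):
#   muledeer,mule deer
#   deer_elk,"deer,elk"   -> stored as 'deer+elk'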
for s in category_mapping_lines:
tokens = s.split(',',1)
category_name = tokens[0].strip()
category_value = tokens[1].strip().replace('"','').replace(',','+')
assert ',' not in category_name
assert ',' not in category_value
# The second column is blank when the first column already represents the category name
if len(category_value) == 0:
category_value = category_name
category_mappings[category_name] = category_value
#%% Convert to CCT .json (original strings)
human_flagged_images = []
with open(human_review_list,'r') as f:
human_flagged_images = f.readlines()
human_flagged_images = [s.strip().replace('/','\\') for s in human_flagged_images]
human_flagged_images = set(human_flagged_images)
print('Read {} human flagged images'.format(len(human_flagged_images)))
annotations = []
image_id_to_image = {}
category_name_to_category = {}
# Force the empty category to be ID 0
empty_category_id = 0
empty_category = {}
empty_category['id'] = empty_category_id
empty_category['name'] = 'empty'
category_name_to_category['empty'] = empty_category
human_category_id = 1
human_category = {}
human_category['id'] = human_category_id
human_category['name'] = 'human'
category_name_to_category['human'] = human_category
next_category_id = 2
annotation_ids = set()
if False:
target_folder = r'ClearCreek_mustelids\Summer2015\FS-035'
for sequences in sequences_by_file:
if target_folder in sequences[0]['csv_source']:
break
# For each .csv file...
#
# sequences = sequences_by_file[0]
for sequences in tqdm(sequences_by_file):
# For each sequence...
#
# sequence = sequences[0]
for sequence in sequences:
species_present = sequence['species_present']
species_present = [s.lower().strip().replace(',',';') for s in species_present]
sequence_images = sequence['images']
location = sequence['location'].lower().strip()
sequence_id = sequence['sequence_id']
csv_source = sequence['csv_source']
csv_folder_relative = os.path.dirname(csv_source)
sequence_category_ids = set()
        # Find categories for this sequence
if len(species_present) == 0:
sequence_category_ids.add(0)
assert category_name_to_category['empty']['id'] == list(sequence_category_ids)[0]
else:
# When 'unknown' is used in combination with another label, use that
# label; the "unknown" here doesn't mean "another unknown species", it means
# there is some other unknown property about the main species.
if 'unknown' in species_present and len(species_present) > 1:
assert all([((s in category_mappings) or (s in valid_opstates) or (s in opstate_mappings.values()))\
for s in species_present if s != 'unknown'])
species_present = [s for s in species_present if s != 'unknown']
# category_name_string = species_present[0]
for category_name_string in species_present:
# This piece of text had a lot of complicated syntax in it, and it would have
# been too complicated to handle in a general way
if 'coyotoes' in category_name_string:
# print('Ignoring category {}'.format(category_name_string))
continue
if category_name_string not in category_mappings:
assert category_name_string in valid_opstates or category_name_string in opstate_mappings.values()
else:
category_name_string = category_mappings[category_name_string]
assert ',' not in category_name_string
category_names = category_name_string.split('+')
assert len(category_names) <= 2
# Don't process redundant labels
category_names = set(category_names)
# category_name = category_names[0]
for category_name in category_names:
if category_name == 'ignore':
continue
category_name = category_name.replace('"','')
# If we've seen this category before...
if category_name in category_name_to_category:
category = category_name_to_category[category_name]
category_id = category['id']
# If this is a new category...
else:
# print('Adding new category for {}'.format(category_name))
category_id = next_category_id
category = {}
category['id'] = category_id
category['name'] = category_name
category_name_to_category[category_name] = category
next_category_id += 1
sequence_category_ids.add(category_id)
# ...for each category (inner)
# ...for each category (outer)
# ...if we do/don't have species in this sequence
# We should have at least one category assigned (which may be "empty" or "unknown")
assert len(sequence_category_ids) > 0
# assert len(sequence_category_ids) > 0
# Was any image in this sequence manually flagged as human?
for i_image,im in enumerate(sequence_images):
file_name_relative = os.path.join(csv_folder_relative,im['file_name'])
if file_name_relative in human_flagged_images:
# print('Flagging sequence {} as human based on manual review'.format(sequence_id))
assert human_category_id not in sequence_category_ids
sequence_category_ids.add(human_category_id)
break
# For each image in this sequence...
#
# i_image = 0; im = images[i_image]
for i_image,im in enumerate(sequence_images):
image_id = sequence_id + '_' + im['file_name']
assert image_id not in image_id_to_image
output_im = {}
output_im['id'] = image_id
output_im['file_name'] = os.path.join(csv_folder_relative,im['file_name'])
output_im['seq_id'] = sequence_id
            output_im['seq_num_frames'] = len(sequence_images)
output_im['frame_num'] = i_image
output_im['datetime'] = str(im['datetime'])
output_im['location'] = location
image_id_to_image[image_id] = output_im
# Create annotations for this image
for i_ann,category_id in enumerate(sequence_category_ids):
ann = {}
ann['id'] = 'ann_' + image_id + '_' + str(i_ann)
assert ann['id'] not in annotation_ids
annotation_ids.add(ann['id'])
ann['image_id'] = image_id
ann['category_id'] = category_id
ann['sequence_level_annotation'] = True
annotations.append(ann)
# ...for each image in this sequence
# ...for each sequence
# ...for each .csv file
images = list(image_id_to_image.values())
categories = list(category_name_to_category.values())
print('Loaded {} annotations in {} categories for {} images'.format(
len(annotations),len(categories),len(images)))
# Verify that all images have annotations
image_id_to_annotations = defaultdict(list)
# ann = annotations[0]
# For debugging only
categories_to_counts = defaultdict(int)
for ann in tqdm(annotations):
image_id_to_annotations[ann['image_id']].append(ann)
categories_to_counts[ann['category_id']] = categories_to_counts[ann['category_id']] + 1
for im in tqdm(images):
image_annotations = image_id_to_annotations[im['id']]
assert len(image_annotations) > 0
#%% Create output (original strings)
info = {}
info['contributor'] = 'Idaho Department of Fish and Game'
info['description'] = 'Idaho Camera traps'
info['version'] = '2021.07.19'
output_data = {}
output_data['images'] = images
output_data['annotations'] = annotations
output_data['categories'] = categories
output_data['info'] = info
with open(output_json_original_strings,'w') as f:
json.dump(output_data,f,indent=1)
#%% Validate .json file
from data_management.databases import sanity_check_json_db
options = sanity_check_json_db.SanityCheckOptions()
options.baseDir = input_base
options.bCheckImageSizes = False
options.bCheckImageExistence = False
options.bFindUnusedImages = False
_, _, _ = sanity_check_json_db.sanity_check_json_db(output_json_original_strings, options)
#%% Preview labels
from visualization import visualize_db
viz_options = visualize_db.DbVizOptions()
viz_options.num_to_visualize = 1000
viz_options.trim_to_images_with_bboxes = False
viz_options.add_search_links = False
viz_options.sort_by_filename = False
viz_options.parallelize_rendering = True
viz_options.include_filename_links = True
viz_options.classes_to_exclude = ['empty','deer','elk']
html_output_file, _ = visualize_db.process_images(db_path=output_json_original_strings,
output_dir=os.path.join(
output_base,'preview'),
image_base_dir=input_base,
options=viz_options)
os.startfile(html_output_file)
#%% Look for humans that were found by MegaDetector that haven't already been identified as human
# This whole step only needed to run once
if False:
pass
#%%
human_confidence_threshold = 0.5
# Load MD results
with open(megadetector_results_file,'r') as f:
md_results = json.load(f)
# Get a list of filenames that MD tagged as human
human_md_categories =\
[category_id for category_id in md_results['detection_categories'] if \
((md_results['detection_categories'][category_id] == 'person') or \
(md_results['detection_categories'][category_id] == 'vehicle'))]
assert len(human_md_categories) == 2
# im = md_results['images'][0]
md_human_images = set()
for im in md_results['images']:
if 'detections' not in im:
continue
if im['max_detection_conf'] < human_confidence_threshold:
continue
for detection in im['detections']:
if detection['category'] not in human_md_categories:
continue
elif detection['conf'] < human_confidence_threshold:
continue
else:
md_human_images.add(im['file'])
break
# ...for each detection
# ...for each image
print('MD found {} potential human images (of {})'.format(len(md_human_images),len(md_results['images'])))
# Map images to annotations in ICT
with open(output_json_original_strings,'r') as f:
ict_data = json.load(f)
category_id_to_name = {c['id']:c['name'] for c in categories}
image_id_to_annotations = defaultdict(list)
# ann = ict_data['annotations'][0]
for ann in tqdm(ict_data['annotations']):
image_id_to_annotations[ann['image_id']].append(ann)
human_ict_categories = ['human']
manual_human_images = set()
# For every image
# im = ict_data['images'][0]
for im in tqdm(ict_data['images']):
# Does this image already have a human annotation?
manual_human = False
annotations = image_id_to_annotations[im['id']]
assert len(annotations) > 0
for ann in annotations:
category_name = category_id_to_name[ann['category_id']]
if category_name in human_ict_categories:
manual_human_images.add(im['file_name'].replace('\\','/'))
# ...for each annotation
# ...for each image
print('{} images identified as human in source metadata'.format(len(manual_human_images)))
missing_human_images = []
for fn in md_human_images:
if fn not in manual_human_images:
missing_human_images.append(fn)
print('{} potentially untagged human images'.format(len(missing_human_images)))
#%% Copy images for review to a new folder
os.makedirs(human_review_folder,exist_ok=True)
missing_human_images.sort()
# fn = missing_human_images[0]
for i_image,fn in enumerate(tqdm(missing_human_images)):
input_fn_absolute = os.path.join(input_base,fn).replace('\\','/')
assert os.path.isfile(input_fn_absolute)
output_path = os.path.join(human_review_folder,str(i_image).zfill(4) + '_' + fn.replace('/','~'))
shutil.copyfile(input_fn_absolute,output_path)
#%% Manual step...
# Copy any images from that list that have humans in them to...
human_review_selection_folder = r'H:\idaho-camera-traps\human_review_selections'
assert os.path.isdir(human_review_selection_folder)
#%% Create a list of the images we just manually flagged
human_tagged_filenames = os.listdir(human_review_selection_folder)
human_tagged_relative_paths = []
# fn = human_tagged_filenames[0]
for fn in human_tagged_filenames:
# E.g. '0000_Beaverhead_elk~AM174~Trip 1~100RECNX~IMG_1397.JPG'
relative_path = fn[5:].replace('~','/')
human_tagged_relative_paths.append(relative_path)
with open(human_review_list,'w') as f:
for s in human_tagged_relative_paths:
f.write(s + '\n')
#%% Translate location, image, sequence IDs
# Load mappings if available
if (not force_generate_mappings) and (os.path.isfile(id_mapping_file)):
print('Loading ID mappings from {}'.format(id_mapping_file))
with open(id_mapping_file,'r') as f:
mappings = json.load(f)
image_id_mappings = mappings['image_id_mappings']
annotation_id_mappings = mappings['annotation_id_mappings']
location_id_mappings = mappings['location_id_mappings']
sequence_id_mappings = mappings['sequence_id_mappings']
else:
# Generate mappings
mappings = {}
next_location_id = 0
location_id_string_to_n_sequences = defaultdict(int)
location_id_string_to_n_images = defaultdict(int)
image_id_mappings = {}
annotation_id_mappings = {}
location_id_mappings = {}
sequence_id_mappings = {}
for im in tqdm(images):
# If we've seen this location before...
if im['location'] in location_id_mappings:
location_id = location_id_mappings[im['location']]
else:
# Otherwise assign a string-formatted int as the ID
location_id = str(next_location_id)
location_id_mappings[im['location']] = location_id
next_location_id += 1
# If we've seen this sequence before...
if im['seq_id'] in sequence_id_mappings:
sequence_id = sequence_id_mappings[im['seq_id']]
else:
# Otherwise assign a string-formatted int as the ID
n_sequences_this_location = location_id_string_to_n_sequences[location_id]
sequence_id = 'loc_{}_seq_{}'.format(location_id.zfill(4),str(n_sequences_this_location).zfill(6))
sequence_id_mappings[im['seq_id']] = sequence_id
n_sequences_this_location += 1
location_id_string_to_n_sequences[location_id] = n_sequences_this_location
assert im['id'] not in image_id_mappings
# Assign an image ID
n_images_this_location = location_id_string_to_n_images[location_id]
image_id_mappings[im['id']] = 'loc_{}_im_{}'.format(location_id.zfill(4),str(n_images_this_location).zfill(6))
n_images_this_location += 1
location_id_string_to_n_images[location_id] = n_images_this_location
# ...for each image
# Assign annotation mappings
for i_ann,ann in enumerate(tqdm(annotations)):
assert ann['image_id'] in image_id_mappings
assert ann['id'] not in annotation_id_mappings
annotation_id_mappings[ann['id']] = 'ann_{}'.format(str(i_ann).zfill(8))
mappings['image_id_mappings'] = image_id_mappings
mappings['annotation_id_mappings'] = annotation_id_mappings
mappings['location_id_mappings'] = location_id_mappings
mappings['sequence_id_mappings'] = sequence_id_mappings
# Save mappings
with open(id_mapping_file,'w') as f:
json.dump(mappings,f,indent=2)
print('Saved ID mappings to {}'.format(id_mapping_file))
    # Back this file up, in case we accidentally re-run this script with force_generate_mappings = True
# and overwrite the mappings we used.
datestr = str(datetime.datetime.now()).replace(':','-')
backup_file = id_mapping_file.replace('.json','_' + datestr + '.json')
shutil.copyfile(id_mapping_file,backup_file)
# ...if we are/aren't re-generating mappings
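# Illustrative example of the remapping (all values below are hypothetical):
#   location   'clearcreek_fs-035'       -> '12'
#   sequence   '<original sequence id>'  -> 'loc_0012_seq_000034'
#   image      '<original image id>'     -> 'loc_0012_im_000107'
#   annotation 'ann_<image id>_0'        -> 'ann_00001234'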
#%% Apply mappings
for im in images:
im['id'] = image_id_mappings[im['id']]
im['seq_id'] = sequence_id_mappings[im['seq_id']]
im['location'] = location_id_mappings[im['location']]
for ann in annotations:
ann['id'] = annotation_id_mappings[ann['id']]
ann['image_id'] = image_id_mappings[ann['image_id']]
print('Applied mappings')
#%% Write new dictionaries (modified strings, original files)
output_data = {}
output_data['images'] = images
output_data['annotations'] = annotations
output_data['categories'] = categories
output_data['info'] = info
with open(output_json_remapped_ids,'w') as f:
json.dump(output_data,f,indent=2)
#%% Validate .json file (modified strings, original files)
from data_management.databases import sanity_check_json_db
options = sanity_check_json_db.SanityCheckOptions()
options.baseDir = input_base
options.bCheckImageSizes = False
options.bCheckImageExistence = False
options.bFindUnusedImages = False
_, _, _ = sanity_check_json_db.sanity_check_json_db(output_json_remapped_ids, options)
#%% Preview labels (original files)
from visualization import visualize_db
viz_options = visualize_db.DbVizOptions()
viz_options.num_to_visualize = 1000
viz_options.trim_to_images_with_bboxes = False
viz_options.add_search_links = False
viz_options.sort_by_filename = False
viz_options.parallelize_rendering = True
viz_options.include_filename_links = True
# viz_options.classes_to_exclude = ['empty','deer','elk']
# viz_options.classes_to_include = ['bobcat']
viz_options.classes_to_include = [viz_options.multiple_categories_tag]
html_output_file, _ = visualize_db.process_images(db_path=output_json_remapped_ids,
output_dir=os.path.join(
output_base,'preview'),
image_base_dir=input_base,
options=viz_options)
os.startfile(html_output_file)
#%% Copy images to final output folder (prep)
force_copy = False
with open(output_json_remapped_ids,'r') as f:
d = json.load(f)
images = d['images']
private_categories = ['human','domestic dog','vehicle']
private_image_ids = set()
category_id_to_name = {c['id']:c['name'] for c in d['categories']}
# ann = d['annotations'][0]
for ann in d['annotations']:
category_name = category_id_to_name[ann['category_id']]
if category_name in private_categories:
private_image_ids.add(ann['image_id'])
print('Moving {} of {} images to the private folder'.format(len(private_image_ids),len(images)))
def process_image(im):
input_relative_path = im['file_name']
input_absolute_path = os.path.join(input_base,input_relative_path)
if not os.path.isfile(input_absolute_path):
print('Warning: file {} is not available'.format(input_absolute_path))
return
location = im['location']
image_id = im['id']
location_folder = 'loc_' + location.zfill(4)
assert location_folder in image_id
output_relative_path = location_folder + '/' + image_id + '.jpg'
# Is this a public or private image?
private_image = (image_id in private_image_ids)
# Generate absolute path
if private_image:
output_absolute_path = os.path.join(output_image_base_private,output_relative_path)
else:
output_absolute_path = os.path.join(output_image_base_public,output_relative_path)
# Copy to output
output_dir = os.path.dirname(output_absolute_path)
os.makedirs(output_dir,exist_ok=True)
if force_copy or (not os.path.isfile(output_absolute_path)):
shutil.copyfile(input_absolute_path,output_absolute_path)
# Update the filename reference
im['file_name'] = output_relative_path
# ...def process_image(im)
#%% Copy images to final output folder (execution)
# For each image
if n_threads_file_copy == 1:
# im = images[0]
for im in tqdm(images):
process_image(im)
else:
pool = ThreadPool(n_threads_file_copy)
pool.map(process_image,images)
print('Finished copying, writing .json output')
# Write output .json
with open(output_json,'w') as f:
json.dump(d,f,indent=1)
#%% Make sure the right number of images got there
from pathlib import Path
all_output_files = []
all_output_files_list = os.path.join(output_base,'all_output_files.json')
for path in Path(output_image_base).rglob('*.*'):
path = str(path)
path = os.path.relpath(path,output_image_base)
all_output_files.append(path)
with open(all_output_files_list,'w') as f:
json.dump(all_output_files,f,indent=1)
print('Enumerated {} output files (of {} images)'.format(len(all_output_files),len(images)))
#%% Validate .json file (final filenames)
from data_management.databases import sanity_check_json_db
options = sanity_check_json_db.SanityCheckOptions()
options.baseDir = input_base
options.bCheckImageSizes = False
options.bCheckImageExistence = False
options.bFindUnusedImages = False
_, _, _ = sanity_check_json_db.sanity_check_json_db(output_json, options)
#%% Preview labels (final filenames)
from visualization import visualize_db
viz_options = visualize_db.DbVizOptions()
viz_options.num_to_visualize = 1500
viz_options.trim_to_images_with_bboxes = False
viz_options.add_search_links = False
viz_options.sort_by_filename = False
viz_options.parallelize_rendering = True
viz_options.include_filename_links = True
# viz_options.classes_to_exclude = ['empty','deer','elk']
viz_options.classes_to_include = ['bear','mountain lion']
# viz_options.classes_to_include = ['horse']
# viz_options.classes_to_include = [viz_options.multiple_categories_tag]
# viz_options.classes_to_include = ['human','vehicle','domestic dog']
html_output_file, _ = visualize_db.process_images(db_path=output_json,
output_dir=os.path.join(
output_base,'final-preview-01'),
image_base_dir=output_image_base_public,
options=viz_options)
os.startfile(html_output_file)
#%% Create zipfiles
#%% List public files
from pathlib import Path
all_public_output_files = []
all_public_output_files_list = os.path.join(output_base,'all_public_output_files.json')
if not os.path.isfile(all_public_output_files_list):
for path in Path(output_image_base_public).rglob('*.*'):
path = str(path)
path = os.path.relpath(path,output_image_base)
all_public_output_files.append(path)
with open(all_public_output_files_list,'w') as f:
json.dump(all_public_output_files,f,indent=1)
else:
with open(all_public_output_files_list,'r') as f:
all_public_output_files = json.load(f)
print('Enumerated {} public output files'.format(len(all_public_output_files)))
#%% Find the size of each file
filename_to_size = {}
all_public_output_sizes_list = os.path.join(output_base,'all_public_output_sizes.json')
if not os.path.isfile(all_public_output_sizes_list):
# fn = all_public_output_files[0]
for fn in tqdm(all_public_output_files):
p = os.path.join(output_image_base,fn)
assert os.path.isfile(p)
filename_to_size[fn] = os.path.getsize(p)
with open(all_public_output_sizes_list,'w') as f:
json.dump(filename_to_size,f,indent=1)
else:
with open(all_public_output_sizes_list,'r') as f:
filename_to_size = json.load(f)
assert len(filename_to_size) == len(all_public_output_files)
#%% Split into chunks of approximately-equal size
import humanfriendly
total_size = sum(filename_to_size.values())
print('{} in {} files'.format(humanfriendly.format_size(total_size),len(all_public_output_files)))
bytes_per_part = 320e9
file_lists = []
current_file_list = []
n_bytes_current_file_list = 0
for fn in all_public_output_files:
size = filename_to_size[fn]
current_file_list.append(fn)
n_bytes_current_file_list += size
if n_bytes_current_file_list > bytes_per_part:
file_lists.append(current_file_list)
current_file_list = []
n_bytes_current_file_list = 0
# ...for each file
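# Append whatever is left over as the final (typically smaller) chunk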
file_lists.append(current_file_list)
assert sum([len(l) for l in file_lists]) == len(all_public_output_files)
print('List sizes:')
for l in file_lists:
print(len(l))
#%% Create a zipfile for each chunk
from zipfile import ZipFile
import zipfile
import os
def create_zipfile(i_file_list):
file_list = file_lists[i_file_list]
zipfile_name = os.path.join('k:\\idaho-camera-traps-images.part_{}.zip'.format(i_file_list))
print('Processing archive {} to file {}'.format(i_file_list,zipfile_name))
with ZipFile(zipfile_name, 'w') as zipObj:
for filename_relative in file_list:
assert filename_relative.startswith('public')
filename_absolute = os.path.join(output_image_base,filename_relative)
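                # ZIP_STORED writes entries uncompressed, presumably because the JPEGs are
                # already compressed and deflating them again would buy very little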
zipObj.write(filename_absolute.replace('\\','/'), filename_relative, compress_type=zipfile.ZIP_STORED)
# ...for each filename
# with ZipFile()
# ...def create_zipfile()
# i_file_list = 0; file_list = file_lists[i_file_list]
n_zip_threads = 1 # len(file_lists)
if n_zip_threads == 1:
for i_file_list in range(0,len(file_lists)):
create_zipfile(i_file_list)
else:
pool = ThreadPool(n_zip_threads)
indices = list(range(0,len(file_lists)))
pool.map(create_zipfile,indices)
|
exercises/list-ops/example.py | kishankj/python | 1,177 | 11181142 |
def append(list1, list2):
return concat([list1, list2])
def concat(lists):
return [element for list in lists for element in list]
def filter(function, list):
return [item for item in list if function(item)]
def length(list):
return sum(1 for _ in list)
def map(function, list):
return [function(element) for element in list]
def foldl(function, list, initial):
if len(list) == 0:
return initial
else:
return foldl(function, list[1:], function(initial, list[0]))
def foldr(function, list, initial):
if len(list) == 0:
return initial
else:
return function(list[0], foldr(function, list[1:], initial))
def reverse(list):
return list[::-1]
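# Illustrative usage sketch (not part of the original exercise solution); the values
# below are made up purely to demonstrate the functions defined above.
if __name__ == "__main__":
    assert append([1, 2], [3, 4]) == [1, 2, 3, 4]
    assert concat([[1], [2, 3], []]) == [1, 2, 3]
    assert filter(lambda x: x % 2 == 0, [1, 2, 3, 4]) == [2, 4]
    assert length([1, 2, 3]) == 3
    assert map(lambda x: x * 2, [1, 2]) == [2, 4]
    assert foldl(lambda acc, x: acc - x, [1, 2, 3], 10) == 4   # ((10 - 1) - 2) - 3
    assert foldr(lambda x, acc: acc - x, [1, 2, 3], 10) == 4   # ((10 - 3) - 2) - 1
    assert reverse([1, 2, 3]) == [3, 2, 1]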
|
tests/test_http.py | avivazran/UnrealEnginePython | 2,350 | 11181210 | import unittest
import unreal_engine as ue
from unreal_engine import IHttpRequest
import json
class TestHttp(unittest.TestCase):
def test_user_agent(self):
request = IHttpRequest('GET', 'http://httpbin.org/user-agent')
request.set_header('User-Agent', 'UnrealEnginePython_test')
request.process_request()
while request.get_status() < 2:
request.tick(0.01)
j = json.loads(request.get_response().get_content_as_string())
self.assertEqual(j['user-agent'], 'UnrealEnginePython_test')
def test_post_data(self):
request = IHttpRequest('POST', 'http://httpbin.org/post')
request.set_content('test')
request.process_request()
while request.get_status() < 2:
request.tick(0.01)
response = request.get_response()
self.assertEqual(response.get_response_code(), 200)
j = json.loads(response.get_content_as_string())
self.assertEqual(j['form']['test'], '')
def test_headers(self):
request = IHttpRequest()
request.set_header('One', 'Two')
request.set_header('Test1', 'Test2')
self.assertEqual(request.get_all_headers(), ['One: Two', 'Test1: Test2'])
def test_get_url(self):
request = IHttpRequest()
self.assertEqual(request.get_url(), '')
request.set_url('http://unrealengine.com')
self.assertEqual(request.get_url(), 'http://unrealengine.com')
def test_get_url_parameter(self):
request = IHttpRequest()
request.set_url('http://unrealengine.com/?test=17&bar=30')
self.assertEqual(request.get_url_parameter('bar'), '30')
# fail ! (maybe a bug ?)
#self.assertEqual(request.get_url_parameter('test'), '17')
if __name__ == '__main__':
unittest.main(exit=False)
|
test/layers/test_common.py | wconnell/torchdrug | 772 | 11181211 | import unittest
import torch
from torch import nn
from torchdrug import layers
class CommonTest(unittest.TestCase):
def setUp(self):
self.a = torch.randn(10)
self.b = torch.randn(10)
self.g = torch.randn(10)
def test_sequential(self):
layer1 = nn.Module()
layer2 = nn.Module()
layer3 = nn.Module()
layer1.forward = lambda a, b: (a + 1, b + 2)
layer2.forward = lambda a, b: a * b
layer = layers.Sequential(layer1, layer2)
result = layer(self.a, self.b)
truth = layer2(*layer1(self.a, self.b))
self.assertTrue(torch.allclose(result, truth), "Incorrect sequential layer")
layer1.forward = lambda g, a: g + a
layer2.forward = lambda b: b * 2
layer3.forward = lambda g, c: g * c
layer = layers.Sequential(layer1, layer2, layer3, global_args=("g",))
result = layer(self.g, self.a)
truth = layer3(self.g, layer2(layer1(self.g, self.a)))
self.assertTrue(torch.allclose(result, truth), "Incorrect sequential layer")
layer1.forward = lambda a: {"b": a + 1, "c": a + 2}
layer2.forward = lambda b: b * 2
layer = layers.Sequential(layer1, layer2, allow_unused=True)
result = layer(self.a)
truth = layer2(layer1(self.a)["b"])
self.assertTrue(torch.allclose(result, truth), "Incorrect sequential layer")
layer1.forward = lambda g, a: {"g": g + 1, "b": a + 2}
layer2.forward = lambda g, b: g * b
layer = layers.Sequential(layer1, layer2, global_args=("g",))
result = layer(self.g, self.a)
truth = layer2(**layer1(self.g, self.a))
self.assertTrue(torch.allclose(result, truth), "Incorrect sequential layer")
if __name__ == "__main__":
unittest.main() |
entity/cards/LETL_037H/LETL_282.py | x014/lushi_script | 102 | 11181217 | # -*- coding: utf-8 -*-
from hearthstone.entities import Entity
from entity.spell_entity import SpellEntity
class LETL_282(SpellEntity):
"""
    Apex Predator 5 (顶级捕食者5)
    <b>Attack</b> the enemy with the lowest Health. <b>Kill:</b> Repeat this effect.
"""
def __init__(self, entity: Entity):
super().__init__(entity)
self.damage = 0
self.range = -1
self.is_attack = 1
def play(self, game, hero, target):
        # Damage equals the hero's attack damage
damage = hero.dmg
        # Attack the enemy with the lowest health; on a kill, repeat
while True:
h = game.find_min_health(not hero.own())
if h is None:
break
h.got_damage(game, damage * self.damage_advantage[self.lettuce_role][h.lettuce_role])
            # The attacking hero takes damage back from the target
hero.got_damage(game, h.dmg)
if not h.is_alive():
break
|
ckan/migration/versions/051_a4fb0d85ced6_add_tag_vocabulary.py | ziveo/ckan | 2,805 | 11181223 |
# encoding: utf-8
"""051 Add tag vocabulary
Revision ID: a4fb0d85ced6
Revises: <KEY>
Create Date: 2018-09-04 18:49:06.480087
"""
from alembic import op
import sqlalchemy as sa
from ckan.migration import skip_based_on_legacy_engine_version
# revision identifiers, used by Alembic.
revision = 'a4fb0d85ced6'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
if skip_based_on_legacy_engine_version(op, __name__):
return
op.drop_constraint('tag_name_key', 'tag')
op.create_table(
'vocabulary', sa.Column('id', sa.UnicodeText, nullable=False),
sa.Column('name', sa.String(100), nullable=False)
)
op.add_column('tag', sa.Column('vocabulary_id', sa.String(100)))
op.create_primary_key('vocabulary_pkey', 'vocabulary', ['id'])
op.create_unique_constraint(
'tag_name_vocabulary_id_key', 'tag', ['name', 'vocabulary_id']
)
op.create_foreign_key(
'tag_vocabulary_id_fkey', 'tag', 'vocabulary', ['vocabulary_id'],
['id']
)
op.create_unique_constraint('vocabulary_name_key', 'vocabulary', ['name'])
def downgrade():
op.drop_constraint('tag_name_vocabulary_id_key', 'tag')
op.drop_constraint('tag_vocabulary_id_fkey', 'tag')
op.drop_column('tag', 'vocabulary_id')
op.drop_table('vocabulary')
op.create_unique_constraint('tag_name_key', 'tag', ['name'])
|
glue/formats/scss.py | glensc/glue | 514 | 11181248 | import os
from css import CssFormat
class ScssFormat(CssFormat):
extension = 'scss'
@classmethod
def populate_argument_parser(cls, parser):
group = parser.add_argument_group("SCSS format options")
group.add_argument("--scss",
dest="scss_dir",
nargs='?',
const=True,
default=os.environ.get('GLUE_SCSS', False),
metavar='DIR',
help="Generate SCSS files and optionally where")
group.add_argument("--scss-template",
dest="scss_template",
default=os.environ.get('GLUE_SCSS_TEMPLATE', None),
metavar='DIR',
help="Template to use to generate the SCSS output.")
|
app/core/admin/forms.py | tutengfei/flaskblog | 204 | 11181272 | from wtforms import form
from wtforms import StringField, PasswordField, BooleanField
from wtforms.validators import DataRequired
from wtforms import validators
from werkzeug.security import generate_password_hash, check_password_hash
import hashlib
from app.core.models import db, User
class LoginForm(form.Form):
username = StringField('username', validators=[DataRequired()])
password = PasswordField('password', validators=[DataRequired()])
def validate_username(self, field):
user = self.get_user()
if user is None:
raise validators.ValidationError('Invalid user')
        # we're comparing the plaintext pw with the hash from the db
if not check_password_hash(user.password, self.password.data):
# to compare plain text passwords use
# if user.password != self.password.data:
raise validators.ValidationError('Invalid password')
def get_user(self):
return db.session.query(User).filter_by(user_name=self.username.data).first() |
explorer/urls.py | Patil2099/django-sql-explorer | 1,729 | 11181275 | from django.urls import path, re_path
from explorer.views import (
QueryView,
CreateQueryView,
PlayQueryView,
DeleteQueryView,
ListQueryView,
ListQueryLogView,
DownloadFromSqlView,
DownloadQueryView,
StreamQueryView,
EmailCsvQueryView,
SchemaView,
format_sql,
)
urlpatterns = [
path(
'<int:query_id>/', QueryView.as_view(), name='query_detail'
),
path(
'<int:query_id>/download', DownloadQueryView.as_view(),
name='download_query'
),
path(
'<int:query_id>/stream', StreamQueryView.as_view(),
name='stream_query'
),
path('download', DownloadFromSqlView.as_view(), name='download_sql'),
path(
'<int:query_id>/email_csv', EmailCsvQueryView.as_view(),
name='email_csv_query'
),
path(
'<int:pk>/delete', DeleteQueryView.as_view(), name='query_delete'
),
path('new/', CreateQueryView.as_view(), name='query_create'),
path('play/', PlayQueryView.as_view(), name='explorer_playground'),
re_path(
r'schema/(?P<connection>.+)$', SchemaView.as_view(),
name='explorer_schema'
),
path('logs/', ListQueryLogView.as_view(), name='explorer_logs'),
path('format/', format_sql, name='format_sql'),
path('', ListQueryView.as_view(), name='explorer_index'),
]
|
examples/pytorch/GNN-FiLM/main.py | ketyi/dgl | 9,516 | 11181289 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import dgl.function as fn
from utils import evaluate_f1_score
from data_loader import load_PPI
import argparse
import numpy as np
import os
class GNNFiLMLayer(nn.Module):
def __init__(self, in_size, out_size, etypes, dropout=0.1):
super(GNNFiLMLayer, self).__init__()
self.in_size = in_size
self.out_size = out_size
#weights for different types of edges
self.W = nn.ModuleDict({
name : nn.Linear(in_size, out_size, bias = False) for name in etypes
})
#hypernets to learn the affine functions for different types of edges
self.film = nn.ModuleDict({
name : nn.Linear(in_size, 2*out_size, bias = False) for name in etypes
})
        # layernorm before each propagation
self.layernorm = nn.LayerNorm(out_size)
#dropout layer
self.dropout = nn.Dropout(dropout)
def forward(self, g, feat_dict):
        # the input graph is multi-relational, so it is treated as a heterograph
        funcs = {}  # dict of per-edge-type message and reduce functions
        # for each edge type, compute messages and then reduce them all
for srctype, etype, dsttype in g.canonical_etypes:
messages = self.W[etype](feat_dict[srctype]) #apply W_l on src feature
film_weights = self.film[etype](feat_dict[dsttype]) #use dst feature to compute affine function paras
gamma = film_weights[:,:self.out_size] #"gamma" for the affine function
beta = film_weights[:,self.out_size:] #"beta" for the affine function
messages = gamma * messages + beta #compute messages
messages = F.relu_(messages)
g.nodes[srctype].data[etype] = messages #store in ndata
funcs[etype] = (fn.copy_u(etype, 'm'), fn.sum('m', 'h')) #define message and reduce functions
g.multi_update_all(funcs, 'sum') #update all, reduce by first type-wisely then across different types
feat_dict={}
for ntype in g.ntypes:
feat_dict[ntype] = self.dropout(self.layernorm(g.nodes[ntype].data['h'])) #apply layernorm and dropout
return feat_dict
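# Minimal usage sketch for GNNFiLMLayer (illustrative only; the toy graph, edge types and
# feature sizes below are hypothetical and not part of the original example). Flip the
# guard to True to try it.
if False:
    toy_g = dgl.heterograph({
        ('_N', 'r0', '_N'): (torch.tensor([0, 1]), torch.tensor([1, 2])),
        ('_N', 'r1', '_N'): (torch.tensor([2]), torch.tensor([0])),
    })
    toy_feat = {'_N': torch.randn(3, 8)}
    toy_layer = GNNFiLMLayer(in_size=8, out_size=16, etypes=['r0', 'r1'])
    toy_out = toy_layer(toy_g, toy_feat)
    print(toy_out['_N'].shape)  # expected: torch.Size([3, 16])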
class GNNFiLM(nn.Module):
def __init__(self, etypes, in_size, hidden_size, out_size, num_layers, dropout=0.1):
super(GNNFiLM, self).__init__()
self.film_layers = nn.ModuleList()
self.film_layers.append(
GNNFiLMLayer(in_size, hidden_size, etypes, dropout)
)
for i in range(num_layers-1):
self.film_layers.append(
GNNFiLMLayer(hidden_size, hidden_size, etypes, dropout)
)
self.predict = nn.Linear(hidden_size, out_size, bias = True)
def forward(self, g, out_key):
h_dict = {ntype : g.nodes[ntype].data['feat'] for ntype in g.ntypes} #prepare input feature dict
for layer in self.film_layers:
h_dict = layer(g, h_dict)
h = self.predict(h_dict[out_key]) #use the final embed to predict, out_size = num_classes
h = torch.sigmoid(h)
return h
def main(args):
# Step 1: Prepare graph data and retrieve train/validation/test dataloader ============================= #
if args.gpu >= 0 and torch.cuda.is_available():
device = 'cuda:{}'.format(args.gpu)
else:
device = 'cpu'
if args.dataset == 'PPI':
train_set, valid_set, test_set, etypes, in_size, out_size = load_PPI(args.batch_size, device)
# Step 2: Create model and training components=========================================================== #
model = GNNFiLM(etypes, in_size, args.hidden_size, out_size, args.num_layers).to(device)
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.step_size, gamma=args.gamma)
# Step 4: training epoches ============================================================================== #
lastf1 = 0
cnt = 0
best_val_f1 = 0
for epoch in range(args.max_epoch):
train_loss = []
train_f1 = []
val_loss = []
val_f1 = []
model.train()
for batch in train_set:
g = batch.graph
g = g.to(device)
logits = model.forward(g, '_N')
labels = batch.label
loss = criterion(logits, labels)
f1 = evaluate_f1_score(logits.detach().cpu().numpy(), labels.detach().cpu().numpy())
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss.append(loss.item())
train_f1.append(f1)
train_loss = np.mean(train_loss)
train_f1 = np.mean(train_f1)
scheduler.step()
model.eval()
with torch.no_grad():
for batch in valid_set:
g = batch.graph
g = g.to(device)
logits = model.forward(g, '_N')
labels = batch.label
loss = criterion(logits, labels)
f1 = evaluate_f1_score(logits.detach().cpu().numpy(), labels.detach().cpu().numpy())
val_loss.append(loss.item())
val_f1.append(f1)
val_loss = np.mean(val_loss)
val_f1 = np.mean(val_f1)
print('Epoch {:d} | Train Loss {:.4f} | Train F1 {:.4f} | Val Loss {:.4f} | Val F1 {:.4f} |'.format(epoch + 1, train_loss, train_f1, val_loss, val_f1))
if val_f1 > best_val_f1:
best_val_f1 = val_f1
torch.save(model.state_dict(), os.path.join(args.save_dir, args.name))
if val_f1 < lastf1:
cnt += 1
if cnt == args.early_stopping:
print('Early stop.')
break
else:
cnt = 0
lastf1 = val_f1
model.eval()
test_loss = []
test_f1 = []
model.load_state_dict(torch.load(os.path.join(args.save_dir, args.name)))
with torch.no_grad():
for batch in test_set:
g = batch.graph
g = g.to(device)
logits = model.forward(g, '_N')
labels = batch.label
loss = criterion(logits, labels)
f1 = evaluate_f1_score(logits.detach().cpu().numpy(), labels.detach().cpu().numpy())
test_loss.append(loss.item())
test_f1.append(f1)
test_loss = np.mean(test_loss)
test_f1 = np.mean(test_f1)
print("Test F1: {:.4f} | Test loss: {:.4f}".format(test_f1, test_loss))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='GNN-FiLM')
parser.add_argument("--dataset", type=str, default="PPI", help="DGL dataset for this GNN-FiLM")
parser.add_argument("--gpu", type=int, default=-1, help="GPU Index. Default: -1, using CPU.")
parser.add_argument("--in_size", type=int, default=50, help="Input dimensionalities")
parser.add_argument("--hidden_size", type=int, default=320, help="Hidden layer dimensionalities")
parser.add_argument("--out_size", type=int, default=121, help="Output dimensionalities")
parser.add_argument("--num_layers", type=int, default=4, help="Number of GNN layers")
parser.add_argument("--batch_size", type=int, default=5, help="Batch size")
    parser.add_argument("--max_epoch", type=int, default=1500, help="The max number of epochs. Default: 1500")
    parser.add_argument("--early_stopping", type=int, default=80, help="Early stopping patience. Default: 80")
    parser.add_argument("--lr", type=float, default=0.001, help="Learning rate. Default: 0.001")
    parser.add_argument("--wd", type=float, default=0.0009, help="Weight decay. Default: 0.0009")
    parser.add_argument('--step-size', type=int, default=40, help='Period of learning rate decay.')
    parser.add_argument('--gamma', type=float, default=0.8, help='Multiplicative factor of learning rate decay.')
    parser.add_argument("--dropout", type=float, default=0.1, help="Dropout rate. Default: 0.1")
parser.add_argument('--save_dir', type=str, default='./out', help='Path to save the model.')
parser.add_argument("--name", type=str, default='GNN-FiLM', help="Saved model name.")
args = parser.parse_args()
print(args)
if not os.path.exists(args.save_dir):
os.mkdir(args.save_dir)
main(args)
|
model/components/DenseNet.py | lianxiaolei/LaTeX_OCR | 290 | 11181311 |
from torchvision.models import densenet169  # 'import *' would pull in all torchvision models; only densenet169 is needed here
DenseNet169 = densenet169
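# Illustrative usage (hypothetical, not part of the original module):
#   backbone = DenseNet169(pretrained=True)   # same signature as torchvision.models.densenet169
#   conv_features = backbone.features         # e.g. if only the convolutional trunk is needed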
|
Dragon/python/dragon/vm/tensorflow/layers/convolutional.py | neopenx/Dragon | 212 | 11181326 |
# --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by <NAME>
# --------------------------------------------------------
from dragon.vm.tensorflow.framework import tensor_shape
from dragon.vm.tensorflow.layers import base, utils
from dragon.vm.tensorflow.ops import init_ops
from dragon.vm.tensorflow.ops import nn
class _Conv(base.Layer):
def __init__(self, rank,
filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
trainable=True,
name=None,
**kwargs):
super(_Conv, self).__init__(trainable=trainable, name=name, **kwargs)
self.rank = rank
self.filters = filters
self.kernel_size = utils.normalize_tuple(kernel_size, rank, 'kernel_size')
self.strides = utils.normalize_tuple(strides, rank, 'strides')
self.padding = utils.normalize_padding(padding)
self.data_format = utils.normalize_data_format(data_format)
self.dilation_rate = utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
self.activation = activation
self.use_bias = use_bias
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.activity_regularizer = activity_regularizer
self.input_spec = base.InputSpec(ndim=self.rank + 2)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis].value
if self.data_format == 'channels_first':
# For channels first: (n_out, n_in, k_h, k_w)
kernel_shape = (self.filters, input_dim) + self.kernel_size
else:
# For channels last: (k_h, k_w, n_in, n_out)
kernel_shape = self.kernel_size + (input_dim, self.filters)
self.kernel = self.add_variable(name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_variable(name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.input_spec = base.InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_dim})
self.built = True
def call(self, inputs):
tf_data_format = \
utils.convert_data_format(self.data_format, self.rank + 2)
outputs = nn.convolution(
input=inputs,
filter=self.kernel,
dilation_rate=self.dilation_rate,
strides=self.strides,
padding=self.padding.upper(),
data_format=tf_data_format)
if self.bias is not None:
outputs = nn.bias_add(outputs, self.bias, data_format=tf_data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
class Conv2D(_Conv):
def __init__(self, filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format='channels_last',
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
trainable=True,
name=None,
**kwargs):
super(Conv2D, self).__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
trainable=trainable,
name=name, **kwargs)
def conv2d(inputs,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format='channels_last',
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
trainable=True,
name=None,
reuse=None):
layer = Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name)
return layer.apply(inputs)
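# Hypothetical usage sketch (not part of the original module); it simply mirrors the
# tf.layers.conv2d-style signature defined above, and the tensor shape is made up:
#
#   y = conv2d(x, filters=32, kernel_size=3, strides=(1, 1), padding='same',
#              data_format='channels_last', name='conv1')
#   # for an input x of shape (N, 28, 28, 3), y would have shape (N, 28, 28, 32)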
|
FinMind/crawler/government_bonds.py | vishalbelsare/FinMind | 1,106 | 11181333 |
"""
Government bonds
G8: Russia, the United States, Canada, the United Kingdom, France, Germany, Italy, and Japan
"""
import datetime
import os
import re
import sys
import pandas as pd
import requests
from lxml import etree
from FinMind.crawler.base import BaseCrawler, USER_AGENT
PATH = "/".join(os.path.abspath(__file__).split("/")[:-2])
sys.path.append(PATH)
class GovernmentBondsCrawler(BaseCrawler):
@staticmethod
def create_loop_list():
def get_data_id_name(url):
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7",
"Connection": "keep-alive",
"Host": "www.investing.com",
"Referer": url,
"User-Agent": USER_AGENT,
"X-Requested-With": "XMLHttpRequest",
}
res = requests.get(url, verify=True, headers=headers)
tem_data_id = re.findall('data-id="[0-9]+"', res.text)
tem_data_id = [di.replace("data-id=", "") for di in tem_data_id]
page = etree.HTML(res.text)
_data_id = []
_data_name = []
for di in tem_data_id:
tem = page.xpath("//span[@data-id={}]".format(di))
if len(tem) > 0:
_data_id.append(tem[0].attrib["data-id"])
_data_name.append(tem[0].attrib["data-name"])
return _data_id, _data_name
def get_country_url():
index_url = "https://www.investing.com/rates-bonds/"
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7",
"Cache-Control": "max-age=0",
"Connection": "keep-alive",
"Host": "www.investing.com",
"Upgrade-Insecure-Requests": "1",
"User-Agent": USER_AGENT,
}
res = requests.get(index_url, verify=True, headers=headers)
data_country_id = re.findall('data-country-id="[0-9]+"', res.text)
data_country_id = [
dci.replace("data-country-id=", "") for dci in data_country_id
]
page = etree.HTML(res.text)
tem = []
for dci in data_country_id:
tem.append(
page.xpath("//option[@data-country-id={}]".format(dci))[0]
)
url = [
"https://www.investing.com" + te.attrib["value"] for te in tem
]
# G8 and china
select = [
"canada",
"china",
"france",
"germany",
"japan",
"russia",
"uk",
"usa",
"italy",
]
countries_url = []
for url_index in range(len(url)):
tem = url[url_index].replace(
"https://www.investing.com/rates-bonds/", ""
)
tem = tem.replace("-government-bonds", "")
if tem in select:
countries_url.append(url[url_index])
return countries_url
# main
country_url = get_country_url()
loop_list = []
for curl in country_url: # curl = country_url[0]
print(curl)
data_id, data_name = get_data_id_name(curl)
for i in range(len(data_id)):
loop_list.append([data_id[i], data_name[i]])
return loop_list
@staticmethod
def get_end_date():
end_date = datetime.datetime.now().date()
end_date = end_date + datetime.timedelta(-1)
y = str(end_date.year)
m = (
str(end_date.month)
if end_date.month > 9
else "0" + str(end_date.month)
)
d = str(end_date.day) if end_date.day > 9 else "0" + str(end_date.day)
return "{}/{}/{}".format(m, d, y)
def crawler(self, loop): # loop = ['23681', 'Germany 3 Month']
def get_value(template):
date = int(template[0].attrib["data-real-value"])
date = int(date / 60 / 60 / 24)
date = str(
datetime.date(1970, 1, 1) + datetime.timedelta(days=date)
)
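            # e.g. a data-real-value of 1577836800 seconds -> 18262 days -> '2020-01-01'
            # (illustrative figure, not taken from the scraped data)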
v = [
float(template[template_index].text)
for template_index in range(1, 5)
if template[template_index].text is not None
]
if len(v) == 0:
return pd.DataFrame()
_price, _open, _high, _low = v
change = (
float(template[5].text.replace("%", "").replace(",", "")) / 100
)
return pd.DataFrame([date, _price, _open, _high, _low, change]).T
cid, data_name = loop
header = data_name + " Bond Yield Historical data"
st_date, end_date = (
"01/01/1970",
self.get_end_date(),
)
bonds_url = "https://www.investing.com/instruments/HistoricalDataAjax"
form_data = {
"curr_id": cid,
"header": header,
"st_date": st_date,
"end_date": end_date,
"interval_sec": "Daily",
"sort_col": "date",
"sort_ord": "DESC",
"action": "historical_data",
}
headers = {
"Accept": "text/plain, */*; q=0.01",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7",
"Connection": "keep-alive",
"Content-Length": "192",
"Content-Type": "application/x-www-form-urlencoded",
"Host": "www.investing.com",
"Origin": "https://www.investing.com",
"Referer": "https://www.investing.com/rates-bonds/france-1-month-bond-yield-historical-data",
"User-Agent": USER_AGENT,
"X-Requested-With": "XMLHttpRequest",
}
print("requests post")
res = requests.post(
bonds_url, verify=True, headers=headers, data=form_data
)
print("data clean")
page = etree.HTML(res.text)
tr_path = page.xpath("//tr")
col_name = [col.text for col in tr_path[0].xpath("//th")]
col_name = [c.replace(" %", "Percent") for c in col_name]
col_name = ["date" if c == "Date" else c for c in col_name]
data = pd.DataFrame()
td_path = page.xpath("//tr//td")
for i in range(0, len(td_path) - 6, 6):
tem = td_path[i : i + 6]
value = get_value(tem)
if len(value) > 0:
data = data.append(value)
if len(data) > 0:
data.columns = col_name
data["name"] = "{}".format(data_name)
# data['data_id'] = cid
data = data.sort_values("date")
data.index = range(len(data))
return data
|
insights/tests/core/test_marshalling.py | mglantz/insights-core | 121 | 11181372 | import pytest
from insights.core import marshalling
def mar_unmar(o, use_value_list=False):
marshalled = marshalling.marshal(o, use_value_list)
unmarshalled = marshalling.unmarshal(marshalled)
return marshalled, unmarshalled
def test_string_marshal():
flag = "TEST_FLAG"
_, unmarshalled = mar_unmar(flag)
assert unmarshalled == {flag: True}
def test_dict_marshal():
doc = {"foo": "bar"}
_, unmarshalled = mar_unmar(doc)
assert unmarshalled == doc
def test_bad_returns():
with pytest.raises(TypeError):
marshalling.marshal(True)
with pytest.raises(TypeError):
marshalling.marshal(1)
with pytest.raises(TypeError):
marshalling.marshal(1.0)
with pytest.raises(TypeError):
marshalling.marshal([])
with pytest.raises(TypeError):
marshalling.marshal(())
with pytest.raises(TypeError):
marshalling.marshal(set())
def test_none_marshal():
ma, um = mar_unmar(None)
assert um is None
def test_value_list():
ma, um = mar_unmar("test", use_value_list=True)
assert um == {"test": [True]}
|
niftynet/io/__init__.py | tdml13/NiftyNet | 1,403 | 11181409 | """
.. module:: niftynet.io
:synopsis: High-level input / output operations.
"""
|
uuv_control/uuv_control_cascaded_pids/scripts/VelocityControl.py | hust-arms/uuv_simulator-armsauv | 470 | 11181411 |
#!/usr/bin/env python
# Copyright (c) 2016 The UUV Simulator Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
import rospy
from dynamic_reconfigure.server import Server
import geometry_msgs.msg as geometry_msgs
from nav_msgs.msg import Odometry
import tf.transformations as trans
from rospy.numpy_msg import numpy_msg
# Modules included in this package
from PID import PIDRegulator
from uuv_control_cascaded_pid.cfg import VelocityControlConfig
class VelocityControllerNode:
def __init__(self):
print('VelocityControllerNode: initializing node')
self.config = {}
self.v_linear_des = numpy.zeros(3)
self.v_angular_des = numpy.zeros(3)
# Initialize pids with default parameters
self.pid_angular = PIDRegulator(1, 0, 0, 1)
self.pid_linear = PIDRegulator(1, 0, 0, 1)
# ROS infrastructure
self.sub_cmd_vel = rospy.Subscriber('cmd_vel', numpy_msg(geometry_msgs.Twist), self.cmd_vel_callback)
self.sub_odometry = rospy.Subscriber('odom', numpy_msg(Odometry), self.odometry_callback)
self.pub_cmd_accel = rospy.Publisher('cmd_accel', geometry_msgs.Accel, queue_size=10)
self.srv_reconfigure = Server(VelocityControlConfig, self.config_callback)
def cmd_vel_callback(self, msg):
"""Handle updated set velocity callback."""
# Just store the desired velocity. The actual control runs on odometry callbacks
v_l = msg.linear
v_a = msg.angular
self.v_linear_des = numpy.array([v_l.x, v_l.y, v_l.z])
self.v_angular_des = numpy.array([v_a.x, v_a.y, v_a.z])
def odometry_callback(self, msg):
"""Handle updated measured velocity callback."""
if not bool(self.config):
return
linear = msg.twist.twist.linear
angular = msg.twist.twist.angular
v_linear = numpy.array([linear.x, linear.y, linear.z])
v_angular = numpy.array([angular.x, angular.y, angular.z])
if self.config['odom_vel_in_world']:
# This is a temp. workaround for gazebo's pos3d plugin not behaving properly:
# Twist should be provided wrt child_frame, gazebo provides it wrt world frame
# see http://docs.ros.org/api/nav_msgs/html/msg/Odometry.html
xyzw_array = lambda o: numpy.array([o.x, o.y, o.z, o.w])
q_wb = xyzw_array(msg.pose.pose.orientation)
R_bw = trans.quaternion_matrix(q_wb)[0:3, 0:3].transpose()
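            # R_bw (transpose of the body-to-world rotation) maps world-frame vectors into the body frame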
v_linear = R_bw.dot(v_linear)
v_angular = R_bw.dot(v_angular)
        # Compute the control output:
t = msg.header.stamp.to_sec()
e_v_linear = (self.v_linear_des - v_linear)
e_v_angular = (self.v_angular_des - v_angular)
a_linear = self.pid_linear.regulate(e_v_linear, t)
a_angular = self.pid_angular.regulate(e_v_angular, t)
# Convert and publish accel. command:
cmd_accel = geometry_msgs.Accel()
cmd_accel.linear = geometry_msgs.Vector3(*a_linear)
cmd_accel.angular = geometry_msgs.Vector3(*a_angular)
self.pub_cmd_accel.publish(cmd_accel)
def config_callback(self, config, level):
"""Handle updated configuration values."""
# config has changed, reset PID controllers
self.pid_linear = PIDRegulator(config['linear_p'], config['linear_i'], config['linear_d'], config['linear_sat'])
self.pid_angular = PIDRegulator(config['angular_p'], config['angular_i'], config['angular_d'], config['angular_sat'])
self.config = config
return config
if __name__ == '__main__':
print('starting VelocityControl.py')
rospy.init_node('velocity_control')
try:
node = VelocityControllerNode()
rospy.spin()
except rospy.ROSInterruptException:
print('caught exception')
print('exiting')
|
atest/testdata/cli/dryrun/vars.py | phil-davis/robotframework | 7,073 | 11181423 | RESOURCE_PATH_FROM_VARS = 'resource.robot'
|
mediasoup-client/deps/webrtc/src/build/config/fuchsia/build_symbol_archive.py | skgwazap/mediasoup-client-android | 128 | 11181514 | #!/usr/bin/env python
#
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a compressed archive of binary symbols derived from the unstripped
executables and libraries cataloged by "ids.txt"."""
import argparse
import os
import subprocess
import sys
import tarfile
import tempfile
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('ids_txt', type=str, nargs=1,
help='Path to the ids.txt file.')
parser.add_argument('output_tarball', nargs=1, type=str,
help='Path which the tarball will be written to.')
parser.add_argument('--eu-strip', required=True, type=str,
                      help='Path to the eu-strip tool.')
args = parser.parse_args(args)
ids_txt = args.ids_txt[0]
build_ids_archive = tarfile.open(args.output_tarball[0], 'w:bz2')
for line in open(ids_txt, 'r'):
# debug_tempfile: The path which debug symbols will be written to.
# stripped_tempfile: The path which the stripped executable will be written
# to. This file is ignored and immediately deleted.
with tempfile.NamedTemporaryFile() as debug_tempfile, \
tempfile.NamedTemporaryFile() as stripped_tempfile:
build_id, binary_path = line.strip().split(' ')
binary_abspath = os.path.abspath(
os.path.join(os.path.dirname(ids_txt), binary_path))
# Extract debugging symbols from the binary into their own file.
# The stripped executable binary is written to |debug_tempfile| and
# deleted. Writing to /dev/null would be preferable, but eu-strip
# disallows writing output to /dev/null.
subprocess.check_call([args.eu_strip, '-g', binary_abspath,
'-f', debug_tempfile.name,
'-o', stripped_tempfile.name])
# An empty result means that the source binary (most likely a prebuilt)
# didn't have debugging data to begin with.
if os.path.getsize(debug_tempfile.name) == 0:
continue
# Archive the debugging symbols, placing them in a hierarchy keyed to the
# GNU build ID. The symbols reside in directories whose names are the
# first two characters of the build ID, with the symbol files themselves
# named after the remaining characters of the build ID. So, a symbol file
# with the build ID "deadbeef" would be located at the path 'de/adbeef'.
build_ids_archive.add(debug_tempfile.name,
'%s/%s' % (build_id[:2], build_id[2:]))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
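# --- Illustrative sketch (not part of the original script) -------------------
# The comments above describe how an archive member path is derived from a GNU
# build ID. This hypothetical helper simply restates that mapping and is not
# used by the script itself.
def _archive_path_for_build_id(build_id):
  """E.g. 'deadbeef' -> 'de/adbeef'."""
  return '%s/%s' % (build_id[:2], build_id[2:])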
|
tests/test_packages/test_skills/test_simple_oracle_client/test_behaviours.py | bryanchriswhite/agents-aea | 126 | 11181525 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the behaviour classes of the simple oracle client skill."""
import logging
from pathlib import Path
from typing import cast
from unittest.mock import patch
from aea.test_tools.test_skill import BaseSkillTestCase
from packages.fetchai.contracts.fet_erc20.contract import PUBLIC_ID as ERC20_PUBLIC_ID
from packages.fetchai.contracts.oracle_client.contract import (
PUBLIC_ID as CLIENT_CONTRACT_PUBLIC_ID,
)
from packages.fetchai.protocols.contract_api.message import ContractApiMessage
from packages.fetchai.skills.simple_oracle_client.behaviours import (
SimpleOracleClientBehaviour,
)
from packages.fetchai.skills.simple_oracle_client.strategy import Strategy
from tests.conftest import ROOT_DIR
DEFAULT_ADDRESS = "0x0000000000000000000000000000000000000000"
ETHEREUM_LEDGER_ID = "ethereum"
FETCHAI_LEDGER_ID = "fetchai"
class TestSkillBehaviour(BaseSkillTestCase):
"""Test behaviours of simple oracle client."""
path_to_skill = Path(
ROOT_DIR, "packages", "fetchai", "skills", "simple_oracle_client"
)
@classmethod
def setup(cls, **kwargs):
"""Setup the test class."""
super().setup()
cls.simple_oracle_client_behaviour = cast(
SimpleOracleClientBehaviour,
cls._skill.skill_context.behaviours.simple_oracle_client_behaviour,
)
def test_setup(self):
"""Test the setup method of the simple_oracle_client behaviour."""
strategy = cast(Strategy, self.simple_oracle_client_behaviour.context.strategy)
strategy.oracle_contract_address = DEFAULT_ADDRESS
strategy.erc20_address = DEFAULT_ADDRESS
strategy.is_oracle_contract_set = True
strategy._ledger_id = ETHEREUM_LEDGER_ID
self.simple_oracle_client_behaviour.setup()
self.assert_quantity_in_outbox(1)
msg = cast(ContractApiMessage, self.get_message_from_outbox())
has_attributes, error_str = self.message_has_attributes(
actual_message=msg,
message_type=ContractApiMessage,
performative=ContractApiMessage.Performative.GET_DEPLOY_TRANSACTION,
contract_id=str(CLIENT_CONTRACT_PUBLIC_ID),
callable="get_deploy_transaction",
)
assert has_attributes, error_str
def test_setup_with_contract_set(self):
"""Test the setup method of the simple_oracle_client behaviour for existing contract."""
strategy = cast(Strategy, self.simple_oracle_client_behaviour.context.strategy)
strategy.client_contract_address = DEFAULT_ADDRESS
strategy.oracle_contract_address = DEFAULT_ADDRESS
strategy.erc20_address = DEFAULT_ADDRESS
strategy.is_client_contract_deployed = True
strategy.is_oracle_contract_set = True
strategy._ledger_id = ETHEREUM_LEDGER_ID
with patch.object(
self.simple_oracle_client_behaviour.context.logger, "log"
) as mock_logger:
self.simple_oracle_client_behaviour.setup()
mock_logger.assert_any_call(
logging.INFO, "Fetch oracle client contract address already added",
)
self.assert_quantity_in_outbox(0)
def test_act_pre_deploy(self):
"""Test the act method of the simple_oracle_client behaviour before contract is deployed."""
with patch.object(
self.simple_oracle_client_behaviour.context.logger, "log"
) as mock_logger:
self.simple_oracle_client_behaviour.act()
mock_logger.assert_any_call(
logging.INFO, "Oracle client contract not yet deployed",
)
self.assert_quantity_in_outbox(0)
def test_act_approve_transactions(self):
"""Test the act method of the simple_oracle_client behaviour before transactions are approved."""
strategy = cast(Strategy, self.simple_oracle_client_behaviour.context.strategy)
strategy.client_contract_address = DEFAULT_ADDRESS
strategy.oracle_contract_address = DEFAULT_ADDRESS
strategy.erc20_address = DEFAULT_ADDRESS
strategy.is_client_contract_deployed = True
strategy.is_oracle_contract_set = True
strategy._ledger_id = ETHEREUM_LEDGER_ID
self.simple_oracle_client_behaviour.act()
self.assert_quantity_in_outbox(1)
msg = cast(ContractApiMessage, self.get_message_from_outbox())
has_attributes, error_str = self.message_has_attributes(
actual_message=msg,
message_type=ContractApiMessage,
performative=ContractApiMessage.Performative.GET_RAW_TRANSACTION,
contract_id=str(ERC20_PUBLIC_ID),
contract_address=strategy.client_contract_address,
callable="get_approve_transaction",
)
assert has_attributes, error_str
def test_act_query(self):
"""Test the act method of the simple_oracle_client behaviour for normal querying."""
strategy = cast(Strategy, self.simple_oracle_client_behaviour.context.strategy)
strategy.client_contract_address = DEFAULT_ADDRESS
strategy.oracle_contract_address = DEFAULT_ADDRESS
strategy.erc20_address = DEFAULT_ADDRESS
strategy.is_client_contract_deployed = True
strategy.is_oracle_transaction_approved = True
strategy.is_oracle_contract_set = True
strategy._ledger_id = ETHEREUM_LEDGER_ID
self.simple_oracle_client_behaviour.act()
self.assert_quantity_in_outbox(1)
assert strategy.is_oracle_contract_set
msg = cast(ContractApiMessage, self.get_message_from_outbox())
has_attributes, error_str = self.message_has_attributes(
actual_message=msg,
message_type=ContractApiMessage,
performative=ContractApiMessage.Performative.GET_RAW_TRANSACTION,
contract_id=str(CLIENT_CONTRACT_PUBLIC_ID),
contract_address=strategy.client_contract_address,
callable="get_query_transaction",
)
assert has_attributes, error_str
def test__request_contract_deploy_transaction(self):
"""Test that the _request_contract_deploy_transaction function sends the right message to the contract_api for ethereum ledger."""
strategy = cast(Strategy, self.simple_oracle_client_behaviour.context.strategy)
strategy.oracle_contract_address = "some_address"
strategy._ledger_id = ETHEREUM_LEDGER_ID
self.simple_oracle_client_behaviour._request_contract_deploy_transaction()
self.assert_quantity_in_outbox(1)
kwargs = strategy.get_deploy_kwargs()
assert "fetchOracleContractAddress" in kwargs.body
msg = cast(ContractApiMessage, self.get_message_from_outbox())
has_attributes, error_str = self.message_has_attributes(
actual_message=msg,
message_type=ContractApiMessage,
performative=ContractApiMessage.Performative.GET_DEPLOY_TRANSACTION,
contract_id=str(CLIENT_CONTRACT_PUBLIC_ID),
callable="get_deploy_transaction",
kwargs=kwargs,
)
assert has_attributes, error_str
def test__request_contract_store_transaction(self):
"""Test that the _request_contract_deploy_transaction function sends the right message to the contract_api for fetchai ledger."""
strategy = cast(Strategy, self.simple_oracle_client_behaviour.context.strategy)
strategy.oracle_contract_address = "some_address"
strategy._ledger_id = FETCHAI_LEDGER_ID
self.simple_oracle_client_behaviour._request_contract_deploy_transaction()
self.assert_quantity_in_outbox(1)
kwargs = strategy.get_deploy_kwargs()
assert "fetchOracleContractAddress" not in kwargs.body
msg = cast(ContractApiMessage, self.get_message_from_outbox())
has_attributes, error_str = self.message_has_attributes(
actual_message=msg,
message_type=ContractApiMessage,
performative=ContractApiMessage.Performative.GET_DEPLOY_TRANSACTION,
contract_id=str(CLIENT_CONTRACT_PUBLIC_ID),
callable="get_deploy_transaction",
kwargs=kwargs,
)
assert has_attributes, error_str
def test_teardown(self):
"""Test that the teardown method of the simple_oracle_client behaviour leaves no messages in the outbox."""
assert self.simple_oracle_client_behaviour.teardown() is None
self.assert_quantity_in_outbox(0)
|
transformer/__init__.py | richarai9/FastSpeech2 | 753 | 11181559 | from .Models import Encoder, Decoder
from .Layers import PostNet
|
data/transforms/arguement.py | donnyyou/centerX | 350 | 11181575 | import cv2
import numpy as np
import random
import torch
import imgaug as ia
import imgaug.augmenters as iaa
import copy
# points = [
# [(10.5, 20.5)], # points on first image
# [(50.5, 50.5), (60.5, 60.5), (70.5, 70.5)] # points on second image
# ]
# image = cv2.imread('000000472375.jpg')
# inp_bbox = [np.array([124.71,196.18,124.71+372.85,196.18+356.81])]
'''
points = np.array([[ 80.90703725, 126.08039874, 0. ],
[ 72.72988313, 127.2840341, 0. ],
[ 86.29191076, 160.56158147, 0. ],
[ 80.87585772, 159.50228059, 0. ],
[ 81.09376061, 190.41214379, 0. ],
[ 77.63778624, 192.15852308, 0. ],
[ 84.55893103, 190.83034651, 0. ],
[ 88.24699688, 192.76283703, 0. ],
[ 70.1611101, 235.95892525, 0. ],
[106.62995965, 239.87347792, 0. ],
[ 66.48005009, 286.62669707, 0. ],
[128.05848894, 280.34743948, 0. ]])
image = cv2.imread('demo.jpg')
def show(image,points):
for i in points:
cv2.circle(image,(int(i[0]), int(i[1])), 5, (0,255,0), -1)
return image
'''
# def arguementation(image, dataset_dict, p=0.5):
def arguementation(image, p=0.5):
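    """Randomly apply the imgaug pipeline defined below to a single image.
    With probability (1 - p) the input is returned unchanged; otherwise up to
    five of the photometric augmenters below (blur, sharpen, emboss,
    brightness/contrast, grayscale, perspective, ...) are applied. The image
    is cast to uint8 before augmentation and a uint8 array is returned.
    """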
if random.random() > p:
return image # ,dataset_dict
# H,W,C = image.shape
# inp_bbox = [anno['bbox'] for anno in dataset_dict['annotations']]
# ia_bbox = []
# for bbox in inp_bbox:
# tmp_bbox = ia.BoundingBox(x1=bbox[0], y1=bbox[1], x2=bbox[2], y2=bbox[3])
# ia_bbox.append(tmp_bbox)
# ia_bbox = [ia_bbox]
images = np.array([image]).astype(np.uint8)
# image = random_flip(image)
# image = random_scale(image)
# image = random_angle_rotate(image)
# Sometimes(0.5, ...) applies the given augmenter in 50% of all cases,
# e.g. Sometimes(0.5, GaussianBlur(0.3)) would blur roughly every second image.
sometimes = lambda aug: iaa.Sometimes(0.5, aug)
# Define our sequence of augmentation steps that will be applied to every image
# All augmenters with per_channel=0.5 will sample one value _per image_
# in 50% of all cases. In all other cases they will sample new values
# _per channel_.
seq = iaa.Sequential(
[
# apply the following augmenters to most images
# iaa.Fliplr(0.5), # horizontally flip 50% of all images
# iaa.Flipud(0.2), # vertically flip 20% of all images
# crop images by -5% to 10% of their height/width
# iaa.CropAndPad(
# percent=(-0.3, 0.3),
# pad_mode='constant',
# pad_cval=(0, 0)
# ),
# iaa.Affine(
# scale={"x": (0.6, 1.4), "y": (0.6, 1.4)},
# # scale images to 80-120% of their size, individually per axis
# translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)}, # translate by -20 to +20 percent (per axis)
# fit_output=False, # True
# order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
# cval=(0, 0), # if mode is constant, use a cval between 0 and 255
# mode='constant' # use any of scikit-image's warping modes (see 2nd image from the top for examples)
# ),
# execute 0 to 5 of the following (less important) augmenters per image
# don't execute all of them, as that would often be way too strong
iaa.SomeOf((0, 5),
[
sometimes(iaa.Superpixels(p_replace=(0, 0.1), n_segments=(200, 300))),
# convert images into their superpixel representation
iaa.OneOf([
iaa.GaussianBlur((0, 3.0)), # blur images with a sigma between 0 and 3.0
iaa.AverageBlur(k=(2, 4)),
# blur image using local means with kernel sizes between 2 and 7
iaa.MedianBlur(k=(1, 5)),
# blur image using local medians with kernel sizes between 2 and 7
]),
iaa.Sharpen(alpha=(0, 0.75), lightness=(0.1, 1.9)), # sharpen images
iaa.Emboss(alpha=(0, 0.75), strength=(0, 1.0)), # emboss images
# search either for all edges or for directed edges,
# blend the result with the original image using a blobby mask
# iaa.SimplexNoiseAlpha(iaa.OneOf([
# iaa.EdgeDetect(alpha=(0, 0.25)),
# iaa.DirectedEdgeDetect(alpha=(0.5, 1.0), direction=(0.0, 1.0)),
# ])),
# iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5), # add gaussian noise to images
# iaa.OneOf([
# iaa.Dropout((0.01, 0.1), per_channel=0.5), # randomly remove up to 10% of the pixels
# #iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),
# ]),
# iaa.Invert(0.05, per_channel=True), # invert color channels
iaa.Add((-20, 20), per_channel=0.5),
# change brightness of images (by -10 to 10 of original value)
# iaa.AddToHueAndSaturation((-20, 20)), # change hue and saturation
# either change the brightness of the whole image (sometimes
# per channel) or change the brightness of subareas
iaa.OneOf([
iaa.Multiply((0.5, 1.5), per_channel=0.5),
# iaa.FrequencyNoiseAlpha(
# exponent=(-4, 0),
# first=iaa.Multiply((0.5, 1.5), per_channel=True),
# second=iaa.LinearContrast((0.5, 2.0))
# )
]),
iaa.LinearContrast((0.75, 1.5), per_channel=0.5), # improve or worsen the contrast
iaa.Grayscale(alpha=(0.0, 0.3)),
# sometimes(iaa.ElasticTransformation(alpha=(0.5, 1.5), sigma=0.25)), # move pixels locally around (with random strengths)
# sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.03))), # sometimes move parts of the image around
sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1)))
],
random_order=True
)
],
random_order=True
)
# images_aug, bbox_aug = seq(images=images, bounding_boxes=ia_bbox)
images_aug = seq(images=images)
# for k, bbox in enumerate(bbox_aug[0]):
# dataset_dict['annotations'][k]['bbox'][0] = max(min(bbox.x1,W-1),0)
# dataset_dict['annotations'][k]['bbox'][1] = max(min(bbox.y1,H-1),0)
# dataset_dict['annotations'][k]['bbox'][2] = max(min(bbox.x2,W-1),0)
# dataset_dict['annotations'][k]['bbox'][3] = max(min(bbox.y2,H-1),0)
# image = show(image,keypoints)
# cv2.imwrite('source.jpg',image)
# for k,i in enumerate(points_aug[0]):
# keypoints[k,0] = i[0]
# keypoints[k,1] = i[1]
# image_a = show(images_aug[0],keypoints)
# cv2.imwrite('result.jpg',image_a)
# images_aug_tensor_list = [torch.from_tensor(image).type(_dtype) for image in images_aug]
return images_aug[0] # , dataset_dict
# rst_image,bbox = arguementation(image,inp_bbox)
# cv2.rectangle(rst_image,(int(bbox[0][0]),int(bbox[0][1])),(int(bbox[0][2]),int(bbox[0][3])),(0,255,0),2)
# cv2.imwrite('demo.jpg',rst_image)
# print(image.shape,rst_image.shape)
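# --- Illustrative sketch (not part of the original module) -------------------
# Minimal usage example of arguementation() on a random uint8 image; p=1.0
# skips the early return, so the imgaug pipeline above is always applied.
# This block is only for illustration and assumes imgaug is installed.
if __name__ == '__main__':
    demo_image = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)
    augmented = arguementation(demo_image, p=1.0)
    print(demo_image.shape, augmented.shape, augmented.dtype)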
|
src/anyconfig/processors/utils.py | ssato/python-anyconfig | 213 | 11181591 | <reponame>ssato/python-anyconfig
#
# Copyright (C) 2018 - 2021 <NAME> <<EMAIL>>
# SPDX-License-Identifier: MIT
#
# pylint: disable=unidiomatic-typecheck
r"""Utility functions for anyconfig.processors.
"""
import operator
import typing
import warnings
import pkg_resources
from .. import common, ioinfo, models, utils
from .datatypes import (
ProcT, ProcsT, ProcClsT, MaybeProcT
)
def sort_by_prio(prs: typing.Iterable[ProcT]) -> ProcsT:
"""
Sort an iterable of processor classes by each priority.
:param prs: A list of :class:`anyconfig.models.processor.Processor` classes
    :return: Same as above but sorted by priority
"""
return sorted(prs, key=operator.methodcaller('priority'), reverse=True)
def select_by_key(items: typing.Iterable[
typing.Tuple[typing.List[str], typing.Any]
],
sort_fn: typing.Callable[..., typing.Any] = sorted
) -> typing.List[typing.Tuple[str, typing.List[typing.Any]]]:
"""
:param items: A list of tuples of keys and values, [([key], val)]
:return: A list of tuples of key and values, [(key, [val])]
>>> select_by_key([(['a', 'aaa'], 1), (['b', 'bb'], 2), (['a'], 3)])
[('a', [1, 3]), ('aaa', [1]), ('b', [2]), ('bb', [2])]
"""
itr = utils.concat(((k, v) for k in ks) for ks, v in items)
return list((k, sort_fn(t[1] for t in g))
for k, g in utils.groupby(itr, operator.itemgetter(0)))
def list_by_x(prs: typing.Iterable[ProcT], key: str
) -> typing.List[typing.Tuple[str, ProcsT]]:
"""
    :param prs: A list of :class:`anyconfig.models.processor.Processor` classes
    :param key: Grouping key, 'type' or 'extensions'
    :return:
        A list of :class:`Processor` or its children classes grouped by
        the given 'key', [(key, [:class:`Processor`])] by default
"""
if key == 'type':
kfn = operator.methodcaller(key)
res = sorted(((k, sort_by_prio(g)) for k, g
in utils.groupby(prs, kfn)),
key=operator.itemgetter(0))
elif key == 'extensions':
res: typing.List[ # type: ignore
typing.Tuple[str, ProcsT]
] = select_by_key(((p.extensions(), p) for p in prs),
sort_fn=sort_by_prio)
else:
raise ValueError(
f"Argument 'key' must be 'type' or 'extensions' but it was '{key}'"
)
return res
def findall_with_pred(predicate: typing.Callable[..., bool],
prs: ProcsT) -> ProcsT:
"""
:param predicate: any callable to filter results
:param prs: A list of :class:`anyconfig.models.processor.Processor` classes
:return: A list of appropriate processor classes or []
"""
return sorted((p for p in prs if predicate(p)),
key=operator.methodcaller('priority'), reverse=True)
def maybe_processor(type_or_id: typing.Union[ProcT, ProcClsT],
cls: ProcClsT = models.processor.Processor
) -> typing.Optional[ProcT]:
"""
:param type_or_id:
Type of the data to process or ID of the processor class or
:class:`anyconfig.models.processor.Processor` class object or its
instance
:param cls: A class object to compare with 'type_or_id'
:return: Processor instance or None
"""
if isinstance(type_or_id, cls):
return type_or_id
try:
if issubclass(typing.cast(ProcClsT, type_or_id), cls):
return type_or_id() # type: ignore
except TypeError:
pass
return None
def find_by_type_or_id(type_or_id: str, prs: ProcsT) -> ProcsT:
"""
:param type_or_id: Type of the data to process or ID of the processor class
:param prs: A list of :class:`anyconfig.models.processor.Processor` classes
:return:
A list of processor classes to process files of given data type or
processor 'type_or_id' found by its ID
:raises: anyconfig.common.UnknownProcessorTypeError
"""
def pred(pcls):
"""Predicate"""
return pcls.cid() == type_or_id or pcls.type() == type_or_id
pclss = findall_with_pred(pred, prs)
if not pclss:
raise common.UnknownProcessorTypeError(type_or_id)
return pclss
def find_by_fileext(fileext: str, prs: ProcsT) -> ProcsT:
"""
:param fileext: File extension
:param prs: A list of :class:`anyconfig.models.processor.Processor` classes
:return: A list of processor class to processor files with given extension
:raises: common.UnknownFileTypeError
"""
def pred(pcls):
"""Predicate"""
return fileext in pcls.extensions()
pclss = findall_with_pred(pred, prs)
if not pclss:
raise common.UnknownFileTypeError(f'file extension={fileext}')
return pclss # :: [Processor], never []
def find_by_maybe_file(obj: ioinfo.PathOrIOInfoT, prs: ProcsT) -> ProcsT:
"""
:param obj:
a file path, file or file-like object, pathlib.Path object or an
'anyconfig.ioinfo.IOInfo' (namedtuple) object
    :param prs: A list of processor classes
:return: A list of processor classes to process given (maybe) file
:raises: common.UnknownFileTypeError
"""
# :: [Processor], never []
return find_by_fileext(ioinfo.make(obj).extension, prs)
def findall(obj: typing.Optional[ioinfo.PathOrIOInfoT], prs: ProcsT,
forced_type: typing.Optional[str] = None,
) -> ProcsT:
"""
:param obj:
a file path, file, file-like object, pathlib.Path object or an
'anyconfig.ioinfo.IOInfo` (namedtuple) object
:param prs: A list of :class:`anyconfig.models.processor.Processor` classes
:param forced_type:
Forced processor type of the data to process or ID of the processor
class or None
:return: A list of instances of processor classes to process 'obj' data
:raises:
ValueError, common.UnknownProcessorTypeError,
common.UnknownFileTypeError
"""
if (obj is None or not obj) and forced_type is None:
raise ValueError(
"The first argument 'obj' or the second argument 'forced_type' "
"must be something other than None or False."
)
if forced_type is None:
pclss = find_by_maybe_file(typing.cast(ioinfo.PathOrIOInfoT, obj),
prs) # :: [Processor], never []
else:
pclss = find_by_type_or_id(forced_type, prs) # Do.
return pclss
def find(obj: typing.Optional[ioinfo.PathOrIOInfoT], prs: ProcsT,
forced_type: MaybeProcT = None,
) -> ProcT:
"""
:param obj:
a file path, file, file-like object, pathlib.Path object or an
'anyconfig.ioinfo.IOInfo' (namedtuple) object
:param prs: A list of :class:`anyconfig.models.processor.Processor` classes
:param forced_type:
Forced processor type of the data to process or ID of the processor
class or :class:`anyconfig.models.processor.Processor` class object or
its instance itself
:return: an instance of processor class to process 'obj' data
:raises:
ValueError, common.UnknownProcessorTypeError,
common.UnknownFileTypeError
"""
if forced_type is not None and not isinstance(forced_type, str):
proc = maybe_processor(
typing.cast(typing.Union[ProcT, ProcClsT], forced_type)
)
if proc is None:
raise ValueError('Wrong processor class or instance '
f'was given: {forced_type!r}')
return proc
procs = findall(obj, prs, forced_type=typing.cast(str, forced_type))
return procs[0]
def load_plugins(pgroup: str) -> typing.Iterator[ProcClsT]:
"""
A generator function to yield a class object of
:class:`anyconfig.models.processor.Processor`.
:param pgroup: A string represents plugin type, e.g. anyconfig_backends
"""
for res in pkg_resources.iter_entry_points(pgroup):
try:
yield res.load()
except ImportError as exc:
warnings.warn(f'Failed to load plugin, exc={exc!s}')
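# --- Illustrative sketch (not part of the original module) -------------------
# A toy demonstration of the grouping/prioritisation helpers above. The stub
# classes are hypothetical stand-ins for Processor subclasses and implement
# only the classmethods these helpers actually call.
def _demo_grouping():
    class JsonStub:
        @classmethod
        def priority(cls):
            return 30
        @classmethod
        def extensions(cls):
            return ['json', 'jsn']
    class YamlStub:
        @classmethod
        def priority(cls):
            return 20
        @classmethod
        def extensions(cls):
            return ['yaml', 'yml']
    prs = [YamlStub, JsonStub]
    assert sort_by_prio(prs) == [JsonStub, YamlStub]  # highest priority first
    # Grouped and sorted by extension, e.g. [('jsn', [JsonStub]), ('json', [JsonStub]), ...]
    return list_by_x(prs, 'extensions')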
# vim:sw=4:ts=4:et:
|
deepneuro/outputs/visualization.py | ysuter/DeepNeuro | 113 | 11181608 | import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from deepneuro.utilities.conversion import read_image_files
def create_mosaic(input_volume, output_filepath=None, label_volume=None, generate_outline=True, mask_value=0, step=1, dim=2, cols=8, label_buffer=5, rotate_90=3, flip=True):
"""This creates a mosaic of 2D images from a 3D Volume.
Parameters
----------
    input_volume : str or N-dimensional array
Any neuroimaging file with a filetype supported by qtim_tools, or existing numpy array.
output_filepath : None, optional
        Where to save your output, in a filetype supported by matplotlib (e.g. .png). If None, no file is saved.
label_volume : None, optional
Whether to create your mosaic with an attached label filepath / numpy array. Will not perform volume transforms from header (yet)
generate_outline : bool, optional
If True, will generate outlines for label_volumes, instead of filled-in areas. Default is True.
mask_value : int, optional
Background value for label volumes. Default is 0.
step : int, optional
Will generate an image for every [step] slice. Default is 1.
dim : int, optional
Mosaic images will be sliced along this dimension. Default is 2, which often corresponds to axial.
cols : int, optional
How many columns in your output mosaic. Rows will be determined automatically. Default is 8.
label_buffer : int, optional
        Images more than [label_buffer] slices away from a slice containing a label pixel will not be included. Default is 5.
rotate_90 : int, optional
If the output mosaic is incorrectly rotated, you may rotate clockwise [rotate_90] times. Default is 3.
flip : bool, optional
If the output is incorrectly flipped, you may set to True to flip the data. Default is True.
Returns
-------
output_array: N+1 or N-dimensional array
The generated mosaic array.
"""
image_numpy = read_image_files(input_volume)
if step is None:
step = 1
if label_volume is not None:
label_numpy = read_image_files(label_volume)
if generate_outline:
label_numpy = generate_label_outlines(label_numpy, dim, mask_value)
        # This is fun in a wacky way, but could probably be done more concisely and efficiently.
mosaic_selections = []
for i in range(label_numpy.shape[dim]):
label_slice = np.squeeze(label_numpy[[slice(None) if k != dim else slice(i, i + 1) for k in range(3)]])
if np.sum(label_slice) != 0:
mosaic_selections += list(range(i - label_buffer, i + label_buffer))
mosaic_selections = np.unique(mosaic_selections)
mosaic_selections = mosaic_selections[mosaic_selections >= 0]
mosaic_selections = mosaic_selections[mosaic_selections <= image_numpy.shape[dim]]
mosaic_selections = mosaic_selections[::step]
color_range_image = [np.min(image_numpy), np.max(image_numpy)]
color_range_label = [np.min(label_numpy), np.max(label_numpy)]
# One day, specify rotations by affine matrix.
# Is test slice necessary? Operate directly on shape if possible.
test_slice = np.rot90(np.squeeze(image_numpy[[slice(None) if k != dim else slice(0, 1) for k in range(3)]]), rotate_90)
slice_width = test_slice.shape[1]
slice_height = test_slice.shape[0]
mosaic_image_numpy = np.zeros((int(slice_height * np.ceil(float(len(mosaic_selections)) / float(cols))), int(test_slice.shape[1] * cols)), dtype=float)
mosaic_label_numpy = np.zeros_like(mosaic_image_numpy)
row_index = 0
col_index = 0
for i in mosaic_selections:
image_slice = np.rot90(np.squeeze(image_numpy[[slice(None) if k != dim else slice(i, i + 1) for k in range(3)]]), rotate_90)
label_slice = np.rot90(np.squeeze(label_numpy[[slice(None) if k != dim else slice(i, i + 1) for k in range(3)]]), rotate_90)
# Again, specify from affine matrix if possible.
if flip:
image_slice = np.fliplr(image_slice)
label_slice = np.fliplr(label_slice)
if image_slice.size > 0:
mosaic_image_numpy[int(row_index):int(row_index + slice_height), int(col_index):int(col_index + slice_width)] = image_slice
mosaic_label_numpy[int(row_index):int(row_index + slice_height), int(col_index):int(col_index + slice_width)] = label_slice
if col_index == mosaic_image_numpy.shape[1] - slice_width:
col_index = 0
row_index += slice_height
else:
col_index += slice_width
mosaic_label_numpy = np.ma.masked_where(mosaic_label_numpy == 0, mosaic_label_numpy)
if output_filepath is not None:
plt.figure(figsize=(mosaic_image_numpy.shape[0] / 100, mosaic_image_numpy.shape[1] / 100), dpi=100, frameon=False)
plt.margins(0, 0)
plt.gca().set_axis_off()
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.imshow(mosaic_image_numpy, 'gray', vmin=color_range_image[0], vmax=color_range_image[1], interpolation='none')
plt.imshow(mosaic_label_numpy, 'jet', vmin=color_range_label[0], vmax=color_range_label[1], interpolation='none')
plt.savefig(output_filepath, bbox_inches='tight', pad_inches=0.0, dpi=1000)
plt.clf()
plt.close()
return mosaic_image_numpy
else:
color_range_image = [np.min(image_numpy), np.max(image_numpy)]
test_slice = np.rot90(np.squeeze(image_numpy[[slice(None) if k != dim else slice(0, 1) for k in range(3)]]), rotate_90)
slice_width = test_slice.shape[1]
slice_height = test_slice.shape[0]
mosaic_selections = np.arange(image_numpy.shape[dim])[::step]
mosaic_image_numpy = np.zeros((int(slice_height * np.ceil(float(len(mosaic_selections)) / float(cols))), int(test_slice.shape[1] * cols)), dtype=float)
row_index = 0
col_index = 0
for i in mosaic_selections:
image_slice = np.squeeze(image_numpy[[slice(None) if k != dim else slice(i, i + 1) for k in range(3)]])
image_slice = np.rot90(image_slice, rotate_90)
if flip:
image_slice = np.fliplr(image_slice)
mosaic_image_numpy[int(row_index):int(row_index + slice_height), int(col_index):int(col_index + slice_width)] = image_slice
if col_index == mosaic_image_numpy.shape[1] - slice_width:
col_index = 0
row_index += slice_height
else:
col_index += slice_width
if output_filepath is not None:
plt.figure(figsize=(mosaic_image_numpy.shape[0] / 100, mosaic_image_numpy.shape[1] / 100), dpi=100, frameon=False)
plt.margins(0, 0)
plt.gca().set_axis_off()
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.imshow(mosaic_image_numpy, 'gray', vmin=color_range_image[0], vmax=color_range_image[1], interpolation='none')
plt.savefig(output_filepath, bbox_inches='tight', pad_inches=0.0, dpi=500)
plt.clf()
plt.close()
return mosaic_image_numpy
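# --- Illustrative sketch (not part of the original module) -------------------
# Minimal usage of create_mosaic on an in-memory volume, assuming
# read_image_files passes a 3-D numpy array through as the docstring above
# suggests. The output file name is hypothetical; with step=2 and cols=4 every
# second slice along dim=2 is tiled into a 4-column grid and also returned.
def _mosaic_example():
    volume = np.random.rand(64, 64, 32)
    return create_mosaic(volume, output_filepath='mosaic_preview.png', step=2, cols=4)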
def generate_label_outlines(label_numpy, dim=2, mask_value=0):
"""
Assumes labels are > 0 and integers.
    Parameters
    ----------
    label_numpy: N-dimensional array
        The integer label volume whose labeled regions will be outlined.
    dim: int
        Outlines are computed within slices taken along this dimension.
    mask_value: int or float
        Background value; voxels equal to "mask_value" are ignored.
    Returns
    -------
    outline_label_numpy: N-dimensional array
        A float volume of the same shape as label_numpy in which each labeled
        region is reduced to its edge voxels (which keep the label value); all
        other voxels are 0.
"""
edges_kernel = np.zeros((3, 3, 3), dtype=float)
edges_kernel[1, 1, 1] = 4
if dim != 2:
edges_kernel[1, 1, 0] = -1
edges_kernel[1, 1, 2] = -1
if dim != 1:
edges_kernel[1, 0, 1] = -1
edges_kernel[1, 2, 1] = -1
if dim != 0:
edges_kernel[0, 1, 1] = -1
edges_kernel[2, 1, 1] = -1
outline_label_numpy = np.zeros_like(label_numpy, dtype=float)
for label_number in np.unique(label_numpy):
if label_number != mask_value:
sublabel_numpy = np.copy(label_numpy)
sublabel_numpy[sublabel_numpy != label_number] = 0
edge_image = signal.convolve(sublabel_numpy, edges_kernel, mode='same').astype(int)
edge_image[sublabel_numpy != label_number] = 0
edge_image[edge_image != 0] = label_number
outline_label_numpy += edge_image.astype(float)
return outline_label_numpy
if __name__ == '__main__':
    pass
|
homeassistant/components/progettihwsw/const.py | tbarbette/core | 30,023 | 11181660 | """Define constant variables for general usage."""
DOMAIN = "progettihwsw"
DEFAULT_POLLING_INTERVAL_SEC = 5
|
benchmarks/import_cost/classes_100_with_5_invariants.py | kklein/icontract | 244 | 11181783 | <filename>benchmarks/import_cost/classes_100_with_5_invariants.py
#!/usr/bin/env python3
import icontract
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass0:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass1:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass2:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass3:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass4:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass5:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass6:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass7:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass8:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass9:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass10:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass11:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass12:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass13:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass14:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass15:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass16:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass17:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass18:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass19:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass20:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass21:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass22:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass23:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass24:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass25:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass26:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass27:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass28:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass29:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass30:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass31:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass32:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass33:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass34:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass35:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass36:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass37:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass38:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass39:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass40:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass41:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass42:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass43:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass44:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass45:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass46:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass47:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass48:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass49:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass50:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass51:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass52:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass53:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass54:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass55:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass56:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass57:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass58:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass59:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass60:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass61:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass62:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass63:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass64:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass65:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass66:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass67:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass68:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass69:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass70:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass71:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass72:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass73:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass74:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass75:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass76:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass77:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass78:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass79:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass80:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass81:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass82:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass83:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass84:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass85:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass86:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass87:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass88:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass89:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass90:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass91:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass92:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass93:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass94:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass95:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass96:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass97:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass98:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
@icontract.invariant(lambda self: self.x > 0)
@icontract.invariant(lambda self: self.x > 1)
@icontract.invariant(lambda self: self.x > 2)
@icontract.invariant(lambda self: self.x > 3)
@icontract.invariant(lambda self: self.x > 4)
class SomeClass99:
def __init__(self) -> None:
self.x = 100
def some_func(self) -> None:
pass
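# A hedged note on the pattern above (not part of the original file): icontract
# re-checks class invariants after __init__ and before/after calls to public
# methods, so weakening the state and then calling a public method should fail.
# The snippet below is an illustration only and is left commented out.
#
# inst = SomeClass99()      # invariants hold after construction (x == 100)
# inst.x = 0                # plain attribute writes are not intercepted
# inst.some_func()          # expected to raise icontract.ViolationError here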
|
aries_cloudagent/messaging/base_message.py | kuraakhilesh8230/aries-cloudagent-python | 247 | 11181892 | <gh_stars>100-1000
"""Base message."""
from abc import ABC, abstractclassmethod, abstractmethod, abstractproperty
from enum import Enum, auto
from typing import Optional, Type, TYPE_CHECKING
if TYPE_CHECKING:
from .base_handler import BaseHandler
class DIDCommVersion(Enum):
"""Serialized message formats."""
v1 = auto()
v2 = auto()
class BaseMessage(ABC):
"""Abstract base class for messages.
This formally defines a "minimum viable message" and provides an
unopinionated class for plugins to extend in whatever way makes sense in
the context of the plugin.
"""
@abstractproperty
def _type(self) -> str:
"""Return message type."""
@abstractproperty
def _id(self) -> str:
"""Return message id."""
@abstractproperty
def _thread_id(self) -> Optional[str]:
"""Return message thread id."""
@abstractmethod
def serialize(self, msg_format: DIDCommVersion = DIDCommVersion.v1) -> dict:
"""Return serialized message in format specified."""
@abstractclassmethod
def deserialize(cls, value: dict, msg_format: DIDCommVersion = DIDCommVersion.v1):
"""Return message object deserialized from value in format specified."""
@abstractproperty
def Handler(self) -> Type["BaseHandler"]:
"""Return reference to handler class."""
|
insights/tests/test_integration_support.py | lhuett/insights-core | 121 | 11181938 | from insights.plugins.ps_rule_fakes import psaux_no_filter, psauxww_ds_filter, psalxwww_parser_filter
from insights.specs import Specs
from . import InputData, run_test

import pytest


def test_run_test_missing_filters_exception():
    """
    The rule's underlying datasource requires a filter; an exception should
    be raised because the filter was not added in the rule module.
    """
    input_data = InputData("fake_input")
    input_data.add(Specs.ps_aux, "FAKE_CONTENT")
    with pytest.raises(Exception):
        run_test(psaux_no_filter, input_data, None)


def test_run_test_no_missing_filters_using_datasource():
    """
    The required filter was added directly to the datasource, so
    ``run_test`` should complete without any exceptions.
    """
    input_data = InputData("fake_input")
    input_data.add(Specs.ps_auxww, "FAKE_CONTENT")
    result = run_test(psauxww_ds_filter, input_data, None)
    assert result


def test_run_test_no_missing_filters_using_parser():
    """
    The required filter was added via the parser, so
    ``run_test`` should complete without any exceptions.
    """
    input_data = InputData("fake_input")
    input_data.add(Specs.ps_alxwww, "FAKE_CONTENT")
    result = run_test(psalxwww_parser_filter, input_data, None)
    assert result
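# A hedged sketch (not part of the original tests, left commented out so it
# cannot register filters as a side effect): roughly how a rule module
# satisfies the filter requirement exercised above. The rule name and the
# filter string are assumptions for illustration only.
#
# from insights.core.filters import add_filter
# from insights.core.plugins import rule, make_pass
# from insights.parsers.ps import PsAux
# from insights.specs import Specs
#
# add_filter(Specs.ps_aux, "COMMAND")  # register the required filter for the spec
#
# @rule(PsAux)
# def report(ps):
#     return make_pass("PS_CHECK")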
|
tests/test_dataset.py | Harald-R/aw_nas | 195 | 11181996 | <gh_stars>100-1000
import os

import pytest

# We use an environment variable to mark slow tests instead of registering new pytest marks here.
AWNAS_TEST_SLOW = os.environ.get("AWNAS_TEST_SLOW", None)


@pytest.mark.skipif(not AWNAS_TEST_SLOW, reason="parsing the corpus might be slow; not tested by default")
def test_ptb_batchify():
    from aw_nas.dataset import BaseDataset
    from aw_nas.utils import batchify_sentences

    dataset = BaseDataset.get_class_("ptb")()
    assert len(dataset.splits()["train"]) == 45438
    assert len(dataset.splits()["test"]) == 3761
    assert dataset.vocab_size == 10000
    inputs, targets = batchify_sentences(dataset.splits()["test"], 32)
    assert inputs.shape[0] == targets.shape[0]
    assert inputs.shape[1] == targets.shape[1] == 32


def test_infinite_get_callback():
    from aw_nas.utils.torch_utils import get_inf_iterator, SimpleDataset
    import torch
    import torch.utils.data

    dataset = torch.utils.data.DataLoader(SimpleDataset(([0, 1, 2, 3], [1, 2, 3, 4])),
                                          batch_size=2, num_workers=1)
    hiddens = [torch.rand(2) for _ in range(2)]
    ids = [id(hid) for hid in hiddens]
    callback = lambda: [hid.zero_() for hid in hiddens]
    queue = get_inf_iterator(dataset, callback)
    _ = next(queue)
    _ = next(queue)
    _ = next(queue)  # the loader is exhausted here, so this should trigger the callback
    assert all((hid == 0).all() for hid in hiddens), "hiddens should be reset"
    assert all(id_ == id(hid) for id_, hid in zip(ids, hiddens)), "hiddens should be reset in-place"


@pytest.mark.skipif(not AWNAS_TEST_SLOW, reason="parsing the dataset might be slow; not tested by default")
def test_imagenet_sample_class():
    from aw_nas.dataset import BaseDataset

    dataset = BaseDataset.get_class_("imagenet")(
        load_train_only=True, num_sample_classes=20, random_choose=True)
    assert len(dataset.choosen_classes) == 20
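# A hedged usage sketch (not part of the original tests): how the infinite
# iterator plus reset callback is typically consumed in a training loop.
# `model`, `train_loader`, `hiddens`, and `num_steps` are hypothetical names
# used only for illustration; the snippet is left commented out.
#
# queue = get_inf_iterator(train_loader, lambda: [hid.zero_() for hid in hiddens])
# for step in range(num_steps):
#     inputs, targets = next(queue)  # the callback fires each time the loader wraps around
#     loss = model(inputs, targets)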
|
Examples/AppKit/CocoaBindings/CurrencyConvBinding/CurrencyConvBindingDocument.py | Khan/pyobjc-framework-Cocoa | 132 | 11182024 | from Cocoa import *
class CurrencyConvBindingDocument (NSDocument):
def windowNibName(self):
return "CurrencyConvBindingDocument"
|
lbry/build_info.py | nishp77/lbry-sdk | 4,996 | 11182044 | # don't touch this. CI server changes this during build/deployment
BUILD = "dev"
COMMIT_HASH = "none"
DOCKER_TAG = "none"
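# A hedged illustration (not lbry's actual deployment tooling): a release
# script could rewrite these placeholders in place before packaging, roughly
# as below. The path and replacement values are assumptions for the example.
#
# import pathlib, re, subprocess
#
# path = pathlib.Path("lbry/build_info.py")
# commit = subprocess.check_output(["git", "rev-parse", "HEAD"], text=True).strip()
# text = path.read_text()
# text = re.sub(r'BUILD = ".*"', 'BUILD = "release"', text)
# text = re.sub(r'COMMIT_HASH = ".*"', f'COMMIT_HASH = "{commit}"', text)
# path.write_text(text)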
|
api/features/workflows/core/exceptions.py | SolidStateGroup/Bullet-Train-API | 126 | 11182064 | <filename>api/features/workflows/core/exceptions.py<gh_stars>100-1000
from rest_framework import status
from rest_framework.exceptions import APIException


class FeatureWorkflowError(APIException):
    status_code = status.HTTP_500_INTERNAL_SERVER_ERROR


class ChangeRequestNotApprovedError(FeatureWorkflowError):
    status_code = status.HTTP_400_BAD_REQUEST


class CannotApproveOwnChangeRequest(FeatureWorkflowError):
    status_code = status.HTTP_400_BAD_REQUEST
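# A hedged usage sketch (not part of the original module): DRF converts a
# raised APIException subclass into an HTTP response with that class's
# status_code, so workflow code can simply raise these exceptions. The model
# method and field names below are assumptions; the snippet is commented out.
#
# def approve(self, user):
#     if self.created_by == user:
#         raise CannotApproveOwnChangeRequest(
#             "Change requests cannot be approved by their author."
#         )
#     ...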
|