id | content
---|---
17351
|
from vue.bridge import Object
import javascript
class VueDecorator:
__key__ = None
__parents__ = ()
__id__ = None
__value__ = None
def update(self, vue_dict):
base = vue_dict
for parent in self.__parents__:
base = vue_dict.setdefault(parent, {})
if self.__id__ is None:
base[self.__key__] = self.__value__
else:
base = base.setdefault(self.__key__, {})
value = self.__value__
if isinstance(base.get(self.__id__), dict):
base[self.__id__].update(value)
else:
base[self.__id__] = value
def pyjs_bridge(fn, inject_vue_instance=False):
def wrapper(*args, **kwargs):
args = (javascript.this(), *args) if inject_vue_instance else args
args = tuple(Object.from_js(arg) for arg in args)
kwargs = {k: Object.from_js(v) for k, v in kwargs.items()}
return Object.to_js(fn(*args, **kwargs))
wrapper.__name__ = fn.__name__
return wrapper
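# Minimal usage sketch (class and key names below are hypothetical, not from the source):
# a VueDecorator subclass declares where its value lives in the Vue options dict,
# and update() merges it in, nesting under __key__ and __id__ as needed.
#
#   class Watch(VueDecorator):
#       __key__ = "watch"
#       __id__ = "counter"
#       __value__ = {"handler": "on_counter_change"}
#
#   options = {}
#   Watch().update(options)
#   # options == {"watch": {"counter": {"handler": "on_counter_change"}}}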
|
17353
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import keras
from keras.models import Model, load_model
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) # mute deprecation warnings
from keras.optimizers import Adam, SGD
from tensorflow import ConfigProto
from tensorflow import InteractiveSession
import numpy as np
import sys
from PIL import Image
import argparse
from matplotlib import pyplot as plt
from .dataloader import *
from .model import *
from .metrics import *
def train(args):
# load data
x_val, y_val = load_data(args.valid_data, args.valid_dataset)
x_train, y_train = load_data(args.train_data, 'monuseg')
print('data loading finished.')
K.clear_session()
config = ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
K.set_learning_phase(1)
input_shape = x_train[0].shape
# create model
model = BiONet(
input_shape,
num_classes=args.num_class,
num_layers=4,
iterations=args.iter,
multiplier=args.multiplier,
integrate=args.integrate
).build()
# augmentation
train_gen = get_augmented(
x_train, y_train, batch_size=args.batch_size,
data_gen_args = dict(
rotation_range=15.,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=50,
zoom_range=0.2,
horizontal_flip=True,
vertical_flip=True,
fill_mode='constant'
))
model.compile(
optimizer=Adam(lr=args.lr,decay=args.lr_decay),
loss = 'binary_crossentropy',
metrics=[iou, dice_coef]
)
print('model successfully built and compiled.')
integrate = '_int' if args.integrate else ''
weights = '_weights' if args.save_weight else ''
cpt_name = 'iter_'+str(args.iter)+'_mul_'+str(args.multiplier)+integrate+'_best'+weights+'.h5'
callbacks = [keras.callbacks.ModelCheckpoint("checkpoints/"+args.exp+"/"+cpt_name,monitor='val_iou', mode='max',verbose=0, save_weights_only=args.save_weight, save_best_only=True)]
if not os.path.isdir("checkpoints/"+args.exp):
os.mkdir("checkpoints/"+args.exp)
print('\nStart training...')
history = model.fit_generator(
train_gen,
steps_per_epoch=args.steps,
epochs=args.epochs,
validation_data=(x_val, y_val),
callbacks=callbacks
)
    print('\nTraining finished!')
K.clear_session()
def evaluate(args):
# load data
x_val, y_val = load_data(args.valid_data, args.valid_dataset)
print('data loading finished.')
K.clear_session()
K.set_learning_phase(1)
config = ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
if args.model_path is None:
integrate = '_int' if args.integrate else ''
weights = '_weights' if args.save_weight else ''
cpt_name = 'iter_'+str(args.iter)+'_mul_'+str(args.multiplier)+integrate+'_best'+weights+'.h5'
model_path = "checkpoints/"+args.exp+"/"+cpt_name
else:
model_path = args.model_path
print('Restoring model from path: '+model_path)
    if args.save_weight:
        # Build the model, then load weights separately: load_weights() returns None,
        # so chaining it would discard the model reference. The input shape is taken
        # from the validation data loaded above.
        input_shape = x_val[0].shape
        model = BiONet(
            input_shape,
            num_classes=args.num_class,
            num_layers=4,
            iterations=args.iter,
            multiplier=args.multiplier,
            integrate=args.integrate
        ).build()
        model.load_weights(model_path)
else:
model = load_model(model_path, compile=False)
model.compile(
optimizer=Adam(lr=args.lr,decay=args.lr_decay),
loss='binary_crossentropy',
metrics=[iou, dice_coef]
)
print('\nStart evaluation...')
result = model.evaluate(x_val,y_val,batch_size=args.batch_size)
print('Validation loss:\t', result[0])
print('Validation iou:\t', result[1])
print('Validation dice:\t', result[2])
print('\nEvaluation finished!')
if args.save_result:
# save metrics
if not os.path.exists("checkpoints/"+args.exp+"/outputs"):
os.mkdir("checkpoints/"+args.exp+"/outputs")
with open("checkpoints/"+args.exp+"/outputs/result.txt", 'w+') as f:
f.write('Validation loss:\t'+str(result[0])+'\n')
f.write('Validation iou:\t'+str(result[1])+'\n')
f.write('Validation dice:\t'+str(result[2])+'\n')
print('Metrics have been saved to:', "checkpoints/"+args.exp+"/outputs/result.txt")
# predict and save segmentations
results = model.predict(x_val,batch_size=args.batch_size,verbose=1)
        results = (results > 0.5).astype(np.float32) # binarization; comment this line out to keep the soft probability maps
print('\nPrediction finished!')
print('Saving segmentations...')
if not os.path.exists("checkpoints/"+args.exp+"/outputs/segmentations"):
os.mkdir("checkpoints/"+args.exp+"/outputs/segmentations")
for i in range(results.shape[0]):
            plt.imsave("checkpoints/"+args.exp+"/outputs/segmentations/"+str(i)+".png",results[i,:,:,0],cmap='gray') # binary segmentation
print('A total of '+str(results.shape[0])+' segmentation results have been saved to:', "checkpoints/"+args.exp+"/outputs/segmentations/")
K.clear_session()
def get_augmented(
X_train,
Y_train,
X_val=None,
Y_val=None,
batch_size=32,
seed=0,
data_gen_args = dict(
rotation_range=10.,
#width_shift_range=0.02,
height_shift_range=0.02,
shear_range=5,
#zoom_range=0.3,
horizontal_flip=True,
vertical_flip=False,
fill_mode='constant'
)):
# Train data, provide the same seed and keyword arguments to the fit and flow methods
X_datagen = ImageDataGenerator(**data_gen_args)
Y_datagen = ImageDataGenerator(**data_gen_args)
X_datagen.fit(X_train, augment=True, seed=seed)
Y_datagen.fit(Y_train, augment=True, seed=seed)
X_train_augmented = X_datagen.flow(X_train, batch_size=batch_size, shuffle=True, seed=seed)
Y_train_augmented = Y_datagen.flow(Y_train, batch_size=batch_size, shuffle=True, seed=seed)
train_generator = zip(X_train_augmented, Y_train_augmented)
if not (X_val is None) and not (Y_val is None):
# Validation data, no data augmentation, but we create a generator anyway
X_datagen_val = ImageDataGenerator(**data_gen_args)
Y_datagen_val = ImageDataGenerator(**data_gen_args)
X_datagen_val.fit(X_val, augment=True, seed=seed)
Y_datagen_val.fit(Y_val, augment=True, seed=seed)
X_val_augmented = X_datagen_val.flow(X_val, batch_size=batch_size, shuffle=True, seed=seed)
Y_val_augmented = Y_datagen_val.flow(Y_val, batch_size=batch_size, shuffle=True, seed=seed)
# combine generators into one which yields image and masks
val_generator = zip(X_val_augmented, Y_val_augmented)
return train_generator, val_generator
else:
return train_generator
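# Usage sketch (x_train/y_train are assumed to be 4-D arrays of shape (N, H, W, C)):
#   gen = get_augmented(x_train, y_train, batch_size=8)
#   x_batch, y_batch = next(gen)   # one matching pair of augmented image/mask batches
# This generator object is what train() passes straight to model.fit_generator().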
|
17355
|
from setuptools import setup, find_packages
__name__ = "appJar"
__version__ = "0.94.0"
__author__ = "<NAME>"
__desc__ = "An easy-to-use, feature-rich GUI wrapper for tKinter. Designed specifically for use in the classroom, but powerful enough to be used anywhere."
__author_email__ = "<EMAIL>"
__license__ = "Apache 2.0"
__url__ = "http://appJar.info"
__keywords__ = ["python", "gui", "tkinter", "appJar", "interface"]
__packages__= ["appJar"]
__classifiers__ = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Education',
'Topic :: Software Development',
'Topic :: Software Development :: User Interfaces',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: Apache Software License',
]
__long_description__ = """# appJar
Simple tKinter GUIs in Python.
"""
setup(
name=__name__,
packages=__packages__,
version=__version__,
description=__desc__,
long_description=__long_description__,
long_description_content_type="text/markdown",
author=__author__,
author_email=__author_email__,
url=__url__,
keywords=__keywords__,
license=__license__,
classifiers=__classifiers__,
package_data = {
"appJar": ["lib/*.py", "lib/*.txt", "lib/tkdnd2.8/*.tcl", "lib/tkdnd2.8/tcl_files/*.tcl", "lib/tkdnd2.8/tcl_libs/*", "resources/icons/*", "examples/showcase.py", "PYPI.md"]
}
)
|
17375
|
list1 = [10,9,3,7,2,1,23,1,561,1,1,96,1]
def cmp1(x,y):
if x == 1 or y==1:
c = y-x
else:
c = x-y
return c
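# With this comparator the list sorts ascending except that every 1 is pushed to the
# end, i.e. [2, 3, 7, 9, 10, 23, 96, 561, 1, 1, 1, 1, 1].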
from functools import cmp_to_key  # the cmp= argument was removed in Python 3
list1.sort(key=cmp_to_key(cmp1))
print(list1)
|
17395
|
from artemis.general.dict_ops import cross_dict_dicts, merge_dicts
__author__ = 'peter'
def test_cross_dict_dicts():
assert cross_dict_dicts({'a':{'aa': 1}, 'b':{'bb': 2}}, {'c': {'cc': 3}, 'd': {'dd': 4}}) == {
('a','c'):{'aa':1, 'cc':3},
('a','d'):{'aa':1, 'dd':4},
('b','c'):{'bb':2, 'cc':3},
('b','d'):{'bb':2, 'dd':4}
}
def test_dict_merge():
assert merge_dicts({'a': 1, 'b': 2, 'c': 3}, {'c': 4, 'd': 5}, {'d': 6, 'e': 7}) == {
'a': 1,
'b': 2,
'c': 4,
'd': 6,
'e': 7,
}
if __name__ == "__main__":
test_dict_merge()
test_cross_dict_dicts()
|
17405
|
import os
import json
from common import update_json_file, get_logger, exec_cmd
from yamlparser import Parser
from pathlib import Path
logger = get_logger("update-image")
# Functions that work to update gluu_versions.json
def determine_final_official_and_dev_version(tag_list):
"""
    Determine the official version, i.e. 4.1.0, 4.2.2, etc., using the oxauth repo
@param tag_list:
@return:
"""
# Check for the highest major.minor.patch i.e 4.2.0 vs 4.2.2
dev_image = ""
patch_list = []
for tag in tag_list:
patch_list.append(int(tag[4:5]))
# Remove duplicates
patch_list = list(set(patch_list))
# Sort
patch_list.sort()
highest_major_minor_patch_number = str(patch_list[-1])
versions_list = []
for tag in tag_list:
if "dev" in tag and tag[4:5] == highest_major_minor_patch_number:
dev_image = tag[0:5] + "_dev"
# Exclude any tag with the following
if "dev" not in tag and "a" not in tag and tag[4:5] == highest_major_minor_patch_number:
versions_list.append(int(tag[6:8]))
    # A case where only a dev version of a new patch is available; then a lower stable patch should be checked.
    # i.e. there is no 4.3.0_01 but there is 4.2.2_dev
if not versions_list:
highest_major_minor_patch_number = str(int(highest_major_minor_patch_number) - 1)
for tag in tag_list:
if not dev_image and "dev" in tag and tag[4:5] == highest_major_minor_patch_number:
dev_image = tag[0:5] + "_dev"
# Exclude any tag with the following
if "dev" not in tag and "a" not in tag and tag[4:5] == highest_major_minor_patch_number:
versions_list.append(int(tag[6:8]))
# Remove duplicates
versions_list = list(set(versions_list))
# Sort
versions_list.sort()
# Return highest patch
highest_major_minor_patch_image_patch = str(versions_list[-1])
if len(highest_major_minor_patch_image_patch) == 1:
highest_major_minor_patch_image_patch = "0" + highest_major_minor_patch_image_patch
highest_major_minor_patch_image = ""
for tag in tag_list:
if "dev" not in tag and highest_major_minor_patch_image_patch in tag \
and tag[4:5] == highest_major_minor_patch_number:
highest_major_minor_patch_image = tag
return highest_major_minor_patch_image, dev_image
def determine_major_version(all_repos_tags):
"""
    Determine the official major version, i.e. 4.1, 4.2, etc., using the oxauth repo
@param all_repos_tags:
@return:
"""
versions_list = []
for tag in all_repos_tags["oxauth"]:
# Exclude any tag with the following
if "dev" not in tag \
and "latest" not in tag \
and "secret" not in tag \
and "gluu-engine" not in tag:
versions_list.append(float(tag[0:3]))
# Remove duplicates
versions_list = list(set(versions_list))
# Sort
versions_list.sort()
# Return highest version
return versions_list[-1]
def get_docker_repo_tag(org, repo):
"""
Returns a dictionary of all available tags for a certain repo
:param org:
:param repo:
:return:
"""
logger.info("Getting docker tag for repository {}.".format(repo))
exec_get_repo_tag_curl_command = ["curl", "-s",
"https://hub.docker.com/v2/repositories/{}/{}/tags/?page_size=100".format(org,
repo)]
stdout, stderr, retcode = None, None, None
try:
stdout, stderr, retcode = exec_cmd(" ".join(exec_get_repo_tag_curl_command))
except (IndexError, Exception):
manual_curl_command = " ".join(exec_get_repo_tag_curl_command)
logger.error("Failed to curl\n{}".format(manual_curl_command))
all_tags = json.loads(stdout)["results"]
image_tags = []
for tag in all_tags:
image_tags.append(tag["name"])
image_tags_dict = dict()
image_tags_dict[repo] = image_tags
return image_tags_dict
def filter_all_repo_dictionary_tags(all_repos_tags, major_official_version):
"""
    Analyze the dictionary containing all repos and keep only the tags and versions matching the major version
@param all_repos_tags:
@param major_official_version:
"""
filtered_all_repos_tags = dict()
for repo, tag_list in all_repos_tags.items():
temp_filtered_tag_list = []
for tag in tag_list:
if major_official_version == tag[0:3]:
temp_filtered_tag_list.append(tag)
filtered_all_repos_tags[repo] = temp_filtered_tag_list
return filtered_all_repos_tags
def analyze_filtered_dict_return_final_dict(filtered_all_repos_tags, major_official_version):
"""
Analyze filtered dictionary and return the final dict with only one official version and one dev version
@param filtered_all_repos_tags:
@param major_official_version:
"""
final_official_version_dict = dict()
final_dev_version_dict = dict()
    # Gluu's main values.yaml
gluu_values_file = Path("../pygluu/kubernetes/templates/helm/gluu/values.yaml").resolve()
gluu_values_file_parser = Parser(gluu_values_file, True)
dev_version = ""
def update_dicts_and_yamls(name, rep, tags_list, helm_name=None):
final_tag, final_dev_tag = determine_final_official_and_dev_version(tags_list)
final_official_version_dict[name + "_IMAGE_NAME"] = "gluufederation/" + rep
final_dev_version_dict[name + "_IMAGE_NAME"] = "gluufederation/" + rep
final_official_version_dict[name + "_IMAGE_TAG"], final_dev_version_dict[name + "_IMAGE_TAG"] \
= final_tag, final_dev_tag
if rep != "upgrade":
if helm_name:
gluu_values_file_parser[helm_name]["image"]["repository"] = "gluufederation/" + rep
gluu_values_file_parser[helm_name]["image"]["tag"] = final_tag
else:
gluu_values_file_parser[rep]["image"]["repository"] = "gluufederation/" + rep
gluu_values_file_parser[rep]["image"]["tag"] = final_tag
for repo, tag_list in filtered_all_repos_tags.items():
official_version, dev_version = determine_final_official_and_dev_version(tag_list)
if repo == "casa":
update_dicts_and_yamls("CASA", repo, tag_list)
elif repo == "oxd-server":
update_dicts_and_yamls("OXD", repo, tag_list)
elif repo == "fido2":
update_dicts_and_yamls("FIDO2", repo, tag_list)
elif repo == "scim":
update_dicts_and_yamls("SCIM", repo, tag_list)
elif repo == "config-init":
update_dicts_and_yamls("CONFIG", repo, tag_list, "config")
elif repo == "cr-rotate":
update_dicts_and_yamls("CACHE_REFRESH_ROTATE", repo, tag_list)
elif repo == "certmanager":
update_dicts_and_yamls("CERT_MANAGER", repo, tag_list, "oxauth-key-rotation")
elif repo == "opendj":
update_dicts_and_yamls("LDAP", repo, tag_list, "opendj")
elif repo == "jackrabbit":
update_dicts_and_yamls("JACKRABBIT", repo, tag_list)
elif repo == "oxauth":
update_dicts_and_yamls("OXAUTH", repo, tag_list)
elif repo == "oxpassport":
update_dicts_and_yamls("OXPASSPORT", repo, tag_list)
elif repo == "oxshibboleth":
update_dicts_and_yamls("OXSHIBBOLETH", repo, tag_list)
elif repo == "oxtrust":
update_dicts_and_yamls("OXTRUST", repo, tag_list)
elif repo == "persistence":
update_dicts_and_yamls("PERSISTENCE", repo, tag_list)
elif repo == "upgrade":
update_dicts_and_yamls("UPGRADE", repo, tag_list)
gluu_versions_dict = {major_official_version: final_official_version_dict,
dev_version: final_dev_version_dict}
gluu_values_file_parser.dump_it()
return gluu_versions_dict
def main():
all_repos_tags = dict()
org = os.environ.get("ORG_NAME", "gluufederation")
gluu_docker_repositories_names_used_in_cn = ["casa", "fido2", "scim", "config-init",
"cr-rotate", "certmanager", "opendj", "jackrabbit", "oxauth",
"oxd-server", "oxpassport", "oxshibboleth",
"oxtrust", "persistence", "upgrade"]
for repo in gluu_docker_repositories_names_used_in_cn:
all_repos_tags.update(get_docker_repo_tag(org, repo))
major_official_version = str(determine_major_version(all_repos_tags))
filtered_all_repos_tags = filter_all_repo_dictionary_tags(all_repos_tags, major_official_version)
final_gluu_versions_dict = analyze_filtered_dict_return_final_dict(filtered_all_repos_tags, major_official_version)
update_json_file(final_gluu_versions_dict, '../pygluu/kubernetes/templates/gluu_versions.json')
if __name__ == '__main__':
main()
|
17447
|
import unittest
import os
import sys
import StringIO
path = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../lvsm')))
from lvsm.modules import keepalived
class Keepalived(unittest.TestCase):
"""Tests for the functionality of the keepalived module"""
def setUp(self):
args = {'keepalived-mib': 'KEEPALIVED-MIB',
'snmp_community': 'private',
'snmp_host': 'localhost',
'snmp_user': '',
'snmp_password': '',
'cache_dir': path + '/cache'
}
self.director = keepalived.Keepalived(path + '/scripts/ipvsadm3',
path + '/etc/keepalived.conf',
restart_cmd='',
nodes='',
args=args)
def test_show(self):
self.maxDiff = None
# Testing show on non-standard ports
expected_result = ['',
'Layer 4 Load balancing',
'======================',
'TCP 192.0.2.2:8888 rr ',
' -> 192.0.2.200:8888 Masq 1 0 0 ',
' -> 192.0.2.201:8888 Masq 1 0 0 ',
'',
'UDP 192.0.2.2:domain rr ',
' -> 192.0.2.202:domain Masq 1 0 0 ',
' -> 192.0.2.203:domain Masq 1 0 0 ',
'',
'']
self.assertEqual(self.director.show(numeric=False, color=False), expected_result)
if __name__ == "__main__":
unittest.main()
|
17460
|
import pytest
import re
import unittest
import metric_learn
import numpy as np
from sklearn import clone
from test.test_utils import ids_metric_learners, metric_learners, remove_y
from metric_learn.sklearn_shims import set_random_state, SKLEARN_AT_LEAST_0_22
def remove_spaces(s):
return re.sub(r'\s+', '', s)
def sk_repr_kwargs(def_kwargs, nndef_kwargs):
"""Given the non-default arguments, and the default
keywords arguments, build the string that will appear
in the __repr__ of the estimator, depending on the
version of scikit-learn.
"""
if SKLEARN_AT_LEAST_0_22:
def_kwargs = {}
def_kwargs.update(nndef_kwargs)
args_str = ",".join(f"{key}={repr(value)}"
for key, value in def_kwargs.items())
return args_str
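# Example (illustrative values): with scikit-learn >= 0.22 only the non-default kwargs
# are rendered, so sk_repr_kwargs({'tol': 0.001, 'verbose': False}, {'tol': 0.1})
# returns "tol=0.1"; on older versions the default kwargs are included as well.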
class TestStringRepr(unittest.TestCase):
def test_covariance(self):
def_kwargs = {'preprocessor': None}
nndef_kwargs = {}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.Covariance())),
remove_spaces(f"Covariance({merged_kwargs})"))
def test_lmnn(self):
def_kwargs = {'convergence_tol': 0.001, 'init': 'auto', 'k': 3,
'learn_rate': 1e-07, 'max_iter': 1000, 'min_iter': 50,
'n_components': None, 'preprocessor': None,
'random_state': None, 'regularization': 0.5,
'verbose': False}
nndef_kwargs = {'convergence_tol': 0.01, 'k': 6}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.LMNN(convergence_tol=0.01, k=6))),
remove_spaces(f"LMNN({merged_kwargs})"))
def test_nca(self):
def_kwargs = {'init': 'auto', 'max_iter': 100, 'n_components': None,
'preprocessor': None, 'random_state': None, 'tol': None,
'verbose': False}
nndef_kwargs = {'max_iter': 42}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.NCA(max_iter=42))),
remove_spaces(f"NCA({merged_kwargs})"))
def test_lfda(self):
def_kwargs = {'embedding_type': 'weighted', 'k': None,
'n_components': None, 'preprocessor': None}
nndef_kwargs = {'k': 2}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.LFDA(k=2))),
remove_spaces(f"LFDA({merged_kwargs})"))
def test_itml(self):
def_kwargs = {'convergence_threshold': 0.001, 'gamma': 1.0,
'max_iter': 1000, 'preprocessor': None,
'prior': 'identity', 'random_state': None, 'verbose': False}
nndef_kwargs = {'gamma': 0.5}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.ITML(gamma=0.5))),
remove_spaces(f"ITML({merged_kwargs})"))
def_kwargs = {'convergence_threshold': 0.001, 'gamma': 1.0,
'max_iter': 1000, 'num_constraints': None,
'preprocessor': None, 'prior': 'identity',
'random_state': None, 'verbose': False}
nndef_kwargs = {'num_constraints': 7}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.ITML_Supervised(num_constraints=7))),
remove_spaces(f"ITML_Supervised({merged_kwargs})"))
def test_lsml(self):
def_kwargs = {'max_iter': 1000, 'preprocessor': None, 'prior': 'identity',
'random_state': None, 'tol': 0.001, 'verbose': False}
nndef_kwargs = {'tol': 0.1}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.LSML(tol=0.1))),
remove_spaces(f"LSML({merged_kwargs})"))
def_kwargs = {'max_iter': 1000, 'num_constraints': None,
'preprocessor': None, 'prior': 'identity',
'random_state': None, 'tol': 0.001, 'verbose': False,
'weights': None}
nndef_kwargs = {'verbose': True}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.LSML_Supervised(verbose=True))),
remove_spaces(f"LSML_Supervised({merged_kwargs})"))
def test_sdml(self):
def_kwargs = {'balance_param': 0.5, 'preprocessor': None,
'prior': 'identity', 'random_state': None,
'sparsity_param': 0.01, 'verbose': False}
nndef_kwargs = {'verbose': True}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.SDML(verbose=True))),
remove_spaces(f"SDML({merged_kwargs})"))
def_kwargs = {'balance_param': 0.5, 'num_constraints': None,
'preprocessor': None, 'prior': 'identity',
'random_state': None, 'sparsity_param': 0.01,
'verbose': False}
nndef_kwargs = {'sparsity_param': 0.5}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.SDML_Supervised(sparsity_param=0.5))),
remove_spaces(f"SDML_Supervised({merged_kwargs})"))
def test_rca(self):
def_kwargs = {'n_components': None, 'preprocessor': None}
nndef_kwargs = {'n_components': 3}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.RCA(n_components=3))),
remove_spaces(f"RCA({merged_kwargs})"))
def_kwargs = {'chunk_size': 2, 'n_components': None, 'num_chunks': 100,
'preprocessor': None, 'random_state': None}
nndef_kwargs = {'num_chunks': 5}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.RCA_Supervised(num_chunks=5))),
remove_spaces(f"RCA_Supervised({merged_kwargs})"))
def test_mlkr(self):
def_kwargs = {'init': 'auto', 'max_iter': 1000,
'n_components': None, 'preprocessor': None,
'random_state': None, 'tol': None, 'verbose': False}
nndef_kwargs = {'max_iter': 777}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.MLKR(max_iter=777))),
remove_spaces(f"MLKR({merged_kwargs})"))
def test_mmc(self):
def_kwargs = {'convergence_threshold': 0.001, 'diagonal': False,
'diagonal_c': 1.0, 'init': 'identity', 'max_iter': 100,
'max_proj': 10000, 'preprocessor': None,
'random_state': None, 'verbose': False}
nndef_kwargs = {'diagonal': True}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.MMC(diagonal=True))),
remove_spaces(f"MMC({merged_kwargs})"))
def_kwargs = {'convergence_threshold': 1e-06, 'diagonal': False,
'diagonal_c': 1.0, 'init': 'identity', 'max_iter': 100,
'max_proj': 10000, 'num_constraints': None,
'preprocessor': None, 'random_state': None,
'verbose': False}
nndef_kwargs = {'max_iter': 1}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.MMC_Supervised(max_iter=1))),
remove_spaces(f"MMC_Supervised({merged_kwargs})"))
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_get_metric_is_independent_from_metric_learner(estimator,
build_dataset):
"""Tests that the get_metric method returns a function that is independent
from the original metric learner"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
set_random_state(model)
# we fit the metric learner on it and then we compute the metric on some
# points
model.fit(*remove_y(model, input_data, labels))
metric = model.get_metric()
score = metric(X[0], X[1])
# then we refit the estimator on another dataset
model.fit(*remove_y(model, np.sin(input_data), labels))
# we recompute the distance between the two points: it should be the same
score_bis = metric(X[0], X[1])
assert score_bis == score
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_get_metric_raises_error(estimator, build_dataset):
"""Tests that the metric returned by get_metric raises errors similar to
the distance functions in scipy.spatial.distance"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
set_random_state(model)
model.fit(*remove_y(model, input_data, labels))
metric = model.get_metric()
list_test_get_metric_raises = [(X[0].tolist() + [5.2], X[1]), # vectors with
# different dimensions
(X[0:4], X[1:5]), # 2D vectors
(X[0].tolist() + [5.2], X[1] + [7.2])]
# vectors of same dimension but incompatible with what the metric learner
# was trained on
for u, v in list_test_get_metric_raises:
with pytest.raises(ValueError):
metric(u, v)
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_get_metric_works_does_not_raise(estimator, build_dataset):
"""Tests that the metric returned by get_metric does not raise errors (or
warnings) similarly to the distance functions in scipy.spatial.distance"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
set_random_state(model)
model.fit(*remove_y(model, input_data, labels))
metric = model.get_metric()
list_test_get_metric_doesnt_raise = [(X[0], X[1]),
(X[0].tolist(), X[1].tolist()),
(X[0][None], X[1][None])]
for u, v in list_test_get_metric_doesnt_raise:
with pytest.warns(None) as record:
metric(u, v)
assert len(record) == 0
# Test that the scalar case works
model.components_ = np.array([3.1])
metric = model.get_metric()
for u, v in [(5, 6.7), ([5], [6.7]), ([[5]], [[6.7]])]:
with pytest.warns(None) as record:
metric(u, v)
assert len(record) == 0
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_n_components(estimator, build_dataset):
"""Check that estimators that have a n_components parameters can use it
and that it actually works as expected"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
if hasattr(model, 'n_components'):
set_random_state(model)
model.set_params(n_components=None)
model.fit(*remove_y(model, input_data, labels))
assert model.components_.shape == (X.shape[1], X.shape[1])
model = clone(estimator)
set_random_state(model)
model.set_params(n_components=X.shape[1] - 1)
model.fit(*remove_y(model, input_data, labels))
assert model.components_.shape == (X.shape[1] - 1, X.shape[1])
model = clone(estimator)
set_random_state(model)
model.set_params(n_components=X.shape[1] + 1)
with pytest.raises(ValueError) as expected_err:
model.fit(*remove_y(model, input_data, labels))
assert (str(expected_err.value) ==
'Invalid n_components, must be in [1, {}]'.format(X.shape[1]))
model = clone(estimator)
set_random_state(model)
model.set_params(n_components=0)
with pytest.raises(ValueError) as expected_err:
model.fit(*remove_y(model, input_data, labels))
assert (str(expected_err.value) ==
'Invalid n_components, must be in [1, {}]'.format(X.shape[1]))
if __name__ == '__main__':
unittest.main()
|
17472
|
from collections import namedtuple
import pytest
from nesta.packages.examples.example_package import some_func
@pytest.fixture
def mocked_row():
def _mocked_row(*, id, name):
Row = namedtuple('Row', ['id', 'name'])
return Row(id=id, name=name)
return _mocked_row
class TestSomeFunc:
def test_some_func_returns_true_when_start_string_in_name(self, mocked_row):
mocked_row = mocked_row(id=1, name='cat')
assert some_func('cat', mocked_row) == {'my_id': 1, 'data': True}
def test_some_func_returns_false_when_start_string_not_in_name(self, mocked_row):
mocked_row = mocked_row(id=2, name='cat')
assert some_func('dog', mocked_row) == {'my_id': 2, 'data': False}
def test_some_func_returns_false_when_name_is_none(self, mocked_row):
mocked_row = mocked_row(id=3, name=None)
assert some_func('cat', mocked_row) == {'my_id': 3, 'data': False}
|
17486
|
from .db import db
from .userfollower import UserFollower
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from sqlalchemy import Table, Column, Integer, ForeignKey, or_
from .directmessage import DirectMessage
from .userequipment import UserEquipment
from .equipment import Equipment
from .message import Message
from .messagereceiver import MessageReceiver
from sqlalchemy.orm import validates
class User(db.Model, UserMixin):
__tablename__ = 'Users'
id = db.Column(db.Integer, primary_key = True)
username = db.Column(db.String(40), nullable = False, unique = True)
name = db.Column(db.String(100), nullable=True)
email = db.Column(db.String(255), nullable = False, unique = True)
hashed_password = db.Column(db.String(255), nullable = False)
bio = db.Column(db.Text, nullable=True)
websiteUrl = db.Column(db.Text, nullable=False, default="www.google.com")
userType = db.Column(db.Integer, nullable=True, default=0)
profilePicUrl = db.Column(db.Text, nullable=True)
createdAt = db.Column(db.DateTime(timezone=True), server_default=db.func.now()) #func.sysdate())
updatedAt = db.Column(db.DateTime(timezone=True), server_default=db.func.now(), server_onupdate=db.func.now())
ownPosts = db.relationship('Post', foreign_keys='Post.userId')
ownComments = db.relationship('Comment', foreign_keys='Comment.userId')
taggedInPosts = db.relationship('Post', secondary='taggedusers')
likedPosts = db.relationship('Post', secondary='likedposts')
savedPosts = db.relationship('Post', secondary='savedposts')
sentMessages = db.relationship('DirectMessage', foreign_keys='DirectMessage.senderId')
receivedMessages = db.relationship('DirectMessage', foreign_keys='DirectMessage.receiverId')
likedComments = db.relationship('Comment', secondary='commentlikes')
taggedInComments = db.relationship('Comment', secondary='commenttaggedusers')
followers = [] #db.relationship('User', secondary='userfollowers', foreign_keys='UserFollower.followerId')
following = [] #db.relationship('User', secondary='userfollowers', foreign_keys='UserFollower.userId')
allMessages = []
# equipmentList = []
equipmentList = db.relationship('Equipment', secondary="UserEquipments")
# @validates('username', 'email')
# def convert_lower(self, key, value):
# return value.lower()
@property
def password(self):
return self.hashed_password
@password.setter
def password(self, password):
self.hashed_password = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password, password)
def get_followers(self):
ufs = UserFollower.query.filter(UserFollower.userId == self.id).all()
self.followers = [uf.follower for uf in ufs]
def get_following(self):
ufs = UserFollower.query.filter(UserFollower.followerId == self.id).all()
self.following = [uf.person for uf in ufs]
def get_messages(self):
msgs = DirectMessage.query\
.filter(or_(DirectMessage.senderId == self.id, \
DirectMessage.receiverId == self.id)).order_by(DirectMessage.id).all()
self.allMessages = msgs
def get_conversations(self):
convos = MessageReceiver.query\
.filter(or_(MessageReceiver.senderId == self.id, \
MessageReceiver.receiverId == self.id)).order_by(MessageReceiver.id).all()
uniqueConvos = []
if len(convos):
messageIdSet = set()
for convo in convos:
if convo.senderId != self.id:
uniqueConvos.append(convo)
else:
if convo.messageId not in messageIdSet:
uniqueConvos.append(convo)
messageIdSet.add(convo.messageId)
self.allMessages = uniqueConvos
def get_last_conversation(self):
convo = MessageReceiver.query\
.filter(or_(MessageReceiver.senderId == self.id, \
MessageReceiver.receiverId == self.id)).order_by(-MessageReceiver.id).first()
self.allMessages = [convo]
def to_dict(self):
return {
"id": self.id,
"name": self.name,
"username": self.username,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
}
def to_dict_with_posts_and_follows(self):
self.get_followers()
self.get_following()
return {
"id": self.id,
"name": self.name,
"username": self.username,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
"followers": [user.to_dict() for user in self.followers],
"following": [user.to_dict() for user in self.following],
"ownPosts": [post.to_dict() for post in self.ownPosts],
"equipmentList": [equipment.to_dict() for equipment in self.equipmentList],
}
def to_dict_with_posts(self):
return {
"id": self.id,
"name": self.name,
"username": self.username,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
"ownPosts": [post.to_dict() for post in self.ownPosts],
}
def to_dict_with_posts_fast(self):
user_as_dict_basic = {
"id": self.id,
"name": self.name,
"username": self.username,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
}
user_as_dict = user_as_dict_basic.copy()
user_as_dict["ownPosts"] = [post.to_dict_fast_own_user(user_as_dict_basic) for post in self.ownPosts]
return user_as_dict
# "ownPosts": [post.to_dict_fast() for post in self.ownPosts],
def to_dict_feed(self):
self.get_following()
return {
"followingIds": [int(follow.id) for follow in self.following]
}
def to_dict_for_mentions(self):
return {
"id": self.id,
"displayName": self.name,
"name": self.username,
"profilePicUrl": self.profilePicUrl,
}
def to_dict_no_posts(self):
        # no posts, so if a post has this user there are no infinite circular references
return {
"id": self.id,
"username": self.username,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
}
def to_dict_for_self(self):
self.get_followers()
self.get_following()
# self.get_messages()
self.get_conversations()
return {
"id": self.id,
"username": self.username,
"name": self.name,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
"userType": self.userType,
"ownPosts": [post.to_dict() for post in self.ownPosts],
"likedPosts": [post.to_dict() for post in self.likedPosts],
"savedPosts": [post.to_dict() for post in self.savedPosts],
"taggedInPosts": [post.to_dict() for post in self.taggedInPosts],
"messages": [m.to_dict() for m in self.allMessages], #[sentMsg.to_dict() for sentMsg in self.sentMessages] + [recvdMsg.to_dict() for recvdMsg in self.receivedMessages],
"followers": [user.to_dict() for user in self.followers],
"following": [user.to_dict() for user in self.following],
"likedComments": [comment.to_dict() for comment in self.likedComments],
"taggedInComments": [comment.to_dict() for comment in self.taggedInComments],
"equipmentList": [equipment.to_dict() for equipment in self.equipmentList],
}
def to_dict_as_generic_profile(self):
'''
compared to "for_self" this does not include:
- messages
and more later
'''
self.get_followers()
self.get_following()
return {
"id": self.id,
"username": self.username,
"name": self.name,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
"ownPosts": [post.to_dict() for post in self.ownPosts],
"likedPosts": [post.to_dict() for post in self.likedPosts],
"savedPosts": [post.to_dict() for post in self.savedPosts],
"taggedInPosts": [post.to_dict() for post in self.taggedInPosts],
"followers": [user.to_dict() for user in self.followers],
"following": [user.to_dict() for user in self.following],
"likedComments": [comment.to_dict() for comment in self.likedComments],
"taggedInComments": [comment.to_dict() for comment in self.taggedInComments],
"equipmentList": [equipment.to_dict() for equipment in self.equipmentList],
}
'''
mapper(
User, t_users,
properties={
'followers': relation(
User,
secondary=t_follows,
primaryjoin=(t_follows.c.followee_id==t_users.c.id),
secondaryjoin=(t_follows.c.follower_id==t_users.c.id),
),
'followees': relation(
User,
secondary=t_follows,
primaryjoin=(t_follows.c.follower_id==t_users.c.id),
secondaryjoin=(t_follows.c.followee_id==t_users.c.id),
),
},
)
'''
|
17499
|
import math
import time
from compas_fab.backends import RosClient
from compas.artists import Artist
from compas.geometry import Frame
with RosClient("localhost") as client:
robot = client.load_robot(load_geometry=True)
group = robot.main_group_name
frame = Frame((0.4, 0.3, 0.05), (-1, 0, 0), (0, 1, 0))
tolerance_position = 0.001
tolerance_axes = [math.radians(1)] * 3
start_configuration = robot.zero_configuration()
start_configuration.joint_values = (-0.106, 5.351, 2.231, -2.869, 4.712, 1.465)
# create goal constraints from frame
goal_constraints = robot.constraints_from_frame(frame, tolerance_position, tolerance_axes, group)
trajectory = robot.plan_motion(goal_constraints, start_configuration, group, options=dict(planner_id="RRT"))
print("Computed kinematic path with %d configurations." % len(trajectory.points))
print("Executing this path at full speed would take approx. %.3f seconds." % trajectory.time_from_start)
artist = Artist(robot.model)
for tp in trajectory.points:
config = robot.zero_configuration()
config.joint_values = tp.joint_values
artist.update(config)
artist.draw_visual()
artist.redraw()
time.sleep(0.02)
|
17502
|
import PIL
import numpy as np
def to_grayscale(img):
    return np.dot(img, [0.299, 0.587, 0.114])  # standard BT.601 luma weights (0.114 for blue)
def zero_center(img):
return img - 127.0
def crop(img, bottom=12, left=6, right=6):
height, width = img.shape
return img[0: height - bottom, left: width - right]
def save(img, path):
pil_img = PIL.Image.fromarray(img)
pil_img.save(path)
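# Minimal usage sketch (synthetic data; the shape is an assumption, not from the source).
if __name__ == "__main__":
    rgb = np.random.randint(0, 256, (100, 120, 3)).astype(np.float32)
    gray = crop(zero_center(to_grayscale(rgb)))
    print(gray.shape)  # (88, 108): 12 px cropped from the bottom, 6 px from each side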
|
17540
|
from abc import ABC as Contract, abstractmethod
class AuthContract(Contract):
@abstractmethod
def user(self):
pass
@abstractmethod
def save(self):
pass
@abstractmethod
def delete(self):
pass
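# Minimal sketch (hypothetical in-memory backend, not part of the source) showing how a
# concrete class satisfies the contract; every abstract method must be overridden before
# the class can be instantiated.
if __name__ == "__main__":
    class DummyAuth(AuthContract):
        def __init__(self):
            self._user = {"id": 1, "name": "guest"}
        def user(self):
            return self._user
        def save(self):
            return True
        def delete(self):
            self._user = None
            return True

    print(DummyAuth().user())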
|
17580
|
import tensorflow as tf
from detection.utils.misc import *
class PyramidROIAlign(tf.keras.layers.Layer):
def __init__(self, pool_shape, **kwargs):
'''
Implements ROI Pooling on multiple levels of the feature pyramid.
Attributes
---
pool_shape: (height, width) of the output pooled regions.
Example: (7, 7)
'''
super(PyramidROIAlign, self).__init__(**kwargs)
self.pool_shape = tuple(pool_shape)
def call(self, inputs, training=True):
'''
Args
---
rois_list: list of [num_rois, (y1, x1, y2, x2)] in normalized coordinates.
feature_map_list: List of [batch, height, width, channels].
feature maps from different levels of the pyramid.
img_metas: [batch_size, 11]
Returns
---
pooled_rois_list: list of [num_rois, pooled_height, pooled_width, channels].
The width and height are those specific in the pool_shape in the layer
constructor.
'''
rois_list, feature_map_list, img_metas = inputs # [2000 ,4], list:[P2, P3, P4, P5]
pad_shapes = calc_pad_shapes(img_metas)
pad_areas = pad_shapes[:, 0] * pad_shapes[:, 1] # 1216*1216
num_rois_list = [rois.shape.as_list()[0] for rois in rois_list] # data:[2000]
roi_indices = tf.constant(
[i for i in range(len(rois_list)) for _ in range(rois_list[i].shape.as_list()[0])],
dtype=tf.int32
) #[0.....], shape:[2000]
areas = tf.constant(# range(1) range(2000)
[pad_areas[i] for i in range(pad_areas.shape[0]) for _ in range(num_rois_list[i])],
dtype=tf.float32
)#[1216*1216, 1216*1216,...], shape:[2000]
rois = tf.concat(rois_list, axis=0) # [2000, 4]
# Assign each ROI to a level in the pyramid based on the ROI area.
y1, x1, y2, x2 = tf.split(rois, 4, axis=1) # 4 of [2000, 1]
h = y2 - y1 # [2000, 1]
w = x2 - x1 # [2000, 1]
# Equation 1 in the Feature Pyramid Networks paper. Account for
# the fact that our coordinates are normalized here.
# e.g. a 224x224 ROI (in pixels) maps to P4
roi_level = tf.math.log( # [2000]
tf.sqrt(tf.squeeze(h * w, 1))
/ tf.cast((224.0 / tf.sqrt(areas * 1.0)), tf.float32)
) / tf.math.log(2.0)
roi_level = tf.minimum(5, tf.maximum( # [2000], clamp to [2-5]
2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
        # roi_level indicates which level of feature to use
# Loop through levels and apply ROI pooling to each. P2 to P5.
pooled_rois = []
roi_to_level = []
for i, level in enumerate(range(2, 6)): # 2,3,4,5
ix = tf.where(tf.equal(roi_level, level)) # [1999, 1], means 1999 of 2000 select P2
level_rois = tf.gather_nd(rois, ix) # boxes to crop, [1999, 4]
# ROI indices for crop_and_resize.
level_roi_indices = tf.gather_nd(roi_indices, ix) # [19999], data:[0....0]
# Keep track of which roi is mapped to which level
roi_to_level.append(ix)
            # Stop gradient propagation to ROI proposals
level_rois = tf.stop_gradient(level_rois)
level_roi_indices = tf.stop_gradient(level_roi_indices)
# Crop and Resize
# From Mask R-CNN paper: "We sample four regular locations, so
# that we can evaluate either max or average pooling. In fact,
# interpolating only a single value at each bin center (without
# pooling) is nearly as effective."
#
# Here we use the simplified approach of a single value per bin,
# which is how it's done in tf.crop_and_resize()
# Result: [batch * num_rois, pool_height, pool_width, channels]
pooled_rois.append(tf.image.crop_and_resize(
feature_map_list[i], level_rois, level_roi_indices, self.pool_shape,
method="bilinear")) # [1, 304, 304, 256], [1999, 4], [1999], [2]=[7,7]=>[1999,7,7,256]
# [1999, 7, 7, 256], [], [], [1,7,7,256] => [2000, 7, 7, 256]
# Pack pooled features into one tensor
pooled_rois = tf.concat(pooled_rois, axis=0)
# Pack roi_to_level mapping into one array and add another
# column representing the order of pooled rois
roi_to_level = tf.concat(roi_to_level, axis=0) # [2000, 1], 1999 of P2, and 1 other P
roi_range = tf.expand_dims(tf.range(tf.shape(roi_to_level)[0]), 1) # [2000, 1], 0~1999
roi_to_level = tf.concat([tf.cast(roi_to_level, tf.int32), roi_range],
axis=1) # [2000, 2], (P, range)
# Rearrange pooled features to match the order of the original rois
        # Sort roi_to_level by batch, then by roi index
# TF doesn't have a way to sort by two columns, so merge them and sort.
sorting_tensor = roi_to_level[:, 0] * 100000 + roi_to_level[:, 1]
ix = tf.nn.top_k(sorting_tensor, k=tf.shape( # k=2000
roi_to_level)[0]).indices[::-1]# reverse the order
ix = tf.gather(roi_to_level[:, 1], ix) # [2000]
pooled_rois = tf.gather(pooled_rois, ix) # [2000, 7, 7, 256]
# 2000 of [7, 7, 256]
pooled_rois_list = tf.split(pooled_rois, num_rois_list, axis=0)
return pooled_rois_list
|
17590
|
import argparse
import os.path as osp
from glob import glob
import cv2
import pandas as pd
from tqdm import tqdm
from gwd.converters import kaggle2coco
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--image-pattern", default="/data/SPIKE_images/*jpg")
parser.add_argument("--annotation-root", default="/data/SPIKE_annotations")
parser.add_argument("--kaggle_output_path", default="/data/spike.csv")
parser.add_argument("--coco_output_path", default="/data/coco_spike.json")
return parser.parse_args()
def main():
args = parse_args()
img_paths = glob(args.image_pattern)
annotations = []
for img_path in tqdm(img_paths):
ann_path = osp.join(args.annotation_root, (osp.basename(img_path.replace("jpg", "bboxes.tsv"))))
ann = pd.read_csv(ann_path, sep="\t", names=["x_min", "y_min", "x_max", "y_max"])
h, w = cv2.imread(img_path).shape[:2]
ann[["x_min", "x_max"]] = ann[["x_min", "x_max"]].clip(0, w)
ann[["y_min", "y_max"]] = ann[["y_min", "y_max"]].clip(0, h)
ann["height"] = h
ann["width"] = w
ann["bbox_width"] = ann["x_max"] - ann["x_min"]
ann["bbox_height"] = ann["y_max"] - ann["y_min"]
ann = ann[(ann["bbox_width"] > 0) & (ann["bbox_height"] > 0)].copy()
ann["bbox"] = ann[["x_min", "y_min", "bbox_width", "bbox_height"]].values.tolist()
ann["image_id"] = osp.basename(img_path).split(".")[0]
annotations.append(ann)
annotations = pd.concat(annotations)
annotations["source"] = "spike"
print(annotations.head())
annotations[["image_id", "source", "width", "height", "bbox"]].to_csv(args.kaggle_output_path, index=False)
kaggle2coco.main(args.kaggle_output_path, args.coco_output_path)
if __name__ == "__main__":
main()
|
17617
|
import trio
import os
import json
from itertools import count
# Experiment with generating Chrome Event Trace format, which can be browsed
# through chrome://tracing or other mechanisms.
#
# Screenshot: https://files.gitter.im/python-trio/general/fp6w/image.png
#
# Trace format docs: https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview#
#
# Things learned so far:
# - I don't understand how the ph="s"/ph="f" flow events work – I think
# they're supposed to show up as arrows, and I'm emitting them between tasks
# that wake each other up, but they're not showing up.
# - I think writing out json synchronously from each event is creating gaps in
# the trace; maybe better to batch them up to write up all at once at the
# end
# - including tracebacks would be cool
# - there doesn't seem to be any good way to group together tasks based on
# nurseries. this really limits the value of this particular trace
# format+viewer for us. (also maybe we should have an instrumentation event
# when a nursery is opened/closed?)
# - task._counter should maybe be public
# - I don't know how to best show task lifetime, scheduling times, and what
# the task is actually doing on the same plot. if we want to show particular
# events like "called stream.send_all", then the chrome trace format won't
# let us also show "task is running", because neither kind of event is
# strictly nested inside the other
class Trace(trio.abc.Instrument):
def __init__(self, out):
self.out = out
self.out.write("[\n")
self.ids = count()
self._task_metadata(-1, "I/O manager")
def _write(self, **ev):
ev.setdefault("pid", os.getpid())
if ev["ph"] != "M":
ev.setdefault("ts", trio.current_time() * 1e6)
self.out.write(json.dumps(ev))
self.out.write(",\n")
def _task_metadata(self, tid, name):
self._write(
name="thread_name",
ph="M",
tid=tid,
args={"name": name},
)
self._write(
name="thread_sort_index",
ph="M",
tid=tid,
args={"sort_index": tid},
)
def task_spawned(self, task):
self._task_metadata(task._counter, task.name)
self._write(
name="task lifetime",
ph="B",
tid=task._counter,
)
def task_exited(self, task):
self._write(
name="task lifetime",
ph="E",
tid=task._counter,
)
def before_task_step(self, task):
self._write(
name="running",
ph="B",
tid=task._counter,
)
def after_task_step(self, task):
self._write(
name="running",
ph="E",
tid=task._counter,
)
def task_scheduled(self, task):
try:
waker = trio.lowlevel.current_task()
except RuntimeError:
pass
else:
id = next(self.ids)
self._write(
ph="s",
cat="wakeup",
id=id,
tid=waker._counter,
)
self._write(
cat="wakeup",
ph="f",
id=id,
tid=task._counter,
)
def before_io_wait(self, timeout):
self._write(
name=f"I/O wait",
ph="B",
tid=-1,
)
def after_io_wait(self, timeout):
self._write(
name=f"I/O wait",
ph="E",
tid=-1,
)
async def child1():
print(" child1: started! sleeping now...")
await trio.sleep(1)
print(" child1: exiting!")
async def child2():
print(" child2: started! sleeping now...")
await trio.sleep(1)
print(" child2: exiting!")
async def parent():
print("parent: started!")
async with trio.open_nursery() as nursery:
print("parent: spawning child1...")
nursery.start_soon(child1)
print("parent: spawning child2...")
nursery.start_soon(child2)
print("parent: waiting for children to finish...")
# -- we exit the nursery block here --
print("parent: all done!")
t = Trace(open("/tmp/t.json", "w"))
trio.run(parent, instruments=[t])
|
17625
|
from __future__ import print_function
import tensorflow as tf
import numpy as np
from collections import namedtuple, OrderedDict
from subprocess import call
import scipy.io.wavfile as wavfile
import argparse
import codecs
import timeit
import struct
import toml
import re
import sys
import os
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def slice_signal(signal, window_size, stride=0.5):
""" Return windows of the given signal by sweeping in stride fractions
of window
"""
assert signal.ndim == 1, signal.ndim
n_samples = signal.shape[0]
offset = int(window_size * stride)
slices = []
for beg_i, end_i in zip(range(0, n_samples, offset),
range(window_size, n_samples + offset,
offset)):
if end_i - beg_i < window_size:
break
slice_ = signal[beg_i:end_i]
if slice_.shape[0] == window_size:
slices.append(slice_)
return np.array(slices, dtype=np.int32)
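# Example: a 10-sample signal with window_size=4 and stride=0.5 (offset 2) yields the
# windows [0:4], [2:6], [4:8], [6:10]; any trailing chunk shorter than window_size is dropped.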
def read_and_slice(filename, wav_canvas_size, stride=0.5):
fm, wav_data = wavfile.read(filename)
if fm != 16000:
raise ValueError('Sampling rate is expected to be 16kHz!')
signals = slice_signal(wav_data, wav_canvas_size, stride)
return signals
def encoder_proc(wav_filename, noisy_path, out_file, wav_canvas_size, baseline_dir=None):
""" Read and slice the wav and noisy files and write to TFRecords.
out_file: TFRecordWriter.
"""
ppath, wav_fullname = os.path.split(wav_filename)
noisy_filename = os.path.join(noisy_path, wav_fullname)
wav_signals = read_and_slice(wav_filename, wav_canvas_size)
noisy_signals = read_and_slice(noisy_filename, wav_canvas_size)
if not baseline_dir is None:
baseline_filename = os.path.join(baseline_dir, wav_fullname)
baseline_signals = read_and_slice(baseline_filename, wav_canvas_size)
assert wav_signals.shape == noisy_signals.shape, noisy_signals.shape
if baseline_dir is None:
for (wav, noisy) in zip(wav_signals, noisy_signals):
wav_raw = wav.tostring()
noisy_raw = noisy.tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'wav_raw': _bytes_feature(wav_raw),
'noisy_raw': _bytes_feature(noisy_raw)}))
out_file.write(example.SerializeToString())
else:
for (wav, noisy, base) in zip(wav_signals, noisy_signals, baseline_signals):
wav_raw = wav.tostring()
noisy_raw = noisy.tostring()
baseline_raw = base.tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'wav_raw': _bytes_feature(wav_raw),
'noisy_raw': _bytes_feature(noisy_raw),
'baseline_raw': _bytes_feature(baseline_raw)
}))
out_file.write(example.SerializeToString())
def main(opts):
if not os.path.exists(opts.save_path):
# make save path if it does not exist
os.makedirs(opts.save_path)
# set up the output filepath
out_filepath = os.path.join(opts.save_path, opts.out_file)
if os.path.splitext(out_filepath)[1] != '.tfrecords':
# if wrong extension or no extension appended, put .tfrecords
out_filepath += '.tfrecords'
else:
out_filename, ext = os.path.splitext(out_filepath)
out_filepath = out_filename + ext
# check if out_file exists and if force flag is set
if os.path.exists(out_filepath) and not opts.force_gen:
raise ValueError('ERROR: {} already exists. Set force flag (--force-gen) to '
'overwrite. Skipping this speaker.'.format(out_filepath))
elif os.path.exists(out_filepath) and opts.force_gen:
print('Will overwrite previously existing tfrecords')
os.unlink(out_filepath)
with open(opts.cfg) as cfh:
# read the configuration description
cfg_desc = toml.loads(cfh.read())
beg_enc_t = timeit.default_timer()
out_file = tf.python_io.TFRecordWriter(out_filepath)
# process the acoustic and textual data now
        for dset_i, (dset, dset_desc) in enumerate(cfg_desc.items()):  # items() works on both Python 2 and 3
print('-' * 50)
wav_dir = dset_desc['clean']
wav_files = [os.path.join(wav_dir, wav) for wav in
os.listdir(wav_dir) if wav.endswith('.wav')]
noisy_dir = dset_desc['noisy']
baseline_dir = None
if 'baseline' in dset_desc.keys():
baseline_dir = dset_desc['baseline']
nfiles = len(wav_files)
for m, wav_file in enumerate(wav_files):
print('Processing wav file {}/{} {}{}'.format(m + 1,
nfiles,
wav_file,
' ' * 10),
end='\r')
sys.stdout.flush()
encoder_proc(wav_file, noisy_dir, out_file, 2 ** 14, baseline_dir)
out_file.close()
end_enc_t = timeit.default_timer() - beg_enc_t
print('')
print('*' * 50)
print('Total processing and writing time: {} s'.format(end_enc_t))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert the set of txt and '
'wavs to TFRecords')
parser.add_argument('--cfg', type=str, default='cfg/e2e_maker.cfg',
help='File containing the description of datasets '
'to extract the info to make the TFRecords.')
parser.add_argument('--save_path', type=str, default='data/',
help='Path to save the dataset')
parser.add_argument('--out_file', type=str, default='segan.tfrecords',
help='Output filename')
parser.add_argument('--force-gen', dest='force_gen', action='store_true',
help='Flag to force overwriting existing dataset.')
parser.set_defaults(force_gen=False)
opts = parser.parse_args()
main(opts)
|
17627
|
import xNormal
xNormal.run("piano_high.obj", "piano_low.obj", "piano.png", width=256, height=256, gen_normals = True, gen_ao = True)
|
17631
|
from copy import deepcopy
from typing import Tuple
import jax.numpy as jnp
from jax.scipy.linalg import cho_factor, cho_solve
from multipledispatch import dispatch
from .types import Array
def I(n: int) -> Array:
"""
Compute an n x n identity matrix.
    :param n: The size of the matrix.
:return: An n x n identity matrix.
"""
return jnp.eye(n)
def concat_dictionaries(a: dict, b: dict) -> dict:
"""
Append one dictionary below another. If duplicate keys exist, then the key-value pair of the second supplied
dictionary will be used.
"""
return {**a, **b}
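# Example: concat_dictionaries({"a": 1, "b": 2}, {"b": 3}) == {"a": 1, "b": 3}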
def merge_dictionaries(base_dict: dict, in_dict: dict) -> dict:
"""
    This will return a complete dictionary based on the keys of the first dictionary. If the same key exists in the
    second dictionary, then its value will overwrite the corresponding value from the first. The purpose of this is
    that the base_dict will be a complete dictionary of values such that an incomplete second dictionary can be used
    to update specific key-value pairs.
:param base_dict: Complete dictionary of key-value pairs.
:param in_dict: Subset of key-values pairs such that values from this dictionary will take precedent.
:return: A merged single dictionary.
"""
for k, v in base_dict.items():
if k in in_dict.keys():
base_dict[k] = in_dict[k]
return base_dict
def sort_dictionary(base_dict: dict) -> dict:
"""
Sort a dictionary based on the dictionary's key values.
:param base_dict: The unsorted dictionary.
:return: A dictionary sorted alphabetically on the dictionary's keys.
"""
return dict(sorted(base_dict.items()))
@dispatch(jnp.DeviceArray)
def standardise(x: jnp.DeviceArray) -> Tuple[jnp.DeviceArray, jnp.DeviceArray, jnp.DeviceArray]:
"""
Standardise a given matrix such that values are distributed according to a unit normal random variable. This is
primarily designed for standardising a training dataset.
:param x: A matrix of unstandardised values
:return: A matrix of standardised values
"""
xmean = jnp.mean(x, axis=0)
xstd = jnp.std(x, axis=0)
return (x - xmean) / xstd, xmean, xstd
@dispatch(jnp.DeviceArray, jnp.DeviceArray, jnp.DeviceArray)
def standardise(
x: jnp.DeviceArray, xmean: jnp.DeviceArray, xstd: jnp.DeviceArray
) -> jnp.DeviceArray:
"""
Standardise a given matrix with respect to a given mean and standard deviation. This is primarily designed for
standardising a test set of data with respect to the training data.
:param x: A matrix of unstandardised values
:param xmean: A precomputed mean vector
:param xstd: A precomputed standard deviation vector
:return: A matrix of standardised values
"""
return (x - xmean) / xstd
def unstandardise(
x: jnp.DeviceArray, xmean: jnp.DeviceArray, xstd: jnp.DeviceArray
) -> jnp.DeviceArray:
"""
Unstandardise a given matrix with respect to a previously computed mean and standard deviation. This is designed
for remapping a matrix back onto its original scale.
:param x: A standardised matrix.
:param xmean: A mean vector.
:param xstd: A standard deviation vector.
:return: A matrix of unstandardised values.
"""
return (x * xstd) + xmean
def as_constant(parameter_set: dict, params: list) -> Tuple[dict, dict]:
base_params = deepcopy(parameter_set)
sparams = {}
for param in params:
sparams[param] = base_params[param]
del base_params[param]
return base_params, sparams
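# Minimal usage sketch (not part of the original module): demonstrates the
# standardise/unstandardise round trip defined above. It assumes the installed
# JAX version still exposes jnp.DeviceArray as the type produced by jnp.array,
# and that the module is run as part of its package (relative imports above).
if __name__ == "__main__":
    x = jnp.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    x_std, xmean, xstd = standardise(x)
    x_back = unstandardise(x_std, xmean, xstd)
    print(jnp.allclose(x, x_back))  # expected: True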
|
17632
|
import os
def replace_version(old_version, new_version):
if not isinstance(old_version, tuple) or not isinstance(new_version, tuple):
        raise ValueError("`old_version` and `new_version` must be version tuples, e.g. (1, 2, 3)")
major, minor, micro = old_version[:3]
old_version = f'{major}.{minor}.{micro}'
major, minor, micro = new_version[:3]
new_version = f'{major}.{minor}.{micro}'
print(f"New version = {new_version}")
for root, _, files in os.walk('../caer'):
for file in files:
if file.endswith(('.py', '.cpp', '.c', '.h', '.hpp')):
with open(os.path.abspath(os.path.join(root, file)), 'r') as f:
new_text = f.read().replace('version ' + old_version, 'version ' + new_version)
with open(os.path.abspath(os.path.join(root, file)), 'w') as f:
print(os.path.abspath(os.path.join(root, file)))
f.write(new_text)
replace_version((1,8,0), (3,9,1))
|
17670
|
from plumbum import local
import benchbuild as bb
from benchbuild.environments.domain.declarative import ContainerImage
from benchbuild.source import HTTP
from benchbuild.utils.cmd import make, tar
class XZ(bb.Project):
""" XZ """
VERSION = '5.2.1'
NAME = 'xz'
DOMAIN = 'compression'
GROUP = 'benchbuild'
SOURCE = [
HTTP(
remote={'5.2.1': 'http://tukaani.org/xz/xz-5.2.1.tar.gz'},
local='xz.tar.gz'
),
HTTP(
remote={'1.0': 'http://lairosiel.de/dist/compression.tar.gz'},
local='compression.tar.gz'
)
]
CONTAINER = ContainerImage().from_('benchbuild:alpine')
def compile(self):
xz_source = local.path(self.source_of('xz.tar.gz'))
xz_version = self.version_of('xz.tar.gz')
compression_source = local.path(self.source_of('compression.tar.gz'))
tar('xf', xz_source)
tar('xf', compression_source)
unpack_dir = local.path(f'xz-{xz_version}')
clang = bb.compiler.cc(self)
with local.cwd(unpack_dir):
configure = local["./configure"]
_configure = bb.watch(configure)
with local.env(CC=str(clang)):
_configure(
"--enable-threads=no", "--with-gnu-ld=yes",
"--disable-shared", "--disable-dependency-tracking",
"--disable-xzdec", "--disable-lzmadec",
"--disable-lzmainfo", "--disable-lzma-links",
"--disable-scripts", "--disable-doc"
)
_make = bb.watch(make)
_make("CC=" + str(clang), "clean", "all")
def run_tests(self):
xz_version = self.version_of('xz.tar.gz')
unpack_dir = local.path(f'xz-{xz_version}')
xz = bb.wrap(unpack_dir / "src" / "xz" / "xz", self)
_xz = bb.watch(xz)
# Compress
_xz("--compress", "-f", "-k", "-e", "-9", "compression/text.html")
_xz("--compress", "-f", "-k", "-e", "-9", "compression/chicken.jpg")
_xz("--compress", "-f", "-k", "-e", "-9", "compression/control")
_xz("--compress", "-f", "-k", "-e", "-9", "compression/input.source")
_xz("--compress", "-f", "-k", "-e", "-9", "compression/liberty.jpg")
# Decompress
_xz("--decompress", "-f", "-k", "compression/text.html.xz")
_xz("--decompress", "-f", "-k", "compression/chicken.jpg.xz")
_xz("--decompress", "-f", "-k", "compression/control.xz")
_xz("--decompress", "-f", "-k", "compression/input.source.xz")
_xz("--decompress", "-f", "-k", "compression/liberty.jpg.xz")
|
17703
|
import logging
import math
import re
import time
import dask
import numpy as np
import requests
import json
import xml.etree.ElementTree as ET
from falconcv.data.scraper.scraper import ImagesScraper
from falconcv.util import ImageUtil
logger = logging.getLogger(__name__)
FLICKR_ENDPOINT = "https://www.flickr.com/services/rest"
# List of sizes:
# url_o: Original (4520 × 3229)
# url_k: Large 2048 (2048 × 1463)
# url_h: Large 1600 (1600 × 1143)
# url_l: Large 1024 (1024 × 732)
# url_c: Medium 800 (800 × 572)
# url_z: Medium 640 (640 × 457)
# url_m: Medium 500 (500 × 357)
# url_n: Small 320 (320 × 229)
# url_s: Small 240 (240 × 171)
# url_t: Thumbnail (100 × 71)
# url_q: Square 150 (150 × 150)
# url_sq: Square 75 (75 × 75)
class FlickrScraper(ImagesScraper):
def __init__(self, api_key):
super(FlickrScraper, self).__init__()
self.api_key = api_key
def _authenticate(self):
pass
def _get_total_matches(self, q):
total_matches = 0
try:
response = requests.get(url=FLICKR_ENDPOINT, params={
"api_key": self.api_key,
"method": "flickr.photos.search",
"tags": ",".join(q),
"tag_mode": "any",
# "privacy_filter": "1"
"content_type": 1,
"media": "photos",
"per_page": 0,
"format": "json"
})
if response.status_code == 200:
json_text = re.search(r'\((.*?)\)', response.text).group(1)
json_object = json.loads(json_text)
if json_object["stat"] == "ok":
total_matches = int(json_object["photos"]["total"])
# total_matches = json_object["photos"]
except Exception as ex:
logger.error("Error making the request : {}".format(ex))
return total_matches
def _request_photos(self, q, count, page):
images = []
try:
response = requests.get(url=FLICKR_ENDPOINT, params={
"api_key": self.api_key,
"method": "flickr.photos.search",
"tags": ",".join(q),
"tag_mode": "any",
# "privacy_filter": "1"
"content_type": 1,
"media": "photos",
"per_page": count,
"page": page,
"extras": ",".join(["url_o", "url_k", "url_h", "url_l", "url_c", "url_m"])
})
if response.status_code == 200:
try:
# print(response.text)
root: ET.Element = ET.fromstring(response.text)
stat = root.get("stat")
if stat == "ok":
for photo in root.iterfind("photos/photo"):
photo: ET.Element
images.append(photo.attrib)
except Exception as ex:
logger.error("error gathering the response: {}".format(ex))
except Exception as ex:
logger.error("Error making the request : {}".format(ex))
return images
@dask.delayed
def _fetch_image(self, image_info, sz):
try:
if sz in image_info:
url = image_info[sz]
return ImageUtil.url2img(url)
except Exception as ex:
logger.error("Error fetching the image: " % ex)
return None
def fetch(self, q, batch_size: int = 100, timestamp=1, sz="url_m"):
try:
assert batch_size <= 500, "invalid count parameter"
total_matches = self._get_total_matches(q)
logger.debug("{} images found ".format(total_matches))
number_of_pages = math.ceil(total_matches / batch_size)
for page in range(1, number_of_pages):
photos = self._request_photos(q, batch_size, page)
delayed_tasks = list(map(lambda img: self._fetch_image(img, sz), photos))
compute_result = dask.compute(*delayed_tasks)
yield [img for img in compute_result if isinstance(img, np.ndarray)]
time.sleep(timestamp)
except Exception as ex:
logger.error("error fetching the images: {}".format(ex))
|
17709
|
from .version import VersionViewSet, DeployVersionViewSet
__all__ = ["VersionViewSet", "DeployVersionViewSet"]
|
17711
|
from ast import literal_eval
from collections import Counter
from typing import Dict, Optional
from anndata import AnnData
from spatialtis.config import Config, analysis_list
from ...utils import doc
from ..base import graph_position_interactive, graph_position_static
from .utils import query_df
@doc
def community_map(
data: AnnData,
roi: Dict,
min_cells: int = 10,
use: str = "static",
community_key: Optional[str] = None,
centroid_key: Optional[str] = None,
neighbors_key: Optional[str] = None,
**plot_options,
):
"""Visualize cell communities in ROI
Args:
data: {adata_plotting}
roi: {roi}
min_cells: Show communities contain more than a number of cells
use: "static" or "interactive" (Default: "static")
community_key: {community_key}
centroid_key: {centroid_key}
neighbors_key: {neighbors_key}
**plot_options: Pass to :class:`spatialtis._plotting.base.graph_position_static` or
:class:`spatialtis._plotting.base.graph_position_interactive`
{pyecharts_tips}
"""
if community_key is None:
community_key = analysis_list["cell_community"].last_used_key
if centroid_key is None:
centroid_key = Config.centroid_key
if neighbors_key is None:
neighbors_key = Config.NEIGHBORS_KEY
df = query_df(data.obs, roi)
nodes_types = df[community_key].tolist()
commus = []
for commu, count in Counter(nodes_types).items():
if count >= min_cells:
commus.append(commu)
df = df.reset_index(drop=True)
xdf = df[df[community_key].isin(commus)]
xdf = xdf.reset_index()
if len(xdf) == 0:
raise ValueError("Seems like there is no cells left to be drawn")
need_eval_nodes = isinstance(xdf[centroid_key][0], str)
need_eval_neighs = isinstance(xdf[neighbors_key][0], str)
if need_eval_nodes:
nodes = [literal_eval(n) for n in xdf[centroid_key]]
else:
nodes = [n for n in xdf[centroid_key]]
if need_eval_neighs:
neighs = [literal_eval(n) for n in xdf[neighbors_key]]
else:
neighs = [n for n in xdf[neighbors_key]]
nodes_types = xdf[community_key]
edges = []
edges_types = []
for i, n in zip(xdf.index, neighs):
for x in n:
new_x = xdf[xdf["index"] == x].index
if len(new_x) == 1:
new_x = new_x[0]
if nodes_types[i] == nodes_types[new_x]:
edges.append((i, new_x))
edges_types.append(nodes_types[i])
plot_options["saved_name"] = "community_map_" + ",".join(
[f"{k}={v}" for k, v in roi.items()]
)
if use == "interactive":
return graph_position_interactive(
nodes, edges, edges_types=edges_types, **plot_options
)
else:
return graph_position_static(
nodes, edges, edges_types=edges_types, **plot_options
)
|
17779
|
from datetime import datetime
from pathlib import Path
import pytz
import kobuddy
def get_test_db():
# db = Path(__file__).absolute().parent.parent / 'KoboShelfes' / 'KoboReader.sqlite.0'
db = Path(__file__).absolute().parent / 'data' / 'kobo_notes' / 'input' / 'KoboReader.sqlite'
return db
# a bit meh, but ok for now
kobuddy.set_databases(get_test_db())
from kobuddy import _iter_events_aux, get_events, get_books_with_highlights, _iter_highlights
def test_events():
for e in _iter_events_aux():
print(e)
def test_hls():
for h in _iter_highlights():
print(h)
def test_get_all():
events = get_events()
assert len(events) > 50
for d in events:
print(d)
def test_books_with_highlights():
pages = get_books_with_highlights()
g = pages[0]
assert 'Essentialism' in g.book
hls = g.highlights
assert len(hls) == 273
[b] = [h for h in hls if h.eid == '520b7b13-dbef-4402-9a81-0f4e0c4978de']
# TODO wonder if there might be any useful info? StartContainerPath, EndContainerPath
assert b.kind == 'bookmark'
# TODO move to a more specific test?
# TODO assert sorted by date or smth?
assert hls[0].kind == 'highlight'
# TODO assert highlights got no annotation? not sure if it's even necessary to distinguish..
[ann] = [h for h in hls if h.annotation is not None and len(h.annotation) > 0]
assert ann.eid == 'eb264817-9a06-42fd-92ff-7bd38cd9ca79'
assert ann.kind == 'annotation'
assert ann.text == 'He does this by finding which machine has the biggest queue of materials waiting behind it and finds a way to increase its efficiency.'
assert ann.annotation == 'Bottleneck'
assert ann.dt == datetime(year=2017, month=8, day=12, hour=3, minute=49, second=13, microsecond=0, tzinfo=pytz.utc)
assert ann.book.author == '<NAME>'
assert len(pages) == 7
def test_history():
kobuddy.print_progress()
def test_annotations():
kobuddy.print_annotations()
def test_books():
kobuddy.print_books()
|
17843
|
import wx
import cv2
#----------------------------------------------------------------------
# Panel to display image from camera
#----------------------------------------------------------------------
class WebcamPanel(wx.Window): # wx.Panel, wx.Control
def __init__(self, parent, camera, fps=15, flip=False):
wx.Window.__init__(self, parent)
# remember arguments
self.camera = camera
self.fps = fps
self.flip = flip
# get frame size
ret_value, frame = self.camera.read()
height, width = frame.shape[:2]
# resize panel with camera image
self.SetSize( (width, height) )
#self.SetMinSize( (width, height) )
# resize main window
        self.GetParent().GetParent().SetSize( (width, height+37) )  # needs a fix so the +37 offset is not required
#self.GetGrandParent().SetSize( (width, height+25) )
#self.GetTopLevelParent().SetSize( (width, height+25) ) # wrong parent
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if self.flip:
frame = cv2.flip(frame, 1)
# create bitmap with frame
self.bmp = wx.BitmapFromBuffer(width, height, frame)
# timer to refresh frames
self.timer = wx.Timer(self)
self.timer.Start(1000./fps)
# add functions to events
self.Bind(wx.EVT_PAINT, self.OnPaint) # run when it is needed
self.Bind(wx.EVT_TIMER, self.NextFrame) # run by timer
def OnPaint(self, event):
dc = wx.BufferedPaintDC(self)
dc.DrawBitmap(self.bmp, 0, 0)
def NextFrame(self, event):
ret_value, frame = self.camera.read()
if ret_value:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if self.flip:
frame = cv2.flip(frame, 1)
self.bmp.CopyFromBuffer(frame)
self.Refresh()
#----------------------------------------------------------------------
# Main Window
#----------------------------------------------------------------------
class MainWindow(wx.Frame):
def __init__(self, camera, fps=10):
wx.Frame.__init__(self, None)
self.panel = wx.Panel(self, -1)
# add sizer
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.panel.SetSizer(self.sizer)
# add button
self.button = wx.Button(self.panel, label="CAPTURE")
self.button.Bind(wx.EVT_BUTTON, self.OnButton)
self.sizer.Add(self.button, 0, wx.EXPAND)
# add panel with webcam image
self.webcampanel = WebcamPanel(self.panel, camera)
self.sizer.Add(self.webcampanel, 1, wx.EXPAND)
#self.sizer.Layout()
#self.webcampanel.Layout()
#self.Fit()
self.Show()
def OnButton(self, event):
print("TODO: save image in file")
#----------------------------------------------------------------------
camera = cv2.VideoCapture(0)
app = wx.App()
MainWindow(camera)
app.MainLoop()
|
17848
|
def arg_to_step(arg):
if isinstance(arg, str):
return {'run': arg}
else:
return dict(zip(['run', 'parameters', 'cache'], arg))
def steps(*args):
return [arg_to_step(arg) for arg in args]
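# Hypothetical usage sketch (not part of the original module): a plain string
# becomes a bare 'run' step, while a tuple is zipped into run/parameters/cache.
if __name__ == "__main__":
    print(steps("build", ("test", {"verbose": True}, False)))
    # -> [{'run': 'build'}, {'run': 'test', 'parameters': {'verbose': True}, 'cache': False}]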
|
17858
|
import numpy as np
import scipy.stats as stats
from UQpy.Distributions.baseclass.Distribution import Distribution
class DistributionContinuous1D(Distribution):
"""
Parent class for univariate continuous probability distributions.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
@staticmethod
def _check_x_dimension(x):
"""
Check the dimension of input x - must be an ndarray of shape (npoints,) or (npoints, 1)
"""
x = np.atleast_1d(x)
if len(x.shape) > 2 or (len(x.shape) == 2 and x.shape[1] != 1):
raise ValueError('Wrong dimension in x.')
return x.reshape((-1,))
def _construct_from_scipy(self, scipy_name=stats.rv_continuous):
self.cdf = lambda x: scipy_name.cdf(x=self._check_x_dimension(x), **self.params)
self.pdf = lambda x: scipy_name.pdf(x=self._check_x_dimension(x), **self.params)
self.log_pdf = lambda x: scipy_name.logpdf(x=self._check_x_dimension(x), **self.params)
self.icdf = lambda x: scipy_name.ppf(q=self._check_x_dimension(x), **self.params)
self.moments = lambda moments2return='mvsk': scipy_name.stats(moments=moments2return, **self.params)
self.rvs = lambda nsamples=1, random_state=None: scipy_name.rvs(
size=nsamples, random_state=random_state, **self.params).reshape((nsamples, 1))
def tmp_fit(dist, data):
data = self._check_x_dimension(data)
fixed_params = {}
for key, value in dist.params.items():
if value is not None:
fixed_params['f' + key] = value
params_fitted = scipy_name.fit(data=data, **fixed_params)
return dict(zip(dist.order_params, params_fitted))
self.fit = lambda data: tmp_fit(self, data)
|
17861
|
import requests
import re
import time
import random
import pprint
import os
headers = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3858.0 Safari/537.36"}
def youdict(threadName, q):
res = []
index = 0
url = q.get(timeout = 2)
index += 1
r = requests.get(url, headers = headers, timeout = 5)
html = str(r.content, encoding="utf-8").replace("\n", "").replace(" ", "").replace('<span class="yd-kw-suffix">[英语单词大全]</span>', "")
words = re.findall('<div class="caption"><h3 style="margin-top: 10px;"><a style="color:#333;" target="_blank" href="/w/.*?">(.*?)</a>[ ]?</h3><p>(.*?)</p></div>', html)
for word in words:
res.append(word)
if index%5 == 0:
time.sleep(3 + random.random())
else:
time.sleep(1 + random.random())
return res
def hujiang(threadName, q):
res = []
index = 0
url = q.get(timeout = 2)
index += 1
r = requests.get(url, headers=headers, timeout=5)
html = str(r.content, encoding="utf-8").replace("\n", "").replace(" ", "").replace('<span class="yd-kw-suffix">[英语单词大全]</span>', "")
words = re.findall('<li class="clearfix"><a href="/ciku/(.*?)/" target="_blank">.*?</a><span>(.*?)</span></li>', html)
for word in words:
res.append(word)
if index%5 == 0:
time.sleep(3 + random.random())
else:
time.sleep(1 + random.random())
return res
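# NOTE: `main` is referenced below but is not defined in this file. The sketch
# below is a hypothetical minimal driver, assuming the intent is to feed page
# URLs to the scrapers through a queue; the URL is a placeholder, not a
# verified endpoint.
def main():
    from queue import Queue
    q = Queue()
    q.put("https://www.youdict.com/ciku/example")  # placeholder URL
    pprint.pprint(youdict("worker-1", q))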
if __name__ == "__main__":
main()
|
17876
|
from abc import ABC, abstractmethod
class MarioGame(ABC):
@abstractmethod
def getPort(self) -> int:
pass
@abstractmethod
def initGame(self):
pass
@abstractmethod
def stepGame(self, left: bool, right: bool, down: bool, speed: bool, jump: bool):
pass
@abstractmethod
def resetGame(self, level: str, timer: int, mario_state: int, inertia: float):
pass
@abstractmethod
def computeObservationRGB(self):
pass
@abstractmethod
def computeReward(self) -> float:
pass
@abstractmethod
def computeDone(self) -> bool:
pass
@abstractmethod
def getCompletionPercentage(self) -> float:
pass
@abstractmethod
def getFrameSize(self) -> int:
pass
|
17881
|
from malaya_speech.utils import (
check_file,
load_graph,
generate_session,
nodes_session,
)
from malaya_speech.model.tf import UNET, UNETSTFT, UNET1D
def load(model, module, quantized=False, **kwargs):
path = check_file(
file=model,
module=module,
keys={'model': 'model.pb'},
quantized=quantized,
**kwargs,
)
g = load_graph(path['model'], **kwargs)
inputs = ['Placeholder']
outputs = ['logits']
input_nodes, output_nodes = nodes_session(g, inputs, outputs)
return UNET(
input_nodes=input_nodes,
output_nodes=output_nodes,
sess=generate_session(graph=g, **kwargs),
model=model,
name=module,
)
def load_stft(model, module, instruments, quantized=False, **kwargs):
path = check_file(
file=model,
module=module,
keys={'model': 'model.pb'},
quantized=quantized,
**kwargs,
)
g = load_graph(path['model'], **kwargs)
inputs = ['Placeholder']
outputs = [f'logits_{i}' for i in range(len(instruments))]
input_nodes, output_nodes = nodes_session(g, inputs, outputs)
return UNETSTFT(
input_nodes=input_nodes,
output_nodes=output_nodes,
instruments=instruments,
sess=generate_session(graph=g, **kwargs),
model=model,
name=module,
)
def load_1d(model, module, quantized=False, **kwargs):
path = check_file(
file=model,
module=module,
keys={'model': 'model.pb'},
quantized=quantized,
**kwargs,
)
g = load_graph(path['model'], **kwargs)
inputs = ['Placeholder']
outputs = ['logits']
input_nodes, output_nodes = nodes_session(g, inputs, outputs)
return UNET1D(
input_nodes=input_nodes,
output_nodes=output_nodes,
sess=generate_session(graph=g, **kwargs),
model=model,
name=module,
)
|
17900
|
import importlib.metadata
import logging
import os
import shutil
from typing import Dict, Any, List
import click
from sqlalchemy import text
from dbd.log.dbd_exception import DbdException
from dbd.config.dbd_profile import DbdProfile
from dbd.config.dbd_project import DbdProject
from dbd.executors.model_executor import ModelExecutor, InvalidModelException
from dbd.log.dbd_logger import setup_logging
log = logging.getLogger(__name__)
this_script_dir = os.path.dirname(__file__)
class Dbd(object):
"""
Top level CLI object
"""
def __init__(self, debug: bool = False, logfile: str = 'dbd.log', profile: str = 'dbd.profile',
project: str = 'dbd.project'):
"""
Constructor
:param bool debug: debug flag
:param str logfile: log file
:param str profile: profile file
:param str project: project file
"""
self.__debug = debug
self.__logfile = logfile
self.__profile = profile
self.__project = project
def debug(self) -> bool:
"""
Debug flag getter
:return: debug flag
:rtype: bool
"""
return self.__debug
def logfile(self) -> str:
"""
Logfile getter
:return: logfile
:rtype: str
"""
return self.__logfile
def profile(self) -> str:
"""
Profile getter
:return: profile
:rtype: str
"""
return self.__profile
def project(self) -> str:
"""
Project getter
:return: project
:rtype: str
"""
return self.__project
def print_version():
"""
Prints DBD version
"""
click.echo(f"You're using DBD version {importlib.metadata.version('dbd')}.")
@click.group(invoke_without_command=True)
@click.option('--debug/--no-debug', envvar='DBD_DEBUG', default=False, help='Sets debugging on/off')
@click.option('--version', help="Print the DBD version and exit.", is_flag=True, is_eager=True)
@click.option('--logfile', envvar='DBD_LOG_FILE', default='dbd.log', help='Log file location')
@click.option('--profile', envvar='DBD_PROFILE', default='dbd.profile', help='Profile configuration file')
@click.option('--project', envvar='DBD_PROJECT', default='dbd.project', help='Project configuration file')
@click.pass_context
def cli(ctx, debug, logfile, version, profile, project):
if debug:
click.echo(f"Logging DEBUG info to '{logfile}'")
setup_logging(logging.DEBUG, logfile)
if version:
print_version()
ctx.exit(0)
ctx.obj = Dbd(debug, logfile, profile, project)
# noinspection PyUnusedLocal
@cli.command(help='Initializes a new DBD project.')
@click.argument('dest', required=False, default='my_new_dbd_project')
@click.pass_obj
def init(dbd, dest):
try:
src = os.path.join(this_script_dir, '..', 'resources', 'template')
if os.path.exists(dest):
log.error(f"Can't overwrite directory '{dest}'")
raise DbdException(f"Can't overwrite directory '{dest}'")
shutil.copytree(src, dest)
click.echo(f"New project {dest} generated. Do cd {dest}; dbd run .")
except DbdException as d:
click.echo(f"ERROR: '{d}'")
@cli.command(help='Executes project.')
@click.option('--only', envvar='DBD_ONLY', default=None, help='Comma separated list of fully qualified table names '
'(<schema>.<table-name-no suffix>) to execute.')
@click.option('--deps/--no-deps', envvar='DBD_DEPS', default=True, help='Ignores dependencies for the --only list.')
@click.argument('dest', required=False, default='.')
@click.pass_obj
def run(dbd, only, deps, dest):
try:
log.debug("Loading configuration.")
prf = DbdProfile.load(os.path.join('.', dbd.profile()))
prj = DbdProject.load(prf, os.path.join(dest, dbd.project()))
log.debug("Creating model.")
model = ModelExecutor(prj)
log.debug("Connecting database.")
engine = prj.alchemy_engine_from_project()
# engine.execution_options(supports_statement_cache=False)
log.debug("Executing model.")
if not deps and only is None:
log.error("You must specify --only list for --no-deps.")
raise DbdException("You must specify --only list for --no-deps.")
if only is not None:
only_list = only.split(',')
try:
model.execute(engine, only_list, deps)
except InvalidModelException as e:
log.error(f"Can't run {only_list}: {e}")
raise DbdException(f"Can't run {only_list}: {e}")
else:
model.execute(engine)
log.debug("Finished.")
click.echo("All tasks finished!")
except DbdException as d:
click.echo(f"ERROR: '{d}'")
@cli.command(help='Validates project.')
@click.argument('dest', required=False, default='.')
@click.pass_obj
def validate(dbd, dest):
try:
prf = DbdProfile.load(os.path.join('.', dbd.profile()))
prj = DbdProject.load(prf, os.path.join(dest, dbd.project()))
model = ModelExecutor(prj)
engine = prj.alchemy_engine_from_project()
# noinspection PyBroadException
try:
engine.execute(text("SELECT 1"))
except Exception:
click.echo(
f"Can't connect to the target database. Check profile configuration in "
f"'{os.path.normpath(os.path.join(dest, dbd.profile()))}'.")
validation_result, validation_errors = model.validate()
if validation_result:
click.echo("No errors found. Model is valid.")
else:
click.echo("Model isn't valid. Please fix the following errors:")
__echo_validation_errors(validation_errors)
except DbdException as d:
click.echo(f"ERROR: '{d}'")
def __echo_validation_errors(validation_errors: Dict[str, Any]):
"""
Top level function for printing validation errors
    :param validation_errors: Dict with the validation errors to print
"""
__echo_validation_level(validation_errors)
class InvalidValidationErrorStructure(DbdException):
pass
def __echo_validation_level(level_validation_errors: Dict[str, Any], indent: int = 0):
"""
Echo validation error line (called recursively on all Dict values)
:param level_validation_errors: Dict with validation result
:param indent: indentation level
"""
for (k, v) in level_validation_errors.items():
if isinstance(v, str):
msg = f"{k}:{v}"
click.echo(msg.rjust(indent * 2 + len(msg), ' '))
elif isinstance(v, Dict):
msg = f"{k}:"
click.echo(msg.rjust(indent * 2 + len(msg), ' '))
__echo_validation_level(v, indent + 1)
elif isinstance(v, List):
msg = f"{k}:{str(v)}"
click.echo(msg.rjust(indent * 2 + len(msg), ' '))
else:
raise InvalidValidationErrorStructure(f"Invalid validation result: '{v}' isn't supported type.")
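# Typical invocations of the commands defined above (for reference; exact flags
# are documented by the click options):
#   dbd init my_new_dbd_project
#   dbd run .
#   dbd run --only schema.table --no-deps .
#   dbd validate .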
|
17975
|
import os
import sys
cwd = os.getcwd()
sys.path.append(cwd)
import pickle
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from plot.helper import plot_task, plot_weights, plot_rf_z_max, plot_rf_quad, plot_vector_traj
tasks = [
'com_pos', 'com_vel', 'chassis_quat', 'chassis_ang_vel', 'toeFL_pos',
'toeFL_vel', 'toeFR_pos', 'toeFR_vel', 'toeRR_pos', 'toeRR_vel',
'toeRL_pos', 'toeRL_vel'
]
weights = [
'w_com', 'w_chassis_ori', 'w_toeFL', 'w_toeFR', 'w_toeRR', 'w_toeRL'
]
rf_z = ['rf_z_max_toeFL', 'rf_z_max_toeFR', 'rf_z_max_toeRR', 'rf_z_max_toeRL']
time = []
phase = []
rf_cmd = []
des, act = dict(), dict()
for topic in tasks:
des[topic] = []
act[topic] = []
w = dict()
for topic in weights:
w[topic] = []
rf_z_max = dict()
for topic in rf_z:
rf_z_max[topic] = []
with open('data/pnc.pkl', 'rb') as file:
while True:
try:
d = pickle.load(file)
time.append(d['time'])
phase.append(d['phase'])
for topic in tasks:
des[topic].append(d[topic + '_des'])
act[topic].append(d[topic])
for topic in weights:
w[topic].append(d[topic])
for topic in rf_z:
rf_z_max[topic].append(d[topic])
rf_cmd.append(d['rf_cmd'])
except EOFError:
break
for k, v in des.items():
des[k] = np.stack(v, axis=0)
for k, v in act.items():
act[k] = np.stack(v, axis=0)
rf_cmd = np.stack(rf_cmd, axis=0)
phase = np.stack(phase, axis=0)
## =============================================================================
## Plot Task
## =============================================================================
plot_task(time, des['com_pos'], act['com_pos'], des['com_vel'], act['com_vel'],
phase, 'com lin')
plot_task(time, des['chassis_quat'], act['chassis_quat'],
          des['chassis_ang_vel'], act['chassis_ang_vel'], phase, 'chassis ori')
plot_task(time, des['toeFL_pos'], act['toeFL_pos'], des['toeFL_vel'],
          act['toeFL_vel'], phase, 'front left foot lin')
plot_task(time, des['toeFR_pos'], act['toeFR_pos'], des['toeFR_vel'],
          act['toeFR_vel'], phase, 'front right foot lin')
plot_task(time, des['toeRR_pos'], act['toeRR_pos'], des['toeRR_vel'],
          act['toeRR_vel'], phase, 'rear right foot lin')
plot_task(time, des['toeRL_pos'], act['toeRL_pos'], des['toeRL_vel'],
          act['toeRL_vel'], phase, 'rear left foot lin')
## =============================================================================
## Plot WBC Solutions
## =============================================================================
plot_rf_quad(time, rf_cmd, phase)
## =============================================================================
## Plot Weights and Max Reaction Force Z
## =============================================================================
plot_weights(time, w, phase)
plot_rf_z_max(time, rf_z_max, phase)
plt.show()
|
18106
|
import justpy as jp
from .group import Group
class Row(Group):
def __init__(self):
'''Row Element
        Provides a container which arranges its children in a row.
'''
view = jp.QDiv(classes='row items-start', style='gap: 1em', delete_flag=False)
super().__init__(view)
|
18164
|
from rest_framework import status
from rest_framework.response import Response
from rest_framework.generics import GenericAPIView
from ..permissions import IsAuthenticated
from django.core.cache import cache
from django.conf import settings
from ..authentication import TokenAuthentication
from ..app_settings import (
MembershipDeclineSerializer,
)
class MembershipDeclineView(GenericAPIView):
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
allowed_methods = ('POST', 'OPTIONS', 'HEAD')
def get(self, *args, **kwargs):
return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED)
def put(self, *args, **kwargs):
return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED)
def post(self, request, *args, **kwargs):
"""
        Marks a membership as declined. In addition, deletes information that is no longer needed.
:param request:
:param uuid: share_right_id
:param args:
:param kwargs:
:return: 200 / 403
"""
serializer = MembershipDeclineSerializer(data=request.data, context=self.get_serializer_context())
if not serializer.is_valid():
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST
)
membership_obj = serializer.validated_data.get('membership_obj')
membership_obj.accepted = False
membership_obj.save()
if settings.CACHE_ENABLE:
cache_key = 'psono_user_status_' + str(membership_obj.user.id)
cache.delete(cache_key)
return Response(status=status.HTTP_200_OK)
def delete(self, *args, **kwargs):
return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED)
|
18169
|
from .utils import TestCase
from .utils import build_and_test_module
from .utils import transpile_source
class Test(TestCase):
def test_compare(self):
with self.assertRaises(SystemExit):
build_and_test_module('compare')
def test_assert_between(self):
self.assert_transpile_raises(
'def foo():\n'
' a = 2\n'
' assert 1 <= a < 3\n',
' File "", line 3\n'
" assert 1 <= a < 3\n"
' ^\n'
"CompileError: can only compare two values\n")
def test_between(self):
self.assert_transpile_raises(
'def foo():\n'
' a = 2\n'
' print(1 <= a < 3)\n',
' File "", line 3\n'
" print(1 <= a < 3)\n"
' ^\n'
"CompileError: can only compare two values\n")
def test_i64_and_bool(self):
self.assert_transpile_raises(
'def foo() -> bool:\n'
' return 1 == True',
' File "", line 2\n'
' return 1 == True\n'
' ^\n'
"CompileError: cannot convert 'i64/i32/i16/i8/u64/u32/u16/u8' "
"to 'bool'\n")
def test_mix_of_literals_and_known_types_1(self):
source = transpile_source('def foo():\n'
' k: u64 = 1\n'
' v: i64 = 1\n'
' if 0xffffffffffffffff == k:\n'
' pass\n'
' print(v)\n')
self.assert_in('18446744073709551615ull', source)
def test_wrong_types_1(self):
self.assert_transpile_raises(
'def foo() -> bool:\n'
' return 1 == [""]\n',
' File "", line 2\n'
' return 1 == [""]\n'
' ^\n'
"CompileError: cannot convert 'i64/i32/i16/i8/u64/u32/u16/u8' to "
"'[string]'\n")
def test_wrong_types_2(self):
self.assert_transpile_raises(
'def foo() -> bool:\n'
' return [""] in 1\n',
' File "", line 2\n'
' return [""] in 1\n'
' ^\n'
"CompileError: not an iterable\n")
def test_wrong_types_3(self):
self.assert_transpile_raises(
'def foo() -> bool:\n'
' return [""] not in 1\n',
' File "", line 2\n'
' return [""] not in 1\n'
' ^\n'
"CompileError: not an iterable\n")
def test_wrong_types_4(self):
self.assert_transpile_raises(
'def foo() -> bool:\n'
' return 2.0 == 1\n',
' File "", line 2\n'
' return 2.0 == 1\n'
' ^\n'
"CompileError: cannot convert 'f64/f32' to "
"'i64/i32/i16/i8/u64/u32/u16/u8'\n")
def test_wrong_types_5(self):
self.assert_transpile_raises(
'def foo() -> bool:\n'
' return 1.0 == [""]\n',
' File "", line 2\n'
' return 1.0 == [""]\n'
' ^\n'
"CompileError: cannot convert 'f64/f32' to '[string]'\n")
def test_wrong_types_6(self):
self.assert_transpile_raises(
'def foo(a: i32) -> bool:\n'
' return a in [""]\n',
' File "", line 2\n'
' return a in [""]\n'
' ^\n'
"CompileError: types 'i32' and 'string' differs\n")
def test_wrong_types_7(self):
self.assert_transpile_raises(
'def foo(a: i32) -> bool:\n'
' return a in a\n',
' File "", line 2\n'
' return a in a\n'
' ^\n'
"CompileError: not an iterable\n")
def test_wrong_types_8(self):
self.assert_transpile_raises(
'def foo(a: i32) -> bool:\n'
' return 1 in a\n',
' File "", line 2\n'
' return 1 in a\n'
' ^\n'
"CompileError: not an iterable\n")
def test_wrong_types_9(self):
self.assert_transpile_raises(
'def foo(a: i32) -> bool:\n'
' return "" == a\n',
' File "", line 2\n'
' return "" == a\n'
' ^\n'
"CompileError: types 'string' and 'i32' differs\n")
def test_wrong_types_10(self):
self.assert_transpile_raises(
'def foo():\n'
' print(1 is None)\n',
' File "", line 2\n'
' print(1 is None)\n'
' ^\n'
"CompileError: 'i64' cannot be None\n")
def test_wrong_types_11(self):
self.assert_transpile_raises(
'def foo():\n'
' print(1.0 is None)\n',
' File "", line 2\n'
' print(1.0 is None)\n'
' ^\n'
"CompileError: 'f64' cannot be None\n")
def test_wrong_types_12(self):
self.assert_transpile_raises(
'def foo(a: i32):\n'
' print(a is None)\n',
' File "", line 2\n'
' print(a is None)\n'
' ^\n'
"CompileError: 'i32' cannot be None\n")
def test_wrong_types_13(self):
self.assert_transpile_raises(
'def foo(a: i32):\n'
' print(None is a)\n',
' File "", line 2\n'
' print(None is a)\n'
' ^\n'
"CompileError: 'i32' cannot be None\n")
def test_wrong_types_14(self):
self.assert_transpile_raises(
'def foo():\n'
' print(True is None)\n',
' File "", line 2\n'
' print(True is None)\n'
' ^\n'
"CompileError: 'bool' cannot be None\n")
def test_wrong_types_15(self):
self.assert_transpile_raises(
'def foo(a: bool):\n'
' print(None is a)\n',
' File "", line 2\n'
' print(None is a)\n'
' ^\n'
"CompileError: 'bool' cannot be None\n")
def test_wrong_types_16(self):
self.assert_transpile_raises(
'def foo(a: bool):\n'
' print(a is not 1)\n',
' File "", line 2\n'
' print(a is not 1)\n'
' ^\n'
"CompileError: cannot convert 'bool' to "
"'i64/i32/i16/i8/u64/u32/u16/u8'\n")
def test_wrong_types_17(self):
self.assert_transpile_raises(
'def foo():\n'
' print(None in [1, 5])\n',
' File "", line 2\n'
' print(None in [1, 5])\n'
' ^\n'
"CompileError: 'i64' cannot be None\n")
def test_wrong_types_18(self):
self.assert_transpile_raises(
'def foo():\n'
' print(None == "")\n',
' File "", line 2\n'
' print(None == "")\n'
' ^\n'
"CompileError: use 'is' and 'is not' to compare to None\n")
def test_wrong_types_20(self):
self.assert_transpile_raises(
'def foo():\n'
' if (1, ("", True)) == (1, ("", 1)):\n'
' pass\n',
# ToDo: Marker in wrong place.
' File "", line 2\n'
' if (1, ("", True)) == (1, ("", 1)):\n'
' ^\n'
"CompileError: cannot convert 'bool' to "
"'i64/i32/i16/i8/u64/u32/u16/u8'\n")
def test_bare_compare(self):
self.assert_transpile_raises(
'def foo():\n'
' 1 == 2\n',
' File "", line 2\n'
' 1 == 2\n'
' ^\n'
"CompileError: bare comparision\n")
|
18190
|
from .. import db
from .base import BaseModel
class ChildDatum(BaseModel):
__tablename__ = 'child_data'
# fields
parent_id = db.Column(db.Integer, db.ForeignKey('data.id'), nullable=False)
datum_id = db.Column(db.Integer, db.ForeignKey('data.id'), nullable=False)
name = db.Column(db.String(length=100), nullable=False)
# relationships
parent = db.relationship('Datum', back_populates='children', foreign_keys=[parent_id])
datum = db.relationship('Datum', back_populates='included_in', foreign_keys=[datum_id])
def __repr__(self):
return (
"<ChildDatum '%s' of %s>" %
(self.name, self.parent)
)
|
18194
|
import multiprocessing as mp
class ModuleRecursion(object):
"""Class to handle recursion.
Simple class to handle tracking and storing prior
    subdomains discovered.
"""
def __init__(self):
"""class init.
"""
self.recursion_queue = mp.Queue()
def add_subdomain(self, domain):
"""add subdomain to Q.
uses a non-blocking call to add to the Q
to prevent any errors with size.
Arguments:
domain {str} -- subdomain to add to Q
"""
self.recursion_queue.put(domain)
def get_subdomain_list(self, valid_only=True):
"""build subdomain list.
Using the JSON from the event consumer, we
can easily build a unique list of
subdomains for module use.
Keyword Arguments:
valid_only {bool} -- filter only valid subdomains (default: {True})
Returns:
            set -- unique set of subdomains
"""
data = []
refill = []
while True:
try:
x = self.recursion_queue.get_nowait()
if valid_only and x.valid:
data.append(x.subdomain)
if not valid_only:
data.append(x.subdomain)
except Exception as e:
print(e)
break
return set(data)
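# Hypothetical usage sketch (not part of the original module): the queue is
# expected to hold objects exposing `.subdomain` and `.valid`; a namedtuple
# stands in for them here purely for illustration.
if __name__ == "__main__":
    import time
    from collections import namedtuple
    Result = namedtuple("Result", ["subdomain", "valid"])
    tracker = ModuleRecursion()
    tracker.add_subdomain(Result("a.example.com", True))
    tracker.add_subdomain(Result("b.example.com", False))
    time.sleep(0.1)  # mp.Queue uses a feeder thread; give the puts a moment to land
    print(tracker.get_subdomain_list())  # only valid entries: {'a.example.com'}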
|
18223
|
from .constants import SPECIAL_TOKENS
try:
import re2 as re
except ImportError:
import re
def twitter_sentiment_token_matching(token):
"""Special token matching function for twitter sentiment data."""
if 'URL_TOKEN' in SPECIAL_TOKENS and re.match(r'https?:\/\/[^\s]+', token):
return SPECIAL_TOKENS['URL_TOKEN']
if 'POS_EM_TOKEN' in SPECIAL_TOKENS and re.match(r':-?(\)|D|p)+', token):
return SPECIAL_TOKENS['POS_EM_TOKEN']
if 'NEG_EM_TOKEN' in SPECIAL_TOKENS and re.match(r':-?(\(|\\|/)+', token):
return SPECIAL_TOKENS['NEG_EM_TOKEN']
if 'USER_TOKEN' in SPECIAL_TOKENS and re.match(
r'(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9]+)', token):
return SPECIAL_TOKENS['USER_TOKEN']
if 'HEART_TOKEN' in SPECIAL_TOKENS and re.match(r'<3+', token):
return SPECIAL_TOKENS['HEART_TOKEN']
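# Hypothetical usage sketch (not part of the original module): which tokens are
# replaced depends entirely on the keys present in SPECIAL_TOKENS, so the output
# is illustrative only (run it inside its package because of the relative import above).
if __name__ == "__main__":
    for tok in ["https://example.com/a", ":-)", ":-(", "@someone", "<3", "plain"]:
        print(tok, "->", twitter_sentiment_token_matching(tok))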
|
18226
|
import datetime
from dateutil.parser import parse
from mongoengine import DateTimeField, FileField
from mongoengine.connection import DEFAULT_CONNECTION_NAME
#from mongoengine.python_support import str_types
from six import string_types as str_types
import io
from django.conf import settings
if settings.FILE_DB == settings.S3:
import crits.core.s3_tools as S3
class CritsDateTimeField(DateTimeField):
"""
Custom MongoEngine DateTimeField. Utilizes a transform such that if the
value passed in is a string we will convert it to a datetime.datetime
object, or if it is set to None we will use the current datetime (useful
when instantiating new objects and wanting the default dates to all be the
current datetime).
"""
def __set__(self, instance, value):
value = self.transform(value)
return super(CritsDateTimeField, self).__set__(instance, value)
def transform(self, value):
        if value and isinstance(value, str_types):
return parse(value, fuzzy=True)
elif not value:
return datetime.datetime.now()
else:
return value
class S3Proxy(object):
"""
Custom proxy for MongoEngine which uses S3 to store binaries instead of
GridFS.
"""
def __init__(self, grid_id=None, key=None, instance=None,
db_alias=DEFAULT_CONNECTION_NAME, collection_name='fs'):
self.grid_id = grid_id # Store id for file
self.key = key
self.instance = instance
self.db_alias = db_alias
self.collection_name = collection_name
self.newfile = None # Used for partial writes
self.gridout = None
def __getattr__(self, name):
attrs = ('_fs', 'grid_id', 'key', 'instance', 'db_alias',
'collection_name', 'newfile', 'gridout')
if name in attrs:
return self.__getattribute__(name)
obj = self.get()
if name in dir(obj):
return getattr(obj, name)
raise AttributeError
def __get__(self, instance, value):
return self
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.grid_id)
def delete(self):
# Delete file from S3, FileField still remains
S3.delete_file_s3(self.grid_id,self.collection_name)
self.grid_id = None
self.gridout = None
self._mark_as_changed()
def get(self, id=None):
if id:
self.grid_id = id
if self.grid_id is None:
return None
try:
if self.gridout is None:
self.gridout = io.BytesIO(S3.get_file_s3(self.grid_id, self.collection_name))
return self.gridout
except:
return None
def put(self, file_obj, **kwargs):
if self.grid_id:
raise Exception('This document already has a file. Either delete '
'it or call replace to overwrite it')
self.grid_id = S3.put_file_s3(file_obj, self.collection_name)
self._mark_as_changed()
def read(self, size=-1):
gridout = self.get()
if gridout is None:
return None
else:
try:
return gridout.read(size)
except:
return ""
def _mark_as_changed(self):
"""Inform the instance that `self.key` has been changed"""
if self.instance:
self.instance._mark_as_changed(self.key)
class S3FileField(FileField):
"""
Custom FileField for MongoEngine which utilizes S3.
"""
def __init__(self, db_alias=DEFAULT_CONNECTION_NAME, collection_name="fs",
**kwargs):
super(S3FileField, self).__init__(db_alias, collection_name, **kwargs)
self.proxy_class = S3Proxy
def __set__(self, instance, value):
key = self.name
if ((hasattr(value, 'read') and not
isinstance(value, self.proxy_class)) or isinstance(value, str_types)):
# using "FileField() = file/string" notation
grid_file = instance._data.get(self.name)
# If a file already exists, delete it
if grid_file:
try:
grid_file.delete()
except:
pass
# Create a new file with the new data
grid_file.put(value)
else:
# Create a new proxy object as we don't already have one
instance._data[key] = self.proxy_class(key=key, instance=instance,
collection_name=self.collection_name)
instance._data[key].put(value)
else:
instance._data[key] = value
instance._mark_as_changed(key)
def getFileField(db_alias=DEFAULT_CONNECTION_NAME, collection_name="fs", **kwargs):
"""
Determine if the admin has configured CRITs to utilize GridFS or S3 for
binary storage.
"""
if settings.FILE_DB == settings.GRIDFS:
return FileField(db_alias, collection_name, **kwargs)
elif settings.FILE_DB == settings.S3:
return S3FileField(db_alias, collection_name, **kwargs)
|
18269
|
import numpy as np
import matplotlib.pyplot as plt
#Dahlquist test
#sol1ex = lambda t: np.exp(-t)
#sol2ex = lambda t: np.exp(-2*t)
#oscillator 1
sol1ex = lambda t: np.cos(t**2/2)
sol2ex = lambda t: np.sin(t**2/2)
#oscillator 2
#sol1ex = lambda t: np.exp(np.sin(t**2))
#sol2ex = lambda t: np.exp(np.cos(t**2))
name = 'Osc1'
t = np.fromfile('../out/%s_snap_t' % name)
nsnap = len(t)
sol1 = np.zeros((nsnap,))
sol2 = sol1.copy()
for i in range(nsnap):
s = np.fromfile('../out/%s_snap_%d' % (name,i))
sol1[i] = s[0]
sol2[i] = s[1]
fig, axs = plt.subplots(2, 3, figsize=(10,5))
axs = [item for sublist in axs for item in sublist]
tdense = np.linspace(min(t), max(t), 2500)
axs[0].plot(tdense, sol1ex(tdense), 'k', linewidth=0.5, label='$y_1$ exact')
axs[0].plot(t, sol1, 'C0.', label='$y_1$ numerical')
axs[0].set_title('Solutions')
axs[0].set_ylabel('$y_1$')
axs[0].legend()
axs[3].plot(tdense, sol2ex(tdense), 'k', linewidth=0.5, label='$y_2$ exact')
axs[3].plot(t, sol2, 'C1.', label='$y_2$ numerical')
axs[3].set_ylabel('$y_2$')
axs[3].legend()
axs[1].semilogy(t, np.abs(sol1 - sol1ex(t)), 'C0.', label='$y_1$ abs err')
axs[4].semilogy(t, np.abs(sol2 - sol2ex(t)), 'C1.', label='$y_2$ abs err')
axs[1].set_title('Absolute Error')
axs[2].semilogy(t, np.abs((sol1 - sol1ex(t))/sol1ex(t)), 'C0.', label='$y_1$ rel err')
axs[5].semilogy(t, np.abs((sol2 - sol2ex(t))/sol2ex(t)), 'C1.', label='$y_2$ rel err')
axs[2].set_title('Relative Error')
axs[3].set_xlabel('t')
axs[4].set_xlabel('t')
axs[5].set_xlabel('t')
plt.tight_layout()
plt.show()
|
18287
|
import requests
from bs4 import BeautifulSoup
import json
def loadMasterStock():
url = "http://www.supremenewyork.com/mobile_stock.json"
user = {"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 10_2_1 like Mac OS X) AppleWebKit/602.4.6 (KHTML, like Gecko) Version/10.0 Mobile/14D27 Safari/602.1"}
# user = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36"}
r = requests.get(url, headers=user)
masterStock = json.loads(r.text)
with open("masterstock.json", 'w') as outfile:
json.dump(masterStock, outfile, indent=4, sort_keys=True)
print("Saved to masterstock.json")
itemInfo = ""
while(True):
try:
item = input("Enter item name to get id or cntrl-c to quit: ")
except:
print("Exiting...")
if itemInfo != "":
itemInfo = itemInfo[:-1]
print("\n"+itemInfo)
with open("filteredStock.txt",'w') as outfile:
outfile.write(itemInfo)
exit()
if item == "new":
print("Getting all new items...")
for itemCount in range(len(masterStock['products_and_categories']["new"])):
itemInfo += '"'+str(masterStock['products_and_categories']["new"][itemCount]['id'])+'":"'
itemInfo += str(masterStock['products_and_categories']["new"][itemCount]['name'])+'",'
else:
for itemCount in range(len(masterStock['products_and_categories']["new"])):
if item.lower() in str(masterStock['products_and_categories']["new"][itemCount]['name']).lower():
itemInfo += '"'+str(masterStock['products_and_categories']["new"][itemCount]['id'])+'":"'
print("Added "+str(masterStock['products_and_categories']["new"][itemCount]['name']))
itemInfo += str(masterStock['products_and_categories']["new"][itemCount]['name'])+'",'
# print(itemInfo)
if __name__ == '__main__':
loadMasterStock()
|
18294
|
from typing import Optional
from algorithms.basic_testing import BasicTesting
from simulator.controllers.main_controller import MainController
from simulator.controllers.map.map_controller import MapController
from simulator.controllers.gui.gui_controller import GuiController
from simulator.models.main_model import MainModel
from simulator.models.map_model import MapModel
from simulator.services.debug import DebugLevel
from simulator.services.services import Services
from simulator.services.event_manager.events.event import Event
from simulator.services.event_manager.events.reinit_event import ReinitEvent
from simulator.views.main_view import MainView
from simulator.views.map.map_view import MapView
from simulator.views.gui.gui_view import GuiView
from structures import Size
"""
Implementation is done after https://github.com/wesleywerner/mvc-game-design
"""
class Simulator:
"""
The main simulator class
"""
__services: Services
__main: MainModel
__map: MapModel
__main_controller: MainController
__map_controller: MapController
__gui_controller: GuiController
__main_view: MainView
__map_view: MapView
__gui_view: GuiView
def __init__(self, services: Services) -> None:
# init services
self.__services = services
self.__services.ev_manager.register_listener(self)
self.__main = None
self.__map = None
self.__main_controller = None
self.__map_controller = None
self.__gui_controller = None
self.__main_view = None
self.__map_view = None
def start(self) -> Optional[BasicTesting]:
"""
Starts the simulator
        :return: The testing results, if any
"""
if self.__services.settings.simulator_graphics:
return self.__start_with_graphics()
else:
return self.__start_without_graphics()
def __try_setup_map_graphics(self) -> None:
if self.__services.algorithm.instance is not None:
if self.__map_controller is not None:
self.__map_controller.destroy()
if self.__map_view is not None:
self.__map_view.destroy()
self.__map = MapModel(self.__services)
self.__map_view = MapView(self.__services, self.__map, self.__main_view)
self.__map_controller = MapController(self.__map_view, self.__services, self.__map)
def __start_with_graphics(self) -> None:
"""
Starts simulator with graphics
"""
# init models, views, controllers
self.__main = MainModel(self.__services)
# init views
self.__main_view = MainView(self.__services, self.__main, None)
self.__gui_view = GuiView(self.__services, None, self.__main_view)
# init controllers
self.__main_controller = MainController(self.__services, self.__main)
        self.__gui_controller = GuiController(self.__gui_view, self.__services, self.__main)
self.__try_setup_map_graphics()
self.__main.run()
def __start_without_graphics(self) -> Optional[BasicTesting]:
"""
Starts simulator without graphics
:return: The test results
"""
self.__services.algorithm.instance.find_path()
return self.__services.algorithm.instance.testing
def notify(self, event: Event) -> None:
if isinstance(event, ReinitEvent):
if self.__map:
"""
self.__map.stop_algorithm()
if self.__map.last_thread:
self.__map.last_thread.join()
"""
self.__map.reset()
self.__services.ev_manager.unregister_listener(self.__map)
self.__services.ev_manager.unregister_tick_listener(self.__map)
self.__try_setup_map_graphics()
@property
def services(self) -> Services:
return self.__services
|
18326
|
import copy
import logging
import numpy as np
import six
import tensorflow as tf
from functools import wraps
from contextlib import contextmanager
from .backend_base import BackendBase, FunctionBase, DeviceDecorator
try:
from tensorflow.contrib.distributions import fill_triangular
except:
print("Cannot find fill_triangular")
class TensorflowFunction(FunctionBase):
def __init__(self, *args, **kwargs):
super(TensorflowFunction, self).__init__(*args, **kwargs)
with tf.control_dependencies(self.outputs):
self.updates = [tf.assign(k, v) for k, v in self.updates]
def __call__(self, *inputs):
feed_dict = self.feed_dict(*inputs)
result = self.session.get_current_session().run(self.outputs + self.updates, feed_dict=feed_dict)
if len(self.outputs) == 1:
return result[0]
return result[:len(self.outputs)]
@six.add_metaclass(DeviceDecorator)
class TensorflowBackend(BackendBase):
def __init__(self, **kwargs):
super(TensorflowBackend, self).__init__(**kwargs)
self.core = tf
self._sessions = []
self.set_default_device(self.gpu() if tf.test.is_gpu_available() else self.cpu())
# General purpose methods
@classmethod
def use_device(cls, method):
@wraps(method)
def func(self, *args, **kwargs):
with tf.device(self.get_current_device()):
result = method(self, *args, **kwargs)
return result
return func
def enable_eager(self):
tf.enable_eager_execution()
def cpu(self, id=0):
return 'cpu/:%u' % id
def gpu(self, id=0):
return 'gpu/:%u' % id
@property
def int32(self):
return tf.int32
@property
def float32(self):
return tf.float32
def _placeholder(self, dtype=None, shape=None, name=None):
with self._device(self.get_current_device()):
return tf.placeholder(dtype, shape=shape, name=name)
def _variable(self, initial_value=None, trainable=True, name=None):
with self._device(self.get_current_device()):
return tf.Variable(initial_value=initial_value, trainable=trainable, name=name)
def _device(self, name):
return tf.device(name)
def create_session(self, graph=None, **kwargs):
allow_growth = kwargs.pop('allow_growth', False)
config_proto = tf.ConfigProto(**kwargs)
config_proto.gpu_options.allow_growth = allow_growth
sess = tf.Session(graph=graph, config=config_proto)
self._initialize(sess)
return sess
@contextmanager
def session(self, **kwargs):
with self.create_session(**kwargs) as sess:
self._sessions.append(sess)
self._initialize(sess)
yield sess
self._sessions.pop()
def interactive_session(self, graph=None, **kwargs):
config_proto = tf.ConfigProto(**kwargs)
sess = tf.InteractiveSession(config=config_proto, graph=graph)
self._initialize(sess)
return sess
def get_current_session(self):
if len(self._sessions) == 0:
raise Exception('No current session')
return self._sessions[-1]
def _initialize(self, sess):
sess.run(tf.local_variables_initializer())
sess.run(tf.global_variables_initializer())
# Unified interface
def cast(self, x, dtype):
return tf.cast(x, dtype)
def dtype(self, x):
return x.dtype
def shape(self, x):
return tf.shape(x)
def rank(self, x):
return tf.rank(x)
def abs(self, x):
return tf.abs(x)
def set_value(self, x, value):
tf.assign(x, np.asarray(value)).op.run(session=self.get_current_session())
def zeros(self, shape, dtype=None, name=None):
dtype = dtype or self.floatx()
return tf.zeros(shape, dtype=dtype, name=name)
def zeros_like(self, x, dtype=None, name=None):
return tf.zeros_like(x, dtype=dtype, name=name)
def ones(self, shape, dtype=None, name=None):
dtype = dtype or self.floatx()
return tf.ones(shape, dtype=dtype, name=name)
def ones_like(self, x, dtype=None, name=None):
return tf.ones_like(x, dtype=dtype, name=name)
def random_normal(self, shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
dtype = dtype or self.floatx()
return tf.random_normal(shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
def random_truncated_normal(self, shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
dtype = dtype or self.floatx()
return tf.truncated_normal(shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
def random_uniform(self, shape, minval=0, maxval=None, dtype=None, seed=None):
dtype = dtype or self.floatx()
return tf.random_uniform(shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
def random_binomial(self, shape, p=0.5, dtype=None):
dtype = dtype or self.floatx()
return tf.where(tf.random_uniform(shape, dtype=dtype) <= p,
tf.ones(shape, dtype=dtype),
tf.zeros(shape, dtype=dtype))
    def random_gamma(self, shape, alpha, beta=None):
        return tf.random_gamma(shape, alpha, beta=beta)
def tanh(self, x, name=None):
return tf.tanh(x, name=name)
def sigmoid(self, x, name=None):
return tf.sigmoid(x, name=name)
def relu(self, x, alpha=0., name=None):
return tf.nn.relu(x, name=name)
def softmax(self, x, T=1.0):
return tf.nn.softmax(x)
def softplus(self, x):
return tf.nn.softplus(x)
def dropout(self, x, p, seed=None):
retain_prob = 1. - p
if seed is None:
seed = np.random.randint(10e6)
return tf.nn.dropout(x * 1., retain_prob, seed=seed)
def conv2d(self, x, kernel, strides=(1, 1), border_mode='same',
image_shape=None, filter_shape=None):
'''
Run on cuDNN if available.
border_mode: string, "same" or "valid".
dim_ordering: whether to use Theano or TensorFlow dimension ordering
in inputs/kernels/ouputs.
'''
if border_mode == 'same':
padding = 'SAME'
elif border_mode == 'valid':
padding = 'VALID'
else:
raise Exception('Invalid border mode: ' + str(border_mode))
# strides = strides# + (1,)
if self.floatx() == 'float64':
x = tf.cast(x, 'float32')
kernel = tf.cast(kernel, 'float32')
x = tf.nn.convolution(input=x, filter=kernel, strides=strides, padding=padding,
data_format='NHWC')
if self.floatx() == 'float64':
x = tf.cast(x, 'float64')
return x
def conv2d_transpose(self, x, kernel, dim_out, strides=(1, 1), border_mode='same'):
if border_mode == 'same':
padding = 'SAME'
elif border_mode == 'valid':
padding = 'VALID'
else:
raise Exception('Invalid border mode: ' + str(border_mode))
output_shape = [self.shape(x)[0]] + list(dim_out)
strides = (1,) + strides + (1,)
if self.floatx() == 'float64':
x = tf.cast(x, 'float32')
kernel = tf.cast(kernel, 'float32')
x = tf.nn.conv2d_transpose(x, kernel, output_shape, strides, padding=padding)
if self.floatx() == 'float64':
x = tf.cast(x, 'float64')
return x
def pool2d(self, x, pool_size, strides=(1, 1),
border_mode='valid', pool_mode='max'):
'''
pool_size: tuple of 2 integers.
strides: tuple of 2 integers.
border_mode: one of "valid", "same".
dim_ordering: one of "th", "tf".
'''
if border_mode == 'same':
padding = 'SAME'
elif border_mode == 'valid':
padding = 'VALID'
else:
raise Exception('Invalid border mode: ' + str(border_mode))
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
if self.floatx() == 'float64':
x = tf.cast(x, 'float32')
if pool_mode == 'max':
x = tf.nn.max_pool(x, pool_size, strides, padding=padding)
elif pool_mode == 'avg':
x = tf.nn.avg_pool(x, pool_size, strides, padding=padding)
else:
raise Exception('Invalid pooling mode: ' + str(pool_mode))
if self.floatx() == 'float64':
x = tf.cast(x, 'float64')
return x
def flatten(self, x, leading=1):
leading_dim = self.shape(x)[:leading]
new_shape = tf.concat([leading_dim, [-1]], 0)
return tf.reshape(x, new_shape)
def split(self, x, num_splits, axis=None):
axis = axis % len(x.get_shape())
return tf.split(x, num_splits, axis=axis)
def reshape(self, x, shape):
return tf.reshape(x, shape)
def sum(self, x, axis=None, keepdims=False):
if x.dtype.base_dtype == tf.bool:
x = tf.cast(x, self.floatx())
return tf.reduce_sum(x, axis=axis, keepdims=keepdims)
def prod(self, x, axis=None, keepdims=False):
return tf.reduce_prod(x, axis=axis, keepdims=keepdims)
def mean(self, x, axis=None, keepdims=False):
if axis is not None and axis < 0:
axis = axis % len(x.get_shape())
if x.dtype.base_dtype == tf.bool:
x = tf.cast(x, self.floatx())
return tf.reduce_mean(x, axis=axis, keepdims=keepdims)
def batch_norm(self, x, beta, gamma):
mean, variance = tf.nn.moments(x, [0])
normed = tf.nn.batch_normalization(tf.identity(x), mean, variance, beta, gamma, self.epsilon())
return normed
def log(self, x):
return tf.log(x)
def log1p(self, x):
return tf.log1p(x)
def exp(self, x):
return tf.exp(x)
def pow(self, x, a):
return tf.pow(x, a)
def mul(self, x, y):
return tf.multiply(x, y)
def sqrt(self, x):
x = tf.clip_by_value(x,
tf.cast(0., dtype=self.floatx()),
tf.cast(np.inf, dtype=self.floatx()))
return tf.sqrt(x)
def categorical_crossentropy(self, output, target, from_logits=False, axis=-1):
if not from_logits:
# scale preds so that the class probas of each sample sum to 1
output = output / tf.reduce_sum(output, axis, True)
# manual computation of crossentropy
output = tf.clip_by_value(output, self.epsilon(), 1. - self.epsilon())
return -tf.reduce_sum(target * tf.log(output), axis)
else:
return tf.nn.softmax_cross_entropy_with_logits_v2(logits=output, labels=target)
def binary_crossentropy(self, output, target, from_logits=False):
if from_logits:
return tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
else:
raise NotImplementedError
def concatenate(self, tensors, axis=-1):
return tf.concat(tensors, axis=axis)
def sort(self, tensor):
values, indices = tf.nn.top_k(-tensor, k=tf.shape(tensor)[0])
return -values, indices
def argmin(self, tensor, axis=0):
return tf.argmin(tensor, axis=axis)
def map(self, function, input):
return tf.map_fn(function, input)
def rnn(self, step_function, input, initial_states, **kwargs):
num_dims = self.rank(input)
perm = self.concat([[1, 0], self.range(2, num_dims)])
input = self.transpose(input, perm)
def step(state, input_):
output, state = step_function(input_, state, **kwargs)
return state
result = tf.scan(step, input, initial_states)[0]
return self.transpose(result, perm)
def while_loop(self, condition, body, loop_vars, **kwargs):
return tf.while_loop(condition, body, loop_vars)
def scan(self, fn, elems, initializer=None):
return tf.scan(fn, elems, initializer=initializer, back_prop=True)
def logdet(self, A, **kwargs):
A = (A + self.matrix_transpose(A)) / 2.
term = tf.log(tf.matrix_diag_part(self.cholesky(A, **kwargs)))
return 2 * tf.reduce_sum(term, -1)
def einsum(self, subscripts, *operands):
return tf.einsum(subscripts, *operands)
def cholesky(self, A, lower=True, warn=True, correct=False):
assert lower is True
# Gradient through py_func adapted from https://gist.github.com/harpone/3453185b41d8d985356cbe5e57d67342
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
tf.RegisterGradient(rnd_name)(grad)
g = tf.get_default_graph()
with g.gradient_override_map({'PyFunc': rnd_name, 'PyFuncStateless': rnd_name}):
return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
def correction(A):
A_new, del_ = A.copy(), 1e-4
while True:
try:
np.linalg.cholesky(A_new)
break
except np.linalg.linalg.LinAlgError:
if warn:
logging.warn('[Cholesky] singular matrix, adding diagonal {}'.format(del_))
A_new = A + del_ * np.eye(A.shape[-1]).astype(self.floatx())
del_ *= 2
return A_new
def _correction_grad(op, grad):
A = op.inputs[0]
return grad
if correct:
shape = A.get_shape()
A = py_func(correction, [A], A.dtype, grad=_correction_grad)
A.set_shape(shape)
return tf.cholesky(A)
# Tensorflow interface
def placeholder(self, dtype, shape=None, name=None):
return self._placeholder(dtype=dtype, shape=shape, name=name)
def variable(self, initial_value=None, trainable=True, name=None):
return self._variable(initial_value=initial_value, trainable=trainable, name=name)
def assign(self, a, b):
return tf.assign(a, b)
def to_float(self, x):
return tf.cast(x, self.floatx())
def constant(self, value, dtype=None, shape=None):
return tf.constant(value, dtype=dtype, shape=shape)
def get_shape(self, x):
return [a.value for a in tf.convert_to_tensor(x).get_shape()]
def get_value(self, variable):
return self.get_current_session().run(variable)
def concat(self, values, axis=-1):
return tf.concat(values, axis=axis)
def gather(self, params, indices):
return tf.gather(params, indices)
def gather_nd(self, params, indices):
return tf.gather_nd(params, indices)
def equal(self, x, y):
return tf.equal(x, y)
def logical_and(self, x, y):
return tf.logical_and(x, y)
def matmul(self, a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None):
return tf.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, name=name)
def trace(self, a):
return tf.trace(a)
def transpose(self, a, perm=None):
return tf.transpose(a, perm=perm)
def matrix_transpose(self, a):
return tf.matrix_transpose(a)
def matrix_diag(self, a):
return tf.matrix_diag(a)
def matrix_diag_part(self, a):
return tf.matrix_diag_part(a)
def set_diag(self, input, diagonal):
return tf.linalg.set_diag(input, diagonal)
def band_part(self, input, num_lower, num_upper):
return tf.linalg.band_part(input, num_lower, num_upper)
def vec(self, A):
A = self.matrix_transpose(A)
leading_dim = self.shape(A)[:-2]
return self.reshape(A, self.concat([
leading_dim,
[-1]
], 0))
def unvec(self, v, m, n):
leading_dim = self.shape(v)[:-1]
return self.matrix_transpose(self.reshape(v, self.concat([
leading_dim,
[n, m]
], 0)))
def kronecker(self, A, B):
C = (A[..., None, None] * B[..., None, None, :, :])
blocks = [
tf.unstack(a, axis=-3 % len(a.shape)) for a in
tf.unstack(C, axis=-4 % len(C.shape))
]
return tf.concat([
tf.concat(a, -1) for a in blocks
], -2)
def block_sum(self, X, m, n):
leading_dim = self.shape(X)[:-2]
block_sum = self.zeros(self.concat([leading_dim, [m, m]], 0))
for i in range(n):
block_sum += X[..., i*m:(i+1)*m, i*m:(i+1)*m]
return block_sum
def block_trace(self, X, m, n):
blocks = []
for i in range(n):
blocks.append([])
for j in range(n):
block = self.trace(X[..., i*m:(i+1)*m, j*m:(j+1)*m])
blocks[-1].append(block)
return self.pack([
self.pack([
b for b in block
])
for block in blocks
])
def kronecker_vec(self, X, m, n):
leading_dim = tf.shape(X)[:-2]
blocks = []
for i in range(n):
blocks.append([])
for j in range(m):
idx = i * m + j
block = tf.matrix_transpose(tf.reshape(X[..., idx, :], tf.concat([leading_dim, [n, m]], 0)))
blocks[-1].append(block)
return tf.concat([tf.concat(b, -2) for b in blocks], -1)
def lower_triangular(self, a):
return fill_triangular(a)
def matrix_inverse(self, a):
return tf.matrix_inverse(a)
def expand_dims(self, x, dim=-1):
return tf.expand_dims(x, dim)
def tile(self, input, multiples):
return tf.tile(input, multiples)
def gradients(self, loss, variables):
return tf.gradients(loss, variables)
def square(self, x):
return tf.square(x)
def clip_by_value(self, x, low, high):
return tf.clip_by_value(x, low, high)
def stack(self, values, axis=0, name='stack'):
return tf.stack(values, axis=axis, name=name)
def unstack(self, values, num=None, axis=0, name='unstack'):
return tf.unstack(values, num=num, axis=axis, name=name)
def pack(self, *args, **kwargs):
return self.stack(*args, **kwargs)
def unpack(self, *args, **kwargs):
return self.unstack(*args, **kwargs)
def reduce_max(self, x, axis=None, keepdims=False):
return tf.reduce_max(x, axis=axis, keepdims=keepdims)
def reduce_logsumexp(self, x, axis=None, keepdims=False):
return tf.reduce_logsumexp(x, axis=axis, keepdims=keepdims)
def matrix_solve(self, matrix, rhs, adjoint=None):
return tf.matrix_solve(matrix, rhs, adjoint=adjoint)
# Theano interface
def dim(self, x):
return len(x.get_shape())
def scalar(self, name=None, dtype=None, shape=[]):
dtype = dtype or self.floatx()
return self._placeholder(dtype=dtype, shape=shape, name=name)
def vector(self, name=None, dtype=None, shape=[None]):
dtype = dtype or self.floatx()
return self._placeholder(dtype=dtype, shape=shape, name=name)
def matrix(self, name=None, dtype=None, shape=[None, None]):
dtype = dtype or self.floatx()
return self._placeholder(dtype=dtype, shape=shape, name=name)
def tensor3(self, name=None, dtype=None, shape=[None, None, None]):
dtype = dtype or self.floatx()
return self._placeholder(dtype=dtype, shape=shape, name=name)
def tensor4(self, name=None, dtype=None, shape=[None, None, None, None]):
dtype = dtype or self.floatx()
return self._placeholder(dtype=dtype, shape=shape, name=name)
def shared(self, value, name=None):
return self._variable(initial_value=value, name=name)
def arange(self, start, stop=None, step=None):
return self.range(start, stop=stop, step=step)
def sparse_dot(self, x, y):
return tf.sparse_tensor_dense_matmul(x, y)
def dot(self, x, y):
if len(x.get_shape()) != len(y.get_shape()):
len_y = len(y.get_shape())
new_y_shape = tf.concat([tf.shape(x)[:-len_y], tf.shape(y)], 0)
y = tf.broadcast_to(y, new_y_shape)
return tf.matmul(x, y)
def outer(self, x, y):
if len(x.get_shape()) == 0:
return x * y
return x[...,:,None] * y[...,None,:]
def eye(self, d, batch_shape=None):
return tf.eye(d, batch_shape=batch_shape)
def function(self, inputs, outputs, updates=[]):
return TensorflowFunction(self, inputs, outputs, updates)
def grad(self, loss, variables):
return tf.gradients(loss, variables)
def sqr(self, x):
return tf.square(x)
def argmax(self, x, axis=None):
return tf.argmax(x, axis=axis)
def max(self, x, axis=None, keepdims=False):
return tf.reduce_max(x, axis=axis, keepdims=keepdims)
def logsumexp(self, x, axis=None, keepdims=False):
return tf.reduce_logsumexp(x, axis=axis, keepdims=keepdims)
def switch(self, condition, then_expression, else_expression):
'''Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
'''
return tf.where(condition, then_expression, else_expression)
def alloc(self, value, shape, unbroadcast=None, dtype=None):
dtype = dtype or self.floatx()
vals = tf.fill(tf.stack(shape), np.array(value).astype(dtype))
new_shape = []
for s in shape:
if isinstance(s, tf.Tensor):
new_shape.append(None)
else:
new_shape.append(s)
vals.set_shape(new_shape)
return vals
def range(self, start, limit=None, delta=1):
if limit is None:
return tf.range(start, delta=delta)
return tf.range(start, limit, delta=delta)
def solve(self, a, b):
return tf.matrix_solve(a, b)
def one_hot(self, indices, depth):
return tf.one_hot(indices, depth)
# Science methods
def gammaln(self, x):
return tf.lgamma(x)
def multigammaln(self, a, p):
p = self.to_float(p)
p_ = self.cast(p, 'int32')
a = a[..., None]
i = self.to_float(self.range(1, p_ + 1))
term1 = p * (p - 1) / 4. * self.log(np.pi)
term2 = self.gammaln(a - (i - 1) / 2.)
return term1 + self.sum(term2, axis=-1)
def digamma(self, a):
return tf.digamma(a)
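# --- Illustrative sketch (not part of the original file) ---------------------
# A standalone numpy version of the diagonal-jitter trick used by `cholesky`
# above when `correct=True`: keep adding a growing multiple of the identity to
# the matrix until the factorization succeeds. The helper name is hypothetical.
def _jittered_cholesky_sketch():
    import numpy as np
    A = np.zeros((3, 3))                      # singular, so plain Cholesky fails
    A_new, delta = A.copy(), 1e-4
    while True:
        try:
            L = np.linalg.cholesky(A_new)     # raises LinAlgError if not positive definite
            break
        except np.linalg.LinAlgError:
            A_new = A + delta * np.eye(A.shape[-1])
            delta *= 2
    return L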
|
18367
|
from __future__ import division
import pandas as pd
import numpy as np
import calendar
import os.path as op
import sys
from datetime import datetime
from dateutil.relativedelta import relativedelta
from scipy.stats import percentileofscore
from scipy.stats import scoreatpercentile, pearsonr
from math import *
import time
from BCSD_stats_functions import *
import xarray as xr
import os, errno
def CALC_BCSD(OBS_CLIM_ALL, FCST_CLIM_ALL, LEAD_FINAL, TARGET_FCST_VAL_ARR, TARGET_FCST_SYR, TARGET_FCST_EYR, FCST_SYR, ENS_NUM, MON, MONTH_NAME, count_grid, BC_VAR, TINY):
CORRECT_FCST_COARSE = np.ones(((TARGET_FCST_EYR-TARGET_FCST_SYR)+1, LEAD_FINAL, ENS_NUM))*-999
for LEAD_NUM in range(0, LEAD_FINAL): ## Loop from lead =0 to Final Lead
TARGET_MONTH = MON + LEAD_NUM; ## This is the target forecast month
## Check for the cases when the target forecast month is in the next year (e.g. February 1983 forecast initialized in December 1982)
if (TARGET_MONTH>12):
TARGET_MONTH-=12 #subtracting 12 so 13 becomes 1 meaning the month of January and so on.
## Just checking if the lead and target month combination is working as expected
if (count_grid==0): #Only printing the following for the first grid cell, no need to repeat
print ("Initial forecast month is {} Lead is {} and Target month is {}".format(MONTH_NAME, LEAD_NUM, calendar.month_name[TARGET_MONTH]))
        # Retrieving observed and forecast time series for the given target month
OBS_QUANT_TS, OBS_CLIM_TS = OBS_CLIM_ALL[0, :], OBS_CLIM_ALL[TARGET_MONTH, :] ## Note that the first column is quantile time series
FCST_QUANT_TS, FCST_CLIM_TS = FCST_CLIM_ALL[0, :], FCST_CLIM_ALL[LEAD_NUM+1, :] ## Note that the first column is quantile time series
## Now calculating mean, standard deviation and skew of both observed and forecast time series
obs_mean, obs_sd, obs_skew = Calc_Stats(OBS_CLIM_TS, TINY)
fcst_mean, fcst_sd, fcst_skew = Calc_Stats(FCST_CLIM_TS, TINY)
#obs_mean, obs_sd, obs_skew = Calc_Stats(OBS_CLIM_TS.values, TINY)
#fcst_mean, fcst_sd, fcst_skew = Calc_Stats(FCST_CLIM_TS.values, TINY)
## Ok, now getting started on the bias correction
        ## Note that bias correction is done separately for each ensemble member of all years
for fcst_yr in range(TARGET_FCST_SYR-FCST_SYR, (TARGET_FCST_EYR-FCST_SYR)+1):
for ens_num in range (0, ENS_NUM):
TARGET_FCST_VAL = TARGET_FCST_VAL_ARR[fcst_yr, LEAD_NUM, ens_num]
## First determine the quantile for given target forecast value
TARGET_FCST_QUANT = lookup(TARGET_FCST_VAL, FCST_CLIM_TS, FCST_QUANT_TS, len(FCST_CLIM_TS), BC_VAR, 'QUAN', fcst_mean, fcst_sd, fcst_skew, TINY);
#TARGET_FCST_QUANT = lookup(TARGET_FCST_VAL, FCST_CLIM_TS.values, FCST_QUANT_TS.values, len(FCST_CLIM_TS.values), BC_VAR, 'QUAN', fcst_mean, fcst_sd, fcst_skew, TINY);
                ## Also note that QUAN tells the lookup function whether we are converting a value to a quantile or vice versa
                ## For converting a value to a quantile use 'QUAN'; for converting a quantile to a value use 'DATA'
## Now using the quantile above determine the corresponding value from the observed climatology
BIAS_CORRECTED_VALUE = lookup(TARGET_FCST_QUANT, OBS_QUANT_TS, OBS_CLIM_TS, len(OBS_CLIM_TS), BC_VAR, 'DATA', obs_mean, obs_sd, obs_skew, TINY);
#BIAS_CORRECTED_VALUE = lookup(TARGET_FCST_QUANT, OBS_QUANT_TS.values, OBS_CLIM_TS.values, len(OBS_CLIM_TS.values), BC_VAR, 'DATA', obs_mean, obs_sd, obs_skew, TINY);
                if (BC_VAR=='PRCP') and (BIAS_CORRECTED_VALUE<0): ## This is just a check that we are not getting a negative precipitation value
print (TARGET_FCST_VAL, TARGET_FCST_QUANT, fcst_yr, LEAD_NUM, ens_num)
## Now storing the bias corrected anomaly
CORRECT_FCST_COARSE[fcst_yr, LEAD_NUM, ens_num] = BIAS_CORRECTED_VALUE
return CORRECT_FCST_COARSE
def latlon_calculations(ilat_min, ilat_max, ilon_min, ilon_max, nlats, nlons, \
np_OBS_CLIM_ARRAY, np_FCST_CLIM_ARRAY, \
LEAD_FINAL, TARGET_FCST_EYR, TARGET_FCST_SYR, FCST_SYR, ENS_NUM, MON, \
MONTH_NAME, BC_VAR, TINY, FCST_COARSE):
CORRECT_FCST_COARSE = np.ones(((TARGET_FCST_EYR-TARGET_FCST_SYR)+1, LEAD_FINAL, ENS_NUM, nlats, nlons))*-999
num_lats = ilat_max-ilat_min+1
num_lons = ilon_max-ilon_min+1
print("num_lats = ", num_lats, np_OBS_CLIM_ARRAY.shape)
print("num_lons = ", num_lons, FCST_COARSE.shape)
for ilat in range(num_lats):
lat_num = ilat_min + ilat
for ilon in range(num_lons):
lon_num = ilon_min + ilon
count_grid = ilon + ilat*num_lons
OBS_CLIM_ALL = np_OBS_CLIM_ARRAY[:, :, ilat, ilon]
FCST_CLIM_ALL = np_FCST_CLIM_ARRAY[:, :, ilat, ilon]
TARGET_FCST_VAL_ARR = FCST_COARSE[:, :, :, lat_num, lon_num]
CORRECT_FCST_COARSE[:, :, :, lat_num, lon_num] = CALC_BCSD(OBS_CLIM_ALL, FCST_CLIM_ALL, LEAD_FINAL, \
TARGET_FCST_VAL_ARR, TARGET_FCST_SYR, \
TARGET_FCST_EYR, FCST_SYR, ENS_NUM, MON, \
MONTH_NAME, count_grid, BC_VAR, TINY)
return CORRECT_FCST_COARSE
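# --- Illustrative sketch (not part of the original file) ---------------------
# A minimal, self-contained version of the quantile-mapping step that
# CALC_BCSD performs through lookup(): find the quantile of a forecast value
# in the forecast climatology, then read the value at that same quantile from
# the observed climatology. The data and the helper name below are made up.
def _quantile_mapping_sketch():
    fcst_clim = np.array([2.0, 4.0, 6.0, 8.0, 10.0])   # forecast climatology
    obs_clim = np.array([1.0, 2.0, 3.0, 4.0, 5.0])     # observed climatology
    target_fcst_val = 6.0
    quant = percentileofscore(fcst_clim, target_fcst_val)   # 60.0
    bias_corrected = scoreatpercentile(obs_clim, quant)     # 3.4
    return bias_corrected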
|
18407
|
from abc import ABCMeta, abstractmethod
import torch
import torch.nn.functional as F
from addict import Dict
from mmtrack.models import TRACKERS
@TRACKERS.register_module()
class BaseTracker(metaclass=ABCMeta):
"""Base tracker model.
Args:
momentums (dict[str:float], optional): Momentums to update the buffers.
The `str` indicates the name of the buffer while the `float`
            indicates the momentum. Defaults to None.
        num_frames_retain (int, optional): If a track has disappeared for more
            than `num_frames_retain` frames, it will be deleted from the memo.
"""
def __init__(self, momentums=None, num_frames_retain=10):
super().__init__()
if momentums is not None:
assert isinstance(momentums, dict), 'momentums must be a dict'
self.momentums = momentums
self.num_frames_retain = num_frames_retain
self.reset()
def reset(self):
"""Reset the buffer of the tracker."""
self.num_tracks = 0
self.tracks = dict()
@property
def empty(self):
"""Whether the buffer is empty or not."""
return False if self.tracks else True
@property
def ids(self):
"""All ids in the tracker."""
return list(self.tracks.keys())
@property
def with_reid(self):
"""bool: whether the framework has a reid model"""
return hasattr(self, 'reid') and self.reid is not None
def update(self, **kwargs):
"""Update the tracker.
Args:
kwargs (dict[str: Tensor | int]): The `str` indicates the
name of the input variable. `ids` and `frame_ids` are
obligatory in the keys.
"""
memo_items = [k for k, v in kwargs.items() if v is not None]
rm_items = [k for k in kwargs.keys() if k not in memo_items]
for item in rm_items:
kwargs.pop(item)
if not hasattr(self, 'memo_items'):
self.memo_items = memo_items
else:
assert memo_items == self.memo_items
assert 'ids' in memo_items
num_objs = len(kwargs['ids'])
id_indice = memo_items.index('ids')
assert 'frame_ids' in memo_items
frame_id = int(kwargs['frame_ids'])
if isinstance(kwargs['frame_ids'], int):
kwargs['frame_ids'] = torch.tensor([kwargs['frame_ids']] *
num_objs)
# cur_frame_id = int(kwargs['frame_ids'][0])
for k, v in kwargs.items():
if len(v) != num_objs:
raise ValueError()
for obj in zip(*kwargs.values()):
id = int(obj[id_indice])
if id in self.tracks:
self.update_track(id, obj)
else:
self.init_track(id, obj)
self.pop_invalid_tracks(frame_id)
def pop_invalid_tracks(self, frame_id):
"""Pop out invalid tracks."""
invalid_ids = []
for k, v in self.tracks.items():
if frame_id - v['frame_ids'][-1] >= self.num_frames_retain:
invalid_ids.append(k)
for invalid_id in invalid_ids:
self.tracks.pop(invalid_id)
def update_track(self, id, obj):
"""Update a track."""
for k, v in zip(self.memo_items, obj):
v = v[None]
if self.momentums is not None and k in self.momentums:
m = self.momentums[k]
self.tracks[id][k] = (1 - m) * self.tracks[id][k] + m * v
else:
self.tracks[id][k].append(v)
def init_track(self, id, obj):
"""Initialize a track."""
self.tracks[id] = Dict()
for k, v in zip(self.memo_items, obj):
v = v[None]
if self.momentums is not None and k in self.momentums:
self.tracks[id][k] = v
else:
self.tracks[id][k] = [v]
@property
def memo(self):
"""Return all buffers in the tracker."""
outs = Dict()
for k in self.memo_items:
outs[k] = []
for id, objs in self.tracks.items():
for k, v in objs.items():
if k not in outs:
continue
if self.momentums is not None and k in self.momentums:
v = v
else:
v = v[-1]
outs[k].append(v)
for k, v in outs.items():
outs[k] = torch.cat(v, dim=0)
return outs
def get(self, item, ids=None, num_samples=None, behavior=None):
"""Get the buffer of a specific item.
Args:
item (str): The demanded item.
ids (list[int]): The demaned ids.
num_samples (int, optional): Number of samples to calculate the
results. Defaults to None.
behavior (str, optional): Behavior to calculate the results.
Options are `mean` | None. Defaults to None.
Returns:
Tensor: The results of the demanded item.
"""
if ids is None:
ids = self.ids
outs = []
for id in ids:
out = self.tracks[id][item]
if isinstance(out, list):
if num_samples is not None:
out = out[-num_samples:]
out = torch.cat(out, dim=0)
if behavior == 'mean':
out = out.mean(dim=0, keepdim=True)
elif behavior is None:
out = out[None]
else:
raise NotImplementedError()
else:
out = out[-1]
outs.append(out)
return torch.cat(outs, dim=0)
@abstractmethod
def track(self, *args, **kwargs):
"""Tracking forward function."""
pass
def crop_imgs(self, img, img_metas, bboxes, rescale=False):
"""Crop the images according to some bounding boxes. Typically for re-
identification sub-module.
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
bboxes (Tensor): of shape (N, 4) or (N, 5).
rescale (bool, optional): If True, the bounding boxes should be
rescaled to fit the scale of the image. Defaults to False.
Returns:
Tensor: Image tensor of shape (N, C, H, W).
"""
h, w, _ = img_metas[0]['img_shape']
img = img[:, :, :h, :w]
if rescale:
bboxes[:, :4] *= torch.tensor(img_metas[0]['scale_factor']).to(
bboxes.device)
bboxes[:, 0::2] = torch.clamp(bboxes[:, 0::2], min=0, max=w)
bboxes[:, 1::2] = torch.clamp(bboxes[:, 1::2], min=0, max=h)
crop_imgs = []
for bbox in bboxes:
x1, y1, x2, y2 = map(int, bbox)
if x2 == x1:
x2 = x1 + 1
if y2 == y1:
y2 = y1 + 1
crop_img = img[:, :, y1:y2, x1:x2]
if self.reid.get('img_scale', False):
crop_img = F.interpolate(
crop_img,
size=self.reid['img_scale'],
mode='bilinear',
align_corners=False)
crop_imgs.append(crop_img)
if len(crop_imgs) > 0:
return torch.cat(crop_imgs, dim=0)
else:
return img.new_zeros((0, ))
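# --- Illustrative sketch (not part of mmtrack) --------------------------------
# A minimal concrete tracker exercising the buffer API above: `track()` assigns
# ids and pushes per-object tensors into `update()`, after which `memo` exposes
# the latest state of every live track. Class name and tensors are made up.
class _SketchTracker(BaseTracker):

    def track(self, bboxes, frame_id, **kwargs):
        ids = torch.arange(self.num_tracks, self.num_tracks + len(bboxes))
        self.num_tracks += len(bboxes)
        self.update(ids=ids, bboxes=bboxes, frame_ids=frame_id)
        return ids


def _sketch_tracker_example():
    tracker = _SketchTracker(num_frames_retain=5)
    bboxes = torch.tensor([[0., 0., 10., 10.], [5., 5., 20., 20.]])
    tracker.track(bboxes, frame_id=0)
    return tracker.memo['bboxes']   # shape (2, 4): latest box of each track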
|
18472
|
import torch
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from model_utils import *
class down(nn.Module):
"""
A class for creating neural network blocks containing layers:
    Average Pooling --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU
This is used in the UNet Class to create a UNet like NN architecture.
...
Methods
-------
forward(x)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels, filterSize):
"""
Parameters
----------
inChannels : int
number of input channels for the first convolutional layer.
outChannels : int
number of output channels for the first convolutional layer.
This is also used as input and output channels for the
second convolutional layer.
filterSize : int
filter size for the convolution filter. input N would create
a N x N filter.
"""
super(down, self).__init__()
# Initialize convolutional layers.
# self.conv1 = nn.Conv2d(inChannels, outChannels, filterSize, stride=1, padding=int((filterSize - 1) / 2))
# self.conv2 = nn.Conv2d(outChannels, outChannels, filterSize, stride=1, padding=int((filterSize - 1) / 2))
self.conv1 = MetaConv2dLayer(in_channels=inChannels, out_channels=outChannels, kernel_size=filterSize, stride=1, padding=int((filterSize - 1) / 2))
self.conv2 = MetaConv2dLayer(in_channels=outChannels, out_channels=outChannels, kernel_size=filterSize, stride=1, padding=int((filterSize - 1) / 2))
def forward(self, x, params=None):
"""
Returns output tensor after passing input `x` to the neural network
block.
Parameters
----------
x : tensor
input to the NN block.
Returns
-------
tensor
output of the NN block.
"""
# Average pooling with kernel size 2 (2 x 2).
x = F.avg_pool2d(x, 2)
# (Convolution + Leaky ReLU) x 2
param_dict = dict()
if params is not None:
param_dict = extract_top_level_dict(current_dict=params)
x = F.leaky_relu(self.conv1(x, params=param_dict['conv1']), negative_slope = 0.1)
x = F.leaky_relu(self.conv2(x, params=param_dict['conv2']), negative_slope = 0.1)
else:
x = F.leaky_relu(self.conv1(x), negative_slope = 0.1)
x = F.leaky_relu(self.conv2(x), negative_slope = 0.1)
return x
class up(nn.Module):
"""
A class for creating neural network blocks containing layers:
    Bilinear interpolation --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU
This is used in the UNet Class to create a UNet like NN architecture.
...
Methods
-------
forward(x, skpCn)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels):
"""
Parameters
----------
inChannels : int
number of input channels for the first convolutional layer.
outChannels : int
number of output channels for the first convolutional layer.
This is also used for setting input and output channels for
the second convolutional layer.
"""
super(up, self).__init__()
# Initialize convolutional layers.
# self.conv1 = nn.Conv2d(inChannels, outChannels, 3, stride=1, padding=1)
self.conv1 = MetaConv2dLayer(in_channels=inChannels, out_channels=outChannels, kernel_size=3, stride=1, padding=1)
# (2 * outChannels) is used for accommodating skip connection.
# self.conv2 = nn.Conv2d(2 * outChannels, outChannels, 3, stride=1, padding=1)
self.conv2 = MetaConv2dLayer(in_channels=2 * outChannels, out_channels=outChannels, kernel_size=3, stride=1, padding=1)
def forward(self, x, skpCn, params=None):
"""
Returns output tensor after passing input `x` to the neural network
block.
Parameters
----------
x : tensor
input to the NN block.
skpCn : tensor
skip connection input to the NN block.
Returns
-------
tensor
output of the NN block.
"""
# Bilinear interpolation with scaling 2.
x = F.interpolate(x, scale_factor=2, mode='bilinear')
param_dict = dict()
if params is not None:
param_dict = extract_top_level_dict(current_dict=params)
# Convolution + Leaky ReLU
x = F.leaky_relu(self.conv1(x, params=param_dict['conv1']), negative_slope = 0.1)
# Convolution + Leaky ReLU on (`x`, `skpCn`)
x = F.leaky_relu(self.conv2(torch.cat((x, skpCn), 1), params=param_dict['conv2']), negative_slope = 0.1)
else:
# Convolution + Leaky ReLU
x = F.leaky_relu(self.conv1(x), negative_slope = 0.1)
# Convolution + Leaky ReLU on (`x`, `skpCn`)
x = F.leaky_relu(self.conv2(torch.cat((x, skpCn), 1)), negative_slope = 0.1)
return x
class UNet(nn.Module):
"""
A class for creating UNet like architecture as specified by the
Super SloMo paper.
...
Methods
-------
forward(x)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels):
"""
Parameters
----------
inChannels : int
number of input channels for the UNet.
outChannels : int
number of output channels for the UNet.
"""
super(UNet, self).__init__()
# Initialize neural network blocks.
self.conv1 = nn.Conv2d(inChannels, 32, 7, stride=1, padding=3)
self.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3)
self.down1 = down(32, 64, 5)
self.down2 = down(64, 128, 3)
self.down3 = down(128, 256, 3)
self.down4 = down(256, 512, 3)
self.down5 = down(512, 512, 3)
self.up1 = up(512, 512)
self.up2 = up(512, 256)
self.up3 = up(256, 128)
self.up4 = up(128, 64)
self.up5 = up(64, 32)
self.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1)
def forward(self, x):
"""
Returns output tensor after passing input `x` to the neural network.
Parameters
----------
x : tensor
input to the UNet.
Returns
-------
tensor
output of the UNet.
"""
x = F.leaky_relu(self.conv1(x), negative_slope = 0.1)
s1 = F.leaky_relu(self.conv2(x), negative_slope = 0.1)
s2 = self.down1(s1)
s3 = self.down2(s2)
s4 = self.down3(s3)
s5 = self.down4(s4)
x = self.down5(s5)
x = self.up1(x, s5)
x = self.up2(x, s4)
x = self.up3(x, s3)
x = self.up4(x, s2)
x = self.up5(x, s1)
x = F.leaky_relu(self.conv3(x), negative_slope = 0.1)
return x
class backWarp(nn.Module):
"""
A class for creating a backwarping object.
This is used for backwarping to an image:
Given optical flow from frame I0 to I1 --> F_0_1 and frame I1,
it generates I0 <-- backwarp(F_0_1, I1).
...
Methods
-------
    forward(img, flow)
Returns output tensor after passing input `img` and `flow` to the backwarping
block.
"""
def __init__(self, W, H, device):
"""
Parameters
----------
W : int
width of the image.
H : int
height of the image.
device : device
computation device (cpu/cuda).
"""
super(backWarp, self).__init__()
# create a grid
gridX, gridY = np.meshgrid(np.arange(W), np.arange(H))
self.W = W
self.H = H
self.gridX = torch.tensor(gridX, requires_grad=False, device=device)
self.gridY = torch.tensor(gridY, requires_grad=False, device=device)
def forward(self, img, flow):
"""
Returns output tensor after passing input `img` and `flow` to the backwarping
block.
I0 = backwarp(I1, F_0_1)
Parameters
----------
img : tensor
frame I1.
flow : tensor
optical flow from I0 and I1: F_0_1.
Returns
-------
tensor
frame I0.
"""
# Extract horizontal and vertical flows.
u = flow[:, 0, :, :]
v = flow[:, 1, :, :]
x = self.gridX.unsqueeze(0).expand_as(u).float() + u
y = self.gridY.unsqueeze(0).expand_as(v).float() + v
# range -1 to 1
x = 2*(x/self.W - 0.5)
y = 2*(y/self.H - 0.5)
# stacking X and Y
grid = torch.stack((x,y), dim=3)
# Sample pixels using bilinear interpolation.
imgOut = torch.nn.functional.grid_sample(img, grid)
return imgOut
# Creating an array of `t` values for the 7 intermediate frames between
# reference frames I0 and I1.
t = np.linspace(0.125, 0.875, 7)
def getFlowCoeff (indices, device):
"""
Gets flow coefficients used for calculating intermediate optical
flows from optical flows between I0 and I1: F_0_1 and F_1_0.
F_t_0 = C00 x F_0_1 + C01 x F_1_0
F_t_1 = C10 x F_0_1 + C11 x F_1_0
where,
C00 = -(1 - t) x t
C01 = t x t
C10 = (1 - t) x (1 - t)
C11 = -t x (1 - t)
Parameters
----------
indices : tensor
indices corresponding to the intermediate frame positions
of all samples in the batch.
device : device
computation device (cpu/cuda).
Returns
-------
tensor
coefficients C00, C01, C10, C11.
"""
# Convert indices tensor to numpy array
ind = indices.detach().numpy()
C11 = C00 = - (1 - (t[ind])) * (t[ind])
C01 = (t[ind]) * (t[ind])
C10 = (1 - (t[ind])) * (1 - (t[ind]))
return torch.Tensor(C00)[None, None, None, :].permute(3, 0, 1, 2).to(device), torch.Tensor(C01)[None, None, None, :].permute(3, 0, 1, 2).to(device), torch.Tensor(C10)[None, None, None, :].permute(3, 0, 1, 2).to(device), torch.Tensor(C11)[None, None, None, :].permute(3, 0, 1, 2).to(device)
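# Worked example (illustrative, not part of the original file): for the middle
# intermediate frame, indices == 3 gives t[3] == 0.5, so the coefficients above
# evaluate to C00 = C11 = -(1 - 0.5) * 0.5 = -0.25 and C01 = C10 = 0.25, i.e.
# F_t_0 = -0.25 * F_0_1 + 0.25 * F_1_0 and F_t_1 = 0.25 * F_0_1 - 0.25 * F_1_0.
def _flow_coeff_example(device=torch.device('cpu')):
    C00, C01, C10, C11 = getFlowCoeff(torch.tensor([3]), device)
    # each coefficient is a (1, 1, 1, 1) tensor holding the values above
    return C00.item(), C01.item(), C10.item(), C11.item()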
def getWarpCoeff (indices, device):
"""
Gets coefficients used for calculating final intermediate
frame `It_gen` from backwarped images using flows F_t_0 and F_t_1.
It_gen = (C0 x V_t_0 x g_I_0_F_t_0 + C1 x V_t_1 x g_I_1_F_t_1) / (C0 x V_t_0 + C1 x V_t_1)
where,
C0 = 1 - t
C1 = t
V_t_0, V_t_1 --> visibility maps
g_I_0_F_t_0, g_I_1_F_t_1 --> backwarped intermediate frames
Parameters
----------
indices : tensor
indices corresponding to the intermediate frame positions
of all samples in the batch.
device : device
computation device (cpu/cuda).
Returns
-------
tensor
coefficients C0 and C1.
"""
# Convert indices tensor to numpy array
ind = indices.detach().numpy()
C0 = 1 - t[ind]
C1 = t[ind]
return torch.Tensor(C0)[None, None, None, :].permute(3, 0, 1, 2).to(device), torch.Tensor(C1)[None, None, None, :].permute(3, 0, 1, 2).to(device)
class SuperSloMoModel(nn.Module):
def __init__(self, device):
super(SuperSloMoModel, self).__init__()
self.device = device
self.flowComp = UNet(6, 4)
self.arbTimeFlowIntrp = UNet(20, 5)
self.backwarp = None
def forward(self, I0, I1, ind):
w, h = I0.size(3), I0.size(2)
s = 6 # bits to shift
padW, padH = 0, 0
if w != ((w >> s) << s):
padW = (((w >> s) + 1) << s) - w
if h != ((h >> s) << s):
padH = (((h >> s) + 1) << s) - h
paddingInput = nn.ReflectionPad2d(padding=[padW // 2, padW - padW // 2, padH // 2, padH - padH // 2])
paddingOutput = nn.ReflectionPad2d(padding=[0 - padW // 2, padW // 2 - padW, 0 - padH // 2, padH // 2 - padH])
I0 = paddingInput(I0)
I1 = paddingInput(I1)
flowOut = self.flowComp(torch.cat((I0, I1), dim=1))
F_0_1 = flowOut[:, :2, :, :]
F_1_0 = flowOut[:, 2:, :, :]
fCoeff = getFlowCoeff(ind, self.device)
F_t_0 = fCoeff[0] * F_0_1 + fCoeff[1] * F_1_0
F_t_1 = fCoeff[2] * F_0_1 + fCoeff[3] * F_1_0
if self.backwarp is None or self.backwarp.W != I0.size(3) or self.backwarp.H != I0.size(2):
self.backwarp = backWarp(I0.size(3), I0.size(2), self.device) # make grid
g_I0_F_t_0 = self.backwarp(I0, F_t_0)
g_I1_F_t_1 = self.backwarp(I1, F_t_1)
intrpOut = self.arbTimeFlowIntrp(torch.cat((I0, I1, F_0_1, F_1_0, F_t_1, F_t_0, g_I1_F_t_1, g_I0_F_t_0), dim=1))
F_t_0_f = intrpOut[:, :2, :, :] + F_t_0
F_t_1_f = intrpOut[:, 2:4, :, :] + F_t_1
V_t_0 = F.sigmoid(intrpOut[:, 4:5, :, :])
V_t_1 = 1 - V_t_0
g_I0_F_t_0_f = self.backwarp(I0, F_t_0_f)
g_I1_F_t_1_f = self.backwarp(I1, F_t_1_f)
wCoeff = getWarpCoeff(ind, self.device)
Ft_p = (wCoeff[0] * V_t_0 * g_I0_F_t_0_f + wCoeff[1] * V_t_1 * g_I1_F_t_1_f) / (wCoeff[0] * V_t_0 + wCoeff[1] * V_t_1)
warped_I0, warped_I1 = self.backwarp(I0, F_1_0), self.backwarp(I1, F_0_1)
Ft_p = paddingOutput(Ft_p)
F_0_1, F_1_0 = paddingOutput(F_0_1), paddingOutput(F_1_0)
g_I0_F_t_0, g_I1_F_t_1 = paddingOutput(g_I0_F_t_0), paddingOutput(g_I1_F_t_1)
warped_I0, warped_I1 = paddingOutput(warped_I0), paddingOutput(warped_I1)
#return Ft_p, # output image
# (F_0_1, F_1_0), # bidirectional flow maps
# (g_I0_F_t_0, g_I1_F_t_1), # warped intermediate images
# (self.backwarp(I0, F_1_0), self.backwarp(I1, F_0_1)) # warped input image (0-1, 1-0)
return Ft_p, \
(F_0_1, F_1_0), \
(g_I0_F_t_0, g_I1_F_t_1), \
(warped_I0, warped_I1)
# (self.backwarp(I0, F_1_0), self.backwarp(I1, F_0_1))
class MetaUNet(nn.Module):
"""
A class for creating UNet like architecture as specified by the
Super SloMo paper.
...
Methods
-------
forward(x)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels):
"""
Parameters
----------
inChannels : int
number of input channels for the UNet.
outChannels : int
number of output channels for the UNet.
"""
super(MetaUNet, self).__init__()
# Initialize neural network blocks.
self.conv1 = MetaConv2dLayer(in_channels=inChannels, out_channels=32, kernel_size=7, stride=1, padding=3)
self.conv2 = MetaConv2dLayer(in_channels=32, out_channels=32, kernel_size=7, stride=1, padding=3)
self.down1 = down(32, 64, 5)
self.down2 = down(64, 128, 3)
self.down3 = down(128, 256, 3)
self.down4 = down(256, 512, 3)
self.down5 = down(512, 512, 3)
self.up1 = up(512, 512)
self.up2 = up(512, 256)
self.up3 = up(256, 128)
self.up4 = up(128, 64)
self.up5 = up(64, 32)
self.conv3 = MetaConv2dLayer(in_channels=32, out_channels=outChannels, kernel_size=3, stride=1, padding=1)
def forward(self, x, params=None):
"""
Returns output tensor after passing input `x` to the neural network.
Parameters
----------
x : tensor
input to the UNet.
Returns
-------
tensor
output of the UNet.
"""
param_dict = dict()
if params is not None:
param_dict = extract_top_level_dict(current_dict=params)
x = F.leaky_relu(self.conv1(x, params=param_dict['conv1']), negative_slope = 0.1)
s1 = F.leaky_relu(self.conv2(x, params=param_dict['conv2']), negative_slope = 0.1)
s2 = self.down1(s1, params=param_dict['down1'])
s3 = self.down2(s2, params=param_dict['down2'])
s4 = self.down3(s3, params=param_dict['down3'])
s5 = self.down4(s4, params=param_dict['down4'])
x = self.down5(s5, params=param_dict['down5'])
x = self.up1(x, s5, params=param_dict['up1'])
x = self.up2(x, s4, params=param_dict['up2'])
x = self.up3(x, s3, params=param_dict['up3'])
x = self.up4(x, s2, params=param_dict['up4'])
x = self.up5(x, s1, params=param_dict['up5'])
x = F.leaky_relu(self.conv3(x, params=param_dict['conv3']), negative_slope = 0.1)
else:
x = F.leaky_relu(self.conv1(x), negative_slope = 0.1)
s1 = F.leaky_relu(self.conv2(x), negative_slope = 0.1)
s2 = self.down1(s1)
s3 = self.down2(s2)
s4 = self.down3(s3)
s5 = self.down4(s4)
x = self.down5(s5)
x = self.up1(x, s5)
x = self.up2(x, s4)
x = self.up3(x, s3)
x = self.up4(x, s2)
x = self.up5(x, s1)
x = F.leaky_relu(self.conv3(x), negative_slope = 0.1)
return x
class MetaSuperSloMo(nn.Module):
def __init__(self, device, resume=False):
super(MetaSuperSloMo, self).__init__()
self.device = device
self.flowComp = MetaUNet(6, 4)
self.arbTimeFlowIntrp = MetaUNet(20, 5)
self.backwarp = None
if resume:
print('Loading model: pretrained_models/superslomo_base.pth')
# checkpoint = torch.load('pretrained_models/meta_superslomo.pth')
checkpoint = torch.load('pretrained_models/superslomo_base.pth')
self.flowComp.load_state_dict(checkpoint['state_dictFC'])
self.arbTimeFlowIntrp.load_state_dict(checkpoint['state_dictAT'])
def forward(self, I0, I1, ind=3, params=None, **kwargs):
ind = ind * torch.ones(I0.size(0), dtype=int)
w, h = I0.size(3), I0.size(2)
s = 6 # bits to shift
padW, padH = 0, 0
if w != ((w >> s) << s):
padW = (((w >> s) + 1) << s) - w
if h != ((h >> s) << s):
padH = (((h >> s) + 1) << s) - h
paddingInput = nn.ReflectionPad2d(padding=[padW // 2, padW - padW // 2, padH // 2, padH - padH // 2])
paddingOutput = nn.ReflectionPad2d(padding=[0 - padW // 2, padW // 2 - padW, 0 - padH // 2, padH // 2 - padH])
I0 = paddingInput(I0)
I1 = paddingInput(I1)
param_dict = dict()
if params is not None:
param_dict = extract_top_level_dict(current_dict=params)
flowOut = self.flowComp(torch.cat((I0, I1), dim=1), params=param_dict['flowComp'])
F_0_1 = flowOut[:, :2, :, :]
F_1_0 = flowOut[:, 2:, :, :]
fCoeff = getFlowCoeff(ind, self.device)
F_t_0 = fCoeff[0] * F_0_1 + fCoeff[1] * F_1_0
F_t_1 = fCoeff[2] * F_0_1 + fCoeff[3] * F_1_0
if self.backwarp is None or self.backwarp.W != I0.size(3) or self.backwarp.H != I0.size(2):
self.backwarp = backWarp(I0.size(3), I0.size(2), self.device) # make grid
g_I0_F_t_0 = self.backwarp(I0, F_t_0)
g_I1_F_t_1 = self.backwarp(I1, F_t_1)
intrpOut = self.arbTimeFlowIntrp(torch.cat((I0, I1, F_0_1, F_1_0, F_t_1, F_t_0, g_I1_F_t_1, g_I0_F_t_0), dim=1),
params=param_dict['arbTimeFlowIntrp'])
else:
flowOut = self.flowComp(torch.cat((I0, I1), dim=1))
F_0_1 = flowOut[:, :2, :, :]
F_1_0 = flowOut[:, 2:, :, :]
fCoeff = getFlowCoeff(ind, self.device)
F_t_0 = fCoeff[0] * F_0_1 + fCoeff[1] * F_1_0
F_t_1 = fCoeff[2] * F_0_1 + fCoeff[3] * F_1_0
if self.backwarp is None or self.backwarp.W != I0.size(3) or self.backwarp.H != I0.size(2):
self.backwarp = backWarp(I0.size(3), I0.size(2), self.device) # make grid
g_I0_F_t_0 = self.backwarp(I0, F_t_0)
g_I1_F_t_1 = self.backwarp(I1, F_t_1)
intrpOut = self.arbTimeFlowIntrp(torch.cat((I0, I1, F_0_1, F_1_0, F_t_1, F_t_0, g_I1_F_t_1, g_I0_F_t_0), dim=1))
F_t_0_f = intrpOut[:, :2, :, :] + F_t_0
F_t_1_f = intrpOut[:, 2:4, :, :] + F_t_1
V_t_0 = F.sigmoid(intrpOut[:, 4:5, :, :])
V_t_1 = 1 - V_t_0
g_I0_F_t_0_f = self.backwarp(I0, F_t_0_f)
g_I1_F_t_1_f = self.backwarp(I1, F_t_1_f)
wCoeff = getWarpCoeff(ind, self.device)
Ft_p = (wCoeff[0] * V_t_0 * g_I0_F_t_0_f + wCoeff[1] * V_t_1 * g_I1_F_t_1_f) / (wCoeff[0] * V_t_0 + wCoeff[1] * V_t_1)
warped_I0, warped_I1 = self.backwarp(I0, F_1_0), self.backwarp(I1, F_0_1)
Ft_p = paddingOutput(Ft_p)
F_0_1, F_1_0 = paddingOutput(F_0_1), paddingOutput(F_1_0)
g_I0_F_t_0, g_I1_F_t_1 = paddingOutput(g_I0_F_t_0), paddingOutput(g_I1_F_t_1)
warped_I0, warped_I1 = paddingOutput(warped_I0), paddingOutput(warped_I1)
#return Ft_p, # output image
# (F_0_1, F_1_0), # bidirectional flow maps
# (g_I0_F_t_0, g_I1_F_t_1), # warped intermediate images
# (self.backwarp(I0, F_1_0), self.backwarp(I1, F_0_1)) # warped input image (0-1, 1-0)
return Ft_p, {
'bidirectional_flow': (F_0_1, F_1_0),
'warped_intermediate_frames': (g_I0_F_t_0, g_I1_F_t_1),
'warped_input_frames': (warped_I0, warped_I1)}
# (self.backwarp(I0, F_1_0), self.backwarp(I1, F_0_1))
# return Ft_p
def zero_grad(self, params=None):
if params is None:
for param in self.parameters():
if param.requires_grad == True:
if param.grad is not None:
if torch.sum(param.grad) > 0:
print(param.grad)
param.grad.zero_()
else:
for name, param in params.items():
if param.requires_grad == True:
if param.grad is not None:
if torch.sum(param.grad) > 0:
print(param.grad)
param.grad.zero_()
params[name].grad = None
def restore_backup_stats(self):
"""
Reset stored batch statistics from the stored backup.
"""
pass # no batch statistics used
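# --- Illustrative sketch (not part of the original file) ---------------------
# A minimal forward pass through MetaSuperSloMo on random frames, assuming that
# MetaConv2dLayer (from model_utils) can be called without an explicit `params`
# argument, as the params=None branches above do. Shapes and names are made up.
def _meta_superslomo_example():
    device = torch.device('cpu')
    model = MetaSuperSloMo(device, resume=False)
    I0 = torch.rand(1, 3, 128, 128)
    I1 = torch.rand(1, 3, 128, 128)
    Ft_p, extras = model(I0, I1, ind=3)   # middle intermediate frame, t = 0.5
    # Ft_p keeps the input spatial size; `extras` holds the bidirectional flows
    # and the warped intermediate/input frames.
    return Ft_p.shape, sorted(extras.keys())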
|
18475
|
import sys
from PySide2.QtWidgets import QApplication
from PySide2.QtGui import QColor
from pivy import quarter, coin, graphics, utils
class ConnectionMarker(graphics.Marker):
def __init__(self, points):
super(ConnectionMarker, self).__init__(points, True)
class ConnectionPolygon(graphics.Polygon):
std_col = "green"
def __init__(self, markers):
super(ConnectionPolygon, self).__init__(
sum([m.points for m in markers], []), True)
self.markers = markers
for m in self.markers:
m.on_drag.append(self.update_polygon)
def update_polygon(self):
self.points = sum([m.points for m in self.markers], [])
@property
def drag_objects(self):
return self.markers
def check_dependency(self):
if any([m._delete for m in self.markers]):
self.delete()
class ConnectionLine(graphics.Line):
def __init__(self, markers):
super(ConnectionLine, self).__init__(
sum([m.points for m in markers], []), True)
self.markers = markers
for m in self.markers:
m.on_drag.append(self.update_line)
def update_line(self):
self.points = sum([m.points for m in self.markers], [])
@property
def drag_objects(self):
return self.markers
def check_dependency(self):
if any([m._delete for m in self.markers]):
self.delete()
def main():
app = QApplication(sys.argv)
utils.addMarkerFromSvg("test.svg", "CUSTOM_MARKER", 40)
viewer = quarter.QuarterWidget()
root = graphics.InteractionSeparator(viewer.sorendermanager)
root.pick_radius = 40
m1 = ConnectionMarker([[-1, -1, -1]])
m2 = ConnectionMarker([[-1, 1, -1]])
m3 = ConnectionMarker([[ 1, 1, -1]])
m4 = ConnectionMarker([[ 1, -1, -1]])
m5 = ConnectionMarker([[-1, -1, 1]])
m6 = ConnectionMarker([[-1, 1, 1]])
m7 = ConnectionMarker([[ 1, 1, 1]])
m8 = ConnectionMarker([[ 1, -1, 1]])
points = [m1, m2, m3, m4, m5, m6, m7, m8]
l01 = ConnectionLine([m1, m2])
l02 = ConnectionLine([m2, m3])
l03 = ConnectionLine([m3, m4])
l04 = ConnectionLine([m4, m1])
l05 = ConnectionLine([m5, m6])
l06 = ConnectionLine([m6, m7])
l07 = ConnectionLine([m7, m8])
l08 = ConnectionLine([m8, m5])
l09 = ConnectionLine([m1, m5])
l10 = ConnectionLine([m2, m6])
l11 = ConnectionLine([m3, m7])
l12 = ConnectionLine([m4, m8])
lines = [l01, l02, l03, l04, l05, l06, l07, l08, l09, l10, l11, l12]
p1 = ConnectionPolygon([m1, m2, m3, m4])
p2 = ConnectionPolygon([m8, m7, m6, m5])
p3 = ConnectionPolygon([m5, m6, m2, m1])
p4 = ConnectionPolygon([m6, m7, m3, m2])
p5 = ConnectionPolygon([m7, m8, m4, m3])
p6 = ConnectionPolygon([m8, m5, m1, m4])
polygons = [p1, p2, p3, p4, p5, p6]
root += points + lines + polygons
root.register()
viewer.setSceneGraph(root)
viewer.setBackgroundColor(QColor(255, 255, 255))
viewer.setWindowTitle("minimal")
viewer.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
18477
|
import numpy as np
np.deprecate(1) # E: No overload variant
np.deprecate_with_doc(1) # E: incompatible type
np.byte_bounds(1) # E: incompatible type
np.who(1) # E: incompatible type
np.lookfor(None) # E: incompatible type
np.safe_eval(None) # E: incompatible type
|
18517
|
import unittest
import zserio
from testutils import getZserioApi
class Bit4RangeCheckTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "with_range_check_code.zs",
extraArgs=["-withRangeCheckCode"]).bit4_range_check
def testBit4LowerBound(self):
self._checkBit4Value(BIT4_LOWER_BOUND)
def testBit4UpperBound(self):
self._checkBit4Value(BIT4_UPPER_BOUND)
def testBit4BelowLowerBound(self):
with self.assertRaises(zserio.PythonRuntimeException):
self._checkBit4Value(BIT4_LOWER_BOUND - 1)
def testBit4AboveUpperBound(self):
with self.assertRaises(zserio.PythonRuntimeException):
self._checkBit4Value(BIT4_UPPER_BOUND + 1)
def _checkBit4Value(self, value):
bit4RangeCheckCompound = self.api.Bit4RangeCheckCompound(value_=value)
bitBuffer = zserio.serialize(bit4RangeCheckCompound)
readBit4RangeCheckCompound = zserio.deserialize(self.api.Bit4RangeCheckCompound, bitBuffer)
self.assertEqual(bit4RangeCheckCompound, readBit4RangeCheckCompound)
BIT4_LOWER_BOUND = 0
BIT4_UPPER_BOUND = 15
|
18536
|
from bitmovin_api_sdk.encoding.encodings.muxings.mp3.mp3_api import Mp3Api
from bitmovin_api_sdk.encoding.encodings.muxings.mp3.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.encodings.muxings.mp3.information.information_api import InformationApi
from bitmovin_api_sdk.encoding.encodings.muxings.mp3.mp3_muxing_list_query_params import Mp3MuxingListQueryParams
|
18558
|
from __future__ import print_function
from sympy import symbols, Matrix
from galgebra.printer import xpdf, Format
def main():
Format()
a = Matrix ( 2, 2, ( 1, 2, 3, 4 ) )
b = Matrix ( 2, 1, ( 5, 6 ) )
c = a * b
print(a,b,'=',c)
x, y = symbols ( 'x, y' )
d = Matrix ( 1, 2, ( x ** 3, y ** 3 ))
e = Matrix ( 2, 2, ( x ** 2, 2 * x * y, 2 * x * y, y ** 2 ) )
f = d * e
print('%',d,e,'=',f)
# xpdf()
xpdf(pdfprog=None)
return
if __name__ == "__main__":
main()
|
18571
|
from collections import defaultdict
from celery.task import task
from pandas import concat, DataFrame
from bamboo.core.aggregator import Aggregator
from bamboo.core.frame import add_parent_column, join_dataset
from bamboo.core.parser import Parser
from bamboo.lib.datetools import recognize_dates
from bamboo.lib.jsontools import df_to_jsondict
from bamboo.lib.mongo import MONGO_ID
from bamboo.lib.parsing import parse_columns
from bamboo.lib.query_args import QueryArgs
from bamboo.lib.utils import combine_dicts, flatten, to_list
def calculate_columns(dataset, calculations):
"""Calculate and store new columns for `calculations`.
    The new columns are joined to the Calculation dframe and replace the
dataset's observations.
.. note::
This can result in race-conditions when:
- deleting ``controllers.Datasets.DELETE``
- updating ``controllers.Datasets.POST([dataset_id])``
        Therefore, perform these actions asynchronously.
:param dataset: The dataset to calculate for.
:param calculations: A list of calculations.
"""
new_cols = None
for c in calculations:
if c.aggregation:
aggregator = __create_aggregator(
dataset, c.formula, c.name, c.groups_as_list)
aggregator.save(dataset)
else:
columns = parse_columns(dataset, c.formula, c.name)
if new_cols is None:
new_cols = DataFrame(columns[0])
else:
new_cols = new_cols.join(columns[0])
if new_cols is not None:
dataset.update_observations(new_cols)
# propagate calculation to any merged child datasets
[__propagate_column(x, dataset) for x in dataset.merged_datasets]
@task(default_retry_delay=5, ignore_result=True)
def calculate_updates(dataset, new_data, new_dframe_raw=None,
parent_dataset_id=None, update_id=None):
"""Update dataset with `new_data`.
This can result in race-conditions when:
- deleting ``controllers.Datasets.DELETE``
- updating ``controllers.Datasets.POST([dataset_id])``
    Therefore, perform these actions asynchronously.
:param new_data: Data to update this dataset with.
:param new_dframe_raw: DataFrame to update this dataset with.
:param parent_dataset_id: If passed add ID as parent ID to column,
default is None.
"""
if not __update_is_valid(dataset, new_dframe_raw):
dataset.remove_pending_update(update_id)
return
__ensure_ready(dataset, update_id)
if new_dframe_raw is None:
new_dframe_raw = dframe_from_update(dataset, new_data)
new_dframe = recognize_dates(new_dframe_raw, dataset.schema)
new_dframe = __add_calculations(dataset, new_dframe)
# set parent id if provided
if parent_dataset_id:
new_dframe = add_parent_column(new_dframe, parent_dataset_id)
dataset.append_observations(new_dframe)
dataset.clear_summary_stats()
propagate(dataset, new_dframe=new_dframe, update={'add': new_dframe_raw})
dataset.update_complete(update_id)
def dframe_from_update(dataset, new_data):
"""Make a DataFrame for the `new_data`.
:param new_data: Data to add to dframe.
:type new_data: List.
"""
filtered_data = []
columns = dataset.columns
labels_to_slugs = dataset.schema.labels_to_slugs
num_columns = len(columns)
num_rows = dataset.num_rows
dframe_empty = not num_columns
if dframe_empty:
columns = dataset.schema.keys()
for row in new_data:
filtered_row = dict()
for col, val in row.iteritems():
# special case for reserved keys (e.g. _id)
if col == MONGO_ID:
if (not num_columns or col in columns) and\
col not in filtered_row.keys():
filtered_row[col] = val
else:
# if col is a label take slug, if it's a slug take col
slug = labels_to_slugs.get(
col, col if col in labels_to_slugs.values() else None)
# if slug is valid or there is an empty dframe
if (slug or col in labels_to_slugs.keys()) and (
dframe_empty or slug in columns):
filtered_row[slug] = dataset.schema.convert_type(
slug, val)
filtered_data.append(filtered_row)
index = range(num_rows, num_rows + len(filtered_data))
new_dframe = DataFrame(filtered_data, index=index)
return new_dframe
@task(default_retry_delay=5, ignore_result=True)
def propagate(dataset, new_dframe=None, update=None):
"""Propagate changes in a modified dataset."""
__update_aggregate_datasets(dataset, new_dframe, update=update)
if update:
__update_merged_datasets(dataset, update)
__update_joined_datasets(dataset, update)
def __add_calculations(dataset, new_dframe):
labels_to_slugs = dataset.schema.labels_to_slugs
for calculation in dataset.calculations(include_aggs=False):
function = Parser.parse_function(calculation.formula)
new_column = new_dframe.apply(function, axis=1, args=(dataset, ))
potential_name = calculation.name
if potential_name not in dataset.dframe().columns:
if potential_name in labels_to_slugs:
new_column.name = labels_to_slugs[potential_name]
else:
new_column.name = potential_name
new_dframe = new_dframe.join(new_column)
return new_dframe
def __calculation_data(dataset):
"""Create a list of aggregate calculation information.
    Builds a list of calculation information from the current dataset's
aggregated datasets and aggregate calculations.
"""
calcs_to_data = defaultdict(list)
calculations = dataset.calculations(only_aggs=True)
names_to_formulas = {c.name: c.formula for c in calculations}
names = set(names_to_formulas.keys())
for group, dataset in dataset.aggregated_datasets:
labels_to_slugs = dataset.schema.labels_to_slugs
calculations_for_dataset = list(set(
labels_to_slugs.keys()).intersection(names))
for calc in calculations_for_dataset:
calcs_to_data[calc].append((
names_to_formulas[calc], labels_to_slugs[calc], group,
dataset))
return flatten(calcs_to_data.values())
def __update_is_valid(dataset, new_dframe):
"""Check if the update is valid.
Check whether this is a right-hand side of any joins
and deny the update if the update would produce an invalid
join as a result.
:param dataset: The dataset to check if update valid for.
:param new_dframe: The update dframe to check.
    :returns: True if the update is valid, False otherwise.
"""
select = {on: 1 for on in dataset.on_columns_for_rhs_of_joins if on in
new_dframe.columns and on in dataset.columns}
dframe = dataset.dframe(query_args=QueryArgs(select=select))
for on in select.keys():
merged_join_column = concat([new_dframe[on], dframe[on]])
if len(merged_join_column) != merged_join_column.nunique():
return False
return True
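# --- Illustrative sketch (not part of the original file) ---------------------
# The uniqueness check above in plain pandas: an update is rejected when the
# combined join column of the existing data and the new rows contains
# duplicates, because a join keyed on `on` would no longer be well defined.
# The frames below are made up.
def __join_validity_sketch():
    existing = DataFrame({'on': [1, 2, 3]})
    new_rows = DataFrame({'on': [3, 4]})   # key 3 duplicates an existing row
    merged_join_column = concat([new_rows['on'], existing['on']])
    return len(merged_join_column) == merged_join_column.nunique()   # False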
def __create_aggregator(dataset, formula, name, groups, dframe=None):
# TODO this should work with index eventually
columns = parse_columns(dataset, formula, name, dframe, no_index=True)
dependent_columns = Parser.dependent_columns(formula, dataset)
aggregation = Parser.parse_aggregation(formula)
# get dframe with only the necessary columns
select = combine_dicts({group: 1 for group in groups},
{col: 1 for col in dependent_columns})
# ensure at least one column (MONGO_ID) for the count aggregation
query_args = QueryArgs(select=select or {MONGO_ID: 1})
dframe = dataset.dframe(query_args=query_args, keep_mongo_keys=not select)
return Aggregator(dframe, groups, aggregation, name, columns)
def __ensure_ready(dataset, update_id):
# dataset must not be pending
if not dataset.is_ready or (
update_id and dataset.has_pending_updates(update_id)):
dataset.reload()
raise calculate_updates.retry()
def __find_merge_offset(dataset, merged_dataset):
offset = 0
for parent_id in merged_dataset.parent_ids:
if dataset.dataset_id == parent_id:
break
offset += dataset.find_one(parent_id).num_rows
return offset
def __propagate_column(dataset, parent_dataset):
"""Propagate columns in `parent_dataset` to `dataset`.
When a new calculation is added to a dataset this will propagate the
new column to all child (merged) datasets.
    :param dataset: The child dataset.
:param parent_dataset: The dataset to propagate.
"""
# delete the rows in this dataset from the parent
dataset.remove_parent_observations(parent_dataset.dataset_id)
# get this dataset without the out-of-date parent rows
dframe = dataset.dframe(keep_parent_ids=True)
    # create new dframe from the updated parent and add parent id
parent_dframe = add_parent_column(parent_dataset.dframe(),
parent_dataset.dataset_id)
# merge this new dframe with the existing dframe
updated_dframe = concat([dframe, parent_dframe])
# save new dframe (updates schema)
dataset.replace_observations(updated_dframe)
dataset.clear_summary_stats()
# recur into merged dataset
[__propagate_column(x, dataset) for x in dataset.merged_datasets]
def __remapped_data(dataset_id, mapping, slugified_data):
column_map = mapping.get(dataset_id) if mapping else None
if column_map:
slugified_data = [{column_map.get(k, k): v for k, v in row.items()}
for row in slugified_data]
return slugified_data
def __slugify_data(new_data, labels_to_slugs):
slugified_data = []
new_data = to_list(new_data)
for row in new_data:
for key, value in row.iteritems():
if labels_to_slugs.get(key) and key != MONGO_ID:
del row[key]
row[labels_to_slugs[key]] = value
slugified_data.append(row)
return slugified_data
def __update_aggregate_datasets(dataset, new_dframe, update=None):
calcs_to_data = __calculation_data(dataset)
for formula, slug, groups, a_dataset in calcs_to_data:
__update_aggregate_dataset(dataset, formula, new_dframe, slug, groups,
a_dataset, update is None)
def __update_aggregate_dataset(dataset, formula, new_dframe, name, groups,
a_dataset, reducible):
"""Update the aggregated dataset built for `dataset` with `calculation`.
Proceed with the following steps:
- delete the rows in this dataset from the parent
- recalculate aggregated dataframe from aggregation
- update aggregated dataset with new dataframe and add parent id
- recur on all merged datasets descending from the aggregated
dataset
:param formula: The formula to execute.
:param new_dframe: The DataFrame to aggregate on.
:param name: The name of the aggregation.
:param groups: A column or columns to group on.
:type group: String, list of strings, or None.
:param a_dataset: The DataSet to store the aggregation in.
"""
# parse aggregation and build column arguments
aggregator = __create_aggregator(
dataset, formula, name, groups, dframe=new_dframe)
new_agg_dframe = aggregator.update(dataset, a_dataset, formula, reducible)
# jsondict from new dframe
new_data = df_to_jsondict(new_agg_dframe)
for merged_dataset in a_dataset.merged_datasets:
# remove rows in child from this merged dataset
merged_dataset.remove_parent_observations(a_dataset.dataset_id)
# calculate updates for the child
calculate_updates(merged_dataset, new_data,
parent_dataset_id=a_dataset.dataset_id)
def __update_joined_datasets(dataset, update):
"""Update any joined datasets."""
if 'add' in update:
new_dframe = update['add']
for direction, other_dataset, on, j_dataset in dataset.joined_datasets:
if 'add' in update:
if direction == 'left':
# only proceed if on in new dframe
if on in new_dframe.columns:
left_dframe = other_dataset.dframe(padded=True)
# only proceed if new on value is in on column in lhs
if len(set(new_dframe[on]).intersection(
set(left_dframe[on]))):
merged_dframe = join_dataset(left_dframe, dataset, on)
j_dataset.replace_observations(merged_dframe)
# TODO is it OK not to propagate the join here?
else:
# if on in new data join with existing data
if on in new_dframe:
new_dframe = join_dataset(new_dframe, other_dataset, on)
calculate_updates(j_dataset, df_to_jsondict(new_dframe),
parent_dataset_id=dataset.dataset_id)
elif 'delete' in update:
j_dataset.delete_observation(update['delete'])
elif 'edit' in update:
j_dataset.update_observation(*update['edit'])
def __update_merged_datasets(dataset, update):
if 'add' in update:
data = df_to_jsondict(update['add'])
# store slugs as labels for child datasets
data = __slugify_data(data, dataset.schema.labels_to_slugs)
# update the merged datasets with new_dframe
for mapping, merged_dataset in dataset.merged_datasets_with_map:
if 'add' in update:
mapped_data = __remapped_data(dataset.dataset_id, mapping, data)
calculate_updates(merged_dataset, mapped_data,
parent_dataset_id=dataset.dataset_id)
elif 'delete' in update:
offset = __find_merge_offset(dataset, merged_dataset)
merged_dataset.delete_observation(update['delete'] + offset)
elif 'edit' in update:
offset = __find_merge_offset(dataset, merged_dataset)
index, data = update['edit']
merged_dataset.update_observation(index + offset, data)
|
18572
|
import inviwopy
from inviwopy.glm import *
v1 = vec3(1,2,3)
v2 = size2_t(4,5)
m1 = mat4(1)
m2 = mat3(0,1,0,-1,0,0,0,0,2)
v3 = m2 * v1
v4 = vec4(1,2,3,4)
w = v4.w
a = v4.a
q = v4.q
z = v4.z
b = v4.b
p = v4.p
y = v4.y
g = v4.g
t = v4.t
x = v4.x
r = v4.r
s = v4.s
|
18601
|
from typing import List
import asyncio
import inspect
import logging
import uuid
import aio_pika
import aio_pika.exceptions
from .base import BaseRPC
from .common import RPCError, RPCHandler, RPCRequest, RPCResponse
class RPC(BaseRPC):
HEARTBEAT_INTERVAL = 300
def __init__(
self,
url: str = None,
name: str = None,
handler: RPCHandler = None,
timeout: float = None,
pool_size: int = 0,
batch_size: int = 0,
wait_for_batch: bool = False,
max_jobs: int = 0,
loop: asyncio.AbstractEventLoop = None,
):
self._loop = loop
self._url = url or self.URL
self._name = name
self._handler = handler
self._timeout = timeout
self._pool_size = pool_size
self._batch_size = batch_size
self._wait_for_batch = wait_for_batch
self._max_jobs = max_jobs
self._mconn: aio_pika.RobustConnection = None
self._mch: aio_pika.RobustChannel = None
self._mq: aio_pika.RobustQueue = None
self._queue = asyncio.Queue(loop=loop)
self._pool = []
self._consuming = False
async def _run_pool(self):
self._pool = [self._run_worker() for _ in range(self._pool_size)]
self._consuming = True
await asyncio.gather(*self._pool, loop=self._loop)
self._pool = []
async def _run_worker(self):
bs = self._batch_size
q = self._queue
while self._consuming:
batch = [await q.get()]
if self._wait_for_batch and bs > 0:
while len(batch) < bs:
batch.append(await q.get())
else:
while (bs <= 0 or len(batch) < bs) and not q.empty():
batch.append(q.get_nowait())
await asyncio.wait_for(
asyncio.ensure_future(
self._process_batch(batch), loop=self._loop,
),
self._timeout,
loop=self._loop,
)
async def _process_single(self, message: aio_pika.IncomingMessage):
return await asyncio.wait_for(
asyncio.ensure_future(
self._process_batch([message]), loop=self._loop,
),
self._timeout,
loop=self._loop,
)
async def _process_batch(self, messages: List[aio_pika.IncomingMessage]):
try:
reqs = []
for m in messages:
# logging.debug(f"message: correlation_id={m.correlation_id}")
req: RPCRequest = self.decode_request(m.body)
reqs.append(req)
# logging.debug(f"handler: {self._handler}")
results = self._handler(*reqs)
if inspect.isawaitable(results):
results = await results
except KeyboardInterrupt:
self._consuming = False
for m in messages:
await m.reject(requeue=True)
return
except Exception as e:
if len(messages) == 1:
results = [RPCError()]
logging.exception(e)
await messages[0].reject()
else:
for m in messages:
await asyncio.wait_for(
asyncio.ensure_future(
self._process_batch([m]), loop=self._loop,
),
self._timeout,
loop=self._loop,
)
return
for message, result in zip(messages, results):
result = aio_pika.Message(
self.encode_response(result),
correlation_id=message.correlation_id,
delivery_mode=message.delivery_mode,
)
await self._mch.default_exchange.publish(
result, routing_key=message.reply_to, mandatory=False,
)
if not message.processed:
await message.ack()
async def consume(self):
while True:
try:
self._mconn = await aio_pika.connect_robust(
self._url,
loop=self._loop,
heartbeat_interval=self.HEARTBEAT_INTERVAL,
)
break
except ConnectionError:
                # This case is not handled by aio-pika for some reason
logging.warning("wait for queue...")
await asyncio.sleep(1, loop=self._loop)
self._mch = await self._mconn.channel()
await self._mch.set_qos(prefetch_count=self._max_jobs)
self._mq = await self._mch.declare_queue(self._name)
if self._pool_size > 0:
await asyncio.gather(
self._run_pool(),
self._mq.consume(self._queue.put),
loop=self._loop,
)
else:
await self._mq.consume(self._process_single)
return self._mconn
async def call(self, msg: RPCRequest) -> RPCResponse:
return await asyncio.wait_for(
asyncio.ensure_future(self._call(msg), loop=self._loop,),
self._timeout,
loop=self._loop,
)
async def _call(self, msg: RPCRequest) -> RPCResponse:
if not self._mconn:
self._mconn = await aio_pika.connect_robust(
self._url,
loop=self._loop,
heartbeat_interval=self.HEARTBEAT_INTERVAL,
)
if not self._mch:
self._mch: aio_pika.RobustChannel = await self._mconn.channel()
mq: aio_pika.RobustQueue = await self._mch.declare_queue()
try:
correlation_id = str(uuid.uuid4())
message = aio_pika.Message(
self.encode_request(msg),
correlation_id=correlation_id,
reply_to=mq.name,
)
await self._mch.default_exchange.publish(
message, routing_key=self._name,
)
async with mq.iterator(no_ack=True) as it:
async for message in it:
break
if message.correlation_id != correlation_id:
raise ValueError("wrong correlation_id")
response: RPCResponse = self.decode_response(message.body)
# logging.debug(f"response: {response}")
if isinstance(response, RPCError):
response.reraise()
return response
finally:
await mq.delete(if_empty=False, if_unused=False)
|
18643
|
from torch import Tensor, nn
from ...base import VisionModule
class ClassificationModule(VisionModule):
"""Base Classification Module class"""
def __init__(
self,
encoder: nn.Module,
head: nn.Module,
in_channels: int = 3,
n_classes: int = 1000,
**kwargs
):
super().__init__()
self.encoder = encoder(in_channels=in_channels, **kwargs)
self.head = head(self.encoder.widths[-1], n_classes)
self.initialize()
def initialize(self):
pass
def forward(self, x: Tensor) -> Tensor:
x = self.encoder(x)
x = self.head(x)
return x
|
18647
|
from elasticsearch import TransportError
from sanic import Blueprint
from sanic.request import Request
from sanic.response import HTTPResponse, json
from ..connections import get_client
rest_bp = Blueprint('rest')
def format_es_exception(e: TransportError):
return json({"status_code": e.status_code,
"error": e.error,
"info": e.info})
@rest_bp.route('/query', methods=['POST'])
async def query_index(request: Request) -> HTTPResponse:
client = get_client(request)
body = request.json['body']
method = request.json['method']
path = request.json['path']
try:
resp = await client.transport.perform_request(method, path, body=body)
except TransportError as e:
return format_es_exception(e)
return json(resp)
|
18693
|
from __future__ import print_function
import numpy as np
import argparse
import glob
import os
import errno
import math
import cv2
from random import shuffle
from shutil import copyfile
parser = argparse.ArgumentParser(
description="create training/test/validation sets from video list"
)
parser.add_argument("--videoListPath", type=str, help="path to videos", required=True)
parser.add_argument(
"--fpsSingle", type=int, help="fps for single frame processing", default=2
)
parser.add_argument(
"--numRecurrent", type=int, help="how many recurent steps", default=3
)
parser.add_argument(
"--fpsRecurrent", type=int, help="fps for reccurent part", default=24
)
parser.add_argument(
"--chapterTiming",
type=str,
help="start and end timing list for all chapters",
default="timingChapters.txt",
)
parser.add_argument("--name", type=str, help="run name", default="training")
parser.add_argument("--blacklist", type=str, help="ignore video", default="-1")
parser.add_argument(
"--whitelist",
type=str,
help="specifies list of selected videos, if not set all videos are selected",
default="-1",
)
args = parser.parse_args()
def silentremove(filename):
try:
os.remove(filename)
except OSError as e:
if e.errno != errno.ENOENT:
raise # re-raise exception if a different error occurred
def processChapter_cutlist(
video,
chap,
origFramerate,
timing,
outputFileSingle,
cutList,
numRecurrent,
fpsRecurrent,
):
videoNameSplit = video.split("/")
videoName = videoNameSplit[-2]
imgPathRel = videoName + "/chapter" + str(chap) + "/"
modFrameFactorSingle = int(round(origFramerate / args.fpsSingle))
stepRecurrent = int(round(origFramerate / fpsRecurrent))
numRecurrent = (
numRecurrent + stepRecurrent * 2
) # extra frames in case of flow estimation
logFilename = video + "log" + str(chap) + ".txt"
with open(logFilename, "r") as fp:
with open(outputFileSingle, "a") as ofp_single:
prevIdx = -1
# iterate over log list
for cnt, line in enumerate(fp):
idx = line.find("pts_time:")
if idx == -1:
continue
pts_time = float(line[idx + 9 : idx + 9 + 7])
idx2 = line.find("n:")
frame_idx = int(line[idx2 + 2 : idx2 + 2 + 5]) + 1
                # use floor here to be on the safe side
if pts_time <= timing[0] or pts_time > math.floor(timing[1]):
continue
# ignore if at cut position
if pts_time in cutList:
continue
# sequence already processed
if frame_idx < prevIdx:
continue
largerElemCutList = [
x for x in cutList if x > pts_time and x < timing[1]
]
largerElemCutList.append(timing[1])
cutTimeNext = min(largerElemCutList)
smallerElemCutList = [
x for x in cutList if x < pts_time and x > timing[0]
]
smallerElemCutList.append(timing[0])
seqLength = (cutTimeNext - pts_time) * origFramerate
# for long sequences jump to some point later in the same sequence
jump = min(int(seqLength), origFramerate * 4)
prevIdx = frame_idx + int(jump)
                # ignore if sequence is too short
if seqLength < numRecurrent * stepRecurrent:
continue
imgFilename = {}
existing = True
for ri in range(0, numRecurrent * stepRecurrent):
frame_recurr = int(frame_idx + ri + 1)
frame_str = str(frame_recurr).zfill(8)
if ri % stepRecurrent != 0:
continue
ri_rec = int(ri / stepRecurrent)
imgFilename[ri_rec] = "out" + frame_str
if existing == False:
continue
for ri in range(stepRecurrent * 2, numRecurrent):
if (ri - stepRecurrent * 2) % modFrameFactorSingle == 0:
ofp_single.write(imgPathRel + imgFilename[ri] + "\n")
def processShotFile(video, shotFile):
numFrames = 0
cutList = []
with open(video + shotFile, "r") as fp:
for cnt, line in enumerate(fp):
# get cuts
idx = line.find("pkt_pts_time=")
if idx != -1:
numFrames = numFrames + 1
pts_time = float(line[idx + 13 : idx + 13 + 8])
cutList.append(pts_time)
return cutList
def main():
videoList = glob.glob(args.videoListPath + "*/")
origFramerate = 24
trainingSingleFile = (
args.videoListPath
+ args.name
+ "_"
+ str(args.fpsSingle)
+ "fpsSingle_"
+ str(args.fpsRecurrent)
+ "fps_"
+ str(args.numRecurrent)
+ "frames"
+ "_single.txt"
)
silentremove(trainingSingleFile)
for video in videoList:
print(video)
videoNameSplit = video.split("/")
videoName = videoNameSplit[-2]
if videoName in args.blacklist:
print(videoName + " on blacklist")
continue
if args.whitelist != "-1" and videoName not in args.whitelist:
print(videoName + " not on whitelist")
continue
print("processing " + videoName)
cutList = processShotFile(video, "shots.txt")
print(len(cutList))
timingList = []
with open(video + args.chapterTiming, "r") as fp:
timingListTmp = fp.read().splitlines()
for timingLine in timingListTmp:
timingList.append([float(x) for x in timingLine.split(",")])
chapterList = glob.glob(video + "log*.txt")
numChapters = len(chapterList)
validChapters = range(2, numChapters)
trainingSet = validChapters
for chap in trainingSet:
processChapter_cutlist(
video,
chap,
origFramerate,
timingList[chap - 1],
trainingSingleFile,
cutList,
args.numRecurrent,
args.fpsRecurrent,
)
main()
|
18707
|
import os
import sys
import json
import argparse
import numpy as np
sys.path.append('Camera_Intrinsics_API/')
from get_camera_intrinsics import CameraIntrinsicsHelper
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_dir",
type=str,
default='data/videos_sfm/',
help="COLMAP output folder of videos",
)
parser.add_argument(
"--input_dir_greedy",
type=str,
default='data/videos_sfm_greedy/',
help="Folder for the COLMAP outputs - greedy.",
)
parser.add_argument(
"--annotation_dir",
type=str,
default='data/v1/annotations/',
help="annotation folder. Must contain the vq3d_<split>.json files.",
)
parser.add_argument(
"--output_filename",
type=str,
default='data/v1/scan_to_intrinsics.json',
)
args = parser.parse_args()
dataset = {}
for split in ['train', 'val']:
a = json.load(open(os.path.join(args.annotation_dir,
f'vq3d_{split}.json'), 'r'))
for video in a['videos']:
video_uid=video['video_uid']
scan_uid=video['scan_uid']
dataset[video_uid]=scan_uid
helper = CameraIntrinsicsHelper()
datadir=args.input_dir
datadir_2=args.input_dir_greedy
cpt=0
all_intrinsics = {}
for video_uid in os.listdir(datadir):
scan_uid=dataset[video_uid]
intrinsic_txt = os.path.join(datadir,
video_uid,
'sparse',
'0',
'cameras.txt')
if not os.path.isfile(intrinsic_txt):
intrinsic_txt = os.path.join(datadir_2,
video_uid,
'sparse',
'0',
'cameras.txt')
if not os.path.isfile(intrinsic_txt):
cpt+=1
else:
intrinsics = helper.parse_colmap_intrinsics(intrinsic_txt)
if scan_uid not in all_intrinsics:
all_intrinsics[scan_uid]={}
token = (intrinsics['width'], intrinsics['height'])
if token not in all_intrinsics[scan_uid]:
all_intrinsics[scan_uid][token] = []
all_intrinsics[scan_uid][token].append(
(
intrinsics['f'],
intrinsics['cx'],
intrinsics['cy'],
intrinsics['k1'],
intrinsics['k2'],
)
)
else:
intrinsics = helper.parse_colmap_intrinsics(intrinsic_txt)
if scan_uid not in all_intrinsics:
all_intrinsics[scan_uid]={}
token = (intrinsics['width'], intrinsics['height'])
if token not in all_intrinsics[scan_uid]:
all_intrinsics[scan_uid][token] = []
all_intrinsics[scan_uid][token].append(
(
intrinsics['f'],
intrinsics['cx'],
intrinsics['cy'],
intrinsics['k1'],
intrinsics['k2'],
)
)
outputs = {}
for scan_uid, d in all_intrinsics.items():
print(' ')
print('Scan uid: ', scan_uid)
outputs[scan_uid]={}
for resolution, v in d.items():
print(' -- resolution: ', resolution)
resolution_str = str(resolution)
outputs[scan_uid][resolution_str]={
'f': np.median([float(i[0]) for i in v]),
'cx': np.median([float(i[1]) for i in v]),
'cy': np.median([float(i[2]) for i in v]),
'k1': np.median([float(i[3]) for i in v]),
'k2': np.median([float(i[4]) for i in v]),
}
for i in v:
print(' -- -- -- : ', i)
print(' ')
print(' -- -- -- : ',
outputs[scan_uid][resolution_str]['f'],
outputs[scan_uid][resolution_str]['cx'],
outputs[scan_uid][resolution_str]['cy'],
outputs[scan_uid][resolution_str]['k1'],
outputs[scan_uid][resolution_str]['k2'],
)
    json.dump(outputs, open(args.output_filename, 'w'))
|
18721
|
class register:
plugin_dict = {}
plugin_name = []
@classmethod
def register(cls, plugin_name):
def wrapper(plugin):
cls.plugin_dict[plugin_name] = plugin
return plugin
return wrapper
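# Illustrative usage sketch (not part of the original source): registering a plugin class
# under a name via the classmethod decorator and looking it up from the shared registry.
# The EchoPlugin name below is hypothetical.
@register.register('echo')
class EchoPlugin:
    def run(self, text):
        return text
assert register.plugin_dict['echo'] is EchoPlugin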
|
18785
|
import dateutil
import pytest
from testsuite.plugins import mockserver
from testsuite.utils import json_util
NOW = dateutil.parser.parse('2019-09-19-13:04:00.000000')
MOCKSERVER_INFO = mockserver.MockserverInfo(
'localhost', 123, 'http://localhost:123/', None,
)
MOCKSERVER_SSL_INFO = mockserver.MockserverInfo(
'localhost',
456,
'https://localhost:456/',
mockserver.SslInfo('/some_dir/cert.cert', '/some_dir/cert.key'),
)
@pytest.mark.parametrize(
'json_input,expected_result',
[
( # simple list
[{'some_date': {'$dateDiff': 0}}, 'regular_element'], # json_input
[{'some_date': NOW}, 'regular_element'], # expected_result
),
( # simple dict
{ # json_input
'some_date': {'$dateDiff': 0},
'regular_key': 'regular_value',
},
            {'some_date': NOW, 'regular_key': 'regular_value'},  # expected_result
),
( # nested list and dict
{ # json_input
'regular_root_key': 'regular_root_value',
'root_date': {'$dateDiff': 0},
'parent_key': {
'nested_date': {'$dateDiff': 0},
'nested_list': [
'regular_element1',
{'$dateDiff': 0},
{'$dateDiff': 0},
'regular_element2',
],
},
},
{ # expected_result
'regular_root_key': 'regular_root_value',
'root_date': NOW,
'parent_key': {
'nested_date': NOW,
'nested_list': [
'regular_element1',
NOW,
NOW,
'regular_element2',
],
},
},
),
],
)
def test_substitute_now(json_input, expected_result):
result = json_util.substitute(json_input, now=NOW)
assert result == expected_result
@pytest.mark.parametrize(
'json_input,expected_result',
[
(
({'client_url': {'$mockserver': '/path'}}),
({'client_url': 'http://localhost:123/path'}),
),
(
({'client_url': {'$mockserver': '/path', '$schema': False}}),
({'client_url': 'localhost:123/path'}),
),
],
)
def test_substitute_mockserver(json_input, expected_result):
result = json_util.substitute(json_input, mockserver=MOCKSERVER_INFO)
assert result == expected_result
@pytest.mark.parametrize(
'json_input,expected_result',
[
(
({'client_url': {'$mockserver_https': '/path'}}),
({'client_url': 'https://localhost:456/path'}),
),
(
({'client_url': {'$mockserver_https': '/path', '$schema': False}}),
({'client_url': 'localhost:456/path'}),
),
],
)
def test_substitute_mockserver_https(json_input, expected_result):
result = json_util.substitute(
json_input, mockserver_https=MOCKSERVER_SSL_INFO,
)
assert result == expected_result
|
18805
|
import sys, os
external_libs = {'Cleverhans v1.0.0': "externals/cleverhans",
'Tensorflow-Model-Resnet': "externals/tensorflow-models",
}
project_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
for lib_name, lib_path in external_libs.items():
lib_path = os.path.join(project_path, lib_path)
if os.listdir(lib_path) == []:
cmd = "git submodule update --init --recursive"
print("Fetching external libraries...")
os.system(cmd)
if lib_name == 'Tensorflow-Model-Resnet':
lib_token_fpath = os.path.join(lib_path, 'resnet', '__init__.py')
if not os.path.isfile(lib_token_fpath):
open(lib_token_fpath, 'a').close()
sys.path.append(lib_path)
print("Located %s" % lib_name)
# print (sys.path)
|
18815
|
import torch
from torch import nn
class FlowSequential(nn.Sequential):
"""Forward pass with log determinant of the Jacobian."""
def forward(self, input, context=None):
total_log_prob = torch.zeros(input.size(0), device=input.device)
for block in self._modules.values():
input, log_prob = block(input, context)
total_log_prob += log_prob
return input, total_log_prob
def inverse(self, input, context=None):
total_log_prob = torch.zeros(input.size(0), device=input.device)
for block in reversed(self._modules.values()):
input, log_prob = block.inverse(input, context)
total_log_prob += log_prob
return input, total_log_prob
def get_memory():
torch.cuda.synchronize()
max_memory = torch.cuda.max_memory_allocated()
memory = torch.cuda.memory_allocated()
return memory / 10**9, max_memory / 10**9
class RealNVPSequential(nn.Sequential):
"""Assumes first and last module are CheckerSplit and CheckerUnsplit."""
def forward(self, input, context=None):
total_log_prob = torch.zeros(input.size(0), device=input.device)
modules = list(self._modules.values())
split = modules.pop(0)
concat = modules.pop()
transf, const = split(input)
for module in modules:
transf, const, log_prob = module(transf, const, context)
total_log_prob += log_prob
return concat(transf, const), total_log_prob
def inverse(self, input, context=None):
total_log_prob = torch.zeros(input.size(0), device=input.device)
modules = list(self._modules.values())
split = modules.pop(0)
concat = modules.pop()
transf, const = split(input)
for module in reversed(modules):
transf, const, log_prob = module.inverse(transf, const, context)
total_log_prob += log_prob
return concat(transf, const), total_log_prob
class SplitSequential(nn.Sequential):
"""Assumes first and last module are CheckerSplit and CheckerConcat."""
def forward(self, transf, const, context=None):
total_log_prob = torch.zeros(transf.size(0), device=transf.device)
for module in self._modules.values():
transf, const, log_prob = module(transf, const, context)
total_log_prob += log_prob
return transf, const, total_log_prob
def inverse(self, transf, const, context=None):
total_log_prob = torch.zeros(transf.size(0), device=transf.device)
for module in reversed(self._modules.values()):
transf, const, log_prob = module.inverse(transf, const, context)
total_log_prob += log_prob
return transf, const, total_log_prob
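# Illustrative sketch (not part of the original module): a minimal invertible block with the
# (output, log_det) interface that FlowSequential composes; the _Scale name is made up.
import math
class _Scale(nn.Module):
    def __init__(self, scale):
        super().__init__()
        self.scale = scale
    def forward(self, x, context=None):
        # log|det J| of x -> scale * x is dim * log(scale) per sample
        return x * self.scale, x.new_full((x.size(0),), math.log(self.scale) * x.size(1))
    def inverse(self, x, context=None):
        return x / self.scale, x.new_full((x.size(0),), -math.log(self.scale) * x.size(1))
if __name__ == "__main__":
    flow = FlowSequential(_Scale(2.0), _Scale(4.0))
    y, log_det = flow(torch.randn(5, 3))    # log_det sums the per-block terms
    x, inv_log_det = flow.inverse(y)        # blocks are applied in reverse order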
|
18819
|
from app.blogging import bp
from datetime import datetime
from flask import flash, redirect, url_for
from flask_login import current_user
@bp.before_request
def protect():
'''
Registers new function to Flask-Blogging Blueprint that protects
updates to make them only viewable by paid subscribers.
'''
if current_user.is_authenticated:
if datetime.today() <= current_user.expiration:
return None
else:
flash('You must have a paid-up subscription \
to view updates.', 'warning')
return redirect(url_for('main.support'))
else:
flash('Please login to view updates.', 'warning')
return redirect(url_for('auth.login'))
|
18828
|
from typing import List
class Solution:
def duplicateZeros(self, arr: List[int]) -> None:
"""
Do not return anything, modify arr in-place instead.
"""
i = 0
for num in list(arr):
if i >= len(arr): break
arr[i] = num
if not num:
i += 1
if i < len(arr):
arr[i] = num
i += 1
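# Illustrative usage (not part of the original): zeros are duplicated in place and the
# list keeps its original length, dropping elements shifted past the end.
arr = [1, 0, 2, 3, 0, 4, 5, 0]
Solution().duplicateZeros(arr)
assert arr == [1, 0, 0, 2, 3, 0, 0, 4]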
|
18834
|
try:
from rpython.rlib.debug import make_sure_not_resized # pylint: disable=W
except ImportError:
"NOT_RPYTHON"
def make_sure_not_resized(_):
pass
|
18878
|
import tensorflow as tf
from data.BaseReader import BaseReader
import numpy as np
class Base2DReader(BaseReader):
# inherit from BaseReader, implement different 2D cropping (cropping from 2D)
def __init__(self, objtype=0, shuffle=True, batch_size=1, crop_noise=False):
super(Base2DReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
def get(self, withPAF=True, read_image=True, imw=1920, imh=1080):
assert type(withPAF) == bool
assert self.objtype in (0, 1)
# produce data from slice_input_producer
flow_list = tf.train.slice_input_producer(list(self.tensor_dict.values()), shuffle=self.shuffle)
flow_dict = {key: flow_list[ik] for ik, key in enumerate(self.tensor_dict.keys())}
# build data dictionary
data_dict = {}
data_dict['img_dir'] = flow_dict['img_dirs']
PAF_given = False
if self.objtype == 0:
body2d = flow_dict['body']
data_dict['body_valid'] = flow_dict['body_valid']
data_dict['keypoint_uv_origin'] = body2d
if 'body_3d' in flow_dict:
data_dict['keypoint_xyz_origin'] = flow_dict['body_3d']
data_dict['keypoint_xyz_local'] = flow_dict['body_3d']
PAF_given = True
elif self.objtype == 1:
cond_left = tf.reduce_any(tf.cast(flow_dict['left_hand_valid'], dtype=tf.bool)) # 0 for right hand, 1 for left hand
hand2d = tf.cond(cond_left, lambda: flow_dict['left_hand'], lambda: flow_dict['right_hand']) # in world coordinate
hand2d = tf.cast(hand2d, tf.float32)
data_dict['keypoint_uv_origin'] = hand2d
data_dict['left_hand_valid'] = flow_dict['left_hand_valid']
data_dict['right_hand_valid'] = flow_dict['right_hand_valid']
if 'left_hand_3d' in flow_dict and 'right_hand_3d' in flow_dict:
hand3d = tf.cond(cond_left, lambda: flow_dict['left_hand_3d'], lambda: flow_dict['right_hand_3d'])
data_dict['keypoint_xyz_origin'] = hand3d
data_dict['keypoint_xyz_local'] = hand3d
PAF_given = True
# read image
if read_image:
img_file = tf.read_file(flow_dict['img_dirs'])
image = tf.image.decode_image(img_file, channels=3)
image = tf.image.pad_to_bounding_box(image, 0, 0, imh, imw)
image.set_shape((imh, imw, 3))
image = tf.cast(image, tf.float32) / 255.0 - 0.5
data_dict['image'] = image
if 'mask_dirs' in flow_dict:
mask_file = tf.read_file(flow_dict['mask_dirs'])
mask = tf.image.decode_image(mask_file, channels=3)
mask = tf.image.pad_to_bounding_box(mask, 0, 0, imh, imw)
mask.set_shape((imh, imw, 3))
mask = mask[:, :, 0]
mask = tf.cast(mask, tf.float32)
else:
mask = tf.ones((imh, imw), dtype=tf.float32)
if 'other_bbox' in flow_dict:
ob = flow_dict['other_bbox']
Xindmap = tf.tile(tf.expand_dims(tf.range(imw, dtype=tf.int32), 0), [imh, 1])
Xindmap = tf.tile(tf.expand_dims(Xindmap, 2), [1, 1, 20])
Yindmap = tf.tile(tf.expand_dims(tf.range(imh, dtype=tf.int32), 1), [1, imw])
Yindmap = tf.tile(tf.expand_dims(Yindmap, 2), [1, 1, 20])
x_out = tf.logical_or(tf.less(Xindmap, ob[:, 0]), tf.greater_equal(Xindmap, ob[:, 2]))
y_out = tf.logical_or(tf.less(Yindmap, ob[:, 1]), tf.greater_equal(Yindmap, ob[:, 3]))
out = tf.cast(tf.logical_or(x_out, y_out), tf.float32)
out = tf.reduce_min(out, axis=2)
mask = tf.minimum(mask, out)
data_dict['mask'] = mask
if self.objtype in (0, 1):
if self.objtype == 0:
keypoints = body2d
valid = flow_dict['body_valid']
elif self.objtype == 1:
keypoints = hand2d
body2d = hand2d
valid = tf.cond(cond_left, lambda: flow_dict['left_hand_valid'], lambda: flow_dict['right_hand_valid'])
data_dict['hand_valid'] = valid
if PAF_given:
body3d = hand3d
crop_center2d, scale2d = self.calc_crop_scale2d(keypoints, valid)
data_dict['crop_center2d'] = crop_center2d
data_dict['scale2d'] = scale2d
if self.rotate_augmentation:
print('using rotation augmentation')
rotate_angle = tf.random_uniform([], minval=-np.pi * 40 / 180, maxval=np.pi * 40 / 180)
R2 = tf.reshape(tf.stack([tf.cos(rotate_angle), -tf.sin(rotate_angle), tf.sin(rotate_angle), tf.cos(rotate_angle)]), [2, 2])
body2d = tf.matmul((body2d - crop_center2d), R2) + crop_center2d
data_dict['keypoint_uv_origin'] = body2d
if PAF_given:
R3 = tf.reshape(tf.stack([tf.cos(rotate_angle), -tf.sin(rotate_angle), 0., tf.sin(rotate_angle), tf.cos(rotate_angle), 0., 0., 0., 1.]), [3, 3])
body3d = tf.matmul(body3d, R3)
data_dict['keypoint_xyz_origin'] = body3d
data_dict['keypoint_xyz_local'] = body3d
body2d_local = self.update_keypoint2d(body2d, crop_center2d, scale2d)
data_dict['keypoint_uv_local'] = body2d_local
if read_image:
image_crop = self.crop_image(image, crop_center2d, scale2d)
data_dict['image_crop'] = image_crop
mask_crop = self.crop_image(tf.stack([mask] * 3, axis=2), crop_center2d, scale2d)
data_dict['mask_crop'] = mask_crop[:, :, 0]
if self.rotate_augmentation:
data_dict['image_crop'] = tf.contrib.image.rotate(data_dict['image_crop'], rotate_angle)
data_dict['mask_crop'] = tf.contrib.image.rotate(data_dict['mask_crop'], rotate_angle)
if self.blur_augmentation:
print('using blur augmentation')
rescale_factor = tf.random_uniform([], minval=0.1, maxval=1.0)
rescale = tf.cast(rescale_factor * self.crop_size, tf.int32)
resized_image = tf.image.resize_images(data_dict['image_crop'], [rescale, rescale])
data_dict['image_crop'] = tf.image.resize_images(resized_image, [self.crop_size, self.crop_size])
# create 2D gaussian map
scoremap2d = self.create_multiple_gaussian_map(body2d_local[:, ::-1], (self.crop_size, self.crop_size), self.sigma, valid_vec=valid, extra=True) # coord_hw, imsize_hw
data_dict['scoremap2d'] = scoremap2d
if withPAF:
from utils.PAF import createPAF
num_keypoint = body2d_local.get_shape().as_list()[0]
zeros = tf.zeros([num_keypoint, 1], dtype=tf.float32)
if PAF_given:
data_dict['PAF'] = createPAF(body2d_local, body3d, self.objtype, (self.crop_size, self.crop_size), normalize_3d=True, valid_vec=valid)
data_dict['PAF_type'] = tf.ones([], dtype=bool) # 0 for 2D PAF, 1 for 3D PAF
else:
data_dict['PAF'] = createPAF(body2d_local, tf.concat([body2d, zeros], axis=1), self.objtype, (self.crop_size, self.crop_size), normalize_3d=False, valid_vec=valid)
data_dict['PAF_type'] = tf.zeros([], dtype=bool) # 0 for 2D PAF, 1 for 3D PAF
if self.objtype == 1: # this is hand, flip the image if it is right hand
data_dict['image_crop'] = tf.cond(cond_left, lambda: data_dict['image_crop'], lambda: data_dict['image_crop'][:, ::-1, :])
data_dict['mask_crop'] = tf.cond(cond_left, lambda: data_dict['mask_crop'], lambda: data_dict['mask_crop'][:, ::-1])
data_dict['scoremap2d'] = tf.cond(cond_left, lambda: data_dict['scoremap2d'], lambda: data_dict['scoremap2d'][:, ::-1, :])
data_dict['keypoint_uv_local'] = tf.cond(cond_left, lambda: data_dict['keypoint_uv_local'],
lambda: tf.constant([self.crop_size, 0], tf.float32) + tf.constant([-1, 1], tf.float32) * data_dict['keypoint_uv_local'])
if withPAF:
data_dict['PAF'] = tf.cond(cond_left, lambda: data_dict['PAF'],
lambda: (data_dict['PAF'][:, ::-1, :]) * tf.constant([-1, 1, 1] * (data_dict['PAF'].get_shape().as_list()[2] // 3), dtype=tf.float32))
names, tensors = zip(*data_dict.items())
if self.shuffle:
tensors = tf.train.shuffle_batch_join([tensors],
batch_size=self.batch_size,
capacity=100,
min_after_dequeue=50,
enqueue_many=False)
else:
tensors = tf.train.batch_join([tensors],
batch_size=self.batch_size,
capacity=100,
enqueue_many=False)
return dict(zip(names, tensors))
|
18907
|
import sys
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # NOQA
import seaborn # NOQA
from spherecluster import sample_vMF
plt.ion()
n_clusters = 3
mus = np.random.randn(3, n_clusters)
mus, r = np.linalg.qr(mus, mode='reduced')
kappas = [15, 15, 15]
num_points_per_class = 250
Xs = []
for nn in range(n_clusters):
new_X = sample_vMF(mus[nn], kappas[nn], num_points_per_class)
Xs.append(new_X.T)
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(
1, 1, 1, aspect='equal', projection='3d',
adjustable='box-forced', xlim=[-1.1, 1.1], ylim=[-1.1, 1.1],
zlim=[-1.1, 1.1]
)
colors = ['b', 'r', 'g']
for nn in range(n_clusters):
ax.scatter(Xs[nn][0, :], Xs[nn][1, :], Xs[nn][2, :], c=colors[nn])
ax.set_aspect('equal')
plt.axis('off')
plt.show()
def r_input(val=None):
val = val or ''
if sys.version_info[0] >= 3:
        return input(val)
return raw_input(val)
r_input()
|
18932
|
class GlobalOptions:
""" Class to evaluate global options for example: project path"""
@staticmethod
def evaluate_project_path(path):
""" Method to parse the project path provided by the user"""
first_dir_from_end = None
if path[-1] != "/":
path = path + "/"
new_path = path.rsplit('/')[-2]
for directory in new_path[::-1]:
if directory != " ":
first_dir_from_end = new_path
break
return first_dir_from_end
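# Illustrative usage (not part of the original): the last directory of the path is
# returned, with or without a trailing slash. The example path is hypothetical.
assert GlobalOptions.evaluate_project_path("/home/user/myproject") == "myproject"
assert GlobalOptions.evaluate_project_path("/home/user/myproject/") == "myproject"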
|
18941
|
from src.commons.big_query.copy_job_async.result_check.result_check_request import \
ResultCheckRequest
from src.commons.big_query.copy_job_async.task_creator import TaskCreator
class BigQueryJobReference(object):
def __init__(self, project_id, job_id, location):
self.project_id = project_id
self.job_id = job_id
self.location = location
def __str__(self):
return "BigQueryJobReference(projectId:{}, job_id:{}, location: {})" \
.format(self.project_id, self.job_id, self.location)
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return type(other) is BigQueryJobReference \
and self.project_id == other.project_id \
and self.job_id == other.job_id \
and self.location == other.location
def __ne__(self, other):
return not (self == other)
def create_post_copy_action(self, copy_job_request):
TaskCreator.create_copy_job_result_check(
ResultCheckRequest(
task_name_suffix=copy_job_request.task_name_suffix,
copy_job_type_id=copy_job_request.copy_job_type_id,
job_reference=self,
retry_count=copy_job_request.retry_count,
post_copy_action_request=copy_job_request.post_copy_action_request
)
)
def to_json(self):
return dict(project_id=self.project_id,
job_id=self.job_id,
location=self.location)
@classmethod
def from_json(cls, json):
return BigQueryJobReference(project_id=json["project_id"],
job_id=json["job_id"],
location=json["location"])
|
18943
|
from django.contrib import admin, messages
from django.shortcuts import render
from django.utils.translation import gettext_lazy as _
from inline_actions.actions import DefaultActionsMixin, ViewAction
from inline_actions.admin import InlineActionsMixin, InlineActionsModelAdminMixin
from . import forms
from .models import Article, Author, AuthorProxy
class UnPublishActionsMixin(object):
def get_inline_actions(self, request, obj=None):
actions = super(UnPublishActionsMixin, self).get_inline_actions(request, obj)
if obj:
if obj.status == Article.DRAFT:
actions.append('publish')
elif obj.status == Article.PUBLISHED:
actions.append('unpublish')
return actions
def publish(self, request, obj, parent_obj=None):
obj.status = Article.PUBLISHED
obj.save()
messages.info(request, _("Article published."))
publish.short_description = _("Publish") # type: ignore
def unpublish(self, request, obj, parent_obj=None):
obj.status = Article.DRAFT
obj.save()
messages.info(request, _("Article unpublished."))
unpublish.short_description = _("Unpublish") # type: ignore
class TogglePublishActionsMixin(object):
def get_inline_actions(self, request, obj=None):
actions = super(TogglePublishActionsMixin, self).get_inline_actions(
request=request, obj=obj
)
actions.append('toggle_publish')
return actions
def toggle_publish(self, request, obj, parent_obj=None):
if obj.status == Article.DRAFT:
obj.status = Article.PUBLISHED
else:
obj.status = Article.DRAFT
obj.save()
status = 'unpublished' if obj.status == Article.DRAFT else 'published'
messages.info(request, _("Article {}.".format(status)))
def get_toggle_publish_label(self, obj):
label = 'publish' if obj.status == Article.DRAFT else 'unpublish'
return 'Toggle {}'.format(label)
def get_toggle_publish_css(self, obj):
return 'button object-tools' if obj.status == Article.DRAFT else 'default'
class ChangeTitleActionsMixin(object):
def get_inline_actions(self, request, obj=None):
actions = super(ChangeTitleActionsMixin, self).get_inline_actions(request, obj)
actions.append('change_title')
return actions
def change_title(self, request, obj, parent_obj=None):
        # explicitly check whether the submit button has been pressed
if '_save' in request.POST:
form = forms.ChangeTitleForm(request.POST, instance=obj)
form.save()
return None # return back to list view
elif '_back' in request.POST:
return None # return back to list view
else:
form = forms.ChangeTitleForm(instance=obj)
return render(request, 'change_title.html', context={'form': form})
class ArticleInline(
DefaultActionsMixin,
UnPublishActionsMixin,
TogglePublishActionsMixin,
InlineActionsMixin,
admin.TabularInline,
):
model = Article
fields = (
'title',
'status',
)
readonly_fields = (
'title',
'status',
)
def has_add_permission(self, request, obj=None):
return False
class ArticleNoopInline(InlineActionsMixin, admin.TabularInline):
model = Article
fields = (
'title',
'status',
)
readonly_fields = (
'title',
'status',
)
def get_inline_actions(self, request, obj=None):
actions = super(ArticleNoopInline, self).get_inline_actions(
request=request, obj=obj
)
actions.append('noop_action')
return actions
def noop_action(self, request, obj, parent_obj=None):
pass
@admin.register(AuthorProxy)
class AuthorMultipleInlinesAdmin(InlineActionsModelAdminMixin, admin.ModelAdmin):
inlines = [ArticleInline, ArticleNoopInline]
list_display = ('name',)
inline_actions = None
@admin.register(Author)
class AuthorAdmin(InlineActionsModelAdminMixin, admin.ModelAdmin):
inlines = [ArticleInline]
list_display = ('name',)
inline_actions = None
@admin.register(Article)
class ArticleAdmin(
UnPublishActionsMixin,
TogglePublishActionsMixin,
ChangeTitleActionsMixin,
ViewAction,
InlineActionsModelAdminMixin,
admin.ModelAdmin,
):
list_display = ('title', 'status', 'author')
|
18997
|
import ipywidgets as widgets
from traitlets import Unicode, Int, validate
import os
import json
from datetime import datetime,timedelta
from IPython.display import display
from IPython.display import Javascript
from IPython.display import HTML
from cognipy.ontology import Ontology
from IPython.display import clear_output
_JS_initialized = False
def _InitJS():
global _JS_initialized
if _JS_initialized:
return
with open(os.path.dirname(os.path.abspath(__file__))+"/edit.js", 'r') as file:
_JS_initialized = True
display( Javascript(file.read()) )
display( HTML("Welcome to CogniPy") )
class OntoeditWidget(widgets.DOMWidget):
_view_name = Unicode('OntoeditView').tag(sync=True)
_model_name = Unicode('OntoeditModel').tag(sync=True)
_view_module = Unicode('ontoedit').tag(sync=True)
_model_module = Unicode('ontoedit').tag(sync=True)
value = Unicode('').tag(sync=True)
cursor = Int(0).tag(sync=True)
dot = Int(0).tag(sync=True)
hints = Unicode('').tag(sync=True)
hintsX = Int(0).tag(sync=True)
hintT = Unicode('').tag(sync=True)
def escape(html):
    """Returns the given HTML with ampersands, quotes and carets encoded."""
    return html.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;')
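# Illustrative usage (not part of the original module): HTML-sensitive characters are
# replaced by their entities before the hints are rendered in the widget.
assert escape('<b>"x" & y</b>') == '&lt;b&gt;&quot;x&quot; &amp; y&lt;/b&gt;'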
from functools import reduce
def getcommonletters(strlist):
return ''.join([x[0] for x in zip(*strlist) \
if reduce(lambda a,b:(a == b) and a or None,x)])
def findcommonstart(strlist):
strlist = strlist[:]
prev = None
while True:
common = getcommonletters(strlist)
if common == prev:
break
strlist.append(common)
prev = common
return getcommonletters(strlist)
def CnlEditBox(snap_filename,ontol = None, height='300px'):
_InitJS()
e=widgets.Output()
onto = ontol
def reload_onto():
nonlocal onto,ontol
if ontol is None:
if not os.path.exists(snap_filename):
onto = Ontology("cnl/string","Every thing is a thing.")
else:
onto = Ontology("cnl/file",snap_filename,stop_on_error=False)
with e:
clear_output()
if onto.get_load_error() is not None:
print(str(onto.get_load_error()))
reload_onto()
if not os.path.exists(snap_filename):
open(snap_filename, 'a').close()
def autoCompl(s):
pos=s.rfind('.', 0, len(s))
pos=0 if pos<0 else pos+1
inn=s[pos:len(s)].lstrip(' \n\t')
ac= onto.autocomplete(inn)
return ac
reloading = False
def onChange(change):
# print(change)
nonlocal reloading
if change.name=="value":
if reloading:
reloading = False
while True:
try:
with open(snap_filename, 'w') as file:
file.write(change.new)
break
except:
continue
reload_onto()
elif change.name=="cursor":
s = change.owner.value[0:change.new]
acl=[]
if onto is None:
return
#acl=['!!!SYNTAX ERROR!!!\r\n'+syntax_error]
else:
acl=autoCompl(s)
acl.sort()
options=[escape(x) for x in acl]
oopts = [o for o in acl if o[0]!='<']
change.owner.hints="<br/>".join(options)
pos = max(s.rfind(i) for i in [' ','\t', '\n', '.'])
change.owner.hintsX=pos+1
change.owner.hintT=findcommonstart(oopts)
elif change.name=="dot":
reloading = True
txt = None
with open(snap_filename, 'r') as file:
txt = file.read()
w=OntoeditWidget(
value = txt,
placeholder='Type something',
disabled=False,
layout=widgets.Layout(width='90%', height= '100%'),
style={'description_width': 'initial'}
)
o=widgets.Output()
w.observe(onChange, names=['cursor','value','dot'])
xx= widgets.VBox([e,w,o], layout={'height': height})
xx.getvalue=lambda : w.value
return xx
def CnlQueryForConcept(snap_filename,onto):
_InitJS()
if not os.path.exists(snap_filename):
open(snap_filename, 'a').close()
def autoCompl(onto,s):
pos=s.rfind('.', 0, len(s))
pos=0 if pos<0 else pos+1
return onto.autocomplete("Every-single-thing that is "+s)
def onChange(change):
# print(change)
if change.name=="value":
while True:
try:
with open(snap_filename, 'w') as file:
file.write(change.new)
break
except:
continue
elif change.name=="cursor":
s = change.owner.value[0:change.new]
acl=autoCompl(onto,s)
acl.sort()
options=[escape(x) for x in acl]
oopts = [o for o in acl if o[0]!='<']
change.owner.hints="<br/>".join(options)
pos = max(s.rfind(i) for i in [' ','\t', '\n', '.'])
change.owner.hintsX=pos+1
change.owner.hintT=findcommonstart(oopts)
txt = None
with open(snap_filename, 'r') as file:
txt = file.read()
w=OntoeditWidget(
value = txt,
placeholder='Type something',
disabled=False,
layout=widgets.Layout(width='90%', height= '100%'),
style={'description_width': 'initial'}
)
w.observe(onChange, names=['cursor','value'])
o=widgets.Output()
xx= widgets.VBox([w,o], layout={'height': '100px'})
xx.getvalue=lambda : w.value
return xx
|
19063
|
import json
from django.core.management.base import BaseCommand
from 臺灣言語平臺.正規化團隊模型 import 正規化sheet表
from django.conf import settings
class Command(BaseCommand):
help = '加sheet的json'
def add_arguments(self, parser):
parser.add_argument(
'服務帳戶json',
type=str,
help='google developers console下載的服務帳戶json'
)
parser.add_argument(
'網址',
type=str,
help='google sheet的網址'
)
def handle(self, *args, **參數):
with open(參數['服務帳戶json']) as 檔案:
服務帳戶資料 = json.load(檔案)
正規化sheet表.加sheet(
語言腔口=settings.MOTHER_TONGUE,
key_file_name=參數['服務帳戶json'],
url=參數['網址'],
)
self.stdout.write(
'愛記得到「Google Sheets右上角的Share」裡分享「Can edit」的權限予 {} 喲!!'.format(
服務帳戶資料['client_email']
)
)
|
19150
|
from math import inf
from typing import List
class Solution:
def coinChange(self, coins: List[int], amount: int) -> int:
dp = [inf] * (amount + 1)
dp[0] = 0
for coin in coins:
for x in range(coin, amount + 1):
dp[x] = min(dp[x], dp[x - coin] + 1)
return dp[amount] if dp[amount] != inf else -1
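# Illustrative usage (not part of the original): the fewest coins for amount 11 from
# denominations {1, 2, 5} is 3 (5 + 5 + 1); unreachable amounts return -1.
assert Solution().coinChange([1, 2, 5], 11) == 3
assert Solution().coinChange([2], 3) == -1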
|
19156
|
import pytest
from ..model_base_test import ModelBaseTest
from tests.sampleresponse.cardless_credit import cardless_credit_payment_response
from xendit.models import CardlessCredit, CardlessCreditType
# fmt: off
class TestCreateCardlessCreditPayment(ModelBaseTest):
@pytest.fixture
def default_cardless_credit_data(self):
tested_class = CardlessCredit
class_name = "CardlessCredit"
method_name = "create_payment"
http_method_name = "post"
cardless_credit_items = []
cardless_credit_items.append(
CardlessCredit.helper_create_item(
id="item-123",
name="Phone Case",
price=200000,
type="Smartphone",
url="http://example.com/phone/phone_case",
quantity=2,
)
)
shipping_address = CardlessCredit.helper_create_shipping_address(
first_name="<NAME>",
last_name="<NAME>",
address="Jl Teknologi No. 12",
city="Jakarta",
postal_code="12345",
phone="081513114262",
country_code="IDN",
)
customer_details = CardlessCredit.helper_create_customer_details(
first_name="customer first name",
last_name="customer last name",
email="<EMAIL>",
phone="0812332145",
)
args = ()
kwargs = {
"cardless_credit_type": CardlessCreditType.KREDIVO,
"external_id": "mock-id-123",
"amount": 10000,
"payment_type": "3_months",
"items": cardless_credit_items,
"customer_details": customer_details,
"shipping_address": shipping_address,
"redirect_url": "https://mock-my-shop.com/home",
"callback_url": "https://mock-my-shop.com/callback",
"x_idempotency_key": "test_idemp_123",
}
params = (args, kwargs)
url = "/cardless-credit"
expected_correct_result = cardless_credit_payment_response()
return (tested_class, class_name, method_name, http_method_name, url, params, expected_correct_result)
@pytest.fixture
def api_requestor_request_data(self, default_cardless_credit_data):
tested_class, class_name, method_name, http_method_name, url, params, _ = default_cardless_credit_data
headers = {"X-IDEMPOTENCY-KEY": "test_idemp_123"}
body = {
"cardless_credit_type": "KREDIVO",
"external_id": "mock-id-123",
"amount": 10000,
"payment_type": "3_months",
"items": [
{
"id": "item-123",
"name": "<NAME>",
"price": 200000,
"type": "Smartphone",
"url": "http://example.com/phone/phone_case",
"quantity": 2,
}
],
"customer_details": {
"first_name": "customer <NAME>",
"last_name": "<NAME>",
"email": "<EMAIL>",
"phone": "0812332145",
},
"shipping_address": {
"first_name": "<NAME>",
"last_name": "<NAME>",
"address": "Jl Teknologi No. 12",
"city": "Jakarta",
"postal_code": "12345",
"phone": "081513114262",
"country_code": "IDN",
},
"redirect_url": "https://mock-my-shop.com/home",
"callback_url": "https://mock-my-shop.com/callback",
}
return (tested_class, class_name, method_name, http_method_name, url, params, headers, body)
@pytest.mark.parametrize("mock_correct_response", [cardless_credit_payment_response()], indirect=True)
def test_return_cardless_credit_payment_on_correct_params(
self, mocker, mock_correct_response, default_cardless_credit_data
):
self.run_success_return_test_on_xendit_instance(mocker, mock_correct_response, default_cardless_credit_data)
def test_raise_xendit_error_on_response_error(
self, mocker, mock_error_request_response, default_cardless_credit_data
):
self.run_raises_error_test_on_xendit_instance(mocker, mock_error_request_response, default_cardless_credit_data)
@pytest.mark.parametrize("mock_correct_response", [cardless_credit_payment_response()], indirect=True)
def test_return_cardless_credit_payment_on_correct_params_and_global_xendit(
self, mocker, mock_correct_response, default_cardless_credit_data
):
self.run_success_return_test_on_global_config(mocker, mock_correct_response, default_cardless_credit_data)
def test_raise_xendit_error_on_response_error_and_global_xendit(
self, mocker, mock_error_request_response, default_cardless_credit_data
):
self.run_raises_error_test_on_global_config(mocker, mock_error_request_response, default_cardless_credit_data)
@pytest.mark.parametrize("mock_correct_response", [cardless_credit_payment_response()], indirect=True)
def test_send_correct_request_to_api_requestor(self, mocker, mock_correct_response, api_requestor_request_data):
self.run_send_correct_request_to_api_requestor(mocker, mock_correct_response, api_requestor_request_data)
# fmt: on
|
19175
|
from typing import List
class Solution:
def minSwapsCouples(self, row: List[int]) -> int:
parent=[i for i in range(len(row))]
for i in range(1,len(row),2):
parent[i]-=1
def findpath(u,parent):
if parent[u]!=u:
parent[u]=findpath(parent[u],parent)
return parent[u]
for i in range(0,len(row),2):
u_parent=findpath(row[i],parent)
v_parent=findpath(row[i+1],parent)
parent[u_parent]=v_parent
return (len(row)//2)-sum([1 for i in range(0,len(row),2) if parent[i]==parent[i+1]==i])
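# Illustrative usage (not part of the original): one swap seats couples (0, 1) and (2, 3)
# together; an already-coupled row needs no swaps.
assert Solution().minSwapsCouples([0, 2, 1, 3]) == 1
assert Solution().minSwapsCouples([3, 2, 0, 1]) == 0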
|
19209
|
import pytest
from wikidict.render import parse_word
from wikidict.utils import process_templates
@pytest.mark.parametrize(
"word, pronunciations, gender, etymology, definitions",
[
("ababalhar", [], "", ["De baba."], ["<i>(popular)</i> babar; conspurcar"]),
(
"alguém",
["aw.ˈgẽj"],
"",
["Do latim <i>alĭquem</i> <sup>(la)</sup>."],
["pessoa não identificada"],
),
(
"algo",
[],
"",
[],
["um pouco, de certo modo", "objeto (não-identificado) de que se fala"],
),
(
"baiano",
[],
"",
["Derivado de Bahia, mais o sufixo ano, com perda do H."],
[
"do Estado da Bahia, Brasil",
"natural ou habitante do Estado da Bahia, Brasil",
"<i>(São Paulo, Brasil; popular; pejorativo)</i> pessoa que se veste de maneira incomum ou brega; fora da moda", # noqa
],
),
(
"cabrum",
[],
"mf",
['Do latim <i>caprunu</i> <sup>(la)</sup> "cabra".'],
[
"<i>(Pecuária)</i> de cabras:",
"<i>(Regionalismo, Brasil)</i> marido de mulher adúltera",
"indica estrondo",
],
),
(
"COPOM",
[],
"m",
[],
[
"<b>C</b>entro de <b>O</b>perações da <b>Po</b>lícia <b>M</b>ilitar",
"<i>(Brasil)</i> <b>Co</b>mitê de <b>Po</b>lítica <b>M</b>onetária",
],
),
(
"dezassete",
[],
"",
["Contração do latim vulgar <i>decem</i> + <i>ac</i> + <i>septem</i>."],
[
"o número dezassete (17, XVII)",
"nota correspondente a dezassete valores",
"pessoa ou coisa que apresenta o número dezassete numa ordenação",
"vide dezessete",
],
),
(
"etc",
[],
"",
[],
[
'abreviação do latim <i>et cetera</i>, que significa "e outros", "e os restantes" e "e outras coisas mais"', # noqa
],
),
(
"-ista",
[],
"",
[
"Do grego antigo <i>-ιστεσ</i> (<i>-istes</i>) através do latim <i>-ista</i> através do francês antigo <i>-iste</i>." # noqa
],
[
"que segue um princípio",
"que é estudioso ou profissional de um assunto",
"que usa algo",
"que tem uma visão preconceituosa",
],
),
(
"neo-",
[],
"",
["Do grego antigo <i>νέος</i>."],
[
"exprime a ideia de <i>novo</i>",
"<b>Nota:</b> Liga-se por hífen ao morfema seguinte quando este começa por <b>vogal</b>, <b>h</b>, <b>r</b> ou <b>s</b>.", # noqa
],
),
("para", [], "", [], ["exprime fim, destino, lugar, tempo, direção etc"]),
(
"paulista",
[],
"",
[],
[
"diz-se de pessoa de origem do Estado de São Paulo, Brasil",
"diz-se de artigo ou objeto do Estado de São Paulo",
"pessoa de origem do Estado de São Paulo, Brasil",
"artigo ou objeto do Estado de São Paulo",
],
),
("tenui-", [], "", [], ["variante ortográfica de <b>tenu-</b>"]),
(
"to",
[],
"",
[],
[
'<i>(antigo)</i> contração do pronome pessoal "te" com o pronome pessoal ou demonstrativo "o"',
"<i>(coloquial e Brasil)</i> forma aferética (muito comum na linguagem falada) de estou",
],
),
(
"ũa",
[],
"",
[
"Do Latim <i>una-</i>: <i>una-</i> deu <b>ũa</b> por queda do <b>n</b> com a nasalação do <b>ũ</b>."
],
["ortografia antiga de uma"],
),
("UTC", [], "", [], ["<i>(estrangeirismo)</i> ver TUC"]),
],
)
def test_parse_word(word, pronunciations, gender, etymology, definitions, page):
"""Test the sections finder and definitions getter."""
code = page(word, "pt")
details = parse_word(word, code, "pt", force=True)
assert pronunciations == details.pronunciations
assert gender == details.gender
assert etymology == details.etymology
assert definitions == details.definitions
@pytest.mark.parametrize(
"wikicode, expected",
[
("{{AFI|/k/|pt}}", "/k/"),
("{{barra de cor|yellow|#FFFF00}}", "[RGB #FFFF00]"),
("{{escopo2|Informática}}", "<i>(Informática)</i>"),
("{{escopo2|Brasil|governo}}", "<i>(Brasil)</i>"),
("{{escopoCat|Árvore|pt}}", "<i>(Botânica)</i>"),
("{{escopoCat|Náutica|pt}}", "<i>(Náutica)</i>"),
("{{escopoCatLang|Alimentação|pt}}", "<i>(Culinária)</i>"),
("{{escopoCatLang|Verbo auxiliar|pt}}", "<i>(Verbo auxiliar)</i>"),
("{{escopoUso|Portugal|pt}}", "<i>(Portugal)</i>"),
("{{escopoUso|Coloquialismo|pt}}", "<i>(coloquialismo)</i>"),
("{{fem|heliostático}}", "feminino de <b>heliostático</b>"),
("{{fl|la|occŭlo}}", "occŭlo"),
("{{l|pt|usar|usar}}", "usar"),
("{{l.o.|jurídico|jurídica}}", "jurídica"),
("{{l.s.|uso}}", "uso"),
("{{link preto|ciconiforme}}", "ciconiforme"),
("{{ll|publicar}}", "publicar"),
("{{m|ar|شيشة|tr=šīša}}", "<i>masculino</i>"),
("{{mq|palavra}}", "o mesmo que <b>palavra</b>"),
("{{mq|word|en}}", "o mesmo que <i>word</i>"),
("{{PE|cu}}", "cu <sup>(português de Portugal)</sup>"),
("{{r|la|basium|basĭum}}", "basĭum"),
("{{r.l|la|utor|ūtor}}", "ūtor"),
("{{varort|tenu-|pt}}", "variante ortográfica de <b>tenu-</b>"),
],
)
def test_process_templates(wikicode, expected):
"""Test templates handling."""
assert process_templates("foo", wikicode, "pt") == expected
|
19271
|
import os
import uuid
import logging
import json
from json import JSONEncoder
from pythonjsonlogger import jsonlogger
from datetime import datetime
from logging.config import dictConfig
# Custom JSON encoder which enforce standard ISO 8601 format, UUID format
class ModelJsonEncoder(JSONEncoder):
def default(self, o):
        if isinstance(o, uuid.UUID):
return str(o)
if isinstance(o, datetime):
return o.isoformat()
return json.JSONEncoder.default(self, o)
class LogFilter(logging.Filter):
def __init__(self, service=None, instance=None):
self.service = service
self.instance = instance
def filter(self, record):
record.service = self.service
record.instance = self.instance
return True
class JsonLogFormatter(jsonlogger.JsonFormatter):
def add_fields(self, log_record, record, message_dict):
super().add_fields(log_record, record, message_dict)
# Add timestamp field with default : now
if not log_record.get('timestamp'):
now = datetime.utcnow().isoformat()
log_record['timestamp'] = now
# Add level field
if log_record.get('level'):
log_record['level'] = log_record['level'].upper()
else:
log_record['level'] = record.levelname
# Add type field for internal logs
if not log_record.get('type'):
log_record['type'] = 'internal'
# Configure Logging
def configure_logging(level='DEBUG', service=None, instance=None):
dictConfig({
'version': 1,
'formatters': {'default': {
'()': JsonLogFormatter,
'format': '%(timestamp)s %(level)s %(service)s %(instance)s %(type)s %(message)s',
'json_encoder': ModelJsonEncoder
}},
'filters': {'default': {
'()': LogFilter,
'service': service,
'instance': instance
}},
'handlers': {'default_handler': {
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stdout',
'filters': ['default'],
'formatter': 'default'
}},
'root': {
'level': level,
'handlers': ['default_handler']
}
})
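# Illustrative usage sketch (not part of the original module): configuring the JSON logger
# at startup; `service` and `instance` are attached to every record by LogFilter, and
# records without an explicit `type` field are tagged 'internal' by the formatter.
# The service/instance names below are hypothetical.
if __name__ == '__main__':
    configure_logging(level='INFO', service='demo-service', instance='worker-1')
    logging.getLogger(__name__).info('service started')
    logging.getLogger(__name__).info('user action', extra={'type': 'audit'})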
|
19315
|
import iota_client
client = iota_client.Client()
print(
client.get_output("a22cba0667c922cbb1f8bdcaf970b2a881ccd6e88e2fcce50374de2aac7c37720000")
)
|
19318
|
from datetime import datetime
{
datetime(2019, 12, 30, 0, 0): 35,
datetime(2020, 1, 6, 0, 0): 27,
datetime(2020, 1, 13, 0, 0): 39,
datetime(2020, 1, 20, 0, 0): 120,
datetime(2020, 1, 27, 0, 0): 73,
datetime(2020, 2, 3, 0, 0): 48,
datetime(2020, 2, 10, 0, 0): 35,
datetime(2020, 2, 17, 0, 0): 89,
datetime(2020, 2, 24, 0, 0): 81,
datetime(2020, 3, 2, 0, 0): 116,
datetime(2020, 3, 9, 0, 0): 90,
datetime(2020, 3, 16, 0, 0): 195,
datetime(2020, 3, 23, 0, 0): 406,
datetime(2020, 3, 30, 0, 0): 642,
datetime(2020, 4, 6, 0, 0): 652,
datetime(2020, 4, 13, 0, 0): 684,
datetime(2020, 4, 20, 0, 0): 1393,
datetime(2020, 4, 27, 0, 0): 1755,
datetime(2020, 5, 4, 0, 0): 1251,
datetime(2020, 5, 11, 0, 0): 1566,
datetime(2020, 5, 18, 0, 0): 1986,
datetime(2020, 5, 25, 0, 0): 2141,
datetime(2020, 6, 1, 0, 0): 1581,
datetime(2020, 6, 8, 0, 0): 1640,
datetime(2020, 6, 15, 0, 0): 1406,
datetime(2020, 6, 22, 0, 0): 1902,
datetime(2020, 6, 29, 0, 0): 2078,
datetime(2020, 7, 6, 0, 0): 1821,
datetime(2020, 7, 13, 0, 0): 1854,
datetime(2020, 7, 20, 0, 0): 2308,
datetime(2020, 7, 27, 0, 0): 2637,
datetime(2020, 8, 3, 0, 0): 2275,
datetime(2020, 8, 10, 0, 0): 1717,
datetime(2020, 8, 17, 0, 0): 1474,
datetime(2020, 8, 24, 0, 0): 2234,
datetime(2020, 8, 31, 0, 0): 2275,
datetime(2020, 9, 7, 0, 0): 2180,
datetime(2020, 9, 14, 0, 0): 1824,
datetime(2020, 9, 21, 0, 0): 1609,
datetime(2020, 9, 28, 0, 0): 1714,
datetime(2020, 10, 5, 0, 0): 2849,
datetime(2020, 10, 12, 0, 0): 1425,
datetime(2020, 10, 19, 0, 0): 569,
datetime(2020, 10, 26, 0, 0): 210,
datetime(2020, 11, 2, 0, 0): 331,
datetime(2020, 11, 9, 0, 0): 229,
datetime(2020, 11, 16, 0, 0): 162,
datetime(2020, 11, 23, 0, 0): 164,
datetime(2020, 11, 30, 0, 0): 102,
datetime(2020, 12, 7, 0, 0): 75,
datetime(2020, 12, 14, 0, 0): 55,
datetime(2020, 12, 21, 0, 0): 150,
datetime(2020, 12, 28, 0, 0): 11,
}
|
19320
|
import numpy as np
from numpy import genfromtxt
import matplotlib.pyplot as plt
import os
time_steps = 8760
file_name = "../input_data/Ecofys_ECN_heating_profiles.csv"
data = zip(*genfromtxt(file_name, delimiter=','))
names = ["tussenwoning_laag", "tussenwoning_midden", "tussenwoning_hoog",
"hoekwoning_laag", "hoekwoning_midden", "hoekwoning_hoog",
"twee_onder_een_kapwoning_laag", "twee_onder_een_kapwoning_midden", "twee_onder_een_kapwoning_hoog",
"appartement_laag", "appartement_midden", "appartement_hoog",
"vrijstaande_woning_laag", "vrijstaande_woning_midden", "vrijstaande_woning_hoog"]
profiles = []
totals = []
counter = 0
for profile in data:
if len(profile) == time_steps:
profiles.append(profile)
totals.append(np.sum(profile))
print "Writing: ", names[counter]+".csv"
out_file = open("../output_data/"+names[counter]+".csv","w")
for item in profile:
for i in range(4):
out_file.write(str(item) + "\n")
out_file.close()
else:
print "Error! profile #"+str(counter)+" has "+ str(len(profile)) + " lines"
counter += 1
print(totals)
plt.close()
plt.figure(figsize=(19, 7))
mini = 0
maxi = 24 * 7
for name,profile in zip(names,profiles):
#if "appartement" in name:
#plt.plot(profile[mini:maxi]/np.sum(profile),linewidth=1.0, label=name)
plt.plot(profile[mini:maxi],linewidth=1.0, label=name)
plt.xlabel('time (hours)')
plt.ylabel('kW')
plt.legend()
plt.show()
|
19357
|
import inspect
import functools
from gridengine import job, dispatch, schedulers
# ----------------------------------------------------------------------------
# Partial
# ----------------------------------------------------------------------------
def isexception(x):
"""Test whether the value is an Exception instance"""
return isinstance(x, Exception)
def isnumeric(x):
"""Test whether the value can be represented as a number"""
try:
float(x)
return True
except:
return False
def partial(f, *args, **kwargs):
"""Return a callable partially closed over the input function and arguments
partial is functionally equivalent to functools.partial, however it also
applies a variant of functools.update_wrapper, with:
__doc__ = f.__doc__
__module__ = f.__module__
__name__ = f.__name__ + string_representation_of_closed_arguments
This is useful for running functions with different parameter sets, whilst
being able to identify the variants by name
"""
def name(var):
try:
return var.__name__
except AttributeError:
return str(var)[0:5] if isnumeric(var) else var.__class__.__name__
g = functools.partial(f, *args, **kwargs)
g.__doc__ = f.__doc__
g.__module__ = f.__module__
g.__name__ = '_'.join([f.__name__] + [name(arg) for arg in list(args)+list(kwargs.values())])
return g
# ----------------------------------------------------------------------------
# Map
# ----------------------------------------------------------------------------
def map(f, args, scheduler=schedulers.best_available, reraise=True):
"""Perform a functional-style map operation
Apply a function f to each argument in the iterable args. This is equivalent to
y = [f(x) for x in args]
or
y = map(f, args)
except that each argument in the iterable is assigned to a separate Job
and scheduled to run via the scheduler.
The default scheduler is a schedulers.ProcessScheduler instance. To run map
on a grid engine, simply pass a schedulers.GridEngineScheduler instance.
Args:
f (func): A picklable function
args (iterable): An iterable (list) of arguments to f
Keyword Args:
scheduler: A schedulers.Scheduler instance or class. By default, the
system tries to return the best_available() scheduler. Use this if you
want to set a scheduler specifically.
reraise (bool): Reraise exceptions that occur in any of the jobs. Set this
to False if you want to salvage any good results.
Returns:
List of return values equivalent to the builtin map function
Raises:
Any exception that would occur when applying [f(x) for x in args]
"""
# setup the dispatcher
dispatcher = dispatch.JobDispatcher(scheduler)
# allocate the jobs
jobs = [job.Job(target=f, args=(arg,)) for arg in args]
# run the jobs (guaranteed to return in the same order)
dispatcher.dispatch(jobs)
results = dispatcher.join()
# check for exceptions
if reraise:
for exception in filter(isexception, results):
# an error occurred during execution of one of the jobs, reraise it
raise exception
return results
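# Illustrative usage sketch (not part of the original module): mapping a picklable,
# module-level function over a list of arguments; each argument becomes its own Job and
# results come back in the original order. The _square helper is hypothetical.
def _square(x):
    return x * x
if __name__ == '__main__':
    print(map(_square, [1, 2, 3, 4]))   # expected: [1, 4, 9, 16]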
|
19368
|
import unittest
from yapper import create_app, db
from yapper.blueprints.user.models import User, Role
class TestUserAddToDb(unittest.TestCase):
def setUp(self):
self.app = create_app('test')
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_role_gets_id(self):
role = Role(name='admin')
self.assertTrue(role.id is None)
db.session.add(role)
db.session.commit()
self.assertFalse(role.id is None)
def test_user_gets_role_and_id(self):
role = Role(name='administrator')
self.assertTrue(role.id is None)
user = User(email='<EMAIL>', password='<PASSWORD>', role=role)
self.assertTrue(user.id is None)
db.session.add(user)
db.session.commit()
self.assertFalse(role.id is None)
self.assertFalse(user.id is None)
self.assertTrue(user.role_id == role.id)
self.assertTrue(user.is_admin())
|
19375
|
import re
import logging
logger = logging.getLogger(__name__)
def get_sorted_pair(a, b):
# ensure citation pair is always in same order
if a > b:
return (a, b)
else:
return (b, a)
def to_label(t, labels):
if t in labels:
return t
else:
return 'other'
def normalize_title(t):
if t:
t = t.replace('.', ' ').replace('-', ' ').strip().lower()
#t = re.sub(r'\W+', '', t)
return t
def normalize_section(title):
if title:
return re.sub(r'[\.0-9]', '',
title.
strip() \
.lower() \
.replace('conclusions', 'conclusion') \
.replace('methodology', 'method') \
.replace('methods', 'method') \
.replace('related works', 'related work') \
.replace('models', 'model') \
.replace('datasets', 'dataset') \
.replace('our ', '') \
.replace('evaluations', 'evaluation') \
.replace('experiments', 'experiment')
).strip()
# .replace('conclusion and future perspectives', 'conclusion')\
# .replace('materials and methods', 'methods')
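# Illustrative usage (not part of the original module): section titles are reduced to a
# small canonical vocabulary, and citation pairs get a deterministic ordering.
assert normalize_section("5. Conclusions") == "conclusion"
assert normalize_section("Materials and Methods") == "materials and method"
assert get_sorted_pair("a1", "b2") == ("b2", "a1")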
def get_text_from_doc(doc) -> str:
"""
Build document text from title + abstract
:param doc: S2 paper
:return: Document text
"""
text = ''
if 'title' in doc:
text += doc['title']
    if doc.get('abstract'):
text += '\n' + doc['abstract']
return text
def get_text_from_doc_id(doc_id: str, doc_index) -> str:
"""
Build document text from title + abstract
:param doc_id: S2-id
:param doc_index: S2-id to S2-paper data
:return: Document text
"""
if doc_id in doc_index:
return get_text_from_doc(doc_index[doc_id])
else:
raise ValueError(f'Document not found in index: {doc_id}')
# resolve 'and' titles and filter for out-of-index docs
def resolve_and_sect_titles(items, doc_index=None):
for from_s2_id, to_s2_id, sect_generic, sect_title, sect_marker in items:
if doc_index and (from_s2_id not in doc_index or to_s2_id not in doc_index):
# One of the IDs does not exist in document index
continue
sect_title = normalize_section(sect_title)
if sect_title:
# Resolve combined sections
for t in sect_title.split(' and '):
if t:
yield (from_s2_id, to_s2_id, t, sect_marker)
|
19379
|
bl_info = {
"name": "Import Fromsoft FLVER models",
"description":
"Import models from various Fromsoft games such as Dark Souls",
"author": "<NAME>",
"version": (0, 1, 0),
"blender": (2, 80, 0),
"category": "Import-Export",
"location": "File > Import",
"warning": "",
"support": "COMMUNITY",
"wiki_url": "", # TODO: wiki url
"tracker_url": "", # TODO: tracker url
}
_submodules = {
"importer",
"flver",
"reader",
}
# Reload submodules on addon reload
if "bpy" in locals():
import importlib
for submodule in _submodules:
if submodule in locals():
importlib.reload(locals()[submodule])
import bpy
from . import importer
from bpy_extras.io_utils import ImportHelper
from bpy.props import StringProperty, BoolProperty
class FlverImporter(bpy.types.Operator, ImportHelper):
bl_idname = "import_scene.flver"
bl_label = "Fromsoft (.flver)"
filter_glob = StringProperty(default="*.flver", options={"HIDDEN"})
transpose_y_and_z = BoolProperty(
name="Transpose Y and Z axes",
description=("This will correct the orientation of the model. " +
"Rarely necessary to disable."),
default=True)
import_skeleton = BoolProperty(
name="Import skeleton",
description=("Disable to prevent the creation of an Armature " +
"and corresponding vertex groups."),
default=True)
connect_bones = BoolProperty(
name="Connect bones",
        description=(
            "Disable to import disjointed bones rotated about their " +
            "original Euler angles. This may be potentially desirable "
            "for authoring derivative FLVER files."),
default=True)
def execute(self, context):
importer.run(context=context,
path=self.filepath,
transpose_y_and_z=self.transpose_y_and_z,
import_skeleton=self.import_skeleton,
connect_bones=self.connect_bones)
return {"FINISHED"}
def menu_import(self, context):
self.layout.operator(FlverImporter.bl_idname)
def register():
bpy.utils.register_class(FlverImporter)
bpy.types.TOPBAR_MT_file_import.append(menu_import)
def unregister():
bpy.types.TOPBAR_MT_file_import.remove(menu_import)
bpy.utils.unregister_class(FlverImporter)
|
19434
|
from setuptools import setup, find_packages
setup(
name = "imgdup",
version = "1.3",
packages = find_packages(),
scripts = ['imgdup.py'],
install_requires = ['pillow>=2.8.1'],
# metadata for upload to PyPI
author = "<NAME>",
author_email = "<EMAIL>",
description = "Visual similarity image finder and cleaner (image deduplication tool)",
license = "MIT",
keywords = "deduplication duplicate images image visual finder",
url = "https://github.com/rif/imgdup", # project home page, if any
)
|
19444
|
import logging
from http import cookiejar as http_cookiejar
from http.cookiejar import http2time # type: ignore
from typing import Any # noqa
from typing import Dict # noqa
from urllib.parse import parse_qs
from urllib.parse import urlsplit
from urllib.parse import urlunsplit
from oic.exception import UnSupported
from oic.oauth2.exception import TimeFormatError
from oic.utils.sanitize import sanitize
logger = logging.getLogger(__name__)
__author__ = "roland"
URL_ENCODED = "application/x-www-form-urlencoded"
JSON_ENCODED = "application/json"
DEFAULT_POST_CONTENT_TYPE = URL_ENCODED
PAIRS = {
"port": "port_specified",
"domain": "domain_specified",
"path": "path_specified",
}
ATTRS = {
"version": None,
"name": "",
"value": None,
"port": None,
"port_specified": False,
"domain": "",
"domain_specified": False,
"domain_initial_dot": False,
"path": "",
"path_specified": False,
"secure": False,
"expires": None,
"discard": True,
"comment": None,
"comment_url": None,
"rest": "",
"rfc2109": True,
} # type: Dict[str, Any]
def get_or_post(
uri, method, req, content_type=DEFAULT_POST_CONTENT_TYPE, accept=None, **kwargs
):
"""
Construct HTTP request.
:param uri:
:param method:
:param req:
:param content_type:
:param accept:
:param kwargs:
:return:
"""
if method in ["GET", "DELETE"]:
if req.keys():
_req = req.copy()
comp = urlsplit(str(uri))
if comp.query:
_req.update(parse_qs(comp.query))
_query = str(_req.to_urlencoded())
path = urlunsplit(
(comp.scheme, comp.netloc, comp.path, _query, comp.fragment)
)
else:
path = uri
body = None
elif method in ["POST", "PUT"]:
path = uri
if content_type == URL_ENCODED:
body = req.to_urlencoded()
elif content_type == JSON_ENCODED:
body = req.to_json()
else:
raise UnSupported("Unsupported content type: '%s'" % content_type)
        header_ext = {"Content-Type": content_type}
        if accept:
            header_ext["Accept"] = accept
if "headers" in kwargs.keys():
kwargs["headers"].update(header_ext)
else:
kwargs["headers"] = header_ext
else:
raise UnSupported("Unsupported HTTP method: '%s'" % method)
return path, body, kwargs
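# Hedged usage sketch: `req` is expected to behave like oic's Message class
# (keys()/copy()/update()/to_urlencoded()/to_json()); the URL and request object
# below are placeholders.
#
#   path, body, kwargs = get_or_post(
#       "https://op.example.com/token", "POST", token_request,
#       content_type=URL_ENCODED, headers={"Authorization": "Basic ..."})
#   # path == the uri unchanged, body == token_request.to_urlencoded(),
#   # and kwargs["headers"] now also carries the Content-Type header.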
def set_cookie(cookiejar, kaka):
"""
Place a cookie (a http_cookielib.Cookie based on a set-cookie header line) in the cookie jar.
Always chose the shortest expires time.
:param cookiejar:
:param kaka: Cookie
"""
# default rfc2109=False
# max-age, httponly
for cookie_name, morsel in kaka.items():
std_attr = ATTRS.copy()
std_attr["name"] = cookie_name
_tmp = morsel.coded_value
if _tmp.startswith('"') and _tmp.endswith('"'):
std_attr["value"] = _tmp[1:-1]
else:
std_attr["value"] = _tmp
std_attr["version"] = 0
attr = ""
# copy attributes that have values
try:
for attr in morsel.keys():
if attr in ATTRS:
if morsel[attr]:
if attr == "expires":
std_attr[attr] = http2time(morsel[attr])
else:
std_attr[attr] = morsel[attr]
elif attr == "max-age":
if morsel[attr]:
std_attr["expires"] = http2time(morsel[attr])
except TimeFormatError:
# Ignore cookie
logger.info(
"Time format error on %s parameter in received cookie"
% (sanitize(attr),)
)
continue
for att, spec in PAIRS.items():
if std_attr[att]:
std_attr[spec] = True
if std_attr["domain"] and std_attr["domain"].startswith("."):
std_attr["domain_initial_dot"] = True
if morsel["max-age"] == 0:
try:
cookiejar.clear(
domain=std_attr["domain"],
path=std_attr["path"],
name=std_attr["name"],
)
except ValueError:
pass
else:
# Fix for Microsoft cookie error
if "version" in std_attr:
try:
std_attr["version"] = std_attr["version"].split(",")[0]
except (TypeError, AttributeError):
pass
new_cookie = http_cookiejar.Cookie(**std_attr) # type: ignore
cookiejar.set_cookie(new_cookie)
def match_to_(val, vlist):
if isinstance(vlist, str):
if vlist.startswith(val):
return True
else:
for v in vlist:
if v.startswith(val):
return True
return False
def verify_header(reqresp, body_type):
logger.debug("resp.headers: %s" % (sanitize(reqresp.headers),))
logger.debug("resp.txt: %s" % (sanitize(reqresp.text),))
if body_type == "":
_ctype = reqresp.headers["content-type"]
if match_to_("application/json", _ctype):
body_type = "json"
elif match_to_("application/jwt", _ctype):
body_type = "jwt"
elif match_to_(URL_ENCODED, _ctype):
body_type = "urlencoded"
else:
body_type = "txt" # reasonable default ??
elif body_type == "json":
if not match_to_("application/json", reqresp.headers["content-type"]):
if match_to_("application/jwt", reqresp.headers["content-type"]):
body_type = "jwt"
else:
raise ValueError(
"content-type: %s" % (reqresp.headers["content-type"],)
)
elif body_type == "jwt":
if not match_to_("application/jwt", reqresp.headers["content-type"]):
raise ValueError(
"Wrong content-type in header, got: {} expected "
"'application/jwt'".format(reqresp.headers["content-type"])
)
elif body_type == "urlencoded":
if not match_to_(DEFAULT_POST_CONTENT_TYPE, reqresp.headers["content-type"]):
if not match_to_("text/plain", reqresp.headers["content-type"]):
raise ValueError("Wrong content-type")
else:
raise ValueError("Unknown return format: %s" % body_type)
return body_type
|
19456
|
import decimal
import hashlib
import json
import requests
import tempfile
import uuid
import os
from tqdm import tqdm
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
def sha256_for_file(f, buf_size=65536):
pos = f.tell()
dgst = hashlib.sha256()
while True:
data = f.read(buf_size)
if not data:
break
dgst.update(data)
size = f.tell() - pos
f.seek(pos)
return size, dgst.hexdigest()
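# Hedged usage sketch: hashes the remainder of a file and then seeks back to the
# caller's original position; the file name is a placeholder.
#
#   with open("package.zip", "rb") as f:
#       size, digest = sha256_for_file(f)
#       print(size, digest)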
namespace = "default"
fission_url = os.environ["FISSION_URL"]
def post(rel_url, data):
response = requests.post(
"%s%s" % (fission_url, rel_url),
data=json.dumps(data),
headers={"Content-Type": "application/json"})
# print("POST", rel_url)
# print(response, response.text)
if response.status_code in [404, 409]:
return response.status_code, None
if response.status_code == 500:
raise Exception(response.text)
return response.status_code, response.json()
def get(rel_url, params=None):
response = requests.get(
"%s%s" % (fission_url, rel_url),
params=params)
if response.status_code == 404:
return response.status_code, None
if response.status_code == 500:
raise Exception(response.text)
return response.status_code, response.json()
def format_bytes(count):
label_ix = 0
labels = ["B", "KiB", "MiB", "GiB"]
    while label_ix < len(labels) - 1 and count / 1024. > 1:
count = count / 1024.
label_ix += 1
count = decimal.Decimal(count)
count = count.to_integral() if count == count.to_integral() else round(count.normalize(), 2)
return "%s %s" % (count, labels[label_ix])
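# Hedged examples of format_bytes (pure function):
#   format_bytes(512)          -> "512 B"
#   format_bytes(1536)         -> "1.5 KiB"
#   format_bytes(3 * 1024**2)  -> "3 MiB"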
def lazily_define_package(environment, file):
filesize, archive_sha256 = sha256_for_file(file)
base_archive_url = "%s/proxy/storage/v1/archive" % fission_url
status_code, response = get("/v2/packages/%s" % archive_sha256)
if status_code == 200:
print("Already uploaded", flush=True)
return archive_sha256, response
progress = tqdm(
total=filesize,
desc="Uploading",
unit="B",
unit_scale=True,
unit_divisor=1024,
leave=True)
last_bytes_read = 0
def update_progress(monitor):
        # advance the progress bar by the bytes read since the last callback
nonlocal last_bytes_read
progress.update(monitor.bytes_read - last_bytes_read)
last_bytes_read = monitor.bytes_read
e = MultipartEncoder(fields={'uploadfile': ('uploaded', file, 'text/plain')})
m = MultipartEncoderMonitor(e, update_progress)
archive_response = requests.post(base_archive_url,
data=m,
headers={
"X-File-Size": str(filesize),
'Content-Type': m.content_type})
archive_id = archive_response.json()['id']
print(" done", flush=True)
archive_url = "%s?id=%s" % (base_archive_url, archive_id)
package = {
"metadata": {
"name": archive_sha256,
"namespace": namespace,
},
"spec": {
"environment": environment,
"deployment": {
"type": "url",
"url": archive_url,
"checksum": {
"type": "sha256",
"sum": archive_sha256,
},
},
},
"status": {
"buildstatus": "succeeded",
},
}
return archive_sha256, post("/v2/packages", package)[1]
def lazily_define_function(environment, f):
archive_sha256, package_ref = lazily_define_package(environment, f)
print("Registering ...", end='', flush=True)
function_name = archive_sha256[:8]
status_code, response = get("/v2/functions/%s" % function_name)
if status_code == 200:
return function_name
status_code, r = post("/v2/functions", {
"metadata": {
"name": function_name,
"namespace": namespace,
},
"spec": {
"environment": environment,
"package": {
"functionName": function_name,
"packageref": package_ref,
},
},
})
if status_code == 409 or status_code == 201:
print(" done", flush=True)
return function_name
print(" error", flush=True)
    raise Exception(r)
def lazily_define_trigger2(function_name, http_method, host, relativeurl):
trigger_name = "%s-%s-%s" % (
host.replace('.', '-'),
relativeurl.replace(':.*', '').replace('{', '').replace('}', '').replace('/', '-'),
http_method.lower())
status_code, response = get("/v2/triggers/http/%s" % trigger_name)
if status_code == 200:
return
status_code, r = post("/v2/triggers/http", {
"metadata": {
"name": trigger_name,
"namespace": namespace,
},
"spec": {
"host": host,
"relativeurl": relativeurl,
"method": http_method,
"functionref": {
"Type": "name",
"Name": function_name,
},
},
})
if status_code == 409 or status_code == 201:
return
    raise Exception(r)
def publish(environment_name, f):
environment = {
"namespace": namespace,
"name": environment_name,
}
function_name = lazily_define_function(environment, f)
host = "%s.tfi.gcp.tesserai.com" % function_name
lazily_define_trigger2(function_name, "POST", host, "/{path-info:.*}")
lazily_define_trigger2(function_name, "GET", host, "/{path-info:.*}")
lazily_define_trigger2(function_name, "GET", host, "/")
return "http://%s" % host
|
19466
|
import time
import logging
import couchdb
from collections import deque
from threading import Thread
from pylons import config
from lr.lib import SpecValidationException, helpers as h
from lr.lib.couch_change_monitor import BaseChangeHandler
from lr.model import ResourceDataModel
from couchdb import ResourceConflict
from lr.lib.replacement_helper import ResourceDataReplacement
from lr.lib.schema_helper import ResourceDataModelValidator
log = logging.getLogger(__name__)
# this doesn't need to be done... should be handled by pylons.config
# scriptPath = os.path.dirname(os.path.abspath(__file__))
# _PYLONS_CONFIG = os.path.join(scriptPath, '..', '..', '..', 'development.ini')
# _config = ConfigParser.ConfigParser()
# _config.read(_PYLONS_CONFIG)
_RESOURCE_DISTRIBUTABLE_TYPE = "resource_data_distributable"
_RESOURCE_TYPE = "resource_data"
_DOC_TYPE = "doc_type"
_DOC = "doc"
_ID = "id"
_DOCUMENT_UPDATE_THRESHOLD = 100
class IncomingCopyHandler(BaseChangeHandler):
def __init__(self):
self._serverUrl = config["couchdb.url.dbadmin"]
self._targetName = config["couchdb.db.resourcedata"]
self.documents = deque()
s = couchdb.Server(self._serverUrl)
self._db = s[self._targetName]
self.repl_helper = ResourceDataReplacement()
self.threads = {}
self.max_threads = 50
def _canHandle(self, change, database):
if ((_DOC in change) and \
(change[_DOC].get(_DOC_TYPE) == _RESOURCE_DISTRIBUTABLE_TYPE or \
change[_DOC].get(_DOC_TYPE) == _RESOURCE_TYPE)):
return True
return False
def _handle(self, change, database):
def threadName(doc):
return "T-"+doc["_id"]
def handleDocument(newDoc):
should_delete = True
try:
# newDoc['node_timestamp'] = h.nowToISO8601Zformat()
ResourceDataModelValidator.set_timestamps(newDoc)
del newDoc["_rev"]
self.repl_helper.handle(newDoc)
# rd = ResourceDataModel(newDoc)
# rd.save(log_exceptions=False)
except SpecValidationException as e:
log.error("SpecValidationException: %s, %s",newDoc['_id'],str(e))
except couchdb.ResourceConflict as rc:
log.error("Document conflicts", exc_info=1)
except Exception as ex:
                should_delete = False  # don't delete; something unexpected happened
log.error("Unable to save %s", newDoc['_id'], exc_info=ex)
if should_delete:
try:
del database[newDoc['_id']]
except Exception as ex:
log.error("Error when deleting", exc_info=ex)
try:
del self.threads[threadName(newDoc)]
except:
pass
self.documents.append(change[_DOC])
if len(self.documents) >= _DOCUMENT_UPDATE_THRESHOLD or len(self.documents) >= database.info()['doc_count']:
while len(self.documents) > 0:
doc = self.documents.popleft()
tname = threadName(doc)
t = Thread(target=handleDocument, name=tname, args=(doc,))
self.threads[tname] = t
t.start()
while len(self.threads) > self.max_threads:
time.sleep(.1)
def isRunning(self):
return len(self.threads) > 0
def threadCount(self):
return len(self.threads)
|
19479
|
import ctypes
import ctypes.util
libc = ctypes.CDLL(ctypes.util.find_library('c'))
# Get network device's name
def if_indextoname (index):
if not isinstance (index, int):
raise TypeError ('Index must be an integer.')
libc.if_indextoname.argtypes = [ctypes.c_uint32, ctypes.c_char_p]
libc.if_indextoname.restype = ctypes.c_char_p
ifname = ctypes.create_string_buffer(32)
ifname = libc.if_indextoname (index, ifname)
if not ifname:
raise RuntimeError ("Invalid network interface index.")
return ifname
# Generate socket id
def to_socket_id (addr1, addr1_str, addr2, addr2_str, port1, port2):
socket_id = None
if addr1 < addr2:
socket_id = "%s:%d-%s:%d" % (addr1_str, port1, addr2_str, port2)
elif addr2 < addr1:
socket_id = "%s:%d-%s:%d" % (addr2_str, port2, addr1_str, port1)
else:
if port1 < port2:
socket_id = "%s:%d-%s:%d" % (addr1_str, port1, addr2_str, port2)
else:
socket_id = "%s:%d-%s:%d" % (addr2_str, port2, addr1_str, port1)
return socket_id
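# Hedged usage sketch (values are illustrative):
#
#   name = if_indextoname(1)   # e.g. b'lo' on Linux; raises if the index is invalid
#
#   # Both directions of the same flow map to one canonical socket id:
#   a = to_socket_id(0x0A000001, "10.0.0.1", 0x0A000002, "10.0.0.2", 12345, 80)
#   b = to_socket_id(0x0A000002, "10.0.0.2", 0x0A000001, "10.0.0.1", 80, 12345)
#   assert a == b == "10.0.0.1:12345-10.0.0.2:80"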
|
19485
|
from __future__ import annotations
__all__ = ("executor",)
import inspect
import sys
from asyncio import get_running_loop
from concurrent.futures import Executor
from functools import partial, wraps
from typing import Awaitable, Callable, TypeVar, overload
from asphalt.core import Context
if sys.version_info >= (3, 10):
from typing import Concatenate, ParamSpec
else:
from typing_extensions import Concatenate, ParamSpec
T_Retval = TypeVar("T_Retval")
P = ParamSpec("P")
@overload
def executor(
func_or_executor: Executor | str,
) -> Callable[
[Callable[Concatenate[Context, P], T_Retval]],
Callable[Concatenate[Context, P], T_Retval | Awaitable[T_Retval]],
]:
...
@overload
def executor(
func_or_executor: Callable[Concatenate[Context, P], T_Retval]
) -> Callable[Concatenate[Context, P], T_Retval | Awaitable[T_Retval]]:
...
def executor(
func_or_executor: Executor | str | Callable[Concatenate[Context, P], T_Retval]
) -> (
Callable[
[Callable[Concatenate[Context, P], T_Retval]],
Callable[Concatenate[Context, P], T_Retval | Awaitable[T_Retval]],
]
| Callable[Concatenate[Context, P], T_Retval | Awaitable[T_Retval]]
):
"""
Decorate a function to run in an executor.
If no executor (or ``None``) is given, the current event loop's default executor is
used. Otherwise, the argument must be a PEP 3148 compliant thread pool executor or
the name of an :class:`~concurrent.futures.Executor` instance.
If a decorated callable is called in a worker thread, the executor argument is
ignored and the wrapped function is called directly.
Callables wrapped with this decorator must be used with ``await`` when called in the
event loop thread.
Example use with the default executor (``None``)::
@executor
def this_runs_in_threadpool(ctx):
return do_something_cpu_intensive()
async def request_handler(ctx):
result = await this_runs_in_threadpool(ctx)
With a named :class:`~concurrent.futures.Executor` resource::
@executor('special_ops')
def this_runs_in_threadpool(ctx):
return do_something_cpu_intensive()
async def request_handler(ctx):
result = await this_runs_in_threadpool(ctx)
:param func_or_executor: either a callable (when used as a decorator), an executor
instance or the name of an :class:`~concurrent.futures.Executor` resource
"""
def outer(
func: Callable[Concatenate[Context, P], T_Retval]
) -> Callable[Concatenate[Context, P], T_Retval | Awaitable[T_Retval]]:
def wrapper(
ctx: Context, *args: P.args, **kwargs: P.kwargs
) -> T_Retval | Awaitable[T_Retval]:
try:
loop = get_running_loop()
except RuntimeError:
# Event loop not available -- we're in a worker thread
return func(ctx, *args, **kwargs)
# Resolve the executor resource name to an Executor instance
_executor: Executor | None
if isinstance(executor, str):
_executor = ctx.require_resource(Executor, executor)
else:
_executor = executor
callback = partial(func, ctx, *args, **kwargs)
return loop.run_in_executor(_executor, callback)
assert not inspect.iscoroutinefunction(
func
), "Cannot wrap coroutine functions to be run in an executor"
return wraps(func)(wrapper)
executor: Executor | str | None = None
if isinstance(func_or_executor, (str, Executor)):
executor = func_or_executor
return outer
else:
return outer(func_or_executor)
|
19538
|
import json
import gen.Types
def loader(f):
    return json.load(open('../GenerateDatas/json/' + f + ".json", 'r', encoding="utf-8"))
tables = gen.Types.Tables(loader)
print(tables)
r = tables.TbFullTypes.getDataList()[0].__dict__
print(r)
|
19575
|
import time
import typing
import requests
from sys import stderr
from datetime import datetime
from packettotal_sdk import packettotal_api
class SearchTools(packettotal_api.PacketTotalApi):
def __init__(self, api_key: str):
"""
:param api_key: An API authentication token
"""
super().__init__(api_key)
def search_by_pcap(self, pcap_file_obj: typing.BinaryIO) -> requests.Response:
"""
        Search by a pcap/pcapng file, get a list of similar packet captures
        :param pcap_file_obj: A file-like object that provides a .read() interface (e.g. open('path_to_pcap.pcap', 'rb'))
        :return: A requests.Response instance, containing a graph of similar pcaps with matched terms
"""
response = super().analyze(pcap_file_obj)
if response.status_code == 200:
sim_response = super().pcap_similar(response.json()['pcap_metadata']['md5'])
elif response.status_code == 202:
pcap_id = response.json()['id']
info_response = super().pcap_info(pcap_id)
while info_response.status_code == 404:
print('[{}] Waiting for {} to finish analyzing.'.format(datetime.utcnow(), pcap_id))
info_response = super().pcap_info(response.json()['id'])
time.sleep(10)
print('[{}] Fetching results for {}.'.format(datetime.utcnow(), pcap_id))
time.sleep(5)
sim_response = super().pcap_similar(response.json()['id'])
else:
return response
return sim_response
def search_by_iocs(self, ioc_file: typing.TextIO) -> requests.Response:
"""
        Search up to 100 IOC terms at once, and get matching packet captures
        :param ioc_file: A file-like object that provides a .read() interface (e.g. open('path_to_iocs.txt', 'r'));
        contents are delimited by newlines, commas, or tabs
        :return: A requests.Response instance with the search results containing at least one matching IOC
"""
text = ioc_file.read()
delim = '\n'
if '\r\n' in text[0:2048]:
delim = '\r\n'
elif '\r' in text[0:2048]:
delim = '\r'
elif ',' in text[0:2048]:
delim = ','
elif '\t' in text[0:2048]:
delim = '\t'
        text_delimd = [term.strip() for term in text.split(delim) if term.strip()]
        search_str = ''
        for i, ioc in enumerate(text_delimd[0: -1]):
            search_str += '"{}" OR '.format(ioc)
            if i > 100:
                print('Warning searching only the first 100 IOC terms of {}.'.format(len(text_delimd)), file=stderr)
                break
        search_str += '"{}"'.format(text_delimd[-1])
response = super().search(search_str)
return response
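# Hedged usage sketch (API key and file names are placeholders):
#
#   tools = SearchTools(api_key="YOUR_API_KEY")
#   with open("capture.pcap", "rb") as pcap:
#       print(tools.search_by_pcap(pcap).json())
#   with open("iocs.txt", "r") as iocs:
#       print(tools.search_by_iocs(iocs).json())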
|
19578
|
from unittest.mock import Mock, patch
import pandas as pd
from sdgym.s3 import is_s3_path, parse_s3_path, write_csv, write_file
def test_is_s3_path_with_local_dir():
"""Test the ``sdgym.s3.is_s3_path`` function with a local directory.
If the path is not an s3 path, it should return ``False``.
Input:
- path to a local directory
Output:
- False
"""
# setup
path = 'path/to/local/dir'
# run
result = is_s3_path(path)
# asserts
assert not result
def test_is_s3_path_with_s3_bucket():
"""Test the ``sdgym.s3.is_s3_path`` function with an s3 directory.
If the path is an s3 path, it should return ``True``.
Input:
- path to an s3 directory
Output:
- True
"""
# setup
path = 's3://my-bucket/my/path'
# run
result = is_s3_path(path)
# asserts
assert result
def test_parse_s3_path_bucket_only():
"""Test the ``sdgym.s3.parse_s3_path`` function with an s3 path.
If the s3 path contains only the bucket name, the returned tuple
should be ``(bucket_name, '')``.
Input:
- path to s3 bucket
Output:
- ('my-bucket', '')
"""
# setup
expected_bucket_name = 'my-bucket'
expected_key_prefix = ''
path = f's3://{expected_bucket_name}/{expected_key_prefix}'
# run
bucket_name, key_prefix = parse_s3_path(path)
# asserts
assert bucket_name == expected_bucket_name
assert key_prefix == expected_key_prefix
def test_parse_s3_path_bucket_and_dir_path():
    """Test the ``sdgym.s3.parse_s3_path`` function with an s3 path.
If the s3 path contains the bucket and a sub directory, the returned
tuple should be ``(bucket_name, subdirectory)``.
Input:
- path to s3 directory
Output:
- ('my-bucket', 'path/to/dir')
"""
# setup
expected_bucket_name = 'my-bucket'
expected_key_prefix = 'path/to/dir'
path = f's3://{expected_bucket_name}/{expected_key_prefix}'
# run
bucket_name, key_prefix = parse_s3_path(path)
# asserts
assert bucket_name == expected_bucket_name
assert key_prefix == expected_key_prefix
def test_write_file(tmpdir):
    """Test the ``sdgym.s3.write_file`` function with a local path.
If the path is a local path, a file with the correct
contents should be created at the specified path.
Input:
- contents of the local file
- path to the local file
- aws_key is None
- aws_secret is None
Output:
- None
Side effects:
- file creation at the specified path with the given contents
"""
# setup
content_str = 'test_content'
path = f'{tmpdir}/test.txt'
# run
write_file(content_str.encode('utf-8'), path, None, None)
# asserts
with open(path, 'r') as f:
assert f.read() == content_str
@patch('sdgym.s3.boto3')
def test_write_file_s3(boto3_mock):
    """Test the ``sdgym.s3.write_file`` function with an s3 path.
If the path is an s3 path, a file with the given contents
should be created at the specified s3 path.
Input:
- contents of the s3 file
- path to the s3 file location
- aws_key for aws authentication
- aws_secret for aws authentication
Output:
- None
Side effects:
- s3 client creation with aws credentials (aws_key, aws_secret)
- s3 method call to create a file in the given bucket with the
given contents
"""
# setup
content_str = 'test_content'
bucket_name = 'my-bucket'
key = 'test.txt'
path = f's3://{bucket_name}/{key}'
aws_key = 'my-key'
aws_secret = 'my-secret'
s3_mock = Mock()
boto3_mock.client.return_value = s3_mock
# run
write_file(content_str.encode('utf-8'), path, aws_key, aws_secret)
# asserts
boto3_mock.client.assert_called_once_with(
's3',
aws_access_key_id=aws_key,
aws_secret_access_key=aws_secret
)
s3_mock.put_object.assert_called_once_with(
Bucket=bucket_name,
Key=key,
Body=content_str.encode('utf-8'),
ContentEncoding='',
)
@patch('sdgym.s3.write_file')
def test_write_csv(write_file_mock):
"""Test the ``sdgym.s3.write_csv`` function.
If ``write_csv`` is called with a DataFrame,
``write_file`` should be called with the expected DataFrame
contents.
Input:
- data to be written to the csv file
- path of the desired csv file
- aws_key is None
- aws_secret is None
Output:
- None
Side effects:
- call to write_file with the correct contents and path
"""
# setup
data = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
path = 'tmp/path'
# run
write_csv(data, path, None, None)
# asserts
input_data = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
expected_content = input_data.to_csv(index=False).encode('utf-8')
write_file_mock.assert_called_once_with(
expected_content,
path,
None,
None
)
|
19584
|
import csv
import sys
from pathlib import Path
from abc import abstractmethod
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import common.tf_utils as tf_utils
import metrics.manager as metric_manager
from common.model_loader import Ckpt
from common.utils import format_text
from common.utils import get_logger
from helper.base import AudioBase
from metrics.summaries import BaseSummaries
from metrics.summaries import Summaries
class Evaluator(object):
def __init__(self, model, session, args, dataset, dataset_name, name):
self.log = get_logger(name)
self.model = model
self.session = session
self.args = args
self.dataset = dataset
self.dataset_name = dataset_name
if Path(self.args.checkpoint_path).is_dir():
latest_checkpoint = tf.train.latest_checkpoint(self.args.checkpoint_path)
if latest_checkpoint is not None:
self.args.checkpoint_path = latest_checkpoint
self.log.info(f"Get latest checkpoint and update to it: {self.args.checkpoint_path}")
self.watch_path = self._build_watch_path()
self.session.run(tf.global_variables_initializer())
self.session.run(tf.local_variables_initializer())
self.ckpt_loader = Ckpt(
session=session,
include_scopes=args.checkpoint_include_scopes,
exclude_scopes=args.checkpoint_exclude_scopes,
ignore_missing_vars=args.ignore_missing_vars,
use_ema=self.args.use_ema,
ema_decay=self.args.ema_decay,
)
@abstractmethod
def setup_metric_manager(self):
raise NotImplementedError
@abstractmethod
def setup_metric_ops(self):
raise NotImplementedError
@abstractmethod
def build_non_tensor_data_from_eval_dict(self, eval_dict, **kwargs):
raise NotImplementedError
@abstractmethod
def setup_dataset_iterator(self):
raise NotImplementedError
def _build_watch_path(self):
if Path(self.args.checkpoint_path).is_dir():
return Path(self.args.checkpoint_path)
else:
return Path(self.args.checkpoint_path).parent
def build_evaluation_step(self, checkpoint_path):
if "-" in checkpoint_path and checkpoint_path.split("-")[-1].isdigit():
return int(checkpoint_path.split("-")[-1])
else:
return 0
def build_checkpoint_paths(self, checkpoint_path):
checkpoint_glob = Path(checkpoint_path + "*")
checkpoint_path = Path(checkpoint_path)
return checkpoint_glob, checkpoint_path
def build_miscellaneous_path(self, name):
target_dir = self.watch_path / "miscellaneous" / self.dataset_name / name
if not target_dir.exists():
target_dir.mkdir(parents=True)
return target_dir
def setup_best_keeper(self):
metric_with_modes = self.metric_manager.get_best_keep_metric_with_modes()
self.log.debug(metric_with_modes)
self.best_keeper = tf_utils.BestKeeper(
metric_with_modes,
self.dataset_name,
self.watch_path,
self.log,
)
def evaluate_once(self, checkpoint_path):
self.log.info("Evaluation started")
self.setup_dataset_iterator()
self.ckpt_loader.load(checkpoint_path)
step = self.build_evaluation_step(checkpoint_path)
checkpoint_glob, checkpoint_path = self.build_checkpoint_paths(checkpoint_path)
self.session.run(tf.local_variables_initializer())
eval_metric_dict = self.run_evaluation(step, is_training=False)
best_keep_metric_dict = self.metric_manager.filter_best_keep_metric(eval_metric_dict)
is_keep, metrics_keep = self.best_keeper.monitor(self.dataset_name, best_keep_metric_dict)
if self.args.save_best_keeper:
meta_info = {
"step": step,
"model_size": self.model.total_params,
}
self.best_keeper.remove_old_best(self.dataset_name, metrics_keep)
self.best_keeper.save_best(self.dataset_name, metrics_keep, checkpoint_glob)
self.best_keeper.remove_temp_dir()
self.best_keeper.save_scores(self.dataset_name, metrics_keep, best_keep_metric_dict, meta_info)
self.metric_manager.write_evaluation_summaries(step=step,
collection_keys=[BaseSummaries.KEY_TYPES.DEFAULT])
self.metric_manager.log_metrics(step=step)
self.log.info("Evaluation finished")
if step >= self.args.max_step_from_restore:
self.log.info("Evaluation stopped")
sys.exit()
def build_train_directory(self):
if Path(self.args.checkpoint_path).is_dir():
return str(self.args.checkpoint_path)
else:
return str(Path(self.args.checkpoint_path).parent)
@staticmethod
def add_arguments(parser):
g = parser.add_argument_group("(Evaluator) arguments")
g.add_argument("--valid_type", default="loop", type=str, choices=["loop", "once"])
g.add_argument("--max_outputs", default=5, type=int)
g.add_argument("--maximum_num_labels_for_metric", default=10, type=int,
help="Maximum number of labels for using class-specific metrics(e.g. precision/recall/f1score)")
g.add_argument("--no-save_best_keeper", dest="save_best_keeper", action="store_false")
g.add_argument("--save_best_keeper", dest="save_best_keeper", action="store_true")
g.set_defaults(save_best_keeper=True)
g.add_argument("--no-flatten_output", dest="flatten_output", action="store_false")
g.add_argument("--flatten_output", dest="flatten_output", action="store_true")
g.set_defaults(flatten_output=False)
g.add_argument("--max_step_from_restore", default=1e20, type=int)
class SingleLabelAudioEvaluator(Evaluator, AudioBase):
def __init__(self, model, session, args, dataset, dataset_name):
super().__init__(model, session, args, dataset, dataset_name, "SingleLabelAudioEvaluator")
self.setup_dataset_related_attr()
self.setup_metric_manager()
self.setup_metric_ops()
self.setup_best_keeper()
def setup_dataset_related_attr(self):
assert len(self.dataset.label_names) == self.args.num_classes
self.use_class_metrics = len(self.dataset.label_names) < self.args.maximum_num_labels_for_metric
def setup_metric_manager(self):
self.metric_manager = metric_manager.AudioMetricManager(
is_training=False,
use_class_metrics=self.use_class_metrics,
exclude_metric_names=self.args.exclude_metric_names,
summary=Summaries(
session=self.session,
train_dir=self.build_train_directory(),
is_training=False,
base_name=self.dataset.dataset_split_name,
max_summary_outputs=self.args.max_summary_outputs,
),
)
def setup_metric_ops(self):
losses = self.build_basic_loss_ops()
self.metric_tf_op = self.metric_manager.build_metric_ops({
"dataset_split_name": self.dataset_name,
"label_names": self.dataset.label_names,
"losses": losses,
"learning_rate": None,
"wavs": self.model.audio_original,
})
def build_non_tensor_data_from_eval_dict(self, eval_dict, **kwargs):
return {
"dataset_split_name": self.dataset.dataset_split_name,
"label_names": self.dataset.label_names,
"predictions_onehot": eval_dict["predictions_onehot"],
"labels_onehot": eval_dict["labels_onehot"],
}
def setup_dataset_iterator(self):
self.dataset.setup_iterator(
self.session,
self.dataset.placeholders,
self.dataset.data,
)
|
19625
|
import argparse
import os
import csv
import random
from utils import ensure_dir, get_project_path
from collections import defaultdict
# POS-tag for irrelevant tag selection
import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
__author__ = "<NAME>"
def write_tsv(intention_dir_path, filename, keys, data_dict):
    with open(intention_dir_path + "/" + filename, 'wt') as file_test:
        dict_writer = csv.writer(file_test, delimiter='\t')
        dict_writer.writerow(keys)
        rows = zip(*data_dict.values())
        for d in rows:
            dict_writer.writerow(d)
def make_dataset(root_data_dir, complete_data_dir, incomplete_data_dir, results_dir):
"""
:param root_data_dir: directory to save data
:param complete_data_dir: subdirectory with complete data
:param incomplete_data_dir: subdirectory with incomplete data
:param results_dir: subdirectory with incomplete data
:return:
"""
print("Making incomplete intention classification dataset...")
complete_data_dir_path = root_data_dir + '/' + complete_data_dir
incomplete_data_dir_path = root_data_dir + '/' + incomplete_data_dir
results_dir_path = root_data_dir + '/' + results_dir
ensure_dir(results_dir_path)
# Traverse all sub-directories
files_dictionary = defaultdict(lambda: [])
for sub_dir in os.walk(complete_data_dir_path):
if len(sub_dir[1]) == 0:
data_name = sub_dir[0].split('/')[-1]
files_dictionary[data_name] = sub_dir[2]
# Open train and test tsv files
for k, v in files_dictionary.items():
save_path = results_dir_path + '/' + k
ensure_dir(save_path)
for comp_v_i, inc_v_i in zip(['test.tsv', 'train.tsv'], ['test_withMissingWords.tsv', 'train_withMissingWords.tsv']):
complete_tsv_file = open(complete_data_dir_path + '/' + k + '/' + comp_v_i, 'r')
incomplete_tsv_file = open(incomplete_data_dir_path + '/' + k + '/' + inc_v_i, 'r')
reader_complete = csv.reader(complete_tsv_file, delimiter='\t')
reader_incomplete = csv.reader(incomplete_tsv_file, delimiter='\t')
sentences, labels, missing_words_arr, targets = [], [], [], []
row_count = 0
for row_comp, row_inc in zip(reader_complete, reader_incomplete):
if row_count != 0:
# Incomplete
sentences.append(row_inc[0])
labels.append(row_inc[1])
missing_words_arr.append(row_inc[2])
targets.append(row_comp[0])
if 'train' in comp_v_i:
# Complete
sentences.append(row_comp[0])
labels.append(row_comp[1])
missing_words_arr.append('')
targets.append(row_comp[0])
row_count += 1
# Shuffle
if 'train' in comp_v_i:
c = list(zip(sentences, labels, missing_words_arr, targets))
random.shuffle(c)
sentences, labels, missing_words_arr, targets = zip(*c)
# Save train, test, val in files in the format (sentence, label)
keys = ['sentence', 'label', 'missing', 'target']
data_dict = {'sentence': sentences, 'label': labels, 'missing': missing_words_arr, 'target': targets}
write_tsv(save_path, comp_v_i, keys, data_dict)
print("Complete + Incomplete intention classification dataset completed")
def init_args():
parser = argparse.ArgumentParser(description="Script to make intention recognition dataset")
parser.add_argument('--root_data_dir', type=str, default=get_project_path() + "/data",
help='Directory to save subdirectories, needs to be an absolute path')
parser.add_argument('--complete_data_dir', type=str, default="complete_data",
help='Subdirectory with complete data')
parser.add_argument('--incomplete_data_dir', type=str, default="incomplete_data_tfidf_lower_0.8_noMissingTag",
help='Subdirectory with incomplete data')
parser.add_argument('--results_dir', type=str, default="comp_with_incomplete_data_tfidf_lower_0.8_noMissingTag",
help='Subdirectory to save Joint Complete and Incomplete data')
return parser.parse_args()
if __name__ == '__main__':
args = init_args()
make_dataset(args.root_data_dir, args.complete_data_dir, args.incomplete_data_dir, args.results_dir)
|
19636
|
from decimal import Decimal
from typing import List, Any
from common.Enums import SortingType
from models import Message
from .engine import db_engine, DBEngine
class MessageDAO:
def __init__(self, engine: DBEngine):
self.engine = engine
@staticmethod
def __make_insert_values_from_messages_array(messages: List[Message]) -> List[tuple]:
return [
(
message.username,
message.text,
Decimal(message.timestamp),
message.reply_count,
message.reply_users_count,
message.reactions_rate,
message.thread_length,
message.channel_id,
)
for message in messages
]
@staticmethod
def __request_messages_to_message_class(request_messages: List[Any]) -> List[Message]:
return [Message(**message) for message in request_messages]
@staticmethod
def __make_link_update_values_from_messages_array(messages: List[Message]) -> List[tuple]:
return [(x.link, Decimal(x.timestamp), x.channel_id) for x in messages]
async def create_messages(self, messages: List[Message]) -> None:
request = f"""
INSERT INTO message (username, text, timestamp, reply_count, reply_users_count,
reactions_rate, thread_length, channel_id)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8);
"""
sequence = self.__make_insert_values_from_messages_array(messages)
await self.engine.make_execute_many(request, sequence)
async def upsert_messages(self, messages: List[Message]) -> None:
request = f"""
INSERT INTO message (username, text, timestamp, reply_count, reply_users_count,
reactions_rate, thread_length, channel_id)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
ON CONFLICT (timestamp, channel_id)
DO UPDATE SET
reply_count = EXCLUDED.reply_count,
reply_users_count = EXCLUDED.reply_users_count,
reactions_rate = EXCLUDED.reactions_rate,
thread_length = EXCLUDED.thread_length;
"""
sequence = self.__make_insert_values_from_messages_array(messages)
await self.engine.make_execute_many(request, sequence)
async def get_messages_without_links(self) -> List[Message]:
request = f"SELECT * FROM message WHERE link IS NULL;"
messages = await self.engine.make_fetch_rows(request)
return self.__request_messages_to_message_class(messages)
async def update_message_links(self, messages: List[Message]) -> None:
request = f" UPDATE message SET link=($1) WHERE timestamp=($2) AND channel_id=($3)"
sequence = self.__make_link_update_values_from_messages_array(messages)
await self.engine.make_execute_many(request, sequence)
async def get_top_messages(
self,
after_ts: str,
user_id: str,
sorting_type: SortingType = SortingType.REPLIES,
top_count: int = 10
) -> List[Message]:
request = f"""
SELECT * FROM message
WHERE timestamp >= $1 AND username NOT IN
(SELECT ignore_username FROM IgnoreList WHERE author_username = $3)
ORDER BY {sorting_type.value} DESC
LIMIT $2;
"""
messages = await self.engine.make_fetch_rows(request, after_ts, top_count, user_id)
return self.__request_messages_to_message_class(messages)
async def get_top_messages_by_channel_id(
self,
channel_id: str,
after_ts: str,
user_id: str,
sorting_type: SortingType = SortingType.REPLIES,
top_count: int = 10,
) -> List[Message]:
request = f"""
SELECT * FROM message
WHERE
channel_id=$1
AND
timestamp >= $2
AND
username NOT IN (SELECT ignore_username FROM IgnoreList WHERE author_username = $4)
ORDER BY {sorting_type.value} DESC
LIMIT $3;
"""
messages = await self.engine.make_fetch_rows(
request, channel_id, after_ts, top_count, user_id
)
return self.__request_messages_to_message_class(messages)
async def get_top_messages_by_preset_name(
self,
preset_name: str,
after_ts: str,
user_id: str,
sorting_type: SortingType = SortingType.REPLIES,
top_count: int = 10,
) -> List[Message]:
request = f"""
WITH presets AS (
SELECT *
FROM preset
WHERE name = $1
AND (username = $2 OR username IS NULL)
ORDER BY username NULLS LAST
LIMIT 1
)
SELECT message.* FROM message
JOIN presets preset
ON message.channel_id=ANY(preset.channel_ids)
WHERE message.timestamp >= $3 AND message.username NOT IN
(SELECT ignore_username FROM IgnoreList WHERE author_username = $2)
ORDER BY {sorting_type.value} DESC
LIMIT $4;
"""
messages = await self.engine.make_fetch_rows(
request, preset_name, user_id, after_ts, top_count
)
return self.__request_messages_to_message_class(messages)
message_dao = MessageDAO(db_engine)
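# Hedged usage sketch (to be awaited inside a running event loop with the engine
# configured; the timestamp and user id are placeholders):
#
#   top = await message_dao.get_top_messages(
#       after_ts="1609459200.000000", user_id="U123", sorting_type=SortingType.REPLIES)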
|
19660
|
import numpy as np
import tensorflow as tf
from tqdm import trange
from fedsimul.utils.model_utils import batch_data
from fedsimul.utils.tf_utils import graph_size
from fedsimul.utils.tf_utils import process_grad
class Model(object):
'''
This is the tf model for the MNIST dataset with multiple class learner regression.
Images are 28px by 28px.
'''
def __init__(self, num_classes, optimizer, gpu_id=0, seed=1):
""" Initialize the learner.
Args:
num_classes: int
optimizer: tf.train.Optimizer
gpu_id: int, default 0
seed: int, default 1
"""
# params
self.num_classes = num_classes
# create computation graph
self.graph = tf.Graph()
with self.graph.as_default():
tf.set_random_seed(123 + seed)
_created = self.create_model(optimizer)
self.features = _created[0]
self.labels = _created[1]
self.train_op = _created[2]
self.grads = _created[3]
self.eval_metric_ops = _created[4]
self.loss = _created[5]
self.saver = tf.train.Saver()
# set the gpu resources
gpu_options = tf.compat.v1.GPUOptions(visible_device_list="{}".format(gpu_id), allow_growth=True)
config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
self.sess = tf.Session(graph=self.graph, config=config)
# self.sess = tf.Session(graph=self.graph)
# REVIEW: find memory footprint and compute cost of the model
self.size = graph_size(self.graph)
with self.graph.as_default():
self.sess.run(tf.global_variables_initializer())
metadata = tf.RunMetadata()
opts = tf.profiler.ProfileOptionBuilder.float_operation()
self.flops = tf.profiler.profile(self.graph, run_meta=metadata, cmd='scope', options=opts).total_float_ops
def create_model(self, optimizer):
""" Model function for Logistic Regression.
Args:
optimizer: tf.train.Optimizer
Returns:
tuple: (features, labels, train_op, grads, eval_metric_ops, loss)
"""
features = tf.placeholder(tf.float32, shape=[None, 784], name='features')
labels = tf.placeholder(tf.int64, shape=[None, ], name='labels')
logits = tf.layers.dense(inputs=features,
units=self.num_classes,
kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
predictions = {
"classes": tf.argmax(input=logits, axis=1),
"probabilities": tf.nn.softmax(logits, name="softmax_tensor")
}
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
grads_and_vars = optimizer.compute_gradients(loss)
grads, _ = zip(*grads_and_vars)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=tf.train.get_global_step())
eval_metric_ops = tf.count_nonzero(tf.equal(labels, predictions["classes"]))
return features, labels, train_op, grads, eval_metric_ops, loss
def set_params(self, latest_params=None, momentum=False, gamma=0.9):
""" Set parameters from server
Args:
latest_params: list
list of tf.Variables
momentum: boolean
gamma: float
TODO: update variable with its local variable and the value from
latest_params
TODO: DO NOT set_params from the global, instead, use the global gradient to update
"""
if latest_params is not None:
with self.graph.as_default():
# previous gradient
all_vars = tf.trainable_variables()
for variable, value in zip(all_vars, latest_params):
if momentum:
curr_val = self.sess.run(variable)
new_val = gamma * curr_val + (1 - gamma) * value
# TODO: use `assign` function instead of `load`
variable.load(new_val, self.sess)
else:
variable.load(value, self.sess)
def get_params(self):
""" Get model parameters.
Returns:
model_params: list
list of tf.Variables
"""
with self.graph.as_default():
model_params = self.sess.run(tf.trainable_variables())
return model_params
def get_gradients(self, data, model_len):
""" Access gradients of a given dataset.
Args:
data: dict
model_len: int
Returns:
num_samples: int
grads: tuple
"""
grads = np.zeros(model_len)
num_samples = len(data['y'])
with self.graph.as_default():
model_grads = self.sess.run(self.grads, feed_dict={self.features: data['x'],
self.labels: data['y']})
grads = process_grad(model_grads)
return num_samples, grads
def solve_inner(self, data, num_epochs=1, batch_size=32):
'''Solves local optimization problem.
Args:
data: dict with format {'x':[], 'y':[]}
num_epochs: int
batch_size: int
Returns:
soln: list
comp: float
'''
for _ in trange(num_epochs, desc='Epoch: ', leave=False, ncols=120):
for X, y in batch_data(data, batch_size):
with self.graph.as_default():
self.sess.run(self.train_op, feed_dict={self.features: X, self.labels: y})
soln = self.get_params()
comp = num_epochs * (len(data['y']) // batch_size) * batch_size * self.flops
return soln, comp
def test(self, data):
'''
Args:
data: dict of the form {'x': [], 'y': []}
Returns:
tot_correct: int
loss: float
'''
with self.graph.as_default():
tot_correct, loss = self.sess.run([self.eval_metric_ops, self.loss],
feed_dict={self.features: data['x'], self.labels: data['y']})
return tot_correct, loss
def close(self):
self.sess.close()
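# Hedged usage sketch: the optimizer choice and the train/test dicts below are
# placeholders; any tf.train.Optimizer and any {'x': ..., 'y': ...} data work.
#
#   model = Model(num_classes=10, optimizer=tf.train.GradientDescentOptimizer(0.01))
#   soln, comp = model.solve_inner({'x': train_x, 'y': train_y}, num_epochs=1, batch_size=32)
#   num_correct, loss = model.test({'x': test_x, 'y': test_y})
#   model.close()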
|
19703
|
import os
import yaml
import copy
import logging
from pathlib import Path
import torch
from torch.nn import *
from torch.optim import *
import torch.distributed as dist
from torch.optim.lr_scheduler import *
from torch.nn.parallel import DistributedDataParallel
from utils.metrics import *
from models import _get_model
torch.backends.cudnn.benchmark = True
class Argments(object):
@staticmethod
def _file_load(yaml_file):
with open(fr'{yaml_file}') as f:
y = yaml.safe_load(f)
return y
@staticmethod
def _module_load(d, part, **kargs):
module_obj = eval(d[part]['name'])
module_args = copy.deepcopy(d[part])
module_args.update(kargs)
del module_args['name']
part = module_obj(**module_args)
return part
def _modules_load(self):
for k, v in self._y.items():
if 'module' in k:
setattr(self, k, dict())
module = self.__dict__[k]
module['model'] = _get_model(**v['model'], model_type=self['setup/model_type']).cuda()
if self['setup/phase'] != 'infer':
module['optim'] = self._module_load(v, part='optim',
params=module['model'].parameters())
module['model'] = DistributedDataParallel(module['model'],
[self['setup/rank']])
module['lr_scheduler'] = self._module_load(v, part='lr_scheduler',
optimizer=module['optim'])
loss = [eval(l)(**v['loss_args'][l]) for l in v['loss']]
module['loss_with_weight'] = list(zip(loss, v['loss_weight']))
module['val_metric'] = eval(v['val_metric'])(**v['metric_args'])
module['test_metric'] = eval(v['test_metric'])(**v['metric_args'])
else:
module['model'] = DistributedDataParallel(module['model'],
[self['setup/rank']])
def __init__(self, yaml_file, cmd_args):
self.file_name = yaml_file
self._y = self._file_load(yaml_file)
if cmd_args.gpus != "-1":
self['setup/gpus'] = cmd_args.gpus
os.environ["CUDA_VISIBLE_DEVICES"] = self["setup/gpus"]
self['setup/index'] = cmd_args.index
self['setup/phase'] = cmd_args.phase
self['setup/local_rank'] = cmd_args.local_rank
world_size = len(self["setup/gpus"].replace(',', "").replace("'", ""))
model_path = f"outs/{self['setup/model_type']}/{self['module/model/name']}"
model_path += f"/{self['path/dataset']}"
if self['setup/index'] != -1:
model_path += f"_{self['setup/index']}"
if self['path/postfix'] != 'none':
model_path += f"_{self['path/postfix']}"
self['path/model_path'] = model_path
Path(model_path).mkdir(parents=True, exist_ok=True)
torch.cuda.set_device(cmd_args.local_rank)
torch.distributed.init_process_group(backend='nccl',
init_method=f'file://{Path(model_path).resolve()}/sharedfile',
world_size=world_size,
rank=self['setup/local_rank'])
self['setup/rank'] = dist.get_rank()
self['setup/dist_size'] = dist.get_world_size()
self._modules_load()
def reset(self):
for k, v in list(self.__dict__.items()):
if 'module' in k:
del self.__dict__[k]
torch.cuda.empty_cache()
self._modules_load()
def _get(self, *keys):
v = self._y
for k in keys:
v = v[k]
return v
def _update(self, *keys, value):
k = self._y
for i in range(len(keys) - 1):
k.setdefault(keys[i], {})
k = k[keys[i]]
k[keys[-1]] = value
def __str__(self):
return f'{self.file_name}\n{self._y}'
def __contains__(self, item):
        def search_recursively(d, t):
            for k, v in d.items():
                if k == t:
                    return True
                elif isinstance(v, dict):
                    if search_recursively(v, t):
                        return True
            return False
return search_recursively(self._y, item)
def __getitem__(self, key):
return self._get(*key.split('/'))
def __setitem__(self, key, value):
self._update(*key.split('/'), value=value)
if __name__ == '__main__':
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
file_handler = logging.FileHandler('log.log')
file_handler.setLevel(logging.INFO)
log.addHandler(stream_handler)
log.addHandler(file_handler)
    import argparse
    # placeholder command-line args so the demo constructor call is well-formed
    Args = Argments('test.yaml', argparse.Namespace(gpus="-1", index=-1, phase="train", local_rank=0))
Args._update('path', 'abcd', 'efgh', value='zzzz')
Args['path/cccc/dddd'] = 'ffff'
log.debug(Args)
log.debug(Args['path/cccc/dddd'])
# print(Args)
# print('path' in Args)
# print(Args['path/abcd/efgh'])
# print(Args['path/cccc/dddd'])
# print(Args.module['lr_scheduler'])
|
19705
|
from graph_peak_caller.multiplegraphscallpeaks import MultipleGraphsCallpeaks
from graph_peak_caller.intervals import Intervals
from graph_peak_caller import Configuration
from graph_peak_caller.reporter import Reporter
from offsetbasedgraph import GraphWithReversals as Graph, \
DirectedInterval, IntervalCollection, Block, SequenceGraph, Interval
import unittest
from graph_peak_caller.control.linearmap import LinearMap
from pyvg.sequences import SequenceRetriever
import logging
from graph_peak_caller.logging_config import set_logging_config
#set_logging_config(1)
import os
from graph_peak_caller.command_line_interface import run_argument_parser
class TestMultipleGraphsCallPeaks(unittest.TestCase):
def setUp(self):
self.chromosomes = ["1", "2", "3", "X", "Y"]
self.fragment_length = 5
self.read_length = 2
self.sample_reads = []
self.control_reads = []
self.linear_maps = []
self.sequence_retrievers = []
self.peaks = []
for chrom in self.chromosomes:
# Delete old files if existing
if os.path.isfile("multigraphs_%s_pvalues_indexes.npy" % chrom):
os.remove("multigraphs_%s_pvalues_indexes.npy" % chrom)
os.remove("multigraphs_%s_pvalues_values.npy" % chrom)
# Delete old files if existing
if os.path.isfile("multigraphs_%s_max_paths.intervalcollection" % chrom):
os.remove("multigraphs_%s_max_paths.intervalcollection" % chrom)
self._create_data()
self.config = Configuration()
self.config.fragment_length = self.fragment_length
self.config.read_length = self.read_length
self.config.has_control = False
self.config.min_background = 0.33
self.reporter = Reporter("multigraphs_")
def _create_data(self):
node_offset = 1
for chrom_number, chromosome in enumerate(self.chromosomes):
graph = Graph(
{i + node_offset: Block(10) for i in range(0, 3)},
{i+node_offset: [i+1+node_offset] for i in range(0, 2)})
linear_map = LinearMap.from_graph(graph)
linear_map_file_name = "linear_map_%s.npz" % chromosome
linear_map.to_file(linear_map_file_name)
self.linear_maps.append(linear_map_file_name)
self.sequence_retrievers.append(
SequenceRetriever({i+node_offset: "A" * 10
for i in range(0, 3)})
)
self._create_reads(chrom_number, chromosome, graph)
node_offset += 3
graph.convert_to_numpy_backend()
SequenceGraph.create_empty_from_ob_graph(graph).to_file(chromosome + ".nobg.sequences")
graph.to_file(chromosome + ".nobg")
def _create_reads(self, chrom_number, chrom, graph):
i = chrom_number
sample_reads = []
control_reads = []
peaks = [DirectedInterval(7, 2, [1 + 3*i, 2 + 3*i], graph)]
self.peaks.append(peaks)
for peak in peaks:
for i in range(0, 10):
left_sub = peak.get_subinterval(0, self.read_length)
sample_reads.append(left_sub)
control_reads.append(left_sub)
right_sub = peak.get_subinterval(
self.fragment_length - self.read_length,
self.fragment_length)
right_sub_reverse = right_sub.get_reverse()
sample_reads.append(right_sub_reverse)
control_reads.append(right_sub_reverse)
self.sample_reads.append(Intervals(sample_reads))
self.control_reads.append(Intervals(control_reads))
def test_run_from_init(self):
caller = MultipleGraphsCallpeaks(
self.chromosomes,
[chrom + ".nobg" for chrom in self.chromosomes],
self.sample_reads,
self.control_reads,
self.linear_maps,
self.config,
self.reporter
)
caller.run()
self.do_asserts()
def test_run_from_init_in_two_steps(self):
set_logging_config(2)
caller = MultipleGraphsCallpeaks(
self.chromosomes,
[chrom + ".nobg" for chrom in self.chromosomes],
self.sample_reads,
self.control_reads,
self.linear_maps,
self.config,
self.reporter,
stop_after_p_values=True
)
caller.run()
for i, chromosome in enumerate(self.chromosomes):
caller = MultipleGraphsCallpeaks(
self.chromosomes,
[chrom + ".nobg" for chrom in self.chromosomes],
None,
None,
None,
self.config,
self.reporter
)
caller.create_joined_q_value_mapping()
caller.run_from_p_values(only_chromosome=chromosome)
self.do_asserts()
def do_asserts(self):
for i, chromosome in enumerate(self.chromosomes):
final_peaks = IntervalCollection.create_list_from_file(
"multigraphs_" + chromosome + "_max_paths.intervalcollection")
for peak in self.peaks[i]:
assert peak in final_peaks
class TestMultipleGraphsCallPeaksCommandLine(TestMultipleGraphsCallPeaks):
    # Same test, but using the command line interface
def _create_reads(self, *args):
super(TestMultipleGraphsCallPeaksCommandLine, self)._create_reads(*args)
for intervals, chrom in zip(self.sample_reads, self.chromosomes):
IntervalCollection(intervals._intervals).to_file("test_sample_" + chrom + ".intervalcollection", text_file=True)
def test_typical_run(self):
print(" ========= Running start ====")
run_argument_parser(["callpeaks",
"-g", "*.nobg",
"-s", "test_sample_*.intervalcollection",
"-f", "%s" % self.fragment_length,
"-r", "%s" % self.read_length,
"-u", "100",
"-G", "150",
"-n", "multigraphs_",
"-p", "True",
"-D", "True"])
for i, chromosome in enumerate(self.chromosomes):
run_argument_parser(["callpeaks_whole_genome_from_p_values", chromosome,
"-d", "./",
"-f", "%s" % self.fragment_length,
"-r", "%s" % self.read_length,
"-n", "multigraphs_"])
self.do_asserts()
def test_count_unique_reads(self):
reads = [
IntervalCollection([
Interval(4, 10, [1, 2, 3]),
Interval(4, 5, [1]),
Interval(5, 5, [1]),
Interval(6, 2, [-3, -2, -1])
])
]
unique = MultipleGraphsCallpeaks.count_number_of_unique_reads(reads)
self.assertEqual(unique, 3)
if __name__ == "__main__":
unittest.main()
|